keyword
stringclasses
7 values
repo_name
stringlengths
8
98
file_path
stringlengths
4
244
file_extension
stringclasses
29 values
file_size
int64
0
84.1M
line_count
int64
0
1.6M
content
stringlengths
1
84.1M
language
stringclasses
14 values
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/FILTERING/ThresholdMower.cpp
.cpp
1,226
49
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Mathias Walzer $ // $Authors: Timo Sachsenberg $ // -------------------------------------------------------------------------- // #include <OpenMS/PROCESSING/FILTERING/ThresholdMower.h> using namespace std; namespace OpenMS { ThresholdMower::ThresholdMower() : DefaultParamHandler("ThresholdMower") { defaults_.setValue("threshold", 0.05, "Intensity threshold, peaks below this threshold are discarded"); defaultsToParam_(); } ThresholdMower::~ThresholdMower() = default; ThresholdMower::ThresholdMower(const ThresholdMower & source) : DefaultParamHandler(source) { } ThresholdMower & ThresholdMower::operator=(const ThresholdMower & source) { if (this != &source) { DefaultParamHandler::operator=(source); } return *this; } void ThresholdMower::filterPeakSpectrum(PeakSpectrum & spectrum) { filterSpectrum(spectrum); } void ThresholdMower::filterPeakMap(PeakMap & exp) { for (auto& s : exp) filterSpectrum(s); } }
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/MISC/SplinePackage.cpp
.cpp
1,530
65
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Lars Nilse $ // $Authors: Lars Nilse $ // -------------------------------------------------------------------------- #include <OpenMS/KERNEL/StandardTypes.h> #include <OpenMS/PROCESSING/MISC/SplinePackage.h> using namespace std; namespace OpenMS { SplinePackage::SplinePackage(std::vector<double> pos, const std::vector<double>& intensity) : spline_(pos, intensity) { if (!(pos.size() == intensity.size() && pos.size() > 1)) { throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "m/z (or RT) and intensity vectors either not of the same size or too short."); } pos_min_ = pos.front(); pos_max_ = pos.back(); pos_step_width_ = (pos_max_ - pos_min_) / (pos.size() - 1); } SplinePackage::~SplinePackage() = default; double SplinePackage::getPosMin() const { return pos_min_; } double SplinePackage::getPosMax() const { return pos_max_; } double SplinePackage::getPosStepWidth() const { return pos_step_width_; } bool SplinePackage::isInPackage(double pos) const { return pos >= pos_min_ && pos <= pos_max_; } double SplinePackage::eval(double pos) const { if (this->isInPackage(pos)) { return max(0.0, spline_.eval(pos)); } else { return 0; } } }
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/MISC/SplineInterpolatedPeaks.cpp
.cpp
9,432
301
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Lars Nilse $
// $Authors: Lars Nilse $
// --------------------------------------------------------------------------

#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/PROCESSING/MISC/SplinePackage.h>
#include <OpenMS/PROCESSING/MISC/SplineInterpolatedPeaks.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/KERNEL/MSExperiment.h>

using namespace std;

namespace OpenMS
{
  /// Construct directly from parallel position/intensity vectors.
  SplineInterpolatedPeaks::SplineInterpolatedPeaks(const std::vector<double>& pos, const std::vector<double>& intensity)
  {
    SplineInterpolatedPeaks::init_(pos, intensity);
  }

  /// Construct from a spectrum: positions are m/z values.
  SplineInterpolatedPeaks::SplineInterpolatedPeaks(const MSSpectrum& raw_spectrum)
  {
    std::vector<double> pos;
    std::vector<double> intensity;
    for (const auto &it : raw_spectrum)
    {
      pos.push_back(it.getMZ());
      intensity.push_back(it.getIntensity());
    }
    SplineInterpolatedPeaks::init_(pos, intensity);
  }

  /// Construct from a chromatogram: positions are RT values.
  SplineInterpolatedPeaks::SplineInterpolatedPeaks(const MSChromatogram& raw_chromatogram)
  {
    std::vector<double> rt;
    std::vector<double> intensity;
    for (const auto &it : raw_chromatogram)
    {
      rt.push_back(it.getRT());
      intensity.push_back(it.getIntensity());
    }
    SplineInterpolatedPeaks::init_(rt, intensity);
  }

  SplineInterpolatedPeaks::~SplineInterpolatedPeaks() = default;

  /// Shared constructor body: cleans the raw data (zero-runs, Thermo bug
  /// zeros), then splits the data into SplinePackages wherever the position
  /// spacing jumps, and builds one spline per package.
  /// @throws Exception::IllegalArgument if vectors differ in size or have < 3 points
  void SplineInterpolatedPeaks::init_(const std::vector<double>& pos, const std::vector<double>& intensity)
  {
    if (!(pos.size() == intensity.size() && pos.size() > 2))
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "m/z and intensity vectors either not of the same size or too short.");
    }

    const double new_package = 2; // start a new package if delta m/z is greater than new_package times previous one

    pos_min_ = pos.front();
    pos_max_ = pos.back();

    // remove unnecessary zeros, i.e. zero intensity data points with zeros to the left and right
    // (a data point survives if it, its left neighbour, or its right neighbour is non-zero)
    std::vector<double> pos_slim1; // slimmer vector after removal of zero-intensity datapoints from pos
    std::vector<double> intensity_slim1; // slimmer vector after removal of zero-intensity datapoints from intensity
    pos_slim1.reserve(pos.size());
    intensity_slim1.reserve(intensity.size());
    if (intensity[0] != 0 || intensity[1] != 0)
    {
      pos_slim1.push_back(pos[0]);
      intensity_slim1.push_back(intensity[0]);
    }
    bool last_intensity_zero = (intensity[0] == 0);
    bool current_intensity_zero = (intensity[0] == 0);
    bool next_intensity_zero = (intensity[1] == 0);
    for (size_t i = 1; i < pos.size() - 1; ++i)
    {
      // slide the three-point zero/non-zero window one position to the right
      last_intensity_zero = current_intensity_zero;
      current_intensity_zero = next_intensity_zero;
      next_intensity_zero = (intensity[i + 1] == 0);
      if (!last_intensity_zero || !current_intensity_zero || !next_intensity_zero)
      {
        pos_slim1.push_back(pos[i]);
        intensity_slim1.push_back(intensity[i]);
      }
    }
    if (intensity[pos.size() - 1] != 0 || intensity[pos.size() - 2] != 0)
    {
      pos_slim1.push_back(pos[pos.size() - 1]);
      intensity_slim1.push_back(intensity[pos.size() - 1]);
    }

    // remove Thermo bug zeros
    // (In some Thermo data appear odd zero intensity data points. Normal data points are sometimes quickly followed by a zero.
    // These zeros are clearly not part of the profile, but bugs. The following code snippet removes them. A datapoint is
    // "quickly followed" by a second one, if the m/z step is shorter than scaling_Thermo_bug times the previous m/z step.)
    std::vector<double> pos_slim2; // slimmer vector after removal of Thermo bugs from pos_slim1
    std::vector<double> intensity_slim2; // slimmer vector after removal of Thermo bugs from intensity_slim1
    const double scaling_Thermo_bug = 1.0 / 50.0; // scaling factor for Thermo bug
    pos_slim2.reserve(pos_slim1.size());
    intensity_slim2.reserve(intensity_slim1.size());
    // the first two points are always kept (the check needs two predecessors)
    pos_slim2.push_back(pos_slim1[0]);
    pos_slim2.push_back(pos_slim1[1]);
    intensity_slim2.push_back(intensity_slim1[0]);
    intensity_slim2.push_back(intensity_slim1[1]);
    for (size_t i = 2; i < pos_slim1.size(); ++i)
    {
      if (intensity_slim1[i] == 0)
      {
        // a zero that follows its predecessor abnormally quickly is a bug -> drop it
        if ((pos_slim1[i] - pos_slim1[i - 1]) < (pos_slim1[i - 1] - pos_slim1[i - 2]) * scaling_Thermo_bug)
        {
          continue;
        }
      }
      pos_slim2.push_back(pos_slim1[i]);
      intensity_slim2.push_back(intensity_slim1[i]);
    }

    // subdivide spectrum into packages
    // start_package[i] is true if a new package begins at index i (spacing jump by factor > new_package)
    std::vector<bool> start_package;
    start_package.push_back(true);
    start_package.push_back(false);
    for (size_t i = 2; i < pos_slim2.size(); ++i)
    {
      start_package.push_back((pos_slim2[i] - pos_slim2[i - 1]) / (pos_slim2[i - 1] - pos_slim2[i - 2]) > new_package);
    }

    // fill the packages
    std::vector<double> pos_package;
    std::vector<double> intensity_package;
    for (size_t i = 0; i < pos_slim2.size(); ++i)
    {
      if (start_package[i] && i > 0)
      {
        if (intensity_package.size() > 1)
        {
          // Two or more data points in package. At least one of them will be non-zero since unnecessary zeros removed above.
          packages_.emplace_back(pos_package, intensity_package);
        }
        pos_package.clear();
        intensity_package.clear();
      }
      pos_package.push_back(pos_slim2[i]);
      intensity_package.push_back(intensity_slim2[i]);
    }
    // add the last package
    if (intensity_package.size() > 1)
    {
      packages_.emplace_back(pos_package, intensity_package);
    }
  }

  /// smallest position of the raw input
  double SplineInterpolatedPeaks::getPosMin() const
  {
    return pos_min_;
  }

  /// largest position of the raw input
  double SplineInterpolatedPeaks::getPosMax() const
  {
    return pos_max_;
  }

  /// number of spline packages constructed from the input
  size_t SplineInterpolatedPeaks::size() const
  {
    return packages_.size();
  }

  /// Returns a Navigator over the packages; `scaling` scales the step width
  /// used by Navigator::getNextPos().
  /// @throws Exception::InvalidSize if no packages were created
  SplineInterpolatedPeaks::Navigator SplineInterpolatedPeaks::getNavigator(double scaling)
  {
    if (packages_.empty())
    {
      throw Exception::InvalidSize(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, 0, "packages_ must not be empty");
    }
    return Navigator(&packages_, pos_max_, scaling);
  }

  // Navigator keeps a non-owning pointer to the packages and remembers the
  // last package hit so that consecutive nearby queries start their search there.
  SplineInterpolatedPeaks::Navigator::Navigator(const std::vector<SplinePackage>* packages, double pos_max, double scaling) :
    packages_(packages),
    last_package_(0),
    pos_max_(pos_max),
    pos_step_width_scaling_(scaling)
  {
  }

  SplineInterpolatedPeaks::Navigator::Navigator() = default;

  SplineInterpolatedPeaks::Navigator::~Navigator() = default;

  /// Evaluates the interpolation at pos; 0.0 in gaps between packages.
  /// Searches left or right from the cached last_package_.
  double SplineInterpolatedPeaks::Navigator::eval(double pos)
  {
    if (pos < (*packages_)[last_package_].getPosMin())
    {
      // look left
      for (int i = (int) last_package_; i >= 0; --i)
      {
        if (pos > (*packages_)[i].getPosMax())
        {
          // pos fell into the gap right of package i
          last_package_ = i;
          return 0.0;
        }
        if (pos >= (*packages_)[i].getPosMin())
        {
          last_package_ = i;
          return (*packages_)[i].eval(pos);
        }
      }
    }
    else
    {
      // look right
      for (size_t i = last_package_; i < (size_t)(*packages_).size(); ++i)
      {
        if (pos < (*packages_)[i].getPosMin())
        {
          // pos fell into the gap left of package i
          last_package_ = i;
          return 0.0;
        }
        if (pos <= (*packages_)[i].getPosMax())
        {
          last_package_ = i;
          return (*packages_)[i].eval(pos);
        }
      }
    }
    return 0.0;
  }

  /// Returns the next sensible sampling position after pos: either a small
  /// step (scaled package step width) within the current package, or a jump
  /// to the start of the next package / pos_max_ at the right edge.
  double SplineInterpolatedPeaks::Navigator::getNextPos(double pos)
  {
    int min_index = 0;
    int max_index = static_cast<Int>((*packages_).size()) - 1;
    int i = static_cast<Int>(last_package_);
    SplinePackage package = (*packages_)[i];
    // find correct package
    while (!(package.isInPackage(pos)))
    {
      if (pos < package.getPosMin())
      {
        --i;
        // check index limit
        if (i < min_index)
        {
          last_package_ = min_index;
          return (*packages_)[min_index].getPosMin();
        }
        // m/z in the gap?
        package = (*packages_)[i];
        if (pos > package.getPosMax())
        {
          last_package_ = i + 1;
          return (*packages_)[i + 1].getPosMin();
        }
      }
      else if (pos > package.getPosMax())
      {
        ++i;
        // check index limit
        if (i > max_index)
        {
          last_package_ = max_index;
          return pos_max_;
        }
        // m/z in the gap?
        package = (*packages_)[i];
        if (pos < package.getPosMin())
        {
          last_package_ = i;
          return package.getPosMin();
        }
      }
    }
    // find m/z in the package
    if (pos + pos_step_width_scaling_ * package.getPosStepWidth() > package.getPosMax())
    {
      // The next step gets us outside the current package.
      // Let's move to the package to the right.
      ++i;
      // check index limit
      if (i > max_index)
      {
        last_package_ = max_index;
        return pos_max_;
      }
      // jump to min m/z of next package
      last_package_ = i;
      return (*packages_)[i].getPosMin();
    }
    else
    {
      // make a small step within the package
      last_package_ = i;
      return pos + pos_step_width_scaling_ * package.getPosStepWidth();
    }
  }
}
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/MISC/DataFilters.cpp
.cpp
12,249
492
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: Marc Sturm $ // -------------------------------------------------------------------------- #include <OpenMS/PROCESSING/MISC/DataFilters.h> #include <OpenMS/KERNEL/Feature.h> #include <OpenMS/METADATA/MetaInfo.h> #include <OpenMS/KERNEL/ConsensusFeature.h> using namespace std; namespace OpenMS { bool DataFilters::DataFilter::operator==(const DataFilter & rhs) const { return field == rhs.field && op == rhs.op && value == rhs.value && value_string == rhs.value_string && meta_name == rhs.meta_name && value_is_numerical == rhs.value_is_numerical; } ///Inequality operator bool DataFilters::DataFilter::operator!=(const DataFilter & rhs) const { return !operator==(rhs); } String DataFilters::DataFilter::toString() const { String out; //field if (field == INTENSITY) { out = "Intensity "; } else if (field == QUALITY) { out = "Quality "; } else if (field == CHARGE) { out = "Charge "; } else if (field == SIZE) { out = "Size "; } else if (field == META_DATA) { out = "Meta::" + meta_name + " "; } //operation if (op == GREATER_EQUAL) { out += ">= "; } else if (op == EQUAL) { out += "= "; } else if (op == LESS_EQUAL) { out += "<= "; } else if (op == EXISTS) { out += "exists"; } //value if (field == META_DATA) { if (op != EXISTS) { if (value_is_numerical) { out = out + value; } else { out = out + "\"" + value_string + "\""; } } return out; } out = out + value; return out; } void DataFilters::DataFilter::fromString(const String & filter) { bool meta = false; String tmp = filter; tmp.trim(); StringList parts; tmp.split(' ', parts); SignedSize size = parts.size(); if (size < 2) { throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "invalid filter format", tmp); } //field tmp = parts[0]; tmp.toLower(); if (tmp 
== "intensity") { field = INTENSITY; } else if (tmp == "charge") { field = CHARGE; } else if (tmp == "size") { field = SIZE; } else if (tmp == "quality") { field = QUALITY; } else if (tmp.hasPrefix(String("meta::"))) { meta = true; field = META_DATA; meta_name = tmp.suffix(tmp.size() - 6); } else { throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "invalid field name", tmp); } //operation tmp = parts[1]; if (tmp == ">=") { op = GREATER_EQUAL; } else if (tmp == "=") { op = EQUAL; } else if (tmp == "<=") { op = LESS_EQUAL; } else if (tmp == "exists" && meta) { op = EXISTS; return; } else throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "invalid operator", tmp); //value if (size > 3) // string values may contain spaces, implode to a single string { tmp.concatenate(parts.begin() + 2, parts.end(), " "); } else if (size == 3) { tmp = parts[2]; } else // size < 3 && operation is binary (only "exists" is unary) --> invalid { throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "invalid filter format", tmp); } try { value = tmp.toDouble(); value_is_numerical = true; } catch (Exception::ConversionError&) { value_is_numerical = false; if (!(tmp.hasPrefix("\"") && tmp.hasSuffix("\""))) { throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "invalid value", tmp); } else { tmp = tmp.substr(1, tmp.size() - 2); } if (!meta) // non meta values must be numerical { throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "invalid value", tmp); } else { value_string = tmp; } } } void DataFilters::add(const DataFilter& filter) { //activate if not empty is_active_ = true; filters_.push_back(filter); if (filter.field == DataFilters::META_DATA) { meta_indices_.push_back(MetaInfo::registry().getIndex(filter.meta_name)); } else { meta_indices_.push_back(0); } } void DataFilters::remove(Size index) { if (index >= filters_.size()) { throw Exception::IndexOverflow(__FILE__, __LINE__, 
OPENMS_PRETTY_FUNCTION, index, filters_.size()); } filters_.erase(filters_.begin() + index); meta_indices_.erase(meta_indices_.begin() + index); //disable if empty if (size() == 0) { is_active_ = false; } } void DataFilters::replace(Size index, const DataFilter & filter) { if (index >= filters_.size()) { throw Exception::IndexOverflow(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, index, filters_.size()); } filters_[index] = filter; if (filter.field == DataFilters::META_DATA) { meta_indices_[index] = MetaInfo::registry().getIndex(filter.meta_name); } else { meta_indices_[index] = 0; } } void DataFilters::clear() { filters_.clear(); meta_indices_.clear(); is_active_ = false; } Size DataFilters::size() const { return filters_.size(); } const DataFilters::DataFilter & DataFilters::operator[](Size index) const { if (index >= filters_.size()) { throw Exception::IndexOverflow(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, index, filters_.size()); } return filters_[index]; } bool DataFilters::passes(const Feature & feature) const { if (!is_active_) { return true; } for (Size i = 0; i < filters_.size(); i++) { const DataFilters::DataFilter & filter = filters_[i]; if (filter.field == INTENSITY) { if (filter.op == GREATER_EQUAL && feature.getIntensity() < filter.value) { return false; } else if (filter.op == LESS_EQUAL && feature.getIntensity() > filter.value) { return false; } else if (filter.op == EQUAL && feature.getIntensity() != filter.value) { return false; } } else if (filter.field == QUALITY) { if (filter.op == GREATER_EQUAL && feature.getOverallQuality() < filter.value) { return false; } else if (filter.op == LESS_EQUAL && feature.getOverallQuality() > filter.value) { return false; } else if (filter.op == EQUAL && feature.getOverallQuality() != filter.value) { return false; } } else if (filter.field == CHARGE) { if (filter.op == EQUAL && feature.getCharge() != filter.value) { return false; } else if (filter.op == GREATER_EQUAL && feature.getCharge() < filter.value) { 
return false; } else if (filter.op == LESS_EQUAL && feature.getCharge() > filter.value) { return false; } } else if (filter.field == SIZE) { if (filter.op == EQUAL && feature.getSubordinates().size() != filter.value) { return false; } else if (filter.op == GREATER_EQUAL && feature.getSubordinates().size() < filter.value) { return false; } else if (filter.op == LESS_EQUAL && feature.getSubordinates().size() > filter.value) { return false; } } else if (filter.field == META_DATA) { const MetaInfoInterface & mii = static_cast<MetaInfoInterface>(feature); if (!metaPasses_(mii, filter, meta_indices_[i])) { return false; } } } return true; } bool DataFilters::passes(const ConsensusFeature & consensus_feature) const { if (!is_active_) { return true; } for (Size i = 0; i < filters_.size(); i++) { const DataFilters::DataFilter & filter = filters_[i]; if (filter.field == INTENSITY) { if (filter.op == GREATER_EQUAL && consensus_feature.getIntensity() < filter.value) { return false; } else if (filter.op == LESS_EQUAL && consensus_feature.getIntensity() > filter.value) { return false; } else if (filter.op == EQUAL && consensus_feature.getIntensity() != filter.value) { return false; } } else if (filter.field == QUALITY) { if (filter.op == GREATER_EQUAL && consensus_feature.getQuality() < filter.value) { return false; } else if (filter.op == LESS_EQUAL && consensus_feature.getQuality() > filter.value) { return false; } else if (filter.op == EQUAL && consensus_feature.getQuality() != filter.value) { return false; } } else if (filter.field == CHARGE) { if (filter.op == EQUAL && consensus_feature.getCharge() != filter.value) { return false; } else if (filter.op == GREATER_EQUAL && consensus_feature.getCharge() < filter.value) { return false; } else if (filter.op == LESS_EQUAL && consensus_feature.getCharge() > filter.value) { return false; } } else if (filter.field == SIZE) { if (filter.op == EQUAL && consensus_feature.size() != filter.value) { return false; } else if (filter.op == 
GREATER_EQUAL && consensus_feature.size() < filter.value) { return false; } else if (filter.op == LESS_EQUAL && consensus_feature.size() > filter.value) { return false; } } else if (filter.field == META_DATA) { const MetaInfoInterface & mii = static_cast<MetaInfoInterface>(consensus_feature); if (!metaPasses_(mii, filter, meta_indices_[i])) { return false; } } } return true; } bool DataFilters::metaPasses_(const MetaInfoInterface& meta_interface, const DataFilters::DataFilter& filter, Size index) const { if (!meta_interface.metaValueExists((UInt)index)) { return false; } else if (filter.op != EXISTS) { const DataValue& data_value = meta_interface.getMetaValue((UInt)index); if (!filter.value_is_numerical) { if (data_value.valueType() != DataValue::STRING_VALUE) { return false; } else { // for string values, equality is the only valid operation (besides "exists", see above) if (filter.op != EQUAL) { return false; } else if (filter.value_string != data_value.toString()) { return false; } } } else // value_is_numerical { if (data_value.valueType() == DataValue::STRING_VALUE || data_value.valueType() == DataValue::EMPTY_VALUE) { return false; } else { if (filter.op == EQUAL && (double)data_value != filter.value) { return false; } else if (filter.op == LESS_EQUAL && (double)data_value > filter.value) { return false; } else if (filter.op == GREATER_EQUAL && (double)data_value < filter.value) { return false; } } } } return true; } void DataFilters::setActive(bool is_active) { is_active_ = is_active; } } //Namespace
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/SPECTRAMERGING/SpectraMerger.cpp
.cpp
6,278
125
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow, Andreas Bertsch $
// --------------------------------------------------------------------------
//

#include <OpenMS/PROCESSING/SPECTRAMERGING/SpectraMerger.h>

using namespace std;

namespace OpenMS
{
  /// Registers the default parameters for all merging/averaging strategies:
  /// common binning options, Gaussian averaging, top-hat averaging, block
  /// merging, and same-precursor MS/MS merging.
  SpectraMerger::SpectraMerger() :
    DefaultParamHandler("SpectraMerger")
  {
    // common
    defaults_.setValue("mz_binning_width", 5.0, "minimum m/z distance for two data points (profile data) or peaks (centroided data) to be considered distinct. Closer data points or peaks will be merged.", {"advanced"});
    defaults_.setMinFloat("mz_binning_width", 0.0);

    defaults_.setValue("mz_binning_width_unit", "ppm", "Unit in which the distance between two data points or peaks is given.", {"advanced"});
    defaults_.setValidStrings("mz_binning_width_unit", {"Da", "ppm"});

    defaults_.setValue("sort_blocks", "RT_ascending", "Sort blocks by <?> before merging them (useful for precursor order)", {"advanced"});
    defaults_.setValidStrings("sort_blocks", {"RT_ascending", "RT_descending"});

    // Gaussian average
    defaults_.setValue("average_gaussian:spectrum_type", "automatic", "Spectrum type of the MS level to be averaged");
    defaults_.setValidStrings("average_gaussian:spectrum_type", {"profile", "centroid", "automatic"});
    defaults_.setValue("average_gaussian:ms_level", 1, "If set to be 0, each MS level will be merged from 1 to max. Otherwise, average spectra of this level. All other spectra remain unchanged.");
    defaults_.setMinInt("average_gaussian:ms_level", 0);
    defaults_.setValue("average_gaussian:rt_FWHM", 5.0, "FWHM of Gauss curve in seconds to be averaged over.");
    defaults_.setMinFloat("average_gaussian:rt_FWHM", 0.0);
    defaults_.setMaxFloat("average_gaussian:rt_FWHM", 10e10);
    defaults_.setValue("average_gaussian:cutoff", 0.01, "Intensity cutoff for Gaussian. The Gaussian RT profile decreases from 1 at its apex to 0 at infinity. Spectra for which the intensity of the Gaussian drops below the cutoff do not contribute to the average.", {"advanced"});
    defaults_.setMinFloat("average_gaussian:cutoff", 0.0);
    defaults_.setMaxFloat("average_gaussian:cutoff", 1.0);
    defaults_.setValue("average_gaussian:precursor_mass_tol", 0.0, "PPM mass tolerance for precursor mass. If set, MSn (n>2) spectra of precursor masses within the tolerance are averaged.");
    defaults_.setValue("average_gaussian:precursor_max_charge", 1, "Possible maximum precursor ion charge. Effective only when average_gaussian:precursor_mass_tol option is active.");
    defaults_.setMinFloat("average_gaussian:precursor_mass_tol", 0.0);
    defaults_.setMinInt("average_gaussian:precursor_max_charge", 1);

    // top-hat average
    defaults_.setValue("average_tophat:spectrum_type", "automatic", "Spectrum type of the MS level to be averaged");
    defaults_.setValidStrings("average_tophat:spectrum_type", {"profile", "centroid", "automatic"});
    defaults_.setValue("average_tophat:ms_level", 1, "If set to be 0, each MS level will be merged from 1 to max. Otherwise, average spectra of this level. All other spectra remain unchanged.");
    defaults_.setMinInt("average_tophat:ms_level", 0);
    defaults_.setValue("average_tophat:rt_range", 5.0, "RT range to be averaged over, i.e. +/-(RT range)/2 from each spectrum.");
    defaults_.setMinFloat("average_tophat:rt_range", 0.0);
    defaults_.setMaxFloat("average_tophat:rt_range", 10e10);
    defaults_.setValue("average_tophat:rt_unit", "scans", "Unit for RT range.");
    defaults_.setValidStrings("average_tophat:rt_unit", {"scans", "seconds"});

    // block merging
    defaults_.setValue("block_method:ms_levels", ListUtils::create<Int>("1"), "Merge spectra of this level. All spectra with other MS levels remain untouched.");
    defaults_.setMinInt("block_method:ms_levels", 1);
    defaults_.setValue("block_method:rt_block_size", 5, "Maximum number of scans to be summed up.");
    defaults_.setMinInt("block_method:rt_block_size", 1);
    defaults_.setValue("block_method:rt_max_length", 0.0, "Maximum RT size of the block in seconds (0.0 = no size restriction).");
    defaults_.setMinFloat("block_method:rt_max_length", 0.0);
    defaults_.setMaxFloat("block_method:rt_max_length", 10e10);

    // same precursor MS/MS merging
    defaults_.setValue("precursor_method:mz_tolerance", 10e-5, "Max m/z distance of the precursor entries of two spectra to be merged in [Da].");
    defaults_.setMinFloat("precursor_method:mz_tolerance", 0);
    defaults_.setValue("precursor_method:mass_tolerance", .0, "Max mass distance of the precursor entries of two spectra to be merged in [Da]. Active when set to a positive value.");
    defaults_.setMinFloat("precursor_method:mass_tolerance", 0);
    defaults_.setValue("precursor_method:rt_tolerance", 5.0, "Max RT distance of the precursor entries of two spectra to be merged in [s].");
    defaults_.setMinFloat("precursor_method:rt_tolerance", 0);

    defaultsToParam_();
  }

  // Copies only the parameter-handler state; the ProgressLogger is
  // deliberately default-constructed rather than copied.
  SpectraMerger::SpectraMerger(const SpectraMerger & source) :
    DefaultParamHandler(source),
    ProgressLogger() //we probably want a new ProgressLogger when we copy
  {
  }

  SpectraMerger::~SpectraMerger() = default;

  // Assignment mirrors the copy constructor: only parameters are copied,
  // the ProgressLogger state is left untouched.
  SpectraMerger & SpectraMerger::operator=(const SpectraMerger & source)
  {
    if (this != &source)
    {
      DefaultParamHandler::operator=(source);
    }
    return *this;
  }
}
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/SCALING/Normalizer.cpp
.cpp
1,450
58
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Mathias Walzer $ // $Authors: Andreas Bertsch $ // -------------------------------------------------------------------------- // #include <OpenMS/PROCESSING/SCALING/Normalizer.h> using namespace std; namespace OpenMS { Normalizer::Normalizer() : DefaultParamHandler("Normalizer") { defaults_.setValue("method", "to_one", "Normalize via dividing by TIC ('to_TIC') per spectrum (i.e. all peaks sum to 1) or normalize to max. intensity to one ('to_one') per spectrum."); defaults_.setValidStrings("method", {"to_one","to_TIC"}); defaultsToParam_(); } Normalizer::~Normalizer() = default; Normalizer::Normalizer(const Normalizer & source) : DefaultParamHandler(source) { } Normalizer & Normalizer::operator=(const Normalizer & source) { if (this != &source) { DefaultParamHandler::operator=(source); } return *this; } void Normalizer::filterPeakSpectrum(PeakSpectrum& spectrum) const { filterSpectrum(spectrum); } void Normalizer::filterPeakMap(PeakMap& exp) const { for (auto& spectrum : exp) { filterSpectrum(spectrum); } } void Normalizer::updateMembers_() { method_ = param_.getValue("method").toString(); } }
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/SCALING/SqrtScaler.cpp
.cpp
1,020
47
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Mathias Walzer $ // $Authors: $ // -------------------------------------------------------------------------- // #include <OpenMS/PROCESSING/SCALING/SqrtScaler.h> using namespace std; namespace OpenMS { SqrtScaler::SqrtScaler() : DefaultParamHandler("SqrtScaler") { } SqrtScaler::~SqrtScaler() = default; SqrtScaler::SqrtScaler(const SqrtScaler & source) = default; SqrtScaler & SqrtScaler::operator=(const SqrtScaler & source) { if (this != &source) { DefaultParamHandler::operator=(source); } return *this; } void SqrtScaler::filterPeakSpectrum(PeakSpectrum & spectrum) { filterSpectrum(spectrum); } void SqrtScaler::filterPeakMap(PeakMap & exp) { for (auto& spectrum : exp) { filterSpectrum(spectrum); } } }
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/SCALING/RankScaler.cpp
.cpp
1,019
46
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Mathias Walzer $ // $Authors: $ // -------------------------------------------------------------------------- // #include <OpenMS/PROCESSING/SCALING/RankScaler.h> using namespace std; namespace OpenMS { RankScaler::RankScaler() : DefaultParamHandler("RankScaler") { } RankScaler::~RankScaler() = default; RankScaler::RankScaler(const RankScaler & source) = default; RankScaler & RankScaler::operator=(const RankScaler & source) { if (this != &source) { DefaultParamHandler::operator=(source); } return *this; } void RankScaler::filterPeakSpectrum(PeakSpectrum & spectrum) { filterSpectrum(spectrum); } void RankScaler::filterPeakMap(PeakMap & exp) { for (auto& spectrum : exp) { filterSpectrum(spectrum); } } }
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/DEISOTOPING/Deisotoper.cpp
.cpp
19,690
546
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------

#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/CoarseIsotopePatternGenerator.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/PROCESSING/DEISOTOPING/Deisotoper.h>
#include <OpenMS/PROCESSING/FILTERING/NLargest.h>
#include <OpenMS/PROCESSING/FILTERING/ThresholdMower.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/MATH/MathFunctions.h>

namespace OpenMS
{
  // static
  /**
    @brief Deisotope a (sorted) spectrum by testing each peak as a potential monoisotopic
           peak of an isotopic cluster and validating extensions against a coarse
           averagine isotope model via a Kullback-Leibler divergence check.

    For every peak not yet assigned to a cluster, all charges in [min_charge, max_charge]
    are tried; for each charge, peaks at the expected C13/C12 spacings are collected as
    long as the observed intensity pattern stays close enough to the averagine model.
    Among all candidate clusters sharing the same monoisotopic peak, the largest one
    (ties broken by higher charge) wins. Selected peaks are kept / collapsed according
    to keep_only_deisotoped, make_single_charged, etc.

    @param spec In/out spectrum; must be sorted by position (precondition).
    @param fragment_tolerance Matching tolerance; must be <= 100 ppm or <= 0.1 Da.
    @param fragment_unit_ppm If true, tolerance is interpreted as ppm.
    @param number_of_final_peaks If > 0, keep only this many most intense peaks first.
    @param min_charge,max_charge Charge range to test (low to high).
    @param keep_only_deisotoped If true, peaks not part of any cluster are dropped.
    @param min_isopeaks,max_isopeaks Cluster size limits; both must be >= 2.
    @param make_single_charged Convert monoisotopic peaks to charge 1 m/z.
    @param annotate_charge Append an integer data array "charge".
    @param annotate_iso_peak_count Append an integer data array "iso_peak_count".
    @param add_up_intensity Sum cluster intensities into the monoisotopic peak.
    @throws Exception::IllegalArgument on invalid tolerance or isopeak limits.
  */
  void Deisotoper::deisotopeWithAveragineModel(MSSpectrum& spec,
                                               double fragment_tolerance,
                                               bool fragment_unit_ppm,
                                               int number_of_final_peaks,
                                               int min_charge,
                                               int max_charge,
                                               bool keep_only_deisotoped,
                                               unsigned int min_isopeaks,
                                               unsigned int max_isopeaks,
                                               bool make_single_charged,
                                               bool annotate_charge,
                                               bool annotate_iso_peak_count,
                                               bool add_up_intensity)
  {
    OPENMS_PRECONDITION(spec.isSorted(), "Spectrum must be sorted.");

    // guard against tolerances so wide that unrelated peaks would be matched
    if ((fragment_unit_ppm && fragment_tolerance > 100) || (!fragment_unit_ppm && fragment_tolerance > 0.1))
    {
      throw Exception::IllegalArgument(
          __FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "Fragment tolerance must not be greater than 100 ppm or 0.1 Da");
    }

    if (min_isopeaks < 2 || max_isopeaks < 2 || min_isopeaks > max_isopeaks)
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "Minimum/maximum number of isotopic peaks must be at least 2 (and min_isopeaks <= max_isopeaks).");
    }

    if (spec.empty())
    {
      return;
    }

    // remove 0 intensity peaks
    ThresholdMower threshold_mower_filter;
    threshold_mower_filter.filterPeakSpectrum(spec);

    // discard low-intensity peaks
    if (number_of_final_peaks > 0)
    {
      // only keep number_of_final_peaks highest peaks
      NLargest nlargest_filter = NLargest(number_of_final_peaks);
      nlargest_filter.filterPeakSpectrum(spec);
      spec.sortByPosition(); // NLargest sorts by intensity; restore position order
    }

    // store additional information if specified
    Size charge_index(0);
    Size iso_peak_count_index(0);

    // reserve integer data array to store charge of peaks
    if (annotate_charge)
    {
      // expand to hold one additional integer data array to hold the charge
      spec.getIntegerDataArrays().resize(spec.getIntegerDataArrays().size() + 1);
      spec.getIntegerDataArrays().back().setName("charge");
      charge_index = spec.getIntegerDataArrays().size() - 1;
    }
    // reserve integer data array to store number of isotopic peaks for each isotopic pattern
    if (annotate_iso_peak_count)
    {
      spec.getIntegerDataArrays().resize(spec.getIntegerDataArrays().size() + 1);
      spec.getIntegerDataArrays().back().setName("iso_peak_count");
      iso_peak_count_index = spec.getIntegerDataArrays().size() - 1;
    }

    // during discovery phase, work on a constant reference (just to make sure we do not modify spec)
    const MSSpectrum& old_spectrum = spec;

    // determine charge seeds and extend them
    std::vector<Size> mono_isotopic_peak_charge(old_spectrum.size(), 0); // 0 == not a monoisotopic peak
    std::vector<Int> features(old_spectrum.size(), -1);                  // -1 == peak not assigned to any cluster
    std::vector<double> mono_iso_peak_intensity(old_spectrum.size(), 0);
    std::vector<Size> iso_peak_count(old_spectrum.size(), 1);
    int feature_number = 0;

    std::vector<Size> extensions;
    // this should be a vector of vectors of length <= max_isopeaks to reflect multiple possible extensions,
    // but for runtime, this is implemented as a flat vector (each cluster padded to exactly max_isopeaks slots)
    std::vector<Size> clusters;
    // keeps track of the stored clusters sizes
    std::vector<Size> stored_cluster_size;
    std::vector<int> charges_of_extensions;

    // per-cluster-size KL-divergence cut-offs; index == cluster size, sizes >= 6 share the last entry.
    // NOTE(review): values look empirically tuned — confirm against the original parameterization.
    const float averagine_check_threshold[7] = {0.0f, 0.0f, 0.05f, 0.1f, 0.2f, 0.4f, 0.6f};

    bool has_precursor_data(false);
    double precursor_mass(0);
    if (old_spectrum.getPrecursors().size() == 1)
    {
      has_precursor_data = true;
      int precursor_charge = old_spectrum.getPrecursors()[0].getCharge();
      // uncharged (neutral) precursor mass from m/z and charge
      precursor_mass = (old_spectrum.getPrecursors()[0].getMZ() * precursor_charge) - (Constants::PROTON_MASS_U * precursor_charge);
    }

    for (size_t current_peak = 0; current_peak != old_spectrum.size(); ++current_peak)
    {
      // only process peaks which are not already in a cluster. Would form clusters identical to the cluster they are assigned to,
      // excluding its first peak(s), because only peaks with higher mz are considered for cluster-formation.
      if (features[current_peak] != -1) continue;

      const double current_mz = old_spectrum[current_peak].getMZ();
      const double current_intensity = old_spectrum[current_peak].getIntensity();
      const double tolerance_dalton = fragment_unit_ppm ? Math::ppmToMass(fragment_tolerance, current_mz) : fragment_tolerance;

      clusters.clear();
      stored_cluster_size.clear();
      charges_of_extensions.clear();

      for (int q = min_charge; q <= max_charge; ++q) // all charges are always considered -> order does not matter
      {
        // try to extend isotopes from mono-isotopic peak
        // do not bother testing charges q (and masses m) with: m/q > precursor_mass/q (or m > precursor_mass)
        if (has_precursor_data)
        {
          double current_theo_mass = (current_mz * q) - (Constants::PROTON_MASS_U * q);
          if (current_theo_mass > (precursor_mass + tolerance_dalton))
          {
            continue;
          }
        }

        // fail early if you do not find a single extension
        const double expected_first_mz = current_mz + Constants::C13C12_MASSDIFF_U / static_cast<double>(q);
        Int p = old_spectrum.findNearest(expected_first_mz, tolerance_dalton);
        if (p == -1) // test for missing peak
        {
          continue;
        }

        bool found_first_extension = true; // first extension already located above; reuse it in iteration i == 1
        bool has_min_isopeaks = true;
        extensions.clear();
        extensions.push_back(current_peak);

        // Save frequently used values for performance reasons
        std::vector<double> extensions_intensities = {current_intensity};

        // generate averagine distribution for peptide mass corresponding to current mz and charge
        std::vector<double> distr = CoarseIsotopePatternGenerator::approximateIntensities(q * (current_mz - Constants::PROTON_MASS_U), max_isopeaks);

        // sum of intensities of both observed and generated peaks is needed for normalization
        double spec_total_intensity = current_intensity;
        double dist_total_intensity = distr[0];

        for (UInt i = 1; i < max_isopeaks; ++i)
        {
          // find next extension (if not first; we found that already)
          if (found_first_extension)
          {
            found_first_extension = false;
          }
          else
          {
            const double expected_mz = current_mz + static_cast<double>(i) * Constants::C13C12_MASSDIFF_U / static_cast<double>(q);
            p = old_spectrum.findNearest(expected_mz, tolerance_dalton);
            if (p == -1) // test for missing peak
            {
              has_min_isopeaks = (i >= min_isopeaks);
              break;
            }
          }
          extensions_intensities.push_back(old_spectrum[p].getIntensity());

          // compare to averagine distribution
          // update sums of intensities
          spec_total_intensity += extensions_intensities.back();
          dist_total_intensity += distr[extensions.size()];

          // compute KL divergence (Sum over all x: P(x) * log(P(x) / Q(x));
          float KL = 0;
          for (unsigned int peak = 0; peak != extensions.size() + 1; ++peak)
          {
            // normalize spectrum intensities and averagine distribution intensities as this is a density measure and
            // thresholds were probably determined for distributions adding up to 1
            double Px = extensions_intensities[peak] / spec_total_intensity;
            KL += Px * log(Px / (distr[peak] / dist_total_intensity));
          }

          // choose threshold corresponding to cluster size
          float curr_threshold = (extensions.size() + 1 >= 6) ? averagine_check_threshold[6] : averagine_check_threshold[extensions.size() + 1];

          // compare to threshold and stop extension if distribution does not fit well enough
          if (KL > curr_threshold)
          {
            has_min_isopeaks = (i >= min_isopeaks);
            break;
          }

          // if model check passed:
          extensions.push_back(p);
          if (annotate_iso_peak_count)
          {
            // with "+ 1" the monoisotopic peak is counted as well
            // current peak is the monoisotopic peak
            iso_peak_count[current_peak] = i + 1;
          }
        }

        // cluster has been formed
        if (has_min_isopeaks)
        {
          // pad the flat cluster storage to max_isopeaks entries (unused slots hold 0)
          for (UInt i = 0; i != max_isopeaks; ++i)
          {
            clusters.push_back(i < extensions.size() ? extensions[i] : 0);
          }
          stored_cluster_size.push_back(extensions.size());
          charges_of_extensions.push_back(q);
        }
      } // all charges tested, clusters complete

      // if current_peak is possible monoisotopic peak, pick the best of its clusters and annotate peaks with a feature number
      if (!clusters.empty())
      {
        // pick cluster with largest size and highest charge (since all have the same monoisotopic peak)
        UInt best_idx = 0;
        if (stored_cluster_size.size() > 1) // more than one cluster was found
        {
          Size largest_size = 0;
          Int best_cluster_charge = min_charge;
          for (UInt i = 0; i != stored_cluster_size.size(); ++i)
          {
            if ((stored_cluster_size[i] > largest_size) ||
                ((stored_cluster_size[i] == largest_size) && (charges_of_extensions[i] > best_cluster_charge)))
            {
              largest_size = stored_cluster_size[i];
              best_cluster_charge = charges_of_extensions[i];
              best_idx = i;
            }
          }
        }

        // save result
        mono_isotopic_peak_charge[current_peak] = charges_of_extensions[best_idx];
        for (UInt i = 0; i != stored_cluster_size[best_idx]; ++i)
        {
          // index into the flat cluster storage: cluster best_idx occupies slots [best_idx*max_isopeaks, ...)
          features[clusters[best_idx * max_isopeaks + i]] = feature_number;
          if (add_up_intensity)
          {
            mono_iso_peak_intensity[current_peak] += old_spectrum[clusters[best_idx * max_isopeaks + i]].getIntensity();
          }
        }
        ++feature_number;
      }
    }

    // apply changes, i.e. select the indices which should survive
    std::vector<Size> select_idx;

    for (size_t i = 0; i != spec.size(); ++i)
    {
      Size z = mono_isotopic_peak_charge[i];
      if (annotate_charge)
      {
        spec.getIntegerDataArrays()[charge_index].push_back((int)z);
      }
      if (annotate_iso_peak_count)
      {
        spec.getIntegerDataArrays()[iso_peak_count_index].push_back((int)iso_peak_count[i]);
      }

      if (!keep_only_deisotoped)
      { // keep all unassigned peaks
        if (features[i] < 0) // this peak is not part of a cluster
        {
          select_idx.push_back(i);
          continue;
        }
      }

      if (z != 0) // this is a monoisotopic peak
      {
        if (add_up_intensity)
        {
          spec[i].setIntensity(mono_iso_peak_intensity[i]);
        }
        // convert mono-isotopic peak with charge assigned by deisotoping
        if (make_single_charged)
        {
          spec[i].setMZ(spec[i].getMZ() * z - (z - 1) * Constants::PROTON_MASS_U);
        }
        select_idx.push_back(i);
      }
    }

    // properly subsets all datapoints (incl. dataArrays)
    spec.select(select_idx);
    if (make_single_charged)
    {
      // single-charging changes m/z values, so position order must be restored
      spec.sortByPosition();
    }
    return;
  }

  // static
  /**
    @brief Deisotope a (sorted) spectrum using a simple decreasing-intensity model
           instead of the averagine/KL check.

    Charges are tested from high to low (important: first matching, i.e. highest,
    charge wins for a given monoisotopic peak). A candidate extension is rejected
    when use_decreasing_model is set, the isotope index is >= start_intensity_check,
    and its intensity exceeds that of the previous cluster member.

    @param spec In/out spectrum; must be sorted by position (precondition).
    @param fragment_tolerance Matching tolerance; must be <= 100 ppm or <= 0.1 Da.
    @param fragment_unit_ppm If true, tolerance is interpreted as ppm.
    @param min_charge,max_charge Charge range to test (tested high to low).
    @param keep_only_deisotoped If true, peaks not part of any cluster are dropped.
    @param min_isopeaks,max_isopeaks Cluster size limits; both must be >= 2.
    @param make_single_charged Convert monoisotopic peaks to charge 1 m/z.
    @param annotate_charge Append an integer data array "charge".
    @param annotate_iso_peak_count Append an integer data array "iso_peak_count".
    @param use_decreasing_model Enforce (weakly) decreasing intensities along the cluster.
    @param start_intensity_check First isotope index at which the decreasing check applies.
    @param add_up_intensity Sum cluster intensities into the monoisotopic peak.
    @param annotate_features Append an integer data array "feature_number".
    @throws Exception::IllegalArgument on invalid tolerance or isopeak limits.
  */
  void Deisotoper::deisotopeAndSingleCharge(MSSpectrum& spec,
                                            double fragment_tolerance,
                                            bool fragment_unit_ppm,
                                            int min_charge,
                                            int max_charge,
                                            bool keep_only_deisotoped,
                                            unsigned int min_isopeaks,
                                            unsigned int max_isopeaks,
                                            bool make_single_charged,
                                            bool annotate_charge,
                                            bool annotate_iso_peak_count,
                                            bool use_decreasing_model,
                                            unsigned int start_intensity_check,
                                            bool add_up_intensity,
                                            bool annotate_features)
  {
    OPENMS_PRECONDITION(spec.isSorted(), "Spectrum must be sorted.");

    if ((fragment_unit_ppm && fragment_tolerance > 100) || (!fragment_unit_ppm && fragment_tolerance > 0.1))
    {
      throw Exception::IllegalArgument(
          __FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "Fragment tolerance must not be greater than 100 ppm or 0.1 Da");
    }

    if (min_isopeaks < 2 || max_isopeaks < 2 || min_isopeaks > max_isopeaks)
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "Minimum/maximum number of isotopic peaks must be at least 2 (and min_isopeaks <= max_isopeaks).");
    }

    if (spec.empty())
    {
      return;
    }

    Size charge_index{};
    Size iso_peak_count_index{}, feature_number_dataarray_index{};

    // reserve integer data array to store charge of peaks
    if (annotate_charge)
    {
      // expand to hold one additional integer data array to hold the charge
      spec.getIntegerDataArrays().resize(spec.getIntegerDataArrays().size() + 1);
      spec.getIntegerDataArrays().back().setName("charge");
      charge_index = spec.getIntegerDataArrays().size()-1;
    }
    // reserve integer data array to store number of isotopic peaks for each isotopic pattern
    if (annotate_iso_peak_count)
    {
      spec.getIntegerDataArrays().resize(spec.getIntegerDataArrays().size() + 1);
      spec.getIntegerDataArrays().back().setName("iso_peak_count");
      iso_peak_count_index = spec.getIntegerDataArrays().size()-1;
    }
    if (annotate_features)
    {
      spec.getIntegerDataArrays().resize(spec.getIntegerDataArrays().size() + 1);
      spec.getIntegerDataArrays().back().setName("feature_number");
      feature_number_dataarray_index = spec.getIntegerDataArrays().size() - 1;
    }

    // during discovery phase, work on a constant reference (just to make sure we do not modify spec)
    const MSSpectrum& old_spectrum = spec;

    // determine charge seeds and extend them
    std::vector<size_t> mono_isotopic_peak(old_spectrum.size(), 0); // 0 == not a monoisotopic peak
    std::vector<int> features(old_spectrum.size(), -1);             // -1 == peak not assigned to any cluster
    std::vector<double> mono_iso_peak_intensity(old_spectrum.size(), 0);
    std::vector<Size> iso_peak_count(old_spectrum.size(), 1);
    int feature_number = 0;

    std::vector<size_t> extensions;

    bool has_precursor_data(false);
    double precursor_mass(0);
    if (old_spectrum.getPrecursors().size() == 1)
    {
      has_precursor_data = true;
      int precursor_charge = old_spectrum.getPrecursors()[0].getCharge();
      // NOTE(review): this function uses Constants::PROTON_MASS here while
      // deisotopeWithAveragineModel uses Constants::PROTON_MASS_U — confirm both
      // constants denote the same value or whether this is an inconsistency.
      precursor_mass = (old_spectrum.getPrecursors()[0].getMZ() * precursor_charge) - (Constants::PROTON_MASS * precursor_charge);
    }

    for (size_t current_peak = 0; current_peak != old_spectrum.size(); ++current_peak)
    {
      const double current_mz = old_spectrum[current_peak].getMZ();
      if (add_up_intensity)
      {
        mono_iso_peak_intensity[current_peak] = old_spectrum[current_peak].getIntensity();
      }

      for (int q = max_charge; q >= min_charge; --q) // important: test charge hypothesis from high to low
      {
        // try to extend isotopes from mono-isotopic peak
        // if extension larger then min_isopeaks possible:
        //  - save charge q in mono_isotopic_peak[]
        //  - annotate_charge all isotopic peaks with feature number
        if (features[current_peak] == -1) // only process peaks which have no assigned feature number
        {
          bool has_min_isopeaks = true;
          const double tolerance_dalton = fragment_unit_ppm ? Math::ppmToMass(fragment_tolerance, current_mz) : fragment_tolerance;

          // do not bother testing charges q (and masses m) with: m/q > precursor_mass/q (or m > precursor_mass)
          if (has_precursor_data)
          {
            double current_theo_mass = (current_mz * q) - (Constants::PROTON_MASS * q);
            if (current_theo_mass > (precursor_mass + tolerance_dalton))
            {
              continue;
            }
          }

          extensions.clear();
          extensions.push_back(current_peak);
          for (unsigned int i = 1; i < max_isopeaks; ++i)
          {
            const double expected_mz = current_mz + static_cast<double>(i) * Constants::C13C12_MASSDIFF_U / static_cast<double>(q);
            const int p = old_spectrum.findNearest(expected_mz, tolerance_dalton);
            if (p == -1) // test for missing peak
            {
              has_min_isopeaks = (i >= min_isopeaks);
              break;
            }
            else
            {
              // Possible improvement: include proper averagine model filtering
              // for now start at the peak with i = start_intensity_check to test hypothesis
              // if start_intensity_check = 0 or 1, start checking by comparing monoisotopic and second isotopic peak
              // if start_intensity_check = 2, start checking by comparing second isotopic peak with the third, etc.
              // Note: this is a common approach used in several other search engines
              if (use_decreasing_model && (i >= start_intensity_check) && (old_spectrum[p].getIntensity() > old_spectrum[extensions.back()].getIntensity()))
              {
                has_min_isopeaks = (i >= min_isopeaks);
                break;
              }

              // averagine check passed or skipped
              extensions.push_back(p);
              if (annotate_iso_peak_count)
              {
                iso_peak_count[current_peak] = i + 1; // with "+ 1" the monoisotopic peak is counted as well
              }
            }
          }

          if (has_min_isopeaks)
          {
            mono_isotopic_peak[current_peak] = q;
            for (unsigned int i = 0; i != extensions.size(); ++i)
            {
              features[extensions[i]] = feature_number;
              // monoisotopic peak intensity is already set above, add up the other intensities here
              if (add_up_intensity && (i != 0))
              {
                mono_iso_peak_intensity[current_peak] += old_spectrum[extensions[i]].getIntensity();
              }
            }
            ++feature_number;
          }
        }
      }
    }

    if (annotate_features)
    {
      // assign feature indices without copy
      // (explicitly qualified base-class swap moves 'features' into the data array)
      spec.getIntegerDataArrays()[feature_number_dataarray_index].std::vector<Int>::swap(features);
    }

    // apply changes, i.e. select the indices which should survive
    std::vector<Size> select_idx;

    for (size_t i = 0; i != spec.size(); ++i)
    {
      Size z = mono_isotopic_peak[i];
      if (annotate_charge)
      {
        spec.getIntegerDataArrays()[charge_index].push_back((int)z);
      }
      if (annotate_iso_peak_count)
      {
        spec.getIntegerDataArrays()[iso_peak_count_index].push_back((int)iso_peak_count[i]);
      }
      if (add_up_intensity)
      {
        spec[i].setIntensity(mono_iso_peak_intensity[i]);
      }

      if (!keep_only_deisotoped)
      { // keep all unassigned peaks
        if (features[i] < 0)
        {
          select_idx.push_back(i);
          continue;
        }
      }

      if (z == 0)
      {
        continue;
      }

      // convert mono-isotopic peak with charge assigned by deisotoping
      if (make_single_charged)
      {
        spec[i].setMZ(spec[i].getMZ() * z - (z - 1) * Constants::PROTON_MASS_U);
      }
      select_idx.push_back(i);
    }

    // properly subsets all datapoints (incl. dataArrays)
    spec.select(select_idx);
    spec.sortByPosition();
    return;
  }
} // namespace
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/CENTROIDING/PeakPickerHiRes.cpp
.cpp
24,233
629
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Author: Erhan Kenar $ // $Maintainer: Timo Sachsenberg $ // -------------------------------------------------------------------------- // #include <OpenMS/PROCESSING/CENTROIDING/PeakPickerHiRes.h> #include <OpenMS/PROCESSING/NOISEESTIMATION/SignalToNoiseEstimatorMedian.h> #include <OpenMS/KERNEL/OnDiscMSExperiment.h> #include <OpenMS/KERNEL/MSChromatogram.h> #include <OpenMS/KERNEL/Mobilogram.h> #include <OpenMS/MATH/MISC/SplineBisection.h> #include <OpenMS/MATH/MISC/CubicSpline2d.h> #include <OpenMS/KERNEL/SpectrumHelper.h> using namespace std; namespace OpenMS { PeakPickerHiRes::PeakPickerHiRes() : DefaultParamHandler("PeakPickerHiRes"), ProgressLogger() { // set default parameter values defaults_.setValue("signal_to_noise", 0.0, "Minimal signal-to-noise ratio for a peak to be picked (0.0 disables SNT estimation!)"); defaults_.setMinFloat("signal_to_noise", 0.0); defaults_.setValue("spacing_difference_gap", 4.0, "The extension of a peak is stopped if the spacing between two subsequent data points exceeds 'spacing_difference_gap * min_spacing'. 'min_spacing' is the smaller of the two spacings from the peak apex to its two neighboring points. '0' to disable the constraint. Not applicable to chromatograms.", {"advanced"}); defaults_.setMinFloat("spacing_difference_gap", 0.0); defaults_.setValue("spacing_difference", 1.5, "Maximum allowed difference between points during peak extension, in multiples of the minimal difference between the peak apex and its two neighboring points. If this difference is exceeded a missing point is assumed (see parameter 'missing'). A higher value implies a less stringent peak definition, since individual signals within the peak are allowed to be further apart. '0' to disable the constraint. 
Not applicable to chromatograms.", {"advanced"}); defaults_.setMinFloat("spacing_difference", 0.0); defaults_.setValue("missing", 1, "Maximum number of missing points allowed when extending a peak to the left or to the right. A missing data point occurs if the spacing between two subsequent data points exceeds 'spacing_difference * min_spacing'. 'min_spacing' is the smaller of the two spacings from the peak apex to its two neighboring points. Not applicable to chromatograms.", {"advanced"}); defaults_.setMinInt("missing", 0); defaults_.setValue("ms_levels", ListUtils::create<Int>(""), "List of MS levels for which the peak picking is applied. If empty, auto mode is enabled, all peaks which aren't picked yet will get picked. Other scans are copied to the output without changes."); defaults_.setMinInt("ms_levels", 1); defaults_.setValue("report_FWHM", "false", "Add metadata for FWHM (as floatDataArray named 'FWHM' or 'FWHM_ppm', depending on param 'report_FWHM_unit') for each picked peak."); defaults_.setValidStrings("report_FWHM", {"true","false"}); defaults_.setValue("report_FWHM_unit", "relative", "Unit of FWHM. Either absolute in the unit of input, e.g. 
'm/z' for spectra, or relative as ppm (only sensible for spectra, not chromatograms)."); defaults_.setValidStrings("report_FWHM_unit", {"relative","absolute"}); // parameters for STN estimator defaults_.insert("SignalToNoise:", SignalToNoiseEstimatorMedian< MSSpectrum >().getDefaults()); // write defaults into Param object param_ defaultsToParam_(); updateMembers_(); } PeakPickerHiRes::~PeakPickerHiRes() = default; void PeakPickerHiRes::pick(const MSSpectrum& input, MSSpectrum& output) const { std::vector<PeakBoundary> boundaries; pick(input, output, boundaries); } void PeakPickerHiRes::pick(const MSChromatogram& input, MSChromatogram& output) const { std::vector<PeakBoundary> boundaries; pick(input, output, boundaries); } void PeakPickerHiRes::pick(const Mobilogram& input, Mobilogram& output) const { std::vector<PeakBoundary> boundaries; pick(input, output, boundaries); } void PeakPickerHiRes::pick(const MSSpectrum& input, MSSpectrum& output, std::vector<PeakBoundary>& boundaries, bool check_spacings) const { // copy meta data of the input spectrum copySpectrumMeta(input, output); output.setType(SpectrumSettings::SpectrumType::CENTROID); int im_data_index = -1; if (input.containsIMData()) { // will throw if IM float data array is missing [[ maybe_unused ]] const auto [tmp_index, im_unit] = input.getIMData(); im_data_index = tmp_index; } pick_(input, output, boundaries, check_spacings, im_data_index); } void PeakPickerHiRes::pick(const MSChromatogram& input, MSChromatogram& output, std::vector<PeakBoundary>& boundaries, bool check_spacings) const { // copy meta data of the input chromatogram output.clear(true); output.ChromatogramSettings::operator=(input); output.MetaInfoInterface::operator=(input); output.setName(input.getName()); pick_(input, output, boundaries, check_spacings); } void PeakPickerHiRes::pick(const Mobilogram& input, Mobilogram& output, std::vector<PeakBoundary>& boundaries, bool check_spacings) const { // copy meta data of the input mobilogram 
output.clear(); output.setRT(input.getRT()); output.setDriftTimeUnit(input.getDriftTimeUnit()); pick_(input, output, boundaries, check_spacings); } template <typename ContainerType> void PeakPickerHiRes::pick_(const ContainerType& input, ContainerType& output, std::vector<PeakBoundary>& boundaries, bool check_spacings, int im_data_index) const { OPENMS_PRECONDITION( im_data_index < 0 || input.getFloatDataArrays()[im_data_index].size() == input.size(), "Ion Mobility array needs to have the same length as the m/z and intensity array.") bool has_im = (im_data_index >= 0); Size out_im_index = 0; if (has_im) { out_im_index = output.getFloatDataArrays().size(); output.getFloatDataArrays().resize(output.getFloatDataArrays().size() + 1); output.getFloatDataArrays()[out_im_index].setName( input.getFloatDataArrays()[im_data_index].getName() ); } Size out_fwhm_index = 0; if (report_FWHM_) { out_fwhm_index = output.getFloatDataArrays().size(); output.getFloatDataArrays().resize(output.getFloatDataArrays().size() + 1); output.getFloatDataArrays()[out_fwhm_index].setName( report_FWHM_as_ppm_ ? 
"FWHM_ppm" : "FWHM"); } // don't pick a spectrum with less than 5 data points if (input.size() < 5) { return; } // if both spacing constraints are disabled, don't check spacings at all: if ((spacing_difference_ == std::numeric_limits<double>::infinity()) && (spacing_difference_gap_ == std::numeric_limits<double>::infinity())) { check_spacings = false; } // signal-to-noise estimation SignalToNoiseEstimatorMedian< ContainerType > snt; snt.setParameters(param_.copy("SignalToNoise:", true)); if (signal_to_noise_ > 0.0) { snt.init(input); } // find local maxima in profile data for (Size i = 2; i < input.size() - 2; ++i) { double central_peak_mz = input[i].getPos(), central_peak_int = input[i].getIntensity(); double left_neighbor_mz = input[i - 1].getPos(), left_neighbor_int = input[i - 1].getIntensity(); double right_neighbor_mz = input[i + 1].getPos(), right_neighbor_int = input[i + 1].getIntensity(); // do not interpolate when the left or right support is a zero-data-point if (std::fabs(left_neighbor_int) < std::numeric_limits<double>::epsilon()) { continue; } if (std::fabs(right_neighbor_int) < std::numeric_limits<double>::epsilon()) { continue; } // MZ spacing sanity checks double left_to_central = 0.0, central_to_right = 0.0, min_spacing = 0.0; if (check_spacings) { left_to_central = central_peak_mz - left_neighbor_mz; central_to_right = right_neighbor_mz - central_peak_mz; min_spacing = (left_to_central < central_to_right) ? 
left_to_central : central_to_right; } double act_snt = 0.0, act_snt_l1 = 0.0, act_snt_r1 = 0.0; if (signal_to_noise_ > 0.0) { act_snt = snt.getSignalToNoise(i); act_snt_l1 = snt.getSignalToNoise(i - 1); act_snt_r1 = snt.getSignalToNoise(i + 1); } // look for peak cores meeting MZ and intensity/SNT criteria if ((central_peak_int > left_neighbor_int) && (central_peak_int > right_neighbor_int) && (act_snt >= signal_to_noise_) && (act_snt_l1 >= signal_to_noise_) && (act_snt_r1 >= signal_to_noise_) && (!check_spacings || ((left_to_central < spacing_difference_ * min_spacing) && (central_to_right < spacing_difference_ * min_spacing)))) { // special case: if a peak core is surrounded by more intense // satellite peaks (indicates oscillation rather than // real peaks) -> remove double act_snt_l2 = 0.0, act_snt_r2 = 0.0; if (signal_to_noise_ > 0.0) { act_snt_l2 = snt.getSignalToNoise(i - 2); act_snt_r2 = snt.getSignalToNoise(i + 2); } // checking signal-to-noise? if ((i > 1) && (i + 2 < input.size()) && (left_neighbor_int < input[i - 2].getIntensity()) && (right_neighbor_int < input[i + 2].getIntensity()) && (act_snt_l2 >= signal_to_noise_) && (act_snt_r2 >= signal_to_noise_) && (!check_spacings || ((left_neighbor_mz - input[i - 2].getPos() < spacing_difference_ * min_spacing) && (input[i + 2].getPos() - right_neighbor_mz < spacing_difference_ * min_spacing)))) { ++i; continue; } std::map<double, double> peak_raw_data; double weighted_im = 0; peak_raw_data[central_peak_mz] = central_peak_int; peak_raw_data[left_neighbor_mz] = left_neighbor_int; peak_raw_data[right_neighbor_mz] = right_neighbor_int; if (has_im) { weighted_im += input.getFloatDataArrays()[im_data_index][i] * input[i].getIntensity(); weighted_im += input.getFloatDataArrays()[im_data_index][i-1] * input[i-1].getIntensity(); weighted_im += input.getFloatDataArrays()[im_data_index][i+1] * input[i+1].getIntensity(); } // peak core found, now extend it // to the left Size k = 2; bool previous_zero_left(false); // 
no need to extend peak if previous intensity was zero Size missing_left(0); Size left_boundary(i - 1); // index of the left boundary for the spline interpolation while ((k <= i) && // prevent underflow (i - k + 1 > 0) && !previous_zero_left && (missing_left <= missing_) && (input[i - k].getIntensity() <= peak_raw_data.begin()->second) && (!check_spacings || (peak_raw_data.begin()->first - input[i - k].getPos() < spacing_difference_gap_ * min_spacing))) { double act_snt_lk = 0.0; if (signal_to_noise_ > 0.0) { act_snt_lk = snt.getSignalToNoise(i - k); } if ((act_snt_lk >= signal_to_noise_) && (!check_spacings || (peak_raw_data.begin()->first - input[i - k].getPos() < spacing_difference_ * min_spacing))) { peak_raw_data[input[i - k].getPos()] = input[i - k].getIntensity(); if (has_im) weighted_im += input.getFloatDataArrays()[im_data_index][i - k] * input[i - k].getIntensity(); } else { ++missing_left; if (missing_left <= missing_) { peak_raw_data[input[i - k].getPos()] = input[i - k].getIntensity(); if (has_im) weighted_im += input.getFloatDataArrays()[im_data_index][i - k] * input[i - k].getIntensity(); } } previous_zero_left = (input[i - k].getIntensity() == 0); left_boundary = i - k; ++k; } // to the right k = 2; bool previous_zero_right(false); // no need to extend peak if previous intensity was zero Size missing_right(0); Size right_boundary(i+1); // index of the right boundary for the spline interpolation while ((i + k < input.size()) && !previous_zero_right && (missing_right <= missing_) && (input[i + k].getIntensity() <= peak_raw_data.rbegin()->second) && (!check_spacings || (input[i + k].getPos() - peak_raw_data.rbegin()->first < spacing_difference_gap_ * min_spacing))) { double act_snt_rk = 0.0; if (signal_to_noise_ > 0.0) { act_snt_rk = snt.getSignalToNoise(i + k); } if ((act_snt_rk >= signal_to_noise_) && (!check_spacings || (input[i + k].getPos() - peak_raw_data.rbegin()->first < spacing_difference_ * min_spacing))) { peak_raw_data[input[i + k].getPos()] 
= input[i + k].getIntensity(); if (has_im) weighted_im += input.getFloatDataArrays()[im_data_index][i + k] * input[i + k].getIntensity(); } else { ++missing_right; if (missing_right <= missing_) { peak_raw_data[input[i + k].getPos()] = input[i + k].getIntensity(); if (has_im) weighted_im += input.getFloatDataArrays()[im_data_index][i + k] * input[i + k].getIntensity(); } } previous_zero_right = (input[i + k].getIntensity() == 0); right_boundary = i + k; ++k; } // skip if the minimal number of 3 points for fitting is not reached if (peak_raw_data.size() < 3) { continue; } CubicSpline2d peak_spline (peak_raw_data); // calculate maximum by evaluating the spline's 1st derivative // (bisection method) double max_peak_mz = central_peak_mz; double max_peak_int = central_peak_int; double threshold = 1e-6; OpenMS::Math::spline_bisection(peak_spline, left_neighbor_mz, right_neighbor_mz, max_peak_mz, max_peak_int, threshold); // // compute FWHM // if (report_FWHM_) { double fwhm_int = max_peak_int / 2.0; threshold = 0.01 * fwhm_int; double mz_mid, int_mid; // left: double mz_left = peak_raw_data.begin()->first; double mz_center = max_peak_mz; if (peak_spline.eval(mz_left) > fwhm_int) { // the spline ends before half max is reached -- take the leftmost point (probably an underestimation) mz_mid = mz_left; } else { do { mz_mid = mz_left / 2 + mz_center / 2; int_mid = peak_spline.eval(mz_mid); if (int_mid < fwhm_int) { mz_left = mz_mid; } else { mz_center = mz_mid; } } while (fabs(int_mid - fwhm_int) > threshold); } const double fwhm_left_mz = mz_mid; // right ... 
double mz_right = peak_raw_data.rbegin()->first; mz_center = max_peak_mz; if (peak_spline.eval(mz_right) > fwhm_int) { // the spline ends before half max is reached -- take the rightmost point (probably an underestimation) mz_mid = mz_right; } else { do { mz_mid = (mz_right + mz_center) / 2; int_mid = peak_spline.eval(mz_mid); if (int_mid < fwhm_int) { mz_right = mz_mid; } else { mz_center = mz_mid; } } while (fabs(int_mid - fwhm_int) > threshold); } const double fwhm_right_mz = mz_mid; const double fwhm_absolute = fwhm_right_mz - fwhm_left_mz; output.getFloatDataArrays()[out_fwhm_index].push_back( report_FWHM_as_ppm_ ? fwhm_absolute / max_peak_mz * 1e6 : fwhm_absolute); } // FWHM // compute the intensity-weighted mean ion mobility if (has_im) { double total_intensity(0); for (const auto& t : peak_raw_data) {total_intensity += t.second;} output.getFloatDataArrays()[out_im_index].push_back(weighted_im / total_intensity); } // save picked peak into output spectrum typename ContainerType::PeakType peak; PeakBoundary peak_boundary; peak.setPos(max_peak_mz); peak.setIntensity(max_peak_int); peak_boundary.mz_min = input[left_boundary].getPos(); peak_boundary.mz_max = input[right_boundary].getPos(); output.push_back(peak); boundaries.push_back(peak_boundary); // jump over profile data points that have been considered already i += k - 1; } } return; } void PeakPickerHiRes::pickExperiment(const PeakMap& input, PeakMap& output, const bool check_spectrum_type) const { std::vector<std::vector<PeakBoundary> > boundaries_spec; std::vector<std::vector<PeakBoundary> > boundaries_chrom; pickExperiment(input, output, boundaries_spec, boundaries_chrom, check_spectrum_type); } struct SpectraPickInfo { uint32_t picked{0}; ///< number of picked spectra uint32_t total{0}; ///< overall number of spectra }; void PeakPickerHiRes::pickExperiment(const PeakMap& input, PeakMap& output, std::vector<std::vector<PeakBoundary> >& boundaries_spec, std::vector<std::vector<PeakBoundary> >& 
boundaries_chrom, const bool check_spectrum_type) const { // make sure that output is clear output.clear(true); // copy experimental settings static_cast<ExperimentalSettings &>(output) = input; // resize output with respect to input output.resize(input.size()); Size progress = 0; startProgress(0, input.size() + input.getChromatograms().size(), "picking peaks"); // MSLevel -> stats map<int, SpectraPickInfo> pick_info; if (input.getNrSpectra() > 0) { for (Size scan_idx = 0; scan_idx != input.size(); ++scan_idx) { bool was_picked{false}; // auto mode if (ms_levels_.empty()) { SpectrumSettings::SpectrumType spectrum_type = input[scan_idx].getType(true); // uses meta-info and inspects data if needed if (spectrum_type == SpectrumSettings::SpectrumType::CENTROID) { output[scan_idx] = input[scan_idx]; } else { std::vector<PeakBoundary> boundaries_s; // peak boundaries of a single spectrum pick(input[scan_idx], output[scan_idx], boundaries_s); was_picked = true; boundaries_spec.push_back(std::move(boundaries_s)); } } // manual mode else if (!ListUtils::contains(ms_levels_, input[scan_idx].getMSLevel())) { output[scan_idx] = input[scan_idx]; } else { std::vector<PeakBoundary> boundaries_s; // peak boundaries of a single spectrum SpectrumSettings::SpectrumType spectrum_type = input[scan_idx].getType(true); // uses meta-info and inspects data if needed if (spectrum_type == SpectrumSettings::SpectrumType::CENTROID && check_spectrum_type) { throw OpenMS::Exception::IllegalArgument(__FILE__, __LINE__, __FUNCTION__, "Error: Centroided data provided but profile spectra expected."); } pick(input[scan_idx], output[scan_idx], boundaries_s); was_picked = true; boundaries_spec.push_back(std::move(boundaries_s)); } pick_info[input[scan_idx].getMSLevel()].picked += was_picked; ++pick_info[input[scan_idx].getMSLevel()].total; setProgress(++progress); } } for (Size i = 0; i < input.getChromatograms().size(); ++i) { MSChromatogram chromatogram; std::vector<PeakBoundary> boundaries_c; // 
peak boundaries of a single chromatogram pick(input.getChromatograms()[i], chromatogram, boundaries_c); output.addChromatogram(chromatogram); boundaries_chrom.push_back(boundaries_c); setProgress(++progress); } endProgress(); OPENMS_LOG_INFO << "#Spectra that needed to and could be picked by MS-level:\n"; for (const auto& info : pick_info) { OPENMS_LOG_INFO << " MS-level " << info.first << ": " << info.second.picked << " / " << info.second.total << "\n"; } return; } void PeakPickerHiRes::pickExperiment(/* const */ OnDiscMSExperiment& input, PeakMap& output, const bool check_spectrum_type) const { // make sure that output is clear output.clear(true); // copy experimental settings static_cast<ExperimentalSettings &>(output) = *input.getExperimentalSettings(); Size progress = 0; startProgress(0, input.size() + input.getNrChromatograms(), "picking peaks"); // resize output with respect to input output.resize(input.size()); if (input.getNrSpectra() > 0) { for (Size scan_idx = 0; scan_idx != input.size(); ++scan_idx) { if (ms_levels_.empty()) //auto mode { MSSpectrum s = input[scan_idx]; s.sortByPosition(); // determine type of spectral data (profile or centroided) SpectrumSettings::SpectrumType spectrumType = s.getType(); if (spectrumType == SpectrumSettings::SpectrumType::CENTROID) { output[scan_idx] = input[scan_idx]; } else { pick(s, output[scan_idx]); } } else if (!ListUtils::contains(ms_levels_, input[scan_idx].getMSLevel())) // manual mode { output[scan_idx] = input[scan_idx]; } else { MSSpectrum s = input[scan_idx]; s.sortByPosition(); // determine type of spectral data (profile or centroided) SpectrumSettings::SpectrumType spectrum_type = s.getType(); if (spectrum_type == SpectrumSettings::SpectrumType::CENTROID && check_spectrum_type) { throw OpenMS::Exception::IllegalArgument(__FILE__, __LINE__, __FUNCTION__, "Error: Centroided data provided but profile spectra expected."); } pick(s, output[scan_idx]); } setProgress(++progress); } } for (Size i = 0; i < 
input.getNrChromatograms(); ++i) { MSChromatogram chromatogram; pick(input.getChromatogram(i), chromatogram); output.addChromatogram(chromatogram); setProgress(++progress); } endProgress(); return; } void PeakPickerHiRes::updateMembers_() { signal_to_noise_ = param_.getValue("signal_to_noise"); spacing_difference_gap_ = param_.getValue("spacing_difference_gap"); if (spacing_difference_gap_ == 0.0) { spacing_difference_gap_ = std::numeric_limits<double>::infinity(); } spacing_difference_ = param_.getValue("spacing_difference"); if (spacing_difference_ == 0.0) { spacing_difference_ = std::numeric_limits<double>::infinity(); } missing_ = param_.getValue("missing"); ms_levels_ = getParameters().getValue("ms_levels"); report_FWHM_ = getParameters().getValue("report_FWHM").toBool(); report_FWHM_as_ppm_ = getParameters().getValue("report_FWHM_unit")!="absolute"; } }
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/CENTROIDING/PeakPickerIM.cpp
.cpp
53,059
1,291
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Author: Timo Sachsenberg, Mohammed Alhigaylan $
// $Maintainer: Timo Sachsenberg $
// --------------------------------------------------------------------------

#include <OpenMS/PROCESSING/CENTROIDING/PeakPickerIM.h>

#include <OpenMS/PROCESSING/CENTROIDING/PeakPickerHiRes.h>
#include <OpenMS/PROCESSING/SMOOTHING/GaussFilter.h>
#include <OpenMS/PROCESSING/SMOOTHING/SavitzkyGolayFilter.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/DATASTRUCTURES/Param.h>
#include <OpenMS/PROCESSING/RESAMPLING/LinearResamplerAlign.h>
#include <OpenMS/MATH/MISC/CubicSpline2d.h>
#include <OpenMS/MATH/MISC/SplineBisection.h>
#include <OpenMS/IONMOBILITY/IMDataConverter.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/FEATUREFINDER/MassTraceDetection.h>
#include <OpenMS/FEATUREFINDER/ElutionPeakDetection.h>

#include <iostream>
#include <deque>
#include <algorithm>
#include <limits>
#include <numeric>
#include <unordered_map>
#include <utility>

using namespace std;

#ifdef DEBUG_PICKER
#include <OpenMS/FORMAT/MzMLFile.h>
#endif

namespace OpenMS
{
  // Estimates the native point spacing of the given traces by taking the mode of the
  // small position differences observed in well-populated (>= 20 point) traces.
  // Differences above the 75th percentile are discarded first so that gaps in a
  // mobilogram do not skew the estimate. Falls back to 0.01 when no usable
  // differences are found.
  double PeakPickerIM::computeOptimalSamplingRate(const vector<MSSpectrum>& spectra)
  {
    vector<double> mz_diffs;
    // upper bound on the number of differences: one per input peak
    Size upper_peak_limit = 0;
    for (size_t s = 0; s < spectra.size(); ++s)
    {
      upper_peak_limit += spectra[s].size();
    }
    mz_diffs.reserve(upper_peak_limit);

    for (size_t s = 0; s < spectra.size(); ++s)
    {
      const MSSpectrum& spectrum = spectra[s];

      // The spectrum could have multiple ion mobility peaks at the same x position.
      // Sum the peak intensity
      MSSpectrum summed_trace;
      sumFrame_(spectrum, summed_trace, sum_tolerance_im_, false);

      // traces with fewer than 20 points are considered too sparse to inform the estimate
      if (summed_trace.size() < 20)
      {
#ifdef DEBUG_PICKER
        OPENMS_LOG_DEBUG << "Skipping trace " << s << " because it has too few points (" << summed_trace.size() << ").\n";
#endif
        continue; // skip this spectrum
      }

      // collect consecutive position differences of this trace
      for (size_t i = 1; i < summed_trace.size(); ++i)
      {
        double diff = summed_trace[i].getMZ() - summed_trace[i - 1].getMZ();
        mz_diffs.push_back(diff);
      }
      if (mz_diffs.size() > 1000) break; // 1000 diffs should be enough to estimate sampling
    }

    // If we found no valid m/z differences (traces too short)
    if (mz_diffs.empty())
    {
#ifdef DEBUG_PICKER
      OPENMS_LOG_DEBUG << "Warning: No valid m/z differences found in any spectra. Using default sampling rate of 0.01\n";
#endif
      return 0.01; // Fallback value
    }

    // Sort the differences to compute the 75th percentile threshold
    // This is needed in case there is a gap in the mobilogram. i+1 peak will skew the computed
    // sampling rate.
    std::sort(mz_diffs.begin(), mz_diffs.end());
    size_t percentile_index = static_cast<size_t>(mz_diffs.size() * 0.75);
    double threshold = mz_diffs[percentile_index];

#ifdef DEBUG_PICKER
    OPENMS_LOG_DEBUG << "75th percentile of position differences is: " << threshold << '\n';
#endif

    // Filter out large differences (keep diffs <= threshold)
    vector<double> small_mz_diffs;
    for (double diff : mz_diffs)
    {
      if (diff <= threshold)
      {
        small_mz_diffs.push_back(diff);
      }
    }

    if (small_mz_diffs.empty())
    {
      OPENMS_LOG_WARN << "Warning: No valid small m/z differences found after filtering. Using default sampling rate of 0.01\n";
      return 0.01;
    }

    // Compute the mode
    // NOTE(review): the mode is computed over exact double equality; this presumably
    // works because instrument spacings repeat bit-identically -- confirm if traces
    // ever come from resampled/recomputed positions.
    std::unordered_map<double, int> freq_map;
    for (double diff : small_mz_diffs)
    {
      freq_map[diff]++;
    }

    double mode_sampling_rate = small_mz_diffs.front();
    int max_count = 0;
    for (const auto& [diff, count] : freq_map)
    {
      if (count > max_count)
      {
        mode_sampling_rate = diff;
        max_count = count;
      }
    }

#ifdef DEBUG_PICKER
    OPENMS_LOG_DEBUG << "Computed optimal sampling rate: " << mode_sampling_rate << '\n';
#endif
    return mode_sampling_rate;
  }

  // Function to compute the lower and upper m/z bounds based on ppm tolerance
  // Note: the full ppm window is centered on 'mz', i.e. each side extends by ppm/2.
  std::pair<double, double> PeakPickerIM::ppmBounds(double mz, double ppm)
  {
    ppm = ppm / 1e6;
    double delta_mz = (ppm * mz) / 2.0;
    double low = mz - delta_mz;
    double high = mz + delta_mz;
    return std::make_pair(low, high);
  }

  // PRECONDITION: input_spectrum is sorted by m/z
  // This function sums peaks if they are nearly identical
  // OpenMS represents TimsTOF data in MSSpectrum() objects as one-array.
  // Example: There could be multiple 500.0 m/z peaks with different ion mobility values.
  // Example2: extracted mobilogram could have multiple 0.88 1/k values from different m/z peaks.
  // Peak picking (such as HiRes) will not work properly if there are multiple y measurements at a given x position.
  // Note: does not clear the output_spectrum but add peaks to it (required for fast padding)
  void PeakPickerIM::sumFrame_(const MSSpectrum& input_spectrum, MSSpectrum& output_spectrum, double tolerance, bool use_ppm)
  {
    OPENMS_PRECONDITION(input_spectrum.isSorted(), "Spectrum must be sorted by m/z before summing peaks.");

    if (input_spectrum.empty()) return;

    // running accumulator for the current group of near-identical positions
    double current_mz = input_spectrum[0].getMZ();
    double current_intensity = input_spectrum[0].getIntensity();

    for (Size i = 1; i < input_spectrum.size(); ++i)
    {
      double next_mz = input_spectrum[i].getMZ();
      double next_intensity = input_spectrum[i].getIntensity();
      double delta_mz = std::abs(next_mz - current_mz);
      // tolerance is interpreted as ppm (relative) or as an absolute position difference
      bool within_tolerance = use_ppm ?
((delta_mz / current_mz) * 1e6 <= tolerance) : (delta_mz <= tolerance);

      if (within_tolerance)
      {
        // merge into the current group
        current_intensity += next_intensity;
      }
      else // new peak is outside of tolerance window
      {
        // flush the accumulated group and start a new one
        output_spectrum.emplace_back(current_mz, current_intensity);
        current_mz = next_mz;
        current_intensity = next_intensity;
      }
    }
    // flush the last group
    output_spectrum.emplace_back(current_mz, current_intensity);
  }

  // We use peak FWHM (from PeakPickerHiRes) to extract ion mobility traces.
  // Given a picked m/z peak, we write a temporary MSSpectrum() object with ion mobility measurements
  // in place of m/z in Peak1D object. This facilitates peak picking in the ion mobility dimension.
  // To enable recomputing of m/z center after ion mobility peak picking, we tack raw m/z peak values
  // in FloatDataArrays().
  vector<MSSpectrum> PeakPickerIM::extractIonMobilityTraces(
    const MSSpectrum& picked_spectrum,
    const MSSpectrum& raw_spectrum)
  {
    const auto& float_data_arrays = picked_spectrum.getFloatDataArrays();

    // Find FWHM array in picked_spectrum
    const MSSpectrum::FloatDataArray* fwhm_array = nullptr;
    for (const auto& array : float_data_arrays)
    {
      if (array.getName() == "FWHM_ppm")
      {
        fwhm_array = &array;
        break;
      }
    }
    if (!fwhm_array)
    {
      OPENMS_LOG_WARN << "FWHM data array not found!\n";
      return {};
    }
    if (fwhm_array->size() != picked_spectrum.size())
    {
      OPENMS_LOG_WARN << "Size mismatch between FWHM array and picked peaks!\n";
      return {};
    }

    // Get the Ion Mobility array index from raw_spectrum
    if (!raw_spectrum.containsIMData())
    {
      OPENMS_LOG_WARN << "No ion mobility data found in raw_spectrum.\n";
      return {};
    }
    const auto [im_data_index, im_unit] = raw_spectrum.getIMData();
    const auto& ion_mobility_array = raw_spectrum.getFloatDataArrays()[im_data_index];

    // Vector of MSSpectra for each picked m/z peak (each spectrum is a mobilogram trace)
    vector<MSSpectrum> mobility_traces;

    for (size_t i = 0; i < picked_spectrum.size(); ++i)
    {
      double picked_mz = picked_spectrum[i].getMZ();
      double fwhm_ppm = (*fwhm_array)[i];

      // m/z window of this picked peak: FWHM (in ppm) centered on the apex
      auto bounds = ppmBounds(picked_mz, fwhm_ppm);
      double lower_bound = bounds.first;
      double upper_bound = bounds.second;

      SignedSize center_idx = raw_spectrum.findNearest(picked_mz);
      if (center_idx == -1)
      {
        // keep an empty placeholder so traces stay index-aligned with picked peaks
        OPENMS_LOG_WARN << "No raw peaks found near picked m/z: " << picked_mz << '\n';
        mobility_traces.emplace_back();
        continue;
      }

      MSSpectrum trace_spectrum; // A single mobilogram trace

      // Prepare FloatDataArray to store raw m/z values
      MSSpectrum::FloatDataArray raw_mz_array;
      raw_mz_array.setName("raw_mz");

      // Expand left
      SignedSize left_idx = center_idx;
      while (left_idx >= 0 && raw_spectrum[left_idx].getMZ() >= lower_bound)
      {
        trace_spectrum.emplace_back(ion_mobility_array[left_idx], raw_spectrum[left_idx].getIntensity()); // IM stored as m/z temporarily
        // Store the raw m/z
        raw_mz_array.push_back(raw_spectrum[left_idx].getMZ());
        --left_idx;
      }

      // Expand right
      SignedSize right_idx = center_idx + 1;
      while (right_idx < static_cast<SignedSize>(raw_spectrum.size()) && raw_spectrum[right_idx].getMZ() <= upper_bound)
      {
        trace_spectrum.emplace_back(ion_mobility_array[right_idx], raw_spectrum[right_idx].getIntensity());
        // Store the raw m/z data in floatDataArrays()
        raw_mz_array.push_back(raw_spectrum[right_idx].getMZ());
        ++right_idx;
      }

      // Attach the raw m/z array to trace_spectrum
      auto& trace_float_arrays = trace_spectrum.getFloatDataArrays();
      trace_float_arrays.push_back(std::move(raw_mz_array));

      // Sort the trace_spectrum by ion mobility (m/z), while keeping raw m/z aligned
      trace_spectrum.sortByPosition(); // Note: having the float arrays attached ensures that sorting is performed on everything

      mobility_traces.push_back(std::move(trace_spectrum));
    }

    return mobility_traces;
  }

  // Function to compute m/z centers from mobilogram_traces and picked_traces
  MSSpectrum PeakPickerIM::computeCentroids_(const vector<MSSpectrum>& mobilogram_traces, const vector<MSSpectrum>& picked_traces)
  {
    MSSpectrum centroided_frame;

    // Create float data arrays to house ion mobility data and peaks FWHM
    MSSpectrum::FloatDataArray ion_mobility_array;
    ion_mobility_array.setName(Constants::UserParam::ION_MOBILITY_CENTROID);
    MSSpectrum::FloatDataArray ion_mobility_fwhm;
    ion_mobility_fwhm.setName("IM FWHM");
    MSSpectrum::FloatDataArray mz_fwhm_array;
    mz_fwhm_array.setName("MZ FWHM");

#ifdef DEBUG_PICKER
    OPENMS_LOG_DEBUG << "picked_traces.size(): " << picked_traces.size() << '\n';
#endif

    // Loop over picked traces and their corresponding raw mobilogram traces
    // (the two vectors are index-aligned; see extractIonMobilityTraces)
    for (size_t i = 0; i < picked_traces.size(); ++i)
    {
      // std::cout << "Looping through picked_trace that has .. " << picked_traces[i].size() << '\n';
      const MSSpectrum& picked_trace = picked_traces[i];
      const MSSpectrum& raw_trace = mobilogram_traces[i];

      const auto& picked_float_arrays = picked_trace.getFloatDataArrays();
      if (picked_float_arrays.empty())
      {
        OPENMS_LOG_WARN << "No IM FWHM array found for picked_trace " << i << "!\n";
        continue;
      }

      // Assuming the first FloatDataArray holds the ion mobility peak FWHM values
      const auto& fwhm_array = picked_float_arrays[0];
      if (fwhm_array.size() != picked_trace.size())
      {
        OPENMS_LOG_WARN << "FWHM array size mismatch with picked_trace size!\n";
        continue;
      }

      // Get the FloatDataArrays from raw_trace (assumed to hold the raw m/z values)
      const auto& raw_float_arrays = raw_trace.getFloatDataArrays();
      if (raw_float_arrays.empty())
      {
        OPENMS_LOG_WARN << "No raw m/z peaks found for raw_trace " << i << "!\n";
        continue;
      }

      // Assume the first array holds the raw m/z values
      const auto& raw_mz_values = raw_float_arrays[0];
      if (raw_mz_values.size() != raw_trace.size())
      {
        OPENMS_LOG_WARN << "raw_mz_values size mismatch with raw_trace size!\n";
        continue;
      }

#ifdef DEBUG_PICKER
      OPENMS_LOG_DEBUG << "\n--- Processing picked_trace " << i << " ---\n";
#endif

      // Create reusable objects outside the loop to reduce memory allocations
      MSSpectrum raw_peaks_within_bounds;
      MSSpectrum raw_mz_peaks;
      vector<double> mz_values;
      vector<double> intensity_values;
      vector<size_t> indices;
      vector<double> sorted_mz;
      vector<double> sorted_intensity;

      // Iterate through picked peaks in this trace
      for (Size j = 0; j < picked_trace.size(); ++j)
      {
        double centroid_im = picked_trace[j].getMZ(); // Ion mobility centroid (stored as m/z)
        double fwhm = fwhm_array[j];
        // IM window of this picked peak: FWHM centered on the centroid
        double im_lower = centroid_im - (fwhm / 2.0);
        double im_upper = centroid_im + (fwhm / 2.0);

#ifdef DEBUG_PICKER
        OPENMS_LOG_DEBUG << "Picked peak " << j << " IM centroid: " << centroid_im << " ion mobility FWHM: " << fwhm << " --> IM bounds: [" << im_lower << ", " << im_upper << "]\n";
#endif

        // Use findNearest() to get the index of the closest peak in the raw mobilogram trace
        SignedSize center_idx = raw_trace.findNearest(centroid_im);
        if (center_idx == -1)
        {
          OPENMS_LOG_WARN << "Could not find nearest peak to centroid_im in raw_trace!\n";
          continue;
        }

        // Clear the spectrum for reuse
        raw_peaks_within_bounds.clear(true);

        // --- Expand Left ---
        SignedSize left_idx = center_idx;
        while (left_idx >= 0 && raw_trace[left_idx].getMZ() >= im_lower)
        {
          Peak1D new_peak;
          new_peak.setMZ(raw_mz_values[left_idx]); // m/z from FloatDataArray
          new_peak.setIntensity(raw_trace[left_idx].getIntensity()); // intensity from raw_trace
          raw_peaks_within_bounds.push_back(new_peak);
          --left_idx;
        }

        // --- Expand Right ---
        SignedSize right_idx = center_idx + 1;
        while (right_idx < static_cast<SignedSize>(raw_trace.size()) && raw_trace[right_idx].getMZ() <= im_upper)
        {
          Peak1D new_peak;
          new_peak.setMZ(raw_mz_values[right_idx]);
          new_peak.setIntensity(raw_trace[right_idx].getIntensity());
          raw_peaks_within_bounds.push_back(new_peak);
          ++right_idx;
        }

#ifdef DEBUG_PICKER
        OPENMS_LOG_DEBUG << "Picked IM peak " << j << ": collected " << raw_peaks_within_bounds.size() << " raw m/z points between IM [" << im_lower << ", " << im_upper << "]\n";
#endif

        // If we only retrieved one raw peak, pass it over to centroided_frame as is
        // Resampling and smoothing the raw data distorts the intensity values.
        // We recompute the m/z peak maxima and intensity using spline
        if (raw_peaks_within_bounds.size() == 1)
        {
          const Peak1D& single_peak = raw_peaks_within_bounds.front();

          // Add it directly to centroided_frame
          centroided_frame.push_back(single_peak);

          // Push corresponding ion mobility and FWHM arrays
          ion_mobility_array.push_back(centroid_im);
          ion_mobility_fwhm.push_back(fwhm);
          mz_fwhm_array.push_back(0.0);

#ifdef DEBUG_PICKER
          OPENMS_LOG_DEBUG << "[INFO] Only one raw peak found. Added directly to centroided_frame. m/z: " << single_peak.getMZ() << " intensity: " << single_peak.getIntensity() << '\n';
#endif
          // Skip the rest of the loop and move on to the next picked_trace peak
          continue;
        }

        // Sort by m/z for sumFrame_() which requires sorted input.
        // Data is typically unsorted because peaks were collected by walking through IM-sorted
        // data (expand-left/right), and m/z has no correlation with IM order within FWHM window.
        raw_peaks_within_bounds.sortByPosition();

        // Clear the spectrum for reuse
        raw_mz_peaks.clear(true);
        sumFrame_(raw_peaks_within_bounds, raw_mz_peaks, 0.1, true);

        if (raw_mz_peaks.empty())
        {
          OPENMS_LOG_DEBUG << "No data in raw_mz_peaks for picked IM peak " << j << "!\n";
          continue;
        }

        // Summing peaks could result in spectrum.size() == 1 which causes error.
        // in this case, simply sum the intensity values
        if (raw_mz_peaks.size() == 1)
        {
          centroided_frame.push_back(raw_mz_peaks[0]);
          ion_mobility_array.push_back(centroid_im);
          ion_mobility_fwhm.push_back(fwhm);
          mz_fwhm_array.push_back(0.0);

#ifdef DEBUG_PICKER
          const Peak1D& single_peak = raw_mz_peaks[0];
          OPENMS_LOG_DEBUG << "[INFO] sumFrame_ reduced peaks to a single entry. Added directly to centroided_frame. m/z: " << single_peak.getMZ() << " intensity: " << single_peak.getIntensity() << '\n';
#endif
          continue;
        }

        // Clear and reuse vectors for spline data
        mz_values.clear();
        intensity_values.clear();

        // Reserve space for efficiency
        mz_values.reserve(raw_mz_peaks.size());
        intensity_values.reserve(raw_mz_peaks.size());

        // Initialize sorting flag
        bool is_sorted = true;

        // Populate the vectors and check if sorted at the same time
        for (size_t i = 0; i < raw_mz_peaks.size(); ++i)
        {
          double current_mz = raw_mz_peaks[i].getMZ();
          double current_intensity = raw_mz_peaks[i].getIntensity();

          // Check if still sorted (compare with previous value if not the first element)
          if (i > 0 && current_mz < mz_values.back())
          {
            is_sorted = false;
          }

          mz_values.push_back(current_mz);
          intensity_values.push_back(current_intensity);
        }

        // Sort vectors if needed (safety check - data should already be sorted from sumFrame_)
        // CubicSpline2d requires sorted x-coordinates
        if (!is_sorted)
        {
          // Reuse indices vector
          indices.resize(mz_values.size());
          for (size_t i = 0; i < indices.size(); ++i)
          {
            indices[i] = i;
          }

          // Sort indices based on m/z values
          std::sort(indices.begin(), indices.end(),
            [&mz_values](size_t i1, size_t i2) { return mz_values[i1] < mz_values[i2]; });

          // Reuse sorted vectors
          sorted_mz.resize(mz_values.size());
          sorted_intensity.resize(intensity_values.size());

          // Reorder both vectors using the sorted indices
          for (size_t i = 0; i < indices.size(); ++i)
          {
            sorted_mz[i] = mz_values[indices[i]];
            sorted_intensity[i] = intensity_values[indices[i]];
          }

          // Replace the original vectors with the sorted ones
          mz_values = std::move(sorted_mz);
          intensity_values = std::move(sorted_intensity);
        }

        // Initialize spline with the two vectors
        CubicSpline2d spline(mz_values, intensity_values);

        // Define boundaries
        const double left_bound = mz_values.front();
        const double right_bound = mz_values.back();

        // Find maximum via spline bisection
        double apex_mz = (left_bound + right_bound) / 2.0;
        double apex_intensity = 0.0;
        const double max_search_threshold = 1e-6;
        Math::spline_bisection(spline, left_bound, right_bound, apex_mz, apex_intensity, max_search_threshold);

#ifdef DEBUG_PICKER
        OPENMS_LOG_DEBUG << "Apex m/z: " << apex_mz << '\n';
        OPENMS_LOG_DEBUG << "Apex intensity: " << apex_intensity << '\n';
#endif

        // FWHM calculation (same binary search as before)
        double half_height = apex_intensity / 2.0;
        const double fwhm_search_threshold = 0.01 * half_height;

        // ---- Left side search ----
        double mz_left = left_bound;
        double mz_center = apex_mz;
        double int_mid = 0.0;
        double mz_mid = mz_left;

        if (spline.eval(mz_left) > half_height)
        {
          // spline never drops to half max on the left: take the leftmost point
          mz_mid = mz_left;
        }
        else
        {
          // bisection between the left bound and the apex
          do
          {
            mz_mid = (mz_left + mz_center) / 2.0;
            int_mid = spline.eval(mz_mid);
            if (int_mid < half_height)
            {
              mz_left = mz_mid;
            }
            else
            {
              mz_center = mz_mid;
            }
          }
          while (std::fabs(int_mid - half_height) > fwhm_search_threshold);
        }
        double fwhm_left_mz = mz_mid;

        // ---- Right side search ----
        double mz_right = right_bound;
        mz_center = apex_mz;

        if (spline.eval(mz_right) > half_height)
        {
          // spline never drops to half max on the right: take the rightmost point
          mz_mid = mz_right;
        }
        else
        {
          // bisection between the apex and the right bound
          do
          {
            mz_mid = (mz_right + mz_center) / 2.0;
            int_mid = spline.eval(mz_mid);
            if (int_mid < half_height)
            {
              mz_right = mz_mid;
            }
            else
            {
              mz_center = mz_mid;
            }
          }
          while (std::fabs(int_mid - half_height) > fwhm_search_threshold);
        }
        double fwhm_right_mz = mz_mid;

        // ---- FWHM result ----
        double mz_fwhm = fwhm_right_mz - fwhm_left_mz;

#ifdef DEBUG_PICKER
        OPENMS_LOG_DEBUG << "Left m/z at half height: " << fwhm_left_mz << '\n';
        OPENMS_LOG_DEBUG << "Right m/z at half height: " << fwhm_right_mz << '\n';
        OPENMS_LOG_DEBUG << "m/z FWHM: " << mz_fwhm << '\n';
#endif

        // store the recomputed apex together with its IM centroid and both FWHMs
        centroided_frame.emplace_back(apex_mz, apex_intensity);
        ion_mobility_array.push_back(centroid_im);
        ion_mobility_fwhm.push_back(fwhm);
        mz_fwhm_array.push_back(mz_fwhm);
      }

#ifdef DEBUG_PICKER
      OPENMS_LOG_DEBUG << "--- Finished processing picked_trace " << i << " ---\n\n";
#endif
    }

    auto& centroided_frame_fda = centroided_frame.getFloatDataArrays();
centroided_frame_fda.push_back(std::move(ion_mobility_array)); centroided_frame_fda.push_back(std::move(ion_mobility_fwhm)); centroided_frame_fda.push_back(std::move(mz_fwhm_array)); centroided_frame.sortByPosition(); #ifdef DEBUG_PICKER OPENMS_LOG_DEBUG << "Peaks in centroided frame: " << centroided_frame.size() << '\n'; OPENMS_LOG_DEBUG << "Printing centroided_frame inside ComputerCenters function \n"; for (const auto& peak : centroided_frame) { OPENMS_LOG_DEBUG << "m/z: " << peak.getMZ() << ", intensity: " << peak.getIntensity() << '\n'; } #endif return centroided_frame; } void removeAllFloatDataArraysExcept(OpenMS::MSSpectrum& spectrum, const String& keep_name) { auto& float_arrays = spectrum.getFloatDataArrays(); // Use remove_if to move all elements to remove to the end auto new_end = std::remove_if(float_arrays.begin(), float_arrays.end(), [&keep_name](const MSSpectrum::FloatDataArray& array) { return array.getName() != keep_name; // Remove if NOT the one we want to keep }); // Erase the removed elements float_arrays.erase(new_end, float_arrays.end()); } PeakPickerIM::PeakPickerIM() : DefaultParamHandler("PeakPickerIM") { // --- PickIMTraces parameters --- defaults_.setValue("pickIMTraces:sum_tolerance_mz", 1.0, "Tolerance for summing adjacent m/z peaks (ppm)"); defaults_.setValue("pickIMTraces:sum_tolerance_im", 0.0006,"Tolerance for summing adjacent ion mobility peaks (1/k0)"); defaults_.setValue("pickIMTraces:gauss_ppm_tolerance", 5.0, "Gaussian smoothing m/z tolerance in ppm"); defaults_.setValue("pickIMTraces:sgolay_frame_length", 5, "Savitzky-Golay smoothing frame length"); defaults_.setValue("pickIMTraces:sgolay_polynomial_order", 3, "Savitzky-Golay smoothing polynomial order"); // --- PickIMCluster parameters --- defaults_.setValue("pickIMCluster:ppm_tolerance_cluster", 50.0, "m/z tolerance in ppm for clustering"); defaults_.setValue("pickIMCluster:im_tolerance_cluster", 0.1, "Ion mobility tolerance in 1/k for clustering"); // --- 
PickIMElutionProfiles parameters --- defaults_.setValue("pickIMElutionProfiles:ppm_tolerance_elution", 50.0, "Mass trace m/z tolerance in ppm"); defaultsToParam_(); // copies defaults_ into param_ updateMembers_(); // caches into member variables } void PeakPickerIM::updateMembers_() { sum_tolerance_mz_ = (double)param_.getValue("pickIMTraces:sum_tolerance_mz"); sum_tolerance_im_ = (double)param_.getValue("pickIMTraces:sum_tolerance_im"); gauss_ppm_tolerance_ = (double)param_.getValue("pickIMTraces:gauss_ppm_tolerance"); sgolay_frame_length_ = (int)param_.getValue("pickIMTraces:sgolay_frame_length"); sgolay_polynomial_order_= (int)param_.getValue("pickIMTraces:sgolay_polynomial_order"); ppm_tolerance_cluster_ = (double)param_.getValue("pickIMCluster:ppm_tolerance_cluster"); im_tolerance_cluster_ = (double)param_.getValue("pickIMCluster:im_tolerance_cluster"); ppm_tolerance_elution_ = (double)param_.getValue("pickIMElutionProfiles:ppm_tolerance_elution"); } namespace { /** * @brief Helper function to validate that a spectrum contains IM data in the correct format for peak picking * * @param[in] spectrum The spectrum to validate * @return true if the spectrum should be processed (has concatenated IM data) * @return false if the spectrum should be skipped (no IM data) * @throws Exception::InvalidValue if the data is already centroided, UNKNOWN, or unhandled format */ bool validateIMFormatForPicking(const MSSpectrum& spectrum) { IMFormat format = IMTypes::determineIMFormat(spectrum); switch (format) { case IMFormat::NONE: return false; // no IM data - skip silently case IMFormat::CENTROIDED: throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Ion mobility data is already centroided. PeakPickerIM expects raw (concatenated) IM data. 
" "Re-picking already centroided data is not supported.", String(NamesOfIMFormat[(size_t)format])); case IMFormat::UNKNOWN: throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "IMFormat set to UNKNOWN after determineIMFormat. This should never happen.", String(NamesOfIMFormat[(size_t)format])); case IMFormat::CONCATENATED: OPENMS_LOG_DEBUG << "Processing concatenated IM data.\n"; return true; // continue processing default: throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Unhandled IMFormat after determineIMFormat. This should never happen.", String(NamesOfIMFormat[(size_t)format])); } } } void PeakPickerIM::pickIMTraces(MSSpectrum& spectrum) { // Validate IM format - returns false if we should skip processing if (!validateIMFormatForPicking(spectrum)) { return; } // Spectrum is in CONCATENATED IM format. Now sort by m/z to prepare for m/z peak picking spectrum.sortByPosition(); // ************************************************* PART I ***************************************************** // ------------------------------------------ mass-to-charge peak picking ------------------------------------- // ------------------------------------------ Step a1: Sum m/z peaks ------------------------------------------ // project all timsTOF peaks into the m/z axis using sumFrame_ // The ppm tolerance is a dynamic way of testing m/z floats being almost identical. The raw intensity is summed. MSSpectrum summed_spectrum; sumFrame_(spectrum, summed_spectrum, sum_tolerance_mz_, true); #ifdef DEBUG_PICKER OPENMS_LOG_DEBUG << "Spectrum after sumFrame_ has " << summed_spectrum.size() << " peaks.\n"; #endif // ------------------------------------------ step 2a: smooth ------------------------------------------ // Apply gaussian smoothing to the peaks projected into the m/z axis. This facilitates peak picking // in the m/z dimension and subseqent mobilogram extraction for each picked m/z peak. 
#ifdef DEBUG_PICKER OPENMS_LOG_DEBUG << "Applying Gaussian smoothing...\n"; #endif GaussFilter gauss_filter; Param gauss_params; gauss_params.setValue("ppm_tolerance", gauss_ppm_tolerance_); gauss_params.setValue("use_ppm_tolerance", "true"); gauss_filter.setParameters(gauss_params); gauss_filter.filter(summed_spectrum); #ifdef DEBUG_PICKER OPENMS_LOG_DEBUG << "Spectrum after Gaussian smoothing has " << summed_spectrum.size() << " peaks.\n"; for (const auto& peak : summed_spectrum) { OPENMS_LOG_DEBUG << "m/z: " << peak.getMZ() << ", intensity: " << peak.getIntensity() << '\n'; } #endif // ------------------------------------------ step 3a: m/z Peak Picking ------------------------------------------ // Pick peaks in the m/z axis and toggle reporting peak width at half max (FWHM) // we will use the FWHM of each picked m/z peak to extract mobilograms. PeakPickerHiRes picker_mz; Param picker_mz_p; picker_mz_p.setValue("signal_to_noise", 0.0); picker_mz_p.setValue("report_FWHM", "true"); picker_mz_p.setValue("report_FWHM_unit", "relative"); picker_mz.setParameters(picker_mz_p); MSSpectrum picked_spectrum; picker_mz.pick(summed_spectrum, picked_spectrum); #ifdef DEBUG_PICKER OPENMS_LOG_DEBUG << "Size of picked spectrum: " << picked_spectrum.size() << '\n'; #endif if (picked_spectrum.empty()) { OPENMS_LOG_WARN << "No m/z peaks picked. Returning empty spectrum.\n"; spectrum.clear(true); return; } // ------------------------------------------ step 4a: Extraction mobilograms ------------------------------------------ // Using m/z peaks FWHM, we iteratively extract ion mobility traces (mobilograms) from the raw spectrum. // To rescue weak signal in the extracted mobilograms, we use linear resampling. // For linear resampling, it is recommended to use a sampling rate equal or higher than the raw sampling rate. 
// We dynamically determine the raw sampling rate from well-populated extracted mobilograms // (currently we have this hard-coded as +20 raw peaks in a mobilogram to be considered well-populated). auto mobilogram_traces = extractIonMobilityTraces(picked_spectrum, spectrum); // Compute optimal sampling rate from the native spacing of mobilogram data points double sampling_rate = computeOptimalSamplingRate(mobilogram_traces); Param resampler_param; resampler_param.setValue("spacing", sampling_rate); resampler_param.setValue("ppm", "false"); #ifdef DEBUG_PICKER OPENMS_LOG_DEBUG << "Using sampling rate... : " << sampling_rate << '\n'; #endif #ifdef DEBUG_PICKER for (size_t i = 0; i < mobilogram_traces.size(); ++i) { OPENMS_LOG_DEBUG << "Trace " << i << " contains " << mobilogram_traces[i].size() << " points in ion mobility space.\n"; } #endif // ************************************************* PART II ***************************************************** // ------------------------------------------ Ion mobility peak picking ------------------------------------------ // ------------------------------------------ part 1b: sum ion mobility peaks ------------------------------------ // An extract ion mobilogram can have two peaks with identicial 1/k value and cuase issues in the peak picking steps. // Example: if raw sampling rate is 0.0012 1/k -- then ion mobility peak 0.8800 1/k and 0.8806 1/k should be combined. // Use 0.0006 1/k as default. This parameter may need to change depending on ion mobility ramp tamp // (it is currently optimized for 100 ms ramp time) // prepare picked ion mobility objects (we are internally using MSSpectrum object for downstram peak picking inputs). 
vector<MSSpectrum> picked_traces; // Remove empty traces that can occur when no raw peaks are found within the FWHM window // of a picked m/z peak during extractIonMobilityTraces() mobilogram_traces.erase( std::remove_if(mobilogram_traces.begin(), mobilogram_traces.end(), [](const auto& trace) { return trace.empty(); }), mobilogram_traces.end()); for (size_t i = 0; i < mobilogram_traces.size(); ++i) { MSSpectrum& trace = mobilogram_traces[i]; #ifdef DEBUG_PICKER OPENMS_LOG_DEBUG << "\n--- Processing Trace " << i << " ---\n"; OPENMS_LOG_DEBUG << "Original trace has " << trace.size() << " peaks.\n"; #endif MSSpectrum summed_trace; summed_trace.reserve(trace.size() + 1); summed_trace.emplace_back(-1.0, -1.0); sumFrame_(trace, summed_trace, sum_tolerance_im_, false); #ifdef DEBUG_PICKER OPENMS_LOG_DEBUG << "Trace after sumFrame_ has " << summed_trace.size() << " peaks.\n"; #endif // ------------------------------------------ part 2b: smooth and resample -------------------------------- // Prepare mobilograms for SGolay smoothing. // To avoid edge effect, we will pad the edges with (sgolay_frame_length_ -1 / 2.0) points. 
double im_start = summed_trace[1].getMZ(); double im_end = summed_trace.back().getMZ(); #ifdef DEBUG_PICKER OPENMS_LOG_DEBUG << "Original summed trace ion mobility range: [" << im_start << ", " << im_end << "]\n"; #endif int padding_points = static_cast<int>(std::ceil((sgolay_frame_length_ - 1) / 2.0)); Peak1D front_padding; front_padding.setMZ(im_start - padding_points * sampling_rate); front_padding.setIntensity(0.0); summed_trace[0] = front_padding; Peak1D back_padding; back_padding.setMZ(im_end + padding_points * sampling_rate); back_padding.setIntensity(0.0); summed_trace.push_back(back_padding); #ifdef DEBUG_PICKER OPENMS_LOG_DEBUG << "Padded summed trace im range: [" << summed_trace.front().getMZ() << ", " << summed_trace.back().getMZ() << "]\n"; #endif // linear resample to rescue weak signal LinearResamplerAlign lin_resampler; lin_resampler.setParameters(resampler_param); lin_resampler.raster(summed_trace); #ifdef DEBUG_PICKER OPENMS_LOG_DEBUG << "Size of resampled trace: " << summed_trace.size() << " peaks.\n"; for (const auto& peak : summed_trace) { OPENMS_LOG_DEBUG << "m/z: " << peak.getMZ() << ", intensity: " << peak.getIntensity() << '\n'; } #endif // SGolay smooth prior to peak picking SavitzkyGolayFilter sgolay_filter; Param sgolay_params; sgolay_params.setValue("frame_length", sgolay_frame_length_); sgolay_params.setValue("polynomial_order", sgolay_polynomial_order_); sgolay_filter.setParameters(sgolay_params); sgolay_filter.filter(summed_trace); #ifdef DEBUG_PICKER OPENMS_LOG_DEBUG << "Trace after Savitzky-Golay smoothing has " << summed_trace.size() << " peaks.\n"; for (const auto& peak : summed_trace) { OPENMS_LOG_DEBUG << "m/z: " << peak.getMZ() << ", intensity: " << peak.getIntensity() << '\n'; } #endif // ------------------------------------------ part 3b: im peak picking -------------------------------- // apply PeakPickerHiRes to pick ion mobility peaks. // PeakPickerHiRes can be applied to chromatograms. 
We reasoned the same set of parameters ideal for // chromatograms is also applicable for mobilograms. // Each raw mobilogram contains a float data array with raw m/z values. // We will use the ion mobility peak FWHM to define min/max ion mobility boundary // and recompute the m/z center based on the ion mobility peak. PeakPickerHiRes picker_im; Param picker_im_p; picker_im_p.setValue("signal_to_noise", 0.0); picker_im_p.setValue("spacing_difference_gap", 0.0); picker_im_p.setValue("spacing_difference", 0.0); picker_im_p.setValue("missing", 0); picker_im_p.setValue("report_FWHM", "true"); picker_im_p.setValue("report_FWHM_unit", "absolute"); picker_im.setParameters(picker_im_p); MSSpectrum picked_trace; picker_im.pick(summed_trace, picked_trace); picked_traces.push_back(std::move(picked_trace)); #ifdef DEBUG_PICKER OPENMS_LOG_DEBUG << "--- Finished Processing Trace " << i << " ---\n\n"; #endif } // Recompute m/z centers and output centroided frame MSSpectrum centroided_frame = computeCentroids_(mobilogram_traces, picked_traces); #ifdef DEBUG_PICKER OPENMS_LOG_DEBUG << "--- Centroided frame has " << centroided_frame.size() << " --- peaks.\n"; #endif // Copy only SpectrumSettings from the input into the centroided result static_cast<SpectrumSettings&>(centroided_frame) = static_cast<const SpectrumSettings&>(spectrum); centroided_frame.setMSLevel(spectrum.getMSLevel()); centroided_frame.setName(spectrum.getName()); centroided_frame.setRT(spectrum.getRT()); removeAllFloatDataArraysExcept(centroided_frame, Constants::UserParam::ION_MOBILITY_CENTROID); centroided_frame.setIMFormat(IMFormat::CENTROIDED); spectrum = std::move(centroided_frame); #ifdef DEBUG_PICKER // Print peaks for debugging OPENMS_LOG_DEBUG << "--- Spectrum final output object has .. 
" << spectrum.size() << " --- peaks.\n"; for (const auto& peak : spectrum) { OPENMS_LOG_DEBUG << "m/z: " << peak.getMZ() << ", intensity: " << peak.getIntensity() << '\n'; } #endif } void PeakPickerIM::pickIMCluster(OpenMS::MSSpectrum& spectrum) const { if (spectrum.empty()) return; // Validate IM format - returns false if we should skip processing if (!validateIMFormatForPicking(spectrum)) { return; } // Get IM data array if (!spectrum.containsIMData()) { OPENMS_LOG_WARN << "No ion mobility data found in spectrum.\n"; return; } const auto [im_data_index, im_unit] = spectrum.getIMData(); auto& im_data = spectrum.getFloatDataArrays()[im_data_index]; struct Point { double mz; double im; float intensity; OpenMS::Size original_index; Point(double mz_val, double im_val, float int_val, OpenMS::Size idx) : mz(mz_val), im(im_val), intensity(int_val), original_index(idx) {} }; // Convert peaks to Points (same as before) std::vector<Point> points; points.reserve(spectrum.size()); for (OpenMS::Size i = 0; i < spectrum.size(); ++i) { const auto& peak = spectrum[i]; points.emplace_back(peak.getMZ(), im_data[i], peak.getIntensity(), i); } // --- Setup for Clustering --- // 1. Create m/z-sorted indices (needed for cluster expansion) std::vector<OpenMS::Size> mz_sorted_indices(points.size()); std::iota(mz_sorted_indices.begin(), mz_sorted_indices.end(), 0); std::sort(mz_sorted_indices.begin(), mz_sorted_indices.end(), [&](OpenMS::Size a, OpenMS::Size b) { return points[a].mz < points[b].mz; }); // Create reverse lookup: original_index -> mz_sorted_position (needed for cluster expansion) std::vector<OpenMS::Size> original_to_sorted_pos(points.size()); for (OpenMS::Size i = 0; i < points.size(); ++i) { original_to_sorted_pos[mz_sorted_indices[i]] = i; } // 2. 
***OPTIMIZATION: Create intensity-sorted indices for seed picking*** std::vector<OpenMS::Size> intensity_sorted_indices(points.size()); std::iota(intensity_sorted_indices.begin(), intensity_sorted_indices.end(), 0); std::sort(intensity_sorted_indices.begin(), intensity_sorted_indices.end(), [&](OpenMS::Size a, OpenMS::Size b) { // Sort DESCENDING by intensity return points[a].intensity > points[b].intensity; }); // 3. Keep track of used points std::vector<bool> used(points.size(), false); OpenMS::Size num_used = 0; // Store results temporarily std::vector<Point> averaged_points; averaged_points.reserve(points.size()); double ppm_factor = ppm_tolerance_cluster_ * 1e-6; // --- Main Clustering Loop --- // Iterate through peaks in descending order of intensity to find seeds for (OpenMS::Size current_intensity_rank = 0; current_intensity_rank < points.size(); ++current_intensity_rank) { // 4. Get the original index of the next potential seed (highest intensity first) OpenMS::Size seed_original_idx = intensity_sorted_indices[current_intensity_rank]; // 5. Check if this peak has already been used (part of a previous cluster) if (used[seed_original_idx]) { continue; // Skip to the next highest intensity peak } // --- Found an unused seed, start clustering --- // 6. Initialize the cluster with the seed std::vector<OpenMS::Size> current_cluster_indices; current_cluster_indices.push_back(seed_original_idx); used[seed_original_idx] = true; num_used++; double cluster_mz_min = points[seed_original_idx].mz; double cluster_mz_max = points[seed_original_idx].mz; double cluster_im_min = points[seed_original_idx].im; double cluster_im_max = points[seed_original_idx].im; // 7. 
Expand the cluster using m/z sorted neighbors (same logic as before) OpenMS::Size seed_sorted_pos = original_to_sorted_pos[seed_original_idx]; OpenMS::SignedSize left_idx = static_cast<OpenMS::SignedSize>(seed_sorted_pos) - 1; OpenMS::SignedSize right_idx = static_cast<OpenMS::SignedSize>(seed_sorted_pos) + 1; bool changed = true; while (changed) { changed = false; // Check left neighbor while (left_idx >= 0) { OpenMS::Size candidate_original_idx = mz_sorted_indices[left_idx]; if (!used[candidate_original_idx]) { const auto& candidate_point = points[candidate_original_idx]; double potential_mz_min = std::min(cluster_mz_min, candidate_point.mz); double potential_mz_max = std::max(cluster_mz_max, candidate_point.mz); // Fixed: Use max for the max value double potential_im_min = std::min(cluster_im_min, candidate_point.im); double potential_im_max = std::max(cluster_im_max, candidate_point.im); // Fixed m/z tolerance calculation bool mz_ok = (potential_mz_max - potential_mz_min) <= (potential_mz_min * ppm_factor); bool im_ok = (potential_im_max - potential_im_min) <= im_tolerance_cluster_; if (mz_ok && im_ok) { current_cluster_indices.push_back(candidate_original_idx); used[candidate_original_idx] = true; num_used++; cluster_mz_min = potential_mz_min; cluster_mz_max = potential_mz_max; // Fixed: Update max value cluster_im_min = potential_im_min; cluster_im_max = potential_im_max; left_idx--; changed = true; break; // Added point, break inner while to re-evaluate from outer changed loop } else { left_idx = -1; // Stop checking left this round break; } } else { left_idx--; // Skip used point } } // Check right neighbor while (right_idx < static_cast<OpenMS::SignedSize>(points.size())) { OpenMS::Size candidate_original_idx = mz_sorted_indices[right_idx]; if (!used[candidate_original_idx]) { const auto& candidate_point = points[candidate_original_idx]; double potential_mz_min = std::min(cluster_mz_min, candidate_point.mz); // Fixed: Use min double potential_mz_max = 
std::max(cluster_mz_max, candidate_point.mz); double potential_im_min = std::min(cluster_im_min, candidate_point.im); double potential_im_max = std::max(cluster_im_max, candidate_point.im); // Fixed m/z tolerance calculation bool mz_ok = (potential_mz_max - potential_mz_min) <= (potential_mz_min * ppm_factor); bool im_ok = (potential_im_max - potential_im_min) <= im_tolerance_cluster_; if (mz_ok && im_ok) { current_cluster_indices.push_back(candidate_original_idx); used[candidate_original_idx] = true; num_used++; cluster_mz_min = potential_mz_min; // Fixed: Update min value cluster_mz_max = potential_mz_max; cluster_im_min = potential_im_min; cluster_im_max = potential_im_max; right_idx++; changed = true; break; // Added point, break inner while to re-evaluate from outer changed loop } else { right_idx = points.size(); // Stop checking right this round break; } } else { right_idx++; // Skip used point } } } // End cluster expansion (while changed) // 8. Finalize the current cluster (same logic as before) if (!current_cluster_indices.empty()) { double sum_intensity = 0.0; double sum_mz_intensity = 0.0; double sum_im_intensity = 0.0; for (OpenMS::Size original_idx : current_cluster_indices) { const auto& p = points[original_idx]; sum_intensity += p.intensity; sum_mz_intensity += p.mz * p.intensity; sum_im_intensity += p.im * p.intensity; } if (sum_intensity > std::numeric_limits<double>::epsilon()) { averaged_points.emplace_back( sum_mz_intensity / sum_intensity, sum_im_intensity / sum_intensity, static_cast<float>(sum_intensity), 0 // Original index is meaningless for averaged points ); } } // Optimization: Check if all points are processed if (num_used == points.size()) { break; // Exit the main loop early } } // End main loop (for intensity_sorted_indices) // 9. 
Update spectrum (same logic as before) spectrum.resize(averaged_points.size()); spectrum.shrink_to_fit(); im_data.resize(averaged_points.size()); im_data.shrink_to_fit(); for (size_t i = 0; i != averaged_points.size(); ++i) { const auto& p = averaged_points[i]; spectrum[i].setMZ(p.mz); spectrum[i].setIntensity(p.intensity); im_data[i] = p.im; } spectrum.sortByPosition(); spectrum.updateRanges(); // ensure the output IM array is updated spectrum.getFloatDataArrays()[im_data_index].setName(Constants::UserParam::ION_MOBILITY_CENTROID); spectrum.setIMFormat(IMFormat::CENTROIDED); removeAllFloatDataArraysExcept(spectrum, Constants::UserParam::ION_MOBILITY_CENTROID); } // End of pickIMCluster function void PeakPickerIM::pickIMElutionProfiles(MSSpectrum& input) const { if (input.empty()) return; // Validate IM format - returns false if we should skip processing if (!validateIMFormatForPicking(input)) { return; } // Get IM data array if (!input.containsIMData()) { OPENMS_LOG_WARN << "No ion mobility data found in spectrum.\n"; return; } const auto [im_data_index, im_unit] = input.getIMData(); auto& im_data = input.getFloatDataArrays()[im_data_index]; // convert to MSExperiment and set drift time as RT MSExperiment frame_as_spectra = IMDataConverter::reshapeIMFrameToMany(input); for (auto& s : frame_as_spectra) { s.setRT(s.getDriftTime()); s.setDriftTime(-1); s.setMSLevel(1); } #ifdef DEBUG_IM_PICKER // write out IM frame as RT/MZ for debugging purposes to test algorithm that yet don't support the IM dimension MzMLFile().store("debug" + String(input.getRT()) + ".mzML", frame_as_spectra); #endif if (frame_as_spectra.size() <= 3 ) return; // detect mass traces in IM frame MassTraceDetection mte; Param param = mte.getParameters(); // disable most filter criteria param.setValue("min_trace_length", -1.0); param.setValue("max_trace_length", -1.0); param.setValue("noise_threshold_int", 0.1); // only ignore 0 peaks param.setValue("chrom_peak_snr", 0.0); 
param.setValue("reestimate_mt_sd", "false"); param.setValue("mass_error_ppm", ppm_tolerance_elution_); param.setValue("trace_termination_criterion", "outlier"); param.setValue("trace_termination_outlier", 1); mte.setLogType(ProgressLogger::NONE); mte.setParameters(param); vector<MassTrace> output_mt; mte.run(frame_as_spectra, output_mt); ElutionPeakDetection epd; param = epd.getParameters(); param.setValue("chrom_fwhm", 0.01); param.setValue("chrom_peak_snr", 0.0); param.setValue("width_filtering", "off"); param.setValue("min_fwhm", -1.0); param.setValue("max_fwhm", 1e6); param.setValue("masstrace_snr_filtering", "false"); epd.setParameters(param); std::vector<MassTrace> split_mtraces; epd.detectPeaks(output_mt, split_mtraces); output_mt.clear(); // copy mass traces centroids back to peaks input.resize(split_mtraces.size()); input.shrink_to_fit(); im_data.resize(split_mtraces.size()); im_data.shrink_to_fit(); for (Size i = 0; i < split_mtraces.size(); ++i) { const MassTrace& mt = split_mtraces[i]; input[i].setMZ(mt.getCentroidMZ()); // Use computeIntensitySum() instead of getIntensity() because getIntensity() depends on // FWHM indices which may not be set for short traces (returns 0 if fwhm_start_idx_ == fwhm_end_idx_ == 0) input[i].setIntensity(mt.computeIntensitySum()); im_data[i] = mt.getCentroidRT(); // IM } input.sortByPosition(); input.updateRanges(); // ensure the output im name is updated input.getFloatDataArrays()[im_data_index].setName(Constants::UserParam::ION_MOBILITY_CENTROID); input.setIMFormat(IMFormat::CENTROIDED); removeAllFloatDataArraysExcept(input, Constants::UserParam::ION_MOBILITY_CENTROID); } } // namespace OpenMS
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/CENTROIDING/PeakPickerIterative.cpp
.cpp
455
17
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Hannes Roest $ // $Authors: Hannes Roest $ // -------------------------------------------------------------------------- #include <OpenMS/PROCESSING/CENTROIDING/PeakPickerIterative.h> using namespace std; namespace OpenMS { }
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/BASELINE/MorphologicalFilter.cpp
.cpp
363
14
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: $ // -------------------------------------------------------------------------- // namespace OpenMS { }
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/SMOOTHING/GaussFilter.cpp
.cpp
7,699
214
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Eva Lange $
// --------------------------------------------------------------------------

#include <OpenMS/PROCESSING/SMOOTHING/GaussFilter.h>

#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/KERNEL/Mobilogram.h>
#include <OpenMS/KERNEL/MSExperiment.h>

#include <cmath>

namespace OpenMS
{
  // Sets up the parameter defaults; the actual kernel is built lazily in
  // updateMembers_() from the current parameter values.
  GaussFilter::GaussFilter() :
    ProgressLogger(),
    DefaultParamHandler("GaussFilter"),
    spacing_(0.01)
  {
    //Parameter settings
    defaults_.setValue("gaussian_width", 0.2, "Use a gaussian filter width which has approximately the same width as your mass peaks (FWHM in m/z).");
    defaults_.setMinFloat("gaussian_width", 0.0);
    defaults_.setValue("ppm_tolerance", 10.0, "Gaussian width, depending on the m/z position.\nThe higher the value, the wider the peak and therefore the wider the gaussian.");
    defaults_.setMinFloat("ppm_tolerance", 0.0);
    defaults_.setValue("use_ppm_tolerance", "false", "If true, instead of the gaussian_width value, the ppm_tolerance is used. The gaussian is calculated in each step anew, so " "this is much slower.");
    defaults_.setValidStrings("use_ppm_tolerance", {"true","false"});
    defaults_.setValue("write_log_messages", "false", "true: Warn if no signal was found by the Gauss filter algorithm.");
    defaults_.setValidStrings("write_log_messages", {"true","false"});
    defaultsToParam_();
  }

  // Re-initializes the internal GaussFilterAlgorithm whenever parameters change.
  void GaussFilter::updateMembers_()
  {
    gauss_algo_.initialize(
        (double)param_.getValue("gaussian_width"),
        spacing_,
        (double)param_.getValue("ppm_tolerance"),
        param_.getValue("use_ppm_tolerance").toBool());
    write_log_messages_ = param_.getValue("write_log_messages").toBool();
  }

  // Smooths a single spectrum in place (m/z + intensity); marks it as PROFILE data.
  void GaussFilter::filter(MSSpectrum & spectrum)
  {
    // make sure the right data type is set
    spectrum.setType(SpectrumSettings::SpectrumType::PROFILE);

    bool found_signal = false;
    const Size data_size = spectrum.size();
    std::vector<double> mz_in(data_size), int_in(data_size), mz_out(data_size), int_out(data_size);

    // copy spectrum to container
    for (Size p = 0; p < spectrum.size(); ++p)
    {
      mz_in[p] = spectrum[p].getMZ();
      int_in[p] = static_cast<double>(spectrum[p].getIntensity());
    }

    // apply filter
    auto mz_out_it = mz_out.begin();
    auto int_out_it = int_out.begin();
    found_signal = gauss_algo_.filter(mz_in.begin(), mz_in.end(), int_in.begin(), mz_out_it, int_out_it);

    // If all intensities are zero in the scan and the scan has a reasonable size,
    // only a warning is logged (no exception is thrown) and the spectrum is left
    // unmodified. This situation typically means the Gaussian filter width is
    // smaller than the spacing of the raw data.
    if (!found_signal && spectrum.size() >= 3)
    {
      if (write_log_messages_)
      {
        String error_message = "Found no signal. The Gaussian width is probably smaller than the spacing in your profile data. Try to use a bigger width.";
        if (spectrum.getRT() > 0.0)
        {
          error_message += String(" The error occurred in the spectrum with retention time ") + spectrum.getRT() + ".";
        }
        OPENMS_LOG_WARN << error_message << std::endl;
      }
    }
    else
    {
      // copy the new data into the spectrum
      for (Size p = 0; p < spectrum.size(); ++p)
      {
        spectrum[p].setIntensity(int_out[p]);
        spectrum[p].setMZ(mz_out[p]);
      }
    }
  }

  // Smooths a chromatogram in place (RT + intensity). ppm-based widths are not
  // meaningful on the RT axis, hence the IllegalArgument guard.
  void GaussFilter::filter(MSChromatogram & chromatogram)
  {
    if (param_.getValue("use_ppm_tolerance").toBool())
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                       "GaussFilter: Cannot use ppm tolerance on chromatograms");
    }

    bool found_signal = false;
    const Size data_size = chromatogram.size();
    std::vector<double> rt_in(data_size), int_in(data_size), rt_out(data_size), int_out(data_size);

    // copy spectrum to container
    for (Size p = 0; p < chromatogram.size(); ++p)
    {
      rt_in[p] = chromatogram[p].getRT();
      int_in[p] = chromatogram[p].getIntensity();
    }

    // apply filter
    auto mz_out_it = rt_out.begin();
    auto int_out_it = int_out.begin();
    found_signal = gauss_algo_.filter(rt_in.begin(), rt_in.end(), int_in.begin(), mz_out_it, int_out_it);

    // If no signal was found (and the chromatogram has a reasonable size), an
    // error is logged (no exception) and the input is left unmodified. This is
    // the case if the Gaussian filter is smaller than the spacing of raw data.
    if (!found_signal && chromatogram.size() >= 3)
    {
      if (write_log_messages_)
      {
        String error_message = "Found no signal. The Gaussian width is probably smaller than the spacing in your chromatogram data. Try to use a bigger width.";
        if (chromatogram.getMZ() > 0.0)
        {
          error_message += String(" The error occurred in the chromatogram with m/z ratio ") + chromatogram.getMZ() + ".";
        }
        OPENMS_LOG_ERROR << error_message << std::endl;
      }
    }
    else
    {
      // copy the new data into the chromatogram
      for (Size p = 0; p < chromatogram.size(); ++p)
      {
        chromatogram[p].setIntensity(int_out[p]);
        chromatogram[p].setRT(rt_out[p]);
      }
    }
  }

  // Smooths a mobilogram in place (ion mobility + intensity). As for
  // chromatograms, ppm-based widths are rejected.
  void GaussFilter::filter(Mobilogram & mobilogram)
  {
    if (param_.getValue("use_ppm_tolerance").toBool())
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                       "GaussFilter: Cannot use ppm tolerance on mobilogram");
    }

    bool found_signal = false;
    const Size data_size = mobilogram.size();
    std::vector<double> im_in(data_size), int_in(data_size), im_out(data_size), int_out(data_size);

    // copy spectrum to container
    for (Size p = 0; p < mobilogram.size(); ++p)
    {
      im_in[p] = mobilogram[p].getMobility();
      int_in[p] = mobilogram[p].getIntensity();
    }

    // apply filter
    auto im_out_it = im_out.begin();
    auto int_out_it = int_out.begin();
    found_signal = gauss_algo_.filter(im_in.begin(), im_in.end(), int_in.begin(), im_out_it, int_out_it);

    // If no signal was found (and the mobilogram has a reasonable size), an
    // error is logged (no exception) and the input is left unmodified. This is
    // the case if the Gaussian filter is smaller than the spacing of raw data.
    if (!found_signal && mobilogram.size() >= 3)
    {
      if (write_log_messages_)
      {
        String error_message = "Found no signal. The Gaussian width is probably smaller than the spacing in your mobilogram data. Try to use a bigger width.";
        if (mobilogram.getRT() > 0.0)
        {
          error_message += String(" The error occurred in the mobilogram with RT ") + mobilogram.getRT() + ".";
        }
        OPENMS_LOG_ERROR << error_message << std::endl;
      }
    }
    else
    {
      // copy the new data into the spectrum
      auto im_it = im_out.begin();
      auto int_it = int_out.begin();
      for (Size p = 0; im_it != im_out.end(); im_it++, int_it++, p++)
      {
        mobilogram[p].setIntensity(*int_it);
        mobilogram[p].setMobility(*im_it);
      }
    }
  }

  // Smooths every spectrum and every chromatogram of the map, with progress logging.
  void GaussFilter::filterExperiment(PeakMap & map)
  {
    Size progress = 0;
    startProgress(0, map.size() + map.getChromatograms().size(), "smoothing data");
    for (Size i = 0; i < map.size(); ++i)
    {
      filter(map[i]);
      setProgress(++progress);
    }
    for (Size i = 0; i < map.getChromatograms().size(); ++i)
    {
      filter(map.getChromatogram(i));
      setProgress(++progress);
    }
    endProgress();
  }

}
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/SMOOTHING/SavitzkyGolayFilter.cpp
.cpp
2,634
81
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Eva Lange $
// --------------------------------------------------------------------------

#include <OpenMS/PROCESSING/SMOOTHING/SavitzkyGolayFilter.h>
#include <OpenMS/MATH/MathFunctions.h>

#include <Eigen/Core>
#include <Eigen/SVD>

namespace OpenMS
{
  // Sets up the parameter defaults; the filter coefficients are (re)computed
  // in updateMembers_() from the current parameter values.
  SavitzkyGolayFilter::SavitzkyGolayFilter() :
    ProgressLogger(),
    DefaultParamHandler("SavitzkyGolayFilter"),
    coeffs_()
  {
    defaults_.setValue("frame_length", 11, "The number of subsequent data points used for smoothing.\nThis number has to be uneven. If it is not, 1 will be added.");
    defaults_.setValue("polynomial_order", 4, "Order or the polynomial that is fitted.");

    defaultsToParam_();
  }

  SavitzkyGolayFilter::~SavitzkyGolayFilter() = default;

  // Recomputes the Savitzky-Golay convolution coefficients via a
  // least-squares polynomial fit (solved through an SVD of the Vandermonde
  // matrix). One coefficient set of length frame_size_ is stored for each
  // left-boundary overhang nl = 0 .. frame_size_/2, packed back-to-back in
  // coeffs_ (hence the size frame_size_ * (frame_size_/2 + 1)); the
  // non-boundary case is nl == frame_size_/2. Each set is written in
  // reversed order via the index (nl + 1) * frame_size_ - i - 1.
  void SavitzkyGolayFilter::updateMembers_()
  {
    frame_size_ = (UInt)param_.getValue("frame_length");
    order_ = (UInt)param_.getValue("polynomial_order");

    //recalculate coefficients
    if (!Math::isOdd(frame_size_))
    {
      // an even frame length has no center point; enforce oddness as documented
      frame_size_ += 1;
    }
    if (frame_size_ <= order_)
    {
      throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "The degree of the polynomial has to be less than the frame length.", String(order_));
    }
    coeffs_.resize(frame_size_ * (frame_size_ / 2 + 1));

    for (int nl = 0; nl <= (int) (frame_size_ / 2); ++nl)
    {
      // nl/nr: number of points to the left/right of the evaluation point
      int nr = frame_size_ - 1 - nl;

      // compute a Vandermonde matrix whose columns are powers of the vector [-nL,...,nR]
      Eigen::MatrixXd A (frame_size_, order_ + 1);
      for (int i = -nl; i <= nr; i++)
      {
        for (int j = 0; j <= static_cast<int>(order_); j++)
        {
          A(i + nl, j) = std::pow((float)i, j); // pow(int, int) is not defined
        }
      }

      // compute the singular-value decomposition of A
      Eigen::JacobiSVD<Eigen::MatrixXd> svd (A, Eigen::ComputeThinU | Eigen::ComputeThinV);

      Eigen::VectorXd B (order_ + 1);
      for (UInt i = 0; i <= order_; ++i)
      {
        // first row of V scaled by the inverse singular values: part of the
        // pseudo-inverse applied to the unit vector below
        B(i) = svd.matrixV()(0, i) / svd.singularValues()(i);
      }

      // compute B*transpose(U)*b, where b is the unit vector b=[1 0 ... 0]
      for (UInt i = 0; i < frame_size_; ++i)
      {
        coeffs_[(nl + 1) * frame_size_ - i - 1] = 0;
        for (UInt j = 0; j <= order_; ++j)
        {
          coeffs_[(nl + 1) * frame_size_ - i - 1] += B(j) * svd.matrixU()(i, j);
        }
      }
    }
  }

}
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/SMOOTHING/GaussFilterAlgorithm.cpp
.cpp
1,585
51
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Hannes Roest $ // $Authors: Eva Lange $ // -------------------------------------------------------------------------- #include <OpenMS/PROCESSING/SMOOTHING/GaussFilterAlgorithm.h> namespace OpenMS { GaussFilterAlgorithm::GaussFilterAlgorithm() : coeffs_(), sigma_(0.1), spacing_(0.01), use_ppm_tolerance_(false), ppm_tolerance_(10.0) { initialize(sigma_ * 8, spacing_, ppm_tolerance_, use_ppm_tolerance_); } GaussFilterAlgorithm::~GaussFilterAlgorithm() = default; void GaussFilterAlgorithm::initialize(double gaussian_width, double spacing, double ppm_tolerance, bool use_ppm_tolerance) { spacing_ = spacing; use_ppm_tolerance_ = use_ppm_tolerance; ppm_tolerance_ = ppm_tolerance; sigma_ = gaussian_width / 8.0; Size number_of_points_right = (Size)(ceil(4 * sigma_ / spacing_)) + 1; coeffs_.resize(number_of_points_right); coeffs_[0] = 1.0 / (sigma_ * sqrt(2.0 * Constants::PI)); for (Size i = 1; i < number_of_points_right; i++) { coeffs_[i] = 1.0 / (sigma_ * sqrt(2.0 * Constants::PI)) * exp(-((i * spacing_) * (i * spacing_)) / (2 * sigma_ * sigma_)); } #ifdef DEBUG_FILTERING std::cout << "Coeffs: " << std::endl; for (Size i = 0; i < number_of_points_right; i++) { std::cout << i * spacing_ << ' ' << coeffs_[i] << std::endl; } #endif } }
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/SMOOTHING/LowessSmoothing.cpp
.cpp
3,588
115
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg$ // $Authors: Erhan Kenar, Holger Franken $ // -------------------------------------------------------------------------- #include <OpenMS/PROCESSING/SMOOTHING/LowessSmoothing.h> #include <OpenMS/ML/REGRESSION/QuadraticRegression.h> #include <algorithm> #include <cmath> namespace OpenMS { LowessSmoothing::LowessSmoothing() : DefaultParamHandler("LowessSmoothing") { defaults_.setValue("window_size", 10, "The number of peaks to be included for local fitting in one window."); defaultsToParam_(); } LowessSmoothing::~LowessSmoothing() = default; void LowessSmoothing::smoothData(const DoubleVector& input_x, const DoubleVector& input_y, DoubleVector& smoothed_output) { if (input_x.size() != input_y.size()) { throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Sizes of x and y values not equal! Aborting... ", String(input_x.size())); } // unable to smooth over 2 or less data points (we need at least 3) if (input_x.size() <= 2) { smoothed_output = input_y; return; } Size input_size = input_y.size(); // const Size q = floor( input_size * alpha ); const Size q = (window_size_ < input_size) ? static_cast<Size>(window_size_) : input_size - 1; DoubleVector distances(input_size, 0.0); DoubleVector sortedDistances(input_size, 0.0); for (Size outer_idx = 0; outer_idx < input_size; ++outer_idx) { // Compute distances. // Size inner_idx = 0; for (Size inner_idx = 0; inner_idx < input_size; ++inner_idx) { distances[inner_idx] = std::fabs(input_x[outer_idx] - input_x[inner_idx]); sortedDistances[inner_idx] = distances[inner_idx]; } // Sort distances in order from smallest to largest. std::sort(sortedDistances.begin(), sortedDistances.end()); // Compute weigths. 
std::vector<double> weigths(input_size, 0); for (Size inner_idx = 0; inner_idx < input_size; ++inner_idx) { weigths.at(inner_idx) = tricube_(distances[inner_idx], sortedDistances[q]); } //calculate regression Math::QuadraticRegression qr; std::vector<double>::const_iterator w_begin = weigths.begin(); qr.computeRegressionWeighted(input_x.begin(), input_x.end(), input_y.begin(), w_begin); //smooth y-values double rt = input_x[outer_idx]; smoothed_output.push_back(qr.eval(rt)); } return; } double LowessSmoothing::tricube_(double u, double t) { // In our case, u represents a distance and hence should be strictly positive. if (u < 0) { throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Value of u must be strictly positive! Aborting...", String(u)); } // 0 <= u < t; u is regarded as 0.0 if fabs(u) falls below epsilon if ((std::fabs(u) < std::numeric_limits<double>::epsilon() || (0.0 < u)) && (u < t)) { // (1 - (u/t)^3)^3 // return pow( ( 1.0 - pow(u/t, 3.0)), 3.0 ); double quot(u / t); double inner_term(1.0 - quot * quot * quot); return inner_term * inner_term * inner_term; } // u >= t else { return 0.0; } } void LowessSmoothing::updateMembers_() { window_size_ = (Size)param_.getValue("window_size"); } } //namespace OpenMS
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/SMOOTHING/FastLowessSmoothing.cpp
.cpp
21,495
601
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Hannes Roest $ // $Authors: Hannes Roest $ // -------------------------------------------------------------------------- /* * This code below is C code, obtained from the common lisp stat project under the BSD licence: * https://raw.githubusercontent.com/blindglobe/common-lisp-stat/3bdd28c4ae3de28dce32d8b9158c1f8d1b2e3924/lib/lowess.c * * Like much lowess code, it is derived from the initial FORTRAN code by W. S. * Cleveland published at NETLIB. The original FORTRAN code can be found at * http://www.netlib.org/go/lowess.f * * Other implementations and ports of the same code can be found at the R project: http://svn.r-project.org/R/trunk/src/library/stats/src/lowess.c * while a Cython version is available at: https://github.com/statsmodels/statsmodels/blob/master/statsmodels/nonparametric/_smoothers_lowess.pyx * */ #include <OpenMS/PROCESSING/SMOOTHING/FastLowessSmoothing.h> #include <cmath> #include <algorithm> // std::min, std::max #include <cstdlib> #include <vector> #include <functional> namespace c_lowess { /* The lowess code below is Translated from RATFOR lowess code of W. S. Cleveland as obtained from NETLIB. It is based on two functions written in ratfor (see below), namely lowest and lowess. The code has since been refactored and commented further. */ /* ratfor code for lowest: * * subroutine lowest(x,y,n,xs,ys,nleft,nright,w,userw,rw,ok) * real x(n),y(n),w(n),rw(n) * logical userw,ok * range = x(n)-x(1) * h = amax1(xs-x(nleft),x(nright)-xs) * h9 = .999*h * h1 = .001*h * a = 0.0 # sum of weights * for(j=nleft; j<=n; j=j+1){ # compute weights (pick up all ties on right) * w(j)=0. * r = abs(x(j)-xs) * if (r<=h9) { # small enough for non-zero weight * if (r>h1) w(j) = (1.0-(r/h)**3)**3 * else w(j) = 1. 
* if (userw) w(j) = rw(j)*w(j) * a = a+w(j) * } * else if(x(j)>xs)break # get out at first zero wt on right * } * nrt=j-1 # rightmost pt (may be greater than nright because of ties) * if (a<=0.0) ok = FALSE * else { # weighted least squares * ok = TRUE * do j = nleft,nrt * w(j) = w(j)/a # make sum of w(j) == 1 * if (h>0.) { # use linear fit * a = 0.0 * do j = nleft,nrt * a = a+w(j)*x(j) # weighted center of x values * b = xs-a * c = 0.0 * do j = nleft,nrt * c = c+w(j)*(x(j)-a)**2 * if(sqrt(c)>.001*range) { * # points are spread out enough to compute slope * b = b/c * do j = nleft,nrt * w(j) = w(j)*(1.0+b*(x(j)-a)) * } * } * ys = 0.0 * do j = nleft,nrt * ys = ys+w(j)*y(j) * } * return * end */ /* ratfor code for lowess: * * subroutine lowess(x,y,n,f,nsteps,delta,ys,rw,res) * real x(n),y(n),ys(n),rw(n),res(n) * logical ok * if (n<2){ ys(1) = y(1); return } * ns = max0(min0(ifix(f*float(n)),n),2) # at least two, at most n points * for(iter=1; iter<=nsteps+1; iter=iter+1){ # robustness iterations * nleft = 1; nright = ns * last = 0 # index of prev estimated point * i = 1 # index of current point * repeat{ * while(nright<n){ * # move nleft, nright to right if radius decreases * d1 = x(i)-x(nleft) * d2 = x(nright+1)-x(i) * # if d1<=d2 with x(nright+1)==x(nright), lowest fixes * if (d1<=d2) break * # radius will not decrease by move right * nleft = nleft+1 * nright = nright+1 * } * call lowest(x,y,n,x(i),ys(i),nleft,nright,res,iter>1,rw,ok) * # fitted value at x(i) * if (!ok) ys(i) = y(i) * # all weights zero - copy over value (all rw==0) * if (last<i-1) { # skipped points -- interpolate * denom = x(i)-x(last) # non-zero - proof? 
* for(j=last+1; j<i; j=j+1){ * alpha = (x(j)-x(last))/denom * ys(j) = alpha*ys(i)+(1.0-alpha)*ys(last) * } * } * last = i # last point actually estimated * cut = x(last)+delta # x coord of close points * for(i=last+1; i<=n; i=i+1){ # find close points * if (x(i)>cut) break # i one beyond last pt within cut * if(x(i)==x(last)){ # exact match in x * ys(i) = ys(last) * last = i * } * } * i=max0(last+1,i-1) * # back 1 point so interpolation within delta, but always go forward * } until(last>=n) * do i = 1,n # residuals * res(i) = y(i)-ys(i) * if (iter>nsteps) break # compute robustness weights except last time * do i = 1,n * rw(i) = abs(res(i)) * call sort(rw,n) * m1 = 1+n/2; m2 = n-m1+1 * cmad = 3.0*(rw(m1)+rw(m2)) # 6 median abs resid * c9 = .999*cmad; c1 = .001*cmad * do i = 1,n { * r = abs(res(i)) * if(r<=c1) rw(i)=1. # near 0, avoid underflow * else if(r>c9) rw(i)=0. # near 1, avoid underflow * else rw(i) = (1.0-(r/cmad)**2)**2 * } * } * return * end */ /// Templated lowess class, call with template container (can be anything /// that supports random access) template <typename ContainerType, typename ValueType> class TemplatedLowess { inline ValueType pow2(ValueType x) { return x * x; } inline ValueType pow3(ValueType x) { return x * x * x; } /// Calculate weights for weighted regression. bool calculate_weights(const ContainerType& x, const size_t n, const ValueType current_x, const bool use_resid_weights, const size_t nleft, const ContainerType& resid_weights, ContainerType& weights, size_t& nrt, const ValueType h) { ValueType r; size_t j; ValueType h9 = .999 * h; ValueType h1 = .001 * h; ValueType a = 0.0; // sum of weights // compute weights (pick up all ties on right) for (j = nleft; j < n; j++) { // Compute the distance measure, then apply the tricube // function on the distance to get the weight. // use_resid_weights will be False on the first iteration, then True // on the subsequent ones, after some residuals have been calculated. 
weights[j] = 0.0; r = std::abs(x[j] - current_x); if (r <= h9) { if (r > h1) { // small enough for non-zero weight // compute tricube function: ( 1 - (r/h)^3 )^3 weights[j] = pow3(1.0 - pow3(r / h)); } else { weights[j] = 1.0; } if (use_resid_weights) { weights[j] = resid_weights[j] * weights[j]; } a += weights[j]; } else if (x[j] > current_x) { // get out at first zero wt on right break; } } // rightmost pt (may be greater than nright because of ties) nrt = j - 1; if (a <= 0.0) { return false; } else { // normalize weights (make sum of w[j] == 1) for (j = nleft; j <= nrt; j++) { weights[j] = weights[j] / a; } return true; } } /// Calculate smoothed/fitted y-value by weighted regression. void calculate_y_fit(const ContainerType& x, const ContainerType& y, const ValueType current_x, const size_t n, const size_t nleft, const size_t nrt, const ValueType h, ValueType& ys, ContainerType& weights) { ValueType range = x[n - 1] - x[0]; if (h > 0.0) { // use linear fit // No regression function (e.g. lstsq) is called. Instead a "projection // vector" p_i_j is calculated, and y_fit[i] = sum(p_i_j * y[j]) = y_fit[i] // for j s.t. x[j] is in the neighborhood of x[i]. p_i_j is a function of // the weights, x[i], and its neighbors. // To save space, p_i_j is computed in place using the weight vector. 
// find weighted center of x values ValueType sum_weighted_x = 0.0; // originally variable a for (size_t j = nleft; j <= nrt; j++) { sum_weighted_x += weights[j] * x[j]; } ValueType b = current_x - sum_weighted_x; // originally variable b ValueType weighted_sqdev = 0.0; // originally variable c for (size_t j = nleft; j <= nrt; j++) { weighted_sqdev += weights[j] * (x[j] - sum_weighted_x) * (x[j] - sum_weighted_x); } if (sqrt(weighted_sqdev) > .001 * range) { // points are spread out enough to compute slope b = b / weighted_sqdev; for (size_t j = nleft; j <= nrt; j++) { // Compute p_i_j in place weights[j] = weights[j] * (1.0 + b * (x[j] - sum_weighted_x)); } } } ys = 0.0; for (size_t j = nleft; j <= nrt; j++) { ys += weights[j] * y[j]; } } bool lowest(const ContainerType& x, const ContainerType& y, size_t n, ValueType current_x, //xs ValueType& ys, size_t nleft, size_t nright, ContainerType& weights, // vector w bool use_resid_weights, // userw const ContainerType& resid_weights) { ValueType h; size_t nrt; // rightmost pt (may be greater than nright because of ties) h = std::max(current_x - x[nleft], x[nright] - current_x); // Calculate the weights for the regression in this neighborhood. // Determine if at least some weights are positive, so a regression // is ok. bool fit_ok = calculate_weights(x, n, current_x, use_resid_weights, nleft, resid_weights, weights, nrt, h); if (!fit_ok) { return fit_ok; } // If it is ok to fit, run the weighted least squares regression calculate_y_fit(x, y, current_x, n, nleft, nrt, h, ys, weights); return fit_ok; } /// Find the indices bounding the k-nearest-neighbors of the current point. void update_neighborhood(const ContainerType& x, const size_t n, const size_t i, size_t& nleft, size_t& nright) { ValueType d1, d2; // A subtle loop. Start from the current neighborhood range: // [nleft, nright). 
Shift both ends rightwards by one // (so that the neighborhood still contains ns points), until // the current point is in the center (or just to the left of // the center) of the neighborhood. This neighborhood will // contain the ns-nearest neighbors of x[i]. // // Once the right end hits the end of the data, hold the // neighborhood the same for the remaining x[i]s. while (nright < n - 1) { // move nleft, nright to right if radius decreases d1 = x[i] - x[nleft]; d2 = x[nright + 1] - x[i]; // if d1 <= d2 with x[nright+1] == x[nright], lowest fixes if (d1 <= d2) break; // radius will not decrease by move right nleft++; nright++; } } /// Update the counters of the local regression. void update_indices(const ContainerType& x, const size_t n, const ValueType delta, size_t& i, size_t& last, ContainerType& ys) { // For most points within delta of the current point, we skip the // weighted linear regression (which save much computation of // weights and fitted points). Instead, we'll jump to the last // point within delta, fit the weighted regression at that point, // and linearly interpolate in between. // the last point actually estimated last = i; // This loop increments until we fall just outside of delta distance, // copying the results for any repeated x's along the way. ValueType cut = x[last] + delta; for (i = last + 1; i < n; i++) { // find close points if (x[i] > cut) break; // i one beyond last pt within cut if (x[i] == x[last]) { // exact match in x // if tied with previous x-value, just use the already // fitted y, and update the last-fit counter. ys[i] = ys[last]; last = i; } } // the next point to fit the regression at is either one prior to i (since // i should be the first point outside of delta) or it is "last + 1" in the // case that i never got incremented. This insures we always step forward. 
// -> back 1 point so interpolation within delta, but always go forward i = std::max(last + 1, i - 1); } /// Calculate smoothed/fitted y by linear interpolation between the current /// and previous y fitted by weighted regression. void interpolate_skipped_fits(const ContainerType& x, const size_t i, const size_t last, ContainerType& ys) { // skipped points -- interpolate ValueType alpha; ValueType denom = x[i] - x[last]; // non-zero - proof? for (size_t j = last + 1; j < i; j = j + 1) { alpha = (x[j] - x[last]) / denom; ys[j] = alpha * ys[i] + (1.0 - alpha) * ys[last]; } } /// Calculate residual weights for the next `robustifying` iteration. void calculate_residual_weights(const size_t n, const ContainerType& weights, ContainerType& resid_weights) { ValueType r; for (size_t i = 0; i < n; i++) { resid_weights[i] = std::abs(weights[i]); } // *********************************** // Compute pseudo-median (take average even if we have an odd number of // elements), following the original implementation. We could also use a // true median calculation here: // ValueType cmad = 6.0 * median(resid_weights.begin(), resid_weights.end()); // *********************************** size_t m1 = n / 2; // FORTRAN starts with one, CPP with zero // size_t m1 = 1 + n / 2; // original FORTRAN code // size_t m2 = n - m1 + 1; // see below, we don't explicitly sort but use max_element // Use nth element to find element m1, which produces a partially sorted // vector. This means we can get element m2 by looking for the maximum in the // remainder. 
typename ContainerType::iterator it_m1 = resid_weights.begin() + m1; std::nth_element(resid_weights.begin(), it_m1, resid_weights.end()); typename ContainerType::iterator it_m2 = std::max_element( resid_weights.begin(), it_m1); ValueType cmad = 3.0 * (*it_m1 + *it_m2); ValueType c9 = .999 * cmad; ValueType c1 = .001 * cmad; for (size_t i = 0; i < n; i++) { r = std::abs(weights[i]); if (r <= c1) { // near 0, avoid underflow resid_weights[i] = 1.0; } else if (r > c9) { // near 1, avoid underflow resid_weights[i] = 0.0; } else { resid_weights[i] = pow2(1.0 - pow2(r / cmad)); } } } public: int lowess(const ContainerType& x, const ContainerType& y, double frac, // parameter f int nsteps, ValueType delta, ContainerType& ys, ContainerType& resid_weights, // vector rw ContainerType& weights // vector res ) { bool fit_ok; size_t ns, n(x.size()); if (n < 2) { ys[0] = y[0]; return 1; } // how many points around estimation point should be used for regression: // at least two, at most n points size_t tmp = (size_t)(frac * (double)n); ns = std::max(std::min(tmp, n), (size_t)2); // robustness iterations for (int iter = 1; iter <= nsteps + 1; iter++) { // start of array in C++ at 0 / in FORTRAN at 1 // last: index of prev estimated point // i: index of current point size_t i(0), last(-1), nleft(0), nright(ns -1); // Fit all data points y[i] until the end of the array do { // Identify the neighborhood around the current x[i] // -> get the nearest ns points update_neighborhood(x, n, i, nleft, nright); // Calculate weights and apply fit (original lowest function) fit_ok = lowest(x, y, n, x[i], ys[i], nleft, nright, weights, (iter > 1), resid_weights); // if something went wrong during the fit, use y[i] as the // fitted value at x[i] if (!fit_ok) ys[i] = y[i]; // If we skipped some points (because of how delta was set), go back // and fit them by linear interpolation. 
if (last < i - 1) { interpolate_skipped_fits(x, i, last, ys); } // Update the last fit counter to indicate we've now fit this point. // Find the next i for which we'll run a regression. update_indices(x, n, delta, i, last, ys); } while (last < n - 1); // compute current residuals for (i = 0; i < n; i++) { weights[i] = y[i] - ys[i]; } // compute robustness weights except last time if (iter > nsteps) break; calculate_residual_weights(n, weights, resid_weights); } return 0; } }; } namespace OpenMS::FastLowessSmoothing { int lowess(const std::vector<double>& x, const std::vector<double>& y, double f, int nsteps, double delta, std::vector<double>& result) { OPENMS_PRECONDITION(delta >= 0.0, "lowess: parameter delta must be zero or larger") OPENMS_PRECONDITION(f > 0.0, "lowess: parameter f must be larger than 0") OPENMS_PRECONDITION(f <= 1.0, "lowess: parameter f must be smaller or equal to 1") OPENMS_PRECONDITION(nsteps >= 0, "lowess: parameter nstesp must be zero or larger") OPENMS_PRECONDITION(x.size() == y.size(), "Vectors x and y must have the same length") OPENMS_PRECONDITION(x.size() >= 2, "Need at least two points for smoothing") OPENMS_PRECONDITION(std::adjacent_find(x.begin(), x.end(), std::greater<double>()) == x.end(), "The vector x needs to be sorted") size_t n = x.size(); // result as well as working vectors need to have the correct size result.clear(); result.resize(n); std::vector<double> resid_weights(n); std::vector<double> weights(n); c_lowess::TemplatedLowess<std::vector<double>, double> clowess; int retval = clowess.lowess(x, y, f, nsteps, delta, result, resid_weights, weights); return retval; } int lowess(const std::vector<double>& x, const std::vector<double>& y, std::vector<double>& result) { OPENMS_PRECONDITION(x.size() == y.size(), "Vectors x and y must have the same length") OPENMS_PRECONDITION(x.size() >= 2, "Need at least two points for smoothing") OPENMS_PRECONDITION(std::adjacent_find(x.begin(), x.end(), std::greater<double>()) == x.end(), 
"The vector x needs to be sorted") double delta = 0.01 * (x[ x.size()-1 ] - x[0]); // x is sorted return lowess(x, y, 2.0/3, 3, delta, result); } }
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/NOISEESTIMATION/SignalToNoiseEstimator.cpp
.cpp
1,507
56
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Chris Bielow $ // $Authors: Chris Bielow $ // -------------------------------------------------------------------------- // #include <OpenMS/PROCESSING/NOISEESTIMATION/SignalToNoiseEstimator.h> #include <OpenMS/KERNEL/MSExperiment.h> #include <random> using namespace std; namespace OpenMS { float estimateNoiseFromRandomScans(const MSExperiment& exp, const UInt ms_level, const UInt n_scans, const double percentile) { vector<Size> spec_indices; for (Size i = 0; i < exp.size(); ++i) { if (exp[i].getMSLevel() == ms_level && !exp[i].empty()) { spec_indices.push_back(i); } } if (spec_indices.empty()) return 0.0f; std::default_random_engine generator(time(nullptr)); std::uniform_real_distribution<double> distribution(0.0, 1.0); float noise = 0.0; UInt count = 0; vector<float> tmp; while (count++ < n_scans) { UInt scan = (UInt)(distribution(generator) * (spec_indices.size() - 1)); tmp.clear(); for (const auto& peak : exp[scan]) { tmp.push_back(peak.getIntensity()); } Size idx = tmp.size() * percentile / 100.0; std::nth_element(tmp.begin(), tmp.begin() + idx, tmp.end()); noise += tmp[idx]; } return noise / n_scans; } }
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/NOISEESTIMATION/SignalToNoiseEstimatorMedianRapid.cpp
.cpp
4,097
108
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Hannes Roest $ // $Authors: Hannes Roest $ // -------------------------------------------------------------------------- #include <OpenMS/PROCESSING/NOISEESTIMATION/SignalToNoiseEstimatorMedianRapid.h> #include <algorithm> #include <numeric> // array_wrapper needs to be included before it is used // only in boost1.64+. See issue #2790 #if OPENMS_BOOST_VERSION_MINOR >= 64 #include <boost/serialization/array_wrapper.hpp> #endif #include <boost/accumulators/accumulators.hpp> #include <boost/accumulators/statistics/mean.hpp> #include <boost/accumulators/statistics/variance.hpp> namespace OpenMS { void SignalToNoiseEstimatorMedianRapid::computeNoiseInWindows_( const std::vector<double>& mz_array, std::vector<double> int_array, std::vector<double> & result, double mz_start) { // PRECONDITION assert(mz_array.size() == int_array.size()); assert(mz_array.size() > 2); // compute mean and standard deviation double sum = std::accumulate(int_array.begin(), int_array.end(), 0.0); double int_mean = sum / int_array.size(); double sq_sum = std::inner_product(int_array.begin(), int_array.end(), int_array.begin(), 0.0); double int_stdev = std::sqrt(sq_sum / int_array.size() - int_mean * int_mean); std::vector<double>::const_iterator mz_start_it = mz_array.begin(); std::vector<double>::const_iterator mz_end_it; std::vector<double>::iterator int_start_win = int_array.begin(); std::vector<double>::iterator int_end_win = int_array.begin(); for (size_t i = 0; i < result.size(); i++) { // Compute the the correct windows in m/z double mz_end = mz_start + window_length_; mz_end_it = std::lower_bound(mz_start_it, (std::vector<double>::const_iterator)mz_array.end(), mz_end); // Compute the the correct windows in intensity std::iterator_traits< 
std::vector<double>::const_iterator >::difference_type iterator_pos = std::distance(mz_start_it, mz_end_it); std::advance(int_end_win, iterator_pos); // compute median of all data between intensity start and intensity end double median = computeMedian_(int_start_win, int_end_win); result[i] = median; // Deal with a median of zero // // If we find a zero here, try to impute some value that might make sense as noise value ... // alternatively, one could also remove all zeros and compute the median on that if (result[i] == 0) { // Legacy implementation from SignalToNoiseEstimatorMedian // // max_intensity_ = gauss_global.mean + std::sqrt(gauss_global.variance) * auto_max_stdev_Factor_; // From the maximum intensity we can compute the value of the lowest // bin in the histogram of the SignalToNoiseEstimatorMedian algorithm: // maximum intensity divided by 60 result[i] = (int_mean + 3.0 * int_stdev) / 60; } mz_start_it = mz_end_it; int_start_win = int_end_win; mz_start += window_length_; } } double SignalToNoiseEstimatorMedianRapid::computeMedian_(std::vector<double>::iterator & first, std::vector<double>::iterator & last) { std::iterator_traits< std::vector<double>::const_iterator >::difference_type iterator_pos = std::distance(first, last); std::nth_element(first, first + iterator_pos / 2, last); double median; if (iterator_pos == 0) { median = 0.0; } else if (iterator_pos % 2 == 0) { // even case // compute the arithmetic mean between the two middle elements double f = *(first + iterator_pos / 2); std::nth_element(first, first + iterator_pos / 2 -1, last); double s = *(first + iterator_pos / 2 - 1); median = (f+s)/2.0; } else { // odd case median = *(first + iterator_pos / 2); } return median; } } // namespace OpenMS
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/NOISEESTIMATION/SignalToNoiseEstimatorMedian.cpp
.cpp
488
16
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Chris Bielow $ // $Authors: $ // -------------------------------------------------------------------------- // #include <OpenMS/PROCESSING/NOISEESTIMATION/SignalToNoiseEstimatorMedian.h> namespace OpenMS { SignalToNoiseEstimatorMedian<> default_sn_median2; }
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/NOISEESTIMATION/SignalToNoiseEstimatorMeanIterative.cpp
.cpp
502
16
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Chris Bielow $ // $Authors: $ // -------------------------------------------------------------------------- // #include <OpenMS/PROCESSING/NOISEESTIMATION/SignalToNoiseEstimatorMeanIterative.h> namespace OpenMS { SignalToNoiseEstimatorMeanIterative<> default_sn_meanit2; }
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/ID/IDFilter.cpp
.cpp
27,275
811
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Mathias Walzer $ // $Authors: Nico Pfeifer, Mathias Walzer, Hendrik Weisser $ // -------------------------------------------------------------------------- #include <OpenMS/CHEMISTRY/ModificationsDB.h> #include <OpenMS/PROCESSING/ID/IDFilter.h> #include <regex> using namespace std; namespace OpenMS { struct IDFilter::HasMinPeptideLength { typedef PeptideHit argument_type; // for use as a predicate Size length_; explicit HasMinPeptideLength(Size length) : length_(length) { } bool operator()(const PeptideHit& hit) const { return hit.getSequence().size() >= length_; } }; struct IDFilter::HasMinCharge { typedef PeptideHit argument_type; // for use as a predicate Int charge_; explicit HasMinCharge(Int charge) : charge_(charge) { } bool operator()(const PeptideHit& hit) const { return hit.getCharge() >= charge_; } }; struct IDFilter::HasLowMZError { typedef PeptideHit argument_type; // for use as a predicate double precursor_mz_, tolerance_; HasLowMZError(double precursor_mz, double tolerance, bool unit_ppm) : precursor_mz_(precursor_mz), tolerance_(tolerance) { if (unit_ppm) this->tolerance_ *= precursor_mz / 1.0e6; } bool operator()(const PeptideHit& hit) const { Int z = hit.getCharge(); if (z == 0) z = 1; double peptide_mz = hit.getSequence().getMZ(z); return fabs(precursor_mz_ - peptide_mz) <= tolerance_; } }; struct IDFilter::HasMatchingModification { typedef PeptideHit argument_type; // for use as a predicate const set<String>& mods_; explicit HasMatchingModification(const set<String>& mods) : mods_(mods) { } bool operator()(const PeptideHit& hit) const { const AASequence& seq = hit.getSequence(); if (mods_.empty()) { return seq.isModified(); } for (Size i = 0; i < seq.size(); ++i) { if (seq[i].isModified()) { String mod_name = 
seq[i].getModification()->getFullId(); if (mods_.count(mod_name) > 0) return true; } } // terminal modifications: if (seq.hasNTerminalModification()) { String mod_name = seq.getNTerminalModification()->getFullId(); if (mods_.count(mod_name) > 0) return true; } if (seq.hasCTerminalModification()) { String mod_name = seq.getCTerminalModification()->getFullId(); if (mods_.count(mod_name) > 0) return true; } return false; } }; struct IDFilter::HasMatchingSequence { typedef PeptideHit argument_type; // for use as a predicate const set<String>& sequences_; bool ignore_mods_; explicit HasMatchingSequence(const set<String>& sequences, bool ignore_mods = false) : sequences_(sequences), ignore_mods_(ignore_mods) { } bool operator()(const PeptideHit& hit) const { const String& query = (ignore_mods_ ? hit.getSequence().toUnmodifiedString() : hit.getSequence().toString()); return (sequences_.count(query) > 0); } }; struct IDFilter::HasNoEvidence { typedef PeptideHit argument_type; // for use as a predicate bool operator()(const PeptideHit& hit) const { return hit.getPeptideEvidences().empty(); } }; struct IDFilter::HasRTInRange { typedef PeptideIdentification argument_type; // for use as a predicate double rt_min_, rt_max_; HasRTInRange(double rt_min, double rt_max) : rt_min_(rt_min), rt_max_(rt_max) { } bool operator()(const PeptideIdentification& id) const { double rt = id.getRT(); return (rt >= rt_min_) && (rt <= rt_max_); } }; struct IDFilter::HasMZInRange { typedef PeptideIdentification argument_type; // for use as a predicate double mz_min_, mz_max_; HasMZInRange(double mz_min, double mz_max) : mz_min_(mz_min), mz_max_(mz_max) { } bool operator()(const PeptideIdentification& id) const { double mz = id.getMZ(); return (mz >= mz_min_) && (mz <= mz_max_); } }; void IDFilter::extractPeptideSequences(const PeptideIdentificationList& peptides, set<String>& sequences, bool ignore_mods) { for (const PeptideIdentification& pep : peptides) { for (const PeptideHit& hit : 
pep.getHits()) { if (ignore_mods) { sequences.insert(hit.getSequence().toUnmodifiedString()); } else { sequences.insert(hit.getSequence().toString()); } } } } map<String, vector<ProteinHit>> IDFilter::extractUnassignedProteins(ConsensusMap& cmap) { // collect accessions that are referenced by peptides for each ID run: map<String, unordered_set<String>> run_to_accessions; for (const auto& f : cmap) { for (const auto& pepid : f.getPeptideIdentifications()) { const String& run_id = pepid.getIdentifier(); // extract protein accessions of each peptide hit: for (const PeptideHit& hit : pepid.getHits()) { const set<String>& current_accessions = hit.extractProteinAccessionsSet(); run_to_accessions[run_id].insert(current_accessions.begin(), current_accessions.end()); } } } vector<ProteinIdentification>& prots = cmap.getProteinIdentifications(); map<String, vector<ProteinHit>> result {}; for (ProteinIdentification& prot : prots) { const String& run_id = prot.getIdentifier(); auto target = result.emplace(run_id, vector<ProteinHit> {}); const unordered_set<String>& accessions = run_to_accessions[run_id]; struct HasMatchingAccessionUnordered<ProteinHit> acc_filter(accessions); moveMatchingItems(prot.getHits(), std::not_fn(acc_filter), target.first->second); } return result; } void IDFilter::removeUnreferencedProteins(ConsensusMap& cmap, bool include_unassigned) { // collect accessions that are referenced by peptides for each ID run: map<String, unordered_set<String>> run_to_accessions; auto add_references_to_map = [&run_to_accessions](const PeptideIdentification& pepid) { const String& run_id = pepid.getIdentifier(); // extract protein accessions of each peptide hit: for (const PeptideHit& hit : pepid.getHits()) { const set<String>& current_accessions = hit.extractProteinAccessionsSet(); run_to_accessions[run_id].insert(current_accessions.begin(), current_accessions.end()); } }; cmap.applyFunctionOnPeptideIDs(add_references_to_map, include_unassigned); 
vector<ProteinIdentification>& prots = cmap.getProteinIdentifications(); for (ProteinIdentification& prot : prots) { const String& run_id = prot.getIdentifier(); const unordered_set<String>& accessions = run_to_accessions[run_id]; struct HasMatchingAccessionUnordered<ProteinHit> acc_filter(accessions); keepMatchingItems(prot.getHits(), acc_filter); } } void IDFilter::removeUnreferencedProteins(ProteinIdentification& proteins, const PeptideIdentificationList& peptides) { // collect accessions that are referenced by peptides for each ID run: map<String, unordered_set<String>> run_to_accessions; for (const PeptideIdentification& pep : peptides) { const String& run_id = pep.getIdentifier(); // extract protein accessions of each peptide hit: for (const PeptideHit& hit : pep.getHits()) { const set<String>& current_accessions = hit.extractProteinAccessionsSet(); run_to_accessions[run_id].insert(current_accessions.begin(), current_accessions.end()); } } const String& run_id = proteins.getIdentifier(); const unordered_set<String>& accessions = run_to_accessions[run_id]; struct HasMatchingAccessionUnordered<ProteinHit> acc_filter(accessions); keepMatchingItems(proteins.getHits(), acc_filter); } void IDFilter::removeUnreferencedProteins(vector<ProteinIdentification>& proteins, const PeptideIdentificationList& peptides) { // collect accessions that are referenced by peptides for each ID run: map<String, unordered_set<String>> run_to_accessions; for (const PeptideIdentification& pep : peptides) { const String& run_id = pep.getIdentifier(); // extract protein accessions of each peptide hit: for (const PeptideHit& hit : pep.getHits()) { const set<String>& current_accessions = hit.extractProteinAccessionsSet(); run_to_accessions[run_id].insert(current_accessions.begin(), current_accessions.end()); } } for (ProteinIdentification& prot : proteins) { const String& run_id = prot.getIdentifier(); const unordered_set<String>& accessions = run_to_accessions[run_id]; struct 
HasMatchingAccessionUnordered<ProteinHit> acc_filter(accessions); keepMatchingItems(prot.getHits(), acc_filter); } } // TODO write version where you look up in a specific run (e.g. first inference run) void IDFilter::removeDanglingProteinReferences(ConsensusMap& cmap, bool remove_peptides_without_reference) { vector<ProteinIdentification>& proteins = cmap.getProteinIdentifications(); // collect valid protein accessions for each ID run: map<String, unordered_set<String>> run_to_accessions; for (const ProteinIdentification& prot : proteins) { const String& run_id = prot.getIdentifier(); for (const ProteinHit& hit : prot.getHits()) { run_to_accessions[run_id].insert(hit.getAccession()); } } auto check_prots_avail = [&run_to_accessions, &remove_peptides_without_reference](PeptideIdentification& pep) -> void { const String& run_id = pep.getIdentifier(); const unordered_set<String>& accessions = run_to_accessions[run_id]; struct HasMatchingAccessionUnordered<PeptideEvidence> acc_filter(accessions); // check protein accessions of each peptide hit for (PeptideHit& hit : pep.getHits()) { // no non-const "PeptideHit::getPeptideEvidences" implemented, so we // can't use "keepMatchingItems": vector<PeptideEvidence> evidences; remove_copy_if(hit.getPeptideEvidences().begin(), hit.getPeptideEvidences().end(), back_inserter(evidences), std::not_fn(acc_filter)); hit.setPeptideEvidences(evidences); } if (remove_peptides_without_reference) { removeMatchingItems(pep.getHits(), HasNoEvidence()); } }; cmap.applyFunctionOnPeptideIDs(check_prots_avail); } void IDFilter::removeDanglingProteinReferences(ConsensusMap& cmap, const ProteinIdentification& ref_run, bool remove_peptides_without_reference) { // collect valid protein accessions for each ID run: unordered_set<String> accessions_avail; for (const ProteinHit& hit : ref_run.getHits()) { accessions_avail.insert(hit.getAccession()); } // TODO could be refactored and pulled out auto check_prots_avail = [&accessions_avail, 
&remove_peptides_without_reference](PeptideIdentification& pep) -> void { const unordered_set<String>& accessions = accessions_avail; struct HasMatchingAccessionUnordered<PeptideEvidence> acc_filter(accessions); // check protein accessions of each peptide hit for (PeptideHit& hit : pep.getHits()) { // no non-const "PeptideHit::getPeptideEvidences" implemented, so we // can't use "keepMatchingItems": vector<PeptideEvidence> evidences; remove_copy_if(hit.getPeptideEvidences().begin(), hit.getPeptideEvidences().end(), back_inserter(evidences), std::not_fn(acc_filter)); hit.setPeptideEvidences(evidences); } if (remove_peptides_without_reference) { removeMatchingItems(pep.getHits(), HasNoEvidence()); } }; cmap.applyFunctionOnPeptideIDs(check_prots_avail); } void IDFilter::removeDanglingProteinReferences(PeptideIdentificationList& peptides, const vector<ProteinIdentification>& proteins, bool remove_peptides_without_reference) { // collect valid protein accessions for each ID run: map<String, unordered_set<String>> run_to_accessions; for (const ProteinIdentification& prot : proteins) { const String& run_id = prot.getIdentifier(); for (const ProteinHit& hit : prot.getHits()) { run_to_accessions[run_id].insert(hit.getAccession()); } } for (PeptideIdentification& pep : peptides) { const String& run_id = pep.getIdentifier(); const unordered_set<String>& accessions = run_to_accessions[run_id]; struct HasMatchingAccessionUnordered<PeptideEvidence> acc_filter(accessions); // check protein accessions of each peptide hit for (PeptideHit& hit : pep.getHits()) { // no non-const "PeptideHit::getPeptideEvidences" implemented, so we // can't use "keepMatchingItems": vector<PeptideEvidence> evidences; remove_copy_if(hit.getPeptideEvidences().begin(), hit.getPeptideEvidences().end(), back_inserter(evidences), std::not_fn(acc_filter)); hit.setPeptideEvidences(evidences); } if (remove_peptides_without_reference) { removeMatchingItems(pep.getHits(), HasNoEvidence()); } } } bool 
IDFilter::updateProteinGroups(vector<ProteinIdentification::ProteinGroup>& groups, const vector<ProteinHit>& hits) { if (groups.empty()) return true; // nothing to update // we'll do lots of look-ups, so use a suitable data structure: unordered_set<String> valid_accessions; for (const ProteinHit& hit : hits) { valid_accessions.insert(hit.getAccession()); } bool valid = true; vector<ProteinIdentification::ProteinGroup> filtered_groups; for (ProteinIdentification::ProteinGroup& group : groups) { ProteinIdentification::ProteinGroup filtered; for (const String& acc : group.accessions) { if (valid_accessions.find(acc) != valid_accessions.end()) { filtered.accessions.push_back(acc); } } if (!filtered.accessions.empty()) { if (filtered.accessions.size() < group.accessions.size()) { valid = false; // some proteins removed from group } filtered.probability = group.probability; filtered_groups.push_back(filtered); } } groups.swap(filtered_groups); return valid; } void IDFilter::removeUngroupedProteins(const vector<ProteinIdentification::ProteinGroup>& groups, vector<ProteinHit>& hits) { if (hits.empty()) { return; // nothing to update } // we'll do lots of look-ups, so use a suitable data structure: unordered_set<String> valid_accessions; for (const auto& grp : groups) { valid_accessions.insert(grp.accessions.begin(), grp.accessions.end()); } hits.erase(std::remove_if(hits.begin(), hits.end(), std::not_fn(HasMatchingAccessionUnordered<ProteinHit>(valid_accessions))), hits.end()); } void IDFilter::keepBestPeptideHits(PeptideIdentificationList& peptides, bool strict) { for (PeptideIdentification& pep : peptides) { vector<PeptideHit>& hits = pep.getHits(); if (hits.size() > 1) { pep.sort(); double top_score = hits[0].getScore(); bool higher_better = pep.isHigherScoreBetter(); struct HasGoodScore<PeptideHit> good_score(top_score, higher_better); if (strict) // only one best score allowed { if (good_score(hits[1])) // two (or more) best-scoring hits { hits.clear(); } else { 
hits.resize(1); } } else { // we could use keepMatchingHits() here, but it would be less // efficient (since the hits are already sorted by score): for (vector<PeptideHit>::iterator hit_it = ++hits.begin(); hit_it != hits.end(); ++hit_it) { if (!good_score(*hit_it)) { hits.erase(hit_it, hits.end()); break; } } } } } } void IDFilter::filterGroupsByScore(std::vector<ProteinIdentification::ProteinGroup>& grps, double threshold_score, bool higher_better) { const auto& pred = [&threshold_score, &higher_better](ProteinIdentification::ProteinGroup& g) { return ((higher_better && (threshold_score >= g.probability)) || (!higher_better && (threshold_score < g.probability))); }; grps.erase(std::remove_if(grps.begin(), grps.end(), pred), grps.end()); } void IDFilter::filterPeptidesByLength(PeptideIdentificationList& peptides, Size min_length, Size max_length) { if (min_length > 0) { struct HasMinPeptideLength length_filter(min_length); for (PeptideIdentification& pep : peptides) { keepMatchingItems(pep.getHits(), length_filter); } } if (max_length == std::numeric_limits<decltype(max_length)>::max()) return; // no upper end filtering needed ++max_length; // the predicate tests for ">=", we need ">" if (max_length > min_length) { struct HasMinPeptideLength length_filter(max_length); for (PeptideIdentification& pep : peptides) { removeMatchingItems(pep.getHits(), length_filter); } } } void IDFilter::filterPeptidesByCharge(PeptideIdentificationList& peptides, Int min_charge, Int max_charge) { struct HasMinCharge charge_filter(min_charge); for (PeptideIdentification& pep : peptides) { keepMatchingItems(pep.getHits(), charge_filter); } if (max_charge == std::numeric_limits<decltype(max_charge)>::max()) return; // no upper end filtering needed ++max_charge; // the predicate tests for ">=", we need ">" if (max_charge > min_charge) { charge_filter = HasMinCharge(max_charge); for (PeptideIdentification& pep : peptides) { removeMatchingItems(pep.getHits(), charge_filter); } } } void 
IDFilter::filterPeptidesByRT(PeptideIdentificationList& peptides, double min_rt, double max_rt) { struct HasRTInRange rt_filter(min_rt, max_rt); keepMatchingItems(peptides, rt_filter); } void IDFilter::filterPeptidesByMZ(PeptideIdentificationList& peptides, double min_mz, double max_mz) { struct HasMZInRange mz_filter(min_mz, max_mz); keepMatchingItems(peptides, mz_filter); } void IDFilter::filterPeptidesByMZError(PeptideIdentificationList& peptides, double mass_error, bool unit_ppm) { for (PeptideIdentification& pep : peptides) { struct HasLowMZError error_filter(pep.getMZ(), mass_error, unit_ppm); keepMatchingItems(pep.getHits(), error_filter); } } void IDFilter::filterPeptidesByRTPredictPValue(PeptideIdentificationList& peptides, const String& metavalue_key, double threshold) { Size n_initial = 0, n_metavalue = 0; // keep track of numbers of hits struct HasMetaValue<PeptideHit> present_filter(metavalue_key, DataValue()); double cutoff = 1 - threshold; // why? - Hendrik struct HasMaxMetaValue<PeptideHit> pvalue_filter(metavalue_key, cutoff); for (PeptideIdentification& pep : peptides) { n_initial += pep.getHits().size(); keepMatchingItems(pep.getHits(), present_filter); n_metavalue += pep.getHits().size(); keepMatchingItems(pep.getHits(), pvalue_filter); } if (n_metavalue < n_initial) { OPENMS_LOG_WARN << "Filtering peptides by RTPredict p-value removed " << (n_initial - n_metavalue) << " of " << n_initial << " hits (total) that were missing the required meta value ('" << metavalue_key << "', added by RTPredict)." 
<< endl; } } void IDFilter::removePeptidesWithMatchingModifications(PeptideIdentificationList& peptides, const set<String>& modifications) { struct HasMatchingModification mod_filter(modifications); for (PeptideIdentification& pep : peptides) { removeMatchingItems(pep.getHits(), mod_filter); } } void IDFilter::removePeptidesWithMatchingRegEx(PeptideIdentificationList& peptides, const String& regex) { const std::regex re(regex); // true if regex matches to parts or entire unmodified sequence auto regex_matches = [&re](const PeptideHit& ph) -> bool { return std::regex_search(ph.getSequence().toUnmodifiedString(), re); }; for (auto& pep : peptides) { removeMatchingItems(pep.getHits(), regex_matches); } } void IDFilter::keepPeptidesWithMatchingModifications(PeptideIdentificationList& peptides, const set<String>& modifications) { struct HasMatchingModification mod_filter(modifications); for (PeptideIdentification& pep : peptides) { keepMatchingItems(pep.getHits(), mod_filter); } } void IDFilter::removePeptidesWithMatchingSequences(PeptideIdentificationList& peptides, const PeptideIdentificationList& bad_peptides, bool ignore_mods) { set<String> bad_seqs; extractPeptideSequences(bad_peptides, bad_seqs, ignore_mods); struct HasMatchingSequence seq_filter(bad_seqs, ignore_mods); for (PeptideIdentification& pep : peptides) { removeMatchingItems(pep.getHits(), seq_filter); } } void IDFilter::keepPeptidesWithMatchingSequences(PeptideIdentificationList& peptides, const PeptideIdentificationList& good_peptides, bool ignore_mods) { set<String> good_seqs; extractPeptideSequences(good_peptides, good_seqs, ignore_mods); struct HasMatchingSequence seq_filter(good_seqs, ignore_mods); for (PeptideIdentification& pep : peptides) { keepMatchingItems(pep.getHits(), seq_filter); } } void IDFilter::keepUniquePeptidesPerProtein(PeptideIdentificationList& peptides) { Size n_initial = 0, n_metavalue = 0; // keep track of numbers of hits struct HasMetaValue<PeptideHit> 
present_filter("protein_references", DataValue()); struct HasMetaValue<PeptideHit> unique_filter("protein_references", DataValue("unique")); for (PeptideIdentification& pep : peptides) { n_initial += pep.getHits().size(); keepMatchingItems(pep.getHits(), present_filter); n_metavalue += pep.getHits().size(); keepMatchingItems(pep.getHits(), unique_filter); } if (n_metavalue < n_initial) { OPENMS_LOG_WARN << "Filtering peptides by unique match to a protein removed " << (n_initial - n_metavalue) << " of " << n_initial << " hits (total) that were missing the required meta value " << "('protein_references', added by PeptideIndexer)." << endl; } } // @TODO: generalize this to protein hits? void IDFilter::removeDuplicatePeptideHits(PeptideIdentificationList& peptides, bool seq_only) { for (PeptideIdentification& pep : peptides) { vector<PeptideHit> filtered_hits; if (seq_only) { set<AASequence> seqs; for (PeptideHit& hit : pep.getHits()) { if (seqs.insert(hit.getSequence()).second) // new sequence { filtered_hits.push_back(hit); } } } else { // there's no "PeptideHit::operator<" defined, so we can't use a set nor // "sort" + "unique" from the standard library: for (PeptideHit& hit : pep.getHits()) { if (find(filtered_hits.begin(), filtered_hits.end(), hit) == filtered_hits.end()) { filtered_hits.push_back(hit); } } } pep.getHits().swap(filtered_hits); } } void IDFilter::keepNBestSpectra(PeptideIdentificationList& peptides, Size n) { String score_type; for (PeptideIdentification& p : peptides) { p.sort(); if (score_type.empty()) { score_type = p.getScoreType(); } else { if (p.getScoreType() != score_type) { throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("PSM score types must be identical to allow proper filtering.")); } } } // there might be fewer spectra identified than n -> adapt n = std::min(n, peptides.size()); auto has_better_peptidehit = [](const PeptideIdentification& l, const PeptideIdentification& r) { if (r.getHits().empty()) { 
return true; // right has no hit? -> left is better } if (l.getHits().empty()) { return false; // left has no hit but right has a hit? -> right is better } const bool higher_better = l.isHigherScoreBetter(); const double l_score = l.getHits()[0].getScore(); const double r_score = r.getHits()[0].getScore(); // both have hits? better score of best PSM is better if (higher_better) { return l_score > r_score; } return l_score < r_score; }; std::partial_sort(peptides.begin(), peptides.begin() + n, peptides.end(), has_better_peptidehit); peptides.resize(n); } void IDFilter::keepBestMatchPerObservation(IdentificationData& id_data, IdentificationData::ScoreTypeRef score_ref) { if (id_data.getObservationMatches().size() <= 1) return; // nothing to do vector<IdentificationData::ObservationMatchRef> best_matches = id_data.getBestMatchPerObservation(score_ref); auto best_match_it = best_matches.begin(); // predicate to compare the best match(es) to all (ordered) observation matches // returns false if the current om is a best match (-> not to be removed) // returns true if an inferior om was found (-> will be removed) auto has_worse_score = [&best_match_it](IdentificationData::ObservationMatchRef it) -> bool { if (it == *best_match_it) { ++best_match_it; return false; } return true; }; id_data.removeObservationMatchesIf(has_worse_score); } void IDFilter::filterObservationMatchesByScore(IdentificationData& id_data, IdentificationData::ScoreTypeRef score_ref, double cutoff) { // predicate to compare the score of observation matches to a cutoff // returns true if the current score is worse than the cutoff // returns false otherwise auto is_worse_than_cutoff = [&](IdentificationData::ObservationMatchRef it) -> bool { pair<double, bool> score = it->getScore(score_ref); return !score.second || score_ref->isBetterScore(cutoff, score.first); }; id_data.removeObservationMatchesIf(is_worse_than_cutoff); } void IDFilter::removeDecoys(IdentificationData& id_data) { // predicate to compare 
the target/decoy status of a parent sequence // returns true if decoy // returns false if target auto is_decoy = [&](IdentificationData::ParentSequenceRef it) -> bool { return it->is_decoy; }; id_data.removeParentSequencesIf(is_decoy); } } // namespace OpenMS
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/RESAMPLING/LinearResamplerAlign.cpp
.cpp
434
15
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Hannes Roest $ // $Authors: Hannes Roest $ // -------------------------------------------------------------------------- #include <OpenMS/PROCESSING/RESAMPLING/LinearResamplerAlign.h> namespace OpenMS { }
C++
3D
OpenMS/OpenMS
src/openms/source/PROCESSING/RESAMPLING/LinearResampler.cpp
.cpp
520
17
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: Eva Lange $ // -------------------------------------------------------------------------- // #include <OpenMS/PROCESSING/RESAMPLING/LinearResampler.h> namespace OpenMS { LinearResampler default_linear_resampler; LinearResampler default_linear_resampler2; }
C++
3D
OpenMS/OpenMS
src/openms/source/ML/NNLS/NonNegativeLeastSquaresSolver.cpp
.cpp
4,248
152
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Chris Bielow $ // $Authors: Chris Bielow $ // -------------------------------------------------------------------------- #include <OpenMS/ML/NNLS/NonNegativeLeastSquaresSolver.h> #include <OpenMS/ML/NNLS/NNLS.h> #include <vector> namespace OpenMS { Int NonNegativeLeastSquaresSolver::solve(double* A, int A_rows, int A_cols, std::vector<double>& b, std::vector<double>& x) { if (A_rows != (int)b.size()) { throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "NNSL::solve() #rows of A does not match #rows of b !"); } x.resize(A_cols); // prepare solution array double rnorm; double* w = new double[A_cols]; double* zz = new double[A_rows]; int* indx = new int[A_cols]; int mode; NNLS::nnls_(A, &A_rows, &A_rows, &A_cols, b.data(), x.data(), &rnorm, w, zz, indx, &mode); // clean up delete[] w; delete[] zz; delete[] indx; if (mode == 1) { return SOLVED; } else if (mode == 2) // this should not happen (dimensions are bad) { throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "NonNegativeLeastSquaresSolver::solve() Bad dimension reported!"); } else /*if (mode==3)*/ { return ITERATION_EXCEEDED; } } Int NonNegativeLeastSquaresSolver::solve(Matrix<double>& A, std::vector<double>& b, std::vector<double>& x) { int a_rows = (int)A.rows(); int a_cols = (int)A.cols(); return solve(A.data(), a_rows, a_cols, b, x); } Int NonNegativeLeastSquaresSolver::solve(const Matrix<double>& A, const Matrix<double>& b, Matrix<double>& x) { if (A.rows() != b.rows()) { throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "NNSL::solve() #rows of A does not match #rows of b !"); } // this needs to be int (not Int, Size or anything else), because the external nnls constructor expects it this way! 
int a_rows = (int)A.rows(); int a_cols = (int)A.cols(); // translate A to array a (column major order) - need to copy since NNLS modifies input double* a_vec = new double[A.rows() * A.cols()]; size_t idx = 0; for (Size col = 0; col < A.cols(); ++col) { for (Size row = 0; row < A.rows(); ++row) { a_vec[idx] = A(row, col); idx++; } } #ifdef NNLS_DEBUG //std::cout << "A:\n" << A << std::endl; #endif // translate b double* b_vec = new double[a_rows]; for (Size row = 0; row < b.rows(); ++row) { b_vec[row] = b(row, 0); } #ifdef NNLS_DEBUG std::cout << "b:\n" << b << std::endl; #endif // prepare solution array (directly copied from example) double* x_vec = new double[a_cols]; double rnorm; double* w = new double[a_cols]; double* zz = new double[a_rows]; int* indx = new int[a_cols]; int mode; #ifdef NNLS_DEBUG std::cout << "solving ..." << std::endl; #endif NNLS::nnls_(a_vec, &a_rows, &a_rows, &a_cols, b_vec, x_vec, &rnorm, w, zz, indx, &mode); // translate solution back to Matrix: x.resize(a_cols, 1); x.fill(0.0); for (Int row = 0; row < a_cols; ++row) { x(row, 0) = x_vec[row]; } #ifdef NNLS_DEBUG std::cout << "done" << std::endl; std::cout << "solution x:\n" << x << std::endl; #endif delete[] a_vec; delete[] b_vec; delete[] x_vec; delete[] w; delete[] zz; delete[] indx; if (mode == 1) { return SOLVED; } else if (mode == 2) // this should not happen (dimensions are bad) { throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "NonNegativeLeastSquaresSolver::solve() Bad dimension reported!"); } else /*if (mode==3)*/ { return ITERATION_EXCEEDED; } } } // namespace OpenMS
C++
3D
OpenMS/OpenMS
src/openms/source/ML/NNLS/NNLS.cpp
.cpp
23,923
797
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Chris Bielow $ // $Authors: Chris Bielow $ // -------------------------------------------------------------------------- #include <cmath> #include <algorithm> #include <OpenMS/ML/NNLS/NNLS.h> /* The code below was converted from FORTRAN using f2c from http://www.netlib.org/lawson-hanson/all Some modifications were made, in order for it to run properly (search for "--removed", "-- added" and "--changed" in the code below) Made thread-safe by Julianus Pfeuffer by removing static variables, 2023 */ namespace OpenMS::NNLS { /* start of original code (with modification as described above) */ /* nnls.F -- translated by f2c (version 20100827). You must link the resulting object file with libf2c: on Microsoft Windows system, link with libf2c.lib; on Linux or Unix systems, link with .../path/to/libf2c.a -lm or, if you install libf2c.a in a standard place, with -lf2c -lm -- in that order, at the end of the command line, as in cc *.o -lf2c -lm Source for libf2c is in /netlib/f2c/libf2c.zip, e.g., http://www.netlib.org/f2c/libf2c.zip */ /* #include "f2c.h" -- removed */ /* Table of constant values */ integer c__1 = 1; integer c__0 = 0; integer c__2 = 2; /* SUBROUTINE NNLS (A,MDA,M,N,B,X,RNORM,W,ZZ,INDEX,MODE) */ /* Algorithm NNLS: NONNEGATIVE LEAST SQUARES */ /* The original version of this code was developed by */ /* Charles L. Lawson and Richard J. Hanson at Jet Propulsion Laboratory */ /* 1973 JUN 15, and published in the book */ /* "SOLVING LEAST SQUARES PROBLEMS", Prentice-HalL, 1974. */ /* Revised FEB 1995 to accompany reprinting of the book by SIAM. */ /* GIVEN AN M BY N MATRIX, A, AND AN M-VECTOR, B, COMPUTE AN */ /* N-VECTOR, X, THAT SOLVES THE LEAST SQUARES PROBLEM */ /* A * X = B SUBJECT TO X .GE. 
0 */ /* ------------------------------------------------------------------ */ /* Subroutine Arguments */ /* A(),MDA,M,N MDA IS THE FIRST DIMENSIONING PARAMETER FOR THE */ /* ARRAY, A(). ON ENTRY A() CONTAINS THE M BY N */ /* MATRIX, A. ON EXIT A() CONTAINS */ /* THE PRODUCT MATRIX, Q*A , WHERE Q IS AN */ /* M BY M ORTHOGONAL MATRIX GENERATED IMPLICITLY BY */ /* THIS SUBROUTINE. */ /* B() ON ENTRY B() CONTAINS THE M-VECTOR, B. ON EXIT B() CON- */ /* TAINS Q*B. */ /* X() ON ENTRY X() NEED NOT BE INITIALIZED. ON EXIT X() WILL */ /* CONTAIN THE SOLUTION VECTOR. */ /* RNORM ON EXIT RNORM CONTAINS THE EUCLIDEAN NORM OF THE */ /* RESIDUAL VECTOR. */ /* W() AN N-ARRAY OF WORKING SPACE. ON EXIT W() WILL CONTAIN */ /* THE DUAL SOLUTION VECTOR. W WILL SATISFY W(I) = 0. */ /* FOR ALL I IN SET P AND W(I) .LE. 0. FOR ALL I IN SET Z */ /* ZZ() AN M-ARRAY OF WORKING SPACE. */ /* INDEX() AN INTEGER WORKING ARRAY OF LENGTH AT LEAST N. */ /* ON EXIT THE CONTENTS OF THIS ARRAY DEFINE THE SETS */ /* P AND Z AS FOLLOWS.. */ /* INDEX(1) THRU INDEX(NSETP) = SET P. */ /* INDEX(IZ1) THRU INDEX(IZ2) = SET Z. */ /* IZ1 = NSETP + 1 = NPP1 */ /* IZ2 = N */ /* MODE THIS IS A SUCCESS-FAILURE FLAG WITH THE FOLLOWING */ /* MEANINGS. */ /* 1 THE SOLUTION HAS BEEN COMPUTED SUCCESSFULLY. */ /* 2 THE DIMENSIONS OF THE PROBLEM ARE BAD. */ /* EITHER M .LE. 0 OR N .LE. 0. */ /* 3 ITERATION COUNT EXCEEDED. MORE THAN 3*N ITERATIONS. 
*/ /* ------------------------------------------------------------------ */ /* Subroutine */ int nnls_(double * a, integer * mda, integer * m, integer * n, double * b, double * x, double * rnorm, double * w, double * zz, integer * index, integer * mode) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; double d__1, d__2; /* Builtin functions */ /* double sqrt(double); --removed */ /* integer s_wsfe(cilist *), do_fio(integer *, char *, ftnlen), e_wsfe(void); -- removed */ /* Local variables */ integer i__, j, l; double t; /* Subroutine */ int g1_(double *, double *, double *, double *, double *); double cc; /* Subroutine */ int h12_(integer *, integer *, integer *, integer *, double *, integer *, double *, double *, integer *, integer *, integer *); integer ii, jj, ip; double sm; integer iz, jz; double up, ss; integer iz1, iz2, npp1; double diff_(double *, double *); integer iter; double temp, wmax, alpha, asave; integer itmax, izmax, nsetp; double dummy, unorm, ztest; integer rtnkey; /* Fortran I/O blocks */ /* cilist io___22 = { 0, 6, 0, "(/a)", 0 }; --removed */ /* ------------------------------------------------------------------ */ /* integer INDEX(N) */ /* double precision A(MDA,N), B(M), W(N), X(N), ZZ(M) */ /* ------------------------------------------------------------------ */ /* Parameter adjustments */ a_dim1 = *mda; a_offset = 1 + a_dim1; a -= a_offset; --b; --x; --w; --zz; --index; /* Function Body */ *mode = 1; if (*m <= 0 || *n <= 0) { *mode = 2; return 0; } iter = 0; itmax = *n * 3; /* INITIALIZE THE ARRAYS INDEX() AND X(). */ i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { x[i__] = 0.; /* L20: */ index[i__] = i__; } iz2 = *n; iz1 = 1; nsetp = 0; npp1 = 1; /* ****** MAIN LOOP BEGINS HERE ****** */ L30: /* QUIT IF ALL COEFFICIENTS ARE ALREADY IN THE SOLUTION. */ /* OR IF M COLS OF A HAVE BEEN TRIANGULARIZED. */ if (iz1 > iz2 || nsetp >= *m) { goto L350; } /* COMPUTE COMPONENTS OF THE DUAL (NEGATIVE GRADIENT) VECTOR W(). 
*/ i__1 = iz2; for (iz = iz1; iz <= i__1; ++iz) { j = index[iz]; sm = 0.; i__2 = *m; for (l = npp1; l <= i__2; ++l) { /* L40: */ sm += a[l + j * a_dim1] * b[l]; } w[j] = sm; /* L50: */ } /* FIND LARGEST POSITIVE W(J). */ L60: wmax = 0.; i__1 = iz2; for (iz = iz1; iz <= i__1; ++iz) { j = index[iz]; if (w[j] > wmax) { wmax = w[j]; izmax = iz; } /* L70: */ } /* IF WMAX .LE. 0. GO TO TERMINATION. */ /* THIS INDICATES SATISFACTION OF THE KUHN-TUCKER CONDITIONS. */ if (wmax <= 0.) { goto L350; } iz = izmax; j = index[iz]; /* THE SIGN OF W(J) IS OK FOR J TO BE MOVED TO SET P. */ /* BEGIN THE TRANSFORMATION AND CHECK NEW DIAGONAL ELEMENT TO AVOID */ /* NEAR LINEAR DEPENDENCE. */ asave = a[npp1 + j * a_dim1]; i__1 = npp1 + 1; h12_(&c__1, &npp1, &i__1, m, &a[j * a_dim1 + 1], &c__1, &up, &dummy, & c__1, &c__1, &c__0); unorm = 0.; if (nsetp != 0) { i__1 = nsetp; for (l = 1; l <= i__1; ++l) { /* L90: */ /* Computing 2nd power */ d__1 = a[l + j * a_dim1]; unorm += d__1 * d__1; } } unorm = sqrt(unorm); d__2 = unorm + (d__1 = a[npp1 + j * a_dim1], fabs(d__1)) * .01; /* --changed */ if (diff_(&d__2, &unorm) > 0.) { /* COL J IS SUFFICIENTLY INDEPENDENT. COPY B INTO ZZ, UPDATE ZZ */ /* AND SOLVE FOR ZTEST ( = PROPOSED NEW VALUE FOR X(J) ). */ i__1 = *m; for (l = 1; l <= i__1; ++l) { /* L120: */ zz[l] = b[l]; } i__1 = npp1 + 1; h12_(&c__2, &npp1, &i__1, m, &a[j * a_dim1 + 1], &c__1, &up, &zz[1], & c__1, &c__1, &c__1); ztest = zz[npp1] / a[npp1 + j * a_dim1]; /* SEE IF ZTEST IS POSITIVE */ if (ztest > 0.) { goto L140; } } /* REJECT J AS A CANDIDATE TO BE MOVED FROM SET Z TO SET P. */ /* RESTORE A(NPP1,J), SET W(J)=0., AND LOOP BACK TO TEST DUAL */ /* COEFFS AGAIN. */ a[npp1 + j * a_dim1] = asave; w[j] = 0.; goto L60; /* THE INDEX J=INDEX(IZ) HAS BEEN SELECTED TO BE MOVED FROM */ /* SET Z TO SET P. UPDATE B, UPDATE INDICES, APPLY HOUSEHOLDER */ /* TRANSFORMATIONS TO COLS IN NEW SET Z, ZERO SUBDIAGONAL ELTS IN */ /* COL J, SET W(J)=0. 
*/ L140: i__1 = *m; for (l = 1; l <= i__1; ++l) { /* L150: */ b[l] = zz[l]; } index[iz] = index[iz1]; index[iz1] = j; ++iz1; nsetp = npp1; ++npp1; if (iz1 <= iz2) { i__1 = iz2; for (jz = iz1; jz <= i__1; ++jz) { jj = index[jz]; h12_(&c__2, &nsetp, &npp1, m, &a[j * a_dim1 + 1], &c__1, &up, &a[ jj * a_dim1 + 1], &c__1, mda, &c__1); /* L160: */ } } if (nsetp != *m) { i__1 = *m; for (l = npp1; l <= i__1; ++l) { /* L180: */ a[l + j * a_dim1] = 0.; } } w[j] = 0.; /* SOLVE THE TRIANGULAR SYSTEM. */ /* STORE THE SOLUTION TEMPORARILY IN ZZ(). */ rtnkey = 1; goto L400; L200: /* ****** SECONDARY LOOP BEGINS HERE ****** */ /* ITERATION COUNTER. */ L210: ++iter; if (iter > itmax) { *mode = 3; /* s_wsfe(&io___22); do_fio(&c__1, " NNLS quitting on iteration count.", (ftnlen)34); e_wsfe(); --removed */ goto L350; } /* SEE IF ALL NEW CONSTRAINED COEFFS ARE FEASIBLE. */ /* IF NOT COMPUTE ALPHA. */ alpha = 2.; i__1 = nsetp; for (ip = 1; ip <= i__1; ++ip) { l = index[ip]; if (zz[ip] <= 0.) { t = -x[l] / (zz[ip] - x[l]); if (alpha > t) { alpha = t; jj = ip; } } /* L240: */ } /* IF ALL NEW CONSTRAINED COEFFS ARE FEASIBLE THEN ALPHA WILL */ /* STILL = 2. IF SO EXIT FROM SECONDARY LOOP TO MAIN LOOP. */ if (alpha == 2.) { goto L330; } /* OTHERWISE USE ALPHA WHICH WILL BE BETWEEN 0. AND 1. TO */ /* INTERPOLATE BETWEEN THE OLD X AND THE NEW ZZ. */ i__1 = nsetp; for (ip = 1; ip <= i__1; ++ip) { l = index[ip]; x[l] += alpha * (zz[ip] - x[l]); /* L250: */ } /* MODIFY A AND B AND THE INDEX ARRAYS TO MOVE COEFFICIENT I */ /* FROM SET P TO SET Z. 
*/ i__ = index[jj]; L260: x[i__] = 0.; if (jj != nsetp) { ++jj; i__1 = nsetp; for (j = jj; j <= i__1; ++j) { ii = index[j]; index[j - 1] = ii; g1_(&a[j - 1 + ii * a_dim1], &a[j + ii * a_dim1], &cc, &ss, &a[j - 1 + ii * a_dim1]); a[j + ii * a_dim1] = 0.; i__2 = *n; for (l = 1; l <= i__2; ++l) { if (l != ii) { /* Apply procedure G2 (CC,SS,A(J-1,L),A(J,L)) */ temp = a[j - 1 + l * a_dim1]; a[j - 1 + l * a_dim1] = cc * temp + ss * a[j + l * a_dim1]; a[j + l * a_dim1] = -ss * temp + cc * a[j + l * a_dim1]; } /* L270: */ } /* Apply procedure G2 (CC,SS,B(J-1),B(J)) */ temp = b[j - 1]; b[j - 1] = cc * temp + ss * b[j]; b[j] = -ss * temp + cc * b[j]; /* L280: */ } } npp1 = nsetp; --nsetp; --iz1; index[iz1] = i__; /* SEE IF THE REMAINING COEFFS IN SET P ARE FEASIBLE. THEY SHOULD */ /* BE BECAUSE OF THE WAY ALPHA WAS DETERMINED. */ /* IF ANY ARE INFEASIBLE IT IS DUE TO ROUND-OFF ERROR. ANY */ /* THAT ARE NON-POSITIVE WILL BE SET TO ZERO */ /* AND MOVED FROM SET P TO SET Z. */ i__1 = nsetp; for (jj = 1; jj <= i__1; ++jj) { i__ = index[jj]; if (x[i__] <= 0.) { goto L260; } /* L300: */ } /* COPY B( ) INTO ZZ( ). THEN SOLVE AGAIN AND LOOP BACK. */ i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { /* L310: */ zz[i__] = b[i__]; } rtnkey = 2; goto L400; L320: goto L210; /* ****** END OF SECONDARY LOOP ****** */ L330: i__1 = nsetp; for (ip = 1; ip <= i__1; ++ip) { i__ = index[ip]; /* L340: */ x[i__] = zz[ip]; } /* ALL NEW COEFFS ARE POSITIVE. LOOP BACK TO BEGINNING. */ goto L30; /* ****** END OF MAIN LOOP ****** */ /* COME TO HERE FOR TERMINATION. */ /* COMPUTE THE NORM OF THE FINAL RESIDUAL VECTOR. 
*/ L350: sm = 0.; if (npp1 <= *m) { i__1 = *m; for (i__ = npp1; i__ <= i__1; ++i__) { /* L360: */ /* Computing 2nd power */ d__1 = b[i__]; sm += d__1 * d__1; } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { /* L380: */ w[j] = 0.; } } *rnorm = sqrt(sm); return 0; /* THE FOLLOWING BLOCK OF CODE IS USED AS AN INTERNAL SUBROUTINE */ /* TO SOLVE THE TRIANGULAR SYSTEM, PUTTING THE SOLUTION IN ZZ(). */ L400: i__1 = nsetp; for (l = 1; l <= i__1; ++l) { ip = nsetp + 1 - l; if (l != 1) { i__2 = ip; for (ii = 1; ii <= i__2; ++ii) { zz[ii] -= a[ii + jj * a_dim1] * zz[ip + 1]; /* L410: */ } } jj = index[ip]; zz[ip] /= a[ip + jj * a_dim1]; /* L430: */ } switch (rtnkey) { case 1: goto L200; case 2: goto L320; } return 0; } /* nnls_ */ /* Subroutine */ int g1_(double * a, double * b, double * cterm, double * sterm, double * sig) { /* System generated locals */ double d__1; /* Builtin functions */ /* double sqrt(double), d_sign(double *, double *); --removed */ /* Local variables */ double xr, yr; /* COMPUTE ORTHOGONAL ROTATION MATRIX.. */ /* The original version of this code was developed by */ /* Charles L. Lawson and Richard J. Hanson at Jet Propulsion Laboratory */ /* 1973 JUN 12, and published in the book */ /* "SOLVING LEAST SQUARES PROBLEMS", Prentice-HalL, 1974. */ /* Revised FEB 1995 to accompany reprinting of the book by SIAM. */ /* COMPUTE.. MATRIX (C, S) SO THAT (C, S)(A) = (SQRT(A**2+B**2)) */ /* (-S,C) (-S,C)(B) ( 0 ) */ /* COMPUTE SIG = SQRT(A**2+B**2) */ /* SIG IS COMPUTED LAST TO ALLOW FOR THE POSSIBILITY THAT */ /* SIG MAY BE IN THE SAME LOCATION AS A OR B . */ /* ------------------------------------------------------------------ */ /* ------------------------------------------------------------------ */ if (fabs(*a) > fabs(*b)) { xr = *b / *a; /* Computing 2nd power */ d__1 = xr; yr = sqrt(d__1 * d__1 + 1.); d__1 = 1. / yr; *cterm = d_sign_(d__1, *a); /* --changed */ *sterm = *cterm * xr; *sig = fabs(*a) * yr; return 0; } if (*b != 0.) 
{ xr = *a / *b; /* Computing 2nd power */ d__1 = xr; yr = sqrt(d__1 * d__1 + 1.); d__1 = 1. / yr; *sterm = d_sign_(d__1, *b); /* --changed */ *cterm = *sterm * xr; *sig = fabs(*b) * yr; return 0; } *sig = 0.; *cterm = 0.; *sterm = 1.; return 0; } /* g1_ */ /* SUBROUTINE H12 (MODE,LPIVOT,L1,M,U,IUE,UP,C,ICE,ICV,NCV) */ /* CONSTRUCTION AND/OR APPLICATION OF A SINGLE */ /* HOUSEHOLDER TRANSFORMATION.. Q = I + U*(U**T)/B */ /* The original version of this code was developed by */ /* Charles L. Lawson and Richard J. Hanson at Jet Propulsion Laboratory */ /* 1973 JUN 12, and published in the book */ /* "SOLVING LEAST SQUARES PROBLEMS", Prentice-HalL, 1974. */ /* Revised FEB 1995 to accompany reprinting of the book by SIAM. */ /* ------------------------------------------------------------------ */ /* Subroutine Arguments */ /* MODE = 1 OR 2 Selects Algorithm H1 to construct and apply a */ /* Householder transformation, or Algorithm H2 to apply a */ /* previously constructed transformation. */ /* LPIVOT IS THE INDEX OF THE PIVOT ELEMENT. */ /* L1,M IF L1 .LE. M THE TRANSFORMATION WILL BE CONSTRUCTED TO */ /* ZERO ELEMENTS INDEXED FROM L1 THROUGH M. IF L1 GT. M */ /* THE SUBROUTINE DOES AN IDENTITY TRANSFORMATION. */ /* U(),IUE,UP On entry with MODE = 1, U() contains the pivot */ /* vector. IUE is the storage increment between elements. */ /* On exit when MODE = 1, U() and UP contain quantities */ /* defining the vector U of the Householder transformation. */ /* on entry with MODE = 2, U() and UP should contain */ /* quantities previously computed with MODE = 1. These will */ /* not be modified during the entry with MODE = 2. */ /* C() ON ENTRY with MODE = 1 or 2, C() CONTAINS A MATRIX WHICH */ /* WILL BE REGARDED AS A SET OF VECTORS TO WHICH THE */ /* HOUSEHOLDER TRANSFORMATION IS TO BE APPLIED. */ /* ON EXIT C() CONTAINS THE SET OF TRANSFORMED VECTORS. */ /* ICE STORAGE INCREMENT BETWEEN ELEMENTS OF VECTORS IN C(). */ /* ICV STORAGE INCREMENT BETWEEN VECTORS IN C(). 
*/ /* NCV NUMBER OF VECTORS IN C() TO BE TRANSFORMED. IF NCV .LE. 0 */ /* NO OPERATIONS WILL BE DONE ON C(). */ /* ------------------------------------------------------------------ */ /* Subroutine */ int h12_(integer * mode, integer * lpivot, integer * l1, integer * m, double * u, integer * iue, double * up, double * c__, integer * ice, integer * icv, integer * ncv) { /* System generated locals */ integer u_dim1, u_offset, i__1, i__2; double d__1, d__2; /* Builtin functions */ /* double sqrt(double); --removed */ /* Local variables */ double b; integer i__, j, i2, i3, i4; double cl, sm; integer incr; double clinv; /* ------------------------------------------------------------------ */ /* double precision U(IUE,M) */ /* ------------------------------------------------------------------ */ /* Parameter adjustments */ u_dim1 = *iue; u_offset = 1 + u_dim1; u -= u_offset; --c__; /* Function Body */ if (0 >= *lpivot || *lpivot >= *l1 || *l1 > *m) { return 0; } cl = (d__1 = u[*lpivot * u_dim1 + 1], fabs(d__1)); if (*mode == 2) { goto L60; } /* ****** CONSTRUCT THE TRANSFORMATION. ****** */ i__1 = *m; for (j = *l1; j <= i__1; ++j) { /* L10: */ /* Computing MAX */ d__2 = (d__1 = u[j * u_dim1 + 1], fabs(d__1)); cl = std::max(d__2, cl); /* --changed */ } if (cl <= 0.) { goto L130; } else { goto L20; } L20: clinv = 1. / cl; /* Computing 2nd power */ d__1 = u[*lpivot * u_dim1 + 1] * clinv; sm = d__1 * d__1; i__1 = *m; for (j = *l1; j <= i__1; ++j) { /* L30: */ /* Computing 2nd power */ d__1 = u[j * u_dim1 + 1] * clinv; sm += d__1 * d__1; } cl *= sqrt(sm); if (u[*lpivot * u_dim1 + 1] <= 0.) { goto L50; } else { goto L40; } L40: cl = -cl; L50: *up = u[*lpivot * u_dim1 + 1] - cl; u[*lpivot * u_dim1 + 1] = cl; goto L70; /* ****** APPLY THE TRANSFORMATION I+U*(U**T)/B TO C. ****** */ L60: if (cl <= 0.) { goto L130; } else { goto L70; } L70: if (*ncv <= 0) { return 0; } b = *up * u[*lpivot * u_dim1 + 1]; /* B MUST BE NON-POSITIVE HERE. IF B = 0., RETURN. */ if (b >= 0.) 
{ goto L130; } else { goto L80; } L80: b = 1. / b; i2 = 1 - *icv + *ice * (*lpivot - 1); incr = *ice * (*l1 - *lpivot); i__1 = *ncv; for (j = 1; j <= i__1; ++j) { i2 += *icv; i3 = i2 + incr; i4 = i3; sm = c__[i2] * *up; i__2 = *m; for (i__ = *l1; i__ <= i__2; ++i__) { sm += c__[i3] * u[i__ * u_dim1 + 1]; /* L90: */ i3 += *ice; } if (sm != 0.) { goto L100; } else { goto L120; } L100: sm *= b; c__[i2] += sm * *up; i__2 = *m; for (i__ = *l1; i__ <= i__2; ++i__) { c__[i4] += sm * u[i__ * u_dim1 + 1]; /* L110: */ i4 += *ice; } L120: {} } L130: return 0; } /* h12_ */ double diff_(double * x, double * y) { /* System generated locals */ double ret_val; /* Function used in tests that depend on machine precision. */ /* The original version of this code was developed by */ /* Charles L. Lawson and Richard J. Hanson at Jet Propulsion Laboratory */ /* 1973 JUN 7, and published in the book */ /* "SOLVING LEAST SQUARES PROBLEMS", Prentice-HalL, 1974. */ /* Revised FEB 1995 to accompany reprinting of the book by SIAM. */ ret_val = *x - *y; return ret_val; } /* diff_ */ /* -- added manually */ double d_sign_(double & a, double & b) { double x = (a >= 0 ? a : -a); return b >= 0 ? x : -x; } } // namespace OpenMS // namespace NNLS
C++
3D
OpenMS/OpenMS
src/openms/source/ML/INTERPOLATION/LinearInterpolation.cpp
.cpp
467
16
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: $ // -------------------------------------------------------------------------- // #include <OpenMS/ML/INTERPOLATION/LinearInterpolation.h> namespace OpenMS { Math::LinearInterpolation<> default_linint_; }
C++
3D
OpenMS/OpenMS
src/openms/source/ML/INTERPOLATION/BilinearInterpolation.cpp
.cpp
341
10
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: $ // -------------------------------------------------------------------------- //
C++
3D
OpenMS/OpenMS
src/openms/source/ML/SVM/SimpleSVM.cpp
.cpp
23,621
673
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Hendrik Weisser $ // $Authors: Hendrik Weisser $ // -------------------------------------------------------------------------- #include <OpenMS/ML/SVM/SimpleSVM.h> #include <OpenMS/CONCEPT/LogStream.h> #include <OpenMS/CONCEPT/ProgressLogger.h> #include <OpenMS/FORMAT/SVOutStream.h> #include <OpenMS/DATASTRUCTURES/ListUtils.h> #include <OpenMS/MATH/StatisticFunctions.h> #include <OpenMS/ML/GRIDSEARCH/GridSearch.h> // Include libSVM in implementation file only // svm.h is copied into the contrib binary include directory during build #include "svm.h" #include <cstdlib> using namespace OpenMS; using namespace std; // Implementation class definition class SimpleSVM::Impl { public: Impl(const Param& param); ~Impl(); void clear_(); // Classification (or regression) performance for different param. 
combinations (C/gamma/p): typedef std::vector<std::vector<std::vector<double>>> SVMPerformance; // Values of predictors (LIBSVM format) std::vector<std::vector<struct svm_node>> nodes_; // SVM training data (LIBSVM format) struct svm_problem data_; // SVM parameters (LIBSVM format) struct svm_parameter svm_params_; // Pointer to SVM model (LIBSVM format) struct svm_model* model_; // Names of predictors in the model (excluding uninformative ones) std::vector<String> predictor_names_; // Number of partitions for cross-validation Size n_parts_; // Parameter values to try during optimization std::vector<double> log2_C_, log2_gamma_, log2_p_; // Mapping from predictor name to predictor min and max SimpleSVM::ScaleMap scaling_; // Cross-validation results SVMPerformance performance_; // Reference to the parameters from the SimpleSVM class const Param& param_; // Dummy function to suppress LIBSVM output static void printNull_(const char*) {} // Scale predictor values to range 0-1 void scaleData_(SimpleSVM::PredictorMap& predictors); // Convert predictors to LIBSVM format void convertData_(const SimpleSVM::PredictorMap& predictors); // Choose best SVM parameters based on cross-validation results std::tuple<double, double, double> chooseBestParameters_(bool higher_better) const; // Run cross-validation to optimize SVM parameters void optimizeParameters_(bool classification); }; // Implementation class methods SimpleSVM::Impl::Impl(const Param& param) : data_(), model_(nullptr), param_(param) { svm_set_print_string_function(&printNull_); // suppress output of LIBSVM } SimpleSVM::Impl::~Impl() { clear_(); } void SimpleSVM::Impl::clear_() { if (model_ != nullptr) svm_free_and_destroy_model(&model_); // frees model *and* sets ptr to zero delete[] data_.x; delete[] data_.y; data_.x = nullptr; data_.y = nullptr; } void SimpleSVM::Impl::scaleData_(PredictorMap& predictors) { scaling_.clear(); for (PredictorMap::iterator pred_it = predictors.begin(); pred_it != predictors.end(); 
++pred_it) { // if (pred_it->second.empty()) continue; vector<double>::iterator val_begin = pred_it->second.begin(); vector<double>::iterator val_end = pred_it->second.end(); double vmin = *min_element(val_begin, val_end); double vmax = *max_element(val_begin, val_end); if (vmin == vmax) { OPENMS_LOG_INFO << "Predictor '" + pred_it->first + "' is uninformative. Ignoring." << endl; pred_it->second.clear(); continue; } double range = vmax - vmin; for (; val_begin != val_end; ++val_begin) { *val_begin = (*val_begin - vmin) / range; } scaling_[pred_it->first] = make_pair(vmin, vmax); // store old range } } void SimpleSVM::Impl::convertData_(const SimpleSVM::PredictorMap& predictors) { Size n_obs = predictors.begin()->second.size(); nodes_.clear(); nodes_.resize(n_obs); predictor_names_.clear(); int pred_index = 0; // "int" for use by LIBSVM for (PredictorMap::const_iterator pred_it = predictors.begin(); pred_it != predictors.end(); ++pred_it) { if (pred_it->second.empty()) continue; // uninformative predictor pred_index++; // LIBSVM counts observations from 1 predictor_names_.push_back(pred_it->first); for (Size obs_index = 0; obs_index < n_obs; ++obs_index) { double value = pred_it->second[obs_index]; // if (value > 0.0) // TODO: why > 0.0? // { svm_node node = {pred_index, value}; nodes_[obs_index].push_back(node); // } } } OPENMS_LOG_DEBUG << "Number of predictors for SVM: " << pred_index << endl; svm_node sentinel = {-1, 0.0}; for (auto node_it = nodes_.begin(); node_it != nodes_.end(); ++node_it) { node_it->push_back(sentinel); } } tuple<double, double, double> SimpleSVM::Impl::chooseBestParameters_(bool higher_better) const { auto is_better = [&higher_better](double l, double r)->bool { return higher_better ? (l > r) : (l < r); }; // which parameter set(s) achieved best cross-validation performance? double best_value = higher_better ? 
std::numeric_limits<double>::lowest() : std::numeric_limits<double>::max(); vector<tuple<Size, Size, Size>> best_indexes; for (Size g_index = 0; g_index < log2_gamma_.size(); ++g_index) { for (Size c_index = 0; c_index < log2_C_.size(); ++c_index) { for (Size p_index = 0; p_index < log2_p_.size(); ++p_index) { double value = performance_[g_index][c_index][p_index]; // cout << "value " << value << " best_value " << best_value << endl; if (value == best_value) { best_indexes.emplace_back(g_index, c_index, p_index); // tie } else if (is_better(value, best_value)) { best_value = value; best_indexes.clear(); best_indexes.emplace_back(g_index, c_index, p_index); } } } } OPENMS_LOG_INFO << "Best cross-validation performance: " << best_value << " (ties: " << best_indexes.size() << ")" << endl; if (best_indexes.size() == 1) { return make_tuple( log2_C_[std::get<1>(best_indexes[0])], // TODO: check why order changed log2_gamma_[std::get<0>(best_indexes[0])], log2_p_[std::get<2>(best_indexes[0])]); } // break ties between parameter sets - look at "neighboring" parameters: multimap<pair<double, Size>, Size> tiebreaker; for (Size i = 0; i < best_indexes.size(); ++i) { const auto& indexes = best_indexes[i]; Size n_neighbors = 0; double neighbor_value = 0.0; if (std::get<0>(indexes) > 0) { neighbor_value += performance_[std::get<0>(indexes) - 1][std::get<1>(indexes)][std::get<2>(indexes)]; ++n_neighbors; } if (std::get<0>(indexes) + 1 < log2_gamma_.size()) { neighbor_value += performance_[std::get<0>(indexes) + 1][std::get<1>(indexes)][std::get<2>(indexes)]; ++n_neighbors; } if (std::get<1>(indexes) > 0) { neighbor_value += performance_[std::get<0>(indexes)][std::get<1>(indexes) - 1][std::get<2>(indexes)]; ++n_neighbors; } if (std::get<1>(indexes) + 1 < log2_C_.size()) { neighbor_value += performance_[std::get<0>(indexes)][std::get<1>(indexes) + 1][std::get<2>(indexes)]; ++n_neighbors; } if (std::get<2>(indexes) > 0) { neighbor_value += 
performance_[std::get<0>(indexes)][std::get<1>(indexes)][std::get<2>(indexes) - 1]; ++n_neighbors; } if (std::get<2>(indexes) + 1 < log2_p_.size()) { neighbor_value += performance_[std::get<0>(indexes)][std::get<1>(indexes)][std::get<2>(indexes) + 1]; ++n_neighbors; } neighbor_value /= n_neighbors; // avg. performance of neighbors tiebreaker.insert(make_pair(make_pair(neighbor_value, n_neighbors), i)); } // Use rbegin() for higher values (when higher_better is true) and begin() for lower values const auto& indexes = best_indexes[higher_better ? tiebreaker.rbegin()->second : tiebreaker.begin()->second]; return make_tuple(log2_C_[std::get<1>(indexes)], log2_gamma_[std::get<0>(indexes)], log2_p_[std::get<2>(indexes)]); } void SimpleSVM::Impl::optimizeParameters_(bool classification) { OPENMS_LOG_INFO << "Optimizing parameters." << endl; auto perFoldClassificationAccuracy = [&](const auto& d, const auto& targets)->double { Size n_correct = 0; for (Size i = 0; i < Size(d.l); ++i) { if (targets[i] == d.y[i]) n_correct++; } const double ratio = n_correct / double(d.l); return ratio; }; [[maybe_unused]]auto perFoldRegressionRSquared = [&](const auto& d, const auto& targets)->double { double targets_mean = Math::mean(std::begin(targets), std::end(targets)); // mean of truth y-values double u{}, v{u}; for (Size i = 0; i < Size(d.l); ++i) { u += std::pow(targets[i] - d.y[i], 2.0); v += std::pow(targets[i] - targets_mean, 2.0); } const double Rsquared = (v != 0.0) ? 
(1.0 - u/v) : -1.0; return Rsquared; }; auto perFoldRMSE = [&](const auto& d, const auto& targets)->double { double err{}; for (Size i = 0; i < Size(d.l); ++i) { err += std::pow(targets[i] - d.y[i], 2.0); } err /= (double)Size(d.l); err = std::sqrt(err); return err; }; log2_C_ = param_.getValue("log2_C"); if (svm_params_.kernel_type == RBF) { log2_gamma_ = param_.getValue("log2_gamma"); } else { log2_gamma_ = vector<double>(1, 0.0); } log2_p_ = param_.getValue("log2_p"); if (classification) { log2_p_ = vector<double >(1, log2(0.1)); } OPENMS_LOG_INFO << "Running cross-validation to find optimal parameters..." << endl; Size prog_counter = 0; ProgressLogger prog_log; prog_log.startProgress(1, log2_gamma_.size() * log2_C_.size() * log2_p_.size(), "testing parameters"); const String& performance_type = classification ? "accuracy: " : "error: "; // classification performance for different parameter pairs: // vary "C"s in inner loop to keep results for all "C"s in one vector: performance_.resize(log2_gamma_.size()); for (int g_index = 0; g_index < (int)log2_gamma_.size(); ++g_index) { svm_params_.gamma = pow(2.0, log2_gamma_[g_index]); performance_[g_index].resize(log2_C_.size()); for (int c_index = 0; c_index < (int)log2_C_.size(); ++c_index) { svm_params_.C = pow(2.0, log2_C_[c_index]); performance_[g_index][c_index].resize(log2_p_.size()); for (int p_index = 0; p_index < (int)log2_p_.size(); ++p_index) { svm_params_.p = pow(2.0, log2_p_[p_index]); double* targets = (double *)malloc(sizeof(double) * data_.l); svm_cross_validation(&data_, &svm_params_, n_parts_, &(targets[0])); double acc = classification ? 
perFoldClassificationAccuracy(data_, targets) : perFoldRMSE(data_, targets); performance_[g_index][c_index][p_index] = acc; prog_log.setProgress(++prog_counter); OPENMS_LOG_DEBUG << "Performance (log2_C = " << log2_C_[c_index] << ", log2_gamma = " << log2_gamma_[g_index] << ") " << ", log2_p = " << log2_p_[p_index] << ") " << performance_type << acc << endl; free(targets); } } } prog_log.endProgress(); auto best_params = classification ? chooseBestParameters_(true) : chooseBestParameters_(false); //auto best_params = classification ? chooseBestParameters_(true) : chooseBestParameters_(true); // for Rsquared OPENMS_LOG_INFO << "Best SVM parameters: log2_C = " << std::get<0>(best_params) << ", log2_gamma = " << std::get<1>(best_params) << ", log2_p = " << std::get<2>(best_params) << endl; svm_params_.C = pow(2.0, std::get<0>(best_params)); svm_params_.gamma = pow(2.0, std::get<1>(best_params)); svm_params_.p = pow(2.0, std::get<2>(best_params)); OPENMS_LOG_INFO << "... done." << endl; } // SimpleSVM methods that delegate to the implementation SimpleSVM::SimpleSVM() : DefaultParamHandler("SimpleSVM") { pimpl_ = make_unique<Impl>(param_); defaults_.setValue("kernel", "RBF", "SVM kernel"); defaults_.setValidStrings("kernel", {"RBF","linear"}); defaults_.setValue("xval", 5, "Number of partitions for cross-validation (parameter optimization)"); defaults_.setMinInt("xval", 1); String values = "-5,-3,-1,1,3,5,7,9,11,13,15"; defaults_.setValue("log2_C", ListUtils::create<double>(values), "Values to try for the SVM parameter 'C' during parameter optimization. A value 'x' is used as 'C = 2^x'."); values = "-15,-13,-11,-9,-7,-5,-3,-1,1,3"; defaults_.setValue("log2_gamma", ListUtils::create<double>(values), "Values to try for the SVM parameter 'gamma' during parameter optimization (RBF kernel only). 
A value 'x' is used as 'gamma = 2^x'."); values = "-15,-12,-9,-6,-3.32192809489,0,3.32192809489,6,9,12,15"; defaults_.setValue("log2_p", ListUtils::create<double>(values), "Values to try for the SVM parameter 'epsilon' during parameter optimization (epsilon-SVR only). A value 'x' is used as 'epsilon = 2^x'."); vector<std::string> advanced(1, "advanced"); defaults_.setValue("epsilon", 0.001, "Stopping criterion", advanced); defaults_.setMinFloat("epsilon", 0.0); defaults_.setValue("cache_size", 100.0, "Size of the kernel cache (in MB)", advanced); defaults_.setMinFloat("cache_size", 1.0); defaults_.setValue("no_shrinking", "false", "Disable the shrinking heuristics", advanced); defaults_.setValidStrings("no_shrinking", {"true","false"}); defaultsToParam_(); } SimpleSVM::~SimpleSVM() { pimpl_.reset(); } void SimpleSVM::setup(PredictorMap& predictors, const map<Size, double>& outcomes, bool classification) { if (predictors.empty() || predictors.begin()->second.empty()) { throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Predictors for SVM must not be empty."); } // count elements for first feature dimension to determine number of observations Size n_obs = predictors.begin()->second.size(); pimpl_->n_parts_ = param_.getValue("xval"); // clear old models pimpl_->clear_(); pimpl_->scaleData_(predictors); pimpl_->convertData_(predictors); pimpl_->data_.l = outcomes.size(); pimpl_->data_.x = new svm_node*[pimpl_->data_.l]; pimpl_->data_.y = new double[pimpl_->data_.l]; map<double, Size> label_table; Size index = 0; for (auto it = outcomes.cbegin(); it != outcomes.cend(); ++it, ++index) { const Size& training_index = it->first; const double& outcome = it->second; if (it->first >= n_obs) { String msg = "Invalid training index; there are only " + String(n_obs) + " observations."; throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, msg, String(it->first)); } pimpl_->data_.x[index] = &(pimpl_->nodes_[training_index][0]); 
pimpl_->data_.y[index] = outcome; label_table[outcome]++; } if (classification) { // check for 2 or more classes if (label_table.size() < 2) { throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Need at least two classes (distinct " "labels) for SVM classification."); } String msg = "Training SVM on " + String(pimpl_->data_.l) + " observations. Classes:"; for (map<double, Size>::iterator it = label_table.begin(); it != label_table.end(); ++it) { if (it->second < pimpl_->n_parts_) { msg = "Not enough observations of class " + String(it->first) + " for " + String(pimpl_->n_parts_) + "-fold cross-validation."; throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, msg); } msg += "\n- '" + String(it->first) + "': " + String(it->second) + " observations"; } OPENMS_LOG_INFO << msg << endl; pimpl_->svm_params_.svm_type = C_SVC; } else { // regression if ((unsigned int)pimpl_->data_.l < pimpl_->n_parts_) // TODO: check minimum amount of points needed for training a regression model. Assume 1 is enough for now. { String msg = "Not enough observations for " + String(pimpl_->n_parts_) + "-fold cross-validation."; throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, msg); } OPENMS_LOG_INFO << "Training SVR on " + String(pimpl_->data_.l) + " observations." << endl; pimpl_->svm_params_.svm_type = EPSILON_SVR; pimpl_->svm_params_.p = 0.1; // epsilon parameter of epsilon-SVR } std::string kernel = param_.getValue("kernel"); pimpl_->svm_params_.kernel_type = (kernel == "RBF") ? RBF : LINEAR; pimpl_->svm_params_.eps = param_.getValue("epsilon"); pimpl_->svm_params_.cache_size = param_.getValue("cache_size"); pimpl_->svm_params_.shrinking = !param_.getValue("no_shrinking").toBool(); pimpl_->svm_params_.nr_weight = 0; // weighting not supported for now pimpl_->svm_params_.probability = 0; // no prob. 
estimation during cross-validation pimpl_->optimizeParameters_(classification); pimpl_->svm_params_.probability = 1; pimpl_->model_ = svm_train(&pimpl_->data_, &pimpl_->svm_params_); OPENMS_LOG_INFO << "Number of support vectors in the final model: " << pimpl_->model_->l << endl; } // predict on (subset) of training data void SimpleSVM::predict(vector<Prediction>& predictions, vector<Size> indexes) const { if (pimpl_->model_ == nullptr) { throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "SVM model has not been trained (use the " "'setup' method)"); } Size n_obs = pimpl_->nodes_.size(); if (indexes.empty()) { indexes.reserve(n_obs); for (Size i = 0; i < n_obs; indexes.push_back(i++)){}; } Size n_classes = svm_get_nr_class(pimpl_->model_); vector<int> outcomes(n_classes); svm_get_labels(pimpl_->model_, &(outcomes[0])); vector<double> probabilities(n_classes); predictions.clear(); predictions.reserve(indexes.size()); for (vector<Size>::iterator it = indexes.begin(); it != indexes.end(); ++it) { if (*it >= pimpl_->nodes_.size()) { String msg = "Invalid index for prediction; there are only " + String(n_obs) + " observations."; throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, msg, String(*it)); } Prediction pred; pred.outcome = svm_predict_probability(pimpl_->model_, &(pimpl_->nodes_[*it][0]), &(probabilities[0])); for (Size i = 0; i < n_classes; ++i) { pred.probabilities[outcomes[i]] = probabilities[i]; } predictions.push_back(pred); } } void scaleDataUsingTrainingRanges(SimpleSVM::PredictorMap& predictors, const map<String, pair<double, double>>& scaling) { // scale each feature dimension to the min-max-range for (auto pred_it = predictors.begin(); pred_it != predictors.end(); ++pred_it) { if (pred_it->second.empty()) continue; // uninformative predictor auto val_begin = pred_it->second.begin(); auto val_end = pred_it->second.end(); for (; val_begin != val_end; ++val_begin) { if (scaling.count(pred_it->first) == 0) { 
//std::cout << "Predictor: '" << pred_it->first << "' not found in scale map because it was uninformative during training." << std::endl; continue; } auto [min, max] = scaling.at(pred_it->first); double range = max - min; *val_begin = (*val_begin - min) / range; } } } // predict on novel e.g., test data void SimpleSVM::predict(PredictorMap& predictors, vector<Prediction>& predictions) const { if (pimpl_->model_ == nullptr) { throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "SVM/SVR model has not been trained (use the " "'setup' method)"); } Size n_obs = predictors.begin()->second.size(); // length of the first feature ... Size feature_dim = predictors.size(); scaleDataUsingTrainingRanges(predictors, pimpl_->scaling_); //std::cout << "Predicting on novel data with obs./feature dimensionality: " << n_obs << "/" << feature_dim << std::endl; Size n_classes = svm_get_nr_class(pimpl_->model_); vector<int> outcomes(n_classes); svm_get_labels(pimpl_->model_, &(outcomes[0])); vector<double> probabilities(n_classes); predictions.clear(); predictions.reserve(n_obs); svm_node *x = new svm_node[feature_dim + 1]; for (Size i = 0; i != n_obs; ++i) { size_t feature_index{0}; for (auto p : predictors) { x[feature_index].index = feature_index + 1; x[feature_index].value = p.second[i]; // feature value for observation i ++feature_index; } x[feature_dim].index = -1; x[feature_dim].value = 0; Prediction pred; pred.outcome = svm_predict_probability(pimpl_->model_, x, &(probabilities[0])); for (Size c = 0; c < n_classes; ++c) { pred.probabilities[outcomes[c]] = probabilities[c]; } predictions.push_back(pred); } delete[] x; } // only works in classification mode void SimpleSVM::getFeatureWeights(map<String, double>& feature_weights) const { if (pimpl_->model_ == nullptr) { throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "SVM model has not been trained (use the " "'setup' method)"); } Size k = pimpl_->model_->nr_class; if (k > 2) { throw 
Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Output of feature weights is currently only " "supported for two-class classification"); } feature_weights.clear(); Size n_sv = pimpl_->model_->l; // number of support vectors for (Size l = 0; l < n_sv; ++l) { double sv_coef = pimpl_->model_->sv_coef[0][l]; // LIBSVM uses a sparse representation for data (incl. support vectors): for (Size n = 0; ; ++n) { const struct svm_node& node = pimpl_->model_->SV[l][n]; if (node.index == -1) break; const String& predictor_name = pimpl_->predictor_names_[node.index - 1]; feature_weights[predictor_name] += sv_coef * node.value; } } } const SimpleSVM::ScaleMap& SimpleSVM::getScaling() const { return pimpl_->scaling_; } void SimpleSVM::writeXvalResults(const String& path) const { SVOutStream output(path); output.modifyStrings(false); output << "log2_C" << "log2_gamma" << "log2_p" << "performance" << nl; for (Size g_index = 0; g_index < pimpl_->log2_gamma_.size(); ++g_index) { for (Size c_index = 0; c_index < pimpl_->log2_C_.size(); ++c_index) { for (Size p_index = 0; p_index < pimpl_->log2_p_.size(); ++p_index) { output << pimpl_->log2_C_[c_index] << pimpl_->log2_gamma_[g_index] << pimpl_->log2_p_[p_index] << pimpl_->performance_[g_index][c_index][p_index] << nl; } } } }
C++
3D
OpenMS/OpenMS
src/openms/source/ML/CROSSVALIDATION/CrossValidation.cpp
.cpp
425
15
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Justin Sing $ // $Authors: Justin Sing $ // -------------------------------------------------------------------------- // #include <OpenMS/ML/CROSSVALIDATION/CrossValidation.h> namespace OpenMS { }
C++
3D
OpenMS/OpenMS
src/openms/source/ML/REGRESSION/LinearRegressionWithoutIntercept.cpp
.cpp
1,283
51
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Lars Nilse $ // $Authors: Lars Nilse $ // -------------------------------------------------------------------------- //#include <OpenMS/CONCEPT/Macros.h> #include <OpenMS/ML/REGRESSION/LinearRegressionWithoutIntercept.h> namespace OpenMS::Math { LinearRegressionWithoutIntercept::LinearRegressionWithoutIntercept() : sum_xx_(0), sum_xy_(0), n_(0) { } void LinearRegressionWithoutIntercept::addData(double x, double y) { sum_xx_ += x * x; sum_xy_ += x * y; ++n_; } void LinearRegressionWithoutIntercept::addData(std::vector<double>& x, std::vector<double>& y) { for (unsigned i = 0; i < x.size(); ++i) { addData(x[i], y[i]); } } /** * @brief returns the slope of the estimated regression line. */ double LinearRegressionWithoutIntercept::getSlope() const { if (n_ < 2) { return std::numeric_limits<double>::quiet_NaN(); // not enough data } return sum_xy_ / sum_xx_; } } //OpenMS //Math
C++
3D
OpenMS/OpenMS
src/openms/source/ML/REGRESSION/LinearRegression.cpp
.cpp
10,975
310
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------

#include <OpenMS/CONCEPT/Macros.h>
#include <OpenMS/ML/REGRESSION/LinearRegression.h>
#include <OpenMS/MATH/StatisticFunctions.h>

#include <Mathematics/Vector2.h>
#include <Mathematics/ApprHeightLine2.h>
#include <Mathematics/LinearSystem.h>

#include <boost/math/distributions/normal.hpp>
#include <boost/math/special_functions/binomial.hpp>
#include <boost/math/distributions.hpp>

#include <iostream>

// NOTE(review): this using-declaration does not appear to be used anywhere in
// this file (quantile() is called on a students_t object below) — candidate
// for removal; confirm no other TU relies on it being pulled in here.
using boost::math::detail::inverse_students_t;

namespace OpenMS::Math
{
  // Helper: splits a vector of 2D points into separate X and Y coordinate vectors.
  static void vector2ToStdVec_(const std::vector<gte::Vector2<double>>& points,
                               std::vector<double>& Xout,
                               std::vector<double>& Yout)
  {
    unsigned N = static_cast<unsigned>(points.size());
    Xout.clear();
    Xout.reserve(N);
    Yout.clear();
    Yout.reserve(N);
    for (unsigned i = 0; i < N; ++i)
    {
      Xout.push_back(points[i][0]);
      Yout.push_back(points[i][1]);
    }
  }

  // --- Simple accessors for the fit results computed by computeRegression*() ---

  double LinearRegression::getIntercept() const
  {
    return intercept_;
  }

  double LinearRegression::getSlope() const
  {
    return slope_;
  }

  double LinearRegression::getXIntercept() const
  {
    return x_intercept_;
  }

  // Lower border of the confidence interval around the x-intercept.
  double LinearRegression::getLower() const
  {
    return lower_;
  }

  // Upper border of the confidence interval around the x-intercept.
  double LinearRegression::getUpper() const
  {
    return upper_;
  }

  // Quantile of the t-distribution used for the confidence interval.
  double LinearRegression::getTValue() const
  {
    return t_star_;
  }

  // Squared Pearson correlation coefficient of the fit.
  double LinearRegression::getRSquared() const
  {
    return r_squared_;
  }

  double LinearRegression::getStandDevRes() const
  {
    return stand_dev_residuals_;
  }

  double LinearRegression::getMeanRes() const
  {
    return mean_residuals_;
  }

  double LinearRegression::getStandErrSlope() const
  {
    return stand_error_slope_;
  }

  double LinearRegression::getChiSquared() const
  {
    return chi_squared_;
  }

  // Relative standard deviation (see non-standard formula in computeGoodness_).
  double LinearRegression::getRSD() const
  {
    return rsd_;
  }

  // Computes goodness-of-fit statistics for the already-fitted line
  // Y_hat = intercept_ + slope_ * X: squared Pearson coefficient, residual
  // statistics, standard error of the slope, x-intercept with an asymmetric
  // confidence interval (t-distribution based), and a relative standard
  // deviation. Requires slope_, intercept_ and chi_squared_ to be set.
  void LinearRegression::computeGoodness_(const std::vector<double>& X, const std::vector<double>& Y, double confidence_interval_P)
  {
    // NOTE(review): the first precondition casts the *bool* (X.size() == Y.size())
    // to unsigned — it works as a truth test, but reads as if it were meant to
    // cast a size; confirm intent.
    OPENMS_PRECONDITION(static_cast<unsigned>(X.size() == Y.size()), "Fitted X and Y have different lengths.");
    OPENMS_PRECONDITION(static_cast<unsigned>(X.size()) > 2, "Cannot compute goodness of fit for regression with less than 3 data points");
    // specifically, boost throws an exception for a t-distribution with zero df
    Size N = X.size();

    // Mean of abscissa and ordinate values
    double x_mean = Math::mean(X.begin(), X.end());
    double y_mean = Math::mean(Y.begin(), Y.end());

    // Variance and Covariances
    double var_X = Math::variance(X.begin(), X.end(), x_mean);
    double var_Y = Math::variance(Y.begin(), Y.end(), y_mean);
    double cov_XY = Math::covariance(X.begin(), X.end(), Y.begin(), Y.end());

    // S_xx = sum of squared deviations of X from its mean (via the sample variance)
    double s_XX = var_X * (N-1);
    /*for (unsigned i = 0; i < N; ++i)
    {
      double d = (X[i] - x_mean);
      s_XX += d * d;
    }*/

    // Compute the squared Pearson coefficient
    r_squared_ = (cov_XY * cov_XY) / (var_X * var_Y);

    // Mean absolute residual, and the standard deviation of the residuals
    // (derived from chi_squared_, which must hold sum((y - Y_hat)^2))
    double sum = 0;
    for (unsigned i = 0; i < N; ++i)
    {
      double x_i = fabs(Y[i] - (intercept_ + slope_ * X[i]));
      sum += x_i;
    }
    mean_residuals_ = sum / N;
    stand_dev_residuals_ = sqrt((chi_squared_ - (sum * sum) / N) / (N - 1));

    // The Standard error of the slope
    stand_error_slope_ = stand_dev_residuals_ / sqrt(s_XX);

    // and the intersection of Y_hat with the x-axis
    x_intercept_ = -(intercept_ / slope_);

    // Two-sided quantile of the t-distribution with N-2 degrees of freedom
    double P = 1 - (1 - confidence_interval_P) / 2;
    boost::math::students_t tdist(N - 2);
    t_star_ = boost::math::quantile(tdist, P);

    //Compute the asymmetric 95% confidence interval of around the X-intercept
    // (Fieller-type construction; degenerates when g approaches 1)
    double g = (t_star_ / (slope_ / stand_error_slope_));
    g *= g;
    double left = (x_intercept_ - x_mean) * g;
    double bottom = 1 - g;
    double d = (x_intercept_ - x_mean);
    double right = t_star_ * (stand_dev_residuals_ / slope_) * sqrt((d * d) / s_XX + (bottom / N));

    // Confidence interval lower_ <= X_intercept <= upper_
    lower_ = x_intercept_ + (left + right) / bottom;
    upper_ = x_intercept_ + (left - right) / bottom;

    // ensure lower_ <= upper_ regardless of the sign of 'bottom'
    if (lower_ > upper_)
    {
      std::swap(lower_, upper_);
    }

    // Sum of squared deviations of X (recomputed explicitly; equals s_XX above)
    double tmp = 0;
    for (unsigned i = 0; i < N; ++i)
    {
      tmp += (X[i] - x_mean) * (X[i] - x_mean);
    }

    // cout << "100.0 / abs( x_intercept_ ) " << (100.0 / fabs( x_intercept_ )) << endl;
    // cout << "tmp : " << tmp << endl;
    // cout << "slope_ " << slope_ << endl;
    // cout << "y_mean " << y_mean << endl;
    // cout << "N " << N << endl;
    // cout << "stand_dev_residuals_ " << stand_dev_residuals_ << endl;
    // cout << " (1.0/ (double) N) " << (1.0/ (double) N) << endl;
    // cout << "sx hat " << (stand_dev_residuals_ / slope_) * sqrt( (1.0/ (double) N) * (y_mean / (slope_ * slope_ * tmp ) ) ) << endl;

    // compute relative standard deviation (non-standard formula, taken from Mayr et al. (2006) )
    rsd_ = (100.0 / fabs(x_intercept_)) * (stand_dev_residuals_ / slope_) * sqrt((1.0 / (double) N) * (y_mean / (slope_ * slope_ * tmp)));

    // diagnostic dump for the (unexpected) case of a negative RSD
    if (rsd_ < 0.0)
    {
      std::cout << "rsd < 0.0 " << std::endl;
      std::cout << "Intercept " << intercept_
                << "\nSlope " << slope_
                << "\nSquared pearson coefficient " << r_squared_
                << "\nValue of the t-distribution " << t_star_
                << "\nStandard deviation of the residuals " << stand_dev_residuals_
                << "\nStandard error of the slope " << stand_error_slope_
                << "\nThe X intercept " << x_intercept_
                << "\nThe lower border of confidence interval " << lower_
                << "\nThe higher border of confidence interval " << upper_
                << "\nChi squared value " << chi_squared_
                << "\nx mean " << x_mean
                << "\nstand_error_slope/slope_ " << (stand_dev_residuals_ / slope_)
                << "\nCoefficient of Variation " << (stand_dev_residuals_ / slope_) / x_mean * 100
                << std::endl
                << "=========================================" << std::endl;
    }
  }

  // Unweighted least-squares fit of Y_hat = intercept_ + slope_ * X over the
  // iterator ranges [x_begin, x_end) and the parallel y range. Also fills
  // chi_squared_ and (optionally) the goodness-of-fit members.
  // @throws Exception::UnableToFit if the underlying gte fit fails.
  void LinearRegression::computeRegression(double confidence_interval_P,
                                           std::vector<double>::const_iterator x_begin,
                                           std::vector<double>::const_iterator x_end,
                                           std::vector<double>::const_iterator y_begin,
                                           bool compute_goodness)
  {
    std::vector<gte::Vector2<double>> points;
    for(std::vector<double>::const_iterator xIter = x_begin, yIter = y_begin; xIter!=x_end; ++xIter, ++yIter)
    {
      points.emplace_back(std::initializer_list<double>{*xIter, *yIter});
    }

    // Compute the unweighted linear fit.
    // Get the intercept and the slope of the regression Y_hat=intercept_+slope_*X
    // and the value of Chi squared (sum( (y - evel(x))^2)
    auto line = gte::ApprHeightLine2<double>();
    bool pass = line.Fit(static_cast<int>(points.size()), &points.front());
    // NOTE(review): slope_/intercept_/chi_squared_ are assigned from the fit
    // result *before* 'pass' is checked; on failure the members may hold
    // garbage when the exception propagates — confirm whether intentional.
    slope_ = line.GetParameters().second[0];
    intercept_ = -slope_ * line.GetParameters().first[0] + line.GetParameters().first[1];
    chi_squared_ = computeChiSquare(x_begin, x_end, y_begin, slope_, intercept_);

    if (!pass)
    {
      throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "UnableToFit-LinearRegression", String("Could not fit a linear model to the data (") + points.size() + " points).");
    }

    // goodness-of-fit requires > 2 points (t-distribution needs df >= 1)
    if (compute_goodness && points.size() > 2)
    {
      std::vector<double> X,Y;
      vector2ToStdVec_(points, X, Y);
      computeGoodness_(X, Y , confidence_interval_P);
    }
  }

  // Weighted least-squares fit of Y_hat = intercept_ + slope_ * X, with one
  // weight per point taken from [w_begin, ...). Solves the 2x2 weighted
  // normal equations directly; fills chi_squared_ and (optionally) the
  // goodness-of-fit members.
  // @throws Exception::UnableToFit if the normal-equation system is singular.
  void LinearRegression::computeRegressionWeighted(double confidence_interval_P,
                                                   std::vector<double>::const_iterator x_begin,
                                                   std::vector<double>::const_iterator x_end,
                                                   std::vector<double>::const_iterator y_begin,
                                                   std::vector<double>::const_iterator w_begin,
                                                   bool compute_goodness)
  {
    // Compute the weighted linear fit.
    // Get the intercept and the slope of the regression Y_hat=intercept_+slope_*X
    // and the value of Chi squared, the covariances of the intercept and the slope
    std::vector<gte::Vector2<double>> points;
    for(std::vector<double>::const_iterator xIter = x_begin, yIter = y_begin; xIter!=x_end; ++xIter, ++yIter)
    {
      points.emplace_back(std::initializer_list<double>{*xIter, *yIter});
    }

    // Compute sums for linear system. copy&paste from GeometricToolsEngine (gte) ApprHeightLine2.h
    // and modified to allow weights
    int numPoints = static_cast<int>(points.size());
    double sumX = 0, sumY = 0;
    double sumXX = 0, sumXY = 0;
    double sumW = 0;
    auto wIter = w_begin;
    for (int i = 0; i < numPoints; ++i, ++wIter)
    {
      sumX += (*wIter) * points[i][0];
      sumY += (*wIter) * points[i][1];
      sumXX += (*wIter) * points[i][0] * points[i][0];
      sumXY += (*wIter) * points[i][0] * points[i][1];
      sumW += (*wIter);
    }

    //create matrices to solve Ax = B
    gte::Matrix2x2<double> A
    {
      sumXX, sumX,
      sumX, sumW
    };
    gte::Vector2<double> B
    {
      sumXY,
      sumY
    };
    gte::Vector2<double> X;

    bool nonsingular = gte::LinearSystem<double>().Solve(A, B, X);
    if (nonsingular)
    {
      slope_ = X[0];
      intercept_ = X[1];
    }
    // NOTE(review): on a singular system this uses the *previous* slope_/intercept_
    // values for the chi-square before throwing below — confirm whether intentional.
    chi_squared_ = computeWeightedChiSquare(x_begin, x_end, y_begin, w_begin, slope_, intercept_);

    if (nonsingular)
    {
      if (compute_goodness && points.size() > 2)
      {
        std::vector<double> X,Y;
        vector2ToStdVec_(points, X, Y);
        computeGoodness_(X, Y, confidence_interval_P);
      }
    }
    else
    {
      throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "UnableToFit-LinearRegression", "Could not fit a linear model to the data");
    }
  }
} // OpenMS //Math
C++
3D
OpenMS/OpenMS
src/openms/source/ML/REGRESSION/QuadraticRegression.cpp
.cpp
4,150
131
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: Christian Ehrlich, Chris Bielow $ // -------------------------------------------------------------------------- #include <OpenMS/ML/REGRESSION/QuadraticRegression.h> #include <Mathematics/Vector2.h> #include <Mathematics/Vector3.h> #include <Mathematics/Matrix3x3.h> #include <Mathematics/LinearSystem.h> namespace OpenMS::Math { // Note:x, y must be of same size double computeChiSquareWeighted( std::vector<double>::const_iterator x_begin, const std::vector<double>::const_iterator& x_end, std::vector<double>::const_iterator y_begin, std::vector<double>::const_iterator w_begin, const double a, const double b, const double c) { double chi_squared(0.0); for (; x_begin != x_end; ++x_begin, ++y_begin, ++w_begin) { const double& x = *x_begin; const double& y = *y_begin; const double& weight = *w_begin; chi_squared += weight * std::pow(y - a - b * x - c * x * x, 2); } return chi_squared; } QuadraticRegression::QuadraticRegression() : a_(0), b_(0), c_(0), chi_squared_(0) {} double QuadraticRegression::eval(double x) const {return a_ + b_*x + c_*x*x;} double QuadraticRegression::eval(double A, double B, double C, double x) {return A + B*x + C*x*x;} double QuadraticRegression::getA() const {return a_;} double QuadraticRegression::getB() const {return b_;} double QuadraticRegression::getC() const {return c_;} double QuadraticRegression::getChiSquared() const {return chi_squared_;} void QuadraticRegression::computeRegression(std::vector<double>::const_iterator x_begin, std::vector<double>::const_iterator x_end, std::vector<double>::const_iterator y_begin) { std::vector<double> weights(std::distance(x_begin, x_end), 1); computeRegressionWeighted(x_begin, x_end, y_begin, weights.begin()); } void 
QuadraticRegression::computeRegressionWeighted( std::vector<double>::const_iterator x_begin, std::vector<double>::const_iterator x_end, std::vector<double>::const_iterator y_begin, std::vector<double>::const_iterator w_begin) { // Compute the linear fit of a quadratic function. // Get the coefficients for y = w_1*a +w_2*b*x + w_3*c*x^2. std::vector<gte::Vector2<double>> points; for(std::vector<double>::const_iterator xIter = x_begin, yIter = y_begin; xIter!=x_end; ++xIter, ++yIter) { points.emplace_back(std::initializer_list<double>{*xIter, *yIter}); } // Compute sums for linear system. copy&paste from GeometricTools // and modified to allow quadratic functions int numPoints = static_cast<Int>(points.size()); double sumX = 0, sumXX = 0, sumXXX = 0, sumXXXX = 0; double sumY = 0, sumXY = 0, sumXXY = 0; double sumW = 0; auto wIter = w_begin; for (int i = 0; i < numPoints; ++i, ++wIter) { double x = points[i][0]; double y = points[i][1]; double weight = *wIter; sumX += weight * x; sumXX += weight * x * x; sumXXX += weight * x * x * x; sumXXXX += weight * x * x * x * x; sumY += weight * y; sumXY += weight * x * y; sumXXY += weight * x * x * y; sumW += weight; } //create matrices to solve Ax = B gte::Matrix3x3<double> A { sumW, sumX, sumXX, sumX, sumXX, sumXXX, sumXX, sumXXX, sumXXXX }; gte::Vector3<double> B { sumY, sumXY, sumXXY }; gte::Vector3<double> X; bool nonsingular = gte::LinearSystem<double>().Solve(A, B, X); if (nonsingular) { a_ = X[0]; b_ = X[1]; c_ = X[2]; chi_squared_ = computeChiSquareWeighted(x_begin, x_end, y_begin, w_begin, a_, b_, c_); } else { throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "UnableToFit-QuadraticRegression", "Could not fit a linear model to the data"); } } } //OpenMS //Math
C++
3D
OpenMS/OpenMS
src/openms/source/ML/ROCCURVE/ROCCurve.cpp
.cpp
6,536
241
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
//

#include <OpenMS/ML/ROCCURVE/ROCCurve.h>
#include <OpenMS/DATASTRUCTURES/DPosition.h>
// NOTE(review): TransformationModelInterpolated does not appear to be used in
// this file — candidate for removal; confirm no transitive dependency.
#include <OpenMS/ANALYSIS/MAPMATCHING/TransformationModelInterpolated.h>

#include <cmath>
#include <numeric>

namespace OpenMS::Math
{
  ROCCurve::ROCCurve() :
    score_clas_pairs_(),
    pos_(0),
    neg_(0)
  {
  }

  ROCCurve::ROCCurve(const ROCCurve & source) :
    score_clas_pairs_(source.score_clas_pairs_),
    pos_(source.pos_),
    neg_(source.neg_)
  {
  }

  // Constructs from (score, is_positive) pairs; counts positives/negatives
  // by summing the bool flags.
  ROCCurve::ROCCurve(const std::vector<std::pair<double,bool>> & pairs) :
    score_clas_pairs_(pairs)
  {
    pos_ = std::accumulate(score_clas_pairs_.begin(), score_clas_pairs_.end(), 0u,
          [&](const UInt& x, const std::pair<double,bool>& y ) { return x + y.second; }
          );
    neg_ = static_cast<UInt>(score_clas_pairs_.size()) - pos_;
  }

  ROCCurve & ROCCurve::operator=(const ROCCurve & source)
  {
    if (this != &source)
    {
      score_clas_pairs_ = source.score_clas_pairs_;
      pos_ = source.pos_;
      neg_ = source.neg_;
    }
    return *this;
  }

  // Adds a single (score, classification) observation and invalidates the
  // cached sort order.
  void ROCCurve::insertPair(double score, bool clas)
  {
    score_clas_pairs_.emplace_back(std::make_pair(score, clas));
    if (clas)
    {
      ++pos_;
    }
    else
    {
      ++neg_;
    }
    sorted_ = false;
  }

  // Area under the ROC curve via trapezoidal integration over distinct score
  // thresholds. Returns 0.5 (chance level) for an empty dataset.
  double ROCCurve::AUC()
  {
    if (score_clas_pairs_.empty())
    {
      // NOTE(review): the message mentions "no positives or no negatives" but
      // the condition only checks for an empty container — confirm intent.
      std::cerr << "ROCCurve::AUC() : unsuitable dataset (no positives or no negatives)\n";
      return 0.5;
    }
    sort();
    double prevscore = -std::numeric_limits<double>::infinity();
    UInt prevTP = 0;
    UInt prevFP = 0;
    UInt truePos = 0;
    UInt falsePos = 0;
    double area = 0.0;
    for (auto const& pair : score_clas_pairs_)
    {
      //since it is sorted we do not need abs here.
      //TODO not sure if this makes much difference to an equality
      // NOTE(review): with prevscore starting at -inf this condition fires on
      // the first element; for a *descending* sort (simsortdec) subsequent
      // differences are <= 0 and would never exceed 1e-8 — verify the sign of
      // this comparison (prevscore - pair.first?) against the sort direction.
      if ((pair.first - prevscore) > 1e-8)
      {
        area += trapezoidal_area(falsePos,prevFP,truePos,prevTP);
        prevscore = pair.first;
        prevTP = truePos;
        prevFP = falsePos;
      }
      pair.second ? ++truePos : ++falsePos;
    }
    area += trapezoidal_area(falsePos,prevFP,truePos,prevTP);

    // scale to unit square
    // NOTE(review): divides by truePos * falsePos — yields inf/NaN when the
    // data contains no positives or no negatives; confirm callers guard this.
    area /= truePos * falsePos;

    // update internals
    pos_ = truePos;
    neg_ = falsePos;
    return area;
  }

  // ROC-N score: area under the ROC curve restricted to the first N false
  // positives (polygon built from (FPR, TPR) vertices). Returns -1 when the
  // dataset cannot supply N false positives.
  double ROCCurve::rocN(Size N)
  {
    if (score_clas_pairs_.size() < N)
    {
      std::cerr << "ROCCurve::rocN() : unsuitable dataset (not enough false positives)\n";
      return -1;
    }
    sort();
    count();
    // value that is not in score_clas_pairs_
    // NOTE(review): prevsim is never updated inside the loop, so the 1e-8
    // comparison below only suppresses vertices for scores within 1e-8 of
    // (first score + 1) — confirm whether prevsim was meant to track cit->first.
    double prevsim = score_clas_pairs_.begin()->first + 1;
    UInt truePos = 0;
    UInt falsePos = 0;
    std::vector<DPosition<2> > polygon;
    for (std::vector<std::pair<double, bool> >::const_iterator cit = score_clas_pairs_.begin(); cit != score_clas_pairs_.end() && falsePos <= N; ++cit)
    {
      if (fabs(cit->first - prevsim) > 1e-8)
      {
        polygon.emplace_back(DPosition<2>((double)falsePos / neg_, (double)truePos / pos_));
      }
      if (cit->second)
      {
        ++truePos;
      }
      else
      {
        ++falsePos;
      }
    }
    polygon.emplace_back(DPosition<2>(1, 1));
    std::sort(polygon.begin(), polygon.end());

    // rectangle-wise integration under the sorted polygon vertices
    DPosition<2> last(0, 0);
    double area(0);
    for (const DPosition<2>& dp : polygon)
    {
      area += (dp.getX() - last.getX()) * (dp.getY());
      last = dp;
    }
    if (falsePos < N)
    {
      std::cerr << "ROCCurve::rocN() : unsuitable dataset (not enough false positives)\n";
      return -1;
    }
    return area;
  }

  // Samples the ROC curve as up to 'resolution' (FPR, TPR) points.
  std::vector<std::pair<double, double>> ROCCurve::curve(UInt resolution)
  {
    sort();
    count();
    std::vector<std::pair<double, double> > result;
    UInt position = 0;
    UInt truePos = 0;
    UInt falsePos = 0;
    for (auto const& pair : score_clas_pairs_)
    {
      pair.second ? ++truePos : ++falsePos;
      // emit a point whenever the fraction of consumed pairs crosses the next
      // resolution step
      if (((double)++position / score_clas_pairs_.size()) * resolution > result.size())
      {
        result.emplace_back(std::make_pair((double)falsePos / neg_, (double)truePos / pos_));
      }
    }
    return result;
  }

  // Returns the score at which the given fraction of positives has been seen
  // (walking in sort order); -1 if the fraction is never exceeded.
  double ROCCurve::cutoffPos(double fraction)
  {
    sort();
    count();
    UInt truePos = 0;
    for (std::vector<std::pair<double, bool> >::const_iterator cit = score_clas_pairs_.begin(); cit != score_clas_pairs_.end(); ++cit)
    {
      if (cit->second)
      {
        if ((double)truePos++ / pos_ > fraction)
        {
          return cit->first;
        }
      }
    }
    return -1;
  }

  // Returns the score cutoff for the given fraction of negatives; -1 if never
  // reached.
  double ROCCurve::cutoffNeg(double fraction)
  {
    sort();
    count();
    UInt trueNeg = 0;
    for (std::vector<std::pair<double, bool> >::const_iterator cit = score_clas_pairs_.begin(); cit != score_clas_pairs_.end(); ++cit)
    {
      // NOTE(review): this tests cit->second (a *positive* classification) yet
      // increments 'trueNeg' and divides by neg_ — looks like it should be
      // (!cit->second); confirm against the intended semantics before changing.
      if (cit->second)
      {
        if ((double)trueNeg++ / neg_ > 1 - fraction)
        {
          return cit->first;
        }
      }
    }
    return -1;
  }

  // Lazily sorts the pairs by score (simsortdec comparator from the header);
  // cached via sorted_.
  void ROCCurve::sort()
  {
    if (!sorted_)
    {
      std::sort(score_clas_pairs_.begin(),score_clas_pairs_.end(), simsortdec());
      sorted_ = true;
    }
  }

  // Lazily (re)derives pos_/neg_ from the stored pairs when both are zero.
  void ROCCurve::count()
  {
    if (pos_ == 0 && neg_ == 0)
    {
      pos_ = std::accumulate(score_clas_pairs_.begin(), score_clas_pairs_.end(), 0u,
            [&](const UInt& x, const std::pair<double,bool>& y ) { return x + y.second; }
            );
      neg_ = static_cast<UInt>(score_clas_pairs_.size()) - pos_;
    }
  }

  // Area of the trapezoid spanned by the x-interval [x1, x2] with heights y1, y2.
  double ROCCurve::trapezoidal_area(double x1, double x2, double y1, double y2)
  {
    double base = fabs(x1 - x2);
    double avgHeight = (y1+y2)/2.0;
    return base * avgHeight;
  }
} //OpenMS //Math
C++
3D
OpenMS/OpenMS
src/openms/source/ML/GRIDSEARCH/GridSearch.cpp
.cpp
427
15
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Julianus Pfeuffer $ // $Authors: Julianus Pfeuffer $ // -------------------------------------------------------------------------- // #include <OpenMS/ML/GRIDSEARCH/GridSearch.h> namespace OpenMS { }
C++
3D
OpenMS/OpenMS
src/openms/source/ML/CLUSTERING/CompleteLinkage.cpp
.cpp
4,644
132
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Mathias Walzer $
// $Authors: $
// --------------------------------------------------------------------------
//

#include <OpenMS/ML/CLUSTERING/CompleteLinkage.h>
#include <OpenMS/DATASTRUCTURES/String.h>

namespace OpenMS
{
  CompleteLinkage::CompleteLinkage() :
    ClusterFunctor(), ProgressLogger()
  {
  }

  // NOTE(review): the ProgressLogger part is default-constructed here rather
  // than copied from 'source' — confirm whether the logger state is
  // deliberately not carried over.
  CompleteLinkage::CompleteLinkage(const CompleteLinkage & source) :
    ClusterFunctor(source), ProgressLogger()
  {
  }

  CompleteLinkage::~CompleteLinkage() = default;

  CompleteLinkage & CompleteLinkage::operator=(const CompleteLinkage & source)
  {
    if (this != &source)
    {
      ClusterFunctor::operator=(source);
      ProgressLogger::operator=(source);
    }
    return *this;
  }

  // Agglomerative complete-linkage clustering on a precomputed distance matrix.
  // Repeatedly merges the closest pair of clusters (while their distance is
  // below 'threshold'), recording each merge as a BinaryTreeNode; remaining
  // clusters are appended as dummy nodes with distance -1.
  // The distance matrix is consumed/reduced in place.
  void CompleteLinkage::operator()(DistanceMatrix<float> & original_distance, std::vector<BinaryTreeNode> & cluster_tree, const float threshold /*=1*/) const
  {
    // attention: clustering process is done by clustering the indices
    // pointing to elements in input vector and distances in input matrix
    // input MUST have >= 2 elements!
    if (original_distance.dimensionsize() < 2)
    {
      throw ClusterFunctor::InsufficientInput(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Distance matrix to start from only contains one element");
    }

    // one singleton cluster per input element; each set holds original indices
    std::vector<std::set<Size> > clusters(original_distance.dimensionsize());
    for (Size i = 0; i < original_distance.dimensionsize(); ++i)
    {
      clusters[i].insert(i);
    }

    cluster_tree.clear();
    cluster_tree.reserve(original_distance.dimensionsize() - 1);

    // Initial minimum-distance pair
    original_distance.updateMinElement();
    std::pair<Size, Size> min = original_distance.getMinElementCoordinates();

    Size overall_cluster_steps(original_distance.dimensionsize());
    startProgress(0, original_distance.dimensionsize(), "clustering data");

    while (original_distance(min.first, min.second) < threshold)
    {
      //grow the tree
      cluster_tree.emplace_back(*(clusters[min.second].begin()), *(clusters[min.first].begin()), original_distance(min.first, min.second));
      if (cluster_tree.back().left_child > cluster_tree.back().right_child)
      {
        std::swap(cluster_tree.back().left_child, cluster_tree.back().right_child);
      }

      if (original_distance.dimensionsize() > 2)
      {
        //pick minimum-distance pair i,j and merge them
        //pushback elements of second to first (and then erase second)
        clusters[min.second].insert(clusters[min.first].begin(), clusters[min.first].end());
        // erase first one
        clusters.erase(clusters.begin() + min.first);

        //update original_distance matrix
        //complete linkage: new distance between clusters is the MAXIMUM distance
        //between elements of each cluster (the Lance-Williams update below,
        //0.5*a + 0.5*b + 0.5*|a-b|, equals max(a, b))
        //lance-williams update for d((i,j),k): 0.5* d(i,k) + 0.5* d(j,k) + 0.5* |d(i,k)-d(j,k)|
        for (Size k = 0; k < min.second; ++k)
        {
          float dik = original_distance.getValue(min.first, k);
          float djk = original_distance.getValue(min.second, k);
          original_distance.setValueQuick(min.second, k, (0.5f * dik + 0.5f * djk + 0.5f * std::fabs(dik - djk)));
        }
        for (Size k = min.second + 1; k < original_distance.dimensionsize(); ++k)
        {
          float dik = original_distance.getValue(min.first, k);
          float djk = original_distance.getValue(min.second, k);
          original_distance.setValueQuick(k, min.second, (0.5f * dik + 0.5f * djk + 0.5f * std::fabs(dik - djk)));
        }

        //reduce
        original_distance.reduce(min.first);

        //update minimum-distance pair
        original_distance.updateMinElement();

        //get new min-pair
        min = original_distance.getMinElementCoordinates();
      }
      else
      {
        break;
      }
      setProgress(overall_cluster_steps - original_distance.dimensionsize());

      //repeat until only two cluster remains or threshold exceeded, last step skips matrix operations
    }

    //fill tree with dummy nodes
    Size sad(*clusters.front().begin());
    for (Size i = 1; i < clusters.size() && (cluster_tree.size() < cluster_tree.capacity()); ++i)
    {
      cluster_tree.emplace_back(sad, *clusters[i].begin(), -1.0);
    }

    //~ while(cluster_tree.size() < cluster_tree.capacity())
    //~ {
    //~ cluster_tree.push_back(BinaryTreeNode(0,1,-1.0));
    //~ }

    endProgress();
  }
}
C++
3D
OpenMS/OpenMS
src/openms/source/ML/CLUSTERING/EuclideanSimilarity.cpp
.cpp
1,472
54
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Mathias Walzer $ // $Authors: Mathias Walzer $ // -------------------------------------------------------------------------- // #include <OpenMS/ML/CLUSTERING/EuclideanSimilarity.h> namespace OpenMS { EuclideanSimilarity::EuclideanSimilarity() : scale_(1) { } EuclideanSimilarity::EuclideanSimilarity(const EuclideanSimilarity & source) = default; EuclideanSimilarity::~EuclideanSimilarity() = default; EuclideanSimilarity & EuclideanSimilarity::operator=(const EuclideanSimilarity & source) { if (this != &source) { scale_ = source.scale_; } return *this; } float EuclideanSimilarity::operator()(const std::pair<float, float> & c) const { return operator()(c, c); } // calculates euclidean distance between two points float EuclideanSimilarity::operator()(const std::pair<float, float> & a, const std::pair<float, float> & b) const { if (scale_ == 0) { //inapplicable scaling throw Exception::DivisionByZero(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION); } return 1 - (sqrtf((a.first - b.first) * (a.first - b.first) + (a.second - b.second) * (a.second - b.second)) / scale_); } void EuclideanSimilarity::setScale(float x) { scale_ = x; } }
C++
3D
OpenMS/OpenMS
src/openms/source/ML/CLUSTERING/GridBasedCluster.cpp
.cpp
1,830
66
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Lars Nilse $ // $Authors: Lars Nilse $ // -------------------------------------------------------------------------- // #include <OpenMS/ML/CLUSTERING/GridBasedCluster.h> namespace OpenMS { GridBasedCluster::GridBasedCluster(const Point &centre, const Rectangle &bounding_box, const std::vector<int> &point_indices, const int &property_A, const std::vector<int> &properties_B) : centre_(centre), bounding_box_(bounding_box), point_indices_(point_indices), property_A_(property_A), properties_B_(properties_B) { } GridBasedCluster::GridBasedCluster(const Point &centre, const Rectangle &bounding_box, const std::vector<int> &point_indices) : centre_(centre), bounding_box_(bounding_box), point_indices_(point_indices), property_A_(-1), properties_B_(point_indices.size(),-1) { } const GridBasedCluster::Point& GridBasedCluster::getCentre() const { return centre_; } const GridBasedCluster::Rectangle& GridBasedCluster::getBoundingBox() const { return bounding_box_; } const std::vector<int>& GridBasedCluster::getPoints() const { return point_indices_; } int GridBasedCluster::getPropertyA() const { return property_A_; } const std::vector<int>& GridBasedCluster::getPropertiesB() const { return properties_B_; } bool GridBasedCluster::operator<(const GridBasedCluster& other) const { return centre_.getY() < other.centre_.getY(); } bool GridBasedCluster::operator>(const GridBasedCluster& other) const { return centre_.getY() > other.centre_.getY(); } bool GridBasedCluster::operator==(const GridBasedCluster& other) const { return centre_.getY() == other.centre_.getY(); } }
C++
3D
OpenMS/OpenMS
src/openms/source/ML/CLUSTERING/AverageLinkage.cpp
.cpp
4,679
127
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Mathias Walzer $
// $Authors: $
// --------------------------------------------------------------------------
//

#include <OpenMS/ML/CLUSTERING/AverageLinkage.h>
#include <OpenMS/DATASTRUCTURES/String.h>

namespace OpenMS
{
  AverageLinkage::AverageLinkage() :
    ClusterFunctor(), ProgressLogger()
  {
  }

  AverageLinkage::AverageLinkage(const AverageLinkage & source) = default;

  AverageLinkage::~AverageLinkage() = default;

  AverageLinkage & AverageLinkage::operator=(const AverageLinkage & source)
  {
    if (this != &source)
    {
      ClusterFunctor::operator=(source);
      ProgressLogger::operator=(source);
    }
    return *this;
  }

  // Agglomerative average-linkage clustering on a (destructively consumed)
  // distance matrix. The merge history is written to cluster_tree as
  // BinaryTreeNodes; merging stops once the closest pair is >= threshold,
  // after which remaining clusters are connected by dummy nodes (distance -1).
  // @param original_distance  pairwise distances; shrunk in place while merging
  // @param cluster_tree       output: one node per merge step
  // @param threshold          maximal distance at which two clusters may merge
  // @throws ClusterFunctor::InsufficientInput if the matrix has < 2 elements
  void AverageLinkage::operator()(DistanceMatrix<float> & original_distance, std::vector<BinaryTreeNode> & cluster_tree, const float threshold /*=1*/) const
  {
    // input MUST have >= 2 elements!
    if (original_distance.dimensionsize() < 2)
    {
      throw ClusterFunctor::InsufficientInput(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Distance matrix to start from only contains one element");
    }

    // each element starts as its own singleton cluster; a cluster is
    // represented by the set of its element indices
    std::vector<std::set<Size> > clusters(original_distance.dimensionsize());
    for (Size i = 0; i < original_distance.dimensionsize(); ++i)
    {
      clusters[i].insert(i);
    }

    cluster_tree.clear();
    cluster_tree.reserve(original_distance.dimensionsize() - 1); // n-1 merges for a full hierarchy

    // initial minimum-distance pair
    original_distance.updateMinElement();
    std::pair<Size, Size> min = original_distance.getMinElementCoordinates();

    Size overall_cluster_steps(original_distance.dimensionsize());
    startProgress(0, original_distance.dimensionsize(), "clustering data");

    while (original_distance(min.second, min.first) < threshold)
    {
      // grow the tree; each cluster is identified by its smallest element index
      cluster_tree.emplace_back(*(clusters[min.second].begin()), *(clusters[min.first].begin()), original_distance(min.first, min.second));
      if (cluster_tree.back().left_child > cluster_tree.back().right_child)
      {
        // keep children ordered: left_child < right_child
        std::swap(cluster_tree.back().left_child, cluster_tree.back().right_child);
      }

      if (original_distance.dimensionsize() > 2)
      {
        // pick minimum-distance pair i,j and merge them

        // calculate parameters for the Lance-Williams update:
        // weights are the relative sizes of the two merged clusters
        float alpha_i = (float)(clusters[min.first].size() / (float)(clusters[min.first].size() + clusters[min.second].size()));
        float alpha_j = (float)(clusters[min.second].size() / (float)(clusters[min.first].size() + clusters[min.second].size()));
        //~ std::cout << alpha_i << '\t' << alpha_j << std::endl;

        // push elements of the first cluster into the second (then erase the first)
        clusters[min.second].insert(clusters[min.first].begin(), clusters[min.first].end());
        // erase first one
        clusters.erase(clusters.begin() + min.first);

        // update original_distance matrix
        // average linkage: new distance between clusters is the average distance between elements of each cluster
        // Lance-Williams update for d((i,j),k): (m_i/(m_i+m_j))* d(i,k) + (m_j/(m_i+m_j))* d(j,k) ; m_x is the number of elements in cluster x
        for (Size k = 0; k < min.second; ++k)
        {
          float dik = original_distance.getValue(min.first, k);
          float djk = original_distance.getValue(min.second, k);
          original_distance.setValueQuick(min.second, k, (alpha_i * dik + alpha_j * djk));
        }
        for (Size k = min.second + 1; k < original_distance.dimensionsize(); ++k)
        {
          float dik = original_distance.getValue(min.first, k);
          float djk = original_distance.getValue(min.second, k);
          original_distance.setValueQuick(k, min.second, (alpha_i * dik + alpha_j * djk));
        }

        // drop row/column of the absorbed cluster
        original_distance.reduce(min.first);

        // update minimum-distance pair
        original_distance.updateMinElement();

        // get min-pair from triangular matrix
        min = original_distance.getMinElementCoordinates();
      }
      else
      {
        break; // only two clusters left: recorded the final merge, matrix ops unnecessary
      }
      setProgress(overall_cluster_steps - original_distance.dimensionsize());

      // repeat until only two clusters remain or the threshold is exceeded;
      // the last step skips matrix operations
    }
    // fill tree with dummy nodes (distance -1) so the tree always has n-1 nodes,
    // connecting the clusters left unmerged by the threshold
    Size sad(*clusters.front().begin());
    for (Size i = 1; (i < clusters.size()) && (cluster_tree.size() < cluster_tree.capacity()); ++i)
    {
      cluster_tree.emplace_back(sad, *clusters[i].begin(), -1.0);
    }

    endProgress();
  }
}
C++
3D
OpenMS/OpenMS
src/openms/source/ML/CLUSTERING/SingleLinkage.cpp
.cpp
5,418
168
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Mathias Walzer $
// $Authors: $
// --------------------------------------------------------------------------
//

#include <OpenMS/ML/CLUSTERING/SingleLinkage.h>
#include <OpenMS/CONCEPT/LogStream.h>

namespace OpenMS
{
  SingleLinkage::SingleLinkage() :
    ClusterFunctor(), ProgressLogger()
  {
  }

  SingleLinkage::SingleLinkage(const SingleLinkage & source) :
    ClusterFunctor(source), ProgressLogger()
  {
  }

  SingleLinkage::~SingleLinkage() = default;

  SingleLinkage & SingleLinkage::operator=(const SingleLinkage & source)
  {
    if (this != &source)
    {
      ClusterFunctor::operator=(source);
      ProgressLogger::operator=(source);
    }
    return *this;
  }

  // Single-linkage hierarchical clustering via the SLINK pointer
  // representation (pi = pointer to the lowest-index cluster merged with
  // later, lambda = merge distance), followed by a conversion of the
  // pointer representation into an ordered BinaryTreeNode list.
  // @param original_distance  pairwise distances (read-only here)
  // @param cluster_tree       output: one node per merge step
  // @param threshold          must be left at its default of 1; thresholds are unsupported
  // @throws ClusterFunctor::InsufficientInput if the matrix has < 2 elements
  // @throws Exception::NotImplemented if a threshold < 1 is requested
  void SingleLinkage::operator()(DistanceMatrix<float> & original_distance, std::vector<BinaryTreeNode> & cluster_tree, const float threshold /*=1*/) const
  {
    // input MUST have >= 2 elements!
    if (original_distance.dimensionsize() < 2)
    {
      throw ClusterFunctor::InsufficientInput(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Distance matrix to start from only contains one element");
    }

    cluster_tree.clear();
    if (threshold < 1)
    {
      // SLINK always builds the full hierarchy; early stopping is not implemented
      OPENMS_LOG_ERROR << "You tried to use Single Linkage clustering with a threshold. This is currently not supported!" << std::endl;
      throw Exception::NotImplemented(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION);
    }

    //SLINK
    std::vector<Size> pi;       // pi[i]: lowest-index element of the cluster i is next merged into
    pi.reserve(original_distance.dimensionsize());
    std::vector<float> lambda;  // lambda[i]: distance at which that merge happens
    lambda.reserve(original_distance.dimensionsize());

    startProgress(0, original_distance.dimensionsize(), "clustering data");

    // initialize first pointer values
    pi.push_back(0);
    lambda.push_back(std::numeric_limits<float>::max());

    // insert elements one at a time, updating the pointer representation
    for (Size k = 1; k < original_distance.dimensionsize(); ++k)
    {
      std::vector<float> row_k; // distances from the new element k to all previous elements
      row_k.reserve(k);

      // initialize pointer values for element to cluster
      pi.push_back(k);
      lambda.push_back(std::numeric_limits<float>::max());

      // get the right distances
      for (Size i = 0; i < k; ++i)
      {
        row_k.push_back(original_distance.getValue(i, k));
      }

      // calculate pointer values for element k
      for (Size i = 0; i < k; ++i)
      {
        if (lambda[i] >= row_k[i])
        {
          // k is now closer to i than i's previous merge partner:
          // propagate the old merge distance and repoint i at k
          row_k[pi[i]] = std::min(row_k[pi[i]], lambda[i]);
          lambda[i] = row_k[i];
          pi[i] = k;
        }
        else
        {
          // keep i's pointer; only relax the distance to i's partner
          row_k[pi[i]] = std::min(row_k[pi[i]], row_k[i]);
        }
      }

      // update clustering if necessary
      for (Size i = 0; i < k; ++i)
      {
        if (lambda[i] >= lambda[pi[i]])
        {
          pi[i] = k;
        }
      }
      setProgress(k);
    }

    // turn the pointer representation into tree nodes
    for (Size i = 0; i < pi.size() - 1; ++i)
    {
      // strict order is always kept in algorithm: i < pi[i]
      cluster_tree.emplace_back(i, pi[i], lambda[i]);
      //~ std::cout << i << '\n' << pi[i] << '\n' << lambda[i] << std::endl;
    }

    // sort pre-tree (by merge distance)
    std::sort(cluster_tree.begin(), cluster_tree.end(), compareBinaryTreeNode);

    // convert pre-tree to correct format: after each merge, references to the
    // absorbed (right) child are rewritten to the surviving (left) child and
    // higher indices are shifted down by one
    for (Size i = 0; i < cluster_tree.size(); ++i)
    {
      if (cluster_tree[i].right_child < cluster_tree[i].left_child)
      {
        std::swap(cluster_tree[i].left_child, cluster_tree[i].right_child);
      }
      for (Size k = i + 1; k < cluster_tree.size(); ++k)
      {
        if (cluster_tree[k].left_child == cluster_tree[i].right_child)
        {
          cluster_tree[k].left_child = cluster_tree[i].left_child;
        }
        else if (cluster_tree[k].left_child > cluster_tree[i].right_child)
        {
          --cluster_tree[k].left_child;
        }
        if (cluster_tree[k].right_child == cluster_tree[i].right_child)
        {
          cluster_tree[k].right_child = cluster_tree[i].left_child;
        }
        else if (cluster_tree[k].right_child > cluster_tree[i].right_child)
        {
          --cluster_tree[k].right_child;
        }
      }
    }

    //~ prepare to redo clustering to get all indices for binarytree in min index element representation
    std::vector<std::set<Size> > clusters(original_distance.dimensionsize());
    for (Size i = 0; i < original_distance.dimensionsize(); ++i)
    {
      clusters[i].insert(i);
    }

    // replay the merges so every node names each cluster by its smallest
    // element index (the convention used by the other linkage functors)
    for (Size cluster_step = 0; cluster_step < cluster_tree.size(); ++cluster_step)
    {
      Size new_left_child = *(clusters[cluster_tree[cluster_step].left_child].begin());
      Size new_right_child = *(clusters[cluster_tree[cluster_step].right_child].begin());
      clusters[cluster_tree[cluster_step].left_child].insert(clusters[cluster_tree[cluster_step].right_child].begin(), clusters[cluster_tree[cluster_step].right_child].end());
      clusters.erase(clusters.begin() + cluster_tree[cluster_step].right_child);
      std::swap(cluster_tree[cluster_step].left_child, new_left_child);
      std::swap(cluster_tree[cluster_step].right_child, new_right_child);
      if (cluster_tree[cluster_step].left_child > cluster_tree[cluster_step].right_child)
      {
        std::swap(cluster_tree[cluster_step].left_child, cluster_tree[cluster_step].right_child);
      }
    }

    endProgress();
  }
}
C++
3D
OpenMS/OpenMS
src/openms/source/ML/CLUSTERING/GridBasedClustering.cpp
.cpp
1,232
47
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Lars Nilse $ // $Authors: Lars Nilse $ // -------------------------------------------------------------------------- #include <OpenMS/ML/CLUSTERING/GridBasedClustering.h> using namespace std; namespace OpenMS { MinimumDistance::MinimumDistance(const int &cluster_index, const int &nearest_neighbour_index, const double &distance) :cluster_index_(cluster_index), nearest_neighbour_index_(nearest_neighbour_index), distance_(distance) { } int MinimumDistance::getClusterIndex() const { return cluster_index_; } int MinimumDistance::getNearestNeighbourIndex() const { return nearest_neighbour_index_; } bool MinimumDistance::operator<(const MinimumDistance& other) const { return distance_ < other.distance_; } bool MinimumDistance::operator>(const MinimumDistance& other) const { return distance_ > other.distance_; } bool MinimumDistance::operator==(const MinimumDistance& other) const { return distance_ == other.distance_; } }
C++
3D
OpenMS/OpenMS
src/openms/source/ML/CLUSTERING/ClusteringGrid.cpp
.cpp
3,213
97
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Lars Nilse $ // $Authors: Lars Nilse $ // -------------------------------------------------------------------------- // #include <OpenMS/ML/CLUSTERING/ClusteringGrid.h> #include <functional> #include <sstream> namespace OpenMS { ClusteringGrid::ClusteringGrid(const std::vector<double> &grid_spacing_x, const std::vector<double> &grid_spacing_y) :grid_spacing_x_(grid_spacing_x), grid_spacing_y_(grid_spacing_y), range_x_(grid_spacing_x.front(),grid_spacing_x.back()), range_y_(grid_spacing_y.front(),grid_spacing_y.back()) { } std::vector<double> ClusteringGrid::getGridSpacingX() const { return grid_spacing_x_; } std::vector<double> ClusteringGrid::getGridSpacingY() const { return grid_spacing_y_; } void ClusteringGrid::addCluster(const CellIndex &cell_index, const int &cluster_index) { if (cells_.find(cell_index) == cells_.end()) { // If hash grid cell does not yet exist, create a new one. std::list<int> clusters; clusters.push_back(cluster_index); cells_.insert(std::make_pair(cell_index, clusters)); } else { // If hash grid cell already exists, add the new cluster index to the existing list of clusters. 
cells_.find(cell_index)->second.push_back(cluster_index); } } void ClusteringGrid::removeCluster(const CellIndex &cell_index, const int &cluster_index) { if (cells_.find(cell_index) != cells_.end()) { cells_.find(cell_index)->second.remove(cluster_index); if (cells_.find(cell_index)->second.empty()) { cells_.erase(cell_index); } } } void ClusteringGrid::removeAllClusters() { cells_.clear(); } std::list<int> ClusteringGrid::getClusters(const CellIndex &cell_index) const { return cells_.find(cell_index)->second; } ClusteringGrid::CellIndex ClusteringGrid::getIndex(const Point &position) const { if (position.getX() < range_x_.first || position.getX() > range_x_.second || position.getY() < range_y_.first || position.getY() > range_y_.second) { std::stringstream stream; stream << "This position (x,y)=(" << position.getX() << "," << position.getY() << ") is outside the range of the grid. (" << range_x_.first << " <= x <= " << range_x_.second << ", " << range_y_.first << " <= y <= " << range_y_.second << ")"; throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, stream.str()); } int i = std::lower_bound(grid_spacing_x_.begin(), grid_spacing_x_.end(), position.getX(), std::less_equal< double >()) - grid_spacing_x_.begin(); int j = std::lower_bound(grid_spacing_y_.begin(), grid_spacing_y_.end(), position.getY(), std::less_equal< double >()) - grid_spacing_y_.begin(); return ClusteringGrid::CellIndex (i,j); } bool ClusteringGrid::isNonEmptyCell(const CellIndex &cell_index) const { return cells_.find(cell_index) != cells_.end(); } int ClusteringGrid::getCellCount() const { return cells_.size(); } }
C++
3D
OpenMS/OpenMS
src/openms/source/ML/CLUSTERING/ClusterAnalyzer.cpp
.cpp
30,167
763
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Mathias Walzer $ // $Authors: $ // -------------------------------------------------------------------------- // #include <OpenMS/ML/CLUSTERING/ClusterAnalyzer.h> #include <OpenMS/DATASTRUCTURES/String.h> #include <map> #include <set> #include <list> //using namespace std; namespace OpenMS { ClusterAnalyzer::ClusterAnalyzer() = default; ClusterAnalyzer::~ClusterAnalyzer() = default; ClusterAnalyzer & ClusterAnalyzer::operator=(const ClusterAnalyzer & source) { //ALWAYS CHECK FOR SELF ASSIGNMENT! if (this == &source) { return *this; } //... return *this; } std::vector<float> ClusterAnalyzer::averageSilhouetteWidth(const std::vector<BinaryTreeNode> & tree, const DistanceMatrix<float> & original) { //throw exception if cannot be legal clustering if (tree.empty()) { throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "tree is empty but minimal clustering hirachy has at least one level"); } std::vector<float> average_silhouette_widths; //for each step from the average silhouette widths of the clusters std::map<Size, float> interdist_i; //for each element i holds the min. average intercluster distance in cluster containing i std::map<Size, Size> cluster_with_interdist; //for each element i holds which cluster originated the min. 
intercluster distance std::map<Size, float> intradist_i; //for each element i holds the average intracluster distance in [i] //initial leafs std::set<Size> leafs; for (Size i = 0; i < tree.size(); ++i) { leafs.insert(tree[i].left_child); leafs.insert(tree[i].right_child); interdist_i.insert(std::make_pair(tree[i].left_child, std::numeric_limits<float>::max())); interdist_i.insert(std::make_pair(tree[i].right_child, std::numeric_limits<float>::max())); cluster_with_interdist.insert(std::make_pair(tree[i].left_child, 0)); cluster_with_interdist.insert(std::make_pair(tree[i].right_child, 0)); intradist_i.insert(std::make_pair(tree[i].left_child, (float)0.)); intradist_i.insert(std::make_pair(tree[i].right_child, (float)0.)); if (tree[i].distance == -1) { break; } } //initial values for interdis_i and cluster_with_interdist std::set<Size>::iterator leafs_it = leafs.begin(); ++leafs_it; for (; leafs_it != leafs.end(); ++leafs_it) { std::set<Size>::iterator jt = leafs.begin(); for (; *jt < *leafs_it; ++jt) { if (original.getValue(*leafs_it, *jt) < interdist_i[*leafs_it]) { interdist_i[*leafs_it] = original.getValue(*leafs_it, *jt); cluster_with_interdist[*leafs_it] = *jt; } if (original.getValue(*leafs_it, *jt) < interdist_i[*jt]) { interdist_i[*jt] = original.getValue(*leafs_it, *jt); cluster_with_interdist[*jt] = *leafs_it; } } } /* to manually retrace for (Size i = 0; i < original.dimensionsize(); ++i) { std::cout << interdist_i[i] << " | " << cluster_with_interdist[i] << " | " << intradist_i[i] << '\n'; } */ //initial cluster state std::map<Size, std::vector<Size> > clusters; for (std::set<Size>::iterator it = leafs.begin(); it != leafs.end(); ++it) { clusters[*it].push_back(*it); } //subsequent cluster states after silhouette calc for (Size t = 0; t < tree.size() - 1; ++t) //last steps silhouettes would be all 0 respectively not defined { for (std::set<Size>::iterator it = leafs.begin(); it != leafs.end(); ++it) { std::vector<Size>::iterator in_left = 
std::find(clusters[tree[t].left_child].begin(), clusters[tree[t].left_child].end(), *it); std::vector<Size>::iterator in_right = std::find(clusters[tree[t].right_child].begin(), clusters[tree[t].right_child].end(), *it); if (in_left == clusters[tree[t].left_child].end() && in_right == clusters[tree[t].right_child].end()) //*it (!element_of) left or right { //intradist_i is always kept //handle interdist: if (tree[t].left_child != cluster_with_interdist[*it] && tree[t].right_child != cluster_with_interdist[*it]) //s(i)_nr (!element_of) left or right { float interdist_merged(0); for (Size j = 0; j < clusters[tree[t].left_child].size(); ++j) { interdist_merged += original.getValue(*it, clusters[tree[t].left_child][j]); } for (Size j = 0; j < clusters[tree[t].right_child].size(); ++j) { interdist_merged += original.getValue(*it, clusters[tree[t].right_child][j]); } interdist_merged /= (float)(clusters[tree[t].left_child].size() + clusters[tree[t].right_child].size()); if (interdist_merged < interdist_i[*it]) { interdist_i[*it] = interdist_merged; cluster_with_interdist[*it] = tree[t].left_child; } } else //s(i)_nr (element_of) left or right { //calculate interdist_i to merged Size k; //the one cluster of the two merged which does NOT contain s(i)_nr if (tree[t].right_child != cluster_with_interdist[*it]) { k = tree[t].right_child; } else { k = tree[t].left_child; } float interdist_merged(0); for (Size j = 0; j < clusters[k].size(); ++j) { interdist_merged += original.getValue(*it, clusters[k][j]); } interdist_merged += (clusters[cluster_with_interdist[*it]].size() * interdist_i[*it]); interdist_merged /= (float)(clusters[k].size() + clusters[cluster_with_interdist[*it]].size()); //if new inderdist is smaller that old min. nothing else has to be done if (interdist_merged <= interdist_i[*it]) { interdist_i[*it] = interdist_merged; cluster_with_interdist[*it] = tree[t].left_child; } // else find min av. 
dist from other clusters to i else { interdist_i[*it] = interdist_merged; cluster_with_interdist[*it] = tree[t].left_child; for (Size u = 0; u < clusters.size(); ++u) { if (u != tree[t].left_child && u != tree[t].right_child && !clusters[u].empty() && clusters[u].end() == std::find(clusters[u].begin(), clusters[u].end(), *it)) { float min_interdist_i(0); for (Size v = 0; v < clusters[u].size(); ++v) { min_interdist_i += original.getValue(clusters[u][v], *it); } min_interdist_i /= (float)clusters[u].size(); if (min_interdist_i < interdist_i[*it]) { interdist_i[*it] = min_interdist_i; cluster_with_interdist[*it] = u; } } } } } } else //i (element_of) left or right { Size k, l; //k is the cluster that is one of the merged but not the one containing i, l the cluster containing i if (in_left == clusters[tree[t].left_child].end()) { l = tree[t].right_child; k = tree[t].left_child; } else { l = tree[t].left_child; k = tree[t].right_child; } if (k != cluster_with_interdist[*it]) //s(i)_nr (!element_of) left or right cluster { //interdist_i is kept //but intradist_i has to be updated intradist_i[*it] *= clusters[l].size() - 1; for (Size j = 0; j < clusters[k].size(); ++j) { intradist_i[*it] += original.getValue(*it, clusters[k][j]); } intradist_i[*it] /= (float)(clusters[k].size() + (clusters[l].size() - 1)); } else //s(i)_nr (element_of) left or right { //intradist_i has to be updated intradist_i[*it] *= clusters[l].size() - 1; intradist_i[*it] += (clusters[k].size() * interdist_i[*it]); intradist_i[*it] /= (float)(clusters[k].size() + (clusters[l].size() - 1)); //find new min av. 
interdist_i interdist_i[*it] = std::numeric_limits<float>::max(); for (Size u = 0; u < clusters.size(); ++u) { if (u != l && u != k && !clusters[u].empty()) { float av_interdist_i(0); for (Size v = 0; v < clusters[u].size(); ++v) { av_interdist_i += original.getValue(clusters[u][v], *it); } av_interdist_i /= (float)clusters[u].size(); if (av_interdist_i < interdist_i[*it]) { interdist_i[*it] = av_interdist_i; cluster_with_interdist[*it] = u; } } } } } } //redo clustering following tree //pushback elements of right_child to left_child (and then erase second) clusters[tree[t].left_child].insert(clusters[tree[t].left_child].end(), clusters[tree[t].right_child].begin(), clusters[tree[t].right_child].end()); //erase second one clusters[tree[t].right_child].clear(); //~ //adept the cluster indices in clusters with interdist //~ for (Size x = 0; x < cluster_with_interdist.size();++x) //~ { //~ if(cluster_with_interdist[x]>tree[t].right_child) //~ { //~ --cluster_with_interdist[x]; //~ } //~ } /* to manually retrace for (Size x = 0; x < clusters.size();++x) { for (Size y = 0; y < clusters[x].size();++y) { std::cout << clusters[x][y] << " "; } std::cout << " | "; } std::cout << '\n'; std::cout << "---------\n"; for (Size z = 0; z < original.dimensionsize(); ++z) { std::cout << interdist_i[z] << " , " << intradist_i[z] << " , " << cluster_with_interdist[z] << " , "; std::cout << ((interdist_i[z] - intradist_i[z]) / std::max(interdist_i[z],intradist_i[z])) << '\n'; } std::cout << "---------\n"; */ //calculate average silhouette width for clusters and then overall average silhouette width for cluster step float average_overall_silhouette(0); // from cluster step /* to manually retrace std::vector<float> silhouettes(original.dimensionsize(),0.0); */ for (Size g = 0; g < clusters.size(); ++g) { if (clusters[g].size() > 1) { //collect silhouettes clusterwise so that average cluster silhouettes will be easily accessible for (Size h = 0; h < clusters[g].size(); ++h) { if 
(interdist_i[clusters[g][h]] != 0) { average_overall_silhouette += (interdist_i[clusters[g][h]] - intradist_i[clusters[g][h]]) / std::max(interdist_i[clusters[g][h]], intradist_i[clusters[g][h]]); /* to manually retrace silhouettes[clusters[g][h]] = (interdist_i[clusters[g][h]] - intradist_i[clusters[g][h]]) / std::max(interdist_i[clusters[g][h]],intradist_i[clusters[g][h]]); */ } } } } /* to manually retrace for (Size i = 0; i < silhouettes.size(); ++i) { std::cout << "s(" << (i) << ") = " << silhouettes[i] << '\n'; } std::cout << "---------\n"; */ average_silhouette_widths.push_back(average_overall_silhouette / (float)(tree.size() + 1)); } average_silhouette_widths.push_back(0.0); return average_silhouette_widths; } std::vector<float> ClusterAnalyzer::dunnIndices(const std::vector<BinaryTreeNode> & tree, const DistanceMatrix<float> & original, const bool tree_from_singlelinkage) { //throw exception if cannot be legal clustering if (tree.empty()) { throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "tree is empty but minimal clustering hirachy has at least one level"); } std::vector<float> all_dunn_indices; all_dunn_indices.reserve(tree.size() + 1); std::set<Size> leafs; for (Size i = 0; i < tree.size(); ++i) { leafs.insert(tree[i].left_child); leafs.insert(tree[i].right_child); } //initial cluster state //~ Size sz = *(leafs.rbegin())+1; std::vector<std::vector<Size> > clusters(original.dimensionsize()); std::vector<std::pair<float, Size> > min_intercluster_distances(original.dimensionsize(), std::make_pair<float, Size>(-1, 0)); for (std::set<Size>::iterator it = leafs.begin(); it != leafs.end(); ++it) { clusters[*it].push_back(*it); std::set<Size>::iterator it_2 = leafs.begin(); for (; it_2 != it; ++it_2) { float d = original.getValue(*it, *it_2); if (d < min_intercluster_distances[*it].first || min_intercluster_distances[*it].first == -1) { min_intercluster_distances[*it].first = d; min_intercluster_distances[*it].second = *it_2; } } 
it_2 = it; ++it_2; for (; it_2 != leafs.end(); ++it_2) { float d = original.getValue(*it, *it_2); if (d < min_intercluster_distances[*it].first || min_intercluster_distances[*it].first == -1) { min_intercluster_distances[*it].first = d; min_intercluster_distances[*it].second = *it_2; } } } Size min_intercluster_distance_index(0); for (Size i = min_intercluster_distance_index + 1; i < min_intercluster_distances.size(); ++i) { if (min_intercluster_distances[min_intercluster_distance_index].first == -1) { min_intercluster_distance_index = i; } else if (min_intercluster_distances[i].first != -1 && min_intercluster_distances[i].first < min_intercluster_distances[min_intercluster_distance_index].first) { min_intercluster_distance_index = i; } } //initial state for min inter and max intra distances float max_intracluster_distance(0); for (Size cluster_step = 0; cluster_step < tree.size() - 1; ++cluster_step) { //max intracluster distance changed? for (Size x = 0; x < clusters[tree[cluster_step].left_child].size(); ++x) { for (Size y = 0; y < clusters[tree[cluster_step].right_child].size(); ++y) { if (original.getValue(clusters[tree[cluster_step].left_child][x], clusters[tree[cluster_step].right_child][y]) > max_intracluster_distance) { max_intracluster_distance = original.getValue(clusters[tree[cluster_step].left_child][x], clusters[tree[cluster_step].right_child][y]); } } } //redo clustering following tree //pushback elements of right_child to left_child (and then erase second) clusters[tree[cluster_step].left_child].insert(clusters[tree[cluster_step].left_child].end(), clusters[tree[cluster_step].right_child].begin(), clusters[tree[cluster_step].right_child].end()); //erase second one clusters[tree[cluster_step].right_child].clear(); //min intercluster distance changed? 
// NOTE(review): this chunk starts mid-way through a member function of
// ClusterAnalyzer (presumably the Dunn-index computation -- it fills
// `all_dunn_indices`; confirm against the full file). The fragment below is
// the per-merge-step bookkeeping of the minimal inter-cluster distance.

      // Tree was not built by single linkage: after merging right_child into
      // left_child the minimal inter-cluster distance must be maintained
      // explicitly.
      if (!tree_from_singlelinkage)
      {
        // Invalidate the entry of the consumed cluster (-1 == "gone").
        min_intercluster_distances[tree[cluster_step].right_child].first = -1;
        min_intercluster_distances[tree[cluster_step].right_child].second = 0;
        // If the two merged clusters were exactly the pair realizing the
        // current overall minimum, that minimum is void -> recompute it.
        if ((min_intercluster_distance_index == tree[cluster_step].right_child && min_intercluster_distances[min_intercluster_distance_index].second == tree[cluster_step].left_child) ||
            (min_intercluster_distance_index == tree[cluster_step].left_child && min_intercluster_distances[min_intercluster_distance_index].second == tree[cluster_step].right_child))
        {
          //find new min intercluster distance for the merged cluster
          min_intercluster_distances[tree[cluster_step].left_child].first = std::numeric_limits<float>::max();
          for (Size j = 0; j < clusters[tree[cluster_step].left_child].size(); ++j)
          {
            // Scan all other clusters; the merged cluster itself is skipped
            // via the ++k between the two loops below.
            Size k(0);
            for (; k < tree[cluster_step].left_child; ++k)
            {
              for (Size l = 0; l < clusters[k].size(); ++l)
              {
                if (original.getValue(clusters[tree[cluster_step].left_child][j], clusters[k][l]) < min_intercluster_distances[tree[cluster_step].left_child].first)
                {
                  min_intercluster_distances[tree[cluster_step].left_child].first = original.getValue(clusters[tree[cluster_step].left_child][j], clusters[k][l]);
                  min_intercluster_distances[tree[cluster_step].left_child].second = k;
                }
              }
            }
            ++k; // skip the merged cluster itself
            for (; k < clusters.size(); ++k)
            {
              for (Size l = 0; l < clusters[k].size(); ++l)
              {
                if (original.getValue(clusters[tree[cluster_step].left_child][j], clusters[k][l]) < min_intercluster_distances[tree[cluster_step].left_child].first)
                {
                  min_intercluster_distances[tree[cluster_step].left_child].first = original.getValue(clusters[tree[cluster_step].left_child][j], clusters[k][l]);
                  min_intercluster_distances[tree[cluster_step].left_child].second = k;
                }
              }
            }
          }
          // Re-locate the overall minimal inter-cluster distance, ignoring
          // invalidated entries (first == -1).
          min_intercluster_distance_index = 0;
          for (Size i = min_intercluster_distance_index + 1; i < min_intercluster_distances.size(); ++i)
          {
            if (min_intercluster_distances[min_intercluster_distance_index].first == -1)
            {
              min_intercluster_distance_index = i;
            }
            else if (min_intercluster_distances[i].first != -1 &&
                     min_intercluster_distances[i].first < min_intercluster_distances[min_intercluster_distance_index].first)
            {
              min_intercluster_distance_index = i;
            }
          }
        }
        else
        {
          // Overall minimum is untouched; the merged cluster inherits the
          // smaller of the two merged entries.
          if (min_intercluster_distances[tree[cluster_step].right_child].first < min_intercluster_distances[tree[cluster_step].left_child].first)
          {
            min_intercluster_distances[tree[cluster_step].left_child].first = min_intercluster_distances[tree[cluster_step].right_child].first;
            min_intercluster_distances[tree[cluster_step].left_child].second = min_intercluster_distances[tree[cluster_step].right_child].second;
          }
        }
        // Redirect references to the consumed cluster to the merged one.
        for (Size k = 0; k < min_intercluster_distances.size(); ++k)
        {
          if (min_intercluster_distances[k].second == tree[cluster_step].right_child)
          {
            min_intercluster_distances[k].second = tree[cluster_step].left_child;
          }
        }
      }

      //shortcut for single-linkage-generated hierarchy, as its merging criterion is the min intercluster distance
      if (tree_from_singlelinkage)
      {
        float dunn_index(0);
        if (max_intracluster_distance > 0) // avoid division by zero
        {
          dunn_index = tree[cluster_step + 1].distance / max_intracluster_distance;
        }
        all_dunn_indices.push_back(dunn_index);
      }
      else
      {
        //find max dunn index and deduct the corresponding cluster step
        float dunn_index(0);
        if (max_intracluster_distance > 0) // avoid division by zero
        {
          dunn_index = min_intercluster_distances[min_intercluster_distance_index].first / max_intracluster_distance;
        }
        all_dunn_indices.push_back(dunn_index);
      }
      /* to manually retrace
      std::cout << min_intercluster_distance << '\n';
      std::cout << clusters_with_min_intercluster_dist.first << " , " << clusters_with_min_intercluster_dist.second << '\n';
      std::cout << max_intracluster_distance << '\n';
      */
    }
    all_dunn_indices.push_back(0.0); //last one is clearly 0
    return all_dunn_indices;
  }

  // Cuts the dendrogram at the step yielding `cluster_quantity` clusters and
  // appends the resulting partition (element indices per cluster, each sorted
  // ascending) to `clusters`.
  // @throws Exception::InvalidParameter for 0 clusters or more clusters than elements
  void ClusterAnalyzer::cut(const Size cluster_quantity, const std::vector<BinaryTreeNode> & tree, std::vector<std::vector<Size> > & clusters)
  {
    if (cluster_quantity == 0)
    {
      throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "You requested 0 clusters. Minimal partitioning contains one cluster, not zero.");
    }
    if (cluster_quantity > tree.size() + 1)
    {
      throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Cluster count exceeds node count. No partitioning exists.");
    }
    // Collect all element indices occurring in the tree (the leafs).
    std::set<Size> leafs;
    for (Size i = 0; i < tree.size(); ++i)
    {
      leafs.insert(tree[i].left_child);
      leafs.insert(tree[i].right_child);
    }
    // Start with singleton clusters, one per leaf.
    std::map<Size, std::vector<Size> > cluster_map;
    std::set<Size>::iterator it = leafs.begin();
    for (; it != leafs.end(); ++it)
    {
      cluster_map[*it] = std::vector<Size>(1, *it);
    }
    //redo clustering till step (original.dimensionsize()-cluster_quantity)
    for (Size cluster_step = 0; cluster_step < tree.size() + 1 - cluster_quantity; ++cluster_step)
    {
      if (tree[cluster_step].distance == -1) // -1 marks an invalid/unused merge step
      {
        break;
      }
      //pushback elements of right_child to left_child (and then erase second)
      cluster_map[tree[cluster_step].left_child].insert(cluster_map[tree[cluster_step].left_child].end(), cluster_map[tree[cluster_step].right_child].begin(), cluster_map[tree[cluster_step].right_child].end());
      // erase second one
      cluster_map[tree[cluster_step].right_child].clear();
    }
    // Convert map to vector, dropping emptied (merged-away) clusters.
    std::map<Size, std::vector<Size> >::iterator iter;
    for (iter = cluster_map.begin(); iter != cluster_map.end(); ++iter)
    {
      if (iter->second.empty())
        continue;
      std::vector<Size> actCluster = iter->second;
      clusters.push_back(actCluster);
    }
    //~ sorts by first element contained!!
    for (Size cluster_num = 0; cluster_num < clusters.size(); ++cluster_num)
    {
      std::sort(clusters[cluster_num].begin(), clusters[cluster_num].end());
    }
    // Keep only cluster_quantity entries (the lexicographically largest after
    // the descending reorder), then restore ascending order.
    std::sort(clusters.begin(), clusters.end());
    std::reverse(clusters.begin(), clusters.end());
    clusters.resize(cluster_quantity);
    std::sort(clusters.begin(), clusters.end());
  }

  // Cuts the dendrogram as above but returns, per resulting cluster, the
  // subtree (sequence of merge steps) that built it.
  void ClusterAnalyzer::cut(const Size cluster_quantity, const std::vector<BinaryTreeNode> & tree, std::vector<std::vector<BinaryTreeNode> > & subtrees)
  {
    if (cluster_quantity == 0)
    {
      throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "minimal partition contains one cluster, not zero");
    }
    if (cluster_quantity >= tree.size() + 1)
    {
      throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "maximal partition contains singleton clusters, further separation is not possible");
    }
    subtrees.clear();
    subtrees.resize(cluster_quantity);
    // Get the partition corresponding to the requested cut.
    std::vector<std::vector<Size> > clusters;
    cut(cluster_quantity, tree, clusters);
    //~ unused nodes are discarded, (tree.begin()+tree.size()+1-cluster_quantity) is maximal tree.end() since cluster_quantity is always > 1! (tree.end()==tree.begin()+tree.size())
    std::list<BinaryTreeNode> tc(tree.begin(), (tree.begin() + (tree.size() + 1 - cluster_quantity)));
    // Distribute the merge steps onto the clusters they belong to.
    for (Size cluster = 0; cluster < clusters.size(); ++cluster)
    {
      std::sort(clusters[cluster].begin(), clusters[cluster].end());
      std::list<BinaryTreeNode>::iterator it = tc.begin();
      while (it != tc.end())
      {
        // A merge step belongs to this cluster if either child is a member.
        std::vector<Size>::iterator left = std::find(clusters[cluster].begin(), clusters[cluster].end(), it->left_child);
        std::vector<Size>::iterator right = std::find(clusters[cluster].begin(), clusters[cluster].end(), it->right_child);
        if ((left != clusters[cluster].end() || right != clusters[cluster].end()))
        {
          subtrees[cluster].push_back(*it);
          it = tc.erase(it); // each merge step is assigned at most once
        }
        else
        {
          ++it;
        }
      }
    }
  }

  // Mean absolute deviation of the cluster sizes (at the cut yielding
  // `cluster_quantity` clusters) from the ideal equal population size.
  float ClusterAnalyzer::averagePopulationAberration(Size cluster_quantity, std::vector<BinaryTreeNode> & tree)
  {
    if (cluster_quantity == 0)
    {
      throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "minimal partition contains one cluster, not zero");
    }
    if (cluster_quantity >= tree.size() + 1)
    {
      throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "maximal partition contains singleton clusters, further separation is not possible");
    }
    std::vector<float> average_sizes; // NOTE(review): never written below -- appears unused
    average_sizes.reserve(tree.size() + 1);
    // Start from singleton clusters {0}, {1}, ...
    std::vector<std::vector<Size> > clusters;
    clusters.reserve(tree.size() + 1);
    clusters.clear();
    clusters.reserve(tree.size() + 1);
    for (Size i = 0; i < tree.size() + 1; ++i)
    {
      clusters.emplace_back(1, i);
    }
    //redo clustering till step (original.dimensionsize()-cluster_quantity)
    for (Size cluster_step = 0; cluster_step < tree.size() + 1 - cluster_quantity; ++cluster_step)
    {
      //pushback elements of right_child to left_child (and then erase second)
      clusters[tree[cluster_step].left_child].insert(clusters[tree[cluster_step].left_child].end(), clusters[tree[cluster_step].right_child].begin(), clusters[tree[cluster_step].right_child].end());
      // clear second one
      clusters[tree[cluster_step].right_child].clear();
    }
    // Ideal size if all clusters were populated equally.
    float average = (float)(tree.size() + 1) / (float)cluster_quantity;
    float aberration(0);
    float cluster_number(0);
    for (Size i = 0; i < clusters.size(); ++i)
    {
      if (!clusters[i].empty())
      {
        aberration += std::fabs((float)clusters[i].size() - average);
        ++cluster_number;
      }
    }
    aberration /= cluster_number;
    return aberration;
  }

  // Per-cluster average intra-cluster distance; singleton clusters (which
  // have no intra-cluster distance) get the global average pairwise distance.
  std::vector<float> ClusterAnalyzer::cohesion(const std::vector<std::vector<Size> > & clusters, const DistanceMatrix<float> & original)
  {
    if (clusters.empty() || clusters.size() > original.dimensionsize())
    {
      throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "invalid clustering");
    }
    float av_dist(0);
    // average of all pairwise distances
    for (Size i = 0; i < original.dimensionsize(); ++i)
    {
      for (Size j = i + 1; j < original.dimensionsize(); ++j)
      {
        av_dist += original.getValue(i, j);
      }
    }
    av_dist /= (((float)original.dimensionsize() * (float)(original.dimensionsize() - 1.0)) / 2.0f);
    std::vector<float> cohesions;
    cohesions.reserve(clusters.size());
    for (Size i = 0; i < clusters.size(); ++i)
    {
      float av_c_dist(0);
      // all pairwise distances in cluster i
      for (Size j = 0; j < clusters[i].size(); ++j)
      {
        for (Size k = 0; k < j; ++k)
        {
          av_c_dist += original.getValue(clusters[i][j], clusters[i][k]);
        }
      }
      av_c_dist /= (((float)clusters[i].size() * (float)(clusters[i].size() - 1.0)) / 2.0f); //now av. intra cluster distance
      if (clusters[i].size() == 1)
      {
        av_c_dist = av_dist; // singleton: fall back to the global average
      }
      //~ std::cout << " av clu i " << av_c_dist << '\n';
      cohesions.push_back(av_c_dist);
    }
    return cohesions;
  }

  // Renders the dendrogram as a Newick tree string; with include_distance,
  // each branch is annotated ":<merge distance>". Disconnected remainders are
  // joined at the top level with a pseudo-distance of 1.
  String ClusterAnalyzer::newickTree(const std::vector<BinaryTreeNode> & tree, const bool include_distance)
  {
    std::set<Size> leafs;
    for (Size i = 0; i < tree.size(); ++i)
    {
      leafs.insert(tree[i].left_child);
      leafs.insert(tree[i].right_child);
    }
    // One (partial) Newick string per leaf index; merged-away entries go empty.
    std::vector<String> clusters(*(leafs.rbegin()) + 1, "");
    for (std::set<Size>::iterator it = leafs.begin(); it != leafs.end(); ++it)
    {
      clusters[*it] = String(*it);
    }
    //redo clustering till step (original.dimensionsize()-1)
    for (Size cluster_step = 0; cluster_step < tree.size(); ++cluster_step)
    {
      //append string right_child to left_child
      clusters[tree[cluster_step].left_child].insert(0, "( ");
      if (include_distance)
      {
        clusters[tree[cluster_step].left_child] += ":";
        clusters[tree[cluster_step].left_child] += String(tree[cluster_step].distance);
      }
      clusters[tree[cluster_step].left_child] += " , ";
      clusters[tree[cluster_step].left_child] += clusters[tree[cluster_step].right_child];
      if (include_distance)
      {
        clusters[tree[cluster_step].left_child] += ":";
        clusters[tree[cluster_step].left_child] += String(tree[cluster_step].distance);
      }
      clusters[tree[cluster_step].left_child] += " )";
      clusters[tree[cluster_step].right_child] = String("");
    }
    // Find the first non-empty entry and fold any remaining partial trees
    // into it (happens when the tree does not connect everything).
    Size first_filled(0);
    for (Size i = 0; i < clusters.size(); ++i)
    {
      if (!clusters[i].empty())
      {
        first_filled = i;
        break;
      }
    }
    for (Size i = first_filled + 1; i < clusters.size(); ++i)
    {
      if (!clusters[i].empty())
      {
        clusters[first_filled].insert(0, "( ");
        if (include_distance)
        {
          clusters[first_filled] += ":";
          clusters[first_filled] += String("1");
        }
        clusters[first_filled] += " , ";
        clusters[first_filled] += clusters[i];
        if (include_distance)
        {
          clusters[first_filled] += ":";
          clusters[first_filled] += String("1");
        }
        clusters[first_filled] += " )";
      }
    }
    return clusters[first_filled]; //~example inspectable with: http://cgi-www.daimi.au.dk/cgi-chili/phyfi/go [BMC Bioinformatics 2006, 7:315]
  }

  // Orders tree nodes by merge distance (e.g. for std::sort).
  bool compareBinaryTreeNode(const BinaryTreeNode & x, const BinaryTreeNode & y)
  {
    return x.distance < y.distance;
  }

}
C++
3D
OpenMS/OpenMS
src/openms/source/ML/CLUSTERING/ClusterFunctor.cpp
.cpp
1,157
35
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Mathias Walzer $ // $Authors: $ // -------------------------------------------------------------------------- // #include <OpenMS/ML/CLUSTERING/ClusterFunctor.h> #include <OpenMS/ML/CLUSTERING/SingleLinkage.h> #include <OpenMS/ML/CLUSTERING/CompleteLinkage.h> #include <OpenMS/ML/CLUSTERING/AverageLinkage.h> using namespace std; namespace OpenMS { ClusterFunctor::ClusterFunctor() = default; ClusterFunctor::ClusterFunctor(const ClusterFunctor & /*source*/) = default; ClusterFunctor::~ClusterFunctor() = default; ClusterFunctor & ClusterFunctor::operator=(const ClusterFunctor & /*source*/) = default; ClusterFunctor::InsufficientInput::InsufficientInput(const char * file, int line, const char * function, const char * message) throw() : BaseException(file, line, function, "ClusterFunctor::InsufficentInput", message) { } ClusterFunctor::InsufficientInput::~InsufficientInput() throw() = default; }
C++
3D
OpenMS/OpenMS
src/openms/source/ML/CLUSTERING/ClusterHierarchical.cpp
.cpp
749
24
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Mathias Walzer $ // $Authors: $ // -------------------------------------------------------------------------- // #include <OpenMS/ML/CLUSTERING/ClusterHierarchical.h> //using namespace std; namespace OpenMS { UnnormalizedComparator::UnnormalizedComparator(const char * file, int line, const char * function, const char * message) throw() : BaseException(file, line, function, "ClusterHierarchical::UnnormalizedComparator", message) { } UnnormalizedComparator::~UnnormalizedComparator() throw() = default; }
C++
3D
OpenMS/OpenMS
src/openms/source/ML/RANSAC/RANSACModelQuadratic.cpp
.cpp
2,557
86
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: George Rosenberger $ // $Authors: George Rosenberger, Hannes Roest, Chris Bielow $ // -------------------------------------------------------------------------- #include <OpenMS/ML/RANSAC/RANSACModelQuadratic.h> #include <OpenMS/ML/REGRESSION/QuadraticRegression.h> #include <numeric> namespace OpenMS::Math { // Quadratic regression for RANSAC RansacModelQuadratic::ModelParameters RansacModelQuadratic::rm_fit_impl(const DVecIt& begin, const DVecIt& end) { std::vector<double> x, y; for (DVecIt it = begin; it != end; ++it) { x.push_back(it->first); y.push_back(it->second); } QuadraticRegression quad_reg; quad_reg.computeRegression(x.begin(), x.end(), y.begin()); ModelParameters p; p.push_back(quad_reg.getA()); p.push_back(quad_reg.getB()); p.push_back(quad_reg.getC()); return p; } double RansacModelQuadratic::rm_rsq_impl(const DVecIt& begin, const DVecIt& end) { std::vector<double> x, y; for (DVecIt it = begin; it != end; ++it) { x.push_back(it->first); y.push_back(it->second); } QuadraticRegression quad_reg; quad_reg.computeRegression(x.begin(), x.end(), y.begin()); return quad_reg.getChiSquared(); } double RansacModelQuadratic::rm_rss_impl(const DVecIt& begin, const DVecIt& end, const ModelParameters& coefficients) { double rss = 0; for (DVecIt it = begin; it != end; ++it) { const double value_model = QuadraticRegression::eval(coefficients[0], coefficients[1], coefficients[2], it->first); const double diff = it->second - value_model; rss += diff*diff; } return rss; } RansacModelQuadratic::DVec RansacModelQuadratic::rm_inliers_impl(const DVecIt& begin, const DVecIt& end, const ModelParameters& coefficients, double max_threshold) { DVec alsoinliers; for (DVecIt it = begin; it != end; ++it) { const double value_model = 
QuadraticRegression::eval(coefficients[0], coefficients[1], coefficients[2], it->first); const double diff = it->second - value_model; if (diff * diff < max_threshold) { alsoinliers.push_back(*it); } } return alsoinliers; } } // OpenMS //Math
C++
3D
OpenMS/OpenMS
src/openms/source/ML/RANSAC/RANSACModel.cpp
.cpp
479
18
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: George Rosenberger $ // $Authors: George Rosenberger, Hannes Roest, Chris Bielow $ // -------------------------------------------------------------------------- namespace OpenMS::Math { // template class -- nothing here } // OpenMS // Math
C++
3D
OpenMS/OpenMS
src/openms/source/ML/RANSAC/RANSAC.cpp
.cpp
413
14
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: George Rosenberger $ // $Authors: George Rosenberger, Hannes Roest, Chris Bielow $ // -------------------------------------------------------------------------- namespace OpenMS { }
C++
3D
OpenMS/OpenMS
src/openms/source/ML/RANSAC/RANSACModelLinear.cpp
.cpp
2,416
83
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: George Rosenberger $ // $Authors: George Rosenberger, Hannes Roest, Chris Bielow $ // -------------------------------------------------------------------------- #include <OpenMS/ML/RANSAC/RANSACModelLinear.h> #include <OpenMS/ML/REGRESSION/LinearRegression.h> #include <numeric> namespace OpenMS::Math { RansacModelLinear::ModelParameters RansacModelLinear::rm_fit_impl(const DVecIt& begin, const DVecIt& end) { std::vector<double> x, y; for (DVecIt it = begin; it != end; ++it) { x.push_back(it->first); y.push_back(it->second); } LinearRegression lin_reg; lin_reg.computeRegression(0.95, x.begin(), x.end(), y.begin(), false); // no goodness of fit computation ModelParameters p; p.push_back(lin_reg.getIntercept()); p.push_back(lin_reg.getSlope()); return p; } double RansacModelLinear::rm_rsq_impl(const DVecIt& begin, const DVecIt& end) { std::vector<double> x, y; for (DVecIt it = begin; it != end; ++it) { x.push_back(it->first); y.push_back(it->second); } LinearRegression lin_reg; lin_reg.computeRegression(0.95, x.begin(), x.end(), y.begin(), true); return lin_reg.getRSquared(); } double RansacModelLinear::rm_rss_impl(const DVecIt& begin, const DVecIt& end, const ModelParameters& coefficients) { double rss = 0; for (DVecIt it = begin; it != end; ++it) { rss += pow(it->second - (coefficients[0] + ( coefficients[1] * it->first)), 2); } return rss; } RansacModelLinear::DVec RansacModelLinear::rm_inliers_impl(const DVecIt& begin, const DVecIt& end, const ModelParameters& coefficients, double max_threshold) { DVec alsoinliers; //std::cerr << "\n\nRANSAC dists: "; for (DVecIt it = begin; it != end; ++it) { double dist = pow(it->second - (coefficients[0] + ( coefficients[1] * it->first)), 2); //std::cerr << dist << ", "; if (dist < max_threshold) { 
alsoinliers.push_back(*it); } } //std::cerr << "\n\n"; return alsoinliers; } } // OpenMS //Math
C++
3D
OpenMS/OpenMS
src/openms/source/COMPARISON/PeakSpectrumCompareFunctor.cpp
.cpp
1,288
41
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: $ // -------------------------------------------------------------------------- // #include <OpenMS/COMPARISON/PeakSpectrumCompareFunctor.h> #include <OpenMS/COMPARISON/SpectrumCheapDPCorr.h> #include <OpenMS/COMPARISON/SpectrumPrecursorComparator.h> #include <OpenMS/COMPARISON/ZhangSimilarityScore.h> #include <OpenMS/COMPARISON/SpectrumAlignmentScore.h> #include <OpenMS/COMPARISON/SteinScottImproveScore.h> #include <OpenMS/COMPARISON/PeakAlignment.h> using namespace std; namespace OpenMS { PeakSpectrumCompareFunctor::PeakSpectrumCompareFunctor() : DefaultParamHandler("PeakSpectrumCompareFunctor") { } PeakSpectrumCompareFunctor::PeakSpectrumCompareFunctor(const PeakSpectrumCompareFunctor & source) = default; PeakSpectrumCompareFunctor::~PeakSpectrumCompareFunctor() = default; PeakSpectrumCompareFunctor & PeakSpectrumCompareFunctor::operator=(const PeakSpectrumCompareFunctor & source) { if (this != &source) { DefaultParamHandler::operator=(source); } return *this; } }
C++
3D
OpenMS/OpenMS
src/openms/source/COMPARISON/SpectrumCheapDPCorr.cpp
.cpp
11,197
360
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
//

#include <OpenMS/COMPARISON/SpectrumCheapDPCorr.h>

#include <map>

// #define SPECTRUMCHEAPDPCORR_DEBUG
// #undef SPECTRUMCHEAPDPCORR_DEBUG

#ifdef SPECTRUMCHEAPDPCORR_DEBUG
#include <iostream>
#endif

#include <boost/math/distributions.hpp>

using namespace std;

namespace OpenMS
{
  // Spectrum similarity scorer: pairs peaks that lie within a relative m/z
  // tolerance ("variation"), resolving ambiguous runs of candidate pairs by
  // dynamic programming (dynprog_). As a side effect it builds a consensus
  // spectrum (lastconsensus_) and a peak-index mapping (peak_map_).
  SpectrumCheapDPCorr::SpectrumCheapDPCorr() :
    PeakSpectrumCompareFunctor(),
    lastconsensus_()
  {
    setName("SpectrumCheapDPCorr");
    defaults_.setValue("variation", 0.001, "Maximum difference in position (in percent of the current m/z).\nNote that big values of variation ( 1 being the maximum ) result in consideration of all possible pairings which has a running time of O(n*n)");
    defaults_.setValue("int_cnt", 0, "How the peak heights are used in the score.\n0 = product\n1 = sqrt(product)\n2 = sum\n3 = agreeing intensity\n");
    defaults_.setValue("keeppeaks", 0, "Flag that states if peaks without alignment partner are kept in the consensus spectrum.");
    factor_ = 0.5; // default weighting of the two input spectra in the consensus
    defaultsToParam_();
  }

  SpectrumCheapDPCorr::SpectrumCheapDPCorr(const SpectrumCheapDPCorr & source) :
    PeakSpectrumCompareFunctor(source),
    lastconsensus_(source.lastconsensus_),
    factor_(source.factor_)
  {
  }

  SpectrumCheapDPCorr::~SpectrumCheapDPCorr() = default;

  SpectrumCheapDPCorr & SpectrumCheapDPCorr::operator=(const SpectrumCheapDPCorr & source)
  {
    if (this != &source)
    {
      PeakSpectrumCompareFunctor::operator=(source);
      lastconsensus_ = source.lastconsensus_;
      factor_ = source.factor_;
    }
    return *this;
  }

  // Sets the consensus weighting factor; only values strictly between 0 and 1
  // are accepted.
  // @throws Exception::OutOfRange otherwise
  void SpectrumCheapDPCorr::setFactor(double f)
  {
    if (f < 1 && f > 0)
    {
      factor_ = f;
    }
    else
    {
      throw Exception::OutOfRange(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION);
    }
  }

  // Self-comparison convenience overload.
  double SpectrumCheapDPCorr::operator()(const PeakSpectrum & csa) const
  {
    return operator()(csa, csa);
  }

  /**
      Scores two spectra: peak pairs with exactly one (or no) possible
      alignment partner are handled directly; ambiguous runs of mutually
      compatible peaks are resolved with dynprog_.
  */
  double SpectrumCheapDPCorr::operator()(const PeakSpectrum & x, const PeakSpectrum & y) const
  {
    double var = (double)param_.getValue("variation");
    double score(0);
    // NOTE(review): this LOCAL shadows what dynprog_ reads as `keeppeaks_`
    // (dynprog_ uses the name without declaring it, so a member of the same
    // name must exist); dynprog_ therefore never sees this value -- verify
    // against the header whether that is intended.
    bool keeppeaks_ = (int)param_.getValue("keeppeaks");
    lastconsensus_ = PeakSpectrum();
    // consensus precursor: average m/z of the two inputs, charge of the first
    Precursor p1, p2;
    if (!x.getPrecursors().empty())
    {
      p1 = x.getPrecursors()[0];
    }
    if (!y.getPrecursors().empty())
    {
      p2 = y.getPrecursors()[0];
    }
    lastconsensus_.getPrecursors().resize(1);
    lastconsensus_.getPrecursors()[0].setMZ((p1.getMZ() + p2.getMZ()) / 2);
    lastconsensus_.getPrecursors()[0].setCharge(p1.getCharge());
    peak_map_.clear();
    // parallel sweep over both (m/z-ordered) spectra
    int xpos = 0;
    int ypos = 0;
    PeakSpectrum::ConstIterator xit = x.begin();
    PeakSpectrum::ConstIterator yit = y.begin();
    while (xit != x.end() && yit != y.end())
    {
      // tolerance is relative to the mean m/z of the current peak pair
      double variation = (xit->getMZ() + yit->getMZ()) / 2 * var;
      //ignore pairs that cannot be paired
      if (fabs(xit->getMZ() - yit->getMZ()) > variation)
      {
        // advance the spectrum with the smaller m/z; unmatched peaks enter
        // the consensus (down-weighted) only if keeppeaks is set
        if (xit->getMZ() < yit->getMZ()) // is while more efficient ?
        {
          Peak1D consensuspeak;
          consensuspeak.setMZ(xit->getMZ());
          consensuspeak.setIntensity((xit->getIntensity()) * (1 - factor_));
          if (keeppeaks_)
            lastconsensus_.push_back(consensuspeak);
          ++xit;
          ++xpos;
        }
        else
        {
          Peak1D consensuspeak;
          consensuspeak.setMZ(yit->getMZ());
          consensuspeak.setIntensity((yit->getIntensity()) * (factor_));
          if (keeppeaks_)
            lastconsensus_.push_back(consensuspeak);
          ++yit;
          ++ypos;
        }
      }
      else
      {
        //x/yrun represents the number of peaks in both spectra that could be paired
        int xrun = 1;
        int yrun = 1;
        // extend the run while consecutive peaks still overlap within tolerance
        while (xit + xrun != x.end() && yit + yrun != y.end() &&
               (!((xit + xrun - 1)->getMZ() + variation < (yit + yrun)->getMZ()) ||
                !((yit + yrun - 1)->getMZ() + variation < (xit + xrun)->getMZ())))
        {
          if ((yit + yrun - 1)->getMZ() + variation > (xit + xrun)->getMZ())
          {
            xrun++;
          }
          else if ((xit + xrun - 1)->getMZ() + variation > (yit + yrun)->getMZ())
          {
            yrun++;
          }
          else
          {
            xrun++;
            yrun++;
          }
          if (xit + xrun == x.end())
          {
            break;
          }
          if (yit + yrun == y.end())
          {
            break;
          }
        }
        //dynamic programming necessary to calculate optimal pairing
        if (xrun > 1 && yrun > 1)
        {
          score += dynprog_(x, y, xpos, xpos + xrun - 1, ypos, ypos + yrun - 1);
          xit = xit + xrun;
          yit = yit + yrun;
          xpos += xrun;
          ypos += yrun;
        }
        //the optimal pairing of 2 peaks is easy...
        else
        {
          // calculate consensus peak (factor_-weighted mix of both peaks)
          Peak1D consensuspeak;
          consensuspeak.setMZ((xit->getMZ() * (1 - factor_) + yit->getMZ() * (factor_)));
          consensuspeak.setIntensity((xit->getIntensity() * (1 - factor_) + yit->getIntensity() * factor_));
          lastconsensus_.push_back(consensuspeak);
          // record x-index -> y-index pairing; on collision keep the smaller index
          if (!(peak_map_.find(xit - x.begin()) != peak_map_.end()))
          {
            peak_map_[xit - x.begin()] = yit - y.begin();
          }
          else
          {
            peak_map_[xit - x.begin()] = yit - y.begin() > xit - x.begin() ? xit - x.begin() : yit - y.begin();
          }
          variation = (xit->getMZ() + yit->getMZ()) / 2 * var;
          score += comparepeaks_(xit->getMZ(), yit->getMZ(), xit->getIntensity(), yit->getIntensity());
          ++xit;
          ++yit;
          ++xpos;
          ++ypos;
        }
      }
    }
    factor_ = 0.5; // reset the weighting for the next invocation
    return score;
  }

  // Needleman-Wunsch-style alignment of the peak runs x[xstart..xend] and
  // y[ystart..yend] (inclusive); returns the accumulated pair score and
  // extends lastconsensus_ / peak_map_ during traceback.
  double SpectrumCheapDPCorr::dynprog_(const PeakSpectrum & x, const PeakSpectrum & y, int xstart, int xend, int ystart, int yend) const
  {
#ifdef SPECTRUMCHEAPDPCORR_DEBUG
    cerr << "SpectrumCheapDPCorr::dynprog_(const DDiscreteSpectrum<1>& x, const DDiscreteSpectrum<1>& y, " << xstart << ", " << xend << ", " << ystart << ", " << yend << ")" << endl;
#endif
    double var = (double)param_.getValue("variation");
    // DP score table and traceback table, one extra row/column for the borders
    vector<vector<double> > dparray(xend - xstart + 2, vector<double>(yend - ystart + 2));
    vector<vector<int> > trace(xend - xstart + 2, vector<int>(yend - ystart + 2));
    double align;
    for (int i = 1; i < xend - xstart + 2; ++i)
    {
      for (int j = 1; j < yend - ystart + 2; ++j)
      {
        double variation = (y[ystart + j - 1].getMZ() + x[xstart + i - 1].getMZ()) / 2 * var;
        //positions too different
        if (fabs(x[xstart + i - 1].getMZ() - y[ystart + j - 1].getMZ()) > variation)
          align = 0;
        //calculate score of alignment
        else
          align = comparepeaks_(x[xstart + i - 1].getMZ(), y[ystart + j - 1].getMZ(), x[xstart + i - 1].getIntensity(), y[ystart + j - 1].getIntensity());
        //dynamic programming step: cell = max(left, diagonal+align, above);
        //trace codes: 5 = diagonal (pair), -1 = from left (gap in x), 1 = from above (gap in y)
        if ((((dparray[i][j - 1]) > (dparray[i - 1][j - 1] + align)) ? (dparray[i][j - 1]) : (dparray[i - 1][j - 1] + align)) /*== max*/ > dparray[i - 1][j])
        {
          if (dparray[i - 1][j - 1] + align > dparray[i][j - 1])
          {
            dparray[i][j] = dparray[i - 1][j - 1] + align;
            trace[i][j] = 5;
          }
          else
          {
            dparray[i][j] = dparray[i][j - 1];
            trace[i][j] = -1;
          }
        }
        else
        {
          dparray[i][j] = dparray[i - 1][j];
          trace[i][j] = 1;
        }
      }
    }
    // traceback from the bottom-right corner, emitting consensus peaks
    unsigned int i = xend - xstart + 1;
    unsigned int j = yend - ystart + 1;
    for (;; )
    {
      if (trace[i][j] == 5) // diagonal: the two peaks are paired
      {
        // NOTE(review): here y is weighted (1 - factor_) and x is weighted
        // factor_ -- the opposite of the pairing branch in operator(), which
        // weights x with (1 - factor_). Verify whether this asymmetry is
        // intended.
        Peak1D consensuspeak;
        consensuspeak.setMZ((y[ystart + j - 1].getMZ() * (1 - factor_) + x[xstart + i - 1].getMZ() * factor_));
        consensuspeak.setIntensity((y[ystart + j - 1].getIntensity() * (1 - factor_) + x[xstart + i - 1].getIntensity() * factor_));
        lastconsensus_.push_back(consensuspeak);
        // record x-index -> y-index pairing; on collision keep the smaller index
        if (!(peak_map_.find(xstart + i - 1) != peak_map_.end()))
        {
          peak_map_[xstart + i - 1] = ystart + j - 1;
        }
        else
        {
          peak_map_[xstart + i - 1] = ystart + j - 1 > peak_map_[xstart + i - 1] ? peak_map_[xstart + i - 1] : ystart + j - 1;
        }
        i--;
        j--;
      }
      else if (trace[i][j] == 1) // gap: x-peak has no partner
      {
        Peak1D consensuspeak;
        consensuspeak.setMZ(x[xstart + i - 1].getMZ());
        consensuspeak.setIntensity((x[xstart + i - 1].getIntensity()) * (1 - factor_));
        if (keeppeaks_)
        {
          lastconsensus_.push_back(consensuspeak);
        }
        i--;
      }
      else if (trace[i][j] == -1) // gap: y-peak has no partner
      {
        Peak1D consensuspeak;
        consensuspeak.setMZ(y[ystart + j - 1].getMZ());
        consensuspeak.setIntensity((y[ystart + j - 1].getIntensity()) * factor_);
        if (keeppeaks_)
        {
          lastconsensus_.push_back(consensuspeak);
        }
        j--;
      }
      if (!i || !j)
      {
        break;
      }
    }
    return dparray[xend - xstart + 1][yend - ystart + 1];
  }

  // Consensus spectrum produced by the last operator() invocation.
  const PeakSpectrum & SpectrumCheapDPCorr::lastconsensus() const
  {
    return lastconsensus_;
  }

  // Copy of the x-peak-index -> y-peak-index pairing of the last comparison.
  std::map<UInt, UInt> SpectrumCheapDPCorr::getPeakMap() const
  {
    return peak_map_;
  }

  /**
      Scores a single peak pair: a Gaussian (sigma = pairwise "variation"
      tolerance) of the position difference, weighted by the peak intensities
      according to the "int_cnt" parameter (0 = product, 1 = sqrt(product),
      2 = sum, 3 = agreeing intensity).

      @param[in] posa position of peak a
      @param[in] posb position of peak b
      @param[in] inta intensity of peak a
      @param[in] intb intensity of peak b
      @return score (-1 for an unknown int_cnt value)
  */
  double SpectrumCheapDPCorr::comparepeaks_(double posa, double posb, double inta, double intb) const
  {
    double variation = (posa + posb) / 2 * (double)param_.getValue("variation");
    boost::math::normal_distribution<double> normal(0., variation);
    unsigned int int_cnt = (unsigned int)param_.getValue("int_cnt");
    if (int_cnt == 0)
    {
      double p = boost::math::pdf(normal, posa - posb);
      return p * inta * intb;
    }
    else
    {
      if (int_cnt == 1)
      {
        return boost::math::pdf(normal, posa - posb) * sqrt(inta * intb);
      }
      else
      {
        if (int_cnt == 2)
        {
          return boost::math::pdf(normal, posa - posb) * (inta + intb);
        }
        else
        {
          if (int_cnt == 3)
          {
            return max(0.0, boost::math::pdf(normal, posa - posb) * ((inta + intb) / 2 - fabs(inta - intb)));
          }
          else
          {
            // TODO exception
            // cerr << "int_cnt is not in [0,1,2,3]\n";
            return -1;
          }
        }
      }
    }
  }
}
C++
3D
OpenMS/OpenMS
src/openms/source/COMPARISON/PeakAlignment.cpp
.cpp
13,435
411
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Mathias Walzer $
// $Authors: $
// --------------------------------------------------------------------------
//
#include <OpenMS/COMPARISON/PeakAlignment.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/DATASTRUCTURES/Matrix.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/KERNEL/MSExperiment.h>

using namespace std;

namespace OpenMS
{
  /// Default constructor: registers the functor's parameters and their defaults.
  PeakAlignment::PeakAlignment() :
    PeakSpectrumCompareFunctor()
  {
    defaults_.setValue("epsilon", 0.2, "defines the absolute error of the mass spectrometer");
    defaults_.setValue("normalized", 1, "is set 1 if the similarity-measurement is normalized to the range [0,1]");
    defaults_.setValue("heuristic_level", 0, "set 0 means no heuristic is applied otherwise the given value is interpreted as unsigned integer, the number of strongest peaks considered for heurisitcs - in those sets of peaks has to be at least one match to conduct comparison");
    defaults_.setValue("precursor_mass_tolerance", 3.0, "Mass tolerance of the precursor peak, defines the distance of two PrecursorPeaks for which they are supposed to be from different peptides");
    defaultsToParam_();
  }

  PeakAlignment::PeakAlignment(const PeakAlignment& source) = default;

  PeakAlignment::~PeakAlignment() = default;

  /// Assignment: delegates to the base class (all state lives in the param handler).
  PeakAlignment& PeakAlignment::operator=(const PeakAlignment& source)
  {
    if (this != &source)
    {
      PeakSpectrumCompareFunctor::operator=(source);
    }
    return *this;
  }

  /// Self-similarity: score of a spectrum against itself.
  double PeakAlignment::operator()(const PeakSpectrum& spec) const
  {
    return operator()(spec, spec);
  }

  /**
    @brief Alignment-based similarity of two peak spectra, normalized to [0,1].

    Runs a Needleman-Wunsch-style global alignment over the peak lists with a
    constant gap penalty (= "epsilon") and a per-pair score from peakPairScore_,
    then normalizes the best score by the geometric mean of both spectra's
    self-alignment scores. Returns 0 early when precursors are too far apart or
    when the optional top-peak heuristic finds no shared strong peak.
  */
  double PeakAlignment::operator()(const PeakSpectrum& spec1, const PeakSpectrum& spec2) const
  {
    // local copies: only needed because the heuristic below sorts by intensity
    PeakSpectrum s1(spec1), s2(spec2);

    // Shortcut: precursors further apart than the tolerance are assumed to be
    // from different peptides -> similarity 0 without running the alignment.
    double pre_mz1 = 0.0;
    if (!spec1.getPrecursors().empty())
    {
      pre_mz1 = spec1.getPrecursors()[0].getMZ();
    }
    double pre_mz2 = 0.0;
    if (!spec2.getPrecursors().empty())
    {
      pre_mz2 = spec2.getPrecursors()[0].getMZ();
    }
    if (fabs(pre_mz1 - pre_mz2) > (double)param_.getValue("precursor_mass_tolerance"))
    {
      return 0;
    }

    // Heuristic shortcut: require at least one m/z match (within epsilon)
    // among the heuristic_level strongest peaks of each spectrum.
    const double epsilon = (double)param_.getValue("epsilon");
    const UInt heuristic_level = (UInt)param_.getValue("heuristic_level");
    bool heuristic_filters(true);
    if (heuristic_level)
    {
      s1.sortByIntensity(true);
      s2.sortByIntensity(true);
      for (PeakSpectrum::ConstIterator it_s1 = s1.begin(); Size(it_s1 - s1.begin()) < heuristic_level && it_s1 != s1.end(); ++it_s1)
      {
        for (PeakSpectrum::ConstIterator it_s2 = s2.begin(); Size(it_s2 - s2.begin()) < heuristic_level && it_s2 != s2.end(); ++it_s2)
        {
          // match = mutual peak at the same m/z within epsilon tolerance
          if (fabs((*it_s2).getMZ() - (*it_s1).getMZ()) < epsilon)
          {
            heuristic_filters = false;
            // NOTE(review): this break only leaves the inner loop; the outer
            // loop keeps scanning although the answer is already known.
            break;
          }
        }
      }
    }
    if (heuristic_filters && heuristic_level)
    {
      return 0;
    }

    //TODO gapcost dependence on distance ?
    // gap penalty is reused from "epsilon" (one gap costs one tolerance unit)
    const double gap = (double)param_.getValue("epsilon");

    // DP matrix: (0,0)=0, first row/column pre-filled with cumulative gap cost
    Matrix<double> matrix(spec1.size() + 1, spec2.size() + 1, 0);
    for (long int i = 1; i < matrix.rows(); i++)
    {
      matrix(i, 0) = -gap * i;
    }
    for (long int i = 1; i < matrix.cols(); i++)
    {
      matrix(0, i) = -gap * i;
    }

    // sigma = standard deviation of all pairwise peak-position distances;
    // feeds the gaussian position term of peakPairScore_.
    // NOTE(review): divides by spec1.size()*spec2.size() without an
    // empty-spectrum guard - an empty input would divide by zero.
    double mid(0);
    for (Size i = 0; i < spec1.size(); ++i)
    {
      for (Size j = 0; j < spec2.size(); ++j)
      {
        double pos1(spec1[i].getMZ()), pos2(spec2[j].getMZ());
        mid += fabs(pos1 - pos2);
      }
    }
    // average peak distance
    mid /= (spec1.size() * spec2.size());

    double var(0);
    for (Size i = 0; i < spec1.size(); ++i)
    {
      for (Size j = 0; j < spec2.size(); ++j)
      {
        double pos1(spec1[i].getMZ()), pos2(spec2[j].getMZ());
        var += (fabs(pos1 - pos2) - mid) * (fabs(pos1 - pos2) - mid);
      }
    }
    // peak distance variance
    var /= (spec1.size() * spec2.size());

    // var==0 only if all pairwise distances are identical; clamp sigma to the
    // smallest positive double to avoid division by zero in peakPairScore_
    const double sigma((var == 0) ? numeric_limits<double>::min() : sqrt(var));

    // fill alignment matrix
    for (Size i = 1; i < spec1.size() + 1; ++i)
    {
      for (Size j = 1; j < spec2.size() + 1; ++j)
      {
        double pos1(spec1[i - 1].getMZ()), pos2(spec2[j - 1].getMZ());
        // only peaks within epsilon may be matched diagonally; otherwise the
        // cell can only be reached via a gap
        if (fabs(pos1 - pos2) <= epsilon)
        {
          // actual cell = max(diagonal+pair score, left-gap, above-gap)
          double from_left(matrix(i, j - 1) - gap);
          double from_above(matrix(i - 1, j) - gap);
          double int1(spec1[i - 1].getIntensity()), int2(spec2[j - 1].getIntensity());
          double from_diagonal(matrix(i - 1, j - 1) + peakPairScore_(pos1, int1, pos2, int2, sigma));
          matrix(i, j) = max(from_left, max(from_above, from_diagonal));
        }
        else
        {
          // actual cell = max(left-gap, above-gap)
          double from_left(matrix(i, j - 1) - gap);
          double from_above(matrix(i - 1, j) - gap);
          matrix(i, j) = max(from_left, from_above);
        }
      }
    }

    // best overall score = maximum over the last row and last column
    // NOTE(review): numeric_limits<double>::min() is the smallest POSITIVE
    // double, not the most negative value (that would be ::lowest()); if every
    // candidate cell were negative, best_score would stay at ~2.2e-308.
    double best_score(numeric_limits<double>::min());
    for (long int i = 0; i < matrix.cols(); i++)
    {
      best_score = max(best_score, matrix(matrix.rows() - 1, i));
    }
    for (long int i = 0; i < matrix.rows(); i++)
    {
      best_score = max(best_score, matrix(i, matrix.cols() - 1));
    }

    // self-alignment scores of both inputs (perfect-match baselines)
    double score_spec1(0), score_spec2(0);
    for (Size i = 0; i < spec1.size(); ++i)
    {
      double int_i(spec1[i].getIntensity());
      double pos_i(spec1[i].getMZ());
      score_spec1 += peakPairScore_(pos_i, int_i, pos_i, int_i, sigma);
    }
    for (Size i = 0; i < spec2.size(); ++i)
    {
      double int_i(spec2[i].getIntensity());
      double pos_i(spec2[i].getMZ());
      score_spec2 += peakPairScore_(pos_i, int_i, pos_i, int_i, sigma);
    }

    // normalize score to interval [0,1] with the geometric mean of the
    // self-alignment scores
    double best_score_normalized(best_score / sqrt(score_spec1 * score_spec2));
    return best_score_normalized;
  }

  /**
    @brief Recomputes the alignment of operator() and returns the traceback.

    Returns the list of aligned peak index pairs (index into spec1, index into
    spec2) in ascending order. The DP fill duplicates operator() (same gap
    penalty and pair score) but additionally records, per cell, which neighbor
    produced it: 1 = diagonal (peaks aligned), 0 = from left, 2 = from above.
  */
  vector<pair<Size, Size> > PeakAlignment::getAlignmentTraceback(const PeakSpectrum& spec1, const PeakSpectrum& spec2) const
  {
    const double epsilon = (double)param_.getValue("epsilon");
    //TODO gapcost dependence on distance ?
    const double gap = (double)param_.getValue("epsilon");

    // DP matrix, first row/column pre-filled with cumulative gap cost
    Matrix<double> matrix(spec1.size() + 1, spec2.size() + 1, 0);
    for (long int i = 1; i < matrix.rows(); i++)
    {
      matrix(i, 0) = -gap * i;
    }
    for (long int i = 1; i < matrix.cols(); i++)
    {
      matrix(0, i) = -gap * i;
    }

    // traceback(i,j) describes matrix(i+1,j+1):
    // "1" = from diagonal, "0" = from left, "2" = from above
    // NOTE(review): cells are not explicitly initialized; in the tie cases
    // below (see TODO) nothing is assigned - presumably relies on
    // Matrix<Size> zero-initialization; verify against the Matrix class.
    Matrix<Size> traceback(spec1.size(), spec2.size());

    // sigma = standard deviation of all pairwise peak distances (as above)
    double mid(0);
    for (Size i = 0; i < spec1.size(); ++i)
    {
      for (Size j = 0; j < spec2.size(); ++j)
      {
        double pos1(spec1[i].getMZ()), pos2(spec2[j].getMZ());
        mid += fabs(pos1 - pos2);
      }
    }
    mid /= (spec1.size() * spec2.size());

    double var(0);
    for (Size i = 0; i < spec1.size(); ++i)
    {
      for (Size j = 0; j < spec2.size(); ++j)
      {
        double pos1(spec1[i].getMZ()), pos2(spec2[j].getMZ());
        var += (fabs(pos1 - pos2) - mid) * (fabs(pos1 - pos2) - mid);
      }
    }
    var /= (spec1.size() * spec2.size());

    // NOTE(review): unlike operator(), no var==0 guard here - sigma can be 0
    // and peakPairScore_ would then divide by zero.
    const double sigma(sqrt(var));

    // fill alignment matrix and record the origin of each cell
    for (Size i = 1; i < spec1.size() + 1; ++i)
    {
      for (Size j = 1; j < spec2.size() + 1; ++j)
      {
        double pos1(spec1[i - 1].getMZ()), pos2(spec2[j - 1].getMZ());
        // only if peaks are in reasonable proximity alignment is considered,
        // else only gaps
        if (fabs(pos1 - pos2) <= epsilon)
        {
          // actual cell = max(diagonal+pair score, left-gap, above-gap)
          double from_left(matrix(i, j - 1) - gap);
          double from_above(matrix(i - 1, j) - gap);
          double int1(spec1[i - 1].getIntensity()), int2(spec2[j - 1].getIntensity());
          double from_diagonal(matrix(i - 1, j - 1) + peakPairScore_(pos1, int1, pos2, int2, sigma));
          matrix(i, j) = max(from_left, max(from_above, from_diagonal));

          // TODO the cases where all or two values are equal
          if (from_diagonal > from_left && from_diagonal > from_above)
          {
            traceback(i - 1, j - 1) = 1;
          }
          else
          {
            if (from_left > from_diagonal && from_left > from_above)
            {
              traceback(i - 1, j - 1) = 0;
            }
            else
            {
              if (from_above > from_diagonal && from_above > from_left)
              {
                traceback(i - 1, j - 1) = 2;
              }
            }
          }
        }
        else
        {
          // actual cell = max(left-gap, above-gap)
          double from_left(matrix(i, j - 1) - gap);
          double from_above(matrix(i - 1, j) - gap);
          matrix(i, j) = std::max(from_left, from_above);
          if (from_left > from_above)
          {
            traceback(i - 1, j - 1) = 0;
          }
          else //from_left <= from_above
          {
            traceback(i - 1, j - 1) = 2;
          }
        }
      }
    }

    //return track from best alloverscore to 0,0
    vector<pair<Size, Size> > ret_val;

    // locate the best overall score on the last row / last column
    Size row_index(0), col_index(0);
    double best_score(numeric_limits<double>::min());
    for (long int i = 0; i < matrix.cols(); i++)
    {
      if (best_score < matrix(matrix.rows() - 1, i))
      {
        best_score = matrix(matrix.rows() - 1, i);
        row_index = matrix.rows() - 1;
        col_index = i;
      }
    }
    for (long int i = 0; i < matrix.rows(); i++)
    {
      if (best_score < matrix(i, matrix.cols() - 1))
      {
        best_score = matrix(i, matrix.cols() - 1);
        row_index = i;
        col_index = matrix.cols() - 1;
      }
    }

    // TODO check the invariant!
    // walk back to the origin, prepending aligned pairs so the result is
    // sorted ascending by index
    while (row_index > 0 && col_index > 0)
    {
      //from diagonal - peaks aligned
      if (traceback(row_index - 1, col_index - 1) == 1)
      {
        //register aligned peaks only
        ret_val.insert(ret_val.begin(), pair<Size, Size>(row_index - 1, col_index - 1));
        row_index = row_index - 1;
        col_index = col_index - 1;
      }
      // gap alignment
      else if (traceback(row_index - 1, col_index - 1) == 0)
      {
        col_index = col_index - 1;
      }
      else
      {
        row_index = row_index - 1;
      }
    }
    return ret_val;
  }

  /**
    @brief Score of one aligned peak pair.

    score = intensity term * position term, where the intensity term is the
    geometric mean sqrt(intens1*intens2) and the position term is a
    gaussian-like weight of the m/z distance with width sigma.

    NOTE(review): the exponent reads exp(-(|d|) / 2 * sigma * sigma), which by
    C++ precedence is exp(-|d|/2 * sigma^2) - i.e. it MULTIPLIES by sigma^2
    and uses |d| instead of d^2. A textbook gaussian would be
    exp(-d*d / (2*sigma*sigma)). This looks like an operator-precedence bug,
    but fixing it changes every score - confirm against the intended formula
    before touching it.
  */
  double PeakAlignment::peakPairScore_(double& pos1, double& intens1, double& pos2, double& intens2, const double& sigma) const
  {
    //scoring formula : peak intensity score * peak position score
    double pi(sqrt(intens1 * intens2));
    double pp((1 / (sigma * sqrt(2 * Constants::PI))) * exp(-(fabs(pos1 - pos2)) / 2 * sigma * sigma));
    return pi * pp;
  }

}
C++
3D
OpenMS/OpenMS
src/openms/source/COMPARISON/BinnedSharedPeakCount.cpp
.cpp
1,896
65
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Mathias Walzer $ // $Authors: $ // -------------------------------------------------------------------------- // #include <OpenMS/COMPARISON/BinnedSharedPeakCount.h> using namespace std; #include <Eigen/Sparse> namespace OpenMS { BinnedSharedPeakCount::BinnedSharedPeakCount() : BinnedSpectrumCompareFunctor() { setName("BinnedSharedPeakCount"); defaultsToParam_(); } BinnedSharedPeakCount::BinnedSharedPeakCount(const BinnedSharedPeakCount& source) : BinnedSpectrumCompareFunctor(source) { } BinnedSharedPeakCount::~BinnedSharedPeakCount() = default; BinnedSharedPeakCount& BinnedSharedPeakCount::operator=(const BinnedSharedPeakCount& source) { if (this != &source) { BinnedSpectrumCompareFunctor::operator=(source); } return *this; } double BinnedSharedPeakCount::operator()(const BinnedSpectrum& spec) const { return operator()(spec, spec); } void BinnedSharedPeakCount::updateMembers_() { } double BinnedSharedPeakCount::operator()(const BinnedSpectrum& spec1, const BinnedSpectrum& spec2) const { OPENMS_PRECONDITION(BinnedSpectrum::isCompatible(spec1, spec2), "Binned spectra have different bin size or spread"); size_t denominator(max(spec1.getBins()->nonZeros(), spec2.getBins()->nonZeros())); // Note: keep in single expression for faster computation via expression templates // Calculate coefficient-wise product and count non-zero entries BinnedSpectrum::SparseVectorType s = spec1.getBins()->cwiseProduct(*spec2.getBins()); // resulting score normalized to interval [0,1] return static_cast<double>(s.nonZeros()) / denominator; } }
C++
3D
OpenMS/OpenMS
src/openms/source/COMPARISON/BinnedSpectrumCompareFunctor.cpp
.cpp
1,166
37
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Mathias Walzer $ // $Authors: $ // -------------------------------------------------------------------------- // #include <OpenMS/COMPARISON/BinnedSpectrumCompareFunctor.h> #include <OpenMS/COMPARISON/BinnedSharedPeakCount.h> #include <OpenMS/COMPARISON/BinnedSpectralContrastAngle.h> #include <OpenMS/COMPARISON/BinnedSumAgreeingIntensities.h> using namespace std; namespace OpenMS { BinnedSpectrumCompareFunctor::BinnedSpectrumCompareFunctor() : DefaultParamHandler("BinnedSpectrumCompareFunctor") { } BinnedSpectrumCompareFunctor::BinnedSpectrumCompareFunctor(const BinnedSpectrumCompareFunctor & source) = default; BinnedSpectrumCompareFunctor::~BinnedSpectrumCompareFunctor() = default; BinnedSpectrumCompareFunctor & BinnedSpectrumCompareFunctor::operator=(const BinnedSpectrumCompareFunctor & source) { if (this != &source) { DefaultParamHandler::operator=(source); } return *this; } }
C++
3D
OpenMS/OpenMS
src/openms/source/COMPARISON/SpectrumPrecursorComparator.cpp
.cpp
1,818
68
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: $ // -------------------------------------------------------------------------- // #include <OpenMS/COMPARISON/SpectrumPrecursorComparator.h> #include <OpenMS/KERNEL/MSSpectrum.h> #include <OpenMS/KERNEL/MSExperiment.h> using namespace std; namespace OpenMS { SpectrumPrecursorComparator::SpectrumPrecursorComparator() : PeakSpectrumCompareFunctor() { setName("SpectrumPrecursorComparator"); defaults_.setValue("window", 2, "Allowed deviation between precursor peaks."); defaultsToParam_(); } SpectrumPrecursorComparator::SpectrumPrecursorComparator(const SpectrumPrecursorComparator & source) = default; SpectrumPrecursorComparator::~SpectrumPrecursorComparator() = default; SpectrumPrecursorComparator & SpectrumPrecursorComparator::operator=(const SpectrumPrecursorComparator & source) { if (this != &source) { PeakSpectrumCompareFunctor::operator=(source); } return *this; } double SpectrumPrecursorComparator::operator()(const PeakSpectrum & spec) const { return operator()(spec, spec); } double SpectrumPrecursorComparator::operator()(const PeakSpectrum & x, const PeakSpectrum & y) const { double window = (double)param_.getValue("window"); double mz1 = 0.0; if (!x.getPrecursors().empty()) { mz1 = x.getPrecursors()[0].getMZ(); } double mz2 = 0.0; if (!y.getPrecursors().empty()) { mz2 = y.getPrecursors()[0].getMZ(); } if (fabs(mz1 - mz2) > window) { return 0; } return window - fabs(mz1 - mz2); } }
C++
3D
OpenMS/OpenMS
src/openms/source/COMPARISON/BinnedSpectralContrastAngle.cpp
.cpp
1,880
64
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Mathias Walzer $ // $Authors: Timo Sachsenberg $ // -------------------------------------------------------------------------- // #include <OpenMS/COMPARISON/BinnedSpectralContrastAngle.h> #include <Eigen/Sparse> using namespace std; namespace OpenMS { BinnedSpectralContrastAngle::BinnedSpectralContrastAngle() : BinnedSpectrumCompareFunctor() { setName("BinnedSpectralContrastAngle"); defaultsToParam_(); } BinnedSpectralContrastAngle::BinnedSpectralContrastAngle(const BinnedSpectralContrastAngle& source) : BinnedSpectrumCompareFunctor(source) { } BinnedSpectralContrastAngle::~BinnedSpectralContrastAngle() = default; BinnedSpectralContrastAngle& BinnedSpectralContrastAngle::operator=(const BinnedSpectralContrastAngle& source) { if (this != &source) { BinnedSpectrumCompareFunctor::operator=(source); } return *this; } double BinnedSpectralContrastAngle::operator()(const BinnedSpectrum& spec) const { return operator()(spec, spec); } void BinnedSpectralContrastAngle::updateMembers_() { } double BinnedSpectralContrastAngle::operator()(const BinnedSpectrum& spec1, const BinnedSpectrum& spec2) const { OPENMS_PRECONDITION(BinnedSpectrum::isCompatible(spec1, spec2), "Binned spectra have different bin size or spread"); // resulting score standardized to interval [0,1] const double sum1 = spec1.getBins()->dot(*spec1.getBins()); const double sum2 = spec2.getBins()->dot(*spec2.getBins()); const double numerator = spec1.getBins()->dot(*spec2.getBins()); const double score = numerator / (sqrt(sum1 * sum2)); return score; } }
C++
3D
OpenMS/OpenMS
src/openms/source/COMPARISON/SteinScottImproveScore.cpp
.cpp
3,422
124
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Vipul Patel $
// --------------------------------------------------------------------------
//
#include <OpenMS/COMPARISON/SteinScottImproveScore.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/KERNEL/MSExperiment.h>

namespace OpenMS
{
  /// default constructor: registers m/z tolerance and the score threshold
  SteinScottImproveScore::SteinScottImproveScore() :
    PeakSpectrumCompareFunctor()
  {
    setName("SteinScottImproveScore");
    defaults_.setValue("tolerance", 0.2, "defines the absolute error of the mass spectrometer");
    defaults_.setValue("threshold", 0.2, "if the calculated score is smaller than the threshold, a zero is given back");
    defaultsToParam_();
  }

  /// copy constructor
  SteinScottImproveScore::SteinScottImproveScore(const SteinScottImproveScore & source) = default;

  /// destructor
  SteinScottImproveScore::~SteinScottImproveScore() = default;

  /// assignment operator: delegates to the base (all state is parameters)
  SteinScottImproveScore & SteinScottImproveScore::operator=(const SteinScottImproveScore & source)
  {
    if (this != &source)
    {
      PeakSpectrumCompareFunctor::operator=(source);
    }
    return *this;
  }

  /**
    @brief Similarity pairwise score itself

    This function return the similarity score of itself based on SteinScott.

    @param[in] spec const PeakSpectrum Spectrum 1
    @see SteinScottImproveScore()
  */
  double SteinScottImproveScore::operator()(const PeakSpectrum & spec) const
  {
    return operator()(spec, spec);
  }

  /**
    @brief Similarity pairwise score

    This function return the similarity score of two spectra based on SteinScott.
    The raw dot product of intensities of peaks matching within 2*tolerance is
    corrected by a baseline term z = (tolerance/10000) * sum3 * sum4 and
    normalized by the geometric mean of the squared intensity sums. Scores
    below the "threshold" parameter are clamped to 0.

    @param[in] s1 const PeakSpectrum Spectrum 1 (peaks must be sorted by m/z)
    @param[in] s2 const PeakSpectrum Spectrum 2 (peaks must be sorted by m/z)
    @see SteinScottImproveScore()
  */
  double SteinScottImproveScore::operator()(const PeakSpectrum & s1, const PeakSpectrum & s2) const
  {
    const double epsilon = (double)param_.getValue("tolerance");
    // empirical baseline scale: tolerance spread over a 10000-wide m/z range
    const double constant = epsilon / 10000;
    //const double c(0.0004);
    double score(0), sum(0), sum1(0), sum2(0), sum3(0), sum4(0);

    // accumulate per-spectrum statistics:
    // sum1/sum2 = sum of squared intensities, sum3/sum4 = plain intensity sums
    for (const Peak1D& it1 : s1)
    {
      double temp = it1.getIntensity();
      sum1 += temp * temp;
      sum3 += temp;
    }
    for (const Peak1D& it1 : s2)
    {
      double temp = it1.getIntensity();
      sum2 += temp * temp;
      sum4 += temp;
    }

    // baseline correction term (expected random agreement)
    double z = constant * (sum3 * sum4);

    // Sliding-window match accumulation. Relies on both spectra being sorted
    // by position: j_left remembers where the matching window starts so each
    // peak pair is visited at most once (overall O(n + matches)).
    Size j_left(0);
    for (Size i = 0; i != s1.size(); ++i)
    {
      for (Size j = j_left; j != s2.size(); ++j)
      {
        double pos1(s1[i].getMZ()), pos2(s2[j].getMZ());
        if (std::abs(pos1 - pos2) <= 2 * epsilon)
        {
          sum += s1[i].getIntensity() * s2[j].getIntensity();
        }
        else
        {
          if (pos2 > pos1)
          {
            // s2 peaks from here on are beyond the window for this s1 peak
            break;
          }
          else
          {
            // s2 peak is below the window; skip it for all later s1 peaks too
            j_left = j;
          }
        }
      }
    }

    // normalize the baseline-corrected dot product
    score = (sum - z) / (std::sqrt((sum1 * sum2)));

    // NOTE(review): threshold is compared after a narrowing (float) cast while
    // everything else uses double - probably unintentional, but harmless for
    // typical threshold values; kept as-is.
    if (score < (float)param_.getValue("threshold"))
    {
      score = 0;
    }
    return score;
  }
}
C++
3D
OpenMS/OpenMS
src/openms/source/COMPARISON/BinnedSumAgreeingIntensities.cpp
.cpp
2,176
69
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Mathias Walzer $
// $Authors: $
// --------------------------------------------------------------------------
//
#include <OpenMS/COMPARISON/BinnedSumAgreeingIntensities.h>

#include <Eigen/Sparse>

using namespace std;

namespace OpenMS
{
  /// Default constructor: registers the functor name and inherited defaults.
  BinnedSumAgreeingIntensities::BinnedSumAgreeingIntensities() :
    BinnedSpectrumCompareFunctor()
  {
    setName("BinnedSumAgreeingIntensities");
    defaultsToParam_();
  }

  BinnedSumAgreeingIntensities::BinnedSumAgreeingIntensities(const BinnedSumAgreeingIntensities& source) :
    BinnedSpectrumCompareFunctor(source)
  {
  }

  BinnedSumAgreeingIntensities::~BinnedSumAgreeingIntensities() = default;

  /// Assignment: no local state, delegate to the base class.
  BinnedSumAgreeingIntensities& BinnedSumAgreeingIntensities::operator=(const BinnedSumAgreeingIntensities& source)
  {
    if (this != &source)
    {
      BinnedSpectrumCompareFunctor::operator=(source);
    }
    return *this;
  }

  /// Self-similarity: compares the spectrum with itself.
  double BinnedSumAgreeingIntensities::operator()(const BinnedSpectrum& spec) const
  {
    return operator()(spec, spec);
  }

  void BinnedSumAgreeingIntensities::updateMembers_()
  {
  }

  /**
    @brief Similarity from per-bin intensity agreement, normalized to [0,1].

    For each bin: contribution = max(0, mean(a,b) - |a-b|), i.e. bins whose
    intensities agree contribute up to their mean, strongly disagreeing bins
    contribute nothing. The total is normalized by the mean of the two
    spectra's intensity sums and clamped to 1.
  */
  double BinnedSumAgreeingIntensities::operator()(const BinnedSpectrum& spec1, const BinnedSpectrum& spec2) const
  {
    OPENMS_PRECONDITION(BinnedSpectrum::isCompatible(spec1, spec2), "Binned spectra have different bin size or spread");

    const double sum1 = spec1.getBins()->sum();
    const double sum2 = spec2.getBins()->sum();

    // For maximum speed, keep in single expression
    // (Eigen expression templates fuse the sparse ops into one pass)
    // 1. calculate mean minus difference: x = mean(a,b) - abs(a-b)
    // 2. truncate negative values: y = max(0, x)
    // 3. calculate sum of entries: sum_nn = y.sum()
    BinnedSpectrum::SparseVectorType s = ((*spec1.getBins() + *spec2.getBins()) * 0.5) - ((*spec1.getBins() - *spec2.getBins()).cwiseAbs());
    double sum_nn = s.coeffs().cwiseMax(0).sum();

    // resulting score normalized to interval [0,1]
    // NOTE(review): if both spectra are empty, sum1+sum2 is 0 and this
    // divides 0/0 -> NaN; callers should not pass two empty spectra.
    return min(sum_nn / ((sum1 + sum2) / 2.0), 1.0);
  }

}
C++
3D
OpenMS/OpenMS
src/openms/source/COMPARISON/SpectrumAlignmentScore.cpp
.cpp
3,750
105
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Andreas Bertsch $
// --------------------------------------------------------------------------

#include <OpenMS/COMPARISON/SpectrumAlignmentScore.h>

using namespace std;

namespace OpenMS
{
  /// Default constructor: registers tolerance and weighting-mode parameters.
  SpectrumAlignmentScore::SpectrumAlignmentScore() :
    PeakSpectrumCompareFunctor()
  {
    setName("SpectrumAlignmentScore");
    defaults_.setValue("tolerance", 0.3, "Defines the absolute (in Da) or relative (in ppm) tolerance");
    defaults_.setValue("is_relative_tolerance", "false", "if true, the tolerance value is interpreted as ppm");
    defaults_.setValidStrings("is_relative_tolerance", {"true","false"});
    defaults_.setValue("use_linear_factor", "false", "if true, the intensities are weighted with the relative m/z difference");
    defaults_.setValidStrings("use_linear_factor", {"true","false"});
    defaults_.setValue("use_gaussian_factor", "false", "if true, the intensities are weighted with the relative m/z difference using a gaussian");
    defaults_.setValidStrings("use_gaussian_factor", {"true","false"});
    defaultsToParam_();
  }

  SpectrumAlignmentScore::SpectrumAlignmentScore(const SpectrumAlignmentScore & source) = default;

  SpectrumAlignmentScore::~SpectrumAlignmentScore() = default;

  /// Assignment: delegates to the base (all state is parameters).
  SpectrumAlignmentScore & SpectrumAlignmentScore::operator=(const SpectrumAlignmentScore & source)
  {
    if (this != &source)
    {
      PeakSpectrumCompareFunctor::operator=(source);
    }
    return *this;
  }

  /// Self-similarity: a spectrum aligned against itself (score 1 by construction).
  double SpectrumAlignmentScore::operator()(const PeakSpectrum & spec) const
  {
    return operator()(spec, spec);
  }

  /**
    @brief Similarity of two spectra via a SpectrumAlignment peak matching.

    Aligns s1 and s2 with SpectrumAlignment (same tolerance settings), then
    sums sqrt(int1 * int2 * factor) over all aligned peak pairs, where factor
    optionally down-weights pairs by their m/z difference (linearly or with a
    gaussian/erfc falloff). The sum is normalized by the geometric mean of the
    spectra's summed squared intensities, yielding a score in [0,1].
    'use_linear_factor' and 'use_gaussian_factor' are mutually exclusive.
  */
  double SpectrumAlignmentScore::operator()(const PeakSpectrum & s1, const PeakSpectrum & s2) const
  {
    const double tolerance = (double)param_.getValue("tolerance");
    const bool is_relative_tolerance = param_.getValue("is_relative_tolerance").toBool();
    const bool use_linear_factor = param_.getValue("use_linear_factor").toBool();
    const bool use_gaussian_factor = param_.getValue("use_gaussian_factor").toBool();

    OPENMS_PRECONDITION(!(use_linear_factor && use_gaussian_factor), "SpectrumAlignmentScore, use either 'use_linear_factor' or 'use_gaussian_factor")

    // delegate peak matching to SpectrumAlignment with the same tolerance setup
    SpectrumAlignment aligner;
    Param p;
    p.setValue("tolerance", tolerance);
    p.setValue("is_relative_tolerance", param_.getValue("is_relative_tolerance"));
    aligner.setParameters(p);

    // alignment holds index pairs (into s1, into s2) of matched peaks
    vector<pair<Size, Size>> alignment;
    aligner.getSpectrumAlignment(alignment, s1, s2);

    double score(0), sum(0);

    // calculate sum of squared intensities (self-similarity baselines)
    double sum1(0);
    for (auto const & p : s1)
    {
      sum1 += pow(p.getIntensity(), 2);
    }

    double sum2(0);
    for (auto const & p : s2)
    {
      sum2 += pow(p.getIntensity(), 2);
    }

    for (auto const & ap : alignment)
    {
      // ppm tolerance is made absolute relative to the s1 peak's m/z
      const double mz_tolerance = is_relative_tolerance ? tolerance * s1[ap.first].getMZ() * 1e-6 : tolerance;
      const double mz_difference(fabs(s1[ap.first].getMZ() - s2[ap.second].getMZ()));

      double factor(1.0);
      if (use_linear_factor)
      {
        // linearly down-weight: 1 at perfect match, 0 at the tolerance edge
        factor = (mz_tolerance - mz_difference) / mz_tolerance;
      }
      else if (use_gaussian_factor)
      {
        // erfc falloff; ~1 near zero difference, decaying with distance
        const double epsilon = mz_difference / (3.0 * mz_tolerance * sqrt(2));
        factor = std::erfc(epsilon);
      }

      // calculate weighted sum of the multiplied intensities
      sum += sqrt(s1[ap.first].getIntensity() * s2[ap.second].getIntensity() * factor);
    }

    // normalize with the geometric mean of the squared-intensity sums
    // NOTE(review): empty input spectra make sum1*sum2 zero -> 0/0 NaN.
    score = sum / (sqrt(sum1 * sum2));

    return score;
  }
}
C++
3D
OpenMS/OpenMS
src/openms/source/COMPARISON/ZhangSimilarityScore.cpp
.cpp
6,130
212
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: Andreas Bertsch $ // -------------------------------------------------------------------------- #include <OpenMS/COMPARISON/ZhangSimilarityScore.h> #include <OpenMS/KERNEL/MSSpectrum.h> #include <OpenMS/KERNEL/MSExperiment.h> #include <boost/math/special_functions/erf.hpp> using namespace std; namespace OpenMS { ZhangSimilarityScore::ZhangSimilarityScore() : PeakSpectrumCompareFunctor() { setName("ZhangSimilarityScore"); defaults_.setValue("tolerance", 0.2, "defines the absolute (in Da) or relative (in ppm) tolerance"); defaults_.setValue("is_relative_tolerance", "false", "If set to true, the tolerance is interpreted as relative"); defaults_.setValidStrings("is_relative_tolerance", {"true","false"}); defaults_.setValue("use_linear_factor", "false", "if true, the intensities are weighted with the relative m/z difference"); defaults_.setValidStrings("use_linear_factor", {"true","false"}); defaults_.setValue("use_gaussian_factor", "false", "if true, the intensities are weighted with the relative m/z difference using a gaussian"); defaults_.setValidStrings("use_gaussian_factor", {"true","false"}); defaultsToParam_(); } ZhangSimilarityScore::ZhangSimilarityScore(const ZhangSimilarityScore & source) = default; ZhangSimilarityScore::~ZhangSimilarityScore() = default; ZhangSimilarityScore & ZhangSimilarityScore::operator=(const ZhangSimilarityScore & source) { if (this != &source) { PeakSpectrumCompareFunctor::operator=(source); } return *this; } double ZhangSimilarityScore::operator()(const PeakSpectrum & spec) const { return operator()(spec, spec); } double ZhangSimilarityScore::operator()(const PeakSpectrum & s1, const PeakSpectrum & s2) const { const double tolerance = (double)param_.getValue("tolerance"); bool 
use_linear_factor = param_.getValue("use_linear_factor").toBool(); bool use_gaussian_factor = param_.getValue("use_gaussian_factor").toBool(); double score(0), sum(0), sum1(0), sum2(0) /*, squared_sum1(0), squared_sum2(0)*/; // TODO remove parameter if (param_.getValue("is_relative_tolerance").toBool() ) { throw Exception::NotImplemented(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION); } for (const Peak1D& it1 : s1) { sum1 += it1.getIntensity(); /* for (PeakSpectrum::ConstIterator it2 = s1.begin(); it2 != s1.end(); ++it2) { if (abs(it1->getPosition()[0] - it2->getPosition()[0]) <= 2 * tolerance) { squared_sum1 += it1->getIntensity() * it2->getIntensity(); } }*/ } /* UInt i_left(0); for (Size i = 0; i != s1.size(); ++i) { sum1 += s1[i].getIntensity(); for (Size j = i_left; j != s1.size(); ++j) { double pos1(s1[i].getPosition()[0]), pos2(s1[j].getPosition()[0]); if (abs(pos1 - pos2) <= 2 * tolerance) { squared_sum1 += s1[i].getIntensity() * s1[j].getIntensity(); } else { if (pos2 > pos1) { break; } else { i_left = i; } } } }*/ /* i_left = 0; for (Size i = 0; i != s2.size(); ++i) { sum2 += s2[i].getIntensity(); for (Size j = i_left; j != s2.size(); ++j) { double pos1(s2[i].getPosition()[0]), pos2(s2[j].getPosition()[0]); if (abs(pos1 - pos2) <= 2 * tolerance) { squared_sum1 += s2[i].getIntensity() * s2[j].getIntensity(); } else { if (pos2 > pos1) { break; } else { i_left = i; } } } }*/ for (const Peak1D& it1 : s2) { sum2 += it1.getIntensity(); /* for (PeakSpectrum::ConstIterator it2 = s2.begin(); it2 != s2.end(); ++it2) { if (abs(it1->getPosition()[0] - it2->getPosition()[0]) <= 2 * tolerance) { squared_sum2 += it1->getIntensity() * it2->getIntensity(); } } */ } Size j_left(0); for (Size i = 0; i != s1.size(); ++i) { for (Size j = j_left; j != s2.size(); ++j) { double pos1(s1[i].getMZ()), pos2(s2[j].getMZ()); if (fabs(pos1 - pos2) < tolerance) { //double factor((tolerance - fabs(pos1 - pos2)) / tolerance); double factor = 1.0; if (use_linear_factor || use_gaussian_factor) 
{ factor = getFactor_(tolerance, fabs(pos1 - pos2), use_gaussian_factor); } sum += sqrt(s1[i].getIntensity() * s2[j].getIntensity() * factor); } else { if (pos2 > pos1) { break; } else { j_left = j; } } } } /* for (PeakSpectrum::ConstIterator it1 = s1.begin(); it1 != s1.end(); ++it1) { for (PeakSpectrum::ConstIterator it2 = s2.begin(); it2 != s2.end(); ++it2) { if (abs(it1->getPosition()[0] - it2->getPosition()[0]) <= 2 * tolerance) { sum += sqrt(it1->getIntensity() * it2->getIntensity()); } } }*/ score = sum / (sqrt(sum1 * sum2)); return score; } double ZhangSimilarityScore::getFactor_(double mz_tolerance, double mz_difference, bool is_gaussian) const { double factor(0.0); if (is_gaussian) { static const double denominator = mz_tolerance * 3.0 * sqrt(2.0); factor = std::erfc(mz_difference / denominator); //cerr << "Factor: " << factor << " " << mz_tolerance << " " << mz_difference << endl; } else { factor = (mz_tolerance - mz_difference) / mz_tolerance; } return factor; } }
C++
3D
OpenMS/OpenMS
src/openms/source/COMPARISON/SpectraSTSimilarityScore.cpp
.cpp
4,424
146
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: $ // -------------------------------------------------------------------------- #include <OpenMS/COMPARISON/SpectraSTSimilarityScore.h> #include <Eigen/Sparse> using namespace std; using namespace Eigen; namespace OpenMS { SpectraSTSimilarityScore::SpectraSTSimilarityScore() : PeakSpectrumCompareFunctor() { setName("SpectraSTSimilarityScore"); } SpectraSTSimilarityScore::SpectraSTSimilarityScore(const SpectraSTSimilarityScore & source) = default; SpectraSTSimilarityScore::~SpectraSTSimilarityScore() = default; SpectraSTSimilarityScore & SpectraSTSimilarityScore::operator=(const SpectraSTSimilarityScore & source) { if (this != &source) { PeakSpectrumCompareFunctor::operator=(source); } return *this; } double SpectraSTSimilarityScore::operator()(const PeakSpectrum & spec) const { return operator()(spec, spec); } double SpectraSTSimilarityScore::operator()(const PeakSpectrum & s1, const PeakSpectrum & s2) const { // TODO: check if this operator makes sense (as it doesn't allow to fine tune resolution) BinnedSpectrum bin1(s1, 1, false, 1, BinnedSpectrum::DEFAULT_BIN_OFFSET_LOWRES); BinnedSpectrum bin2(s2, 1, false, 1, BinnedSpectrum::DEFAULT_BIN_OFFSET_LOWRES); // normalized dot product *bin1.getBins() /= bin1.getBins()->norm(); *bin2.getBins() /= bin2.getBins()->norm(); return bin1.getBins()->dot(*bin2.getBins()); } double SpectraSTSimilarityScore::operator()(const BinnedSpectrum & bin1, const BinnedSpectrum & bin2) const { return bin1.getBins()->dot(*bin2.getBins()); } bool SpectraSTSimilarityScore::preprocess(PeakSpectrum & spec, float remove_peak_intensity_threshold, UInt cut_peaks_below, Size min_peak_number, Size max_peak_number) { double min_high_intensity = 0.; if (!spec.empty()) { double max_el = 
std::max_element(spec.begin(),spec.end(),Peak1D::IntensityLess())->getIntensity(); min_high_intensity = (1.0 / cut_peaks_below) * max_el; } spec.sortByPosition(); PeakSpectrum tmp; Size s = 0; for (PeakSpectrum::iterator k = spec.begin(); k < spec.end() && s < max_peak_number; ++k, ++s) { Peak1D peak; if (k->getIntensity() > remove_peak_intensity_threshold && k->getIntensity() > min_high_intensity) { peak.setIntensity(sqrt(k->getIntensity())); peak.setMZ(k->getMZ()); peak.setPosition(k->getPosition()); tmp.push_back(peak); } } spec = tmp; //if not enough peaks in the spectrum pass that one out return spec.size() >= min_peak_number; } BinnedSpectrum SpectraSTSimilarityScore::transform(const PeakSpectrum & spec) { // TODO: resolution seems rather low. Check with current original implementations. BinnedSpectrum bin(spec, 1, false, 1, BinnedSpectrum::DEFAULT_BIN_OFFSET_LOWRES); *bin.getBins() /= bin.getBins()->norm(); return bin; } double SpectraSTSimilarityScore::dot_bias(const BinnedSpectrum & bin1, const BinnedSpectrum & bin2, double dot_product) const { double numerator = (bin1.getBins()->cwiseProduct(*bin2.getBins())).norm(); if (dot_product != 0) { return (double)numerator / dot_product; } else { return (double)numerator / (*this)(bin1, bin2); } } double SpectraSTSimilarityScore::delta_D(double top_hit, double runner_up) { if (top_hit == 0) { throw Exception::DivisionByZero(__FILE__, __LINE__, __FUNCTION__); } else { return (double)(top_hit - runner_up) / top_hit; } } double SpectraSTSimilarityScore::compute_F(double dot_product, double delta_D, double dot_bias) { double b(0); if (dot_bias < 0.1 || (0.35 < dot_bias && dot_bias <= 0.4)) { b = 0.12; } else if (0.4 < dot_bias && dot_bias <= 0.45) { b = 0.18; } else if (dot_bias > 0.45) { b = 0.24; } return 0.6 * dot_product + 0.4 * delta_D - b; } }
C++
3D
OpenMS/OpenMS
src/openms/source/COMPARISON/SpectrumAlignment.cpp
.cpp
1,198
39
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: Andreas Bertsch $ // -------------------------------------------------------------------------- // #include <OpenMS/COMPARISON/SpectrumAlignment.h> using namespace std; namespace OpenMS { SpectrumAlignment::SpectrumAlignment() : DefaultParamHandler("SpectrumAlignment") { defaults_.setValue("tolerance", 0.3, "Defines the absolute (in Da) or relative (in ppm) tolerance"); defaults_.setValue("is_relative_tolerance", "false", "If true, the 'tolerance' is interpreted as ppm-value"); defaults_.setValidStrings("is_relative_tolerance", {"true","false"}); defaultsToParam_(); } SpectrumAlignment::SpectrumAlignment(const SpectrumAlignment & source) = default; SpectrumAlignment::~SpectrumAlignment() = default; SpectrumAlignment & SpectrumAlignment::operator=(const SpectrumAlignment & source) { if (this != &source) { DefaultParamHandler::operator=(source); } return *this; } }
C++
3D
OpenMS/OpenMS
src/openms/source/MATH/MathFunctions.cpp
.cpp
390
13
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg$ // $Authors: Marc Sturm $ // -------------------------------------------------------------------------- namespace OpenMS { } // namespace OpenMS
C++
3D
OpenMS/OpenMS
src/openms/source/MATH/StatisticFunctions.cpp
.cpp
394
13
// Copyright (c) 2002-present, The OpenMS Team -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg$ // $Authors: Marc Sturm $ // -------------------------------------------------------------------------- namespace OpenMS { } // namespace OpenMS
C++
3D
OpenMS/OpenMS
src/openms/source/MATH/MISC/BSpline2d.cpp
.cpp
1,770
60
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: Stephan Aiche, Timo Sachsenberg $ // -------------------------------------------------------------------------- #include <OpenMS/MATH/MISC/BSpline2d.h> #include <BSpline/BSplineBase.cpp> #include <BSpline/BSpline.cpp> namespace OpenMS { BSpline2d::BSpline2d(const std::vector<double>& x, const std::vector<double>& y, double wavelength, BoundaryCondition boundary_condition, Size num_nodes) { OPENMS_PRECONDITION(x.size() == y.size(), "x and y vectors passed to BSpline2d constructor must have the same size.") spline_ = new eol_bspline::BSpline<double>(&x[0], static_cast<int>(x.size()), &y[0], wavelength, boundary_condition, num_nodes); } BSpline2d::~BSpline2d() { delete spline_; } bool BSpline2d::solve(const std::vector<double>& y) { OPENMS_PRECONDITION(static_cast<Size>(spline_->nX()) == y.size(), "y vector passed to 'BSpline2d::solve' must match size of x.") // pass vector as array return spline_->solve(&y[0]); } double BSpline2d::eval(const double x) const { OPENMS_PRECONDITION(ok(), "Spline was not initialized properly.") return spline_->evaluate(x); } double BSpline2d::derivative(const double x) const { OPENMS_PRECONDITION(ok(), "Spline was not initialized properly.") return spline_->slope(x); } bool BSpline2d::ok() const { return spline_->ok(); } void BSpline2d::debug(bool enable) { eol_bspline::BSplineBase<double>::Debug(int(enable)); } }
C++
3D
OpenMS/OpenMS
src/openms/source/MATH/MISC/EmgGradientDescent.cpp
.cpp
33,237
797
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Douglas McCloskey, Pasquale Domenico Colaianni $ // $Authors: Douglas McCloskey, Pasquale Domenico Colaianni $ // -------------------------------------------------------------------------- #include <OpenMS/MATH/MISC/EmgGradientDescent.h> #include <OpenMS/DATASTRUCTURES/String.h> #include <numeric> namespace OpenMS { EmgGradientDescent::EmgGradientDescent() : DefaultParamHandler("EmgGradientDescent") { getDefaultParameters(defaults_); defaultsToParam_(); // write defaults into Param object param_ } void EmgGradientDescent::getDefaultParameters(Param& defaults) { defaults.clear(); defaults.setValue("print_debug", (UInt)0, "The level of debug information to print in the terminal. Valid values are: 0, 1, 2. Higher values mean more information."); defaults.setMinInt("print_debug", 0); defaults.setMaxInt("print_debug", 2); defaults.setValue("max_gd_iter", (UInt)100000, "The maximum number of iterations permitted to the gradient descent algorithm."); defaults.setMinInt("max_gd_iter", 0); defaults.setValue("compute_additional_points", "true", "Whether additional points should be added when fitting EMG peak model."); defaults.setValidStrings("compute_additional_points", {"true","false"}); } void EmgGradientDescent::updateMembers_() { print_debug_ = (UInt)param_.getValue("print_debug"); max_gd_iter_ = (UInt)param_.getValue("max_gd_iter"); compute_additional_points_ = param_.getValue("compute_additional_points").toBool(); } double EmgGradientDescent::compute_z( const double x, const double mu, const double sigma, const double tau ) const { return (1.0 / std::sqrt(2.0)) * (sigma / tau - (x - mu) / sigma); } double EmgGradientDescent::E_wrt_h( const std::vector<double>& xs, const std::vector<double>& ys, const double h, const double mu, const double sigma, 
const double tau ) const { const double u = mu; const double s = sigma; const double t = tau; std::vector<double> diffs(xs.size()); for (Size i = 0; i < xs.size(); ++i) { const double x = xs[i]; const double y = ys[i]; const double z = compute_z(x, mu, sigma, tau); if (z < 0) { diffs[i] = ((s * std::exp((std::pow(s,2.0) + 2.0 * t * u - 4.0 * t * x)/(2.0 * std::pow(t,2.0))) * std::erfc((std::pow(s,2.0) + t * (u - x))/(std::sqrt(2.0) * s * t)) * (PI * h * s * std::exp((std::pow(s,2.0) + 2 * t * u)/(2 * std::pow(t,2.0))) * std::erfc((std::pow(s,2.0) + t * (u - x))/(std::sqrt(2.0) * s * t)) - std::sqrt(2.0 * PI) * t * y * std::exp(x/t)))/std::pow(t,2.0)) / static_cast<double>(xs.size()); } else if (z <= 6.71e7) { diffs[i] = ((std::sqrt(2.0 * PI) * s * std::exp(1.0/2.0 * std::pow((s/t - (x - u)/s),2.0) - std::pow((x - u),2.0)/(2.0 * std::pow(s,2.0))) * std::erfc((s/t - (x - u)/s)/std::sqrt(2.0)) * ((std::sqrt(PI/2.0) * h * s * std::exp(1.0/2.0 * std::pow((s/t - (x - u)/s), 2.0) - std::pow((x - u),2.0)/(2 * std::pow(s,2.0))) * std::erfc((s/t - (x - u)/s)/std::sqrt(2.0)))/t - y))/t) / static_cast<double>(xs.size()); } else { diffs[i] = ((2 * std::exp(-std::pow((x - u),2.0)/(2.0 * std::pow(s,2.0))) * ((h * std::exp(-std::pow((x - u),2.0)/(2 * std::pow(s,2.0))))/(1 - (t * (x - u))/std::pow(s,2.0)) - y))/(1 - (t * (x - u))/std::pow(s,2.0))) / static_cast<double>(xs.size()); } } const double result = std::accumulate(diffs.begin(), diffs.end(), 0.0); if (print_debug_ == 2) { std::cout << std::endl << "E_wrt_h() diffs:" << std::endl; for (const double d : diffs) { std::cout << d << " "; } std::cout << std::endl; std::cout << "result=" << result << std::endl; } return result; } double EmgGradientDescent::E_wrt_mu( const std::vector<double>& xs, const std::vector<double>& ys, const double h, const double mu, const double sigma, const double tau ) const { const double u = mu; const double s = sigma; const double t = tau; std::vector<double> diffs(xs.size()); for (Size i = 0; i < 
xs.size(); ++i) { const double x = xs[i]; const double y = ys[i]; const double z = compute_z(x, mu, sigma, tau); if (z < 0) { diffs[i] = (2 * ((std::sqrt(PI/2.0) * h * s * std::exp(std::pow(s,2.0)/(2.0 * std::pow(t,2.0)) - (x - u)/t) * std::erfc((s/t - (x - u)/s)/std::sqrt(2.0)))/std::pow(t,2.0) - (h * std::exp(std::pow(s,2.0)/(2.0 * std::pow(t,2.0)) - 1.0/2.0 * std::pow((s/t - (x - u)/s),2.0) - (x - u)/t))/t) * ((std::sqrt(PI/2.0) * h * s * std::exp(std::pow(s,2.0)/(2.0 * std::pow(t,2.0)) - (x - u)/t) * std::erfc((s/t - (x - u)/s)/std::sqrt(2.0)))/t - y)) / static_cast<double>(xs.size()); } else if (z <= 6.71e7) { diffs[i] = (2 * ((std::sqrt(PI/2.0) * h * s * std::exp(1.0/2.0 * std::pow((s/t - (x - u)/s),2.0) - std::pow((x - u),2.0)/(2.0 * std::pow(s,2.0))) * ((x - u)/std::pow(s,2.0) + (s/t - (x - u)/s)/s) * std::erfc((s/t - (x - u)/s)/std::sqrt(2.0)))/t - (h * std::exp(-std::pow((x - u),2.0)/(2.0 * std::pow(s,2.0))))/t) * ((std::sqrt(PI/2.0) * h * s * std::exp(1.0/2.0 * std::pow((s/t - (x - u)/s),2.0) - std::pow((x - u),2.0)/(2.0 * std::pow(s,2.0))) * std::erfc((s/t - (x - u)/s)/std::sqrt(2.0)))/t - y)) / static_cast<double>(xs.size()); } else { diffs[i] = (2.0 * ((h * (x - u) * std::exp(-std::pow((x - u),2.0)/(2.0 * std::pow(s,2.0))))/(std::pow(s,2.0) * (1.0 - (t * (x - u))/std::pow(s,2.0))) - (h * t * std::exp(-std::pow((x - u),2.0)/(2.0 * std::pow(s,2.0))))/(std::pow(s,2.0) * std::pow((1.0 - (t * (x - u))/std::pow(s,2.0)),2.0))) * ((h * std::exp(-std::pow((x - u),2.0)/(2.0 * std::pow(s,2.0))))/(1.0 - (t * (x - u))/std::pow(s,2.0)) - y)) / static_cast<double>(xs.size()); } } const double result = std::accumulate(diffs.begin(), diffs.end(), 0.0); if (print_debug_ == 2) { std::cout << std::endl << "E_wrt_mu() diffs:" << std::endl; for (const double d : diffs) { std::cout << d << " "; } std::cout << std::endl; std::cout << "result=" << result << std::endl; } return result; } double EmgGradientDescent::E_wrt_sigma( const std::vector<double>& xs, const 
std::vector<double>& ys, const double h, const double mu, const double sigma, const double tau ) const { const double u = mu; const double s = sigma; const double t = tau; std::vector<double> diffs(xs.size()); for (Size i = 0; i < xs.size(); ++i) { const double x = xs[i]; const double y = ys[i]; const double z = compute_z(x, mu, sigma, tau); if (z < 0) { diffs[i] = (2.0 * ((std::sqrt(PI/2.0) * h * std::exp(std::pow(s,2.0)/(2.0 * std::pow(t,2.0)) - (x - u)/t) * std::erfc((s/t - (x - u)/s)/std::sqrt(2.0)))/t + (std::sqrt(PI/2.0) * h * std::pow(s,2.0) * std::exp(std::pow(s,2.0)/(2.0 * std::pow(t,2.0)) - (x - u)/t) * std::erfc((s/t - (x - u)/s)/std::sqrt(2.0)))/std::pow(t,3.0) - (h * s * std::exp(std::pow(s,2.0)/(2.0 * std::pow(t,2.0)) - 1.0/2.0 * std::pow((s/t - (x - u)/s),2.0) - (x - u)/t) * ((x - u)/std::pow(s,2.0) + 1.0/t))/t) * ((std::sqrt(PI/2.0) * h * s * std::exp(std::pow(s,2.0)/(2.0 * std::pow(t,2.0)) - (x - u)/t) * std::erfc((s/t - (x - u)/s)/std::sqrt(2.0)))/t - y)) / static_cast<double>(xs.size()); } else if (z <= 6.71e7) { diffs[i] = (2.0 * ((std::sqrt(PI/2.0) * h * std::exp(1.0/2.0 * std::pow((s/t - (x - u)/s),2.0) - std::pow((x - u),2.0)/(2.0 * std::pow(s,2.0))) * std::erfc((s/t - (x - u)/s)/std::sqrt(2.0)))/t + (std::sqrt(PI/2.0) * h * s * std::exp(1.0/2.0 * std::pow((s/t - (x - u)/s),2.0) - std::pow((x - u),2.0)/(2.0 * std::pow(s,2.0))) * (std::pow((x - u),2.0)/std::pow(s,3.0) + ((x - u)/std::pow(s,2.0) + 1.0/t) * (s/t - (x - u)/s)) * std::erfc((s/t - (x - u)/s)/std::sqrt(2.0)))/t - (h * s * std::exp(-std::pow((x - u),2.0)/(2.0 * std::pow(s,2.0))) * ((x - u)/std::pow(s,2.0) + 1.0/t))/t) * ((std::sqrt(PI/2.0) * h * s * std::exp(1.0/2.0 * std::pow((s/t - (x - u)/s),2.0) - std::pow((x - u),2.0)/(2.0 * std::pow(s,2.0))) * std::erfc((s/t - (x - u)/s)/std::sqrt(2.0)))/t - y)) / static_cast<double>(xs.size()); } else { diffs[i] = (2.0 * ((h * std::pow((x - u),2.0) * std::exp(-std::pow((x - u),2.0)/(2.0 * std::pow(s,2.0))))/(std::pow(s,3.0) * (1.0 - (t * (x - 
u))/std::pow(s,2.0))) - (2.0 * h * t * (x - u) * std::exp(-std::pow((x - u),2.0)/(2 * std::pow(s,2.0))))/(std::pow(s,3.0) * std::pow((1.0 - (t * (x - u))/std::pow(s,2.0)),2.0))) * ((h * std::exp(-std::pow(x-u,2.0)/(2 * std::pow(s,2.0))))/(1 - (t * (x - u))/std::pow(s,2.0)) - y)) / static_cast<double>(xs.size()); } } const double result = std::accumulate(diffs.begin(), diffs.end(), 0.0); if (print_debug_ == 2) { std::cout << std::endl << "E_wrt_sigma() diffs:" << std::endl; for (const double d : diffs) { std::cout << d << " "; } std::cout << std::endl; std::cout << "result=" << result << std::endl; } return result; } double EmgGradientDescent::E_wrt_tau( const std::vector<double>& xs, const std::vector<double>& ys, const double h, const double mu, const double sigma, const double tau ) const { const double u = mu; const double s = sigma; const double t = tau; std::vector<double> diffs(xs.size()); const double PI = OpenMS::Constants::PI; for (Size i = 0; i < xs.size(); ++i) { const double x = xs[i]; const double y = ys[i]; const double z = compute_z(x, mu, sigma, tau); if (z < 0) { diffs[i] = (2 * (-(std::sqrt(PI/2.0) * h * s * std::exp(std::pow(s,2.0)/(2 * std::pow(t,2.0)) - (x - u)/t) * std::erfc((s/t - (x - u)/s)/std::sqrt(2.0)))/std::pow(t,2.0) + (std::sqrt(PI/2.0) * h * s * std::exp(std::pow(s,2.0)/(2.0 * std::pow(t,2.0)) - (x - u)/t) * ((x - u)/std::pow(t,2.0) - std::pow(s,2.0)/std::pow(t,3.0)) * std::erfc((s/t - (x - u)/s)/std::sqrt(2.0)))/t + (h * std::pow(s,2.0) * std::exp(std::pow(s,2.0)/(2.0 * std::pow(t,2.0)) - 1.0/2.0 * std::pow((s/t - (x - u)/s),2.0) - (x - u)/t))/std::pow(t,3.0)) * ((std::sqrt(PI/2.0) * h * s * std::exp(std::pow(s,2.0)/(2.0 * std::pow(t,2.0)) - (x - u)/t) * std::erfc((s/t - (x - u)/s)/std::sqrt(2.0)))/t - y)) / static_cast<double>(xs.size()); } else if (z <= 6.71e7) { diffs[i] = (2 * (-(std::sqrt(PI/2.0) * h * std::pow(s,2.0) * std::exp(1.0/2.0 * std::pow((s/t - (x - u)/s),2.0) - std::pow(x-u,2.0)/(2.0 * std::pow(s,2.0))) * (s/t - (x - 
u)/s) * std::erfc((s/t - (x - u)/s)/std::sqrt(2.0)))/std::pow(t,3.0) - (std::sqrt(PI/2.0) * h * s * std::exp(1.0/2.0 * std::pow((s/t - (x - u)/s),2.0) - std::pow(x-u,2.0)/(2.0 * std::pow(s,2.0))) * std::erfc((s/t - (x - u)/s)/std::sqrt(2.0)))/std::pow(t,2.0) + (h * std::pow(s,2.0) * std::exp(-std::pow(x-u,2.0)/(2 * std::pow(s,2.0))))/std::pow(t,3.0)) * ((std::sqrt(PI/2.0) * h * s * std::exp(1.0/2.0 * std::pow((s/t - (x - u)/s),2.0) - std::pow(x-u,2.0)/(2 * std::pow(s,2.0))) * std::erfc((s/t - (x - u)/s)/std::sqrt(2.0)))/t - y)) / static_cast<double>(xs.size()); } else { diffs[i] = ((2.0 * h * (x - u) * std::exp(-std::pow(x-u,2.0)/(2.0 * std::pow(s,2.0))) * ((h * std::exp(-std::pow(x-u,2.0)/(2.0 * std::pow(s,2.0))))/(1.0 - (t * (x - u))/std::pow(s,2.0)) - y))/(std::pow(s,2.0) * std::pow((1.0 - (t * (x - u))/std::pow(s,2.0)),2.0))) / static_cast<double>(xs.size()); } } const double result = std::accumulate(diffs.begin(), diffs.end(), 0.0); if (print_debug_ == 2) { std::cout << std::endl << "E_wrt_tau() diffs:" << std::endl; for (const double d : diffs) { std::cout << d << " "; } std::cout << std::endl; std::cout << "result=" << result << std::endl; } return result; } double EmgGradientDescent::Loss_function( const std::vector<double>& xs, const std::vector<double>& ys, const double h, const double mu, const double sigma, const double tau ) const { std::vector<double> diffs(xs.size()); for (Size i = 0; i < xs.size(); ++i) { diffs[i] = std::pow(emg_point(xs[i], h, mu, sigma, tau) - ys[i], 2.0) / xs.size(); } const double result = std::accumulate(diffs.begin(), diffs.end(), 0.0); if (print_debug_ == 2) { std::cout << std::endl << "Loss_function() diffs:" << std::endl; for (const double d : diffs) { std::cout << d << " "; } std::cout << std::endl; std::cout << "result=" << result << std::endl; } return result; } void EmgGradientDescent::applyEstimatedParameters( const std::vector<double>& xs, const double h, const double mu, const double sigma, const double tau, 
std::vector<double>& out_xs, std::vector<double>& out_ys ) const { out_xs = xs; // Copy all positions to output out_ys.clear(); for (const double x : out_xs) // For each x, estimate y { out_ys.push_back(emg_point(x, h, mu, sigma, tau)); } if (!compute_additional_points_) { return; } // Compute the sampling step for the additional points double avg_sampling { 0.0 }; for (Size i = 1; i < xs.size(); ++i) { avg_sampling += xs[i] - xs[i - 1]; } avg_sampling /= xs.size() - 1; // Stop adding points if the estimated y <= `est_y_threshold` const double est_y_threshold { 1e-3 }; // Stop adding points if the peak becomes too large std::vector<double>::const_iterator apex_pos_it = std::max_element(out_ys.cbegin(), out_ys.cend()); const double apex_pos = out_xs[std::distance(out_ys.cbegin(), apex_pos_it)]; // Decide on which side the eventual additional points should be added // The loop stops once the last added point's intensity is: // - lower than the intensity on the opposite boundary // - lower than `est_y_threshold` // The loop stops if the cutoff side becomes 3 times larger than the other side if (out_ys.front() > out_ys.back()) { const double pos_boundary = apex_pos - (out_xs.back() - apex_pos) * 3; const double target_intensity = out_ys.back(); while (out_ys.front() > target_intensity && out_ys.front() > est_y_threshold) { const double position = out_xs.front() - avg_sampling; if (position < pos_boundary) { break; } out_xs.insert(out_xs.begin(), position); out_ys.insert(out_ys.begin(), emg_point(position, h, mu, sigma, tau)); } } else { const double pos_boundary = apex_pos + (apex_pos - out_xs.front()) * 3; const double target_intensity = out_ys.front(); while (out_ys.back() > target_intensity && out_ys.back() > est_y_threshold) { const double position = out_xs.back() + avg_sampling; if (position > pos_boundary) { break; } out_xs.push_back(position); out_ys.push_back(emg_point(position, h, mu, sigma, tau)); } } } double EmgGradientDescent::computeInitialMean( const 
std::vector<double>& xs, const std::vector<double>& ys ) const { if (xs.empty()) { throw Exception::SizeUnderflow(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, 0); } const double max_intensity = *std::max_element(ys.begin(), ys.end()); // The intensity levels at which the mean candidates are computed const std::vector<double> percentages = { 0.6, 0.65, 0.7, 0.75, 0.8, 0.85 }; Size i = 0; Size j = xs.size() - 1; // Make sure left and right positions have an initial value // This is to avoid situations (eg. cutoff peaks) where `max_intensity_threshold` // is higher than the first point on a boundary of the peak. In such a case, // the following nested loops would not get a chance to execute and the // algorithm would fail. // The avoidance of using the highest points of the peak apex also provides // robustness to spurious points or random fluctuations in detector sampling // from inflating the maximum peak height. double left_pos = xs.front(); double right_pos = xs.back(); std::vector<double> mean_candidates; for (const double height_percentage : percentages) { const double max_intensity_threshold = max_intensity * height_percentage; for (; i < xs.size() - 1 && ys[i] <= max_intensity_threshold; ++i) { left_pos = xs[i]; } for (; j >= 1 && ys[j] <= max_intensity_threshold; --j) { right_pos = xs[j]; } mean_candidates.push_back( (left_pos + right_pos) / 2.0 ); } // Return the average of all middle RTs return std::accumulate(mean_candidates.begin(), mean_candidates.end(), 0.0) / mean_candidates.size(); } void EmgGradientDescent::iRpropPlus( const double prev_diff_E_param, double& diff_E_param, double& param_lr, double& param_update, double& param, const double current_E, const double previous_E ) const { if (prev_diff_E_param * diff_E_param > 0.0) { // Using value 2000 as upper bound (iRprop+ paper recommends a value of 50) param_lr = std::min(param_lr * 1.2, 2000.0); param_update = - ( diff_E_param / std::fabs(diff_E_param) ) * param_lr; param += param_update; } else if 
(prev_diff_E_param * diff_E_param < 0.0) { param_lr = std::max(param_lr * 0.5, 0.0); if (current_E > previous_E) { param -= param_update; } diff_E_param = 0.0; } else { if (diff_E_param) { param_update = - ( diff_E_param / std::fabs(diff_E_param) ) * param_lr; } else { param_update = - param_lr; } param += param_update; } } double EmgGradientDescent::emg_point( const double x, const double h, const double mu, const double sigma, const double tau ) const { const double z = compute_z(x, mu, sigma, tau); const double u = mu; const double s = sigma; const double t = tau; if (z < 0) { return ((h*s)/t) * std::sqrt(PI/2.0) * std::exp((1.0/2.0)*(std::pow(s/t,2.0))-(x-u)/t) * std::erfc((1.0/std::sqrt(2.0)) * (s/t - (x-u)/s)); } else if (z <= 6.71e7) { return h * std::exp(-(1.0/2.0) * std::pow(((x - u)/s),2.0)) * (s/t) * std::sqrt(PI/2.0) * std::exp(std::pow((1.0/std::sqrt(2.0) * (s/t - (x - u)/s)),2.0)) * std::erfc(1.0/std::sqrt(2.0) * (s/t - (x - u)/s)); } else { return (h * std::exp(-(1.0/2.0) * (std::pow(((x-u) / s),2.0)))) / (1.0 - (((x-u) * t) / (std::pow(s,2.0)))); } } void EmgGradientDescent::extractTrainingSet( const std::vector<double>& xs, const std::vector<double>& ys, std::vector<double>& TrX, std::vector<double>& TrY ) const { if (xs.size() < 2) // A valid training set cannot be computed { throw Exception::SizeUnderflow(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, xs.size()); } const double intensity_threshold = *std::max_element(ys.begin(), ys.end()) * 0.8; std::vector<std::pair<double,double>> points; // Add points from the LEFT side, until `intensity_threshold` is reached points.emplace_back(xs.front(), ys.front()); // Add FIRST point, no matter the threshold Size i = 1; for (; i < xs.size() - 1 && ys[i] < intensity_threshold; ++i) { points.emplace_back(xs[i], ys[i]); } // Add points from the RIGHT side, until `intensity_threshold` is reached points.emplace_back(xs.back(), ys.back()); // Add LAST point, no matter the threshold Size j = xs.size() - 2; for (; i 
<= j && ys[j] < intensity_threshold; --j) { points.emplace_back(xs[j], ys[j]); } // Compute the derivative for points of intensity greater than `intensity_threshold` // According to the value of the highest derivative, it will be decided if // a given point is to be added or to be skipped // `derivatives` contains the information for both directions std::vector<double> derivatives(xs.size() + 1); // One more element to account for derivatives from right to left derivatives.front() = 1.0; derivatives.back() = -1.0; for (Size k = i - 1; k < xs.size() && k <= j + 1 && i > 1; ++k) { derivatives[k] = (ys[k] - ys[k - 1]) / (xs[k] - xs[k - 1]); } const double max_derivative = *std::max_element( derivatives.begin() + i, derivatives.begin() + j + 2, [](const double a, const double b) { return std::fabs(a) < std::fabs(b); } ); const double derivative_percent { 0.3 }; const double derivative_threshold = std::fabs(max_derivative) * derivative_percent; // Starting from `i` and proceeding toward the RIGHT side, // add points until the derivative conditions are satisfied for ( ; i < xs.size() - 1 && i <= j && derivatives[i] > 0.0 && (std::fabs(derivatives[i]) >= derivative_threshold || derivatives[i] / derivatives[i - 1] >= 0.6); ++i ) { points.emplace_back(xs[i], ys[i]); } // Starting from `j` and proceeding toward the LEFT side, // add points until the derivative conditions are satisfied for ( ; j > 0 && i <= j && derivatives[j + 1] < 0.0 && (std::fabs(derivatives[j + 1]) >= derivative_threshold || derivatives[j + 1] / derivatives[j + 2] >= 0.6); --j ) { points.emplace_back(xs[j], ys[j]); } // Create the output vectors containing the training set TrX.clear(); TrY.clear(); for (const std::pair<double,double>& point : points) { TrX.push_back(point.first); TrY.push_back(point.second); } } double EmgGradientDescent::computeMuMaxDistance(const std::vector<double>& xs) const { const std::pair< std::vector<double>::const_iterator, std::vector<double>::const_iterator > p = 
std::minmax_element(xs.begin(), xs.end()); if (p.first == xs.end() || p.second == xs.end()) return 0.0; const double min_pos = *p.first; const double max_pos = *p.second; // Return the maximum distance permitted for the Mean parameter, to avoid // diverging from the optimal solution in gradient descent return (max_pos - min_pos) * 0.35; } UInt EmgGradientDescent::estimateEmgParameters( const std::vector<double>& xs, const std::vector<double>& ys, double& best_h, double& best_mu, double& best_sigma, double& best_tau ) const { // Initial parameters double h { *std::max_element(ys.begin(), ys.end()) }; double mu { computeInitialMean(xs, ys) }; double sigma { mu * 1e-2 }; double tau { sigma * 2.0 }; const double h_lower_boundary { h }; // Parameter `h` won't decrease below this value std::vector<double> TrX, TrY; // Training set (positions and intensities) extractTrainingSet(xs, ys, TrX, TrY); // Variables containing the "previous" differentials double prev_diff_E_h, prev_diff_E_mu, prev_diff_E_sigma, prev_diff_E_tau, previous_E; prev_diff_E_h = prev_diff_E_mu = prev_diff_E_sigma = prev_diff_E_tau = 0.0; // Part of computation in iRpropPlus() // The parameter will change as much as these terms between iterations double term_h, term_mu, term_sigma, term_tau; term_h = term_mu = term_sigma = term_tau = 0.0; // These variables will contain the values obtained at the best iteration // The best iteration is decided by the smallest E found // Therefore, here `best_E` is initialized with the maximum value for type `double` double best_E; previous_E = best_h = best_mu = best_sigma = best_tau = best_E = std::numeric_limits<double>::max(); // Keep track of the current iteration index, and the best iteration index UInt iter_idx, best_iter; iter_idx = best_iter = 0; // This value will increase according to the number of iterations occurred, // to avoid spamming the terminal with too much debug information UInt info_iter_threshold { 1 }; // Learning rates (used in gradient descent 
and iRprop+) double lr_h, lr_mu, lr_sigma, lr_tau; lr_h = lr_mu = lr_sigma = lr_tau = 0.0125; // iRprop+ paper recommends 0.0125 // Variables to limit the change in position `mu` const double mu_max_dist = computeMuMaxDistance(TrX); const double mu_left_boundary { mu - mu_max_dist }; const double mu_right_boundary { mu + mu_max_dist }; // The standard deviation between a selection of the precedent Es is computed. // If said standard deviation is lower than a certain value, // the computation of gradient descent is terminated const Size last_few_Es_dim { 10 }; std::vector<double> last_few_Es(last_few_Es_dim, 0.0); Size last_few_Es_idx = 0; const double Es_std_dev_min = 1.0; // NOTE: magic value if (print_debug_ == 1) { std::cout << "GRADIENT DESCENT\nInput vectors size: " << xs.size() << "; Training set size: " << TrX.size() << std::endl; std::cout << "The possible mu range is [" << mu_left_boundary << " " << mu_right_boundary << "]" << std::endl; } while (++iter_idx <= max_gd_iter_) { // Break if parameters are `nan` or `inf` if ( std::isnan(h) || std::isnan(mu) || std::isnan(sigma) || std::isnan(tau) || std::isinf(h) || std::isinf(mu) || std::isinf(sigma) || std::isinf(tau) ) { std::cout << std::endl << "[" << iter_idx << "]" << std::endl; std::cout << "One or more parameters are invalid." << std::endl; std::cout << "Bad: h=" << h << " mu=" << mu << " sigma=" << sigma << " tau=" << tau << std::endl; break; } // Compute the cost given the current parameters const double current_E = Loss_function(TrX, TrY, h, mu, sigma, tau); // Break if the computed cost is an invalid value if (std::isnan(current_E) || std::isinf(current_E)) { std::cout << std::endl << "[" << iter_idx << "]" << std::endl; std::cout << "Bad: E value is invalid. 
current_E=" << current_E << std::endl; break; } // If the current iteration is the best one, save the relevant values if (current_E < best_E) { best_h = h; best_mu = mu; best_sigma = sigma; best_tau = tau; best_E = current_E; best_iter = iter_idx; } // Compute the partial derivatives given the current parameters double diff_E_h = E_wrt_h(TrX, TrY, h, mu, sigma, tau); double diff_E_mu = E_wrt_mu(TrX, TrY, h, mu, sigma, tau); double diff_E_sigma = E_wrt_sigma(TrX, TrY, h, mu, sigma, tau); double diff_E_tau = E_wrt_tau(TrX, TrY, h, mu, sigma, tau); // Logging info to the terminal if (print_debug_ == 1 && iter_idx % info_iter_threshold == 0) { std::cout << std::endl << "[" << iter_idx << "] [prev. E=" << current_E << "]" << std::endl; std::cout << "[diff_E_h=" << diff_E_h << "] [diff_E_mu=" << diff_E_mu << "] [diff_E_sigma=" << diff_E_sigma << "] [diff_E_tau=" << diff_E_tau << "]" << std::endl; std::cout << "[h=" << h << "] \t[mu=" << mu << "] \t[sigma=" << sigma << "] \t[tau=" << tau << "]" << std::endl; std::cout << "[lr_h=" << lr_h << "] \t[lr_mu=" << lr_mu << "] \t[lr_sigma=" << lr_sigma << "] \t[lr_tau=" << lr_tau << "]" << std::endl; // Avoid spamming the terminal: increase `info_iter_threshold` dynamically (until 10k) if (iter_idx < 10000 && iter_idx / info_iter_threshold >= 10) info_iter_threshold *= 10; } // If the cost function doesn't change enough, the gradient descent algorithm is terminated // This is decided by computing the standard deviation between a selection of the last few Es if (iter_idx % 50 == 0) // NOTE: magic value { last_few_Es[last_few_Es_idx++ % last_few_Es_dim] = current_E; const double mean = std::accumulate(last_few_Es.begin(), last_few_Es.end(), 0.0) / last_few_Es_dim; double squared_diffs {0.0}; for (const double current_E : last_few_Es) { squared_diffs += std::pow(current_E - mean, 2.0); } const double Es_std_dev = std::sqrt(squared_diffs / static_cast<double>(last_few_Es_dim)); if (Es_std_dev < Es_std_dev_min) { if (print_debug_ == 
1) { std::cout << std::endl << "[" << iter_idx << "] The cost function is not changing enough, anymore. Breaking."; std::cout << std::endl << "[" << iter_idx << "] [mean=" << mean << "] [Es_std_dev=" << Es_std_dev << "]" << std::endl; } break; } } // Simultaneous update of all parameters for gradient descent iRpropPlus(prev_diff_E_h, diff_E_h, lr_h, term_h, h, current_E, previous_E); iRpropPlus(prev_diff_E_mu, diff_E_mu, lr_mu, term_mu, mu, current_E, previous_E); iRpropPlus(prev_diff_E_sigma, diff_E_sigma, lr_sigma, term_sigma, sigma, current_E, previous_E); iRpropPlus(prev_diff_E_tau, diff_E_tau, lr_tau, term_tau, tau, current_E, previous_E); // Apply the parameters' constraints h = std::max(h_lower_boundary, h); if (mu < mu_left_boundary || mu_right_boundary < mu) { mu = mu < mu_left_boundary ? mu_left_boundary : mu_right_boundary; } sigma = std::min(std::max(1e-4, sigma), 20.0); // NOTE: magic value tau = std::min(std::max(sigma, tau), sigma * 15.0); // NOTE: magic value // Saving values to be used at the next iteration prev_diff_E_h = diff_E_h; prev_diff_E_mu = diff_E_mu; prev_diff_E_sigma = diff_E_sigma; prev_diff_E_tau = diff_E_tau; previous_E = current_E; } if (print_debug_ == 1) { std::cout << std::endl << "[" << best_iter << "] RESULT: best_E=" << best_E << std::endl; // TODO: Remove the following "GEOGEBRA" line std::cout << "[" << best_iter << "] GEOGEBRA: Execute[{\"h = " << best_h << "\", \"mu = " << best_mu << "\",\"sigma = " << best_sigma << "\", \"tau = " << best_tau << "\"}]" << std::endl; } // The method has a maximum number of iterations permitted // (see class parameter `max_gd_iter`). // Said limit is rarely reached, and instead the method will finish after // a lower number of iterations. The method returns such number. 
return iter_idx; } template <typename PeakContainerT> void EmgGradientDescent::fitEMGPeakModel( const PeakContainerT& input_peak, PeakContainerT& output_peak, const double left_pos, const double right_pos ) const { // Extract points typename PeakContainerT::const_iterator start_it = left_pos ? input_peak.PosBegin(left_pos) : input_peak.begin(); typename PeakContainerT::const_iterator end_it = right_pos ? input_peak.PosEnd(right_pos) : input_peak.end(); std::vector<double> xs, ys; for (typename PeakContainerT::const_iterator it = start_it; it != end_it; ++it) { xs.push_back(it->getPos()); ys.push_back(it->getIntensity()); } // EMG parameter estimation with gradient descent double h, mu, sigma, tau; estimateEmgParameters(xs, ys, h, mu, sigma, tau); // Estimate the intensities for each point std::vector<double> out_xs; std::vector<double> out_ys; applyEstimatedParameters(xs, h, mu, sigma, tau, out_xs, out_ys); // Prepare the output peak output_peak = input_peak; output_peak.clear(false); // Remove the points, but keep the metadata for (Size i = 0; i < out_xs.size(); ++i) { // NOTE: casting to avoid -Wnarrowing compiler warning/error // TODO: remove cast once issue #3379 is solved // https://github.com/OpenMS/OpenMS/issues/3379 typename PeakContainerT::PeakType point { out_xs[i], static_cast<float>(out_ys[i]) }; output_peak.push_back(point); } // Add the EMG parameters as metadata typename PeakContainerT::FloatDataArray fda; fda.setName("emg_parameters"); fda.push_back(h); fda.push_back(mu); fda.push_back(sigma); fda.push_back(tau); output_peak.getFloatDataArrays().push_back(fda); if (print_debug_ == 1) { std::cout << std::endl << "Input size: " << input_peak.size() << ". 
"; std::cout << "Number of additional points: " << (output_peak.size() - input_peak.size()) << "\n\n" << std::endl; } } template void OPENMS_DLLAPI EmgGradientDescent::fitEMGPeakModel<MSChromatogram>( const MSChromatogram& input_peak, MSChromatogram& output_peak, const double left_pos, const double right_pos ) const; template void OPENMS_DLLAPI EmgGradientDescent::fitEMGPeakModel<MSSpectrum>( const MSSpectrum& input_peak, MSSpectrum& output_peak, const double left_pos, const double right_pos ) const; }
C++
3D
OpenMS/OpenMS
src/openms/source/MATH/MISC/CubicSpline2d.cpp
.cpp
4,770
163
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Lars Nilse $ // $Authors: Lars Nilse $ // -------------------------------------------------------------------------- #include <OpenMS/MATH/MISC/CubicSpline2d.h> #include <OpenMS/CONCEPT/Exception.h> #include <algorithm> #include <functional> using namespace std; namespace OpenMS { CubicSpline2d::CubicSpline2d(const std::vector<double>& x, const std::vector<double>& y) { if (x.size() != y.size()) { throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "x and y vectors are not of the same size."); } if (x.size() < 2) { throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "x and y vectors need to contain two or more elements."); } // assert spectrum is sorted if (std::adjacent_find(x.begin(), x.end(), std::greater<double>()) != x.end()) { throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "x vector is not sorted."); } init_(x, y); } CubicSpline2d::CubicSpline2d(const std::map<double, double>& m) { if (m.size() < 2) { throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Map needs to contain two or more elements."); } std::vector<double> x; std::vector<double> y; x.reserve(m.size()); y.reserve(m.size()); std::map<double, double>::const_iterator map_it; for (map_it = m.begin(); map_it != m.end(); ++map_it) { x.push_back(map_it->first); y.push_back(map_it->second); } init_(x, y); } double CubicSpline2d::eval(double x) const { if (x < x_.front() || x > x_.back()) { throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Argument out of range of spline interpolation."); } // determine index of closest node left of (or exactly at) x unsigned i = static_cast<unsigned>(std::lower_bound(x_.begin(), x_.end(), x) - x_.begin()); if (x_[i] > x 
|| x_.back() == x) { --i; } const double xx = x - x_[i]; return ((d_[i] * xx + c_[i]) * xx + b_[i]) * xx + a_[i]; } double CubicSpline2d::derivative(const double x) const { return derivatives(x, 1); } double CubicSpline2d::derivatives(double x, unsigned order) const { if (x < x_.front() || x > x_.back()) { throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Argument out of range of spline interpolation."); } if (order < 1 || order > 3) { throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Only first, second and third derivative defined on cubic spline"); } // determine index of closest node left of (or exactly at) x unsigned i = static_cast<unsigned>(std::lower_bound(x_.begin(), x_.end(), x) - x_.begin()); if (x_[i] > x || x_.back() == x) // also, i must not point to last index in 'x_', since all other vectors are one element shorter { --i; } const double xx = x - x_[i]; if (order == 1) { return b_[i] + 2 * c_[i] * xx + 3 * d_[i] * xx * xx; } else if (order == 2) { return 2 * c_[i] + 6 * d_[i] * xx; } else { return 6 * d_[i]; } } void CubicSpline2d::init_(const std::vector<double>& x, const std::vector<double>& y) { const size_t n = x.size() - 1; std::vector<double> h; h.reserve(n); a_.reserve(n); x_.reserve(n + 1); // do the 0'th element manually, since the loop below only starts at 1 h.push_back(x[1] - x[0]); x_.push_back(x[0]); a_.push_back(y[0]); std::vector<double> mu(n, 0.0); std::vector<double> z(n, 0.0); for (unsigned i = 1; i < n; ++i) { h.push_back(x[i + 1] - x[i]); const double l = 2 * (x[i + 1] - x[i - 1]) - h[i - 1] * mu[i - 1]; mu[i] = h[i] / l; z[i] = (3 * (y[i + 1] * h[i - 1] - y[i] * (x[i + 1] - x[i - 1]) + y[i - 1] * h[i]) / (h[i - 1] * h[i]) - h[i - 1] * z[i - 1]) / l; // store x,y -- required for evaluation later on x_.push_back(x[i]); a_.push_back(y[i]); } // 'x_' needs to be full length (all other member vectors (except c_) are one element shorter) x_.push_back(x[n]); b_.resize(n); 
d_.resize(n); c_.resize(n+1); c_.back() = 0; for (int j = static_cast<int>(n) - 1; j >= 0; --j) { c_[j] = z[j] - mu[j] * c_[j + 1]; b_[j] = (y[j + 1] - y[j]) / h[j] - h[j] * (c_[j + 1] + 2 * c_[j]) / 3; d_[j] = (c_[j + 1] - c_[j]) / (3 * h[j]); } } }
C++
3D
OpenMS/OpenMS
src/openms/source/MATH/MISC/SplineBisection.cpp
.cpp
1,109
38
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Hannes Roest $ // $Authors: Hannes Roest $ // -------------------------------------------------------------------------- #include <OpenMS/MATH/MISC/SplineBisection.h> #include <OpenMS/MATH/MISC/BSpline2d.h> #include <OpenMS/MATH/MISC/CubicSpline2d.h> namespace OpenMS::Math { // explicit instantiation. template void spline_bisection<BSpline2d>(const BSpline2d & peak_spline, double const left_neighbor_mz, double const right_neighbor_mz, double & max_peak_mz, double & max_peak_int, double const threshold); // explicit instantiation. template void spline_bisection<CubicSpline2d>(const CubicSpline2d & peak_spline, double const left_neighbor_mz, double const right_neighbor_mz, double & max_peak_mz, double & max_peak_int, double const threshold); } //OpenMS //Math
C++
3D
OpenMS/OpenMS
src/openms/source/MATH/STATISTICS/PosteriorErrorProbabilityModel.cpp
.cpp
54,520
1,155
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: David Wojnar $ // -------------------------------------------------------------------------- // #include <OpenMS/MATH/STATISTICS/PosteriorErrorProbabilityModel.h> #include <OpenMS/CONCEPT/Constants.h> #include <OpenMS/CONCEPT/LogStream.h> #include <OpenMS/DATASTRUCTURES/String.h> #include <OpenMS/DATASTRUCTURES/StringListUtils.h> #include <OpenMS/FORMAT/TextFile.h> #include <OpenMS/MATH/STATISTICS/GumbelMaxLikelihoodFitter.h> #include <OpenMS/MATH/StatisticFunctions.h> #include <OpenMS/METADATA/PeptideIdentification.h> #include <OpenMS/METADATA/PeptideIdentificationList.h> #include <OpenMS/METADATA/ProteinIdentification.h> #include <OpenMS/METADATA/PeptideHit.h> #include <QtCore/QDir> #include <algorithm> using namespace std; namespace OpenMS::Math { PosteriorErrorProbabilityModel::PosteriorErrorProbabilityModel() : DefaultParamHandler("PosteriorErrorProbabilityModel"), incorrectly_assigned_fit_param_(GaussFitter::GaussFitResult(-1, -1, -1)), incorrectly_assigned_fit_gumbel_param_(GumbelMaxLikelihoodFitter::GumbelDistributionFitResult(-1,-1)), correctly_assigned_fit_param_(GaussFitter::GaussFitResult(-1, -1, -1)), negative_prior_(0.5), max_incorrectly_(0), max_correctly_(0), smallest_score_(0) { defaults_.setValue("out_plot", "", "If given, the some output files will be saved in the following manner: <out_plot>_scores.txt for the scores and <out_plot> which contains the fitted values for each step of the EM-algorithm, e.g., out_plot = /usr/home/OMSSA123 leads to /usr/home/OMSSA123_scores.txt, /usr/home/OMSSA123 will be written. If no directory is specified, e.g. 
instead of '/usr/home/OMSSA123' just OMSSA123, the files will be written into the working directory.", {"advanced","output file"}); defaults_.setValue("number_of_bins", 100, "Number of bins used for visualization. Only needed if each iteration step of the EM-Algorithm will be visualized", {"advanced"}); defaults_.setValue("incorrectly_assigned", "Gumbel", "for 'Gumbel', the Gumbel distribution is used to plot incorrectly assigned sequences. For 'Gauss', the Gauss distribution is used.", {"advanced"}); defaults_.setValue("max_nr_iterations", 1000, "Bounds the number of iterations for the EM algorithm when convergence is slow.", {"advanced"}); defaults_.setValidStrings("incorrectly_assigned", {"Gumbel","Gauss"}); defaults_.setValue("neg_log_delta",6, "The negative logarithm of the convergence threshold for the likelihood increase."); defaults_.setValue("outlier_handling","ignore_iqr_outliers", "What to do with outliers:\n" "- ignore_iqr_outliers: ignore outliers outside of 3*IQR from Q1/Q3 for fitting\n" "- set_iqr_to_closest_valid: set IQR-based outliers to the last valid value for fitting\n" "- ignore_extreme_percentiles: ignore everything outside 99th and 1st percentile (also removes equal values like potential censored max values in XTandem)\n" "- none: do nothing"); defaults_.setValidStrings("outlier_handling", {"ignore_iqr_outliers","set_iqr_to_closest_valid","ignore_extreme_percentiles","none"}); defaultsToParam_(); getNegativeGnuplotFormula_ = &PosteriorErrorProbabilityModel::getGumbelGnuplotFormula; getPositiveGnuplotFormula_ = &PosteriorErrorProbabilityModel::getGaussGnuplotFormula; } PosteriorErrorProbabilityModel::~PosteriorErrorProbabilityModel() = default; bool PosteriorErrorProbabilityModel::fitGumbelGauss(std::vector<double>& search_engine_scores, const String& outlier_handling) { // nothing to fit? 
if (search_engine_scores.empty()) { return false; } //------------------------------------------------------------- // Initializing Parameters //------------------------------------------------------------- sort(search_engine_scores.begin(), search_engine_scores.end()); smallest_score_ = search_engine_scores[0]; vector<double> x_scores{search_engine_scores}; //transform to a positive range for (double & d : x_scores) { d += fabs(smallest_score_) + 0.001; } processOutliers_(x_scores, outlier_handling); incorrectly_assigned_fit_gumbel_param_.a = Math::mean(x_scores.begin(), x_scores.begin() + ceil(0.5 * x_scores.size())) + x_scores[0]; incorrectly_assigned_fit_gumbel_param_.b = Math::sd(x_scores.begin(), x_scores.end(), incorrectly_assigned_fit_gumbel_param_.a); negative_prior_ = 0.7; getNegativeGnuplotFormula_ = &PosteriorErrorProbabilityModel::getGumbelGnuplotFormula; getPositiveGnuplotFormula_ = &PosteriorErrorProbabilityModel::getGaussGnuplotFormula; Size x_score_start = std::min(x_scores.size() - 1, (Size) ceil(x_scores.size() * 0.7)); // if only one score is present, ceil(...) 
will yield 1, which is an invalid index correctly_assigned_fit_param_.x0 = Math::mean(x_scores.begin() + x_score_start, x_scores.end()) + x_scores[x_score_start]; //(gauss_scores.begin()->getX() + (gauss_scores.end()-1)->getX())/2; correctly_assigned_fit_param_.sigma = incorrectly_assigned_fit_gumbel_param_.b; correctly_assigned_fit_param_.A = 1.0 / sqrt(2.0 * Constants::PI * pow(correctly_assigned_fit_param_.sigma, 2.0)); //------------------------------------------------------------- // create files for output //------------------------------------------------------------- bool output_plots = (String(param_.getValue("out_plot").toString()).trim().length() > 0); TextFile file; if (output_plots) { // create output directory (if not already present) QDir dir(String(param_.getValue("out_plot").toString()).toQString()); if (!dir.cdUp()) { OPENMS_LOG_ERROR << "Could not navigate to output directory for plots from '" << String(dir.dirName()) << "'." << std::endl; return false; } if (!dir.exists() && !dir.mkpath(".")) { OPENMS_LOG_ERROR << "Could not create output directory for plots '" << String(dir.dirName()) << "'." 
<< std::endl; return false; } // file = initPlots(x_scores); } //------------------------------------------------------------- // Estimate Parameters - EM algorithm //------------------------------------------------------------- bool good_fit = true; bool stop_em_init = false; Int max_itns = param_.getValue("max_nr_iterations"); int delta = param_.getValue("neg_log_delta"); int itns = 0; vector<double> incorrect_log_density, correct_log_density; fillLogDensitiesGumbel(x_scores, incorrect_log_density, correct_log_density); vector<double> bins; vector<double> incorrect_posteriors; double maxlike = computeLLAndIncorrectPosteriorsFromLogDensities(incorrect_log_density, correct_log_density, incorrect_posteriors); double sumIncorrectPosteriors = Math::sum(incorrect_posteriors.begin(),incorrect_posteriors.end()); double sumCorrectPosteriors = x_scores.size() - sumIncorrectPosteriors; OpenMS::Math::GumbelMaxLikelihoodFitter gmlf{incorrectly_assigned_fit_gumbel_param_}; do { //------------------------------------------------------------- // E-STEP (gauss) double newGaussMean = 0.0; auto the_x = x_scores.cbegin(); for (auto incorrect = incorrect_posteriors.cbegin(); incorrect != incorrect_posteriors.cend(); ++incorrect, ++the_x) { newGaussMean += (1.-*incorrect) * *the_x; } newGaussMean /= sumCorrectPosteriors; double newGaussSigma = 0.0; the_x = x_scores.cbegin(); for (auto incorrect = incorrect_posteriors.cbegin(); incorrect != incorrect_posteriors.cend(); ++incorrect, ++the_x) { newGaussSigma += (1. - *incorrect) * pow((*the_x) - newGaussMean, 2); } newGaussSigma = sqrt(newGaussSigma/sumCorrectPosteriors); GumbelMaxLikelihoodFitter::GumbelDistributionFitResult newGumbelParams = gmlf.fitWeighted(x_scores, incorrect_posteriors); if (newGumbelParams.b <= 0 || std::isnan(newGumbelParams.b)) { OPENMS_LOG_WARN << "Warning: encountered impossible standard deviations. Aborting fit." 
<< std::endl; break; } // update parameters correctly_assigned_fit_param_.x0 = newGaussMean; correctly_assigned_fit_param_.sigma = newGaussSigma; correctly_assigned_fit_param_.A = 1 / sqrt(2 * Constants::PI * pow(newGaussSigma, 2)); incorrectly_assigned_fit_gumbel_param_ = newGumbelParams; // compute new prior probabilities negative peptides fillLogDensitiesGumbel(x_scores, incorrect_log_density, correct_log_density); double new_maxlike = computeLLAndIncorrectPosteriorsFromLogDensities(incorrect_log_density, correct_log_density, incorrect_posteriors); sumIncorrectPosteriors = Math::sum(incorrect_posteriors.begin(),incorrect_posteriors.end()); sumCorrectPosteriors = x_scores.size() - sumIncorrectPosteriors; negative_prior_ = sumIncorrectPosteriors / x_scores.size(); if (std::isnan(new_maxlike - maxlike)) { OPENMS_LOG_WARN << "Numerical instabilities. Aborting." << endl; return false; } // check termination criterion if ((new_maxlike - maxlike) < pow(10.0, -delta) || itns >= max_itns) { if (itns >= max_itns) { OPENMS_LOG_WARN << "Number of iterations exceeded. Convergence criterion not met. Last log likelihood increase: " << (new_maxlike - maxlike) << endl; OPENMS_LOG_WARN << "Algorithm returns probabilities for suboptimal fit. You might want to try raising the max. number of iterations and have a look at the distribution." << endl; } stop_em_init = true; good_fit = true; } else if (new_maxlike < maxlike) { OPENMS_LOG_WARN << "Log Likelihood of fit decreased: " << (new_maxlike - maxlike) << ". Abort fitting. 
Please check" " the gnuplot scripts and adapt search engine settings or outlier settings of IDPEP."<< endl; stop_em_init = true; good_fit = false; } if (output_plots) { String formula1, formula2, formula3; formula1 = ((this)->*(getNegativeGnuplotFormula_))(incorrectly_assigned_fit_param_) + "* " + String(negative_prior_); //String(incorrectly_assigned_fit_param_.A) +" * exp(-(x - " + String(incorrectly_assigned_fit_param_.x0) + ") ** 2 / 2 / (" + String(incorrectly_assigned_fit_param_.sigma) + ") ** 2)"+ "*" + String(negative_prior_); formula2 = ((this)->*(getPositiveGnuplotFormula_))(correctly_assigned_fit_param_) + "* (1 - " + String(negative_prior_) + ")"; //String(correctly_assigned_fit_param_.A) +" * exp(-(x - " + String(correctly_assigned_fit_param_.x0) + ") ** 2 / 2 / (" + String(correctly_assigned_fit_param_.sigma) + ") ** 2)"+ "* (1 - " + String(negative_prior_) + ")"; formula3 = getBothGnuplotFormula(incorrectly_assigned_fit_param_, correctly_assigned_fit_param_); // important: use single quotes for paths, since otherwise backslashes will not be accepted on Windows! 
file.addLine("plot '" + (std::string)param_.getValue("out_plot") + "_scores.txt' with boxes, " + formula1 + " , " + formula2 + " , " + formula3); } //update maximum likelihood maxlike = new_maxlike; ++itns; } while (!stop_em_init); //------------------------------------------------------------- // Finished fitting //------------------------------------------------------------- max_incorrectly_ = getGumbel_(incorrectly_assigned_fit_param_.x0, incorrectly_assigned_fit_param_); max_correctly_ = correctly_assigned_fit_param_.eval(correctly_assigned_fit_param_.x0); if (output_plots) { String formula1 = ((this)->*(getNegativeGnuplotFormula_))(incorrectly_assigned_fit_param_) + "*" + String(negative_prior_); //String(incorrectly_assigned_fit_param_.A) +" * exp(-(x - " + String(incorrectly_assigned_fit_param_.x0) + ") ** 2 / 2 / (" + String(incorrectly_assigned_fit_param_.sigma) + ") ** 2)"+ "*" + String(negative_prior_); String formula2 = ((this)->*(getPositiveGnuplotFormula_))(correctly_assigned_fit_param_) + "* (1 - " + String(negative_prior_) + ")"; // String(correctly_assigned_fit_param_.A) +" * exp(-(x - " + String(correctly_assigned_fit_param_.x0) + ") ** 2 / 2 / (" + String(correctly_assigned_fit_param_.sigma) + ") ** 2)"+ "* (1 - " + String(negative_prior_) + ")"; String formula3 = getBothGnuplotFormula(incorrectly_assigned_fit_param_, correctly_assigned_fit_param_); // important: use single quotes for paths, since otherwise backslashes will not be accepted on Windows! file.addLine("plot '" + (std::string)param_.getValue("out_plot") + "_scores.txt' with boxes, " + formula1 + " , " + formula2 + " , " + formula3); file.store((std::string)param_.getValue("out_plot")); tryGnuplot((std::string)param_.getValue("out_plot")); } return good_fit; } bool PosteriorErrorProbabilityModel::fit(std::vector<double>& search_engine_scores, const String& outlier_handling) { // nothing to fit? 
if (search_engine_scores.empty()) { return false; } //------------------------------------------------------------- // Initializing Parameters //------------------------------------------------------------- sort(search_engine_scores.begin(), search_engine_scores.end()); smallest_score_ = search_engine_scores[0]; vector<double> x_scores{search_engine_scores}; //transform to a positive range for (double & d : x_scores) { d += fabs(smallest_score_) + 0.001; } processOutliers_(x_scores, outlier_handling); negative_prior_ = 0.7; if (param_.getValue("incorrectly_assigned") == "Gumbel") { incorrectly_assigned_fit_param_.x0 = Math::mean(x_scores.begin(), x_scores.begin() + ceil(0.5 * x_scores.size())) + x_scores[0]; incorrectly_assigned_fit_param_.sigma = Math::sd(x_scores.begin(), x_scores.end(), incorrectly_assigned_fit_param_.x0); incorrectly_assigned_fit_param_.A = 1.0 / sqrt(2.0 * Constants::PI * pow(incorrectly_assigned_fit_param_.sigma, 2.0)); //TODO: Currently, the fit is calculated using the Gauss. getNegativeGnuplotFormula_ = &PosteriorErrorProbabilityModel::getGumbelGnuplotFormula; } else { incorrectly_assigned_fit_param_.x0 = Math::mean(x_scores.begin(), x_scores.begin() + ceil(0.5 * x_scores.size())) + x_scores[0]; incorrectly_assigned_fit_param_.sigma = Math::sd(x_scores.begin(), x_scores.end(), incorrectly_assigned_fit_param_.x0); incorrectly_assigned_fit_param_.A = 1.0 / sqrt(2.0 * Constants::PI * pow(incorrectly_assigned_fit_param_.sigma, 2.0)); getNegativeGnuplotFormula_ = &PosteriorErrorProbabilityModel::getGaussGnuplotFormula; } getPositiveGnuplotFormula_ = &PosteriorErrorProbabilityModel::getGaussGnuplotFormula; Size x_score_start = std::min(x_scores.size() - 1, (Size) ceil(x_scores.size() * 0.7)); // if only one score is present, ceil(...) 
will yield 1, which is an invalid index correctly_assigned_fit_param_.x0 = Math::mean(x_scores.begin() + x_score_start, x_scores.end()) + x_scores[x_score_start]; //(gauss_scores.begin()->getX() + (gauss_scores.end()-1)->getX())/2; correctly_assigned_fit_param_.sigma = incorrectly_assigned_fit_param_.sigma; correctly_assigned_fit_param_.A = 1.0 / sqrt(2.0 * Constants::PI * pow(correctly_assigned_fit_param_.sigma, 2.0)); //------------------------------------------------------------- // create files for output //------------------------------------------------------------- bool output_plots = (String(param_.getValue("out_plot").toString()).trim().length() > 0); TextFile file; if (output_plots) { // create output directory (if not already present) QDir dir(String(param_.getValue("out_plot").toString()).toQString()); if (!dir.cdUp()) { OPENMS_LOG_ERROR << "Could not navigate to output directory for plots from '" << String(dir.dirName()) << "'." << std::endl; return false; } if (!dir.exists() && !dir.mkpath(".")) { OPENMS_LOG_ERROR << "Could not create output directory for plots '" << String(dir.dirName()) << "'." 
<< std::endl; return false; } // file = initPlots(x_scores); } //------------------------------------------------------------- // Estimate Parameters - EM algorithm //------------------------------------------------------------- bool stop_em_init = false; bool good_fit = true; Int max_itns = param_.getValue("max_nr_iterations"); int delta = param_.getValue("neg_log_delta"); int itns = 0; vector<double> incorrect_log_density, correct_log_density; fillLogDensities(x_scores, incorrect_log_density, correct_log_density); vector<double> incorrect_posteriors; double maxlike = computeLLAndIncorrectPosteriorsFromLogDensities(incorrect_log_density, correct_log_density, incorrect_posteriors); double sumIncorrectPosteriors = Math::sum(incorrect_posteriors.begin(),incorrect_posteriors.end()); double sumCorrectPosteriors = x_scores.size() - sumIncorrectPosteriors; do { //------------------------------------------------------------- // E-STEP std::pair<double,double> newMeans = pos_neg_mean_weighted_posteriors(x_scores, incorrect_posteriors); newMeans.first /= sumCorrectPosteriors; newMeans.second /= sumIncorrectPosteriors; //new standard deviation std::pair<double,double> newSigmas = pos_neg_sigma_weighted_posteriors(x_scores, incorrect_posteriors, newMeans); newSigmas.first = sqrt(newSigmas.first/sumCorrectPosteriors); newSigmas.second = sqrt(newSigmas.second/sumIncorrectPosteriors); if (newSigmas.first <= 0 || newSigmas.second <= 0 || std::isnan(newSigmas.first) || std::isnan(newSigmas.second) ) { OPENMS_LOG_WARN << "Warning: encountered impossible standard deviations. Aborting fit." 
<< std::endl; break; } // update parameters correctly_assigned_fit_param_.x0 = newMeans.first; incorrectly_assigned_fit_param_.x0 = newMeans.second; correctly_assigned_fit_param_.sigma = newSigmas.first; correctly_assigned_fit_param_.A = 1 / sqrt(2 * Constants::PI * pow(correctly_assigned_fit_param_.sigma, 2)); incorrectly_assigned_fit_param_.sigma = newSigmas.second; incorrectly_assigned_fit_param_.A = 1 / sqrt(2 * Constants::PI * pow(incorrectly_assigned_fit_param_.sigma, 2)); // compute new prior probabilities negative peptides fillLogDensities(x_scores, incorrect_log_density, correct_log_density); double new_maxlike = computeLLAndIncorrectPosteriorsFromLogDensities(incorrect_log_density, correct_log_density, incorrect_posteriors); sumIncorrectPosteriors = Math::sum(incorrect_posteriors.begin(),incorrect_posteriors.end()); sumCorrectPosteriors = x_scores.size() - sumIncorrectPosteriors; negative_prior_ = sumIncorrectPosteriors / x_scores.size(); if (std::isnan(new_maxlike - maxlike)) { OPENMS_LOG_WARN << "Numerical instabilities. Aborting." << endl; return false; } // check termination criterion if ((new_maxlike - maxlike) < pow(10.0, -delta) || itns >= max_itns) { if (itns >= max_itns) { OPENMS_LOG_WARN << "Number of iterations exceeded. Convergence criterion not met. Last log likelihood increase: " << (new_maxlike - maxlike) << endl; OPENMS_LOG_WARN << "Algorithm returns probabilities for suboptimal fit. You might want to try raising the max. number of iterations and have a look at the distribution." << endl; } stop_em_init = true; good_fit = true; } else if (new_maxlike < maxlike) { OPENMS_LOG_WARN << "Log Likelihood of fit decreased: " << (new_maxlike - maxlike) << ". Abort fitting. 
Please check" " the gnuplot scripts and adapt search engine settings or outlier settings of IDPEP."<< endl; stop_em_init = true; good_fit = false; } if (output_plots) { String formula1, formula2, formula3; formula1 = ((this)->*(getNegativeGnuplotFormula_))(incorrectly_assigned_fit_param_) + "* " + String(negative_prior_); //String(incorrectly_assigned_fit_param_.A) +" * exp(-(x - " + String(incorrectly_assigned_fit_param_.x0) + ") ** 2 / 2 / (" + String(incorrectly_assigned_fit_param_.sigma) + ") ** 2)"+ "*" + String(negative_prior_); formula2 = ((this)->*(getPositiveGnuplotFormula_))(correctly_assigned_fit_param_) + "* (1 - " + String(negative_prior_) + ")"; //String(correctly_assigned_fit_param_.A) +" * exp(-(x - " + String(correctly_assigned_fit_param_.x0) + ") ** 2 / 2 / (" + String(correctly_assigned_fit_param_.sigma) + ") ** 2)"+ "* (1 - " + String(negative_prior_) + ")"; formula3 = getBothGnuplotFormula(incorrectly_assigned_fit_param_, correctly_assigned_fit_param_); // important: use single quotes for paths, since otherwise backslashes will not be accepted on Windows! 
file.addLine("plot '" + (std::string)param_.getValue("out_plot") + "_scores.txt' with boxes, " + formula1 + " , " + formula2 + " , " + formula3); } //update maximum likelihood maxlike = new_maxlike; ++itns; } while (!stop_em_init); //------------------------------------------------------------- // Finished fitting //------------------------------------------------------------- if (param_.getValue("incorrectly_assigned") == "Gumbel") { max_incorrectly_ = getGumbel_(incorrectly_assigned_fit_param_.x0, incorrectly_assigned_fit_param_); } else { max_incorrectly_ = incorrectly_assigned_fit_param_.eval(incorrectly_assigned_fit_param_.x0); } max_correctly_ = correctly_assigned_fit_param_.eval(correctly_assigned_fit_param_.x0); if (output_plots) { String formula1 = ((this)->*(getNegativeGnuplotFormula_))(incorrectly_assigned_fit_param_) + "*" + String(negative_prior_); //String(incorrectly_assigned_fit_param_.A) +" * exp(-(x - " + String(incorrectly_assigned_fit_param_.x0) + ") ** 2 / 2 / (" + String(incorrectly_assigned_fit_param_.sigma) + ") ** 2)"+ "*" + String(negative_prior_); String formula2 = ((this)->*(getPositiveGnuplotFormula_))(correctly_assigned_fit_param_) + "* (1 - " + String(negative_prior_) + ")"; // String(correctly_assigned_fit_param_.A) +" * exp(-(x - " + String(correctly_assigned_fit_param_.x0) + ") ** 2 / 2 / (" + String(correctly_assigned_fit_param_.sigma) + ") ** 2)"+ "* (1 - " + String(negative_prior_) + ")"; String formula3 = getBothGnuplotFormula(incorrectly_assigned_fit_param_, correctly_assigned_fit_param_); // important: use single quotes for paths, since otherwise backslashes will not be accepted on Windows! 
file.addLine("plot '" + (std::string)param_.getValue("out_plot") + "_scores.txt' with boxes, " + formula1 + " , " + formula2 + " , " + formula3); file.store((std::string)param_.getValue("out_plot")); tryGnuplot((std::string)param_.getValue("out_plot")); } return good_fit; } bool PosteriorErrorProbabilityModel::fit(std::vector<double>& search_engine_scores, vector<double>& probabilities, const String& outlier_handling) { bool return_value = fit(search_engine_scores, outlier_handling); if (!return_value) { return false; } probabilities = std::vector<double>(search_engine_scores); for (double & p : probabilities) { p = computeProbability(p); } return true; } void PosteriorErrorProbabilityModel::fillDensities(const vector<double>& x_scores, vector<double>& incorrect_density, vector<double>& correct_density) { if (incorrect_density.size() != x_scores.size()) { incorrect_density.resize(x_scores.size()); correct_density.resize(x_scores.size()); } auto incorrect(incorrect_density.begin()); auto correct(correct_density.begin()); for (double const & score : x_scores) { // TODO: incorrect is currently filled with gauss as fitting gumble is not supported *incorrect = incorrectly_assigned_fit_param_.eval(score); *correct = correctly_assigned_fit_param_.eval(score); ++incorrect; ++correct; } } void PosteriorErrorProbabilityModel::fillLogDensitiesGumbel(const vector<double>& x_scores, vector<double>& incorrect_density, vector<double>& correct_density) { if (incorrect_density.size() != x_scores.size()) { incorrect_density.resize(x_scores.size()); correct_density.resize(x_scores.size()); } auto incorrect(incorrect_density.begin()); auto correct(correct_density.begin()); for (double const & score : x_scores) { *incorrect = incorrectly_assigned_fit_gumbel_param_.log_eval_no_normalize(score); *correct = correctly_assigned_fit_param_.log_eval_no_normalize(score); ++incorrect; ++correct; } } void PosteriorErrorProbabilityModel::fillLogDensities(const vector<double>& x_scores, 
vector<double>& incorrect_density, vector<double>& correct_density) { if (incorrect_density.size() != x_scores.size()) { incorrect_density.resize(x_scores.size()); correct_density.resize(x_scores.size()); } auto incorrect(incorrect_density.begin()); auto correct(correct_density.begin()); for (double const & score : x_scores) { // TODO: incorrect is currently filled with gauss as fitting gumble is not supported *incorrect = incorrectly_assigned_fit_param_.log_eval_no_normalize(score); *correct = correctly_assigned_fit_param_.log_eval_no_normalize(score); ++incorrect; ++correct; } } double PosteriorErrorProbabilityModel::computeLogLikelihood(const vector<double>& incorrect_density, const vector<double>& correct_density) const { double maxlike(0); auto incorrect = incorrect_density.cbegin(); for (auto correct = correct_density.cbegin(); correct < correct_density.cend(); ++correct, ++incorrect) { maxlike += log10(negative_prior_ * (*incorrect) + (1 - negative_prior_) * (*correct)); } return maxlike; } double PosteriorErrorProbabilityModel::computeLLAndIncorrectPosteriorsFromLogDensities( const vector<double>& incorrect_log_density, const vector<double>& correct_log_density, vector<double>& incorrect_posterior) const { double loglikelihood = 0.0; double log_prior_pos = log(1. 
- negative_prior_); double log_prior_neg = log(negative_prior_); auto incorrect = incorrect_log_density.cbegin(); if (incorrect_posterior.size() != incorrect_log_density.size()) { incorrect_posterior.resize(incorrect_log_density.size()); } auto incorrect_posterior_it = incorrect_posterior.begin(); for (auto correct = correct_log_density.cbegin(); correct < correct_log_density.cend(); ++correct, ++incorrect, ++incorrect_posterior_it) { double log_resp_correct = log_prior_pos + *correct; double log_resp_incorrect = log_prior_neg + *incorrect; double max_log_resp = std::max(log_resp_correct,log_resp_incorrect); log_resp_correct -= max_log_resp; log_resp_incorrect -= max_log_resp; double resp_correct = exp(log_resp_correct); double resp_incorrect = exp(log_resp_incorrect); double sum = resp_correct + resp_incorrect; // normalize *incorrect_posterior_it = resp_incorrect / sum; //TODO can we somehow stay in log space (i.e. fill as log posteriors?) loglikelihood += max_log_resp + log(sum); } return loglikelihood; } std::pair<double,double> PosteriorErrorProbabilityModel::pos_neg_mean_weighted_posteriors(const vector<double>& x_scores, const vector<double>& incorrect_posteriors) { double pos_x0(0); double neg_x0(0); auto the_x = x_scores.cbegin(); for (auto incorrect = incorrect_posteriors.cbegin(); incorrect < incorrect_posteriors.end(); ++incorrect, ++the_x) { pos_x0 += (1. - *incorrect) * (*the_x); neg_x0 += (*incorrect) * (*the_x); } return {pos_x0,neg_x0}; } std::pair<double,double> PosteriorErrorProbabilityModel::pos_neg_sigma_weighted_posteriors( const vector<double>& x_scores, const vector<double>& incorrect_posteriors, const std::pair<double,double>& pos_neg_mean) { double pos_sigma(0); double neg_sigma(0); auto the_x = x_scores.cbegin(); for (auto incorrect = incorrect_posteriors.cbegin(); incorrect < incorrect_posteriors.end(); ++incorrect, ++the_x) { pos_sigma += (1. 
- *incorrect) * pow((*the_x) - pos_neg_mean.first, 2); neg_sigma += (*incorrect) * pow((*the_x) - pos_neg_mean.second, 2); } return {pos_sigma, neg_sigma}; } double PosteriorErrorProbabilityModel::computeProbability(double score) const { // apply the same transformation that was applied before fitting score = score + fabs(smallest_score_) + 0.001; double x_neg, x_pos; // the score is smaller than the peak of incorrectly assigned sequences. // To ensure that the probabilities wont rise again use the incorrectly assigned peak for computation if (score < incorrectly_assigned_fit_param_.x0) { x_neg = max_incorrectly_; x_pos = correctly_assigned_fit_param_.eval(score); } // same as above. However, this time to ensure that probabilities wont drop again. //TODO this does not consider the possibility of using Gauss as negative function anymore! Confusing at best! else if (score > correctly_assigned_fit_param_.x0) { x_neg = getGumbel_(score, incorrectly_assigned_fit_param_); x_pos = max_correctly_; } // if it's in-between use the normal formula else { x_neg = getGumbel_(score, incorrectly_assigned_fit_param_); x_pos = correctly_assigned_fit_param_.eval(score); } return (negative_prior_ * x_neg) / ((negative_prior_ * x_neg) + (1 - negative_prior_) * x_pos); } TextFile PosteriorErrorProbabilityModel::initPlots(vector<double>& x_scores) { std::vector<DPosition<2> > points; Int number_of_bins = param_.getValue("number_of_bins"); points.resize(number_of_bins); DPosition<2> temp; double dividing_score = (x_scores.back() - x_scores[0]) / number_of_bins; temp.setX(dividing_score / 2); temp.setY(0); Int bin = 0; points[bin] = temp; double temp_divider = dividing_score; for (std::vector<double>::iterator it = x_scores.begin(); it < x_scores.end(); ++it) { if (temp_divider - *it >= 0 && bin < number_of_bins - 1) { points[bin].setY(points[bin].getY() + 1); } else if (bin == number_of_bins - 1) { points[bin].setY(points[bin].getY() + 1); } else { temp.setX((temp_divider + temp_divider + 
dividing_score) / 2); temp.setY(1); ++bin; points[bin] = temp; temp_divider += dividing_score; } } TextFile data_points; for (DPosition<2>& dp : points) { dp.setY(dp.getY() / (x_scores.size() * dividing_score)); data_points << (String(dp.getX()) + "\t" + dp.getY()); } data_points.store((std::string)param_.getValue("out_plot") + "_scores.txt"); TextFile file; file << "set terminal pdf color solid linewidth 2.0 rounded"; //file<<"set style empty solid 0.5 border -1"; //file<<"set style function lines"; file << "set xlabel \"discriminant score\""; file << "set ylabel \"density\""; //TODO: file<<"set title "; file << "set key off"; // important: use single quotes for paths, since otherwise backslashes will not be accepted on Windows! file << "set output '" + (std::string)param_.getValue("out_plot") + ".pdf'"; String formula1 = ((this)->*(getNegativeGnuplotFormula_))(incorrectly_assigned_fit_param_) + "* " + String(negative_prior_); //String(incorrectly_assigned_fit_param_.A) +" * exp(-(x - " + String(incorrectly_assigned_fit_param_.x0) + ") ** 2 / 2 / (" + String(incorrectly_assigned_fit_param_.sigma) + ") ** 2)"+ "*" + String(negative_prior_); String formula2 = ((this)->*(getPositiveGnuplotFormula_))(correctly_assigned_fit_param_) + "* (1 - " + String(negative_prior_) + ")"; //String(correctly_assigned_fit_param_.A) +" * exp(-(x - " + String(correctly_assigned_fit_param_.x0) + ") ** 2 / 2 / (" + String(correctly_assigned_fit_param_.sigma) + ") ** 2)"+ "* (1 - " + String(negative_prior_) + ")"; // important: use single quotes for paths, since otherwise backslashes will not be accepted on Windows! file << ("plot '" + (std::string)param_.getValue("out_plot") + "_scores.txt' with boxes, " + formula1 + " , " + formula2); return file; } //TODO those functions should be members of the Fitter/Function classes! 
const String PosteriorErrorProbabilityModel::getGumbelGnuplotFormula(const GaussFitter::GaussFitResult& params) const { // build a formula with the fitted parameters for gnuplot stringstream formula; formula << "(1/" << params.sigma << ") * " << "exp(( " << params.x0 << "- x)/" << params.sigma << ") * exp(-exp((" << params.x0 << " - x)/" << params.sigma << "))"; return formula.str(); } const String PosteriorErrorProbabilityModel::getGaussGnuplotFormula(const GaussFitter::GaussFitResult& params) const { stringstream formula; formula << params.A << " * exp(-(x - " << params.x0 << ") ** 2 / 2 / (" << params.sigma << ") ** 2)"; return formula.str(); } const String PosteriorErrorProbabilityModel::getBothGnuplotFormula(const GaussFitter::GaussFitResult& incorrect, const GaussFitter::GaussFitResult& correct) const { stringstream formula; formula << negative_prior_ << "*" << ((this)->*(getNegativeGnuplotFormula_))(incorrect) << " + (1-" << negative_prior_ << ")*" << ((this)->*(getPositiveGnuplotFormula_))(correct); return formula.str(); } void PosteriorErrorProbabilityModel::plotTargetDecoyEstimation(vector<double>& target, vector<double>& decoy) { if (target.empty() || decoy.empty()) { StringList empty; if (target.empty()) { empty.push_back("target"); } if (decoy.empty()) { empty.push_back("decoy"); } OPENMS_LOG_WARN << "Target-Decoy plot was called, but '" << ListUtils::concatenate(empty, "' and '") << "' has no data! Unable to create a target-decoy plot." 
<< std::endl; return; } Int number_of_bins = param_.getValue("number_of_bins"); std::vector<DPosition<3> > points(number_of_bins); DPosition<3> temp; sort(target.begin(), target.end()); sort(decoy.begin(), decoy.end()); double dividing_score = (max(target.back(), decoy.back()) /*scores.back()*/ - min(target[0], decoy[0]) /*scores[0]*/) / number_of_bins; temp[0] = (dividing_score / 2); temp[1] = 0; temp[2] = 0; Int bin = 0; points[bin] = temp; double temp_divider = dividing_score; for (std::vector<double>::iterator it = target.begin(); it < target.end(); ++it) { *it = *it + fabs(smallest_score_) + 0.001; if (temp_divider - *it >= 0 && bin < number_of_bins - 1) { points[bin][1] += 1; } else if (bin == number_of_bins - 1) { points[bin][1] += 1; } else { temp[0] = ((temp_divider + temp_divider + dividing_score) / 2); temp[1] = 1; ++bin; points[bin] = temp; temp_divider += dividing_score; } } bin = 0; temp_divider = dividing_score; for (std::vector<double>::iterator it = decoy.begin(); it < decoy.end(); ++it) { *it = *it + fabs(smallest_score_) + 0.001; if (temp_divider - *it >= 0 && bin < number_of_bins - 1) { points[bin][2] += 1; } else if (bin == number_of_bins - 1) { points[bin][2] += 1; } else { // temp[0] = ((temp_divider + temp_divider + dividing_score)/2); // temp[2] = 1; ++bin; points[bin][2] = 1; temp_divider += dividing_score; } } TextFile data_points; for (DPosition<3>& dpx : points) { (dpx)[1] = ((dpx)[1] / ((decoy.size() + target.size()) * dividing_score)); (dpx)[2] = ((dpx)[2] / ((decoy.size() + target.size()) * dividing_score)); String temp_ = (dpx)[0]; temp_ += "\t"; temp_ += (dpx)[1]; temp_ += "\t"; temp_ += (dpx)[2]; data_points << temp_; } data_points.store((std::string)param_.getValue("out_plot") + "_target_decoy_scores.txt"); TextFile file; file << "set terminal pdf color solid linewidth 2.0 rounded"; //file<<"set style empty solid 0.5 border -1"; //file<<"set style function lines"; file << "set xlabel \"discriminant score\""; file << "set ylabel 
\"density\""; //TODO: file<<"set title "; file << "set key off"; // important: use single quotes for paths, since otherwise backslashes will not be accepted on Windows! file << "set output '" + (std::string)param_.getValue("out_plot") + "_target_decoy.pdf'"; String formula1, formula2; formula1 = getGumbelGnuplotFormula(getIncorrectlyAssignedFitResult()) + "* " + String(getNegativePrior()); //String(incorrectly_assigned_fit_param_.A) +" * exp(-(x - " + String(incorrectly_assigned_fit_param_.x0) + ") ** 2 / 2 / (" + String(incorrectly_assigned_fit_param_.sigma) + ") ** 2)"+ "*" + String(negative_prior_); formula2 = getGaussGnuplotFormula(getCorrectlyAssignedFitResult()) + "* (1 - " + String(getNegativePrior()) + ")"; //String(correctly_assigned_fit_param_.A) +" * exp(-(x - " + String(correctly_assigned_fit_param_.x0) + ") ** 2 / 2 / (" + String(correctly_assigned_fit_param_.sigma) + ") ** 2)"+ "* (1 - " + String(negative_prior_) + ")"; // important: use single quotes for paths, since otherwise backslashes will not be accepted on Windows! file << ("plot '" + (std::string)param_.getValue("out_plot") + "_target_decoy_scores.txt' using 1:3 with boxes fill solid 0.8 noborder, \"" + (std::string)param_.getValue("out_plot") + "_target_decoy_scores.txt\" using 1:2 with boxes, " + formula1 + " , " + formula2); file.store((std::string)param_.getValue("out_plot") + "_target_decoy"); tryGnuplot((std::string)param_.getValue("out_plot") + "_target_decoy"); } void PosteriorErrorProbabilityModel::tryGnuplot(const String& gp_file) { OPENMS_LOG_INFO << "Attempting to call 'gnuplot' ..."; String cmd = String("gnuplot \"") + gp_file + "\""; if (system(cmd.c_str())) // 0 is success! { OPENMS_LOG_WARN << "Calling 'gnuplot' on '" << gp_file << "' failed. Please create plots manually." << std::endl; } else OPENMS_LOG_INFO << " success!" 
<< std::endl; } void PosteriorErrorProbabilityModel::processOutliers_(vector<double>& x_scores, const String& outlier_handling) const { if (x_scores.empty()) { return; //shouldn't happen, but be safe. } if (outlier_handling != "none") { Size nr_outliers = 0; Size before = x_scores.size(); auto q1 = Math::quantile1st(x_scores.begin(),x_scores.end(), true); auto q3 = Math::quantile3rd(x_scores.begin(),x_scores.end(), true); double iqr = q3 - q1; if (outlier_handling == "ignore_iqr_outliers") { x_scores.erase( std::remove_if(x_scores.begin(),x_scores.end(), [&q1,&q3,&iqr](double x){ return x < q1 - 3 * iqr || x > q3 + 3 * iqr;}), x_scores.end() ); nr_outliers = before - x_scores.size(); } else if (outlier_handling == "set_iqr_to_closest_valid") { auto closest_lower = std::lower_bound(x_scores.begin(), x_scores.end(), q1 - 3 * iqr); auto closest_upper = --std::upper_bound(x_scores.begin(), x_scores.end(), q3 + 3 * iqr); for (auto it = x_scores.begin(); it != closest_lower; ++it) { nr_outliers++; *it = *closest_lower; } auto it = closest_upper; it++; for (; it != x_scores.end(); ++it) { nr_outliers++; *it = *closest_upper; } } else //"ignore_extreme_percentiles" { Size ninetyninth_idx = x_scores.size() * 99.9 / 100.; double ninetyninth_value = x_scores[ninetyninth_idx]; Size first_idx = (x_scores.size() * 1. / 100.) + 1; double first_value = x_scores[first_idx]; x_scores.erase( std::remove_if(x_scores.begin(),x_scores.end(), [&first_value,&ninetyninth_value](double x){ return x <= first_value || x >= ninetyninth_value;}), x_scores.end() ); nr_outliers = before - x_scores.size(); } double outlier_percent = nr_outliers * 100. / before; if (outlier_percent > 2.1) { OPENMS_LOG_WARN << "Warning: " << outlier_percent << "% outliers detected and corrected. 
Please double check" " the score distribution.\n"; } else { std::cout << nr_outliers << " outliers detected.\n"; } } } double PosteriorErrorProbabilityModel::getScore_(const std::vector<String>& requested_score_types, const PeptideHit & hit, const String& actual_score_type) { for (const auto& requested_score_type : requested_score_types) { if (actual_score_type == requested_score_type) { return hit.getScore(); } else { if (hit.metaValueExists(requested_score_type)) { return static_cast<double>(hit.getMetaValue(requested_score_type)); } if (hit.metaValueExists(requested_score_type+"_score")) { return static_cast<double>(hit.getMetaValue(requested_score_type+"_score")); } } } std::cout << actual_score_type << std::endl; throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Expected score type for search engine not found", "None of the expected score types " + ListUtils::concatenate(requested_score_types, ',') + " for search engine found"); return 0.; } double PosteriorErrorProbabilityModel::transformScore_(const String & engine, const PeptideHit & hit, const String& current_score_type) { //TODO implement censoring. 1) if value is below censoring take cumulative density below it, instead of point estimate if (engine == "OMSSA") { return (-1) * log10(getScore_({"OMSSA"}, hit, current_score_type)); //OMSSA??? 
TODO make sure to fix in new ID datastructure } else if (engine == "MYRIMATCH") { return getScore_({"mvh"}, hit, current_score_type); } else if (engine == "XTANDEM") { return (-1) * log10(getScore_({"E-Value"}, hit, current_score_type)); } else if (engine == "MASCOT") { // issue #740: unable to fit data with score 0 if (hit.getScore() == 0.0) { return numeric_limits<double>::quiet_NaN(); } // end issue #740 return (-1) * log10(getScore_({"EValue","expect"}, hit, current_score_type)); } else if (engine == "SPECTRAST") { return 100 * getScore_({"f-val"}, hit, current_score_type); // f-val } else if (engine == "SIMTANDEM") { return (-1) * log10(getScore_({"E-Value"}, hit, current_score_type)); } else if ((engine == "MSGFPLUS") || (engine == "MS-GF+")) { return (-1) * log10(getScore_({"MS:1002053","expect"}, hit, current_score_type)); } else if (engine == "COMET") { return (-1) * log10(getScore_({"MS:1002257","expect"}, hit, current_score_type)); } else if (engine == "SIMPLESEARCHENGINE") { return getScore_({"hyperscore"}, hit, current_score_type); //TODO evaluate transformations } else if (engine == "SAGE") { return getScore_({"hyperscore", "ln(hyperscore)"}, hit, current_score_type); // support hyperscore for backwards compatibility (same as ln(hyperscore)) } else if (engine == "MSFRAGGER") { return (-1) * log10(getScore_({"expect"}, hit, current_score_type)); } throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No parameters for chosen search engine", "The chosen search engine is currently not supported"); } map<String, vector<vector<double>>> PosteriorErrorProbabilityModel::extractAndTransformScores( const vector<ProteinIdentification> & protein_ids, const PeptideIdentificationList & peptide_ids, const bool split_charge, const bool top_hits_only, const bool target_decoy_available, const double fdr_for_targets_smaller) { std::set<Int> charges; const StringList search_engines = {"XTandem","OMSSA","MASCOT","SpectraST","MyriMatch", 
"SimTandem","MSGFPlus","MS-GF+","Comet","MSFragger", "tide-search","Sage","SimpleSearchEngine", "OpenMS/ConsensusID_best","OpenMS/ConsensusID_worst","OpenMS/ConsensusID_average"}; if (split_charge) { // determine different charges in data for (PeptideIdentification const & pep_id : peptide_ids) { const vector<PeptideHit>& hits = pep_id.getHits(); for (PeptideHit const & hit : hits) { charges.insert(hit.getCharge()); } } if (charges.empty()) { throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "'split_charge' is set, but the list of charge states is empty"); } } set<Int>::iterator charge_it = charges.begin(); // charges can be empty, no problem if split_charge is not set map<String, vector<vector<double> > > all_scores; char splitter = ','; // to split the engine from the charge state later on do { vector<double> scores, decoy, target; for (String supported_engine : search_engines) { supported_engine.toUpper(); for (ProteinIdentification const & prot : protein_ids) { String search_engine = prot.getSearchEngine(); if (search_engine.hasPrefix("OpenMS/ConsensusID")) { search_engine = prot.getMetaValue("ConsensusIDBaseSearch"); search_engine = search_engine.prefix(':'); } search_engine.toUpper(); if (supported_engine == search_engine) { for (PeptideIdentification pep : peptide_ids) { // make sure we are comparing peptide and proteins of the same search run if (prot.getIdentifier() == pep.getIdentifier()) { pep.sort(); vector<PeptideHit>& hits = pep.getHits(); if (top_hits_only) { if (!hits.empty() && (!split_charge || hits[0].getCharge() == *charge_it)) { double score = PosteriorErrorProbabilityModel::transformScore_(supported_engine, hits[0], pep.getScoreType()); if (!std::isnan(score)) // issue #740: ignore scores with 0 values, otherwise you will get the error "unable to fit data" { scores.push_back(score); if (target_decoy_available) { if (hits[0].getScore() < fdr_for_targets_smaller) { target.push_back(score); } else { decoy.push_back(score); 
} } } } } else { for (PeptideHit const & hit : hits) { if (!split_charge || (hit.getCharge() == *charge_it)) { double score = PosteriorErrorProbabilityModel::transformScore_(supported_engine, hit, pep.getScoreType()); if (!std::isnan(score)) // issue #740: ignore scores with 0 values, otherwise you will get the error "unable to fit data" { scores.push_back(score); } } } } } } } } if (scores.size() > 2) { vector<vector<double> > tmp; tmp.push_back(scores); tmp.push_back(target); tmp.push_back(decoy); if (split_charge) { String engine_with_charge_state = supported_engine + String(splitter) + String(*charge_it); all_scores.insert(make_pair(engine_with_charge_state, tmp)); } else { all_scores.insert(make_pair(supported_engine, tmp)); } } scores.clear(); target.clear(); decoy.clear(); } if (split_charge) { ++charge_it; } } while (charge_it != charges.end()); return all_scores; } void PosteriorErrorProbabilityModel::updateScores( const PosteriorErrorProbabilityModel & PEP_model, const String & search_engine, const Int charge, const bool prob_correct, const bool split_charge, vector<ProteinIdentification> & protein_ids, PeptideIdentificationList & peptide_ids, bool & unable_to_fit_data, bool & data_might_not_be_well_fit) { String engine(search_engine); unable_to_fit_data = false; data_might_not_be_well_fit = false; engine.toUpper(); for (ProteinIdentification& prot : protein_ids) { String se = prot.getSearchEngine(); se.toUpper(); if (engine == se) { for (PeptideIdentification & pep : peptide_ids) { if (prot.getIdentifier() == pep.getIdentifier()) { String score_type = pep.getScoreType() + "_score"; vector<PeptideHit> hits = pep.getHits(); for (PeptideHit & hit : hits) { if (!split_charge || (hit.getCharge() == charge)) { double score; hit.setMetaValue(score_type, hit.getScore()); score = PosteriorErrorProbabilityModel::transformScore_(engine, hit, pep.getScoreType()); //TODO they should be ignored during fitting already! 
// and in this issue the -log(10^99) should actually be an acceptable value. if (std::isnan(score)) // issue #740: ignore scores with 0 values, otherwise you will get the error "unable to fit data" { score = 1.0; } else { score = PEP_model.computeProbability(score); // invalid score? invalid fit! if ((score < 0.0) || (score > 1.0)) { unable_to_fit_data = true; } //TODO implement something to check the quality of fit and set data_might_not_be_well_fit } hit.setScore(score); if (prob_correct) { hit.setScore(1.0 - score); } else { hit.setScore(score); } } } pep.setHits(hits); } } } } } } // namespace OpenMS // namespace Math
C++
3D
OpenMS/OpenMS
src/openms/source/MATH/STATISTICS/GammaDistributionFitter.cpp
.cpp
4,919
147
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: Andreas Bertsch $ // -------------------------------------------------------------------------- // #include <sstream> #include <iostream> #include <cmath> #include <boost/math/special_functions/gamma.hpp> #include <boost/math/special_functions/digamma.hpp> #include <unsupported/Eigen/NonLinearOptimization> #include <OpenMS/MATH/STATISTICS/GammaDistributionFitter.h> // #define GAMMA_DISTRIBUTION_FITTER_VERBOSE // #undef GAMMA_DISTRIBUTION_FITTER_VERBOSE namespace OpenMS::Math { GammaDistributionFitter::GammaDistributionFitter() : init_param_(1.0, 5.0) { } GammaDistributionFitter::~GammaDistributionFitter() = default; void GammaDistributionFitter::setInitialParameters(const GammaDistributionFitResult& param) { init_param_ = param; } struct GammaFunctor { int inputs() const { return m_inputs; } int values() const { return m_values; } GammaFunctor(unsigned dimensions, const std::vector<DPosition<2> >* data) : m_inputs(dimensions), m_values(static_cast<int>(data->size())), m_data(data) { } int operator()(const Eigen::VectorXd& x, Eigen::VectorXd& fvec) const { double b = x(0); double p = x(1); UInt i = 0; // gamma distribution is only defined for positive parameter values if (b > 0.0 && p > 0.0) { for (std::vector<DPosition<2> >::const_iterator it = m_data->begin(); it != m_data->end(); ++it, ++i) { double the_x = it->getX(); fvec(i) = std::pow(b, p) / std::tgamma(p) * std::pow(the_x, p - 1) * std::exp(-b * the_x) - it->getY(); } } else { for (std::vector<DPosition<2> >::const_iterator it = m_data->begin(); it != m_data->end(); ++it, ++i) { fvec(i) = -it->getY(); } } return 0; } // compute Jacobian matrix for the different parameters int df(const Eigen::VectorXd& x, Eigen::MatrixXd& J) const { double b = x(0); double 
p = x(1); UInt i = 0; // gamma distribution is only defined for positive parameter values if (b > 0.0 && p > 0.0) { for (std::vector<DPosition<2> >::const_iterator it = m_data->begin(); it != m_data->end(); ++it, ++i) { double the_x = it->getX(); // partial deviation regarding b double part_dev_b = std::pow(the_x, p - 1) * std::exp(-the_x * b) / std::tgamma(p) * (p * std::pow(b, p - 1) - the_x * std::pow(b, p)); J(i, 0) = part_dev_b; // partial deviation regarding p double factor = std::exp(-b * the_x) * std::pow(the_x, p - 1) * std::pow(b, p) / std::pow(std::tgamma(p), 2); double argument = (std::log(b) + std::log(the_x)) * std::tgamma(p) - std::tgamma(p) * boost::math::digamma(p); double part_dev_p = factor * argument; J(i, 1) = part_dev_p; } } else { for (std::vector<DPosition<2> >::const_iterator it = m_data->begin(); it != m_data->end(); ++it, ++i) { J(i, 0) = 0.0; J(i, 1) = 0.0; } } return 0; } const int m_inputs, m_values; const std::vector<DPosition<2> >* m_data; }; GammaDistributionFitter::GammaDistributionFitResult GammaDistributionFitter::fit(const std::vector<DPosition<2> >& input) const { Eigen::VectorXd x_init(2); x_init << init_param_.b, init_param_.p; GammaFunctor functor(2, &input); Eigen::LevenbergMarquardt<GammaFunctor> lmSolver(functor); Eigen::LevenbergMarquardtSpace::Status status = lmSolver.minimize(x_init); //the states are poorly documented. after checking the source, we believe that //all states except NotStarted, Running and ImproperInputParameters are good //termination states. 
if (status <= Eigen::LevenbergMarquardtSpace::ImproperInputParameters) { throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "UnableToFit-GammaDistributionFitter", "Could not fit the gamma distribution to the data"); } #ifdef GAMMA_DISTRIBUTION_FITTER_VERBOSE std::stringstream formula; formula << "f(x)=" << "(" << x_init(0) << " ** " << x_init(1) << ") / gamma(" << x_init(1) << ") * x ** (" << x_init(1) << " - 1) * exp(- " << x_init(0) << " * x)"; std::cout << formula.str() << std::endl; #endif return GammaDistributionFitResult(x_init(0), x_init(1)); } } // namespace OpenMS //namespace Math
C++
3D
OpenMS/OpenMS
src/openms/source/MATH/STATISTICS/GumbelMaxLikelihoodFitter.cpp
.cpp
4,641
127
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Julianus Pfeuffer $ // $Authors: Julianus Pfeuffer $ // -------------------------------------------------------------------------- // #include <OpenMS/MATH/STATISTICS/GumbelMaxLikelihoodFitter.h> #include <OpenMS/CONCEPT/Exception.h> #include <unsupported/Eigen/NonLinearOptimization> using namespace std; namespace OpenMS::Math { namespace // anonymous namespace to prevent name clashes with GumbleDistributionFitter { // Generic functor template<typename _Scalar, int NX = Eigen::Dynamic, int NY = Eigen::Dynamic> struct Functor { typedef _Scalar Scalar; enum { InputsAtCompileTime = NX, ValuesAtCompileTime = NY }; typedef Eigen::Matrix<Scalar,InputsAtCompileTime,1> InputType; typedef Eigen::Matrix<Scalar,ValuesAtCompileTime,1> ValueType; typedef Eigen::Matrix<Scalar,ValuesAtCompileTime,InputsAtCompileTime> JacobianType; int m_inputs, m_values; Functor() : m_inputs(InputsAtCompileTime), m_values(ValuesAtCompileTime) {} Functor(int inputs, int values) : m_inputs(inputs), m_values(values) {} int inputs() const { return m_inputs; } int values() const { return m_values; } }; struct GumbelDistributionFunctor : Functor<double> { GumbelDistributionFunctor(const std::vector<double>& data, const std::vector<double>& weights): Functor<double>(2,2), m_data(data), m_weights(weights) { } int operator()(const Eigen::VectorXd &x, Eigen::VectorXd &fvec) const { fvec(0) = 0.0; double sigma = fabs(x(1)); double logsigma = log(sigma); auto wit = m_weights.cbegin(); for (auto it = m_data.cbegin(); it != m_data.cend(); ++it, ++wit) { double diff = (*it - x(0)) / sigma; fvec(0) += *wit * (-logsigma - diff - exp(-diff)); } double foo = -fvec(0); fvec(0) = foo; fvec(1) = 0.0; return 0; } const std::vector<double>& m_data; const std::vector<double>& m_weights; }; } 
GumbelMaxLikelihoodFitter::GumbelDistributionFitResult GumbelMaxLikelihoodFitter::fitWeighted(const std::vector<double> & x, const std::vector<double> & w) { Eigen::VectorXd x_init (2); x_init(0) = init_param_.a; x_init(1) = init_param_.b; GumbelDistributionFunctor functor (x, w); Eigen::NumericalDiff<GumbelDistributionFunctor> numDiff(functor); Eigen::LevenbergMarquardt<Eigen::NumericalDiff<GumbelDistributionFunctor>,double> lm(numDiff); Eigen::LevenbergMarquardtSpace::Status status = lm.minimize(x_init); //the states are poorly documented. after checking the source, we believe that //all states except NotStarted, Running and ImproperInputParameters are good //termination states. if (status <= Eigen::LevenbergMarquardtSpace::Status::ImproperInputParameters) { throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "UnableToFit-GumbelMaxLikelihoodFitter", "Could not fit the gumbel distribution to the data"); } #ifdef GUMBEL_DISTRIBUTION_FITTER_VERBOSE // build a formula with the fitted parameters for gnuplot stringstream formula; formula << "f(x)=" << "(1/" << x_init(1) << ") * " << "exp(( " << x_init(0) << "- x)/" << x_init(1) << ") * exp(-exp((" << x_init(0) << " - x)/" << x_init(1) << "))"; cout << formula.str() << endl; #endif init_param_.a = x_init(0); init_param_.b = fabs(x_init(1)); return {x_init(0), fabs(x_init(1))}; } double GumbelMaxLikelihoodFitter::GumbelDistributionFitResult::log_eval_no_normalize(const double x) const { // -log b is a constant again double diff = (x - a)/b; return -log(b) - diff - exp(- diff); } GumbelMaxLikelihoodFitter::GumbelMaxLikelihoodFitter(): init_param_({0.25, 0.1}) {} GumbelMaxLikelihoodFitter::GumbelMaxLikelihoodFitter(GumbelMaxLikelihoodFitter::GumbelDistributionFitResult init): init_param_(init) {} GumbelMaxLikelihoodFitter::~GumbelMaxLikelihoodFitter() = default; void GumbelMaxLikelihoodFitter::setInitialParameters(const GumbelDistributionFitResult & param) { init_param_ = param; } } // namespace OpenMS 
//namespace Math
C++
3D
OpenMS/OpenMS
src/openms/source/MATH/STATISTICS/GumbelDistributionFitter.cpp
.cpp
4,395
125
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: David Wojnar $
// --------------------------------------------------------------------------
//

#include <unsupported/Eigen/NonLinearOptimization>
#include <OpenMS/MATH/STATISTICS/GumbelDistributionFitter.h>

using namespace std;

// #define GUMBEL_DISTRIBUTION_FITTER_VERBOSE
// #undef GUMBEL_DISTRIBUTION_FITTER_VERBOSE

namespace OpenMS::Math
{
  /// Log-density of the Gumbel distribution at @p x, without the additive
  /// normalization constant (sufficient for likelihood maximization).
  double GumbelDistributionFitter::GumbelDistributionFitResult::log_eval_no_normalize(const double x) const
  {
    // -log b is a constant again
    double diff = (x - a)/b;
    return -log(b) - diff - exp(- diff);
  }

  // default starting point: location 0.25, scale 0.1
  GumbelDistributionFitter::GumbelDistributionFitter()
  {
    init_param_ = GumbelDistributionFitResult(0.25, 0.1);
  }

  GumbelDistributionFitter::~GumbelDistributionFitter() = default;

  /// Sets the starting parameters used by the next fit() call.
  void GumbelDistributionFitter::setInitialParameters(const GumbelDistributionFitResult & param)
  {
    init_param_ = param;
  }

  namespace // anonymous namespace to prevent name clashes with GumbelMaxLikelihoodFitter
  {
    /// Residual functor for Eigen's Levenberg-Marquardt: residuals are
    /// gumbel_pdf(x_i; a, b) - y_i for each data point (x_i, y_i).
    struct GumbelDistributionFunctor
    {
      int inputs() const { return m_inputs; }   // number of parameters (2: a, b)
      int values() const { return m_values; }   // number of data points

      GumbelDistributionFunctor(unsigned dimensions, const std::vector<DPosition<2> >* data) : m_inputs(dimensions), m_values(static_cast<int>(data->size())), m_data(data)
      {
      }

      /// Fills @p fvec with the residuals for parameter vector @p x = (a, b).
      int operator()(const Eigen::VectorXd &x, Eigen::VectorXd &fvec) const
      {
        double a = x(0); //location
        double b = x(1); //scale
        UInt i = 0;
        for (vector<DPosition<2> >::const_iterator it = m_data->begin(); it != m_data->end(); ++it, ++i)
        {
          double the_x = it->getX();
          // z * exp(-z) / b is the Gumbel pdf with z = exp((a - x)/b)
          double z = exp((a - the_x) / b);
          fvec(i) = (z * exp(-1 * z)) / b - it->getY();
        }
        return 0;
      }

      // compute Jacobian matrix for the different parameters
      int df(const Eigen::VectorXd &x, Eigen::MatrixXd &J) const
      {
        double a = x(0);
        double b = x(1);
        UInt i = 0;
        for (vector<DPosition<2> >::const_iterator it = m_data->begin(); it != m_data->end(); ++it, ++i)
        {
          double the_x = it->getX();
          double z = exp((a - the_x) / b);
          double f = z * exp(-1 * z);
          // hand-derived partial derivative of the pdf w.r.t. the location a
          double part_dev_a = (f - pow(z, 2) * exp(-1 * z)) / pow(b, 2);
          J(i,0) = part_dev_a;
          // hand-derived partial derivative of the pdf w.r.t. the scale b
          double dev_z = ((the_x - a) / pow(b, 2));
          double cum = f * dev_z;
          double part_dev_b = ((cum - z * cum) * b - f) / pow(b, 2);
          J(i,1) = part_dev_b;
        }
        return 0;
      }

      const int m_inputs, m_values;
      const std::vector<DPosition<2> >* m_data;   // non-owning pointer to the data being fitted
    };
  }

  /// Least-squares fit of a Gumbel pdf to the (x, y) points in @p input,
  /// starting from init_param_.
  /// @return fitted (a, b); note that, unlike the max-likelihood fitter,
  ///         no fabs() is applied to the scale here
  /// @throws Exception::UnableToFit if the optimizer terminates in a bad state
  GumbelDistributionFitter::GumbelDistributionFitResult GumbelDistributionFitter::fit(vector<DPosition<2> > & input) const
  {
    Eigen::VectorXd x_init (2);
    x_init(0) = init_param_.a;
    x_init(1) = init_param_.b;
    GumbelDistributionFunctor functor (2, &input);
    // the functor provides an analytic Jacobian via df(), so no NumericalDiff wrapper is needed
    Eigen::LevenbergMarquardt<GumbelDistributionFunctor> lmSolver (functor);
    Eigen::LevenbergMarquardtSpace::Status status = lmSolver.minimize(x_init);

    //the states are poorly documented. after checking the source, we believe that
    //all states except NotStarted, Running and ImproperInputParameters are good
    //termination states.
    if (status <= Eigen::LevenbergMarquardtSpace::ImproperInputParameters)
    {
      throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "UnableToFit-GumbelDistributionFitter", "Could not fit the gumbel distribution to the data");
    }

#ifdef GUMBEL_DISTRIBUTION_FITTER_VERBOSE
    // build a formula with the fitted parameters for gnuplot
    stringstream formula;
    formula << "f(x)=" << "(1/" << x_init(1) << ") * " << "exp(( " << x_init(0) << "- x)/" << x_init(1) << ") * exp(-exp((" << x_init(0) << " - x)/" << x_init(1) << "))";
    cout << formula.str() << endl;
#endif

    return {x_init(0), x_init(1)};
  }

} // namespace OpenMS
//namespace Math
C++
3D
OpenMS/OpenMS
src/openms/source/MATH/STATISTICS/GaussFitter.cpp
.cpp
5,421
148
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Chris Bielow $ // $Authors: Andreas Bertsch, Chris Bielow $ // -------------------------------------------------------------------------- // #include <OpenMS/MATH/STATISTICS/GaussFitter.h> #include <boost/math/distributions/normal.hpp> #include <unsupported/Eigen/NonLinearOptimization> using namespace std; // #define GAUSS_FITTER_VERBOSE // #undef GAUSS_FITTER_VERBOSE namespace OpenMS::Math { GaussFitter::GaussFitter() : init_param_(0.06, 3.0, 0.5) { } GaussFitter::~GaussFitter() = default; void GaussFitter::setInitialParameters(const GaussFitResult & param) { init_param_ = param; } struct GaussFunctor { int inputs() const { return m_inputs; } int values() const { return m_values; } GaussFunctor(int dimensions, const std::vector<DPosition<2> >* data) : m_inputs(dimensions), m_values(static_cast<int>(data->size())), m_data(data) {} int operator()(const Eigen::VectorXd &x, Eigen::VectorXd &fvec) const { const double A = x(0); const double x0 = x(1); const double sig = x(2); const double sig2 = 2 * sig * sig; UInt i = 0; for (std::vector<DPosition<2> >::const_iterator it = m_data->begin(); it != m_data->end(); ++it, ++i) { fvec(i) = A * std::exp(- (it->getX() - x0) * (it->getX() - x0) / sig2) - it->getY(); } return 0; } // compute Jacobian matrix for the different parameters int df(const Eigen::VectorXd &x, Eigen::MatrixXd &J) const { const double A = x(0); const double x0 = x(1); const double sig = x(2); const double sig2 = 2 * sig * sig; const double sig3 = 2 * sig2 * sig; UInt i = 0; for (std::vector<DPosition<2> >::const_iterator it = m_data->begin(); it != m_data->end(); ++it, ++i) { const double xd = (it->getX() - x0); const double xd2 = xd*xd; double j0 = std::exp(-1.0 * xd2 / sig2); J(i,0) = j0; J(i,1) = (A * j0 * (-(-2 * it->getX() + 2.0 
* x0) / sig2)); J(i,2) = (A * j0 * (xd2 / sig3)); } return 0; } const int m_inputs, m_values; const std::vector<DPosition<2> >* m_data; }; GaussFitter::GaussFitResult GaussFitter::fit(vector<DPosition<2> > & input) const { Eigen::VectorXd x_init (3); x_init(0) = init_param_.A; x_init(1) = init_param_.x0; x_init(2) = init_param_.sigma; GaussFunctor functor (3, &input); Eigen::LevenbergMarquardt<GaussFunctor> lmSolver (functor); Eigen::LevenbergMarquardtSpace::Status status = lmSolver.minimize(x_init); // the states are poorly documented. after checking the source and // http://www.ultimatepp.org/reference%24Eigen_demo%24en-us.html we believe that // all states except TooManyFunctionEvaluation and ImproperInputParameters are good // termination states. if (status == Eigen::LevenbergMarquardtSpace::ImproperInputParameters || status == Eigen::LevenbergMarquardtSpace::TooManyFunctionEvaluation) { throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "UnableToFit-GaussFitter", "Could not fit the Gaussian to the data: Error " + String(status)); } x_init(2) = fabs(x_init(2)); // sigma can be negative, but |sigma| would actually be the correct solution #ifdef GAUSS_FITTER_VERBOSE std::stringstream formula; formula << "f(x)=" << result.A << " * exp(-(x - " << result.x0 << ") ** 2 / 2 / (" << result.sigma << ") ** 2)"; std::cout << formular.str() << std::endl; #endif return GaussFitResult (x_init(0), x_init(1), x_init(2)); } // static std::vector<double> GaussFitter::eval(const std::vector<double>& evaluation_points, const GaussFitter::GaussFitResult& model) { std::vector<double> out; out.reserve(evaluation_points.size()); boost::math::normal_distribution<> ndf(model.x0, model.sigma); double int0 = model.A / boost::math::pdf(ndf, model.x0); // intensity normalization factor of the max @ x0 (simply multiplying the CDF with A is wrong!) 
for (Size i = 0; i < evaluation_points.size(); ++i) { out.push_back(boost::math::pdf(ndf, evaluation_points[i]) * int0 ); } return out; } double GaussFitter::GaussFitResult::eval(const double x) const { boost::math::normal_distribution<> ndf(x0, sigma); double int0 = A / boost::math::pdf(ndf, x0); // intensity normalization factor of the max @ x0 (simply multiplying the CDF with A is wrong!) return (boost::math::pdf(ndf, x) * int0 ); } double GaussFitter::GaussFitResult::log_eval_no_normalize(const double x) const { //TODO we could cache log sigma but then we would need to make the members private and update log sigma whenever // sigma is reset //TODO for likelihood maximization also the halflogtwopi constant could be removed return -log(sigma) - halflogtwopi - 0.5 * pow((x - x0) / sigma, 2.0); } } // namespace OpenMS //namespace Math
C++
3D
OpenMS/OpenMS
src/openms/source/MATH/STATISTICS/BasicStatistics.cpp
.cpp
341
10
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: $ // -------------------------------------------------------------------------- //
C++
3D
OpenMS/OpenMS
src/openms/source/MATH/STATISTICS/Histogram.cpp
.cpp
400
15
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg$ // $Authors: Marc Sturm $ // -------------------------------------------------------------------------- //OpenMS namespace OpenMS { } // namespace OpenMS
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/String.cpp
.cpp
10,439
541
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg$
// $Authors: Marc Sturm $
// --------------------------------------------------------------------------

#include <OpenMS/DATASTRUCTURES/String.h>
#include <OpenMS/CONCEPT/PrecisionWrapper.h>
#include <OpenMS/DATASTRUCTURES/DataValue.h>
#include <OpenMS/DATASTRUCTURES/StringUtils.h>
#include <OpenMS/DATASTRUCTURES/StringConversions.h>
#include <boost/functional/hash.hpp>

using namespace std;

// Implementation of OpenMS::String. Almost every member is a thin delegation
// to StringUtils (string manipulation) or StringConversions (number
// formatting); this file contains no algorithmic logic of its own.
namespace OpenMS
{
  // shared immutable empty-string instance
  const String String::EMPTY;

  // --- constructors: forward to std::string / StringConversions ---

  String::String() : string() { }

  String::String(const string& s) : string(s) { }

  String::String(const std::string_view& sv) : string(sv) { }

  String::String(const char* s) : string(s) { }

  String::String(const QString& s) : string(s.toStdString()) { }

  String::String(const char* s, SizeType length) : string(s, length) { }

  String::String(const char c) : string(1, c) { }

  String::String(size_t len, char c) : string(len, c) { }

  // numeric constructors format the value via StringConversions::append
  String::String(int i) : string() { StringConversions::append(i, *this); }

  String::String(unsigned int i) : string() { StringConversions::append(i, *this); }

  String::String(short int i) : string() { StringConversions::append(i, *this); }

  String::String(short unsigned int i) : string() { StringConversions::append(i, *this); }

  String::String(long int i) : string() { StringConversions::append(i, *this); }

  String::String(long unsigned int i) : string() { StringConversions::append(i, *this); }

  String::String(long long unsigned int i) : string() { StringConversions::append(i, *this); }

  String::String(long long signed int i) : string() { StringConversions::append(i, *this); }

  // floating-point constructors: full precision or low-precision formatting
  String::String(float f, bool full_precision) : string() { full_precision ? StringConversions::append(f, *this) : StringConversions::appendLowP(f, *this); }

  String::String(double d, bool full_precision) : string() { full_precision ? StringConversions::append(d, *this) : StringConversions::appendLowP(d, *this); }

  String::String(long double ld, bool full_precision) : string() { full_precision ? StringConversions::append(ld, *this) : StringConversions::appendLowP(ld, *this); }

  String::String(const DataValue& d, bool full_precision) : string() { StringConversions::append(d, full_precision, *this); }

  // --- formatting / inspection / slicing: delegated to StringUtils ---

  String String::numberLength(double d, UInt n) { return StringUtils::numberLength(d, n); }

  String String::number(double d, UInt n) { return StringUtils::number(d, n); }

  String& String::fillLeft(char c, UInt size) { return StringUtils::fillLeft(*this, c, size); }

  String& String::fillRight(char c, UInt size) { return StringUtils::fillRight(*this, c, size); }

  bool String::hasPrefix(const String& string) const { return StringUtils::hasPrefix(*this, string); }

  bool String::hasSuffix(const String& string) const { return StringUtils::hasSuffix(*this, string); }

  bool String::hasSubstring(const String& string) const { return StringUtils::hasSubstring(*this, string); }

  bool String::has(Byte byte) const { return StringUtils::has(*this, byte); }

  String String::prefix(SizeType length) const { return StringUtils::prefix(*this, length); }

  String String::suffix(SizeType length) const { return StringUtils::suffix(*this, length); }

  String String::prefix(Int length) const { return StringUtils::prefix(*this, length); }

  String String::suffix(Int length) const { return StringUtils::suffix(*this, length); }

  String String::prefix(char delim) const { return StringUtils::prefix(*this, delim); }

  String String::suffix(char delim) const { return StringUtils::suffix(*this, delim); }

  String String::substr(size_t pos, size_t n) const { return StringUtils::substr(*this, pos, n); }

  String String::chop(Size n) const { return StringUtils::chop(*this, n); }

  String& String::trim() { return StringUtils::trim(*this); }

  bool String::isQuoted(char q) { return StringUtils::isQuoted(*this, q); }

  String& String::quote(char q, QuotingMethod method) { return StringUtils::quote(*this, q, method); }

  String& String::unquote(char q, QuotingMethod method) { return StringUtils::unquote(*this, q, method); }

  String& String::simplify() { return StringUtils::simplify(*this); }

  String String::random(UInt length) { return StringUtils::random(length); }

  String& String::reverse() { return StringUtils::reverse(*this); }

  bool String::split(const char splitter, vector<String>& substrings, bool quote_protect) const { return StringUtils::split(*this, splitter, substrings, quote_protect); }

  bool String::split(const String& splitter, std::vector<String>& substrings) const { return StringUtils::split(*this, splitter, substrings); }

  bool String::split_quoted(const String& splitter, vector<String>& substrings, char q, QuotingMethod method) const { return StringUtils::split_quoted(*this, splitter, substrings, q, method); }

  QString String::toQString() const { return StringUtils::toQString(*this); }

  // dispatches at compile time on the platform width of Int (32 vs 64 bit)
  Int String::toInt() const
  {
    if constexpr (is_same<Int, Int32>::value)
    {
      return StringUtils::toInt32(*this);
    }
    else
    {
      return StringUtils::toInt64(*this);
    }
  }

  Int32 String::toInt32() const { return StringUtils::toInt32(*this); }

  Int64 String::toInt64() const { return StringUtils::toInt64(*this); }

  float String::toFloat() const { return StringUtils::toFloat(*this); }

  double String::toDouble() const { return StringUtils::toDouble(*this); }

  String& String::toUpper() { return StringUtils::toUpper(*this); }

  String& String::firstToUpper() { return StringUtils::firstToUpper(*this); }

  String& String::toLower() { return StringUtils::toLower(*this); }

  String& String::substitute(char from, char to) { return StringUtils::substitute(*this, from, to); }

  String& String::substitute(const String& from, const String& to) { return StringUtils::substitute(*this, from, to); }

  String& String::remove(char what) { return StringUtils::remove(*this, what); }

  String& String::ensureLastChar(char end) { return StringUtils::ensureLastChar(*this, end); }

  String& String::removeWhitespaces() { return StringUtils::removeWhitespaces(*this); }

  ///
  ///// Operators
  ///

  // operator+ overloads return a new String; numeric arguments are appended
  // via StringConversions

  String String::operator+(int i) const { String s(*this); StringConversions::append(i, s); return s; }

  String String::operator+(unsigned int i) const { String s(*this); StringConversions::append(i, s); return s; }

  String String::operator+(short int i) const { String s(*this); StringConversions::append(i, s); return s; }

  String String::operator+(short unsigned int i) const { String s(*this); StringConversions::append(i, s); return s; }

  String String::operator+(long int i) const { String s(*this); StringConversions::append(i, s); return s; }

  String String::operator+(long unsigned int i) const { String s(*this); StringConversions::append(i, s); return s; }

  String String::operator+(long long unsigned int i) const { String s(*this); StringConversions::append(i, s); return s; }

  String String::operator+(float f) const { String s(*this); StringConversions::append(f, s); return s; }

  String String::operator+(double d) const { String s(*this); StringConversions::append(d, s); return s; }

  String String::operator+(long double ld) const { String s(*this); StringConversions::append(ld, s); return s; }

  String String::operator+(char c) const { String tmp(*this); tmp.push_back(c); return tmp; }

  String String::operator+(const char* s) const { String tmp(*this); tmp.append(s); return tmp; }

  String String::operator+(const String& s) const { String tmp(*this); tmp.insert(tmp.end(), s.begin(), s.end()); return tmp; }

  String String::operator+(const std::string& s) const { String tmp(*this); tmp.insert(tmp.end(), s.begin(), s.end()); return tmp; }

  // operator+= overloads append in place and return *this

  String& String::operator+=(int i) { StringConversions::append(i, *this); return *this; }

  String& String::operator+=(unsigned int i) { StringConversions::append(i, *this); return *this; }

  String& String::operator+=(short int i) { StringConversions::append(i, *this); return *this; }

  String& String::operator+=(short unsigned int i) { StringConversions::append(i, *this); return *this; }

  String& String::operator+=(long int i) { StringConversions::append(i, *this); return *this; }

  String& String::operator+=(long unsigned int i) { StringConversions::append(i, *this); return *this; }

  String& String::operator+=(long long unsigned int i) { StringConversions::append(i, *this); return *this; }

  String& String::operator+=(float f) { StringConversions::append(f, *this); return *this; }

  String& String::operator+=(double d) { StringConversions::append(d, *this); return *this; }

  String& String::operator+=(long double d) { StringConversions::append(d, *this); return *this; }

  String& String::operator+=(char c) { this->append(String(c)); return *this; }

  String& String::operator+=(const char* s) { this->append(s); return *this; }

  String& String::operator+=(const String& s) { this->append(s); return *this; }

  String& String::operator+=(const std::string& s) { this->append(s); return *this; }

  // free hash function so String can be used with boost hash-based containers
  ::size_t hash_value(String const& s)
  {
    boost::hash<std::string> hasher;
    return hasher(static_cast<std::string>(s));
  }

} // namespace OpenMS
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/BinaryTreeNode.cpp
.cpp
917
35
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Mathias Walzer $ // $Authors: $ // -------------------------------------------------------------------------- #include <OpenMS/DATASTRUCTURES/BinaryTreeNode.h> namespace OpenMS { BinaryTreeNode::BinaryTreeNode(const Size i, const Size j, const float x) : left_child(i), right_child(j), distance(x) { } BinaryTreeNode::BinaryTreeNode(const BinaryTreeNode& source) = default; BinaryTreeNode::~BinaryTreeNode() = default; BinaryTreeNode& BinaryTreeNode::operator=(const BinaryTreeNode& source) { if (this != &source) { left_child = source.left_child; right_child = source.right_child; distance = source.distance; } return *this; } }
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/CVMappingRule.cpp
.cpp
2,999
122
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Andreas Bertsch $
// --------------------------------------------------------------------------

#include <OpenMS/DATASTRUCTURES/CVMappingRule.h>
#include <OpenMS/DATASTRUCTURES/CVMappingTerm.h>

using namespace std;

// Implementation of CVMappingRule: a plain data holder (identifier, element
// path, requirement level, scope path, combinations logic, list of CV terms)
// with member-wise copy, equality, and accessors.
namespace OpenMS
{
  // CV mapping rule implementation

  // defaults: requirement level MUST, combinations logic OR
  CVMappingRule::CVMappingRule() :
    requirement_level_(CVMappingRule::MUST),
    combinations_logic_(CVMappingRule::OR)
  {
  }

  CVMappingRule::CVMappingRule(const CVMappingRule& rhs) = default;

  CVMappingRule::~CVMappingRule() = default;

  /// Member-wise copy assignment; self-assignment is a no-op.
  CVMappingRule& CVMappingRule::operator=(const CVMappingRule& rhs)
  {
    if (this != &rhs)
    {
      identifier_ = rhs.identifier_;
      element_path_ = rhs.element_path_;
      requirement_level_ = rhs.requirement_level_;
      scope_path_ = rhs.scope_path_;
      combinations_logic_ = rhs.combinations_logic_;
      cv_terms_ = rhs.cv_terms_;
    }
    return *this;
  }

  /// Member-wise equality over all fields.
  bool CVMappingRule::operator==(const CVMappingRule& rhs) const
  {
    return identifier_ == rhs.identifier_ &&
           element_path_ == rhs.element_path_ &&
           requirement_level_ == rhs.requirement_level_ &&
           scope_path_ == rhs.scope_path_ &&
           combinations_logic_ == rhs.combinations_logic_ &&
           cv_terms_ == rhs.cv_terms_;
  }

  bool CVMappingRule::operator!=(const CVMappingRule& rhs) const
  {
    return !(*this == rhs);
  }

  // --- plain accessors ---

  void CVMappingRule::setIdentifier(const String& identifier) { identifier_ = identifier; }

  const String& CVMappingRule::getIdentifier() const { return identifier_; }

  void CVMappingRule::setElementPath(const String& element_path) { element_path_ = element_path; }

  const String& CVMappingRule::getElementPath() const { return element_path_; }

  void CVMappingRule::setRequirementLevel(RequirementLevel level) { requirement_level_ = level; }

  CVMappingRule::RequirementLevel CVMappingRule::getRequirementLevel() const { return requirement_level_; }

  void CVMappingRule::setCombinationsLogic(CombinationsLogic combinations_logic) { combinations_logic_ = combinations_logic; }

  CVMappingRule::CombinationsLogic CVMappingRule::getCombinationsLogic() const { return combinations_logic_; }

  void CVMappingRule::setScopePath(const String& path) { scope_path_ = path; }

  const String& CVMappingRule::getScopePath() const { return scope_path_; }

  void CVMappingRule::setCVTerms(const vector<CVMappingTerm>& cv_terms) { cv_terms_ = cv_terms; }

  const vector<CVMappingTerm>& CVMappingRule::getCVTerms() const { return cv_terms_; }

  /// Appends a single CV term to the rule's term list.
  void CVMappingRule::addCVTerm(const CVMappingTerm& cv_term) { cv_terms_.push_back(cv_term); }

} // namespace OpenMS
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/StringListUtils.cpp
.cpp
2,656
82
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg$ // $Authors: Marc Sturm $ // -------------------------------------------------------------------------- #include <OpenMS/DATASTRUCTURES/StringListUtils.h> #include <boost/mem_fn.hpp> #include <QtCore/QStringList> using namespace std; namespace OpenMS { StringList StringListUtils::fromQStringList(const QStringList& rhs) { StringList sl; sl.reserve(rhs.size()); for (const auto& item : rhs) { sl.push_back(item.toStdString()); } return sl; } void StringListUtils::toUpper(StringList& sl) { std::for_each(sl.begin(), sl.end(), boost::mem_fn(&String::toUpper)); } void StringListUtils::toLower(StringList& sl) { std::for_each(sl.begin(), sl.end(), boost::mem_fn(&String::toLower)); } StringListUtils::Iterator StringListUtils::searchPrefix(const Iterator& start, const Iterator& end, const String& text, bool trim) { return find_if(start, end, PrefixPredicate_(text, trim)); } StringListUtils::ConstIterator StringListUtils::searchPrefix(const ConstIterator& start, const ConstIterator& end, const String& text, bool trim) { return find_if(start, end, PrefixPredicate_(text, trim)); } StringListUtils::ConstIterator StringListUtils::searchPrefix(const StringList& container, const String& text, bool trim) { return searchPrefix(container.begin(), container.end(), text, trim); } StringListUtils::Iterator StringListUtils::searchPrefix(StringList& container, const String& text, bool trim) { return searchPrefix(container.begin(), container.end(), text, trim); } StringListUtils::Iterator StringListUtils::searchSuffix(const Iterator& start, const Iterator& end, const String& text, bool trim) { return find_if(start, end, SuffixPredicate_(text, trim)); } StringListUtils::ConstIterator StringListUtils::searchSuffix(const ConstIterator& start, const 
ConstIterator& end, const String& text, bool trim) { return find_if(start, end, SuffixPredicate_(text, trim)); } StringListUtils::ConstIterator StringListUtils::searchSuffix(const StringList& container, const String& text, bool trim) { return searchSuffix(container.begin(), container.end(), text, trim); } StringListUtils::Iterator StringListUtils::searchSuffix(StringList& container, const String& text, bool trim) { return searchSuffix(container.begin(), container.end(), text, trim); } } // namespace OpenMS
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/CVReference.cpp
.cpp
1,392
63
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: Andreas Bertsch $ // -------------------------------------------------------------------------- #include <OpenMS/DATASTRUCTURES/CVReference.h> using namespace std; namespace OpenMS { // CV reference implementation CVReference::CVReference() = default; CVReference::~CVReference() = default; CVReference::CVReference(const CVReference& rhs) = default; CVReference& CVReference::operator=(const CVReference& rhs) { if (this != &rhs) { name_ = rhs.name_; identifier_ = rhs.identifier_; } return *this; } bool CVReference::operator==(const CVReference& rhs) const { return name_ == rhs.name_ && identifier_ == rhs.identifier_; } bool CVReference::operator!=(const CVReference& rhs) const { return !(*this == rhs); } void CVReference::setName(const String& name) { name_ = name; } const String& CVReference::getName() const { return name_; } void CVReference::setIdentifier(const String& identifier) { identifier_ = identifier; } const String& CVReference::getIdentifier() const { return identifier_; } } // namespace OpenMS
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/StringUtils.cpp
.cpp
970
18
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg, Chris Bielow $ // $Authors: Marc Sturm, Stephan Aiche, Chris Bielow $ // -------------------------------------------------------------------------- #include <OpenMS/DATASTRUCTURES/StringUtils.h> namespace OpenMS { boost::spirit::qi::real_parser<double, StringUtilsHelper::real_policies_NANfixed_<double> > StringUtilsHelper::parse_double_ = boost::spirit::qi::real_parser<double, real_policies_NANfixed_<double> >(); boost::spirit::qi::real_parser<float, StringUtilsHelper::real_policies_NANfixed_<float> > StringUtilsHelper::parse_float_ = boost::spirit::qi::real_parser<float, real_policies_NANfixed_<float> >(); boost::spirit::qi::int_parser<> StringUtilsHelper::parse_int_ = boost::spirit::qi::int_parser<>(); }
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/ChargePair.cpp
.cpp
4,608
204
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: $
// --------------------------------------------------------------------------

#include <OpenMS/DATASTRUCTURES/ChargePair.h>
#include <OpenMS/DATASTRUCTURES/Adduct.h>

#include <ostream>

// Implementation of ChargePair: links two features (by map index and charge),
// the Compomer explaining their mass difference, an ILP edge score, and an
// "active" flag.
namespace OpenMS
{
  ChargePair::ChargePair() :
    feature0_index_(0),
    feature1_index_(0),
    feature0_charge_(0),
    feature1_charge_(0),
    compomer_(),
    mass_diff_(0),
    score_(1),
    is_active_(false)
  {
  }

  /// Constructor from map index, element index and Feature
  ChargePair::ChargePair(const Size& index0,
                         const Size& index1,
                         const Int& charge0,
                         const Int& charge1,
                         const Compomer& compomer,
                         const double& mass_diff,
                         const bool active) :
    feature0_index_(index0),
    feature1_index_(index1),
    feature0_charge_(charge0),
    feature1_charge_(charge1),
    compomer_(compomer),
    mass_diff_(mass_diff),
    score_(1),
    is_active_(active)
  {
  }

  /// Copy constructor
  ChargePair::ChargePair(const ChargePair& rhs) = default;

  /// Assignment operator
  ChargePair& ChargePair::operator=(const ChargePair& rhs)
  {
    if (&rhs == this)
    {
      return *this;
    }
    feature0_index_ = rhs.feature0_index_;
    feature1_index_ = rhs.feature1_index_;
    feature0_charge_ = rhs.feature0_charge_;
    feature1_charge_ = rhs.feature1_charge_;
    compomer_ = rhs.compomer_;
    mass_diff_ = rhs.mass_diff_;
    score_ = rhs.score_;
    is_active_ = rhs.is_active_;
    return *this;
  }

  //@}

  //@name Accessors
  //@{
  /// Returns the charge (for element 0 or 1)
  // note: any pairID != 0 selects element 1 (same for all pairID accessors below)
  Int ChargePair::getCharge(UInt pairID) const
  {
    if (pairID == 0)
    {
      return feature0_charge_;
    }
    else
    {
      return feature1_charge_;
    }
  }

  /// Set the charge (for element 0 or 1)
  void ChargePair::setCharge(UInt pairID, Int e)
  {
    if (pairID == 0)
    {
      feature0_charge_ = e;
    }
    else
    {
      feature1_charge_ = e;
    }
  }

  /// Returns the element index (for element 0 or 1)
  Size ChargePair::getElementIndex(UInt pairID) const
  {
    if (pairID == 0)
    {
      return feature0_index_;
    }
    else
    {
      return feature1_index_;
    }
  }

  /// Set the element index (for element 0 or 1)
  void ChargePair::setElementIndex(UInt pairID, Size e)
  {
    if (pairID == 0)
    {
      feature0_index_ = e;
    }
    else
    {
      feature1_index_ = e;
    }
  }

  /// Returns the Id of the compomer that explains the mass difference
  const Compomer& ChargePair::getCompomer() const
  {
    return compomer_;
  }

  /// Set the compomer id
  void ChargePair::setCompomer(const Compomer& compomer)
  {
    compomer_ = compomer;
  }

  /// Returns the mass difference
  double ChargePair::getMassDiff() const
  {
    return mass_diff_;
  }

  /// Sets the mass difference
  void ChargePair::setMassDiff(double mass_diff)
  {
    mass_diff_ = mass_diff;
  }

  /// Returns the ILP edge score
  double ChargePair::getEdgeScore() const
  {
    return score_;
  }

  /// Sets the ILP edge score
  void ChargePair::setEdgeScore(double score)
  {
    score_ = score;
  }

  /// is this pair realized?
  bool ChargePair::isActive() const
  {
    return is_active_;
  }

  void ChargePair::setActive(const bool active)
  {
    is_active_ = active;
  }

  //@}

  /// Equality operator
  // NOTE: the edge score_ is not part of the comparison (as written here);
  // mass_diff_ is compared with exact double equality
  bool ChargePair::operator==(const ChargePair& i) const
  {
    return (feature0_index_ == i.feature0_index_) &&
           (feature1_index_ == i.feature1_index_) &&
           (feature0_charge_ == i.feature0_charge_) &&
           (feature1_charge_ == i.feature1_charge_) &&
           (compomer_ == i.compomer_) &&
           (mass_diff_ == i.mass_diff_) &&
           (is_active_ == i.is_active_);
  }

  /// Equality operator
  bool ChargePair::operator!=(const ChargePair& i) const
  {
    return !(this->operator==(i));
  }

  /// Debug/diagnostic stream output.
  std::ostream& operator<<(std::ostream& os, const ChargePair& cp)
  {
    os << "---------- ChargePair -----------------\n"
       << "Mass Diff: " << cp.getMassDiff() << "\n"
       << "Compomer: " << cp.getCompomer() << "\n"
       << "Charge: " << cp.getCharge(0) << " : " << cp.getCharge(1) << "\n"
       << "Element Index: " << cp.getElementIndex(0) << " : " << cp.getElementIndex(1) << "\n";
    return os;
  }

}
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/FASTAContainer.cpp
.cpp
419
14
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Chris Bielow $ // $Authors: Chris Bielow $ // -------------------------------------------------------------------------- #include <OpenMS/DATASTRUCTURES/FASTAContainer.h> namespace OpenMS { }
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/Param.cpp
.cpp
54,635
1,694
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: Marc Sturm, Clemens Groepl $ // -------------------------------------------------------------------------- #include <OpenMS/DATASTRUCTURES/Param.h> #include <OpenMS/CONCEPT/LogStream.h> #include <algorithm> namespace OpenMS { //********************************* ParamEntry ************************************** Param::ParamEntry::ParamEntry() : name(), description(), value(), tags(), min_float(-std::numeric_limits<double>::max()), max_float(std::numeric_limits<double>::max()), min_int(-std::numeric_limits<int>::max()), max_int(std::numeric_limits<int>::max()), valid_strings() { } Param::ParamEntry::ParamEntry(const std::string& n, const ParamValue& v, const std::string& d, const std::vector<std::string>& t) : name(n), description(d), value(v), tags(), min_float(-std::numeric_limits<double>::max()), max_float(std::numeric_limits<double>::max()), min_int(-std::numeric_limits<int>::max()), max_int(std::numeric_limits<int>::max()), valid_strings() { //add tags for (size_t i = 0; i < t.size(); ++i) { tags.insert(t[i]); } //check name if (name.find(':') != std::string::npos) { OPENMS_LOG_ERROR << "Error ParamEntry name must not contain ':' characters!" 
<< std::endl; } } Param::ParamEntry::~ParamEntry() = default; bool Param::ParamEntry::isValid(std::string& message) const { if (value.valueType() == ParamValue::STRING_VALUE) { if (!valid_strings.empty()) { bool ok = false; if (std::find(valid_strings.begin(), valid_strings.end(), value) != valid_strings.end()) { ok = true; } else if (std::find(tags.begin(), tags.end(), "input file") != tags.end() || std::find(tags.begin(), tags.end(), "output file") != tags.end() || std::find(tags.begin(), tags.end(), "output prefix") != tags.end()) { //do not check restrictions on file names for now ok = true; } if (!ok) { std::string valid = valid_strings.front(); for (auto it = valid_strings.begin() + 1, end = valid_strings.end(); it != end; ++it) { valid += "," + *it; } message = "Invalid string parameter value '" + value.toString() + "' for parameter '" + name + "' given! Valid values are: '" + valid + "'."; return false; } } } else if (value.valueType() == ParamValue::STRING_LIST) { std::string str_value; std::vector<std::string> ls_value = value; for (size_t i = 0; i < ls_value.size(); ++i) { str_value = ls_value[i]; if (!valid_strings.empty()) { bool ok = false; if (std::find(valid_strings.begin(), valid_strings.end(), str_value) != valid_strings.end()) { ok = true; } else if (std::find(tags.begin(), tags.end(), "input file") != tags.end() || std::find(tags.begin(), tags.end(), "output file") != tags.end()) { //do not check restrictions on file names for now ok = true; } if (!ok) { std::string valid = valid_strings.front(); for (auto it = valid_strings.begin() + 1, end = valid_strings.end(); it != end; ++it) { valid += "," + *it; } message = "Invalid string parameter value '" + str_value + "' for parameter '" + name + "' given! 
Valid values are: '" + valid + "'."; return false; } } } } else if (value.valueType() == ParamValue::INT_VALUE) { int tmp = value; if ((min_int != -std::numeric_limits<int>::max() && tmp < min_int) || (max_int != std::numeric_limits<int>::max() && tmp > max_int)) { message = "Invalid integer parameter value '" + std::to_string(tmp) + "' for parameter '" + name + "' given! The valid range is: [" + std::to_string(min_int) + ":" + std::to_string(max_int) + "]."; return false; } } else if (value.valueType() == ParamValue::INT_LIST) { int int_value; std::vector<int> ls_value = value; for (size_t i = 0; i < ls_value.size(); ++i) { int_value = ls_value[i]; if ((min_int != -std::numeric_limits<int>::max() && int_value < min_int) || (max_int != std::numeric_limits<int>::max() && int_value > max_int)) { message = "Invalid integer parameter value '" + std::to_string(int_value) + "' for parameter '" + name + "' given! The valid range is: [" + std::to_string(min_int) + ":" + std::to_string(max_int) + "]."; return false; } } } else if (value.valueType() == ParamValue::DOUBLE_VALUE) { double tmp = value; if ((min_float != -std::numeric_limits<double>::max() && tmp < min_float) || (max_float != std::numeric_limits<double>::max() && tmp > max_float)) { message = "Invalid double parameter value '" + std::to_string(tmp) + "' for parameter '" + name + "' given! The valid range is: [" + std::to_string(min_float)+ ":" + std::to_string(max_float) + "]."; return false; } } else if (value.valueType() == ParamValue::DOUBLE_LIST) { std::vector<double> ls_value = value; for (size_t i = 0; i < ls_value.size(); ++i) { double dou_value = ls_value[i]; if ((min_float != -std::numeric_limits<double>::max() && dou_value < min_float) || (max_float != std::numeric_limits<double>::max() && dou_value > max_float)) { message = "Invalid double parameter value '" + std::to_string(dou_value) + "' for parameter '" + name + "' given! 
The valid range is: [" + std::to_string(min_float) + ":" + std::to_string(max_float) + "]."; return false; } } } return true; } bool Param::ParamEntry::operator==(const ParamEntry& rhs) const { return name == rhs.name && value == rhs.value; } //********************************* ParamNode ************************************** Param::ParamNode::ParamNode() : name(), description(), entries(), nodes() { } Param::ParamNode::ParamNode(const std::string& n, const std::string& d) : name(n), description(d), entries(), nodes() { if (name.find(':') != std::string::npos) { OPENMS_LOG_WARN << "Error ParamNode name must not contain ':' characters!\n"; } } Param::ParamNode::~ParamNode() = default; bool Param::ParamNode::operator==(const ParamNode& rhs) const { if (name != rhs.name || entries.size() != rhs.entries.size() || nodes.size() != rhs.nodes.size()) { return false; } //order of sections / entries should not matter for (size_t i = 0; i < entries.size(); ++i) { if (find(rhs.entries.begin(), rhs.entries.end(), entries[i]) == rhs.entries.end()) { return false; } } for (size_t i = 0; i < nodes.size(); ++i) { if (find(rhs.nodes.begin(), rhs.nodes.end(), nodes[i]) == rhs.nodes.end()) { return false; } } return true; } Param::ParamNode::EntryIterator Param::ParamNode::findEntry(const std::string& local_name) { for (EntryIterator it = entries.begin(); it != entries.end(); ++it) { if (it->name == local_name) { return it; } } return entries.end(); } Param::ParamNode::NodeIterator Param::ParamNode::findNode(const std::string& local_name) { for (NodeIterator it = nodes.begin(); it != nodes.end(); ++it) { if (it->name == local_name) { return it; } } return nodes.end(); } Param::ParamNode* Param::ParamNode::findParentOf(const std::string& local_name) { //cout << "findParentOf nodename: " << this->name << " - nodes: " << this->nodes.size() << " - find: "<< name << '\n'; if (local_name.find(':') != std::string::npos) //several subnodes to browse through { size_t pos = 
local_name.find(':'); std::string prefix = local_name.substr(0, pos); //cout << " - Prefix: '" << prefix << "'\n"; NodeIterator it = findNode(prefix); if (it == nodes.end()) //subnode not found { return nullptr; } //recursively call findNode for the rest of the path std::string new_name = local_name.substr(it->name.size() + 1); //cout << " - Next name: '" << new_name << "'\n"; return it->findParentOf(new_name); } else // we are in the right child { //check if a node or entry prefix match for (size_t i = 0; i < nodes.size(); ++i) { if (nodes[i].name.compare(0, local_name.size(), local_name) == 0) { return this; } } for (size_t i = 0; i < entries.size(); ++i) { if (entries[i].name.compare(0, local_name.size(), local_name) == 0) { return this; } } return nullptr; } } Param::ParamEntry* Param::ParamNode::findEntryRecursive(const std::string& local_name) { ParamNode* parent = findParentOf(local_name); if (parent == nullptr) { return nullptr; } EntryIterator it = parent->findEntry(suffix(local_name)); if (it == parent->entries.end()) { return nullptr; } return &(*it); } void Param::ParamNode::insert(const ParamNode& node, const std::string& prefix) { //std::cerr << "INSERT NODE " << node.name << " (" << prefix << ")\n"; std::string prefix2 = prefix + node.name; ParamNode* insert_node = this; while (prefix2.find(':') != std::string::npos) { size_t pos = prefix2.find(':'); std::string local_name = prefix2.substr(0, pos); //check if the node already exists NodeIterator it = insert_node->findNode(local_name); if (it != insert_node->nodes.end()) //exists { insert_node = &(*it); } else //create it { insert_node->nodes.emplace_back(local_name, ""); insert_node = &(insert_node->nodes.back()); //std::cerr << " - Created new node: " << insert_node->name << '\n'; } //remove prefix prefix2 = prefix2.substr(local_name.size() + 1); } // check if the node exists as ParamEntry EntryIterator entry_it = insert_node->findEntry(prefix2); if (entry_it != insert_node->entries.end()) { 
std::string message = "Duplicate option \"" + prefix + "\" into \"" + name + "\", should not be added as ParamNode and ParamEntry at the same time (1)."; throw Exception::InternalToolError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, message); } //check if the node already exists NodeIterator it = insert_node->findNode(prefix2); if (it != insert_node->nodes.end()) //append nodes and entries { for (ConstNodeIterator it2 = node.nodes.begin(); it2 != node.nodes.end(); ++it2) { it->insert(*it2); } for (ConstEntryIterator it2 = node.entries.begin(); it2 != node.entries.end(); ++it2) { it->insert(*it2); } if (it->description.empty() || !node.description.empty()) //replace description if not empty in new node { it->description = node.description; } } else //insert it { Param::ParamNode tmp(node); tmp.name = prefix2; insert_node->nodes.push_back(tmp); } } void Param::ParamNode::insert(const ParamEntry& entry, const std::string& prefix) { //std::cerr << "INSERT ENTRY " << entry.name << " (" << prefix << ")\n"; std::string prefix2 = prefix + entry.name; //std::cerr << " - inserting: " << prefix2 << '\n'; ParamNode* insert_node = this; while (prefix2.find(':') != std::string::npos) { size_t pos = prefix2.find(':'); std::string local_name = prefix2.substr(0, pos); //std::cerr << " - looking for node: " << name << '\n'; //look up if the node already exists NodeIterator it = insert_node->findNode(local_name); if (it != insert_node->nodes.end()) //exists { insert_node = &(*it); } else //create it { insert_node->nodes.emplace_back(local_name, ""); insert_node = &(insert_node->nodes.back()); //std::cerr << " - Created new node: " << insert_node->name << '\n'; } //remove prefix prefix2 = prefix2.substr(local_name.size() + 1); //std::cerr << " - new prefix: " << prefix2 << '\n'; } // check if the entry exists as ParamNode NodeIterator node_it = insert_node->findNode(prefix2); if (node_it != insert_node->nodes.end()) { std::string message = "Duplicate option \"" + prefix + "\" into \"" 
+ name + "\", should not be added as ParamNode and ParamEntry at the same time (2)."; throw Exception::InternalToolError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, message); } //check if the entry already exists //std::cerr << " - final entry name: " << prefix2 << '\n'; EntryIterator it = insert_node->findEntry(prefix2); if (it != insert_node->entries.end()) //overwrite entry { it->value = entry.value; it->tags = entry.tags; if (it->description.empty() || !entry.description.empty()) //replace description if not empty in new entry { it->description = entry.description; } } else //insert it { Param::ParamEntry tmp(entry); tmp.name = prefix2; insert_node->entries.push_back(tmp); } } size_t Param::ParamNode::size() const { size_t subnode_size = 0; for (std::vector<ParamNode>::const_iterator it = nodes.begin(); it != nodes.end(); ++it) { subnode_size += it->size(); } return entries.size() + subnode_size; } std::string Param::ParamNode::suffix(const std::string& key) const { size_t pos = key.rfind(':'); if (pos != std::string::npos) { return key.substr(++pos); } return key; } //********************************* Param ************************************** Param::Param() : root_("ROOT", "") { } Param::~Param() = default; Param::Param(const ParamNode& node) : root_(node) { root_.name = "ROOT"; root_.description = ""; } bool Param::operator==(const Param& rhs) const { return root_ == rhs.root_; } void Param::setValue(const std::string& key, const ParamValue& value, const std::string& description, const std::vector<std::string>& tags) { root_.insert(ParamEntry("", value, description, tags), key); } void Param::setValidStrings(const std::string& key, const std::vector<std::string>& strings) { ParamEntry& entry = getEntry_(key); //check if correct parameter type if (entry.value.valueType() != ParamValue::STRING_VALUE && entry.value.valueType() != ParamValue::STRING_LIST) { throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, key); } //check for commas 
for (size_t i = 0; i < strings.size(); ++i) { if (strings[i].find(',') != std::string::npos) { throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Comma characters in Param string restrictions are not allowed!"); } } entry.valid_strings = strings; } const std::vector<std::string>& Param::getValidStrings(const std::string& key) const { ParamEntry& entry = getEntry_(key); // check if correct parameter type if (entry.value.valueType() != ParamValue::STRING_VALUE && entry.value.valueType() != ParamValue::STRING_LIST) { throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, key); } return entry.valid_strings; } void Param::setMinInt(const std::string& key, int min) { ParamEntry& entry = getEntry_(key); if (entry.value.valueType() != ParamValue::INT_VALUE && entry.value.valueType() != ParamValue::INT_LIST) { throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, key); } entry.min_int = min; } void Param::setMaxInt(const std::string& key, int max) { ParamEntry& entry = getEntry_(key); if (entry.value.valueType() != ParamValue::INT_VALUE && entry.value.valueType() != ParamValue::INT_LIST) { throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, key); } entry.max_int = max; } void Param::setMinFloat(const std::string& key, double min) { ParamEntry& entry = getEntry_(key); if (entry.value.valueType() != ParamValue::DOUBLE_VALUE && entry.value.valueType() != ParamValue::DOUBLE_LIST) { throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, key); } entry.min_float = min; } void Param::setMaxFloat(const std::string& key, double max) { ParamEntry& entry = getEntry_(key); if (entry.value.valueType() != ParamValue::DOUBLE_VALUE && entry.value.valueType() != ParamValue::DOUBLE_LIST) { throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, key); } entry.max_float = max; } const ParamValue& Param::getValue(const std::string& key) const { return 
getEntry_(key).value; } const std::string& Param::getSectionDescription(const std::string& key) const { //This variable is used instead of String::EMPTY as the method is used in //static initialization and thus cannot rely on String::EMPTY been initialized. static std::string empty; ParamNode* node = root_.findParentOf(key); if (node == nullptr) { return empty; } Param::ParamNode::NodeIterator it = node->findNode(node->suffix(key)); if (it == node->nodes.end()) { return empty; } return it->description; } void Param::insert(const std::string& prefix, const Param& param) { //std::cerr << "INSERT PARAM (" << prefix << ")\n"; for (Param::ParamNode::NodeIterator it = param.root_.nodes.begin(); it != param.root_.nodes.end(); ++it) { root_.insert(*it, prefix); } for (Param::ParamNode::EntryIterator it = param.root_.entries.begin(); it != param.root_.entries.end(); ++it) { root_.insert(*it, prefix); } } void Param::setDefaults(const Param& defaults, const std::string& prefix, bool showMessage) { std::string prefix2 = prefix; if (!prefix2.empty()) { if (prefix2.back() != ':') { prefix2 += ':'; } } std::string pathname; for (Param::ParamIterator it = defaults.begin(); it != defaults.end(); ++it) { if (!exists(prefix2 + it.getName())) { if (showMessage) OPENMS_LOG_WARN << "Setting " << prefix2 + it.getName() << " to " << it->value << '\n'; std::string name = prefix2 + it.getName(); root_.insert(ParamEntry("", it->value, it->description), name); //copy tags for (std::set<std::string>::const_iterator tag_it = it->tags.begin(); tag_it != it->tags.end(); ++tag_it) { addTag(name, *tag_it); } //copy restrictions if (it->value.valueType() == ParamValue::STRING_VALUE || it->value.valueType() == ParamValue::STRING_LIST) { setValidStrings(name, it->valid_strings); } else if (it->value.valueType() == ParamValue::INT_VALUE || it->value.valueType() == ParamValue::INT_LIST) { setMinInt(name, it->min_int); setMaxInt(name, it->max_int); } else if (it->value.valueType() == 
ParamValue::DOUBLE_VALUE || it->value.valueType() == ParamValue::DOUBLE_LIST) { setMinFloat(name, it->min_float); setMaxFloat(name, it->max_float); } } //copy section descriptions const std::vector<ParamIterator::TraceInfo>& trace = it.getTrace(); for (std::vector<ParamIterator::TraceInfo>::const_iterator it2 = trace.begin(); it2 != trace.end(); ++it2) { if (it2->opened) { pathname += it2->name + ":"; } else { pathname.resize(pathname.size() - it2->name.size() - 1); } std::string real_pathname = pathname.substr(0, pathname.length() - 1); //remove ':' at the end if (!real_pathname.empty()) { std::string description_old = getSectionDescription(prefix + real_pathname); const std::string& description_new = defaults.getSectionDescription(real_pathname); if (description_old.empty()) { //std::cerr << "## Setting description of " << prefix+real_pathname << " to"<< '\n'; //std::cerr << "## " << description_new << '\n'; setSectionDescription(prefix2 + real_pathname, description_new); } } } } } void Param::remove(const std::string& key) { std::string keyname = key; if (!key.empty() && key.back() == ':') // delete section { keyname = key.substr(0, key.length() - 1); ParamNode* node_parent = root_.findParentOf(keyname); if (node_parent != nullptr) { Param::ParamNode::NodeIterator it = node_parent->findNode(node_parent->suffix(keyname)); if (it != node_parent->nodes.end()) { std::string name = it->name; node_parent->nodes.erase(it); // will automatically delete subnodes if (node_parent->nodes.empty() && node_parent->entries.empty()) { // delete last section name (could be partial) remove(keyname.substr(0, keyname.size() - name.size())); // keep last ':' to indicate deletion of a section } } } } else { ParamNode* node = root_.findParentOf(keyname); if (node != nullptr) { std::string entryname = node->suffix(keyname); // get everything beyond last ':' Param::ParamNode::EntryIterator it = node->findEntry(entryname); if (it != node->entries.end()) { node->entries.erase(it); // 
delete entry if (node->nodes.empty() && node->entries.empty()) { // delete if section is now empty remove(keyname.substr(0, keyname.length() - entryname.length())); // keep last ':' to indicate deletion of a section } } } } } void Param::removeAll(const std::string& prefix) { if (!prefix.empty() && prefix.back() == ':')//we have to delete one node only (and its subnodes) { ParamNode* node = root_.findParentOf(prefix.substr(0, prefix.length() - 1)); if (node != nullptr) { Param::ParamNode::NodeIterator it = node->findNode(node->suffix(prefix.substr(0, prefix.length() - 1))); if (it != node->nodes.end()) { std::string name = it->name; node->nodes.erase(it); // will automatically delete subnodes if (node->nodes.empty() && node->entries.empty()) { // delete last section name (could be partial) removeAll(prefix.substr(0, prefix.length() - name.length() - 1));// '-1' for the tailing ':' } } } } else //we have to delete all entries and nodes starting with the prefix { ParamNode* node = root_.findParentOf(prefix); if (node != nullptr) { std::string suffix = node->suffix(prefix); // name behind last ":" for (Param::ParamNode::NodeIterator it = node->nodes.begin(); it != node->nodes.end(); /*do nothing*/) { if (it->name.compare(0, suffix.length(), suffix) == 0) { it = node->nodes.erase(it); } else if (it != node->nodes.end()) { ++it; } } for (Param::ParamNode::EntryIterator it = node->entries.begin(); it != node->entries.end(); /*do nothing*/) { if (it->name.compare(0, suffix.size(), suffix) == 0) { it = node->entries.erase(it); } else if (it != node->entries.end()) { ++it; } } // the parent node might now be empty (delete it as well - otherwise the trace might be broken) if (node->nodes.empty() && node->entries.empty()) { // delete last section name (could be partial) removeAll(prefix.substr(0, prefix.size() - suffix.size())); } } } } Param Param::copySubset(const Param& subset) const { ParamNode out("ROOT", ""); for (const auto& entry : subset.root_.entries) { const auto& 
n = root_.findEntry(entry.name); if (n == root_.entries.end()) { OPENMS_LOG_WARN << "Warning: Trying to copy non-existent parameter entry " << entry.name << std::endl; } else { out.insert(*n); } } for (const auto& node : subset.root_.nodes) { const auto& n = root_.findNode(node.name); if (n == root_.nodes.end()) { OPENMS_LOG_WARN << "Warning: Trying to copy non-existent parameter node " << node.name << std::endl; } else { out.insert(*n); } } return Param(out); } Param Param::copy(const std::string& prefix, bool remove_prefix) const { ParamNode out("ROOT", ""); ParamNode* node = root_.findParentOf(prefix); if (node == nullptr) { return Param(); } //we have to copy this node only if (!prefix.empty() && prefix.back() == ':') { if (remove_prefix) { out = *node; } else { out.insert(*node, prefix.substr(0, prefix.size() - node->name.size() - 1)); } } else //we have to copy all entries and nodes starting with the right suffix { std::string suffix = node->suffix(prefix); for (Param::ParamNode::NodeIterator it = node->nodes.begin(); it != node->nodes.end(); ++it) { if (it->name.compare(0, suffix.size(), suffix) == 0) { if (remove_prefix) { ParamNode tmp = *it; tmp.name = tmp.name.substr(suffix.size()); out.insert(tmp); } else { out.insert(*it, prefix.substr(0, prefix.size() - suffix.size())); } } } for (Param::ParamNode::EntryIterator it = node->entries.begin(); it != node->entries.end(); ++it) { if (it->name.compare(0, suffix.size(), suffix) == 0) { if (remove_prefix) { ParamEntry tmp = *it; tmp.name = tmp.name.substr(suffix.size()); out.insert(tmp); } else { out.insert(*it, prefix.substr(0, prefix.size() - suffix.size())); } } } } return Param(out); } void Param::parseCommandLine(const int argc, const char** argv, const std::string& prefix) { //determine prefix std::string prefix2 = prefix; if (!prefix2.empty()) { //prefix2.ensureLastChar(':'); if (prefix2.back() != ':') { prefix2.append(1, ':'); } } //parse arguments std::string arg, arg1; for (int i = 1; i < argc; ++i) 
{ //load the current and next argument: arg and arg1 ("" at the last argument) arg = argv[i]; arg1 = ""; if (i + 1 < argc) { arg1 = argv[i + 1]; } //it is a option when it starts with a '-' and the second character is not a number bool arg_is_option = false; if (arg.size() >= 2 && arg[0] == '-' && arg[1] != '0' && arg[1] != '1' && arg[1] != '2' && arg[1] != '3' && arg[1] != '4' && arg[1] != '5' && arg[1] != '6' && arg[1] != '7' && arg[1] != '8' && arg[1] != '9') { arg_is_option = true; } bool arg1_is_option = false; if (arg1.size() >= 2 && arg1[0] == '-' && arg1[1] != '0' && arg1[1] != '1' && arg1[1] != '2' && arg1[1] != '3' && arg1[1] != '4' && arg1[1] != '5' && arg1[1] != '6' && arg1[1] != '7' && arg1[1] != '8' && arg1[1] != '9') { arg1_is_option = true; } //cout << "Parse: '"<< arg << "' '" << arg1 << "'\n"; //flag (option without text argument) if (arg_is_option && arg1_is_option) { root_.insert(ParamEntry(arg, std::string(), ""), prefix2); } //option with argument else if (arg_is_option && !arg1_is_option) { root_.insert(ParamEntry(arg, arg1, ""), prefix2); ++i; } //just text arguments (not preceded by an option) else { ParamEntry* misc_entry = root_.findEntryRecursive(prefix2 + "misc"); if (misc_entry == nullptr) { std::vector<std::string> sl; sl.push_back(arg); // create "misc"-Node: root_.insert(ParamEntry("misc", sl, ""), prefix2); } else { std::vector<std::string> sl = misc_entry->value; sl.push_back(arg); misc_entry->value = sl; } } } } void Param::parseCommandLine(const int argc, const char** argv, const std::map<std::string, std::string>& options_with_one_argument, const std::map<std::string, std::string>& options_without_argument, const std::map<std::string, std::string>& options_with_multiple_argument, const std::string& misc, const std::string& unknown) { //determine misc key //determine unknown key //parse arguments std::string arg, arg1; for (int i = 1; i < argc; ++i) { //load the current and next argument: arg and arg1 ("" at the last argument) 
arg = argv[i]; arg1 = ""; if (i + 1 < argc) { arg1 = argv[i + 1]; } //it is a option when it starts with a '-' and the second character is not a number bool arg_is_option = false; if (arg.size() >= 2 && arg[0] == '-' && arg[1] != '0' && arg[1] != '1' && arg[1] != '2' && arg[1] != '3' && arg[1] != '4' && arg[1] != '5' && arg[1] != '6' && arg[1] != '7' && arg[1] != '8' && arg[1] != '9') { arg_is_option = true; } bool arg1_is_option = false; if (arg1.size() >= 2 && arg1[0] == '-' && arg1[1] != '0' && arg1[1] != '1' && arg1[1] != '2' && arg1[1] != '3' && arg1[1] != '4' && arg1[1] != '5' && arg1[1] != '6' && arg1[1] != '7' && arg1[1] != '8' && arg1[1] != '9') { arg1_is_option = true; } //with multiple argument if (options_with_multiple_argument.find(arg) != options_with_multiple_argument.end()) { //next argument is an option if (arg1_is_option) { root_.insert(ParamEntry("", std::vector<std::string>(), ""), options_with_multiple_argument.find(arg)->second); } //next argument is not an option else { std::vector<std::string> sl; int j = (i + 1); while (j < argc && !(arg1.size() >= 2 && arg1[0] == '-' && arg1[1] != '0' && arg1[1] != '1' && arg1[1] != '2' && arg1[1] != '3' && arg1[1] != '4' && arg1[1] != '5' && arg1[1] != '6' && arg1[1] != '7' && arg1[1] != '8' && arg1[1] != '9')) { sl.push_back(arg1); ++j; if (j < argc) { arg1 = argv[j]; } } root_.insert(ParamEntry("", sl, ""), options_with_multiple_argument.find(arg)->second); i = j - 1; } } //without argument else if (options_without_argument.find(arg) != options_without_argument.end()) { root_.insert(ParamEntry("", "true", ""), options_without_argument.find(arg)->second); } //with one argument else if (options_with_one_argument.find(arg) != options_with_one_argument.end()) { //next argument is not an option if (!arg1_is_option) { root_.insert(ParamEntry("", arg1, ""), options_with_one_argument.find(arg)->second); ++i; } //next argument is an option else { root_.insert(ParamEntry("", std::string(), ""), 
options_with_one_argument.find(arg)->second); } } //unknown option else if (arg_is_option) { ParamEntry* unknown_entry = root_.findEntryRecursive(unknown); if (unknown_entry == nullptr) { std::vector<std::string> sl; sl.push_back(arg); root_.insert(ParamEntry("", sl, ""), unknown); } else { std::vector<std::string> sl = unknown_entry->value; sl.push_back(arg); unknown_entry->value = sl; } } //just text argument else { ParamEntry* misc_entry = root_.findEntryRecursive(misc); if (misc_entry == nullptr) { std::vector<std::string> sl; sl.push_back(arg); // create "misc"-Node: root_.insert(ParamEntry("", sl, ""), misc); } else { std::vector<std::string> sl = misc_entry->value; sl.push_back(arg); misc_entry->value = sl; } } } } std::ostream& operator<<(std::ostream& os, const Param& param) { for (Param::ParamIterator it = param.begin(); it != param.end(); ++it) { os << '"'; if (it.getName().length() > it->name.length() + 1) { os << it.getName().substr(0, it.getName().length() - it->name.length() - 1) << "|"; } os << it->name << "\" -> \"" << it->value << '"'; if (!it->description.empty()) { os << " (" << it->description << ")"; } os << '\n'; } return os; } size_t Param::size() const { return root_.size(); } bool Param::empty() const { return size() == 0; } void Param::clear() { root_ = ParamNode("ROOT", ""); } void Param::checkDefaults(const std::string& name, const Param& defaults, const std::string& prefix) const { //Extract right parameters std::string prefix2 = prefix; if (!prefix2.empty()) { if (prefix2.back() != ':') { prefix2 += ':'; } } Param check_values = copy(prefix2, true); //check for (ParamIterator it = check_values.begin(); it != check_values.end(); ++it) { //unknown parameter if (!defaults.exists(it.getName())) { OPENMS_LOG_WARN << "Warning: " << name << " received the unknown parameter '" << it.getName() << "'"; if (!prefix2.empty()) { OPENMS_LOG_WARN << " in '" << prefix2 << "'"; } OPENMS_LOG_WARN << "!" 
<< std::endl;
    }

    //different types
    // The outdated entry's value type must match the registered default's type;
    // on mismatch, build human-readable names for both types for the error text.
    ParamEntry* default_value = defaults.root_.findEntryRecursive(prefix2 + it.getName());
    if (default_value == nullptr)
    {
      continue; // no default registered for this entry -> nothing to check
    }
    if (default_value->value.valueType() != it->value.valueType())
    {
      std::string d_type; // readable name of the default's type
      if (default_value->value.valueType() == ParamValue::STRING_VALUE)
      {
        d_type = "string";
      }
      if (default_value->value.valueType() == ParamValue::STRING_LIST)
      {
        d_type = "string list";
      }
      if (default_value->value.valueType() == ParamValue::EMPTY_VALUE)
      {
        d_type = "empty";
      }
      if (default_value->value.valueType() == ParamValue::INT_VALUE)
      {
        d_type = "integer";
      }
      if (default_value->value.valueType() == ParamValue::INT_LIST)
      {
        d_type = "integer list";
      }
      if (default_value->value.valueType() == ParamValue::DOUBLE_VALUE)
      {
        d_type = "float";
      }
      if (default_value->value.valueType() == ParamValue::DOUBLE_LIST)
      {
        d_type = "float list";
      }
      std::string p_type; // readable name of the given entry's type
      if (it->value.valueType() == ParamValue::STRING_VALUE)
      {
        p_type = "string";
      }
      if (it->value.valueType() == ParamValue::STRING_LIST)
      {
        p_type = "string list";
      }
      if (it->value.valueType() == ParamValue::EMPTY_VALUE)
      {
        p_type = "empty";
      }
      if (it->value.valueType() == ParamValue::INT_VALUE)
      {
        p_type = "integer";
      }
      if (it->value.valueType() == ParamValue::INT_LIST)
      {
        p_type = "integer list";
      }
      if (it->value.valueType() == ParamValue::DOUBLE_VALUE)
      {
        p_type = "float";
      }
      if (it->value.valueType() == ParamValue::DOUBLE_LIST)
      {
        p_type = "float list";
      }

      throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name + ": Wrong parameter type '" + p_type + "' for " + d_type + " parameter '" + it.getName() + "' given!");
    }
    //parameter restrictions
    // Validate the given value against the restrictions attached to the default
    // entry (min/max, valid strings, ...).
    ParamEntry pe = *default_value;
    pe.value = it->value;
    std::string s;
    if (!pe.isValid(s))
      throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name + ": " + s);
    }
  }

  /// Returns an iterator to the first entry whose leaf name equals @p leaf
  /// (i.e. the fully qualified name ends in ":<leaf>"), or end() if none exists.
  Param::ParamIterator Param::findFirst(const std::string& leaf) const
  {
    for (Param::ParamIterator it = this->begin(); it != this->end(); ++it)
    {
      std::string suffix = ":" + leaf;
      // suffix match on the full name; only complete leaf names match because
      // of the leading ':' in the suffix
      if (!(suffix.length() > it.getName().length()) &&
          it.getName().compare(it.getName().length() - suffix.length(), suffix.length(), suffix) == 0)
      {
        return it;
      }
    }
    return this->end();
  }

  /// Returns the next entry (strictly after @p start_leaf) whose leaf name
  /// equals @p leaf, or end() if there is no further match.
  Param::ParamIterator Param::findNext(const std::string& leaf, const ParamIterator& start_leaf) const
  {
    // start at NEXT entry
    Param::ParamIterator it = start_leaf;
    if (it != this->end()) ++it;

    for (; it != this->end(); ++it)
    {
      std::string suffix = ":" + leaf;
      if (!(suffix.length() > it.getName().length()) &&
          it.getName().compare(it.getName().length() - suffix.length(), suffix.length(), suffix) == 0)
      {
        return it;
      }
    }
    return this->end();
  }

  /// Convenience overload: logs to the global warning stream.
  bool Param::update(const Param& p_outdated, const bool add_unknown)
  {
    return update(p_outdated, add_unknown, getGlobalLogWarn());
  }

  /// Convenience overload: verbose, never fails on invalid values or unknown parameters.
  bool Param::update(const Param& p_outdated, const bool add_unknown, Logger::LogStream& stream)
  {
    bool fail_on_invalid_values = false;
    bool fail_on_unknown_parameters = false;
    return update(p_outdated, true, add_unknown, fail_on_invalid_values, fail_on_unknown_parameters, stream);
  }

  /// Transfers values from an outdated Param @p p_outdated into this (newer) Param.
  /// Entries are matched by full name first, then by unique leaf name; ':version'
  /// and TOPP ':type' entries are never overridden. Returns false if updating
  /// failed according to the fail_on_* flags.
  bool Param::update(const Param& p_outdated, bool verbose, const bool add_unknown, bool fail_on_invalid_values, bool fail_on_unknown_parameters, Logger::LogStream& stream)
  {
    bool is_update_success(true);
    // augment
    for (Param::ParamIterator it = p_outdated.begin(); it != p_outdated.end(); ++it)
    {
      Param::ParamEntry new_entry; // entry of new location (used to retain new description)
      std::string target_name; // fully qualified name in new param

      if (this->exists(it.getName()))
      {
        // param 'version': do not override!
        std::string suffix = ":version";
        if (!(suffix.length() > it.getName().length()) &&
            it.getName().compare(it.getName().length() - suffix.length(), suffix.length(), suffix) == 0)
        {
          if (this->getValue(it.getName()) != it->value)
          {
            OPENMS_THREAD_CRITICAL(LOGSTREAM)
            stream << "Warning: for ':version' entry, augmented and Default Ini-File differ in value. Default value will not be altered!\n";
          }
          continue;
        }
        // param 'type': do not override!
        else if (suffix = ":type",
                 !(suffix.length() > it.getName().length()) &&
                 it.getName().compare(it.getName().length() - suffix.length(), suffix.length(), suffix) == 0) // only for TOPP type (e.g. PeakPicker:1:type), any other 'type' param is ok
        {
          size_t first = it.getName().find(':');
          if (first != std::string::npos &&
              it.getName().find(':', first + 1) != std::string::npos)
          {
            if (this->getValue(it.getName()) != it->value)
            {
              OPENMS_THREAD_CRITICAL(LOGSTREAM)
              stream << "Warning: for ':type' entry, augmented and Default Ini-File differ in value. Default value will not be altered!\n";
            }
            continue;
          }
        }

        // all other parameters:
        new_entry = this->getEntry(it.getName());
        target_name = it.getName();
      }
      else // outdated param non-existent in new param
      {
        // search by suffix in new param. Only match complete names, e.g. myname will match newsection:myname, but not newsection:othermyname
        Param::ParamEntry l1_entry = p_outdated.getEntry(it.getName());
        // since the outdated param with full path does not exist within new param,
        // we will never find the new entry by using exists() as above, thus
        // its safe to modify it here

        ParamIterator it_match = this->findFirst(l1_entry.name);
        if (it_match != this->end())
        {
          // make sure the same leaf name does not exist at any other position
          if (this->findNext(l1_entry.name, it_match) == this->end())
          {
            OPENMS_THREAD_CRITICAL(LOGSTREAM)
            stream << "Found '" << it.getName() << "' as '" << it_match.getName() << "' in new param.\n";
            new_entry = this->getEntry(it_match.getName());
            target_name = it_match.getName();
          }
        }

        if (target_name.empty()) // no mapping was found
        {
          if (fail_on_unknown_parameters)
          {
            OPENMS_THREAD_CRITICAL(LOGSTREAM)
            stream << "Unknown (or deprecated) Parameter '" << it.getName() << "' given in outdated parameter file!\n";
            is_update_success = false;
          }
          else if (add_unknown)
          {
            OPENMS_THREAD_CRITICAL(LOGSTREAM)
            stream << "Unknown (or deprecated) Parameter '" << it.getName() << "' given in outdated parameter file! Adding to current set.\n";
            Param::ParamEntry local_entry = p_outdated.getEntry(it.getName());
            std::string prefix = "";
            if (it.getName().find(':') != std::string::npos)
            {
              prefix = it.getName().substr(0, 1 + it.getName().find_last_of(':'));
            }
            this->root_.insert(local_entry, prefix); //->setValue(it.getName(), local_entry.value, local_entry.description, local_entry.tags);
          }
          else if (verbose)
          {
            OPENMS_THREAD_CRITICAL(LOGSTREAM)
            stream << "Unknown (or deprecated) Parameter '" << it.getName() << "' given in outdated parameter file! Ignoring parameter. \n";
          }
          continue;
        }
      }

      // do the actual updating (we found a matching pair)
      if (new_entry.value.valueType() == it->value.valueType())
      {
        if (new_entry.value != it->value)
        {
          // check entry for consistency (in case restrictions have changed)
          ParamValue default_value = new_entry.value;
          new_entry.value = it->value;
          std::string validation_result;
          if (new_entry.isValid(validation_result))
          {
            // overwrite default value
            if (verbose)
            {
              OPENMS_THREAD_CRITICAL(LOGSTREAM)
              stream << "Default-Parameter '" << target_name << "' overridden: '" << default_value << "' --> '" << it->value << "'!\n";
            }
            this->setValue(target_name, it->value, new_entry.description, this->getTags(target_name));
          }
          else
          {
            OPENMS_THREAD_CRITICAL(LOGSTREAM)
            stream << validation_result;
            if (fail_on_invalid_values)
            {
              OPENMS_THREAD_CRITICAL(LOGSTREAM)
              stream << " Updating failed!\n";
              is_update_success = false;
            }
            else
            {
              OPENMS_THREAD_CRITICAL(LOGSTREAM)
              stream << " Ignoring invalid value (using new default '" << default_value << "')!\n";
              new_entry.value = default_value;
            }
          }
        }
        else
        {
          // value stayed the same .. nothing to be done
        }
      }
      else
      {
        OPENMS_THREAD_CRITICAL(LOGSTREAM)
        stream << "Parameter '" << it.getName() << "' has changed value type!\n";
        if (fail_on_invalid_values)
        {
          OPENMS_THREAD_CRITICAL(LOGSTREAM)
          stream << " Updating failed!\n";
          is_update_success = false;
        }
        else
        {
          OPENMS_THREAD_CRITICAL(LOGSTREAM)
          stream << " Ignoring invalid value (using new default)!\n";
        }
      }
    } // next param in outdated tree

    return is_update_success;
  }

  /// Copies all entries from @p toMerge that do not yet exist in this Param,
  /// and copies section descriptions for sections that lack one here.
  void Param::merge(const OpenMS::Param& toMerge)
  {
    // keep track of the path inside the param tree
    std::string pathname;

    // augment
    for (Param::ParamIterator it = toMerge.begin(); it != toMerge.end(); ++it)
    {
      std::string prefix = "";
      if (it.getName().find(':') != std::string::npos)
        prefix = it.getName().substr(0, 1 + it.getName().find_last_of(':'));

      // we care only about values that do not exist already
      if (!this->exists(it.getName()))
      {
        Param::ParamEntry entry = *it;
        OPENMS_LOG_DEBUG << "[Param::merge] merging " << it.getName() << '\n';
        this->root_.insert(entry, prefix);
      }

      //copy section descriptions
      // The iterator's trace records which tree nodes were entered/left since
      // the last step; replay it to maintain the current section path.
      const std::vector<ParamIterator::TraceInfo>& trace = it.getTrace();
      for (std::vector<ParamIterator::TraceInfo>::const_iterator traceIt = trace.begin(); traceIt != trace.end(); ++traceIt)
      {
        if (traceIt->opened)
        {
          OPENMS_LOG_DEBUG << "[Param::merge] extending param trace " << traceIt->name << " (" << pathname << ")\n";
          pathname += traceIt->name + ":";
        }
        else
        {
          OPENMS_LOG_DEBUG << "[Param::merge] reducing param trace " << traceIt->name << " (" << pathname << ")\n";
          std::string suffix = traceIt->name + ":";
          if (suffix.size() <= pathname.size() &&
              pathname.compare(pathname.size() - suffix.size(), suffix.size(), suffix) == 0)
            pathname.resize(pathname.size() - traceIt->name.size() - 1);
        }
        std::string real_pathname = pathname.substr(0, pathname.size() - 1); //remove ':' at the end
        if (!real_pathname.empty())
        {
          std::string description_old = getSectionDescription(prefix + real_pathname);
          const std::string& description_new =
toMerge.getSectionDescription(real_pathname);
          // only fill in a description if this Param has none for the section
          if (description_old.empty())
          {
            setSectionDescription(real_pathname, description_new);
          }
        }
      }
    }
  }

  /// Sets the description of the section (inner tree node) addressed by @p key.
  /// @throws Exception::ElementNotFound if the section does not exist.
  void Param::setSectionDescription(const std::string& key, const std::string& description)
  {
    ParamNode* node = root_.findParentOf(key);
    if (node == nullptr)
    {
      throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, key);
    }

    Param::ParamNode::NodeIterator it = node->findNode(node->suffix(key));
    if (it == node->nodes.end())
    {
      throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, key);
    }
    it->description = description;
  }

  /// Creates a (possibly nested) section node with the given description.
  void Param::addSection(const std::string& key, const std::string& description)
  {
    root_.insert(ParamNode("", description), key);
  }

  Param::ParamIterator Param::begin() const
  {
    return ParamIterator(root_);
  }

  Param::ParamIterator Param::end() const
  {
    return ParamIterator(); // default-constructed iterator acts as the end marker
  }

  Param::ParamIterator::ParamIterator() :
    root_(nullptr),
    current_(0),
    stack_(),
    trace_()
  {
  }

  Param::ParamIterator::ParamIterator(const Param::ParamNode& root) :
    root_(&root),
    current_(-1),
    stack_(),
    trace_()
  {
    //Empty Param => begin == end iterator
    if (root_->entries.empty() && root_->nodes.empty())
    {
      root_ = nullptr;
      return;
    }

    //find first entry
    stack_.push_back(root_);
    operator++();
  }

  Param::ParamIterator::~ParamIterator() = default;

  const Param::ParamEntry& Param::ParamIterator::operator*()
  {
    return stack_.back()->entries[current_];
  }

  const Param::ParamEntry* Param::ParamIterator::operator->()
  {
    return &(stack_.back()->entries[current_]);
  }

  /// Post-increment: copies, advances, returns the copy.
  Param::ParamIterator Param::ParamIterator::operator++(int)
  {
    ParamIterator tmp(*this);
    ++(*this);
    return tmp;
  }

  /// Pre-increment: depth-first walk over the tree. Entries of a node are
  /// visited before its subnodes; trace_ records the nodes entered/left
  /// during this single step (consumed e.g. by Param::merge).
  Param::ParamIterator& Param::ParamIterator::operator++()
  {
    if (root_ == nullptr)
    {
      return *this; // already at end
    }

    trace_.clear();
    while (true)
    {
      const Param::ParamNode* node = stack_.back();
      //std::cout << "############ operator++ #### " << node->name << " ## " << current_ << '\n';

      //check if there is a next entry in the current node
      if (current_ + 1 < (int)node->entries.size())
      {
        //std::cout << " - next entry\n";
        ++current_;
        return *this;
      }
      //visit subnodes after entries
      else if (!node->nodes.empty())
      {
        current_ = -1;
        stack_.push_back(&(node->nodes[0]));
        //std::cout << " - entering into: " << node->nodes[0].name << '\n';
        //track changes (enter a node)
        trace_.emplace_back(node->nodes[0].name, node->nodes[0].description, true);
        continue;
      }
      //go back in tree until the node we came from is not the last subnode
      //of the current node. Go into the next subnode.
      else
      {
        while (true)
        {
          const Param::ParamNode* last = node;
          stack_.pop_back();
          //std::cout << " - stack size: " << stack_.size() << '\n';
          //we have reached the end
          if (stack_.empty())
          {
            //std::cout << " - reached the end\n";
            root_ = nullptr;
            return *this;
          }
          node = stack_.back();
          //std::cout << " - last was: " << last->name << '\n';
          //std::cout << " - descended to: " << node->name << '\n';

          //track changes (leave a node)
          if (!trace_.empty() && trace_.back().name == last->name && trace_.back().opened) // was empty subnode
          {
            trace_.pop_back();
          }
          else
          {
            trace_.emplace_back(last->name, last->description, false);
          }

          //check of new subtree is accessible
          // pointer arithmetic: index of 'last' within its parent's node array
          unsigned int next_index = (last - &(node->nodes[0])) + 1;
          if (next_index < node->nodes.size())
          {
            current_ = -1;
            stack_.push_back(&(node->nodes[next_index]));
            //cout << " - entering into: " << node->nodes[next_index].name << endl;
            //track changes (enter a node)
            trace_.emplace_back(node->nodes[next_index].name, node->nodes[next_index].description, true);
            break;
          }
        }
      }
    }
  }

  bool Param::ParamIterator::operator==(const ParamIterator& rhs) const
  {
    // two end iterators are equal; otherwise compare position (stack + entry index)
    return (root_ == nullptr && rhs.root_ == nullptr) || (stack_ == rhs.stack_ && current_ == rhs.current_);
  }

  bool Param::ParamIterator::operator!=(const ParamIterator& rhs) const
  {
    return !operator==(rhs);
  }

  /// Returns the fully qualified name of the current entry, e.g. "sec:sub:name".
  std::string Param::ParamIterator::getName() const
  {
    std::string tmp;
    // skip the root node (stack_.begin()) -- its name is not part of the path
    for (std::vector<const Param::ParamNode*>::const_iterator it = stack_.begin() + 1; it != stack_.end(); ++it)
    {
      tmp += (*it)->name + ':';
    }
    return tmp + stack_.back()->entries[current_].name;
  }

  const std::vector<Param::ParamIterator::TraceInfo>& Param::ParamIterator::getTrace() const
  {
    return trace_;
  }

  const Param::ParamEntry& Param::getEntry(const std::string& key) const
  {
    return getEntry_(key);
  }

  ParamValue::ValueType Param::getValueType(const std::string& key) const
  {
    return getEntry_(key).value.valueType();
  }

  const std::string& Param::getDescription(const std::string& key) const
  {
    return getEntry_(key).description;
  }

  /// Adds a tag to the entry; commas are forbidden because tags are serialized
  /// as a comma-separated list.
  void Param::addTag(const std::string& key, const std::string& tag)
  {
    if (tag.find(',') != std::string::npos)
    {
      throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Param tags may not contain comma characters", tag);
    }
    getEntry_(key).tags.insert(tag);
  }

  /// Adds several tags at once; validates all of them (comma check as in addTag).
  void Param::addTags(const std::string& key, const std::vector<std::string>& tags)
  {
    ParamEntry& entry = getEntry_(key);
    for (size_t i = 0; i != tags.size(); ++i)
    {
      if (tags[i].find(',') != std::string::npos)
      {
        throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Param tags may not contain comma characters", tags[i]);
      }
      entry.tags.insert(tags[i]);
    }
  }

  /// Returns the entry's tags as a vector (copied from the internal set).
  std::vector<std::string> Param::getTags(const std::string& key) const
  {
    ParamEntry& entry = getEntry_(key);
    std::vector<std::string> list;
    for (std::set<std::string>::const_iterator it = entry.tags.begin(); it != entry.tags.end(); ++it)
    {
      list.push_back(*it);
    }
    return list;
  }

  void Param::clearTags(const std::string& key)
  {
    getEntry_(key).tags.clear();
  }

  bool Param::hasTag(const std::string& key, const std::string& tag) const
  {
    return getEntry_(key).tags.count(tag);
  }

  /// True iff an entry (leaf) with exactly this fully qualified name exists.
  bool Param::exists(const std::string& key) const
  {
    return root_.findEntryRecursive(key); // non-null pointer converts to true
  }

  /// True iff a section (inner node) with this name exists; a trailing ':' is tolerated.
  bool Param::hasSection(const std::string& key) const
  {
    if (key.back() == ':')
    {
      // Remove trailing colon from key
      return root_.findParentOf(key.substr(0, key.size() - 1)) != nullptr;
    }
    else
    {
      return root_.findParentOf(key) != nullptr;
    }
  }

  /// Internal lookup helper: returns a mutable reference to the entry.
  /// NOTE(review): const member returning a non-const ParamEntry& -- callers can
  /// mutate through a const Param; presumably intentional for the tag setters, but
  /// worth confirming.
  /// @throws Exception::ElementNotFound if @p key does not address an entry.
  Param::ParamEntry& Param::getEntry_(const std::string& key) const
  {
    ParamEntry* entry = root_.findEntryRecursive(key);
    if (entry == nullptr)
    {
      throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, key);
    }

    return *entry;
  }

} //namespace
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/Matrix.cpp
.cpp
664
19
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: Timo Sachsenberg $ // -------------------------------------------------------------------------- #include <OpenMS/DATASTRUCTURES/Matrix.h> namespace OpenMS { // Explicit template instantiations to ensure symbols exist in the library. // This compiles the template code for these types into libOpenMS.so. template class Matrix<int>; template class Matrix<double>; template class Matrix<float>; }
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/StringUtilsSimple.cpp
.cpp
3,419
99
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg, Chris Bielow $
// $Authors: Marc Sturm, Stephan Aiche, Chris Bielow $
// --------------------------------------------------------------------------

#include <OpenMS/DATASTRUCTURES/StringUtilsSimple.h>

#include <OpenMS/SYSTEM/SIMDe.h>

namespace OpenMS
{
  /// Returns a pointer to the first character in [p, p_end) that is NOT one of
  /// ' ', '\t', '\n', '\r', or p_end if the range is all whitespace.
  /// Processes 16 bytes per iteration via SSE-style intrinsics (SIMDe portable
  /// wrappers); the tail shorter than 16 bytes is handled with a scalar loop.
  const char* StringUtils::skipWhitespace(const char* p, const char* p_end)
  {
    // one comparison vector per whitespace character
    const simde__m128i w0 = simde_mm_set1_epi8(' ');
    const simde__m128i w1 = simde_mm_set1_epi8('\t');
    const simde__m128i w2 = simde_mm_set1_epi8('\n');
    const simde__m128i w3 = simde_mm_set1_epi8('\r');
    for (; p <= p_end - 16; p += 16)
    {
      const simde__m128i s = simde_mm_loadu_si128(reinterpret_cast<const simde__m128i*>(p));
      // x/y collect per-byte 0xFF flags for "is one of the four whitespace chars"
      simde__m128i x = simde_mm_cmpeq_epi8(s, w0);
      x = simde_mm_or_si128(x, simde_mm_cmpeq_epi8(s, w1));
      simde__m128i y = simde_mm_cmpeq_epi8(s, w2);
      y = simde_mm_or_si128(y, simde_mm_cmpeq_epi8(s, w3));
      x = simde_mm_or_si128(x, y);
      // invert (i.e any non-spaces will be '1') and convert to a 16-bit int
      // (do not try to convert first and then invert -- not the same!)
      auto non_space = static_cast<uint16_t>(~simde_mm_movemask_epi8(x)); // 16 bit is paramount here. Do not use 32!
      if (non_space != 0)
      { // some characters are non-whitespace
#ifdef _MSC_VER
        // Find the index of first non-whitespace
        unsigned long offset;
        _BitScanForward(&offset, non_space);
        return p + offset;
#else
        // ffs() is 1-based, hence the '- 1'
        return p + __builtin_ffs(non_space) - 1;
#endif
      }
    }
    // the remainder (fewer than 16 bytes left)
    while (p != p_end)
    {
      if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t') ++p;
      else return p;
    }
    return p_end;
  }

  /// Counterpart of skipWhitespace(): returns a pointer to the first character
  /// in [p, p_end) that IS one of ' ', '\t', '\n', '\r', or p_end if none is.
  /// Same 16-bytes-at-a-time SIMD strategy with a scalar tail loop.
  const char* StringUtils::skipNonWhitespace(const char* p, const char* p_end)
  {
    const simde__m128i w0 = simde_mm_set1_epi8(' ');
    const simde__m128i w1 = simde_mm_set1_epi8('\t');
    const simde__m128i w2 = simde_mm_set1_epi8('\n');
    const simde__m128i w3 = simde_mm_set1_epi8('\r');
    for (; p <= p_end - 16; p += 16)
    {
      const simde__m128i s = simde_mm_loadu_si128(reinterpret_cast<const simde__m128i*>(p));
      simde__m128i x = simde_mm_cmpeq_epi8(s, w0);
      x = simde_mm_or_si128(x, simde_mm_cmpeq_epi8(s, w1));
      simde__m128i y = simde_mm_cmpeq_epi8(s, w2);
      y = simde_mm_or_si128(y, simde_mm_cmpeq_epi8(s, w3));
      x = simde_mm_or_si128(x, y);
      // convert to a 16-bit int (i.e any spaces will be '1')
      // (do not try to convert first and then invert -- not the same!)
      auto spaces = static_cast<uint16_t>(simde_mm_movemask_epi8(x)); // 16 bit is paramount here. Do not use 32!
      if (spaces != 0)
      { // some characters are whitespace
#ifdef _MSC_VER
        // Find the index of first whitespace
        unsigned long offset;
        _BitScanForward(&offset, spaces);
        return p + offset;
#else
        return p + __builtin_ffs(spaces) - 1;
#endif
      }
    }
    // the remainder (fewer than 16 bytes left)
    while (p != p_end)
    {
      if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t') return p;
      else ++p;
    }
    return p_end;
  }
} // namespace OpenMS
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/CVMappings.cpp
.cpp
2,699
99
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: Andreas Bertsch $ // -------------------------------------------------------------------------- #include <OpenMS/DATASTRUCTURES/CVMappings.h> #include <OpenMS/DATASTRUCTURES/CVMappingRule.h> #include <OpenMS/DATASTRUCTURES/CVReference.h> #include <iostream> namespace OpenMS { class CVMappingRule; } // namespace OpenMS using namespace std; namespace OpenMS { CVMappings::CVMappings() = default; CVMappings::CVMappings(const CVMappings& rhs) = default; CVMappings::~CVMappings() = default; CVMappings& CVMappings::operator=(const CVMappings& rhs) { if (this != &rhs) { mapping_rules_ = rhs.mapping_rules_; cv_references_ = rhs.cv_references_; cv_references_vector_ = rhs.cv_references_vector_; } return *this; } bool CVMappings::operator==(const CVMappings& rhs) const { return mapping_rules_ == rhs.mapping_rules_ && cv_references_ == rhs.cv_references_ && cv_references_vector_ == rhs.cv_references_vector_; } bool CVMappings::operator!=(const CVMappings& rhs) const { return !(*this == rhs); } void CVMappings::setMappingRules(const vector<CVMappingRule>& cv_mapping_rules) { mapping_rules_ = cv_mapping_rules; } const vector<CVMappingRule>& CVMappings::getMappingRules() const { return mapping_rules_; } void CVMappings::addMappingRule(const CVMappingRule& cv_mapping_rule) { mapping_rules_.push_back(cv_mapping_rule); } void CVMappings::setCVReferences(const vector<CVReference>& cv_references) { for (vector<CVReference>::const_iterator it = cv_references.begin(); it != cv_references.end(); ++it) { cv_references_[it->getIdentifier()] = *it; cv_references_vector_.push_back(*it); } } const vector<CVReference>& CVMappings::getCVReferences() const { return cv_references_vector_; } void CVMappings::addCVReference(const CVReference& 
cv_reference) { if (hasCVReference(cv_reference.getIdentifier())) { cerr << "CVMappings: Warning: CV reference with identifier '" << cv_reference.getIdentifier() << "' already existing, ignoring it!" << endl; return; } cv_references_[cv_reference.getIdentifier()] = cv_reference; cv_references_vector_.push_back(cv_reference); } bool CVMappings::hasCVReference(const String& identifier) { return cv_references_.find(identifier) != cv_references_.end(); } } // namespace OpenMS
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/QTCluster.cpp
.cpp
17,679
468
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Steffen Sass, Hendrik Weisser $
// --------------------------------------------------------------------------

#include <OpenMS/CONCEPT/Macros.h>
#include <OpenMS/DATASTRUCTURES/GridFeature.h>
#include <OpenMS/DATASTRUCTURES/QTCluster.h>

#include <algorithm> // for set_intersection
#include <iterator> // for inserter
#include <numeric> // for std::accumulate (used in optimizeAnnotations_)

using std::make_pair;
using std::map;
using std::min;
using std::set;
using std::vector;

namespace OpenMS
{
  // Plain data holder for the heavyweight cluster state; QTCluster itself
  // only keeps a pointer to it.
  QTCluster::BulkData::BulkData(const OpenMS::GridFeature* const center_point,
                                Size num_maps, double max_distance,
                                Int x_coord, Int y_coord, Size id) :
    center_point_(center_point),
    id_(id),
    neighbors_(),
    tmp_neighbors_(),
    max_distance_(max_distance),
    num_maps_(num_maps),
    x_coord_(x_coord),
    y_coord_(y_coord),
    annotations_()
  {
  }

  // If IDs are used, the cluster starts out with the center's annotations;
  // collect_annotations_ is enabled when the center has zero or multiple
  // annotations (the single-annotation case needs no later optimization).
  QTCluster::QTCluster(QTCluster::BulkData* const data, bool use_IDs) :
    quality_(0.0),
    data_(data),
    valid_(true),
    changed_(false),
    use_IDs_(use_IDs),
    collect_annotations_(false),
    finalized_(true)
  {
    if (use_IDs)
    {
      data_->annotations_ = data_->center_point_->getAnnotations();
    }
    if (use_IDs_ && data_->center_point_->getAnnotations().size() != 1)
    {
      collect_annotations_ = true;
    }
  }

  const GridFeature* QTCluster::getCenterPoint() const
  {
    return data_->center_point_;
  }

  Size QTCluster::getId() const
  {
    return data_->id_;
  }

  double QTCluster::getCenterRT() const
  {
    return data_->center_point_->getRT();
  }

  double QTCluster::getCenterMZ() const
  {
    return data_->center_point_->getMZ();
  }

  Int QTCluster::getXCoord() const
  {
    return data_->x_coord_;
  }

  Int QTCluster::getYCoord() const
  {
    return data_->y_coord_;
  }

  void QTCluster::setInvalid()
  {
    // this cluster is considered invalid, it will never be used again in the
    // algorithm. This means we can clean up a bit and save some memory.
    valid_ = false;
    data_->annotations_.clear();
  }

  /// Number of elements in the cluster (neighbors plus the center itself).
  Size QTCluster::size() const
  {
    OPENMS_PRECONDITION(finalized_,
                        "Cannot perform operation on cluster that is not finalized")
    return data_->neighbors_.size() + 1; // + 1 for the center
  }

  /// Orders clusters by quality (used e.g. for picking the best cluster).
  bool QTCluster::operator<(const QTCluster& rhs) const
  {
    OPENMS_PRECONDITION(finalized_,
                        "Cannot perform operation on cluster that is not finalized")
    return quality_ < rhs.quality_;
  }

  /// Offers @p element (from another input map) as a potential cluster member
  /// at the given @p distance from the center. Incompatible annotations are
  /// rejected; otherwise the element is recorded as the best candidate for its
  /// map (and additionally in tmp_neighbors_ when all annotations are collected).
  void QTCluster::add(const OpenMS::GridFeature* const element, double distance)
  {
    // get reference on member that is used in this function
    NeighborMapMulti& tmp_neighbors_ = data_->tmp_neighbors_;

    OPENMS_PRECONDITION(!finalized_,
                        "Cannot perform operation on cluster that is not initialized")
    // ensure we only add compatible peptide annotations
    OPENMS_PRECONDITION(distance <= data_->max_distance_,
                        "Distance cannot be larger than max_distance")

    Size map_index = element->getMapIndex();

    // get reference on member that is used in this function
    const OpenMS::GridFeature& center_point = *(data_->center_point_);

    // Ensure we only add compatible peptide annotations. If the cluster center
    // has an annotation, then each added neighbor should have at least one matching annotation.
    // If the center element has no annotation we add all elements
    // and select the optimal annotation later (as in the case of multiple annotations), using optimizeAnnotations_
    if (use_IDs_)
    {
      bool one_empty = (center_point.getAnnotations().empty() || element->getAnnotations().empty());
      if (!one_empty) // both are annotated
      {
        set<AASequence> intersect;
        // overlap of at least one sequence is enough
        std::set_intersection(center_point.getAnnotations().begin(),
                              center_point.getAnnotations().end(),
                              element->getAnnotations().begin(),
                              element->getAnnotations().end(),
                              std::inserter(intersect, intersect.begin()));
        if (intersect.empty()) return;
      }
    }

    // We have to store annotations in a temporary map first if we collect all
    // annotations
    if (collect_annotations_ && map_index != center_point.getMapIndex())
    {
      tmp_neighbors_[map_index].insert(make_pair(distance, element));
      changed_ = true;
    }

    // Store best (closest) element:
    // Only add the element if either no element is present for the map or if
    // the element is closer than the current element for that map
    // TODO This might be wrong now with multiple seqs.
    // It might need consider the seqTable for every seq of the intersection!
    // On the other hand this just fills data_->neighbors_ which says it only stores the BEST feature per map.
    if (map_index != center_point.getMapIndex())
    {
      NeighborMap& neighbors_ = data_->neighbors_;
      if (neighbors_.find(map_index) == neighbors_.end() ||
          distance < neighbors_[map_index].distance)
      {
        neighbors_[map_index] = Neighbor {distance, element};
        changed_ = true;
      }
    }
  }

  /// Returns all elements of the cluster: the best neighbor of each map plus
  /// the cluster center.
  QTCluster::Elements QTCluster::getElements() const
  {
    OPENMS_PRECONDITION(finalized_,
                        "Cannot perform operation on cluster that is not finalized")
    // get the neighbors and then insert the cluster center
    Elements elements = getAllNeighbors();

    // add center point to the copy
    elements.push_back({data_->center_point_->getMapIndex(), data_->center_point_});

    // Named return value optimization (no additional copy or move when returning by value)
    return elements;
  }

  /// Removes the given elements from the cluster (because another cluster
  /// claimed them). Returns true if this cluster changed; invalidates the
  /// cluster (and returns false) when its center itself was removed.
  bool QTCluster::update(const Elements& removed)
  {
    OPENMS_PRECONDITION(finalized_,
                        "Cannot perform operation on cluster that is not finalized")

    // check if the cluster center was removed:
    for (const auto& removed_element : removed)
    {
      // If center point was removed, then we are done and no more work is
      // required
      if (removed_element.feature == data_->center_point_)
      {
        this->setInvalid();
        return false;
      }
    }

    // get references on member that is used in this function
    NeighborMap& neighbors_ = data_->neighbors_;

    // update cluster contents, remove those elements we find in our cluster
    for (const auto& removed_element : removed)
    {
      NeighborMap::iterator pos = neighbors_.find(removed_element.map_index);
      if (pos == neighbors_.end())
      {
        continue; // no points from this map
      }
      const GridFeature* const current_feature = pos->second.feature;
      if (current_feature == removed_element.feature) // remove this neighbor
      {
        changed_ = true;
        neighbors_.erase(pos);
      }
    }
    return changed_;
  }

  /// Lazily recomputes the quality when the cluster changed since the last call.
  double QTCluster::getQuality()
  {
    // this should work for finalized and non finalized states
    if (changed_)
    {
      computeQuality_();
      changed_ = false;
    }
    return quality_;
  }

  /// Returns the cached quality without recomputation.
  double QTCluster::getCurrentQuality() const
  {
    // ensure cluster is finalized
    // NOTE(review): the message text reads "that is finalized" although the check
    // requires finalized_ to hold -- message presumably should say "not finalized".
    OPENMS_PRECONDITION(finalized_,
                        "Cannot perform operation on cluster that is finalized")
    return quality_;
  }

  /// Computes quality_ in [0, 1] from the (normalized) average neighbor
  /// distance; maps without a neighbor contribute max_distance_ each.
  void QTCluster::computeQuality_()
  {
    // ensure cluster is not finalized as we cannot call optimizeAnnotations_
    // in that case
    OPENMS_PRECONDITION(!finalized_,
                        "Cannot perform operation on cluster that is finalized")

    // get references on member that is used in this function
    NeighborMap& neighbors_ = data_->neighbors_;

    // get copy of member that is used in this function
    double max_distance_ = data_->max_distance_;

    Size num_other = data_->num_maps_ - 1;
    double internal_distance = 0.0;
    if (!use_IDs_ || data_->center_point_->getAnnotations().size() == 1 ||
        neighbors_.empty())
    {
      // if the cluster center is annotated with one peptide ID, the neighbors can
      // consist only of features with compatible IDs, so we don't need to
      // check again here
      for (const auto& neighbor : neighbors_)
      {
        internal_distance += neighbor.second.distance;
      }
      // add max. distance for missing cluster elements:
      internal_distance += double(num_other - neighbors_.size()) * max_distance_;
    }
    else // find the annotation that gives the best quality
    {
      internal_distance = optimizeAnnotations_();
    }

    // normalize:
    internal_distance /= num_other;
    quality_ = (max_distance_ - internal_distance) / max_distance_;
  }

  /// Returns (map index, feature) pairs for the best neighbor of each map.
  QTCluster::Elements QTCluster::getAllNeighbors() const
  {
    OPENMS_PRECONDITION(finalized_,
                        "Cannot perform operation on cluster that is not finalized")

    // copy the important info about the neighbors
    Elements elements;
    for (const auto& neighbor : data_->neighbors_)
    {
      elements.push_back({neighbor.first, neighbor.second.feature});
    }

    // Named return value optimization (no additional copy or move when returning by value)
    return elements;
  }

  /// Returns the cluster's annotation set; re-optimizes it first if the
  /// cluster changed and the center does not pin down a single annotation.
  const set<AASequence>& QTCluster::getAnnotations()
  {
    if (changed_ && use_IDs_ &&
        data_->center_point_->getAnnotations().size() != 1 &&
        !data_->neighbors_.empty())
    {
      optimizeAnnotations_();
    }
    return data_->annotations_;
  }

  /// Selects the annotation (peptide sequence) minimizing the total distance
  /// over all maps, stores it as the cluster annotation and prunes neighbors
  /// accordingly. Returns the best total distance.
  double QTCluster::optimizeAnnotations_()
  {
    OPENMS_PRECONDITION(collect_annotations_,
"QTCluster::optimizeAnnotations_ should only be called if we use collect_annotations_")
    OPENMS_PRECONDITION(!data_->tmp_neighbors_.empty(),
                        "QTCluster::optimizeAnnotations_ needs to have working tmp_neighbors_")
    OPENMS_PRECONDITION(!finalized_,
                        "QTCluster::optimizeAnnotations_ cannot work on finalized cluster")

    // mapping: peptides -> best distance per input map
    map<AASequence, map<Size, double>> seq_table;

    makeSeqTable_(seq_table);

    // get copies of members that are used in this function
    Size num_maps_ = data_->num_maps_;
    double max_distance_ = data_->max_distance_;

    // combine annotation-specific and unspecific distances
    // (all unspecific ones are grouped as empty AASequence):
    auto unspecific = seq_table.find(AASequence());
    if (unspecific != seq_table.end())
    {
      for (auto it = seq_table.begin(); it != seq_table.end(); ++it) // OMS_CODING_TEST_EXCLUDE
      {
        if (it == unspecific) continue;
        // for all the maps for the "real" sequences, overwrite the distance, if an unspecific one is better or
        // add a new entry, so we can just "sum up" the distances for each seq later.
        for (const auto& mapidx_unspecdist : unspecific->second)
        {
          // try to add new entry
          auto mapidx_inserted = it->second.emplace(mapidx_unspecdist.first, mapidx_unspecdist.second);
          if (!mapidx_inserted.second) // an entry for that map idx already existed for the sequence, check minimum of both
          {
            mapidx_inserted.first->second = min(mapidx_inserted.first->second, mapidx_unspecdist.second);
          }
        }
      }
    }

    // compute distance totals -> best annotation set has smallest value:
    auto best_pos = seq_table.begin();
    double best_total = num_maps_ * max_distance_; // worst case: all maps missing
    for (auto it = seq_table.begin(); it != seq_table.end(); ++it) // OMS_CODING_TEST_EXCLUDE
    {
      OPENMS_PRECONDITION(num_maps_ - 1 >= it->second.size(),
                          "num_maps bigger than map size -1 (center)");
      // init value is #missing maps times max_distance
      // above, unspecific distances were incorporated into the rest already.
      double total = std::accumulate(it->second.begin(), it->second.end(),
                                     double(num_maps_ - 1 - it->second.size()) * max_distance_,
                                     [](double val, const std::map<Size, double>::value_type& p)
                                     { return val + p.second; });

      if (total < best_total)
      {
        best_pos = it;
        best_total = total;
      }
    }

    if (best_pos != seq_table.end())
    {
      // TODO can we accumulate the union of possible annotations and set the best as "representative"?
      // Probably in another member and function though (e.g. after finalize),
      // since annotations_ is used in recomputeNeighbors to filter the cluster for matching
      // features of this "best" annotation.
      // Actually I think during creation of the consensusFeature later, all IDs of the linked features
      // (from the original full data) are copied anyway.
      // Then it would make sense to save the "best" annotation "distance-wise" from this algorithm, to be used during
      // IDConflictResolution (which is based on only ID scores).
      // OR already consider the ID scores here and make a more elaborate scoring.
      data_->annotations_ = {best_pos->first};
    }

    // only keep neighbors that fit with the best annotation!
    recomputeNeighbors_();

    return best_total;
  }

  /// Rebuilds neighbors_ from tmp_neighbors_, keeping per map the closest
  /// feature whose annotations overlap the (re-optimized) cluster annotations
  /// (unannotated features are always kept).
  void QTCluster::recomputeNeighbors_()
  {
    // get references on members that are used in this function
    NeighborMap& neighbors_ = data_->neighbors_;
    NeighborMapMulti& tmp_neighbors_ = data_->tmp_neighbors_;
    std::set<AASequence>& annotations_ = data_->annotations_;

    neighbors_.clear();
    for (NeighborMapMulti::const_iterator n_it = tmp_neighbors_.begin();
         n_it != tmp_neighbors_.end(); ++n_it)
    {
      // tmp_neighbors_ is a multimap keyed by distance, so iteration visits
      // candidates from closest to farthest
      for (std::multimap<double, const GridFeature*>::const_iterator df_it = n_it->second.begin();
           df_it != n_it->second.end(); ++df_it)
      {
        std::set<AASequence> intersect;
        const std::set<AASequence>& current = df_it->second->getAnnotations();
        std::set_intersection(current.begin(), current.end(),
                              annotations_.begin(), annotations_.end(),
                              std::inserter(intersect, intersect.begin()));
        // if no overlap with the re-calculated IDs in the center, do not re-add neighbor to the updated neighbors anymore.
        if (!intersect.empty() || current.empty())
        {
          neighbors_[n_it->first] = Neighbor {df_it->first, df_it->second};
          break; // found the best element for this input map
        }
      }
    }
  }

  /// Fills @p seq_table with, per annotation, the best (smallest) distance seen
  /// for each input map, based on all collected candidates in tmp_neighbors_.
  void QTCluster::makeSeqTable_(map<AASequence, map<Size, double>>& seq_table) const
  {
    // get reference on member that is used in this function
    NeighborMapMulti& tmp_neighbors_ = data_->tmp_neighbors_;

    // for all maps contributing to this cluster
    for (NeighborMapMulti::iterator n_it = tmp_neighbors_.begin();
         n_it != tmp_neighbors_.end(); ++n_it)
    {
      // for all neighbors relevant for this cluster in this map
      Size map_index = n_it->first;
      for (NeighborList::iterator df_it = n_it->second.begin();
           df_it != n_it->second.end(); ++df_it)
      {
        double dist = df_it->first;
        // for all IDs/annotations of the neighboring feature (skipped if empty)
        for (const auto& current : df_it->second->getAnnotations())
        {
          auto seqit_inserted = seq_table.emplace(current, map<Size, double> {{map_index, dist}});
          // check if a minimum distance was already set for this ID
          if (!seqit_inserted.second)
          {
            // if so, check if a
dist was annotated for that map_index already. auto distit_inserted = seqit_inserted.first->second.emplace(map_index, dist); // if so: // new dist. value for this input map // compare with old and set minimum if (!distit_inserted.second) { distit_inserted.first->second = min(dist, distit_inserted.first->second); } } } if (df_it->second->getAnnotations().empty()) // unannotated feature { auto seqit_inserted = seq_table.emplace(AASequence(), map<Size, double> {{map_index, dist}}); // check if a minimum distance was already set for empty ID = unannotated if (!seqit_inserted.second) { // if so, check if a dist was annotated for that map_index already. auto distit_inserted = seqit_inserted.first->second.emplace(map_index, dist); // if so: // new dist. value for this input map // compare with old and set minimum if (!distit_inserted.second) { distit_inserted.first->second = min(dist, distit_inserted.first->second); } } // As opposed to above IDed features (which could lead to new additional annotations), // no need to check further here: all following (also annotation-specific) distances are worse // than this unspecific one, since multimap is sorted & dists are already corrected // with noID_penalty. If you don't want this to happen, set the penalty to one and unIDed ones // will always be added at the end): break; } } } } void QTCluster::finalizeCluster() { OPENMS_PRECONDITION(!finalized_, "Try to finalize QTCluster that was not initialized or already finalized") // calls computeQuality_ if something changed since initialization. In // turn, computeQuality_ calls optimizeAnnotations_ if necessary which // ensures that the neighbors_ hash is populated correctly. 
getQuality(); finalized_ = true; data_->tmp_neighbors_.clear(); } void QTCluster::initializeCluster() { OPENMS_PRECONDITION(data_->tmp_neighbors_.empty(), "Try to initialize QTCluster that was not finalized") OPENMS_PRECONDITION(finalized_, "Try to initialize QTCluster that was not finalized") finalized_ = false; data_->tmp_neighbors_.clear(); } } // namespace OpenMS
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/FlagSet.cpp
.cpp
461
15
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Chris Bielow $ // $Authors: Chris Bielow $ // -------------------------------------------------------------------------- // class template .. nothing to see or include here... #if 0 #include <OpenMS/DATASTRUCTURES/FlagSet.h> #endif
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/DefaultParamHandler.cpp
.cpp
3,879
142
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg$ // $Authors: Marc Sturm $ // -------------------------------------------------------------------------- #include <OpenMS/CONCEPT/LogStream.h> #include <OpenMS/DATASTRUCTURES/DefaultParamHandler.h> #include <OpenMS/METADATA/MetaInfoInterface.h> using namespace std; namespace OpenMS { DefaultParamHandler::DefaultParamHandler(const String& name) : param_(), defaults_(), subsections_(), error_name_(name), check_defaults_(true), warn_empty_defaults_(true) { } DefaultParamHandler::DefaultParamHandler(const DefaultParamHandler& rhs) = default; DefaultParamHandler& DefaultParamHandler::operator=(const DefaultParamHandler& rhs) = default; bool DefaultParamHandler::operator==(const DefaultParamHandler& rhs) const { return param_ == rhs.param_ && defaults_ == rhs.defaults_ && subsections_ == rhs.subsections_ && error_name_ == rhs.error_name_ && check_defaults_ == rhs.check_defaults_ && warn_empty_defaults_ == rhs.warn_empty_defaults_; } DefaultParamHandler::~DefaultParamHandler() = default; void DefaultParamHandler::setParameters(const Param& param) { //set defaults and apply new parameters Param tmp(param); tmp.setDefaults(defaults_); param_ = tmp; if (check_defaults_) { if (defaults_.empty() && warn_empty_defaults_) { OPENMS_LOG_WARN << "Warning: No default parameters for DefaultParameterHandler '" << error_name_ << "' specified!" 
<< endl; } //remove registered subsections for (vector<String>::const_iterator it = subsections_.begin(); it != subsections_.end(); ++it) { tmp.removeAll(*it + ':'); } //check defaults tmp.checkDefaults(error_name_, defaults_); } //do necessary changes to other member variables updateMembers_(); } void DefaultParamHandler::defaultsToParam_() { //check if a description is given for all defaults bool description_missing = false; String missing_parameters; for (Param::ParamIterator it = defaults_.begin(); it != defaults_.end(); ++it) { //cout << "Name: " << it->getName() << endl; if (it->description.empty()) { description_missing = true; missing_parameters += it.getName() + ","; break; } } if (description_missing) { cerr << "Warning: no default parameter description for parameters '" << missing_parameters << "' of DefaultParameterHandler '" << error_name_ << "' given!" << endl; } param_.setDefaults(defaults_); updateMembers_(); } void DefaultParamHandler::updateMembers_() { } const Param& DefaultParamHandler::getParameters() const { return param_; } const Param& DefaultParamHandler::getDefaults() const { return defaults_; } const String& DefaultParamHandler::getName() const { return error_name_; } void DefaultParamHandler::setName(const String& name) { error_name_ = name; } const std::vector<String>& DefaultParamHandler::getSubsections() const { return subsections_; } void DefaultParamHandler::writeParametersToMetaValues(const Param& write_this, MetaInfoInterface& write_here, const String& prefix) { String prefix_(prefix); if (!prefix_.empty()) { if (prefix_.compare(prefix_.size() - 1, 1, ":") != 0) // ends with colon? { prefix_ += ":"; } } for (auto it = write_this.begin(); it != write_this.end(); it++) { write_here.setMetaValue(prefix_ + (*it).name, DataValue((*it).value)); } } } // namespace OpenMS
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/ListUtils.cpp
.cpp
418
14
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg$ // $Authors: Stephan Aiche $ // -------------------------------------------------------------------------- #include <OpenMS/DATASTRUCTURES/ListUtils.h> namespace OpenMS { }
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/ConvexHull2D.cpp
.cpp
8,354
284
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg$ // $Authors: Marc Sturm, Chris Bielow $ // -------------------------------------------------------------------------- #include <OpenMS/DATASTRUCTURES/ConvexHull2D.h> namespace OpenMS { ConvexHull2D::ConvexHull2D() : map_points_(), outer_points_() { } /// assignment operator ConvexHull2D& ConvexHull2D::operator=(const ConvexHull2D& rhs) { if (&rhs == this) { return *this; } map_points_ = rhs.map_points_; outer_points_ = rhs.outer_points_; return *this; } /// equality operator bool ConvexHull2D::operator==(const ConvexHull2D& rhs) const { // different size => return false if (map_points_.size() != rhs.map_points_.size()) { return false; } if (outer_points_.size() != rhs.outer_points_.size()) { return false; } //different points now => return false for (const auto& point_pair : rhs.map_points_) { if (map_points_.find(point_pair.first) != map_points_.end()) { if (map_points_.at(point_pair.first) != point_pair.second) { return false; } } else { return false; } } //different points now => return false for (Size i = 0; i < rhs.outer_points_.size(); ++i) { if (outer_points_[i] != rhs.outer_points_[i]) { return false; } } return true; } /// removes all points void ConvexHull2D::clear() { map_points_.clear(); outer_points_.clear(); } /// accessor for the points const ConvexHull2D::PointArrayType& ConvexHull2D::getHullPoints() const { // construct outer hull if required if (outer_points_.empty() && !map_points_.empty()) { // walk the outer hull outer_points_.reserve(map_points_.size() * 2); // traverse lower m/z's of RT scans for (const auto& point_pair : map_points_) { PointType p; p.setX(point_pair.first); p.setY(point_pair.second.minPosition()[0]); outer_points_.push_back(p); } // traverse higher m/z's of RT scans for 
(HullPointType::const_reverse_iterator it = map_points_.rbegin(); it != map_points_.rend(); ++it) { PointType p; p.setX(it->first); p.setY(it->second.maxPosition()[0]); // turning point (avoid listing it twice if last scan only has a single point) if ((it == map_points_.rbegin()) && (it->second.width() == 0)) { continue; } // do not list first scan again if it's only a single point else if (it == --map_points_.rend() && (it->second.width() == 0)) { continue; } outer_points_.push_back(p); } } return outer_points_; } void ConvexHull2D::setHullPoints(const ConvexHull2D::PointArrayType& points) { map_points_.clear(); outer_points_ = points; } void ConvexHull2D::expandToBoundingBox() { DBoundingBox<2> bb(getBoundingBox()); typedef DBoundingBox<2>::PositionType Point; clear(); addPoint(Point(bb.minPosition()[0], bb.minPosition()[1])); addPoint(Point(bb.minPosition()[0], bb.maxPosition()[1])); addPoint(Point(bb.maxPosition()[0], bb.minPosition()[1])); addPoint(Point(bb.maxPosition()[0], bb.maxPosition()[1])); } /// returns the bounding box of the convex hull points DBoundingBox<2> ConvexHull2D::getBoundingBox() const { DBoundingBox<2> bb; // the internal structure might not be defined, but we try it first if (!map_points_.empty()) { for (HullPointType::const_iterator it = map_points_.begin(); it != map_points_.end(); ++it) { bb.enlarge(it->first, it->second.minPosition()[0]); bb.enlarge(it->first, it->second.maxPosition()[0]); } } else if (!outer_points_.empty()) { for (const auto& point : outer_points_) { bb.enlarge(point[0], point[1]); } } return bb; } bool ConvexHull2D::addPoint(const PointType& point) { outer_points_.clear(); if (map_points_.find(point[0]) != map_points_.end()) { if (map_points_.at(point[0]).encloses(point[1])) { return false; } map_points_[point[0]].enlarge(point[1]); } else { map_points_[point[0]] = DBoundingBox<1>(point[1], point[1]); } return true; } void ConvexHull2D::addPoints(const PointArrayType& points) { for (PointArrayTypeConstIterator it = 
points.begin(); it != points.end(); ++it) { addPoint(*it); } } Size ConvexHull2D::compress() { // iterate over rt scans and check if the m/z span is always the same in consecutive scans // keep the min&max scan only // if (map_points_.size() < 3) { return 0; // we need at least one "middle" scan } HullPointType compressed_map; compressed_map[map_points_.begin()->first] = map_points_.begin()->second; // copy first scan HullPointType::const_iterator pred_it = map_points_.begin(); HullPointType::const_iterator middle_it = pred_it; middle_it++; HullPointType::const_iterator succ_it = pred_it; succ_it++; succ_it++; for (Size p = 1; p < map_points_.size() - 1; ++p) { if (pred_it->second == middle_it->second && middle_it->second == succ_it->second) { // middle is identical in m/z range .. do not add to the compressed_map } else { compressed_map[middle_it->first] = middle_it->second; } ++succ_it; ++middle_it; ++pred_it; } compressed_map[middle_it->first] = middle_it->second; // copy last scan if (succ_it != map_points_.end()) { throw Exception::BufferOverflow(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION); } //std::cout << "compressed CH from " << map_points_.size() << " to " << compressed_map.size() << "\n"; Size saved_points = map_points_.size() - compressed_map.size(); //copy map_points_.clear(); map_points_.insert(compressed_map.begin(), compressed_map.end()); return saved_points; } bool ConvexHull2D::encloses(const PointType& point) const { if ((map_points_.empty()) && !outer_points_.empty()) // we cannot answer the query as we lack the internal data structure { // (if you need this you need to augment encloses() to work on outer_points_ only) throw Exception::NotImplemented(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION); } if (map_points_.find(point[0]) != map_points_.end()) { if (map_points_.at(point[0]).encloses(point[1])) { return true; } } // find the two RT scans surrounding the point: HullPointType::const_iterator it_upper = map_points_.end(), it_lower = 
map_points_.end(); // iterate over keys (which are sorted by ascending RT) for (HullPointType::const_iterator it = map_points_.begin(); it != map_points_.end(); ++it) { // lower bound if (((it->first) < (point[0]))) { it_lower = it; } // upper bound if ((it_upper == map_points_.end()) && ((it->first) > (point[0]))) { it_upper = it; } } // point is not between two scans if ((it_lower == map_points_.end()) || (it_upper == map_points_.end())) { return false; } // check if point is within bounds double mz_low = it_lower->second.minPosition()[0] // m/z offset + ((point[0] - (it_lower->first)) / ((it_upper->first) - (it_lower->first))) // factor (0-1) * (it_upper->second.minPosition()[0] - it_lower->second.minPosition()[0]); // m/z range double mz_high = it_lower->second.maxPosition()[0] // m/z offset + ((point[0] - (it_lower->first)) / ((it_upper->first) - (it_lower->first))) // factor (0-1) * (it_upper->second.maxPosition()[0] - it_lower->second.maxPosition()[0]); // m/z range DBoundingBox<1> range(mz_low, mz_high); return range.encloses(point[1]); } } // namespace OpenMS
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/StringView.cpp
.cpp
416
14
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg$ // $Authors: Marc Sturm $ // -------------------------------------------------------------------------- #include <OpenMS/DATASTRUCTURES/StringView.h> namespace OpenMS { }
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/Adduct.cpp
.cpp
5,111
220
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Chris Bielow $ // $Authors: $ // -------------------------------------------------------------------------- #include <OpenMS/DATASTRUCTURES/Adduct.h> #include <OpenMS/CHEMISTRY/Element.h> #include <OpenMS/CHEMISTRY/EmpiricalFormula.h> #include <iostream> #include <map> namespace OpenMS { Adduct::Adduct() : charge_(0), amount_(0), singleMass_(0), log_prob_(0), formula_(), rt_shift_(0), label_() { } Adduct::Adduct(Int charge) : charge_(charge), amount_(0), singleMass_(0), log_prob_(0), formula_(), rt_shift_(0), label_() { } Adduct::Adduct(Int charge, Int amount, double singleMass, const String& formula, double log_prob, double rt_shift, const String& label) : charge_(charge), amount_(amount), singleMass_(singleMass), log_prob_(log_prob), rt_shift_(rt_shift), label_(label) { if (amount < 0) { std::cerr << "Attention: Adduct received negative amount! (" << amount << ")\n"; } formula_ = checkFormula_(formula); } Adduct Adduct::operator*(const Int m) const { Adduct a = *this; a.amount_ *= m; return a; } Adduct Adduct::operator+(const Adduct& rhs) { if (this->formula_ != rhs.formula_) { throw "Adduct::Operator +() tried to add incompatible adduct!"; } Adduct a = *this; a.amount_ += rhs.amount_; return a; } void Adduct::operator+=(const Adduct& rhs) { if (this->formula_ != rhs.formula_) { throw "Adduct::Operator +=() tried to add incompatible adduct!"; } this->amount_ += rhs.amount_; } //@{ Accessors const Int& Adduct::getCharge() const { return charge_; } void Adduct::setCharge(const Int& charge) { charge_ = charge; } const Int& Adduct::getAmount() const { return amount_; } void Adduct::setAmount(const Int& amount) { if (amount < 0) { std::cerr << "Warning: Adduct received negative amount! 
(" << amount << ")\n"; } amount_ = amount; } const double& Adduct::getSingleMass() const { return singleMass_; } void Adduct::setSingleMass(const double& singleMass) { singleMass_ = singleMass; } const double& Adduct::getLogProb() const { return log_prob_; } void Adduct::setLogProb(const double& log_prob) { log_prob_ = log_prob; } const String& Adduct::getFormula() const { return formula_; } void Adduct::setFormula(const String& formula) { formula_ = checkFormula_(formula); } const double& Adduct::getRTShift() const { return rt_shift_; } const String& Adduct::getLabel() const { return label_; } String Adduct::toAdductString(const String& ion_string, const Int& charge) { EmpiricalFormula ef(ion_string); String charge_sign = charge >= 0 ? "+" : "-"; String s("[M"); //need elements sorted canonically (by string) std::map<String, String> sorted_elem_map; for (const auto& element_count : ef) { String e_symbol(element_count.first->getSymbol()); String tmp = element_count.second > 0 ? "+" : "-"; tmp += std::abs(element_count.second) > 1 ? String(std::abs(element_count.second)) : ""; tmp += e_symbol; sorted_elem_map[e_symbol] = std::move(tmp); } for (const auto& sorted_e_cnt : sorted_elem_map) { s += sorted_e_cnt.second; } s += String("]"); s += std::abs(charge) > 1 ? String(std::abs(charge)) : ""; s += charge_sign; return s; } String Adduct::checkFormula_(const String& formula) { EmpiricalFormula ef(formula); if (ef.getCharge() != 0) { std::cerr << "Warning: Adduct contains explicit charge (alternating mass)! (" << formula << ")\n"; } if (ef.isEmpty()) { std::cerr << "Warning: Adduct was given empty formula! (" << formula << ")\n"; } if ((ef.getNumberOfAtoms() > 1) && (std::distance(ef.begin(), ef.end()) == 1)) { std::cerr << "Warning: Adduct was given only a single element but with an abundance>1. This might lead to errors! (" << formula << ")\n"; } return ef.toString(); } ///Print the contents of an Adduct to a stream. 
OPENMS_DLLAPI std::ostream& operator<<(std::ostream& os, const Adduct& a) { os << "---------- Adduct -----------------\n"; os << "Charge: " << a.charge_ << std::endl; os << "Amount: " << a.amount_ << std::endl; os << "MassSingle: " << a.singleMass_ << std::endl; os << "Formula: " << a.formula_ << std::endl; os << "log P: " << a.log_prob_ << std::endl; return os; } OPENMS_DLLAPI bool operator==(const Adduct& a, const Adduct& b) { return a.charge_ == b.charge_ && a.amount_ == b.amount_ && a.singleMass_ == b.singleMass_ && a.log_prob_ == b.log_prob_ && a.formula_ == b.formula_; } }
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/CalibrationData.cpp
.cpp
5,110
192
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Chris Bielow $ // $Authors: Chris Bielow $ // -------------------------------------------------------------------------- #include <OpenMS/DATASTRUCTURES/CalibrationData.h> #include <OpenMS/MATH/MathFunctions.h> #include <OpenMS/MATH/StatisticFunctions.h> namespace OpenMS { CalibrationData::CalibrationData() : data_(), use_ppm_(true), groups_() { } CalibrationData::CalDataType::CoordinateType CalibrationData::getMZ( Size i ) const { return data_[i].getMZ(); } CalibrationData::CalDataType::CoordinateType CalibrationData::getRT( Size i ) const { return data_[i].getRT(); } CalibrationData::CalDataType::CoordinateType CalibrationData::getIntensity( Size i ) const { return data_[i].getIntensity(); } OpenMS::CalibrationData::const_iterator CalibrationData::begin() const { return data_.begin(); } OpenMS::CalibrationData::const_iterator CalibrationData::end() const { return data_.end(); } Size CalibrationData::size() const { return data_.size(); } bool CalibrationData::empty() const { return data_.empty(); } void CalibrationData::clear() { data_.clear(); } void CalibrationData::setUsePPM( bool usePPM ) { use_ppm_ = usePPM; } bool CalibrationData::usePPM() const { return use_ppm_; } void CalibrationData::insertCalibrationPoint( CalDataType::CoordinateType rt, CalDataType::CoordinateType mz_obs, CalDataType::IntensityType intensity, CalDataType::CoordinateType mz_ref, double weight, int group /*= -1*/ ) { RichPeak2D p(Peak2D::PositionType(rt, mz_obs), intensity); p.setMetaValue("mz_ref", mz_ref); p.setMetaValue("ppm_error", Math::getPPM(mz_obs, mz_ref)); p.setMetaValue("weight", weight); if (group >= 0) { p.setMetaValue("peakgroup", group); groups_.insert(group); } data_.push_back(p); } Size CalibrationData::getNrOfGroups() const { return groups_.size(); 
} CalibrationData::CalDataType::CoordinateType CalibrationData::getError( Size i ) const { if (use_ppm_) { return data_[i].getMetaValue("ppm_error"); } else { return (data_[i].getMZ() - getRefMZ(i)); } } CalibrationData::CalDataType::CoordinateType CalibrationData::getRefMZ( Size i ) const { if (!data_[i].metaValueExists("mz_ref")) { throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "getRefMz() received invalid point without meta data!"); } return data_[i].getMetaValue("mz_ref"); } CalibrationData::CalDataType::CoordinateType CalibrationData::getWeight( Size i ) const { if (!data_[i].metaValueExists("weight")) { throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "getWeight() received invalid point without meta data!"); } return data_[i].getMetaValue("weight"); } int CalibrationData::getGroup( Size i ) const { if (!data_[i].metaValueExists("peakgroup")) { return -1; } return data_[i].getMetaValue("peakgroup"); } OpenMS::StringList CalibrationData::getMetaValues() { return ListUtils::create<String>("mz_ref,ppm_error,weight"); } OpenMS::CalibrationData CalibrationData::median( double rt_left, double rt_right ) const { CalibrationData cd; cd.setUsePPM(this->usePPM()); Size i = std::distance(data_.begin(), lower_bound(data_.begin(), data_.end(), rt_left, RichPeak2D::PositionLess())); Size ie = std::distance(data_.begin(), upper_bound(data_.begin(), data_.end(), rt_right, RichPeak2D::PositionLess())); if (i == ie) { return cd; } double rt = (rt_left + rt_right) / 2; for (std::set<int>::const_iterator it_group = groups_.begin(); it_group!= groups_.end(); ++it_group) { std::vector<double> mzs, ints; double mz_ref(0); for (Size j = i; j < ie; ++j) { if (getGroup(j) == *it_group) { mzs.push_back(data_[j].getMZ()); ints.push_back(data_[j].getIntensity()); mz_ref = getRefMZ(j); } } if (ints.empty()) { continue; // no data points for this peak group in this RT range } double int_median = Math::median(ints.begin(), 
ints.end()); cd.insertCalibrationPoint(rt, Math::median(mzs.begin(), mzs.end()), int_median, mz_ref, log(int_median)); } return cd; } void CalibrationData::sortByRT() { std::sort(data_.begin(), data_.end(), RichPeak2D::PositionLess()); } } // namespace OpenMS
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/DIntervalBase.cpp
.cpp
387
13
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: Clemens Groepl, Marc Sturm $ // -------------------------------------------------------------------------- namespace OpenMS { }
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/ExposedVector.cpp
.cpp
418
14
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Chris Bielow $ // $Authors: Chris Bielow $ // -------------------------------------------------------------------------- #include <OpenMS/DATASTRUCTURES/ExposedVector.h> namespace OpenMS { }
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/StringConversions.cpp
.cpp
467
14
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg, Chris Bielow $ // $Authors: Marc Sturm, Stephan Aiche, Chris Bielow $ // -------------------------------------------------------------------------- #include <OpenMS/DATASTRUCTURES/StringConversions.h> namespace OpenMS { }
C++
3D
OpenMS/OpenMS
src/openms/source/DATASTRUCTURES/ListUtilsIO.cpp
.cpp
374
13
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin // SPDX-License-Identifier: BSD-3-Clause // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: Stephan Aiche $ // -------------------------------------------------------------------------- namespace OpenMS { }
C++