keyword stringclasses 7 values | repo_name stringlengths 8 98 | file_path stringlengths 4 244 | file_extension stringclasses 29 values | file_size int64 0 84.1M | line_count int64 0 1.6M | content stringlengths 1 84.1M ⌀ | language stringclasses 14 values |
|---|---|---|---|---|---|---|---|
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/ConsensusIDAlgorithmIdentity.cpp | .cpp | 4,522 | 125 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Sven Nahnsen, Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/ConsensusIDAlgorithmIdentity.h>
#include <OpenMS/CONCEPT/LogStream.h>
using namespace std;
namespace OpenMS
{
ConsensusIDAlgorithmIdentity::ConsensusIDAlgorithmIdentity()
{
  // Register this algorithm's name with the DefaultParamHandler base class.
  setName("ConsensusIDAlgorithmIdentity");
}
void ConsensusIDAlgorithmIdentity::preprocess_(
  PeptideIdentificationList& ids)
{
  // Sanity-check the input runs: all score types must share one orientation,
  // and mixing different score types triggers a warning (but is allowed).
  const bool higher_better = ids[0].isHigherScoreBetter();
  set<String> observed_types;
  for (const PeptideIdentification& pep : ids)
  {
    if (pep.isHigherScoreBetter() != higher_better)
    {
      // scores with different orientations definitely aren't comparable:
      String hi_lo = higher_better ? "higher/lower" : "lower/higher";
      String msg = "Score types '" + ids[0].getScoreType() + "' and '" +
        pep.getScoreType() + "' have different orientations (" + hi_lo +
        " is better) and cannot be compared meaningfully.";
      throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                    msg, higher_better ? "false" : "true");
    }
    observed_types.insert(pep.getScoreType());
  }
  // More than one distinct score type: warn, but continue.
  if (observed_types.size() > 1)
  {
    String types;
    types.concatenate(observed_types.begin(), observed_types.end(), "'/'");
    OPENMS_LOG_WARN << "Warning: Different score types for peptide hits found ('"
                    << types << "'). If the scores are not comparable, "
                    << "results will be meaningless." << endl;
  }
}
void ConsensusIDAlgorithmIdentity::apply_(PeptideIdentificationList& ids,
                                          const map<String, String>& se_info,
                                          SequenceGrouping& results)
{
  preprocess_(ids);

  // Group peptide hits by (modified) sequence, accumulating scores, score
  // types and peptide evidences per unique sequence.
  for (PeptideIdentification& pep : ids)
  {
    String score_type = pep.getScoreType();
    // prefix the score type with the search engine name, if known:
    auto se_it = se_info.find(pep.getIdentifier());
    if (se_it != se_info.end())
    {
      score_type = se_it->second + "_" + score_type;
    }
    for (PeptideHit& hit : pep.getHits())
    {
      const AASequence& seq = hit.getSequence();
      auto grouped = results.find(seq);
      if (grouped == results.end()) // new sequence
      {
        auto ev = hit.getPeptideEvidences();
        results[seq] = HitInfo{
          hit.getCharge(),
          {hit.getScore()},
          {score_type},
          hit.getMetaValue("target_decoy").toString(),
          {std::make_move_iterator(ev.begin()), std::make_move_iterator(ev.end())},
          0.,
          0.
        };
      }
      else // previously seen sequence
      {
        compareChargeStates_(grouped->second.charge, hit.getCharge(),
                             grouped->first);
        grouped->second.scores.emplace_back(hit.getScore());
        grouped->second.types.emplace_back(score_type);
        for (const auto& ev : hit.getPeptideEvidences())
        {
          grouped->second.evidence.emplace(ev);
        }
      }
    }
  }

  // Aggregate the collected scores and compute the "support" (fraction of the
  // other ID runs that also contain the sequence); store both in the results.
  const bool higher_better = ids[0].isHigherScoreBetter();
  Size n_other_ids = (count_empty_ ? number_of_runs_ : ids.size()) - 1;
  for (auto& entry : results)
  {
    double agg_score = getAggregateScore_(entry.second.scores, higher_better);
    // if 'count_empty' is false, 'n_other_ids' may be zero, in which case
    // we define the support to be one to avoid a NaN:
    double support = 1.0;
    if (n_other_ids > 0) // the normal case
    {
      support = (entry.second.scores.size() - 1.0) / n_other_ids;
    }
    entry.second.final_score = agg_score;
    entry.second.support = support;
  }
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/PeptideSearchEngineFIAlgorithm.cpp | .cpp | 33,468 | 736 | // Copyright (c) 2002-present, The OpenMS Team -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: $
// $Authors: Raphael Förster $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/PeptideSearchEngineFIAlgorithm.h>
#include <OpenMS/ANALYSIS/ID/FragmentIndex.h>
#include <OpenMS/ANALYSIS/ID/PeptideIndexing.h>
#include <OpenMS/ANALYSIS/ID/HyperScore.h>
#include <OpenMS/ANALYSIS/ID/OpenSearchModificationAnalysis.h>
#include <OpenMS/CHEMISTRY/DecoyGenerator.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/CHEMISTRY/ProteaseDB.h>
#include <OpenMS/CHEMISTRY/ResidueModification.h>
#include <OpenMS/CHEMISTRY/TheoreticalSpectrumGenerator.h>
#include <OpenMS/COMPARISON/SpectrumAlignment.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CONCEPT/VersionInfo.h>
#include <OpenMS/DATASTRUCTURES/Param.h>
#include <OpenMS/DATASTRUCTURES/StringView.h>
#include <OpenMS/FORMAT/FASTAFile.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/KERNEL/Peak1D.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/METADATA/SpectrumSettings.h>
#include <OpenMS/PROCESSING/DEISOTOPING/Deisotoper.h>
#include <OpenMS/PROCESSING/FILTERING/NLargest.h>
#include <OpenMS/PROCESSING/FILTERING/ThresholdMower.h>
#include <OpenMS/PROCESSING/FILTERING/WindowMower.h>
#include <OpenMS/PROCESSING/ID/IDFilter.h>
#include <OpenMS/PROCESSING/SCALING/Normalizer.h>
#include <algorithm>
#include <map>
#ifdef _OPENMP
#include <omp.h>
#endif
using namespace std;
namespace OpenMS
{
// Constructor: registers every user-facing parameter (default value,
// description, valid values, section) with the DefaultParamHandler base and
// then syncs them into the member variables via defaultsToParam_().
// Fix: the description of 'fragment:mass_tolerance_unit' was truncated
// ("Unit of fragment m") - completed to match the precursor counterpart.
PeptideSearchEngineFIAlgorithm::PeptideSearchEngineFIAlgorithm() :
  DefaultParamHandler("PeptideSearchEngineFIAlgorithm"),
  ProgressLogger()
{
  // --- precursor (parent ion) options ---
  defaults_.setValue("precursor:mass_tolerance", 10.0, "+/- tolerance for precursor mass.");
  std::vector<std::string> precursor_mass_tolerance_unit_valid_strings;
  precursor_mass_tolerance_unit_valid_strings.emplace_back("ppm");
  precursor_mass_tolerance_unit_valid_strings.emplace_back("Da");
  defaults_.setValue("precursor:mass_tolerance_unit", "ppm", "Unit of precursor mass tolerance.");
  defaults_.setValidStrings("precursor:mass_tolerance_unit", precursor_mass_tolerance_unit_valid_strings);
  defaults_.setValue("precursor:min_charge", 2, "Minimum precursor charge to be considered.");
  defaults_.setValue("precursor:max_charge", 5, "Maximum precursor charge to be considered.");
  defaults_.setSectionDescription("precursor", "Precursor (Parent Ion) Options");
  // consider one before annotated monoisotopic peak and the annotated one
  IntList isotopes = {0, 1};
  defaults_.setValue("precursor:isotopes", isotopes, "Corrects for mono-isotopic peak misassignments. (E.g.: 1 = prec. may be misassigned to first isotopic peak)");

  // --- fragment (product ion) options ---
  defaults_.setValue("fragment:mass_tolerance", 10.0, "Fragment mass tolerance");
  std::vector<std::string> fragment_mass_tolerance_unit_valid_strings;
  fragment_mass_tolerance_unit_valid_strings.emplace_back("ppm");
  fragment_mass_tolerance_unit_valid_strings.emplace_back("Da");
  defaults_.setValue("fragment:mass_tolerance_unit", "ppm", "Unit of fragment mass tolerance.");
  defaults_.setValidStrings("fragment:mass_tolerance_unit", fragment_mass_tolerance_unit_valid_strings);
  defaults_.setValue("fragment:min_mz", 150, "Minimal fragment mz for database");
  defaults_.setValue("fragment:max_mz", 2000, "Maximal fragment mz for database");
  defaults_.setSectionDescription("fragment", "Fragments (Product Ion) Options");

  // --- modification options (UniMod terms, validated against the DB) ---
  vector<String> all_mods;
  ModificationsDB::getInstance()->getAllSearchModifications(all_mods);
  defaults_.setValue("modifications:fixed", std::vector<std::string>{"Carbamidomethyl (C)"}, "Fixed modifications, specified using UniMod (www.unimod.org) terms, e.g. 'Carbamidomethyl (C)'");
  defaults_.setValidStrings("modifications:fixed", ListUtils::create<std::string>(all_mods));
  defaults_.setValue("modifications:variable", std::vector<std::string>{"Oxidation (M)"}, "Variable modifications, specified using UniMod (www.unimod.org) terms, e.g. 'Oxidation (M)'");
  defaults_.setValidStrings("modifications:variable", ListUtils::create<std::string>(all_mods));
  defaults_.setValue("modifications:variable_max_per_peptide", 2, "Maximum number of residues carrying a variable modification per candidate peptide");
  defaults_.setSectionDescription("modifications", "Modifications Options");

  // --- digestion / decoy options ---
  vector<String> all_enzymes;
  ProteaseDB::getInstance()->getAllNames(all_enzymes);
  defaults_.setValue("enzyme", "Trypsin", "The enzyme used for peptide digestion.");
  defaults_.setValidStrings("enzyme", ListUtils::create<std::string>(all_enzymes));
  defaults_.setValue("decoys", "false", "Should decoys be generated?");
  defaults_.setValidStrings("decoys", {"true","false"} );

  // --- PSM annotation options ---
  defaults_.setValue("annotate:PSM", std::vector<std::string>{"ALL"}, "Annotations added to each PSM.");
  defaults_.setValidStrings("annotate:PSM",
                            std::vector<std::string>{
                              "ALL",
                              Constants::UserParam::FRAGMENT_ERROR_MEDIAN_PPM_USERPARAM,
                              Constants::UserParam::PRECURSOR_ERROR_PPM_USERPARAM,
                              Constants::UserParam::MATCHED_PREFIX_IONS_FRACTION,
                              Constants::UserParam::MATCHED_SUFFIX_IONS_FRACTION}
                            );
  defaults_.setSectionDescription("annotate", "Annotation Options");

  // --- peptide-level options ---
  defaults_.setValue("peptide:min_size", 7, "Minimum size a peptide must have after digestion to be considered in the search.");
  defaults_.setValue("peptide:max_size", 40, "Maximum size a peptide must have after digestion to be considered in the search (0 = disabled).");
  defaults_.setValue("peptide:missed_cleavages", 1, "Number of missed cleavages.");
  defaults_.setValue("peptide:motif", "", "If set, only peptides that contain this motif (provided as RegEx) will be considered.");
  defaults_.setSectionDescription("peptide", "Peptide Options");

  // --- reporting ---
  defaults_.setValue("report:top_hits", 1, "Maximum number of top scoring hits per spectrum that are reported.");
  defaults_.setSectionDescription("report", "Reporting Options");

  // Add parameters which are only used by FragmentIndex
  defaults_.setValue("peptide:min_mass", 100, "Minimal peptide mass for database");
  defaults_.setValue("peptide:max_mass", 9000, "Maximal peptide mass for database");
  // Fragment-level filtering
  defaults_.setValue("fragment:min_matched_ions", 5, "Minimal number of matched ions to report a PSM");
  // Precursor isotope error handling
  defaults_.setValue("precursor:isotope_error_min", -1, "Minimum allowed precursor isotope error");
  defaults_.setValue("precursor:isotope_error_max", 1, "Maximum allowed precursor isotope error");
  // Fragment and scoring limits
  defaults_.setValue("fragment:max_charge", 2, "max fragment charge");
  defaults_.setValue("scoring:max_candidates_per_spectrum", 50, "The number of initial hits for which we calculate a score");
  defaults_.setSectionDescription("scoring", "Search/Scoring Limits");
  // Open search window bounds (used when tolerance > 1 Da or > 1000 ppm)
  defaults_.setValue("precursor:open_window_lower", -100.0, "lower bound of the open precursor window");
  defaults_.setValue("precursor:open_window_upper", 200.0, "upper bound of the open precursor window");

  // --- theoretical ion series toggles ---
  defaults_.setValue("ions:add_y_ions", "true", "Add peaks of y-ions to the spectrum");
  defaults_.setValidStrings("ions:add_y_ions", {"true","false"});
  defaults_.setValue("ions:add_b_ions", "true", "Add peaks of b-ions to the spectrum");
  defaults_.setValidStrings("ions:add_b_ions", {"true","false"});
  defaults_.setValue("ions:add_a_ions", "false", "Add peaks of a-ions to the spectrum");
  defaults_.setValidStrings("ions:add_a_ions", {"true","false"});
  defaults_.setValue("ions:add_c_ions", "false", "Add peaks of c-ions to the spectrum");
  defaults_.setValidStrings("ions:add_c_ions", {"true","false"});
  defaults_.setValue("ions:add_x_ions", "false", "Add peaks of x-ions to the spectrum");
  defaults_.setValidStrings("ions:add_x_ions", {"true","false"});
  defaults_.setValue("ions:add_z_ions", "false", "Add peaks of z-ions to the spectrum");
  defaults_.setValidStrings("ions:add_z_ions", {"true","false"});
  defaults_.setSectionDescription("ions", "Theoretical ion series toggles");

  // copy defaults into the active parameters (triggers updateMembers_)
  defaultsToParam_();
}
void PeptideSearchEngineFIAlgorithm::updateMembers_()
{
precursor_mass_tolerance_ = param_.getValue("precursor:mass_tolerance");
precursor_mass_tolerance_unit_ = param_.getValue("precursor:mass_tolerance_unit").toString();
precursor_min_charge_ = param_.getValue("precursor:min_charge");
precursor_max_charge_ = param_.getValue("precursor:max_charge");
precursor_isotopes_ = param_.getValue("precursor:isotopes");
fragment_mass_tolerance_ = param_.getValue("fragment:mass_tolerance");
fragment_mass_tolerance_unit_ = param_.getValue("fragment:mass_tolerance_unit").toString();
modifications_fixed_ = ListUtils::toStringList<std::string>(param_.getValue("modifications:fixed"));
set<String> fixed_unique(modifications_fixed_.begin(), modifications_fixed_.end());
if (fixed_unique.size() != modifications_fixed_.size())
{
OPENMS_LOG_WARN << "Duplicate fixed modification provided. Making them unique." << endl;
modifications_fixed_.assign(fixed_unique.begin(), fixed_unique.end());
}
modifications_variable_ = ListUtils::toStringList<std::string>(param_.getValue("modifications:variable"));
set<String> var_unique(modifications_variable_.begin(), modifications_variable_.end());
if (var_unique.size() != modifications_variable_.size())
{
OPENMS_LOG_WARN << "Duplicate variable modification provided. Making them unique." << endl;
modifications_variable_.assign(var_unique.begin(), var_unique.end());
}
modifications_max_variable_mods_per_peptide_ = param_.getValue("modifications:variable_max_per_peptide");
enzyme_ = param_.getValue("enzyme").toString();
peptide_min_size_ = param_.getValue("peptide:min_size");
peptide_max_size_ = param_.getValue("peptide:max_size");
peptide_missed_cleavages_ = param_.getValue("peptide:missed_cleavages");
peptide_motif_ = param_.getValue("peptide:motif").toString(); // TODO: remove unused parameters
report_top_hits_ = param_.getValue("report:top_hits");
decoys_ = param_.getValue("decoys") == "true";
annotate_psm_ = ListUtils::toStringList<std::string>(param_.getValue("annotate:PSM"));
// Open search mode is automatically determined based on precursor tolerance in isOpenSearchMode_()
}
// static
// Prepare all MS2 spectra for searching, in place:
//  1. drop zero-intensity peaks and normalize intensities (whole map),
//  2. per spectrum (parallel): sort by m/z, deisotope, window-filter,
//     keep the 400 largest peaks, and re-sort by m/z.
// @param exp                               peak map holding the MS2 spectra (modified in place)
// @param fragment_mass_tolerance           tolerance handed to the deisotoper
// @param fragment_mass_tolerance_unit_ppm  true if that tolerance is in ppm, false for Da
void PeptideSearchEngineFIAlgorithm::preprocessSpectra_(PeakMap& exp, double fragment_mass_tolerance, bool fragment_mass_tolerance_unit_ppm)
{
// filter MS2 map
// remove 0 intensities
ThresholdMower threshold_mower_filter;
threshold_mower_filter.filterPeakMap(exp);
// normalize peak intensities
Normalizer normalizer;
normalizer.filterPeakMap(exp);
// sort by rt
exp.sortSpectra(false);
// filter settings: keep at most 20 peaks per 100 Th window (jumping window)
WindowMower window_mower_filter;
Param filter_param = window_mower_filter.getParameters();
filter_param.setValue("windowsize", 100.0, "The size of the sliding window along the m/z axis.");
filter_param.setValue("peakcount", 20, "The number of peaks that should be kept.");
filter_param.setValue("movetype", "jump", "Whether sliding window (one peak steps) or jumping window (window size steps) should be used.");
window_mower_filter.setParameters(filter_param);
NLargest nlargest_filter = NLargest(400);
// per-spectrum work is independent, so spectra are processed in parallel;
// each iteration touches only exp[exp_index]
#pragma omp parallel for default(none) shared(exp, fragment_mass_tolerance, fragment_mass_tolerance_unit_ppm, window_mower_filter, nlargest_filter)
for (SignedSize exp_index = 0; exp_index < (SignedSize)exp.size(); ++exp_index)
{
// sort by mz
exp[exp_index].sortByPosition();
// deisotope
Deisotoper::deisotopeAndSingleCharge(exp[exp_index],
fragment_mass_tolerance, fragment_mass_tolerance_unit_ppm,
1, 3, // min / max charge
false, // keep only deisotoped
3, 10, // min / max isopeaks
true); // convert fragment m/z to mono-charge
// remove noise
window_mower_filter.filterPeakSpectrum(exp[exp_index]);
nlargest_filter.filterPeakSpectrum(exp[exp_index]);
// sort (nlargest changes order)
exp[exp_index].sortByPosition();
}
}
// Convert the raw per-spectrum AnnotatedHit_ lists into OpenMS identification
// structures:
//  - trims each spectrum's hit list to the 'top_hits' best-scoring entries,
//  - builds one PeptideIdentification per non-empty spectrum (parallel),
//    optionally annotating each PSM with fragment/precursor errors and
//    matched prefix/suffix ion fractions,
//  - sorts hits, assigns explicit ranks (0 = best),
//  - fills a single ProteinIdentification with the search settings used.
// NOTE(review): the commented-out modification/max-mods parameters below are
// currently unused (see TODOs); modifications are passed in as string lists
// only so they can be recorded in the search parameters.
void PeptideSearchEngineFIAlgorithm::postProcessHits_(const PeakMap& exp,
std::vector<std::vector<PeptideSearchEngineFIAlgorithm::AnnotatedHit_> >& annotated_hits,
std::vector<ProteinIdentification>& protein_ids,
PeptideIdentificationList& peptide_ids,
Size top_hits,
// const ModifiedPeptideGenerator::MapToResidueType& fixed_modifications,
// const ModifiedPeptideGenerator::MapToResidueType& variable_modifications,
// Size max_variable_mods_per_peptide, TODO: what about this parameter?
const StringList& modifications_fixed,
const StringList& modifications_variable,
Int peptide_missed_cleavages,
double precursor_mass_tolerance,
double fragment_mass_tolerance,
const String& precursor_mass_tolerance_unit_ppm,
const String& fragment_mass_tolerance_unit_ppm,
const Int precursor_min_charge,
const Int precursor_max_charge,
const String& enzyme,
const String& database_name) const
{
// remove all but top n scoring TODO: use two parameters to distinguish between number of reported peptides and number of pre-scored peptides
#pragma omp parallel for default(none) shared(annotated_hits, top_hits)
for (SignedSize scan_index = 0; scan_index < (SignedSize)annotated_hits.size(); ++scan_index)
{
// sort and keeps n best elements according to score
Size topn = top_hits > annotated_hits[scan_index].size() ? annotated_hits[scan_index].size() : top_hits;
std::partial_sort(annotated_hits[scan_index].begin(), annotated_hits[scan_index].begin() + topn, annotated_hits[scan_index].end(), AnnotatedHit_::hasBetterScore);
annotated_hits[scan_index].resize(topn);
// release the capacity held by the discarded tail
annotated_hits[scan_index].shrink_to_fit();
}
// determine which of the optional PSM annotations were requested
bool annotation_precursor_error_ppm = std::find(annotate_psm_.begin(), annotate_psm_.end(), Constants::UserParam::PRECURSOR_ERROR_PPM_USERPARAM) != annotate_psm_.end();
bool annotation_fragment_error_ppm = std::find(annotate_psm_.begin(), annotate_psm_.end(), Constants::UserParam::FRAGMENT_ERROR_MEDIAN_PPM_USERPARAM) != annotate_psm_.end();
bool annotation_prefix_fraction = std::find(annotate_psm_.begin(), annotate_psm_.end(), Constants::UserParam::MATCHED_PREFIX_IONS_FRACTION) != annotate_psm_.end();
bool annotation_suffix_fraction = std::find(annotate_psm_.begin(), annotate_psm_.end(), Constants::UserParam::MATCHED_SUFFIX_IONS_FRACTION) != annotate_psm_.end();
// "ALL" adds all annotations
if (std::find(annotate_psm_.begin(), annotate_psm_.end(), "ALL") != annotate_psm_.end())
{
annotation_precursor_error_ppm = true;
annotation_fragment_error_ppm = true;
annotation_prefix_fraction = true;
annotation_suffix_fraction = true;
}
// build PeptideIdentifications in parallel; each finished object is appended
// to peptide_ids under a critical section and the list is re-ordered below
#pragma omp parallel for
for (SignedSize scan_index = 0; scan_index < (SignedSize)annotated_hits.size(); ++scan_index)
{
if (!annotated_hits[scan_index].empty())
{
const MSSpectrum& spec = exp[scan_index];
// create empty PeptideIdentification object and fill meta data
PeptideIdentification pi{};
pi.setSpectrumReference( spec.getNativeID());
pi.setMetaValue("scan_index", static_cast<unsigned int>(scan_index));
pi.setScoreType("ln(hyperscore)");
pi.setHigherScoreBetter(true);
double mz = spec.getPrecursors()[0].getMZ();
pi.setRT(spec.getRT());
pi.setMZ(mz);
Size charge = spec.getPrecursors()[0].getCharge();
// create full peptide hit structure from annotated hits
vector<PeptideHit> phs;
for (const auto& ah : annotated_hits[scan_index])
{
PeptideHit ph;
// Prefer spectrum charge; if absent (0), fall back to the charge actually used by FI for this candidate
const Size used_charge = (charge > 0) ? charge : static_cast<Size>(ah.applied_charge);
ph.setCharge(used_charge);
ph.setScore(ah.score);
ph.setSequence(ah.sequence);
if (annotation_fragment_error_ppm)
{
// median ppm error over all aligned theoretical/experimental fragment pairs
TheoreticalSpectrumGenerator tsg;
vector<pair<Size, Size> > alignment;
MSSpectrum theoretical_spec;
// NOTE(review): uses the raw spectrum 'charge' (not 'used_charge'); when
// the spectrum charge is 0 this limits fragments to z=1 - confirm intended
const int max_frag_z = (charge >= 2) ? std::min<int>(charge - 1, 2) : 1;
tsg.getSpectrum(theoretical_spec, ah.sequence, 1, max_frag_z);
SpectrumAlignment sa;
sa.getSpectrumAlignment(alignment, theoretical_spec, spec);
vector<double> err;
for (const auto& match : alignment)
{
double fragment_error = fabs(Math::getPPM(spec[match.second].getMZ(), theoretical_spec[match.first].getMZ()));
err.push_back(fragment_error);
}
// no aligned peaks -> report 0 ppm error
double median_ppm_error(0);
if (!err.empty()) { median_ppm_error = Math::median(err.begin(), err.end(), false); }
ph.setMetaValue(Constants::UserParam::FRAGMENT_ERROR_MEDIAN_PPM_USERPARAM, median_ppm_error);
}
if (annotation_precursor_error_ppm)
{
double theo_mz = ah.sequence.getMZ(used_charge);
double ppm_difference = Math::getPPM(mz, theo_mz);
ph.setMetaValue(Constants::UserParam::PRECURSOR_ERROR_PPM_USERPARAM, ppm_difference);
}
if (annotation_prefix_fraction)
{
ph.setMetaValue(Constants::UserParam::MATCHED_PREFIX_IONS_FRACTION, ah.prefix_fraction);
}
if (annotation_suffix_fraction)
{
ph.setMetaValue(Constants::UserParam::MATCHED_SUFFIX_IONS_FRACTION, ah.suffix_fraction);
}
// Add isotope error metavalue (always)
ph.setMetaValue("isotope_error", ah.isotope_error);
// Add delta mass metavalue for open search
if (isOpenSearchMode_())
{
ph.setMetaValue("DeltaMass", ah.delta_mass);
}
// store PSM
phs.push_back(ph);
}
pi.setHits(phs);
// Ensure hits are sorted by score (best first), then assign ranks explicitly (0 = top hit)
pi.sort();
{
std::vector<PeptideHit>& hits = pi.getHits();
for (Size r = 0; r < hits.size(); ++r)
{
hits[r].setRank(static_cast<UInt>(r));
}
}
// Debug: log spectrum-level top hit details before storing PeptideIdentification
if (!pi.getHits().empty())
{
const PeptideHit& top_hit = pi.getHits().front();
OPENMS_LOG_INFO << "[PDBS-FI] scan_index=" << scan_index
<< " top_ln(hyperscore)=" << top_hit.getScore()
<< " top_charge=" << top_hit.getCharge()
<< " top_isotope_error=" << (int)top_hit.getMetaValue("isotope_error")
<< std::endl;
}
// peptide_ids is shared between threads - serialize the append
#pragma omp critical (peptide_ids_access)
{
//clang-tidy: seems to be a false-positive in combination with omp
peptide_ids.push_back(std::move(pi));
}
}
}
#ifdef _OPENMP
// we need to sort the peptide_ids by scan_index in order to have the same output in the idXML-file
if (omp_get_max_threads() > 1)
{
std::sort(peptide_ids.begin(), peptide_ids.end(), [](const PeptideIdentification& a, const PeptideIdentification& b)
{
return a.getMetaValue("scan_index") < b.getMetaValue("scan_index");
});
}
#endif
// protein identifications (leave as is...)
protein_ids = vector<ProteinIdentification>(1);
protein_ids[0].setDateTime(DateTime::now());
protein_ids[0].setSearchEngine("PeptideDataBaseSearchFI");
protein_ids[0].setSearchEngineVersion(VersionInfo::getVersion());
// identifier links peptide IDs to this protein ID run
DateTime now = DateTime::now();
String identifier("PDBSFI_" + now.get());
protein_ids[0].setIdentifier(identifier);
for (auto & pid : peptide_ids) { pid.setIdentifier(identifier); }
// record the search settings that produced these IDs
ProteinIdentification::SearchParameters search_parameters;
search_parameters.db = database_name;
search_parameters.charges = String(precursor_min_charge) + ":" + String(precursor_max_charge);
ProteinIdentification::PeakMassType mass_type = ProteinIdentification::PeakMassType::MONOISOTOPIC;
search_parameters.mass_type = mass_type;
search_parameters.fixed_modifications = modifications_fixed;
search_parameters.variable_modifications = modifications_variable;
search_parameters.missed_cleavages = peptide_missed_cleavages;
search_parameters.fragment_mass_tolerance = fragment_mass_tolerance;
search_parameters.precursor_mass_tolerance = precursor_mass_tolerance;
search_parameters.precursor_mass_tolerance_ppm = precursor_mass_tolerance_unit_ppm == "ppm";
search_parameters.fragment_mass_tolerance_ppm = fragment_mass_tolerance_unit_ppm == "ppm";
search_parameters.digestion_enzyme = *ProteaseDB::getInstance()->getEnzyme(enzyme);
// add additional percolator features or post-processing
StringList feature_set{"score"};
if (annotation_fragment_error_ppm) feature_set.push_back(Constants::UserParam::FRAGMENT_ERROR_MEDIAN_PPM_USERPARAM);
if (annotation_prefix_fraction) feature_set.push_back(Constants::UserParam::MATCHED_PREFIX_IONS_FRACTION);
if (annotation_suffix_fraction) feature_set.push_back(Constants::UserParam::MATCHED_SUFFIX_IONS_FRACTION);
// note: precursor error is calculated by percolator itself
search_parameters.setMetaValue("extra_features", ListUtils::concatenate(feature_set, ","));
// record whether open-search mode was used
search_parameters.setMetaValue("open_search", isOpenSearchMode_() ? "true" : "false");
search_parameters.enzyme_term_specificity = EnzymaticDigestion::SPEC_FULL;
protein_ids[0].setSearchParameters(std::move(search_parameters));
}
// Run the full fragment-index database search:
//  1. load MS2 spectra from 'in_mzML' and preprocess them,
//  2. load the FASTA database 'in_db' (optionally appending reversed decoys),
//  3. build a FragmentIndex and query every spectrum against it (parallel),
//  4. re-score each candidate with HyperScore and collect annotated hits,
//  5. post-process hits into protein/peptide identifications,
//  6. in open-search mode, analyze delta-mass modification patterns,
//  7. reindex peptides against the protein database.
// @param in_mzML      path to the mzML file with MS2 spectra
// @param in_db        path to the FASTA protein database
// @param protein_ids  output: single ProteinIdentification with search settings
// @param peptide_ids  output: one PeptideIdentification per identified spectrum
// @return EXECUTION_OK on success, otherwise an error code mapped from PeptideIndexing
PeptideSearchEngineFIAlgorithm::ExitCodes PeptideSearchEngineFIAlgorithm::search(const String& in_mzML, const String& in_db, vector<ProteinIdentification>& protein_ids, PeptideIdentificationList& peptide_ids) const
{
bool precursor_mass_tolerance_unit_ppm = (precursor_mass_tolerance_unit_ == "ppm");
bool fragment_mass_tolerance_unit_ppm = (fragment_mass_tolerance_unit_ == "ppm");
// Debug: log effective precursor/fragment tolerances (value + unit)
OPENMS_LOG_INFO << "[PDBS-FI] fragment_tol="
<< fragment_mass_tolerance_ << " "
<< (fragment_mass_tolerance_unit_ppm ? "ppm" : "Da")
<< " | precursor_tol="
<< precursor_mass_tolerance_ << " "
<< (precursor_mass_tolerance_unit_ppm ? "ppm" : "Da")
<< std::endl;
bool open_search = isOpenSearchMode_();
OPENMS_LOG_INFO << "[PDBS-FI] open_search=" << (open_search ? "true" : "false")
<< " (auto-determined from precursor tolerance)" << std::endl;
// load MS2 map (only MS level 2 is read)
PeakMap spectra;
FileHandler f;
PeakFileOptions options;
options.clearMSLevels();
options.addMSLevel(2);
f.getOptions() = options;
f.loadExperiment(in_mzML, spectra, {FileTypes::MZML});
spectra.sortSpectra(true);
startProgress(0, 1, "Filtering spectra...");
preprocessSpectra_(spectra, fragment_mass_tolerance_, fragment_mass_tolerance_unit_ppm);
endProgress();
// create spectrum generator (used to re-score candidates below)
TheoreticalSpectrumGenerator spectrum_generator;
Param param(spectrum_generator.getParameters());
param.setValue("add_first_prefix_ion", "true");
param.setValue("add_metainfo", "true");
spectrum_generator.setParameters(param);
// preallocate storage for PSMs (one hit list per spectrum)
vector<vector<AnnotatedHit_> > annotated_hits(spectra.size(), vector<AnnotatedHit_>());
for (auto & a : annotated_hits) { a.reserve(report_top_hits_); }
vector<FASTAFile::FASTAEntry> fasta_db;
FASTAFile().load(in_db, fasta_db);
// generate decoy protein sequences by reversing them
if (decoys_)
{
startProgress(0, 1, "Generate decoys...");
DecoyGenerator decoy_generator;
// append decoy proteins
const size_t old_size = fasta_db.size();
fasta_db.reserve(fasta_db.size() * 2);
for (size_t i = 0; i != old_size; ++i)
{
FASTAFile::FASTAEntry e = fasta_db[i];
e.sequence = decoy_generator.reversePeptides(AASequence::fromString(e.sequence), enzyme_).toString();
e.identifier = "DECOY_" + e.identifier;
fasta_db.push_back(std::move(e));
}
// randomize order of targets and decoys to introduce no global bias in the case that
// many targets have the same score as their decoy. (As we always take the first best scoring one)
Math::RandomShuffler shuffler;
shuffler.portable_random_shuffle(fasta_db.begin(), fasta_db.end());
endProgress();
}
// build fragment index
//TODO: Pass all the other parameters from this class to FragmentIndex
//TODO: Can we do it with p.setValue or is there a more sophisticated way?
startProgress(0, 1, "Building fragment index...");
FragmentIndex fragment_index_;
auto this_params = getParameters();
fragment_index_.setParameters(this_params);
fragment_index_.build(fasta_db);
endProgress();
startProgress(0, spectra.size(), "Scoring peptide models against spectra...");
size_t count_spectra{};
// Compute open search mode once before parallel region
bool open_search_mode = open_search;
// Create local copy of constant for OpenMP shared access
const double proton_mass_u = Constants::PROTON_MASS_U;
// each iteration writes only annotated_hits[scan_index], so no further
// synchronization is needed; count_spectra is updated atomically for progress
#pragma omp parallel for schedule(static) default(none) shared(annotated_hits, count_spectra, fragment_index_, spectrum_generator, fasta_db, precursor_mass_tolerance_unit_ppm, fragment_mass_tolerance_unit_ppm, spectra, open_search_mode, proton_mass_u)
for (SignedSize scan_index = 0; scan_index < (SignedSize)spectra.size(); ++scan_index)
{
#pragma omp atomic
++count_spectra;
// progress is reported from the master thread only
IF_MASTERTHREAD
{
setProgress(count_spectra);
}
const MSSpectrum& exp_spectrum = spectra[scan_index];
FragmentIndex::SpectrumMatchesTopN top_sms;
fragment_index_.querySpectrum(exp_spectrum, top_sms); // TODO: expose top N as argument here and use report_top_hits_
for (const auto& sms : top_sms.hits_)
{
// recover the candidate peptide sequence from the FASTA entry snippet
FragmentIndex::Peptide sms_pep = fragment_index_.getPeptides()[sms.peptide_idx_];
pair<size_t, size_t> candidate_snippet = sms_pep.sequence_;
AASequence unmod_candidate = AASequence::fromString(fasta_db[sms_pep.protein_idx].sequence.substr(candidate_snippet.first, candidate_snippet.second));
AASequence mod_candidate;
//reapply modifications.
// NOTE(review): these modification maps depend only on member configuration;
// rebuilding them for every candidate hit is loop-invariant work that could
// be hoisted out of the parallel loop.
if (!(modifications_variable_.empty() && modifications_fixed_.empty()))
{
vector<AASequence> mod_candidates;
ModifiedPeptideGenerator::MapToResidueType fixed_modifications = ModifiedPeptideGenerator::getModifications(modifications_fixed_);
ModifiedPeptideGenerator::MapToResidueType variable_modifications = ModifiedPeptideGenerator::getModifications(modifications_variable_);
ModifiedPeptideGenerator::applyFixedModifications(fixed_modifications, unmod_candidate);
ModifiedPeptideGenerator::applyVariableModifications(variable_modifications, unmod_candidate, modifications_max_variable_mods_per_peptide_, mod_candidates);
// assumes the generation order here matches the order used when the
// index was built (modification_idx_) - TODO confirm
mod_candidate = mod_candidates[sms_pep.modification_idx_];
}
else
{
mod_candidate = unmod_candidate;
}
// create theoretical spectrum
PeakSpectrum theo_spectrum;
// add peaks for b and y ions with charge 1
spectrum_generator.getSpectrum(theo_spectrum, mod_candidate, 1, 1);
// sort by mz
theo_spectrum.sortByPosition();
// const int& charge = exp_spectrum.getPrecursors()[0].getCharge();
HyperScore::PSMDetail detail;
const double& score = HyperScore::computeWithDetail(fragment_mass_tolerance_, fragment_mass_tolerance_unit_ppm, exp_spectrum, theo_spectrum, detail);
if (score == 0)
{
continue; // no hit?
}
// add peptide hit
AnnotatedHit_ ah;
ah.sequence = std::move(mod_candidate);
ah.score = score;
// matched b/y ion counts normalized by peptide length
double seq_length = (double)ah.sequence.size();
ah.prefix_fraction = (double)detail.matched_b_ions/seq_length;
ah.suffix_fraction = (double)detail.matched_y_ions/seq_length;
ah.mean_error = detail.mean_error;
// Set isotope error and charge from FragmentIndex results
ah.isotope_error = sms.isotope_error_;
ah.applied_charge = sms.precursor_charge_;
// Calculate delta_mass for open search
ah.delta_mass = 0.0; // Initialize
if (open_search_mode)
{
// delta mass = experimental [M+H]+ minus theoretical [M+H]+
double theo_mh_plus = ah.sequence.getMZ(1);
double exp_mz = exp_spectrum.getPrecursors()[0].getMZ();
double exp_mh_plus = exp_mz * sms.precursor_charge_ - ((sms.precursor_charge_ - 1) * proton_mass_u);
ah.delta_mass = exp_mh_plus - theo_mh_plus;
}
annotated_hits[scan_index].push_back(std::move(ah));
}
}
endProgress();
// NOTE(review): these two maps are computed but never used - the call below
// passes the string lists instead (the map parameters are commented out)
ModifiedPeptideGenerator::MapToResidueType fixed_modifications = ModifiedPeptideGenerator::getModifications(modifications_fixed_);
ModifiedPeptideGenerator::MapToResidueType variable_modifications = ModifiedPeptideGenerator::getModifications(modifications_variable_);
startProgress(0, 1, "Post-processing PSMs...");
PeptideSearchEngineFIAlgorithm::postProcessHits_(spectra,
annotated_hits,
protein_ids,
peptide_ids,
report_top_hits_,
//fixed_modifications, TODO: what about this unused parameter?
//variable_modifications, TODO: what about this unused parameter?
//modifications_max_variable_mods_per_peptide_, TODO: what about this unused parameter?
modifications_fixed_,
modifications_variable_,
peptide_missed_cleavages_,
precursor_mass_tolerance_,
fragment_mass_tolerance_,
precursor_mass_tolerance_unit_,
fragment_mass_tolerance_unit_,
precursor_min_charge_,
precursor_max_charge_,
enzyme_,
in_db
);
endProgress();
// Perform modification analysis for open search results
if (open_search)
{
OPENMS_LOG_INFO << "[PDBS-FI] Performing open search modification analysis..." << std::endl;
startProgress(0, 1, "Analyzing modification patterns...");
OpenSearchModificationAnalysis mod_analyzer;
// Generate output table filename based on input database
String mod_output_file = "";
if (!in_db.empty())
{
// strip the database file extension, if any
size_t dot_pos = in_db.rfind('.');
if (dot_pos != String::npos)
{
mod_output_file = in_db.substr(0, dot_pos) + "_ModificationAnalysis.idXML";
}
else
{
mod_output_file = in_db + "_ModificationAnalysis.idXML";
}
}
auto modification_summaries = mod_analyzer.analyzeModifications(
peptide_ids,
precursor_mass_tolerance_,
precursor_mass_tolerance_unit_ == "ppm",
false, // no smoothing for now
mod_output_file
);
OPENMS_LOG_INFO << "[PDBS-FI] Found " << modification_summaries.size()
<< " modification patterns in open search results." << std::endl;
endProgress();
}
// add meta data on spectra file
protein_ids[0].setPrimaryMSRunPath({in_mzML}, spectra);
// reindex peptides to proteins
PeptideIndexing indexer;
Param param_pi = indexer.getParameters();
param_pi.setValue("decoy_string", "DECOY_");
param_pi.setValue("decoy_string_position", "prefix");
param_pi.setValue("enzyme:name", enzyme_);
param_pi.setValue("enzyme:specificity", "full");
param_pi.setValue("missing_decoy_action", "silent");
indexer.setParameters(param_pi);
PeptideIndexing::ExitCodes indexer_exit = indexer.run(fasta_db, protein_ids, peptide_ids);
// map indexer failures onto this algorithm's exit codes
if ((indexer_exit != PeptideIndexing::ExitCodes::EXECUTION_OK) &&
(indexer_exit != PeptideIndexing::ExitCodes::PEPTIDE_IDS_EMPTY))
{
if (indexer_exit == PeptideIndexing::ExitCodes::DATABASE_EMPTY)
{
return ExitCodes::INPUT_FILE_EMPTY;
}
else if (indexer_exit == PeptideIndexing::ExitCodes::UNEXPECTED_RESULT)
{
return ExitCodes::UNEXPECTED_RESULT;
}
else
{
return ExitCodes::UNKNOWN_ERROR;
}
}
return ExitCodes::EXECUTION_OK;
}
} // namespace OpenMS
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Oliver Alka, Axel Walter $
// $Authors: Oliver Alka $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/SiriusMSConverter.h>
#include <OpenMS/FORMAT/ControlledVocabulary.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <OpenMS/METADATA/SourceFile.h>
#include <OpenMS/SYSTEM/File.h>
using namespace OpenMS;
using namespace std;
namespace OpenMS
{
// precursor correction (highest intensity)
Int SiriusMSFile::getHighestIntensityPeakInMZRange_(double test_mz,
const MSSpectrum& spectrum,
double tolerance,
bool ppm)
{
// get tolerance window and left/right iterator
pair<double, double> tolerance_window = Math::getTolWindow(test_mz, tolerance, ppm);
// Here left has to be smaller than right
OPENMS_PRECONDITION(tolerance_window.first < tolerance_window.second, "Left has to be smaller than right");
MSSpectrum::ConstIterator left = spectrum.MZBegin(tolerance_window.first);
MSSpectrum::ConstIterator right = spectrum.MZBegin(tolerance_window.second);
// no MS1 precursor peak in +- tolerance window found
if (left == right)
{
return -1;
}
MSSpectrum::ConstIterator max_intensity_it = max_element(left, right, Peak1D::IntensityLess());
return max_intensity_it - spectrum.begin();
}
// extract precursor isotope pattern if no feature information is available
std::vector<Peak1D> SiriusMSFile::extractPrecursorIsotopePattern_(const double& precursor_mz,
const MSSpectrum& precursor_spectrum,
int& iterations,
const int& charge)
{
vector<Peak1D> isotopes;
int peak_index;
Peak1D peak;
// monoisotopic_trace
const int tolerance = 10;
const bool ppm = true;
const int isotope_tolerance = 1;
peak_index = getHighestIntensityPeakInMZRange_(precursor_mz, precursor_spectrum, tolerance, ppm);
if (peak_index != -1)
{
peak = precursor_spectrum[peak_index];
isotopes.push_back(peak);
}
// further isotope_traces with the mass error of 1 ppm
double massdiff = Constants::C13C12_MASSDIFF_U;
// depending on the charge different MASSDIFF
if (charge != 0)
{
massdiff = massdiff/std::abs(charge);
}
while (peak_index != -1 && iterations > 0)
{
// check for isotope trace with "isotope_tolerance" ppm error
peak_index = SiriusMSFile::getHighestIntensityPeakInMZRange_(peak.getMZ() + massdiff, precursor_spectrum, isotope_tolerance, ppm);
if (peak_index != -1)
{
peak = precursor_spectrum[peak_index];
isotopes.push_back(peak);
}
--iterations;
}
return isotopes;
}
  // Writes SIRIUS .ms ">compound" entries for the given MS2 spectra and fills a
  // CompoundInfo record per identification so SIRIUS results can later be
  // mapped back to the OpenMS data.
  //
  // One compound block is written per identification k; all MS2 spectra in
  // 'ms2_spectra_index' are appended to it. If feature information is present
  // (feature_id != 0) the compound header is written only once per feature.
  //
  // @param os                 output stream of the .ms file
  // @param spectra            full experiment (MS1 + MS2 spectra)
  // @param ms2_spectra_index  indices of the MS2 spectra to export
  // @param ainfo              source file / native-ID accession information
  // @param adducts            adduct string per identification (may be empty)
  // @param v_description      description per identification ("UNKNOWN" if none)
  // @param v_sumformula       sum formula per identification ("UNKNOWN" if none)
  // @param f_isotopes         feature mass-trace isotope pattern (m/z, intensity)
  // @param feature_charge     feature charge (0 = unknown; may be set to +/-1 here)
  // @param feature_id         feature unique id (0 = no feature information)
  // @param feature_rt         feature retention time (0 = unknown)
  // @param feature_mz         feature m/z (0 = unknown)
  // @param writecompound      whether a new ">compound" header must be written
  // @param no_masstrace_info_isotope_pattern  if true, ignore f_isotopes
  // @param isotope_pattern_iterations  max. isotope traces extracted from MS1
  // @param count_skipped_spectra  [out] spectra skipped due to |charge| > 1
  // @param count_assume_mono      [out] times charge 0 was assumed mono-charged
  // @param count_no_ms1           [out] times no MS1 precursor spectrum was found
  // @param v_cmpinfo          [out] CompoundInfo records (only for feature_id > 0)
  // @param file_index         index of the input file (part of the query id)
  void SiriusMSFile::writeMsFile_(ofstream& os,
                                  const MSExperiment& spectra,
                                  const std::vector<size_t>& ms2_spectra_index,
                                  const SiriusMSFile::AccessionInfo& ainfo,
                                  const StringList& adducts,
                                  const std::vector<String>& v_description,
                                  const std::vector<String>& v_sumformula,
                                  const std::vector<pair<double,double>>& f_isotopes,
                                  int& feature_charge,
                                  uint64_t& feature_id,
                                  const double& feature_rt,
                                  const double& feature_mz,
                                  bool& writecompound,
                                  const bool& no_masstrace_info_isotope_pattern,
                                  const int& isotope_pattern_iterations,
                                  int& count_skipped_spectra,
                                  int& count_assume_mono,
                                  int& count_no_ms1,
                                  std::vector<SiriusMSFile::CompoundInfo>& v_cmpinfo,
                                  const size_t& file_index)
  {
    // if multiple identifications present for one MS1 and MS2 use all of them and
    // let SIRIUS sort it out using fragment annotation
    for (unsigned int k = 0; k != v_description.size(); ++k)
    {
      if (v_description.size() > 1) { writecompound = true; } // write the same "entry" for each possible hit (different: description, adduct, sumformula)
      SiriusMSFile::CompoundInfo cmpinfo;
      cmpinfo.file_index = file_index;
      for (const size_t& ind : ms2_spectra_index)
      {
        // construct compound info structure
        const MSSpectrum& current_ms2 = spectra[ind];
        const double& current_rt = current_ms2.getRT();
        const String& native_id = current_ms2.getNativeID();
        const int& scan_number = SpectrumLookup::extractScanNumber(native_id, ainfo.native_id_accession);
        const std::vector<Precursor> &precursor = current_ms2.getPrecursors();
        // get m/z and intensity of precursor
        if (precursor.empty())
        {
          throw Exception::MissingInformation(__FILE__,
                                              __LINE__,
                                              OPENMS_PRETTY_FUNCTION,
                                              "Precursor for MS/MS spectrum was not found.");
        }
        IonSource::Polarity p = current_ms2.getInstrumentSettings().getPolarity(); //charge
        // there should be only one precursor and MS2 should contain peaks to be considered
        if (precursor.size() == 1 && !current_ms2.empty())
        {
          // read precursor charge
          int precursor_charge = precursor[0].getCharge();
          // sirius supports only single charged ions (+1; -1)
          // if charge = 0, it will be changed to +1; -1 depending on Polarity
          if (precursor_charge > 1 || precursor_charge < -1)
          {
            ++count_skipped_spectra;
            continue;
          }
          // set precursor charge for msfile
          // no charge annotated - assume mono-charged
          if (precursor_charge == 0)
          {
            precursor_charge = 1;
            ++count_assume_mono;
          }
          // negative mode - make sure charges are < 0
          if (p == IonSource::Polarity::NEGATIVE)
          {
            precursor_charge = -(std::abs(precursor_charge));
          }
          // set feature_charge for msfile if feature information is available
          // no charge annotated - assume mono-charged
          if (feature_id != 0 && feature_charge == 0)
          {
            feature_charge = 1;
            ++count_assume_mono;
          }
          // negative mode - make sure charges are < 0
          if (p == IonSource::Polarity::NEGATIVE) { feature_charge = -(std::abs(feature_charge)); }
          // get m/z and intensity of precursor != MS1 spectrum
          double precursor_mz = precursor[0].getMZ();
          float precursor_int = precursor[0].getIntensity();
          // extract collision energy
          double collision = precursor[0].getActivationEnergy();
          // find corresponding ms1 spectra (precursor)
          PeakMap::ConstIterator s_it2 = spectra.getPrecursorSpectrum((spectra.begin() + ind));
          double test_mz = precursor_mz;
          double precursor_rt = 0.0;
          vector<Peak1D> isotopes;
          isotopes.clear();
          vector<Peak1D> precursor_spec;
          // getPrecursorSpectrum returns past-the-end iterator if spectrum is not found.
          if (s_it2 == spectra.end() || s_it2->getMSLevel() != 1)
          {
            ++count_no_ms1;
          }
          // get the precursor in the ms1 spectrum (highest intensity in the range of the precursor mz +- 0.1 Da)
          else
          {
            const MSSpectrum &precursor_spectrum = *s_it2;
            precursor_rt = precursor_spectrum.getRT();
            // note: local name 'interations' is a long-standing typo for 'iterations';
            // it is decremented in place by extractPrecursorIsotopePattern_()
            int interations = isotope_pattern_iterations;
            // extract precursor isotope pattern via C13 isotope distance
            if (feature_id != 0 && feature_charge != 0)
            {
              isotopes = SiriusMSFile::extractPrecursorIsotopePattern_(test_mz, precursor_spectrum, interations, feature_charge);
            }
            else
            {
              isotopes = SiriusMSFile::extractPrecursorIsotopePattern_(test_mz, precursor_spectrum, interations, precursor_charge);
            }
            // keep a copy of the full MS1 spectrum for the ">ms1peaks" section below
            for (Size i = 0; i < precursor_spectrum.size(); ++i)
            {
              const Peak1D &peak = precursor_spectrum[i];
              precursor_spec.push_back(peak);
            }
          }
          // construct query_id; remove spaces from string
          // use first
          std::string des_wo_space = v_description[k];
          des_wo_space.erase(std::remove_if(des_wo_space.begin(), des_wo_space.end(), ::isspace), des_wo_space.end());
          // query id layout: "<file_index>_<feature_id>-<scan_number>--<spectrum_index>--<description>"
          String query_id = String(file_index) + "_" + String(feature_id) +
                            String("-" + String(scan_number) + "-") +
                            String("-" + String(ind) + "--") +
                            String(des_wo_space);
          if (writecompound)
          {
            // write internal unique .ms data as sirius input
            os << fixed;
            os << ">compound " << query_id << "\n";
            cmpinfo.cmp = query_id;
            // parent mass preference: feature mass trace > MS1 isotope pattern > precursor m/z
            if (!f_isotopes.empty() && !no_masstrace_info_isotope_pattern)
            {
              os << ">parentmass " << f_isotopes[0].first << fixed << "\n";
              cmpinfo.pmass = f_isotopes[0].first;
            }
            else if (!isotopes.empty())
            {
              os << ">parentmass " << isotopes[0].getMZ() << fixed << "\n";
              cmpinfo.pmass = isotopes[0].getMZ();
            }
            else
            {
              os << ">parentmass " << precursor_mz << fixed << "\n";
              cmpinfo.pmass = precursor_mz;
            }
            // NOTE(review): adducts[k] assumes adducts has one entry per
            // identification when non-empty — verify against the callers
            if (!adducts.empty())
            {
              os << ">ionization " << adducts[k] << "\n";
              cmpinfo.ionization = adducts[k];
            }
            if (v_sumformula[k] != "UNKNOWN")
            {
              os << ">formula " << v_sumformula[k] << "\n";
              cmpinfo.formula = v_sumformula[k];
            }
            // prefer the feature charge over the (possibly assumed) precursor charge
            if (feature_charge != 0)
            {
              os << ">charge " << feature_charge << "\n";
              cmpinfo.charge = feature_charge;
            }
            else
            {
              os << ">charge " << precursor_charge << "\n";
              cmpinfo.charge = precursor_charge;
            }
            // RT preference: feature RT > MS1 precursor spectrum RT > MS2 spectrum RT
            if (feature_rt != 0)
            {
              os << ">rt " << feature_rt << "\n";
              cmpinfo.rt = feature_rt;
            }
            else if (precursor_rt != 0.0)
            {
              os << ">rt " << precursor_rt << "\n";
              cmpinfo.rt = precursor_rt;
            }
            else
            {
              os << ">rt " << current_rt << "\n";
              cmpinfo.rt = current_rt;
            }
            if (feature_mz != 0 && feature_id != 0)
            {
              os << "##fmz " << String(feature_mz) << "\n";
              os << "##fid " << String(feature_id) << "\n";
              cmpinfo.fmz = feature_mz;
              cmpinfo.fid = feature_id;
            }
            // "##" lines are OpenMS-specific metadata used for result mapping
            os << "##des " << String(des_wo_space) << "\n";
            os << "##specref_format " << "[MS, " << ainfo.native_id_accession <<", "<< ainfo.native_id_type << "]" << endl;
            os << "##source file " << ainfo.sf_path << ainfo.sf_filename << endl;
            os << "##source format " << "[MS, " << ainfo.sf_accession << ", "<< ainfo.sf_type << ",]" << endl;
            cmpinfo.des = String(des_wo_space);
            cmpinfo.specref_format = String("[MS, " + ainfo.native_id_accession + ", " + ainfo.native_id_type + "]");
            cmpinfo.source_file = String(ainfo.sf_path + ainfo.sf_filename);
            cmpinfo.source_format = String("[MS, " + ainfo.sf_accession + ", "+ ainfo.sf_type + ",]" );
            // use precursor m/z & int and no ms1 spectra is available else use values from ms1 spectrum
            Size num_isotopes = isotopes.size();
            Size num_f_isotopes = f_isotopes.size();
            if (num_f_isotopes > 0 && !no_masstrace_info_isotope_pattern)
            {
              os << ">ms1merged" << endl;
              // m/z and intensity have to be higher than 1e-10
              for (auto it = f_isotopes.begin(); it != f_isotopes.end(); ++it)
              {
                os << it->first << " " << it->second << "\n";
              }
              cmpinfo.pint_mono = f_isotopes[0].second;
            }
            else if (num_isotopes > 0) // if ms1 spectrum was present
            {
              os << ">ms1merged" << endl;
              for (auto it = isotopes.begin(); it != isotopes.end(); ++it)
              {
                os << it->getMZ() << " " << it->getIntensity() << "\n";
              }
              cmpinfo.pint_mono = isotopes[0].getIntensity();
            }
            else
            {
              if (precursor_int != 0) // if no ms1 spectrum was present but precursor intensity is known
              {
                os << ">ms1merged" << "\n" << precursor_mz << " " << precursor_int << "\n\n";
                cmpinfo.pint_mono = precursor_int;
              }
            }
          }
          // if a feature_id is present compound should only be written once
          // since every ms2 belongs to the same feature with a different description
          if (feature_id != 0)
          {
            writecompound = false;
          }
          // full MS1 peak list of the precursor spectrum (if one was found)
          if (!precursor_spec.empty())
          {
            os << ">ms1peaks" << endl;
            for (auto iter = precursor_spec.begin(); iter != precursor_spec.end(); ++iter)
            {
              os << iter->getMZ() << " " << iter->getIntensity() << "\n";
            }
          }
          // if collision energy was given - write it into .ms file if not use ms2 instead
          if (collision == 0.0)
          {
            os << ">ms2peaks" << "\n";
          }
          else
          {
            os << ">collision" << " " << collision << "\n";
          }
          os << "##n_id " << native_id << endl;
          // "m_id" annotation for multiple possible identifications (description_filepath_native_id_k)
          // fragment mapping will be done using the m_id
          String m_id = cmpinfo.des + "_" + ainfo.sf_filename + "_" + native_id + "_" + k;
          os << "##m_id " << m_id << endl;
          os << "##scan " << ind << endl;
          os << "##specref " << "ms_run[1]:" << native_id << endl;
          cmpinfo.native_ids.push_back(native_id);
          cmpinfo.m_ids.push_back(m_id);
          cmpinfo.scan_indices.emplace_back(ind);
          cmpinfo.specrefs.emplace_back("ms_run[1]:" + native_id);
          // single spectrum peaks
          for (Size i = 0; i < current_ms2.size(); ++i)
          {
            const Peak1D &peak = current_ms2[i];
            double mz = peak.getMZ();
            float intensity = peak.getIntensity();
            // intensity has to be higher than zero
            if (intensity != 0)
            {
              os << mz << " " << intensity << "\n";
            }
          }
        }
      }
      // concatenated ids are used as lookup keys for result mapping
      cmpinfo.native_ids_id = ListUtils::concatenate(cmpinfo.native_ids, "|");
      cmpinfo.m_ids_id = ListUtils::concatenate(cmpinfo.m_ids, "|");
      // add cmpinfo if derived from a feature (feature_id > 0)
      if (feature_id > 0)
      {
        v_cmpinfo.push_back(std::move(cmpinfo));
      }
    }
  }
void SiriusMSFile::store(const MSExperiment& spectra,
std::ofstream& os,
const FeatureMapping::FeatureToMs2Indices& feature_mapping,
const bool& feature_only,
const int& isotope_pattern_iterations,
const bool no_masstrace_info_isotope_pattern,
std::vector<SiriusMSFile::CompoundInfo>& v_cmpinfo,
const size_t& file_index)
{
const std::map<const BaseFeature*, vector<size_t>>& assigned_ms2 = feature_mapping.assignedMS2;
const vector<size_t> & unassigned_ms2 = feature_mapping.unassignedMS2;
bool use_feature_information = false;
bool use_unassigned_ms2 = false;
bool no_feature_information = false;
// Three different possible .ms formats
// feature information is used (adduct, masstrace_information (FFM+MAD || FFM+AMS || FMM+MAD+AMS [AMS preferred])
if (!assigned_ms2.empty()) use_feature_information = true;
// feature information was provided and unassigned ms2 should be used in addition
if (!unassigned_ms2.empty() && !feature_only) use_unassigned_ms2 = true;
// no feature information was provided (mzml input only)
if (assigned_ms2.empty() && unassigned_ms2.empty()) no_feature_information = true;
int count_skipped_spectra = 0; // spectra skipped due to precursor charge
int count_assume_mono = 0; // count if mono charge was assumed and set to current ion mode
int count_no_ms1 = 0; // count if no precursor was found
int count_skipped_features = 0; // features skipped due to charge
// check for all spectra at the beginning if spectra are centroided
// determine type of spectral data (profile or centroided) - only checking first spectrum (could be ms2 spectrum)
SpectrumSettings::SpectrumType spectrum_type = spectra[0].getType();
if (spectrum_type == SpectrumSettings::SpectrumType::PROFILE)
{
throw OpenMS::Exception::IllegalArgument(__FILE__, __LINE__, __FUNCTION__, "Error: Profile data provided but centroided spectra are needed. Please use PeakPicker to convert the spectra.");
}
AccessionInfo ainfo;
// sourcefile
if (spectra.getSourceFiles().empty())
{
throw OpenMS::Exception::IllegalArgument(__FILE__, __LINE__, __FUNCTION__, "Error: The SourceFile was annotated correctly in the provided mzML. Please run the OpenMS::FileConverter convert the files again from mzML to mzML.");
}
else
{
ainfo.sf_path = spectra.getSourceFiles()[0].getPathToFile();
ainfo.sf_filename = spectra.getSourceFiles()[0].getNameOfFile();
ainfo.sf_type = spectra.getSourceFiles()[0].getFileType();
// native_id
ainfo.native_id_accession = spectra.getSourceFiles()[0].getNativeIDTypeAccession();
ainfo.native_id_type = spectra.getSourceFiles()[0].getNativeIDType();
}
// extract accession by name
ControlledVocabulary cv;
cv.loadFromOBO("MS", File::find("/CV/psi-ms.obo"));
auto lambda = [&ainfo, &cv] (const String& child)
{
const ControlledVocabulary::CVTerm& c = cv.getTerm(child);
if (c.name == ainfo.sf_type)
{
ainfo.sf_accession = c.id;
return true;
}
return false;
};
cv.iterateAllChildren("MS:1000560", lambda);
vector<String> adducts;
String description;
String sumformula;
vector<String> v_description;
vector<String> v_sumformula;
uint64_t feature_id;
int feature_charge;
double feature_rt;
double feature_mz;
vector<pair<double, double>> f_isotopes;
// if feature information is available to this first (write features in one compound)
if (use_feature_information)
{
for (auto it = assigned_ms2.begin();
it != assigned_ms2.end();
++it)
{
// reset feature information with each iteration
f_isotopes.clear();
const BaseFeature* feature = it->first;
const vector<size_t> feature_associated_ms2 = it->second;
feature_id = feature->getUniqueId();
feature_charge = feature->getCharge();
feature_rt = feature->getRT();
feature_mz = feature->getMZ();
// multiple charged compounds are not allowed in sirius
if (feature_charge > 1 || feature_charge < -1)
{
++count_skipped_features;
continue;
}
// ffm featureXML
if (feature->metaValueExists("adducts"))
{
adducts = feature->getMetaValue("adducts");
}
if (feature->metaValueExists("masstrace_centroid_mz") && feature->metaValueExists("masstrace_intensity"))
{
vector<double> masstrace_centroid_mz = feature->getMetaValue("masstrace_centroid_mz");
vector<double> masstrace_intensity = feature->getMetaValue("masstrace_intensity");
if (masstrace_centroid_mz.size() == masstrace_intensity.size())
{
for (Size i = 0; i < masstrace_centroid_mz.size(); ++i)
{
pair<double, double> masstrace_mz_int(masstrace_centroid_mz[i], masstrace_intensity[i]);
f_isotopes.push_back(masstrace_mz_int);
}
}
}
else
{
OPENMS_LOG_WARN << "The feature " << feature->getUniqueId() << " misses the MetaValues for 'masstrace_centroid_mz' and 'masstrace_intensity'."
"If this happens more often, please validate your featureXML." << endl;
}
// prefer adducts from AccurateMassSearch if MetaboliteAdductDecharger and AccurateMassSearch were performed
// if multiple PeptideHits / identifications occur - use all for SIRIUS
v_description.clear();
v_sumformula.clear();
// descriptions is "[null]" if AccurateMassSearch was run with "keep unidentified masses"
if (!feature->getPeptideIdentifications().empty() && !feature->getPeptideIdentifications()[0].getHits().empty())
{
adducts.clear();
for (unsigned int j = 0; j != feature->getPeptideIdentifications()[0].getHits().size(); ++j)
{
String adduct;
description = feature->getPeptideIdentifications()[0].getHits()[j].getMetaValue("description");
if (description == "[null]")
{
description = "[UNKNOWN]";
}
sumformula = feature->getPeptideIdentifications()[0].getHits()[j].getMetaValue("chemical_formula");
if (sumformula.empty())
{
sumformula = "UNKNOWN";
}
adduct = feature->getPeptideIdentifications()[0].getHits()[j].getMetaValue("modifications");
if (adduct != "null")
{
// change format of adduct information M+H;1+ -> [M+H]1+
String adduct_prefix = adduct.prefix(';').trim();
String adduct_suffix = adduct.suffix(';').trim();
adduct = "[" + adduct_prefix + "]" + adduct_suffix;
}
else
{
adduct = "";
}
// change format of description [name] to name
description.erase(remove_if(begin(description),
end(description),
[](char c) { return c == '[' || c == ']'; }), end(description));
adducts.insert(adducts.begin(), adduct);
v_description.push_back(description);
v_sumformula.push_back(sumformula);
}
}
else
{
// initialization with UNKNOWN in case no feature information was available
v_description.emplace_back("UNKNOWN");
v_sumformula.emplace_back("UNKNOWN");
}
bool writecompound = true;
// call function to writeMsFile to os
writeMsFile_(os,
spectra,
feature_associated_ms2,
ainfo,
adducts,
v_description,
v_sumformula,
f_isotopes,
feature_charge,
feature_id,
feature_rt,
feature_mz,
writecompound,
no_masstrace_info_isotope_pattern,
isotope_pattern_iterations,
count_skipped_spectra,
count_assume_mono,
count_no_ms1,
v_cmpinfo,
file_index);
}
}
// ms2 spectra without an associated feature based on the provided featureXML
if (use_unassigned_ms2)
{
bool writecompound = true;
v_description = {"UNKNOWN"};
v_sumformula = {"UNKNOWN"};
f_isotopes.clear();
adducts.clear();
feature_charge = 0;
feature_id = 0;
feature_mz = 0;
feature_rt = 0;
writeMsFile_(os,
spectra,
unassigned_ms2,
ainfo,
adducts,
v_description,
v_sumformula,
f_isotopes,
feature_charge,
feature_id,
feature_rt,
feature_mz,
writecompound,
no_masstrace_info_isotope_pattern,
isotope_pattern_iterations,
count_skipped_spectra,
count_assume_mono,
count_no_ms1,
v_cmpinfo,
file_index);
}
// no feature information was provided
if (no_feature_information)
{
bool writecompound = true;
v_description = {"UNKNOWN"};
v_sumformula = {"UNKNOWN"};
f_isotopes.clear();
adducts.clear();
feature_charge = 0;
feature_id = 0;
feature_mz = 0;
feature_rt = 0;
// fill vector with index of all ms2 of the mzml
vector<size_t> all_ms2;
for (PeakMap::ConstIterator s_it = spectra.begin(); s_it != spectra.end(); ++s_it)
{
// process only MS2 spectra
if (s_it->getMSLevel() != 2)
{
continue;
}
int scan_index = s_it - spectra.begin();
all_ms2.push_back(scan_index);
}
writeMsFile_(os,
spectra,
all_ms2,
ainfo,
adducts,
v_description,
v_sumformula,
f_isotopes,
feature_charge,
feature_id,
feature_rt,
feature_mz,
writecompound,
no_masstrace_info_isotope_pattern,
isotope_pattern_iterations,
count_skipped_spectra,
count_assume_mono,
count_no_ms1,
v_cmpinfo,
file_index);
}
OPENMS_LOG_WARN << "No MS1 spectrum for this precursor. Occurred " << count_no_ms1 << " times." << endl;
OPENMS_LOG_WARN << count_skipped_spectra << " spectra were skipped due to precursor charge below -1 and above +1." << endl;
OPENMS_LOG_WARN << "Mono charge assumed and set to charge 1 with respect to current polarity " << count_assume_mono << " times."<< endl;
OPENMS_LOG_WARN << count_skipped_features << " features were skipped due to feature charge below -1 and above +1." << endl;
}
void SiriusMSFile::saveFeatureCompoundInfoAsTSV(const std::vector<SiriusMSFile::CompoundInfo>& v_cmpinfo,
const std::string& filename) {
std::ofstream file(filename);
// Check if the file is open
if (!file.is_open()) {
throw std::runtime_error("Unable to open file: " + filename);
}
// Write the header line
file << "cmp\tfile_index\tpmass\tpint_mono\trt\tfmz\tfid\tformula\tcharge\tionization\tdes\tspecref_format\tsource_file\tsource_format\tnative_ids_id\tm_ids_id\n";
// Iterate over the vector and write each object's attributes
for (const auto& info : v_cmpinfo)
{
file << info.cmp << "\t"
<< info.file_index << "\t"
<< info.pmass << "\t"
<< info.pint_mono << "\t"
<< info.rt << "\t"
<< info.fmz << "\t"
<< info.fid << "\t"
<< info.formula << "\t"
<< info.charge << "\t"
<< info.ionization << "\t"
<< info.des << "\t"
<< info.specref_format << "\t"
<< info.source_file << "\t"
<< info.source_format << "\t"
<< info.native_ids_id << "\t"
<< info.m_ids_id << "\n";
}
file.close();
}
} // namespace OpenMS
/// @endcond
| C++ |
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Julianus Pfeuffer $
// $Authors: Julianus Pfeuffer $
// --------------------------------------------------------------------------
namespace OpenMS
{}
| C++ |
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Sven Nahnsen, Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/ConsensusIDAlgorithmPEPIons.h>
#include <OpenMS/CONCEPT/LogStream.h>
using namespace std;
namespace OpenMS
{
  // Registers the algorithm name and its parameter defaults for the shared-
  // peak-count similarity scoring (parameter strings are part of the public
  // parameter interface and must not change).
  ConsensusIDAlgorithmPEPIons::ConsensusIDAlgorithmPEPIons()
  {
    setName("ConsensusIDAlgorithmPEPIons"); // DefaultParamHandler
    // fragment mass tolerance used when matching theoretical b/y ions
    defaults_.setValue("mass_tolerance", 0.5, "Maximum difference between fragment masses (in Da) for fragments to be considered 'shared' between peptides .");
    defaults_.setMinFloat("mass_tolerance", 0.0);
    // is the "min_shared" parameter necessary/useful?
    defaults_.setValue("min_shared", 2, "The minimal number of 'shared' fragments (between two suggested peptides) that is necessary to evaluate the similarity based on shared peak count (SPC).");
    defaults_.setMinInt("min_shared", 1);
    defaultsToParam_();
  }
  // Syncs member variables with the current parameter values after a
  // parameter change; invalidates cached similarity scores.
  void ConsensusIDAlgorithmPEPIons::updateMembers_()
  {
    ConsensusIDAlgorithmSimilarity::updateMembers_();
    // similarity scoring based on shared peak count:
    mass_tolerance_ = param_.getValue("mass_tolerance");
    min_shared_ = param_.getValue("min_shared");
    // new parameters may affect the similarity calculation, so clear cache:
    similarities_.clear();
  }
  // Computes a similarity score in [0, 1] for two peptide sequences based on
  // the shared peak count (SPC) of their theoretical b/y fragment series.
  // Identical sequences score 1.0; otherwise the score is the number of
  // matched fragments divided by the size of the smaller ion series (or 0 if
  // fewer than 'min_shared_' fragments match). Results are cached in
  // 'similarities_', keyed on the ordered sequence pair.
  double ConsensusIDAlgorithmPEPIons::getSimilarity_(AASequence seq1,
                                                     AASequence seq2)
  {
    if (seq1 == seq2) return 1.0;
    // order of sequences matters for cache look-up:
    if (seq2 < seq1) std::swap(seq1, seq2); // "operator>" not defined
    pair<AASequence, AASequence> seq_pair = make_pair(seq1, seq2);
    SimilarityCache::iterator pos = similarities_.find(seq_pair);
    if (pos != similarities_.end()) return pos->second; // score found in cache
    // compare b and y ion series of seq. 1 and seq. 2:
    // layout: ions[0 .. size-1] = b ions, ions[size .. 2*size-1] = y ions
    vector<double> ions1(2 * seq1.size()), ions2(2 * seq2.size());
    // b ions, seq. 1:
    ions1[0] = seq1.getPrefix(1).getMonoWeight(); // includes N-terminal mods
    // y ions, seq. 1:
    ions1[seq1.size()] = seq1.getSuffix(1).getMonoWeight(); // inc. C-term. mods
    // NOTE(review): the series is extended with residue getMonoWeight() —
    // verify this yields the intended (residue, not full) masses
    for (Size i = 1; i < seq1.size(); ++i)
    {
      ions1[i] = ions1[i - 1] + seq1[i].getMonoWeight();
      ions1[seq1.size() + i] = (ions1[seq1.size() + i - 1] +
                                seq1[seq1.size() - i - 1].getMonoWeight());
    }
    // b ions, seq. 2:
    ions2[0] = seq2.getPrefix(1).getMonoWeight(); // includes N-terminal mods
    // y ions, seq. 2:
    ions2[seq2.size()] = seq2.getSuffix(1).getMonoWeight(); // inc. C-term. mods
    for (Size i = 1; i < seq2.size(); ++i)
    {
      ions2[i] = ions2[i - 1] + seq2[i].getMonoWeight();
      ions2[seq2.size() + i] = (ions2[seq2.size() + i - 1] +
                                seq2[seq2.size() - i - 1].getMonoWeight());
    }
    // now compare fragment masses from both sequences to find best matches
    // within the allowed tolerance; note that:
    // 1. we can be more efficient than comparing "all against all"
    // 2. an ion from seq. 2 may be the best match for two (similar) ions from
    // seq. 1 - then we want to count that ion only once, not twice
    sort(ions1.begin(), ions1.end());
    sort(ions2.begin(), ions2.end());
    set<double> matches; // each best-matching ion counts only once
    vector<double>::iterator start = ions2.begin();
    // for each fragment in seq. 1...
    for (vector<double>::iterator it1 = ions1.begin(); it1 != ions1.end();
         ++it1)
    {
      // ...find fragments from seq. 2 that are within the mass tolerance:
      vector<double>::iterator lower = lower_bound(start, ions2.end(),
                                                   *it1 - mass_tolerance_);
      if (lower == ions2.end()) break; // all values are too low
      vector<double>::iterator upper = upper_bound(lower, ions2.end(),
                                                   *it1 + mass_tolerance_);
      double best_match = 0.0, best_diff = mass_tolerance_ + 1.0;
      // find ion from seq. 2 (*it2) that is closest to ion from seq. 1 (*it1):
      for (vector<double>::iterator it2 = lower; it2 != upper; ++it2)
      {
        double diff = fabs(*it1 - *it2);
        if (diff < best_diff)
        {
          best_diff = diff;
          best_match = *it2;
        }
      }
      if (best_diff <= mass_tolerance_) matches.insert(best_match);
      start = lower; // "*it1" is increasing, so lower bounds can't get lower
    }
    double score_sim = 0.0;
    // require a minimum number of shared fragments before scoring
    if (matches.size() >= min_shared_)
    {
      score_sim = matches.size() / float(min(ions1.size(), ions2.size()));
    }
    similarities_[seq_pair] = score_sim; // cache the similarity score
    return score_sim;
  }
} // namespace OpenMS
| C++ |
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg, Petra Gutenbrunner $
// $Authors: David Wojnar, Timo Sachsenberg, Petra Gutenbrunner $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/AScore.h>
#include <OpenMS/CHEMISTRY/TheoreticalSpectrumGenerator.h>
#include <OpenMS/DATASTRUCTURES/MatchedIterator.h>
#include <OpenMS/KERNEL/RangeUtils.h>
#include <OpenMS/MATH/MathFunctions.h>
using namespace std;
namespace OpenMS
{
  // Registers the AScore parameter defaults: fragment mass tolerance/unit,
  // limits on peptide length and number of site permutations, and the score
  // assigned to unambiguous phospho-site assignments (parameter strings are
  // part of the public parameter interface and must not change).
  AScore::AScore():
    DefaultParamHandler("AScore")
  {
    defaults_.setValue("fragment_mass_tolerance", 0.05, "Fragment mass tolerance for spectrum comparisons");
    defaults_.setMinFloat("fragment_mass_tolerance", 0.0);
    defaults_.setValue("fragment_mass_unit", "Da", "Unit of fragment mass tolerance");
    defaults_.setValidStrings("fragment_mass_unit", {"Da","ppm"});
    vector<std::string> advanced(1, "advanced"); // tag for advanced parameters
    defaults_.setValue("max_peptide_length", 40, "Restrict scoring to peptides with a length no greater than this value ('0' for 'no restriction')", advanced);
    defaults_.setMinInt("max_peptide_length", 0);
    defaults_.setValue("max_num_perm", 16384, "Maximum number of permutations a sequence can have to be processed ('0' for 'no restriction')", advanced);
    defaults_.setMinInt("max_num_perm", 0);
    defaults_.setValue("unambiguous_score", 1000, "Score to use for unambiguous assignments, where all sites on a peptide are phosphorylated. (Note: If a peptide is not phosphorylated at all, its score is set to '-1'.)", advanced);
    defaultsToParam_();
  }
  // defaulted destructor, defined out-of-line in this translation unit
  AScore::~AScore() = default;
  // Localizes the phosphorylation site(s) of @p hit against @p real_spectrum and
  // returns a PeptideHit with:
  //  - the best-scoring site permutation as its sequence,
  //  - the minimum AScore over all sites as its score (lower = more ambiguous),
  //  - meta values "AScore_<rank>", "AScore_pep_score", "search_engine_sequence".
  // The score stays at -1 when scoring is impossible (empty spectrum, no phospho
  // events or no S/T/Y sites, peptide too long, too many permutations) and is set
  // to 'unambiguous_score_' when every candidate site is phosphorylated.
  // Note: @p real_spectrum may be sorted by position as a side effect.
  PeptideHit AScore::compute(const PeptideHit& hit, PeakSpectrum& real_spectrum)
  {
    PeptideHit phospho = hit;
    //reset phospho
    phospho.setScore(-1);
    if (real_spectrum.empty())
    {
      return phospho;
    }
    String sequence_str = phospho.getSequence().toString();
    String unmodified_sequence_str = phospho.getSequence().toUnmodifiedString();
    Size number_of_phosphorylation_events = numberOfPhosphoEvents_(sequence_str);
    AASequence seq_without_phospho = removePhosphositesFromSequence_(sequence_str);
    // skip peptides above the configured length limit (0 = no limit)
    if ((max_peptide_length_ > 0) && (unmodified_sequence_str.size() > max_peptide_length_))
    {
      OPENMS_LOG_DEBUG << "\tcalculation aborted: peptide too long: " << seq_without_phospho.toString() << std::endl;
      return phospho;
    }
    // determine all phospho sites
    vector<Size> sites = getSites_(unmodified_sequence_str);
    Size number_of_STY = sites.size();
    if (number_of_phosphorylation_events == 0 || number_of_STY == 0)
    {
      return phospho;
    }
    // every candidate site is phosphorylated -> localization is unambiguous
    if (number_of_STY == number_of_phosphorylation_events)
    {
      phospho.setScore(unambiguous_score_);
      return phospho;
    }
    vector<vector<Size>> permutations = computePermutations_(sites, (Int)number_of_phosphorylation_events);
    OPENMS_LOG_DEBUG << "\tnumber of permutations: " << permutations.size() << std::endl;
    // TODO: using a heuristic to calculate the best phospho sites if the number of permutations are exceeding the maximum.
    // A heuristic could be to calculate the best site for the first phosphorylation and based on this the best site for the second
    // phosphorylation and so on until every site is determined
    if ((max_permutations_ > 0) && (permutations.size() > max_permutations_))
    {
      OPENMS_LOG_DEBUG << "\tcalculation aborted: number of permutations exceeded" << std::endl;
      return phospho;
    }
    vector<PeakSpectrum> th_spectra = createTheoreticalSpectra_(permutations, seq_without_phospho);
    // prepare real spectrum windows
    if (!real_spectrum.isSorted())
    {
      real_spectrum.sortByPosition();
    }
    vector<PeakSpectrum> windows_top10 = peakPickingPerWindowsInSpectrum_(real_spectrum);
    // compute match probability for a peak depth of 1
    base_match_probability_ = computeBaseProbability_(real_spectrum.back().getMZ());
    // calculate peptide score for each possible phospho site permutation
    vector<vector<double>> peptide_site_scores = calculatePermutationPeptideScores_(th_spectra, windows_top10);
    // rank peptide permutations ascending
    multimap<double, Size> ranking = rankWeightedPermutationPeptideScores_(peptide_site_scores);
    // best-scoring permutation: multimap is ascending, so start from the back
    multimap<double, Size>::reverse_iterator rev = ranking.rbegin();
    String seq1 = th_spectra[rev->second].getName();
    phospho.setSequence(AASequence::fromString(seq1));
    phospho.setMetaValue("search_engine_sequence", hit.getSequence().toString());
    double peptide1_score = rev->first;
    phospho.setMetaValue("AScore_pep_score", peptide1_score); // initialize score with highest peptide score (aka highest weighted score)
    // runner-up permutation
    ++rev;
    String seq2 = th_spectra[rev->second].getName();
    double peptide2_score = rev->first;
    vector<ProbablePhosphoSites> phospho_sites;
    determineHighestScoringPermutations_(peptide_site_scores, phospho_sites, permutations, ranking);
    Int rank = 1;
    double best_Ascore = std::numeric_limits<double>::max(); // the lower the better
    for (vector<ProbablePhosphoSites>::iterator s_it = phospho_sites.begin(); s_it != phospho_sites.end(); ++s_it)
    {
      double Ascore = 0;
      if (peptide1_score == peptide2_score) // set Ascore = 0 for each phosphorylation site
      {
        OPENMS_LOG_DEBUG << "\tscore of best (" << seq1 << ") and second best peptide (" << seq2 << ") are equal (" << peptide1_score << ")" << std::endl;
      }
      else
      {
        // compare the site-determining ions of best vs. runner-up assignment at
        // the peak depth selected in determineHighestScoringPermutations_()
        vector<PeakSpectrum> site_determining_ions;
        computeSiteDeterminingIons_(th_spectra, *s_it, site_determining_ions);
        Size N = site_determining_ions[0].size(); // all possibilities have the same number so take the first one
        double p = static_cast<double>(s_it->peak_depth) * base_match_probability_;
        Size n_first = 0; // number of matching peaks for first peptide
        for (Size window_idx = 0; window_idx != windows_top10.size(); ++window_idx) // for each 100 m/z window
        {
          n_first += numberOfMatchedIons_(site_determining_ions[0], windows_top10[window_idx], s_it->peak_depth);
        }
        double P_first = computeCumulativeScore_(N, n_first, p);
        Size n_second = 0; // number of matching peaks for second peptide
        for (Size window_idx = 0; window_idx < windows_top10.size(); ++window_idx) //each 100 m/z window
        {
          n_second += numberOfMatchedIons_(site_determining_ions[1], windows_top10[window_idx], s_it->peak_depth);
        }
        Size N2 = site_determining_ions[1].size(); // all possibilities have the same number so take the first one
        double P_second = computeCumulativeScore_(N2, n_second, p);
        //abs is used to avoid -0 score values
        double score_first = abs(-10 * log10(P_first));
        double score_second = abs(-10 * log10(P_second));
        OPENMS_LOG_DEBUG << "\tfirst - N: " << N << ",p: " << p << ",n: " << n_first << ", score: " << score_first << std::endl;
        OPENMS_LOG_DEBUG << "\tsecond - N: " << N2 << ",p: " << p << ",n: " << n_second << ", score: " << score_second << std::endl;
        // AScore for this site = score gap between best and runner-up assignment
        Ascore = score_first - score_second;
        OPENMS_LOG_DEBUG << "\tAscore_" << rank << ": " << Ascore << std::endl;
      }
      if (Ascore < best_Ascore)
      {
        best_Ascore = Ascore;
      }
      phospho.setMetaValue("AScore_" + String(rank), Ascore);
      ++rank;
    }
    phospho.setScore(best_Ascore);
    return phospho;
  }
double AScore::computeBaseProbability_(double ppm_reference_mz) const
{
double base_match_probability = 2. * fragment_mass_tolerance_ / 100.;
if (fragment_tolerance_ppm_)
{
base_match_probability *= ppm_reference_mz * 1e-6; // 1e-6 converts fragment_mass_tolerance_ to ppm
}
return base_match_probability;
}
  // Binomial survival function P(X >= n) for X ~ Binomial(N, p): the chance of
  // matching at least n of N theoretical peaks purely at random.
  double AScore::computeCumulativeScore_(Size N, Size n, double p) const
  {
    OPENMS_PRECONDITION(n <= N, "The number of matched ions (n) can be at most as large as the number of trials (N).");
    OPENMS_PRECONDITION(p >= 0 && p <= 1.0, "p must be a probability [0,1].");
    // Use the numerically stable implementation from MathFunctions
    return Math::binomial_cdf_complement(N, n, p);
  }
  // For every phospho site of the best (weighted) assignment, finds the
  // best-scoring assignment that keeps all other sites but NOT this one, and
  // records the peak depth (1..10) that maximizes the unweighted score
  // difference between the two assignments.
  void AScore::determineHighestScoringPermutations_(const std::vector<std::vector<double>>& peptide_site_scores, std::vector<ProbablePhosphoSites>& sites, const vector<vector<Size>>& permutations, std::multimap<double, Size>& ranking) const
  {
    // For every phospho site of the highest (weighted) scoring phospho site assignment:
    // 1. determine the next best (weighted) score assignment with this site in unphosporylated state.
    // 2. determine the filtering level (peak depths) that maximizes the (unweighted) score difference between these two assignments
    sites.clear();
    // take first set of phospho site assignments
    sites.resize(permutations[0].size());
    const vector<Size> & best_peptide_sites = permutations[ranking.rbegin()->second]; // sites of the assignment that achieved the highest weighted score
    for (Size i = 0; i < best_peptide_sites.size(); ++i) // for each phosphorylated site
    {
      multimap<double, Size>::reverse_iterator rev = ranking.rbegin();
      sites[i].first = best_peptide_sites[i]; // store the site
      sites[i].seq_1 = rev->second; // and permutation
      bool peptide_not_found = true;
      // iterate from best scoring peptide to the first peptide that doesn't contain the current phospho site
      do
      {
        ++rev;
        for (Size j = 0; j < best_peptide_sites.size(); ++j)
        {
          if (j == i)
          {
            // candidate must NOT contain the current site i ...
            if (find(permutations[rev->second].begin(), permutations[rev->second].end(), best_peptide_sites[j]) != permutations[rev->second].end())
            {
              peptide_not_found = true;
              break;
            }
            else
            {
              peptide_not_found = false;
            }
          }
          else
          {
            // ... but must contain every other site of the best assignment
            if (find(permutations[rev->second].begin(), permutations[rev->second].end(), best_peptide_sites[j]) == permutations[rev->second].end())
            {
              peptide_not_found = true;
              break;
            }
            else
            {
              peptide_not_found = false;
            }
          }
        }
      } while (peptide_not_found);
      // store permutation of peptide without the phospho site i (seq_2)
      sites[i].seq_2 = rev->second;
      // store phospho site location that is not contained in the best scoring (seq_1) but in seq_2.
      for (Size j = 0; j < permutations[sites[i].seq_2].size(); ++j)
      {
        if (find(permutations[sites[i].seq_1].begin(), permutations[sites[i].seq_1].end(), permutations[sites[i].seq_2][j]) == permutations[sites[i].seq_1].end())
        {
          sites[i].second = permutations[sites[i].seq_2][j];
          break;
        }
      }
    }
    // store peak depth that achieves maximum score difference between best and runner up for every phospho site.
    for (Size i = 0; i < sites.size(); ++i)
    {
      double maximum_score_difference = 0.0;
      sites[i].peak_depth = 1;
      vector<double>::const_iterator first_it = peptide_site_scores[sites[i].seq_1].begin();
      vector<double>::const_iterator second_it = peptide_site_scores[sites[i].seq_2].begin();
      // 'depth' walks peak depths 1..10 in lockstep with the two score vectors
      for (Size depth = 1; second_it != peptide_site_scores[sites[i].seq_2].end(); ++second_it, ++first_it, ++depth)
      {
        double phospho_at_site_score = *first_it;
        double no_phospho_at_site_score = *second_it;
        double score_difference = phospho_at_site_score - no_phospho_at_site_score;
        if (score_difference > maximum_score_difference)
        {
          maximum_score_difference = score_difference;
          sites[i].peak_depth = depth;
        }
      }
    }
  }
// calculation of the number of different speaks between the theoretical spectra of the two best scoring peptide permutations, respectively
void AScore::computeSiteDeterminingIons_(const vector<PeakSpectrum>& th_spectra, const ProbablePhosphoSites& candidates, vector<PeakSpectrum>& site_determining_ions) const
{
site_determining_ions.clear();
site_determining_ions.resize(2);
PeakSpectrum spectrum_first = th_spectra[candidates.seq_1];
PeakSpectrum spectrum_second = th_spectra[candidates.seq_2];
PeakSpectrum spectrum_first_diff;
AScore::getSpectrumDifference_(
spectrum_first.begin(), spectrum_first.end(),
spectrum_second.begin(), spectrum_second.end(),
std::inserter(spectrum_first_diff, spectrum_first_diff.begin()));
PeakSpectrum spectrum_second_diff;
AScore::getSpectrumDifference_(
spectrum_second.begin(), spectrum_second.end(),
spectrum_first.begin(), spectrum_first.end(),
std::inserter(spectrum_second_diff, spectrum_second_diff.begin()));
OPENMS_LOG_DEBUG << spectrum_first_diff << std::endl;
OPENMS_LOG_DEBUG << spectrum_second_diff << std::endl;
site_determining_ions[0] = spectrum_first_diff;
site_determining_ions[1] = spectrum_second_diff;
site_determining_ions[0].sortByPosition();
site_determining_ions[1].sortByPosition();
}
Size AScore::numberOfMatchedIons_(const PeakSpectrum& th, const PeakSpectrum& window, Size depth) const
{
PeakSpectrum window_reduced = window;
if (window_reduced.size() > depth)
{
window_reduced.resize(depth);
}
window_reduced.sortByPosition();
Size matched_peaks(0);
if (fragment_tolerance_ppm_)
{
MatchedIterator<PeakSpectrum, PpmTrait> it(th, window_reduced, fragment_mass_tolerance_);
for (; it != it.end(); ++it) ++matched_peaks;
}
else
{
MatchedIterator<PeakSpectrum, DaTrait> it(th, window_reduced, fragment_mass_tolerance_);
for (; it != it.end(); ++it) ++matched_peaks;
}
return matched_peaks;
}
double AScore::peptideScore_(const std::vector<double>& scores) const
{
OPENMS_PRECONDITION(scores.size() == 10, "Scores vector must contain a score for every peak level.");
return (scores[0] * 0.5
+ scores[1] * 0.75
+ scores[2]
+ scores[3]
+ scores[4]
+ scores[5]
+ scores[6] * 0.75
+ scores[7] * 0.5
+ scores[8] * 0.25
+ scores[9] * 0.25)
/ 7.0;
}
vector<Size> AScore::getSites_(const String& unmodified) const
{
vector<Size> tupel;
for (Size i = 0; i < unmodified.size(); ++i)
{
if (unmodified[i] == 'Y' || unmodified[i] == 'T' || unmodified[i] == 'S')
{
tupel.push_back(i);
}
}
return tupel;
}
  // Enumerates all combinations ("permutations" in AScore terminology) of
  // 'n_phosphorylation_events' positions drawn from 'sites' via classic
  // n-choose-k recursion; each combination preserves the input order of sites.
  vector<vector<Size>> AScore::computePermutations_(const vector<Size>& sites, Int n_phosphorylation_events) const
  {
    vector<vector<Size> > permutations;
    if (n_phosphorylation_events == 0)
    {
      return permutations;
    }
    else if (n_phosphorylation_events == 1)
    {
      // base case: each single site forms one combination
      for (Size i = 0; i < sites.size(); ++i)
      {
        vector<Size> temp;
        temp.push_back(sites[i]);
        permutations.push_back(temp);
      }
      return permutations;
    }
    // All sites are phosphorylated? Return one permutation containing all sites at once.
    else if (sites.size() == (Size)n_phosphorylation_events)
    {
      permutations.push_back(sites);
      return permutations;
    }
    else
    // Generate all n_phosphorylation_events sized sets from sites
    {
      vector<Size> head;
      vector<vector<Size>> tail;
      // all permutations with first site selected
      head.push_back(sites[0]);
      vector<Size> tupel_left(++sites.begin(), sites.end());
      Int tail_phospho_sites = n_phosphorylation_events - 1;
      tail = computePermutations_(tupel_left, tail_phospho_sites);
      for (vector<vector<Size>>::iterator it = tail.begin(); it != tail.end(); ++it)
      {
        vector<Size> temp(head);
        temp.insert(temp.end(), it->begin(), it->end());
        permutations.push_back(temp);
      }
      // all permutations with first site not selected
      vector<vector<Size>> other_possibilities(computePermutations_(tupel_left, n_phosphorylation_events));
      permutations.insert(permutations.end(), other_possibilities.begin(), other_possibilities.end());
      return permutations;
    }
  }
/// Computes number of phospho events in a sequence
Size AScore::numberOfPhosphoEvents_(const String& sequence) const
{
Size cnt_phospho_events = 0;
for (Size i = sequence.find("(Phospho)"); i != std::string::npos; i = sequence.find("(Phospho)", i + 9))
{
++cnt_phospho_events;
}
return cnt_phospho_events;
}
/// Create variant of the peptide with all phosphorylations removed
AASequence AScore::removePhosphositesFromSequence_(const String& sequence) const
{
String seq(sequence);
seq.substitute("(Phospho)", "");
AASequence without_phospho = AASequence::fromString(seq);
return without_phospho;
}
  /// Create theoretical spectra: one singly-charged spectrum (b-/y-ions, the
  /// TSG default) per site permutation, obtained by placing "Phospho" at the
  /// permutation's positions on the phospho-free sequence. The modified
  /// sequence string is stored as the spectrum name.
  vector<PeakSpectrum> AScore::createTheoreticalSpectra_(const vector<vector<Size>>& permutations, const AASequence& seq_without_phospho) const
  {
    vector<PeakSpectrum> th_spectra;
    TheoreticalSpectrumGenerator spectrum_generator;
    th_spectra.resize(permutations.size());
    for (Size i = 0; i < permutations.size(); ++i)
    {
      AASequence seq(seq_without_phospho);
      Size permu = 0; // index into permutation i's site list (assumed ascending)
      for (Size as = 0; as < seq.size(); ++as)
      {
        if (as == permutations[i][permu])
        {
          seq.setModification(as, "Phospho");
          ++permu;
        }
        // all sites of this permutation placed -> stop early
        if (permu == permutations[i].size())
        {
          break;
        }
      }
      // we mono-charge spectra, generating b- and y-ions is the default behavior of the TSG
      spectrum_generator.getSpectrum(th_spectra[i], seq, 1, 1);
      th_spectra[i].setName(seq.toString());
    }
    return th_spectra;
  }
std::vector<PeakSpectrum> AScore::peakPickingPerWindowsInSpectrum_(PeakSpectrum& real_spectrum) const
{
vector<PeakSpectrum> windows_top10;
double spect_lower_bound = floor(real_spectrum.front().getMZ() / 100) * 100;
double spect_upper_bound = ceil(real_spectrum.back().getMZ() / 100) * 100;
Size number_of_windows = static_cast<Size>(ceil((spect_upper_bound - spect_lower_bound) / 100));
windows_top10.resize(number_of_windows);
PeakSpectrum::Iterator it_current_peak = real_spectrum.begin();
Size window_upper_bound(spect_lower_bound + 100);
for (Size current_window = 0; current_window < number_of_windows; ++current_window)
{
PeakSpectrum real_window;
while ((it_current_peak < real_spectrum.end()) && ((*it_current_peak).getMZ() <= window_upper_bound))
{
real_window.push_back(*it_current_peak);
++it_current_peak;
}
real_window.sortByIntensity(true);
for (Size i = 0; (i < 10) & (i < real_window.size()); ++i)
{
windows_top10[current_window].push_back(real_window[i]);
}
window_upper_bound += 100;
}
return windows_top10;
}
  // Computes, for every site permutation, the ten scores -10*log10(P) obtained
  // at peak depths 1..10, where P is the binomial survival probability of the
  // total number of matched ions across all 100 Da windows.
  std::vector<std::vector<double>> AScore::calculatePermutationPeptideScores_(vector<PeakSpectrum>& th_spectra, const vector<PeakSpectrum>& windows_top10) const
  {
    //prepare peak depth for all windows in the actual spectrum
    vector<vector<double>> permutation_peptide_scores(th_spectra.size());
    vector<vector<double>>::iterator site_score = permutation_peptide_scores.begin();
    // for each phospho site assignment
    for (vector<PeakSpectrum>::iterator it = th_spectra.begin(); it != th_spectra.end(); ++it, ++site_score)
    {
      // the number of theoretical peaks (all b- and y-ions) correspond to the number of trials N
      Size N = it->size();
      site_score->resize(10);
      for (Size i = 1; i <= 10; ++i) // peak depth i
      {
        Size n = 0;
        for (Size current_win = 0; current_win < windows_top10.size(); ++current_win) // count matched ions over all 100 Da windows
        {
          n += numberOfMatchedIons_(*it, windows_top10[current_win], i);
        }
        // the random match probability grows linearly with the peak depth
        double p = static_cast<double>(i) * base_match_probability_;
        double cumulative_score = computeCumulativeScore_(N, n, p);
        //abs is used to avoid -0 score values
        (*site_score)[i - 1] = abs((-10.0 * log10(cumulative_score)));
      }
    }
    return permutation_peptide_scores;
  }
std::multimap<double, Size> AScore::rankWeightedPermutationPeptideScores_(const vector<vector<double>>& peptide_site_scores) const
{
multimap<double, Size> ranking;
for (Size i = 0; i != peptide_site_scores.size(); ++i)
{
double weighted_score = peptideScore_(peptide_site_scores[i]);
ranking.insert(pair<double, Size>(weighted_score, i));
}
return ranking;
}
int AScore::compareMZ_(double mz1, double mz2) const
{
double tolerance = fragment_mass_tolerance_;
double error = mz1 - mz2;
if (fragment_tolerance_ppm_)
{
double avg_mass = (mz1 + mz2) / 2;
tolerance = tolerance * avg_mass / 1e6;
}
if (error < -tolerance)
{
return -1;
}
else if (error > tolerance)
{
return 1;
}
else
{
return 0;
}
}
  // Caches the current parameter values into member variables; invoked by
  // DefaultParamHandler whenever parameters change.
  void AScore::updateMembers_()
  {
    fragment_mass_tolerance_ = param_.getValue("fragment_mass_tolerance");
    fragment_tolerance_ppm_ = (param_.getValue("fragment_mass_unit") == "ppm");
    max_peptide_length_ = param_.getValue("max_peptide_length");
    max_permutations_ = param_.getValue("max_num_perm");
    unambiguous_score_ = param_.getValue("unambiguous_score");
  }
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/IDBoostGraph.cpp | .cpp | 66,375 | 1,735 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Julianus Pfeuffer $
// $Authors: Julianus Pfeuffer $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/IDBoostGraph.h>
#include <OpenMS/ANALYSIS/ID/FalseDiscoveryRate.h>
#include <OpenMS/ANALYSIS/ID/IDScoreGetterSetter.h>
#include <OpenMS/CHEMISTRY/ProteaseDigestion.h>
#include <OpenMS/CONCEPT/ProgressLogger.h>
#include <OpenMS/FORMAT/FASTAFile.h>
#include <OpenMS/DATASTRUCTURES/FASTAContainer.h>
#include <OpenMS/SYSTEM/StopWatch.h>
#include <boost/graph/copy.hpp>
#include <boost/graph/graphviz.hpp>
#include <boost/graph/graph_utility.hpp>
#include <boost/graph/connected_components.hpp>
#include <ostream>
#ifdef _OPENMP
#include <omp.h>
#endif
//#define INFERENCE_DEBUG
//#define INFERENCE_MT_DEBUG
using namespace OpenMS;
using namespace std;
using Internal::IDBoostGraph;
//TODO go through the vectors and see if we can preallocate some.
namespace OpenMS
{
/// Hasher for sets of uints using boost::hash_range
struct MyUIntSetHasher
{
public:
size_t operator()(const set<IDBoostGraph::vertex_t>& s) const
{
return boost::hash_range(s.begin(), s.end());
}
};
/// Helper struct to create Sequence->Replicate->Chargestate hierarchy for a set of PSMs from a protein
struct IDBoostGraph::SequenceToReplicateChargeVariantHierarchy
{
//TODO only add the intermediate nodes if there are more than one "splits"
SequenceToReplicateChargeVariantHierarchy(Size nrReplicates, int minCharge, int maxCharge):
seq_to_vecs_{},
minCharge_(minCharge),
nrCharges_(Size(maxCharge - minCharge) + 1u),
nrReplicates_(nrReplicates)
{}
void insert(String& seq, Size replicate, int charge, vertex_t pepVtx)
{
int chargeToPut = charge - minCharge_;
OPENMS_PRECONDITION(replicate < nrReplicates_, "Replicate OOR")
OPENMS_PRECONDITION(static_cast<Size>(chargeToPut) < nrCharges_, "Charge OOR")
auto seq_it = seq_to_vecs_.emplace(std::move(seq), std::vector<std::vector<std::set<vertex_t>>>{nrReplicates_, std::vector<std::set<vertex_t>>(nrCharges_, std::set<vertex_t>())});
seq_it.first->second[replicate][chargeToPut].insert(pepVtx);
}
//TODO finish and rework (root not needed?)
void insertToGraph(vertex_t /*rootProteinVtx*/, Graph& graph)
{
for (const auto& seqContainer : seq_to_vecs_)
{
vertex_t pep = boost::add_vertex(Peptide{seqContainer.first}, graph);
for (Size s = 0; s < seqContainer.second.size(); ++s)
{
vertex_t ri = boost::add_vertex(RunIndex{s},graph);
boost::add_edge(pep, ri, graph);
for (Size t = 0; t < seqContainer.second[s].size(); ++t)
{
vertex_t cs = boost::add_vertex(Charge{minCharge_ + int(t)}, graph);
boost::add_edge(ri, cs, graph);
for (const auto& pepVtx : seqContainer.second[s][t])
{
GraphConst::adjacency_iterator adjIt, adjIt_end;
// This assumes, that at this point, only proteins are connected to PSMS
boost::tie(adjIt, adjIt_end) = boost::adjacent_vertices(pepVtx, graph);
for (; adjIt != adjIt_end; adjIt++)
{
IDBoostGraph::IDPointer curr_idObj = graph[*adjIt];
if (curr_idObj.which() == 0) //protein
{
//below would invalidate iterator. We use clear vertex below
//boost::remove_edge(*adjIt, pepVtx, graph); //remove old one from protein
boost::add_edge(*adjIt, pep, graph); //instead add it to the sequence
}
}
boost::clear_vertex(pepVtx, graph);
boost::add_edge(cs, pepVtx, graph); //now connect the last level (charges) to this spectrum
}
}
}
}
}
std::unordered_map<std::string, std::vector<std::vector<std::set<vertex_t>>>> seq_to_vecs_;
int minCharge_;
Size nrCharges_;
Size nrReplicates_;
};
  // Builds the protein-PSM graph from a list of peptide identifications.
  // Dispatches to the run-aware builder when 'use_run_info' is set; if no
  // experimental design is supplied, one is derived from the protein run.
  IDBoostGraph::IDBoostGraph(ProteinIdentification& proteins,
                             PeptideIdentificationList& idedSpectra,
                             Size use_top_psms,
                             bool use_run_info,
                             bool best_psms_annotated,
                             const std::optional<const ExperimentalDesign>& ed):
      protIDs_(proteins)
  {
    OPENMS_LOG_INFO << "Building graph on " << idedSpectra.size() << " spectra and " << proteins.getHits().size() << " proteins." << std::endl;
    if (use_run_info)
    {
      buildGraphWithRunInfo_(proteins, idedSpectra, use_top_psms, ed.value_or(ExperimentalDesign::fromIdentifications({proteins})));
    }
    else
    {
      buildGraph_(proteins, idedSpectra, use_top_psms, best_psms_annotated);
    }
  }
  // Builds the protein-PSM graph from a ConsensusMap (assigned and, optionally,
  // unassigned IDs). Dispatches to the run-aware builder when 'use_run_info' is
  // set; if no experimental design is supplied, one is derived from the map.
  IDBoostGraph::IDBoostGraph(ProteinIdentification& proteins,
                             ConsensusMap& cmap,
                             Size use_top_psms,
                             bool use_run_info,
                             bool use_unassigned_ids,
                             bool best_psms_annotated,
                             const std::optional<const ExperimentalDesign>& ed):
      protIDs_(proteins)
  {
    OPENMS_LOG_INFO << "Building graph on " << cmap.size() << " features, " << cmap.getUnassignedPeptideIdentifications().size() <<
                    " unassigned spectra (if chosen) and " << proteins.getHits().size() << " proteins." << std::endl;
    if (use_run_info)
    {
      buildGraphWithRunInfo_(proteins, cmap, use_top_psms, use_unassigned_ids, ed.value_or(ExperimentalDesign::fromConsensusMap(cmap)));
    }
    else
    {
      buildGraph_(proteins, cmap, use_top_psms, use_unassigned_ids, best_psms_annotated);
    }
  }
unordered_map<unsigned, unsigned> convertMapLabelFree_(
const map<pair<String, unsigned>, unsigned>& fileToRun,
const StringList& files)
{
unordered_map<unsigned, unsigned> indexToRun;
unsigned i = 0;
for (const auto& file : files)
{
indexToRun[i] = fileToRun.at({file,1});
++i;
} // TODO what if file is not in the experimental design? Check in the very beginning!?
return indexToRun;
}
unordered_map<unsigned, unsigned> convertMap_(
const map<pair<String, unsigned>, unsigned>& fileLabToPrefractionationGroup,
const ConsensusMap::ColumnHeaders& idxToFileLabMappings,
const String& experiment_type)
{
unordered_map<unsigned, unsigned> indexToRun;
for (const auto& mapping : idxToFileLabMappings)
{
indexToRun[mapping.first] =
fileLabToPrefractionationGroup.at(make_pair(mapping.second.filename, mapping.second.getLabelAsUInt(experiment_type)));
} // TODO what if file is not in the experimental design? Check in the very beginning!?
return indexToRun;
}
  // Adds up to 'use_top_psms' PSMs of @p spectrum (0 = all) as vertices, plus
  // one vertex per referenced protein (looked up in @p accession_map), and
  // connects them with edges. If 'best_psms_annotated' is set, only hits
  // flagged with the "best_per_peptide" meta value enter the graph. PSMs
  // referencing unknown accessions are skipped with a warning.
  void IDBoostGraph::addPeptideIDWithAssociatedProteins_(
      PeptideIdentification& spectrum,
      unordered_map<IDPointer, vertex_t, boost::hash<IDPointer>>& vertex_map,
      const unordered_map<string, ProteinHit*>& accession_map,
      Size use_top_psms,
      bool best_psms_annotated)
  {
    //TODO add psm regularizer nodes here optionally if using multiple psms (i.e. forcing them, so that only 1 or maybe 2 are present per spectrum)
    auto pepIt = spectrum.getHits().begin();
    //TODO sort or assume sorted
    auto pepItEnd = (use_top_psms == 0 || (spectrum.getHits().size() <= use_top_psms)) ? spectrum.getHits().end() : spectrum.getHits().begin() + use_top_psms;
    for (; pepIt != pepItEnd; ++pepIt)
    {
      if (!best_psms_annotated || static_cast<int>(pepIt->getMetaValue("best_per_peptide")))
      {
        IDPointer pepPtr(&(*pepIt));
        vertex_t pepV = addVertexWithLookup_(pepPtr, vertex_map);
        for (auto const &proteinAcc : pepIt->extractProteinAccessionsSet())
        {
          // assumes protein is present
          auto accToPHit = accession_map.find(std::string(proteinAcc));
          if (accToPHit == accession_map.end())
          {
            OPENMS_LOG_WARN << "Warning: Building graph: skipping pep that maps to a non existent protein accession.\n";
            continue;
          }
          //TODO consider/calculate missing digests. Probably not here though!
          //int missingTheorDigests = accToPHit->second->getMetaValue("missingTheorDigests");
          //accToPHit->second->setMetaValue("missingTheorDigests", missingTheorDigests);
          IDPointer prot(accToPHit->second);
          vertex_t protV = addVertexWithLookup_(prot, vertex_map);
          boost::add_edge(protV, pepV, g);
        }
      }
    }
  }
void IDBoostGraph::addPeptideAndAssociatedProteinsWithRunInfo_(
PeptideIdentification& spectrum,
unordered_map<unsigned, unsigned>& indexToPrefractionationGroup,
unordered_map<IDPointer, vertex_t, boost::hash<IDPointer>>& vertex_map,
unordered_map<std::string, ProteinHit*>& accession_map,
Size use_top_psms)
{
Size idx(0);
Size pfg(0);
if (spectrum.metaValueExists(Constants::UserParam::ID_MERGE_INDEX))
{
idx = spectrum.getMetaValue(Constants::UserParam::ID_MERGE_INDEX);
auto find_it = indexToPrefractionationGroup.find(idx);
if (find_it == indexToPrefractionationGroup.end())
{
throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Reference (id_merge_index) to non-existing run found at peptide ID."
" Sth went wrong during merging. Aborting.");
}
pfg = find_it->second;
}
else
{
throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Trying to read run information (id_merge_index) but none present at peptide ID."
" Did you annotate runs during merging? Aborting.");
}
//TODO add psm regularizer nodes here optionally if using multiple psms
//TODO sort or assume sorted
auto pepItEnd = use_top_psms == 0 || spectrum.getHits().empty() ? spectrum.getHits().end() : spectrum.getHits().begin() + use_top_psms;
for (auto pepIt = spectrum.getHits().begin(); pepIt != pepItEnd; ++pepIt)
{
IDPointer pepPtr(&(*pepIt));
vertex_t pepV = addVertexWithLookup_(pepPtr, vertex_map);
//------- Only difference to the function without run info -----//
pepHitVtx_to_run_[pepV] = pfg;
//------- Only difference to the function without run info -----//
for (auto const & proteinAcc : pepIt->extractProteinAccessionsSet())
{
// assumes protein is present
auto accToPHit = accession_map.find(std::string(proteinAcc));
if (accToPHit == accession_map.end())
{
OPENMS_LOG_WARN << "Warning: Building graph: skipping pep that maps to a non existent protein accession.\n";
continue;
}
//TODO consider/calculate missing digests. Probably not here though!
//int missingTheorDigests = accToPHit->second->getMetaValue("missingTheorDigests");
//accToPHit->second->setMetaValue("missingTheorDigests", missingTheorDigests);
IDPointer prot(accToPHit->second);
vertex_t protV = addVertexWithLookup_(prot, vertex_map);
boost::add_edge(protV, pepV, g);
}
}
}
  // Run-aware graph construction from a ConsensusMap: resolves each ID's
  // 'id_merge_index' to a prefractionation group via the experimental design,
  // then adds PSM and protein vertices for all IDs of the protein run
  // (including unassigned IDs if requested).
  void IDBoostGraph::buildGraphWithRunInfo_(ProteinIdentification& proteins,
                                            ConsensusMap& cmap,
                                            Size use_top_psms,
                                            bool use_unassigned_ids,
                                            const ExperimentalDesign& ed)
  {
    unordered_map<unsigned, unsigned> indexToPrefractionationGroup;
    {
      // TODO check that the files in the ProteinID run are all in the Exp. Design
      //StringList files;
      //proteins.getPrimaryMSRunPath(files); // files merged in the protein identification run to be inferred
      const ConsensusMap::ColumnHeaders& colHeaders = cmap.getColumnHeaders(); // all possible files and labels in the experiment
      //TODO use exp. design to merge fractions
      map<pair<String, unsigned>, unsigned> fileLabelToPrefractionationGroup = ed.getPathLabelToPrefractionationMapping(false);
      nrPrefractionationGroups_ = fileLabelToPrefractionationGroup.size();
      indexToPrefractionationGroup = convertMap_(fileLabelToPrefractionationGroup, colHeaders, cmap.getExperimentType()); // convert to index in the peptide ids
    }
    //TODO is this vertex_map really necessary. I think PSMs are always unique in our datastructures and could be
    // added without lookup.
    // And for the proteins we could add the vertex ID to the accession_map here and use that for lookup
    unordered_map<IDPointer, vertex_t, boost::hash<IDPointer>> vertex_map{};
    // accession -> ProteinHit lookup used when connecting PSMs to proteins
    unordered_map<std::string, ProteinHit*> accession_map{};
    for (auto& prot : proteins.getHits())
    {
      accession_map[prot.getAccession()] = &prot;
    }
    ProgressLogger pl;
    // rough count (features + optional unassigned IDs) for progress display only
    Size roughNrIds = cmap.size();
    if (use_unassigned_ids) roughNrIds += cmap.getUnassignedPeptideIdentifications().size();
    pl.setLogType(ProgressLogger::CMD);
    pl.startProgress(0, roughNrIds, "Building graph with run information...");
    const String& protRun = proteins.getIdentifier();
    for (auto& feat : cmap)
    {
      for (auto& spectrum : feat.getPeptideIdentifications())
      {
        // only consider IDs belonging to the protein run to be inferred
        if (spectrum.getIdentifier() == protRun)
        {
          addPeptideAndAssociatedProteinsWithRunInfo_(spectrum, indexToPrefractionationGroup,
                                                      vertex_map, accession_map, use_top_psms);
        }
      }
      pl.nextProgress();
    }
    if (use_unassigned_ids)
    {
      for (auto& id : cmap.getUnassignedPeptideIdentifications())
      {
        if (id.getIdentifier() == protRun)
        {
          addPeptideAndAssociatedProteinsWithRunInfo_(id, indexToPrefractionationGroup,
                                                      vertex_map, accession_map, use_top_psms);
        }
        pl.nextProgress();
      }
    }
    pl.endProgress();
  }
  // Run-aware graph construction from a list of peptide identifications:
  // resolves each ID's 'id_merge_index' to a prefractionation group (label-free
  // assumption) and adds PSM and protein vertices for all IDs of the protein run.
  void IDBoostGraph::buildGraphWithRunInfo_(ProteinIdentification& proteins,
                                            PeptideIdentificationList& idedSpectra,
                                            Size use_top_psms,
                                            const ExperimentalDesign& ed)
  {
    unordered_map<unsigned, unsigned> indexToPrefractionationGroup;
    {
      // files merged into the protein identification run to be inferred
      StringList files;
      proteins.getPrimaryMSRunPath(files);
      map<pair<String, unsigned>, unsigned> fileLabelToPrefractionationGroup = ed.getPathLabelToPrefractionationMapping(false);
      nrPrefractionationGroups_ = fileLabelToPrefractionationGroup.size();
      //TODO if only given proteins and peptide IDs we automatically assume label-free since I don't know
      // where the label would be stored.
      indexToPrefractionationGroup = convertMapLabelFree_(fileLabelToPrefractionationGroup, files); // convert to index in the peptide ids
    }
    //TODO is this vertex_map really necessary. I think PSMs are always unique in our datastructures and could be
    // added without lookup.
    // And for the proteins we could add the vertex ID to the accession_map here and use that for lookup
    unordered_map<IDPointer, vertex_t, boost::hash<IDPointer>> vertex_map{};
    // accession -> ProteinHit lookup used when connecting PSMs to proteins
    unordered_map<std::string, ProteinHit*> accession_map{};
    for (auto& prot : proteins.getHits())
    {
      accession_map[prot.getAccession()] = &prot;
    }
    ProgressLogger pl;
    pl.setLogType(ProgressLogger::CMD);
    pl.startProgress(0, idedSpectra.size(), "Building graph with run info...");
    const String& protRun = proteins.getIdentifier();
    for (auto& spectrum : idedSpectra)
    {
      // only consider IDs belonging to the protein run to be inferred
      if (spectrum.getIdentifier() == protRun)
      {
        addPeptideAndAssociatedProteinsWithRunInfo_(spectrum, indexToPrefractionationGroup,
                                                    vertex_map, accession_map, use_top_psms);
      }
      pl.nextProgress();
    }
    pl.endProgress();
  }
//TODO actually to build the graph, the inputs could be passed const. But if you want to do sth
// on the graph later it needs to be non-const. Overload the next functions or somehow make sure it can be used const.
void IDBoostGraph::buildGraph_(ProteinIdentification& proteins,
PeptideIdentificationList& idedSpectra,
Size use_top_psms,
bool best_psms_annotated)
{
unordered_map<IDPointer, vertex_t, boost::hash<IDPointer>> vertex_map{};
unordered_map<string, ProteinHit*> accession_map{};
for (auto& prot : proteins.getHits())
{
accession_map[prot.getAccession()] = &prot;
}
ProgressLogger pl;
pl.setLogType(ProgressLogger::CMD);
pl.startProgress(0, idedSpectra.size(), "Building graph...");
const String& protRun = proteins.getIdentifier();
for (auto& spectrum : idedSpectra)
{
if (spectrum.getIdentifier() == protRun)
{
addPeptideIDWithAssociatedProteins_(spectrum, vertex_map, accession_map, use_top_psms, best_psms_annotated);
}
pl.nextProgress();
}
pl.endProgress();
}
void IDBoostGraph::buildGraph_(ProteinIdentification& proteins,
ConsensusMap& cmap,
Size use_top_psms,
bool use_unassigned_ids,
bool best_psms_annotated)
{
StringList runs;
proteins.getPrimaryMSRunPath(runs);
unordered_map<IDPointer, vertex_t, boost::hash<IDPointer>> vertex_map{};
unordered_map<string, ProteinHit*> accession_map{};
for (auto& prot : proteins.getHits())
{
accession_map[prot.getAccession()] = &prot;
}
ProgressLogger pl;
Size roughNrIds = cmap.size();
if (use_unassigned_ids) roughNrIds += cmap.getUnassignedPeptideIdentifications().size();
pl.setLogType(ProgressLogger::CMD);
pl.startProgress(0, roughNrIds, "Building graph...");
const String& protRun = proteins.getIdentifier();
for (auto& feature : cmap)
{
for (auto& id : feature.getPeptideIdentifications())
{
if (id.getIdentifier() == protRun)
{
addPeptideIDWithAssociatedProteins_(id, vertex_map, accession_map, use_top_psms, best_psms_annotated);
}
}
pl.nextProgress();
}
if (use_unassigned_ids)
{
for (auto& id : cmap.getUnassignedPeptideIdentifications())
{
if (id.getIdentifier() == protRun)
{
addPeptideIDWithAssociatedProteins_(id, vertex_map, accession_map, use_top_psms, best_psms_annotated);
}
pl.nextProgress();
}
}
pl.endProgress();
}
/* This would be a version where you try to build the graph based on the theoretical peptides
* but this is quite some work and additional memory overhead
* For now my plan is to create theoretically indistinguishable groups and (maybe nr of only?) missing peptides
* in PeptideIndexer and use them here
template <class T>
void IDBoostGraph::buildTheoreticalGraph(pair<int,int> chargeRange, unsigned int nrReplicates, FASTAContainer<T>& proteins)
{
///ProteaseDigestion enzyme; //TODO get from protein ID run
bool IL_equivalent = true; //TODO how to incorporate that?
// cache the first proteins
const size_t PROTEIN_CACHE_SIZE = 4e5; // 400k should be enough for most DB's and is not too hard on memory either (~200 MB FASTA)
proteins.cacheChunk(PROTEIN_CACHE_SIZE);
if (proteins.empty()) // we do not allow an empty database
{
OPENMS_LOG_ERROR << "Error: An empty database was provided. Mapping makes no sense. Aborting..." << std::endl;
//TODO throw Exception
}
bool has_active_data = true;
bool invalid_protein_sequence = false;
Size count_j_proteins(0);
Size prot_count(0);
String prot = "";
while (true)
{
has_active_data = proteins.activateCache(); // swap in last cache
if (!has_active_data)
break; // leave while-loop
proteins.cacheChunk(PROTEIN_CACHE_SIZE);
for (Size i = 0; i < prot_count; ++i)
{
prot = proteins.chunkAt(i).sequence;
prot.remove('*');
// check for invalid sequences with modifications
if (prot.has('[') || prot.has('('))
{
invalid_protein_sequence = true; // not omp-critical because its write-only
// we cannot throw an exception here, since we'd need to catch it within the parallel region
}
// convert L/J to I; also replace 'J' in proteins
if (IL_equivalent)
{
prot.substitute('L', 'I');
prot.substitute('J', 'I');
}
else
{ // warn if 'J' is found (it eats into aaa_max)
if (prot.has('J'))
{
++count_j_proteins;
}
}
Size prot_idx = i + proteins.getChunkOffset();
}
}
//TODO add support for (consensus) feature information
unordered_map<IDPointer, vertex_t, boost::hash<IDPointer>> vertex_map{};
unordered_map<string, ProteinHit*> accession_map{};
for (auto &prot : proteins_.getHits())
{
accession_map[prot.getAccession()] = &prot;
}
ProgressLogger pl;
pl.setLogType(ProgressLogger::CMD);
pl.startProgress(0,idedSpectra_.size(), "Building graph...");
for (auto & spectrum : idedSpectra_)
{
//TODO add psm nodes here if using all psms
auto pepIt = spectrum.getHits().begin();
auto pepItEnd = use_all_psms || spectrum.getHits().empty() ? spectrum.getHits().end() : spectrum.getHits().begin() + 1;
for (; pepIt != pepItEnd; ++pepIt)
{
IDPointer pepPtr(&(*pepIt));
vertex_t pepV = addVertexWithLookup_(pepPtr, vertex_map);
for (auto const & proteinAcc : pepIt->extractProteinAccessionsSet())
{
// assumes protein is present
auto accToPHit = accession_map.find(std::string(proteinAcc));
if (accToPHit == accession_map.end())
{
std::cout << "Warning: Building graph: skipping pep that maps to a non existent protein accession.";
continue;
}
//TODO consider/calculate missing digests.
//int missingTheorDigests = accToPHit->second->getMetaValue("missingTheorDigests");
//accToPHit->second->setMetaValue("missingTheorDigests", missingTheorDigests);
IDPointer prot(accToPHit->second);
vertex_t protV = addVertexWithLookup_(prot, vertex_map);
boost::add_edge(protV, pepV, g);
}
}
pl.nextProgress();
}
pl.endProgress();
}*/
/// Applies the given functor to every connected component (in parallel; functor receives the CC and its index)
void IDBoostGraph::applyFunctorOnCCs(const std::function<unsigned long(Graph&, unsigned int)>& functor)
{
  if (ccs_.empty())
  {
    throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No connected components annotated. Run computeConnectedComponents first!");
  }
  // Use dynamic schedule because big CCs take much longer!
  #pragma omp parallel for schedule(dynamic) default(none) shared(functor)
  for (int i = 0; i < static_cast<int>(ccs_.size()); i += 1)
  {
#ifdef INFERENCE_BENCH
    StopWatch sw;
    sw.start();
#endif
    Graph& curr_cc = ccs_.at(i);
#ifdef INFERENCE_MT_DEBUG
    OPENMS_LOG_INFO << "Processing on thread# " << omp_get_thread_num() << "\n";
#endif
#ifdef INFERENCE_DEBUG
    OPENMS_LOG_INFO << "Processing cc " << i << " with " << boost::num_vertices(curr_cc) << " vertices.\n";
    OPENMS_LOG_INFO << "Printing cc " << i << "\n";
    printGraph(LOG_INFO, curr_cc);
    OPENMS_LOG_INFO << "Printed cc " << i << "\n";
#endif
#ifdef INFERENCE_BENCH
    // Fixed: the functor takes (Graph&, unsigned int); the former
    // single-argument call did not compile with INFERENCE_BENCH defined.
    unsigned long result = functor(curr_cc, i);
#else
    functor(curr_cc, i);
#endif
#ifdef INFERENCE_BENCH
    sw.stop();
    sizes_and_times_[i] = tuple<vertex_t, vertex_t, unsigned long, double>{boost::num_vertices(curr_cc), boost::num_edges(curr_cc), result, sw.getClockTime()};
#endif
  }
#ifdef INFERENCE_BENCH
  // dump per-CC sizes and runtimes for benchmarking
  ofstream debugfile;
  debugfile.open("idgraph_functortimes_" + DateTime::now().getTime() + ".tsv");
  for (const auto& size_time : sizes_and_times_ )
  {
    debugfile << std::get<0>(size_time) << "\t"
              << std::get<1>(size_time) << "\t"
              << std::get<2>(size_time) << "\t"
              << std::get<3>(size_time) << "\n";
  }
  debugfile.close();
#endif
}
/// Applies the given functor to every connected component, single-threaded
void IDBoostGraph::applyFunctorOnCCsST(const std::function<void(Graph&)>& functor)
{
  if (ccs_.empty())
  {
    throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No connected components annotated. Run computeConnectedComponents first!");
  }
  // visit each component in order; no parallelism here
  for (int cc_idx = 0; cc_idx < static_cast<int>(ccs_.size()); cc_idx += 1)
  {
#ifdef INFERENCE_BENCH
    StopWatch timer;
    timer.start();
#endif
    Graph& component = ccs_.at(cc_idx);
#ifdef INFERENCE_DEBUG
    OPENMS_LOG_INFO << "Processing cc " << cc_idx << " with " << boost::num_vertices(component) << " vertices.\n";
    OPENMS_LOG_INFO << "Printing cc " << cc_idx << "\n";
    printGraph(LOG_INFO, component);
    OPENMS_LOG_INFO << "Printed cc " << cc_idx << "\n";
#endif
    functor(component);
#ifdef INFERENCE_BENCH
    timer.stop();
    // record (nr vertices, nr edges, unused, wall time) for this component
    sizes_and_times_[cc_idx] = tuple<vertex_t, vertex_t, unsigned long, double>{boost::num_vertices(component), boost::num_edges(component), 0, timer.getClockTime()};
#endif
  }
#ifdef INFERENCE_BENCH
  // write the collected timings to a TSV for offline analysis
  ofstream bench_out;
  bench_out.open("idgraph_functortimes_" + DateTime::now().getTime() + ".tsv");
  for (const auto& entry : sizes_and_times_)
  {
    bench_out << std::get<0>(entry) << "\t" << std::get<1>(entry) << "\t" << std::get<2>(entry) << "\t" << std::get<3>(entry) << "\n";
  }
  bench_out.close();
#endif
}
/// Annotates indistinguishable protein groups (already present as group nodes
/// in the graph) into the underlying ProteinIdentification (protIDs_).
/// Processes the whole graph if connected components were not computed yet,
/// otherwise each component in parallel.
/// @param addSingletons if true, single-protein groups are reported as well
/// @throws Exception::MissingInformation if the graph was never built
void IDBoostGraph::annotateIndistProteins(bool addSingletons)
{
if (ccs_.empty() && boost::num_vertices(g) == 0)
{
throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Graph empty. Build it first.");
}
ProgressLogger pl;
pl.setLogType(ProgressLogger::CMD);
if (ccs_.empty())
{
// no components computed yet: operate on the single full graph
pl.startProgress(0, 1, "Annotating indistinguishable proteins...");
annotateIndistProteins_(g, addSingletons);
pl.nextProgress();
pl.endProgress();
}
else
{
pl.startProgress(0, ccs_.size(), "Annotating indistinguishable proteins...");
Size cnt(0);
// dynamic schedule: component sizes vary a lot
#pragma omp parallel for schedule(dynamic) default(none) shared(addSingletons, cnt, pl)
for (int i = 0; i < static_cast<int>(ccs_.size()); i += 1)
{
const Graph& curr_cc = ccs_.at(i);
#ifdef INFERENCE_MT_DEBUG
OPENMS_LOG_INFO << "Processing on thread# " << omp_get_thread_num() << "\n";
#endif
#ifdef INFERENCE_DEBUG
OPENMS_LOG_INFO << "Processing cc " << i << " with " << boost::num_vertices(curr_cc) << " vertices.\n";
OPENMS_LOG_INFO << "Printing cc " << i << "\n";
printGraph(LOG_INFO, curr_cc);
OPENMS_LOG_INFO << "Printed cc " << i << "\n";
#endif
// helper guards the shared result container with an omp critical section
annotateIndistProteins_(curr_cc, addSingletons);
#pragma omp atomic
++cnt;
IF_MASTERTHREAD pl.setProgress(cnt);
}
pl.endProgress();
}
OPENMS_LOG_INFO << "Annotated " << String(protIDs_.getIndistinguishableProteins().size()) << " indist. protein groups.\n";
}
/// Computes indistinguishable protein groups from scratch (proteins sharing
/// the exact same set of peptide neighbors) and annotates them into the
/// underlying ProteinIdentification. Unlike annotateIndistProteins(), this
/// does not rely on pre-existing group nodes in the graph.
/// @param addSingletons if true, single-protein groups are reported as well
/// @throws Exception::MissingInformation if the graph was never built
void IDBoostGraph::calculateAndAnnotateIndistProteins(bool addSingletons)
{
if (ccs_.empty() && boost::num_vertices(g) == 0)
{
throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Graph empty. Build it first.");
}
ProgressLogger pl;
pl.setLogType(ProgressLogger::CMD);
if (ccs_.empty())
{
// no components computed yet: operate on the single full graph
pl.startProgress(0, 1, "Annotating indistinguishable proteins...");
calculateAndAnnotateIndistProteins_(g, addSingletons);
pl.nextProgress();
pl.endProgress();
}
else
{
pl.startProgress(0, ccs_.size(), "Annotating indistinguishable proteins...");
Size cnt(0);
// dynamic schedule: component sizes vary a lot
#pragma omp parallel for schedule(dynamic) default(none) shared(addSingletons, cnt, pl)
for (int i = 0; i < static_cast<int>(ccs_.size()); i += 1)
{
const Graph& curr_cc = ccs_.at(i);
#ifdef INFERENCE_MT_DEBUG
OPENMS_LOG_INFO << "Processing on thread# " << omp_get_thread_num() << std::endl;
#endif
#ifdef INFERENCE_DEBUG
OPENMS_LOG_INFO << "Processing cc " << i << " with " << boost::num_vertices(curr_cc) << " vertices.\n";
OPENMS_LOG_INFO << "Printing cc " << i << "\n";
printGraph(LOG_INFO, curr_cc);
OPENMS_LOG_INFO << "Printed cc " << i << "\n";
#endif
// helper guards the shared result container with an omp critical section
calculateAndAnnotateIndistProteins_(curr_cc, addSingletons);
#pragma omp atomic
++cnt;
IF_MASTERTHREAD pl.setProgress(cnt);
}
pl.endProgress();
}
}
/// Finds proteins in @p fg that are adjacent to exactly the same set of
/// peptide nodes ("indistinguishable") and appends one ProteinGroup per such
/// set to protIDs_. The group's probability is the max member score.
/// @param fg connected component (subgraph) to process (read-only)
/// @param addSingletons if true, groups of size 1 are appended as well
/// @note Safe to call from an OpenMP parallel region (critical section below).
void IDBoostGraph::calculateAndAnnotateIndistProteins_(const Graph& fg, bool addSingletons)
{
//TODO evaluate hashing performance on sets
// key: set of child peptide vertices; value: proteins sharing that exact set
unordered_map<PeptideNodeSet, ProteinNodeSet, MyUIntSetHasher> indistProteins; //find indist proteins
Graph::vertex_iterator ui, ui_end;
boost::tie(ui, ui_end) = boost::vertices(fg);
//TODO refactor into function
// Cluster proteins
for (; ui != ui_end; ++ui)
{
IDBoostGraph::IDPointer curr_idObj = fg[*ui];
//TODO introduce an enum for the types to make it more clear.
// Or use the static_visitor pattern: You have to pass the vertex with its neighbors as a second arg though.
if (curr_idObj.which() == 0) //protein: find indist. ones
{
//TODO assert that there is at least one peptide mapping to this peptide! Eg. Require IDFilter removeUnmatched before.
// Or just check rigorously here.
// collect all peptide-type neighbors of this protein
PeptideNodeSet childPeps;
GraphConst::adjacency_iterator adjIt, adjIt_end;
boost::tie(adjIt, adjIt_end) = boost::adjacent_vertices(*ui, fg);
for (; adjIt != adjIt_end; ++adjIt)
{
if (fg[*adjIt].which() >= 3) //if there are only two types (pep,prot) this check for pep is actually unnecessary
{
childPeps.insert(*adjIt);
}
}
// proteins with an identical peptide set end up in the same map entry
auto clusterIt = indistProteins.emplace(childPeps, ProteinNodeSet({*ui}));
if (!clusterIt.second) //no insertion -> append
{
(clusterIt.first)->second.insert(*ui);
}
}
}
// add the protein groups to the underlying ProteinGroup data structure only
for (auto const &pepsToGrps : indistProteins)
{
if (pepsToGrps.second.size() <= 1 && !addSingletons)
{
continue;
}
ProteinIdentification::ProteinGroup pg{};
pg.probability = -1.0;
for (auto const &proteinVID : pepsToGrps.second)
{
ProteinHit *proteinPtr = boost::get<ProteinHit*>(fg[proteinVID]);
pg.accessions.push_back(proteinPtr->getAccession());
// the following sets the score of the group to the max
// this might make not much sense if there was no inference yet -> score = 0
// And one might also want to use other scoring systems
// Anyway, without prior or add. info, all indist. proteins should have the same
// score
double oldscore = proteinPtr->getScore();
if (oldscore > pg.probability)
{
pg.probability = oldscore;
}
}
// TODO you could allocate as many groups as proteins in the beginning
// then you do not need a critical section. Resize afterwards.
// Or make a local vector of Groups and merge in a single threaded section
#pragma omp critical (ProteinGroups)
{protIDs_.getIndistinguishableProteins().push_back(pg);}
}
}
void IDBoostGraph::annotateIndistProteins_(const Graph& fg, bool addSingletons)
{
Graph::vertex_iterator ui, ui_end;
boost::tie(ui,ui_end) = boost::vertices(fg);
for (; ui != ui_end; ++ui)
{
if (fg[*ui].which() == 1) //prot group
{
ProteinIdentification::ProteinGroup pg{};
pg.probability = boost::get<IDBoostGraph::ProteinGroup>(fg[*ui]).score; //init
Graph::adjacency_iterator nbIt, nbIt_end;
boost::tie(nbIt, nbIt_end) = boost::adjacent_vertices(*ui, fg);
for (; nbIt != nbIt_end; ++nbIt)
{
if (fg[*nbIt].which() == 0) //neighboring proteins
{
ProteinHit *proteinPtr = boost::get<ProteinHit*>(fg[*nbIt]);
pg.accessions.push_back(proteinPtr->getAccession());
}
}
if (addSingletons || pg.accessions.size() > 1)
{
// TODO you could allocate as many groups as proteins in the beginning
// then you do not need a critical section. Resize afterwards.
// Or make a local vector of Groups and merge in a single threaded section
#pragma omp critical (ProteinGroups)
{protIDs_.getIndistinguishableProteins().push_back(pg);}
}
}
}
}
/// BFS from the seed(s) in @p q towards "upstream" node types (smaller
/// variant index = closer to proteins), collecting nodes with which() <= lvl
/// into @p result. If stop_at_first is set, traversal does not continue past
/// the first matching node on each path.
/// @note The queue is always left empty on return so callers can reuse it.
void IDBoostGraph::getUpstreamNodesNonRecursive(std::queue<vertex_t>& q, const Graph& graph, int lvl, bool stop_at_first, std::vector<vertex_t>& result)
{
  if (q.empty()) return; // guard: q.front() on an empty queue is UB
  if (lvl >= graph[q.front()].which())
  {
    // Seed already at/below the requested level: nothing upstream to collect.
    // Drain the queue so stale seeds cannot leak into the caller's next call
    // (previously the seed was left behind on this early exit).
    q = std::queue<vertex_t>{};
    return;
  }
  while (!q.empty())
  {
    vertex_t curr_node = q.front();
    q.pop();
    Graph::adjacency_iterator adjIt, adjIt_end;
    boost::tie(adjIt, adjIt_end) = boost::adjacent_vertices(curr_node, graph);
    for (; adjIt != adjIt_end; ++adjIt)
    {
      if (graph[*adjIt].which() <= lvl)
      {
        result.emplace_back(*adjIt);
        // keep descending only if requested and strictly more upstream
        if (!stop_at_first && graph[*adjIt].which() < graph[curr_node].which())
        {
          q.emplace(*adjIt);
        }
      }
      else if (graph[*adjIt].which() < graph[curr_node].which())
      {
        // intermediate node (e.g. group/cluster): keep traversing upstream
        q.emplace(*adjIt);
      }
    }
  }
}
/// BFS from the seed(s) in @p q towards "downstream" node types (larger
/// variant index = closer to PSMs), collecting nodes with which() >= lvl
/// into @p result. If stop_at_first is set, traversal does not continue past
/// the first matching node on each path.
/// @note The queue is always left empty on return so callers can reuse it.
void IDBoostGraph::getDownstreamNodesNonRecursive(std::queue<vertex_t>& q, const Graph& graph, int lvl, bool stop_at_first, std::vector<vertex_t>& result)
{
  if (q.empty()) return; // guard: q.front() on an empty queue is UB
  if (lvl <= graph[q.front()].which())
  {
    // Seed already at/above the requested level: nothing downstream to
    // collect. Drain the queue so stale seeds cannot leak into the caller's
    // next call (previously the seed was left behind on this early exit).
    q = std::queue<vertex_t>{};
    return;
  }
  while (!q.empty())
  {
    vertex_t curr_node = q.front();
    q.pop();
    Graph::adjacency_iterator adjIt, adjIt_end;
    boost::tie(adjIt, adjIt_end) = boost::adjacent_vertices(curr_node, graph);
    for (; adjIt != adjIt_end; ++adjIt)
    {
      if (graph[*adjIt].which() >= lvl)
      {
        result.emplace_back(*adjIt);
        // keep descending only if requested and strictly more downstream
        if (!stop_at_first && graph[*adjIt].which() > graph[curr_node].which())
        {
          q.emplace(*adjIt);
        }
      }
      else if (graph[*adjIt].which() > graph[curr_node].which())
      {
        // intermediate node (e.g. group/cluster): keep traversing downstream
        q.emplace(*adjIt);
      }
    }
  }
}
/// Resolves shared peptides: every peptide (cluster) keeps only its
/// best-scoring parent protein/group. Works on the full graph or, if
/// available, on each connected component in parallel.
/// @param removeAssociationsInData if true, also strips the corresponding
///        PeptideEvidences from the underlying PeptideHits
/// @throws Exception::MissingInformation if the graph was never built
void IDBoostGraph::resolveGraphPeptideCentric(bool removeAssociationsInData/*, bool resolveTies*/)
{
if (ccs_.empty() && boost::num_vertices(g) == 0)
{
throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Graph empty. Build it first.");
}
ProgressLogger pl;
pl.setLogType(ProgressLogger::CMD);
if (ccs_.empty())
{
// no components computed yet: operate on the single full graph
pl.startProgress(0, 1, "Resolving graph...");
resolveGraphPeptideCentric_(g, removeAssociationsInData);
pl.nextProgress();
pl.endProgress();
}
else
{
pl.startProgress(0, ccs_.size(), "Resolving graph...");
Size cnt(0);
#pragma omp parallel for default(none) shared(removeAssociationsInData, cnt, pl)
for (int i = 0; i < static_cast<int>(ccs_.size()); i += 1)
{
Graph& curr_cc = ccs_.at(i);
#ifdef INFERENCE_MT_DEBUG
OPENMS_LOG_INFO << "Processing on thread# " << omp_get_thread_num() << "\n";
#endif
#ifdef INFERENCE_DEBUG
OPENMS_LOG_INFO << "Processing cc " << i << " with " << boost::num_vertices(curr_cc) << " vertices." << "\n";
OPENMS_LOG_INFO << "Printing cc " << i << "\n";
printGraph(LOG_INFO, curr_cc);
OPENMS_LOG_INFO << "Printed cc " << i << "\n";
#endif
// components are disjoint, so parallel modification is safe
resolveGraphPeptideCentric_(curr_cc, removeAssociationsInData);
#pragma omp atomic
cnt++;
IF_MASTERTHREAD pl.setProgress(cnt);
}
pl.endProgress();
}
}
// TODO allow for higher_score_worse
/// Resolves shared peptides inside one connected component: for every peptide
/// cluster node, determines the best parent protein/group (by score+target
/// label; ties broken by number of associated peptide sequences) and removes
/// the graph edges to all other parents. Optionally also removes the matching
/// PeptideEvidences from the underlying PeptideHits.
/// @param fg connected component to resolve (modified in place)
/// @param removeAssociationsInData also strip evidences from PeptideHits
/// NOTE(review): the default argument in this out-of-line definition only
/// takes effect in this translation unit; presumably the declaration carries
/// none — confirm against the header.
void IDBoostGraph::resolveGraphPeptideCentric_(Graph& fg, bool removeAssociationsInData = true/*, bool resolveTies*/)
{
GetScoreTgTVisitor gpv;
//TODO allow any score type!
// orders two vertices by their (score, target) value pair
auto score_tgt_compare = [&fg,&gpv](vertex_t& n, vertex_t& m) -> bool
{return boost::apply_visitor(gpv, fg[n]) < boost::apply_visitor(gpv, fg[m]);};
Graph::vertex_iterator ui, ui_end;
boost::tie(ui,ui_end) = boost::vertices(fg);
// reusable scratch containers, cleared per peptide cluster
set<String> accs_to_remove;
queue<vertex_t> q;
vector<vertex_t> groups_or_singles;
vector<vertex_t> singles;
vector<PeptideEvidence> newev;
for (; ui != ui_end; ++ui)
{
if (fg[*ui].which() == 2)
// It should suffice to resolve at the pep cluster level
// if a pep does not belong to a cluster it didn't have multiple parents and
// therefore does not need to be resolved
{
accs_to_remove.clear();
q.push(*ui);
getUpstreamNodesNonRecursive(q, fg, 1, true, groups_or_singles); //get either single prots or groups
//TODO maybe apply visitor once to all and get normal vector of values
auto best_prot_it = std::max_element(groups_or_singles.begin(), groups_or_singles.end(), score_tgt_compare); //returns an iterator
const auto best_prot_val = boost::apply_visitor(gpv, fg[*best_prot_it]);
// collect all parents that tie with the best value
std::vector<size_t> best_indexes;
for (; best_prot_it != groups_or_singles.end();
best_prot_it = std::find_if(best_prot_it+1, groups_or_singles.end(),
[&best_prot_val,&gpv,&fg](const vertex_t& node) {return best_prot_val == boost::apply_visitor(gpv, fg[node]);}))
{
auto index = std::distance(groups_or_singles.begin(), best_prot_it);
best_indexes.push_back(index);
}
vertex_t best_prot = groups_or_singles[best_indexes[0]]; //cannot be empty
// if multiple equally good protein/groups exist, take the one with most identified (unmodified) peptide sequences
if (best_indexes.size() > 1)
{
std::vector<vertex_t> prots_to_resolve(best_indexes.size());
//select
std::transform(best_indexes.begin(), best_indexes.end(), prots_to_resolve.begin(), [&groups_or_singles](size_t pos) {return groups_or_singles[pos];});
//resolve ties based on nr peptides
Size most_peps = 0; //start with 0. every protein should have at least one peptide
std::vector<vertex_t> peps;
for (const auto& prot_node : prots_to_resolve)
{
q.push(prot_node);
// lvl6 would be PSMs while lvl3 would be Peptides. Let's do peptides.
getDownstreamNodesNonRecursive(q, fg, 3, true, peps);
if (peps.size() > most_peps)
{
most_peps = peps.size();
best_prot = prot_node;
}
peps.clear();
}
}
//TODO what if there are still ties left? Currently it just takes the first occurrence (due to ">")
// cut all non-best parents loose and remember their accessions
for (const auto& prot : groups_or_singles)
{
if (prot != best_prot)
{
if (fg[prot].which() == 1) // if the node is a group, find their members first.
{
q.push(prot);
getUpstreamNodesNonRecursive(q,fg,0,true,singles);
for (const auto& single_prot : singles)
{
ProteinHit *proteinPtr = boost::get<ProteinHit*>(fg[single_prot]);
accs_to_remove.insert(proteinPtr->getAccession());
//TODO we probably cannot set it to 0 here since we dont know if it has a unique pep yet
//proteinPtr->setScore(0.);
}
singles.clear();
}
else
{
ProteinHit *proteinPtr = boost::get<ProteinHit*>(fg[prot]);
accs_to_remove.insert(proteinPtr->getAccession());
//TODO we probably cannot set it to 0 here since we dont know if it has a unique pep yet
//proteinPtr->setScore(0.);
}
boost::remove_edge(prot, *ui, fg);
}
}
if (removeAssociationsInData)
{
// mirror the edge removal in the PeptideHits: drop evidences pointing
// to the removed accessions
q.push(*ui);
getDownstreamNodesNonRecursive(q, fg, 6, true, singles);
for (const auto& pep : singles)
{
PeptideHit *peptidePtr = boost::get<PeptideHit*>(fg[pep]);
auto& ev = peptidePtr->getPeptideEvidences();
for (const auto& e : ev)
{
if (accs_to_remove.find(e.getProteinAccession()) == accs_to_remove.end())
{
newev.emplace_back(e);
}
}
if (newev.empty())
{
OPENMS_LOG_ERROR << "No evidences left for peptide " <<
peptidePtr->getSequence().toString() << " This should not happen. Please report an issue on GitHub."
<< std::endl;
}
peptidePtr->setPeptideEvidences(std::move(newev));
newev.clear();
}
singles.clear();
}
groups_or_singles.clear();
}
}
}
/// Extends the graph with intermediate node layers (replicate/charge
/// hierarchy under each protein, protein-group nodes, peptide-cluster nodes).
/// Requires that the graph was built with run information
/// (nrPrefractionationGroups_ > 0) and that connected components exist.
/// @throws Exception::MissingInformation if run info or components are missing
void IDBoostGraph::clusterIndistProteinsAndPeptidesAndExtendGraph()
{
if (nrPrefractionationGroups_ == 0)
{
throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Graph not built with run information!");
}
/* This should be taken care of. We require that it was built with runinfo here
if (!pepHitVtx_to_run_.empty()) //graph built with run info
{
StringList runs;
protIDs_.getPrimaryMSRunPath(runs);
nrReplicates = runs.size();
}
*/
pair<int,int> chargeRange = protIDs_.getSearchParameters().getChargeRange();
if (ccs_.empty())
{
throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"No connected components annotated. Run computeConnectedComponents first!");
}
// each component is extended independently; add_vertex/add_edge on disjoint
// graphs is safe in parallel
#pragma omp parallel for schedule(dynamic) default(none) shared(chargeRange)
for (int i = 0; i < static_cast<int>(ccs_.size()); i += 1)
{
Graph& curr_cc = ccs_[i];
#ifdef INFERENCE_MT_DEBUG
OPENMS_LOG_INFO << "Processing on thread# " << omp_get_thread_num() << "\n";
#endif
#ifdef INFERENCE_DEBUG
OPENMS_LOG_INFO << "Processing cc " << i << " with " << boost::num_vertices(curr_cc) << " vertices." << "\n";
OPENMS_LOG_INFO << "Printing cc " << i << "\n";
printGraph(LOG_INFO, curr_cc);
OPENMS_LOG_INFO << "Printed cc " << i << "\n";
#endif
// Skip cc without peptide or protein
//TODO better to do quick brute-force calculation if the cc is really small
if (boost::num_edges(curr_cc) >= 1)
{
Graph::vertex_iterator ui, ui_end;
boost::tie(ui,ui_end) = boost::vertices(curr_cc);
// Cluster peptides with same sequence and create a replicate and charge hierarchy underneath
for (; ui != ui_end; ++ui)
{
if (curr_cc[*ui].which() == 0) //protein: same seq peptideHits have to be at a single protein
{
SequenceToReplicateChargeVariantHierarchy hierarchy{nrPrefractionationGroups_, chargeRange.first, chargeRange.second};
Graph::adjacency_iterator adjIt, adjIt_end;
boost::tie(adjIt, adjIt_end) = boost::adjacent_vertices(*ui, curr_cc);
for (; adjIt != adjIt_end; ++adjIt)
{
//pepHit, this also makes sure that pepHits already in hierarchy are masked
if (curr_cc[*adjIt].which() == 6)
{
PeptideHit *phitp = boost::get<PeptideHit *>(curr_cc[*adjIt]);
String seq = phitp->getSequence().toUnmodifiedString();
//TODO I think it is also best to completely focus on the extended Model here and assume that
// this information is present. If we allow mixtures of graphical models it gets complex
// with a lot of if-cases, also/especially during translation to the factor graph.
Size rep = 0; //In case no replicate info was read.
if (!pepHitVtx_to_run_.empty()) rep = pepHitVtx_to_run_[*adjIt];
int chg = phitp->getCharge();
hierarchy.insert(seq, rep, chg, *adjIt);
}
}
// materialize the sequence/replicate/charge layers under this protein
hierarchy.insertToGraph(*ui, curr_cc);
}
}
// Cluster peptides with same parents
unordered_map< ProteinNodeSet, PeptideNodeSet, MyUIntSetHasher > pepClusters; //maps the parent (protein) set to peptides that have the same
unordered_map< PeptideNodeSet, ProteinNodeSet, MyUIntSetHasher > indistProteins; //find indist proteins
boost::tie(ui,ui_end) = boost::vertices(curr_cc);
// Cluster proteins
for (; ui != ui_end; ++ui)
{
//TODO introduce an enum for the types to make it more clear.
//Or use the static_visitor pattern: You have to pass the vertex with its neighbors as a second arg though.
if (curr_cc[*ui].which() == 0) //protein: find indist. ones
{
//TODO assert that there is at least one peptide mapping to this peptide! Eg. Require IDFilter removeUnmatched before.
//Or just check rigorously here.
PeptideNodeSet childPeps;
Graph::adjacency_iterator adjIt, adjIt_end;
boost::tie(adjIt, adjIt_end) = boost::adjacent_vertices(*ui, curr_cc);
for (; adjIt != adjIt_end; ++adjIt)
{
if (curr_cc[*adjIt].which() >= 3) //if there are only two types (pep,prot) this check for pep is actually unnecessary
{
childPeps.insert(*adjIt);
}
}
auto clusterIt = indistProteins.emplace(childPeps, ProteinNodeSet({*ui}));
if (!clusterIt.second) //no insertion -> append
{
(clusterIt.first)->second.insert(*ui);
}
}
}
// add the protein groups to the graph
// and edges from the groups to the proteins for quick access
for (auto const& pepsToGrps : indistProteins)
{
if (pepsToGrps.second.size() <= 1)
continue;
//We can't point to protein groups while we fill them. Pointers invalidate in growing vectors.
//proteins_.getIndistinguishableProteins().push_back(ProteinGroup{});
//ProteinGroup& pg = proteins_.getIndistinguishableProteins().back();
auto grpVID = boost::add_vertex(ProteinGroup{}, curr_cc);
for (auto const &proteinVID : pepsToGrps.second)
{
//ProteinHit *proteinPtr = boost::get<ProteinHit*>(curr_cc[proteinVID]);
//pg.accessions.push_back(proteinPtr->getAccession());
boost::add_edge(proteinVID, grpVID, curr_cc);
// re-route the peptide edges through the new group node
for (auto const &pepVID : pepsToGrps.first)
{
boost::remove_edge(proteinVID, pepVID, curr_cc);
}
}
for (auto const &pepVID : pepsToGrps.first)
{
boost::add_edge(grpVID, pepVID, curr_cc);
}
//pg.probability = -1.0;
}
// reset iterator to loop through vertices again for peptide clusters
boost::tie(ui,ui_end) = boost::vertices(curr_cc);
for (; ui != ui_end; ++ui)
{
//TODO introduce an enum for the types to make it more clear.
if (curr_cc[*ui].which() >= 3) //peptide: find peptide clusters
{
//TODO assert that there is at least one protein mapping to this peptide! Eg. Require IDFilter removeUnmatched before.
//Or just check rigorously here.
ProteinNodeSet parents;
Graph::adjacency_iterator adjIt, adjIt_end;
boost::tie(adjIt, adjIt_end) = boost::adjacent_vertices(*ui, curr_cc);
for (; adjIt != adjIt_end; ++adjIt)
{
if (curr_cc[*adjIt].which() <= 1) // Either protein or protein group
{
parents.insert(*adjIt);
}
}
auto clusterIt = pepClusters.emplace(parents, PeptideNodeSet({*ui}));
if (!clusterIt.second) //no insertion -> append
{
(clusterIt.first)->second.insert(*ui);
}
}
}
// we add an edge from protein to pepCluster and from pepCluster to peptides
// peptides can use the same info from there.
for (auto const& protsToPepClusters : pepClusters)
{
if (protsToPepClusters.first.size() <= 1)
continue;
auto pcVID = boost::add_vertex(PeptideCluster{}, curr_cc);
for (auto const& pgVID : protsToPepClusters.first)
{
boost::add_edge(pgVID, pcVID, curr_cc);
for (auto const& peptideVID : protsToPepClusters.second)
{
boost::remove_edge(pgVID, peptideVID, curr_cc);
}
}
for (auto const& peptideVID : protsToPepClusters.second)
{
boost::add_edge(pcVID, peptideVID, curr_cc);
}
}
#ifdef INFERENCE_DEBUG
OPENMS_LOG_INFO << "Printing cc " << i << "with intermediate nodes.\n";
printGraph(LOG_INFO, curr_cc);
OPENMS_LOG_INFO << "Printed cc " << i << "with intermediate nodes.\n";
#endif
}
else
{
OPENMS_LOG_INFO << "Skipped cc with only one type (proteins or peptides)\n";
}
}
}
/// Extends each connected component with protein-group nodes (for proteins
/// sharing the same peptide set) and peptide-cluster nodes (for peptides
/// sharing the same parent set). Unlike the *AndExtendGraph variant, no
/// replicate/charge hierarchy is added.
/// @throws Exception::MissingInformation if components were not computed
void IDBoostGraph::clusterIndistProteinsAndPeptides()
{
if (ccs_.empty()) {
throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No connected components annotated. Run computeConnectedComponents first!");
}
// add_vertex and add_edge not threadsafe
//#pragma omp parallel for
for (int i = 0; i < static_cast<int>(ccs_.size()); i += 1)
{
Graph& curr_cc = ccs_[i];
#ifdef INFERENCE_MT_DEBUG
OPENMS_LOG_INFO << "Processing on thread# " << omp_get_thread_num() << "\n";
#endif
#ifdef INFERENCE_DEBUG
OPENMS_LOG_INFO << "Processing cc " << i << " with " << boost::num_vertices(curr_cc) << " vertices.\n";
OPENMS_LOG_INFO << "Printing cc " << i << "\n";
printGraph(LOG_INFO, curr_cc);
OPENMS_LOG_INFO << "Printed cc " << i << "\n";
#endif
// Skip cc without peptide or protein
//TODO better to do quick brute-force calculation if the cc is really small
if (boost::num_edges(curr_cc) >= 1)
{
Graph::vertex_iterator ui, ui_end;
boost::tie(ui,ui_end) = boost::vertices(curr_cc);
// Cluster peptides with same parents
unordered_map< ProteinNodeSet, PeptideNodeSet, MyUIntSetHasher > pepClusters; //maps the parent (protein) set to peptides that have the same
unordered_map< PeptideNodeSet, ProteinNodeSet, MyUIntSetHasher > indistProteins; //find indist proteins
// Cluster proteins
for (; ui != ui_end; ++ui)
{
//TODO introduce an enum for the types to make it more clear.
//Or use the static_visitor pattern: You have to pass the vertex with its neighbors as a second arg though.
if (curr_cc[*ui].which() == 0) //protein: find indist. ones
{
//TODO assert that there is at least one peptide mapping to this peptide! Eg. Require IDFilter removeUnmatched before.
//Or just check rigorously here.
PeptideNodeSet childPeps;
Graph::adjacency_iterator adjIt, adjIt_end;
boost::tie(adjIt, adjIt_end) = boost::adjacent_vertices(*ui, curr_cc);
for (; adjIt != adjIt_end; ++adjIt)
{
if (curr_cc[*adjIt].which() >= 3) //if there are only two types (pep,prot) this check for pep is actually unnecessary
{
childPeps.insert(*adjIt);
}
}
auto clusterIt = indistProteins.emplace(childPeps, ProteinNodeSet({*ui}));
if (!clusterIt.second) //no insertion -> append
{
(clusterIt.first)->second.insert(*ui);
}
}
}
// add the protein groups to the graph
// and edges from the groups to the proteins for quick access
for (auto const& pepsToGrps : indistProteins)
{
if (pepsToGrps.second.size() <= 1)
continue;
//We can't point to protein groups while we fill them. Pointers invalidate in growing vectors.
//proteins_.getIndistinguishableProteins().push_back(ProteinGroup{});
//ProteinGroup& pg = proteins_.getIndistinguishableProteins().back();
auto grpVID = boost::add_vertex(ProteinGroup{}, curr_cc);
int nr_targets = 0;
for (auto const &proteinVID : pepsToGrps.second)
{
//check if decoy to count the decoys
// NOTE(review): requires the "target_decoy" meta value to be present on
// every ProteinHit — presumably guaranteed upstream; confirm.
bool target = boost::get<ProteinHit*>(curr_cc[proteinVID])->getMetaValue("target_decoy").toString()[0] == 't';
if (target) nr_targets++;
//ProteinHit *proteinPtr = boost::get<ProteinHit*>(curr_cc[proteinVID]);
//pg.accessions.push_back(proteinPtr->getAccession());
boost::add_edge(proteinVID, grpVID, curr_cc);
// re-route the peptide edges through the new group node
for (auto const &pepVID : pepsToGrps.first)
{
boost::remove_edge(proteinVID, pepVID, curr_cc);
}
}
for (auto const &pepVID : pepsToGrps.first)
{
boost::add_edge(grpVID, pepVID, curr_cc);
}
ProteinGroup& pgnode = boost::get<ProteinGroup&>(curr_cc[grpVID]);
pgnode.size = pepsToGrps.second.size();
pgnode.tgts = nr_targets;
// take score of any protein. They should be the same.
pgnode.score = boost::get<ProteinHit*>(curr_cc[*pepsToGrps.second.begin()])->getScore();
}
// reset iterator to loop through vertices again for peptide clusters
boost::tie(ui,ui_end) = boost::vertices(curr_cc);
for (; ui != ui_end; ++ui)
{
//TODO introduce an enum for the types to make it more clear.
if (curr_cc[*ui].which() == 6) //peptide: find peptide clusters
{
//TODO assert that there is at least one protein mapping to this peptide! Eg. Require IDFilter removeUnmatched before.
//Or just check rigorously here.
ProteinNodeSet parents;
Graph::adjacency_iterator adjIt, adjIt_end;
boost::tie(adjIt, adjIt_end) = boost::adjacent_vertices(*ui, curr_cc);
for (; adjIt != adjIt_end; ++adjIt)
{
if (curr_cc[*adjIt].which() <= 1) // Either protein or protein group
{
parents.insert(*adjIt);
}
}
auto clusterIt = pepClusters.emplace(parents, PeptideNodeSet({*ui}));
if (!clusterIt.second) //no insertion -> append
{
(clusterIt.first)->second.insert(*ui);
}
}
}
// we add an edge from protein to pepCluster and from pepCluster to peptides
// peptides can use the same info from there.
for (auto const& protsToPepClusters : pepClusters)
{
if (protsToPepClusters.first.size() <= 1)
continue;
auto pcVID = boost::add_vertex(PeptideCluster{}, curr_cc);
for (auto const& pgVID : protsToPepClusters.first)
{
boost::add_edge(pgVID, pcVID, curr_cc);
for (auto const& peptideVID : protsToPepClusters.second)
{
boost::remove_edge(pgVID, peptideVID, curr_cc);
}
}
for (auto const& peptideVID : protsToPepClusters.second)
{
boost::add_edge(pcVID, peptideVID, curr_cc);
}
}
#ifdef INFERENCE_DEBUG
OPENMS_LOG_INFO << "Printing cc " << i << "with intermediate nodes.\n";
printGraph(LOG_INFO, curr_cc);
OPENMS_LOG_INFO << "Printed cc " << i << "with intermediate nodes.\n";
#endif
}
else
{
OPENMS_LOG_INFO << "Skipped cc with only one type (proteins or peptides)\n";
}
}
}
//TODO we should probably rename it to splitCC now. Add logging and timing?
/// Splits the full graph into its connected components (stored in ccs_) via a
/// depth-first search and frees the original graph afterwards.
void IDBoostGraph::computeConnectedComponents()
{
  auto splitter = dfs_ccsplit_visitor(ccs_);
  boost::depth_first_search(g, visitor(splitter));
  OPENMS_LOG_INFO << "Found " << ccs_.size() << " connected components.\n";
#ifdef INFERENCE_BENCH
  sizes_and_times_.resize(ccs_.size());
#endif
  // the components now own all nodes; the monolithic graph is obsolete
  g.clear();
}
/// Returns connected component @p cc; if components were never computed
/// (full graph still populated), cc == 0 yields the full graph.
/// @throws std::out_of_range for an invalid component index
const IDBoostGraph::Graph& IDBoostGraph::getComponent(Size cc)
{
  const bool whole_graph_requested = (cc == 0) && (boost::num_vertices(g) != 0);
  if (whole_graph_requested)
  {
    return g;
  }
  return ccs_.at(cc);
}
/// Returns the vertex for @p ptr, creating it (and registering it in
/// @p vertex_map) if this ID object was not seen before.
IDBoostGraph::vertex_t IDBoostGraph::addVertexWithLookup_(const IDPointer& ptr, unordered_map<IDPointer, vertex_t, boost::hash<IDPointer>>& vertex_map)
{
  const auto found = vertex_map.find(ptr);
  if (found != vertex_map.end())
  {
    // already present: hand back the existing descriptor
    return boost::vertex(found->second, g);
  }
  // first encounter: create the node and remember it
  const vertex_t fresh = boost::add_vertex(g);
  vertex_map[ptr] = fresh;
  g[fresh] = ptr;
  return fresh;
}
/// Collects (score, is_target) pairs for every protein node across all
/// connected components into @p scores_and_tgt.
void IDBoostGraph::getProteinScores_(ScoreToTgtDecLabelPairs& scores_and_tgt)
{
  const std::function<void(Graph&)>& fun =
    [&scores_and_tgt]
    (const Graph& graph)
    {
      Graph::vertex_iterator vtx, vtx_end;
      for (boost::tie(vtx, vtx_end) = boost::vertices(graph); vtx != vtx_end; ++vtx)
      {
        if (graph[*vtx].which() != 0) continue; // proteins only
        const ProteinHit* hit = boost::get<ProteinHit*>(graph[*vtx]);
        // label: 1.0 for target, 0.0 for decoy
        const bool is_target = hit->getMetaValue("target_decoy").toString()[0] == 't';
        scores_and_tgt.emplace_back(hit->getScore(), static_cast<double>(is_target));
      }
    };
  applyFunctorOnCCsST(fun);
}
// Collects one (score, target fraction) pair per protein or protein group
// across all connected components. Proteins that are adjacent to a group node
// are skipped (the group entry represents them); for groups the fraction of
// target members (tgts / size) is reported.
void IDBoostGraph::getProteinGroupScoresAndTgtFraction(ScoreToTgtDecLabelPairs& scores_and_tgt_fraction)
{
  const std::function<void(Graph&)>& fun =
    [&scores_and_tgt_fraction]
    (const Graph& graph)
  {
    Graph::vertex_iterator ui, ui_end;
    boost::tie(ui,ui_end) = boost::vertices(graph);
    for (; ui != ui_end; ++ui)
    {
      //TODO introduce an enum for the types to make it more clear.
      //Or use the static_visitor pattern: You have to pass the vertex with its neighbors as a second arg though.
      if (graph[*ui].which() == 0) // protein
      {
        // check neighbors: if any of them is a group node, this protein is
        // represented by its group and must not be reported separately
        Graph::adjacency_iterator adjIt, adjIt_end;
        boost::tie(adjIt, adjIt_end) = boost::adjacent_vertices(*ui, graph);
        bool part_of_group = false;
        for (; adjIt != adjIt_end; adjIt++)
        {
          if (graph[*adjIt].which() == 1) // if part of a group, skip
          {
            part_of_group = true;
            break;
          }
        }
        if (!part_of_group)
        {
          const ProteinHit* ph = boost::get<ProteinHit*>(graph[*ui]);
          scores_and_tgt_fraction.emplace_back(
            ph->getScore(),
            static_cast<double>(ph->getMetaValue("target_decoy").toString()[0] == 't')); // target = 1; false = 0;
        }
      }
      else if (graph[*ui].which() == 1) //protein group, always include
      {
        ProteinGroup &pg = boost::get<ProteinGroup &>(graph[*ui]);
        scores_and_tgt_fraction.emplace_back(pg.score, static_cast<double>(pg.tgts) / pg.size);
      }
    }
  };
  // run the collector on every connected component (single-threaded)
  applyFunctorOnCCsST(fun);
}
/// not finished yet!
// Experimental: for every peptide-cluster node, the upstream proteins/groups
// are sorted by their (posterior) score; the i-th ranked entry's target label
// (or group target fraction) is divided by its rank ("hitchhiking" penalty),
// and the maximum such fraction over all clusters is kept per protein.
void IDBoostGraph::getProteinGroupScoresAndHitchhikingTgtFraction(ScoreToTgtDecLabelPairs& scores_and_tgt_fraction)
{
  const std::function<void(Graph&)>& fun =
    [&scores_and_tgt_fraction,this]
    (const Graph& fg)
  {
    Graph::vertex_iterator ui, ui_end;
    boost::tie(ui,ui_end) = boost::vertices(fg);
    GetPosteriorVisitor gpv;
    // best (highest) penalized target fraction seen so far, per protein vertex
    std::unordered_map<vertex_t, double> prot_to_current_max;
    for (; ui != ui_end; ++ui)
    {
      if (fg[*ui].which() == 2)
      // It should suffice to resolve at the pep cluster level
      // if a pep does not belong to a cluster it didn't have multiple parents and
      // therefore does not need to be resolved
      {
        // gather all upstream protein/group vertices of this peptide cluster
        vector<vertex_t> prots;
        queue<vertex_t> start;
        start.push(*ui);
        getUpstreamNodesNonRecursive(start,fg,1,true,prots);
        // sort parents by descending posterior score
        auto score_greater = [&fg,&gpv](vertex_t& n, vertex_t& m) -> bool
        {return boost::apply_visitor(gpv, fg[n]) > boost::apply_visitor(gpv, fg[m]);};
        std::sort(prots.begin(), prots.end(), score_greater);
        Size target_contribution_penalty = 1;
        double target_fraction = 0.0;
        // currently the maximum for a protein is saved.
        // i.e. if once it is IDed as the second for a peptide and once as the third
        // for a peptide, the estimated fraction for the former is used.
        // TODO I am currently missing proteins with unique peptides here!!
        for (const auto& prot : prots)
        {
          if (fg[prot].which() == 0) //protein
          {
            const ProteinHit* ph = boost::get<ProteinHit*>(fg[prot]);
            // target = 1/penalty; decoy = 0;
            target_fraction = static_cast<double>(ph->getMetaValue("target_decoy").toString()[0] == 't');
            target_fraction /= target_contribution_penalty;
            auto it_inserted = prot_to_current_max.emplace(prot, target_fraction);
            if (!it_inserted.second)
            {
              // already seen for another cluster: keep the maximum
              if (target_fraction > it_inserted.first->second)
              {
                it_inserted.first->second = target_fraction;
              }
            }
          }
          else if (fg[prot].which() == 1) //protein group
          {
            ProteinGroup &pg = boost::get<ProteinGroup &>(fg[prot]);
            target_fraction = static_cast<double>(pg.tgts) / pg.size;
            target_fraction /= target_contribution_penalty;
            auto it_inserted = prot_to_current_max.emplace(prot, target_fraction);
            if (!it_inserted.second)
            {
              // already seen for another cluster: keep the maximum
              if (target_fraction > it_inserted.first->second)
              {
                it_inserted.first->second = target_fraction;
              }
            }
          }
          // penalty grows with rank (2nd parent contributes 1/2, 3rd 1/3, ...)
          target_contribution_penalty++;
        }
      }
    }
    // emit (posterior score, best target fraction) per protein/group vertex
    for (const auto& prot_tgt_frac : prot_to_current_max)
    {
      scores_and_tgt_fraction.emplace_back(boost::apply_visitor(gpv, fg[prot_tgt_frac.first]), prot_tgt_frac.second);
    }
  };
  applyFunctorOnCCsST(fun);
}
/// Number of connected components (0 until computeConnectedComponents() ran).
Size IDBoostGraph::getNrConnectedComponents()
{
  return ccs_.size();
}
/// Read-only access to the protein identification run underlying this graph.
const ProteinIdentification& IDBoostGraph::getProteinIDs()
{
  return protIDs_;
}
// Writes the graph in graphviz (dot) format to @p out, labelling each vertex
// via the LabelVisitor applied to its IDPointer payload.
void IDBoostGraph::printGraph(std::ostream& out, const Graph& fg)
{
  LabelVisitor lv;
  // Also tried to save the labels in a member after build_graph. But could not get the type right for a member that would store them.
  //TODO Is passing "this" to lambda bad? How can I pass private members then?
  auto labels = boost::make_transform_value_property_map([&](const IDPointer &p) { return boost::apply_visitor(lv, p); },
                                                         boost::get(boost::vertex_bundle, fg));
  //boost::print_graph(fg);
  boost::write_graphviz(out, fg, boost::make_label_writer(labels));
}
namespace Internal
{
  /// Hash functions for the strong typedefs used as graph node payloads.
  //TODO switch everything to pointers so we compare memory addresses
  // then we don't need those. They are just here to fulfill the "interface".
  std::size_t hash_value(const IDBoostGraph::Peptide& x)
  {
    return boost::hash<std::string>{}(static_cast<std::string>(x));
  }
  std::size_t hash_value(const IDBoostGraph::RunIndex& x)
  {
    return boost::hash<Size>{}(static_cast<Size>(x));
  }
  std::size_t hash_value(const IDBoostGraph::Charge& x)
  {
    return boost::hash<int>{}(static_cast<int>(x));
  }
  // all ProteinGroup instances deliberately hash to the same constant
  std::size_t hash_value(const IDBoostGraph::ProteinGroup&)
  {
    return 0;
  }
  bool operator==(const IDBoostGraph::ProteinGroup& lhs, const IDBoostGraph::ProteinGroup& rhs)
  {
    // member-wise equality (score, size, target count)
    return (lhs.score == rhs.score) && (lhs.size == rhs.size) && (lhs.tgts == rhs.tgts);
  }
  // all PeptideCluster instances deliberately hash to the same constant
  std::size_t hash_value(const IDBoostGraph::PeptideCluster&)
  {
    return 1;
  }
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/ConsensusIDAlgorithmSimilarity.cpp | .cpp | 5,113 | 135 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Sven Nahnsen, Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/ConsensusIDAlgorithmSimilarity.h>
#include <OpenMS/CONCEPT/LogStream.h>
using namespace std;
namespace OpenMS
{
/// Default constructor: only sets the DefaultParamHandler name; parameters
/// are registered by the concrete subclasses.
ConsensusIDAlgorithmSimilarity::ConsensusIDAlgorithmSimilarity()
{
  setName("ConsensusIDAlgorithmSimilarity"); // DefaultParamHandler
}
/**
  @brief Computes a similarity-weighted consensus score per peptide sequence.

  All input IDs must carry PEP-like scores ("Posterior Error Probability",
  "pep" or the CV accession "MS:1001493"); Exception::InvalidValue is thrown
  otherwise.

  For every peptide hit, the best-matching hit (highest sequence similarity,
  ties broken by better PEP) from every other ID run is found; the consensus
  score is a similarity-weighted combination of the PEPs, and "support"
  (0 to 1) reflects how well the other runs corroborate the hit.

  Fix: ID runs without any hits are now skipped when looking for best matches;
  previously "*max_element(...)" was dereferenced on an empty range, which is
  undefined behavior.
*/
void ConsensusIDAlgorithmSimilarity::apply_(
    PeptideIdentificationList& ids,
    const map<String, String>& se_info,
    SequenceGrouping& results)
{
  // check that all runs are scored with posterior error probabilities:
  for (PeptideIdentificationList::iterator id = ids.begin();
       id != ids.end(); ++id)
  {
    if (id->getScoreType() != "Posterior Error Probability" &&
        id->getScoreType() != "pep" &&
        id->getScoreType() != "MS:1001493")
    {
      String msg = "Score type must be 'Posterior Error Probability'";
      throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                    msg, id->getScoreType());
    }
  }
  for (PeptideIdentificationList::iterator id1 = ids.begin();
       id1 != ids.end(); ++id1)
  {
    // prefix the score type with the search engine name, if known:
    String score_type = id1->getScoreType();
    auto se = se_info.find(id1->getIdentifier());
    if (se != se_info.end())
    {
      score_type = se->second + "_" + score_type;
    }
    for (vector<PeptideHit>::iterator hit1 = id1->getHits().begin();
         hit1 != id1->getHits().end(); ++hit1)
    {
      // have we scored this sequence already? if yes, skip:
      SequenceGrouping::iterator pos = results.find(hit1->getSequence());
      if (pos != results.end())
      {
        compareChargeStates_(pos->second.charge, hit1->getCharge(),
                             pos->first);
        pos->second.scores.emplace_back(hit1->getScore());
        pos->second.types.emplace_back(id1->getScoreType());
        for (const auto& ev : hit1->getPeptideEvidences())
        {
          pos->second.evidence.emplace(ev);
        }
        continue;
      }
      // similarity scores and PEPs of best matches for all ID runs:
      vector<pair<double, double> > best_matches;
      best_matches.reserve(ids.size() - 1);
      for (PeptideIdentificationList::iterator id2 = ids.begin();
           id2 != ids.end(); ++id2)
      {
        if (id1 == id2) continue;
        // similarity scores and PEPs of all matches in current ID run
        // (to get the best match, we look for highest similarity, breaking
        // ties by better PEP - so we need to transform PEP so higher scores
        // are better, same as similarity):
        vector<pair<double, double> > current_matches;
        current_matches.reserve(id2->getHits().size());
        for (vector<PeptideHit>::iterator hit2 = id2->getHits().begin();
             hit2 != id2->getHits().end(); ++hit2)
        {
          double sim_score = getSimilarity_(hit1->getSequence(),
                                            hit2->getSequence());
          // use "1 - PEP" so higher scores are better (for "max_element"):
          current_matches.emplace_back(sim_score,
                                       1.0 - hit2->getScore());
        }
        // skip runs without hits - "max_element" on an empty range returns
        // the end iterator, and dereferencing it is undefined behavior:
        if (current_matches.empty()) continue;
        best_matches.push_back(*max_element(current_matches.begin(),
                                            current_matches.end()));
      }
      double score = hit1->getScore();
      double sum_sim = 1.0; // sum of similarity scores
      for (vector<pair<double, double> >::iterator it = best_matches.begin();
           it != best_matches.end(); ++it)
      {
        score += it->first * (1.0 - it->second); // undo "1 - PEP" transform
        sum_sim += it->first;
      }
      score /= (sum_sim * sum_sim);
      double support = 0.;
      // normalize similarity score to range 0-1:
      Size n_other_ids = (count_empty_ ?
                          number_of_runs_ - 1 : best_matches.size());
      if (n_other_ids == 0) // only one ID run -> similarity is ill-defined
      {
        support = double(!count_empty_); // 0 or 1 depending on parameter
      }
      else
      {
        support = (sum_sim - 1.0) / n_other_ids;
      }
      auto ev = hit1->getPeptideEvidences();
      // don't filter based on "min_score_" yet, so we don't recompute results
      // for the same peptide sequence:
      results[hit1->getSequence()] =
      {
        hit1->getCharge(),
        {hit1->getScore()},
        {score_type},
        hit1->getMetaValue("target_decoy").toString(),
        {std::make_move_iterator(ev.begin()), std::make_move_iterator(ev.end())},
        score,
        support
      };
    }
  }
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/ConsensusIDAlgorithmPEPMatrix.cpp | .cpp | 2,364 | 75 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Sven Nahnsen, Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/ConsensusIDAlgorithmPEPMatrix.h>
using namespace std;
namespace OpenMS
{
/// Constructor: registers the algorithm parameters (substitution matrix and
/// gap penalty for the Needleman-Wunsch alignment) and syncs them to param_.
ConsensusIDAlgorithmPEPMatrix::ConsensusIDAlgorithmPEPMatrix()
{
  setName("ConsensusIDAlgorithmPEPMatrix"); // DefaultParamHandler
  defaults_.setValue("matrix", "PAM30MS", "Substitution matrix to use for alignment-based similarity scoring");
  defaults_.setValidStrings("matrix", NeedlemanWunsch::NamesOfScoringMatrices);
  defaults_.setValue("penalty", 5, "Alignment gap penalty (the same value is used for gap opening and extension)");
  defaults_.setMinInt("penalty", 1);
  defaultsToParam_();
}
void ConsensusIDAlgorithmPEPMatrix::updateMembers_()
{
  ConsensusIDAlgorithmSimilarity::updateMembers_();
  string matrix = param_.getValue("matrix");
  int penalty = param_.getValue("penalty");
  alignment_.setMatrix(matrix);
  // guard clause: reject non-positive gap penalties
  if (penalty <= 0)
  {
    String msg = "Gap penalty should be positive";
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                     msg);
  }
  alignment_.setPenalty(penalty);
  // new parameters may affect the similarity calculation, so clear cache:
  similarities_.clear();
}
// Similarity of two peptide sequences based on a Needleman-Wunsch alignment
// of their unmodified strings, normalized to [0, 1].
double ConsensusIDAlgorithmPEPMatrix::getSimilarity_(AASequence seq1,
                                                     AASequence seq2)
{
  // here we cannot take modifications into account:
  String unmod_seq1 = seq1.toUnmodifiedString();
  String unmod_seq2 = seq2.toUnmodifiedString();
  if (unmod_seq1 == unmod_seq2) return 1.0;
  double score_sim = alignment_.align(unmod_seq1, unmod_seq2);
  if (score_sim < 0)
  {
    return 0; // dissimilar sequences are clamped to 0
  }
  // normalize by the smaller of the two self-alignment scores:
  double score_self1 = alignment_.align(unmod_seq1, unmod_seq1);
  double score_self2 = alignment_.align(unmod_seq2, unmod_seq2);
  return score_sim / min(score_self1, score_self2);
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/IDMapper.cpp | .cpp | 42,290 | 1,103 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Marc Sturm, Hendrik Weisser, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/IDMapper.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <OpenMS/METADATA/SpectrumLookup.h>
#include <OpenMS/METADATA/AnnotatedMSRun.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/SYSTEM/File.h>
#include <unordered_set>
using namespace std;
namespace OpenMS
{
// Constructor: initializes the tolerances with their defaults and registers
// all user parameters (RT/mz tolerance, mz unit and reference, charge
// handling), then syncs them into param_.
IDMapper::IDMapper() :
  DefaultParamHandler("IDMapper"),
  rt_tolerance_(5.0),
  mz_tolerance_(20),
  measure_(MEASURE_PPM),
  ignore_charge_(false)
{
  defaults_.setValue("rt_tolerance", rt_tolerance_, "RT tolerance (in seconds) for the matching");
  defaults_.setMinFloat("rt_tolerance", 0);
  defaults_.setValue("mz_tolerance", mz_tolerance_, "m/z tolerance (in ppm or Da) for the matching");
  defaults_.setMinFloat("mz_tolerance", 0);
  defaults_.setValue("mz_measure", "ppm", "unit of 'mz_tolerance' (ppm or Da)");
  defaults_.setValidStrings("mz_measure", {"ppm","Da"});
  defaults_.setValue("mz_reference", "precursor", "source of m/z values for peptide identifications");
  defaults_.setValidStrings("mz_reference", {"precursor","peptide"});
  defaults_.setValue("ignore_charge", "false", "For feature/consensus maps: Assign an ID independently of whether its charge state matches that of the (consensus) feature.");
  defaults_.setValidStrings("ignore_charge", {"true","false"});
  defaultsToParam_();
}
// Copy constructor: copies the parameter handler state and all settings, then
// re-derives the members from the copied parameters.
IDMapper::IDMapper(const IDMapper& cp) :
  DefaultParamHandler(cp),
  rt_tolerance_(cp.rt_tolerance_),
  mz_tolerance_(cp.mz_tolerance_),
  measure_(cp.measure_),
  ignore_charge_(cp.ignore_charge_)
{
  updateMembers_();
}
// Assignment operator: copies parameters and settings, then re-derives the
// members; self-assignment is a no-op.
IDMapper& IDMapper::operator=(const IDMapper& rhs)
{
  if (this != &rhs)
  {
    DefaultParamHandler::operator=(rhs);
    rt_tolerance_ = rhs.rt_tolerance_;
    mz_tolerance_ = rhs.mz_tolerance_;
    measure_ = rhs.measure_;
    ignore_charge_ = rhs.ignore_charge_;
    updateMembers_();
  }
  return *this;
}
// Re-reads all members from the current parameter values.
void IDMapper::updateMembers_()
{
  rt_tolerance_ = param_.getValue("rt_tolerance");
  mz_tolerance_ = param_.getValue("mz_tolerance");
  if (param_.getValue("mz_measure") == "ppm")
  {
    measure_ = MEASURE_PPM;
  }
  else
  {
    measure_ = MEASURE_DA;
  }
  ignore_charge_ = (param_.getValue("ignore_charge") == "true");
}
// Annotates an AnnotatedMSRun with peptide/protein identifications: every ID
// is attached to a spectrum, either directly via its native id
// (SPECTRUM_REFERENCE meta value) or -- as fallback -- by matching RT within
// rt_tolerance_ and (unless map_ms1) precursor m/z.
void IDMapper::annotate(AnnotatedMSRun& map,
                        const PeptideIdentificationList& peptide_ids,
                        const vector<ProteinIdentification>& protein_ids,
                        const bool clear_ids,
                        const bool map_ms1)
{
  // validate that required metavalues are present on the hits
  checkHits_(peptide_ids);
  SpectrumLookup lookup;
  if (clear_ids)
  { // start with empty IDs
    map.getPeptideIdentifications().clear();
    map.getProteinIdentifications().clear();
  }
  if (peptide_ids.empty()) return;
  // append protein identifications
  map.getProteinIdentifications().insert(map.getProteinIdentifications().end(), protein_ids.begin(), protein_ids.end());
  // AnnotatedMSRun will have one PeptideIdentification per spectrum (including ones without hits)
  map.getPeptideIdentifications().resize(map.getMSExperiment().getSpectra().size());
  // set up the lookup table for the spectra
  lookup.readSpectra(map.getMSExperiment());
  // remember which peptides were mapped (for stats later)
  unordered_set<Size> peptides_mapped;
  // store mapping of identification RT to index (ignore empty hits)
  multimap<double, Size> identifications_precursors;
  for (Size i = 0; i < peptide_ids.size(); ++i)
  {
    if (peptide_ids[i].empty()) continue;
    // mapping is done by either native id or by comparing peptide_id RT with experiment RT
    if (!peptide_ids[i].metaValueExists(Constants::UserParam::SPECTRUM_REFERENCE))
    { // use RT for mapping
      identifications_precursors.insert(make_pair(peptide_ids[i].getRT(), i));
    }
    else
    { // use native id for mapping
      DataValue native_id = peptide_ids[i].getMetaValue(Constants::UserParam::SPECTRUM_REFERENCE);
      try
      { // spectrum can be retrieved
        Size spectrum_idx = lookup.findByNativeID(native_id);
        // Since we now have only one PeptideIdentification per spectrum, we need to merge the hits
        PeptideIdentification& existing_id = map.getPeptideIdentifications()[spectrum_idx];
        existing_id.getHits().insert(existing_id.getHits().end(),
                                     peptide_ids[i].getHits().begin(),
                                     peptide_ids[i].getHits().end());
        peptides_mapped.insert(i);
      }
      catch (const Exception::ElementNotFound& /*e*/)
      { // use RT for mapping
        identifications_precursors.insert(make_pair(peptide_ids[i].getRT(), i));
      }
    }
  }
  if (!identifications_precursors.empty())
  {
    // store mapping of scan RT to index
    multimap<double, Size> experiment_precursors;
    for (Size i = 0; i < map.getMSExperiment().size(); i++)
    {
      experiment_precursors.insert(make_pair(map.getMSExperiment()[i].getRT(), i));
    }
    // note that mappings are sorted by key via multimap (we rely on that down below)
    // calculate the actual mapping
    multimap<double, Size>::const_iterator experiment_iterator = experiment_precursors.begin();
    multimap<double, Size>::const_iterator identifications_iterator = identifications_precursors.begin();
    // to achieve O(n) complexity we now move along the spectra
    // and for each spectrum we look at the peptide id's with the allowed RT range
    // once we finish a spectrum, we simply move back in the peptide id window a little to get from the
    // right end of the old interval to the left end of the new interval
    while (experiment_iterator != experiment_precursors.end())
    {
      // maybe we hit end() of IDs during the last scan - go back to a real value
      if (identifications_iterator == identifications_precursors.end())
      {
        --identifications_iterator; // this is valid, since we have at least one peptide ID
      }
      // go to left border of RT interval
      while (identifications_iterator != identifications_precursors.begin() &&
             (experiment_iterator->first - identifications_iterator->first) < rt_tolerance_) // do NOT use fabs() here, since we want the LEFT border
      {
        --identifications_iterator;
      }
      // ... we might have stepped too far left
      if (identifications_iterator != identifications_precursors.end() && ((experiment_iterator->first - identifications_iterator->first) > rt_tolerance_))
      {
        ++identifications_iterator; // get into interval again (we can potentially be at end() afterwards)
      }
      if (identifications_iterator == identifications_precursors.end())
      { // no more ID's, so we don't have any chance of matching the next spectra
        break; // ... do NOT put this block below, since hitting the end of ID's for one spec, still allows to match stuff in the next (when going to left border)
      }
      // run through RT interval
      while (identifications_iterator != identifications_precursors.end() &&
             (identifications_iterator->first - experiment_iterator->first) < rt_tolerance_) // fabs() not required here, since are definitely within left border, and wait until exceeding the right
      {
        // for MS1 mapping the RT match suffices; otherwise the ID's m/z must
        // match one of the spectrum's precursor m/z values
        bool success = map_ms1;
        if (!success)
        {
          for (const auto& precursor : map.getMSExperiment()[experiment_iterator->second].getPrecursors())
          {
            if (isMatch_(0, peptide_ids[identifications_iterator->second].getMZ(), precursor.getMZ()))
            {
              success = true;
              break;
            }
          }
        }
        if (success)
        {
          // Since we have only one PeptideIdentification per spectrum, we need to merge the hits
          PeptideIdentification& existing_id = map.getPeptideIdentifications()[experiment_iterator->second];
          existing_id.getHits().insert(existing_id.getHits().end(),
                                       peptide_ids[identifications_iterator->second].getHits().begin(),
                                       peptide_ids[identifications_iterator->second].getHits().end());
          peptides_mapped.insert(identifications_iterator->second);
        }
        ++identifications_iterator;
      }
      // we are at the right border now (or likely even beyond)
      ++experiment_iterator;
    }
  }
  // some statistics output
  // NOTE(review): the "Unmapped (empty)" count subtracts only the RT-mapped
  // IDs, so non-empty IDs matched by native id are included in it -- verify
  // whether that is intended.
  OPENMS_LOG_INFO << "Peptides assigned to a precursor: " << peptides_mapped.size() << "\n"
                  << " Unassigned peptides: " << peptide_ids.size() - peptides_mapped.size() << "\n"
                  << " Unmapped (empty) peptides: " << peptide_ids.size() - identifications_precursors.size() << endl;
}
// Convenience overload: gathers all peptide IDs stored on the features of
// fmap (filling in missing m/z / RT from the feature position) and delegates
// to the main annotate() overload.
void IDMapper::annotate(AnnotatedMSRun& map, const FeatureMap& fmap, const bool clear_ids, const bool map_ms1)
{
  const vector<ProteinIdentification>& protein_ids = fmap.getProteinIdentifications();
  PeptideIdentificationList peptide_ids;
  for (const auto& feature : fmap)
  {
    for (const auto& pep_id : feature.getPeptideIdentifications())
    {
      peptide_ids.push_back(pep_id);
      // if the pepID has no m/z or RT, fall back to the feature's values
      if (!pep_id.hasMZ()) peptide_ids.back().setMZ(feature.getMZ());
      if (!pep_id.hasRT()) peptide_ids.back().setRT(feature.getRT());
    }
  }
  annotate(map, peptide_ids, protein_ids, clear_ids, map_ms1);
}
/// How identifying spectra are referenced from consensus features (TMT/iTRAQ).
enum class NATIVE_ID_TYPE
{
  UNKNOWN,     ///< no scan id meta value found (not TMT-like data)
  MS2IDMS3TMT, ///< MS3-based TMT: identifying MS2 scan stored as "id_scan_id"
  MS2IDTMT     ///< standard TMT: identifying MS2 scan stored as "scan_id"
};
// Detects whether (and how) the consensus features reference their
// identifying spectra, based on the meta values of the first annotated one.
NATIVE_ID_TYPE checkTMTType(const ConsensusMap& map)
{
  for (const auto& cf : map)
  {
    // check if the native id of an identifying spectrum is annotated
    if (cf.metaValueExists("id_scan_id"))
    { // identifying MS2 spectrum in MS3 TMT
      return NATIVE_ID_TYPE::MS2IDMS3TMT;
    }
    if (cf.metaValueExists("scan_id"))
    { // identifying MS2 spectrum in standard TMT
      return NATIVE_ID_TYPE::MS2IDTMT;
    }
  }
  return NATIVE_ID_TYPE::UNKNOWN;
}
/**
  @brief Maps peptide identifications onto a ConsensusMap.

  Two strategies:
  - TMT/iTRAQ data (detected via "scan_id"/"id_scan_id" meta values on the
    consensus features): IDs are matched directly by spectrum native id.
  - otherwise: IDs are matched by RT/m/z (and optionally charge) against the
    consensus features (centroids, or sub-features if
    @p measure_from_subelements).

  Unmatched IDs are stored as unassigned. If @p spectra is given, unidentified
  precursors are additionally annotated as empty IDs on matching features.

  Fixes in this revision:
  - mapPrecursorsToIdentifications() (expensive) is called once instead of
    three times for the statistics output;
  - the "Mapping N PeptideIdentifications..." log line was missing a space;
  - the "missing scan identifier" warning printed the (empty) scan id instead
    of the meta value key name;
  - the scan-id regex is compiled once (the cache check was ineffective while
    the regex was re-declared inside the loop);
  - removed the unused local "nativeid2cf".
*/
void IDMapper::annotate(
  ConsensusMap& map,
  const PeptideIdentificationList& ids,
  const vector<ProteinIdentification>& protein_ids,
  bool measure_from_subelements,
  bool annotate_ids_with_subelements,
  const PeakMap& spectra)
{
  // validate "RT" and "MZ" metavalues exist
  checkHits_(ids);
  // append protein identifications to Map
  map.getProteinIdentifications().insert(map.getProteinIdentifications().end(), protein_ids.begin(), protein_ids.end());
  // keep track of assigned/unassigned peptide identifications.
  // maps Pep.Id. index to number of assignments to a feature
  std::unordered_map<Size, Size> assigned_ids;
  // keep track of assigned/unassigned precursors
  std::unordered_map<Size, Size> assigned_precursors;
  // store which peptides fit which feature (and avoid double entries)
  // consensusMap -> {peptide_index}
  vector<set<size_t>> mapping(map.size());
  DoubleList mz_values;
  double rt_pep;
  IntList charges;
  // for statistics
  Size id_matches_none(0), id_matches_single(0), id_matches_multiple(0);
  // build map from file to peptide id
  std::map<String, std::unordered_map<String, const PeptideIdentification*>> file2nativeid2pepid;
  bool has_spectrum_references{false};
  NATIVE_ID_TYPE native_id_type = checkTMTType(map);
  // We have TMT data: spectrum references annotated at consensus feature and in id
  // We can directly map by native id
  if ((native_id_type != NATIVE_ID_TYPE::UNKNOWN) )
  {
    bool lookForScanNrsAsIntegers = false;
    ProteinIdentification::Mapping mspath_mapping{protein_ids}; // used to retrieve spectrum file information annotated in protein ids given a peptide identification
    for (Size i = 0; i < ids.size(); ++i)
    {
      const PeptideIdentification* pid = &ids[i];
      if (pid->getHits().empty()) continue; // skip IDs without peptide annotations
      String spectrum_file = File::basename(mspath_mapping.getPrimaryMSRunPath(*pid));
      String spectrum_reference = pid->getMetaValue(Constants::UserParam::SPECTRUM_REFERENCE, "");
      // missing file origin is fine, but we need a spectrum_reference if we want to build the map
      if (spectrum_reference.empty()) continue;
      // TODO make a unique decision in the whole class on if to extract by scan number or full string?
      if (!lookForScanNrsAsIntegers)
      {
        // check if spectrum reference is a string that just contains a number
        try
        {
          ids[0].getSpectrumReference().toInt64();
          lookForScanNrsAsIntegers = true;
        }
        catch (...)
        {
          lookForScanNrsAsIntegers = false;
        }
      }
      // TODO: check if there is already an entry
      file2nativeid2pepid[spectrum_file][spectrum_reference] = pid;
      has_spectrum_references = true;
    }
    if (!has_spectrum_references)
    {
      throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No spectrum references in ID file used in id mapping TMT/iTRAQ data.");
    }
    if (measure_from_subelements)
    {
      OPENMS_LOG_WARN << "IDMapper is configured to measure from subelements. Because the data looks like TMT/iTRAQ this option will be ignored." << std::endl;
    }
    if (!ignore_charge_)
    {
      OPENMS_LOG_WARN << "IDMapper is configured to validate charges. Because the data looks like TMT/iTRAQ this option will be ignored." << std::endl;
    }
    // compiled once for the whole map: all scan ids are assumed to share the
    // same structure, so the regex is derived from the first one seen
    boost::regex scanregex{""};
    for (auto& cf : map)
    {
      const auto first_channel = *cf.getFeatures().begin();
      String filename = File::basename(map.getColumnHeaders()[first_channel.getMapIndex()].filename); // all channels are associated with same file in TMT/iTRAQ
      String cf_scan_id_key_name = (native_id_type == NATIVE_ID_TYPE::MS2IDMS3TMT) ? "id_scan_id" : "scan_id";
      String cf_scan_id = cf.getMetaValue(cf_scan_id_key_name, "");
      if (!cf_scan_id.empty())
      {
        // This assumes all scan_ids are of the same structure
        if (lookForScanNrsAsIntegers && scanregex.empty())
        {
          scanregex = SpectrumLookup::getRegExFromNativeID(cf_scan_id);
        }
        if (auto run_it = file2nativeid2pepid.find(filename); run_it != file2nativeid2pepid.end()) // TMT/iTRAQ run has identifications
        {
          if (auto scanid_it = run_it->second.find(cf_scan_id); scanid_it != run_it->second.end()) // TMT/iTRAQ run has scan_id with identification
          {
            cf.getPeptideIdentifications().push_back(*scanid_it->second);
            ++id_matches_single; // in TMT we only match to single consensus feature
          }
          // look for only the scan_number in case the search engine only extracted this (e.g. Sage)
          else if (lookForScanNrsAsIntegers)
          {
            auto scanid_it = run_it->second.find(SpectrumLookup::extractScanNumber(cf_scan_id, scanregex, false));
            if (scanid_it != run_it->second.end())
            {
              cf.getPeptideIdentifications().push_back(*scanid_it->second);
              ++id_matches_single; // in TMT we only match to single consensus feature
            }
          }
        } // else identification file does not contained scan id (e.g. was removed)
        else
        {
          OPENMS_LOG_WARN << "ConsensusMap for TMT/iTRAQ experiment contains scan identifier '" << cf_scan_id
                          << "' quantified in file '" << filename
                          << "' but there is no matching identification."
                          << std::endl;
        }
      }
      else // missing spectrum id annotation
      {
        // report the missing *key name* (cf_scan_id itself is empty here)
        OPENMS_LOG_WARN << "ConsensusMap for TMT/iTRAQ experiment is missing the scan identifier meta value '" << cf_scan_id_key_name << "'"
                        << std::endl;
      }
    }
  }
  else
  { // non TMT data (e.g., label-free)
    for (Size i = 0; i < ids.size(); ++i)
    {
      // skip IDs without peptide annotations
      if (ids[i].getHits().empty()) continue;
      getIDDetails_(ids[i], rt_pep, mz_values, charges);
      bool id_mapped(false);
      // iterate over the features
      for (Size cm_index = 0; cm_index < map.size(); ++cm_index)
      {
        // if set to TRUE, we leave the i_mz-loop as we added the whole ID with all hits
        bool was_added = false; // was current pep-m/z matched?!
        // iterate over m/z values of pepIds
        for (Size i_mz = 0; i_mz < mz_values.size(); ++i_mz)
        {
          double mz_pep = mz_values[i_mz];
          // charge states to use for checking:
          IntList current_charges;
          if (!ignore_charge_)
          {
            // if "mz_ref." is "precursor", we have only one m/z value to check,
            // but still one charge state per peptide hit that could match:
            if (mz_values.size() == 1)
            {
              current_charges = charges;
            }
            else
            {
              current_charges.push_back(charges[i_mz]);
            }
            current_charges.push_back(0); // "not specified" always matches
          }
          //check if we compare distance from centroid or subelements
          if (!measure_from_subelements)
          {
            if (
                isMatch_(rt_pep - map[cm_index].getRT(), mz_pep, map[cm_index].getMZ()) &&
                (ignore_charge_ || ListUtils::contains(current_charges, map[cm_index].getCharge()))
               )
            {
              id_mapped = true;
              was_added = true;
              map[cm_index].getPeptideIdentifications().push_back(ids[i]);
              ++assigned_ids[i];
            }
          }
          else
          {
            for (ConsensusFeature::HandleSetType::const_iterator it_handle = map[cm_index].getFeatures().begin();
                 it_handle != map[cm_index].getFeatures().end();
                 ++it_handle)
            {
              if (isMatch_(rt_pep - it_handle->getRT(), mz_pep, it_handle->getMZ()) &&
                  (ignore_charge_ || ListUtils::contains(current_charges, it_handle->getCharge())))
              {
                id_mapped = true;
                was_added = true;
                if (mapping[cm_index].count(i) == 0)
                {
                  // Store the map index of the peptide feature in the id the feature was mapped to.
                  PeptideIdentification id_pep = ids[i];
                  if (annotate_ids_with_subelements)
                  {
                    id_pep.setMetaValue("map_index", it_handle->getMapIndex());
                  }
                  map[cm_index].getPeptideIdentifications().push_back(id_pep);
                  ++assigned_ids[i];
                  mapping[cm_index].insert(i);
                }
                break; // we added this peptide already.. no need to check other handles
              }
            }
            // continue to here
          }
          if (was_added) break;
        } // m/z values to check
        // break to here
      } // features
      // the id has not been mapped to any consensus feature
      if (!id_mapped)
      {
        map.getUnassignedPeptideIdentifications().push_back(ids[i]);
        ++id_matches_none;
      }
    } // Identifications
    for (auto aid : assigned_ids)
    {
      if (aid.second == 1)
      {
        ++id_matches_single;
      }
      else if (aid.second > 1)
      {
        ++id_matches_multiple;
      }
    }
  }
  // map precursors to identifications ONCE and reuse the result (this call is
  // expensive; it was previously executed three times below)
  auto precursor_mapping = mapPrecursorsToIdentifications(spectra, ids);
  vector<Size>& unidentified = precursor_mapping.unidentified;
  if (!ids.empty() && !spectra.empty())
  {
    OPENMS_LOG_INFO << "Mapping " << ids.size() << " PeptideIdentifications to " << spectra.size() << " spectra." << endl;
    OPENMS_LOG_INFO << "Identification state of spectra: \n"
                    << "Unidentified: " << unidentified.size() << "\n"
                    << "Identified: " << precursor_mapping.identified.size() << "\n"
                    << "No precursor: " << precursor_mapping.no_precursors.size() << endl;
  }
  // we need a valid search run identifier so we try to:
  // extract one from the map (either assigned or unassigned).
  // or fall back to a new search run identifier.
  ProteinIdentification empty_protein_id;
  if (!unidentified.empty())
  {
    empty_protein_id.setDateTime(DateTime::now());
    if (!map.getProteinIdentifications().empty())
    {
      empty_protein_id.setIdentifier(map.getProteinIdentifications()[0].getIdentifier());
    }
    else if (!map.getUnassignedPeptideIdentifications().empty())
    {
      empty_protein_id.setIdentifier(map.getUnassignedPeptideIdentifications()[0].getIdentifier());
    }
    else
    {
      // No search run identifier given so we create a new one
      empty_protein_id.setIdentifier("UNKNOWN_SEARCH_RUN_IDENTIFIER");
      map.getProteinIdentifications().push_back(empty_protein_id);
    }
  }
  // for statistics:
  Size spectrum_matches_none(0), spectrum_matches_single(0), spectrum_matches_multiple(0);
  // are there any mapped but unidentified precursors?
  for (Size ui = 0; ui != unidentified.size(); ++ui)
  {
    Size spectrum_index = unidentified[ui];
    const MSSpectrum& spectrum = spectra[spectrum_index];
    const vector<Precursor>& precursors = spectrum.getPrecursors();
    bool precursor_mapped(false);
    // check if precursor has been identified
    for (Size i_p = 0; i_p < precursors.size(); ++i_p)
    {
      // check by precursor mass and spectrum RT
      double mz_p = precursors[i_p].getMZ();
      int z_p = precursors[i_p].getCharge();
      double rt_value = spectrum.getRT();
      PeptideIdentification precursor_empty_id;
      precursor_empty_id.setRT(rt_value);
      precursor_empty_id.setMZ(mz_p);
      precursor_empty_id.setMetaValue("spectrum_index", spectrum_index);
      if (!spectra[spectrum_index].getNativeID().empty())
      {
        precursor_empty_id.setMetaValue(Constants::UserParam::SPECTRUM_REFERENCE, spectra[spectrum_index].getNativeID());
      }
      precursor_empty_id.setIdentifier(empty_protein_id.getIdentifier());
      // iterate over the consensus features
      for (Size cm_index = 0; cm_index < map.size(); ++cm_index)
      {
        // charge states to use for checking:
        IntList current_charges;
        if (!ignore_charge_)
        {
          current_charges.push_back(z_p);
          current_charges.push_back(0); // "not specified" always matches
        }
        // check if we compare distance from centroid or subelements
        if (!measure_from_subelements) // measure from centroid
        {
          if (isMatch_(rt_value - map[cm_index].getRT(), mz_p, map[cm_index].getMZ()) && (ignore_charge_ || ListUtils::contains(current_charges, map[cm_index].getCharge())))
          {
            map[cm_index].getPeptideIdentifications().push_back(precursor_empty_id);
            ++assigned_precursors[spectrum_index];
            precursor_mapped = true;
          }
        }
        else // measure from subelements
        {
          for (ConsensusFeature::HandleSetType::const_iterator it_handle = map[cm_index].getFeatures().begin();
               it_handle != map[cm_index].getFeatures().end();
               ++it_handle)
          {
            if (isMatch_(rt_value - it_handle->getRT(), mz_p, it_handle->getMZ()) && (ignore_charge_ || ListUtils::contains(current_charges, it_handle->getCharge())))
            {
              if (annotate_ids_with_subelements)
              {
                // store the map index the precursor was mapped to
                Size map_index = it_handle->getMapIndex();
                // we use no underscore here to be compatible with linkers
                precursor_empty_id.setMetaValue("map_index", map_index);
              }
              map[cm_index].getPeptideIdentifications().push_back(precursor_empty_id);
              ++assigned_precursors[spectrum_index];
              precursor_mapped = true;
            }
          }
        }
      } // m/z values to check
    }
    if (!precursor_mapped) ++spectrum_matches_none;
  }
  for (auto apc : assigned_precursors)
  {
    if (apc.second == 1)
    {
      ++spectrum_matches_single;
    }
    else if (apc.second > 1)
    {
      ++spectrum_matches_multiple;
    }
  }
  // some statistics output
  if (!ids.empty())
  {
    OPENMS_LOG_INFO << "Unassigned peptides: " << id_matches_none << "\n"
                    << "Peptides assigned to exactly one feature: " << id_matches_single << "\n"
                    << "Peptides assigned to multiple features: " << id_matches_multiple << "\n";
  }
  if (!spectra.empty())
  {
    OPENMS_LOG_INFO << "Unassigned precursors without identification: " << spectrum_matches_none << "\n"
                    << "Unidentified precursor assigned to exactly one feature: " << spectrum_matches_single << "\n"
                    << "Unidentified precursor assigned to multiple features: " << spectrum_matches_multiple << "\n";
  }
}
void IDMapper::annotate(FeatureMap& map,
const PeptideIdentificationList& ids,
const vector<ProteinIdentification>& protein_ids,
bool use_centroid_rt,
bool use_centroid_mz,
const PeakMap& spectra)
{
// cout << "Starting annotation..." << endl;
checkHits_(ids); // check RT and m/z are present
// append protein identifications
map.getProteinIdentifications().insert(map.getProteinIdentifications().end(), protein_ids.begin(), protein_ids.end());
// check if all features have at least one convex hull
// if not, use the centroid and the given tolerances
if (!(use_centroid_rt && use_centroid_mz))
{
for (Feature& f_it : map)
{
if (f_it.getConvexHulls().empty())
{
use_centroid_rt = true;
use_centroid_mz = true;
OPENMS_LOG_WARN << "IDMapper warning: at least one feature has no convex hull - using centroid coordinates for matching" << endl;
break;
}
}
}
bool use_avg_mass = false; // use avg. peptide masses for matching?
if (use_centroid_mz && (param_.getValue("mz_reference") == "peptide"))
{
// if possible, check which m/z value is reported for features,
// so the appropriate peptide mass can be used for matching
use_avg_mass = checkMassType_(map.getDataProcessing());
}
// calculate feature bounding boxes only once:
vector<DBoundingBox<2> > boxes;
double min_rt = numeric_limits<double>::max();
double max_rt = -numeric_limits<double>::max();
// cout << "Precomputing bounding boxes..." << endl;
boxes.reserve(map.size());
for (Feature& f_it : map)
{
DBoundingBox<2> box;
if (!(use_centroid_rt && use_centroid_mz))
{
box = f_it.getConvexHull().getBoundingBox();
}
if (use_centroid_rt)
{
box.setMinX(f_it.getRT());
box.setMaxX(f_it.getRT());
}
if (use_centroid_mz)
{
box.setMinY(f_it.getMZ());
box.setMaxY(f_it.getMZ());
}
increaseBoundingBox_(box);
boxes.push_back(box);
min_rt = min(min_rt, box.minPosition().getX());
max_rt = max(max_rt, box.maxPosition().getX());
}
// hash bounding boxes of features by RT:
// RT range is partitioned into slices (bins) of 1 second; every feature
// that overlaps a certain slice is hashed into the corresponding bin
vector<vector<SignedSize> > hash_table;
// make sure the RT hash table has indices >= 0 and doesn't waste space
// in the beginning:
SignedSize offset(0);
if (!map.empty())
{
// cout << "Setting up hash table..." << endl;
offset = SignedSize(floor(min_rt));
// this only works if features were found
hash_table.resize(SignedSize(floor(max_rt)) - offset + 1);
for (Size index = 0; index < boxes.size(); ++index)
{
const DBoundingBox<2> & box = boxes[index];
for (SignedSize i = SignedSize(floor(box.minPosition().getX()));
i <= SignedSize(floor(box.maxPosition().getX())); ++i)
{
hash_table[i - offset].push_back(index);
}
}
}
else
{
OPENMS_LOG_WARN << "IDMapper received an empty FeatureMap! All peptides are mapped as 'unassigned'!" << endl;
}
// for statistics:
Size matches_none = 0, matches_single = 0, matches_multi = 0;
// cout << "Finding matches..." << endl;
// iterate over peptide IDs:
for (const PeptideIdentification& id_it : ids)
{
// cout << "Peptide ID: " << id_it - ids.begin() << endl;
if (id_it.getHits().empty()) continue;
DoubleList mz_values;
double rt_value;
IntList charges;
getIDDetails_(id_it, rt_value, mz_values, charges, use_avg_mass);
if ((rt_value < min_rt) || (rt_value > max_rt)) // RT out of bounds
{
map.getUnassignedPeptideIdentifications().push_back(id_it);
++matches_none;
continue;
}
// iterate over candidate features:
Size index = SignedSize(floor(rt_value)) - offset;
Size matching_features = 0;
for (SignedSize& hash_it : hash_table[index])
{
Feature & feat = map[hash_it];
// need to check the charge state?
bool check_charge = !ignore_charge_;
if (check_charge && (mz_values.size() == 1)) // check now
{
if (!ListUtils::contains(charges, feat.getCharge())) continue;
check_charge = false; // don't need to check later
}
// iterate over m/z values (only one if "mz_ref." is "precursor"):
Size l_index = 0;
for (DoubleList::iterator mz_it = mz_values.begin();
mz_it != mz_values.end(); ++mz_it, ++l_index)
{
if (check_charge && (charges[l_index] != feat.getCharge()))
{
continue; // charge states need to match
}
DPosition<2> id_pos(rt_value, *mz_it);
if (boxes[hash_it].encloses(id_pos)) // potential match
{
if (use_centroid_mz)
{
// only one m/z value to check, which was already incorporated
// into the overall bounding box -> success!
feat.getPeptideIdentifications().push_back(id_it);
++matching_features;
break; // "mz_it" loop
}
// else: check all the mass traces
bool found_match = false;
for (vector<ConvexHull2D>::iterator ch_it =
feat.getConvexHulls().begin(); ch_it !=
feat.getConvexHulls().end(); ++ch_it)
{
DBoundingBox<2> box = ch_it->getBoundingBox();
if (use_centroid_rt)
{
box.setMinX(feat.getRT());
box.setMaxX(feat.getRT());
}
increaseBoundingBox_(box);
if (box.encloses(id_pos)) // success!
{
feat.getPeptideIdentifications().push_back(id_it);
++matching_features;
found_match = true;
break; // "ch_it" loop
}
}
if (found_match) break; // "mz_it" loop
}
}
}
if (matching_features == 0)
{
map.getUnassignedPeptideIdentifications().push_back(id_it);
++matches_none;
}
else if (matching_features == 1)
{
++matches_single;
}
else
{
++matches_multi;
}
}
vector<Size> unidentified = mapPrecursorsToIdentifications(spectra, ids).unidentified;
// map all unidentified precursor to features
Size spectrum_matches_none(0);
Size spectrum_matches_single(0);
Size spectrum_matches_multi(0);
// we need a valid search run identifier so we try to:
// extract one from the map (either assigned or unassigned).
// or fall back to a new search run identifier.
ProteinIdentification empty_protein_id;
if (!unidentified.empty())
{
empty_protein_id.setDateTime(DateTime::now());
if (!map.getProteinIdentifications().empty())
{
empty_protein_id.setIdentifier(map.getProteinIdentifications()[0].getIdentifier());
}
else if (!map.getUnassignedPeptideIdentifications().empty())
{
empty_protein_id.setIdentifier(map.getUnassignedPeptideIdentifications()[0].getIdentifier());
}
else
{
// add a new search identification run (mandatory)
empty_protein_id.setIdentifier("UNKNOWN_SEARCH_RUN_IDENTIFIER");
map.getProteinIdentifications().push_back(empty_protein_id);
}
}
// are there any mapped but unidentified precursors?
for (Size i = 0; i != unidentified.size(); ++i)
{
Size spectrum_index = unidentified[i];
const MSSpectrum& spectrum = spectra[spectrum_index];
const vector<Precursor>& precursors = spectrum.getPrecursors();
// check if precursor has been identified
for (Size i_p = 0; i_p < precursors.size(); ++i_p)
{
// check by precursor mass and spectrum RT
double mz_p = precursors[i_p].getMZ();
double rt_value = spectrum.getRT();
int z_p = precursors[i_p].getCharge();
if ((rt_value < min_rt) || (rt_value > max_rt)) // RT out of bounds
{
++spectrum_matches_none;
continue;
}
// iterate over candidate features:
Size index = SignedSize(floor(rt_value)) - offset;
Size matching_features = 0;
PeptideIdentification precursor_empty_id;
precursor_empty_id.setRT(rt_value);
precursor_empty_id.setMZ(mz_p);
precursor_empty_id.setMetaValue("spectrum_index", spectrum_index);
if (!spectra[spectrum_index].getNativeID().empty())
{
precursor_empty_id.setMetaValue(Constants::UserParam::SPECTRUM_REFERENCE, spectra[spectrum_index].getNativeID());
}
precursor_empty_id.setIdentifier(empty_protein_id.getIdentifier());
//precursor_empty_id.setCharge(z_p);
for (SignedSize& hash_it : hash_table[index])
{
Feature & feat = map[hash_it];
// (optionally) check charge state
if (!ignore_charge_)
{
if (std::abs(z_p) != std::abs(feat.getCharge())) continue;
}
DPosition<2> id_pos(rt_value, mz_p);
if (boxes[hash_it].encloses(id_pos)) // potential match
{
if (use_centroid_mz)
{
// only one m/z value to check, which was already incorporated
// into the overall bounding box -> success!
feat.getPeptideIdentifications().push_back(precursor_empty_id);
break; // "mz_it" loop
}
// else: check all the mass traces
bool found_match = false;
for (vector<ConvexHull2D>::iterator ch_it =
feat.getConvexHulls().begin(); ch_it !=
feat.getConvexHulls().end(); ++ch_it)
{
DBoundingBox<2> box = ch_it->getBoundingBox();
if (use_centroid_rt)
{
box.setMinX(feat.getRT());
box.setMaxX(feat.getRT());
}
increaseBoundingBox_(box);
if (box.encloses(id_pos)) // success!
{
feat.getPeptideIdentifications().push_back(precursor_empty_id);
++matching_features;
found_match = true;
break; // "ch_it" loop
}
}
if (found_match) break; // "mz_it" loop
}
}
if (matching_features == 0)
{
++spectrum_matches_none;
}
else if (matching_features == 1)
{
++spectrum_matches_single;
}
else
{
++spectrum_matches_multi;
}
}
}
// some statistics output
OPENMS_LOG_INFO << "Unassigned peptides: " << matches_none << "\n"
<< "Peptides assigned to exactly one feature: " << matches_single << "\n"
<< "Peptides assigned to multiple features: " << matches_multi << "\n";
OPENMS_LOG_INFO << "Unassigned and unidentified precursors: " << spectrum_matches_none << "\n"
<< "Unidentified precursor assigned to exactly one feature: " << spectrum_matches_single << "\n"
<< "Unidentified precursor assigned to multiple features: " << spectrum_matches_multi << "\n";
OPENMS_LOG_INFO << map.getAnnotationStatistics() << endl;
}
double IDMapper::getAbsoluteMZTolerance_(const double mz) const
{
if (measure_ == MEASURE_PPM)
{
return mz * mz_tolerance_ / 1e6;
}
else if (measure_ == MEASURE_DA)
{
return mz_tolerance_;
}
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "IDMapper::getAbsoluteTolerance_(): illegal internal state of measure_!", String(measure_));
}
bool IDMapper::isMatch_(const double rt_distance, const double mz_theoretical, const double mz_observed) const
{
if (measure_ == MEASURE_PPM)
{
return (fabs(rt_distance) <= rt_tolerance_) && (Math::getPPMAbs(mz_observed, mz_theoretical) <= mz_tolerance_);
}
else if (measure_ == MEASURE_DA)
{
return (fabs(rt_distance) <= rt_tolerance_) && (fabs(mz_theoretical - mz_observed) <= mz_tolerance_);
}
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "IDMapper::getAbsoluteTolerance_(): illegal internal state of measure_!", String(measure_));
}
void IDMapper::checkHits_(const PeptideIdentificationList& ids) const
{
for (Size i = 0; i < ids.size(); ++i)
{
if (!ids[i].hasRT())
{
throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "IDMapper: 'RT' information missing for peptide identification!");
}
if (!ids[i].hasMZ())
{
throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "IDMapper: 'MZ' information missing for peptide identification!");
}
}
}
void IDMapper::getIDDetails_(const PeptideIdentification& id, double& rt_pep, DoubleList& mz_values, IntList& charges, bool use_avg_mass) const
{
mz_values.clear();
charges.clear();
rt_pep = id.getRT();
// collect m/z values of pepId
if (param_.getValue("mz_reference") == "precursor") // use precursor m/z of pepId
{
mz_values.push_back(id.getMZ());
}
for (const PeptideHit& hit_it : id.getHits())
{
Int charge = hit_it.getCharge();
charges.push_back(charge);
if (param_.getValue("mz_reference") == "peptide") // use mass of each pepHit (assuming H+ adducts)
{
double mass = use_avg_mass ?
hit_it.getSequence().getAverageWeight(Residue::Full, charge) :
hit_it.getSequence().getMonoWeight(Residue::Full, charge);
mz_values.push_back(mass / (double) charge);
}
}
}
void IDMapper::increaseBoundingBox_(DBoundingBox<2>& box)
{
DPosition<2> sub_min(rt_tolerance_,
getAbsoluteMZTolerance_(box.minPosition().getY())),
add_max(rt_tolerance_, getAbsoluteMZTolerance_(box.maxPosition().getY()));
box.setMin(box.minPosition() - sub_min);
box.setMax(box.maxPosition() + add_max);
}
bool IDMapper::checkMassType_(const vector<DataProcessing>& processing) const
{
bool use_avg_mass = false;
String before;
for (const DataProcessing& proc_it : processing)
{
if (proc_it.getSoftware().getName() == "FeatureFinder")
{
String reported_mz = proc_it.getMetaValue("parameter: algorithm:feature:reported_mz");
if (reported_mz.empty())
continue; // parameter info not available
if (!before.empty() && (reported_mz != before))
{
OPENMS_LOG_WARN << "The m/z values reported for features in the input seem to be of different types (e.g. monoisotopic/average). They will all be compared against monoisotopic peptide masses, but the mapping results may not be meaningful in the end." << endl;
return false;
}
if (reported_mz == "average")
{
use_avg_mass = true;
}
else if (reported_mz == "maximum")
{
OPENMS_LOG_WARN << "For features, m/z values from the highest mass traces are reported. This type of m/z value is not available for peptides, so the comparison has to be done using average peptide masses." << endl;
use_avg_mass = true;
}
before = reported_mz;
}
}
return use_avg_mass;
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/ConsensusIDAlgorithmBest.cpp | .cpp | 925 | 32 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Sven Nahnsen, Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/ConsensusIDAlgorithmBest.h>
using namespace std;
namespace OpenMS
{
  // Default constructor: registers the algorithm's name with the underlying
  // DefaultParamHandler so parameters/errors are reported under it.
  ConsensusIDAlgorithmBest::ConsensusIDAlgorithmBest()
  {
    setName("ConsensusIDAlgorithmBest"); // DefaultParamHandler
  }
double ConsensusIDAlgorithmBest::getAggregateScore_(vector<double>& scores,
bool higher_better)
{
if (higher_better)
{
return *max_element(scores.begin(), scores.end());
}
return *min_element(scores.begin(), scores.end());
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/ConsensusMapMergerAlgorithm.cpp | .cpp | 17,624 | 410 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Julianus Pfeuffer $
// $Authors: Julianus Pfeuffer $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/ConsensusMapMergerAlgorithm.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <unordered_map>
using namespace std;
namespace OpenMS
{
  // Constructor: wires up the DefaultParamHandler base and declares the
  // single boolean parameter "annotate_origin" (default "true") together
  // with its set of valid values.
  ConsensusMapMergerAlgorithm::ConsensusMapMergerAlgorithm() :
    ConsensusMapMergerAlgorithm::DefaultParamHandler("ConsensusMapMergerAlgorithm")
  {
    defaults_.setValue("annotate_origin",
                       "true",
                       "If true, adds a map_index MetaValue to the PeptideIDs to annotate the IDRun they came from.");
    defaults_.setValidStrings("annotate_origin", {"true","false"});
    defaultsToParam_();
  }
//merge proteins across fractions and replicates
  // Groups the consensus map's column headers (file/label pairs) into the
  // condition groups prescribed by the experimental design and delegates the
  // actual merging of protein ID runs to mergeProteinIDRuns().
  // @throws Exception::MissingInformation if a column header cannot be
  //         matched to any file/label pair of the experimental design.
  void ConsensusMapMergerAlgorithm::mergeProteinsAcrossFractionsAndReplicates(ConsensusMap& cmap, const ExperimentalDesign& exp_design) const
  {
    const vector<vector<pair<String, unsigned>>> toMerge = exp_design.getConditionToPathLabelVector();
    // one of label-free, labeled_MS1, labeled_MS2
    const String & experiment_type = cmap.getExperimentType();

    //Not supported because an ID would need to reference multiple protID runs.
    //we could replicate the ID in the future or allow multiple references.
    bool labelfree = true;
    if (experiment_type != "label-free")
    {
      OPENMS_LOG_WARN << "Merging untested for labelled experiments" << endl;
      labelfree = false;
    }

    //out of the path/label combos, construct sets of map indices to be merged
    unsigned lab(0);
    map<unsigned, unsigned> map_idx_2_rep_batch{};
    for (auto& consHeader : cmap.getColumnHeaders())
    {
      bool found = false;
      // annotated channel ids are shifted by +1; columns without annotation
      // default to channel 1 (warn about the assumption for labelled data)
      if (consHeader.second.metaValueExists("channel_id"))
      {
        lab = static_cast<unsigned int>(consHeader.second.getMetaValue("channel_id")) + 1;
      }
      else
      {
        if (!labelfree)
        {
          OPENMS_LOG_WARN << "No channel id annotated in consensusXML. Assuming one channel." << endl;
        }
        lab = 1;
      }
      pair<String, unsigned> path_lab{consHeader.second.filename, lab};

      // linear search for the condition group containing this file/label pair
      unsigned repBatchIdx(0);
      for (auto& repBatch : toMerge)
      {
        for (const std::pair<String, unsigned>& rep : repBatch)
        {
          if (path_lab == rep)
          {
            map_idx_2_rep_batch[consHeader.first] = repBatchIdx;
            found = true;
            break;
          }
        }
        if (found) break;
        repBatchIdx++;
      }
      if (!found)
      {
        throw Exception::MissingInformation(
          __FILE__,
          __LINE__,
          OPENMS_PRETTY_FUNCTION,
          "ConsensusHeader entry ("
          + consHeader.second.filename + ", "
          + consHeader.second.label + ") could not be matched"
          + " to the given experimental design.");
      }
    }

    mergeProteinIDRuns(cmap, map_idx_2_rep_batch);
  }
  // Merges the protein identification runs of a consensus map according to a
  // mapping from (old) map index to (new) protein ID run index:
  //  1. collects, per new run, the contributing file origins and map indices,
  //  2. determines which old runs feed which new runs (an old run is assigned
  //     to a new run if its primary MS run paths are a subset of the new
  //     run's requested file origins),
  //  3. moves/deduplicates protein hits by accession into the new runs,
  //  4. rewrites every peptide ID's run identifier and shifts its
  //     id_merge_index meta value so it still points at the right primary
  //     MS run file within the merged run.
  void ConsensusMapMergerAlgorithm::mergeProteinIDRuns(ConsensusMap &cmap,
                                                       map<unsigned, unsigned> const &mapIdx_to_new_protIDRun) const
  {
    // one of label-free, labeled_MS1, labeled_MS2
    const String & experiment_type = cmap.getExperimentType();

    // Not fully supported yet because an ID would need to reference multiple protID runs.
    // we could replicate the ID in the future or allow multiple references.
    if (experiment_type != "label-free")
    {
      OPENMS_LOG_WARN << "Merging untested for labelled experiments" << endl;
    }

    // Unfortunately we need a kind of bimap here.
    // For the features we need oldMapIdx -> newIDRunIdx
    // For the new runs we need newIDRunIdx -> <[file_origins], [mapIdcs]> once to initialize them with metadata
    // TODO should we instead better try to collect the primaryMSRuns from the old Runs?
    // TODO I just saw that in the columnHeaders might have the featureXMLs as origins but we should enforce that
    //  this will be changed to the mzML by all tools
    //  Therefore we somehow need to check consistency of ColumnHeaders and ProteinIdentification (file_origins).
    map<unsigned, pair<set<String>,vector<Int>>> new_idcs;
    for (const auto& new_idx : mapIdx_to_new_protIDRun)
    {
      const auto& new_idcs_insert_it = new_idcs.emplace(new_idx.second, make_pair(set<String>(), vector<Int>()));
      new_idcs_insert_it.first->second.first.emplace(cmap.getColumnHeaders().at(new_idx.first).filename);
      new_idcs_insert_it.first->second.second.emplace_back(static_cast<Int>(new_idx.first));
    }

    // sanity checks on the number of resulting runs
    Size new_size = new_idcs.size();
    if (new_size == 1)
    {
      OPENMS_LOG_WARN << "Number of new protein ID runs is one. Consider using mergeAllProteinRuns for some additional speed." << endl;
    }
    else if (new_size >= cmap.getColumnHeaders().size())
    //This also holds for TMT etc. because map_index is a combination of file and label already.
    // even if IDs from the same file are split and replicated, the resulting runs are never more
    {
      throw Exception::InvalidValue(
        __FILE__,
        __LINE__,
        OPENMS_PRETTY_FUNCTION,
        "Number of new protein runs after merging"
        " is bigger or equal to the original ones."
        " Aborting. Nothing would be merged.", String(new_size));
    }
    else
    {
      OPENMS_LOG_INFO << "Merging into " << new_size << " protein ID runs." << endl;
    }

    // Mapping from old run ID String to new runIDs indices, i.e. calculate from the file/label pairs (=ColumnHeaders),
    // which ProteinIdentifications need to be merged.
    map<String, set<Size>> run_id_to_new_run_idcs;
    // this is to check how many old runs contribute to the new runs
    // this can help save time and we can double check
    vector<Size> nr_inputs_for_new_run_ids(new_size, 0);
    for (const auto& newidx_to_originset_map_idx_pair : new_idcs)
    {
      for (auto& old_prot_id : cmap.getProteinIdentifications())
      {
        StringList primary_runs;
        old_prot_id.getPrimaryMSRunPath(primary_runs);
        set<String> current_content(primary_runs.begin(), primary_runs.end());
        const set<String>& merge_request = newidx_to_originset_map_idx_pair.second.first;
        // if this run is fully covered by a requested merged set, use it for it.
        // NOTE(review): 'count' is never incremented, so a third duplicate of
        // the same run ID would get the same "_1" suffix again - verify intended.
        Size count = 1;
        if (std::includes(merge_request.begin(), merge_request.end(), current_content.begin(), current_content.end()))
        {
          auto it = run_id_to_new_run_idcs.emplace(old_prot_id.getIdentifier(), set<Size>());
          if (!it.second)
          {
            OPENMS_LOG_WARN << "Duplicate protein run ID found. Uniquifying it." << endl;
            old_prot_id.setIdentifier(old_prot_id.getIdentifier() + "_" + count);
            it = run_id_to_new_run_idcs.emplace(old_prot_id.getIdentifier(), set<Size>());
          }
          it.first->second.emplace(newidx_to_originset_map_idx_pair.first);
          nr_inputs_for_new_run_ids.at(newidx_to_originset_map_idx_pair.first)++;
        }
      }
    }

    vector<ProteinIdentification> new_prot_ids{new_size};
    //TODO preallocate with sum of proteins?
    // hash set keyed by protein accession; values are the target new-run indices
    unordered_map<ProteinHit,set<Size>,hash_type,equal_type> proteins_collected_hits_runs(0, accessionHash_, accessionEqual_);

    // we only need to store an offset if we append the primaryRunPaths
    //(oldRunID, newRunIdx) -> newMergeIdxOffset
    map<pair<String,Size>, Size> oldrunid_newrunidx_pair2newmergeidx_offset;
    for (auto& runid2newrunidcs_pair : run_id_to_new_run_idcs)
    {
      // find old run by its identifier (linear scan)
      auto it = cmap.getProteinIdentifications().begin();
      for (; it != cmap.getProteinIdentifications().end(); ++it)
      {
        if (it->getIdentifier() == runid2newrunidcs_pair.first)
          break;
      }

      for (const auto& newrunid : runid2newrunidcs_pair.second)
      {
        // go through new runs and fill the proteins and update search settings
        // if first time filling this new run:
        //TODO safe to check for empty identifier?
        if (new_prot_ids.at(newrunid).getIdentifier().empty())
        {
          //initialize new run
          new_prot_ids[newrunid].setSearchEngine(it->getSearchEngine());
          new_prot_ids[newrunid].setSearchEngineVersion(it->getSearchEngineVersion());
          new_prot_ids[newrunid].setSearchParameters(it->getSearchParameters());
          StringList toFill;
          it->getPrimaryMSRunPath(toFill);
          new_prot_ids[newrunid].setPrimaryMSRunPath(toFill);
          new_prot_ids[newrunid].setIdentifier("condition" + String(newrunid));
          oldrunid_newrunidx_pair2newmergeidx_offset.emplace(std::piecewise_construct,
                                                             std::forward_as_tuple(runid2newrunidcs_pair.first, newrunid),
                                                             std::forward_as_tuple(0));
        }
        // if not, merge settings or check consistency
        else
        {
          //check consistency and add origins
          it->peptideIDsMergeable(new_prot_ids[newrunid], experiment_type);
          Size offset = new_prot_ids[newrunid].nrPrimaryMSRunPaths();
          StringList toFill; it->getPrimaryMSRunPath(toFill); // new ones
          new_prot_ids[newrunid].addPrimaryMSRunPath(toFill); //add to previous
          oldrunid_newrunidx_pair2newmergeidx_offset.emplace(std::piecewise_construct,
                                                             std::forward_as_tuple(runid2newrunidcs_pair.first, newrunid),
                                                             std::forward_as_tuple(offset));
        }
      }

      //Insert hits into collection with empty set (if not present yet) and
      // add destination run indices
      for (auto& hit : it->getHits())
      {
        const auto& foundIt = proteins_collected_hits_runs.emplace(std::move(hit), set<Size>());
        foundIt.first->second.insert(runid2newrunidcs_pair.second.begin(), runid2newrunidcs_pair.second.end());
      }
      it->getHits().clear(); //not needed anymore and moved anyway
    }

    // copy the protein hits into the destination runs
    for (const auto& protToNewRuns : proteins_collected_hits_runs)
    {
      for (Size runID : protToNewRuns.second)
      {
        new_prot_ids.at(runID).getHits().emplace_back(protToNewRuns.first);
      }
    }

    //Now update the references in the PeptideHits
    //TODO double check the PrimaryRunPaths with the initial requested merge
    function<void(PeptideIdentification&)> fun = [&run_id_to_new_run_idcs, &oldrunid_newrunidx_pair2newmergeidx_offset, &new_prot_ids](PeptideIdentification& pid)
    {
      const set<Size>& runs_to_put = run_id_to_new_run_idcs.at(pid.getIdentifier());
      //TODO check that in the beginning until we support it
      if (runs_to_put.size() > 1)
      {
        OPENMS_LOG_WARN << "Warning: Merging parts of IDRuns currently untested. If it is not a TMT/iTraq sample,"
                           "something is wrong anyway.";
        // in this case you would need to copy the PeptideID
        // should only happen in TMT/itraq
      }

      Size old_merge_idx = 0;
      //TODO we could lookup the old protein ID and see if there were multiple MSruns. If so, we should fail if not
      // exist
      if (pid.metaValueExists(Constants::UserParam::ID_MERGE_INDEX))
      {
        old_merge_idx = pid.getMetaValue(Constants::UserParam::ID_MERGE_INDEX);
      }

      for (const auto& run_to_put : runs_to_put)
      {
        const ProteinIdentification& new_prot_id_run = new_prot_ids[run_to_put];
        pid.setIdentifier(new_prot_id_run.getIdentifier());
        pid.setMetaValue(Constants::UserParam::ID_MERGE_INDEX,
                         old_merge_idx + oldrunid_newrunidx_pair2newmergeidx_offset[{pid.getIdentifier(), run_to_put}]);
      }
    };
    cmap.applyFunctionOnPeptideIDs(fun);
    cmap.setProteinIdentifications(std::move(new_prot_ids));
  }
//merge proteins across fractions and replicates
  // Merges ALL protein identification runs of a consensus map into a single
  // run named "merged": checks that the runs' search settings are compatible,
  // concatenates their primary MS run paths (remembering per-run offsets),
  // deduplicates protein hits by accession, and rewrites every peptide ID's
  // identifier and id_merge_index meta value accordingly.
  void ConsensusMapMergerAlgorithm::mergeAllIDRuns(ConsensusMap& cmap) const
  {
    if (cmap.getProteinIdentifications().size() == 1)
      return;

    // Everything needs to agree
    checkOldRunConsistency_(cmap.getProteinIdentifications(), cmap.getExperimentType());
    ProteinIdentification new_prot_id_run;
    //TODO create better ID
    new_prot_id_run.setIdentifier("merged");
    //TODO merge SearchParams e.g. in case of SILAC
    // search settings are taken from the first run (consistency checked above)
    new_prot_id_run.setSearchEngine(cmap.getProteinIdentifications()[0].getSearchEngine());
    new_prot_id_run.setSearchEngineVersion(cmap.getProteinIdentifications()[0].getSearchEngineVersion());
    new_prot_id_run.setSearchParameters(cmap.getProteinIdentifications()[0].getSearchParameters());
    String old_inference_engine = cmap.getProteinIdentifications()[0].getInferenceEngine();
    if (!old_inference_engine.empty())
    {
      OPENMS_LOG_WARN << "Inference was already performed on the runs in this ConsensusXML."
                         " Merging their proteins, will invalidate correctness of the inference."
                         " You should redo it.\n";
      // deliberately do not take over old inference settings.
    }

    //we do it based on the IDRuns since ID Runs maybe different from quantification in e.g. TMT
    vector<String> merged_origin_files{};
    // per old run: offset of its files in the merged path list + whether it
    // had more than one primary MS run (then id_merge_index is mandatory)
    map<String,pair<Size,bool>> oldrunid2offset_multi_pair;
    for (const auto& pid : cmap.getProteinIdentifications())
    {
      vector<String> out;
      pid.getPrimaryMSRunPath(out);
      Size offset = merged_origin_files.size();
      merged_origin_files.insert(merged_origin_files.end(), out.begin(), out.end());
      oldrunid2offset_multi_pair.emplace(std::piecewise_construct,
                                         std::forward_as_tuple(pid.getIdentifier()),
                                         std::forward_as_tuple(offset, out.size() > 1));
    }
    new_prot_id_run.setPrimaryMSRunPath(merged_origin_files);

    // deduplicate hits by accession; move them out of the old runs
    unordered_set<ProteinHit,hash_type,equal_type> proteins_collected_hits(0, accessionHash_, accessionEqual_);

    std::vector<ProteinIdentification>& old_prot_runs = cmap.getProteinIdentifications();
    typedef std::vector<ProteinHit>::iterator iter_t;
    for (auto& prot_run : old_prot_runs)
    {
      auto& hits = prot_run.getHits();
      proteins_collected_hits.insert(
        std::move_iterator<iter_t>(hits.begin()),
        std::move_iterator<iter_t>(hits.end())
      );
      hits.clear();
    }

    std::map<String, Size> run_id_to_run_idx;
    for (Size old_prot_run_idx = 0; old_prot_run_idx < old_prot_runs.size(); ++old_prot_run_idx)
    {
      ProteinIdentification& protIDRun = old_prot_runs[old_prot_run_idx];
      run_id_to_run_idx[protIDRun.getIdentifier()] = old_prot_run_idx;
    }

    const String& new_prot_id_run_string = new_prot_id_run.getIdentifier();

    // rewrite peptide IDs: point at the merged run and shift id_merge_index
    function<void(PeptideIdentification &)> fun =
      [&new_prot_id_run_string, &oldrunid2offset_multi_pair](PeptideIdentification& pid) -> void
    {
      const auto& p = oldrunid2offset_multi_pair[pid.getIdentifier()];
      pid.setIdentifier(new_prot_id_run_string);
      Size old = 0;
      if (pid.metaValueExists(Constants::UserParam::ID_MERGE_INDEX))
      {
        old = pid.getMetaValue(Constants::UserParam::ID_MERGE_INDEX);
      }
      else
      {
        if (p.second)
        {
          throw Exception::MissingInformation(
            __FILE__,
            __LINE__,
            OPENMS_PRETTY_FUNCTION,
            "No id_merge_index value in a merged ID run."); //TODO add more info about where.
        }
      }
      pid.setMetaValue(Constants::UserParam::ID_MERGE_INDEX, old + p.first);
    };

    cmap.applyFunctionOnPeptideIDs(fun);

    auto& hits = new_prot_id_run.getHits();
    for (auto& prot : proteins_collected_hits)
    {
      hits.emplace_back(std::move(const_cast<ProteinHit&>(prot))); //careful this completely invalidates the set
    }
    proteins_collected_hits.clear();
    cmap.getProteinIdentifications().resize(1);
    swap(cmap.getProteinIdentifications()[0], new_prot_id_run);
    //TODO remove unreferenced proteins? Can this happen when merging all? I think not.
  }
bool ConsensusMapMergerAlgorithm::checkOldRunConsistency_(const vector<ProteinIdentification>& protRuns, const String& experiment_type) const
{
return checkOldRunConsistency_(protRuns, protRuns[0], experiment_type);
}
//TODO refactor the next two functions
bool ConsensusMapMergerAlgorithm::checkOldRunConsistency_(const vector<ProteinIdentification>& protRuns, const ProteinIdentification& ref, const String& experiment_type) const
{
bool ok = true;
for (const auto& idRun : protRuns)
{
// collect warnings and throw at the end if at least one failed
ok = ok && ref.peptideIDsMergeable(idRun, experiment_type);
}
if (!ok /*&& TODO and no force flag*/)
{
throw Exception::MissingInformation(__FILE__,
__LINE__,
OPENMS_PRETTY_FUNCTION,
"Search settings are not matching across IdentificationRuns. "
"See warnings. Aborting..");
}
return ok;
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/IDRipper.cpp | .cpp | 18,070 | 457 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg$
// $Authors: Immanuel Luhn, Leon Kuchenbecker$
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/IDRipper.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <QtCore/QDir>
#include <array>
#include <unordered_set>
using namespace std;
namespace OpenMS
{
  // Meta-value key per OriginAnnotationFormat enum entry (indexed by the
  // enum value); the last entry is the "unknown" fallback.
  const std::array<std::string, IDRipper::SIZE_OF_ORIGIN_ANNOTATION_FORMAT> IDRipper::names_of_OriginAnnotationFormat = {"file_origin", "map_index", Constants::UserParam::ID_MERGE_INDEX, "unknown"};
  // Default constructor: registers the tool name with DefaultParamHandler.
  IDRipper::IDRipper() :
    DefaultParamHandler("IDRipper")
  {
  }
  // Compiler-generated copy, destruction and assignment are sufficient here
  // (no manually managed resources in this class).
  IDRipper::IDRipper(const IDRipper& cp) = default;
  IDRipper::~IDRipper() = default;
  IDRipper& IDRipper::operator=(const IDRipper& rhs) = default;
IDRipper::IdentificationRuns::IdentificationRuns(const vector<ProteinIdentification>& prot_ids)
{
// build index_ map that maps the identifiers in prot_ids to indices 0,1,...
for (const auto& prot_id : prot_ids)
{
String id_run_id = prot_id.getIdentifier();
if (this->index_map.find(id_run_id) != this->index_map.end())
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "IdentificationRun IDs are not unique!", id_run_id);
}
UInt idx = this->index_map.size();
this->index_map[id_run_id] = idx;
const DataValue& mv_spectra_data = prot_id.getMetaValue("spectra_data");
spectra_data.push_back(mv_spectra_data.isEmpty() ? StringList() : mv_spectra_data.toStringList());
}
}
bool IDRipper::RipFileIdentifierIdxComparator::operator()(const RipFileIdentifier& left, const RipFileIdentifier& right) const
{
return std::tie(left.ident_run_idx, left.file_origin_idx)
< std::tie(right.ident_run_idx, right.file_origin_idx);
}
// Identify the output file name features associated via spectra_data or file_origin
  // Derives all output-file naming features of one PeptideIdentification:
  // run index, origin index, origin full name and output basename.
  // Throws ParseError if a lookup in the index maps fails.
  IDRipper::RipFileIdentifier::RipFileIdentifier(const IDRipper::IdentificationRuns& id_runs, const PeptideIdentification& pep_id, const map<String, UInt>& file_origin_map, const IDRipper::OriginAnnotationFormat origin_annotation_fmt, bool split_ident_runs)
  {
    try
    {
      // Numerical identifier of the Identification Run
      this->ident_run_idx = id_runs.index_map.at(pep_id.getIdentifier());
      // Numerical identifier of the PeptideIdentification origin:
      // map_index / id_merge_index annotations carry the index directly,
      // while file_origin strings were mapped to indices in file_origin_map.
      this->file_origin_idx = (origin_annotation_fmt == MAP_INDEX || origin_annotation_fmt == ID_MERGE_INDEX)
          ? pep_id.getMetaValue(names_of_OriginAnnotationFormat[origin_annotation_fmt]).toString().toInt()
          : file_origin_map.at(pep_id.getMetaValue("file_origin").toString());
      // Store the origin full name (resolved via the run's spectra_data list
      // for index-based formats, otherwise the file_origin value itself)
      this->origin_fullname = (origin_annotation_fmt == MAP_INDEX || origin_annotation_fmt == ID_MERGE_INDEX)
          ? id_runs.spectra_data.at(this->ident_run_idx).at(this->file_origin_idx)
          : pep_id.getMetaValue("file_origin").toString();
      // Extract the basename, used for output files when --numeric_filenames is not set
      this->out_basename = QFileInfo(this->origin_fullname.toQString()).completeBaseName().toStdString();
      // Drop the identification run identifier if we're not splitting by identification runs
      if (!split_ident_runs)
        this->ident_run_idx = -1u;
    }
    catch (const std::out_of_range& e)
    {
      throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "input file",
          "Failed to identify corresponding spectra_data element for PeptideIdentification element.");
    }
  }
  // Index of the identification run (-1u when runs are not split).
  UInt IDRipper::RipFileIdentifier::getIdentRunIdx() const
  {
    return ident_run_idx;
  }
  // Index of the origin file within the run.
  UInt IDRipper::RipFileIdentifier::getFileOriginIdx() const
  {
    return file_origin_idx;
  }
  // Full (path-qualified) name of the origin file.
  const String & IDRipper::RipFileIdentifier::getOriginFullname() const
  {
    return origin_fullname;
  }
  // Basename used for the output file when numeric filenames are disabled.
  const String & IDRipper::RipFileIdentifier::getOutputBasename() const
  {
    return out_basename;
  }
  // Protein identification runs collected for this output file.
  const std::vector<ProteinIdentification> & IDRipper::RipFileContent::getProteinIdentifications()
  {
    return prot_idents;
  }
  // Peptide identifications collected for this output file.
  const PeptideIdentificationList & IDRipper::RipFileContent::getPeptideIdentifications()
  {
    return pep_idents;
  }
bool IDRipper::registerBasename_(map<String, pair<UInt, UInt> >& basename_to_numeric, const IDRipper::RipFileIdentifier& rfi)
{
auto it = basename_to_numeric.find(rfi.out_basename);
auto p = make_pair(rfi.ident_run_idx, rfi.file_origin_idx);
// If we have not seen this basename before, store it in the map
if (it == basename_to_numeric.end())
{
basename_to_numeric[rfi.out_basename] = p;
return true;
}
// Otherwise, check if we save it in the context of the same IdentificationRun and potentially spectra_data position
return it->second == p;
}
void IDRipper::rip(
RipFileMap& ripped,
vector<ProteinIdentification>& proteins,
PeptideIdentificationList& peptides,
bool numeric_filenames,
bool split_ident_runs)
{
// Detect file format w.r.t. origin annotation
map<String, UInt> file_origin_map;
IDRipper::OriginAnnotationFormat origin_annotation_fmt = detectOriginAnnotationFormat_(file_origin_map, peptides);
if (origin_annotation_fmt == UNKNOWN_OAF)
{
throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "input file",
"Unable to detect origin annotation format of provided input file.");
}
OPENMS_LOG_DEBUG << "Detected file origin annotation format: " << names_of_OriginAnnotationFormat[origin_annotation_fmt] << std::endl;
// Build identifier index
const IdentificationRuns id_runs = IdentificationRuns(proteins);
// Collect a unique set of representative protein hits. One per accession. Looks at all runs and removes the file origin
unordered_map<String, const ProteinHit*> acc2protein_hits;
for (ProteinIdentification& prot : proteins)
{
prot.removeMetaValue(names_of_OriginAnnotationFormat[origin_annotation_fmt]);
const vector<ProteinHit>& protein_hits = prot.getHits();
for (const auto& ph : protein_hits)
{
acc2protein_hits[ph.getAccession()] = &ph;
}
}
size_t protein_identifier_not_found{};
map<String, pair<UInt, UInt> > basename_to_numeric;
// map run identifier to protein accessions that were already added
map<IDRipper::RipFileIdentifier, unordered_map<String, unordered_set<String>>, RipFileIdentifierIdxComparator> ripped_prot_map;
//store protein and peptides identifications for each file origin
for (PeptideIdentification& pep : peptides)
{
// Build the output file identifier
const IDRipper::RipFileIdentifier rfi(id_runs, pep, file_origin_map, origin_annotation_fmt, split_ident_runs);
// If we are inferring the output file names from the spectra_data or
// file_origin, make sure they are unique
if (!numeric_filenames && !registerBasename_(basename_to_numeric, rfi))
{
throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Autodetected output file names are not unique. Use -numeric_filenames.");
}
// remove file origin annotation
pep.removeMetaValue(names_of_OriginAnnotationFormat[origin_annotation_fmt]);
// get peptide hits (PSMs) for each peptide identification (spectrum)
const vector<PeptideHit>& peptide_hits = pep.getHits();
if (peptide_hits.empty())
{
continue;
}
// collect all protein accessions that are stored in the peptide hits
set<String> protein_accessions = getProteinAccessions_(peptide_hits);
if (protein_accessions.empty())
{
OPENMS_LOG_WARN << "Peptide hits with empty protein accession." << std::endl;
continue;
}
// returns all protein hits that are associated with the accessions of current peptide hits
vector<ProteinHit> proteins_of_accessions;
getProteinHits_(proteins_of_accessions, acc2protein_hits, protein_accessions);
if (proteins_of_accessions.empty())
{
OPENMS_LOG_WARN << "No proteins found for given accessions." << std::endl;
continue;
}
// search for the protein identification of the peptide identification
int prot_ident_index = getProteinIdentification_(pep, id_runs);
if (prot_ident_index == -1)
{
++protein_identifier_not_found;
OPENMS_LOG_WARN << "Run identifier: " << pep.getIdentifier() << " was not found in protein identification runs." << std::endl;
continue;
}
const ProteinIdentification& merged_protein_id_run = proteins[prot_ident_index]; // protein identification run in the merged file
const String& merged_prot_identifier = merged_protein_id_run.getIdentifier(); // protein identification run identifier in merged file
if (RipFileMap::iterator it = ripped.find(rfi); it == ripped.end())
{ // file identifier does not exist yet. We need to create it.
OPENMS_LOG_INFO << "Creating entry for file identifier:\n"
<< "File origin: " << rfi.getOriginFullname() << "\n"
// << "Identification run index: " << rfi.getIdentRunIdx() << "\n" // not set here?
<< "Basename: " << rfi.getOutputBasename() << "\n"
<< "Merged identification file run identifier: " << merged_prot_identifier << "\n"
<< std::endl;
// create the protein run but only set the protein hits that are needed for the current peptide identification
ProteinIdentification p;
p.copyMetaDataOnly(merged_protein_id_run);
p.setHits(proteins_of_accessions); // TODO: what about protein groups?
for (const ProteinHit& prot : proteins_of_accessions)
{ // register protein so we don't add it twice
const String& acc = prot.getAccession();
ripped_prot_map[rfi][merged_prot_identifier].insert(acc);
}
vector<ProteinIdentification> protein_idents;
protein_idents.push_back(std::move(p));
//create new peptide identification
PeptideIdentificationList peptide_idents;
peptide_idents.push_back(pep);
//create and insert new map entry
ripped.insert(make_pair(rfi, RipFileContent(protein_idents, peptide_idents)));
}
else
{ // if file identifier already exists we attach
// query all protein identification runs for file identifier in the Ripped data structure
vector<ProteinIdentification>& ripped_protein_id_runs = it->second.prot_idents;
bool ripped_protein_identifier_exists{false};
for (auto& ripped_protein_id_run : ripped_protein_id_runs)
{ // for all protein identification runs associated with the current file identifier...
const String& ripped_prot_identifier = ripped_protein_id_run.getIdentifier();
if (merged_prot_identifier == ripped_prot_identifier)
{ // protein identification run already exists in ripped map. just add protein hits if not already present
for (const ProteinHit& prot : proteins_of_accessions)
{
// check if protein has already been added
const String& acc = prot.getAccession();
auto& acc_set = ripped_prot_map[rfi][merged_prot_identifier];
if (auto ri = acc_set.find(acc); ri == acc_set.end())
{ // only add protein once to the run identifier
ripped_protein_id_run.insertHit(prot);
acc_set.insert(acc);
#ifdef DEBUG_IDRIPPER
std::cout << "ripped/merged identifier: " << ripped_prot_identifier << " " << prot << std::endl;
#endif
}
}
ripped_protein_identifier_exists = true;
break;
}
}
// file identifier exists but not the protein identification run identifier - we did not add anything so far to it
if (!ripped_protein_identifier_exists)
{
ProteinIdentification p;
p.copyMetaDataOnly(merged_protein_id_run);
for (const ProteinHit& prot : proteins_of_accessions)
{
// check if protein has already been added
const String& acc = prot.getAccession();
auto& acc_set = ripped_prot_map[rfi][merged_prot_identifier];
if (auto ri = acc_set.find(acc); ri == acc_set.end())
{ // only add protein once to the run identifier
p.insertHit(prot);;
acc_set.insert(acc);
#ifdef DEBUG_IDRIPPER
std::cout << "ripped/merged identifier: " << ripped_prot_identifier << " " << prot << std::endl;
#endif
}
}
ripped_protein_id_runs.push_back(std::move(p));
}
// add current peptide identification
PeptideIdentificationList& ripped_pep = it->second.pep_idents;
ripped_pep.push_back(pep);
}
}
// Reduce the spectra data string list if that's what we ripped by
if (origin_annotation_fmt == MAP_INDEX || origin_annotation_fmt == ID_MERGE_INDEX)
{
RipFileMap::iterator it;
for (it = ripped.begin(); it != ripped.end(); ++it)
{
const RipFileIdentifier& rfi = it->first;
RipFileContent& rfc = it->second;
for (ProteinIdentification& prot_id : rfc.prot_idents)
{
StringList old_list;
prot_id.getPrimaryMSRunPath(old_list);
StringList new_list;
new_list.push_back(rfi.origin_fullname);
prot_id.setPrimaryMSRunPath(new_list);
}
}
}
if (protein_identifier_not_found > 0)
{
OPENMS_LOG_ERROR << "Some protein identification runs referenced in peptide identifications were not found." << std::endl;
}
}
void IDRipper::rip(
std::vector<RipFileIdentifier> & rfis,
std::vector<RipFileContent> & rfcs,
std::vector<ProteinIdentification> & proteins,
PeptideIdentificationList & peptides,
bool numeric_filenames,
bool split_ident_runs)
{
RipFileMap rfm;
this->rip(rfm, proteins, peptides, numeric_filenames, split_ident_runs);
rfis.clear();
rfcs.clear();
for (RipFileMap::iterator it = rfm.begin(); it != rfm.end(); ++it)
{
rfis.push_back(it->first);
rfcs.push_back(it->second);
}
}
bool IDRipper::setOriginAnnotationMode_(short& mode, short const new_value)
{
if (mode != -1 && mode != new_value)
{
return false;
}
mode = new_value;
return true;
}
  // Determines which origin annotation format ("file_origin", "map_index" or
  // id_merge_index) is used by the peptide identifications. Every peptide must
  // carry exactly one of these meta values and all peptides must agree on the
  // same format, otherwise UNKNOWN_OAF is returned. For the "file_origin"
  // format, file_origin_map is filled with a unique index per origin string.
  IDRipper::OriginAnnotationFormat IDRipper::detectOriginAnnotationFormat_(map<String, UInt>& file_origin_map, const PeptideIdentificationList& peptide_idents)
  {
    // In case we observe 'file_origin' meta values, we assign an index to every unique meta value
    file_origin_map.clear();
    short mode = -1; // -1 = no format identified yet
    for (PeptideIdentificationList::const_iterator it = peptide_idents.begin(); it != peptide_idents.end(); ++it)
    {
      bool mode_identified = false;
      for (size_t i = 0; i < SIZE_OF_ORIGIN_ANNOTATION_FORMAT; ++i)
      {
        if (it->metaValueExists(names_of_OriginAnnotationFormat[i]))
        {
          // Different mode identified for same or different peptide
          if (mode_identified || !setOriginAnnotationMode_(mode, i))
          {
            return UNKNOWN_OAF;
          }
          else
          {
            mode_identified = true;
          }
          if (i == 0) // names_of_OriginAnnotationFormat[0] == "file_origin"
          {
            const String& file_origin = it->getMetaValue("file_origin");
            // Did we already assign an index to this file_origin?
            if (file_origin_map.find(file_origin) == file_origin_map.end())
            {
              // If not, assign a new unique index
              size_t cur_size = file_origin_map.size();
              file_origin_map[file_origin] = cur_size;
            }
          }
        }
      }
      // a peptide without any origin annotation makes the format undetectable
      if (!mode_identified)
      {
        return UNKNOWN_OAF;
      }
    }
    // mode stays -1 when the input list is empty
    if (mode == -1)
    {
      return UNKNOWN_OAF;
    }
    else
    {
      return static_cast<IDRipper::OriginAnnotationFormat>(mode);
    }
  }
void IDRipper::getProteinHits_(vector<ProteinHit>& result, const unordered_map<String, const ProteinHit*>& acc2protein_hits, const set<String>& protein_accessions)
{
for (const String& s : protein_accessions)
{
if (auto it = acc2protein_hits.find(s); it != acc2protein_hits.end())
{
const ProteinHit* prot_ptr = it->second;
result.push_back(*prot_ptr);
}
}
}
std::set<String> IDRipper::getProteinAccessions_(const vector<PeptideHit>& peptide_hits)
{
std::set<String> accession_set;
for (const PeptideHit& it : peptide_hits)
{
std::set<String> protein_accessions = it.extractProteinAccessionsSet();
accession_set.insert(make_move_iterator(protein_accessions.begin()), make_move_iterator(protein_accessions.end()));
}
return accession_set;
}
int IDRipper::getProteinIdentification_(const PeptideIdentification& pep_ident, const IdentificationRuns& id_runs)
{
const String& identifier = pep_ident.getIdentifier();
if (auto it = id_runs.index_map.find(identifier); it != id_runs.index_map.end())
{
return it->second;
}
return -1;
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/ConsensusIDAlgorithmWorst.cpp | .cpp | 931 | 32 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Sven Nahnsen, Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/ConsensusIDAlgorithmWorst.h>
using namespace std;
namespace OpenMS
{
ConsensusIDAlgorithmWorst::ConsensusIDAlgorithmWorst()
{
setName("ConsensusIDAlgorithmWorst"); // DefaultParamHandler
}
double ConsensusIDAlgorithmWorst::getAggregateScore_(vector<double>& scores,
bool higher_better)
{
if (higher_better)
{
return *min_element(scores.begin(), scores.end());
}
return *max_element(scores.begin(), scores.end());
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/IDMergerAlgorithm.cpp | .cpp | 15,127 | 418 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Julianus Pfeuffer $
// $Authors: Julianus Pfeuffer $
// --------------------------------------------------------------------------
#include "OpenMS/CONCEPT/LogStream.h"
#include "OpenMS/METADATA/ProteinIdentification.h"
#include <OpenMS/ANALYSIS/ID/IDMergerAlgorithm.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <algorithm>
#include <array>
using namespace std;
namespace OpenMS
{
//TODO parameterize so it only adds/keeps best per peptide, peptide charge, modified peptide
// How? Maybe keep a map here about the best scores and lookup before adding and update and insert only if better
// proteins of this peptide could be skipped (if we assume same database as we do currently, it has to be there already)
  // Sets up the merger: registers parameters ("annotate_origin",
  // "allow_disagreeing_settings"), initializes the accession-keyed protein-hit
  // set and assigns the identifier of the merged result run (optionally with a
  // timestamp appended to make it unique).
  IDMergerAlgorithm::IDMergerAlgorithm(const String& runIdentifier, bool addTimeStampToID) :
    IDMergerAlgorithm::DefaultParamHandler("IDMergerAlgorithm"),
    prot_result_(),
    pep_result_(),
    collected_protein_hits_(0, accessionHash_, accessionEqual_),
    id_(runIdentifier),
    fixed_identifier_(!addTimeStampToID)
  {
    defaults_.setValue("annotate_origin",
                       "true",
                       "If true, adds a map_index MetaValue to the PeptideIDs to annotate the IDRun they came from.");
    defaults_.setValidStrings("annotate_origin", {"true","false"});
    defaults_.setValue("allow_disagreeing_settings",
                       "false",
                       "Force merging of disagreeing runs. Use at your own risk.");
    defaults_.setValidStrings("allow_disagreeing_settings", {"true","false"});
    defaultsToParam_();
    prot_result_.setIdentifier(getNewIdentifier_(addTimeStampToID));
  }
//TODO overload to accept a set of specific runIDs only
  //TODO overload to accept a set of specific runIDs only
  // Moving overload: consumes the given runs. On the first insertion the
  // search parameters of the first run are copied into the result run; on
  // later insertions the new runs are checked for consistency against the
  // already-collected result.
  void IDMergerAlgorithm::insertRuns(
    std::vector<ProteinIdentification>&& prots,
    PeptideIdentificationList&& peps
  )
  {
    if (prots.empty())
    {
      OPENMS_LOG_WARN << "No ProteinIdentification(Runs) given. Skipping.";
      return;
    }
    //TODO instead of only checking consistency, merge if possible (especially for SILAC mods)
    if (!filled_)
    {
      if (prots.size() > 1)
      {
        //Without any exp. design we assume label-free for checking mods
        checkOldRunConsistency_(prots, "label-free");
      }
      copySearchParams_(prots[0], prot_result_);
      filled_ = true;
    }
    else
    {
      //Without any exp. design we assume label-free for checking mods
      checkOldRunConsistency_(prots, this->prot_result_, "label-free");
    }
    // move proteins and move peps
    movePepIDsAndRefProteinsToResultFaster_(std::move(peps), std::move(prots));
  }
void IDMergerAlgorithm::insertRuns(
const std::vector<ProteinIdentification>& prots,
const PeptideIdentificationList& peps
)
{
if (prots.empty())
{
OPENMS_LOG_WARN << "No ProteinIdentification(Runs) given. Skipping.";
return;
}
//copy
std::vector<ProteinIdentification> pr = prots;
PeptideIdentificationList pep = peps;
//TODO instead of only checking consistency, merge if possible (especially for SILAC mods)
if (!filled_)
{
if (prots.size() > 1)
{
//Without any exp. design we assume label-free for checking mods
checkOldRunConsistency_(prots, "label-free");
}
copySearchParams_(prots[0], prot_result_);
filled_ = true;
}
else
{
//Without any exp. design we assume label-free for checking mods
checkOldRunConsistency_(prots, this->prot_result_, "label-free");
}
movePepIDsAndRefProteinsToResultFaster_(std::move(pep), std::move(pr));
}
  // Hands the merged result over to the caller and resets the internal state
  // so the object can be reused. Note the order: the result run is swapped
  // into 'prots' first, then the collected protein hits are moved into it.
  void IDMergerAlgorithm::returnResultsAndClear(
    ProteinIdentification& prots,
    PeptideIdentificationList& peps)
  {
    // convert the map from file origin to idx into
    // a vector
    StringList newOrigins(file_origin_to_idx_.size());
    for (auto& entry : file_origin_to_idx_)
    {
      newOrigins[entry.second] = entry.first;
    }
    // currently setPrimaryMSRunPath does not support move (const ref)
    prot_result_.setPrimaryMSRunPath(newOrigins);
    std::swap(prots, prot_result_);
    std::swap(peps, pep_result_);
    //reset so the new this class is reuseable
    prot_result_ = ProteinIdentification{};
    prot_result_.setIdentifier(getNewIdentifier_(!fixed_identifier_));
    //clear, if user gave non-empty vector
    pep_result_.clear();
    //reset internals
    file_origin_to_idx_.clear();
    // Safely move hits out using node handles (empties the set)
    while (!collected_protein_hits_.empty())
    {
      auto nh = collected_protein_hits_.extract(collected_protein_hits_.begin());
      prots.getHits().push_back(std::move(nh.value()));
    }
    filled_ = false;
  }
String IDMergerAlgorithm::getNewIdentifier_(bool addTimeStampToID) const
{
if (!addTimeStampToID) return id_;
std::array<char, 64> buffer;
buffer.fill(0);
time_t rawtime;
time(&rawtime);
const auto timeinfo = localtime(&rawtime);
strftime(buffer.data(), sizeof(buffer), "%d-%m-%Y %H-%M-%S", timeinfo);
return id_ + String(buffer.data());
}
void IDMergerAlgorithm::insertProteinIDs_(
vector<ProteinIdentification>&& old_protRuns
)
{
typedef std::vector<ProteinHit>::iterator iter_t;
for (auto& protRun : old_protRuns) //TODO check run ID when option is added
{
auto& hits = protRun.getHits();
collected_protein_hits_.insert(
std::move_iterator<iter_t>(hits.begin()),
std::move_iterator<iter_t>(hits.end())
);
hits.clear();
}
}
  // Re-targets each peptide identification at the merged result run (new
  // identifier, re-mapped id_merge_index) and moves it into pep_result_.
  // Peptides whose run identifier is not in runID_to_runIdx are skipped.
  void IDMergerAlgorithm::updateAndMovePepIDs_(
    PeptideIdentificationList&& pepIDs,
    const map<String, Size>& runID_to_runIdx,
    const vector<StringList>& originFiles,
    bool annotate_origin)
  {
    //TODO if we allow run IDs, we should do a remove_if,
    // then use the iterator to update and move
    // the IDs, then erase them so we don't encounter them in
    // subsequent calls of this function
    for (auto &pid : pepIDs)
    {
      const String &runID = pid.getIdentifier();
      const auto& runIdxIt = runID_to_runIdx.find(runID);
      if (runIdxIt == runID_to_runIdx.end())
      {
        //This is an easy way to just merge peptides from a certain run
        continue;
        /*
        throw Exception::MissingInformation(
            __FILE__,
            __LINE__,
            OPENMS_PRETTY_FUNCTION,
            "Old IdentificationRun not found for PeptideIdentification "
            "(" + String(pid.getMZ()) + ", " + String(pid.getRT()) + ").");
        */
      }
      bool annotated = pid.metaValueExists(Constants::UserParam::ID_MERGE_INDEX);
      if (annotate_origin || annotated)
      {
        // Resolve the peptide's origin file index in the OLD run, then map it
        // to the global index of the merged result run.
        Size oldFileIdx(0);
        const StringList& origins = originFiles[runIdxIt->second];
        if (annotated)
        {
          oldFileIdx = pid.getMetaValue(Constants::UserParam::ID_MERGE_INDEX);
        }
        else if (origins.size() > 1)
        {
          // If there is more than one possible file it might be from
          // and it is not annotated -> fail
          throw Exception::MissingInformation(
              __FILE__,
              __LINE__,
              OPENMS_PRETTY_FUNCTION,
              "Trying to annotate new id_merge_index for PeptideIdentification "
              "(" + String(pid.getMZ()) + ", " + String(pid.getRT()) + ") but"
              "no old id_merge_index present");
        }
        if (oldFileIdx >= origins.size())
        {
          throw Exception::MissingInformation(
              __FILE__,
              __LINE__,
              OPENMS_PRETTY_FUNCTION,
              "Trying to annotate new id_merge_index for PeptideIdentification "
              "(" + String(pid.getMZ()) + ", " + String(pid.getRT()) + ") but"
              " the index exceeds the number of files in the run.");
        }
        pid.setMetaValue(Constants::UserParam::ID_MERGE_INDEX, file_origin_to_idx_[origins[oldFileIdx]]);
      }
      pid.setIdentifier(prot_result_.getIdentifier());
      //move peptides into right vector
      pep_result_.emplace_back(std::move(pid));
    }
  }
/// this merges without checking the existence of a parent protein for the PeptideHits
/// therefore it can merge peptides and proteins separately and a bit faster.
  // Moves peptide IDs and ALL protein hits of the given runs into the result
  // (no check that each protein is actually referenced by a peptide), which is
  // faster than the reference-checked variant (see commented-out code below).
  void IDMergerAlgorithm::movePepIDsAndRefProteinsToResultFaster_(
    PeptideIdentificationList&& pepIDs,
    vector<ProteinIdentification>&& old_protRuns
  )
  {
    bool annotate_origin(param_.getValue("annotate_origin").toBool());
    // collect the primary MS run paths per run and register each file in the
    // global file_origin_to_idx_ map (first occurrence wins)
    vector<StringList> originFiles{};
    //TODO here check run ID if we allow this option
    for (const auto& protRun : old_protRuns)
    {
      StringList toFill{};
      protRun.getPrimaryMSRunPath(toFill);
      if (toFill.empty() && annotate_origin)
      {
        throw Exception::MissingInformation(
            __FILE__,
            __LINE__,
            OPENMS_PRETTY_FUNCTION,
            "Annotation of origin requested during merge, but no origin present in run "
            + protRun.getIdentifier() + ".");
      }
      //TODO this will make multiple runs from the same file appear multiple times.
      // should be ok but check all possibilities at some point
      originFiles.push_back(toFill);
      for (String& f : toFill)
      {
        file_origin_to_idx_.emplace(std::move(f), file_origin_to_idx_.size());
      }
      toFill.clear();
    }
    // map old run identifiers to their index in old_protRuns
    std::map<String, Size> runIDToRunIdx;
    for (Size oldProtRunIdx = 0; oldProtRunIdx < old_protRuns.size(); ++oldProtRunIdx)
    {
      ProteinIdentification &protIDRun = old_protRuns[oldProtRunIdx];
      runIDToRunIdx[protIDRun.getIdentifier()] = oldProtRunIdx;
    }
    updateAndMovePepIDs_(std::move(pepIDs), runIDToRunIdx, originFiles, annotate_origin);
    insertProteinIDs_(std::move(old_protRuns));
    pepIDs.clear();
    old_protRuns.clear();
  }
/* Old version. Quite slower but only copies actually referenced proteins
void IDMergerAlgorithm::movePepIDsAndRefProteinsToResult_(
PeptideIdentificationList&& pepIDs,
vector<ProteinIdentification>&& oldProtRuns
)
{
bool annotate_origin(param_.getValue("annotate_origin").toBool());
vector<StringList> originFiles{};
//TODO here check run ID if we allow this option
for (const auto& protRun : oldProtRuns)
{
StringList toFill{};
protRun.getPrimaryMSRunPath(toFill);
if (toFill.empty() && annotate_origin)
{
throw Exception::MissingInformation(
__FILE__,
__LINE__,
OPENMS_PRETTY_FUNCTION,
"Annotation of origin requested during merge, but no origin present in run "
+ protRun.getIdentifier() + ".");
}
//TODO this will make multiple runs from the same file appear multiple times.
// should be ok but check all possibilities at some point
originFiles.push_back(toFill);
for (String& f : toFill)
{
file_origin_to_idx_.emplace(std::move(f), file_origin_to_idx_.size());
}
toFill.clear();
}
for (auto &pid : pepIDs)
{
const String &runID = pid.getIdentifier();
//TODO maybe create lookup table in the beginning runIDToRunRef
Size oldProtRunIdx = 0;
for (; oldProtRunIdx < oldProtRuns.size(); ++oldProtRunIdx)
{
ProteinIdentification &protIDRun = oldProtRuns[oldProtRunIdx];
if (protIDRun.getIdentifier() == runID)
{
break;
}
}
if (oldProtRunIdx == oldProtRuns.size())
{
throw Exception::MissingInformation(
__FILE__,
__LINE__,
OPENMS_PRETTY_FUNCTION,
"Old IdentificationRun not found for PeptideIdentification "
"(" + String(pid.getMZ()) + ", " + String(pid.getRT()) + ").");
}
bool annotated = pid.metaValueExists(Constants::UserParam::ID_MERGE_INDEX);
if (annotate_origin || annotated)
{
Size oldFileIdx(0);
if (annotated)
{
oldFileIdx = pid.getMetaValue(Constants::UserParam::ID_MERGE_INDEX);
}
// If there is more than one possible file it might be from
// and it is not annotated -> fail
else if (originFiles[oldProtRunIdx].size() > 1)
{
throw Exception::MissingInformation(
__FILE__,
__LINE__,
OPENMS_PRETTY_FUNCTION,
"Trying to annotate new id_merge_index for PeptideIdentification "
"(" + String(pid.getMZ()) + ", " + String(pid.getRT()) + ") but"
"no old id_merge_index present");
}
pid.setMetaValue(Constants::UserParam::ID_MERGE_INDEX, file_origin_to_idx_[originFiles[oldProtRunIdx].at(oldFileIdx)]);
}
pid.setIdentifier(prot_result_.getIdentifier());
for (auto &phit : pid.getHits())
{
for (auto &acc : phit.extractProteinAccessionsSet())
{
const auto &it = proteinsCollected.emplace(acc);
if (it.second) // was newly inserted
{
//TODO this linear findHit is not so nice: Maybe we can just insert all proteins into a
// unordered_set member
prot_result_.getHits().emplace_back(std::move(*oldProtRuns[oldProtRunIdx].findHit(acc)));
} // else it was there already
}
}
//move peptides into right vector
pep_result_.emplace_back(std::move(pid));
}
pepIDs.clear();
oldProtRuns.clear();
}*/
  // Transfers search engine name, engine version and search parameters from
  // 'from' to 'to'; all other members of 'to' are left untouched.
  void IDMergerAlgorithm::copySearchParams_(const ProteinIdentification& from, ProteinIdentification& to)
  {
      to.setSearchEngine(from.getSearchEngine());
      to.setSearchEngineVersion(from.getSearchEngineVersion());
      to.setSearchParameters(from.getSearchParameters());
  }
bool IDMergerAlgorithm::checkOldRunConsistency_(const vector<ProteinIdentification>& protRuns, const String& experiment_type) const
{
return checkOldRunConsistency_(protRuns, protRuns[0], experiment_type);
}
bool IDMergerAlgorithm::checkOldRunConsistency_(const vector<ProteinIdentification>& protRuns, const ProteinIdentification& ref, const String& experiment_type) const
{
bool ok = true;
for (const auto& idRun : protRuns)
{
// collect warnings and throw at the end if at least one failed
ok = ok && ref.peptideIDsMergeable(idRun, experiment_type);
}
if (!ok && !param_.getValue("allow_disagreeing_settings").toBool())
{
throw Exception::MissingInformation(__FILE__,
__LINE__,
OPENMS_PRETTY_FUNCTION,
"Search settings are not matching across IdentificationRuns. "
"See warnings. Aborting..");
}
return ok;
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/ConsensusIDAlgorithmAverage.cpp | .cpp | 900 | 31 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Sven Nahnsen, Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/ConsensusIDAlgorithmAverage.h>
#include <numeric> // for "accumulate"
using namespace std;
namespace OpenMS
{
ConsensusIDAlgorithmAverage::ConsensusIDAlgorithmAverage()
{
setName("ConsensusIDAlgorithmAverage"); // DefaultParamHandler
}
double ConsensusIDAlgorithmAverage::getAggregateScore_(
vector<double>& scores, bool /* higher_better */)
{
double sum_scores = accumulate(scores.begin(), scores.end(), 0.0);
return sum_scores / scores.size();
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/PeptideIndexing.cpp | .cpp | 41,059 | 909 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/PeptideIndexing.h>
#include <OpenMS/ANALYSIS/ID/AhoCorasickAmbiguous.h>
#include <OpenMS/CHEMISTRY/ProteaseDB.h>
#include <OpenMS/CHEMISTRY/ProteaseDigestion.h>
#include <OpenMS/CONCEPT/EnumHelpers.h>
#include <OpenMS/SYSTEM/StopWatch.h>
#include <OpenMS/SYSTEM/SysInfo.h>
#include <atomic>
#include <map>
#include <array>
#ifdef _OPENMP
#include <omp.h>
#endif
using namespace OpenMS;
using namespace std;
// Sentinel parameter value requesting automatic detection — presumably for the
// decoy string / enzyme settings; see PeptideIndexing.h for the exact usage.
char const* const PeptideIndexing::AUTO_MODE = "auto";
// User-visible option names; indices match the Unmatched / MissingDecoy enums.
const std::array<std::string, (Size)PeptideIndexing::Unmatched::SIZE_OF_UNMATCHED> PeptideIndexing::names_of_unmatched = { "error", "warn", "remove" };
const std::array<std::string, (Size)PeptideIndexing::MissingDecoy::SIZE_OF_MISSING_DECOY> PeptideIndexing::names_of_missing_decoy = { "error" , "warn" , "silent" };
// internal data structure to store match information (not exported)
// internal data structure to store match information (not exported)
struct PeptideProteinMatchInformation
{
  Hit::T peptide_index; ///< index of the peptide
  Hit::T protein_index; ///< index of the protein the peptide is contained in
  Hit::T position; ///< the position of the peptide in the protein
  // NOTE: comments were previously swapped; addHit() assigns seq_prot[position - 1]
  // to AABefore and seq_prot[position + len_pep] to AAAfter.
  char AABefore; //< the amino acid before the peptide in the protein
  char AAAfter; //< the amino acid after the peptide in the protein
  PeptideProteinMatchInformation(Hit::T pep_index, Hit::T prot_index, Hit::T pep_pos, char aa_before, char aa_after) :
    peptide_index(pep_index), protein_index(prot_index), position(pep_pos), AABefore(aa_before), AAAfter(aa_after)
  {
    static_assert(sizeof(Hit::T) == 4); // make sure we do not waste huge amounts of memory; there are instances where we find 10^9 hits, which amounts to 14GB of data!
  }
  // Members bundled for lexicographic comparison.
  const std::tuple<const Hit::T&, const Hit::T&, const Hit::T&, const char&, const char&> tie() const
  { // sorting in the order peptide_index, then protein_index, then the rest is paramount for the code below to work!
    return std::tie(peptide_index, protein_index, position, AABefore, AAAfter);
  }
  bool operator<(const PeptideProteinMatchInformation& other) const
  {
    return tie() < other.tie();
  }
  bool operator==(const PeptideProteinMatchInformation& other) const
  {
    return tie() == other.tie();
  }
};
// internal functor (not exported)
// internal functor (not exported): collects peptide->protein matches found
// during the Aho-Corasick search and counts accepted/rejected hits
struct FoundProteinFunctor
{
public:
  using MapType = std::vector< PeptideProteinMatchInformation >;
  MapType pep_to_prot; ///< peptide index --> protein indices as flat vector
  Size filter_passed{}; ///< number of accepted hits (passing addHit() constraints)
  Size filter_rejected{}; ///< number of rejected hits (not passing addHit())
  ProteaseDigestion enzyme;
private:
  bool xtandem_; //< are we checking XTandem cleavage rules?
public:
  explicit FoundProteinFunctor(const ProteaseDigestion& enzyme, bool xtandem) :
    enzyme(enzyme), xtandem_(xtandem)
  {
  }
  // Absorbs the results of another (e.g. per-thread) functor; 'other' is
  // emptied/zeroed in the process.
  void merge(FoundProteinFunctor& other)
  {
    if (pep_to_prot.empty())
    { // first merge is cheap
      pep_to_prot = std::move(other.pep_to_prot);
    }
    else
    {
      pep_to_prot.insert(pep_to_prot.end(), other.pep_to_prot.cbegin(), other.pep_to_prot.cend());
      other.pep_to_prot.clear();
    }
    // cheap members
    this->filter_passed += other.filter_passed;
    other.filter_passed = 0;
    this->filter_rejected += other.filter_rejected;
    other.filter_rejected = 0;
  }
  // True if the peptide at 'position' with length 'len_pep' is a valid
  // digestion product of 'seq_prot' (missed cleavages are ignored here).
  bool validate(const String& seq_prot, const Int position, const Int len_pep, const bool allow_nterm_protein_cleavage)
  {
    const bool ignore_missed_cleavages = true;
    return enzyme.isValidProduct(seq_prot, position, len_pep, ignore_missed_cleavages, allow_nterm_protein_cleavage, xtandem_);
  }
  // Records one peptide->protein match (with flanking residues) if valid,
  // otherwise only bumps the rejection counter.
  void addHit(
    const bool is_valid,
    const Hit::T idx_pep,
    const Hit::T idx_prot,
    const Hit::T len_pep,
    const String& seq_prot,
    const Hit::T position)
  {
    //TODO we could read and double-check missed cleavages as well
    if (is_valid)
    {
      // append a PeptideProteinMatchInformation; terminal positions get the
      // special N/C-terminal sentinel characters instead of a residue
      pep_to_prot.emplace_back(
        idx_pep,
        idx_prot,
        position,
        (position == 0) ? PeptideEvidence::N_TERMINAL_AA : seq_prot[position - 1],
        (position + len_pep >= seq_prot.size()) ? PeptideEvidence::C_TERMINAL_AA : seq_prot[position + len_pep]
        );
      ++filter_passed;
    }
    else
    {
      //std::cerr << "REJECTED Peptide " << seq_pep << " with hit to protein "
      //  << seq_prot << " at position " << position << '\n';
      ++filter_rejected;
    }
  }
};
// free function (not exported) used to add hits
// Runs the Aho-Corasick trie over @p prot (a substring of @p full_prot starting at @p prot_offset)
// and forwards every hit to @p func_threads, validating cleavage sites only once per distinct match span.
void search(ACTrie& trie, ACTrieState& state, const String& prot, const String& full_prot, size_t prot_offset, Hit::T idx_prot,
            FoundProteinFunctor& func_threads, const bool allow_nterm_protein_cleavage)
{
  state.setQuery(prot);
  trie.getAllHits(state);
  if (state.hits.empty())
  {
    return; // avoid expensive tokenize()
  }
  // by design in AC, hits are ordered by occurrence in protein: duplicate peptides will be consecutive.
  Hit previous{};
  bool previous_valid = false;
  for (const auto& h : state.hits)
  {
    const Hit::T pos_in_full = Hit::T(h.query_pos + prot_offset);
    // identical (position, length) spans share the same cleavage-site validation result
    const bool same_span = (h.needle_length == previous.needle_length) && (h.query_pos == previous.query_pos);
    if (!same_span)
    { // new stretch in protein: validate cutting site once
      previous_valid = func_threads.validate(full_prot, pos_in_full, h.needle_length, allow_nterm_protein_cleavage);
    }
    func_threads.addHit(previous_valid, h.needle_index, idx_prot, h.needle_length, full_prot, pos_in_full);
    previous = h;
  }
}
/// Constructor: registers all tool parameters (with defaults, restrictions and docs) via DefaultParamHandler.
PeptideIndexing::PeptideIndexing()
  : DefaultParamHandler("PeptideIndexing")
{
  // decoy handling
  defaults_.setValue("decoy_string", "", "String that was appended (or prefixed - see 'decoy_string_position' flag below) to the accessions in the protein database to indicate decoy proteins. If empty (default), it's determined automatically (checking for common terms, both as prefix and suffix).");
  defaults_.setValue("decoy_string_position", "prefix", "Is the 'decoy_string' prepended (prefix) or appended (suffix) to the protein accession? (ignored if decoy_string is empty)");
  defaults_.setValidStrings("decoy_string_position", { "prefix", "suffix" });
  defaults_.setValue("missing_decoy_action", names_of_missing_decoy[(Size)MissingDecoy::IS_ERROR], "Action to take if NO peptide was assigned to a decoy protein (which indicates wrong database or decoy string): 'error' (exit with error, no output), 'warn' (exit with success, warning message), 'silent' (no action is taken, not even a warning)");
  defaults_.setValidStrings("missing_decoy_action", std::vector<std::string>(names_of_missing_decoy.begin(), names_of_missing_decoy.end()));
  // enzyme settings ('auto' = deduce from the input identification data)
  defaults_.setValue("enzyme:name", AUTO_MODE, "Enzyme which determines valid cleavage sites - e.g. trypsin cleaves after lysine (K) or arginine (R), but not before proline (P). Default: deduce from input");
  StringList enzymes{};
  ProteaseDB::getInstance()->getAllNames(enzymes);
  enzymes.emplace(enzymes.begin(), AUTO_MODE); // make it the first item
  defaults_.setValidStrings("enzyme:name", ListUtils::create<std::string>(enzymes));
  defaults_.setValue("enzyme:specificity", AUTO_MODE, "Specificity of the enzyme. Default: deduce from input."
                                                      "\n  '" + EnzymaticDigestion::NamesOfSpecificity[EnzymaticDigestion::SPEC_FULL] + "': both internal cleavage sites must match."
                                                      "\n  '" + EnzymaticDigestion::NamesOfSpecificity[EnzymaticDigestion::SPEC_SEMI] + "': one of two internal cleavage sites must match."
                                                      "\n  '" + EnzymaticDigestion::NamesOfSpecificity[EnzymaticDigestion::SPEC_NONE] + "': allow all peptide hits no matter their context (enzyme is irrelevant).");
  defaults_.setValidStrings("enzyme:specificity", {AUTO_MODE,
                                                   EnzymaticDigestion::NamesOfSpecificity[EnzymaticDigestion::SPEC_FULL],
                                                   EnzymaticDigestion::NamesOfSpecificity[EnzymaticDigestion::SPEC_SEMI],
                                                   EnzymaticDigestion::NamesOfSpecificity[EnzymaticDigestion::SPEC_NONE]});
  // output options
  defaults_.setValue("write_protein_sequence", "false", "If set, the protein sequences are stored as well.");
  defaults_.setValidStrings("write_protein_sequence", { "true", "false" });
  defaults_.setValue("write_protein_description", "false", "If set, the protein description is stored as well.");
  defaults_.setValidStrings("write_protein_description", { "true", "false" });
  defaults_.setValue("keep_unreferenced_proteins", "false", "If set, protein hits which are not referenced by any peptide are kept.");
  defaults_.setValidStrings("keep_unreferenced_proteins", { "true", "false" });
  defaults_.setValue("unmatched_action", names_of_unmatched[(Size)Unmatched::IS_ERROR], "If peptide sequences cannot be matched to any protein: 1) raise an error; 2) warn (unmatched PepHits will miss target/decoy annotation with downstream problems); 3) remove the hit.");
  defaults_.setValidStrings("unmatched_action", std::vector<std::string>(names_of_unmatched.begin(), names_of_unmatched.end()));
  // matching tolerances (ambiguous amino acids and mismatches)
  defaults_.setValue("aaa_max", 3, "Maximal number of ambiguous amino acids (AAAs) allowed when matching to a protein database with AAAs. AAAs are 'B', 'J', 'Z' and 'X'.");
  defaults_.setMinInt("aaa_max", 0);
  defaults_.setMaxInt("aaa_max", 10);
  defaults_.setValue("mismatches_max", 0, "Maximal number of mismatched (mm) amino acids allowed when matching to a protein database."
                                          " The required runtime is exponential in the number of mm's; apply with care."
                                          " MM's are allowed in addition to AAA's.");
  defaults_.setMinInt("mismatches_max", 0);
  defaults_.setMaxInt("mismatches_max", 10);
  defaults_.setValue("IL_equivalent", "false", "Treat the isobaric amino acids isoleucine ('I') and leucine ('L') as equivalent (indistinguishable). Also occurrences of 'J' will be treated as 'I' thus avoiding ambiguous matching.");
  defaults_.setValidStrings("IL_equivalent", { "true", "false" });
  defaults_.setValue("allow_nterm_protein_cleavage", "true", "Allow the protein N-terminus amino acid to clip.");
  defaults_.setValidStrings("allow_nterm_protein_cleavage", { "true", "false" });
  defaultsToParam_(); // sync defaults_ into param_ and trigger updateMembers_()
}
// Defaulted destructor, defined out-of-line in this translation unit.
PeptideIndexing::~PeptideIndexing() = default;
/// Synchronizes the cached member variables with the current parameter set
/// (invoked by DefaultParamHandler whenever parameters change).
void PeptideIndexing::updateMembers_()
{
  decoy_string_ = param_.getValue("decoy_string").toString();
  prefix_ = (param_.getValue("decoy_string_position") == "prefix"); // comparison already yields bool; redundant '? true : false' removed
  missing_decoy_action_ = (MissingDecoy)Helpers::indexOf(names_of_missing_decoy, param_.getValue("missing_decoy_action"));
  enzyme_name_ = param_.getValue("enzyme:name").toString();
  enzyme_specificity_ = param_.getValue("enzyme:specificity").toString();
  write_protein_sequence_ = param_.getValue("write_protein_sequence").toBool();
  write_protein_description_ = param_.getValue("write_protein_description").toBool();
  keep_unreferenced_proteins_ = param_.getValue("keep_unreferenced_proteins").toBool();
  unmatched_action_ = (Unmatched)Helpers::indexOf(names_of_unmatched, param_.getValue("unmatched_action"));
  IL_equivalent_ = param_.getValue("IL_equivalent").toBool();
  aaa_max_ = static_cast<Int>(param_.getValue("aaa_max"));
  mm_max_ = static_cast<Int>(param_.getValue("mismatches_max"));
  allow_nterm_protein_cleavage_ = param_.getValue("allow_nterm_protein_cleavage").toBool();
}
/// Convenience overload: wraps an in-memory FASTA entry vector in a FASTAContainer and delegates to run_().
PeptideIndexing::ExitCodes PeptideIndexing::run(std::vector<FASTAFile::FASTAEntry>& proteins, std::vector<ProteinIdentification>& prot_ids, PeptideIdentificationList& pep_ids)
{
  FASTAContainer<TFI_Vector> protein_container(proteins);
  return run_<TFI_Vector>(protein_container, prot_ids, pep_ids);
}
/// Overload for file-backed FASTA containers (proteins are streamed from disk in chunks).
PeptideIndexing::ExitCodes PeptideIndexing::run(FASTAContainer<TFI_File>& proteins, std::vector<ProteinIdentification>& prot_ids, PeptideIdentificationList& pep_ids)
{
  return run_<TFI_File>(proteins, prot_ids, pep_ids);
}
/// Overload for an already-constructed in-memory FASTA container.
PeptideIndexing::ExitCodes PeptideIndexing::run(FASTAContainer<TFI_Vector>& proteins, std::vector<ProteinIdentification>& prot_ids, PeptideIdentificationList& pep_ids)
{
  return run_<TFI_Vector>(proteins, prot_ids, pep_ids);
}
/// Returns the decoy string in use (either user-provided, or auto-detected during run() if the parameter was empty).
const String& PeptideIndexing::getDecoyString() const
{
  return decoy_string_;
}
/// Returns true if the decoy string is a prefix of the protein accession, false if it is a suffix.
bool PeptideIndexing::isPrefix() const
{
  return prefix_;
}
/**
  @brief Core indexing routine: maps all peptide hits to proteins of the FASTA database.

  Steps:
   1. sanity checks & auto-detection of decoy string / enzyme / specificity,
   2. build an Aho-Corasick trie from all (unmodified) peptide sequences,
   3. stream the protein DB in chunks and search it in parallel (OpenMP),
   4. annotate peptide hits with protein evidences and target/decoy status,
   5. rebuild the protein hit lists and write summary statistics / warnings.

  Fixes vs. previous revision (string literals only):
   - error message for I/L-ambiguous enzymes was missing a space after the enzyme name,
   - "orphaned proteins ... (all kept)" log line was missing its trailing newline.
*/
template<typename T>
PeptideIndexing::ExitCodes PeptideIndexing::run_(FASTAContainer<T>& proteins, std::vector<ProteinIdentification>& prot_ids, PeptideIdentificationList& pep_ids)
{
  // I/L-equivalence cannot be combined with enzymes whose cleavage rules distinguish I from L
  if ((enzyme_name_ == "Chymotrypsin" || enzyme_name_ == "Chymotrypsin/P" || enzyme_name_ == "TrypChymo")
      && IL_equivalent_)
  {
    throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
      "The used enzyme " + enzyme_name_ + " differentiates between I and L, therefore the IL_equivalent option cannot be used.");
  }
  // no decoy string provided? try to deduce from data
  if (decoy_string_.empty())
  {
    auto r = DecoyHelper::findDecoyString(proteins);
    proteins.reset();
    if (!r.success)
    { // fall back to the common default and warn
      r.is_prefix = true;
      r.name = "DECOY_";
      OPENMS_LOG_WARN << "Unable to determine decoy string automatically (not enough decoys were detected)! Using default " << (r.is_prefix ? "prefix" : "suffix") << " decoy string '" << r.name << "'\n"
                      << "If you think that this is incorrect, please provide a decoy_string and its position manually!\n";
    }
    prefix_ = r.is_prefix;
    decoy_string_ = r.name;
    // decoy string and position was extracted successfully
    OPENMS_LOG_INFO << "Using " << (prefix_ ? "prefix" : "suffix") << " decoy string '" << decoy_string_ << "'\n";
  }
  //---------------------------------------------------------------
  // parsing parameters, correcting XTandem and MSGFPlus parameters
  //---------------------------------------------------------------
  ProteaseDigestion enzyme;
  if (!enzyme_name_.empty() && (enzyme_name_.compare(AUTO_MODE) != 0))
  { // use param (not empty, not 'auto')
    enzyme.setEnzyme(enzyme_name_);
  }
  else if (!prot_ids.empty() && prot_ids[0].getSearchParameters().digestion_enzyme.getName() != "unknown_enzyme")
  { // take from meta (this assumes all runs used the same enzyme)
    OPENMS_LOG_INFO << "Info: using '" << prot_ids[0].getSearchParameters().digestion_enzyme.getName() << "' as enzyme (obtained from idXML) for digestion.\n";
    enzyme.setEnzyme(&prot_ids[0].getSearchParameters().digestion_enzyme);
  }
  else
  { // fall-back
    OPENMS_LOG_WARN << "Warning: Enzyme name neither given nor deducible from input. Defaulting to Trypsin!\n";
    enzyme.setEnzyme("Trypsin");
  }
  bool xtandem_fix_parameters = false;
  bool msgfplus_fix_parameters = false;
  // determine if at least one search engine was XTandem or MSGFPlus to enable special rules
  for (const auto& prot_id : prot_ids)
  {
    String search_engine = prot_id.getOriginalSearchEngineName();
    StringUtils::toUpper(search_engine);
    OPENMS_LOG_INFO << "Peptide identification engine: " << search_engine << '\n';
    if (search_engine == "XTANDEM" || prot_id.getSearchParameters().metaValueExists("SE:XTandem")) { xtandem_fix_parameters = true; }
    if (search_engine == "MS-GF+" || search_engine == "MSGFPLUS" || prot_id.getSearchParameters().metaValueExists("SE:MS-GF+")) { msgfplus_fix_parameters = true; }
  }
  if (xtandem_fix_parameters)
  { // X!Tandem allows semi-cleavage at Asp/Pro; validate() honors this via the 'xtandem' flag
    OPENMS_LOG_WARN << "X!Tandem detected. Allowing random Asp/Pro cleavage.\n";
  }
  // including MSGFPlus -> Trypsin/P as enzyme
  if (msgfplus_fix_parameters && enzyme.getEnzymeName() == "Trypsin")
  {
    OPENMS_LOG_WARN << "MSGFPlus detected but enzyme cutting rules were set to Trypsin. Correcting to Trypsin/P to cope with special cutting rule in MSGFPlus.\n";
    enzyme.setEnzyme("Trypsin/P");
  }
  OPENMS_LOG_INFO << "Enzyme: " << enzyme.getEnzymeName() << '\n';
  if (!enzyme_specificity_.empty() && (enzyme_specificity_.compare(AUTO_MODE) != 0))
  { // use param (not empty and not 'auto')
    enzyme.setSpecificity(ProteaseDigestion::getSpecificityByName(enzyme_specificity_));
  }
  else if (!prot_ids.empty() && prot_ids[0].getSearchParameters().enzyme_term_specificity != ProteaseDigestion::SPEC_UNKNOWN)
  { // deduce from data ('auto')
    enzyme.setSpecificity(prot_ids[0].getSearchParameters().enzyme_term_specificity);
    OPENMS_LOG_INFO << "Info: using '" << EnzymaticDigestion::NamesOfSpecificity[prot_ids[0].getSearchParameters().enzyme_term_specificity] << "' as enzyme specificity (obtained from idXML) for digestion.\n";
  }
  else
  { // fall-back
    OPENMS_LOG_WARN << "Warning: Enzyme specificity neither given nor present in the input file. Defaulting to 'full'!\n";
    enzyme.setSpecificity(ProteaseDigestion::SPEC_FULL);
  }
  //-------------------------------------------------------------
  // calculations
  //-------------------------------------------------------------
  // cache the first proteins
  const size_t PROTEIN_CACHE_SIZE = 4e5; // 400k should be enough for most DB's and is not too hard on memory either (~200 MB FASTA)
  this->startProgress(0, 1, "Load first DB chunk");
  proteins.cacheChunk(PROTEIN_CACHE_SIZE);
  this->endProgress();
  if (proteins.empty()) // we do not allow an empty database
  {
    OPENMS_LOG_ERROR << "Error: An empty database was provided. Mapping makes no sense. Aborting...\n";
    return ExitCodes::DATABASE_EMPTY;
  }
  if (pep_ids.empty()) // Aho-Corasick requires non-empty input; but we allow this case, since the TOPP tool should not crash when encountering a bad raw file (with no PSMs)
  {
    OPENMS_LOG_WARN << "Warning: An empty set of peptide identifications was provided. Output will be empty as well.\n";
    if (!keep_unreferenced_proteins_)
    {
      // delete only protein hits, not whole ID runs incl. meta data:
      for (std::vector<ProteinIdentification>::iterator it = prot_ids.begin();
           it != prot_ids.end(); ++it)
      {
        it->getHits().clear();
      }
    }
    return ExitCodes::PEPTIDE_IDS_EMPTY;
  }
  FoundProteinFunctor func(enzyme, xtandem_fix_parameters); // store the matches
  std::map<String, Size> acc_to_prot; // map: accessions --> FASTA protein index
  std::vector<bool> protein_is_decoy; // protein index -> is decoy?
  std::vector<std::string> protein_accessions; // protein index -> accession
  bool invalid_protein_sequence = false; // check for proteins with modifications, i.e. '[' or '(', and throw an exception
  { // new scope - forget data after search
    /*
       Aho Corasick (fast)
    */
    ACTrie ac_trie(aaa_max_, mm_max_);
    SysInfo::MemUsage mu;
    OPENMS_LOG_INFO << "Building trie ...";
    StopWatch s;
    s.start();
    bool peptide_has_X {false}; // if any peptide contains an 'X', we switch off protein-X splitting (see below)
    for (const auto& pep : pep_ids)
    {
      for (const auto& hit : pep.getHits())
      {
        //
        // Warning:
        // do not skip over peptides here, since the results are iterated in the same way
        //
        String seq = hit.getSequence().toUnmodifiedString().remove('*'); // make a copy, i.e. do NOT change the peptide sequence!
        if (IL_equivalent_) // convert L to I;
        {
          seq.substitute('L', 'I');
        }
        peptide_has_X |= seq.has('X');
        ac_trie.addNeedle(seq);
      }
    }
    s.stop();
    OPENMS_LOG_INFO << " done (" << int(s.getClockTime()) << "s)\n";
    if (ac_trie.getNeedleCount() == 0)
    { // Aho-Corasick will crash if given empty needles as input
      OPENMS_LOG_WARN << "Warning: Peptide identifications have no hits inside! Output will be empty as well.\n";
      return ExitCodes::PEPTIDE_IDS_EMPTY;
    }
    s.start();
    OPENMS_LOG_INFO << "Compressing trie to BFS format ...\n";
    ac_trie.compressTrie();
    s.stop();
    OPENMS_LOG_INFO << " done (" << int(s.getClockTime()) << "s)\n";
    s.reset();
    OPENMS_LOG_INFO << "Mapping " << ac_trie.getNeedleCount() << " peptides to " << (proteins.size() == PROTEIN_CACHE_SIZE ? "? (unknown number of)" : String(proteins.size())) << " proteins."
                    << '\n';
    OPENMS_LOG_INFO << "Searching with up to " << aaa_max_ << " ambiguous amino acid(s) and " << mm_max_ << " mismatch(es)!\n";
    uint16_t count_j_proteins(0); // NOTE(review): could overflow for >65535 'J' proteins; only affects the warning message below
    bool has_active_data = true; // becomes false if end of FASTA file is reached
    const std::string jumpX(aaa_max_ + mm_max_ + 1, 'X'); // jump over stretches of 'X' which cost a lot of time; +1 because AXXA is a valid hit for aaa_max == 2 (cannot split it)
    // use very large target value for progress if DB size is unknown (did not fit into first chunk)
    this->startProgress(0, proteins.size() == PROTEIN_CACHE_SIZE ? std::numeric_limits<SignedSize>::max() : proteins.size(), "Aho-Corasick");
    std::atomic<int> progress_prots(0);
#pragma omp parallel
    {
      FoundProteinFunctor func_threads(enzyme, xtandem_fix_parameters); // per-thread match collector
      std::map<String, Size> acc_to_prot_thread; // map: accessions --> FASTA protein index
      ACTrieState ac_state;
      String prot;
      while (true)
      {
#pragma omp barrier // all threads need to be here, since we are about to swap protein data
#pragma omp single
        {
          has_active_data = proteins.activateCache(); // swap in last cache
          protein_accessions.resize(proteins.getChunkOffset() + proteins.chunkSize());
        } // implicit barrier here
        if (!has_active_data) break; // leave while-loop
        SignedSize prot_count = (SignedSize)proteins.chunkSize();
#pragma omp master
        {
          // master pre-loads the NEXT chunk and computes decoy flags while the other
          // threads already search the current chunk ('nowait' on the for-loop below)
          proteins.cacheChunk(PROTEIN_CACHE_SIZE);
          protein_is_decoy.resize(proteins.getChunkOffset() + prot_count);
          for (SignedSize i = 0; i < prot_count; ++i)
          { // do this in master only, to avoid false sharing
            const String& seq = proteins.chunkAt(i).identifier;
            protein_is_decoy[i + proteins.getChunkOffset()] = (prefix_ ? seq.hasPrefix(decoy_string_) : seq.hasSuffix(decoy_string_));
          }
        }
        // search all peptides in each protein
#pragma omp for schedule(dynamic, 100) nowait
        for (SignedSize i = 0; i < prot_count; ++i)
        {
          ++progress_prots; // atomic
#ifdef _OPENMP // without OMP, we always set progress
          if (omp_get_thread_num() == 0)
#endif
          {
            this->setProgress(progress_prots);
          }
          prot = proteins.chunkAt(i).sequence;
          prot.remove('*');
          // check for invalid sequences with modifications
          if (prot.has('[') || prot.has('('))
          {
            invalid_protein_sequence = true; // not omp-critical because its write-only
            // we cannot throw an exception here, since we'd need to catch it within the parallel region
          }
          // convert L/J to I; also replace 'J' in proteins
          if (IL_equivalent_)
          {
            prot.substitute('L', 'I');
            prot.substitute('J', 'I');
          }
          else
          { // warn if 'J' is found (it eats into aaa_max)
            if (prot.has('J'))
            {
#pragma omp atomic
              ++count_j_proteins;
            }
          }
          const Hit::T prot_idx = Hit::T(i + proteins.getChunkOffset());
          // grab #hits before searching protein; we know its a hit if this number changes
          const Size hits_total = func_threads.filter_passed + func_threads.filter_rejected;
          // check if there are stretches of 'X' in the protein, but not in the peptide
          if (!peptide_has_X && prot.has('X'))
          {
            // create chunks of the protein (splitting it at stretches of 'X..X') and feed them to AC one by one
            size_t offset = -1, start = 0; // offset intentionally wraps so the first find() starts at 0
            while ((offset = prot.find(jumpX, offset + 1)) != std::string::npos)
            {
              //std::cout << "found X..X at " << offset << " in protein " << proteins[i].identifier << "\n";
              search(ac_trie, ac_state, prot.substr(start, offset + jumpX.size() - start), prot, start, prot_idx, func_threads,
                     allow_nterm_protein_cleavage_);
              // skip ahead while we encounter more X...
              while (offset + jumpX.size() < prot.size() && prot[offset + jumpX.size()] == 'X') ++offset;
              start = offset;
              //std::cout << "  new start: " << start << "\n";
            }
            // last chunk
            if (start < prot.size())
            {
              search(ac_trie, ac_state, prot.substr(start), prot, start, prot_idx, func_threads, allow_nterm_protein_cleavage_);
            }
          }
          else // search the whole protein at once
          {
            search(ac_trie, ac_state, prot, prot, 0, prot_idx, func_threads, allow_nterm_protein_cleavage_);
          }
          // was protein found?
          if (hits_total < func_threads.filter_passed + func_threads.filter_rejected)
          {
            protein_accessions[prot_idx] = proteins.chunkAt(i).identifier;
            acc_to_prot_thread[protein_accessions[prot_idx]] = prot_idx;
          }
        } // end parallel FOR
        // join results
#pragma omp critical(PeptideIndexer_joinAC)
        {
          s.start();
          // hits
          func.merge(func_threads);
          // sort hits by peptide index
          std::sort(func.pep_to_prot.begin(), func.pep_to_prot.end());
          // accession -> index
          acc_to_prot.insert(acc_to_prot_thread.begin(), acc_to_prot_thread.end());
          acc_to_prot_thread.clear();
          s.stop();
        } // OMP end critical
      } // end readChunk
    } // OMP end parallel
    this->endProgress();
    std::cout << "Merge took: " << s.toString() << "\n";
    mu.after();
    std::cout << mu.delta("Aho-Corasick") << "\n\n";
    {
      // count number of peptides found
      // the vector 'pep_to_prot' is sorted by peptide_index, and then by protein_index
      size_t found_peptide_count{0};
      Hit::T last_peptide_idx = -1; // unsigned wrap-around as 'no previous index' sentinel
      for (const auto& hit : func.pep_to_prot)
      {
        if (hit.peptide_index != last_peptide_idx)
        {
          last_peptide_idx = hit.peptide_index;
          ++found_peptide_count;
        }
      }
      OPENMS_LOG_INFO << "\nAho-Corasick done:\n  found " << func.filter_passed << " hits for " << found_peptide_count << " of " << ac_trie.getNeedleCount() << " peptides.\n";
    }
    // write some stats
    OPENMS_LOG_INFO << "Peptide hits passing enzyme filter: " << func.filter_passed << "\n"
                    << "     ... rejected by enzyme filter: " << func.filter_rejected << '\n';
    if (count_j_proteins)
    {
      OPENMS_LOG_WARN << "PeptideIndexer found " << count_j_proteins << " protein sequences in your database containing the amino acid 'J'."
                      << "To match 'J' in a protein, an ambiguous amino acid placeholder for I/L will be used.\n"
                      << "This costs runtime and eats into the 'aaa_max' limit, leaving less opportunity for B/Z/X matches.\n"
                      << "If you want 'J' to be treated as unambiguous, enable '-IL_equivalent'!\n";
    }
  } // end local scope
  //
  //   do mapping
  //
  // index existing proteins
  std::map<String, Size> runid_to_runidx; // identifier to index
  for (Size run_idx = 0; run_idx < prot_ids.size(); ++run_idx)
  {
    runid_to_runidx[prot_ids[run_idx].getIdentifier()] = run_idx;
  }
  // for peptides --> proteins
  Size stats_matched_unique(0);
  Size stats_matched_multi(0);
  Size stats_unmatched(0); // no match to DB
  Size stats_count_m_t(0); // match to Target DB
  Size stats_count_m_d(0); // match to Decoy DB
  Size stats_count_m_td(0); // match to T+D DB
  std::map<Size, std::set<Size> > runidx_to_protidx; // in which protID do appear which proteins (according to mapped peptides)
  // peptide hits are enumerated in the same order as needles were added to the trie above,
  // so 'pep_idx' and the sorted 'pep_to_prot' vector can be consumed in lock-step
  Size pep_idx(0);
  Size func_hits_idx(0); ///< current position in func.pep_to_prot[] which has a stretch of matches for current pep_idx
  const Size func_hits_size = func.pep_to_prot.size();
  for (PeptideIdentificationList::iterator it1 = pep_ids.begin(); it1 != pep_ids.end(); ++it1)
  {
    // which ProteinIdentification does the peptide belong to?
    Size run_idx = runid_to_runidx[it1->getIdentifier()];
    std::vector<PeptideHit>& hits = it1->getHits();
    for (std::vector<PeptideHit>::iterator it_hit = hits.begin(); it_hit != hits.end(); /* no increase here! we might need to erase it; see below */)
    {
      // clear protein accessions
      it_hit->setPeptideEvidences(std::vector<PeptideEvidence>());
      //
      // is this a decoy hit?
      //
      bool matches_target(false);
      bool matches_decoy(false);
      size_t prot_count_of_current_pep {0}; ///< protein hits of this peptide
      Hit::T last_prot_index = -1; // unsigned wrap-around as 'no previous index' sentinel
      // add new protein references
      // the vector 'pep_to_prot' is sorted by peptide_index, and then by protein_index
      while ((func_hits_idx < func_hits_size) && (func.pep_to_prot[func_hits_idx].peptide_index == pep_idx))
      {
        const auto& pe = func.pep_to_prot[func_hits_idx];
        if (last_prot_index != pe.protein_index)
        { // span of hits for a new protein starts;
          last_prot_index = pe.protein_index;
          ++prot_count_of_current_pep;
        }
        const String& accession = protein_accessions[pe.protein_index];
        it_hit->addPeptideEvidence(PeptideEvidence(accession, pe.position, pe.position + (int)it_hit->getSequence().size() - 1, pe.AABefore, pe.AAAfter));
        runidx_to_protidx[run_idx].insert(pe.protein_index); // fill protein hits
        if (protein_is_decoy[pe.protein_index])
        {
          matches_decoy = true;
        }
        else
        {
          matches_target = true;
        }
        ++func_hits_idx;
      }
      ++pep_idx; // next hit
      // annotate target/decoy status derived from all matched proteins
      if (matches_decoy && matches_target)
      {
        it_hit->setMetaValue("target_decoy", "target+decoy");
        ++stats_count_m_td;
      }
      else if (matches_target)
      {
        it_hit->setMetaValue("target_decoy", "target");
        ++stats_count_m_t;
      }
      else if (matches_decoy)
      {
        it_hit->setMetaValue("target_decoy", "decoy");
        ++stats_count_m_d;
      } // else: could match to no protein (i.e. both are false)
      //else ... // not required (handled below; see stats_unmatched);
      if (prot_count_of_current_pep == 1)
      {
        it_hit->setMetaValue("protein_references", "unique");
        ++stats_matched_unique;
      }
      else if (prot_count_of_current_pep > 1)
      {
        it_hit->setMetaValue("protein_references", "non-unique");
        ++stats_matched_multi;
      }
      else
      { // unmatched peptide: log the first few, then act according to 'unmatched_action'
        ++stats_unmatched;
        if (stats_unmatched < 15) OPENMS_LOG_INFO << "Unmatched peptide: " << it_hit->getSequence() << "\n";
        else if (stats_unmatched == 15) OPENMS_LOG_INFO << "Unmatched peptide: ...\n";
        if (unmatched_action_ == Unmatched::REMOVE)
        {
          it_hit = hits.erase(it_hit);
          continue; // already points to the next hit
        }
        else
        {
          it_hit->setMetaValue("protein_references", "unmatched");
        }
      }
      ++it_hit; // next hit
    } // all hits
  } // next PepID
  Size total_peptides = stats_count_m_t + stats_count_m_d + stats_count_m_td + stats_unmatched;
  OPENMS_LOG_INFO << "-----------------------------------\n";
  OPENMS_LOG_INFO << "Peptide statistics\n";
  OPENMS_LOG_INFO << "\n";
  OPENMS_LOG_INFO << "  unmatched                : " << stats_unmatched << " (" << stats_unmatched * 100 / total_peptides << " %)\n";
  OPENMS_LOG_INFO << "  target/decoy:\n";
  OPENMS_LOG_INFO << "    match to target DB only: " << stats_count_m_t << " (" << stats_count_m_t * 100 / total_peptides << " %)\n";
  OPENMS_LOG_INFO << "    match to decoy DB only : " << stats_count_m_d << " (" << stats_count_m_d * 100 / total_peptides << " %)\n";
  OPENMS_LOG_INFO << "    match to both          : " << stats_count_m_td << " (" << stats_count_m_td * 100 / total_peptides << " %)\n";
  OPENMS_LOG_INFO << "\n";
  OPENMS_LOG_INFO << "  mapping to proteins:\n";
  OPENMS_LOG_INFO << "    no match (to 0 protein)         : " << stats_unmatched << "\n";
  OPENMS_LOG_INFO << "    unique match (to 1 protein)     : " << stats_matched_unique << "\n";
  OPENMS_LOG_INFO << "    non-unique match (to >1 protein): " << stats_matched_multi << '\n';
  /// for proteins --> peptides
  Size stats_matched_proteins(0), stats_matched_new_proteins(0), stats_orphaned_proteins(0), stats_proteins_target(0), stats_proteins_decoy(0);
  // all peptides contain the correct protein hit references, now update the protein hits
  for (Size run_idx = 0; run_idx < prot_ids.size(); ++run_idx)
  {
    const auto& masterset = runidx_to_protidx[run_idx]; // all protein matches from above
    std::vector<ProteinHit>& phits = prot_ids[run_idx].getHits();
    {
      // go through existing protein hits and count orphaned proteins (with no peptide hits)
      std::vector<ProteinHit> orphaned_hits;
      for (std::vector<ProteinHit>::iterator p_hit = phits.begin(); p_hit != phits.end(); ++p_hit)
      {
        const String& acc = p_hit->getAccession();
        if (acc_to_prot.find(acc) == acc_to_prot.end()) // acc_to_prot only contains found proteins from current run
        { // old hit is orphaned
          ++stats_orphaned_proteins;
          if (keep_unreferenced_proteins_)
          {
            p_hit->setMetaValue("target_decoy", "");
            orphaned_hits.push_back(*p_hit);
          }
        }
      }
      // only keep orphaned hits (if any)
      phits = orphaned_hits;
    }
    // add new protein hits
    FASTAFile::FASTAEntry fe;
    phits.reserve(phits.size() + masterset.size());
    for (std::set<Size>::const_iterator it = masterset.begin(); it != masterset.end(); ++it)
    {
      ProteinHit hit;
      hit.setAccession(protein_accessions[*it]);
      if (write_protein_sequence_ || write_protein_description_)
      { // only read the FASTA entry from the container if we actually need it
        proteins.readAt(fe, *it);
        if (write_protein_sequence_)
        {
          hit.setSequence(fe.sequence);
        } // no else, since sequence is empty by default
        if (write_protein_description_)
        {
          hit.setDescription(fe.description);
        } // no else, since description is empty by default
      }
      if (protein_is_decoy[*it])
      {
        hit.setMetaValue("target_decoy", "decoy");
        ++stats_proteins_decoy;
      }
      else
      {
        hit.setMetaValue("target_decoy", "target");
        ++stats_proteins_target;
      }
      phits.push_back(hit);
      ++stats_matched_new_proteins;
    }
    stats_matched_proteins += phits.size();
  }
  OPENMS_LOG_INFO << "-----------------------------------\n";
  OPENMS_LOG_INFO << "Protein statistics\n";
  OPENMS_LOG_INFO << "\n";
  OPENMS_LOG_INFO << "  total proteins searched: " << proteins.size() << "\n";
  OPENMS_LOG_INFO << "  matched proteins       : " << stats_matched_proteins << " (" << stats_matched_new_proteins << " new)\n";
  if (stats_matched_proteins)
  { // prevent Division-by-0 Exception
    OPENMS_LOG_INFO << "  matched target proteins: " << stats_proteins_target << " (" << stats_proteins_target * 100 / stats_matched_proteins << " %)\n";
    OPENMS_LOG_INFO << "  matched decoy proteins : " << stats_proteins_decoy << " (" << stats_proteins_decoy * 100 / stats_matched_proteins << " %)\n";
  }
  // fix: the 'kept' branch was missing its newline, gluing the next log line onto this one
  OPENMS_LOG_INFO << "  orphaned proteins      : " << stats_orphaned_proteins << (keep_unreferenced_proteins_ ? " (all kept)\n" : " (all removed)\n");
  OPENMS_LOG_INFO << "-----------------------------------\n";
  // Store PeptideIndexer settings in SearchParameters metavalues for documentation
  for (Size run_idx = 0; run_idx < prot_ids.size(); ++run_idx)
  {
    ProteinIdentification::SearchParameters search_parameters = prot_ids[run_idx].getSearchParameters();
    search_parameters.setMetaValue("PeptideIndexer:decoy_string", decoy_string_);
    search_parameters.setMetaValue("PeptideIndexer:decoy_string_position", prefix_ ? "prefix" : "suffix");
    search_parameters.setMetaValue("PeptideIndexer:enzyme", enzyme.getEnzymeName());
    search_parameters.setMetaValue("PeptideIndexer:enzyme_specificity", EnzymaticDigestion::NamesOfSpecificity[enzyme.getSpecificity()]);
    search_parameters.setMetaValue("PeptideIndexer:aaa_max", aaa_max_);
    search_parameters.setMetaValue("PeptideIndexer:mismatches_max", mm_max_);
    search_parameters.setMetaValue("PeptideIndexer:IL_equivalent", IL_equivalent_ ? "true" : "false");
    search_parameters.setMetaValue("PeptideIndexer:allow_nterm_protein_cleavage", allow_nterm_protein_cleavage_ ? "true" : "false");
    search_parameters.setMetaValue("PeptideIndexer:unmatched_action", names_of_unmatched[(Size)unmatched_action_]);
    search_parameters.setMetaValue("PeptideIndexer:missing_decoy_action", names_of_missing_decoy[(Size)missing_decoy_action_]);
    prot_ids[run_idx].setSearchParameters(search_parameters);
  }
  /// exit if no peptides were matched to decoy
  bool has_error = false;
  if (invalid_protein_sequence)
  {
    OPENMS_LOG_ERROR << "Error: One or more protein sequences contained the characters '[' or '(', which are illegal in protein sequences."
                     << "\nPeptide hits might be masked by these characters (which usually indicate presence of modifications).\n";
    has_error = true;
  }
  if ((stats_count_m_d + stats_count_m_td) == 0)
  {
    String msg("No peptides were matched to the decoy portion of the database! Did you provide the correct concatenated database? Are your 'decoy_string' (=" + decoy_string_ + ") and 'decoy_string_position' (=" + std::string(param_.getValue("decoy_string_position")) + ") settings correct?");
    if (missing_decoy_action_ == MissingDecoy::IS_ERROR)
    {
      OPENMS_LOG_ERROR << "Error: " << msg << "\nSet 'missing_decoy_action' to 'warn' if you are sure this is ok!\nAborting ...\n";
      has_error = true;
    }
    else if (missing_decoy_action_ == MissingDecoy::WARN)
    {
      OPENMS_LOG_WARN << "Warn: " << msg << "\nSet 'missing_decoy_action' to 'error' if you want to elevate this to an error!\n";
    }
    else // silent
    {
    }
  }
  if (stats_unmatched > 0)
  {
    OPENMS_LOG_ERROR << "PeptideIndexer found unmatched peptides, which could not be associated to a protein.\n";
    if (unmatched_action_ == Unmatched::IS_ERROR)
    {
      OPENMS_LOG_ERROR
        << "Potential solutions:\n"
        << "   - check your FASTA database is identical to the search DB (or use 'auto')\n"
        << "   - set 'enzyme:specificity' and 'enzyme:name' to 'auto' to match the parameters of the search engine\n"
        << "   - increase 'aaa_max' to allow more ambiguous amino acids\n"
        << "   - as a last resort: use the 'unmatched_action' option to accept or even remove unmatched peptides\n"
        << "     (note that unmatched peptides cannot be used for FDR calculation or quantification)\n";
      has_error = true;
    }
    else if (unmatched_action_ == Unmatched::WARN)
    {
      OPENMS_LOG_ERROR << "  Warning: " << stats_unmatched << " unmatched hits have been found, but were not removed!\n"
                       << "These are not annotated with target/decoy information and might lead to issues with downstream tools (such as FDR).\n"
                       << "Switch to '" << names_of_unmatched[(Size)Unmatched::REMOVE] << "' if you want to avoid these problems.\n";
    }
    else if (unmatched_action_ == Unmatched::REMOVE)
    {
      OPENMS_LOG_ERROR << "  Warning: " << stats_unmatched <<" unmatched hits have been removed!\n"
                       << "Make sure that these hits are actually a violation of the cutting rules by inspecting the database!\n";
      if (xtandem_fix_parameters) OPENMS_LOG_ERROR << "Since the results are from X!Tandem, this is probably ok (check anyways).\n";
    }
    else
    { // unreachable unless a new Unmatched enum value is added without handling
      throw Exception::NotImplemented(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION);
    }
  }
  if (has_error)
  {
    OPENMS_LOG_ERROR << "Result files will be written, but PeptideIndexer will exit with an error code.\n";
    return ExitCodes::UNEXPECTED_RESULT;
  }
  return ExitCodes::EXECUTION_OK;
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/IonIdentityMolecularNetworking.cpp | .cpp | 8,035 | 186 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Axel Walter $
// $Authors: Axel Walter $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/IonIdentityMolecularNetworking.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/FORMAT/SVOutStream.h>
#include <fstream>
#include <iostream>
#include <set>
#include <unordered_map>
#include <map>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/connected_components.hpp>
// VertexLabel is used for IIMN to build a bipartite graph using UndirectedOSMIdGraph.
// Each vertex is either a "feature" vertex (index of a ConsensusFeature) or a
// "group" vertex (an adduct group ID); edges only ever connect a feature vertex
// to a group vertex, which makes the graph bipartite.
struct VertexLabel
{
  VertexLabel() = default;
  VertexLabel(size_t u, bool is_feat):uid(u), is_feature(is_feat) {}
  // if feature vertex: index (0 to n) for a feature in ConsensusMap;
  // if group vertex: Constants::UserParam::ADDUCT_GROUP from ConsensusFeature Constants::UserParam::IIMN_LINKED_GROUPS
  size_t uid = 0;
  // false = vertex represents a group, true = vertex represents a feature
  bool is_feature = false;
};

// Undirected graph over VertexLabel vertices; vecS storage means vertex
// descriptors are stable 0-based indices (relied upon by the +1/-1 bookkeeping
// in annotateConsensusMap below).
using UndirectedOSMIdGraph = boost::adjacency_list<
  boost::vecS,
  boost::vecS,
  boost::undirectedS,
  VertexLabel>;
namespace OpenMS
{
/**
  @brief Annotate a ConsensusMap for IIMN (Ion Identity Molecular Networking).

  Assigns each ConsensusFeature a 1-based IIMN_ROW_ID, computes connected
  components of a bipartite feature/adduct-group graph to set the 1-based
  IIMN_ANNOTATION_NETWORK_NUMBER, and records the row IDs of adduct partner
  features (features sharing at least one adduct group) as a ';'-separated
  list in IIMN_ADDUCT_PARTNERS. The temporary IIMN_LINKED_GROUPS meta values
  are removed afterwards.
*/
void IonIdentityMolecularNetworking::annotateConsensusMap(ConsensusMap& consensus_map)
{
  // bipartite graph with ConsensusFeature indexes and Groups from Features
  // Vertexes contain uid (index/Group) and is_feature (bool)
  // e.g.
  // ConsensusFeature0 contains Feature (Group = 1) and Feature (Group = 2)
  // ConsensusFeature1 contains Feature (Group = 2) and Feature (Group = 3)
  // ConsensusFeature2 contains Feature (Group = 4) and Feature (Group = 5)
  // graph looks like (Vertex of ConsensusFeature <--> Vertex of Group; component_number)
  // 0, true <--> 1, false; 0
  // 0, true <--> 2, false; 0
  // 1, true <--> 2, false; 0
  // 1, true <--> 3, false; 0
  // 2, true <--> 4, false; 1
  // 2, true <--> 5, false; 1
  // Total number of components: 2
  UndirectedOSMIdGraph g;
  // For each ConsensusFeature: add Constants::UserParam::IIMN_ROW_ID and if Constants::UserParam::IIMN_LINKED_GROUPS is present
  // add a Vertex for the ConsensusFeature and all corresponding Groups to bipartite graph.
  // Check if a Group Vertex has been added already to the graph, since the same Group can occur multiple times.
  std::unordered_map<String, size_t> already_in_graph; // <group_uid, vertex_index>
  for (size_t i = 0; i < consensus_map.size(); i++)
  {
    consensus_map[i].setMetaValue(Constants::UserParam::IIMN_ROW_ID, i+1); // row IDs are 1-based
    if (!consensus_map[i].metaValueExists(Constants::UserParam::IIMN_LINKED_GROUPS)) continue;
    auto feature_vertex = add_vertex(VertexLabel(i, true), g);
    for (const auto& group: consensus_map[i].getMetaValue(Constants::UserParam::IIMN_LINKED_GROUPS).toStringList())
    {
      // NOTE: operator[] default-inserts 0 for unseen groups. The stored value
      // is num_vertices *after* adding the group vertex (i.e. vertex index + 1),
      // which is always >= 1 because a feature vertex is added first. Hence 0
      // safely means "group not seen yet", and lookups subtract 1 below.
      if (!already_in_graph[group])
      {
        add_edge(feature_vertex, add_vertex(VertexLabel(std::stoull(group), false), g), g);
        already_in_graph[group] = boost::num_vertices(g);
        continue;
      }
      else add_edge(feature_vertex, already_in_graph[group]-1, g);
    }
  }
  // represents the component number for each vertex
  std::vector<int> components (boost::num_vertices(g));
  boost::connected_components (g, &components[0]);
  // annotate network number and create a map with feature ID and partner IDs
  // partner feature vertexes are connected via a group vertex
  std::unordered_map<size_t, std::set<size_t>> partner_map;
  for (const auto& i : boost::make_iterator_range(vertices(g)))
  {
    if (!g[i].is_feature) continue;
    // network numbers are 1-based (component index + 1)
    consensus_map[g[i].uid].setMetaValue(Constants::UserParam::IIMN_ANNOTATION_NETWORK_NUMBER, components[i]+1);
    // two-hop walk: feature -> its group vertices -> other features of those groups
    auto group_neighbours = boost::adjacent_vertices(i, g);
    for (const auto& gn : make_iterator_range(group_neighbours))
    {
      auto feature_partners = boost::adjacent_vertices(gn, g);
      for (const auto& partner : make_iterator_range(feature_partners))
      {
        if (i == partner) continue; // a feature is not its own partner
        partner_map[g[i].uid].insert(g[partner].uid);
      }
    }
  }
  // annotate partners as ';'-separated list of 1-based row IDs
  for (const auto& i : partner_map)
  {
    String partners;
    for (const auto& j : i.second)
    {
      if (partners.size() > 0) partners += ";";
      partners += consensus_map[j].getMetaValue(Constants::UserParam::IIMN_ROW_ID).toString();
    }
    consensus_map[i.first].setMetaValue(Constants::UserParam::IIMN_ADDUCT_PARTNERS, partners);
  }
  // remove Constants::UserParam::IIMN_LINKED_GROUPS meta values (no longer needed)
  for (size_t i = 0; i < consensus_map.size(); i++)
  {
    consensus_map[i].removeMetaValue(Constants::UserParam::IIMN_LINKED_GROUPS);
  }
}
/**
@brief Generates a supplementary pairs table required for GNPS IIMN, as defined here: https://ccms-ucsd.github.io/GNPSDocumentation/fbmn-iin/#supplementary-pairs
*/
void IonIdentityMolecularNetworking::writeSupplementaryPairTable(const ConsensusMap& consensus_map, const String& output_file)
{
// exit early if there is no IIMN annotations (first feature has no Constants::UserParam::IIMN_ROW_ID)
if (!consensus_map[0].metaValueExists(Constants::UserParam::IIMN_ROW_ID)) return;
// generate unordered map with feature id and partner feature ids
std::unordered_map<size_t, std::set<size_t>> feature_partners; // map<feature_index, partner_feature_indices>
for (size_t i = 0; i < consensus_map.size(); i++)
{
if (!consensus_map[i].metaValueExists(Constants::UserParam::IIMN_ADDUCT_PARTNERS)) continue;
std::stringstream ss(consensus_map[i].getMetaValue(Constants::UserParam::IIMN_ADDUCT_PARTNERS).toChar());
while(ss.good())
{
String substr;
getline(ss, substr, ';');
feature_partners[i].insert(std::stoi(substr)-1);
}
}
// sort feature partners in new map and swap values (this will give reproducible results across different runs)
std::map<size_t, std::set<size_t>> sorted;
for (auto& [key, value] : feature_partners)
{
sorted[key].swap(value);
}
// get number of partners for each feature to later calculate score of annotation
std::unordered_map<size_t, size_t> num_partners;
for (const auto& entry: sorted)
{
num_partners[entry.first] = entry.second.size();
}
// initialize SVOutStream with tab separation
std::ofstream outstr(output_file.c_str());
SVOutStream out(outstr, ",", "_", String::NONE);
// write table header
out << "ID1" << "ID2" << "EdgeType" << "Score" << "Annotation" << std::endl;
// write edge annotation for each feature / partner feature pair (sorted)
for (const auto& entry: sorted)
{
for (const auto& partner_index: entry.second)
{
out << consensus_map[entry.first].getMetaValue(Constants::UserParam::IIMN_ROW_ID);
out << consensus_map[partner_index].getMetaValue(Constants::UserParam::IIMN_ROW_ID);
out << "MS1 annotation";
out << num_partners[entry.first] + num_partners[partner_index] - 2; // total number of direct partners from both features minus themselves
std::stringstream annotation;
annotation << consensus_map[entry.first].getMetaValue(Constants::UserParam::IIMN_BEST_ION, String("default")) << " "
<< consensus_map[partner_index].getMetaValue(Constants::UserParam::IIMN_BEST_ION, String("default")) << " "
<< "dm/z=" << String(std::abs(consensus_map[entry.first].getMZ() - consensus_map[partner_index].getMZ()));
out << annotation.str();
out << std::endl;
sorted[partner_index].erase(entry.first); // remove other direction to avoid duplicates
}
}
outstr.close();
}
} // closing namespace OpenMS | C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/AccurateMassSearchEngine.cpp | .cpp | 60,333 | 1,533 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Erhan Kenar, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/AccurateMassSearchEngine.h>
#include <OpenMS/CHEMISTRY/AdductInfo.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/IsotopeDistribution.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/CoarseIsotopePatternGenerator.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CONCEPT/VersionInfo.h>
#include <OpenMS/FORMAT/TextFile.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <OpenMS/METADATA/ProteinIdentification.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <OpenMS/METADATA/ID/IdentificationDataConverter.h>
#include <OpenMS/SYSTEM/File.h>
#include <numeric>
namespace OpenMS
{
/// default constructor: all numeric fields start at zero, containers/strings
/// start empty; the isotope similarity score is -1.0 (i.e. "not computed")
AccurateMassSearchResult::AccurateMassSearchResult() :
  observed_mz_(0.0),
  theoretical_mz_(0.0),
  searched_mass_(0.0),
  db_mass_(0.0),
  charge_(0),
  mz_error_ppm_(0.0),
  observed_rt_(0.0),
  observed_intensity_(0.0),
  matching_index_(0),
  source_feature_index_(0),
  isotopes_sim_score_(-1.0)
{
}

/// default destructor
AccurateMassSearchResult::~AccurateMassSearchResult() = default;

/// copy constructor
AccurateMassSearchResult::AccurateMassSearchResult(const AccurateMassSearchResult& source) = default;
/// assignment operator
/// assignment operator
/// Defaulted member-wise copy: keeps the operator consistent with the defaulted
/// copy constructor above and avoids the hand-written member list silently
/// going stale when new members are added. Member-wise copy assignment of
/// doubles/ints/Strings/vectors is also safe under self-assignment.
AccurateMassSearchResult& AccurateMassSearchResult::operator=(const AccurateMassSearchResult& rhs) = default;
// --- simple accessors for AccurateMassSearchResult ------------------------
// Each getter/setter pair below wraps exactly one member; no validation is
// performed, values are stored/returned as given.

/// observed (experimental) m/z of the feature
double AccurateMassSearchResult::getObservedMZ() const
{
  return observed_mz_;
}

void AccurateMassSearchResult::setObservedMZ(const double& m)
{
  observed_mz_ = m;
}

/// theoretical m/z computed from the DB mass and the matched adduct
double AccurateMassSearchResult::getCalculatedMZ() const
{
  return theoretical_mz_;
}

void AccurateMassSearchResult::setCalculatedMZ(const double& m)
{
  theoretical_mz_ = m;
}

/// neutral mass used to query the database
double AccurateMassSearchResult::getQueryMass() const
{
  return searched_mass_;
}

void AccurateMassSearchResult::setQueryMass(const double& m)
{
  searched_mass_ = m;
}

/// neutral mass of the matched database entry
double AccurateMassSearchResult::getFoundMass() const
{
  return db_mass_;
}

void AccurateMassSearchResult::setFoundMass(const double& m)
{
  db_mass_ = m;
}

/// charge of the match (see queryByMZ: absolute adduct charge, or observed charge for 'not-found' entries)
Int AccurateMassSearchResult::getCharge() const
{
  return charge_;
}

void AccurateMassSearchResult::setCharge(const Int& ch)
{
  charge_ = ch;
}

/// signed m/z error in ppm between observed and theoretical m/z
double AccurateMassSearchResult::getMZErrorPPM() const
{
  return mz_error_ppm_;
}

void AccurateMassSearchResult::setMZErrorPPM(const double ppm)
{
  mz_error_ppm_ = ppm;
}

/// retention time of the source feature
double AccurateMassSearchResult::getObservedRT() const
{
  return observed_rt_;
}

void AccurateMassSearchResult::setObservedRT(const double& rt)
{
  observed_rt_ = rt;
}

/// intensity of the source feature
double AccurateMassSearchResult::getObservedIntensity() const
{
  return observed_intensity_;
}

void AccurateMassSearchResult::setObservedIntensity(const double& intensity)
{
  observed_intensity_ = intensity;
}

/// per-map intensities (consensus input); NOTE: returns a copy, not a reference
std::vector<double> AccurateMassSearchResult::getIndividualIntensities() const
{
  return individual_intensities_;
}

void AccurateMassSearchResult::setIndividualIntensities(const std::vector<double>& indiv_ints)
{
  individual_intensities_ = indiv_ints;
}

/// index of the matched entry in the internal mass-mapping table
Size AccurateMassSearchResult::getMatchingIndex() const
{
  return matching_index_;
}

void AccurateMassSearchResult::setMatchingIndex(const Size& idx)
{
  matching_index_ = idx;
}

/// index of the (consensus) feature this result was derived from
Size AccurateMassSearchResult::getSourceFeatureIndex() const
{
  return source_feature_index_;
}

void AccurateMassSearchResult::setSourceFeatureIndex(const Size& idx)
{
  source_feature_index_ = idx;
}

/// name of the matched adduct (or "null" for 'not-found' entries)
const String& AccurateMassSearchResult::getFoundAdduct() const
{
  return found_adduct_;
}

void AccurateMassSearchResult::setFoundAdduct(const String& add)
{
  found_adduct_ = add;
}

/// empirical sum formula of the matched DB entry
const String& AccurateMassSearchResult::getFormulaString() const
{
  return empirical_formula_;
}

void AccurateMassSearchResult::setEmpiricalFormula(const String& ep)
{
  empirical_formula_ = ep;
}

/// all HMDB identifiers sharing the matched formula/mass
const std::vector<String>& AccurateMassSearchResult::getMatchingHMDBids() const
{
  return matching_hmdb_ids_;
}

void AccurateMassSearchResult::setMatchingHMDBids(const std::vector<String>& match_ids)
{
  matching_hmdb_ids_ = match_ids;
}

/// intensities of the feature's isotopic mass traces (optional export)
const std::vector<double>& AccurateMassSearchResult::getMasstraceIntensities() const
{
  return mass_trace_intensities_;
}

void AccurateMassSearchResult::setMasstraceIntensities(const std::vector<double>& mti)
{
  mass_trace_intensities_ = mti;
}

/// isotope pattern similarity score; -1.0 means "not computed"
double AccurateMassSearchResult::getIsotopesSimScore() const
{
  return isotopes_sim_score_;
}

void AccurateMassSearchResult::setIsotopesSimScore(const double& sim_score)
{
  isotopes_sim_score_ = sim_score;
}
std::ostream& operator<<(std::ostream& os, const AccurateMassSearchResult& amsr)
{
// set maximum precision
std::streamsize old_precision = os.precision(std::numeric_limits<double>::digits10 + 2);
os << "observed RT: " << amsr.observed_rt_ << "\n";
os << "observed intensity: " << amsr.observed_intensity_ << "\n";
os << "observed m/z: " << amsr.observed_mz_ << "\n";
os << "m/z error ppm: " << amsr.mz_error_ppm_ << "\n";
os << "charge: " << amsr.charge_ << "\n";
os << "query mass (searched): " << amsr.searched_mass_ << "\n";
os << "theoretical (neutral) mass: " << amsr.db_mass_ << "\n";
os << "matching idx: " << amsr.matching_index_ << "\n";
os << "emp. formula: " << amsr.empirical_formula_ << "\n";
os << "adduct: " << amsr.found_adduct_ << "\n";
os << "matching HMDB ids:";
for (Size i = 0; i < amsr.matching_hmdb_ids_.size(); ++i)
{
os << " " << amsr.matching_hmdb_ids_[i];
}
os << "\n";
os << "isotope similarity score: " << amsr.isotopes_sim_score_ << "\n";
// restore precision
os.precision(old_precision);
return os;
}
/// Constructor: registers all tool parameters (tolerances, ion mode, DB files,
/// adduct lists, output options) with their defaults and valid values, then
/// copies the defaults into the active parameter set via defaultsToParam_().
AccurateMassSearchEngine::AccurateMassSearchEngine() :
  DefaultParamHandler("AccurateMassSearchEngine"),
  ProgressLogger(),
  is_initialized_(false) // init() must be called before any query/run
{
  // mass tolerance (interpreted in 'mass_error_unit')
  defaults_.setValue("mass_error_value", 5.0, "Tolerance allowed for accurate mass search.");
  defaults_.setValue("mass_error_unit", "ppm", "Unit of mass error (ppm or Da)");
  defaults_.setValidStrings("mass_error_unit", {"ppm", "Da"});
  // 'auto' resolves the polarity from the first feature's 'scan_polarity' meta value
  defaults_.setValue("ionization_mode", "positive", "Positive or negative ionization mode? If 'auto' is used, the first feature of the input map must contain the meta-value 'scan_polarity'. If its missing, the tool will exit with error.");
  defaults_.setValidStrings("ionization_mode", {"positive", "negative", "auto"});
  defaults_.setValue("isotopic_similarity", "false", "Computes a similarity score for each hit (only if the feature exhibits at least two isotopic mass traces).");
  defaults_.setValidStrings("isotopic_similarity", {"false", "true"});
  // database files (resolved relative to OpenMS/share when not absolute)
  defaults_.setValue("db:mapping", std::vector<std::string>{"CHEMISTRY/HMDBMappingFile.tsv"}, "Database input file(s), containing three tab-separated columns of mass, formula, identifier. "
                                                        "If 'mass' is 0, it is re-computed from the molecular sum formula. "
                                                        "By default CHEMISTRY/HMDBMappingFile.tsv in OpenMS/share is used! If empty, the default will be used.");
  defaults_.setValue("db:struct", std::vector<std::string>{"CHEMISTRY/HMDB2StructMapping.tsv"}, "Database input file(s), containing four tab-separated columns of identifier, name, SMILES, INCHI."
                                                        "The identifier should match with mapping file. SMILES and INCHI are reported in the output, but not used otherwise. "
                                                        "By default CHEMISTRY/HMDB2StructMapping.tsv in OpenMS/share is used! If empty, the default will be used.");
  // adduct lists (advanced; editable to include/exclude adducts)
  defaults_.setValue("positive_adducts", "CHEMISTRY/PositiveAdducts.tsv", "This file contains the list of potential positive adducts that will be looked for in the database. "
                                                                          "Edit the list if you wish to exclude/include adducts. "
                                                                          "By default CHEMISTRY/PositiveAdducts.tsv in OpenMS/share is used.", {"advanced"});
  defaults_.setValue("negative_adducts", "CHEMISTRY/NegativeAdducts.tsv", "This file contains the list of potential negative adducts that will be looked for in the database. "
                                                                          "Edit the list if you wish to exclude/include adducts. "
                                                                          "By default CHEMISTRY/NegativeAdducts.tsv in OpenMS/share is used.", {"advanced"});
  // candidate filtering / output behavior
  defaults_.setValue("use_feature_adducts", "false", "Whether to filter AMS candidates mismatching available feature adduct annotation.");
  defaults_.setValidStrings("use_feature_adducts", {"false", "true"});
  defaults_.setValue("keep_unidentified_masses", "true", "Keep features that did not yield any DB hit.");
  defaults_.setValidStrings("keep_unidentified_masses", {"true", "false"});
  defaults_.setValue("mzTab:exportIsotopeIntensities", "false", "[featureXML input only] Export column with available isotope trace intensities (opt_global_MTint)");
  defaults_.setValidStrings("mzTab:exportIsotopeIntensities", {"false", "true"});
  // 'legacy' = ProteinID/PeptideID storage + mzTab-v1.0.0; 'ID' = IdentificationData + mzTab-v2.0.0-M
  defaults_.setValue("id_format", "legacy", "Use legacy (ProteinID/PeptideID based storage of metabolomics data) with mzTab-v1.0.0 as output format or novel Identification Data (ID) with mzTab-v2.0.0-M as output format (ID and its MzTab-M output is currently only support for featureXML files).");
  defaults_.setValidStrings("id_format", {"legacy", "ID"});
  defaultsToParam_();
}

AccurateMassSearchEngine::~AccurateMassSearchEngine() = default;
/// public methods
/**
  @brief Query the compound DB by observed m/z: for every adduct of the given
  ion mode, de-charge the m/z to a neutral mass and collect all DB entries
  within tolerance as AccurateMassSearchResult objects (appended to @p results).

  @param observed_mz observed m/z of the feature
  @param observed_charge observed charge; 0 means unknown and matches any adduct charge
  @param ion_mode "positive" or "negative" (selects the adduct list)
  @param results output; hits are appended (not cleared)
  @param observed_adduct optional known adduct; if non-empty, only that adduct is considered
  @throws Exception::IllegalArgument if init() was not called
  @throws Exception::InvalidParameter if @p ion_mode is neither "positive" nor "negative"
*/
void AccurateMassSearchEngine::queryByMZ(const double& observed_mz, const Int& observed_charge, const String& ion_mode, std::vector<AccurateMassSearchResult>& results, const EmpiricalFormula& observed_adduct) const
{
  if (!is_initialized_)
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "AccurateMassSearchEngine::init() was not called!");
  }
  // Depending on ion_mode_internal_, either positive or negative adducts are used
  std::vector<AdductInfo>::const_iterator it_s, it_e;
  if (ion_mode == "positive")
  {
    it_s = pos_adducts_.begin();
    it_e = pos_adducts_.end();
  }
  else if (ion_mode == "negative")
  {
    it_s = neg_adducts_.begin();
    it_e = neg_adducts_.end();
  }
  else
  {
    throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("Ion mode cannot be set to '") + ion_mode + "'. Must be 'positive' or 'negative'!");
  }

  std::pair<Size, Size> hit_idx; // [first, second) index range into mass_mappings_
  for (std::vector<AdductInfo>::const_iterator it = it_s; it != it_e; ++it)
  {
    if (observed_charge != 0 && (std::abs(observed_charge) != std::abs(it->getCharge())))
    { // charge of evidence and adduct must match in absolute terms (absolute, since any FeatureFinder gives only positive charges, even for negative-mode spectra)
      // observed_charge==0 will pass, since we basically do not know its real charge (apparently, no isotopes were found)
      continue;
    }
    if ((observed_adduct != EmpiricalFormula()) && (observed_adduct != it->getEmpiricalFormula()))
    { // If feature has no adduct annotation, method call defaults to empty EF(). If feature is annotated with an adduct, it must match.
      continue;
    }
    // get potential hits as indices in masskey_table
    double neutral_mass = it->getNeutralMass(observed_mz); // calculate mass of uncharged small molecule without adduct mass

    // Our database is just a set of neutral masses (i.e., without adducts)
    // However, given is either an absolute m/z tolerance or a ppm tolerance for the observed m/z
    // We now need an upper bound on the absolute allowed mass difference, given the above tolerance in m/z.
    // The selected candidates then have an mass tolerance which corresponds to the user's m/z tolerance.
    // (the other approach is to pre-compute m/z values for all combinations of adducts, charges and DB entries -- too much)
    double diff_mz;
    // check if mass error window is given in ppm or Da
    if (mass_error_unit_ == "ppm")
    {
      // convert ppm to absolute m/z tolerance for the current candidate
      diff_mz = (observed_mz / 1e6) * mass_error_value_;
    }
    else
    {
      diff_mz = mass_error_value_;
    }
    // convert absolute m/z diff to absolute mass diff
    // What about the adduct?
    // absolute mass error: the adduct itself is irrelevant here since its a constant for both the theoretical and observed mass
    // ppm tolerance: the diff_mz accounts for it already (heavy adducts lead to larger m/z tolerance)
    // The adduct mass multiplier has to be taken into account when calculating the diff_mass (observed = 228 Da; Multiplier = 2M; theoretical mass = 114 Da)
    // if not the allowed mass error will be the one from 228 Da instead of 114 Da (in this example twice as high).
    double diff_mass = (diff_mz * std::abs(it->getCharge())) / it->getMolMultiplier(); // do not use observed charge (could be 0=unknown)

    // binary-search style lookup into the sorted mass table; fills hit_idx
    searchMass_(neutral_mass, diff_mass, hit_idx);

    //std::cerr << ion_mode_internal_ << " adduct: " << adduct_name << ", " << adduct_mass << " Da, " << query_mass << " qm(against DB), " << charge << " q\n";

    // store information from query hits in AccurateMassSearchResult objects
    for (Size i = hit_idx.first; i < hit_idx.second; ++i)
    {
      // check if DB entry is compatible to the adduct (e.g. enough atoms to subtract)
      if (!it->isCompatible(EmpiricalFormula(mass_mappings_[i].formula)))
      {
        // only written if TOPP tool has --debug
        OPENMS_LOG_DEBUG << "'" << mass_mappings_[i].formula << "' cannot have adduct '" << it->getName() << "'. Omitting.\n";
        continue;
      }
      // compute ppm errors
      double db_mass = mass_mappings_[i].mass;
      double theoretical_mz = it->getMZ(db_mass);
      double error_ppm_mz = Math::getPPM(observed_mz, theoretical_mz); // negative values are allowed!

      AccurateMassSearchResult ams_result;
      ams_result.setObservedMZ(observed_mz);
      ams_result.setCalculatedMZ(theoretical_mz);
      ams_result.setQueryMass(neutral_mass);
      ams_result.setFoundMass(db_mass);
      ams_result.setCharge(std::abs(it->getCharge())); // use theoretical adducts charge (is always valid); native charge might be zero
      ams_result.setMZErrorPPM(error_ppm_mz);
      ams_result.setMatchingIndex(i);
      ams_result.setFoundAdduct(it->getName());
      ams_result.setEmpiricalFormula(mass_mappings_[i].formula);
      ams_result.setMatchingHMDBids(mass_mappings_[i].massIDs);
      results.push_back(ams_result);
    }
  }

  // if result is empty, add a 'not-found' indicator if empty hits should be stored
  // (NaN masses, adduct "null", HMDB id "null")
  if (results.empty() && keep_unidentified_masses_)
  {
    AccurateMassSearchResult ams_result;
    ams_result.setObservedMZ(observed_mz);
    ams_result.setCalculatedMZ(std::numeric_limits<double>::quiet_NaN());
    ams_result.setQueryMass(std::numeric_limits<double>::quiet_NaN());
    ams_result.setFoundMass(std::numeric_limits<double>::quiet_NaN());
    ams_result.setCharge(observed_charge);
    ams_result.setMZErrorPPM(std::numeric_limits<double>::quiet_NaN());
    ams_result.setMatchingIndex(-1); // this is checked to identify 'not-found' (Size wraps to max value)
    ams_result.setFoundAdduct("null");
    ams_result.setEmpiricalFormula("");
    ams_result.setMatchingHMDBids(std::vector<String>(1, "null"));
    results.push_back(ams_result);
  }
  return;
}
void AccurateMassSearchEngine::queryByFeature(const Feature& feature, const Size& feature_index, const String& ion_mode, std::vector<AccurateMassSearchResult>& results) const
{
if (!is_initialized_)
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "AccurateMassSearchEngine::init() was not called!");
}
std::vector<AccurateMassSearchResult> results_part;
bool use_feature_adducts = param_.getValue("use_feature_adducts").toString() == "true";
if (use_feature_adducts && feature.metaValueExists(Constants::UserParam::DC_CHARGE_ADDUCTS))
{
queryByMZ(feature.getMZ(), feature.getCharge(), ion_mode, results_part, EmpiricalFormula(feature.getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS)));
}
else
{
queryByMZ(feature.getMZ(), feature.getCharge(), ion_mode, results_part);
}
bool isotope_export = param_.getValue("mzTab:exportIsotopeIntensities").toString() == "true";
for (Size hit_idx = 0; hit_idx < results_part.size(); ++hit_idx)
{
results_part[hit_idx].setObservedRT(feature.getRT());
results_part[hit_idx].setSourceFeatureIndex(feature_index);
results_part[hit_idx].setObservedIntensity(feature.getIntensity());
std::vector<double> mti;
if (isotope_export)
{
if (feature.metaValueExists("masstrace_intensity"))
{
mti = feature.getMetaValue("masstrace_intensity");
}
results_part[hit_idx].setMasstraceIntensities(mti);
}
// append
results.push_back(results_part[hit_idx]);
}
}
/**
  @brief Query the DB for a ConsensusFeature; @p results is cleared and filled
  with the hits, each augmented with RT, the consensus index and a per-map
  intensity vector (0.0 for maps without a sub-feature).

  @param number_of_maps total number of maps in the consensus experiment;
         determines the length of the per-hit intensity vector
  @throws Exception::IllegalArgument if init() was not called
*/
void AccurateMassSearchEngine::queryByConsensusFeature(const ConsensusFeature& cfeat, const Size& cf_index, const Size& number_of_maps, const String& ion_mode, std::vector<AccurateMassSearchResult>& results) const
{
  if (!is_initialized_)
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "AccurateMassSearchEngine::init() was not called!");
  }
  results.clear();

  // get hits
  queryByMZ(cfeat.getMZ(), cfeat.getCharge(), ion_mode, results);

  // collect meta data:
  // intensities for all maps as given in handles; 0 if no handle is present for a map
  const ConsensusFeature::HandleSetType& ind_feats(cfeat.getFeatures()); // sorted by MapIndices
  ConsensusFeature::const_iterator f_it = ind_feats.begin();
  std::vector<double> tmp_f_ints;
  // merge-walk over map indices 0..number_of_maps-1 against the sorted handle set:
  // advance f_it only when its map index is reached, otherwise record 0.0
  for (Size map_idx = 0; map_idx < number_of_maps; ++map_idx)
  {
    if (f_it != ind_feats.end() && map_idx == f_it->getMapIndex())
    {
      tmp_f_ints.push_back(f_it->getIntensity());
      ++f_it;
    }
    else
    {
      tmp_f_ints.push_back(0.0);
    }
  }

  // augment all hits with meta data
  for (Size hit_idx = 0; hit_idx < results.size(); ++hit_idx)
  {
    results[hit_idx].setObservedRT(cfeat.getRT());
    results[hit_idx].setSourceFeatureIndex(cf_index);
    // results_part[hit_idx].setObservedIntensity(cfeat.getIntensity());
    results[hit_idx].setIndividualIntensities(tmp_f_ints);
  }
}
/// Load the database and adduct files configured via parameters and mark the
/// engine as initialized. Must be called before queryBy*() or run().
void AccurateMassSearchEngine::init()
{
  // Loads the default mapping file (chemical formulas -> HMDB IDs)
  parseMappingFile_(db_mapping_file_);
  // This loads additional properties like common name, smiles, and inchi key for each HMDB id
  parseStructMappingFile_(db_struct_file_);
  // adduct lists for both polarities (the ion mode is chosen per query)
  parseAdductsFile_(pos_adducts_fname_, pos_adducts_);
  parseAdductsFile_(neg_adducts_fname_, neg_adducts_);

  is_initialized_ = true;
}
/**
  @brief Run the accurate mass search on a FeatureMap and export the hits as
  MzTab-M (novel IdentificationData-based 'ID' format).

  Registers input file, processing software/steps and score types in the
  map's IdentificationData, queries every feature, attaches matches, optionally
  removes unidentified features and finally exports to @p mztabm_out.

  @throws Exception::IllegalArgument if init() was not called
*/
void AccurateMassSearchEngine::run(FeatureMap& fmap, MzTabM& mztabm_out) const
{
  if (!is_initialized_)
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "AccurateMassSearchEngine::init() was not called!");
  }

  IdentificationData& id = fmap.getIdentificationData();
  IdentificationData::InputFileRef file_ref;
  IdentificationData::ScoreTypeRef mass_error_ppm_score_ref;
  IdentificationData::ScoreTypeRef mass_error_Da_score_ref;
  IdentificationData::ProcessingStepRef step_ref;

  StringList ms_run_paths;
  fmap.getPrimaryMSRunPath(ms_run_paths);
  // NOTE(review): ms_run_paths[0] below assumes at least one primary MS run
  // path is present — TODO confirm callers guarantee this for the 'ID' format.

  // set identifier for FeatureMap if missing (mandatory for OMS output)
  if (fmap.getIdentifier().empty())
  {
    fmap.setIdentifier(File::basename(ms_run_paths[0]));
  }

  // check ion_mode ('auto' resolves polarity from the first feature's meta data)
  String ion_mode_internal(ion_mode_);
  if (ion_mode_ == "auto")
  {
    ion_mode_internal = resolveAutoMode_(fmap);
  }

  // register input file
  IdentificationData::InputFile file(ms_run_paths[0]);
  file_ref = id.registerInputFile(file);
  std::vector<IdentificationData::InputFileRef> file_refs;
  file_refs.emplace_back(file_ref);

  // add previous DataProcessingStep(s) from FeatureMap
  auto data_processing = fmap.getDataProcessing();
  for (const auto& it : data_processing)
  {
    // software
    IdentificationData::ProcessingSoftware sw(it.getSoftware().getName(), it.getSoftware().getVersion());
    // transfer previous metadata
    sw.addMetaValues(it);
    IdentificationDataInternal::ProcessingSoftwareRef sw_ref = id.registerProcessingSoftware(sw);
    // ProcessingStep: software, input_file_refs, data_time, actions
    IdentificationData::ProcessingStep step(sw_ref, file_refs, it.getCompletionTime(), it.getProcessingActions());
    step_ref = id.registerProcessingStep(step);
    id.setCurrentProcessingStep(step_ref);
  }

  // add information about current tool
  // register a score type
  IdentificationData::ScoreType mass_error_ppm_score("MassErrorPPMScore", false); // false = lower is better
  mass_error_ppm_score_ref = id.registerScoreType(mass_error_ppm_score);
  IdentificationData::ScoreType mass_error_Da_score("MassErrorDaScore", false);
  mass_error_Da_score_ref = id.registerScoreType(mass_error_Da_score);
  // add the same score_refs to the ProcessingSoftware - to reference the Software with the
  // ObservationMatch - the order is important - the most important score first.
  std::vector<IdentificationDataInternal::ScoreTypeRef> assigned_scores{mass_error_ppm_score_ref, mass_error_Da_score_ref};

  // register software (connected to score)
  // CVTerm will be set in mztab-m based on the name
  // if the name is not available in PSI-OBO "analysis software" will be used.
  IdentificationData::ProcessingSoftware sw("AccurateMassSearch", VersionInfo::getVersion(), assigned_scores);
  sw.setMetaValue("reliability", "2");
  IdentificationData::ProcessingSoftwareRef sw_ref = id.registerProcessingSoftware(sw);

  // all supported search settings
  IdentificationData::DBSearchParam search_param;
  search_param.database = database_name_;
  search_param.database_version = database_version_;
  search_param.setMetaValue("database_location", database_location_);
  search_param.precursor_mass_tolerance = this->mass_error_value_;
  search_param.precursor_tolerance_ppm = this->mass_error_unit_ == "ppm" ? true : false;
  IdentificationData::SearchParamRef search_param_ref = id.registerDBSearchParam(search_param);

  // file has been processed by software performing a specific processing action.
  std::set<DataProcessing::ProcessingAction> actions;
  actions.insert(DataProcessing::IDENTIFICATION);
  IdentificationData::ProcessingStep step(sw_ref, file_refs, DateTime::now(), actions);
  step_ref = id.registerProcessingStep(step, search_param_ref);
  id.setCurrentProcessingStep(step_ref); // add the new step

  // map for storing overall results
  QueryResultsTable overall_results;
  Size dummy_count(0); // counts 'not-found' dummy entries (see extractQueryResults_)
  for (Size i = 0; i < fmap.size(); ++i)
  {
    std::vector<AccurateMassSearchResult> query_results = extractQueryResults_(fmap[i], i, ion_mode_internal, dummy_count);
    if (query_results.empty())
    {
      continue;
    }
    overall_results.push_back(query_results);
    addMatchesToID_(id, query_results, file_ref, mass_error_ppm_score_ref, mass_error_Da_score_ref, step_ref, fmap[i]); // MztabM
  }

  // filter FeatureMap to only have entries with an PrimaryID attached
  if (!keep_unidentified_masses_)
  {
    fmap.erase(std::remove_if(fmap.begin(), fmap.end(), [](const Feature& f){ return !f.hasPrimaryID(); }), fmap.end());
  }

  // add the identification data to the featureXML
  // to allow featureXML export (without the use of legacy_ID)
  // been transferred from the previous data stored within
  // the feature.
  IdentificationDataConverter::exportFeatureIDs(fmap, false);

  if (fmap.empty())
  {
    OPENMS_LOG_INFO << "FeatureMap was empty! No hits found!" << std::endl;
  }
  else
  { // division by 0 if used on empty fmap
    // NOTE: the percentage uses integer division (truncated, not rounded)
    OPENMS_LOG_INFO << "\nFound " << (overall_results.size() - dummy_count) << " matched masses (with at least one hit each)\nfrom " << fmap.size() << " features\n --> " << (overall_results.size()-dummy_count)*100/fmap.size() << "% explained" << std::endl;
  }

  exportMzTabM_(fmap, mztabm_out);
  return;
}
/**
  @brief Run the accurate mass search on a FeatureMap and export the hits as
  mzTab v1 (legacy ProteinID/PeptideID-based format).

  Queries every feature, annotates hits as PeptideIdentifications, optionally
  removes unidentified features, adds the mandatory dummy ProteinIdentification
  and exports to @p mztab_out.

  @throws Exception::IllegalArgument if init() was not called
*/
void AccurateMassSearchEngine::run(FeatureMap& fmap, MzTab& mztab_out) const
{
  if (!is_initialized_)
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "AccurateMassSearchEngine::init() was not called!");
  }
  StringList ms_run_paths;
  fmap.getPrimaryMSRunPath(ms_run_paths);

  // check ion_mode ('auto' resolves polarity from the first feature's meta data)
  String ion_mode_internal(ion_mode_);
  if (ion_mode_ == "auto")
  {
    ion_mode_internal = resolveAutoMode_(fmap);
  }

  // corresponding file locations
  std::vector<String> file_locations;
  if (!ms_run_paths.empty()) // if the file location is not available it will be set to UNKNOWN by MzTab
  {
    file_locations.emplace_back(ms_run_paths[0]);
  }

  // map for storing overall results
  QueryResultsTable overall_results;
  Size dummy_count(0); // counts 'not-found' dummy entries (see extractQueryResults_)
  for (Size i = 0; i < fmap.size(); ++i)
  {
    std::vector<AccurateMassSearchResult> query_results = extractQueryResults_(fmap[i], i, ion_mode_internal, dummy_count);
    if (query_results.empty())
    {
      continue;
    }
    overall_results.push_back(query_results);
    annotate_(query_results, fmap[i]);
  }

  // filter FeatureMap to only have entries with an identification
  // (pass by const reference -- the old lambda copied every Feature)
  if (!keep_unidentified_masses_)
  {
    fmap.erase(std::remove_if(fmap.begin(), fmap.end(), [](const Feature& f){ return f.getPeptideIdentifications().empty(); }), fmap.end());
  }

  // add dummy ProteinIdentification which is required to keep PeptideHits alive during store()
  fmap.getProteinIdentifications().resize(fmap.getProteinIdentifications().size() + 1);
  fmap.getProteinIdentifications().back().setIdentifier("AccurateMassSearchEngine");
  fmap.getProteinIdentifications().back().setSearchEngine("AccurateMassSearch");
  fmap.getProteinIdentifications().back().setDateTime(DateTime::now()); // static call, consistent with the MzTabM overload

  if (fmap.empty())
  {
    OPENMS_LOG_INFO << "FeatureMap was empty! No hits found!" << std::endl;
  }
  else
  { // division by 0 if used on empty fmap
    // NOTE: the percentage uses integer division (truncated, not rounded)
    OPENMS_LOG_INFO << "\nFound " << (overall_results.size() - dummy_count) << " matched masses (with at least one hit each)\nfrom " << fmap.size() << " features\n --> " << (overall_results.size()-dummy_count)*100/fmap.size() << "% explained" << std::endl;
  }

  exportMzTab_(overall_results, 1, mztab_out, file_locations);
  return;
}
/// Registers all search results of one feature in the IdentificationData:
/// one Observation for the feature itself, plus one IdentifiedCompound and
/// ObservationMatch per matching DB identifier of each result.
/// @throws Exception::MissingInformation if an identifier is missing from the struct mapping
void AccurateMassSearchEngine::addMatchesToID_(
  IdentificationData& id,
  const std::vector<AccurateMassSearchResult>& amr,
  const IdentificationData::InputFileRef& file_ref,
  const IdentificationData::ScoreTypeRef& mass_error_ppm_score_ref,
  const IdentificationData::ScoreTypeRef& mass_error_Da_score_ref,
  const IdentificationData::ProcessingStepRef& step_ref,
  BaseFeature& f) const
{
  // register feature as search item associated with input file
  IdentificationData::Observation obs(String(f.getUniqueId()), file_ref, f.getRT(), f.getMZ());
  auto obs_ref = id.registerObservation(obs);
  for (const AccurateMassSearchResult& r : amr)
  {
    // mass errors are per-result, not per-identifier -> compute them once
    const double mass_error_Da = r.getObservedMZ() - r.getCalculatedMZ();
    const double mass_error_ppm = r.getMZErrorPPM();
    for (const String& hmdb_id : r.getMatchingHMDBids())
    {
      // single lookup (previously: count() followed by a redundant find() on the
      // same map; the second error branch was unreachable)
      HMDBPropsMapping::const_iterator entry = hmdb_properties_mapping_.find(hmdb_id);
      if (entry == hmdb_properties_mapping_.end())
      {
        throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("DB entry '") + hmdb_id + "' not found in struct file!");
      }
      std::map<IdentificationDataInternal::ScoreTypeRef, double> scores{{mass_error_ppm_score_ref, mass_error_ppm},
                                                                       {mass_error_Da_score_ref, mass_error_Da}};
      IdentificationDataInternal::AppliedProcessingStep applied_processing_step(step_ref, scores);
      IdentificationDataInternal::AppliedProcessingSteps applied_processing_steps;
      applied_processing_steps.emplace_back(applied_processing_step);
      // register compound; struct mapping columns: [0] name, [1] SMILES, [2] InChIKey
      const String& name = entry->second[0];
      const String& smiles = entry->second[1];
      const String& inchi_key = entry->second[2];
      std::vector<String> names = {name}; // to fit legacy format - MetaValue
      std::vector<String> identifiers = {hmdb_id}; // to fit legacy format - MetaValue
      IdentificationData::IdentifiedCompound compound(hmdb_id,
                                                      EmpiricalFormula(r.getFormulaString()),
                                                      name,
                                                      smiles,
                                                      inchi_key,
                                                      applied_processing_steps);
      auto compound_ref = id.registerIdentifiedCompound(compound); // if already in DB -> NOP
      // compound-feature match
      IdentificationData::ObservationMatch match(compound_ref, obs_ref, r.getCharge());
      match.addScore(mass_error_ppm_score_ref, mass_error_ppm, step_ref);
      match.addScore(mass_error_Da_score_ref, mass_error_Da, step_ref);
      match.setMetaValue("identifier", identifiers);
      match.setMetaValue("description", names);
      match.setMetaValue("modifications", r.getFoundAdduct());
      match.setMetaValue("chemical_formula", r.getFormulaString());
      match.setMetaValue("mz_error_ppm", mass_error_ppm);
      match.setMetaValue("mz_error_Da", mass_error_Da);
      // add adduct to the ObservationMatch
      String adduct = r.getFoundAdduct(); // e.g. "M+Na;1+"
      if (!adduct.empty() && adduct != "null")
      {
        AdductInfo ainfo = AdductInfo::parseAdductString(adduct);
        auto adduct_ref = id.registerAdduct(ainfo);
        match.adduct_opt = adduct_ref;
      }
      // register ObservationMatch
      auto obs_match_ref = id.registerObservationMatch(match);
      IdentificationData::IdentifiedMolecule molecule(compound_ref);
      // add to Feature (set PrimaryID to add a reference to a specific molecule)
      f.setPrimaryID(molecule);
      f.addIDMatch(obs_match_ref);
    }
  }
}
/// Annotates a (Base/Consensus)Feature with the given search results in the
/// legacy PeptideIdentification/PeptideHit format (one hit per result).
/// @throws Exception::MissingInformation if an identifier is missing from the struct mapping
void AccurateMassSearchEngine::annotate_(const std::vector<AccurateMassSearchResult>& amr, BaseFeature& f) const
{
  // append a fresh PeptideIdentification which receives one hit per result
  f.getPeptideIdentifications().resize(f.getPeptideIdentifications().size() + 1);
  f.getPeptideIdentifications().back().setIdentifier(search_engine_identifier);
  for (const AccurateMassSearchResult& result : amr)
  {
    PeptideHit hit;
    hit.setMetaValue("identifier", result.getMatchingHMDBids());
    StringList names;
    for (const String& hmdb_id : result.getMatchingHMDBids())
    {
      // single lookup (previously: count() followed by a redundant find() on the
      // same map; the second error branch was unreachable)
      HMDBPropsMapping::const_iterator entry = hmdb_properties_mapping_.find(hmdb_id);
      if (entry == hmdb_properties_mapping_.end())
      {
        throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("DB entry '") + hmdb_id + "' not found in struct file!");
      }
      // get name from index 0 (2nd column in structMapping file)
      names.push_back(entry->second[0]);
    }
    hit.setCharge(result.getCharge());
    hit.setMetaValue("description", names);
    hit.setMetaValue("modifications", result.getFoundAdduct());
    hit.setMetaValue("chemical_formula", result.getFormulaString());
    hit.setMetaValue("mz_error_ppm", result.getMZErrorPPM());
    hit.setMetaValue("mz_error_Da", result.getObservedMZ() - result.getCalculatedMZ());
    f.getPeptideIdentifications().back().insertHit(hit);
  }
}
/// Runs the accurate mass search on every consensus feature of 'cmap',
/// annotates the hits and exports all results into 'mztab_out'.
/// @throws Exception::IllegalArgument if init() was not called beforehand
void AccurateMassSearchEngine::run(ConsensusMap& cmap, MzTab& mztab_out) const
{
  // fail early if the databases have not been parsed yet
  if (!is_initialized_)
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "AccurateMassSearchEngine::init() was not called!");
  }
  // resolve 'auto' ionization mode from the map's meta data if requested
  String resolved_ion_mode(ion_mode_);
  if (ion_mode_ == "auto")
  {
    resolved_ion_mode = resolveAutoMode_(cmap);
  }
  ConsensusMap::ColumnHeaders headers = cmap.getColumnHeaders();
  const Size map_count = headers.size();
  // collect the raw file locations of all sub-maps
  std::vector<String> file_locations;
  for (const auto& header : headers)
  {
    file_locations.emplace_back(header.second.filename);
  }
  // query each consensus feature and keep all results for the mzTab export
  QueryResultsTable overall_results;
  for (Size idx = 0; idx < cmap.size(); ++idx)
  {
    std::vector<AccurateMassSearchResult> results;
    queryByConsensusFeature(cmap[idx], idx, map_count, resolved_ion_mode, results);
    annotate_(results, cmap[idx]);
    overall_results.push_back(results);
  }
  // dummy ProteinIdentification: required so PeptideHits survive store()
  cmap.getProteinIdentifications().resize(cmap.getProteinIdentifications().size() + 1);
  ProteinIdentification& prot_id = cmap.getProteinIdentifications().back();
  prot_id.setIdentifier(search_engine_identifier);
  prot_id.setSearchEngine(search_engine_identifier);
  prot_id.setDateTime(DateTime().now());
  exportMzTab_(overall_results, map_count, mztab_out, file_locations);
}
// FeatureMap with IdentificationData attached!
/// Converts a FeatureMap (with IdentificationData annotated by run()) into an MzTab-M object.
void AccurateMassSearchEngine::exportMzTabM_(const FeatureMap& fmap, MzTabM& mztabm_out) const
{
  // delegate the whole conversion to the MzTabM exporter
  mztabm_out = MzTabM::exportFeatureMapToMzTabM(fmap);
}
/// Fills 'mztab_out' with metadata and one small-molecule section row per
/// matching DB identifier of every hit in 'overall_results'.
/// @param overall_results one entry (vector of hits) per queried (consensus) feature
/// @param number_of_maps number of sub-maps (= study variables to report)
/// @param mztab_out output mzTab object (metadata and rows are overwritten)
/// @param file_locations raw file locations for the ms_run metadata section
void AccurateMassSearchEngine::exportMzTab_(const QueryResultsTable& overall_results, const Size number_of_maps, MzTab& mztab_out, const std::vector<String>& file_locations) const
{
  // nothing to export -> leave 'mztab_out' untouched
  if (overall_results.empty())
  {
    return;
  }
  MzTabMetaData md = mztab_out.getMetaData();
  // may contain quantification data so we choose quantification
  md.mz_tab_type.fromCellString("Quantification");
  // we don't report assay so we mark this as a summary file
  md.mz_tab_mode.fromCellString("Summary");
  md.description.fromCellString("Result summary from accurate mass search.");
  // Set meta data
  MzTabParameter search_engine_score;
  search_engine_score.fromCellString("[,,MassErrorPPMScore,]");
  md.smallmolecule_search_engine_score[1] = search_engine_score;
  for (size_t i = 0; i != file_locations.size(); ++i)
  {
    MzTabMSRunMetaData run_md;
    run_md.location.set(file_locations[i]);
    md.ms_run[i + 1] = run_md; // +1 due to start at 1 in MzTab
  }
  // do not use overall_results.begin()->at(0).getIndividualIntensities().size(); since the first entry might be empty (no hit)
  Size n_study_variables = number_of_maps;
  for (Size i = 0; i != n_study_variables; ++i)
  {
    MzTabStudyVariableMetaData sv_md;
    sv_md.description.fromCellString("Accurate mass search result file.");
    md.study_variable[i + 1] = sv_md; // study variables are 1-based as well
  }
  mztab_out.setMetaData(md);
  // iterate the overall results table;
  // outer loop: queried features / middle: mass hits / inner: DB ids per hit
  MzTabSmallMoleculeSectionRows all_sm_rows;
  Size id_group(1); // rows sharing a group number come from the same feature
  std::map<String, UInt> adduct_stats; // adduct --> # occurrences
  std::map<String, std::set<Size> > adduct_stats_unique; // adduct --> # occurrences (count each feature only once)
  bool isotope_export = param_.getValue("mzTab:exportIsotopeIntensities").toString() == "true";
  for (QueryResultsTable::const_iterator tab_it = overall_results.begin(); tab_it != overall_results.end(); ++tab_it)
  {
    for (Size hit_idx = 0; hit_idx < tab_it->size(); ++hit_idx)
    {
      std::vector<String> matching_ids = (*tab_it)[hit_idx].getMatchingHMDBids();
      // iterate over multiple IDs, generate a new row for each one
      for (Size id_idx = 0; id_idx < matching_ids.size(); ++id_idx)
      {
        MzTabSmallMoleculeSectionRow mztab_row_record;
        // set the identifier field
        String hid_temp = matching_ids[id_idx];
        bool db_hit = (hid_temp != "null"); // "null" marks a mass without DB match
        if (db_hit)
        {
          MzTabString hmdb_id;
          hmdb_id.set(hid_temp);
          std::vector<MzTabString> hmdb_id_dummy;
          hmdb_id_dummy.push_back(hmdb_id);
          MzTabStringList string_dummy_list;
          string_dummy_list.set(hmdb_id_dummy);
          mztab_row_record.identifier = string_dummy_list;
          // set the chemical formula field
          MzTabString chem_form;
          String form_temp = (*tab_it)[hit_idx].getFormulaString();
          chem_form.set(form_temp);
          mztab_row_record.chemical_formula = chem_form;
          // NOTE(review): no end() check here — assumes every matched ID is
          // present in the struct mapping file; confirm this invariant
          HMDBPropsMapping::const_iterator entry = hmdb_properties_mapping_.find(hid_temp);
          // set the smiles field
          String smi_temp = entry->second[1]; // extract SMILES from struct mapping file
          MzTabString smi_string;
          smi_string.set(smi_temp);
          mztab_row_record.smiles = smi_string;
          // set the inchi_key field
          String inchi_temp = entry->second[2]; // extract INCHIKEY from struct mapping file
          MzTabString inchi_key;
          inchi_key.set(inchi_temp);
          mztab_row_record.inchi_key = inchi_key;
          // set description field (we use it for the common name of the compound)
          MzTabString common_name;
          common_name.set(entry->second[0]);
          mztab_row_record.description = common_name;
          // set the calc_mass_to_charge field (theoretical mass)
          MzTabDouble mass_to_charge;
          mass_to_charge.set((*tab_it)[hit_idx].getCalculatedMZ());
          mztab_row_record.calc_mass_to_charge = mass_to_charge;
          // set charge field
          MzTabInteger mcharge;
          mcharge.set((*tab_it)[hit_idx].getCharge());
          mztab_row_record.charge = mcharge;
        }
        // experimental RT, m/z, database field and version, search engine and (null) score is also set if no db entry was matched
        // set RT field
        MzTabDouble rt_temp;
        rt_temp.set((*tab_it)[hit_idx].getObservedRT());
        std::vector<MzTabDouble> rt_temp3(1, rt_temp);
        MzTabDoubleList observed_rt;
        observed_rt.set(rt_temp3);
        mztab_row_record.retention_time = observed_rt;
        MzTabDouble exp_mass_to_charge;
        exp_mass_to_charge.set((*tab_it)[hit_idx].getObservedMZ());
        mztab_row_record.exp_mass_to_charge = exp_mass_to_charge;
        // set database field
        String dbname_temp = database_name_;
        MzTabString dbname;
        dbname.set(dbname_temp);
        mztab_row_record.database = dbname;
        // set database_version field
        String dbver_temp = database_version_;
        MzTabString dbversion;
        dbversion.set(dbver_temp);
        mztab_row_record.database_version = dbversion;
        MzTabParameterList search_engines;
        search_engines.fromCellString("[,,AccurateMassSearch,]");
        mztab_row_record.search_engine = search_engines;
        // same score for all files since it used the mass-to-charge of the ConsensusFeature
        // for identification -> set as best_search_engine_score
        mztab_row_record.best_search_engine_score[1] = MzTabDouble((*tab_it)[hit_idx].getMZErrorPPM());
        // set search_engine_score per hit -> null
        MzTabDouble null_score;
        // NOTE(review): mzTab ms_run indices are 1-based, but this loop writes
        // key 0 for the first run — verify whether [i + 1] was intended
        for (size_t i = 0; i != number_of_maps; ++i) // start from one since it is already filled.
        {
          mztab_row_record.search_engine_score_ms_run[1][i] = null_score;
        }
        // check if we deal with a feature or consensus feature
        std::vector<double> indiv_ints(tab_it->at(hit_idx).getIndividualIntensities());
        std::vector<MzTabDouble> int_temp3;
        bool single_intensity = (indiv_ints.empty()); // empty -> single Feature, else ConsensusFeature
        if (single_intensity)
        {
          double int_temp((*tab_it)[hit_idx].getObservedIntensity());
          MzTabDouble int_temp2;
          int_temp2.set(int_temp);
          int_temp3.push_back(int_temp2);
        }
        else
        {
          for (Size ii = 0; ii < indiv_ints.size(); ++ii)
          {
            double int_temp(indiv_ints[ii]);
            MzTabDouble int_temp2;
            int_temp2.set(int_temp);
            int_temp3.push_back(int_temp2);
          }
        }
        for (Size i = 0; i != int_temp3.size(); ++i)
        {
          mztab_row_record.smallmolecule_abundance_study_variable[i + 1] = int_temp3[i];
        }
        // set smallmolecule_abundance_stdev_sub; not applicable for a single feature intensity, however must be filled. Otherwise, the mzTab export fails.
        MzTabDouble stdev_temp;
        stdev_temp.set(0.0);
        std::vector<MzTabDouble> stdev_temp3;
        if (indiv_ints.empty())
        {
          stdev_temp3.push_back(stdev_temp);
        }
        else
        {
          for (Size ii = 0; ii < indiv_ints.size(); ++ii)
          {
            stdev_temp3.push_back(stdev_temp);
          }
        }
        for (Size i = 0; i != stdev_temp3.size(); ++i)
        {
          mztab_row_record.smallmolecule_abundance_stdev_study_variable[i + 1] = stdev_temp3[i];
        }
        // set smallmolecule_abundance_std_error_sub; not applicable for a single feature intensity, however must be filled. Otherwise, the mzTab export fails.
        MzTabDouble stderr_temp2;
        stderr_temp2.set(0.0);
        std::vector<MzTabDouble> stderr_temp3;
        if (indiv_ints.empty())
        {
          stderr_temp3.push_back(stderr_temp2);
        }
        else
        {
          for (Size ii = 0; ii < indiv_ints.size(); ++ii)
          {
            stderr_temp3.push_back(stderr_temp2);
          }
        }
        for (Size i = 0; i != stderr_temp3.size(); ++i)
        {
          mztab_row_record.smallmolecule_abundance_std_error_study_variable[i + 1] = stderr_temp3[i];
        }
        // optional columns:
        std::vector<MzTabOptionalColumnEntry> optionals;
        // ppm error
        MzTabString ppmerr;
        if (db_hit)
        {
          ppmerr.set(String((*tab_it)[hit_idx].getMZErrorPPM()));
        }
        MzTabOptionalColumnEntry col0;
        col0.first = "opt_global_mz_ppm_error";
        col0.second = ppmerr;
        optionals.push_back(col0);
        // set found adduct ion
        MzTabString addion;
        if (db_hit)
        {
          String addion_temp((*tab_it)[hit_idx].getFoundAdduct());
          addion.set(addion_temp);
          ++adduct_stats[addion_temp]; // just some stats
          adduct_stats_unique[addion_temp].insert(id_group); // stats ...
        }
        MzTabOptionalColumnEntry col1;
        col1.first = "opt_global_adduct_ion";
        col1.second = addion;
        optionals.push_back(col1);
        // set isotope similarity score
        MzTabString sim_score;
        if (db_hit)
        {
          double sim_score_temp((*tab_it)[hit_idx].getIsotopesSimScore());
          std::stringstream read_in;
          read_in << sim_score_temp;
          String sim_score_temp2(read_in.str());
          sim_score.set(sim_score_temp2);
        }
        MzTabOptionalColumnEntry col2;
        col2.first = "opt_global_isosim_score";
        col2.second = sim_score;
        optionals.push_back(col2);
        // mass trace intensities (use NULL if not present)
        if (isotope_export)
        {
          MzTabString trace_int; // implicitly NULL
          std::vector<double> mt_int = (*tab_it)[hit_idx].getMasstraceIntensities();
          std::vector<std::string> mt_int_strlist;
          std::transform(std::begin(mt_int),
                         std::end(mt_int),
                         std::back_inserter(mt_int_strlist),
                         [](double d) { return std::to_string(d); }
          );
          String mt_int_str = ListUtils::concatenate(mt_int_strlist, ",");
          MzTabOptionalColumnEntry col_mt;
          col_mt.first = String("opt_global_MTint");
          col_mt.second = MzTabString(mt_int_str);
          optionals.push_back(col_mt);
        }
        // set neutral mass
        MzTabString neutral_mass_string;
        if (db_hit)
        {
          String neutral_mass((*tab_it)[hit_idx].getQueryMass());
          neutral_mass_string.fromCellString(neutral_mass);
        }
        MzTabOptionalColumnEntry col3;
        col3.first = "opt_global_neutral_mass";
        col3.second = neutral_mass_string;
        optionals.push_back(col3);
        // set id group; rows with the same id group number originated from the same feature
        String id_group_temp(id_group);
        MzTabString id_group_str;
        id_group_str.set(id_group_temp);
        MzTabOptionalColumnEntry col4;
        col4.first = "opt_global_id_group";
        col4.second = id_group_str;
        optionals.push_back(col4);
        mztab_row_record.opt_ = optionals;
        all_sm_rows.push_back(mztab_row_record);
      }
    }
    ++id_group; // next feature -> next group number
  }
  mztab_out.setSmallMoleculeSectionRows(all_sm_rows);
  // print some adduct stats:
  OPENMS_LOG_INFO << "Hits by adduct: #peaks explained (# matching db entries)'\n";
  for (std::map<String, UInt>::const_iterator it = adduct_stats.begin(); it != adduct_stats.end(); ++it)
  {
    OPENMS_LOG_INFO << " '" << it->first << "' : " << adduct_stats_unique[it->first].size() << " (" << it->second << ")\n";
  }
  OPENMS_LOG_INFO << std::endl;
}
/// protected methods
void AccurateMassSearchEngine::updateMembers_()
{
mass_error_value_ = (double)param_.getValue("mass_error_value");
mass_error_unit_ = param_.getValue("mass_error_unit").toString();
ion_mode_ = param_.getValue("ionization_mode").toString();
iso_similarity_ = param_.getValue("isotopic_similarity").toBool();
// use defaults if empty for all .tsv files
db_mapping_file_ = ListUtils::toStringList<std::string>(param_.getValue("db:mapping"));
if (db_mapping_file_.empty()) db_mapping_file_ = ListUtils::toStringList<std::string>(defaults_.getValue("db:mapping"));
db_struct_file_ = ListUtils::toStringList<std::string>(param_.getValue("db:struct"));
if (db_struct_file_.empty()) db_struct_file_ = ListUtils::toStringList<std::string>(defaults_.getValue("db:struct"));
pos_adducts_fname_ = param_.getValue("positive_adducts").toString();
neg_adducts_fname_ = param_.getValue("negative_adducts").toString();
keep_unidentified_masses_ = param_.getValue("keep_unidentified_masses").toBool();
// database names might have changed, so parse files again before next query
is_initialized_ = false;
legacyID_ = param_.getValue("id_format") == "legacy";
}
/// private methods
/// Parses the mass-to-identifier mapping file(s) into 'mass_mappings_', which
/// is sorted by mass afterwards so searchMass_() can use binary search.
/// Expected format: two header lines ("database_name\t{NAME}",
/// "database_version\t{VERSION}") followed by whitespace-separated data lines
/// "<mass> <formula> <id1> [<id2> ...]".
/// @throws Exception::FileNotFound, Exception::InvalidValue, Exception::InvalidParameter
void AccurateMassSearchEngine::parseMappingFile_(const StringList& db_mapping_file)
{
  mass_mappings_.clear();
  database_location_ = ListUtils::concatenate(db_mapping_file, '|');
  // load map_fname mapping file
  for (StringList::const_iterator it_f = db_mapping_file.begin(); it_f != db_mapping_file.end(); ++it_f)
  {
    String filename = *it_f;
    // load map_fname mapping file
    if (!File::readable(filename))
    {
      // throws Exception::FileNotFound if not found
      filename = File::find(filename);
    }
    String line;
    Size line_count(0);
    std::stringstream str_buf;
    std::istream_iterator<String> eol; // default-constructed = end-of-stream marker
    // OPENMS_LOG_DEBUG << "parsing " << fname << " file..." << std::endl;
    std::ifstream ifs(filename.c_str());
    while (getline(ifs, line))
    {
      line.trim();
      // skip empty lines
      if (line.empty()) continue;
      ++line_count;
      // first (non-empty) line: "database_name\t{NAME}" header
      if (line_count == 1)
      {
        std::vector<String> fields;
        line.trim().split('\t', fields);
        if (fields[0] == "database_name")
        {
          database_name_ = fields[1];
          continue;
        }
        else
        {
          throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("Mapping file (") + filename + "') must contain \"database_name\t{NAME}\" as first line.!", line);
        }
      }
      // second line: "database_version\t{VERSION}" header
      else if (line_count == 2)
      {
        std::vector<String> fields;
        line.trim().split('\t', fields);
        if (fields[0] == "database_version")
        {
          database_version_ = fields[1];
          continue;
        }
        else
        {
          throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("Mapping file (") + filename + "') must contain \"database_version\t{VERSION}\" as second line.!", line);
        }
      }
      // data line: tokenize on whitespace via istream_iterator (buffer is reused;
      // clear() resets the eof state before appending the next line)
      str_buf.clear();
      str_buf << line;
      std::istream_iterator<String> istr_it(str_buf);
      Size word_count(0);
      MappingEntry_ entry;
      while (istr_it != eol)
      {
        // OPENMS_LOG_DEBUG << *istr_it << " ";
        if (word_count == 0)
        {
          entry.mass = istr_it->toDouble();
        }
        else if (word_count == 1)
        {
          entry.formula = *istr_it;
          if (entry.mass == 0)
          { // recompute mass from formula
            entry.mass = EmpiricalFormula(entry.formula).getMonoWeight();
            //std::cerr << "mass of " << entry.formula << " is " << entry.mass << "\n";
          }
        }
        else // one or more IDs can follow
        {
          entry.massIDs.push_back(*istr_it);
        }
        ++word_count;
        ++istr_it;
      }
      // OPENMS_LOG_DEBUG << std::endl;
      // every data line needs at least mass, formula and one identifier
      if (entry.massIDs.empty())
      {
        throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("File '") + filename + "' in line " + line_count + " as '" + line + "' cannot be parsed. Found " + word_count + " entries, expected at least three!");
      }
      mass_mappings_.push_back(entry);
    }
  }
  // sort by mass so searchMass_() can use lower_bound/upper_bound
  std::sort(mass_mappings_.begin(), mass_mappings_.end(), CompareEntryAndMass_());
  OPENMS_LOG_INFO << "Read " << mass_mappings_.size() << " entries from mapping file!" << std::endl;
  return;
}
void AccurateMassSearchEngine::parseStructMappingFile_(const StringList& db_struct_file)
{
hmdb_properties_mapping_.clear();
for (StringList::const_iterator it_f = db_struct_file.begin(); it_f != db_struct_file.end(); ++it_f)
{
String filename = *it_f;
// load map_fname mapping file
if (!File::readable(filename))
{
// throws Exception::FileNotFound if not found
filename = File::find(filename);
}
std::ifstream ifs(filename.c_str());
String line;
// OPENMS_LOG_DEBUG << "parsing " << fname << " file..." << std::endl;
std::vector<String> parts;
while (getline(ifs, line))
{
line.trim();
line.split("\t", parts);
if (parts.size() == 4)
{
String hmdb_id_key(parts[0]);
if (hmdb_properties_mapping_.count(hmdb_id_key))
{
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("File '") + filename + "' in line '" + line + "' cannot be parsed. The ID entry was already used (see above)!");
}
std::copy(parts.begin() + 1, parts.end(), std::back_inserter(hmdb_properties_mapping_[hmdb_id_key]));
}
else
{
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("File '") + filename + "' in line '" + line + "' cannot be parsed. Expected four entries separated by tab. Found " + parts.size() + " entries!");
}
}
}
// add a null entry, so mzTab annotation does not discard 'not-found' features
std::vector<String> dummy_data(3, "null");
hmdb_properties_mapping_["null"] = dummy_data;
return;
}
void AccurateMassSearchEngine::parseAdductsFile_(const String& filename, std::vector<AdductInfo>& result)
{
result.clear();
String fname = filename;
// search for mapping file
if (!File::readable(fname))
{ // throws Exception::FileNotFound if not found
fname = File::find(filename);
}
TextFile tf(fname, true, -1, true); // trim & skip_empty
for (TextFile::ConstIterator it = tf.begin(); it != tf.end(); ++it)
{
result.push_back(AdductInfo::parseAdductString(*it));
}
OPENMS_LOG_INFO << "Read " << result.size() << " entries from adduct file '" << fname << "'." << std::endl;
return;
}
void AccurateMassSearchEngine::searchMass_(double neutral_query_mass, double diff_mass, std::pair<Size, Size>& hit_indices) const
{
//OPENMS_LOG_INFO << "searchMass: neutral_query_mass=" << neutral_query_mass << " diff_mz=" << diff_mz << " ppm allowed:" << mass_error_value_ << std::endl;
// binary search for formulas which are within diff_mz distance
if (mass_mappings_.empty())
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "There are no entries found in mass-to-ids mapping file! Aborting... ", "0");
}
std::vector<MappingEntry_>::const_iterator lower_it = std::lower_bound(mass_mappings_.begin(), mass_mappings_.end(), neutral_query_mass - diff_mass, CompareEntryAndMass_()); // first element equal or larger
std::vector<MappingEntry_>::const_iterator upper_it = std::upper_bound(mass_mappings_.begin(), mass_mappings_.end(), neutral_query_mass + diff_mass, CompareEntryAndMass_()); // first element greater than
Size start_idx = std::distance(mass_mappings_.begin(), lower_it);
Size end_idx = std::distance(mass_mappings_.begin(), upper_it);
hit_indices.first = start_idx;
hit_indices.second = end_idx;
return;
}
double AccurateMassSearchEngine::computeCosineSim_( const std::vector<double>& x, const std::vector<double>& y ) const
{
if (x.size() != y.size())
{
return 0.0;
}
double mixed_sum(0.0);
double x_squared_sum(0.0);
double y_squared_sum(0.0);
for (Size i = 0; i < x.size(); ++i)
{
mixed_sum += x[i] * y[i];
x_squared_sum += x[i] * x[i];
y_squared_sum += y[i] * y[i];
}
double denom(std::sqrt(x_squared_sum) * std::sqrt(y_squared_sum));
return (denom > 0.0) ? mixed_sum / denom : 0.0;
}
/// Cosine similarity between the observed mass-trace intensities of 'feat'
/// and the theoretical isotope distribution of 'form' (at most 5 isotopes).
double AccurateMassSearchEngine::computeIsotopePatternSimilarity_(const Feature& feat, const EmpiricalFormula& form) const
{
  // caller guarantees this meta value exists (checked in extractQueryResults_)
  Size num_traces = (Size)feat.getMetaValue(Constants::UserParam::NUM_OF_MASSTRACES);
  const Size MAX_THEORET_ISOS(5); // upper bound on compared isotope peaks
  Size common_size = std::min(num_traces, MAX_THEORET_ISOS);
  // compute theoretical isotope distribution
  IsotopeDistribution iso_dist(form.getIsotopeDistribution(CoarseIsotopePatternGenerator((UInt)common_size)));
  std::vector<double> theoretical_iso_dist;
  std::transform(
    iso_dist.begin(),
    iso_dist.end(),
    back_inserter(theoretical_iso_dist),
    [](const IsotopeDistribution::MassAbundance& p)
    {
      return p.getIntensity();
    });
  // same for observed isotope distribution
  // NOTE(review): assumes the 'masstrace_intensity' meta value converts to a
  // std::vector<double> — confirm with the producer of this annotation
  std::vector<double> observed_iso_dist;
  if (num_traces > 0)
  {
    observed_iso_dist = feat.getMetaValue("masstrace_intensity");
  }
  return computeCosineSim_(theoretical_iso_dist, observed_iso_dist);
}
std::vector<AccurateMassSearchResult> AccurateMassSearchEngine::extractQueryResults_(const Feature& feature, const Size& feature_index, const String& ion_mode_internal, Size& dummy_count) const
{
std::vector<AccurateMassSearchResult> query_results;
queryByFeature(feature, feature_index, ion_mode_internal, query_results);
if (query_results.empty())
{
return query_results;
}
bool is_dummy = (query_results[0].getMatchingIndex() == (Size) - 1);
if (is_dummy)
++dummy_count;
if (iso_similarity_ && !is_dummy)
{
if (!feature.metaValueExists(Constants::UserParam::NUM_OF_MASSTRACES))
{
OPENMS_LOG_WARN
<< "Feature does not contain meta value '" << Constants::UserParam::NUM_OF_MASSTRACES << "'. Cannot compute isotope similarity.";
}
else if ((Size) feature.getMetaValue(Constants::UserParam::NUM_OF_MASSTRACES) > 1)
{ // compute isotope pattern similarities (do not take the best-scoring one, since it might have really bad ppm or other properties --
// it is impossible to decide here which one is best
for (Size hit_idx = 0; hit_idx < query_results.size(); ++hit_idx)
{
String emp_formula(query_results[hit_idx].getFormulaString());
double iso_sim(computeIsotopePatternSimilarity_(feature, EmpiricalFormula(emp_formula)));
query_results[hit_idx].setIsotopesSimScore(iso_sim);
}
}
}
return query_results;
}
} // closing namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/IDScoreSwitcherAlgorithm.cpp | .cpp | 2,150 | 58 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Julianus Pfeuffer $
// $Authors: Julianus Pfeuffer $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/IDScoreSwitcherAlgorithm.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <unordered_map>
using namespace std;
namespace OpenMS
{
/// Constructor: registers all parameters with defaults and initializes the
/// cached member variables from them.
IDScoreSwitcherAlgorithm::IDScoreSwitcherAlgorithm() :
  IDScoreSwitcherAlgorithm::DefaultParamHandler("IDScoreSwitcherAlgorithm")
{
  // parameter registration (order determines how parameters are listed)
  defaults_.setValue("new_score", "", "Name of the meta value to use as the new score");
  defaults_.setValue("new_score_orientation", "", "Orientation of the new score (are higher or lower values better?)");
  defaults_.setValidStrings("new_score_orientation", {"lower_better","higher_better"});
  defaults_.setValue("new_score_type", "", "Name to use as the type of the new score (default: same as 'new_score')");
  defaults_.setValue("old_score", "", "Name to use for the meta value storing the old score (default: old score type)");
  defaults_.setValue("proteins", "false", "Apply to protein scores instead of PSM scores");
  defaults_.setValidStrings("proteins", {"true","false"});
  // copy the defaults into param_ and cache them in the member variables
  defaultsToParam_();
  updateMembers_();
}
/// Caches the current parameter values in member variables.
void IDScoreSwitcherAlgorithm::updateMembers_()
{
  new_score_ = param_.getValue("new_score").toString();
  new_score_type_ = param_.getValue("new_score_type").toString();
  if (new_score_type_.empty())
  {
    // default: the score type name equals the meta value name
    new_score_type_ = new_score_;
  }
  old_score_ = param_.getValue("old_score").toString();
  higher_better_ = (param_.getValue("new_score_orientation").toString() == "higher_better");
}
std::vector<String> IDScoreSwitcherAlgorithm::getScoreNames()
{
std::vector<String> names;
for (auto i : type_to_str_)
{
const std::set<String>& n = i.second;
for (auto j : n)
{
names.push_back(j);
}
}
return names;
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/MorpheusScore.cpp | .cpp | 7,081 | 195 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/MorpheusScore.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <cmath>
namespace OpenMS
{
/// Computes the Morpheus score for a peptide-spectrum match: the number of
/// matched theoretical peaks plus the fraction of experimental intensity
/// explained by the matches. Both spectra must be sorted by m/z.
/// @param fragment_mass_tolerance matching tolerance (Da or ppm, see next parameter)
/// @param fragment_mass_tolerance_unit_ppm true: interpret the tolerance in ppm
/// @param exp_spectrum experimental spectrum
/// @param theo_spectrum theoretical spectrum
/// @return Result with score, peak/match counts, MIC/TIC and mean mass errors
MorpheusScore::Result MorpheusScore::compute(double fragment_mass_tolerance,
                                             bool fragment_mass_tolerance_unit_ppm,
                                             const PeakSpectrum& exp_spectrum,
                                             const PeakSpectrum& theo_spectrum)
{
  const Size n_t(theo_spectrum.size());
  const Size n_e(exp_spectrum.size());
  MorpheusScore::Result psm = {};
  if (n_t == 0 || n_e == 0) { return psm; }
  Size t(0), e(0), matches(0);
  double total_intensity(0);
  // first pass (merge-join over both sorted spectra):
  // count matching peaks and make sure that every theoretical peak is matched at most once
  while (t < n_t && e < n_e)
  {
    const double theo_mz = theo_spectrum[t].getMZ();
    const double exp_mz = exp_spectrum[e].getMZ();
    const double d = exp_mz - theo_mz;
    const double max_dist_dalton = fragment_mass_tolerance_unit_ppm ? theo_mz * fragment_mass_tolerance * 1e-6 : fragment_mass_tolerance;
    if (fabs(d) <= max_dist_dalton) // match in tolerance window?
    {
      ++matches;
      ++t; // count theoretical peak only once
    }
    else if (d < 0) // exp. peak is left of theo. peak (outside of tolerance window)
    {
      total_intensity += exp_spectrum[e].getIntensity();
      ++e;
    }
    else if (d > 0) // theo. peak is left of exp. peak (outside of tolerance window)
    {
      ++t;
    }
  }
  // remaining experimental peaks also contribute to the total intensity (TIC)
  for (; e < n_e; ++e) { total_intensity += exp_spectrum[e].getIntensity(); }
  // second pass: sum up the intensity of every matched experimental peak
  // (counted once) and accumulate absolute/ppm mass errors
  t = 0;
  e = 0;
  double match_intensity(0.0);
  double sum_error(0.0);
  double sum_error_ppm(0.0);
  while (t < n_t && e < n_e)
  {
    const double theo_mz = theo_spectrum[t].getMZ();
    const double exp_mz = exp_spectrum[e].getMZ();
    const double d = exp_mz - theo_mz;
    const double max_dist_dalton = fragment_mass_tolerance_unit_ppm ? theo_mz * fragment_mass_tolerance * 1e-6 : fragment_mass_tolerance;
    if (fabs(d) <= max_dist_dalton) // match in tolerance window?
    {
      match_intensity += exp_spectrum[e].getIntensity();
      sum_error += fabs(d);
      sum_error_ppm += Math::getPPMAbs(exp_mz, theo_mz);
      ++e; // sum up experimental peak intensity only once
    }
    else if (d < 0) // exp. peak is left of theo. peak (outside of tolerance window)
    {
      ++e;
    }
    else if (d > 0) // theo. peak is left of exp. peak (outside of tolerance window)
    {
      ++t;
    }
  }
  // guard against division by zero: an all-zero-intensity spectrum would
  // otherwise produce a NaN score
  const double intensity_fraction = (total_intensity > 0.0) ? match_intensity / total_intensity : 0.0;
  psm.score = static_cast<double>(matches) + intensity_fraction;
  psm.n_peaks = n_t;
  psm.matches = matches;
  psm.MIC = match_intensity;
  psm.TIC = total_intensity;
  psm.err = matches > 0 ? sum_error / static_cast<double>(matches) : 1e10;
  psm.err_ppm = matches > 0 ? sum_error_ppm / static_cast<double>(matches) : 1e10;
  return psm;
}
// Charge-aware Morpheus score for one experimental vs. one theoretical spectrum.
//
// A theoretical peak counts as matched when an experimental peak lies within the
// fragment mass tolerance AND carries the identical charge (the only difference
// to the charge-unaware overload). Score = #matched theoretical peaks +
// (matched fraction of the experimental ion current); Result also carries MIC,
// TIC and the mean absolute mass error (Da and ppm) over the matches.
//
// Both spectra (and the parallel charge arrays) are assumed sorted by m/z --
// the two-pointer sweeps below rely on it; TODO confirm with callers.
MorpheusScore::Result MorpheusScore::compute(double fragment_mass_tolerance,
                                             bool fragment_mass_tolerance_unit_ppm,
                                             const PeakSpectrum& exp_spectrum,
                                             const DataArrays::IntegerDataArray& exp_charges,
                                             const PeakSpectrum& theo_spectrum,
                                             const DataArrays::IntegerDataArray& theo_charges)
{
  const Size n_t(theo_spectrum.size());
  const Size n_e(exp_spectrum.size());
  MorpheusScore::Result psm = {};
  if (n_t == 0 || n_e == 0) { return psm; } // nothing to match: zero-initialized result
  Size t(0), e(0), matches(0);
  double total_intensity(0);
  // First pass: count matching peaks and make sure that every theoretical peak
  // is matched at most once. Experimental peaks that are skipped over contribute
  // to total_intensity here; the remainder is added after the loop, so
  // total_intensity ends up as the full TIC of the experimental spectrum.
  while (t < n_t && e < n_e)
  {
    const double theo_mz = theo_spectrum[t].getMZ();
    const double exp_mz = exp_spectrum[e].getMZ();
    const double d = exp_mz - theo_mz;
    // tolerance window in Da (ppm tolerance is taken relative to the theoretical m/z)
    const double max_dist_dalton = fragment_mass_tolerance_unit_ppm ? theo_mz * fragment_mass_tolerance * 1e-6 : fragment_mass_tolerance;
    if (fabs(d) <= max_dist_dalton) // match in tolerance window?
    {
      const int exp_charge(exp_charges[e]);
      const int theo_charge(theo_charges[t]);
      if (exp_charge == theo_charge) // charges must agree for a true match
      {
        ++matches;
      }
      ++t; // count theoretical peak only once
    }
    else if (d < 0) // exp. peak is left of theo. peak (outside of tolerance window)
    {
      total_intensity += exp_spectrum[e].getIntensity();
      ++e;
    }
    else if (d > 0) // theo. peak is left of exp. peak (outside of tolerance window)
    {
      ++t;
    }
  }
  // remaining experimental peaks also belong to the TIC
  for (; e < n_e; ++e) { total_intensity += exp_spectrum[e].getIntensity(); }
  // Second pass: similar to above but we now make sure that the intensity of every
  // matched experimental peak is summed up (at most once) to form match_intensity;
  // absolute mass errors are accumulated alongside.
  t = 0;
  e = 0;
  double match_intensity(0.0);
  double sum_error(0.0);
  double sum_error_ppm(0.0);
  while (t < n_t && e < n_e)
  {
    const double theo_mz = theo_spectrum[t].getMZ();
    const double exp_mz = exp_spectrum[e].getMZ();
    const double d = exp_mz - theo_mz;
    const double max_dist_dalton = fragment_mass_tolerance_unit_ppm ? theo_mz * fragment_mass_tolerance * 1e-6 : fragment_mass_tolerance;
    if (fabs(d) <= max_dist_dalton) // match in tolerance window?
    {
      const int exp_charge(exp_charges[e]);
      const int theo_charge(theo_charges[t]);
      if (exp_charge == theo_charge) // only charge-consistent matches contribute
      {
        match_intensity += exp_spectrum[e].getIntensity();
        sum_error += fabs(d);
        sum_error_ppm += Math::getPPMAbs(exp_mz, theo_mz);
      }
      ++e; // sum up experimental peak intensity only once
    }
    else if (d < 0) // exp. peak is left of theo. peak (outside of tolerance window)
    {
      ++e;
    }
    else if (d > 0) // theo. peak is left of exp. peak (outside of tolerance window)
    {
      ++t;
    }
  }
  // NOTE(review): this divides 0/0 (NaN) if all experimental intensities are zero --
  // presumably intensities are positive in practice; verify against callers.
  const double intensity_fraction = match_intensity / total_intensity;
  psm.score = static_cast<double>(matches) + intensity_fraction;
  psm.n_peaks = theo_spectrum.size();
  psm.matches = matches;
  psm.MIC = match_intensity; // matched ion current
  psm.TIC = total_intensity; // total ion current
  psm.err = matches > 0 ? sum_error / static_cast<double>(matches) : 1e10;         // sentinel for "no matches"
  psm.err_ppm = matches > 0 ? sum_error_ppm / static_cast<double>(matches) : 1e10; // sentinel for "no matches"
  return psm;
}
}
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Julianus Pfeuffer $
// $Authors: Julianus Pfeuffer $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/BayesianProteinInferenceAlgorithm.h>
#include <OpenMS/ANALYSIS/ID/MessagePasserFactory.h>
#include <OpenMS/ANALYSIS/ID/FalseDiscoveryRate.h>
#include <OpenMS/ANALYSIS/ID/IDScoreGetterSetter.h>
#include <OpenMS/ANALYSIS/ID/IDBoostGraph.h>
#include <OpenMS/ANALYSIS/ID/IDScoreSwitcherAlgorithm.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <OpenMS/METADATA/ProteinIdentification.h>
#include <OpenMS/METADATA/ExperimentalDesign.h>
#include <OpenMS/DATASTRUCTURES/FASTAContainer.h>
#include <OpenMS/DATASTRUCTURES/StringView.h>
#include <OpenMS/PROCESSING/ID/IDFilter.h>
#include <OpenMS/CONCEPT/VersionInfo.h>
#include <set>
using namespace std;
using namespace OpenMS::Internal;
namespace OpenMS
{
/// A functor that specifies what to do on a connected component (IDBoostGraph::FilteredGraph).
/// For each CC it builds a Bethe factor graph with MessagePasserFactory, runs loopy belief
/// propagation (scheduler chosen via "loopy_belief_propagation:scheduling_type") and writes
/// the resulting posteriors back into the vertices of the boost graph.
class BayesianProteinInferenceAlgorithm::GraphInferenceFunctor
//: public std::function<unsigned long(IDBoostGraph::Graph&)>
{
public:
  //TODO think about restructuring the passed params (we do not need every param from the BPI class here.
  const Param& param_;     // algorithm parameters (model + loopy BP settings), owned by the caller
  unsigned int debug_lvl_; // >1 logs per-CC progress, >2 additionally dumps failing CCs as .dot files
  unsigned long cnt_;      // NOTE(review): initialized to 0 but never used afterwards in this class -- presumably a leftover counter

  explicit GraphInferenceFunctor(const Param& param, unsigned int debug_lvl):
      param_(param),
      debug_lvl_(debug_lvl),
      cnt_(0)
  {}

  /// Runs inference on one connected component @p fg (component index @p idx, used for logging).
  /// @return the number of BP messages passed; 0 if the CC was skipped or inference failed.
  unsigned long operator() (IDBoostGraph::Graph& fg, unsigned int idx) {
    //TODO do quick brute-force calculation if the cc is really small?
    // this skips CCs with just peps or prots. We only add edges between different types.
    // and if there were no edges, it would not be a CC.
    if (boost::num_vertices(fg) >= 2)
    {
      unsigned long nrEdges = boost::num_edges(fg);
      // avoid critical sections if not needed
      if (debug_lvl_ > 1)
      {
        // we do not need information about file and line so use LOG_INFO instead
        OPENMS_LOG_INFO << "Running cc " << String(idx) << "...\n";
        OPENMS_LOG_INFO << "CC " << String(idx) << " has " << String(nrEdges) << " edges.\n";
      }
      // becomes true once bigb.to_graph() transferred ownership of the message passers;
      // needed in the catch block to avoid leaking them (see below)
      bool graph_mp_ownership_acquired = false;
      bool update_PSM_probabilities = param_.getValue("update_PSM_probabilities").toBool();
      bool annotate_group_posterior = param_.getValue("annotate_group_probabilities").toBool();
      bool user_defined_priors = param_.getValue("user_defined_priors").toBool();
      bool regularize = param_.getValue("model_parameters:regularize").toBool();
      double pnorm = param_.getValue("loopy_belief_propagation:p_norm_inference");
      // non-positive p-norm encodes max-product inference (p = infinity)
      if (pnorm <= 0)
      {
        pnorm = std::numeric_limits<double>::infinity();
      }
      MessagePasserFactory<IDBoostGraph::vertex_t> mpf (param_.getValue("model_parameters:pep_emission"),
                                                        param_.getValue("model_parameters:pep_spurious_emission"),
                                                        param_.getValue("model_parameters:prot_prior"),
                                                        pnorm,
                                                        param_.getValue("model_parameters:pep_prior")); // the p used for marginalization: 1 = sum product, inf = max product
      evergreen::BetheInferenceGraphBuilder<IDBoostGraph::vertex_t> bigb;
      IDBoostGraph::Graph::vertex_iterator ui, ui_end;
      boost::tie(ui,ui_end) = boost::vertices(fg);
      // Store the IDs of the nodes for which you want the posteriors in the end
      vector<vector<IDBoostGraph::vertex_t>> posteriorVars;
      // direct neighbors are proteins on the "left" side and peptides on the "right" side
      // TODO Can be sped up using directed graph. Needs some restructuring in IDBoostGraph class first tho.
      vector<IDBoostGraph::vertex_t> in{};
      //std::vector<IDBoostGraph::vertex_t> out{};
      //TODO the try section could in theory be slimmed down a little bit. Start at first use of insertDependency maybe.
      // check performance impact.
      try
      {
        // translate every vertex of the CC into the corresponding factor(s) of the Bayesian network
        for (; ui != ui_end; ++ui)
        {
          IDBoostGraph::Graph::adjacency_iterator nbIt, nbIt_end;
          boost::tie(nbIt, nbIt_end) = boost::adjacent_vertices(*ui, fg);
          in.clear();
          //out.clear(); // we don't need out edges currently
          // collect "incoming" neighbors, i.e. those with a smaller variant type index
          for (; nbIt != nbIt_end; ++nbIt)
          {
            if (fg[*nbIt].which() < fg[*ui].which())
            {
              in.push_back(*nbIt);
            }
            /*else
            {
              out.push_back(*nbIt);
            }*/
          }
          //TODO introduce an enum for the types to make it more clear.
          //Or use the static_visitor pattern: You have to pass the vertex with its neighbors as a second arg though.
          if (fg[*ui].which() == 6) // pep hit = psm
          {
            if (regularize)
            {
              bigb.insert_dependency(mpf.createRegularizingSumEvidenceFactor(boost::get<PeptideHit *>(fg[*ui])
                  ->getPeptideEvidences().size(), in[0], *ui));
            }
            else
            {
              bigb.insert_dependency(mpf.createSumEvidenceFactor(boost::get<PeptideHit *>(fg[*ui])
                  ->getPeptideEvidences().size(), in[0], *ui));
            }
            // the PSM score (a posterior probability, see updateMembers_) is the evidence
            bigb.insert_dependency(mpf.createPeptideEvidenceFactor(*ui,
                boost::get<PeptideHit *>(fg[*ui])->getScore()));
            if (update_PSM_probabilities)
            {
              posteriorVars.push_back({*ui});
            }
          }
          else if (fg[*ui].which() == 2) // pep group
          {
            bigb.insert_dependency(mpf.createPeptideProbabilisticAdderFactor(in, *ui));
          }
          else if (fg[*ui].which() == 1) // prot group
          {
            bigb.insert_dependency(mpf.createPeptideProbabilisticAdderFactor(in, *ui));
            if (annotate_group_posterior)
            {
              posteriorVars.push_back({*ui});
            }
          }
          else if (fg[*ui].which() == 0) // prot
          {
            //TODO modify createProteinFactor to start with a modified prior based on the number of missing
            // peptides (later tweak to include conditional prob. for that peptide
            if (user_defined_priors)
            {
              // protein scores were moved to the "Prior" meta value upstream -- TODO confirm
              bigb.insert_dependency(mpf.createProteinFactor(*ui,
                  (double) boost::get<ProteinHit *>(fg[*ui])
                      ->getMetaValue("Prior")));
            }
            else
            {
              bigb.insert_dependency(mpf.createProteinFactor(*ui));
            }
            posteriorVars.push_back({*ui});
          }
        }
        // create factor graph for Bayesian network
        evergreen::InferenceGraph <IDBoostGraph::vertex_t> ig = bigb.to_graph();
        graph_mp_ownership_acquired = true;
        unsigned long maxMessages = param_
            .getValue("loopy_belief_propagation:max_nr_iterations");
        double initDampeningLambda = param_
            .getValue("loopy_belief_propagation:dampening_lambda");
        double initConvergenceThreshold = param_.getValue(
            "loopy_belief_propagation:convergence_threshold");
        std::string scheduler_type = param_.getValue(
            "loopy_belief_propagation:scheduling_type");
        // pick the message scheduler; unknown values fall back to the priority scheduler
        std::unique_ptr<evergreen::Scheduler<IDBoostGraph::vertex_t>> scheduler;
        if (scheduler_type == "priority")
        {
          scheduler = std::unique_ptr<evergreen::Scheduler<IDBoostGraph::vertex_t>>(
              new evergreen::PriorityScheduler<IDBoostGraph::vertex_t>(
                  initDampeningLambda,
                  initConvergenceThreshold,
                  maxMessages));
        }
        else if (scheduler_type == "subtree")
        {
          scheduler = std::unique_ptr<evergreen::Scheduler<IDBoostGraph::vertex_t>>(
              new evergreen::RandomSubtreeScheduler<IDBoostGraph::vertex_t>(
                  initDampeningLambda,
                  initConvergenceThreshold,
                  maxMessages));
        }
        else if (scheduler_type == "fifo")
        {
          scheduler = std::unique_ptr<evergreen::Scheduler<IDBoostGraph::vertex_t>>(
              new evergreen::FIFOScheduler<IDBoostGraph::vertex_t>(
                  initDampeningLambda,
                  initConvergenceThreshold,
                  maxMessages));
        }
        else
        {
          scheduler = std::unique_ptr<evergreen::Scheduler<IDBoostGraph::vertex_t>>(
              new evergreen::PriorityScheduler<IDBoostGraph::vertex_t>(
                  initDampeningLambda,
                  initConvergenceThreshold,
                  maxMessages));
        }
        scheduler->add_ab_initio_edges(ig);
        evergreen::BeliefPropagationInferenceEngine<IDBoostGraph::vertex_t> bpie(*scheduler, ig);
        vector<evergreen::LabeledPMF<IDBoostGraph::vertex_t>> posteriorFactors;
        unsigned long nrEdgesSq = nrEdges*nrEdges;
        // if the user-imposed message limit is small, run a single stage; otherwise run a
        // three-stage schedule with progressively stronger dampening/looser convergence
        if (maxMessages < nrEdgesSq * 3ul)
        {
          posteriorFactors = bpie.estimate_posteriors_in_steps(posteriorVars,
              {
                  std::make_tuple(maxMessages, initDampeningLambda, initConvergenceThreshold)});
        }
        else
        {
          posteriorFactors = bpie.estimate_posteriors_in_steps(posteriorVars,
              {
                  std::make_tuple(std::max<unsigned long>(10000ul, nrEdgesSq*2ul), initDampeningLambda, initConvergenceThreshold),
                  std::make_tuple(nrEdgesSq, std::min(0.49,initDampeningLambda*10), std::min(0.01,initConvergenceThreshold*10)),
                  std::make_tuple(nrEdgesSq/2ul, std::min(0.49,initDampeningLambda*100), std::min(0.01,initConvergenceThreshold*100))
              });
        }
        // TODO move the writing of statistics from IDBoostGraph here and write more stats
        // like nr messages and failure/success
        unsigned long nrMessagesNeeded = bpie.getNrMessagesPassed();
        // write each requested posterior back into the corresponding graph vertex
        for (auto const &posteriorFactor : posteriorFactors)
        {
          double posterior = 1.0;
          IDBoostGraph::SetPosteriorVisitor pv;
          IDBoostGraph::vertex_t nodeId = posteriorFactor.ordered_variables()[0];
          const evergreen::PMF &pmf = posteriorFactor.pmf();
          // If Index 0 is in the range of this result PMFFactor its probability is non-zero
          // and the prob of presence is 1-P(p=0). Important in multi-value factors like protein groups.
          // NOTE(review): table()[0ul] presumes the support starts at 0; ExtendedGraphInferenceFunctor
          // offsets by first_support() instead -- confirm supports here always begin at 0.
          if (0 >= pmf.first_support()[0] && 0 <= pmf.last_support()[0])
          {
            posterior = 1. - pmf.table()[0ul];
          }
          auto bound_visitor = std::bind(pv, std::placeholders::_1, posterior);
          boost::apply_visitor(bound_visitor, fg[nodeId]);
        }
        // avoid critical sections if not needed
        if (debug_lvl_ > 1)
        {
          // we do not need information about file and line so use LOG_INFO instead
          OPENMS_LOG_INFO << "Finished cc " << String(idx) << "after " << String(nrMessagesNeeded) << " messages\n";
        }
        //TODO we could write out/save the posteriors here,
        // so we can easily read them later for the best params of the grid search
        return nrMessagesNeeded;
      }
      catch (const std::runtime_error& /*e*/)
      {
        //TODO print failing component and implement the following options
        // 1) Leave posteriors (e.g. if Percolator was ran before. Make sure they are PPs not PEPs)
        // 2) set posteriors to priors (implicitly done right now)
        // 3) try another type of inference on that connected component. Different scheduler,
        //    different extreme probabilities or maybe best: trivial aggregation-based inference.
        // 4) Cancelling this and all other threads/ the loop and call this set of parameters invalid
        //For now we just warn and continue with the rest of the iterations. Might still be a valid run.
        // Graph builder needs to build otherwise it leaks memory.
        if (!graph_mp_ownership_acquired) bigb.to_graph();
        if (debug_lvl_ > 2)
        {
          // dump the failing CC (with the current model parameters encoded in the name) for debugging
          std::ofstream ofs;
          ofs.open (std::string("failed_cc_a") +
                    param_.getValue("model_parameters:pep_emission").toString() + "_b" +
                    param_.getValue("model_parameters:pep_spurious_emission").toString() + "_g" +
                    param_.getValue("model_parameters:prot_prior").toString() + "_c" +
                    param_.getValue("model_parameters:pep_prior").toString() + "_p" + String(pnorm) + "_"
                    + String(idx) + ".dot"
                    , std::ofstream::out);
          IDBoostGraph::printGraph(ofs, fg);
          //TODO print graph with peptide probabilities to see which evidences cause problems with which params
        }
        OPENMS_LOG_WARN << "Warning: Loopy belief propagation encountered a problem in a connected component. Skipping"
                           " inference there.\n";
        return 0;
      }
    }
    else
    {
      OPENMS_LOG_WARN << "Skipped cc with only one type (proteins or peptides)\n";
      return 0;
    }
  }
};
/// A functor that specifies what to do on a connected component with additional layers (i.e. implicitly extended
/// graph. @TODO static type checking
/// Simplified variant of GraphInferenceFunctor: always uses the priority scheduler, requests
/// posteriors only for proteins, and does not support regularization or user-defined priors.
class BayesianProteinInferenceAlgorithm::ExtendedGraphInferenceFunctor
//: public std::function<unsigned long(IDBoostGraph::Graph&)>
{
public:
  const Param& param_; // algorithm parameters (model + loopy BP settings), owned by the caller

  explicit ExtendedGraphInferenceFunctor(const Param& param):
      param_(param)
  {}

  /// Runs inference on one connected component @p fg.
  /// @return 1 on success, 0 if the CC was skipped or inference failed (TODO: real message count).
  unsigned long operator() (IDBoostGraph::Graph& fg, unsigned int /*idx*/) {
    //TODO do quick brute-force calculation if the cc is really small
    //TODO make use of idx
    double pnorm = param_.getValue("loopy_belief_propagation:p_norm_inference");
    // non-positive p-norm encodes max-product inference (p = infinity)
    if (pnorm <= 0)
    {
      pnorm = std::numeric_limits<double>::infinity();
    }
    // this skips CCs with just peps or prots. We only add edges between different types.
    // and if there were no edges, it would not be a CC.
    if (boost::num_vertices(fg) >= 2)
    {
      MessagePasserFactory<IDBoostGraph::vertex_t> mpf (param_.getValue("model_parameters:pep_emission"),
                                                        param_.getValue("model_parameters:pep_spurious_emission"),
                                                        param_.getValue("model_parameters:prot_prior"),
                                                        pnorm,
                                                        param_.getValue("model_parameters:pep_prior")); // the p used for marginalization: 1 = sum product, inf = max product
      evergreen::BetheInferenceGraphBuilder<IDBoostGraph::vertex_t> bigb;
      IDBoostGraph::Graph::vertex_iterator ui, ui_end;
      boost::tie(ui,ui_end) = boost::vertices(fg);
      // Store the IDs of the nodes for which you want the posteriors in the end (usually at least proteins)
      // Maybe later peptides (e.g. for an iterative procedure)
      vector<vector<IDBoostGraph::vertex_t>> posteriorVars;
      // direct neighbors are proteins on the "left" side and peptides on the "right" side
      // TODO can be sped up using directed graph. Requires some restructuring first.
      std::vector<IDBoostGraph::vertex_t> in{};
      //std::vector<IDBoostGraph::vertex_t> out{};
      //TODO the try section could in theory be slimmed down a little bit. First use of insertDependency maybe.
      // check performance impact.
      try
      {
        // translate every vertex of the CC into the corresponding factor(s) of the Bayesian network
        for (; ui != ui_end; ++ui)
        {
          IDBoostGraph::Graph::adjacency_iterator nbIt, nbIt_end;
          boost::tie(nbIt, nbIt_end) = boost::adjacent_vertices(*ui, fg);
          in.clear();
          //out.clear(); // we don't need out edges currently
          // collect "incoming" neighbors, i.e. those with a smaller variant type index
          for (; nbIt != nbIt_end; ++nbIt)
          {
            if (fg[*nbIt].which() < fg[*ui].which())
            {
              in.push_back(*nbIt);
            }
            /*else
            {
              out.push_back(*nbIt);
            }*/
          }
          //TODO introduce an enum for the types to make it more clear.
          //Or use the static_visitor pattern: You have to pass the vertex with its neighbors as a second arg though.
          if (fg[*ui].which() == 6) // pep hit = psm
          {
            bigb.insert_dependency(mpf.createSumEvidenceFactor(boost::get<PeptideHit*>(fg[*ui])->getPeptideEvidences().size(), in[0], *ui));
            bigb.insert_dependency(mpf.createPeptideEvidenceFactor(*ui, boost::get<PeptideHit*>(fg[*ui])->getScore()));
          }
          else if (fg[*ui].which() == 2) // pep group
          {
            bigb.insert_dependency(mpf.createPeptideProbabilisticAdderFactor(in, *ui));
          }
          else if (fg[*ui].which() == 1) // prot group
          {
            bigb.insert_dependency(mpf.createPeptideProbabilisticAdderFactor(in, *ui));
          }
          else if (fg[*ui].which() == 0) // prot
          {
            //TODO allow an already present prior probability here
            //TODO modify createProteinFactor to start with a modified prior based on the number of missing
            // peptides (later tweak to include conditional prob. for that peptide
            bigb.insert_dependency(mpf.createProteinFactor(*ui));
            posteriorVars.push_back({*ui});
          }
        }
        // create factor graph for Bayesian network
        evergreen::InferenceGraph<IDBoostGraph::vertex_t> ig = bigb.to_graph();
        //TODO parametrize the type of scheduler.
        evergreen::PriorityScheduler<IDBoostGraph::vertex_t> scheduler(param_.getValue("loopy_belief_propagation:dampening_lambda"),
                                                                       param_.getValue("loopy_belief_propagation:convergence_threshold"),
                                                                       param_.getValue("loopy_belief_propagation:max_nr_iterations"));
        scheduler.add_ab_initio_edges(ig);
        evergreen::BeliefPropagationInferenceEngine<IDBoostGraph::vertex_t> bpie(scheduler, ig);
        auto posteriorFactors = bpie.estimate_posteriors(posteriorVars);
        //TODO you could also save the indices of the peptides here and request + update their posteriors, too.
        // write each protein posterior P(present) back into the corresponding graph vertex
        for (auto const &posteriorFactor : posteriorFactors)
        {
          double posterior = 0.0;
          IDBoostGraph::SetPosteriorVisitor pv;
          unsigned long nodeId = posteriorFactor.ordered_variables()[0];
          const evergreen::PMF &pmf = posteriorFactor.pmf();
          // If Index 1 is in the range of this result PMFFactor it is non-zero
          // (here the table is indexed relative to first_support, unlike GraphInferenceFunctor)
          if (1 >= pmf.first_support()[0] && 1 <= pmf.last_support()[0])
          {
            posterior = pmf.table()[1 - pmf.first_support()[0]];
          }
          auto bound_visitor = std::bind(pv, std::placeholders::_1, posterior);
          boost::apply_visitor(bound_visitor, fg[nodeId]);
        }
        //TODO update to use actual nr of messages
        return 1;
      }
      catch (const std::runtime_error& /*e*/)
      {
        //TODO print failing component
        // set posteriors to priors or try another type of inference?
        // Think about cancelling all other threads/ the loop
        //For now we just warn and continue with the rest of the iterations. Might still be a valid run.
        // Graph builder needs to build otherwise it leaks memory.
        bigb.to_graph();
        OPENMS_LOG_WARN << "Warning: Loopy belief propagation encountered a problem in a connected component. Skipping inference there.\n";
        return 0;
      }
      //TODO we could write out the posteriors here, so we can easily read them for the best params of the grid search
    }
    else
    {
      OPENMS_LOG_WARN << "Skipped cc with only one type (proteins or peptides)\n";
      return 0;
    }
  }
};
/// Objective functor for the (alpha, beta, gamma) grid search: installs one candidate
/// parameter combination, reruns inference on every connected component and scores the
/// outcome with a combined target-decoy-AUC / calibration measure from FalseDiscoveryRate.
struct BayesianProteinInferenceAlgorithm::GridSearchEvaluator
{
  Param& param_;                 // parameter set mutated with each candidate combination
  IDBoostGraph& ibg_;            // graph the inference runs on
  const unsigned int debug_lvl_; // forwarded to the inference functor

  explicit GridSearchEvaluator(Param& param, IDBoostGraph& ibg, unsigned int debug_lvl):
      param_(param),
      ibg_(ibg),
      debug_lvl_(debug_lvl)
  {}

  /// Evaluates one grid point; larger return values are better.
  double operator() (double alpha, double beta, double gamma)
  {
    OPENMS_LOG_INFO << "Evaluating: " << alpha << " " << beta << " " << gamma << '\n';
    // prune grid points that are very unlikely to describe a sensible model
    if (beta - alpha >= 0.3 && alpha + beta <= 1.0)
    {
      OPENMS_LOG_INFO << "Skipping improbable parameter combination.. \n";
      return 0.;
    }

    // install the candidate model parameters and rerun inference on all CCs
    param_.setValue("model_parameters:pep_emission", alpha);
    param_.setValue("model_parameters:pep_spurious_emission", beta);
    param_.setValue("model_parameters:prot_prior", gamma);
    GraphInferenceFunctor inference{param_, debug_lvl_};
    ibg_.applyFunctorOnCCs(inference);

    // configure the FDR-based objective
    FalseDiscoveryRate fdr;
    Param fdr_params = fdr.getParameters();
    fdr_params.setValue("conservative", param_.getValue("param_optimize:conservative_fdr"));
    fdr_params.setValue("add_decoy_proteins", "true");
    fdr.setParameters(fdr_params);

    const double auc_weight = static_cast<double>(param_.getValue("param_optimize:aucweight"));
    if (param_.getValue("annotate_group_probabilities").toBool())
    {
      // evaluate on the level of indistinguishable protein groups
      ScoreToTgtDecLabelPairs group_scores{};
      ibg_.getProteinGroupScoresAndTgtFraction(group_scores);
      return fdr.applyEvaluateProteinIDs(group_scores, 1.0, 100, auc_weight);
    }
    // protein-level evaluation; assumes ALL proteins in the ID structure are used in the graph
    return fdr.applyEvaluateProteinIDs(ibg_.getProteinIDs(), 1.0, 100, auc_weight);
  }
};
/// Constructor: registers all default parameters (model, loopy BP and grid-search
/// settings) with the DefaultParamHandler and initializes the PSM filter functor
/// via updateMembers_().
BayesianProteinInferenceAlgorithm::BayesianProteinInferenceAlgorithm(unsigned int debug_lvl) :
    DefaultParamHandler("BayesianProteinInferenceAlgorithm"),
    ProgressLogger(),
    debug_lvl_(debug_lvl)
{
  // set default parameter values
  /* More parameter TODOs:
   * - grid search settings: e.g. fine, coarse, prob. threshold, lower convergence crit., own lists
   * - use own groups (and regularize)
   * - multiple runs
   * - what to do about multiple charge states or modded peptides
   * - use add. pep. infos (rt, ms1dev)
   * - add dependencies on peptides in same feature and psms to same peptide (so that there is competition)
   * - option to write graphfile?
   */
  /*
  defaults_.setValue("combine_indist_groups",
                     "false",
                     "Combine indistinguishable protein groups beforehand to only perform inference on them (probability for the whole group = is ANY of them present).");*/

  // --- general input handling -------------------------------------------------
  defaults_.setValue("psm_probability_cutoff",
                     0.001,
                     "Remove PSMs with probabilities less than this cutoff");
  defaults_.setMinFloat("psm_probability_cutoff", 0.0);
  defaults_.setMaxFloat("psm_probability_cutoff", 1.0);
  defaults_.setValue("top_PSMs",
                     1,
                     "Consider only top X PSMs per spectrum. 0 considers all.");
  defaults_.setMinInt("top_PSMs", 0);
  // BUGFIX: trailing space added so the concatenated description does not read "keepe.g."
  defaults_.setValue("keep_best_PSM_only",
                     "true",
                     "Epifany uses the best PSM per peptide for inference. Discard the rest (true) or keep "
                     "e.g. for quantification/reporting?");
  defaults_.setValidStrings("keep_best_PSM_only", {"true","false"});
  defaults_.setValue("update_PSM_probabilities",
                     "true",
                     "(Experimental:) Update PSM probabilities with their posteriors under consideration of the protein probabilities.");
  defaults_.setValidStrings("update_PSM_probabilities", {"true","false"});
  defaults_.setValue("user_defined_priors",
                     "false",
                     "(Experimental:) Uses the current protein scores as user-defined priors.");
  defaults_.setValidStrings("user_defined_priors", {"true","false"});
  defaults_.setValue("annotate_group_probabilities",
                     "true",
                     "Annotates group probabilities for indistinguishable protein groups (indistinguishable by "
                     "experimentally observed PSMs).");
  defaults_.setValidStrings("annotate_group_probabilities", {"true","false"});
  defaults_.setValue("use_ids_outside_features",
                     "false",
                     "(Only consensusXML) Also use IDs without associated features for inference?");
  defaults_.setValidStrings("use_ids_outside_features", {"true","false"});

  // --- Bayesian network model parameters -------------------------------------
  defaults_.addSection("model_parameters","Model parameters for the Bayesian network");
  defaults_.setValue("model_parameters:prot_prior",
                     -1.,
                     "Protein prior probability ('gamma' parameter). Negative values enable grid search for this param.");
  defaults_.setMinFloat("model_parameters:prot_prior", -1.0);
  defaults_.setMaxFloat("model_parameters:prot_prior", 1.0);
  defaults_.setValue("model_parameters:pep_emission",
                     -1.,
                     "Peptide emission probability ('alpha' parameter). Negative values enable grid search for this param.");
  defaults_.setMinFloat("model_parameters:pep_emission", -1.0);
  defaults_.setMaxFloat("model_parameters:pep_emission", 1.0);
  defaults_.setValue("model_parameters:pep_spurious_emission",
                     -1.,
                     "Spurious peptide identification probability ('beta' parameter)."
                     " Usually much smaller than emission from proteins. "
                     "Negative values enable grid search for this param.");
  defaults_.setMinFloat("model_parameters:pep_spurious_emission", -1.0);
  defaults_.setMaxFloat("model_parameters:pep_spurious_emission", 1.0);
  defaults_.setValue("model_parameters:pep_prior",
                     0.1,
                     "Peptide prior probability (experimental, should be covered by combinations of the other params).");
  defaults_.setMinFloat("model_parameters:pep_prior", 0.0);
  defaults_.setMaxFloat("model_parameters:pep_prior", 1.0);
  defaults_.setValue("model_parameters:regularize",
                     "false",
                     "Regularize the number of proteins that produce a peptide together (experimental, should be activated when using higher p-norms).");
  defaults_.setValidStrings("model_parameters:regularize",{"true","false"});
  defaults_.setValue("model_parameters:extended_model",
                     "false",
                     "Uses information from different peptidoforms also across runs"
                     " (automatically activated if an experimental design is given!)");
  defaults_.setValidStrings("model_parameters:extended_model", {"true","false"});

  // --- loopy belief propagation settings --------------------------------------
  defaults_.addSection("loopy_belief_propagation","Settings for the loopy belief propagation algorithm.");
  // BUGFIX: removed stale "(Not used yet)" -- GraphInferenceFunctor reads this value
  // and instantiates the corresponding scheduler.
  defaults_.setValue("loopy_belief_propagation:scheduling_type",
                     "priority",
                     "How to pick the next message:"
                     " priority = based on difference to last message (higher = more important)."
                     " fifo = first in first out."
                     " subtree = message passing follows a random spanning tree in each iteration");
  defaults_.setValidStrings("loopy_belief_propagation:scheduling_type", {"priority","fifo","subtree"});
  //TODO not yet implemented
  /*  defaults_.setValue("loopy_belief_propagation:message_difference",
                     "MSE",
                     "How to calculate the difference of distributions in updated messages.");
  defaults_.setValidStrings("loopy_belief_propagation:message_difference", {"MSE"});*/
  defaults_.setValue("loopy_belief_propagation:convergence_threshold",
                     1e-5,
                     "Initial threshold under which MSE difference a message is considered to be converged.");
  defaults_.setMinFloat("loopy_belief_propagation:convergence_threshold", 1e-9);
  defaults_.setMaxFloat("loopy_belief_propagation:convergence_threshold", 1.0);
  defaults_.setValue("loopy_belief_propagation:dampening_lambda",
                     1e-3,
                     "Initial value for how strongly should messages be updated in each step. "
                     "0 = new message overwrites old completely (no dampening; only recommended for trees),"
                     "0.5 = equal contribution of old and new message (stay below that),"
                     "In-between it will be a convex combination of both. Prevents oscillations but hinders convergence.");
  defaults_.setMinFloat("loopy_belief_propagation:dampening_lambda", 0.0);
  defaults_.setMaxFloat("loopy_belief_propagation:dampening_lambda", 0.49999);
  defaults_.setValue("loopy_belief_propagation:max_nr_iterations",
                     (1ul<<31)-1,
                     "(Usually auto-determined by estimated but you can set a hard limit here)."
                     " If not all messages converge, how many iterations should be done at max per connected component?");
  //I think restricting does not work because it only works for type Int (= int), not unsigned long
  //defaults_.setMinInt("loopy_belief_propagation:max_nr_iterations", 10);
  defaults_.setValue("loopy_belief_propagation:p_norm_inference",
                     1.0,
                     "P-norm used for marginalization of multidimensional factors. "
                     "1 == sum-product inference (all configurations vote equally) (default),"
                     "<= 0 == infinity = max-product inference (only best configurations propagate)"
                     "The higher the value the more important high probability configurations get."
                     );

  // --- grid search / parameter optimization -----------------------------------
  defaults_.addSection("param_optimize","Settings for the parameter optimization.");
  defaults_.setValue("param_optimize:aucweight",
                     0.3,
                     "How important is target decoy AUC vs calibration of the posteriors?"
                     " 0 = maximize calibration only,"
                     " 1 = maximize AUC only,"
                     " between = convex combination.");
  defaults_.setMinFloat("param_optimize:aucweight", 0.0);
  defaults_.setMaxFloat("param_optimize:aucweight", 1.0);
  defaults_.setValue("param_optimize:conservative_fdr",
                     "true",
                     "Use (D+1)/(T) instead of (D+1)/(T+D) for parameter estimation.");
  defaults_.setValidStrings("param_optimize:conservative_fdr", {"true","false"});
  defaults_.setValue("param_optimize:regularized_fdr",
                     "true",
                     "Use a regularized FDR for proteins without unique peptides.");
  defaults_.setValidStrings("param_optimize:regularized_fdr", {"true","false"});

  // write defaults into Param object param_
  defaultsToParam_();
  updateMembers_();
}
void BayesianProteinInferenceAlgorithm::updateMembers_()
{
//Note: the following lambda function can be changed, e.g. when we want to do a extremum removal etc. beforehand
/*
double min_nonnull_obs_probability = getDoubleOption_("min_psms_extreme_probability");
double max_nonone_obs_probability = getDoubleOption_("max_psms_extreme_probability");
// Currently unused
bool datadependent_extrema_removal = false;
if (datadependent_extrema_removal)
{
pair<double,double> minmax = checkExtremePSMScores_(mergedpeps);
min_nonnull_obs_probability = minmax.first;
max_nonone_obs_probability = minmax.second;
}
if (min_nonnull_obs_probability > 0.0 || max_nonone_obs_probability < 1.0 )
{
removeExtremeValues_(mergedpeps, min_nonnull_obs_probability, max_nonone_obs_probability);
}
*/
//TODO also convert potential PEPs to PPs in ProteinHits? In case you want to use them as priors or
// emergency posteriors?
//TODO test performance of getting the probability cutoff every time vs capture free lambda
double probability_cutoff = param_.getValue("psm_probability_cutoff");
checkConvertAndFilterPepHits_ = [probability_cutoff](PeptideIdentification &pep_id/*, const String& run_id*/)
{
//if (pep_id.getIdentifier() == run_id)
//{
String score_l = pep_id.getScoreType();
score_l = score_l.toLower();
if (score_l == "pep" || score_l == "posterior error probability" || score_l == "ms:1001493")
{
for (auto &pep_hit : pep_id.getHits())
{
double newScore = 1. - pep_hit.getScore();
pep_hit.setScore(newScore);
}
pep_id.setScoreType("Posterior Probability");
pep_id.setHigherScoreBetter(true);
}
else
{
if (score_l != "posterior probability")
{
throw OpenMS::Exception::InvalidParameter(
__FILE__,
__LINE__,
OPENMS_PRETTY_FUNCTION,
"Epifany needs Posterior (Error) Probabilities in the Peptide Hits. Use Percolator with PEP score"
" or run IDPosteriorErrorProbability first.");
}
}
//TODO remove hits "on-the-go"?
IDFilter::removeMatchingItems(pep_id.getHits(),
[&probability_cutoff](PeptideHit &hit)
{ return hit.getScore() < probability_cutoff; });
//}
};
}
/// Marks the given protein run as carrying Epifany-inferred posterior
/// probabilities (score type "Posterior Probability", higher = better).
void BayesianProteinInferenceAlgorithm::setScoreTypeAndSettings_(ProteinIdentification& proteinIDs)
{
  // record which engine (and version) produced the inference results
  proteinIDs.setInferenceEngine("Epifany");
  proteinIDs.setInferenceEngineVersion(VersionInfo::getVersion());
  // posteriors replace the previous protein scores; larger values are better
  proteinIDs.setScoreType("Posterior Probability");
  proteinIDs.setHigherScoreBetter(true);
}
/// Runs Bayesian protein inference on a ConsensusMap (study-wide inference).
/// Requires exactly one merged ProteinIdentification run and PSM scores that
/// are (convertible to) posterior error probabilities.
/// @param cmap consensus map with protein run and peptide IDs; modified in place
/// @param greedy_group_resolution if true, resolve shared peptides greedily after inference
/// @param exp_des optional experimental design, forwarded to the graph (extended model)
/// @throws Exception::MissingInformation if no run / more than one run / no PEP scores
void BayesianProteinInferenceAlgorithm::inferPosteriorProbabilities(
    ConsensusMap& cmap,
    bool greedy_group_resolution, // TODO probably better to add it as a Param
    std::optional<const ExperimentalDesign> exp_des)
{
  // For study-wide inference on ConsensusMap data, require a single merged protein run.
  // (Multiple runs should be merged using ConsensusMapMergerAlgorithm::mergeAllIDRuns() beforehand.)
  vector<ProteinIdentification>& proteinIDs = cmap.getProteinIdentifications();
  if (proteinIDs.empty())
  {
    throw OpenMS::Exception::MissingInformation(
        __FILE__,
        __LINE__,
        OPENMS_PRETTY_FUNCTION,
        "No protein identification runs provided for inference.");
  }
  if (proteinIDs.size() != 1)
  {
    throw OpenMS::Exception::MissingInformation(
        __FILE__,
        __LINE__,
        OPENMS_PRETTY_FUNCTION,
        "ConsensusMap-based inference requires a single merged ProteinIdentification run. "
        "Merge runs first (ConsensusMapMergerAlgorithm::mergeAllIDRuns).");
  }
  // switch all PSM scores to PEP; fails if no PEP-compatible score is present
  IDScoreSwitcherAlgorithm switcher;
  Size counter(0);
  try
  {
    switcher.switchToGeneralScoreType(cmap, IDScoreSwitcherAlgorithm::ScoreType::PEP, counter);
  }
  catch (OpenMS::Exception::MissingInformation& /*e*/)
  {
    throw OpenMS::Exception::MissingInformation(
        __FILE__,
        __LINE__,
        OPENMS_PRETTY_FUNCTION,
        "Epifany needs Posterior Error Probabilities in the Peptide Hits. Use Percolator with PEP score"
        " or run IDPosteriorErrorProbability first.");
  }
  //TODO filtering needs to account for run info if we allow running on a subset.
  // convert scores to probabilities and filter hits below the probability cutoff
  cmap.applyFunctionOnPeptideIDs(checkConvertAndFilterPepHits_);
  //TODO BIG filter empty PeptideIDs afterwards
  bool keep_all_psms = param_.getValue("keep_best_PSM_only").toString() == "false";
  bool user_defined_priors = param_.getValue("user_defined_priors").toBool();
  bool use_unannotated_ids = param_.getValue("use_ids_outside_features").toBool();
  bool use_run_info = param_.getValue("model_parameters:extended_model").toBool();
  Size nr_top_psms = static_cast<Size>(param_.getValue("top_PSMs"));
  FalseDiscoveryRate pepFDR;
  Param p = pepFDR.getParameters();
  // I think it is best to always use the best PSM only for comparing PSM FDR before-after
  // since inference might change the ranking.
  p.setValue("use_all_hits", "false");
  pepFDR.setParameters(p);
  //TODO allow use unassigned everywhere
  //TODO actually if we just want to use replicate information, we can still filter for best per run,
  // but the extended model is currently coupled to multiple charge and mod states (which would be removed)
  if (!use_run_info)
  {
    // either physically remove all but the top PSMs, or just annotate the best ones
    if (!keep_all_psms)
    {
      IDFilter::keepBestPerPeptidePerRun(cmap, true,
                                         true, static_cast<unsigned int>(nr_top_psms));
      IDFilter::removeEmptyIdentifications(cmap);
    }
    else
    {
      IDFilter::annotateBestPerPeptidePerRun(cmap, true,
                                             true, static_cast<unsigned int>(nr_top_psms));
    }
  }
  // filter in both cases since the PSM filtering by score is always active
  IDFilter::removeUnreferencedProteins(cmap, true);
  // extract proteins that are "theoretically" unreferenced, since
  // unassigned PSMs might not be considered in inference (depending on param).
  // NOTE: this would in theory not be necessary if we calculate the FDR based on
  // the graph only (because then only the used proteins are in the graph).
  // But FDR on the ProteinID data structure should be faster.
  std::map<String, vector<ProteinHit>> unassigned{};
  if (!use_unannotated_ids)
  {
    unassigned = IDFilter::extractUnassignedProteins(cmap);
  }
  // Single-run inference (ConsensusMap is expected to be merged upfront).
  {
    resetProteinScores_(proteinIDs[0], user_defined_priors);
    // TODO try to calc AUC partial only (e.g. up to 5% FDR)
    if (!keep_all_psms)
      OPENMS_LOG_INFO << "Peptide FDR AUC before protein inference: " << pepFDR.rocN(cmap, 0) << '\n';
    setScoreTypeAndSettings_(proteinIDs[0]);
    // build the bipartite protein-peptide graph and run inference on it
    IDBoostGraph ibg(proteinIDs[0], cmap, nr_top_psms, use_run_info, use_unannotated_ids, keep_all_psms, exp_des);
    inferPosteriorProbabilities_(ibg);
    if (greedy_group_resolution) ibg.resolveGraphPeptideCentric(true);
    if (!keep_all_psms)
      OPENMS_LOG_INFO << "Peptide FDR AUC after protein inference: " << pepFDR.rocN(cmap, 0) << '\n';
    // re-add the previously extracted unassigned proteins with score 0
    if (!use_unannotated_ids)
    {
      auto& unassigned_for_run = unassigned.at(proteinIDs[0].getIdentifier());
      for (auto& h : unassigned_for_run) h.setScore(0.);
      proteinIDs[0].getHits().reserve(proteinIDs[0].getHits().size() + unassigned_for_run.size());
      std::move(std::begin(unassigned_for_run), std::end(unassigned_for_run), std::back_inserter(proteinIDs[0].getHits()));
      unassigned_for_run.clear();
    }
    proteinIDs[0].fillIndistinguishableGroupsWithSingletons();
  }
}
/// Core inference on an already-built protein-peptide graph: optionally grid-searches
/// the model hyperparameters (alpha/beta/gamma), then runs the (extended) message
/// passing functor per connected component and annotates indistinguishable groups.
/// @param ibg graph built from the protein run and peptide IDs; scores are written back through it
void BayesianProteinInferenceAlgorithm::inferPosteriorProbabilities_(
    IDBoostGraph& ibg)
{
  bool use_run_info = param_.getValue("model_parameters:extended_model").toBool();
  ibg.computeConnectedComponents();
  ibg.clusterIndistProteinsAndPeptides();
  vector<double> gamma_search;
  vector<double> beta_search;
  vector<double> alpha_search;
  GridSearch<double,double,double> gs = initGridSearchFromParams_(alpha_search, beta_search, gamma_search);
  // indices of the best (alpha, beta, gamma) combination found by the grid search
  std::array<size_t, 3> bestParams{{0, 0, 0}};
  //Save initial settings and deactivate certain features to save time during grid search and to not
  // interfere with later runs.
  // TODO We could think about optimizing PSM FDR as another goal though.
  bool update_PSM_probabilities = param_.getValue("update_PSM_probabilities").toBool();
  param_.setValue("update_PSM_probabilities","false");
  bool annotate_group_posteriors = param_.getValue("annotate_group_probabilities").toBool();
  // during grid search we evaluate on single protein-level
  param_.setValue("annotate_group_probabilities","false");
  //TODO run grid search on reduced graph? Then make sure, untouched protein/peps do not affect evaluation results.
  //TODO if not, think about storing results temporary (file? mem?) and only keep the best in the end
  //TODO think about running grid search on the small CCs only (maybe it's enough)
  if (gs.getNrCombos() > 1)
  {
    OPENMS_LOG_INFO << "Testing " << gs.getNrCombos() << " param combinations.\n";
    gs.evaluate(GridSearchEvaluator(param_, ibg, debug_lvl_), -1.0, bestParams);
  }
  else
  {
    OPENMS_LOG_INFO << "Only one combination specified: Skipping grid search.\n";
  }
  // translate best indices back into parameter values
  double bestGamma = gamma_search[bestParams[2]];
  double bestBeta = beta_search[bestParams[1]];
  double bestAlpha = alpha_search[bestParams[0]];
  OPENMS_LOG_INFO << "Best params found at a=" << bestAlpha << ", b=" << bestBeta << ", g=" << bestGamma << '\n';
  OPENMS_LOG_INFO << "Running with best parameters:\n";
  param_.setValue("model_parameters:prot_prior", bestGamma);
  param_.setValue("model_parameters:pep_emission", bestAlpha);
  param_.setValue("model_parameters:pep_spurious_emission", bestBeta);
  // Reset original values for those two options
  param_.setValue("update_PSM_probabilities", update_PSM_probabilities ? "true" : "false");
  param_.setValue("annotate_group_probabilities", annotate_group_posteriors ? "true" : "false");
  // run the actual inference per connected component
  if (!use_run_info)
  {
    GraphInferenceFunctor gif {param_, debug_lvl_};
    ibg.applyFunctorOnCCs(gif);
  }
  else
  {
    //TODO under construction
    ExtendedGraphInferenceFunctor gif {param_};
    ibg.applyFunctorOnCCs(gif);
  }
  //uses the existing protein group nodes in the graph
  ibg.annotateIndistProteins(true);
}
/// Builds the hyperparameter grid for the grid search. A parameter value outside
/// [0,1] means "let the algorithm optimize it" and expands into a default grid;
/// a valid value is kept as a single-element grid (no search for that axis).
GridSearch<double,double,double> BayesianProteinInferenceAlgorithm::initGridSearchFromParams_(
    vector<double>& alpha_search,
    vector<double>& beta_search,
    vector<double>& gamma_search
    )
{
  // Note: gamma_search need not be expanded when user_defined_priors is on (it would be unused).
  double alpha = param_.getValue("model_parameters:pep_emission");
  double beta = param_.getValue("model_parameters:pep_spurious_emission");
  double gamma = param_.getValue("model_parameters:prot_prior");
  // out-of-range value -> default grid; in-range value -> fixed single value
  auto makeGrid = [](double value, std::initializer_list<double> defaults) -> vector<double>
  {
    if (value > 1.0 || value < 0.0)
    {
      return vector<double>(defaults);
    }
    return {value};
  };
  gamma_search = makeGrid(gamma, {0.2, 0.5, 0.7});
  beta_search = makeGrid(beta, {0.01, 0.2, 0.4});
  alpha_search = makeGrid(alpha, {0.1, 0.25, 0.5, 0.65, 0.8});
  return GridSearch<double,double,double>{alpha_search, beta_search, gamma_search};
}
void BayesianProteinInferenceAlgorithm::inferPosteriorProbabilities(
std::vector<ProteinIdentification>& proteinIDs,
PeptideIdentificationList& peptideIDs,
bool greedy_group_resolution,
std::optional<const ExperimentalDesign> exp_des)
{
//TODO The following is a sketch to think about how to include missing peptides
// Requirement: Datastructures for peptides first
// Options:
// - Require annotation from peptideindexer
// - Require sequence from peptideindexer
// - Require fasta file and annotate here
/*
// get enzyme settings from peptideID
const DigestionEnzymeProtein enzyme = proteinIDs[0].getSearchParameters().digestion_enzyme;
Size missed_cleavages = proteinIDs[0].getSearchParameters().missed_cleavages;
EnzymaticDigestion ed{};
ed.setEnzyme(&enzyme);
ed.setMissedCleavages(missed_cleavages);
std::vector<StringView> tempDigests{};
// if not annotated, assign max nr of digests
for (auto& protein : proteinIDs[0].getHits())
{
// check for existing max nr peptides metavalue annotation
if (!protein.metaValueExists("missingTheorDigests"))
{
if(!protein.getSequence().empty())
{
tempDigests.clear();
//TODO check which peptide lengths we should support. Parameter?
//Size nrDiscarded =
ed.digestUnmodified(protein.getSequence(), tempDigests);
//TODO add the discarded digestions products, too?
protein.setMetaValue("missingTheorDigests", tempDigests.size());
}
else
{
//TODO Exception
std::cerr << "Protein sequence not annotated" << std::endl;
}
}
}*/
//TODO actually loop over all proteinID runs.
if (proteinIDs.size() > 1)
{
OPENMS_LOG_WARN << "Warning: more than one protein identification run provided for inference. Only "
"the first will be processed for now.\n";
}
// groups will be reannotated
proteinIDs[0].getIndistinguishableProteins().clear();
bool use_run_info = param_.getValue("model_parameters:extended_model").toBool();
//TODO BIG filtering needs to account for run info if only a subset is to be processed!
std::for_each(peptideIDs.begin(), peptideIDs.end(), checkConvertAndFilterPepHits_);
IDFilter::removeEmptyIdentifications(peptideIDs);
Size nr_top_psms = static_cast<Size>(param_.getValue("top_PSMs"));
bool keep_all_psms = param_.getValue("keep_best_PSM_only").toString() == "false";
//TODO actually if we just want to use replicate information, we can still filter for best per run,
// but the extended model is currently coupled to multiple charge and mod states (which would be removed)
if (!use_run_info)
{
if (!keep_all_psms)
{
IDFilter::keepBestPerPeptidePerRun(proteinIDs, peptideIDs, true,
true, static_cast<unsigned int>(nr_top_psms));
IDFilter::removeEmptyIdentifications(peptideIDs);
}
else
{
IDFilter::annotateBestPerPeptidePerRun(proteinIDs, peptideIDs, true,
true, static_cast<unsigned int>(nr_top_psms));
}
}
IDFilter::removeUnreferencedProteins(proteinIDs, peptideIDs);
FalseDiscoveryRate pepFDR;
Param p = pepFDR.getParameters();
// I think it is best to always use the best PSM only for comparing PSM FDR before-after
// since inference might change the ranking.
p.setValue("use_all_hits", "false");
pepFDR.setParameters(p);
bool user_defined_priors = param_.getValue("user_defined_priors").toBool();
resetProteinScores_(proteinIDs[0], user_defined_priors);
if (!keep_all_psms)
OPENMS_LOG_INFO << "Peptide FDR AUC before protein inference: " << pepFDR.rocN(peptideIDs, 0, proteinIDs[0].getIdentifier()) << '\n';
setScoreTypeAndSettings_(proteinIDs[0]);
IDBoostGraph ibg(proteinIDs[0], peptideIDs, nr_top_psms, use_run_info, keep_all_psms, exp_des);
inferPosteriorProbabilities_(ibg);
if (greedy_group_resolution) ibg.resolveGraphPeptideCentric(true);
proteinIDs[0].fillIndistinguishableGroupsWithSingletons();
if (!keep_all_psms)
OPENMS_LOG_INFO << "Peptide FDR AUC after protein inference: " << pepFDR.rocN(peptideIDs, 0, proteinIDs[0].getIdentifier()) << '\n';
}
/// Zeroes all protein scores of the given run. If @p keep_old_as_prior is set,
/// the previous score of each hit is stashed in the "Prior" meta value first,
/// so it can later serve as a user-defined prior for inference.
void BayesianProteinInferenceAlgorithm::resetProteinScores_(ProteinIdentification& protein_id, bool keep_old_as_prior)
{
  for (auto& hit : protein_id.getHits())
  {
    if (keep_old_as_prior)
    {
      hit.setMetaValue("Prior", hit.getScore());
    }
    hit.setScore(0.);
  }
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/ConsensusIDAlgorithmRanks.cpp | .cpp | 2,977 | 79 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Sven Nahnsen, Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/ConsensusIDAlgorithmRanks.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <numeric> // for "accumulate"
using namespace std;
namespace OpenMS
{
/// Default constructor: registers the algorithm name with DefaultParamHandler.
ConsensusIDAlgorithmRanks::ConsensusIDAlgorithmRanks()
{
  setName("ConsensusIDAlgorithmRanks"); // DefaultParamHandler
}
void ConsensusIDAlgorithmRanks::preprocess_(
    PeptideIdentificationList& ids)
{
  // Every peptide hit (sequence) gets a rank-based score per ID run: the best
  // hit of a run scores 0, the second best 1, ..., up to considered_hits - 1.
  // A hit absent from a run implicitly contributes considered_hits ("not
  // found"). The per-hit scores are later averaged and normalized to the
  // range (0, 1], where 1 is best (see getAggregateScore_).
  current_number_of_runs_ = ((number_of_runs_ > 0) ?
                             number_of_runs_ : ids.size());
  current_considered_hits_ = considered_hits_;
  const bool find_max_hits = (considered_hits_ == 0);
  for (auto& pep_id : ids)
  {
    pep_id.sort();
    Size next_rank = 0; // rank-based score, counting from 0
    for (auto& hit : pep_id.getHits())
    {
      hit.setScore(next_rank);
      ++next_rank;
    }
    pep_id.setScoreType("ConsensusID_ranks");
    pep_id.setHigherScoreBetter(true); // not true now, but after normalizing
    // with "considered_hits" unset (0), use the max. number of hits instead:
    if (find_max_hits &&
        (pep_id.getHits().size() > current_considered_hits_))
    {
      current_considered_hits_ = pep_id.getHits().size();
    }
  }
}
/// Aggregates the per-run rank scores of one peptide sequence into a consensus
/// score in the range 0-1 (1 = best hit in every run). Runs in which the
/// peptide was not observed contribute the worst possible "not found" score.
double ConsensusIDAlgorithmRanks::getAggregateScore_(vector<double>& scores,
                                                     bool /* higher_better */)
{
  double total = accumulate(scores.begin(), scores.end(), 0.0);
  // each run without an observation counts as "not found":
  total += ((current_number_of_runs_ - scores.size()) *
            current_considered_hits_);
  const double worst_possible = current_considered_hits_ *
                                current_number_of_runs_;
  // invert and normalize, so that higher = better:
  return 1.0 - total / worst_possible;
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/FalseDiscoveryRate.cpp | .cpp | 71,743 | 1,900 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Andreas Bertsch, Chris Bielow $
// --------------------------------------------------------------------------
#include <boost/foreach.hpp> // must be first, otherwise Q_FOREACH macro will wreak havoc
#include <boost/regex.hpp>
#include <OpenMS/ANALYSIS/ID/FalseDiscoveryRate.h>
#include <OpenMS/ANALYSIS/ID/IDScoreGetterSetter.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/DATASTRUCTURES/StringUtils.h>
#include <OpenMS/PROCESSING/ID/IDFilter.h>
#include <OpenMS/METADATA/ProteinIdentification.h>
#include <algorithm>
// #define FALSE_DISCOVERY_RATE_DEBUG
// #undef FALSE_DISCOVERY_RATE_DEBUG
using namespace std;
namespace OpenMS
{
/// Default constructor: registers all algorithm parameters (with defaults and
/// valid values) via DefaultParamHandler.
FalseDiscoveryRate::FalseDiscoveryRate() :
  DefaultParamHandler("FalseDiscoveryRate")
{
  defaults_.setValue("no_qvalues", "false", "If 'true' strict FDRs will be calculated instead of q-values (the default)");
  defaults_.setValidStrings("no_qvalues", {"true","false"});
  defaults_.setValue("use_all_hits", "false", "If 'true' not only the first hit, but all are used (peptides only)");
  defaults_.setValidStrings("use_all_hits", {"true","false"});
  defaults_.setValue("split_charge_variants", "false", "If 'true' charge variants are treated separately (for peptides of combined target/decoy searches only).");
  defaults_.setValidStrings("split_charge_variants", {"true","false"});
  defaults_.setValue("treat_runs_separately", "false", "If 'true' different search runs are treated separately (for peptides of combined target/decoy searches only).");
  defaults_.setValidStrings("treat_runs_separately", {"true","false"});
  defaults_.setValue("add_decoy_peptides", "false", "If 'true' decoy peptides will be written to output file, too. The q-value is set to the closest target score.");
  defaults_.setValidStrings("add_decoy_peptides", {"true","false"});
  defaults_.setValue("add_decoy_proteins", "false", "If 'true' decoy proteins will be written to output file, too. The q-value is set to the closest target score.");
  defaults_.setValidStrings("add_decoy_proteins", {"true","false"});
  defaults_.setValue("conservative", "true", "If 'true' (D+1)/T instead of (D+1)/(T+D) is used as a formula.");
  defaults_.setValidStrings("conservative", {"true","false"});
  //defaults_.setValue("subprotein_level", "PSM", "Choose PSM or peptide or PSM+peptide");
  //defaults_.setValidStrings("subprotein_level", {"PSM","peptide","PSM+peptide"});
  defaultsToParam_();
}
/// Returns true if @p first is strictly better than @p second, where "better"
/// means strictly greater when @p isHigherBetter is set, strictly smaller otherwise.
bool isFirstBetterScore(double first, double second, bool isHigherBetter)
{
  return isHigherBetter ? (first > second) : (first < second);
}
/// Computes FDRs / q-values for peptide identifications from their target/decoy
/// annotation ("target_decoy" meta value, set by PeptideIndexer) and replaces
/// the hit scores with them; the original score is kept as "<score type>_score"
/// meta value. Depending on parameters, runs and/or charge states are treated
/// separately, decoy hits are kept or dropped, and an additional peptide-level
/// FDR (best score per unmodified sequence) can be annotated.
/// @param ids peptide identifications from a combined target/decoy search; modified in place
/// @param annotate_peptide_fdr if true, also annotate per-peptide q-values/FDRs as meta values
/// @throws Exception::MissingInformation if a hit lacks the 'target_decoy' meta value
void FalseDiscoveryRate::apply(PeptideIdentificationList& ids, bool annotate_peptide_fdr) const
{
  bool q_value = !param_.getValue("no_qvalues").toBool();
  bool use_all_hits = param_.getValue("use_all_hits").toBool();
  bool treat_runs_separately = param_.getValue("treat_runs_separately").toBool();
  bool split_charge_variants = param_.getValue("split_charge_variants").toBool();
  bool add_decoy_peptides = param_.getValue("add_decoy_peptides").toBool();
  if (ids.empty())
  {
    OPENMS_LOG_WARN << "No peptide identifications given to FalseDiscoveryRate! No calculation performed.\n";
    return;
  }
  // NOTE(review): score orientation is taken from the first ID only; assumes
  // all IDs share the same score type/orientation.
  bool higher_score_better = ids.begin()->isHigherScoreBetter();
  // first search for all identifiers and charge variants
  set<String> identifiers;
  set<SignedSize> charge_variants;
  for (auto it = ids.begin(); it != ids.end(); ++it)
  {
    identifiers.insert(it->getIdentifier());
    it->sort();
    // with use_all_hits off, only the top hit per spectrum enters the calculation
    if (!use_all_hits && it->getHits().size() > 1)
    {
      it->getHits().resize(1);
    }
    for (auto pit = it->getHits().begin(); pit != it->getHits().end(); ++pit)
    {
      charge_variants.insert(pit->getCharge());
    }
  }
#ifdef FALSE_DISCOVERY_RATE_DEBUG
  cerr << "#id-runs: " << identifiers.size() << " ";
  for (auto it = identifiers.begin(); it != identifiers.end(); ++it)
  {
    cerr << "," << *it;
  }
  cerr << endl;
  cerr << "#of charge states: " << charge_variants.size() << " ";
  for (auto it = charge_variants.begin(); it != charge_variants.end(); ++it)
  {
    cerr << "," << *it;
  }
  cerr << endl;
#endif
  // outer loop: one FDR calculation per charge state (or one overall, see
  // the break at the bottom when split_charge_variants is off)
  for (auto zit = charge_variants.begin(); zit != charge_variants.end(); ++zit)
  {
#ifdef FALSE_DISCOVERY_RATE_DEBUG
    cerr << "Charge variant=" << *zit << endl;
#endif
    // for all identifiers
    for (auto iit = identifiers.begin(); iit != identifiers.end(); ++iit)
    {
      if (!treat_runs_separately && iit != identifiers.begin())
      {
        continue; //only take the first run
      }
#ifdef FALSE_DISCOVERY_RATE_DEBUG
      cerr << "Id-run: " << *iit << endl;
#endif
      // get the scores of all peptide hits
      vector<double> target_scores, decoy_scores;
      // best score per unmodified peptide sequence (for peptide-level FDR)
      map<String, double> peptide_to_best_decoy_score, peptide_to_best_target_score;
      for (auto it = ids.begin(); it != ids.end(); ++it)
      {
        // if runs should be treated separately, the identifiers must be the same
        if (treat_runs_separately && it->getIdentifier() != *iit)
        {
          continue;
        }
        for (Size i = 0; i < it->getHits().size(); ++i)
        {
          if (split_charge_variants && it->getHits()[i].getCharge() != *zit)
          {
            continue;
          }
          auto target_decoy_type = it->getHits()[i].getTargetDecoyType();
          if (target_decoy_type == PeptideHit::TargetDecoyType::UNKNOWN)
          {
            OPENMS_LOG_FATAL_ERROR << "Meta value 'target_decoy' does not exists, reindex the idXML file with 'PeptideIndexer' first (run-id='" << it->getIdentifier() << ", rank=" << i + 1 << " of " << it->getHits().size() << ")!" << endl;
            throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Meta value 'target_decoy' does not exist!");
          }
          const String peptide_sequence = it->getHits()[i].getSequence().toUnmodifiedString();
          const double score = it->getHits()[i].getScore();
          // TARGET_DECOY (shared peptides) counts as target
          if (target_decoy_type == PeptideHit::TargetDecoyType::TARGET || target_decoy_type == PeptideHit::TargetDecoyType::TARGET_DECOY)
          {
            target_scores.push_back(score);
            if (annotate_peptide_fdr)
            {
              // store best score for peptide (unmodified sequence)
              auto [entry_it, success] = peptide_to_best_target_score.emplace(peptide_sequence, score); // try to construct in place (performance)
              if (!success && // emplace failed because key was already present -> replace if current score is better?
                  isFirstBetterScore(score, entry_it->second, higher_score_better))
              {
                entry_it->second = score;
              }
            }
          }
          else
          {
            if (target_decoy_type == PeptideHit::TargetDecoyType::DECOY)
            {
              decoy_scores.push_back(score);
              if (annotate_peptide_fdr)
              {
                auto [entry_it, success] = peptide_to_best_decoy_score.emplace(peptide_sequence, score); // try to construct in place (performance)
                if (!success && // emplace failed because key was already present -> replace if current score is better?
                    isFirstBetterScore(score, entry_it->second, higher_score_better))
                {
                  entry_it->second = score;
                }
              }
            }
            // Note: All other cases (UNKNOWN) are handled at the beginning of the loop
          }
        }
      }
#ifdef FALSE_DISCOVERY_RATE_DEBUG
      cerr << "#target-scores=" << target_scores.size() << ", #decoy-scores=" << decoy_scores.size() << endl;
#endif
      // check decoy scores
      if (decoy_scores.empty())
      {
        String error_string = "FalseDiscoveryRate: #decoy sequences is zero! Setting all target sequences to q-value/FDR 0! ";
        if (split_charge_variants || treat_runs_separately)
        {
          error_string += "(";
          if (split_charge_variants)
          {
            error_string += "charge_variant=" + String(*zit) + " ";
          }
          if (treat_runs_separately)
          {
            error_string += "run-id=" + *iit;
          }
          error_string += ")";
        }
        OPENMS_LOG_ERROR << error_string << '\n';
      }
      // check target scores
      if (target_scores.empty())
      {
        String error_string = "FalseDiscoveryRate: #target sequences is zero! Ignoring. ";
        if (split_charge_variants || treat_runs_separately)
        {
          error_string += "(";
          if (split_charge_variants)
          {
            error_string += "charge_variant=" + String(*zit) + " ";
          }
          if (treat_runs_separately)
          {
            error_string += "run-id=" + *iit;
          }
          error_string += ")";
        }
        OPENMS_LOG_ERROR << error_string << '\n';
      }
      // degenerate case: no FDR computable; targets get FDR 0, decoys are dropped
      if (target_scores.empty() || decoy_scores.empty())
      {
        // now remove the relevant entries, or put 'pseudo-scores' in
        for (auto it = ids.begin(); it != ids.end(); ++it)
        {
          // if runs should be treated separately, the identifiers must be the same
          if (treat_runs_separately && it->getIdentifier() != *iit)
          {
            continue;
          }
          vector<PeptideHit> hits(it->getHits()), new_hits;
          for (Size i = 0; i < hits.size(); ++i)
          {
            if (split_charge_variants && hits[i].getCharge() != *zit)
            {
              new_hits.push_back(hits[i]);
              continue;
            }
            auto target_decoy_type = hits[i].getTargetDecoyType();
            if (target_decoy_type == PeptideHit::TargetDecoyType::UNKNOWN)
            {
              OPENMS_LOG_FATAL_ERROR << "Meta value 'target_decoy' does not exists, reindex the idXML file with 'PeptideIndexer' (run-id='" << it->getIdentifier() << ", rank=" << i + 1 << " of " << hits.size() << ")!" << endl;
              throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Meta value 'target_decoy' does not exist!");
            }
            bool is_decoy = hits[i].isDecoy();
            if (!is_decoy)
            {
              // if it is a target hit, there are no decoys, fdr/q-value should be zero then
              new_hits.push_back(hits[i]);
              String score_type = it->getScoreType() + "_score";
              new_hits.back().setMetaValue(score_type, new_hits.back().getScore());
              new_hits.back().setScore(0);
            }
            // Note: decoy hits are skipped (not added to new_hits)
          }
          it->setHits(new_hits);
        }
        continue;
      }
      // calculate fdr for the forward scores
      map<double, double> score_to_fdr;
      calculateFDRs_(score_to_fdr, target_scores, decoy_scores, q_value, higher_score_better);
      // calculate peptide FDR
      if (annotate_peptide_fdr)
      {
        vector<double> decoy_peptide_scores, target_peptide_scores;
        for (const auto& ps : peptide_to_best_decoy_score)
        {
          decoy_peptide_scores.push_back(ps.second);
        }
        for (const auto& ps : peptide_to_best_target_score)
        {
          target_peptide_scores.push_back(ps.second);
        }
        map<double, double> score_to_peptide_fdr;
        calculateFDRs_(score_to_peptide_fdr, target_peptide_scores, decoy_peptide_scores, q_value, higher_score_better);
        // overwrite best peptide score with peptide q-value
        for (auto& ps : peptide_to_best_decoy_score)
        {
          ps.second = score_to_peptide_fdr[ps.second];
        }
        for (auto& ps : peptide_to_best_target_score)
        {
          ps.second = score_to_peptide_fdr[ps.second];
        }
      }
      // annotate fdr
      for (auto it = ids.begin(); it != ids.end(); ++it)
      {
        // if runs should be treated separately, the identifiers must be the same
        if (treat_runs_separately && it->getIdentifier() != *iit)
        {
          continue;
        }
        String score_type = it->getScoreType() + "_score";
        vector<PeptideHit> hits;
        for (vector<PeptideHit>::const_iterator pit = it->getHits().begin(); pit != it->getHits().end(); ++pit)
        {
          PeptideHit hit = *pit;
          if (split_charge_variants && pit->getCharge() != *zit)
          {
            hits.push_back(*pit);
            continue;
          }
          if (hit.metaValueExists("target_decoy"))
          {
            bool is_decoy = hit.isDecoy();
            if (is_decoy && !add_decoy_peptides)
            {
              continue;
            }
            if (annotate_peptide_fdr)
            {
              const String peptide_sequence = hit.getSequence().toUnmodifiedString();
              double peptide_fdr;
              if (is_decoy)
              {
                peptide_fdr = peptide_to_best_decoy_score[peptide_sequence];
              }
              else
              {
                peptide_fdr = peptide_to_best_target_score[peptide_sequence];
              }
              if (q_value)
              {
                hit.setMetaValue(Constants::UserParam::PEPTIDE_Q_VALUE, peptide_fdr);
              }
              else
              {
                hit.setMetaValue("peptide FDR", peptide_fdr);
              }
            }
          }
          // keep the original score as meta value; replace the score itself
          hit.setMetaValue(score_type, pit->getScore());
          hit.setScore(score_to_fdr[pit->getScore()]);
          hits.push_back(hit);
        }
        it->getHits().swap(hits);
      }
    }
    // without charge splitting, a single pass covers all charge states
    if (!split_charge_variants)
    {
      break;
    }
  }
  // higher-score-better can be set now, calculations are finished
  for (PeptideIdentificationList::iterator it = ids.begin(); it != ids.end(); ++it)
  {
    if (q_value)
    {
      if (it->getScoreType() != "q-value")
      {
        it->setScoreType("q-value");
      }
    }
    else
    {
      if (it->getScoreType() != "FDR")
      {
        it->setScoreType("FDR");
      }
    }
    it->setHigherScoreBetter(false);
    it->sort();
  }
  return;
}
/// Computes FDRs / q-values from separate target and decoy searches: all hits in
/// @p fwd_ids are treated as targets, all hits in @p rev_ids as decoys. Target
/// (and optionally decoy) scores are replaced by FDR/q-values; the original score
/// is kept as "<score type>_score" meta value.
/// @param fwd_ids peptide identifications from the target (forward) search; modified in place
/// @param rev_ids peptide identifications from the decoy (reverse) search; modified if add_decoy_peptides is set
void FalseDiscoveryRate::apply(PeptideIdentificationList& fwd_ids, PeptideIdentificationList& rev_ids) const
{
  if (fwd_ids.empty() || rev_ids.empty())
  {
    return;
  }
  vector<double> target_scores, decoy_scores;
  // get the scores of all peptide hits
  for (PeptideIdentificationList::const_iterator it = fwd_ids.begin(); it != fwd_ids.end(); ++it)
  {
    for (vector<PeptideHit>::const_iterator pit = it->getHits().begin(); pit != it->getHits().end(); ++pit)
    {
      target_scores.push_back(pit->getScore());
    }
  }
  for (PeptideIdentificationList::const_iterator it = rev_ids.begin(); it != rev_ids.end(); ++it)
  {
    for (vector<PeptideHit>::const_iterator pit = it->getHits().begin(); pit != it->getHits().end(); ++pit)
    {
      decoy_scores.push_back(pit->getScore());
    }
  }
  bool q_value = !param_.getValue("no_qvalues").toBool();
  // NOTE(review): score orientation is taken from the first forward ID only;
  // assumes all IDs (incl. decoys) share the same score type/orientation.
  bool higher_score_better = fwd_ids.begin()->isHigherScoreBetter();
  bool add_decoy_peptides = param_.getValue("add_decoy_peptides").toBool();
  // calculate fdr for the forward scores
  map<double, double> score_to_fdr;
  calculateFDRs_(score_to_fdr, target_scores, decoy_scores, q_value, higher_score_better);
  // annotate fdr
  String score_type = fwd_ids.begin()->getScoreType() + "_score";
  for (PeptideIdentificationList::iterator it = fwd_ids.begin(); it != fwd_ids.end(); ++it)
  {
    if (q_value)
    {
      it->setScoreType("q-value");
    }
    else
    {
      it->setScoreType("FDR");
    }
    it->setHigherScoreBetter(false);
    vector<PeptideHit> hits = it->getHits();
    for (vector<PeptideHit>::iterator pit = hits.begin(); pit != hits.end(); ++pit)
    {
#ifdef FALSE_DISCOVERY_RATE_DEBUG
      cerr << pit->getScore() << " " << score_to_fdr[pit->getScore()] << endl;
#endif
      // keep the original score as meta value; replace the score itself
      pit->setMetaValue(score_type, pit->getScore());
      pit->setScore(score_to_fdr[pit->getScore()]);
    }
    it->setHits(hits);
  }
  //write as well decoy peptides
  if (add_decoy_peptides)
  {
    score_type = rev_ids.begin()->getScoreType() + "_score";
    for (PeptideIdentificationList::iterator it = rev_ids.begin(); it != rev_ids.end(); ++it)
    {
      if (q_value)
      {
        it->setScoreType("q-value");
      }
      else
      {
        it->setScoreType("FDR");
      }
      it->setHigherScoreBetter(false);
      vector<PeptideHit> hits = it->getHits();
      for (vector<PeptideHit>::iterator pit = hits.begin(); pit != hits.end(); ++pit)
      {
#ifdef FALSE_DISCOVERY_RATE_DEBUG
        cerr << pit->getScore() << " " << score_to_fdr[pit->getScore()] << endl;
#endif
        pit->setMetaValue(score_type, pit->getScore());
        pit->setScore(score_to_fdr[pit->getScore()]);
      }
      it->setHits(hits);
    }
  }
  return;
}
/// Computes FDRs / q-values for protein identifications from their target/decoy
/// annotation and replaces the protein scores with them; the original score is
/// kept as "<score type>_score" meta value. Decoy hits are dropped unless the
/// "add_decoy_proteins" parameter is set.
/// @param ids protein identifications from a combined target/decoy search; modified in place
/// @throws Exception::MissingInformation if a hit lacks the 'target_decoy' meta value
void FalseDiscoveryRate::apply(vector<ProteinIdentification>& ids) const
{
  // Check for empty input FIRST: the previous version dereferenced
  // ids.begin() before this check, which is undefined behavior on an
  // empty vector.
  if (ids.empty())
  {
    OPENMS_LOG_WARN << "No protein identifications given to FalseDiscoveryRate! No calculation performed.\n";
    return;
  }
  bool q_value = !param_.getValue("no_qvalues").toBool();
  bool higher_score_better = ids.begin()->isHigherScoreBetter();
  bool add_decoy_proteins = param_.getValue("add_decoy_proteins").toBool();
  // collect target and decoy scores across all runs
  vector<double> target_scores, decoy_scores;
  for (auto it = ids.begin(); it != ids.end(); ++it)
  {
    for (auto pit = it->getHits().begin(); pit != it->getHits().end(); ++pit)
    {
      if (pit->getTargetDecoyType() == ProteinHit::TargetDecoyType::UNKNOWN)
      {
        OPENMS_LOG_FATAL_ERROR << "Meta value 'target_decoy' does not exist, reindex the idXML file with 'PeptideIndexer' first (run-id='"
                               << it->getIdentifier() << ")!" << endl;
        throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Meta value 'target_decoy' does not exist!");
      }
      if (pit->isDecoy())
      {
        decoy_scores.push_back(pit->getScore());
      }
      else
      {
        target_scores.push_back(pit->getScore());
      }
    }
  }
  // calculate fdr for the forward scores
  map<double, double> score_to_fdr;
  calculateFDRs_(score_to_fdr, target_scores, decoy_scores, q_value, higher_score_better);
  // annotate fdr
  String score_type = ids.begin()->getScoreType() + "_score";
  for (auto it = ids.begin(); it != ids.end(); ++it)
  {
    if (q_value)
    {
      it->setScoreType("q-value");
    }
    else
    {
      it->setScoreType("FDR");
    }
    it->setHigherScoreBetter(false);
    const vector<ProteinHit>& old_hits = it->getHits();
    vector<ProteinHit> new_hits;
    for (auto hit : old_hits) // NOTE: performs copy
    {
      // Add decoy proteins only if add_decoy_proteins is set
      if (add_decoy_proteins || !hit.isDecoy())
      {
        hit.setMetaValue(score_type, hit.getScore());
        hit.setScore(score_to_fdr[hit.getScore()]);
        new_hits.push_back(std::move(hit));
      }
    }
    it->setHits(std::move(new_hits));
  }
}
/// Computes FDRs / q-values for proteins from separate target and decoy searches:
/// all hits in @p fwd_ids are targets, all hits in @p rev_ids are decoys. Only
/// the target runs are annotated; original scores are kept as "<score type>_score".
void FalseDiscoveryRate::apply(vector<ProteinIdentification>& fwd_ids, vector<ProteinIdentification>& rev_ids) const
{
  // nothing to do without both target and decoy runs
  if (fwd_ids.empty() || rev_ids.empty())
  {
    return;
  }
  // collect the protein scores of both searches
  vector<double> target_scores, decoy_scores;
  for (const auto& run : fwd_ids)
  {
    for (const auto& hit : run.getHits())
    {
      target_scores.push_back(hit.getScore());
    }
  }
  for (const auto& run : rev_ids)
  {
    for (const auto& hit : run.getHits())
    {
      decoy_scores.push_back(hit.getScore());
    }
  }
  const bool q_value = !param_.getValue("no_qvalues").toBool();
  const bool higher_score_better = fwd_ids.begin()->isHigherScoreBetter();
  // map each original score to its FDR / q-value
  map<double, double> score_to_fdr;
  calculateFDRs_(score_to_fdr, target_scores, decoy_scores, q_value, higher_score_better);
  // annotate the target runs; the original score is preserved as a meta value
  const String score_type = fwd_ids.begin()->getScoreType() + "_score";
  for (auto& run : fwd_ids)
  {
    run.setScoreType(q_value ? "q-value" : "FDR");
    run.setHigherScoreBetter(false);
    vector<ProteinHit> hits = run.getHits();
    for (auto& hit : hits)
    {
      hit.setMetaValue(score_type, hit.getScore());
      hit.setScore(score_to_fdr[hit.getScore()]);
    }
    run.setHits(hits);
  }
}
/// Computes FDRs / q-values for observation matches (PSMs) in an
/// IdentificationData structure, based on the given score type, and registers
/// and annotates a new FDR/q-value score type.
/// @param id_data identification data; FDR scores are added to its matches
/// @param score_ref score type to base the FDR calculation on
/// @return reference to the newly registered FDR / q-value score type
IdentificationData::ScoreTypeRef FalseDiscoveryRate::applyToObservationMatches(
  IdentificationData& id_data, IdentificationData::ScoreTypeRef score_ref)
  const
{
  bool use_all_hits = param_.getValue("use_all_hits").toBool();
  bool include_decoys = param_.getValue("add_decoy_peptides").toBool();
  vector<double> target_scores, decoy_scores;
  // per-molecule decoy status and per-match input score, filled by handleObservationMatch_
  map<IdentificationData::IdentifiedMolecule, bool> molecule_to_decoy;
  map<IdentificationData::ObservationMatchRef, double> match_to_score;
  if (use_all_hits)
  {
    for (auto it = id_data.getObservationMatches().begin();
         it != id_data.getObservationMatches().end(); ++it)
    {
      handleObservationMatch_(it, score_ref, target_scores, decoy_scores,
                              molecule_to_decoy, match_to_score);
    }
  }
  else
  {
    // only the best match per observation (spectrum) enters the calculation
    vector<IdentificationData::ObservationMatchRef> best_matches =
      id_data.getBestMatchPerObservation(score_ref);
    for (const auto& match_ref : best_matches)
    {
      handleObservationMatch_(match_ref, score_ref, target_scores, decoy_scores,
                              molecule_to_decoy, match_to_score);
    }
  }
  map<double, double> score_to_fdr;
  bool higher_better = score_ref->higher_better;
  bool use_qvalue = !param_.getValue("no_qvalues").toBool();
  calculateFDRs_(score_to_fdr, target_scores, decoy_scores, use_qvalue,
                 higher_better);
  // register the new score type (PSM-level q-value or FDR; lower is better)
  IdentificationData::ScoreType fdr_score;
  fdr_score.higher_better = false;
  if (use_qvalue)
  {
    fdr_score.cv_term = CVTerm("MS:1002354", "PSM-level q-value", "MS");
  }
  else
  {
    fdr_score.cv_term = CVTerm("MS:1002355", "PSM-level FDRScore", "MS");
  }
  IdentificationData::ScoreTypeRef fdr_ref =
    id_data.registerScoreType(fdr_score);
  // annotate each (eligible) match with its FDR / q-value
  for (IdentificationData::ObservationMatches::iterator it =
         id_data.getObservationMatches().begin(); it !=
         id_data.getObservationMatches().end(); ++it)
  {
    if (!include_decoys)
    {
      auto pos = molecule_to_decoy.find(it->identified_molecule_var);
      if ((pos != molecule_to_decoy.end()) && pos->second) continue;
    }
    // matches without an input score (e.g. not the best per observation) are skipped
    auto pos = match_to_score.find(it);
    if (pos == match_to_score.end()) continue;
    double fdr = score_to_fdr.at(pos->second);
    id_data.addScore(it, fdr_ref, fdr);
  }
  return fdr_ref;
}
void FalseDiscoveryRate::handleObservationMatch_(
  IdentificationData::ObservationMatchRef match_ref,
  IdentificationData::ScoreTypeRef score_ref,
  vector<double>& target_scores, vector<double>& decoy_scores,
  map<IdentificationData::IdentifiedMolecule, bool>& molecule_to_decoy,
  map<IdentificationData::ObservationMatchRef, double>& match_to_score) const
{
  const IdentificationData::IdentifiedMolecule& molecule =
    match_ref->identified_molecule_var;
  const IdentificationData::MoleculeType mol_type = molecule.getMoleculeType();
  // compounds don't have parents with target/decoy status
  if (mol_type == IdentificationData::MoleculeType::COMPOUND)
  {
    return;
  }
  // skip matches without a score of the requested type
  const pair<double, bool> score = match_ref->getScore(score_ref);
  if (!score.second)
  {
    return;
  }
  match_to_score[match_ref] = score.first;
  // determine the decoy status once per molecule and cache it
  auto pos = molecule_to_decoy.find(molecule);
  if (pos == molecule_to_decoy.end())
  {
    const bool decoy_status =
      (mol_type == IdentificationData::MoleculeType::PROTEIN) ?
      molecule.getIdentifiedPeptideRef()->allParentsAreDecoys() :
      // otherwise: IdentificationData::MoleculeType::RNA
      molecule.getIdentifiedOligoRef()->allParentsAreDecoys();
    pos = molecule_to_decoy.emplace(molecule, decoy_status).first;
  }
  // sort the score into the matching distribution
  (pos->second ? decoy_scores : target_scores).push_back(score.first);
}
/// Target-decoy FDR computation from separate target and decoy score lists.
/// Fills 'score_to_fdr' with an FDR (or q-value if 'q_value') for every target
/// score; each decoy score is then mapped to the value of the nearest target
/// score. NOTE: both input vectors are sorted in place.
void FalseDiscoveryRate::calculateFDRs_(map<double, double>& score_to_fdr, vector<double>& target_scores, vector<double>& decoy_scores, bool q_value, bool higher_score_better) const
{
  Size number_of_target_scores = target_scores.size();
  // sort the scores:
  // - plain FDR: both lists best-score-first
  // - q-value: targets worst-first (the loop then walks from worst to best, so
  //   a running minimum yields monotone q-values), decoys best-first
  if (higher_score_better && !q_value)
  {
    sort(target_scores.rbegin(), target_scores.rend());
    sort(decoy_scores.rbegin(), decoy_scores.rend());
  }
  else if (!higher_score_better && !q_value)
  {
    sort(target_scores.begin(), target_scores.end());
    sort(decoy_scores.begin(), decoy_scores.end());
  }
  else if (higher_score_better)
  {
    sort(target_scores.begin(), target_scores.end());
    sort(decoy_scores.rbegin(), decoy_scores.rend());
  }
  else
  {
    sort(target_scores.rbegin(), target_scores.rend());
    sort(decoy_scores.begin(), decoy_scores.end());
  }
  // j = number of decoy scores at least as good as the current target score
  Size j = 0;
  if (q_value)
  {
    double minimal_fdr = 1.; // running minimum over thresholds -> q-values
    for (Size i = 0; i != target_scores.size(); ++i)
    {
      if (decoy_scores.empty())
      {
        // set FDR to 0 (done below automatically)
      }
      else if (i == 0 && j == 0)
      {
        // first iteration: count all decoys at least as good as the worst target
        while (j != decoy_scores.size()
              && ((target_scores[i] <= decoy_scores[j] && higher_score_better) ||
                  (target_scores[i] >= decoy_scores[j] && !higher_score_better)))
        {
          ++j;
        }
      }
      else
      {
        // later iterations: targets only get better, so j can only shrink
        if (j == decoy_scores.size())
        {
          j--;
        }
        while (j != 0
              && ((target_scores[i] > decoy_scores[j] && higher_score_better) ||
                  (target_scores[i] < decoy_scores[j] && !higher_score_better)))
        {
          --j;
        }
        // Since j has to be equal to the number of fps above the threshold we add one
        if ((target_scores[i] <= decoy_scores[j] && higher_score_better)
           || (target_scores[i] >= decoy_scores[j] && !higher_score_better))
        {
          ++j;
        }
      }
#ifdef FALSE_DISCOVERY_RATE_DEBUG
      cerr << target_scores[i] << " " << decoy_scores[j] << " " << i << " " << j << " ";
#endif
      double fdr = 0.;
      // FDR at this threshold: j decoys / (number_of_target_scores - i) targets
      // at or above it; q-value = minimum over all stricter thresholds
      if (minimal_fdr >= (double)j / (number_of_target_scores - i))
      {
        minimal_fdr = (double)j / (number_of_target_scores - i);
      }
      fdr = minimal_fdr;
#ifdef FALSE_DISCOVERY_RATE_DEBUG
      cerr << fdr << endl;
#endif
      score_to_fdr[target_scores[i]] = fdr;
    }
  }
  else
  {
    // plain FDR: both lists best-first; at target i, j decoys score at least as
    // well, and (i + 1) targets have been accepted
    for (Size i = 0; i != target_scores.size(); ++i)
    {
      while (j != decoy_scores.size() &&
            ((target_scores[i] <= decoy_scores[j] && higher_score_better) ||
             (target_scores[i] >= decoy_scores[j] && !higher_score_better)))
      {
        ++j;
      }
#ifdef FALSE_DISCOVERY_RATE_DEBUG
      cerr << target_scores[i] << " " << decoy_scores[j] << " " << i << " " << j << " ";
#endif
      double fdr(0);
      fdr = (double)j / (double)(i + 1);
#ifdef FALSE_DISCOVERY_RATE_DEBUG
      cerr << fdr << endl;
#endif
      score_to_fdr[target_scores[i]] = fdr;
    }
  }
  // assign q-value of decoy_score to closest target_score
  for (Size i = 0; i != decoy_scores.size(); ++i)
  {
    const double& ds = decoy_scores[i];
    // advance target index until score is better than decoy score
    size_t k{0};
    while (k != target_scores.size() &&
          ((target_scores[k] <= ds && higher_score_better) ||
           (target_scores[k] >= ds && !higher_score_better)))
    {
      ++k;
    }
    // corner cases: decoy falls before the first target in the sorted order
    if (k == 0)
    {
      if (!target_scores.empty())
      {
        score_to_fdr[ds] = score_to_fdr[target_scores[0]];
        continue;
      }
      else
      {
        // no targets at all: assign the worst possible value
        score_to_fdr[ds] = 1.0;
        continue;
      }
    }
    // decoy falls after the last target in the sorted order
    if (k == target_scores.size()) { score_to_fdr[ds] = score_to_fdr[target_scores.back()]; continue; }
    // otherwise use whichever neighboring target score is numerically closer
    if (fabs(target_scores[k] - ds) < fabs(target_scores[k - 1] - ds))
    {
      score_to_fdr[ds] = score_to_fdr[target_scores[k]];
    }
    else
    {
      score_to_fdr[ds] = score_to_fdr[target_scores[k - 1]];
    }
  }
}
//TODO does not support "by run" and/or "by charge"
//TODO could be done for a percentage of FalsePos instead of a number
//TODO can be templated for proteins
double FalseDiscoveryRate::rocN(const PeptideIdentificationList& ids, Size fp_cutoff) const
{
bool higher_score_better(ids.begin()->isHigherScoreBetter());
bool use_all_hits = param_.getValue("use_all_hits").toBool();
ScoreToTgtDecLabelPairs scores_labels;
IDScoreGetterSetter::getScores_(scores_labels, ids, use_all_hits);
if (scores_labels.empty())
{
throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No scores could be extracted!");
}
if (higher_score_better)
{ // decreasing
std::sort(scores_labels.rbegin(), scores_labels.rend());
}
else
{ // increasing
std::sort(scores_labels.begin(), scores_labels.end());
}
// if fp_cutoff is zero do the full AUC.
return rocN(scores_labels, fp_cutoff == 0 ? scores_labels.size() : fp_cutoff);
}
double FalseDiscoveryRate::rocN(const PeptideIdentificationList& ids, Size fp_cutoff, const String& identifier) const
{
bool higher_score_better(ids.begin()->isHigherScoreBetter());
bool use_all_hits = param_.getValue("use_all_hits").toBool();
ScoreToTgtDecLabelPairs scores_labels;
IDScoreGetterSetter::getScores_(scores_labels, ids, [&identifier](const PeptideIdentification& id){return identifier == id.getIdentifier();}, use_all_hits);
if (scores_labels.empty())
{
throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No scores could be extracted!");
}
if (higher_score_better)
{ // decreasing
std::sort(scores_labels.rbegin(), scores_labels.rend());
}
else
{ // increasing
std::sort(scores_labels.begin(), scores_labels.end());
}
// if fp_cutoff is zero do the full AUC.
return rocN(scores_labels, fp_cutoff == 0 ? scores_labels.size() : fp_cutoff);
}
double FalseDiscoveryRate::rocN(const ConsensusMap& ids, Size fp_cutoff, bool include_unassigned_peptides) const
{
bool higher_score_better(false);
// Check first ID in a feature for the score orientation.
for (const auto& f : ids)
{
const auto& pepids = f.getPeptideIdentifications();
if (!pepids.empty())
{
higher_score_better = pepids[0].isHigherScoreBetter();
break;
}
}
bool use_all_hits = param_.getValue("use_all_hits").toBool();
ScoreToTgtDecLabelPairs scores_labels;
IDScoreGetterSetter::getPeptideScoresFromMap_(scores_labels, ids, include_unassigned_peptides, use_all_hits);
if (scores_labels.empty())
{
throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No scores could be extracted!");
}
if (higher_score_better)
{ // decreasing
std::sort(scores_labels.rbegin(), scores_labels.rend());
}
else
{ // increasing
std::sort(scores_labels.begin(), scores_labels.end());
}
// if fp_cutoff is zero do the full AUC.
return rocN(scores_labels, fp_cutoff == 0 ? scores_labels.size() : fp_cutoff);
}
double FalseDiscoveryRate::rocN(const ConsensusMap& ids, Size fp_cutoff, const String& identifier, bool include_unassigned_peptides) const
{
bool higher_score_better(ids[0].getPeptideIdentifications().begin()->isHigherScoreBetter());
bool use_all_hits = param_.getValue("use_all_hits").toBool();
ScoreToTgtDecLabelPairs scores_labels;
IDScoreGetterSetter::getPeptideScoresFromMap_(scores_labels, ids, include_unassigned_peptides, [&identifier](const PeptideIdentification& id){return identifier == id.getIdentifier();}, use_all_hits);
if (scores_labels.empty())
{
throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No scores could be extracted!");
}
if (higher_score_better)
{ // decreasing
std::sort(scores_labels.rbegin(), scores_labels.rend());
}
else
{ // increasing
std::sort(scores_labels.begin(), scores_labels.end());
}
// if fp_cutoff is zero do the full AUC.
return rocN(scores_labels, fp_cutoff == 0 ? scores_labels.size() : fp_cutoff);
}
/// Basic FDR/q-value annotation of peptide IDs in a consensus map, optionally
/// per run and/or per charge state.
void FalseDiscoveryRate::applyBasic(ConsensusMap & cmap, bool include_unassigned_peptides)
{
  bool q_value = !param_.getValue("no_qvalues").toBool();
  const string& score_type = q_value ? "q-value" : "FDR";
  bool all_hits = param_.getValue("use_all_hits").toBool();
  bool treat_runs_separately = param_.getValue("treat_runs_separately").toBool();
  bool split_charge_variants = param_.getValue("split_charge_variants").toBool();

  //TODO this assumes all used search engine scores have the same score orientation
  // include the determination of orientation in the getScores methods instead
  bool higher_score_better = false;
  bool orientation_found = false;
  for (const auto& cf : cmap)
  {
    const auto& pep_ids = cf.getPeptideIdentifications();
    if (!pep_ids.empty())
    {
      higher_score_better = pep_ids[0].isHigherScoreBetter();
      orientation_found = true;
      break;
    }
  }
  // Fall back to the unassigned peptide IDs whenever no feature carried any
  // IDs. (Bugfix: this previously only happened for a completely empty map,
  // so a non-empty map without assigned IDs silently used 'false'.)
  if (!orientation_found)
  {
    for (const auto& id : cmap.getUnassignedPeptideIdentifications())
    {
      higher_score_better = id.isHigherScoreBetter();
      break;
    }
  }

  bool add_decoy_peptides = param_.getValue("add_decoy_peptides").toBool();

  //Warning: this assumes that there are no dangling identifier references in the PeptideIDs
  // because this disables checking
  if (cmap.getProteinIdentifications().size() == 1)
  {
    treat_runs_separately = false;
  }

  if (treat_runs_separately)
  {
    for (const auto& protID : cmap.getProteinIdentifications())
    {
      if (split_charge_variants)
      {
        pair<int, int> chargeRange = protID.getSearchParameters().getChargeRange();
        for (int c = chargeRange.first; c <= chargeRange.second; ++c)
        {
          if (c == 0) continue;
          // fresh container per run/charge: prevents scores gathered in
          // earlier iterations from leaking into this FDR calculation
          ScoreToTgtDecLabelPairs scores_labels;
          IDScoreGetterSetter::getPeptideScoresFromMap_(scores_labels, cmap, include_unassigned_peptides,
            [&protID](const PeptideIdentification& id){return protID.getIdentifier() == id.getIdentifier();}, all_hits,
            [&c](const PeptideHit& hit){return c == hit.getCharge();});
          map<double, double> scores_to_fdr;
          calculateFDRBasic_(scores_to_fdr, scores_labels, q_value, higher_score_better);
          IDScoreGetterSetter::setPeptideScoresForMap_(scores_to_fdr, cmap, include_unassigned_peptides, score_type, higher_score_better, add_decoy_peptides, c, protID.getIdentifier());
        }
      }
      else
      {
        // fresh container per run (see above)
        ScoreToTgtDecLabelPairs scores_labels;
        IDScoreGetterSetter::getPeptideScoresFromMap_(scores_labels, cmap, include_unassigned_peptides, [&protID](const PeptideIdentification& id){return protID.getIdentifier() == id.getIdentifier();}, all_hits);
        map<double, double> scores_to_fdr;
        calculateFDRBasic_(scores_to_fdr, scores_labels, q_value, higher_score_better);
        IDScoreGetterSetter::setPeptideScoresForMap_(scores_to_fdr, cmap, include_unassigned_peptides, score_type, higher_score_better, add_decoy_peptides, protID.getIdentifier());
      }
    }
  }
  else
  {
    // single joint FDR over all runs
    ScoreToTgtDecLabelPairs scores_labels;
    IDScoreGetterSetter::getPeptideScoresFromMap_(scores_labels, cmap, include_unassigned_peptides, all_hits);
    map<double, double> scores_to_fdr;
    calculateFDRBasic_(scores_to_fdr, scores_labels, q_value, higher_score_better);
    IDScoreGetterSetter::setPeptideScoresForMap_(scores_to_fdr, cmap, include_unassigned_peptides, score_type, higher_score_better, add_decoy_peptides);
  }
}
//TODO Add another overload that iterates over a vector. to be consistent with old interface
//TODO Make it return a double for the AUC or max FDR (i.e., without cutoff)
/// Basic FDR/q-value annotation of protein hits (and optionally
/// indistinguishable protein groups) in a single run.
void FalseDiscoveryRate::applyBasic(ProteinIdentification & id, bool groups_too)
{
  bool add_decoy_proteins = param_.getValue("add_decoy_proteins").toBool();
  bool q_value = !param_.getValue("no_qvalues").toBool();
  //TODO Check naming conventions. Ontology? Make class member?
  const string& score_type = q_value ? "q-value" : "FDR";
  bool higher_score_better(id.isHigherScoreBetter());

  ScoreToTgtDecLabelPairs scores_labels;
  scores_labels.reserve(id.getHits().size());
  std::map<double,double> scores_to_FDR;

  // TODO this could be a separate function.. And it could actually be sped up.
  // We could store the number of decoys/targets in the group, or we only update the
  // scores of proteins that are actually in groups (rest stays the same)
  // do groups first, if keep_decoy is false, we would otherwise miss those proteins
  if (groups_too)
  {
    // Prepare lookup map for decoy proteins (since there is no direct way back from group to protein)
    // TODO we could also require the decoy affix to be specified
    unordered_set<string> decoy_accs;
    for (const auto& prot : id.getHits())
    {
      // checks if not a target (UNKNOWN or DECOY)
      if (prot.getTargetDecoyType() != ProteinHit::TargetDecoyType::TARGET)
      {
        decoy_accs.insert(prot.getAccession());
      }
    }
    IDScoreGetterSetter::getScores_(scores_labels, id.getIndistinguishableProteins(), decoy_accs);
    calculateFDRBasic_(scores_to_FDR, scores_labels, q_value, higher_score_better);
    if (!scores_labels.empty())
      IDScoreGetterSetter::setScores_(scores_to_FDR, id.getIndistinguishableProteins(), score_type, false);
  }

  // now the individual proteins
  scores_to_FDR.clear();
  scores_labels.clear();
  scores_labels.reserve(id.getHits().size());
  IDScoreGetterSetter::getScores_(scores_labels, id);
  if (scores_labels.empty())
  {
    throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No scores could be extracted!");
  }
  calculateFDRBasic_(scores_to_FDR, scores_labels, q_value, higher_score_better);
  // 'scores_labels' cannot be empty here (checked above), so the scores can be
  // set unconditionally. (The former emptiness re-check and its warning branch
  // were unreachable dead code.)
  IDScoreGetterSetter::setScores_(scores_to_FDR, id, score_type, false, add_decoy_proteins);
  scores_to_FDR.clear();
}
/// Basic peptide FDR over a list of IDs, optionally per run and/or per charge
/// state (charge ranges are taken from the runs' search parameters).
void FalseDiscoveryRate::applyBasic(const std::vector<ProteinIdentification> & run_info, PeptideIdentificationList & ids)
{
  if (ids.empty()) return;
  bool treat_runs_separately = param_.getValue("treat_runs_separately").toBool();
  bool split_charge_variants = param_.getValue("split_charge_variants").toBool();
  // TODO decide on param interface. Consolidate with FDR tool parameters.
  // Then pass the bool to the applyBasic calls.
  //bool best_per_pep = param_.getValue("pepFDR").toBool();
  String identifier = "";
  bool higher_score_better = true;
  if (treat_runs_separately)
  {
    for (const auto& run : run_info)
    {
      identifier = run.getIdentifier();
      // orientation from the first peptide ID belonging to this run
      for (const auto& pepid : ids)
      {
        if (pepid.getIdentifier() == identifier)
        {
          higher_score_better = pepid.isHigherScoreBetter();
          break;
        }
      }
      if (split_charge_variants)
      {
        pair<int, int> chargeRange = run.getSearchParameters().getChargeRange();
        for (int c = chargeRange.first; c <= chargeRange.second; ++c)
        {
          if (c == 0) continue;
          applyBasic(ids, higher_score_better, c, identifier);
        }
      }
      else
      {
        applyBasic(ids, higher_score_better, 0, identifier);
      }
    }
  }
  else if (split_charge_variants)
  {
    // build the union of the charge ranges over all runs
    pair<int, int> chargeRange = {10000,-10000};
    for (const auto& run : run_info)
    {
      chargeRange.first = std::min(run.getSearchParameters().getChargeRange().first, chargeRange.first);
      // bugfix: use the run's *upper* charge bound here (previously read
      // '.first', which truncated the combined range to the minimum charge)
      chargeRange.second = std::max(run.getSearchParameters().getChargeRange().second, chargeRange.second);
    }
    higher_score_better = ids[0].isHigherScoreBetter();
    for (int c = chargeRange.first; c <= chargeRange.second; ++c)
    {
      if (c == 0) continue;
      applyBasic(ids, higher_score_better, c);
    }
  }
  else // altogether
  {
    higher_score_better = ids[0].isHigherScoreBetter();
    applyBasic(ids, higher_score_better);
  }
}
/// Peptide-level (sequence-aggregated) FDR/q-values for a consensus map.
void FalseDiscoveryRate::applyBasicPeptideLevel(ConsensusMap & map, bool include_unassigned)
{
  bool q_value = !param_.getValue("no_qvalues").toBool();
  //TODO Check naming conventions. Ontology?
  const string& score_type = q_value ? Constants::UserParam::PEPTIDE_Q_VALUE : "peptide FDR";
  bool add_decoy_peptides = param_.getValue("add_decoy_peptides").toBool();
  // since we do not support multiple runs here yet, we take the orientation of the first ID
  bool higher_better = true;
  for (const auto& f : map)
  {
    if (!f.getPeptideIdentifications().empty())
    {
      higher_better = f.getPeptideIdentifications()[0].isHigherScoreBetter();
      // bugfix: stop at the first feature with IDs, as the comment above
      // states (previously the loop kept going and effectively used the
      // orientation of the *last* feature with IDs)
      break;
    }
  }
  // one (score, target/decoy) entry per peptide sequence
  unordered_map<String, ScoreToTgtDecLabelPair> seq_to_score_labels;
  IDScoreGetterSetter::fillPeptideScoreMap_(seq_to_score_labels, map, include_unassigned);
  ScoreToTgtDecLabelPairs pairs;
  pairs.reserve(seq_to_score_labels.size());
  for (auto const & seq_to_score_label : seq_to_score_labels)
  {
    pairs.push_back(seq_to_score_label.second);
  }
  std::map<double,double> score_to_fdr;
  calculateFDRBasic_(score_to_fdr, pairs, q_value, higher_better);
  // convert scores in unordered map to FDR/qvalues
  for (auto & seq_to_score_label : seq_to_score_labels)
  {
    if (higher_better)
    {
      // look up the FDR of the closest score not exceeding this one
      auto ub = score_to_fdr.upper_bound(seq_to_score_label.second.first);
      if (ub != score_to_fdr.begin()) ub--;
      seq_to_score_label.second.first = ub->second;
    }
    else
    {
      seq_to_score_label.second.first = score_to_fdr.lower_bound(seq_to_score_label.second.first)->second;
    }
  }
  IDScoreGetterSetter::setPeptideScoresFromMap_(seq_to_score_labels, map, score_type, add_decoy_peptides, include_unassigned);
}
/// Peptide-level (sequence-aggregated) FDR/q-values for a list of peptide IDs.
void FalseDiscoveryRate::applyBasicPeptideLevel(PeptideIdentificationList & ids)
{
  // guard the ids[0] access below (the ConsensusMap overload and
  // applyBasic(run_info, ids) already handle empty input gracefully)
  if (ids.empty()) return;
  bool q_value = !param_.getValue("no_qvalues").toBool();
  //TODO Check naming conventions. Ontology?
  const string& score_type = q_value ? Constants::UserParam::PEPTIDE_Q_VALUE : "peptide FDR";
  bool add_decoy_peptides = param_.getValue("add_decoy_peptides").toBool();
  // since we do not support multiple runs here yet, we take the orientation of the first ID
  bool higher_better = ids[0].isHigherScoreBetter();
  // one (score, target/decoy) entry per peptide sequence
  unordered_map<String, ScoreToTgtDecLabelPair> seq_to_score_labels;
  IDScoreGetterSetter::fillPeptideScoreMap_(seq_to_score_labels, ids);
  ScoreToTgtDecLabelPairs pairs;
  pairs.reserve(seq_to_score_labels.size());
  for (auto const & seq_to_score_label : seq_to_score_labels)
  {
    pairs.push_back(seq_to_score_label.second);
  }
  map<double,double> score_to_fdr;
  calculateFDRBasic_(score_to_fdr, pairs, q_value, higher_better);
  // convert scores in unordered map to FDR/qvalues
  for (auto & seq_to_score_label : seq_to_score_labels)
  {
    if (higher_better)
    {
      // look up the FDR of the closest score not exceeding this one
      auto ub = score_to_fdr.upper_bound(seq_to_score_label.second.first);
      if (ub != score_to_fdr.begin()) ub--;
      seq_to_score_label.second.first = ub->second;
    }
    else
    {
      seq_to_score_label.second.first = score_to_fdr.lower_bound(seq_to_score_label.second.first)->second;
    }
  }
  IDScoreGetterSetter::setPeptideScoresFromMap_(seq_to_score_labels, ids, score_type, add_decoy_peptides);
}
// TODO why again do we need higher_score_better here?
/// Basic FDR/q-value annotation of peptide IDs, optionally filtered by charge
/// state (charge != 0), run ('identifier' non-empty), and/or the
/// "best_per_peptide" meta value.
void FalseDiscoveryRate::applyBasic(PeptideIdentificationList & ids, bool higher_score_better, int charge, String identifier, bool only_best_per_pep)
{
  bool q_value = !param_.getValue("no_qvalues").toBool();
  //TODO Check naming conventions. Ontology?
  const string& score_type = q_value ? "q-value" : "FDR";
  bool use_all_hits = param_.getValue("use_all_hits").toBool();
  bool add_decoy_peptides = param_.getValue("add_decoy_peptides").toBool();
  ScoreToTgtDecLabelPairs scores_labels;
  std::map<double,double> scores_to_FDR;
  // predicate: does this peptide ID belong to the run given by 'identifier'?
  auto idcheck = [&identifier](const PeptideIdentification& id){return identifier == id.getIdentifier();};
  // Dispatch on (charge filter?, best-per-peptide filter?, run filter?) to the
  // matching getScores_ overload; the FDR computation itself is shared below.
  if (charge == 0 && !only_best_per_pep)
  {
    // no hit-level filtering
    if (identifier.empty())
    {
      IDScoreGetterSetter::getScores_(scores_labels, ids, use_all_hits);
    }
    else
    {
      IDScoreGetterSetter::getScores_(scores_labels, ids, idcheck, use_all_hits);
    }
  }
  else
  {
    if (!only_best_per_pep /*&& charge != 0*/)
    {
      // filter hits by charge state only
      if (identifier.empty())
      {
        IDScoreGetterSetter::getScores_(scores_labels, ids, use_all_hits, [&charge](const PeptideHit& hit){return charge == hit.getCharge();});
      }
      else
      {
        IDScoreGetterSetter::getScores_(scores_labels, ids, idcheck, use_all_hits, [&charge](const PeptideHit& hit){return charge == hit.getCharge();});
      }
    }
    else if (charge == 0 /* && only_best_per_pep */)
    {
      // filter hits by the "best_per_peptide" meta value only
      if (identifier.empty())
      {
        IDScoreGetterSetter::getScores_(scores_labels, ids, use_all_hits, [](const PeptideHit& hit){return hit.metaValueExists("best_per_peptide") && int(hit.getMetaValue("best_per_peptide")) == 1;});
      }
      else
      {
        IDScoreGetterSetter::getScores_(scores_labels, ids, idcheck, use_all_hits, [](const PeptideHit& hit){return hit.metaValueExists("best_per_peptide") && int(hit.getMetaValue("best_per_peptide")) == 1;});
      }
    }
    else /*charge != 0 && only_best_per_pep */
    {
      // filter hits by charge state AND the "best_per_peptide" meta value
      if (identifier.empty())
      {
        IDScoreGetterSetter::getScores_(scores_labels, ids, use_all_hits, [&charge](const PeptideHit& hit){return (charge == hit.getCharge()) && hit.metaValueExists("best_per_peptide") && int(hit.getMetaValue("best_per_peptide")) == 1;});
      }
      else
      {
        IDScoreGetterSetter::getScores_(scores_labels, ids, idcheck, use_all_hits, [&charge](const PeptideHit& hit){return (charge == hit.getCharge()) && hit.metaValueExists("best_per_peptide") && int(hit.getMetaValue("best_per_peptide")) == 1;});
      }
    }
  }
  if (scores_labels.empty())
  {
    throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No scores could be extracted for FDR!");
  }
  calculateFDRBasic_(scores_to_FDR, scores_labels, q_value, higher_score_better);
  if (!scores_labels.empty())
    IDScoreGetterSetter::setScores_<PeptideIdentification>(scores_to_FDR, ids.getData(), score_type, false, add_decoy_peptides);
  scores_to_FDR.clear();
}
//TODO could be implemented for PeptideIDs, too
//TODO iterate over the vector. to be consistent with old interface
void FalseDiscoveryRate::applyEstimated(std::vector<ProteinIdentification> &ids) const
{
//Note: this is actually unused because I think with that approach you will always get q-values.
//bool q_value = !param_.getValue("no_qvalues").toBool();
bool higher_score_better(ids.begin()->isHigherScoreBetter());
bool add_decoy_proteins = param_.getValue("add_decoy_proteins").toBool();
//TODO not yet supported (if ever)
//bool treat_runs_separately = param_.getValue("treat_runs_separately").toBool();
if (ids.size() > 1)
{
OPENMS_LOG_WARN << "More than one set of ProteinIdentifications found. Only using the first one for FDR calculation.\n";
}
if (ids[0].getScoreType() != "Posterior Probability" && ids[0].getScoreType() != "Posterior Error Probability")
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Proteins in ProteinIdentification do not have a posterior (error) probability assigned. Please run an inference first.", ids[0].getScoreType());
}
ScoreToTgtDecLabelPairs scores_labels;
std::map<double,double> scores_to_FDR;
//TODO actually we do not need the labels for estimated FDR and it currently fails if we do not have TD annotations
//TODO maybe separate getScores and getScoresAndLabels
IDScoreGetterSetter::getScores_(scores_labels, ids[0]);
calculateEstimatedQVal_(scores_to_FDR, scores_labels, higher_score_better);
if (!scores_labels.empty())
IDScoreGetterSetter::setScores_(scores_to_FDR, ids[0], "Estimated Q-Values", false, add_decoy_proteins);
}
//TODO remove?
double FalseDiscoveryRate::applyEvaluateProteinIDs(const std::vector<ProteinIdentification>& ids, double pepCutoff, UInt fpCutoff, double diffWeight) const
{
//TODO not yet supported (if ever)
//bool treat_runs_separately = param_.getValue("treat_runs_separately").toBool();
if (ids.size() > 1)
{
OPENMS_LOG_WARN << "More than one set of ProteinIdentifications found. Only using the first one for calculation.\n";
}
if (ids[0].getScoreType() != "Posterior Probability")
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Proteins in ProteinIdentification do not have a posterior probability assigned. Please run an inference first.", ids[0].getScoreType());
}
ScoreToTgtDecLabelPairs scores_labels;
IDScoreGetterSetter::getScores_(scores_labels, ids[0]);
std::sort(scores_labels.rbegin(), scores_labels.rend());
return diffEstimatedEmpirical(scores_labels, pepCutoff) * diffWeight +
rocN(scores_labels, fpCutoff) * (1 - diffWeight);
}
/// Evaluate protein posterior probabilities of a single run: blends the
/// inverted (estimated vs. empirical FDR) difference with the ROC-N AUC.
double FalseDiscoveryRate::applyEvaluateProteinIDs(const ProteinIdentification& ids, double pepCutoff, UInt fpCutoff, double diffWeight) const
{
  if (ids.getScoreType() != "Posterior Probability")
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Proteins in ProteinIdentification do not have a posterior probability assigned. Please run an inference first.", ids.getScoreType());
  }
  ScoreToTgtDecLabelPairs scores_labels;
  IDScoreGetterSetter::getScores_(scores_labels, ids);
  std::sort(scores_labels.rbegin(), scores_labels.rend()); // best (highest) first
  const double diff = diffEstimatedEmpirical(scores_labels, pepCutoff);
  const double auc = rocN(scores_labels, fpCutoff);
  OPENMS_LOG_INFO << "Evaluation of protein probabilities: Difference estimated vs. T-D FDR = " << diff << " and roc" << fpCutoff << " = " << auc << '\n';
  // we want the score to get higher the lesser the difference. Subtract from one.
  // Then convex combination with the AUC.
  const double diff_component = (1.0 - diff) * (1.0 - diffWeight);
  const double auc_component = auc * diffWeight;
  return diff_component + auc_component;
}
/// Evaluate pre-extracted (score, target/decoy) pairs; sorts them best-first
/// in place, then blends inverted FDR difference and ROC-N AUC.
double FalseDiscoveryRate::applyEvaluateProteinIDs(ScoreToTgtDecLabelPairs& scores_labels, double pepCutoff, UInt fpCutoff, double diffWeight) const
{
  std::sort(scores_labels.rbegin(), scores_labels.rend()); // best (highest) first
  const double diff = diffEstimatedEmpirical(scores_labels, pepCutoff);
  const double auc = rocN(scores_labels, fpCutoff);
  OPENMS_LOG_INFO << "Evaluation of protein probabilities: Difference estimated vs. T-D FDR = " << diff << " and roc" << fpCutoff << " = " << auc << '\n';
  // we want the score to get higher the lesser the difference. Subtract from one.
  // Then convex combination with the AUC.
  const double diff_component = (1.0 - diff) * (1.0 - diffWeight);
  const double auc_component = auc * diffWeight;
  return diff_component + auc_component;
}
//TODO this probably could work on group level, too, but only if peptide-level decoys were used, such that
// decoys are indistinguishable iff targets are indistinguishable
/// "Picked" protein FDR: target and decoy versions of the same accession are
/// paired up (via the decoy string) before the FDR calculation; optionally
/// also handles indistinguishable protein groups.
void FalseDiscoveryRate::applyPickedProteinFDR(ProteinIdentification & id, String decoy_string, bool prefix, bool groups_too)
{
  bool add_decoy_proteins = param_.getValue("add_decoy_proteins").toBool();
  bool q_value = !param_.getValue("no_qvalues").toBool();
  //TODO Check naming conventions. Ontology?
  const string& score_type = q_value ? "q-value" : "FDR";
  //TODO this assumes all runs have the same ordering! Otherwise do it per identifier.
  bool higher_score_better(id.isHigherScoreBetter());
  // auto-detect the decoy string if none was given
  if (decoy_string.empty())
  {
    auto r = FalseDiscoveryRate::DecoyStringHelper::findDecoyString(id);
    if (!r.success)
    {
      // fall back to the conventional "DECOY_" prefix
      r.is_prefix = true;
      r.name = "DECOY_";
      OPENMS_LOG_WARN << "Unable to determine decoy string automatically (not enough decoys were detected)! Using default " << (r.is_prefix ? "prefix" : "suffix") << " decoy string '" << r.name << "'\n"
                      << "If you think that this is incorrect, please provide a decoy_string and its position manually!\n";
    }
    prefix = r.is_prefix;
    decoy_string = r.name;
    // decoy string and position was extracted successfully
    OPENMS_LOG_INFO << "Using " << (prefix ? "prefix" : "suffix") << " decoy string '" << decoy_string << "'\n";
  }
  ScoreToTgtDecLabelPairs scores_labels;
  std::map<double,double> scores_to_FDR;
  // picked (score, target/decoy) entry per accession -- presumably one entry
  // per target/decoy pair; see getPickedProteinScores_ for the exact semantics
  std::unordered_map<String, ScoreToTgtDecLabelPair> picked_scores;
  IDScoreGetterSetter::getPickedProteinScores_(picked_scores, id, decoy_string, prefix);
  scores_labels.reserve(picked_scores.size());
  // handle indistinguishable protein groups first
  if (groups_too)
  {
    IDScoreGetterSetter::getPickedProteinGroupScores_(picked_scores, scores_labels, id.getIndistinguishableProteins(), decoy_string, prefix);
    calculateFDRBasic_(scores_to_FDR, scores_labels, q_value, higher_score_better);
    IDScoreGetterSetter::setScores_(scores_to_FDR, id.getIndistinguishableProteins(), score_type, false);
    scores_to_FDR.clear();
    scores_labels.clear();
  }
  // for single proteins just take all scores
  for (auto& kv : picked_scores)
  {
    scores_labels.emplace_back(std::move(kv.second)); // move all. We do not need them anymore
  }
  if (scores_labels.empty())
  {
    throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No scores could be extracted for FDR calculation!");
  }
  calculateFDRBasic_(scores_to_FDR, scores_labels, q_value, higher_score_better);
  IDScoreGetterSetter::setScores_(scores_to_FDR, id, score_type, false, add_decoy_proteins);
  scores_to_FDR.clear();
  scores_labels.clear();
}
//TODO the following two methods assume sortedness. Add precondition and/or doxygen comment
/// Area between the estimated FDR curve (from probability-type scores) and the
/// empirical target-decoy FDR curve. Assumes 'scores_labels' is sorted
/// best-first (see callers) and that scores are posterior probabilities in
/// [0, 1] (callers check the score type before calling).
double FalseDiscoveryRate::diffEstimatedEmpirical(const ScoreToTgtDecLabelPairs& scores_labels, double pepCutoff) const
{
  bool conservative = param_.getValue("conservative").toBool();
  if (scores_labels.empty())
  {
    OPENMS_LOG_WARN << "Warning: No scores extracted for FDR calculation. Skipping. Do you have target-decoy annotated Hits?\n";
    return 1.0; // worst (maximal) difference
  }
  double diffArea = 0.0;
  // est/emp: estimated and empirical FDR at the current score threshold;
  // *Prev: the values at the previous (distinct) threshold
  double est = 0.0, estPrev = 0.0, emp = 0.0, empPrev = 0.0;
  // running sum of (1 - score), i.e. the expected number of false positives
  // among the accepted hits if scores are posterior probabilities
  double pepSum = 0.0;
  // label convention: second != 0 -> target ("true positive"), else decoy
  UInt truePos = 0u, falsePos = 0u; //, truePosPrev = 0u, falsePosPrev = 0u;
  auto pit = scores_labels.cbegin();
  for (; pit != scores_labels.end()-1; ++pit)
  {
    pit->second ? truePos++ : falsePos++;
    pepSum += (1 - pit->first);
    //Look ahead. Running variables have already been incremented
    if ((pit+1)->first != pit->first)
    {
      est = pepSum / (truePos + falsePos);
      if (conservative)
      {
        // conservative empirical FDR: decoys / targets (1 if no targets yet)
        if (truePos == 0.)
        {
          emp = 1.;
        }
        else
        {
          emp = static_cast<double>(falsePos) / (truePos);
        }
      }
      else
      {
        emp = static_cast<double>(falsePos) / (truePos+falsePos);
      }
      // accumulate the area between the two curves (handles crossings)
      diffArea += trapezoidal_area_xEqy(estPrev, est, empPrev, emp);
      //truePosPrev = truePos;
      //falsePosPrev = falsePos;
      estPrev = est;
      empPrev = emp;
    }
  }
  //Last item. Always add areas there
  pit->second ? truePos++ : falsePos++;
  pepSum += (1 - pit->first);
  est = pepSum / (truePos + falsePos);
  emp = static_cast<double>(falsePos) / (truePos + falsePos);
  diffArea += trapezoidal_area_xEqy(estPrev, est, empPrev, emp);
  //scale by max PEP value achievable (= width); height = empFDR can be 1.0
  diffArea /= std::min(est, pepCutoff);
  return diffArea;
}
/// ROC-N: area under the ROC curve up to the fpCutoff-th false positive,
/// scaled to [0, 1]. Assumes 'scores_labels' is sorted best-first.
double FalseDiscoveryRate::rocN(const ScoreToTgtDecLabelPairs& scores_labels, Size fpCutoff) const
{
  if (scores_labels.empty())
  {
    OPENMS_LOG_WARN << "Warning: No scores extracted for FDR calculation. Skipping. Do you have target-decoy annotated Hits?\n";
    return 0.0;
  }
  double rocN = 0.0;
  // label convention: second != 0 -> target ("true positive"), else decoy
  UInt truePos = 0u, falsePos = 0u, truePosPrev = 0u, falsePosPrev = 0u;
  auto pit = scores_labels.cbegin();
  for (; pit != scores_labels.cend()-1; ++pit)
  {
    pit->second ? truePos++ : falsePos++;
    //Look ahead. Running variables have already been incremented
    if ((pit+1)->first != pit->first)
    {
      rocN += trapezoidal_area(falsePos, falsePosPrev, truePos, truePosPrev);
      if (falsePos >= fpCutoff)
      {
        //if with the last batch, you have >= N FPs, return scaled
        // guard: no targets yet -> the accumulated area is 0, and dividing by
        // zero would yield NaN (was unguarded before)
        return (truePos == 0) ? 0.0 : rocN / ((double)falsePos * truePos);
      }
      truePosPrev = truePos;
      falsePosPrev = falsePos;
    }
  }
  //Last item if not returned. Always add areas there
  pit->second ? truePos++ : falsePos++;
  rocN += trapezoidal_area(falsePos, falsePosPrev, truePos, truePosPrev);
  if (falsePos == 0) return 1; // no decoys: perfect separation by definition
  if (truePos == 0) return 0.0; // no targets: zero AUC (avoids 0/0 = NaN)
  // compute the divisor in double to avoid UInt*UInt overflow for large counts
  return rocN / ((double)falsePos * truePos);
}
/// Area between the line segment (x1,y1)-(x2,y2) and the x=y diagonal.
/// Requires x2 >= x1; correctly handles segments that cross the diagonal.
double FalseDiscoveryRate::trapezoidal_area_xEqy(double x1, double x2, double y1, double y2) const
{
  const double width = x2 - x1;
  const double d1 = y1 - x1; // signed distance of the first point from the diagonal
  const double d2 = y2 - x2; // signed distance of the second point from the diagonal
  if (std::signbit(d1) != std::signbit(d2))
  {
    // the segment crosses the diagonal: sum the areas of the two triangles
    // formed on either side of the intersection point
    return (d1 * d1 + d2 * d2) * width / (2.0 * (std::fabs(d1) + std::fabs(d2)));
  }
  // both endpoints lie on the same side: plain trapezoid between segment and diagonal
  return (std::fabs(d1) + std::fabs(d2)) * width / 2.0;
}
/// Area of a trapezoid with a flat base: |x1 - x2| times the mean of the two heights.
double FalseDiscoveryRate::trapezoidal_area(double x1, double x2, double y1, double y2) const
{
  const double width = std::fabs(x1 - x2);
  const double mean_height = (y1 + y2) / 2.0;
  return width * mean_height;
}
// Actually this does not need the bool entries in the scores_labels, but leads to less code
// Assumes P(E)Probabilities as scores
/// Computes estimated q-values as the running average of the (sorted) scores and
/// fills @p scores_to_FDR with score -> estimated FDR. For equal scores, the
/// smallest FDR encountered for that score wins (see comment below).
/// @param scores_to_FDR output map, score -> estimated FDR
/// @param scores_labels (score, label) pairs; sorted in place (best score first)
/// @param higher_score_better score orientation; if true, scores are transformed to 1 - avg
void FalseDiscoveryRate::calculateEstimatedQVal_(std::map<double, double> &scores_to_FDR,
                                                 ScoreToTgtDecLabelPairs &scores_labels,
                                                 bool higher_score_better) const
{
  if (scores_labels.empty())
  {
    OPENMS_LOG_WARN << "Warning: No scores extracted for FDR calculation. Skipping. Do you have target-decoy annotated Hits?\n";
    return;
  }
  if (higher_score_better)
  { // decreasing
    std::sort(scores_labels.rbegin(), scores_labels.rend());
  }
  else
  { // increasing
    std::sort(scores_labels.begin(), scores_labels.end());
  }
  //TODO I think we can just do it "in-place" to save space
  std::vector<double> estimatedFDR;
  estimatedFDR.reserve(scores_labels.size());
  // Basically a running average
  double sum = 0.0;
  for (size_t j = 0; j < scores_labels.size(); ++j)
  {
    sum += scores_labels[j].first;
    // BUG FIX: the original wrote estimatedFDR[j] = ... after only reserve(),
    // i.e. indexed into a vector of size 0 (undefined behavior); push_back is correct.
    estimatedFDR.push_back(sum / (double(j) + 1.0));
  }
  if (higher_score_better) // Transform to PEP
  {
    std::transform(estimatedFDR.begin(), estimatedFDR.end(), estimatedFDR.begin(), [&](double d) { return 1 - d; });
  }
  // In case of multiple equal scores, this will add the _last_ fdr that it finds. Since fdrs are decreasing
  // in either way in estimatedFDR, this adds the minimal FDR to the map for this score.
  // (generic lambda avoids a per-element pair conversion/copy)
  std::transform(scores_labels.begin(), scores_labels.end(), estimatedFDR.begin(), std::inserter(scores_to_FDR, scores_to_FDR.begin()), [&](const auto& sl, double fdr) { return std::make_pair(sl.first, fdr); });
}
/*
void FalseDiscoveryRate::calculateFDRBasic_(
std::map<double,double>& scores_to_FDR,
ScoreToTgtDecLabelPairs& scores_labels,
std::vector<size_t>& ordering,
bool qvalue,
bool higher_score_better) const
{
}
void FalseDiscoveryRate::calculateFDRBasicOnSorted_(
std::map<double,double>& scores_to_FDR,
ScoreToTgtDecLabelPairs& scores_labels,
bool qvalue,
bool higher_score_better) const
{
}*/
/// Computes FDRs (optionally q-values) from (score, label) pairs with a +1 pseudo-count
/// in the decoy numerator and fills @p scores_to_FDR with score -> FDR.
/// Labels may be fractional ("partial" decoys): a label of l contributes (1 - l) decoys.
/// @param scores_to_FDR output map, score -> FDR (or q-value)
/// @param scores_labels (score, label) pairs; sorted in place (best score first)
/// @param qvalue if true, apply a cumulative minimum so values are monotone (q-values)
/// @param higher_score_better score orientation, determines sort order and cummin direction
void FalseDiscoveryRate::calculateFDRBasic_(
  std::map<double,double>& scores_to_FDR,
  ScoreToTgtDecLabelPairs& scores_labels,
  bool qvalue,
  bool higher_score_better) const
{
  //TODO put in separate function to avoid ifs in iteration
  bool conservative = param_.getValue("conservative").toBool();
  if (scores_labels.empty())
  {
    OPENMS_LOG_WARN << "Warning: No scores extracted for FDR calculation. Skipping. Do you have target-decoy annotated Hits?\n";
    return;
  }
  if (higher_score_better)
  { // decreasing
    std::sort(scores_labels.rbegin(), scores_labels.rend());
  }
  else
  { // increasing
    std::sort(scores_labels.begin(), scores_labels.end());
  }
  //uniquify scores and add decoy proportions
  double decoys = 0.; // double to account for "partial" decoys
  double last_score = scores_labels[0].first;
  size_t j = 0;
  for (; j < scores_labels.size(); ++j)
  {
    //Although we do not really care about equality we compare with tolerance to make it (more?) compiler independent.
    if (std::abs(scores_labels[j].first - last_score) > 1e-12)
    {
#ifdef FALSE_DISCOVERY_RATE_DEBUG
      OPENMS_LOG_DEBUG << "Recording score: " << last_score << " with " << decoys << " decoys at index+1 = " << (j+1) << " -> fdr: " << decoys/(j+1.0) << '\n';
#endif
      // both variants use a +1 pseudo-count on the decoys; the conservative
      // variant divides by targets only (j+1 - decoys), the classic one by all hits (j+1)
      if (conservative)
      {
        scores_to_FDR[last_score] = (decoys+1.0)/(double(j)+1.0-decoys);
      }
      else
      {
        scores_to_FDR[last_score] = (decoys+1.0)/(double(j)+1.0);
      }
      last_score = scores_labels[j].first;
    }
    // label l contributes (1 - l) decoys; a pure decoy (l = 0) adds 1
    decoys += 1. - scores_labels[j].second;
    /* The following was for the binary interpretation. Now we allow for partial decoy contributions as above
    if (!scores_labels[j].second)
    {
      decoys++;
    }
    */
  }
  // in case there is only one score and generally to include the last score, I guess we need to do this
  if (conservative)
  {
    scores_to_FDR[last_score] = (decoys+1.0)/(double(j)+1.0-decoys);
  }
  else
  {
    scores_to_FDR[last_score] = (decoys+1.0)/(double(j)+1.0);
  }
  if (qvalue) //apply a cumulative minimum on the map (from low to high fdrs)
  {
    double cummin = 1.0;
    if (higher_score_better)
    {
      // better scores are higher, i.e. sit at the end of the (ascending) map:
      // iterate forward so the cummin propagates from worst to best score
      for (auto&& rit = scores_to_FDR.begin(); rit != scores_to_FDR.end(); ++rit)
      {
#ifdef FALSE_DISCOVERY_RATE_DEBUG
        OPENMS_LOG_DEBUG << "Comparing " << rit->second << " to " << cummin << '\n';
#endif
        cummin = std::min(rit->second, cummin);
        rit->second = cummin;
      }
    }
    else
    {
      // better scores are lower: iterate in reverse for the same effect
      for (auto&& rit = scores_to_FDR.rbegin(); rit != scores_to_FDR.rend(); ++rit)
      {
#ifdef FALSE_DISCOVERY_RATE_DEBUG
        OPENMS_LOG_DEBUG << "Comparing " << rit->second << " to " << cummin << '\n';
#endif
        cummin = std::min(rit->second, cummin);
        rit->second = cummin;
      }
    }
  }
}
// lower-cased decoy string -> number of occurrences as (prefix count, suffix count)
using DecoyStringToAffixCount = std::unordered_map<std::string, std::pair<Size, Size>>;
// lower-cased decoy string -> original case-sensitive spelling as seen in the accessions
using CaseInsensitiveToCaseSensitiveDecoy = std::unordered_map<std::string, std::string>;
/**
@brief Heuristic to determine the decoy string given a set of protein names
Tested decoy strings are "decoy", "dec", "reverse", "rev", "__id_decoy", "xxx", "shuffled", "shuffle", "pseudo" and "random".
Both prefix and suffix is tested and if one of the candidates above is found in at least 40% of all proteins,
it is returned as the winner (see DecoyHelper::Result).
*/
FalseDiscoveryRate::DecoyStringHelper::Result FalseDiscoveryRate::DecoyStringHelper::findDecoyString(const ProteinIdentification& proteins)
{
  // common decoy strings in FASTA files
  // note: decoy prefixes/suffices must be provided in lower case
  static const std::vector<std::string> affixes{ "decoy", "dec", "reverse", "rev", "reversed", "__id_decoy", "xxx", "shuffled", "shuffle", "pseudo", "random" };
  // map decoys to counts of occurrences as prefix/suffix
  DecoyStringToAffixCount decoy_count;
  // map case-insensitive strings back to original case (as used in fasta)
  CaseInsensitiveToCaseSensitiveDecoy decoy_case_sensitive;
  // setup prefix- and suffix regex strings
  // TODO extend regex to allow skipping the underscore? i.e. with "?"
  // NOTE(review): in the suffix regex the '*' ends up quantifying the last character of
  // each affix (e.g. "_decoy*" also matches "_deco"), whereas in the prefix regex "_*"
  // makes trailing underscores optional — confirm whether this asymmetry is intended.
  const std::string regexstr_prefix = std::string("^(") + ListUtils::concatenate<std::string>(affixes, "_*|") + "_*)";
  const std::string regexstr_suffix = std::string("(_") + ListUtils::concatenate<std::string>(affixes, "*|_") + ")$";
  // setup regexes
  const boost::regex pattern_prefix(regexstr_prefix);
  const boost::regex pattern_suffix(regexstr_suffix);
  Size all_prefix_occur(0), all_suffix_occur(0), all_proteins_count(0);
  // count, per candidate decoy string, how often it appears as prefix/suffix
  // of the (lower-cased) protein accessions
  for (const auto& prot : proteins.getHits())
  {
    all_proteins_count += 1;
    boost::smatch sm;
    const String& seq = prot.getAccession();
    String seq_lower = seq;
    seq_lower.toLower();
    // search for prefix
    bool found_prefix = boost::regex_search(seq_lower, sm, pattern_prefix);
    if (found_prefix)
    {
      std::string match = sm[0];
      all_prefix_occur++;
      // increase count of observed prefix
      decoy_count[match].first++;
      // store observed (case-sensitive and with special characters)
      std::string seq_decoy = StringUtils::prefix(seq, match.length());
      decoy_case_sensitive[match] = seq_decoy;
    }
    // search for suffix
    bool found_suffix = boost::regex_search(seq_lower, sm, pattern_suffix);
    if (found_suffix)
    {
      std::string match = sm[0];
      all_suffix_occur++;
      // increase count of observed suffix
      decoy_count[match].second++;
      // store observed (case-sensitive and with special characters)
      std::string seq_decoy = StringUtils::suffix(seq, match.length());
      decoy_case_sensitive[match] = seq_decoy;
    }
  }
  // DEBUG ONLY: print counts of found decoys
  for (auto &a : decoy_count)
  {
    OPENMS_LOG_DEBUG << a.first << "\t" << a.second.first << "\t" << a.second.second << '\n';
  }
  // less than 30% of proteins are decoys -> won't be able to determine a decoy string and its position
  // return default values
  if (static_cast<double>(all_prefix_occur + all_suffix_occur) < 0.3 * static_cast<double>(all_proteins_count))
  {
    OPENMS_LOG_ERROR << "Unable to determine decoy string (not enough occurrences; <30%)!\n";
    return {false, "?", true};
  }
  // tie between prefix and suffix counts: position cannot be decided
  if (all_prefix_occur == all_suffix_occur)
  {
    OPENMS_LOG_ERROR << "Unable to determine decoy string (prefix and suffix occur equally often)!\n";
    return {false, "?", true};
  }
  // Decoy prefix occurred at least 80% of all prefixes + observed in at least 30% of all proteins -> set it as prefix decoy
  for (const auto& pair : decoy_count)
  {
    const std::string & case_insensitive_decoy_string = pair.first;
    const std::pair<Size, Size>& prefix_suffix_counts = pair.second;
    double freq_prefix = static_cast<double>(prefix_suffix_counts.first) / static_cast<double>(all_prefix_occur);
    double freq_prefix_in_proteins = static_cast<double>(prefix_suffix_counts.first) / static_cast<double>(all_proteins_count);
    if (freq_prefix >= 0.8 && freq_prefix_in_proteins >= 0.3)
    {
      if (prefix_suffix_counts.first != all_prefix_occur)
      {
        OPENMS_LOG_WARN << "More than one decoy prefix observed!\n";
        OPENMS_LOG_WARN << "Using most frequent decoy prefix (" << (int)(freq_prefix * 100) << "%)\n";
      }
      return { true, decoy_case_sensitive[case_insensitive_decoy_string], true};
    }
  }
  // Decoy suffix occurred at least 80% of all suffixes + observed in at least 30% of all proteins -> set it as suffix decoy
  for (const auto& pair : decoy_count)
  {
    const std::string& case_insensitive_decoy_string = pair.first;
    const std::pair<Size, Size>& prefix_suffix_counts = pair.second;
    double freq_suffix = static_cast<double>(prefix_suffix_counts.second) / static_cast<double>(all_suffix_occur);
    double freq_suffix_in_proteins = static_cast<double>(prefix_suffix_counts.second) / static_cast<double>(all_proteins_count);
    if (freq_suffix >= 0.8 && freq_suffix_in_proteins >= 0.3)
    {
      if (prefix_suffix_counts.second != all_suffix_occur)
      {
        OPENMS_LOG_WARN << "More than one decoy suffix observed!\n";
        OPENMS_LOG_WARN << "Using most frequent decoy suffix (" << (int)(freq_suffix * 100) << "%)\n";
      }
      return { true, decoy_case_sensitive[case_insensitive_decoy_string], false};
    }
  }
  OPENMS_LOG_ERROR << "Unable to determine decoy string and its position. Please provide a decoy string and its position as parameters.\n";
  return {false, "?", true};
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/MetaboliteSpectralMatching.cpp | .cpp | 27,101 | 843 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Erhan Kenar $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/MetaboliteSpectralMatching.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <numeric>
#include <boost/math/special_functions/factorials.hpp>
#include <boost/dynamic_bitset.hpp>
#include <OpenMS/PROCESSING/SPECTRAMERGING/SpectraMerger.h>
#include <OpenMS/PROCESSING/FILTERING/WindowMower.h>
using namespace std;
namespace OpenMS
{
/// Default constructor: value-initializes every member
/// (numeric members start at 0, strings start empty).
SpectralMatch::SpectralMatch() :
  observed_precursor_mass_(),
  observed_precursor_rt_(),
  found_precursor_mass_(),
  found_precursor_charge_(),
  matching_score_(),
  observed_spectrum_idx_(),
  matching_spectrum_idx_(),
  observed_spectrum_native_id_(),
  primary_id_(),
  secondary_id_(),
  common_name_(),
  sum_formula_(),
  inchi_string_(),
  smiles_string_(),
  precursor_adduct_()
{
}
/// Default destructor (no owned resources, compiler-generated is sufficient)
SpectralMatch::~SpectralMatch() = default;
/// Copy constructor (memberwise copy, compiler-generated)
SpectralMatch::SpectralMatch(const SpectralMatch& sm) = default;
/// Assignment operator: memberwise copy with a self-assignment guard.
SpectralMatch& SpectralMatch::operator=(const SpectralMatch& rhs)
{
  if (this != &rhs)
  {
    observed_precursor_mass_ = rhs.observed_precursor_mass_;
    observed_precursor_rt_ = rhs.observed_precursor_rt_;
    found_precursor_mass_ = rhs.found_precursor_mass_;
    found_precursor_charge_ = rhs.found_precursor_charge_;
    matching_score_ = rhs.matching_score_;
    observed_spectrum_idx_ = rhs.observed_spectrum_idx_;
    matching_spectrum_idx_ = rhs.matching_spectrum_idx_;
    observed_spectrum_native_id_ = rhs.observed_spectrum_native_id_;
    primary_id_ = rhs.primary_id_;
    secondary_id_ = rhs.secondary_id_;
    common_name_ = rhs.common_name_;
    sum_formula_ = rhs.sum_formula_;
    inchi_string_ = rhs.inchi_string_;
    smiles_string_ = rhs.smiles_string_;
    precursor_adduct_ = rhs.precursor_adduct_;
  }
  return *this;
}
// --- plain getters/setters for all SpectralMatch members ---

// precursor m/z observed in the query (experimental) spectrum
double SpectralMatch::getObservedPrecursorMass() const
{
  return observed_precursor_mass_;
}
void SpectralMatch::setObservedPrecursorMass(const double& qmass)
{
  observed_precursor_mass_ = qmass;
}
// retention time of the observed precursor
double SpectralMatch::getObservedPrecursorRT() const
{
  return observed_precursor_rt_;
}
void SpectralMatch::setObservedPrecursorRT(const double& prt)
{
  observed_precursor_rt_ = prt;
}
// precursor m/z of the matched database (library) spectrum
double SpectralMatch::getFoundPrecursorMass() const
{
  return found_precursor_mass_;
}
void SpectralMatch::setFoundPrecursorMass(const double& fmass)
{
  found_precursor_mass_ = fmass;
}
// precursor charge of the matched database spectrum
Int SpectralMatch::getFoundPrecursorCharge() const
{
  return found_precursor_charge_;
}
void SpectralMatch::setFoundPrecursorCharge(const Int& pch)
{
  found_precursor_charge_ = pch;
}
// spectral matching score (hyperscore)
double SpectralMatch::getMatchingScore() const
{
  return matching_score_;
}
void SpectralMatch::setMatchingScore(const double& mscore)
{
  matching_score_ = mscore;
}
// index of the observed (query) spectrum in the experiment
Size SpectralMatch::getObservedSpectrumIndex() const
{
  return observed_spectrum_idx_;
}
void SpectralMatch::setObservedSpectrumIndex(const Size& obs_spec_idx)
{
  observed_spectrum_idx_ = obs_spec_idx;
}
// index of the matched spectrum in the spectral database
Size SpectralMatch::getMatchingSpectrumIndex() const
{
  return matching_spectrum_idx_;
}
void SpectralMatch::setMatchingSpectrumIndex(const Size& match_spec_idx)
{
  matching_spectrum_idx_ = match_spec_idx;
}
// native ID of the observed (query) spectrum
String SpectralMatch::getObservedSpectrumNativeID() const
{
  return observed_spectrum_native_id_;
}
void SpectralMatch::setObservedSpectrumNativeID(const String& obs_spec_native_id)
{
  observed_spectrum_native_id_ = obs_spec_native_id;
}
// primary database identifier of the matched compound
String SpectralMatch::getPrimaryIdentifier() const
{
  return primary_id_;
}
void SpectralMatch::setPrimaryIdentifier(const String& pid)
{
  primary_id_ = pid;
}
// secondary database identifier (e.g. HMDB ID)
String SpectralMatch::getSecondaryIdentifier() const
{
  return secondary_id_;
}
void SpectralMatch::setSecondaryIdentifier(const String& sid)
{
  secondary_id_ = sid;
}
// common (human-readable) compound name
String SpectralMatch::getCommonName() const
{
  return common_name_;
}
void SpectralMatch::setCommonName(const String& cname)
{
  common_name_ = cname;
}
// molecular sum formula of the matched compound
String SpectralMatch::getSumFormula() const
{
  return sum_formula_;
}
void SpectralMatch::setSumFormula(const String& sf)
{
  sum_formula_ = sf;
}
// InChI string of the matched compound
String SpectralMatch::getInchiString() const
{
  return inchi_string_;
}
void SpectralMatch::setInchiString(const String& istr)
{
  inchi_string_ = istr;
}
// SMILES string of the matched compound
String SpectralMatch::getSMILESString() const
{
  return smiles_string_;
}
void SpectralMatch::setSMILESString(const String& sstr)
{
  smiles_string_ = sstr;
}
// adduct of the matched precursor ion
String SpectralMatch::getPrecursorAdduct() const
{
  return precursor_adduct_;
}
void SpectralMatch::setPrecursorAdduct(const String& padd)
{
  precursor_adduct_ = padd;
}
/// Constructor: registers the algorithm's parameters (tolerances, ion mode,
/// reporting mode, spectra merging) with their defaults and valid values.
MetaboliteSpectralMatching::MetaboliteSpectralMatching() :
  DefaultParamHandler("MetaboliteSpectralMatching"), ProgressLogger()
{
  defaults_.setValue("prec_mass_error_value", 100.0, "Error allowed for precursor ion mass.");
  defaults_.setValue("frag_mass_error_value", 500.0, "Error allowed for product ions.");
  defaults_.setValue("mass_error_unit", "ppm", "Unit of mass error (ppm or Da)");
  defaults_.setValidStrings("mass_error_unit", {"ppm","Da"});
  defaults_.setValue("report_mode", "top3", "Which results shall be reported: the top-three scoring ones or the best scoring one?");
  defaults_.setValidStrings("report_mode", {"top3","best"});
  defaults_.setValue("ionization_mode", "positive", "Positive or negative ionization mode?");
  defaults_.setValidStrings("ionization_mode", {"positive","negative"});
  defaults_.setValue("merge_spectra", "true", "Merge MS2 spectra with the same precursor mass.");
  defaults_.setValidStrings("merge_spectra", {"true","false"});
  defaultsToParam_(); // sync param_ with the registered defaults
  this->setLogType(CMD);
}
/// Default destructor
MetaboliteSpectralMatching::~MetaboliteSpectralMatching() = default;
/// Convenience overload without peak annotations: forwards to computeHyperScore_
/// with a null annotations pointer.
double MetaboliteSpectralMatching::computeHyperScore(
  double fragment_mass_error,
  bool fragment_mass_tolerance_unit_ppm,
  const MSSpectrum& exp_spectrum,
  const MSSpectrum& db_spectrum,
  double mz_lower_bound)
{
  return computeHyperScore_(fragment_mass_error,
                            fragment_mass_tolerance_unit_ppm, exp_spectrum,
                            db_spectrum, nullptr, mz_lower_bound);
}
/// Overload that additionally collects peak annotations for matched peaks:
/// forwards to computeHyperScore_ with the annotations output vector.
double MetaboliteSpectralMatching::computeHyperScore(
  double fragment_mass_error,
  bool fragment_mass_tolerance_unit_ppm,
  const MSSpectrum& exp_spectrum,
  const MSSpectrum& db_spectrum,
  vector<PeptideHit::PeakAnnotation>& annotations,
  double mz_lower_bound)
{
  return computeHyperScore_(fragment_mass_error,
                            fragment_mass_tolerance_unit_ppm, exp_spectrum,
                            db_spectrum, &annotations, mz_lower_bound);
}
/// Computes a hyperscore between an experimental and a database spectrum:
/// log(dot product of matched peak intensities) + log(matched-ion factorial).
/// @param fragment_mass_error tolerance for matching fragment peaks
/// @param fragment_mass_tolerance_unit_ppm true: tolerance in ppm, false: absolute (Da)
/// @param exp_spectrum observed (query) spectrum
/// @param db_spectrum database (library) spectrum
/// @param annotations if non-null, filled with annotations for matched peaks
///        (requires string and integer data arrays on the DB spectrum)
/// @param mz_lower_bound ignore DB peaks below this m/z
/// @return hyperscore >= 0; 0 for empty spectra or fewer than 3 matched ions
double MetaboliteSpectralMatching::computeHyperScore_(
  double fragment_mass_error,
  bool fragment_mass_tolerance_unit_ppm,
  const MSSpectrum& exp_spectrum,
  const MSSpectrum& db_spectrum,
  vector<PeptideHit::PeakAnnotation>* annotations,
  double mz_lower_bound)
{
  if (exp_spectrum.empty()) return 0;
  // define m/z range to consider:
  double min_exp_mz = exp_spectrum[0].getMZ(); // lowest experimental m/z
  double mz_offset = fragment_mass_error;
  if (fragment_mass_tolerance_unit_ppm)
  {
    mz_offset = min_exp_mz * fragment_mass_error * 1e-6;
  }
  mz_lower_bound = max(mz_lower_bound, min_exp_mz - mz_offset);
  double max_exp_mz = exp_spectrum.back().getMZ(); // highest experimental m/z
  if (fragment_mass_tolerance_unit_ppm)
  {
    mz_offset = max_exp_mz * fragment_mass_error * 1e-6;
  }
  double mz_upper_bound = max_exp_mz + mz_offset;
  // for every DB (theoretical) peak in the valid m/z range, find the closest
  // matching experimental (observed) peak within the allowed tolerance;
  // in principle, multiple DB peaks can match to the same exp. peak:
  map<Size, vector<MSSpectrum::ConstIterator>> peak_matches;
  for (auto db_it = db_spectrum.MZBegin(mz_lower_bound);
       db_it != db_spectrum.MZEnd(mz_upper_bound); ++db_it)
  {
    double db_mz = db_it->getMZ();
    if (fragment_mass_tolerance_unit_ppm)
    {
      mz_offset = db_mz * fragment_mass_error * 1e-6;
    }
    Int index = exp_spectrum.findNearest(db_mz, mz_offset);
    if (index >= 0) peak_matches[index].push_back(db_it);
  }
  // dot product: each matched exp. peak contributes its intensity times the
  // maximum intensity among the DB peaks matched to it
  double dot_product = 0.0;
  for (const auto& match : peak_matches)
  {
    double db_intensity = 0.0;
    for (const auto& db_it : match.second)
    {
      db_intensity = max(db_intensity, double(db_it->getIntensity()));
    }
    dot_product += db_intensity * exp_spectrum[match.first].getIntensity();
  }
  // return annotations for matching peaks?
  if ((annotations != nullptr) &&
      !db_spectrum.getStringDataArrays().empty() &&
      !db_spectrum.getIntegerDataArrays().empty())
  {
    for (const auto& match : peak_matches)
    {
      const auto& exp_peak = exp_spectrum[match.first];
      // potentially add several annotations for the same peak if there are
      // multiple matches for that peak:
      for (const auto& db_it : match.second)
      {
        PeptideHit::PeakAnnotation ann;
        Size index = db_it - db_spectrum.begin();
        ann.annotation = db_spectrum.getStringDataArrays()[0].at(index);
        ann.charge = db_spectrum.getIntegerDataArrays()[0].at(index);
        ann.mz = exp_peak.getMZ();
        ann.intensity = exp_peak.getIntensity();
        annotations->push_back(ann);
      }
    }
  }
  Size matched_ions_count = peak_matches.size(); // count obs. peaks only once
  double matched_ions_term = 0.0;
  // return score 0 if too few matched ions
  if (matched_ions_count < 3)
  {
    return matched_ions_term;
  }
  // log(n!) term rewards many matched ions; clamp n to the largest factorial
  // representable as a double (boost's max_factorial)
  if (matched_ions_count <= boost::math::max_factorial<double>::value)
  {
    matched_ions_term = log(boost::math::factorial<double>(matched_ions_count));
  }
  else
  {
    matched_ions_term = log(boost::math::factorial<double>(boost::math::max_factorial<double>::value));
  }
  // clamp to 0 (log(dot_product) can be negative for small products)
  double hyperscore = log(dot_product) + matched_ions_term;
  if (hyperscore < 0) hyperscore = 0;
  return hyperscore;
}
/// Main entry point: matches each (optionally merged) MS2 spectrum in @p msexp
/// against the spectral library @p spec_db by precursor m/z and hyperscore,
/// and writes the matches to @p mztab_out.
/// @param msexp experimental peak map (filtered/merged in place)
/// @param spec_db spectral library (sorted in place by precursor m/z)
/// @param mztab_out output MzTab with the final matches
/// @param out_spectra if non-empty, path to store the processed spectra as mzML
void MetaboliteSpectralMatching::run(PeakMap& msexp, PeakMap& spec_db, MzTab& mztab_out, String& out_spectra)
{
  sort(spec_db.begin(), spec_db.end(), PrecursorMZLess);
  vector<double> mz_keys;
  // copy precursor m/z values to vector for searching
  for (Size spec_idx = 0; spec_idx < spec_db.size(); ++spec_idx)
  {
    mz_keys.push_back(spec_db[spec_idx].getPrecursors()[0].getMZ());
  }
  // remove potential noise peaks: keep only the 5 most intense peaks
  // per 20 Th sliding window
  WindowMower wm;
  Param wm_param;
  wm_param.setValue("windowsize", 20.0);
  wm_param.setValue("movetype", "slide");
  wm_param.setValue("peakcount", 5);
  wm.setParameters(wm_param);
  wm.filterPeakMap(msexp);
  // merge MS2 spectra with same precursor mass
  if (merge_spectra_)
  {
    SpectraMerger spme;
    spme.mergeSpectraPrecursors(msexp);
    wm.filterPeakMap(msexp); // re-filter, merging can re-introduce dense peaks
  }
  // store the spectra if an output file path is given
  if (!out_spectra.empty())
  {
    FileHandler().storeExperiment(out_spectra, msexp, {FileTypes::MZML});
  }
  // container storing results
  vector<SpectralMatch> matching_results;
  bool fragment_error_unit_ppm(true);
  if (mz_error_unit_ == "Da") { fragment_error_unit_ppm = false; }
  for (Size spec_idx = 0; spec_idx < msexp.size(); ++spec_idx)
  {
    OPENMS_LOG_DEBUG << "merged spectrum no. " << spec_idx << " with #fragment ions: " << msexp[spec_idx].size() << endl;
    // iterate over all precursor masses
    for (Size prec_idx = 0; prec_idx < msexp[spec_idx].getPrecursors().size(); ++prec_idx)
    {
      // get precursor m/z
      double precursor_mz(msexp[spec_idx].getPrecursors()[prec_idx].getMZ());
      OPENMS_LOG_DEBUG << "precursor no. " << prec_idx << ": mz " << precursor_mz << " ";
      // compute the precursor m/z window to search in the library
      double prec_mz_lowerbound, prec_mz_upperbound;
      if (!fragment_error_unit_ppm) // Da
      {
        prec_mz_lowerbound = precursor_mz - precursor_mz_error_;
        prec_mz_upperbound = precursor_mz + precursor_mz_error_;
      }
      else // ppm
      {
        double ppm_offset(precursor_mz * 1e-6 * precursor_mz_error_);
        prec_mz_lowerbound = precursor_mz - ppm_offset;
        prec_mz_upperbound = precursor_mz + ppm_offset;
      }
      OPENMS_LOG_DEBUG << "lower mz: " << prec_mz_lowerbound << " ";
      OPENMS_LOG_DEBUG << "upper mz: " << prec_mz_upperbound << endl;
      // binary search the sorted precursor m/z keys for the window bounds
      vector<double>::const_iterator lower_it = lower_bound(mz_keys.begin(), mz_keys.end(), prec_mz_lowerbound);
      vector<double>::const_iterator upper_it = upper_bound(mz_keys.begin(), mz_keys.end(), prec_mz_upperbound);
      Size start_idx(lower_it - mz_keys.begin());
      Size end_idx(upper_it - mz_keys.begin());
      {
        String id_to_log = msexp[spec_idx].metaValueExists("GNPS_Spectrum_ID")
                               ? msexp[spec_idx].getMetaValue("GNPS_Spectrum_ID").toString()
                               : msexp[spec_idx].getNativeID();
        OPENMS_LOG_DEBUG << "identifying " << id_to_log << endl;
      }
      vector<SpectralMatch> partial_results;
      for (Size search_idx = start_idx; search_idx < end_idx; ++search_idx)
      {
        // do spectral matching
        // Debug: list all available metadata keys
        OPENMS_LOG_DEBUG << "Available metadata keys for spectrum " << search_idx << ":";
        std::vector<String> keys;
        spec_db[search_idx].getKeys(keys);
        for (const auto& key : keys)
        {
          OPENMS_LOG_DEBUG << " " << key;
        }
        OPENMS_LOG_DEBUG << endl;
        // resolve a display name for the library spectrum (used for debug logging only)
        String metabolite_name = "";
        if (spec_db[search_idx].metaValueExists(Constants::UserParam::MSM_METABOLITE_NAME)) {
          metabolite_name = spec_db[search_idx]
                                .getMetaValue(Constants::UserParam::MSM_METABOLITE_NAME)
                                .toString();
        } else if (spec_db[search_idx].metaValueExists("GNPS_Spectrum_ID")) {
          metabolite_name = spec_db[search_idx]
                                .getMetaValue("GNPS_Spectrum_ID")
                                .toString();
        }
        OPENMS_LOG_DEBUG << "scanning "
                         << spec_db[search_idx].getPrecursors()[0].getMZ()
                         << " " << metabolite_name << endl;
        // check for charge state of precursor ions: do they match?
        if ( (ion_mode_ == "positive" && spec_db[search_idx].getPrecursors()[0].getCharge() < 0) || (ion_mode_ == "negative" && spec_db[search_idx].getPrecursors()[0].getCharge() > 0))
        {
          continue;
        }
        double hyperscore(computeHyperScore(fragment_mz_error_, fragment_error_unit_ppm, msexp[spec_idx], spec_db[search_idx], 0.0));
        OPENMS_LOG_DEBUG << " scored with " << hyperscore << endl;
        if (hyperscore > 0)
        {
          String massbank_id = "";
          // NOTE(review): this 'metabolite_name' shadows the one declared above
          // (different lookup priority: Metabolite_Name first here) — confirm intended
          String metabolite_name = "";
          if (spec_db[search_idx].metaValueExists("GNPS_Spectrum_ID")) {
            massbank_id = spec_db[search_idx].getMetaValue("GNPS_Spectrum_ID").toString();
          }
          if (spec_db[search_idx].metaValueExists("Metabolite_Name")) {
            metabolite_name = spec_db[search_idx].getMetaValue("Metabolite_Name").toString();
          } else if (spec_db[search_idx].metaValueExists("GNPS_Spectrum_ID")) {
            metabolite_name = massbank_id; // Use GNPS_Spectrum_ID as name if no Metabolite_Name
          }
          OPENMS_LOG_DEBUG << " ** detected " << massbank_id << " " << metabolite_name << " scored with " << hyperscore << endl;
          // score result temporarily
          SpectralMatch tmp_match;
          tmp_match.setObservedPrecursorMass(precursor_mz);
          tmp_match.setFoundPrecursorMass(spec_db[search_idx].getPrecursors()[0].getMZ());
          double obs_rt = floor(msexp[spec_idx].getRT() * 10)/10.0; // truncate RT to one decimal
          tmp_match.setObservedPrecursorRT(obs_rt);
          tmp_match.setFoundPrecursorCharge(spec_db[search_idx].getPrecursors()[0].getCharge());
          tmp_match.setMatchingScore(hyperscore);
          tmp_match.setObservedSpectrumIndex(spec_idx);
          tmp_match.setMatchingSpectrumIndex(search_idx);
          tmp_match.setObservedSpectrumNativeID(msexp[spec_idx].getNativeID());
          // primary identifier priority: GNPS ID > MassBank accession > metabolite name > native ID
          String primary_id_value;
          if (spec_db[search_idx].metaValueExists("GNPS_Spectrum_ID"))
          {
            primary_id_value = spec_db[search_idx].getMetaValue("GNPS_Spectrum_ID").toString();
          }
          else if (spec_db[search_idx].metaValueExists("Massbank_Accession_ID"))
          {
            primary_id_value = spec_db[search_idx].getMetaValue("Massbank_Accession_ID").toString();
          }
          else if (spec_db[search_idx].metaValueExists(Constants::UserParam::MSM_METABOLITE_NAME))
          {
            primary_id_value = spec_db[search_idx].getMetaValue(Constants::UserParam::MSM_METABOLITE_NAME).toString();
          }
          else
          {
            primary_id_value = spec_db[search_idx].getNativeID();
          }
          tmp_match.setPrimaryIdentifier(primary_id_value);
          tmp_match.setSecondaryIdentifier(spec_db[search_idx].getMetaValue("HMDB_ID"));
          tmp_match.setSumFormula(spec_db[search_idx].getMetaValue(Constants::UserParam::MSM_SUM_FORMULA));
          tmp_match.setCommonName(metabolite_name);
          tmp_match.setInchiString(spec_db[search_idx].getMetaValue(Constants::UserParam::MSM_INCHI_STRING));
          tmp_match.setSMILESString(spec_db[search_idx].getMetaValue(Constants::UserParam::MSM_SMILES_STRING));
          tmp_match.setPrecursorAdduct(spec_db[search_idx].getMetaValue(Constants::UserParam::MSM_PRECURSOR_ADDUCT));
          partial_results.push_back(tmp_match);
        }
      }
      // sort results by decreasing score
      sort(partial_results.begin(), partial_results.end(), SpectralMatchScoreGreater);
      // report mode: top3 or best?
      if (report_mode_ == "top3")
      {
        Size num_results(partial_results.size());
        Size last_result_idx = (num_results >= 3) ? 3 : num_results;
        for (Size result_idx = 0; result_idx < last_result_idx; ++result_idx)
        {
          OPENMS_LOG_DEBUG << "score: " << partial_results[result_idx].getMatchingScore() << " " << partial_results[result_idx].getMatchingSpectrumIndex() << endl;
          matching_results.push_back(partial_results[result_idx]);
        }
      }
      if (report_mode_ == "best")
      {
        if (!partial_results.empty())
        {
          matching_results.push_back(partial_results[0]);
        }
      }
    } // end precursor loop
  } // end spectra loop
  // write final results to MzTab
  exportMzTab_(matching_results, mztab_out);
}
/// protected methods
/// Synchronizes member variables with the current parameter values
/// (called by DefaultParamHandler whenever parameters change).
void MetaboliteSpectralMatching::updateMembers_()
{
  precursor_mz_error_ = (double)param_.getValue("prec_mass_error_value");
  fragment_mz_error_ = (double)param_.getValue("frag_mass_error_value");
  ion_mode_ = param_.getValue("ionization_mode").toString();
  mz_error_unit_ = param_.getValue("mass_error_unit").toString();
  report_mode_ = param_.getValue("report_mode").toString();
  merge_spectra_ = (bool)param_.getValue("merge_spectra").toBool();
}
/// private methods
void MetaboliteSpectralMatching::exportMzTab_(const vector<SpectralMatch>& overall_results, MzTab& mztab_out)
{
// iterate the overall results table
MzTabSmallMoleculeSectionRows all_sm_rows;
for (Size id_idx = 0; id_idx < overall_results.size(); ++id_idx)
{
SpectralMatch current_id(overall_results[id_idx]);
MzTabSmallMoleculeSectionRow mztab_row_record;
// set the identifier field
String hid_temp = current_id.getPrimaryIdentifier();
MzTabString prim_id;
prim_id.set(hid_temp);
vector<MzTabString> id_dummy;
id_dummy.push_back(prim_id);
MzTabStringList string_dummy_list;
string_dummy_list.set(id_dummy);
mztab_row_record.identifier = string_dummy_list;
// set the chemical formula field
MzTabString chem_form;
String form_temp = current_id.getSumFormula();
chem_form.set(form_temp);
mztab_row_record.chemical_formula = chem_form;
// set the smiles field
String smi_temp = current_id.getSMILESString(); // extract SMILES from struct mapping file
MzTabString smi_string;
smi_string.set(smi_temp);
mztab_row_record.smiles = smi_string;
// set the inchi_key field
String inchi_temp = current_id.getInchiString(); // extract INCHIKEY from struct mapping file
MzTabString inchi_key;
inchi_key.set(inchi_temp);
mztab_row_record.inchi_key = inchi_key;
// set description field (we use it for the common name of the compound)
String name_temp = current_id.getCommonName();
MzTabString common_name;
common_name.set(name_temp);
mztab_row_record.description = common_name;
// set mass_to_charge field (precursor mass here)
double mz_temp = current_id.getFoundPrecursorMass();
MzTabDouble mass_to_charge;
mass_to_charge.set(mz_temp);
mztab_row_record.exp_mass_to_charge = mass_to_charge; //TODO: distinguish the experimental precursor mass and spectral library precursor mass (later should probably go into cv_opt_ column)
// set charge field
Int ch_temp = current_id.getFoundPrecursorCharge();
MzTabInteger mcharge;
mcharge.set(ch_temp);
mztab_row_record.charge = mcharge;
// set RT field
double rt_temp = current_id.getObservedPrecursorRT();
MzTabDouble rt_temp2;
rt_temp2.set(rt_temp);
vector<MzTabDouble> rt_temp3;
rt_temp3.push_back(rt_temp2);
MzTabDoubleList observed_rt;
observed_rt.set(rt_temp3);
mztab_row_record.retention_time = observed_rt;
// set database field
String dbname_temp = "MassBank";
MzTabString dbname;
dbname.set(dbname_temp);
mztab_row_record.database = dbname;
// set database_version field
String dbver_temp = "Sep 27, 2013";
MzTabString dbversion;
dbversion.set(dbver_temp);
mztab_row_record.database_version = dbversion;
// set smallmolecule_abundance_sub
// check if we deal with a feature or consensus feature
vector<MzTabDouble> int_temp3;
double int_temp(0.0);
MzTabDouble int_temp2;
int_temp2.set(int_temp);
int_temp3.push_back(int_temp2);
for (Size i = 0; i != int_temp3.size(); ++i)
{
mztab_row_record.smallmolecule_abundance_study_variable[i + 1] = int_temp3[i];
}
// set smallmolecule_abundance_stdev_sub; not applicable for a single feature intensity, however must be filled. Otherwise, the mzTab export fails.
double stdev_temp(0.0);
MzTabDouble stdev_temp2;
stdev_temp2.set(stdev_temp);
vector<MzTabDouble> stdev_temp3;
stdev_temp3.push_back(stdev_temp2);
for (Size i = 0; i != stdev_temp3.size(); ++i)
{
mztab_row_record.smallmolecule_abundance_stdev_study_variable[i + 1] = stdev_temp3[i];
}
// set smallmolecule_abundance_std_error_sub; not applicable for a single feature intensity, however must be filled. Otherwise, the mzTab export fails.
double stderr_temp(0.0);
MzTabDouble stderr_temp2;
stderr_temp2.set(stderr_temp);
vector<MzTabDouble> stderr_temp3;
stderr_temp3.push_back(stderr_temp2);
for (Size i = 0; i != stderr_temp3.size(); ++i)
{
mztab_row_record.smallmolecule_abundance_std_error_study_variable[i + 1] = stderr_temp3[i];
}
// optional columns:
vector<MzTabOptionalColumnEntry> optionals;
// ppm error
double error_ppm(((current_id.getFoundPrecursorMass() - current_id.getObservedPrecursorMass())/current_id.getFoundPrecursorMass())*1e6);
error_ppm = floor(error_ppm*100)/100;
MzTabString ppmerr;
ppmerr.set(String(error_ppm));
MzTabOptionalColumnEntry col0;
col0.first = "opt_ppm_error";
col0.second = ppmerr;
optionals.push_back(col0);
// set found adduct ion
String addion_temp = current_id.getPrecursorAdduct();
MzTabString addion;
addion.set(addion_temp);
MzTabOptionalColumnEntry col1;
col1.first = "opt_adduct_ion";
col1.second = addion;
optionals.push_back(col1);
// set isotope similarity score
double sim_score_temp = current_id.getMatchingScore();
stringstream read_in;
read_in << sim_score_temp;
String sim_score_temp2(read_in.str());
MzTabString sim_score;
sim_score.set(sim_score_temp2);
MzTabOptionalColumnEntry col2;
col2.first = "opt_match_score";
col2.second = sim_score;
optionals.push_back(col2);
// set secondary ID (here HMDB id)
String sec_id = current_id.getSecondaryIdentifier();
MzTabString sec_id_str;
sec_id_str.set(sec_id);
MzTabOptionalColumnEntry col3;
col3.first = "opt_sec_id";
col3.second = sec_id_str;
optionals.push_back(col3);
// set source spectra index
// TODO: this should use spectra_ref column
String source_idx = String(current_id.getObservedSpectrumIndex());
MzTabString source_idx_str;
source_idx_str.set(source_idx);
MzTabOptionalColumnEntry col4;
col4.first = "opt_source_idx";
col4.second = source_idx_str;
optionals.push_back(col4);
// set spectrum native ID
String spec_native_id = current_id.getObservedSpectrumNativeID();
MzTabString spec_native_id_str;
spec_native_id_str.set(spec_native_id);
MzTabOptionalColumnEntry col5;
col5.first = "opt_spec_native_id";
col5.second = spec_native_id_str;
optionals.push_back(col5);
mztab_row_record.opt_ = optionals;
all_sm_rows.push_back(mztab_row_record);
}
mztab_out.setSmallMoleculeSectionRows(all_sm_rows);
}
} // closing namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/IDConflictResolverAlgorithm.cpp | .cpp | 4,619 | 158 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hendrik Weisser, Lucia Espona, Moritz Freidank $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/IDConflictResolverAlgorithm.h>
using namespace std;
namespace OpenMS
{
/// Resolve conflicting peptide IDs within each feature of a FeatureMap
/// (delegates to the resolveConflict_ overload for feature containers).
void IDConflictResolverAlgorithm::resolve(FeatureMap & features, bool keep_matching)
{
  resolveConflict_(features, keep_matching);
}
/// Same as above, for consensus features in a ConsensusMap.
void IDConflictResolverAlgorithm::resolve(ConsensusMap & features, bool keep_matching)
{
  resolveConflict_(features, keep_matching);
}
/// Resolve ID conflicts between different features of a FeatureMap
/// (delegates to resolveBetweenFeatures_).
void IDConflictResolverAlgorithm::resolveBetweenFeatures(FeatureMap & features)
{
  resolveBetweenFeatures_(features);
}
/// Same as above, for consensus features in a ConsensusMap.
void IDConflictResolverAlgorithm::resolveBetweenFeatures(ConsensusMap & features)
{
  resolveBetweenFeatures_(features);
}
// static
/**
  @brief Keep only the best-scoring peptide ID; reduce the other IDs to their
  hit matching the best sequence, or move them to @p removed.

  The best ID (by top-hit score; score orientation is taken from the first ID)
  is swapped to the front. Every other ID is reduced to the single hit whose
  sequence equals the best hit's sequence; IDs without such a hit are annotated
  with the feature UID and moved to @p removed.

  @param peptides IDs of one feature; reduced in place
  @param removed receives the non-matching IDs
  @param uid feature UID, stored as meta value "feature_id" on removed IDs
*/
void IDConflictResolverAlgorithm::resolveConflictKeepMatching_(
  PeptideIdentificationList & peptides,
  PeptideIdentificationList & removed,
  UInt64 uid)
{
  if (peptides.empty()) { return; }

  for (PeptideIdentification & pep : peptides)
  {
    // sort hits (best hit first)
    pep.sort();
  }

  PeptideIdentificationList::iterator pos;
  if (peptides[0].isHigherScoreBetter()) // find highest-scoring ID
  {
    pos = max_element(peptides.begin(), peptides.end(), compareIDsSmallerScores_);
  }
  else // find lowest-scoring ID
  {
    pos = min_element(peptides.begin(), peptides.end(), compareIDsSmallerScores_);
  }

  // BUGFIX: guard against an ID without hits -- previously "getHits()[0]" was
  // undefined behavior if the selected ID had an empty hit list (possible when
  // all IDs are empty, or with "lower is better" scores, where empty IDs
  // compare smallest and win min_element)
  if (pos->getHits().empty()) { return; }

  // BUGFIX: copy the best sequence instead of holding a reference into *pos --
  // the swap below exchanges the contents of the two IDs, so a reference into
  // the hit vector would only stay valid if the swap moves (rather than
  // copies) the underlying storage
  const AASequence best = pos->getHits()[0].getSequence();

  std::swap(*peptides.begin(), *pos); // put best on first position

  // filter for matching PEP Sequence and move to unassigned/removed
  for (auto it = ++peptides.begin(); it != peptides.end();)
  {
    auto& hits = it->getHits();
    auto hit = hits.begin();
    for (; hit != hits.end(); ++hit)
    {
      if (hit->getSequence() == best)
      {
        break;
      }
    }
    if (hit != hits.end()) // found sequence
    {
      hits[0] = *hit; // put the match on first place
      hits.resize(1); // remove rest
      ++it;
    }
    else // not found
    {
      // annotate feature_id for later reference
      it->setMetaValue("feature_id", String(uid));
      // move to "removed" vector
      removed.push_back(std::move(*it));
      // erase and update iterator
      it = peptides.erase(it);
    }
  }
}
// static
/**
  @brief Reduce the IDs of one feature to the single best-scoring ID with its
  single best hit; all other IDs go to @p removed.

  Every ID is first trimmed to its best hit and annotated with the feature UID
  (meta value "feature_id"); then the overall winner (score orientation taken
  from the first ID) is kept and the rest is copied to @p removed in their
  original order.
*/
void IDConflictResolverAlgorithm::resolveConflict_(
  PeptideIdentificationList & peptides,
  PeptideIdentificationList & removed,
  UInt64 uid)
{
  if (peptides.empty()) { return; }

  // trim every ID to its single best hit and tag it with the feature UID
  for (PeptideIdentification & pep : peptides)
  {
    pep.sort(); // best hit first
    if (!pep.getHits().empty())
    {
      vector<PeptideHit> top_hit(1, pep.getHits()[0]);
      pep.setHits(top_hit);
    }
    pep.setMetaValue("feature_id", String(uid));
  }

  // locate the overall best ID
  const bool higher_better = peptides[0].isHigherScoreBetter();
  PeptideIdentificationList::iterator best = higher_better
    ? max_element(peptides.begin(), peptides.end(), compareIDsSmallerScores_)
    : min_element(peptides.begin(), peptides.end(), compareIDsSmallerScores_);

  // everything except the winner goes to "removed" (original order preserved)
  for (auto it = peptides.begin(); it != peptides.end(); ++it) // OMS_CODING_TEST_EXCLUDE
  {
    if (it != best)
    {
      removed.push_back(*it);
    }
  }

  // keep only the best ID
  peptides[0] = *best;
  peptides.resize(1);
}
// static
/// Orders two peptide IDs by the score of their respective best hit.
/// An ID without hits compares "smaller" than one with hits, and two empty
/// IDs compare equal -- comp(x, x) must be false for strict weak ordering.
bool IDConflictResolverAlgorithm::compareIDsSmallerScores_(const PeptideIdentification & left, const PeptideIdentification & right)
{
  const bool left_empty = left.getHits().empty();
  const bool right_empty = right.getHits().empty();
  if (left_empty || right_empty)
  {
    return left.getHits().size() < right.getHits().size();
  }
  return left.getHits()[0].getScore() < right.getHits()[0].getScore();
}
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/FIAMSScheduler.cpp | .cpp | 3,239 | 94 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Svetlana Kutuzova, Douglas McCloskey $
// $Authors: Svetlana Kutuzova, Douglas McCloskey $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/FIAMSScheduler.h>
#include <OpenMS/ANALYSIS/ID/FIAMSDataProcessor.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <QDir>
namespace OpenMS {
/**
  @brief Constructor: stores the given paths and immediately parses the
  sample sheet via loadSamples_().

  @param filename path to the CSV sample sheet (header row + one row per sample)
  @param base_dir prefix prepended to all input paths referenced in the CSV
  @param output_dir prefix prepended to all output paths referenced in the CSV
  @param load_cached forwarded to FIAMSDataProcessor::run() in run();
         presumably enables reuse of cached intermediate results
*/
FIAMSScheduler::FIAMSScheduler(
  String filename,
  String base_dir,
  String output_dir,
  bool load_cached
)
  :
  filename_(std::move(filename)),
  base_dir_(std::move(base_dir)),
  output_dir_(std::move(output_dir)),
  load_cached_(load_cached),
  samples_()
{
  loadSamples_();
}
void FIAMSScheduler::loadSamples_() {
CsvFile csv_file(filename_, ',');
StringList headers;
csv_file.getRow(0, headers);
StringList row;
for (Size i = 1; i < csv_file.rowCount(); ++i) {
csv_file.getRow(i, row);
std::map<String, String> mapping;
for (Size j = 0; j < headers.size(); ++j) {
mapping[headers[j]] = row[j];
}
samples_.push_back(mapping);
}
}
/**
  @brief Process all samples from the CSV in parallel (one OpenMP iteration
  per sample).

  For every sample row: load the referenced mzML file, configure a
  FIAMSDataProcessor from the row's fields, and run it once per entry of the
  semicolon-separated "time" column.

  NOTE(review): samples_[i].at(...) throws on missing columns and std::stof
  throws on malformed numbers; an exception escaping an OpenMP parallel
  region terminates the program -- consider validating rows in loadSamples_().
*/
void FIAMSScheduler::run() {
  #pragma omp parallel for
  for (int i = 0; i < (int)samples_.size(); ++i) {
    MSExperiment exp;
    // input path: <base_dir><dir_input>/<filename>.mzML
    FileHandler().loadExperiment(base_dir_ + samples_[i].at("dir_input") + "/" + samples_[i].at("filename") + ".mzML", exp, {FileTypes::MZML});
    FIAMSDataProcessor fia_processor;
    Param p;
    p.setValue("filename", samples_[i].at("filename"));
    p.setValue("dir_output", output_dir_ + samples_[i].at("dir_output"));
    // make sure the output directory exists before the processor writes to it
    QDir qd;
    qd.mkpath(p.getValue("dir_output").toString().c_str());
    p.setValue("resolution", std::stof(samples_[i].at("resolution")));
    // the CSV column is called "charge" but it is used as the polarity setting
    p.setValue("polarity", samples_[i].at("charge"));
    p.setValue("db:mapping", std::vector<std::string>{base_dir_ + samples_[i].at("db_mapping")});
    p.setValue("db:struct", std::vector<std::string>{base_dir_ + samples_[i].at("db_struct")});
    p.setValue("positive_adducts", base_dir_ + samples_[i].at("positive_adducts"));
    p.setValue("negative_adducts", base_dir_ + samples_[i].at("negative_adducts"));
    fia_processor.setParameters(p);
    // "time" holds one or more acquisition time spans, separated by ';';
    // the processor is run once per span
    String time = samples_[i].at("time");
    std::vector<String> times;
    time.split(";", times);
    for (Size j = 0; j < times.size(); ++j) {
      OPENMS_LOG_INFO << "Started " << samples_[i].at("filename") << " for " << times[j] << " seconds" << std::endl;
      MzTab mztab_output;
      fia_processor.run(exp, std::stof(times[j]), mztab_output, load_cached_);
      OPENMS_LOG_INFO << "Finished " << samples_[i].at("filename") << " for " << times[j] << " seconds" << std::endl;
    }
  }
}
/// Read-only access to the parsed samples (one header->value map per CSV data row).
const std::vector<std::map<String, String>>& FIAMSScheduler::getSamples() {
  return samples_;
}
/// Prefix prepended to all input paths referenced in the sample sheet.
const String& FIAMSScheduler::getBaseDir() {
  return base_dir_;
}
/// Prefix prepended to all output paths referenced in the sample sheet.
const String& FIAMSScheduler::getOutputDir() {
  return output_dir_;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/IDDecoyProbability.cpp | .cpp | 18,432 | 542 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Sven Nahnsen $
// $Authors: Andreas Bertsch $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/IDDecoyProbability.h>
#include <boost/math/special_functions/gamma.hpp>
#include <fstream>
// #define IDDECOYPROBABILITY_DEBUG
// #undef IDDECOYPROBABILITY_DEBUG
using namespace std;
namespace OpenMS
{
// Register the algorithm's parameters (see DefaultParamHandler).
IDDecoyProbability::IDDecoyProbability() :
  DefaultParamHandler("IDDecoyProbability")
{
  // histogram resolution used for the gamma/Gauss fits in apply_()
  defaults_.setValue("number_of_bins", 40, "Number of bins used for the fitting, if sparse datasets are used, this number should be smaller", {"advanced"});
  // fallback for -log10(score) when a "lower is better" score is (close to) 0
  defaults_.setValue("lower_score_better_default_value_if_zero", 50.0, "This value is used if e.g. a E-value score is 0 and cannot be transformed in a real number (log of E-value)", {"advanced"});
#ifdef IDDECOYPROBABILITY_DEBUG
  // debug-only output file names for the gnuplot dumps
  defaults_.setValue("rev_filename", "", "bla", {"advanced"});
  defaults_.setValue("fwd_filename", "", "bla", {"advanced"});
#endif
  defaultsToParam_();
}
// compiler-generated copy and destruction are sufficient (no owned resources)
IDDecoyProbability::IDDecoyProbability(const IDDecoyProbability & rhs) = default;
IDDecoyProbability::~IDDecoyProbability() = default;
/**
  @brief Compute decoy-based probabilities for IDs that carry their own
  target/decoy annotation (PeptideHit::isDecoy).

  Scores of "lower is better" types are mapped to -log10(score); near-zero
  scores are clamped to the configured fallback value. The original score is
  preserved as meta value "<score_type>_Score". Target, decoy and combined
  score lists are then handed to apply_() for the distribution fits.
*/
void IDDecoyProbability::apply(PeptideIdentificationList & ids)
{
  const double zero_fallback(static_cast<double>(param_.getValue("lower_score_better_default_value_if_zero")));
  const double zero_threshold = pow(10.0, -zero_fallback);

  // map a raw score to the internal "higher is better" scale
  auto transform = [&](double score, bool higher_better) -> double
  {
    if (higher_better) { return score; }
    return (score < zero_threshold) ? zero_fallback : -log10(score);
  };

  vector<double> rev_scores, fwd_scores, all_scores;
  // collect (transformed) target and decoy scores
  for (PeptideIdentification& pep : ids)
  {
    String score_type = pep.getScoreType();
    if (pep.getHits().empty()) { continue; }
    vector<PeptideHit> hits = pep.getHits();
    for (PeptideHit& pit : hits)
    {
      // keep the original score as meta value before transforming
      pit.setMetaValue(score_type + "_Score", pit.getScore());
      const double score = transform(pit.getScore(), pep.isHigherScoreBetter());
      if (pit.isDecoy())
      {
        rev_scores.push_back(score);
      }
      else
      {
        fwd_scores.push_back(score);
      }
      all_scores.push_back(score);
    }
    pep.setHits(hits);
  }

#ifdef IDDECOYPROBABILITY_DEBUG
  cerr << ids.size() << " " << rev_scores.size() << " " << fwd_scores.size() << " " << all_scores.size() << endl;
#endif

  apply_(ids, rev_scores, fwd_scores, all_scores);
}
/**
  @brief Compute decoy-based probabilities from separate target and decoy
  search results.

  Target scores (with originals preserved as meta value "<score_type>_Score")
  and decoy scores are transformed to a common "higher is better" scale and
  handed to apply_(); @p prob_ids receives the re-scored target IDs.
*/
void IDDecoyProbability::apply(PeptideIdentificationList & prob_ids, const PeptideIdentificationList & orig_fwd_ids, const PeptideIdentificationList & rev_ids)
{
  const double zero_fallback((double)param_.getValue("lower_score_better_default_value_if_zero"));
  const double zero_threshold = pow((double)10.0, -zero_fallback);

  // map a raw score to the internal "higher is better" scale
  auto transform = [&](double score, bool higher_better) -> double
  {
    if (higher_better) { return score; }
    return (score < zero_threshold) ? zero_fallback : -log10(score);
  };

  PeptideIdentificationList fwd_ids = orig_fwd_ids;
  vector<double> rev_scores, fwd_scores, all_scores;

  // collect the (transformed) target scores
  for (PeptideIdentification& pep : fwd_ids)
  {
    String score_type = pep.getScoreType();
    if (pep.getHits().empty()) { continue; }
    vector<PeptideHit> hits = pep.getHits();
    for (PeptideHit& pit : hits)
    {
      pit.setMetaValue(score_type + "_Score", pit.getScore());
      const double score = transform(pit.getScore(), pep.isHigherScoreBetter());
      fwd_scores.push_back(score);
      all_scores.push_back(score);
    }
    pep.setHits(hits);
  }

  // collect the (transformed) decoy scores
  for (const PeptideIdentification& pep : rev_ids)
  {
    for (const PeptideHit& pit : pep.getHits())
    {
      const double score = transform(pit.getScore(), pep.isHigherScoreBetter());
      rev_scores.push_back(score);
      all_scores.push_back(score);
    }
  }

  prob_ids = fwd_ids;
  apply_(prob_ids, rev_scores, fwd_scores, all_scores);
}
/**
  @brief Core of the decoy-probability computation.

  Fits a gamma distribution to the (normalized) decoy score histogram and a
  Gauss distribution to the positive fwd-minus-rev difference histogram, then
  re-scores every hit with P(correct | score) via getProbability_(). The IDs
  are replaced by re-scored copies with score type "<old>_DecoyProbability"
  (higher is better); the original transformed score is kept as meta value
  "<score_type>_score".
*/
void IDDecoyProbability::apply_(PeptideIdentificationList & ids, const vector<double> & rev_scores, const vector<double> & fwd_scores, const vector<double> & all_scores)
{
  Size number_of_bins(param_.getValue("number_of_bins"));

  // normalize distribution to [0, 1]
  vector<double> fwd_scores_normalized(number_of_bins, 0.0), rev_scores_normalized(number_of_bins, 0.0), diff_scores(number_of_bins, 0.0), all_scores_normalized(number_of_bins, 0.0);
  Transformation_ rev_trafo, fwd_trafo, all_trafo;
  normalizeBins_(rev_scores, rev_scores_normalized, rev_trafo);
  normalizeBins_(fwd_scores, fwd_scores_normalized, fwd_trafo);
  normalizeBins_(all_scores, all_scores_normalized, all_trafo);

  // rev scores fitting: fit a gamma distribution to the decoy histogram
  vector<DPosition<2> > rev_data;
  for (Size i = 0; i < number_of_bins; ++i)
  {
    DPosition<2> pos;
    pos.setX(((double)i) / (double)number_of_bins + 0.0001); // small offset keeps x > 0 for the gamma pdf
    pos.setY(rev_scores_normalized[i]);
    rev_data.push_back(pos);
#ifdef IDDECOYPROBABILITY_DEBUG
    cerr << pos.getX() << " " << pos.getY() << endl;
#endif
  }
  Math::GammaDistributionFitter gdf;
  Math::GammaDistributionFitter::GammaDistributionFitResult result_gamma_1st (1.0, 3.0);
  gdf.setInitialParameters(result_gamma_1st);
  // TODO heuristic for good start parameters
  Math::GammaDistributionFitter::GammaDistributionFitResult result_gamma = gdf.fit(rev_data);
#ifdef IDDECOYPROBABILITY_DEBUG
  cerr << gdf.getGnuplotFormula() << endl;
  String rev_filename = param_.getValue("rev_filename");
  generateDistributionImage_(rev_scores_normalized, gdf.getGnuplotFormula(), rev_filename);
#endif

  // generate diffs of distributions
  // get the fwd and rev distribution, apply all_trafo and calculate the diff
  vector<Size> fwd_bins(number_of_bins, 0), rev_bins(number_of_bins, 0);
  double min(all_trafo.min_score), diff(all_trafo.diff_score);
  Size max_bin(0);
  for (vector<double>::const_iterator it = fwd_scores.begin(); it != fwd_scores.end(); ++it)
  {
    Size bin = (Size)((*it - min) / diff * (double)(number_of_bins - 1));
    ++fwd_bins[bin];
    if (fwd_bins[bin] > max_bin)
    {
      max_bin = fwd_bins[bin];
    }
  }

  Size max_reverse_bin(0), max_reverse_bin_value(0);
  //min = rev_trafo.min_score;
  //diff = rev_trafo.diff_score;
  for (vector<double>::const_iterator it = rev_scores.begin(); it != rev_scores.end(); ++it)
  {
    // BUGFIX: use (number_of_bins - 1) like the fwd loop above and
    // normalizeBins_(); the previous factor of number_of_bins indexed
    // rev_bins out of range (it has number_of_bins elements) whenever a
    // decoy score equaled the global maximum score
    Size bin = (Size)((*it - min) / diff * (double)(number_of_bins - 1));
    ++rev_bins[bin];
    if (rev_bins[bin] > max_bin)
    {
      max_bin = rev_bins[bin];
    }
    if (rev_bins[bin] > max_reverse_bin_value)
    {
      max_reverse_bin = bin;
      max_reverse_bin_value = rev_bins[bin];
    }
  }
#ifdef IDDECOYPROBABILITY_DEBUG
  cerr << "Trying to get diff scores" << endl;
#endif

  // get diff of fwd and rev: bins right of the decoy mode where targets
  // clearly dominate (fwd > 1.3 * rev) form the "correct" distribution
  for (Size i = 0; i < number_of_bins; ++i)
  {
    Size fwd(0), rev(0);
    fwd = fwd_bins[i];
    rev = rev_bins[i];
    if ((double)fwd > (double)(1.3 * rev) && max_reverse_bin < i)
    {
      diff_scores[i] = (double)(fwd - rev) / (double)max_bin;
    }
    else
    {
      diff_scores[i] = 0.0;
    }
  }
#ifdef IDDECOYPROBABILITY_DEBUG
  cerr << "Gauss Fitting values size of diff scores=" << diff_scores.size() << endl;
#endif

  // diff scores fitting: estimate start parameters for the Gauss fit
  vector<DPosition<2> > diff_data;
  double gauss_A(0), gauss_x0(0), norm_factor(0);
  for (Size i = 0; i < number_of_bins; ++i)
  {
    DPosition<2> pos;
    pos.setX((double)i / (double)number_of_bins);
    pos.setY(diff_scores[i]);
    if (pos.getY() > gauss_A)
    {
      gauss_A = pos.getY();
    }
    gauss_x0 += pos.getX() * pos.getY();
    norm_factor += pos.getY();
    diff_data.push_back(pos);
  }

  double gauss_sigma(0);
  // NOTE(review): dividing the weighted sum by both the number of points and
  // the weight sum looks like a double normalization; it only affects the
  // *initial* guess for the Gauss fit, so it is kept unchanged here.
  gauss_x0 /= (double)diff_data.size();
  gauss_x0 /= norm_factor;

  // NOTE(review): "<=" iterates number_of_bins + 1 times; no array is indexed
  // here (only the sigma estimate is affected), kept to preserve fit results.
  for (Size i = 0; i <= number_of_bins; ++i)
  {
    gauss_sigma += fabs(gauss_x0 - (double)i / (double)number_of_bins);
  }
  gauss_sigma /= (double)diff_data.size();

#ifdef IDDECOYPROBABILITY_DEBUG
  cerr << "setting initial parameters: " << endl;
#endif
  Math::GaussFitter gf;
  Math::GaussFitter::GaussFitResult result_1st(gauss_A, gauss_x0, gauss_sigma);
  gf.setInitialParameters(result_1st);
#ifdef IDDECOYPROBABILITY_DEBUG
  cerr << "Initial Gauss guess: A=" << gauss_A << ", x0=" << gauss_x0 << ", sigma=" << gauss_sigma << endl;
#endif

  //TODO: fail-to-fit correction was done using the GNUPlotFormula. Seemed to be a hack.
  //Changed it to try-catch-block but I am not sure if this correction should be made
  //at all. Can someone please verify?
  Math::GaussFitter::GaussFitResult result_gauss (gauss_A, gauss_x0, gauss_sigma);
  try {
    result_gauss = gf.fit(diff_data);
  }
  catch(Exception::UnableToFit& /* e */)
  {
    // fall back to the initial estimates if the fit does not converge
    result_gauss.A = gauss_A;
    result_gauss.x0 = gauss_x0;
    result_gauss.sigma = gauss_sigma;
  }

  // // fit failed?
  // if (gf.getGnuplotFormula() == "")
  // {
  //   result_gauss.A = gauss_A;
  //   result_gauss.x0 = gauss_x0;
  //   result_gauss.sigma = gauss_sigma;
  // }

#ifdef IDDECOYPROBABILITY_DEBUG
  cerr << gf.getGnuplotFormula() << endl;
  String fwd_filename = param_.getValue("fwd_filename");
  if (gf.getGnuplotFormula() == "")
  {
    String formula("f(x)=" + String(gauss_A) + " * exp(-(x - " + String(gauss_x0) + ") ** 2 / 2 / (" + String(gauss_sigma) + ") ** 2)");
    generateDistributionImage_(diff_scores, formula, fwd_filename);
  }
  else
  {
    generateDistributionImage_(diff_scores, gf.getGnuplotFormula(), fwd_filename);
  }
#endif

#ifdef IDDECOYPROBABILITY_DEBUG
  //all_trafo.diff_score + all_trafo.min_score
  String gauss_formula("f(x)=" + String(result_gauss.A / all_trafo.max_intensity) + " * exp(-(x - " + String(result_gauss.x0 * all_trafo.diff_score + all_trafo.min_score) + ") ** 2 / 2 / (" + String(result_gauss.sigma * all_trafo.diff_score) + ") ** 2)");
  String b_str(result_gamma.b), p_str(result_gamma.p);
  String gamma_formula = "g(x)=(" + b_str + " ** " + p_str + ") / gamma(" + p_str + ") * x ** (" + p_str + " - 1) * exp(- " + b_str + " * x)";
  generateDistributionImage_(all_scores_normalized, all_trafo, gauss_formula, gamma_formula, (String)param_.getValue("fwd_filename"));
#endif

  PeptideIdentificationList new_prob_ids;
  // calculate the probabilities and write them to the IDs
  for (const PeptideIdentification& pep : ids)
  {
    if (!pep.getHits().empty())
    {
      vector<PeptideHit> hits;
      String score_type = pep.getScoreType() + "_score";
      for (const PeptideHit& pit : pep.getHits())
      {
        PeptideHit hit = pit;
        double score = hit.getScore();
        if (!pep.isHigherScoreBetter())
        {
          score = -log10(score);
        }
        // keep the (transformed) search engine score as meta value
        hit.setMetaValue(score_type, hit.getScore());
        hit.setScore(getProbability_(result_gamma, rev_trafo, result_gauss, fwd_trafo, score));
        hits.push_back(hit);
      }
      PeptideIdentification id = pep;
      id.setHigherScoreBetter(true);
      id.setScoreType(id.getScoreType() + "_DecoyProbability");
      id.setHits(hits);
      new_prob_ids.push_back(id);
    }
  }
  ids = new_prob_ids;
}
/**
  @brief Bin the given scores into @p binned (size "number_of_bins") and scale
  the histogram so its maximum is 4 (empirically best for the gamma fit).
  The applied score-axis transformation is stored in @p trafo.
*/
void IDDecoyProbability::normalizeBins_(const vector<double> & scores, vector<double> & binned, Transformation_ & trafo)
{
  Size number_of_bins(param_.getValue("number_of_bins"));

  // get the range of the scores
  // BUGFIX: numeric_limits<double>::min() is the smallest *positive* double;
  // using it as the initial maximum broke the search whenever all scores were
  // negative. Use lowest() instead.
  double max(numeric_limits<double>::lowest()), min(numeric_limits<double>::max());
  for (vector<double>::const_iterator it = scores.begin(); it != scores.end(); ++it)
  {
    if (*it > max)
    {
      max = *it;
    }
    if (*it < min)
    {
      min = *it;
    }
  }
#ifdef IDDECOYPROBABILITY_DEBUG
  cerr << "Range is [" << min << ", " << max << "]" << endl;
#endif

  double diff = max - min;
  // degenerate input (no scores, or all scores identical): binning would
  // divide by zero; leave "binned" untouched and store a neutral trafo
  if (scores.empty() || diff <= 0.0)
  {
    trafo.max_intensity = 0.0;
    trafo.diff_score = 0.0;
    trafo.min_score = scores.empty() ? 0.0 : min;
    trafo.max_intensity_bin = 0;
    trafo.max_score = scores.empty() ? 0.0 : max;
    return;
  }

  // perform the binning
  Size max_bin_number(0);
  double max_bin(0);
  for (vector<double>::const_iterator it = scores.begin(); it != scores.end(); ++it)
  {
    Size bin = (Size)((*it - min) / diff * (double)(number_of_bins - 1));
    binned[bin] += 1;
    if (binned[bin] > max_bin)
    {
      max_bin = binned[bin];
      max_bin_number = bin;
    }
  }

  // normalize to \sum = 1
  for (vector<double>::iterator it = binned.begin(); it != binned.end(); ++it)
  {
    *it /= (double)max_bin / 4.0; // 4 is best value for the gamma distribution
  }

  // store the transformation
  trafo.max_intensity = 4.0 / (double)max_bin;
  trafo.diff_score = diff;
  trafo.min_score = min;
  trafo.max_intensity_bin = max_bin_number;
  trafo.max_score = max;
#ifdef IDDECOYPROBABILITY_DEBUG
  cerr << "TRAFO: max_intensity=" << trafo.max_intensity << ", diff_score=" << trafo.diff_score << ", min_score=" << trafo.min_score << ", max_intensity_bin=" << trafo.max_intensity_bin << ", max_score=" << trafo.max_score << endl;
#endif
}
/**
  @brief Convert a (transformed) score into the probability of being a
  "correct" (target) hit.

  Evaluates the fitted gamma density (decoy/background) and the fitted Gauss
  density (target) at the score and combines them via Bayes' theorem:
  P = rho_fwd / (rho_fwd + rho_rev).
*/
double IDDecoyProbability::getProbability_(const Math::GammaDistributionFitter::GammaDistributionFitResult & result_gamma,
                                           const Transformation_ & gamma_trafo,
                                           const Math::GaussFitter::GaussFitResult & result_gauss,
                                           const Transformation_ & gauss_trafo,
                                           double score)
{
  double rho_rev(0), rho_fwd(0);
  Size number_of_bins(param_.getValue("number_of_bins"));

  // first transform the score into a background distribution density value
  double score_rev_trans = (score - gamma_trafo.min_score) / gamma_trafo.diff_score;
  if (score_rev_trans < gamma_trafo.max_intensity_bin / (double)number_of_bins)
  {
    // left of the decoy histogram mode: use the peak density instead of the
    // gamma fit, which is unreliable in this region
    rho_rev = 1.0 / gamma_trafo.max_intensity;
  }
  else
  {
    // gamma pdf: b^p / Gamma(p) * x^(p-1) * exp(-b*x)
    rho_rev = pow(result_gamma.b, result_gamma.p) / std::tgamma(result_gamma.p) * pow(score_rev_trans, result_gamma.p - 1) * exp(-result_gamma.b * score_rev_trans);
  }

  // second transform the score into a 'correct' distribution density value
  double score_fwd_trans = (score - gauss_trafo.min_score) / gauss_trafo.diff_score;
#ifdef IDDECOYPROBABILITY_DEBUG
  cerr << "score=" << score << ", score_rev_trans=" << score_rev_trans << ", score_fwd_trans=" << score_fwd_trans << ", rho_rev=" << rho_rev << ", gauss_trafor.max_score=" << gauss_trafo.max_score;
#endif
  // left of the Gauss mean: evaluate the fitted Gauss; right of the mean the
  // density is saturated at 1 so higher scores never get lower probability.
  // NOTE(review): the debug message below refers to gauss_trafo.max_score and
  // does not match this condition -- presumably outdated; verify.
  if (score_fwd_trans < result_gauss.x0)
  {
#ifdef IDDECOYPROBABILITY_DEBUG
    cerr << "(score_fwd_trans > gauss_trafo.max_score, " << score_fwd_trans << " " << gauss_trafo.max_score << " -> 1)" << endl;
#endif
    rho_fwd = result_gauss.A * exp(-pow(score_fwd_trans - result_gauss.x0, 2) / 2.0 / pow(result_gauss.sigma, 2));
  }
  else
  {
    rho_fwd = 1;
  }
#ifdef IDDECOYPROBABILITY_DEBUG
  cerr << "rho_fwd=" << rho_fwd << endl;
#endif

  // calc P using Bayes theorem
  return rho_fwd / (rho_fwd + rho_rev);
}
void IDDecoyProbability::generateDistributionImage_(const vector<double> & ids, const String & formula, const String & filename)
{
Size number_of_bins(param_.getValue("number_of_bins"));
// write distribution to file
ofstream o((filename + "_dist_tmp.dat").c_str());
for (Size i = 0; i < number_of_bins; ++i)
{
o << (double)i / (double)number_of_bins << " " << ids[i] << endl;
}
o.close();
ofstream os((filename + "_gnuplot.gpl").c_str());
os << "set terminal png" << endl;
os << "set output '" << filename << "_distribution.png'" << endl;
os << formula << endl;
os << "plot f(x), '" << filename << "_dist_tmp.dat' w boxes" << endl;
os.close();
#ifdef IDDECOYPROBABILITY_DEBUG
Int syscalret = system(("gnuplot " + filename + "_gnuplot.gpl").c_str());
if (syscalret != 0)
{
cerr << "gnuplot system call failed!" << endl;
}
#endif
return;
}
void IDDecoyProbability::generateDistributionImage_(const vector<double> & all_ids, const Transformation_ & all_trans, const String & fwd_formula, const String & rev_formula, const String & filename)
{
Size number_of_bins(param_.getValue("number_of_bins"));
ofstream all_output((filename + "_all_tmp.dat").c_str());
for (Size i = 0; i < number_of_bins; ++i)
{
all_output << (double)i / (double)number_of_bins * all_trans.diff_score + all_trans.min_score << " " << all_ids[i] / all_trans.max_intensity << endl;
}
all_output.close();
ofstream os((filename + "_both_gnuplot.gpl").c_str());
os << "set terminal png" << endl;
os << "set output '" << filename << "_both_distributions.png'" << endl;
os << fwd_formula << endl;
os << rev_formula << endl;
//os << "plot f(x), '" << filename << "_fwd_tmp.dat' w boxes, g(x), '" << filename << "_rev_tmp.dat' w boxes, '" << filename << "_all_tmp.dat' w i" << endl;
os << "plot f(x), g(x), '" << filename << "_all_tmp.dat' w i" << endl;
os.close();
#ifdef IDDECOYPROBABILITY_DEBUG
Int syscalret = system(("gnuplot " + filename + "_both_gnuplot.gpl").c_str());
if (syscalret != 0)
{
cerr << "gnuplot system call failed!" << endl;
}
#endif
return;
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/ConsensusIDAlgorithm.cpp | .cpp | 5,348 | 146 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Sven Nahnsen, Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/ConsensusIDAlgorithm.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/Macros.h> // for "OPENMS_PRECONDITION"
#include <OpenMS/PROCESSING/ID/IDFilter.h>
using namespace std;
// #define DEBUG_ID_CONSENSUS
// #undef DEBUG_ID_CONSENSUS
namespace OpenMS
{
// Register the filtering parameters shared by all consensus ID algorithms.
ConsensusIDAlgorithm::ConsensusIDAlgorithm() :
  DefaultParamHandler("ConsensusIDAlgorithm")
{
  // number of top hits per ID run entering the consensus scoring (0 = all)
  defaults_.setValue("filter:considered_hits", 0, "The number of top hits in each ID run that are considered for consensus scoring ('0' for all hits).");
  defaults_.setMinInt("filter:considered_hits", 0);
  // minimum fraction of supporting ID runs required for a hit to be kept
  defaults_.setValue("filter:min_support", 0.0, "For each peptide hit from an ID run, the fraction of other ID runs that must support that hit (otherwise it is removed).");
  defaults_.setMinFloat("filter:min_support", 0.0);
  defaults_.setMaxFloat("filter:min_support", 1.0);
  defaults_.setValue("filter:count_empty", "false", "Count empty ID runs (i.e. those containing no peptide hit for the current spectrum) when calculating 'min_support'?");
  defaults_.setValidStrings("filter:count_empty", {"true","false"});
  defaults_.setValue("filter:keep_old_scores", "false", "if set, keeps the original scores as user params");
  defaults_.setValidStrings("filter:keep_old_scores", {"true","false"});
  defaultsToParam_();
}
// nothing to clean up; defined here (not in the header) to anchor the vtable
ConsensusIDAlgorithm::~ConsensusIDAlgorithm() = default;
// Cache the "filter:..." parameter values in member variables (called by
// DefaultParamHandler whenever the parameters change).
void ConsensusIDAlgorithm::updateMembers_()
{
  keep_old_scores_ = (param_.getValue("filter:keep_old_scores") == "true");
  count_empty_ = (param_.getValue("filter:count_empty") == "true");
  min_support_ = param_.getValue("filter:min_support");
  considered_hits_ = param_.getValue("filter:considered_hits");
}
/**
  @brief Compute a consensus ID from multiple ID runs for one spectrum.

  The input IDs are trimmed to the top "filter:considered_hits" hits and
  de-duplicated, then handed to the subclass-specific apply_(). The result
  groups are filtered by "filter:min_support" and written back as a single
  consensus PeptideIdentification (score type and orientation taken from the
  first input ID).
*/
void ConsensusIDAlgorithm::apply(PeptideIdentificationList& ids,
                                 const map<String, String>& se_info,
                                 Size number_of_runs)
{
  // abort if no IDs present
  if (ids.empty())
  {
    return;
  }

  number_of_runs_ = (number_of_runs != 0) ? number_of_runs : ids.size();

  // prepare data here, so that it doesn't have to happen in each algorithm:
  for (PeptideIdentification& pep : ids)
  {
    pep.sort();
    if ((considered_hits_ > 0) && (pep.getHits().size() > considered_hits_))
    {
      pep.getHits().resize(considered_hits_);
    }
  }
  // make sure there are no duplicated hits (by sequence):
  IDFilter::removeDuplicatePeptideHits(ids, true);

  SequenceGrouping results;
  apply_(ids, se_info, results); // actual (subclass-specific) processing

  // replace the input by a single consensus ID:
  String score_type = ids[0].getScoreType();
  bool higher_better = ids[0].isHigherScoreBetter();
  ids.clear();
  ids.resize(1);
  ids[0].setScoreType(score_type);
  ids[0].setHigherScoreBetter(higher_better);

  for (auto& group : results)
  {
    // filter by "support" value:
    if (group.second.support < min_support_) continue;
    PeptideHit hit;
    hit.setMetaValue("consensus_support", group.second.support);
    if (!group.second.target_decoy.empty())
    {
      hit.setMetaValue("target_decoy", group.second.target_decoy);
    }
    hit.setSequence(group.first);
    hit.setCharge(group.second.charge);
    hit.setScore(group.second.final_score);
    for (auto& ev : group.second.evidence)
    {
      hit.addPeptideEvidence(ev);
    }
    if (keep_old_scores_)
    {
      for (Size s = 0; s < group.second.scores.size(); ++s)
      {
        //TODO add SE name
        hit.setMetaValue(group.second.types[s] + "_score", group.second.scores[s]);
      }
    }
    ids[0].insertHit(hit);
#ifdef DEBUG_ID_CONSENSUS
    OPENMS_LOG_DEBUG << " - Output hit: " << hit.getSequence() << " "
                     << hit.getScore() << endl;
#endif
  }
  ids[0].sort();
}
// Convenience overload: run the consensus computation without any
// search-engine information.
void ConsensusIDAlgorithm::apply(PeptideIdentificationList& ids,
                                 Size number_of_runs)
{
  apply(ids, map<String, String>(), number_of_runs);
}
/**
  @brief Reconcile a newly observed charge state with the one recorded so far.

  A recorded charge of 0 means "not set yet" and is replaced by the new value;
  a new charge of 0 is uninformative and ignored.

  @throws Exception::InvalidValue if two conflicting non-zero charges are seen
*/
void ConsensusIDAlgorithm::compareChargeStates_(Int& recorded_charge,
                                                Int new_charge,
                                                const AASequence& peptide)
{
  if (recorded_charge == 0) // nothing recorded yet -> adopt the new charge
  {
    recorded_charge = new_charge;
    return;
  }
  if ((new_charge == 0) || (recorded_charge == new_charge))
  {
    return; // uninformative or consistent charge
  }
  // maybe TODO: calculate correct charge from prec. m/z and peptide mass?
  String msg = "Conflicting charge states found for peptide '" +
    peptide.toString() + "': " + String(recorded_charge) + ", " +
    String(new_charge);
  throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                msg, String(new_charge));
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/BasicProteinInferenceAlgorithm.cpp | .cpp | 28,311 | 640 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Julianus Pfeuffer $
// $Authors: Julianus Pfeuffer $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/BasicProteinInferenceAlgorithm.h>
#include <OpenMS/ANALYSIS/ID/IDBoostGraph.h>
#include <OpenMS/CONCEPT/VersionInfo.h>
#include <OpenMS/PROCESSING/ID/IDFilter.h>
#include <OpenMS/METADATA/PeptideHit.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <algorithm>
#include <iostream>
#include <map>
#include <unordered_map>
namespace OpenMS
{
using Internal::IDBoostGraph;
  // Registers all algorithm parameters (with defaults and valid values) via
  // DefaultParamHandler. Note: the insertion order below determines the order
  // in which parameters are presented to the user, so it must not be changed.
  BasicProteinInferenceAlgorithm::BasicProteinInferenceAlgorithm():
      DefaultParamHandler("BasicProteinInferenceAlgorithm"),
      ProgressLogger()
  {
    //TODO allow min_unique_peptides_per_protein (not the same as "use_shared = F" if you want to score the shared ones)
    defaults_.setValue("min_peptides_per_protein", 1,
                       "Minimal number of peptides needed for a protein identification."
                       " If set to zero, unmatched proteins get a score of -Infinity."
                       " If bigger than zero, proteins with less peptides are filtered and evidences removed from the PSMs."
                       " PSMs that do not reference any proteins anymore are removed but the spectrum info is kept.");
    defaults_.setMinInt("min_peptides_per_protein", 0);
    // how per-peptide scores are combined into one protein score
    // ("maximum" is an alias for "best", see aggFromString_)
    defaults_.setValue("score_aggregation_method",
                       "best",
                       "How to aggregate scores of peptides matching to the same protein?");
    defaults_.setValidStrings("score_aggregation_method", {"best","product","sum","maximum"});
    defaults_.setValue("treat_charge_variants_separately", "true",
                       "If this is true, different charge variants of the same peptide sequence count as individual evidences.");
    defaults_.setValidStrings("treat_charge_variants_separately", {"true","false"});
    defaults_.setValue("treat_modification_variants_separately", "true",
                       "If this is true, different modification variants of the same peptide sequence count as individual evidences.");
    defaults_.setValidStrings("treat_modification_variants_separately", {"true","false"});
    defaults_.setValue("use_shared_peptides", "true", "If this is true, shared peptides are used as evidences. Note: shared_peptides are not deleted and potentially resolved in postprocessing as well.");
    defaults_.setValidStrings("use_shared_peptides", {"true","false"});
    defaults_.setValue("skip_count_annotation", "false", "If this is set, peptide counts won't be annotated at the proteins.");
    defaults_.setValidStrings("skip_count_annotation", {"true","false"});
    defaults_.setValue("annotate_indistinguishable_groups", "true", "If this is true, calculates and annotates indistinguishable protein groups.");
    defaults_.setValidStrings("annotate_indistinguishable_groups", {"true","false"});
    defaults_.setValue("greedy_group_resolution", "false", "If this is true, shared peptides will be associated to best proteins only (i.e. become potentially quantifiable razor peptides).");
    defaults_.setValidStrings("greedy_group_resolution", {"true","false"});
    // empty string = use whatever the current main PSM score is
    defaults_.setValue("score_type", "", "PSM score type to use for inference. (default: empty = main score)");
    defaults_.setValidStrings("score_type", {"", "PEP", "q-value", "RAW"});
    defaultsToParam_();
  }
  /// @brief Runs protein inference for a single ProteinIdentification run.
  ///
  /// Temporarily switches the PSM main score to the requested score type
  /// (parameter "score_type"), delegates aggregation/grouping to processRun_(),
  /// removes proteins below the 'min_peptides_per_protein' threshold together
  /// with dangling protein references in the PSMs, and restores the original
  /// PSM score type at the end.
  void BasicProteinInferenceAlgorithm::run(PeptideIdentificationList &pep_ids,
                                           ProteinIdentification &prot_id) const
  {
    Size min_peptides_per_protein = static_cast<Size>(param_.getValue("min_peptides_per_protein"));
    // best PSM per peptide sequence and charge state (filled by processRun_)
    std::unordered_map<std::string, std::map<Int, PeptideHit*>> best_pep;
    // accession -> (protein hit pointer, nr. of matched peptides) (filled by processRun_)
    std::unordered_map<std::string, std::pair<ProteinHit*, Size>> acc_to_protein_hitP_and_count;
    String requested_score_type_as_string = param_.getValue("score_type").toString();
    // switch PSM scores to the requested type; 'isr' remembers how to switch back
    auto isr = IDScoreSwitcherAlgorithm::switchToScoreType(pep_ids, requested_score_type_as_string);
    processRun_(
      acc_to_protein_hitP_and_count,
      best_pep,
      prot_id,
      pep_ids
      );
    if (min_peptides_per_protein > 0) //potentially sth was filtered
    {
      // removeDanglingProteinReferences works on a vector of runs; wrap the
      // single run via swap to avoid copying it.
      std::vector<ProteinIdentification> tmp(1);
      std::swap(tmp[0], prot_id);
      IDFilter::removeDanglingProteinReferences(pep_ids, tmp, true); //TODO allow keeping PSMs without evidence?
      std::swap(tmp[0], prot_id);
    }
    IDScoreSwitcherAlgorithm::switchBackScoreType(pep_ids, isr); // NOP if no switch was performed
  }
  /// @brief Runs protein inference on a ConsensusMap in place.
  ///
  /// Pipeline: read parameters -> keep only the best PSM per spectrum ->
  /// switch PSM scores to the requested type -> initialize protein scores with
  /// the aggregation method's neutral element -> aggregate best peptide scores
  /// into protein scores -> filter proteins by 'min_peptides_per_protein' ->
  /// optionally compute indistinguishable groups and/or greedily resolve
  /// shared peptides -> sort and restore the original PSM score type.
  /// @param include_unassigned also process unassigned peptide identifications
  void BasicProteinInferenceAlgorithm::run(ConsensusMap& cmap, ProteinIdentification& prot_run, bool include_unassigned) const
  {
    bool group(param_.getValue("annotate_indistinguishable_groups").toBool());
    bool resolve(param_.getValue("greedy_group_resolution").toBool());
    Size min_peptides_per_protein = static_cast<Size>(param_.getValue("min_peptides_per_protein"));
    bool treat_charge_variants_separately(param_.getValue("treat_charge_variants_separately").toBool());
    bool treat_modification_variants_separately(param_.getValue("treat_modification_variants_separately").toBool());
    bool use_shared_peptides(param_.getValue("use_shared_peptides").toBool());
    // best PSM per peptide sequence and charge state
    std::unordered_map<std::string, std::map<Int, PeptideHit*>> best_pep;
    // accession -> (protein hit pointer, nr. of matched peptides)
    std::unordered_map<std::string, std::pair<ProteinHit*, Size>> acc_to_protein_hitP_and_count;
    String agg_method_string(param_.getValue("score_aggregation_method").toString());
    AggregationMethod aggregation_method = aggFromString_(agg_method_string);
    //TODO think about only clearing values or using a big map for all runs together
    acc_to_protein_hitP_and_count.clear();
    best_pep.clear();
    // record the inference engine and its settings in the run's search parameters
    prot_run.setInferenceEngine("TOPPProteinInference");
    prot_run.setInferenceEngineVersion(VersionInfo::getVersion());
    ProteinIdentification::SearchParameters sp = prot_run.getSearchParameters();
    sp.setMetaValue("TOPPProteinInference:aggregation_method", agg_method_string);
    sp.setMetaValue("TOPPProteinInference:use_shared_peptides", use_shared_peptides);
    sp.setMetaValue("TOPPProteinInference:treat_charge_variants_separately", treat_charge_variants_separately);
    sp.setMetaValue("TOPPProteinInference:treat_modification_variants_separately", treat_modification_variants_separately);
    prot_run.setSearchParameters(sp);
    auto& prot_hits = prot_run.getHits();
    IDFilter::keepNBestPeptideHits(cmap, 1); // we should filter for best psm per spec only, since those will be the psms used, also filterUnreferencedProteins depends on it (e.g. after resolution)
    // determine requested score type. This can be a search engine score name or a broader score category (e.g. PEP)
    String requested_score_type_as_string = param_.getValue("score_type").toString();
    // switch scores to requested score type (e.g., "RAW" = a search engine score, "PEP", "q-value")
    // for convenience, the results also contain name, score orientation and type before and after switching
    // as well as a flag that indicates if a switch was performed (or e.g., score was already the main score)
    auto isr = IDScoreSwitcherAlgorithm::switchToScoreType(cmap, requested_score_type_as_string, include_unassigned);
    // Here: we can be sure that we have the requested score type as the main score now
    // determine initial values for protein scores based on score type
    bool pep_scores = (isr.requested_score_type == IDScoreSwitcherAlgorithm::ScoreType::PEP);
    double initScore = getInitScoreForAggMethod_(aggregation_method, pep_scores || isr.requested_score_higher_better); // if we have pep scores, we will complement to pp during aggregation
    // initialize every protein with the neutral score and index it by accession
    for (auto& prothit : prot_hits)
    {
      prothit.setScore(initScore);
      acc_to_protein_hitP_and_count[prothit.getAccession()] = std::make_pair<ProteinHit*, Size>(&prothit, 0);
    }
    // warn if score type and aggregation method do not fit together well
    checkCompat_(isr.requested_score_name, aggregation_method);
    // collect the best PSM per peptide over all consensus features
    for (auto& cf : cmap)
    {
      aggregatePeptideScores_(best_pep,
                              cf.getPeptideIdentifications(),
                              isr.requested_score_name,
                              isr.requested_score_higher_better,
                              "");
    }
    if (include_unassigned)
    {
      aggregatePeptideScores_(best_pep,
                              cmap.getUnassignedPeptideIdentifications(),
                              isr.requested_score_name,
                              isr.requested_score_higher_better,
                              "");
    }
    // fold the collected peptide scores into the protein scores
    updateProteinScores_(
      acc_to_protein_hitP_and_count,
      best_pep,
      pep_scores,
      isr.requested_score_higher_better
    );
    if (pep_scores)
    {
      // PEPs were complemented to posterior probabilities during aggregation
      prot_run.setScoreType("Posterior Probability");
      prot_run.setHigherScoreBetter(true);
    }
    else
    {
      prot_run.setScoreType(isr.requested_score_name); // score name after switch (e.g. "PEP", "q-value")
      prot_run.setHigherScoreBetter(isr.requested_score_higher_better);
    }
    if (min_peptides_per_protein > 0)
    {
      // drop proteins with too few matched peptides and then remove the
      // now-dangling protein references from the PSMs
      IDFilter::removeMatchingItems<std::vector<ProteinHit>>(prot_run.getHits(),
          IDFilter::HasMaxMetaValue<ProteinHit>("nr_found_peptides", static_cast<int>(min_peptides_per_protein) - 1));
      IDFilter::removeDanglingProteinReferences(cmap, prot_run, true);
    }
    if (group)
    {
      //TODO you could actually also do the aggregation/inference as well as the resolution on the Graph structure.
      // Groups would be clustered already. Saving some time.
      // But it is quite fast right now already.
      IDBoostGraph ibg{prot_run, cmap, 1, false, include_unassigned, false};
      ibg.computeConnectedComponents();
      if (resolve)
      {
        ibg.clusterIndistProteinsAndPeptides(); //TODO check in resolve or do it there if not done yet!
        //Note: the above does not add singleton groups to graph
        ibg.resolveGraphPeptideCentric(true);
        ibg.annotateIndistProteins(true); // this does not really add singletons since they are not in the graph
        IDFilter::removeUnreferencedProteins(cmap, include_unassigned);
        IDFilter::updateProteinGroups(prot_run.getIndistinguishableProteins(), prot_run.getHits());
        prot_run.fillIndistinguishableGroupsWithSingletons();
      }
      else
      {
        ibg.calculateAndAnnotateIndistProteins(true);
      }
      auto & ipg = prot_run.getIndistinguishableProteins();
      std::sort(std::begin(ipg), std::end(ipg));
    }
    else
    {
      if (resolve)
      {
        // resolution needs groups internally even though none are reported
        IDBoostGraph ibg{prot_run, cmap, 1, false, include_unassigned, false};
        ibg.computeConnectedComponents();
        ibg.clusterIndistProteinsAndPeptides(); //TODO check in resolve or do it there if not done yet!
        //Note: the above does not add singleton groups to graph
        ibg.resolveGraphPeptideCentric(true);
        IDFilter::removeUnreferencedProteins(cmap, include_unassigned);
        IDFilter::updateProteinGroups(prot_run.getIndistinguishableProteins(), prot_run.getHits());
      }
    }
    prot_run.sort();
    IDScoreSwitcherAlgorithm::switchBackScoreType(cmap, isr, include_unassigned); // NOP if no switch was performed
  }
  /// @brief Runs protein inference for several ProteinIdentification runs.
  ///
  /// Keeps only the best PSM per spectrum, switches PSM scores to the
  /// requested score type, and calls processRun_() once per run (processRun_
  /// clears the shared lookup maps at its start, so runs do not interfere).
  /// Afterwards, proteins filtered by 'min_peptides_per_protein' have their
  /// dangling references removed and the original score type is restored.
  void BasicProteinInferenceAlgorithm::run(PeptideIdentificationList &pep_ids,
                                           std::vector<ProteinIdentification> &prot_ids) const
  {
    Size min_peptides_per_protein = static_cast<Size>(param_.getValue("min_peptides_per_protein"));
    IDFilter::keepNBestHits(pep_ids,1); // we should filter for best psm per spec only, since those will be the psms used, also filterUnreferencedProteins depends on it (e.g. after resolution)
    // best PSM per peptide sequence and charge state (reused across runs)
    std::unordered_map<std::string, std::map<Int, PeptideHit*>> best_pep;
    // accession -> (protein hit pointer, nr. of matched peptides)
    std::unordered_map<std::string, std::pair<ProteinHit*, Size>> acc_to_protein_hitP_and_count;
    // determine requested score type. This can be a search engine score name or a broader score category (e.g. PEP)
    String requested_score_type_as_string = param_.getValue("score_type").toString();
    auto isr = IDScoreSwitcherAlgorithm::switchToScoreType(pep_ids, requested_score_type_as_string);
    for (auto &prot_run : prot_ids)
    {
      processRun_(
        acc_to_protein_hitP_and_count,
        best_pep,
        prot_run,
        pep_ids
        );
    }
    if (min_peptides_per_protein > 0) //potentially sth was filtered
    {
      IDFilter::removeDanglingProteinReferences(pep_ids, prot_ids, true); //TODO allow keeping PSMs without evidence?
    }
    IDScoreSwitcherAlgorithm::switchBackScoreType(pep_ids, isr); // NOP if no switch was performed
  }
  /// @brief Collects the best PSM (by score) per peptide into 'best_pep'.
  ///
  /// Keyed by peptide sequence (modified or unmodified, depending on
  /// 'treat_modification_variants_separately') and charge (0 if charge
  /// variants are not treated separately). Stores raw pointers into 'pep_ids',
  /// so 'pep_ids' must outlive 'best_pep' and not be reallocated.
  /// @param overall_score_type expected score type; mismatches throw
  /// @param run_id if non-empty, only peptides of this run are considered
  /// @throws Exception::InvalidParameter on differing score types
  void BasicProteinInferenceAlgorithm::aggregatePeptideScores_(
      std::unordered_map<std::string, std::map<Int, PeptideHit*>>& best_pep,
      PeptideIdentificationList& pep_ids,
      const String& overall_score_type,
      bool higher_better,
      const std::string& run_id) const
  {
    bool treat_charge_variants_separately(param_.getValue("treat_charge_variants_separately").toBool());
    bool treat_modification_variants_separately(param_.getValue("treat_modification_variants_separately").toBool());
    bool use_shared_peptides(param_.getValue("use_shared_peptides").toBool());
    for (auto &pep : pep_ids)
    {
      // all peptides must carry the same (requested) score type
      if (pep.getScoreType() != overall_score_type)
      {
        throw OpenMS::Exception::InvalidParameter(
            __FILE__,
            __LINE__,
            OPENMS_PRETTY_FUNCTION,
            "Differing score_types in the PeptideHits. Aborting...");
      }
      //skip if it does not belong to run
      if (!run_id.empty() && pep.getIdentifier() != run_id)
        continue;
      //skip if no hits (which almost could be considered and error or warning)
      if (pep.getHits().empty())
        continue;
      //make sure that first = best hit
      pep.sort();
      //TODO think about if using any but the best PSM per spectrum makes sense in such a simple aggregation scheme
      //for (auto& hit : pep.getHits())
      //{
      PeptideHit &hit = pep.getHits()[0];
      //skip if shared and option not enabled
      //TODO warn if not present but requested?
      //TODO use nr of evidences to re-calculate sharedness?
      if (!use_shared_peptides &&
          (!hit.metaValueExists("protein_references") || (hit.getMetaValue("protein_references") == "non-unique")))
        continue;
      //TODO refactor: this is very similar to IDFilter best per peptide functionality
      // build the lookup key: sequence (with or without modifications) ...
      String lookup_seq;
      if (!treat_modification_variants_separately)
      {
        lookup_seq = hit.getSequence().toUnmodifiedString();
      }
      else
      {
        lookup_seq = hit.getSequence().toString();
      }
      // ... and charge (collapsed to 0 if charge variants are merged)
      int lookup_charge = 0;
      if (treat_charge_variants_separately)
      {
        lookup_charge = hit.getCharge();
      }
      if (auto current_best_pep_it = best_pep.find(lookup_seq); current_best_pep_it == best_pep.end())
      { // no entry exist for sequence? initialize seq->charge->&hit
        best_pep[lookup_seq][lookup_charge] = &hit;
      }
      else
      { // a peptide hit for the current sequence exists
        if (auto current_best_pep_charge_it = current_best_pep_it->second.find(lookup_charge); current_best_pep_charge_it == current_best_pep_it->second.end())
        { // no entry for charge? add hit
          current_best_pep_it->second[lookup_charge] = &hit;
        }
        else if (
            (higher_better && (hit.getScore() > current_best_pep_charge_it->second->getScore())) ||
            (!higher_better && (hit.getScore() < current_best_pep_charge_it->second->getScore())))
        { // seq with charge already exists? replace if new value has better score
          current_best_pep_charge_it->second = &hit;
        }
      }
      //}
    }
  }
  /// @brief Folds the best peptide scores into the referenced protein scores.
  ///
  /// For every best peptide (per sequence/charge) and every protein accession
  /// it references, the peptide score is combined into the protein score via
  /// the configured aggregation function. PEP scores are complemented
  /// (1 - PEP) to posterior probabilities first. Also counts matched peptides
  /// per protein ("nr_found_peptides" meta value, unless skipped) and divides
  /// by that count for the SUM method (i.e. reports the mean).
  /// @param pep_scores true if the peptide scores are PEPs
  /// @param higher_better orientation of the peptide scores
  void BasicProteinInferenceAlgorithm::updateProteinScores_(
      std::unordered_map<std::string, std::pair<ProteinHit*, Size>>& acc_to_protein_hitP_and_count,
      const std::unordered_map<std::string, std::map<Int, PeptideHit*>>& best_pep,
      bool pep_scores,
      bool higher_better) const
  {
    //TODO Allow count as aggregation method -> i.e. set as protein score?
    if (!higher_better && pep_scores)
    {
      higher_better = true; // We will convert the scores to PPs for multiplication
    }
    bool skip_count_annotation(param_.getValue("skip_count_annotation").toBool());
    String agg_method_string(param_.getValue("score_aggregation_method").toString());
    AggregationMethod aggregation_method = aggFromString_(agg_method_string);
    const auto& aggregation_fun = aggFunFromEnum_(aggregation_method, higher_better);
    // update protein scores
    for (const auto &seq_to_map_from_charge_to_pep_hit : best_pep)
    {
      // The next line assumes that PeptideHits of different charge states necessarily share the same
      // protein accessions (might not be the case if we ever allow modified protein databases e.g. in PEF format)
      // TODO this could be done for mods, too (first hashing AASeq, then the mods)
      const std::map<Int, PeptideHit*>& charge_to_peptide_hit = seq_to_map_from_charge_to_pep_hit.second;
      const PeptideHit& first_peptide_hit = *charge_to_peptide_hit.begin()->second;
      for (const auto &acc : first_peptide_hit.extractProteinAccessionsSet())
      {
        for (const auto &charge_pep_hit_pair : charge_to_peptide_hit)
        {
          auto prot_count_pair_it = acc_to_protein_hitP_and_count.find(std::string(acc));
          if (prot_count_pair_it == acc_to_protein_hitP_and_count.end())
          {
            OPENMS_LOG_WARN << "Warning, skipping pep that maps to a non existent protein accession. "
                            << first_peptide_hit.getSequence().toUnmodifiedString() << std::endl;
            continue; // very weird, has an accession that was not in the proteins loaded in the beginning
            //TODO error? Suppress log?
          }
          ProteinHit *protein = prot_count_pair_it->second.first;
          prot_count_pair_it->second.second++; // one more peptide found for this protein
          const PeptideHit& pep_hit = *charge_pep_hit_pair.second;
          double new_score = pep_hit.getScore();
          if (pep_scores) // convert PEP to PP
            new_score = 1. - new_score;
          protein->setScore(aggregation_fun(protein->getScore(), new_score));
        }
      }
    }
    // annotate the peptide counts collected above
    if (!skip_count_annotation)
    {
      for (auto& entry : acc_to_protein_hitP_and_count)
      {
        entry.second.first->setMetaValue("nr_found_peptides", entry.second.second);
      }
    }
    //normalize in case of SUM
    if (aggregation_method == AggregationMethod::SUM)
    {
      for (auto& entry : acc_to_protein_hitP_and_count)
      {
        ProteinHit* phitp = entry.second.first;
        phitp->setScore(phitp->getScore() / entry.second.second);
      }
    }
  }
void BasicProteinInferenceAlgorithm::checkCompat_(
const String& score_name,
const AggregationMethod& aggregation_method
) const
{
//TODO do something smart about the scores, e.g. let the user specify a general score type
// he wants to use and then switch all of them
if (!IDScoreSwitcherAlgorithm().isScoreType(score_name, IDScoreSwitcherAlgorithm::ScoreType::PEP) &&
!IDScoreSwitcherAlgorithm().isScoreType(score_name, IDScoreSwitcherAlgorithm::ScoreType::PP) &&
aggregation_method == AggregationMethod::PROD)
{
OPENMS_LOG_WARN << "ProteinInference with multiplicative aggregation "
" should probably use Posterior (Error) Probabilities in the Peptide Hits."
" Use Percolator with PEP score or run IDPosteriorErrorProbability first.\n";
}
}
void BasicProteinInferenceAlgorithm::checkCompat_(
const IDScoreSwitcherAlgorithm::ScoreType& score_type,
const AggregationMethod& aggregation_method
) const
{
//TODO do something smart about the scores, e.g. let the user specify a general score type
// he wants to use and then switch all of them
if ((score_type != IDScoreSwitcherAlgorithm::ScoreType::PEP) &&
(score_type != IDScoreSwitcherAlgorithm::ScoreType::PP) &&
aggregation_method == AggregationMethod::PROD)
{
OPENMS_LOG_WARN << "ProteinInference with multiplicative aggregation "
" should probably use Posterior (Error) Probabilities in the Peptide Hits."
" Use Percolator with PEP score or run IDPosteriorErrorProbability first.\n";
}
}
BasicProteinInferenceAlgorithm::AggregationMethod BasicProteinInferenceAlgorithm::aggFromString_(const std::string& agg_method_string) const
{
if (agg_method_string == "best")
{
return AggregationMethod::BEST;
}
else if (agg_method_string == "product")
{
return AggregationMethod::PROD;
}
else if (agg_method_string == "sum")
{
return AggregationMethod::SUM;
}
else if (agg_method_string == "maximum")
{
return AggregationMethod::BEST;
}
else
{
return AggregationMethod::BEST;
}
}
BasicProteinInferenceAlgorithm::fptr BasicProteinInferenceAlgorithm::aggFunFromEnum_(const BasicProteinInferenceAlgorithm::AggregationMethod& agg_method, bool higher_better) const
{
switch (agg_method)
{
case AggregationMethod::PROD :
return [](double old_score, double new_score){
if (new_score > 0.0) //TODO for 0 probability peptides we could also multiply a minimum value
{
return old_score * new_score;
}
else
{
return old_score;
}
};
case AggregationMethod::BEST :
if (higher_better)
{
return [](double old_score, double new_score){return std::fmax(old_score, new_score);};
}
else
{
return [](double old_score, double new_score){return std::fmin(old_score, new_score);};
}
case AggregationMethod::SUM :
return [](double old_score, double new_score){return old_score + new_score;};
default:
throw Exception::NotImplemented(__FILE__,__LINE__,OPENMS_PRETTY_FUNCTION);
}
}
double BasicProteinInferenceAlgorithm::getInitScoreForAggMethod_(const AggregationMethod& aggregation_method, bool higher_better) const
{
switch (aggregation_method)
{
//TODO for 0 probability peptides we could also multiply a minimum value
case AggregationMethod::PROD :
return 1.0;
case AggregationMethod::BEST :
return higher_better ? -std::numeric_limits<double>::infinity() : std::numeric_limits<double>::infinity();
case AggregationMethod::SUM :
return 0.0;
default:
throw Exception::NotImplemented(__FILE__,__LINE__,OPENMS_PRETTY_FUNCTION);
}
}
  /// @brief Performs inference for one ProteinIdentification run.
  ///
  /// Clears the passed lookup maps and old groups, records engine settings,
  /// determines the current main score name/orientation/type of the PSMs,
  /// initializes and aggregates protein scores, filters proteins by
  /// 'min_peptides_per_protein' (note: dangling PSM references are cleaned up
  /// by the caller), and optionally computes indistinguishable groups and/or
  /// greedy shared-peptide resolution.
  void BasicProteinInferenceAlgorithm::processRun_(
    std::unordered_map<std::string, std::pair<ProteinHit*, Size>>& acc_to_protein_hitP_and_count,
    std::unordered_map<std::string, std::map<Int, PeptideHit*>>& best_pep,
    ProteinIdentification& prot_run,
    PeptideIdentificationList& pep_ids) const
  {
    // TODO actually clearing the scores should be enough, since this algorithm does not change the grouping
    prot_run.getProteinGroups().clear();
    prot_run.getIndistinguishableProteins().clear();
    bool group(param_.getValue("annotate_indistinguishable_groups").toBool());
    bool resolve(param_.getValue("greedy_group_resolution").toBool());
    Size min_peptides_per_protein = static_cast<Size>(param_.getValue("min_peptides_per_protein"));
    bool treat_charge_variants_separately(param_.getValue("treat_charge_variants_separately").toBool());
    bool treat_modification_variants_separately(param_.getValue("treat_modification_variants_separately").toBool());
    bool use_shared_peptides(param_.getValue("use_shared_peptides").toBool());
    String agg_method_string(param_.getValue("score_aggregation_method").toString());
    AggregationMethod aggregation_method = aggFromString_(agg_method_string);
    //TODO think about only clearing values or using a big map for all runs together
    acc_to_protein_hitP_and_count.clear();
    best_pep.clear();
    // record the inference engine and its settings in the run's search parameters
    prot_run.setInferenceEngine("TOPPProteinInference");
    prot_run.setInferenceEngineVersion(VersionInfo::getVersion());
    ProteinIdentification::SearchParameters sp = prot_run.getSearchParameters();
    sp.setMetaValue("TOPPProteinInference:aggregation_method", agg_method_string);
    sp.setMetaValue("TOPPProteinInference:use_shared_peptides", use_shared_peptides);
    sp.setMetaValue("TOPPProteinInference:treat_charge_variants_separately", treat_charge_variants_separately);
    sp.setMetaValue("TOPPProteinInference:treat_modification_variants_separately", treat_modification_variants_separately);
    prot_run.setSearchParameters(sp);
    // read the current main score's name, orientation and category from the PSMs
    String main_score_name;
    bool main_higher_better = true;
    IDScoreSwitcherAlgorithm::ScoreType main_score_type;
    IDScoreSwitcherAlgorithm().determineScoreNameOrientationAndType(pep_ids, main_score_name, main_higher_better, main_score_type);
    bool pep_scores = (main_score_type == IDScoreSwitcherAlgorithm::ScoreType::PEP);
    double initScore = getInitScoreForAggMethod_(aggregation_method, pep_scores || main_higher_better); // if we have pep scores, we will complement to pp during aggregation
    //create Accession to ProteinHit and peptide count map. To have quick access later.
    //If a protein occurs in multiple runs, it picks the last
    for (auto &phit : prot_run.getHits())
    {
      acc_to_protein_hitP_and_count[phit.getAccession()] = std::make_pair<ProteinHit*, Size>(&phit, 0);
      phit.setScore(initScore);
    }
    // warn if score type and aggregation method do not fit together well
    checkCompat_(main_score_name, aggregation_method);
    aggregatePeptideScores_(best_pep, pep_ids, main_score_name, main_higher_better, prot_run.getIdentifier());
    updateProteinScores_(acc_to_protein_hitP_and_count, best_pep, pep_scores, main_higher_better);
    if (pep_scores) // we converted/ will convert
    {
      prot_run.setScoreType("Posterior Probability");
      prot_run.setHigherScoreBetter(true);
    }
    else
    {
      prot_run.setScoreType(main_score_name);
      prot_run.setHigherScoreBetter(main_higher_better);
    }
    if (min_peptides_per_protein > 0)
    {
      // drop proteins with too few matched peptides (count annotated by updateProteinScores_)
      IDFilter::removeMatchingItems<std::vector<ProteinHit>>(prot_run.getHits(),
          IDFilter::HasMaxMetaValue<ProteinHit>("nr_found_peptides", static_cast<int>(min_peptides_per_protein) - 1));
    }
    if (group)
    {
      //TODO you could actually also do the aggregation/inference as well as the resolution on the Graph structure.
      // Groups would be clustered already. Saving some time.
      // But it is quite fast right now already.
      IDBoostGraph ibg{prot_run, pep_ids, 1, false, false};
      ibg.computeConnectedComponents();
      if (resolve)
      {
        ibg.clusterIndistProteinsAndPeptides(); //TODO check in resolve or do it there if not done yet!
        //Note: the above does not add singleton groups to graph
        ibg.resolveGraphPeptideCentric(true);
        ibg.annotateIndistProteins(true); // this does not really add singletons since they are not in the graph
        IDFilter::removeUnreferencedProteins(prot_run, pep_ids);
        IDFilter::updateProteinGroups(prot_run.getIndistinguishableProteins(), prot_run.getHits());
        prot_run.fillIndistinguishableGroupsWithSingletons();
      }
      else
      {
        ibg.calculateAndAnnotateIndistProteins(true);
      }
      auto & ipg = prot_run.getIndistinguishableProteins();
      std::sort(std::begin(ipg), std::end(ipg));
    }
    else
    {
      if (resolve) // resolution needs groups anyway, so this is very similar to above, except that we remove them in the end.
      {
        IDBoostGraph ibg{prot_run, pep_ids, 1, false, false};
        ibg.computeConnectedComponents();
        ibg.clusterIndistProteinsAndPeptides(); //TODO check in resolve or do it there if not done yet!
        //Note: the above does not add singleton groups to graph
        ibg.resolveGraphPeptideCentric(true);
        prot_run.getIndistinguishableProteins().clear();
        IDFilter::removeUnreferencedProteins(prot_run, pep_ids);
      }
    }
  }
} //namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/HyperScore.cpp | .cpp | 15,783 | 424 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/HyperScore.h>

#include <OpenMS/DATASTRUCTURES/MatchedIterator.h>
#include <OpenMS/DATASTRUCTURES/StringUtils.h>
#include <OpenMS/KERNEL/MSSpectrum.h>

#include <algorithm> // std::max, std::min
#include <cmath>     // std::lgamma, log1p, std::abs
using std::vector;
namespace OpenMS
{
#include <cmath> // for std::lgamma
inline double HyperScore::logfactorial_(int x, int base)
{
// ensure base ≥ 2, and that x ≥ base–1
base = std::max(base, 2);
if (x < base - 1)
return 0.0;
// log(x!) = lgamma(x+1)
// log((base-1)!) = lgamma(base)
return std::lgamma(double(x + 1))
- std::lgamma(double(base));
}
  /// @brief Computes the HyperScore of a theoretical vs. an experimental spectrum.
  ///
  /// Matches theoretical to experimental peaks within the given tolerance,
  /// accumulates the intensity dot product and counts matched b- and y-ions
  /// (identified via the theoretical spectrum's first StringDataArray, whose
  /// annotations start with 'b'/'y' or contain "$b"/"$y" for XL-MS data).
  /// score = log1p(dot_product) + log(#y!) + log(#b!).
  /// Returns 0.0 (with a console message) for empty input or a theoretical
  /// spectrum without ion-name annotations.
  double HyperScore::compute(double fragment_mass_tolerance, bool fragment_mass_tolerance_unit_ppm, const PeakSpectrum& exp_spectrum, const PeakSpectrum& theo_spectrum)
  {
    if (exp_spectrum.empty() || theo_spectrum.empty())
    {
      std::cout << "Warning: HyperScore: One of the given spectra is empty." << std::endl;
      return 0.0;
    }
    // TODO this assumes only one StringDataArray is present and it is the right one
    const PeakSpectrum::StringDataArray* ion_names;
    if (!theo_spectrum.getStringDataArrays().empty())
    {
      ion_names = &theo_spectrum.getStringDataArrays()[0];
    }
    else
    {
      std::cout << "Error: HyperScore: Theoretical spectrum without StringDataArray (\"IonNames\" annotation) provided." << std::endl;
      return 0.0;
    }
    int y_ion_count = 0;
    int b_ion_count = 0;
    double dot_product = 0.0;
    // two structurally identical branches: ppm vs. Dalton tolerance matching
    if (fragment_mass_tolerance_unit_ppm)
    {
      MatchedIterator<PeakSpectrum, PpmTrait, true> it(theo_spectrum, exp_spectrum, fragment_mass_tolerance);
      for (; it != it.end(); ++it)
      {
        // (*it) is the theoretical peak, it.ref() the matched experimental peak
        dot_product += (*it).getIntensity() * it.ref().getIntensity(); /* * mass_error */;
        // fragment annotations in XL-MS data are more complex and do not start with the ion type, but the ion type always follows after a $
        auto i = it.refIdx();
        if ((*ion_names)[i][0] == 'y' || (*ion_names)[i].hasSubstring("$y"))
        {
          ++y_ion_count;
        }
        else if ((*ion_names)[i][0] == 'b' || (*ion_names)[i].hasSubstring("$b"))
        {
          ++b_ion_count;
        }
      }
    }
    else
    {
      MatchedIterator<PeakSpectrum, DaTrait, true> it(theo_spectrum, exp_spectrum, fragment_mass_tolerance);
      for (; it != it.end(); ++it)
      {
        dot_product += (*it).getIntensity() * it.ref().getIntensity(); /* * mass_error */;
        // fragment annotations in XL-MS data are more complex and do not start with the ion type, but the ion type always follows after a $
        auto i = it.refIdx();
        if ((*ion_names)[i][0] == 'y' || (*ion_names)[i].hasSubstring("$y"))
        {
          ++y_ion_count;
        }
        else if ((*ion_names)[i][0] == 'b' || (*ion_names)[i].hasSubstring("$b"))
        {
          ++b_ion_count;
        }
      }
    }
    // inefficient: calculates logs repeatedly
    //const double yFact = logfactorial_(y_ion_count);
    //const double bFact = logfactorial_(b_ion_count);
    //const double hyperScore = log1p(dot_product) + yFact + bFact;
    // equivalent to log(i_min!) + log(i_max!):
    // 2*log(i_min!) + [log(i_max!) - log(i_min!)], reusing the partial factorial
    const int i_min = std::min(y_ion_count, b_ion_count);
    const int i_max = std::max(y_ion_count, b_ion_count);
    const double hyperScore = log1p(dot_product) + 2*logfactorial_(i_min) + logfactorial_(i_max, i_min + 1);
    return hyperScore;
  }
  /// @brief Like compute(), but additionally reports matching details in 'd':
  /// the number of matched b- and y-ions and the mean absolute mass error of
  /// the matched peaks (in ppm or Da, depending on the tolerance unit; 0.0 if
  /// nothing matched). Returns 0.0 (without touching 'd') on empty input or a
  /// theoretical spectrum lacking ion-name annotations.
  double HyperScore::computeWithDetail(double fragment_mass_tolerance, 
                             bool fragment_mass_tolerance_unit_ppm, 
                             const PeakSpectrum& exp_spectrum, 
                             const PeakSpectrum& theo_spectrum,
                             PSMDetail& d)
  {
    if (exp_spectrum.empty() || theo_spectrum.empty())
    {
      std::cout << "Warning: HyperScore: One of the given spectra is empty." << std::endl;
      return 0.0;
    }
    // TODO this assumes only one StringDataArray is present and it is the right one
    const PeakSpectrum::StringDataArray* ion_names;
    if (!theo_spectrum.getStringDataArrays().empty())
    {
      ion_names = &theo_spectrum.getStringDataArrays()[0];
    }
    else
    {
      std::cout << "Error: HyperScore: Theoretical spectrum without StringDataArray (\"IonNames\" annotation) provided." << std::endl;
      return 0.0;
    }
    int y_ion_count = 0;
    int b_ion_count = 0;
    double dot_product = 0.0;
    double abs_error = 0.0; // accumulated absolute mass error of all matches
    if (fragment_mass_tolerance_unit_ppm)
    {
      MatchedIterator<PeakSpectrum, PpmTrait, true> it(theo_spectrum, exp_spectrum, fragment_mass_tolerance);
      for (; it != it.end(); ++it)
      {
        const double exp_mz{it.ref().getMZ()};
        const double theo_mz{(*it).getMZ()};
        const double exp_int{it.ref().getIntensity()};
        const double theo_int{(*it).getIntensity()};
        abs_error += Math::getPPMAbs(theo_mz, exp_mz); // error in ppm
        dot_product += theo_int * exp_int; /* * mass_error */;
        // fragment annotations in XL-MS data are more complex and do not start with the ion type, but the ion type always follows after a $
        auto i = it.refIdx();
        if ((*ion_names)[i][0] == 'y' || (*ion_names)[i].hasSubstring("$y"))
        {
          ++y_ion_count;
        }
        else if ((*ion_names)[i][0] == 'b' || (*ion_names)[i].hasSubstring("$b"))
        {
          ++b_ion_count;
        }
      }
    }
    else
    {
      MatchedIterator<PeakSpectrum, DaTrait, true> it(theo_spectrum, exp_spectrum, fragment_mass_tolerance);
      for (; it != it.end(); ++it)
      {
        abs_error += abs((*it).getMZ() - it.ref().getMZ()); // error in Da
        dot_product += (*it).getIntensity() * it.ref().getIntensity(); /* * mass_error */;
        // fragment annotations in XL-MS data are more complex and do not start with the ion type, but the ion type always follows after a $
        auto i = it.refIdx();
        if ((*ion_names)[i][0] == 'y' || (*ion_names)[i].hasSubstring("$y"))
        {
          ++y_ion_count;
        }
        else if ((*ion_names)[i][0] == 'b' || (*ion_names)[i].hasSubstring("$b"))
        {
          ++b_ion_count;
        }
      }
    }
    // inefficient: calculates logs repeatedly
    //const double yFact = logfactorial_(y_ion_count);
    //const double bFact = logfactorial_(b_ion_count);
    //const double hyperScore = log1p(dot_product) + yFact + bFact;
    // equivalent to log(i_min!) + log(i_max!), reusing the partial factorial
    const int i_min = std::min(y_ion_count, b_ion_count);
    const int i_max = std::max(y_ion_count, b_ion_count);
    const double hyperScore = log1p(dot_product) + 2*logfactorial_(i_min) + logfactorial_(i_max, i_min + 1);
    d.matched_b_ions = b_ion_count;
    d.matched_y_ions = y_ion_count;
    d.mean_error = (b_ion_count + y_ion_count) > 0 ? abs_error / (double)(b_ion_count + y_ion_count) : 0.0;
    return hyperScore;
  }
  /// @brief Charge-aware HyperScore: a theoretical peak only counts as matched
  /// if the NEAREST experimental peak lies within the tolerance AND both peaks
  /// have the same annotated charge (exp_charges/theo_charges are parallel to
  /// their spectra). Uses findNearest(), so at most one experimental peak is
  /// considered per theoretical peak. Returns 0.0 on empty/inconsistent input
  /// (size mismatches, missing ion-name annotations) or when nothing matched.
  double HyperScore::compute(double fragment_mass_tolerance, 
    bool fragment_mass_tolerance_unit_ppm, 
    const PeakSpectrum& exp_spectrum, 
    const DataArrays::IntegerDataArray& exp_charges,
    const PeakSpectrum& theo_spectrum,
    const DataArrays::IntegerDataArray& theo_charges)
  {
    double dot_product = 0.0;
    UInt y_ion_count = 0;
    UInt b_ion_count = 0;
    if (exp_spectrum.size() < 1 || theo_spectrum.size() < 1)
    {
      std::cout << "Warning: HyperScore: One of the given spectra is empty." << std::endl;
      return 0.0;
    }
    // TODO this assumes only one StringDataArray is present and it is the right one
    const PeakSpectrum::StringDataArray* ion_names;
    if (theo_spectrum.getStringDataArrays().size() > 0)
    {
      ion_names = &theo_spectrum.getStringDataArrays()[0];
    }
    else
    {
      std::cout << "Error: HyperScore: Theoretical spectrum without StringDataArray (\"IonNames\" annotation) provided." << std::endl;
      return 0.0;
    }
    // sanity checks: charge arrays must parallel their spectra
    if (theo_charges.size() != theo_spectrum.size())
    {
      std::cout << "Error: HyperScore: #charges != #peaks in theoretical spectrum." << std::endl;
      return 0.0;
    }
    if (exp_charges.size() != exp_spectrum.size())
    {
      std::cout << "Error: HyperScore: #charges != #peaks in experimental spectrum." << std::endl;
      return 0.0;
    }
    for (Size i = 0; i < theo_spectrum.size(); ++i)
    {
      const double theo_mz = theo_spectrum[i].getMZ();
      // ppm tolerance is converted to an absolute window around the theoretical m/z
      double max_dist_dalton = fragment_mass_tolerance_unit_ppm ? theo_mz * fragment_mass_tolerance * 1e-6 : fragment_mass_tolerance;
      // iterate over peaks in experimental spectrum in given fragment tolerance around theoretical peak
      Size index = exp_spectrum.findNearest(theo_mz);
      const double exp_mz = exp_spectrum[index].getMZ();
      const double theo_intensity = theo_spectrum[i].getIntensity();
      const int exp_z = exp_charges[index];
      const int theo_z = theo_charges[i];
      #ifdef DEBUG_HYPERSCORE
        if (exp_z != theo_z) std::cout << "exp_z != theo_z " << exp_z << "\t" << theo_z << '\n';
      #endif
      // found peak match
      if (std::abs(theo_mz - exp_mz) < max_dist_dalton
       && exp_z == theo_z)
      {
        // fragment annotations in XL-MS data are more complex and do not start with the ion type, but the ion type always follows after a $
        if ((*ion_names)[i][0] == 'y' || (*ion_names)[i].hasSubstring("$y"))
        {
          dot_product += exp_spectrum[index].getIntensity() * theo_intensity;
          #ifdef DEBUG_HYPERSCORE
            std::cout << (*ion_names)[i] << " intensity: " << exp_spectrum[index].getIntensity() << '\n';
          #endif
          ++y_ion_count;
        }
        else if ((*ion_names)[i][0] == 'b' || (*ion_names)[i].hasSubstring("$b"))
        {
          dot_product += exp_spectrum[index].getIntensity() * theo_intensity;
          #ifdef DEBUG_HYPERSCORE
            std::cout << (*ion_names)[i] << " intensity: " << exp_spectrum[index].getIntensity() << '\n';
          #endif
          ++b_ion_count;
        }
      }
    }
    // no matches at all -> score is 0 (skips the factorial terms)
    if (y_ion_count == 0 && b_ion_count == 0) return 0;
    const double yFact = logfactorial_(y_ion_count);
    const double bFact = logfactorial_(b_ion_count);
    const double hyperScore = log1p(dot_product) + yFact + bFact;
    #ifdef DEBUG_HYPERSCORE
      std::cout << "HyperScore/#y/#b: " << hyperScore << "/" << y_ion_count << "/" << b_ion_count << '\n';
    #endif
    return hyperScore;
  }
  /// HyperScore overload that additionally accumulates per-residue matched intensities.
  /// @param intensity_sum in/out vector of length N (peptide length); for every matched
  ///        b/y ion the experimental intensity is added at the corresponding residue index.
  ///        Multiple charges/matches of the same ion index are collapsed before counting.
  /// @return the HyperScore; 0.0 on empty/inconsistent input or if no ion matched
  /// @note unlike the other overload, this one requires plain 'b<i>'/'y<i>' ion names
  ///       (no XL-MS '$b'/'$y' handling) because the ion index must be parsed.
  double HyperScore::compute(double fragment_mass_tolerance,
    bool fragment_mass_tolerance_unit_ppm,
    const PeakSpectrum& exp_spectrum,
    const DataArrays::IntegerDataArray& exp_charges,
    const PeakSpectrum& theo_spectrum,
    const DataArrays::IntegerDataArray& theo_charges,
    std::vector<double>& intensity_sum)
  {
    if (exp_spectrum.size() < 1 || theo_spectrum.size() < 1)
    {
      std::cout << "Warning: HyperScore: One of the given spectra is empty." << std::endl;
      return 0.0;
    }
    // TODO this assumes only one StringDataArray is present and it is the right one
    const PeakSpectrum::StringDataArray* ion_names;
    if (theo_spectrum.getStringDataArrays().size() > 0)
    {
      ion_names = &theo_spectrum.getStringDataArrays()[0];
    }
    else
    {
      std::cout << "Error: HyperScore: Theoretical spectrum without StringDataArray (\"IonNames\" annotation) provided." << std::endl;
      return 0.0;
    }
    // consistency checks: each peak needs a corresponding charge entry
    if (theo_charges.size() != theo_spectrum.size())
    {
      std::cout << "Error: HyperScore: #charges != #peaks in theoretical spectrum." << std::endl;
      return 0.0;
    }
    if (exp_charges.size() != exp_spectrum.size())
    {
      std::cout << "Error: HyperScore: #charges != #peaks in experimental spectrum." << std::endl;
      return 0.0;
    }
    double dot_product = 0.0;
    const Size N = intensity_sum.size(); // length of peptide
    if (N == 0 || N > 100000) // peptides longer than 100k residues are unreasonable
    {
      std::cout << "Error: HyperScore: intensity_sum has invalid size: " << N << std::endl;
      return 0.0;
    }
    // per-residue matched intensity, accumulated separately per ion series
    std::vector<double> b_ions(N, 0.0);
    std::vector<double> y_ions(N, 0.0);
    for (Size i = 0; i < theo_spectrum.size(); ++i)
    {
      const double theo_mz = theo_spectrum[i].getMZ();
      // convert ppm tolerance to an absolute window around the theoretical m/z
      double max_dist_dalton = fragment_mass_tolerance_unit_ppm ? theo_mz * fragment_mass_tolerance * 1e-6 : fragment_mass_tolerance;
      // iterate over peaks in experimental spectrum in given fragment tolerance around theoretical peak
      Size index = exp_spectrum.findNearest(theo_mz);
      const double exp_mz = exp_spectrum[index].getMZ();
      const double theo_intensity = theo_spectrum[i].getIntensity();
      const int exp_z = exp_charges[index];
      const int theo_z = theo_charges[i];
#ifdef DEBUG_HYPERSCORE
      if (exp_z != theo_z) std::cout << "exp_z != theo_z " << exp_z << "\t" << theo_z << '\n';
#endif
      // found peak match: within tolerance AND identical charge
      if (std::abs(theo_mz - exp_mz) < max_dist_dalton
        && exp_z == theo_z)
      {
        // fragment annotations in XL-MS data are more complex and do not start with the ion type, but the ion type always follows after a $
        if ((*ion_names)[i][0] == 'y')
        {
          // parse the ion index (e.g. "y7" -> 7); skip peak if unparsable
          auto start = (*ion_names)[i].begin() + 1;
          auto end = (*ion_names)[i].end();
          int ii(0);
          if (!StringUtils::extractInt(start, end, ii)) continue;
          dot_product += exp_spectrum[index].getIntensity() * theo_intensity;
#ifdef DEBUG_HYPERSCORE
          std::cout << (*ion_names)[i] << " intensity: " << exp_spectrum[index].getIntensity() << '\n';
          std::cout << "N:" << N << "\t" << ii << "\t" << N-1 - (ii-1) << '\n';
#endif
          // we observed the suffix (N-1-ii, N-1] in 0-based AA coordinates
          y_ions[N-1 - (ii-1)] += exp_spectrum[index].getIntensity();
        }
        else if ((*ion_names)[i][0] == 'b')
        {
          // parse the ion index (e.g. "b3" -> 3); skip peak if unparsable
          auto start = (*ion_names)[i].begin() + 1;
          auto end = (*ion_names)[i].end();
          int ii(0);
          if (!StringUtils::extractInt(start, end, ii)) continue;
          dot_product += exp_spectrum[index].getIntensity() * theo_intensity;
#ifdef DEBUG_HYPERSCORE
          std::cout << (*ion_names)[i] << " intensity: " << exp_spectrum[index].getIntensity() << '\n';
          std::cout << "N:" << N << "\t" << ii << "\t" << (ii-1) << '\n';
#endif
          // we observed the prefix [0, ii) in 0-based AA coordinates
          b_ions[ii - 1] += exp_spectrum[index].getIntensity();
        }
      }
    }
    // count residues with any matched intensity (collapses multi-charge duplicates)
    // and fold the per-residue intensities into the caller's accumulator
    UInt y_ion_count(0),
         b_ion_count(0);
    for (Size i = 0; i != b_ions.size(); ++i)
    {
      if (b_ions[i] > 0)
      {
        ++b_ion_count;
        intensity_sum[i] += b_ions[i];
      }
    }
    for (Size i = 0; i != y_ions.size(); ++i)
    {
      if (y_ions[i] > 0)
      {
        ++y_ion_count;
        intensity_sum[i] += y_ions[i];
      }
    }
    // no matched ion at all -> score is defined as 0
    if (y_ion_count == 0 && b_ion_count == 0) return 0;
    const double yFact = logfactorial_(y_ion_count);
    const double bFact = logfactorial_(b_ion_count);
    const double hyperScore = log1p(dot_product) + yFact + bFact;
#ifdef DEBUG_HYPERSCORE
    std::cout << "HyperScore/#y/#b: " << hyperScore << "/" << y_ion_count << "/" << b_ion_count << '\n';
#endif
    return hyperScore;
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/SiriusExportAlgorithm.cpp | .cpp | 8,598 | 179 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Oliver Alka, Axel Walter $
// $Authors: Oliver Alka, Lukas Zimmermann $
// --------------------------------------------------------------------------
#include <boost/foreach.hpp> // must be first, otherwise Q_FOREACH macro will wreak havoc
#include <OpenMS/ANALYSIS/ID/SiriusExportAlgorithm.h>
#include <OpenMS/ANALYSIS/ID/SiriusMSConverter.h>
#include <OpenMS/FORMAT/FileHandler.h>
namespace OpenMS
{
  /// Constructor: registers all user parameters (with defaults, restrictions and
  /// descriptions) on the DefaultParamHandler base and syncs them into param_.
  SiriusExportAlgorithm::SiriusExportAlgorithm() :
    DefaultParamHandler("SiriusExportAlgorithm")
  {
    // minimum number of mass traces a feature must have (only meaningful with feature_only)
    defaults_.setValue("filter_by_num_masstraces", 1, "Number of mass traces each feature has to have to be included. To use this parameter, setting the feature_only flag is necessary");
    defaults_.setMinInt("filter_by_num_masstraces", 1);
    // precursor-to-feature matching tolerances (m/z and RT)
    defaults_.setValue("precursor_mz_tolerance", 10.0, "Tolerance window for precursor selection (Feature selection in regard to the precursor)");
    defaults_.setValue("precursor_mz_tolerance_unit", "ppm", "Unit of the preprocessing_precursor_mz_tolerance");
    defaults_.setValidStrings("precursor_mz_tolerance_unit", {"ppm","Da"});
    defaults_.setValue("precursor_rt_tolerance", 5.0, "Tolerance window (left and right) for precursor selection [seconds]");
    // C13 isotope pattern extraction depth (used when no mass trace info is available)
    defaults_.setValue("isotope_pattern_iterations", 3, "Number of iterations that should be performed to extract the C13 isotope pattern. If no peak is found (C13 distance) the function will abort. Be careful with noisy data - since this can lead to wrong isotope patterns");
    defaults_.setValue("feature_only", "false", "Uses the feature information from in_featureinfo to reduce the search space to MS2 associated with a feature");
    defaults_.setValidStrings("feature_only", {"false","true"});
    defaults_.setValue("no_masstrace_info_isotope_pattern", "false", "Set to true if the masstrace information from a feature should be discarded and the isotope_pattern_iterations should be used instead");
    defaults_.setValidStrings("no_masstrace_info_isotope_pattern", {"false","true"});
    // copy defaults_ into the active parameter set
    defaultsToParam_();
  }
// ################
// Algorithm
// ################
void SiriusExportAlgorithm::preprocessing(const String& featureXML_path,
const MSExperiment& spectra,
FeatureMapping::FeatureMappingInfo& feature_mapping_info,
FeatureMapping::FeatureToMs2Indices& feature_ms2_indices) const
{
// if fileparameter is given and should be not empty
if (!featureXML_path.empty())
{
Size preprocessing_filter_by_num_masstraces = getFilterByNumMassTraces();
if (File::exists(featureXML_path) && !File::empty(featureXML_path))
{
// read featureXML
FeatureMap feature_map;
FileHandler().loadFeatures(featureXML_path, feature_map);
if (preprocessing_filter_by_num_masstraces != 1 && !isFeatureOnly())
{
preprocessing_filter_by_num_masstraces = 1;
OPENMS_LOG_WARN << "Parameter: preprocessing_filter_by_num_masstraces, was set to 1 to retain the adduct information for all MS2 spectra, if available. Masstrace filtering only makes sense in combination with feature_only." << std::endl;
}
// filter feature by number of masstraces
auto map_it = remove_if(feature_map.begin(), feature_map.end(),
[&preprocessing_filter_by_num_masstraces](const Feature &feat) -> bool
{
unsigned int n_masstraces = feat.getMetaValue(Constants::UserParam::NUM_OF_MASSTRACES);
return n_masstraces < preprocessing_filter_by_num_masstraces;
});
feature_map.erase(map_it, feature_map.end());
feature_mapping_info.feature_maps.push_back(feature_map);
feature_mapping_info.kd_tree.addMaps(feature_mapping_info.feature_maps); // KDTree references into feature_map
// mapping of MS2 spectra to features
feature_ms2_indices = FeatureMapping::assignMS2IndexToFeature(spectra,
feature_mapping_info,
getPrecursorMzTolerance(),
getPrecursorRtTolerance(),
precursorMzToleranceUnitIsPPM());
}
else
{
throw OpenMS::Exception::FileEmpty(__FILE__,
__LINE__,
__FUNCTION__,
"Error: FeatureXML was empty, please provide a valid file.");
}
}
}
void SiriusExportAlgorithm::logFeatureSpectraNumber(const String& featureXML_path,
const FeatureMapping::FeatureToMs2Indices& feature_ms2_indices,
const MSExperiment& spectra) const
{
// number of features to be processed
if (isFeatureOnly() && !featureXML_path.empty())
{
OPENMS_LOG_INFO << "Number of features to be processed: " << feature_ms2_indices.assignedMS2.size() << std::endl;
}
else if (!featureXML_path.empty())
{
OPENMS_LOG_INFO << "Number of features to be processed: " << feature_ms2_indices.assignedMS2.size() << std::endl;
OPENMS_LOG_INFO << "Number of additional MS2 spectra to be processed: " << feature_ms2_indices.unassignedMS2.size() << std::endl;
}
else
{
long count_ms2 = count_if(spectra.begin(), spectra.end(),
[](const MSSpectrum &spectrum) { return spectrum.getMSLevel() == 2; });
OPENMS_LOG_INFO << "Number of MS2 spectra to be processed: " << count_ms2 << std::endl;
}
}
void SiriusExportAlgorithm::run(const StringList& mzML_files,
const StringList& featureXML_files,
const String& out_ms,
const String& out_compoundinfo) const
{
// loop over all spectra in all files and write data to ofstream
std::ofstream os;
// create temporary input file (.ms)
os.open(out_ms);
if (!os)
{
throw Exception::UnableToCreateFile(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, out_ms);
}
os.precision(12);
std::vector<SiriusMSFile::CompoundInfo> v_cmpinfo; // To store compound information for all files
for (size_t i = 0; i < mzML_files.size(); ++i)
{
// load experiment
MSExperiment spectra;
FileHandler().loadExperiment(mzML_files[i], spectra, {FileTypes::MZML});
// run masstrace filter and feature mapping
FeatureMapping::FeatureMappingInfo feature_mapping_info;
FeatureMapping::FeatureToMs2Indices feature_ms2_indices;
// check if 'featureXML_files' is empty and pass an empty string if it is
String feature_info_to_pass = featureXML_files.empty() ? "" : featureXML_files[i];
SiriusExportAlgorithm::preprocessing(feature_info_to_pass,
spectra,
feature_mapping_info,
feature_ms2_indices);
// returns Log of feature and/or spectra number
SiriusExportAlgorithm::logFeatureSpectraNumber(feature_info_to_pass, feature_ms2_indices, spectra);
// temporary vector to store compound information for the current file
std::vector<SiriusMSFile::CompoundInfo> temp_cmpinfo;
SiriusMSFile::store(spectra,
os,
feature_ms2_indices,
isFeatureOnly(),
getIsotopePatternIterations(),
isNoMasstraceInfoIsotopePattern(),
temp_cmpinfo,
i);
// Append the compound information of the current file to the overall vector
v_cmpinfo.insert(v_cmpinfo.end(), temp_cmpinfo.begin(), temp_cmpinfo.end());
}
os.close();
if (!out_compoundinfo.empty())
{
SiriusMSFile::saveFeatureCompoundInfoAsTSV(v_cmpinfo, out_compoundinfo);
}
}
} // namespace OpenMS
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/NeighborSeq.cpp | .cpp | 5,839 | 178 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow, Philipp Wang $
// $Authors: Chris Bielow, Philipp Wang $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/NeighborSeq.h>
#include <OpenMS/CHEMISTRY/ResidueDB.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/MATH/MathFunctions.h>
using namespace OpenMS;
using namespace std;
/// Constructor: takes ownership of the digested peptides, configures the spectrum
/// generator for b/y ions (including b1) and builds the mass -> peptide-index lookup.
NeighborSeq::NeighborSeq(std::vector<AASequence>&& digested_relevant_peptides)
    : digested_relevant_peptides_(std::move(digested_relevant_peptides)),
      neighbor_stats_(digested_relevant_peptides_.size(), 0) // one counter per peptide; -1 marks unfindable
{
  Param params;
  params.setValue("add_b_ions", "true");
  params.setValue("add_y_ions", "true");
  params.setValue("add_first_prefix_ion", "true"); // do not skip b1 ion
  spec_gen_.setParameters(params);
  // cached residue for detecting peptides with unknown amino acid 'X'
  x_residue_ = ResidueDB::getInstance()->getResidue('X');
  // Index peptide masses for fast lookup
  mass_position_map_ = createMassLookup_();
}
// Function to generate the theoretical spectrum for a given peptide sequence
// Builds the singly-charged theoretical b/y ion spectrum for one peptide.
MSSpectrum NeighborSeq::generateSpectrum(const AASequence& peptide_sequence)
{
  MSSpectrum theo_spec;
  spec_gen_.getSpectrum(theo_spec, peptide_sequence, 1, 1);
  return theo_spec;
}
int NeighborSeq::computeSharedIonCount(const MSSpectrum& spec1, const MSSpectrum& spec2, const double& mz_bin_size)
{
// compute shared b/y ions in two sorted ranges
auto setIntersectionCount = [mz_bin_size](auto first1, auto last1, auto first2, auto last2) -> Size
{
Size count {0};
while (first1 != last1 && first2 != last2)
{
auto val1 = int(first1->getMZ() / mz_bin_size);
auto val2 = int(first2->getMZ() / mz_bin_size);
if (val1 < val2) ++first1;
else
{
if (val1 == val2)
{
++first1;
++count;
}
++first2;
}
}
return count;
};
auto shared_ions = setIntersectionCount(spec1.begin(), spec1.end(), spec2.begin(), spec2.end());
return shared_ions;
}
// Function to compare two spectra and determine if they are similar
// Decides whether two spectra are "neighbors": the fraction of shared binned ions
// (relative to the total peak count of both spectra) must exceed the threshold.
bool NeighborSeq::isNeighborSpectrum(const MSSpectrum& spec1, const MSSpectrum& spec2, const double min_shared_ion_fraction, const double mz_bin_size)
{
  const int shared_bins = computeSharedIonCount(spec1, spec2, mz_bin_size);
  const double shared_fraction = (2.0 * shared_bins) / (spec1.size() + spec2.size());
  return shared_fraction > min_shared_ion_fraction;
}
//Finds candidate positions based on a given mono-isotopic weight and mass tolerance.
// Returns the [lower, upper) iterator range of mass_position_map_ entries whose
// mono-isotopic mass lies within +/- mass_tolerance of 'mono_weight'.
auto NeighborSeq::findCandidatePositions_(const double mono_weight, double mass_tolerance, const bool mass_tolerance_pc_ppm)
{
  assert(mass_tolerance >= 0);
  // ppm tolerances are converted to an absolute Dalton window first
  const double tol_da = mass_tolerance_pc_ppm ? Math::ppmToMass(mono_weight, mass_tolerance) : mass_tolerance;
  return make_pair(mass_position_map_.lower_bound(mono_weight - tol_da),
                   mass_position_map_.upper_bound(mono_weight + tol_da));
}
// Method to find neighbor peptides in a given FASTA file
bool NeighborSeq::isNeighborPeptide(const AASequence& peptide,
const double mass_tolerance_pc,
const bool mass_tolerance_pc_ppm,
const double min_shared_ion_fraction,
const double mz_bin_size)
{
auto [from, to] = findCandidatePositions_(peptide.getMonoWeight(), mass_tolerance_pc, mass_tolerance_pc_ppm);
if (from == to) return false;
bool found = false;
MSSpectrum spec = generateSpectrum(peptide);
for (auto it_rel_pep = from; it_rel_pep != to; ++it_rel_pep)
{
for (int pep_index : it_rel_pep->second)
{
MSSpectrum neighbor_spec = generateSpectrum(digested_relevant_peptides_[pep_index]);
if (isNeighborSpectrum(spec, neighbor_spec, min_shared_ion_fraction, mz_bin_size))
{
//std::cout << digested_relevant_peptides_[pep_index] << " has neighbor " << peptide << '\n';
neighbor_stats_[pep_index]++;
found = true;
}
}
}
return found;
}
/// Builds a mono-isotopic-mass -> peptide-indices lookup over all relevant peptides.
/// Peptides containing the unknown residue 'X' are skipped (mass undefined) and
/// marked with -1 in neighbor_stats_ so they are reported as unfindable later.
/// @return map from mono-isotopic mass to indices into digested_relevant_peptides_
map<double, vector<int>> NeighborSeq::createMassLookup_()
{
  // Map to store the mass and corresponding positions
  map<double, vector<int>> mass_position_map;
  int skipped{0};
  // Iterate through the vector of AASequence objects
  for (size_t i = 0; i < digested_relevant_peptides_.size(); ++i)
  {
    if (digested_relevant_peptides_[i].has(*x_residue_))
    {
      neighbor_stats_[i] = -1; // mark as not findable
      skipped++;
      continue;
    }
    // Calculate the mono-isotopic mass of the sequence
    double mass = digested_relevant_peptides_[i].getMonoWeight();
    // Insert the mass and the position into the map
    mass_position_map[mass].push_back(i);
  }
  // only warn when something was actually skipped (avoids a spurious "Skipped 0/N" message)
  if (skipped > 0)
  {
    OPENMS_LOG_WARN << "Skipped " << skipped << "/" << digested_relevant_peptides_.size()
                    << " peptides with unknown('X') amino acids." << endl;
  }
  return mass_position_map;
}
// Aggregates the per-peptide neighbor counters into summary statistics:
// -1 = unfindable (contains 'X'), 0 = no neighbor, 1 = one neighbor, >1 = multiple.
NeighborSeq::NeighborStats NeighborSeq::getNeighborStats() const
{
  NeighborStats stats;
  for (const int count : neighbor_stats_)
  {
    switch (count)
    {
      case -1:
        ++stats.unfindable_peptides;
        break;
      case 0:
        ++stats.findable_no_neighbors;
        break;
      case 1:
        ++stats.findable_one_neighbor;
        break;
      default:
        ++stats.findable_multiple_neighbors;
        break;
    }
  }
  return stats;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/FragmentIndex.cpp | .cpp | 31,103 | 674 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: $
// $Authors: Raphael Förster $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/FragmentIndex.h>
#include <OpenMS/CHEMISTRY/AAIndex.h>
#include <OpenMS/CHEMISTRY/AASequence.h>
#include <OpenMS/CHEMISTRY/DigestionEnzyme.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/CHEMISTRY/ModifiedPeptideGenerator.h>
#include <OpenMS/CHEMISTRY/ProteaseDB.h>
#include <OpenMS/CHEMISTRY/ProteaseDigestion.h>
#include <OpenMS/CHEMISTRY/TheoreticalSpectrumGenerator.h>
#include <OpenMS/CHEMISTRY/SimpleTSGXLMS.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/DATASTRUCTURES/DefaultParamHandler.h>
#include <OpenMS/DATASTRUCTURES/Param.h>
#include <OpenMS/DATASTRUCTURES/StringView.h>
#include <OpenMS/FORMAT/FASTAFile.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/KERNEL/Peak1D.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <OpenMS/QC/QCBase.h>
#include <functional>
using namespace std;
namespace OpenMS
{
#ifdef DEBUG_FRAGMENT_INDEX
  /// Debug helper: prints the m/z values of fragments in slice[low..high] (inclusive).
  static void print_slice(const std::vector<FragmentIndex::Fragment>& slice, size_t low, size_t high)
  {
    cout << "Slice: ";
    for(size_t i = low; i <= high; i++)
    {
      cout << slice[i].fragment_mz_ << " ";
    }
    cout << endl;
  }

  /// Debug helper: appends a single (possibly modified) peptide to the index without
  /// digestion/filtering. NOTE(review): moves from 'peptide' after reading its mass.
  void FragmentIndex::addSpecialPeptide( OpenMS::AASequence& peptide, Size source_idx)
  {
    float temp_mono = peptide.getMonoWeight();
    fi_peptides_.push_back({AASequence(std::move(peptide)), source_idx,temp_mono});
  }
#endif
void FragmentIndex::clear()
{
fi_fragments_.clear();
fi_peptides_.clear();
bucket_min_mz_.clear();
is_build_ = false;
}
// TODO: check if it makes sense to stream fasta from disc (we have some code for that... would safe memory but might have other drawbacks)
void FragmentIndex::generatePeptides(const std::vector<FASTAFile::FASTAEntry>& fasta_entries)
{
fi_peptides_.reserve(fasta_entries.size() * 5 * modifications_variable_.size()); //TODO: Calculate the average cleavage site number for the most important model organisms
ModifiedPeptideGenerator::MapToResidueType fixed_modifications = ModifiedPeptideGenerator::getModifications(modifications_fixed_);
ModifiedPeptideGenerator::MapToResidueType variable_modifications = ModifiedPeptideGenerator::getModifications(modifications_variable_);
size_t skipped_peptides = 0;
ProteaseDigestion digestor;
digestor.setEnzyme(digestion_enzyme_);
digestor.setMissedCleavages(missed_cleavages_);
std::cout << "Generating peptides..." << std::endl;
vector<pair<size_t, size_t>> digested_peptides; // every thread gets it own copy that is only cleared, not destructed (prevents frequent reallocations)
#pragma omp parallel for private(digested_peptides)
for (SignedSize protein_idx = 0; protein_idx < (SignedSize)fasta_entries.size(); ++protein_idx)
{
digested_peptides.clear();
const FASTAFile::FASTAEntry& protein = fasta_entries[protein_idx];
/// DIGEST (if bottom-up)
digestor.digestUnmodified(StringView(protein.sequence), digested_peptides, peptide_min_length_, peptide_max_length_);
for (const pair<size_t, size_t>& digested_peptide : digested_peptides)
{
//remove peptides containing unknown AA
if (protein.sequence.substr(digested_peptide.first, digested_peptide.second).find('X') != string::npos)
{
#pragma omp atomic
skipped_peptides++;
continue;
}
/// MODIFY (if modifications are specified)
AASequence unmod_peptide = AASequence::fromString(protein.sequence.substr(digested_peptide.first, digested_peptide.second));
float unmodified_mz = unmod_peptide.getMZ(1);
if (!(modifications_fixed_.empty() && modifications_variable_.empty()))
{
vector<AASequence> modified_peptides;
AASequence mod_peptide = AASequence(unmod_peptide); // copy the peptide
ModifiedPeptideGenerator::applyFixedModifications(fixed_modifications, mod_peptide);
ModifiedPeptideGenerator::applyVariableModifications(variable_modifications, mod_peptide, max_variable_mods_per_peptide_, modified_peptides);
UInt32 modification_idx = 0;
for (const AASequence& modified_peptide : modified_peptides)
{
float modified_mz = modified_peptide.getMZ(1);
if (modified_mz < peptide_min_mass_ || modified_mz > peptide_max_mass_) // exclude peptides that are not in the min-max window
{
continue; //TODO: Integrate this check in ModPepGen from #6859
}
#pragma omp critical (FIIndex)
{
fi_peptides_.emplace_back(static_cast<UInt32>(protein_idx),
modification_idx,
std::make_pair(static_cast<uint16_t>(digested_peptide.first),
static_cast<uint16_t>(digested_peptide.second)),
modified_mz);
++modification_idx;
}
}
}
else
{
if (peptide_min_mass_ <= unmodified_mz && unmodified_mz <= peptide_max_mass_)
{
#pragma omp critical (FIIndex)
{
fi_peptides_.emplace_back(static_cast<UInt32>(protein_idx), 0,
std::make_pair(static_cast<uint16_t>(digested_peptide.first),
static_cast<uint16_t>(digested_peptide.second)),
unmodified_mz);
}
}
}
}
}
if (skipped_peptides > 0)
{
OPENMS_LOG_WARN << skipped_peptides << " peptides skipped due to unkown AA \n";
}
std::cout << "Sorting peptides..." << std::endl;
// sort the peptide vector, critical for following steps
sort(fi_peptides_.begin(), fi_peptides_.end(), [](const Peptide& a, const Peptide& b)
{
return std::tie(a.precursor_mz_, a.protein_idx) < std::tie(b.precursor_mz_, b.protein_idx);
});
std::cout << "done." << std::endl;
}
void FragmentIndex::build(const std::vector<FASTAFile::FASTAEntry>& fasta_entries)
{
// reserve some memory for fi_fragments_: Each Peptide can approx give rise to up to #AA*2 fragments
fi_fragments_.reserve(fi_peptides_.size() * 2 * peptide_min_length_); //TODO: Does this make senese?
// get the spectrum generator and set the ion-types
//TheoreticalSpectrumGenerator tsg;
//SimpleTSGXLMS tsg;
TheoreticalSpectrumGenerator tsg;
auto tsg_params = tsg.getParameters();
auto this_params = getParameters();
tsg_params.setValue("add_a_ions", this_params.getValue("ions:add_a_ions"));
tsg_params.setValue("add_b_ions", this_params.getValue("ions:add_b_ions"));
tsg_params.setValue("add_c_ions", this_params.getValue("ions:add_c_ions"));
tsg_params.setValue("add_x_ions", this_params.getValue("ions:add_x_ions"));
tsg_params.setValue("add_y_ions", this_params.getValue("ions:add_y_ions"));
tsg_params.setValue("add_z_ions", this_params.getValue("ions:add_z_ions"));
//tsg_params.setValue("add_first_prefix_ion", "true");
tsg.setParameters(tsg_params);
/// generate all Peptides
generatePeptides(fasta_entries);
/// Since we (the new) Peptide struct does not store the AASequence, we must reconstruct the modified ones
/// therefore we need the modificationGenerators:
ModifiedPeptideGenerator::MapToResidueType fixed_modifications = ModifiedPeptideGenerator::getModifications(modifications_fixed_);
ModifiedPeptideGenerator::MapToResidueType variable_modifications = ModifiedPeptideGenerator::getModifications(modifications_variable_);
vector<AASequence> mod_peptides;
std::vector<float> b_y_ions;
OPENMS_LOG_INFO << "Generating fragments..." << std::endl;
#pragma omp parallel for private(mod_peptides, b_y_ions)
for(SignedSize peptide_idx = 0; peptide_idx < (SignedSize)fi_peptides_.size(); peptide_idx++)
{
const Peptide& pep = fi_peptides_[peptide_idx];
mod_peptides.clear();
b_y_ions.clear();
AASequence unmod_peptide = AASequence::fromString(fasta_entries[pep.protein_idx].sequence.substr(pep.sequence_.first, pep.sequence_.second));
if (!(modifications_fixed_.empty() && modifications_variable_.empty()))
{
AASequence mod_peptide = AASequence(unmod_peptide); // copy the peptide
ModifiedPeptideGenerator::applyFixedModifications(fixed_modifications, mod_peptide);
ModifiedPeptideGenerator::applyVariableModifications(variable_modifications, mod_peptide, max_variable_mods_per_peptide_, mod_peptides);
tsg.getPrefixAndSuffixIonsMZ(b_y_ions, mod_peptides[pep.modification_idx_], 1);
}
else
{
tsg.getPrefixAndSuffixIonsMZ(b_y_ions, unmod_peptide, 1);
}
for (const float& frag : b_y_ions)
{
if (fragment_min_mz_ > frag || frag > fragment_max_mz_ ) continue;
#pragma omp critical (CreateFragment)
fi_fragments_.emplace_back(static_cast<UInt32>(peptide_idx),(float) frag);
}
}
std::cout << "Sorting fragments..." << std::endl;
/// 1.) First all Fragments are sorted by their own mass!
sort(fi_fragments_.begin(), fi_fragments_.end(), [](const Fragment& a, const Fragment& b)
{
return std::tie(a.fragment_mz_, a.peptide_idx_) < std::tie(b.fragment_mz_, b.peptide_idx_);
});
/// Calculate the bucket size
bucketsize_ = sqrt(fi_fragments_.size()); //Todo: MSFragger uses a different approach, which might be better
OPENMS_LOG_INFO << "Creating DB with bucket_size " << bucketsize_ << endl;
/// 2.) next sort after precursor mass and save the min_mz of each bucket
#pragma omp parallel for
for (SignedSize i = 0; i < (SignedSize)fi_fragments_.size(); i += bucketsize_)
{
#pragma omp critical
bucket_min_mz_.emplace_back(fi_fragments_[i].fragment_mz_);
auto bucket_start = fi_fragments_.begin() + i;
auto bucket_end = (i + bucketsize_) > fi_fragments_.size() ? fi_fragments_.end() : bucket_start + bucketsize_;
//TODO: is this thread safe????
sort(bucket_start, bucket_end, [](const Fragment& a, const Fragment& b) {
return a.peptide_idx_ < b.peptide_idx_; // we don´t need a tie, because the idx are unique
});
}
OPENMS_LOG_INFO << "Sorting by bucket min m/z:" << bucketsize_ << endl;
//Resort in case the parallelization block above messed something up TODO: check if this can happen
std::sort( bucket_min_mz_.begin(), bucket_min_mz_.end());
is_build_ = true;
OPENMS_LOG_INFO << "Fragment index built!" << endl;
}
std::pair<size_t, size_t > FragmentIndex::getPeptidesInPrecursorRange(float precursor_mass,
const std::pair<float, float>& window)
{
float prec_tol = precursor_mz_tolerance_unit_ppm_ ? Math::ppmToMass(precursor_mz_tolerance_, precursor_mass) : precursor_mz_tolerance_ ;
auto left_it = std::lower_bound(fi_peptides_.begin(), fi_peptides_.end(), precursor_mass - prec_tol + window.first, [](const Peptide& a, float b) { return a.precursor_mz_ < b;});
auto right_it = std::upper_bound(fi_peptides_.begin(), fi_peptides_.end(), precursor_mass + prec_tol + window.second, [](float b, const Peptide& a) { return b < a.precursor_mz_;});
return make_pair(std::distance(fi_peptides_.begin(), left_it), std::distance(fi_peptides_.begin(), right_it));
}
  /// Looks up all indexed fragments matching one experimental peak (at an assumed
  /// fragment charge) whose parent peptide lies in the given peptide-index range.
  /// @param peak experimental peak; its m/z is converted to the singly-charged mass
  /// @param peptide_idx_range [first, second] candidate range into fi_peptides_
  /// @param peak_charge assumed charge of the fragment peak
  /// @return all matching (peptide index, fragment m/z) hits
  vector<FragmentIndex::Hit> FragmentIndex::query(const OpenMS::Peak1D& peak,
                                                  const pair<size_t, size_t>& peptide_idx_range,
                                                  uint16_t peak_charge)
  {
    // de-charge: convert the observed m/z to the corresponding singly-protonated mass
    float adjusted_mass = peak.getMZ() * (float)peak_charge -((peak_charge-1) * Constants::PROTON_MASS_U);
    float frag_tol = fragment_mz_tolerance_unit_ppm_ ? Math::ppmToMass(fragment_mz_tolerance_, adjusted_mass) : fragment_mz_tolerance_;
    // find the buckets whose minimum m/z overlaps [mass - tol, mass + tol]
    auto left_it = std::lower_bound(bucket_min_mz_.begin(), bucket_min_mz_.end(), adjusted_mass - frag_tol);
    auto right_it = std::upper_bound(bucket_min_mz_.begin(), bucket_min_mz_.end(), adjusted_mass + frag_tol);
    // step one bucket left: the previous bucket may still contain fragments >= mass - tol
    if (left_it != bucket_min_mz_.begin()) --left_it;
    auto in_range_buckets = make_pair(std::distance(bucket_min_mz_.begin(), left_it), std::distance(bucket_min_mz_.begin(), right_it));
    vector<FragmentIndex::Hit> hits;
    hits.reserve(peptide_idx_range.second - peptide_idx_range.first);
    for (UInt32 j = in_range_buckets.first; j < in_range_buckets.second; j++)
    {
      // bucket j spans fragments [j*bucketsize_, (j+1)*bucketsize_), sorted by peptide_idx_
      auto slice_begin = fi_fragments_.begin() + (j*bucketsize_);
      auto slice_end = ((j+1) * bucketsize_) >= fi_fragments_.size() ? fi_fragments_.end() : (fi_fragments_.begin() + ((j+1) * bucketsize_)) ;
      // jump to the first fragment belonging to a peptide inside the candidate range
      auto left_iter = std::lower_bound(slice_begin, slice_end, peptide_idx_range.first, [](Fragment a, UInt32 b) { return a.peptide_idx_ < b;} );
      while (left_iter != slice_end) // sequential scan
      {
        if(left_iter->peptide_idx_ > peptide_idx_range.second) break;
        // final tolerance check on the actual fragment m/z (bucket overlap is coarse)
        if ((adjusted_mass >= left_iter->fragment_mz_ - frag_tol ) && adjusted_mass <= (left_iter->fragment_mz_+ frag_tol))
        {
          hits.emplace_back(left_iter->peptide_idx_, left_iter->fragment_mz_);
          #ifdef DEBUG_FRAGMENT_INDEX
          if (left_iter->peptide_idx_ < peptide_idx_range.first || left_iter->peptide_idx_ > peptide_idx_range.second)
            OPENMS_LOG_WARN << "idx out of range" << endl;
          #endif
        }
        ++left_iter;
      }
    }
    return hits;
  }
void FragmentIndex::queryPeaks(SpectrumMatchesTopN& candidates, const MSSpectrum& spectrum,
const std::pair<size_t, size_t>& candidates_range,
const int16_t isotope_error,
const uint16_t precursor_charge)
{
for (const Peak1D& peak : spectrum)
{
vector<Hit> query_hits;
uint16_t actual_max = std::min(precursor_charge, max_fragment_charge_);
for (uint16_t fragment_charge = 1; fragment_charge <= actual_max; fragment_charge++)
{
query_hits = query(peak, candidates_range, fragment_charge);
for (const auto& hit : query_hits)
{
{
size_t idx = hit.peptide_idx - candidates_range.first;
auto& source = candidates.hits_[idx];
if (source.num_matched_ == 0)
{
source.precursor_charge_ = precursor_charge;
source.peptide_idx_ = hit.peptide_idx;
source.isotope_error_ = isotope_error;
}
++source.num_matched_;
}
}
}
}
}
void FragmentIndex::trimHits(OpenMS::FragmentIndex::SpectrumMatchesTopN& init_hits) const
{
if (init_hits.hits_.size() > max_processed_hits_)
{
std::partial_sort(init_hits.hits_.begin(), init_hits.hits_.begin() + max_processed_hits_, init_hits.hits_.end(), [](const SpectrumMatch& a,const SpectrumMatch& b){
if (a.num_matched_ != b.num_matched_)
{
return a.num_matched_ > b.num_matched_;
}
else
{
// Prefer isotope_error close to 0: abs(isotope_error), then isotope_error, then precursor_charge
const auto abs_iso_a = a.isotope_error_ < 0 ? -a.isotope_error_ : a.isotope_error_;
const auto abs_iso_b = b.isotope_error_ < 0 ? -b.isotope_error_ : b.isotope_error_;
if (abs_iso_a != abs_iso_b) return abs_iso_a < abs_iso_b;
if (a.isotope_error_ != b.isotope_error_) return a.isotope_error_ < b.isotope_error_;
return a.precursor_charge_ < b.precursor_charge_;
}
});
init_hits.hits_.resize(max_processed_hits_);
}
else
{
std::sort(init_hits.hits_.begin(), init_hits.hits_.end(), [](const SpectrumMatch& a, const SpectrumMatch& b) {
if (a.num_matched_ != b.num_matched_)
{
return a.num_matched_ > b.num_matched_;
}
else
{
// Prefer isotope_error close to 0: abs(isotope_error), then isotope_error, then precursor_charge
const auto abs_iso_a = a.isotope_error_ < 0 ? -a.isotope_error_ : a.isotope_error_;
const auto abs_iso_b = b.isotope_error_ < 0 ? -b.isotope_error_ : b.isotope_error_;
if (abs_iso_a != abs_iso_b) return abs_iso_a < abs_iso_b;
if (a.isotope_error_ != b.isotope_error_) return a.isotope_error_ < b.isotope_error_;
return a.precursor_charge_ < b.precursor_charge_;
}
});
}
if (init_hits.hits_.size() > 0 )
{
if (init_hits.hits_[0].num_matched_ < min_matched_peaks_)
init_hits.hits_.resize(0);
}
for (auto hit_iter = init_hits.hits_.rbegin(); hit_iter != init_hits.hits_.rend(); ++hit_iter)
{
if (hit_iter->num_matched_ >= min_matched_peaks_) // search for the first element that should be included
{
init_hits.hits_.resize(init_hits.hits_.size() - (distance(init_hits.hits_.rbegin(), hit_iter)));
break;
}
}
/* alternative code
* auto it_zero = std::lower_bound(init_hits.hits_.begin(), init_hits.hits_.end(), min_matched_peaks_ , [](const SpectrumMatch& sm, uint32_t b){
return sm.num_matched_ > b;
});
if (it_zero != init_hits.hits_.end() && it_zero->num_matched_ == 0)
{
init_hits.hits_.erase(it_zero, init_hits.hits_.end());
}
* */
}
  void FragmentIndex::searchDifferentPrecursorRanges(const MSSpectrum& spectrum,
                                                     float precursor_mass,
                                                     SpectrumMatchesTopN& sms,
                                                     uint16_t charge)
  {
    // Searches the index once per allowed precursor isotope error and merges all
    // resulting candidate matches into 'sms'. Two regimes:
    //  - open search: a single pass with a wide precursor window, no isotope errors
    //  - standard search: one pass per isotope error, zero-width extra window
    int16_t min_isotope_error_applied;
    int16_t max_isotope_error_applied;
    float precursor_window_upper_applied;
    float precursor_window_lower_applied;
    if (isOpenSearchMode_())
    {
      min_isotope_error_applied = 0;
      max_isotope_error_applied = 0;
      precursor_window_upper_applied = open_precursor_window_upper_;
      precursor_window_lower_applied = open_precursor_window_lower_;
    }
    else
    {
      min_isotope_error_applied = min_isotope_error_;
      max_isotope_error_applied = max_isotope_error_;
      precursor_window_upper_applied = 0;
      precursor_window_lower_applied = 0;
    }
    for (int16_t isotope_error = min_isotope_error_applied; isotope_error <= max_isotope_error_applied; isotope_error++)
    {
      SpectrumMatchesTopN candidates_iso_error;
      // shift the precursor mass by whole C13/C12 mass differences
      float precursor_mass_isotope_error = precursor_mass + ((float)isotope_error * (float)Constants::C13C12_MASSDIFF_U);
      auto candidates_range = getPeptidesInPrecursorRange(precursor_mass_isotope_error, {precursor_window_lower_applied, precursor_window_upper_applied}); // for the simple search we do not apply any modification window!!
      // one slot per candidate peptide inside the precursor window
      candidates_iso_error.hits_.resize(candidates_range.second - candidates_range.first + 1);
      queryPeaks(candidates_iso_error, spectrum, candidates_range, isotope_error, charge);
      // take only top 50 hits
      //trimHits(candidates_iso_error);
      sms += candidates_iso_error;
    }
    //trimHits(sms);
  }
  void FragmentIndex::querySpectrum(const OpenMS::MSSpectrum& spectrum,
                                    OpenMS::FragmentIndex::SpectrumMatchesTopN& sms)
  {
    // Entry point: searches one MS2 spectrum against the (already built) index
    // and stores the trimmed list of best candidate matches in 'sms'.
    if (!isBuild())
    {
      OPENMS_LOG_WARN << "FragmentIndex not yet build \n";
      return;
    }
    if (spectrum.empty() || (spectrum.getMSLevel() != 2))
    { // only non-empty MS2 spectra can be searched
      return;
    }
    const auto& precursor = spectrum.getPrecursors();
    if (precursor.size() != 1)
    {
      OPENMS_LOG_WARN << "Number of precursors is not equal 1 \n";
      return;
    }
    // two possible modes: the precursor has a known charge, or we test all possible charges
    vector<size_t> charges;
    //cout << "precursor charge = " << precursor[0].getCharge() << endl;
    if (precursor[0].getCharge())
    {
      //cout << "precursor charge found" << endl;
      charges.push_back(precursor[0].getCharge());
    }
    else
    { // unknown charge: enumerate the configured precursor charge range
      for (uint16_t i = min_precursor_charge_; i <= max_precursor_charge_; i++)
      {
        charges.push_back(i);
      }
    }
    // loop over all PRECURSOR-charges
    for (uint16_t charge : charges)
    {
      SpectrumMatchesTopN candidates_charge;
      float mz;
      // singly-protonated precursor mass hypothesis for this charge: m/z * z - (z-1) * proton
      mz = (float)precursor[0].getMZ() * charge - ((charge-1) * Constants::PROTON_MASS_U);
      searchDifferentPrecursorRanges(spectrum, mz, candidates_charge, charge);
      sms += candidates_charge;
    }
    // keep only the overall best candidates across all tested charges
    trimHits(sms);
  }
FragmentIndex::FragmentIndex() : DefaultParamHandler("FragmentIndex")
{
defaults_.setValue("ions:add_y_ions", "true", "Add peaks of y-ions to the spectrum");
defaults_.setValidStrings("ions:add_y_ions", {"true","false"});
defaults_.setValue("ions:add_b_ions", "true", "Add peaks of b-ions to the spectrum");
defaults_.setValidStrings("ions:add_b_ions", {"true","false"});
defaults_.setValue("ions:add_a_ions", "false", "Add peaks of a-ions to the spectrum");
defaults_.setValidStrings("ions:add_a_ions", {"true","false"});
defaults_.setValue("ions:add_c_ions", "false", "Add peaks of c-ions to the spectrum");
defaults_.setValidStrings("ions:add_c_ions", {"true","false"});
defaults_.setValue("ions:add_x_ions", "false", "Add peaks of x-ions to the spectrum");
defaults_.setValidStrings("ions:add_x_ions", {"true","false"});
defaults_.setValue("ions:add_z_ions", "false", "Add peaks of z-ions to the spectrum");
defaults_.setValidStrings("ions:add_z_ions", {"true","false"});
defaults_.setSectionDescription("ions", "Theoretical ion series toggles");
defaults_.setValue("precursor:mass_tolerance", 10.0, "Tolerance for precursor-m/z in search");
std::vector<std::string> precursor_mass_tolerance_unit_valid_strings;
precursor_mass_tolerance_unit_valid_strings.emplace_back("ppm");
precursor_mass_tolerance_unit_valid_strings.emplace_back("Da");
defaults_.setValue("precursor:mass_tolerance_unit", "ppm", "Unit of precursor mass tolerance.");
defaults_.setValidStrings("precursor:mass_tolerance_unit", precursor_mass_tolerance_unit_valid_strings);
defaults_.setValue("fragment:mass_tolerance", 10.0, "Fragment mass tolerance");
std::vector<std::string> fragment_mass_tolerance_unit_valid_strings;
fragment_mass_tolerance_unit_valid_strings.emplace_back("ppm");
fragment_mass_tolerance_unit_valid_strings.emplace_back("Da");
defaults_.setValue("fragment:mass_tolerance_unit", "ppm", "Unit of fragment m");
defaults_.setValidStrings("fragment:mass_tolerance_unit", fragment_mass_tolerance_unit_valid_strings);
defaults_.setValue("precursor:min_charge", 2, "min precursor charge");
defaults_.setValue("precursor:max_charge", 5, "max precursor charge");
defaults_.setValue("fragment:min_mz", 150, "Minimal fragment mz for database");
defaults_.setValue("fragment:max_mz", 2000, "Maximal fragment mz for database");
vector<String> all_mods;
ModificationsDB::getInstance()->getAllSearchModifications(all_mods);
defaults_.setValue("modifications:fixed", std::vector<std::string>{"Carbamidomethyl (C)"}, "Fixed modifications, specified using UniMod (www.unimod.org) terms, e.g. 'Carbamidomethyl (C)'");
defaults_.setValidStrings("modifications:fixed", ListUtils::create<std::string>(all_mods));
defaults_.setValue("modifications:variable", std::vector<std::string>{"Oxidation (M)"}, "Variable modifications, specified using UniMod (www.unimod.org) terms, e.g. 'Oxidation (M)'");
defaults_.setValidStrings("modifications:variable", ListUtils::create<std::string>(all_mods));
defaults_.setValue("modifications:variable_max_per_peptide", 2, "Maximum number of residues carrying a variable modification per candidate peptide");
vector<String> all_enzymes;
ProteaseDB::getInstance()->getAllNames(all_enzymes);
defaults_.setValue("enzyme", "Trypsin", "Enzyme for digestion");
defaults_.setValidStrings("enzyme", ListUtils::create<std::string>(all_enzymes));
defaults_.setValue("peptide:missed_cleavages", 1, "Missed cleavages for digestion");
defaults_.setValue("peptide:min_size", 7, "Minimal peptide length for database");
defaults_.setValue("peptide:max_size", 40, "Maximal peptide length for database");
defaults_.setValue("peptide:min_mass", 100, "Minimal peptide mass for database");
defaults_.setValue("peptide:max_mass", 9000, "Maximal peptide mass for database"); //Todo: set unlimited option
is_build_ = false; // TODO: remove this and build on construction
//Search-related params
defaults_.setValue("fragment:min_matched_ions", 5, "Minimal number of matched ions to report a PSM");
defaults_.setValue("precursor:isotope_error_min", -1, "Minimum allowed precursor isotope error");
defaults_.setValue("precursor:isotope_error_max", 1, "Maximum allowed precursor isotope error");
defaults_.setValue("fragment:max_charge", 2, "max fragment charge");
defaults_.setValue("scoring:max_candidates_per_spectrum", 50, "The number of initial hits for which we calculate a score");
defaults_.setSectionDescription("scoring", "Search/Scoring Limits");
// Open search window bounds (used when tolerance > 1 Da or > 1000 ppm)
defaults_.setValue("precursor:open_window_lower", -100.0, "lower bound of the open precursor window");
defaults_.setValue("precursor:open_window_upper", 200.0, "upper bound of the open precursor window");
//defaults from the searchEngine that are not needed for this class, but otherwise we would generate a warning
defaults_.setValue("decoys", "false", "Should decoys be generated?");
defaults_.setValidStrings("decoys", {"true","false"} );
defaults_.setValue("annotate:PSM", std::vector<std::string>{"ALL"}, "Annotations added to each PSM.");
defaults_.setValidStrings("annotate:PSM",
std::vector<std::string>{
"ALL",
Constants::UserParam::FRAGMENT_ERROR_MEDIAN_PPM_USERPARAM,
Constants::UserParam::PRECURSOR_ERROR_PPM_USERPARAM,
Constants::UserParam::MATCHED_PREFIX_IONS_FRACTION,
Constants::UserParam::MATCHED_SUFFIX_IONS_FRACTION}
);
defaults_.setValue("report:top_hits", 1, "Maximum number of top scoring hits per spectrum that are reported.");
defaults_.setSectionDescription("report", "Reporting Options");
defaults_.setValue("peptide:motif", "", "If set, only peptides that contain this motif (provided as RegEx) will be considered.");
defaults_.setSectionDescription("peptide", "Peptide Options");
IntList isotopes = {0, 1};
defaults_.setValue("precursor:isotopes", isotopes, "Corrects for mono-isotopic peak misassignments. (E.g.: 1 = prec. may be misassigned to first isotopic peak)");
defaultsToParam_();
}
  void FragmentIndex::updateMembers_()
  {
    // Copies all user parameters into member variables; invoked by
    // DefaultParamHandler whenever the parameter set changes.
    add_b_ions_ = param_.getValue("ions:add_b_ions").toBool();
    add_y_ions_ = param_.getValue("ions:add_y_ions").toBool();
    add_a_ions_ = param_.getValue("ions:add_a_ions").toBool();
    add_c_ions_ = param_.getValue("ions:add_c_ions").toBool();
    add_x_ions_ = param_.getValue("ions:add_x_ions").toBool();
    add_z_ions_ = param_.getValue("ions:add_z_ions").toBool();
    digestion_enzyme_ = param_.getValue("enzyme").toString();
    missed_cleavages_ = param_.getValue("peptide:missed_cleavages");
    peptide_min_mass_ = param_.getValue("peptide:min_mass");
    peptide_max_mass_ = param_.getValue("peptide:max_mass");
    peptide_min_length_ = param_.getValue("peptide:min_size");
    peptide_max_length_ = param_.getValue("peptide:max_size");
    fragment_min_mz_ = param_.getValue("fragment:min_mz");
    fragment_max_mz_ = param_.getValue("fragment:max_mz");
    precursor_mz_tolerance_ = param_.getValue("precursor:mass_tolerance");
    fragment_mz_tolerance_ = param_.getValue("fragment:mass_tolerance");
    precursor_mz_tolerance_unit_ppm_ = param_.getValue("precursor:mass_tolerance_unit").toString() == "ppm";
    fragment_mz_tolerance_unit_ppm_ = param_.getValue("fragment:mass_tolerance_unit").toString() == "ppm";
    modifications_fixed_ = ListUtils::toStringList<std::string>(param_.getValue("modifications:fixed"));
    modifications_variable_ = ListUtils::toStringList<std::string>(param_.getValue("modifications:variable"));
    max_variable_mods_per_peptide_ = param_.getValue("modifications:variable_max_per_peptide");
    min_matched_peaks_ = param_.getValue("fragment:min_matched_ions");
    min_isotope_error_ = param_.getValue("precursor:isotope_error_min");
    max_isotope_error_ = param_.getValue("precursor:isotope_error_max");
    min_precursor_charge_ = param_.getValue("precursor:min_charge");
    max_precursor_charge_ = param_.getValue("precursor:max_charge");
    max_fragment_charge_ = param_.getValue("fragment:max_charge");
    max_processed_hits_ = param_.getValue("scoring:max_candidates_per_spectrum");
    // Open search mode is automatically determined in isOpenSearchMode_()
    if (isOpenSearchMode_())
    {
      OPENMS_LOG_INFO << "[FragmentIndex] Open-search mode enabled because precursor mass tolerance ("
                      << precursor_mz_tolerance_ << " "
                      << (precursor_mz_tolerance_unit_ppm_ ? "ppm" : "Da")
                      << ") exceeds threshold (1000 ppm or 1 Da)." << std::endl;
    }
    open_precursor_window_lower_ = param_.getValue("precursor:open_window_lower");
    open_precursor_window_upper_ = param_.getValue("precursor:open_window_upper");
  }
  bool FragmentIndex::isBuild() const
  {
    // True once the index has been built (see 'is_build_' flag).
    return is_build_;
  }
  const vector<FragmentIndex::Peptide>& FragmentIndex::getPeptides() const
  {
    // Read-only access to the peptide entries backing the index.
    return fi_peptides_;
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/AhoCorasickAmbiguous.cpp | .cpp | 22,267 | 659 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/AhoCorasickAmbiguous.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <algorithm>
#include <cassert>
#include <queue>
#include <tuple>
namespace OpenMS
{
/**
@brief given an ambAA or the 'superAA' @p aaa, return a range of AA's which need to be explored.
*/
  inline constexpr std::tuple<AA,AA> _ambiguitiesOf(const AA aaa)
  {
    // The jump table below relies on specific neighborships in the AA ordering;
    // verify them at compile time:
    static_assert(++AA('D') == AA('N')); // must be neighbors
    static_assert(++AA('E') == AA('Q')); // must be neighbors
    static_assert(++AA('I') == AA('L')); // must be neighbors
    // row of jump table must be B,J,Z,X,$:
    static_assert(++AA('B') == AA('J')); // make sure the table is ordered as we expect
    static_assert(++AA('J') == AA('Z')); // make sure the table is ordered as we expect
    static_assert(++AA('Z') == AA('X')); // make sure the table is ordered as we expect
    static_assert(++AA('X') == AA('$')); // make sure the table is ordered as we expect
    // jump table: start of scouts
    //             end of scouts (including)
    constexpr const AA jump[5][2] = {{AA('D'), AA('N')}, // B = D,N
                                     {AA('I'), AA('L')}, // J = I,L
                                     {AA('E'), AA('Q')}, // Z = E,Q
                                     {AA('A'), AA('V')}, // X = A..V
                                     {AA('A'), AA('X')}}; // $ = A..X
    // which line of jump table do we need?
    const auto line = (aaa - AA('B'))();
    assert(aaa.isAmbiguous()); // only B/J/Z/X/$ have a defined row
    return {jump[line][0], jump[line][1]};
  }
  // Construct a trie allowing up to 'max_aaa' ambiguous AAs and 'max_mm' mismatches per hit.
  ACTrie::ACTrie(uint32_t max_aaa, uint32_t max_mm) : max_aaa_(max_aaa), max_mm_(max_mm)
  { // create root node:
    trie_.emplace_back();
  }
  ACTrie::~ACTrie() = default;
  void ACTrie::addNeedle(const std::string& needle)
  {
    // Inserts one pattern into the (naive) trie, creating nodes as needed.
    // @throws Exception::InvalidValue if 'needle' contains a character which is
    //         not a valid amino acid.
    Index cn {0}; // start at root
    for (auto c : needle) // OMS_CODING_TEST_EXCLUDE
    {
      AA aa(c);
      // make sure invalid chars raise an exception
      if (aa.isValidForPeptide())
      {
        cn = add_(cn, aa);
      }
      else
      {
        throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, std::string("Invalid amino acid"), std::string(1, c));
      }
    }
    // add hit to last node
    trie_[cn()].depth_and_hits.has_hit = 1;
    // remember a needle ends here
    if (vec_index2needles_.size() <= cn()) // make sure there is enough space
    { // increase to next power of 2
      vec_index2needles_.resize(std::bit_ceil(cn() + 1)); // +1 since bit_ceil(x)^2 == x iff x is a power of 2 (i.e. no resize takes place)
    }
    vec_index2needles_[cn()].push_back(needle_count_);
    ++needle_count_;
  }
void ACTrie::addNeedles(const std::vector<std::string>& needles)
{
for (const auto& s : needles)
{
addNeedle(s);
}
}
void ACTrie::addNeedlesAndCompress(const std::vector<std::string>& needles)
{
for (const auto& s : needles)
{
addNeedle(s);
}
compressTrie();
}
  void ACTrie::compressTrie()
  {
    // Converts the naive child-list trie into a BFS-ordered layout (children of a
    // node stored contiguously, addressed via bitset + offset), then computes
    // suffix links, propagates hit flags along them, and path-compresses suffix
    // links where safe. Must be called after the last addNeedle() and before searching.
    // final BFS tree we want to create
    std::vector<ACNode> bfs_tree;
    bfs_tree.reserve(trie_.size());
    // translate old naive node index to new node index in BFS
    decltype(vec_index2needles_) bfs_index2_needles;
    bfs_index2_needles.resize(trie_.size());
    // points to the parent node for each node in the final BFS tree.
    // (needed for suffix construction)
    std::vector<Index> tmp_parents;
    tmp_parents.reserve(trie_.size());
    // contains nodes in breadth first search order
    std::queue<Index> bfs_q;
    // lambda for each pop operation on the queue
    auto bfs_op = [&bfs_q, &bfs_tree, &bfs_index2_needles, &tmp_parents, this](Index current_index)
    {
      auto bfs_index = bfs_tree.size();
      // add current node to new trie
      bfs_tree.push_back(trie_[current_index()]);
      auto& bfs_node = bfs_tree.back();
      if (current_index() < vec_index2children_naive_.size() && vec_index2children_naive_[current_index()].size() != 0)
      {
        auto& children = vec_index2children_naive_[current_index()];
        bfs_node.nr_children = ACNode::ChildCountType(children.size());
        std::sort(children.begin(), children.end(), // sort children by edge label (so they have the same order as in the bitset)
                  [&](const Index& a, const Index& b) { return trie_[a()].edge() < trie_[b()].edge(); });
        for (const auto child : children)
        {
          bfs_node.children_bitset.set(trie_[child()].edge()); // set child exists
          bfs_q.push(child);
          tmp_parents.emplace_back(Index::T(bfs_index)); // the parent will be added at index = tmp_tree.size()
        }
      }
      bfs_index2_needles[bfs_index] = std::move(vec_index2needles_[current_index()]);
    };
    // create root manually
    tmp_parents.emplace_back(0); // root has no parents (points to itself)
    bfs_op(0); // adds parents to 'tmp_parents' and children to 'BFS'
    ACNode& root = bfs_tree.back();
    root.first_child = 1; // we know the first child will start at index 1 (since root is index 0)
    while (!bfs_q.empty())
    {
      // NOTE: 'last_node' stays valid across bfs_op's push_back because
      // 'bfs_tree' was reserve()d to full size above (no reallocation).
      auto& last_node = bfs_tree.back(); // previous node in final tree
      Index current_index = bfs_q.front();
      bfs_q.pop();
      bfs_op(current_index);
      // update where to find the children of the current node
      // --> its where the children of the previous node end
      bfs_tree.back().first_child = last_node.first_child() + last_node.nr_children;
    }
    // switch to BFS trie
    trie_ = std::move(bfs_tree);
    vec_index2needles_.swap(bfs_index2_needles);
    /////////////////////////////////////////////////////////////
    // compute suffix links (could also be done while creating the trie, but it would make the code more complex)
    // .. and hit flag
    trie_[0].suffix = 0; // must point to itself
    /** debug helper (kept for reference):
    auto printTrie = [&]() {
      int prev_depth = 0;
      for (size_t i = 0; i < trie_.size(); ++i)
      {
        if (prev_depth != (int)trie_[i].depth_and_hits.depth)
        {
          prev_depth = (int)trie_[i].depth_and_hits.depth;
          std::cout << " ***\n";
        }
        std::cout << "node " << i << " (edge " << trie_[i].edge.toChar() << ", depth " << (int)trie_[i].depth_and_hits.depth << ", hit "
                  << (int)trie_[i].depth_and_hits.has_hit << ", suffix " << trie_[i].suffix() << ", first child " << (int)trie_[i].first_child()
                  << '\n';
      }
    };
    */
    /// first, build suffix links as usual (without path compression)
    // start at depth = 2, since setting suffix links and has_hit for depth 1 is not needed (lvl 1 already points to root)
    for (size_t i = 1 + (size_t)trie_[0].nr_children; i < trie_.size(); ++i)
    {
      Index parent = tmp_parents[i];
      trie_[i].suffix = follow_(trie_[parent()].suffix(), trie_[i].edge);
      trie_[i].depth_and_hits.has_hit |= trie_[trie_[i].suffix()].depth_and_hits.has_hit;
    }
    // second pass over trie, do path compression of suffix links where possible
    // start at depth = 2, since setting suffix links and has_hit for depth 1 is not needed (lvl 1 already points to root)
    size_t path_compression_count = 0;
    for (size_t i = 1 + (size_t)trie_[0].nr_children; i < trie_.size(); ++i)
    {
      const bool suffix_is_hit = trie_[trie_[i].suffix()].depth_and_hits.has_hit;
      if (suffix_is_hit) continue; // don't touch suffices which have hits (since we do not have separate output links)
      const auto children_node = trie_[i].children_bitset.bits;
      const auto childen_suffix = trie_[trie_[i].suffix()].children_bitset.bits;
      if ((children_node | childen_suffix) == children_node) // suffix' node has no extra children compared to this node `i`
      {
        // point to suffix's suffix (path compression)
        trie_[i].suffix = trie_[trie_[i].suffix()].suffix;
        path_compression_count++;
      }
    }
    OPENMS_LOG_INFO << "ACTrie::compressTrie(): created BFS trie with " << trie_.size() << " nodes (path compression skipped " << path_compression_count
                    << " suffix nodes)\n";
    //printTrie();
    vec_index2children_naive_.clear(); // not needed anymore
  }
  size_t ACTrie::getNeedleCount() const
  {
    // Number of needles (patterns) added so far.
    return needle_count_;
  }
  void ACTrie::setMaxAAACount(const uint32_t max_aaa)
  {
    // Maximum number of ambiguous amino acids allowed per hit.
    max_aaa_ = max_aaa;
  }
  uint32_t ACTrie::getMaxAAACount() const
  {
    return max_aaa_;
  }
  void ACTrie::setMaxMMCount(const uint32_t max_mm)
  {
    // Maximum number of mismatched amino acids allowed per hit.
    max_mm_ = max_mm;
  }
  uint32_t ACTrie::getMaxMMCount() const
  {
    return max_mm_;
  }
  bool ACTrie::nextHits(ACTrieState& state) const
  {
    // Clears previous hits and advances the search until the next batch of hits
    // is found; returns true iff any hit was produced.
    state.hits.clear();
    assert(vec_index2children_naive_.empty()); // make sure compressTrie was called
    nextHitsNoClear_(state);
    return !state.hits.empty();
  }
void ACTrie::getAllHits(ACTrieState& state) const
{
state.hits.clear();
assert(vec_index2children_naive_.empty()); // make sure compressTrie was called
while (nextHitsNoClear_(state)) {};
}
  bool ACTrie::nextHitsNoClear_(ACTrieState& state) const
  {
    // Advances the primary pointer through the query one valid AA at a time,
    // driving all pending scouts along the way; returns as soon as the primary
    // position yields at least one new hit (or false when the query is exhausted).
    std::vector<Hit>& hits = state.hits;
    for (AA aa = state.nextValidAA(); aa.isValid(); aa = state.nextValidAA())
    {
      state.tree_pos = stepPrimary_(state.tree_pos, aa, state);
      // deal with scouts in queue: doing it now (instead of after the primary ends) benefits from hot caches
      // and a lot less memory (since only hits from current scouts are found)
      while (!state.scouts.empty())
      {
        ACScout& sp = state.scouts.front();
        // let scout traverse the tree until it dies. This might add new scouts to the queue.
        while (stepScout_(sp, state));
        state.scouts.pop();
      }
      if (addHits_(state.tree_pos, state.textPos(), hits))
      {
        return true;
      };
    }
    return false;
  }
Index ACTrie::add_(const Index index, const AA label)
{
if (vec_index2children_naive_.size() <= index())
{ // double...
vec_index2children_naive_.resize(vec_index2children_naive_.size() * 2);
}
Index ch = findChildNaive_(index, label);
if (ch.isInvalid())
{
// remember index of new node we are about to create
ch.pos() = Index::T(trie_.size());
// create new node with label and depth
trie_.emplace_back(label, trie_[index()].depth_and_hits.depth + 1);
// add child to parent
vec_index2children_naive_[index()].push_back(ch);
}
return ch;
}
  bool ACTrie::addHits_(Index i, const size_t text_pos, std::vector<Hit>& hits) const
  {
    // Collects all needles ending at node 'i' (and at its suffix chain) as hits;
    // 'text_pos' is the query position just after the last matched character.
    // Returns true iff at least one hit was added.
    size_t hits_before = hits.size();
    // hits from current node; return true if going upstream has more hits..
    auto collect = [&]() {
      if (trie_[i()].depth_and_hits.has_hit)
      {
        const auto needle_length = trie_[i()].depth_and_hits.depth;
        const auto text_start = text_pos - needle_length;
        for (const auto needle_idx : vec_index2needles_.at(i()))
        {
          hits.emplace_back(needle_idx, needle_length, Hit::T(text_start));
        }
        return true;
      }
      return false;
    };
    // follow chain of suffix nodes until a node does not have hits anymore
    while (collect())
    {
      i = trie_[i()].suffix;
    }
    return hits_before != hits.size();
  }
  bool ACTrie::addHitsScout_(Index i, const ACScout& scout, const size_t text_pos, std::vector<Hit>& hits, const int current_scout_depths) const
  {
    // Like addHits_(), but only reports hits which still contain the scout's
    // first special character (AAA/MM); shorter hits would be duplicates of hits
    // already reported by the primary or by shorter sub-scouts.
    size_t hits_before = hits.size();
    // hits from current node; return true if going upstream has more hits..
    auto collect = [&]() {
      if (trie_[i()].depth_and_hits.has_hit)
      {
        const auto needle_length = trie_[i()].depth_and_hits.depth;
        const auto text_start = text_pos - needle_length;
        // we want the first AAA of the scout to be part of the hit; otherwise that hit will be reported by shorter sub-scouts or the Primary
        if (current_scout_depths - needle_length >= scout.max_prefix_loss_leftover)
        {
          return false;
        }
        for (const auto needle_idx : vec_index2needles_.at(i()))
        {
          hits.emplace_back(needle_idx, needle_length, Hit::T(text_start));
        }
        return true;
      }
      return false;
    };
    // follow chain of suffix nodes until a node does not have hits anymore
    while (collect())
    {
      i = trie_[i()].suffix;
    }
    return hits_before != hits.size();
  }
Index ACTrie::follow_(const Index i, const AA aa) const
{
Index ch = findChildBFS_(i, aa);
// has direct child (could also be an ambiguous AA - we don't care as long as a needle did contain that character)
if (ch.isValid())
{
return ch;
}
// no direct child; are we at root?
if (i.pos() == 0)
{
return 0;
}
// follow from the suffix...
Index suf = trie_[i.pos()].suffix;
assert(suf.isValid());
return follow_(suf, aa);
}
  bool ACTrie::followScout_(ACScout& scout, const AA edge, ACTrieState& state) const
  {
    // Advances 'scout' along 'edge' (possibly dropping prefix characters via
    // suffix links), collects any resulting hits, and returns false when the
    // scout loses its special (AAA/MM) prefix and thus dies.
    // let scout follow the original edge
    Index j = follow_(scout.tree_pos, edge);
    const int new_depth = int(trie_[j()].depth_and_hits.depth);
    // did we loose a prefix?            old-depth                                 new depth
    const int up_count = int(trie_[scout.tree_pos()].depth_and_hits.depth) - new_depth + 1;
    if (up_count >= scout.max_prefix_loss_leftover)
    { // scout is dead because it lost its AAA/MM
      return false;
    }
    // update the prefix length
    scout.max_prefix_loss_leftover -= up_count;
    scout.tree_pos = j;
    addHitsScout_(j, scout, scout.textPos(state), state.hits, new_depth);
    return true;
  }
  Index ACTrie::stepPrimary_(const Index i, const AA edge, ACTrieState& state) const
  {
    // Advances the primary (exact) traversal by one character and returns the new
    // node. Before following the edge itself, spawns scouts which explore
    // ambiguous interpretations (AAA) and/or mismatches (MM) where budgets allow.
    const bool consider_ambAA = max_aaa_ != 0;
    const bool consider_MM = max_mm_ != 0;
    // AAA
    if (edge.isAmbiguous())
    { // create AAA scouts?
      AA from(edge), to(edge);
      if (consider_ambAA)
      { // first try AAA's (since edge is AAA)
        std::tie(from, to) = _ambiguitiesOf(edge); // e.g. [D,N] for B; i.e. create two scouts; Primary will follow 'B' (if exists in pattern; if not, it will end up in root)
        createScouts_(i, from, to, state, max_aaa_ - 1, max_mm_);
      }
      // test all other AA's for mismatch
      if (consider_MM)
      {
        createMMScouts_(i, from, to, edge, state, max_aaa_, max_mm_ - 1); // try a MM for all AA's other than [from...to]
      }
    }
    // edge is unambiguous:
    else if (consider_MM)
    { // try a MM for all AA's other than 'edge'
      createMMScouts_(i, edge, edge, edge, state, max_aaa_, max_mm_ - 1);
    }
    // Primary continues with the AA, no matter what it was...
    Index ch = findChildBFS_(i, edge);
    // has direct child (could also be an ambiguous AA - we don't care as long as a needle did contain that character)
    if (ch.isValid())
    {
      return ch;
    }
    // are we at root?
    if (i() == 0)
    {
      return i;
    }
    // follow from the suffix...
    Index suf = trie_[i()].suffix;
    assert(suf.isValid());
    return follow_(suf, edge);
  }
  bool ACTrie::stepScout_(ACScout& scout, ACTrieState& state) const
  {
    // Advances 'scout' through the remainder of the query, spawning sub-scouts
    // for further AAA/MM interpretations along the way. Returns false when the
    // scout dies (prefix loss exceeded) or the query is exhausted.
    for (AA edge = scout.nextValidAA(); edge.isValid(); edge = scout.nextValidAA())
    {
      const bool consider_ambAA = scout.max_aaa_leftover > 0;
      const bool consider_MM = scout.max_mm_leftover > 0;
      // AAA
      if (edge.isAmbiguous())
      { // create scouts from this scout?
        AA from(edge), to(edge);
        if (consider_ambAA)
        { // first try AAA's (since edge is AAA)
          std::tie(from, to) = _ambiguitiesOf(edge);
          ACScout sp_temp = scout;
          --sp_temp.max_aaa_leftover; // sub-scouts have one AAA fewer to spend
          createSubScouts_(sp_temp, from, to, state);
        }
        // test all other superAA's for mismatch (except for AAA range, and the original edge itself)
        if (consider_MM)
        {
          ACScout sp_temp = scout;
          --sp_temp.max_mm_leftover; // sub-scouts have one MM fewer to spend
          createMMSubScouts_(sp_temp, from, to, edge, state);
        }
      }
      else if (consider_MM) // edge is unambiguous
      { // try a MM for all superAA's other than 'edge'
        ACScout sp_temp = scout;
        --sp_temp.max_mm_leftover;
        createMMSubScouts_(sp_temp, edge, edge, edge, state);
      }
      // process the scout itself
      if (!followScout_(scout, edge, state)) return false;
    }
    return false; // end of query reached
  }
  void ACTrie::createMMScouts_(const Index i, const AA except_fromAA, const AA except_toAA, const AA except_edge, ACTrieState& state, const uint32_t aaa_left, const uint32_t mm_left) const
  {
    // Spawns mismatch scouts from node 'i' for every (super-)AA except the range
    // [except_fromAA, except_toAA] and the original edge itself.
    // create super-AA range, i.e. including the ambiguous AA's, since a peptide could contain an 'X', which we would like to match
    auto [from, to] = _ambiguitiesOf(AA('$'));
    for (AA mm_aa = from; mm_aa <= to; ++mm_aa)
    {
      if (mm_aa == except_fromAA)
      { // ignore this range
        mm_aa = except_toAA; // the loop's ++ then moves past 'except_toAA'
        continue;
      }
      // ignore edge from scout
      if (mm_aa == except_edge)
      {
        continue;
      }
      createScouts_(i, mm_aa, mm_aa, state, aaa_left, mm_left);
    }
  }
  void ACTrie::createMMSubScouts_(const ACScout& prototype, const AA except_fromAA, const AA except_toAA, const AA except_edge, ACTrieState& state) const
  {
    // Like createMMScouts_(), but spawns sub-scouts derived from an existing
    // scout ('prototype') rather than from the primary traversal.
    // create super-AA range, i.e. including the ambiguous AA's, since a peptide could contain an 'X', which we would like to match
    auto [from, to] = _ambiguitiesOf(AA('$'));
    for (AA mm_aa = from; mm_aa <= to; ++mm_aa)
    {
      if (mm_aa == except_fromAA)
      { // ignore this range
        mm_aa = except_toAA; // the loop's ++ then moves past 'except_toAA'
        continue;
      }
      // ignore edge from scout
      if (mm_aa == except_edge)
      {
        continue;
      }
      createSubScouts_(prototype, mm_aa, mm_aa, state);
    }
  }
  void ACTrie::createScouts_(const Index i, const AA fromAA, const AA toAA, ACTrieState& state, const uint32_t aaa_left, const uint32_t mm_left) const
  {
    // Spawns one scout per AA in [fromAA, toAA] which can make progress from node
    // 'i'; scouts that would land at root are not created. Newly spawned scouts
    // immediately report any hit at their starting node.
    for (AA aa = fromAA; aa <= toAA; ++aa)
    {
      Index scout_pos = follow_(i, aa); // call this using naive follow_(), which matches the exact char
      if (scout_pos() > 0) // not at root
      {
        const uint8_t depth = trie_[scout_pos()].depth_and_hits.depth;
        auto new_scout = state.scouts.emplace(state.textPosIt(), // the master already points to the next AA, so scout can start there
                                              scout_pos,
                                              aaa_left,
                                              mm_left,
                                              depth);
        // we might have found a hit already: report it
        addHits_(scout_pos, new_scout.textPos(state), state.hits);
      }
    }
  }
void ACTrie::createSubScouts_(const ACScout& prototype, const AA fromAA, const AA toAA, ACTrieState& state) const
{
for (AA aa = fromAA; aa <= toAA; ++aa)
{
ACScout s(prototype);
if (followScout_(s, aa, state))
{ // scout survived following the edge
state.scouts.push(std::move(s));
}
}
}
Index ACTrie::findChildNaive_(Index parent, AA child_label)
{
for (Index child : vec_index2children_naive_[parent()]) // only a 4byte type: copy it
{
if (trie_[child.pos()].edge == child_label)
return child;
}
return Index {};
}
/// Count how many bits are set in the bitset up to (not including) position i
int countSetBitsUpTo(Bitset bs, unsigned int i)
{
static_assert(sizeof(bs) == 4, "Bitset must be 32 bits wide");
if (0 == i)
{
return 0; // no bits to count
}
unsigned int high_bits_to_eliminate = 32 - i; // i should be <= 31 (we don't check for performance reasons)
bs <<= (high_bits_to_eliminate);
return bs.pop_count();
}
Index ACTrie::findChildBFS_(const Index parent, const AA child_label) const
{
// check if it exists
if (trie_[parent()].children_bitset.test(child_label()) == 0)
{
return Index{}; // return invalid index
}
// child exists:
// .. find its offset (children are ordered by edge label)
auto child_offset = countSetBitsUpTo(trie_[parent()].children_bitset, child_label());
return trie_[parent()].first_child() + child_offset;
}
void ACTrieState::setQuery(const std::string& haystack)
{
hits.clear();
query_ = haystack;
it_q_ = &query_[0];
tree_pos = 0;
while (!scouts.empty())
{
scouts.pop();
}
}
size_t ACTrieState::textPos() const
{
return std::distance(&query_[0], it_q_);
}
  const char* ACTrieState::textPosIt() const
  {
    // Raw pointer to the current read position within the query.
    return it_q_;
  }
  /// The current query
  const std::string& ACTrieState::getQuery() const
  {
    return query_;
  }
  AA ACTrieState::nextValidAA()
  {
    // Advances the query iterator to the next valid amino acid (skipping invalid chars).
    return OpenMS::nextValidAA(it_q_);
  }
  // All scout state is set via member initializers.
  ACScout::ACScout(const char* query_pos, Index tree_pos, uint8_t max_aa, uint8_t max_mm, uint8_t max_prefix_loss) :
    it_query(query_pos), tree_pos(tree_pos), max_aaa_leftover(max_aa), max_mm_leftover(max_mm), max_prefix_loss_leftover(max_prefix_loss)
  {
  }
  size_t ACScout::textPos(const ACTrieState& state) const
  {
    // Offset of this scout's read position within the query owned by 'state'.
    return std::distance(&state.query_[0], it_query);
  }
  AA ACScout::nextValidAA()
  {
    // Advances this scout's iterator to the next valid amino acid.
    return OpenMS::nextValidAA(it_query);
  }
  AA nextValidAA(const char*& it_q)
  {
    // Reads characters from 'it_q' until a valid AA or the terminating '\0' is
    // found; advances 'it_q' past the consumed characters and returns the AA
    // (stays the invalid AA '?' if the end of the string was reached first).
    const char* it_q_local = it_q; // local copy; huge performance loss to work on it_q directly (due to reference)
    AA res {'?'}; // invalid
    while (*it_q_local != '\0')
    {
      res = AA(*it_q_local);
      ++it_q_local;
      if (res.isValid())
      {
        break;
      }
    }
    it_q = it_q_local; // update the reference to iterator
    return res;
  }
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/IDScoreGetterSetter.cpp | .cpp | 11,179 | 307 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Julianus Pfeuffer $
// $Authors: Julianus Pfeuffer $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/IDScoreGetterSetter.h>
#include <OpenMS/DATASTRUCTURES/StringView.h>
using namespace std;
namespace OpenMS
{
/**
 * @ingroup getScoresFunctions
 * @brief For protein groups. A group counts as target if at least one member protein
 * is a target, i.e. not contained in the given unordered set of decoy accessions.
 */
void IDScoreGetterSetter::getScores_(
  ScoreToTgtDecLabelPairs &scores_labels,
  const std::vector<ProteinIdentification::ProteinGroup> &grps,
  const std::unordered_set<string> &decoy_accs)
{
  for (const auto &grp : grps)
  {
    // a single non-decoy accession makes the whole group a target
    bool has_target = false;
    for (const auto &acc : grp.accessions)
    {
      if (decoy_accs.count(acc) == 0)
      {
        has_target = true;
        break;
      }
    }
    scores_labels.emplace_back(grp.probability, has_target);
  }
}
// Compares two scores under the given orientation: true iff 'first' is strictly
// better than 'second' (strict '>' resp. '<', so equal scores are never "better").
inline bool isFirstBetterScore_(double first, double second, bool isHigherBetter)
{
  return isHigherBetter ? (first > second) : (first < second);
}
// Registers the best hit of @p id in @p seq_to_score_labels, keyed by its unmodified
// sequence. A new sequence is inserted with its score and target/decoy label; for a
// sequence seen before, only a strictly better score replaces the stored one.
// BUGFIX: try_emplace returns (iterator, inserted). The previous code tested the
// 'inserted' flag as if it meant "found" and without negation, so an existing
// entry was never updated with a better score.
inline void addToPeptideScoreMap_(
  std::unordered_map<String, ScoreToTgtDecLabelPair>& seq_to_score_labels,
  const PeptideIdentification& id)
{
  bool higher_better = id.isHigherScoreBetter();
  if (id.getHits().empty())
  {
    return; // nothing to register
  }
  const auto& best_hit = id.getHits()[0];
  double score = best_hit.getScore();
  // label is true iff the hit carries a target_decoy annotation starting with "target"
  auto [it, inserted] = seq_to_score_labels.try_emplace(
    best_hit.getSequence().toUnmodifiedString(),
    score,
    (best_hit.getMetaValue("target_decoy") != DataValue::EMPTY) &&
    (best_hit.getMetaValue("target_decoy").toString().hasPrefix("target")));
  // sequence already present: keep the better of the two scores
  if (!inserted && isFirstBetterScore_(score, it->second.first, higher_better))
  {
    it->second.first = score;
  }
}
void IDScoreGetterSetter::fillPeptideScoreMap_(
std::unordered_map<String, ScoreToTgtDecLabelPair>& seq_to_score_labels,
const PeptideIdentificationList& ids)
{
for (auto const & id : ids)
{
addToPeptideScoreMap_(seq_to_score_labels, id);
}
}
// Collects the best score per unmodified peptide sequence from all peptide IDs in
// the ConsensusMap (optionally including the unassigned ones).
// FIX: the default argument for include_unassigned must live in the declaration
// (header) only; repeating it on the out-of-line definition is ill-formed when it
// is already specified there, so it was removed here.
void IDScoreGetterSetter::fillPeptideScoreMap_(
  std::unordered_map<String, ScoreToTgtDecLabelPair>& seq_to_score_labels,
  ConsensusMap const& map,
  bool include_unassigned)
{
  map.applyFunctionOnPeptideIDs(
    [&seq_to_score_labels](const PeptideIdentification& id){addToPeptideScoreMap_(seq_to_score_labels, id);},
    include_unassigned);
}
/**
 * @ingroup getScoresFunctions
 * @brief For protein groups. Groups are target if at least one protein is target
 * Decoy accessions are determined by the decoy substring.
 * Uses the "picked" algorithm. As soon as there was one member which was picked as target over a decoy, the group is counted as target. Otherwise as decoy.
 */
void IDScoreGetterSetter::getPickedProteinGroupScores_(
  const std::unordered_map<String, ScoreToTgtDecLabelPair>& picked_scores,
  ScoreToTgtDecLabelPairs& scores_labels,
  const vector<ProteinIdentification::ProteinGroup>& grps,
  const String& decoy_string,
  bool decoy_prefix)
{
  for (const auto& grp : grps)
  {
    // The problem is here, that in theory, the matching pair can be in different groups
    // therefore a group cannot really be matched one-to-one. So we say:
    // If at least one (single) target was picked over a decoy in the group, the group
    // is a target. This could in theory mean, that two targets in different groups
    // are counted as +1, while a group containing both decoy-partners is counted as
    // a single 0
    bool target_picked = false;
    bool decoy_picked = false;
    for (const auto& acc : grp.accessions)
    {
      auto [isDecoy, tgt_accession] = removeDecoyStringIfPresent_(acc, decoy_string, decoy_prefix);
      const double tgt_proportion = picked_scores.at(tgt_accession).second;
      if (!isDecoy && tgt_proportion > 0.) // target was picked on single protein level
      {
        target_picked = true;
        break;
      }
      else if (isDecoy && tgt_proportion == 0)
      {
        decoy_picked = true;
      }
    }
    // BUGFIX: add each group exactly once. Previously a group with a decoy seen
    // before a picked target was emplaced twice (as target AND as decoy).
    if (target_picked)
    {
      scores_labels.emplace_back(grp.probability, 1.0);
    }
    // if for none of the proteins the target version was picked, add as decoy
    else if (decoy_picked)
    {
      scores_labels.emplace_back(grp.probability, 0.0);
    }
    // TODO I think we need an unordered_set to check which proteins were picked already
    // and add skip groups where every protein was picked already.
  }
}
// Strips the decoy marker from @p acc if present: as a prefix when @p decoy_prefix
// is set (falling back to a suffix check otherwise), returning {isDecoy, target accession}.
pair<bool,String> IDScoreGetterSetter::removeDecoyStringIfPresent_(const String& acc, const String& decoy_string, bool decoy_prefix)
{
  // decoy marker expected at the front of the accession
  if (decoy_prefix && acc.hasPrefix(decoy_string))
  {
    return {true, acc.suffix(acc.size() - decoy_string.size())};
  }
  // otherwise look for it at the end
  if (acc.hasSuffix(decoy_string))
  {
    return {true, acc.prefix(acc.size() - decoy_string.size())};
  }
  return {false, acc}; // no decoy marker present
}
// Builds a "picked" score map for single proteins: the target and decoy versions of
// a protein compete under the same (target) accession key and only the better-scoring
// version is kept; on an exact score tie the target label wins.
// NOTE(review): decoy stripping here trusts the target_decoy meta value plus a plain
// prefix/suffix cut of decoy_string (see TODO below) - confirm against removeDecoyStringIfPresent_.
void IDScoreGetterSetter::getPickedProteinScores_(
  std::unordered_map<String, ScoreToTgtDecLabelPair>& picked_scores,
  const ProteinIdentification& id,
  const String& decoy_string,
  bool decoy_prefix)
{
  for (const auto& hit : id.getHits())
  {
    // verify the hit carries a target/decoy annotation (helper defined elsewhere)
    checkTDAnnotation_(hit);
    StringView tgt_accession(hit.getAccession());
    bool target = getTDLabel_(hit);
    if (!target)
    {
      // map the decoy hit onto its target accession so both compete for one map slot
      if (decoy_prefix) //TODO double-check hasSuffix/Prefix? Ignore TD Metavalue?
      {
        tgt_accession = tgt_accession.substr(decoy_string.size(),-1);
      }
      else
      {
        tgt_accession = tgt_accession.substr(0,tgt_accession.size()-decoy_string.size());
      }
    }
    auto[it, inserted] = picked_scores.try_emplace(tgt_accession.getString(), hit.getScore(), target);
    if (!inserted)
    {
      // partner already present: keep whichever version scores strictly better
      if ((id.isHigherScoreBetter() && (hit.getScore() > it->second.first)) ||
          (!id.isHigherScoreBetter() && (hit.getScore() < it->second.first)))
      {
        it->second = {hit.getScore(), target};
      }
      else if (hit.getScore() == it->second.first)
      {
        it->second = {hit.getScore(), true}; //prefer targets. Alternative: put 0.5
      }
    }
  }
}
/*static void getPickedProteinScores_(
ScoreToTgtDecLabelPairs& scores_labels,
const std::vector<ProteinIdentification::ProteinGroup> &grps,
const std::unordered_set<std::string> &decoy_accs,
const String& decoy_string,
bool prefix)
{
//TODO potential algorithm: Create a winner set based on single protein scores
// Iff a group contains at least one winner, add the group with its group score to the
// vector (for input to group FDR).
// Otherwise I feel like groups would block/steal too many singles/small groups
// On the other hand, with aggregational inference groups and singles will have the same scores anyway
std::unordered_map<String, std::pair<double, double>> picked_scores;
for (const auto& grp : grps)
{
StringView tgt_accession(grp.accessions);
bool target = getTDLabel_(hit);
if (!target)
{
if (decoy_prefix)
{
tgt_accession = tgt_accession.substr(decoy_string.size(),-1);
}
else
{
tgt_accession = tgt_accession.substr(0,tgt_accession.size()-decoy_string.size());
}
}
auto[it, inserted] = picked_scores.try_emplace(tgt_accession.getString(), hit.getScore(), target);
if (!inserted)
{
if ((id.isHigherScoreBetter() && (hit.getScore() > it->second.first)) ||
(!id.isHigherScoreBetter() && (hit.getScore() < it->second.first)))
{
it->second = {hit.getScore(), target};
}
else if (hit.getScore() == it->second.first)
{
it->second = {hit.getScore(), true}; //prefer targets
}
}
}
scores_labels.reserve(picked_scores.size());
for(auto& kv : picked_scores)
{
scores_labels.emplace_back(std::move(kv.second));
}
}*/
/** @ingroup setScoresFunctions
* @brief For protein groups. Unaffected by keep_decoy_proteins. Always keeps all for now @todo.
* score_type and higher_better unused since ProteinGroups do not carry that information.
* You have to assume that groups will always have the same scores as the ProteinHits
*/
void IDScoreGetterSetter::setScores_(const map<double, double> &scores_to_FDR,
vector <ProteinIdentification::ProteinGroup> &grps,
const string & /*score_type*/,
bool /*higher_better*/)
{
for (auto &grp : grps)
{
grp.probability = (scores_to_FDR.lower_bound(grp.probability)->second);
}
}
// Replaces the score of each ID's best hit with the FDR looked up by unmodified
// sequence in @p seq_to_fdr; the previous score is preserved as a meta value under
// its old score type name. Decoy/unannotated best hits are removed unless
// @p keep_decoys is set.
void IDScoreGetterSetter::setPeptideScoresFromMap_(std::unordered_map<String, ScoreToTgtDecLabelPair> const& seq_to_fdr,
                                                   PeptideIdentificationList& ids,
                                                   std::string const& score_type,
                                                   bool keep_decoys)
{
  for (auto& pep_id : ids)
  {
    if (pep_id.getHits().empty())
    {
      continue;
    }
    auto& top_hit = pep_id.getHits()[0];
    // drop decoys (and hits without a target_decoy annotation) unless kept
    if (!keep_decoys && (top_hit.getMetaValue("target_decoy") == DataValue::EMPTY || top_hit.getMetaValue("target_decoy") == "decoy"))
    {
      pep_id.setHits({});
      continue;
    }
    const auto seq = top_hit.getSequence().toUnmodifiedString();
    const auto fdr_it = seq_to_fdr.find(seq);
    if (fdr_it == seq_to_fdr.end())
    {
      OPENMS_LOG_ERROR << "Error: No FDR found for " + seq + "." << std::endl;
      continue;
    }
    // stash the old score under its score type name, then switch to the FDR
    top_hit.setMetaValue(pep_id.getScoreType(), top_hit.getScore());
    top_hit.setScore(fdr_it->second.first);
    pep_id.setScoreType(score_type);
  }
}
// ConsensusMap overload: applies the sequence->FDR mapping to the peptide IDs of
// every feature and, if requested, to the unassigned peptide IDs as well.
void IDScoreGetterSetter::setPeptideScoresFromMap_(std::unordered_map<String, ScoreToTgtDecLabelPair> const& seq_to_fdr,
                                                   ConsensusMap& map,
                                                   std::string const& score_type,
                                                   bool keep_decoys,
                                                   bool include_unassigned)
{
  // per-feature peptide identifications
  for (auto& feature : map)
  {
    setPeptideScoresFromMap_(seq_to_fdr, feature.getPeptideIdentifications(), score_type, keep_decoys);
  }
  // optionally the unassigned ones, too
  if (include_unassigned)
  {
    setPeptideScoresFromMap_(seq_to_fdr, map.getUnassignedPeptideIdentifications(), score_type, keep_decoys);
  }
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/PScore.cpp | .cpp | 7,376 | 234 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/ANALYSIS/ID/PScore.h>
#include <OpenMS/ANALYSIS/ID/AScore.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/DATASTRUCTURES/MatchedIterator.h>
using std::map;
using std::vector;
namespace OpenMS
{
// For every peak, counts how many peaks within +/- mz_window/2 around it have a
// higher intensity; that count is the peak's zero-based intensity rank.
// Assumes mz is sorted ascending and intensities.size() == mz.size() - TODO confirm with callers.
// FIX: the default argument for mz_window belongs in the declaration (header) only;
// repeating it on the out-of-line definition is ill-formed when already declared there.
vector<Size> PScore::calculateIntensityRankInMZWindow(const vector<double>& mz, const vector<double>& intensities, double mz_window)
{
  vector<Size> ranks; // note: ranks are zero based
  if (mz.empty())
  {
    return ranks;
  }
  ranks.reserve(mz.size());
  const double half_window = mz_window / 2.0;
  for (Size p = 0; p < mz.size(); ++p)
  {
    const double m = mz[p];
    const double i = intensities[p];
    Size rank(0);
    // count neighbors to the left that have higher intensity
    // (cast before subtracting: 'p - 1' on unsigned Size would wrap for p == 0)
    for (Int j = static_cast<Int>(p) - 1; j >= 0; --j)
    {
      if (mz[j] < m - half_window) break;
      if (intensities[j] > i) ++rank;
    }
    // count neighbors to the right that have higher intensity
    for (Size j = p + 1; j < mz.size(); ++j)
    {
      if (mz[j] > m + half_window) break;
      if (intensities[j] > i) ++rank;
    }
    ranks.push_back(rank);
  }
  return ranks;
}
// Computes the per-spectrum intensity rank vectors for a whole peak map; one
// entry per spectrum, in the same order as the spectra in @p peak_map.
vector<vector<Size> > PScore::calculateRankMap(const PeakMap& peak_map, double mz_window)
{
  vector<std::vector<Size> > rank_map; // note: ranks are zero based
  rank_map.reserve(peak_map.size());
  for (Size spec_idx = 0; spec_idx != peak_map.size(); ++spec_idx)
  {
    const PeakSpectrum& spec = peak_map[spec_idx];
    // split the spectrum into parallel m/z and intensity arrays
    vector<double> mzs;
    vector<double> ints;
    mzs.reserve(spec.size());
    ints.reserve(spec.size());
    for (const auto& peak : spec)
    {
      mzs.push_back(peak.getMZ());
      ints.push_back(peak.getIntensity());
    }
    rank_map.push_back(calculateIntensityRankInMZWindow(mzs, ints, mz_window));
  }
  return rank_map;
}
// Builds one sub-spectrum per "peak level" j in [min_level, max_level]: level j
// contains exactly the peaks whose zero-based intensity rank is <= j.
// Assumes ranks[i] corresponds to spec[i] - TODO confirm ranks.size() == spec.size().
map<Size, PeakSpectrum > PScore::calculatePeakLevelSpectra(const PeakSpectrum& spec, const vector<Size>& ranks, Size min_level, Size max_level)
{
  map<Size, MSSpectrum > peak_level_spectra;
  if (spec.empty()) return peak_level_spectra;
  // loop over all peaks and associated (zero-based) ranks
  for (Size i = 0; i != ranks.size(); ++i)
  {
    // start at the highest (less restrictive) level
    for (int j = static_cast<int>(max_level); j >= static_cast<int>(min_level); --j)
    {
      // if the current peak is annotated to have lower or equal rank then allowed for this peak level add it
      if (static_cast<int>(ranks[i]) <= j)
      {
        peak_level_spectra[j].push_back(spec[i]); // operator[] creates the level entry on first use
      }
      else
      {
        // if the current peak has higher rank than the current level then all it is also to high for the lower levels
        break;
      }
    }
  }
  return peak_level_spectra;
}
double PScore::computePScore(double fragment_mass_tolerance, bool fragment_mass_tolerance_unit_ppm, const map<Size, PeakSpectrum>& peak_level_spectra, const vector<PeakSpectrum> & theo_spectra, double mz_window)
{
AScore a_score_algorithm; // TODO: make the cumulative score function static
double best_pscore = 0.0;
for (vector<PeakSpectrum>::const_iterator theo_spectra_it = theo_spectra.begin(); theo_spectra_it != theo_spectra.end(); ++theo_spectra_it)
{
const PeakSpectrum& theo_spectrum = *theo_spectra_it;
// number of theoretical ions for current spectrum
Size N = theo_spectrum.size();
for (map<Size, PeakSpectrum>::const_iterator l_it = peak_level_spectra.begin(); l_it != peak_level_spectra.end(); ++l_it)
{
const double level = static_cast<double>(l_it->first);
const PeakSpectrum& exp_spectrum = l_it->second;
Size matched_peaks(0);
for (PeakSpectrum::ConstIterator theo_peak_it = theo_spectrum.begin(); theo_peak_it != theo_spectrum.end(); ++theo_peak_it)
{
const double& theo_mz = theo_peak_it->getMZ();
double max_dist_dalton = fragment_mass_tolerance_unit_ppm ? theo_mz * fragment_mass_tolerance * 1e-6 : fragment_mass_tolerance;
// iterate over peaks in experimental spectrum in given fragment tolerance around theoretical peak
Size index = exp_spectrum.findNearest(theo_mz);
double exp_mz = exp_spectrum[index].getMZ();
// found peak match
if (std::abs(theo_mz - exp_mz) < max_dist_dalton)
{
++matched_peaks;
}
}
// compute p score as e.g. in the AScore implementation or Andromeda
const double p = level / mz_window;
const double pscore = -10.0 * log10(a_score_algorithm.computeCumulativeScore_(N, matched_peaks, p));
if (pscore > best_pscore)
{
best_pscore = pscore;
}
}
}
return best_pscore;
}
// Computes the PScore for a single theoretical spectrum against the precomputed
// peak-level spectra, returning the best (highest) score over all levels.
// Peak matching is done via MatchedIterator with either ppm or Da tolerance.
double PScore::computePScore(double fragment_mass_tolerance, bool fragment_mass_tolerance_unit_ppm, const map<Size, PeakSpectrum>& peak_level_spectra, const PeakSpectrum & theo_spectrum, double mz_window)
{
  AScore a_score_algorithm; // TODO: make the cumulative score function static
  double best_pscore = 0.0;
  // number of theoretical ions for current spectrum
  Size N = theo_spectrum.size();
  for (map<Size, PeakSpectrum>::const_iterator l_it = peak_level_spectra.begin(); l_it != peak_level_spectra.end(); ++l_it)
  {
    const double level = static_cast<double>(l_it->first);
    const PeakSpectrum& exp_spectrum = l_it->second;
    // count theoretical peaks that have an experimental partner within tolerance
    Size matched_peaks(0);
    if (fragment_mass_tolerance_unit_ppm)
    {
      MatchedIterator<PeakSpectrum, PpmTrait> it(theo_spectrum, exp_spectrum, fragment_mass_tolerance);
      for (; it != it.end(); ++it) ++matched_peaks;
    }
    else
    {
      MatchedIterator<PeakSpectrum, DaTrait> it(theo_spectrum, exp_spectrum, fragment_mass_tolerance);
      for (; it != it.end(); ++it) ++matched_peaks;
    }
    // compute p score as e.g. in the AScore implementation or Andromeda
    // p = per-peak match probability: level k keeps up to k+1 peaks per mz_window
    const double p = (level + 1) / mz_window;
    const double pscore = -10.0 * log10(a_score_algorithm.computeCumulativeScore_(N, matched_peaks, p));
    if (pscore > best_pscore)
    {
      best_pscore = pscore;
    }
  }
  return best_pscore;
}
// Score correction term, linear in the distance of 'mass' from 600 (constants
// are empirical; see the accompanying cleavage/modification terms below).
double massCorrectionTerm(double mass)
{
  const double offset_from_600 = mass - 600.0;
  return offset_from_600 * 0.024;
}
// Empirical score correction for the number of missed cleavages; more missed
// cleavages yield a smaller bonus, with a consecutive-cleavage variant for 1.
double cleavageCorrectionTerm(Size cleavages, bool consecutive_cleavage)
{
  if (cleavages == 0) return 53.2;
  if (cleavages == 1) return consecutive_cleavage ? 42.1 : 31.1;
  if (cleavages == 2) return 17.0;
  return 0.0; // three or more missed cleavages: no bonus
}
// Empirical score correction for the number of modifications: a fixed, strictly
// decreasing bonus for 0..6 modifications, and no bonus beyond that.
double modificationCorrectionTerm(Size modifications)
{
  static const double terms[] = {42.0, 28.0, 22.0, 16.0, 9.0, 5.0, 2.0};
  return (modifications < 7) ? terms[modifications] : 0.0;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/FIAMSDataProcessor.cpp | .cpp | 10,728 | 229 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Svetlana Kutuzova, Douglas McCloskey $
// $Authors: Svetlana Kutuzova, Douglas McCloskey $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/FIAMSDataProcessor.h>
#include <OpenMS/ANALYSIS/ID/AccurateMassSearchEngine.h>
#include <OpenMS/PROCESSING/NOISEESTIMATION/SignalToNoiseEstimatorMedianRapid.h>
#include <OpenMS/ANALYSIS/OPENSWATH/SpectrumAddition.h>
#include <OpenMS/FORMAT/FileHandler.h>
namespace OpenMS {
// Default c'tor: registers all parameters (output naming, instrument settings,
// accurate-mass-search databases, smoothing and noise-estimation options) with
// the DefaultParamHandler machinery and applies the defaults.
FIAMSDataProcessor::FIAMSDataProcessor() :
  DefaultParamHandler("FIAMSDataProcessor"),
  mzs_(),
  bin_sizes_(),
  sgfilter_(),
  picker_()
{
  // output naming / instrument settings
  defaults_.setValue("filename", "fiams", "The filename to use for naming the output files");
  defaults_.setValue("dir_output", "", "The path to the directory where the output files will be placed");
  defaults_.setValue("resolution", 120000.0, "The instrument settings: resolution");
  defaults_.setValue("polarity", "positive", "The instrument settings: polarity");
  defaults_.setValidStrings("polarity", {"positive", "negative"});
  // binning along the m/z axis (see updateMembers_ for how these are used)
  defaults_.setValue("max_mz", 1500, "Maximum mz");
  defaults_.setValue("bin_step", 20, "The size of the step to recalculated the bin size used for adding up spectra along the time axis");
  // accurate mass search databases and adduct lists
  defaults_.setValue("db:mapping", std::vector<std::string>{"CHEMISTRY/HMDBMappingFile.tsv"}, "For the accurate mass search. Database input file(s), containing three tab-separated columns of mass, formula, identifier. "
                                                 "If 'mass' is 0, it is re-computed from the molecular sum formula. "
                                                 "By default CHEMISTRY/HMDBMappingFile.tsv in OpenMS/share is used! If empty, the default will be used.");
  defaults_.setValue("db:struct", std::vector<std::string>{"CHEMISTRY/HMDB2StructMapping.tsv"}, "For the accurate mass search. Database input file(s), containing four tab-separated columns of identifier, name, SMILES, INCHI."
                                                "The identifier should match with mapping file. SMILES and INCHI are reported in the output, but not used otherwise. "
                                                "By default CHEMISTRY/HMDB2StructMapping.tsv in OpenMS/share is used! If empty, the default will be used.");
  defaults_.setValue("positive_adducts", "CHEMISTRY/PositiveAdducts.tsv", "For the accurate mass search. This file contains the list of potential positive adducts that will be looked for in the database. "
                                               "Edit the list if you wish to exclude/include adducts. "
                                               "By default CHEMISTRY/PositiveAdducts.tsv in OpenMS/share is used! If empty, the default will be used.", {"advanced"});
  defaults_.setValue("negative_adducts", "CHEMISTRY/NegativeAdducts.tsv", "For the accurate mass search. This file contains the list of potential negative adducts that will be looked for in the database. "
                                               "Edit the list if you wish to exclude/include adducts. "
                                               "By default CHEMISTRY/NegativeAdducts.tsv in OpenMS/share is used! If empty, the default will be used.", {"advanced"});
  defaults_.setValue("store_progress", "true", "If the intermediate files should be stored in the output directory");
  defaults_.setValidStrings("store_progress", {"true","false"});
  // smoothing (Savitzky-Golay) and signal-to-noise estimation settings
  defaults_.setValue("sgf:frame_length", 11, "SavitzkyGolayFilter parameter. The number of subsequent data points used for smoothing");
  defaults_.setValue("sgf:polynomial_order", 4, "SavitzkyGolayFilter parameter. Order or the polynomial that is fitted");
  defaults_.setValue("sne:window", 10, "SignalToNoiseEstimatorMedianRapid parameter. Signal-to-noise estimation window (in mz)");
  defaultsToParam_(); // push defaults into param_ (triggers updateMembers_)
}
// Recomputes the m/z bin boundaries (mzs_) and per-bin bin sizes (bin_sizes_)
// from the current parameters and forwards the smoothing settings to the
// Savitzky-Golay filter. Called whenever parameters change.
// NOTE(review): max_mz_/bin_step_/resolution_ are plain locals despite the
// member-style trailing underscore - they do not shadow any member.
void FIAMSDataProcessor::updateMembers_()
{
  float max_mz_ = param_.getValue("max_mz");
  float bin_step_ = param_.getValue("bin_step");
  float resolution_ = static_cast<float>(param_.getValue("resolution"));
  Size n_bins = static_cast<int> (max_mz_ / bin_step_);
  mzs_.clear();
  bin_sizes_.clear();
  mzs_.reserve(n_bins);
  bin_sizes_.reserve(n_bins);
  for (Size i = 0; i < n_bins; i++) {
    // bin boundaries at multiples of bin_step; bin size grows linearly with m/z
    mzs_.push_back((i+1)*bin_step_);
    bin_sizes_.push_back(mzs_[i] / (resolution_*4.0));
  }
  // hand the smoothing parameters through to the Savitzky-Golay filter
  Param p;
  p.setValue("frame_length", param_.getValue("sgf:frame_length"));
  p.setValue("polynomial_order", param_.getValue("sgf:polynomial_order"));
  sgfilter_.setParameters(p);
}
void FIAMSDataProcessor::cutForTime(const MSExperiment& experiment, const float n_seconds, std::vector<MSSpectrum>& output)
{
for (const auto & s : experiment.getSpectra()) {
if (s.getRT() < n_seconds) output.push_back(s);
}
}
// Sums all spectra along the time axis using an m/z-dependent bin size: for each
// window [mzs_[i], mzs_[i+1]] the spectra are added up with the bin size
// precomputed for that window and only peaks inside the window are kept.
// FIX: guard against fewer than two bin boundaries - 'mzs_.size() - 1' on an
// empty (unsigned) vector would wrap around and loop effectively forever.
MSSpectrum FIAMSDataProcessor::mergeAlongTime(
  const std::vector<MSSpectrum> & input
) {
  MSSpectrum output;
  if (mzs_.size() < 2)
  {
    return output; // no window can be formed
  }
  for (Size i = 0; i < mzs_.size() - 1; i++) {
    // NOTE(review): addUpSpectra re-merges the full input for every window;
    // potentially expensive, but kept as-is to preserve behavior.
    OpenMS::MSSpectrum full_spectrum = OpenMS::SpectrumAddition::addUpSpectra(
      input, bin_sizes_[i], false
    );
    for (auto it = full_spectrum.begin(); it != full_spectrum.end(); ++it) {
      if (it->getMZ() > mzs_[i+1]) break;       // past the window: stop early (peaks are sorted)
      if (it->getMZ() >= mzs_[i]) output.push_back(*it);
    }
  }
  output.sortByPosition();
  return output;
}
// Smooths a working copy of @p input with the Savitzky-Golay filter and runs
// the peak picker on it; returns the picked (centroided) spectrum.
MSSpectrum FIAMSDataProcessor::extractPeaks(const MSSpectrum& input)
{
  MSSpectrum smoothed(input);
  sgfilter_.filter(smoothed);
  MSSpectrum picked_spectrum;
  picker_.pick(smoothed, picked_spectrum);
  return picked_spectrum;
}
// Converts every peak of @p input into a Feature (m/z + intensity), annotated
// with the configured scan polarity; retention time is not set.
FeatureMap FIAMSDataProcessor::convertToFeatureMap(const MSSpectrum& input)
{
  const String polarity = param_.getValue("polarity").toString();
  FeatureMap features;
  for (const auto& peak : input)
  {
    Feature feature;
    feature.setIntensity(peak.getIntensity());
    feature.setMZ(peak.getMZ());
    feature.setMetaValue("scan_polarity", polarity);
    features.push_back(feature);
  }
  return features;
}
// Configures and runs an AccurateMassSearchEngine on the picked features;
// identifications are written into @p output (MzTab).
void FIAMSDataProcessor::runAccurateMassSearch(FeatureMap& input, OpenMS::MzTab& output)
{
  Param ams_param;
  ams_param.setValue("ionization_mode", "auto");
  // allowed mass error (ppm) derived from the configured instrument resolution
  ams_param.setValue("mass_error_value", 1e+06 / (static_cast<float>(param_.getValue("resolution"))*2));
  ams_param.setValue("db:mapping", param_.getValue("db:mapping"));
  ams_param.setValue("db:struct", param_.getValue("db:struct"));
  ams_param.setValue("positive_adducts", param_.getValue("positive_adducts"));
  ams_param.setValue("negative_adducts", param_.getValue("negative_adducts"));
  ams_param.setValue("keep_unidentified_masses", "false"); // only report IDs
  AccurateMassSearchEngine ams;
  ams.setParameters(ams_param);
  ams.init();
  ams.run(input, output);
}
// Estimates the noise level at every peak position of @p input using
// SignalToNoiseEstimatorMedianRapid and returns a spectrum whose peak
// intensities are the estimated noise values (same m/z positions as the input).
MSSpectrum FIAMSDataProcessor::trackNoise(const MSSpectrum& input)
{
  SignalToNoiseEstimatorMedianRapid sne(param_.getValue("sne:window"));
  MSSpectrum noise_spectrum;
  if (input.empty()) {
    return noise_spectrum; // nothing to estimate
  }
  // split the spectrum into parallel m/z and intensity arrays for the estimator
  std::vector<double> mzs;
  std::vector<double> intensities;
  mzs.reserve(input.size());
  intensities.reserve(input.size());
  for (const auto& peak : input)
  {
    mzs.push_back(peak.getMZ());
    intensities.push_back(peak.getIntensity());
  }
  SignalToNoiseEstimatorMedianRapid::NoiseEstimator estimator = sne.estimateNoise(mzs, intensities);
  // build the output spectrum: one noise peak per input peak
  for (const auto& peak : input)
  {
    Peak1D noise_peak;
    noise_peak.setMZ(peak.getMZ());
    noise_peak.setIntensity(estimator.get_noise_value(peak.getMZ()));
    noise_spectrum.push_back(noise_peak);
  }
  return noise_spectrum;
}
// Runs the FIA-MS pipeline for the first @p n_seconds of @p experiment:
// (1) load a cached picked spectrum if present and requested, otherwise
//     cut/merge/pick (optionally storing the intermediates),
// (2) estimate noise and store it,
// (3) run the accurate mass search on the picked features,
// (4) write the MzTab result file.
// Returns true iff the picked spectrum was loaded from cache.
bool FIAMSDataProcessor::run(const MSExperiment& experiment, const float n_seconds, OpenMS::MzTab& output, const bool load_cached_spectrum)
{
  String postfix = String(static_cast<int>(n_seconds)); // time cutoff becomes part of the file names
  std::string dir_output_ = param_.getValue("dir_output");
  std::string filename_ = param_.getValue("filename");
  String filepath_picked = dir_output_ + "/" + filename_ + "_picked_" + postfix + ".mzML";
  MSSpectrum picked_spectrum;
  bool is_cached; // assigned on both branches below
  if (load_cached_spectrum && File::exists(filepath_picked)) {
    // reuse the previously stored picked spectrum
    OPENMS_LOG_INFO << "Started loading cached picked spectrum " << filepath_picked << std::endl;
    MSExperiment exp;
    FileHandler().loadExperiment(filepath_picked, exp, {FileTypes::MZML});
    picked_spectrum = exp.getSpectra()[0]; // stored file contains exactly one spectrum (see storeSpectrum_)
    OPENMS_LOG_INFO << "Finished loading cached picked spectrum " << filepath_picked << std::endl;
    is_cached = true;
  } else {
    // compute from scratch: time cut -> merge along time -> smooth + pick
    OPENMS_LOG_INFO << "Started calculating picked spectrum " << filepath_picked << std::endl;
    std::vector<MSSpectrum> output_cut;
    cutForTime(experiment, n_seconds, output_cut);
    MSSpectrum merged_spectrum = mergeAlongTime(output_cut);
    picked_spectrum = extractPeaks(merged_spectrum);
    if (param_.getValue("store_progress").toBool()) {
      // keep intermediates for caching / inspection
      storeSpectrum_(merged_spectrum, dir_output_ + "/" + filename_ + "_merged_" + postfix + ".mzML");
      storeSpectrum_(picked_spectrum, filepath_picked);
    }
    OPENMS_LOG_INFO << "Finished calculating picked spectrum " << filepath_picked << std::endl;
    is_cached = false;
  }
  MSSpectrum signal_to_noise = trackNoise(picked_spectrum);
  FeatureMap picked_features = convertToFeatureMap(picked_spectrum);
  storeSpectrum_(signal_to_noise, dir_output_ + "/" + filename_ + "_signal_to_noise_" + postfix + ".mzML");
  runAccurateMassSearch(picked_features, output);
  OpenMS::MzTabFile mztab_outfile;
  mztab_outfile.store(dir_output_ + "/" + filename_ + "_" + postfix + ".mzTab", output);
  return is_cached;
}
void FIAMSDataProcessor::storeSpectrum_(const MSSpectrum& input, const String& filename)
{
MSExperiment exp;
exp.addSpectrum(input);
FileHandler().storeExperiment(filename, exp,{FileTypes::MZML});
}
/// Get mass-to-charge ratios to base the sliding window upon
/// (bin boundaries as recomputed by updateMembers_())
const std::vector<float>& FIAMSDataProcessor::getMZs()
{
  return mzs_;
}
/// Get the sliding bin sizes (one per entry in getMZs(), proportional to m/z / resolution)
const std::vector<float>& FIAMSDataProcessor::getBinSizes()
{
  return bin_sizes_;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/PrecursorPurity.cpp | .cpp | 16,393 | 408 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Eugen Netz $
// $Authors: Eugen Netz $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/PrecursorPurity.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/MATH/MathFunctions.h>
namespace OpenMS
{
// Computes one purity value per precursor of the MS2 spectrum at @p ms2_spec_idx,
// based on the precursor (MS1) spectrum at @p precursor_spec_idx:
//   purity = intensity of the precursor's isotope pattern inside the isolation window
//          / total intensity inside the isolation window.
// Peaks falling into a "fuzzy" ppm margin around the window borders contribute
// only 50% of their intensity. Defaults of 1.0 are returned per precursor when
// the precursor spectrum is empty or an isolation window has zero width.
std::vector<double> PrecursorPurity::computeSingleScanPrecursorPurities(int ms2_spec_idx, int precursor_spec_idx, const MSExperiment & exp, double max_precursor_isotope_deviation)
{
  const auto& ms2_spec = exp[ms2_spec_idx];
  const auto& precursor_spec = exp[precursor_spec_idx];
  std::vector<double> purities(ms2_spec.getPrecursors().size(), 1.0);
  if (precursor_spec.empty()) return purities; // TODO fail instead?
  Size precursor_idx = 0;
  for (const auto& precursor_info : ms2_spec.getPrecursors())
  {
    typedef PeakMap::SpectrumType::ConstIterator const_spec_iterator;
    int charge = precursor_info.getCharge();
    if (charge == 0) charge = 1; // assume charge 1 for ions without charge
    // compute distance between isotopic peaks based on the precursor charge.
    const double charge_dist = Constants::NEUTRON_MASS_U / static_cast<double>(charge);
    // the actual boundary values
    const double strict_lower_mz = precursor_info.getMZ() - precursor_info.getIsolationWindowLowerOffset();
    const double strict_upper_mz = precursor_info.getMZ() + precursor_info.getIsolationWindowUpperOffset();
    if (strict_lower_mz == strict_upper_mz)
    {
      // zero-width isolation window: give up and return the defaults (1.0),
      // including for any precursors not processed yet
      return purities;
    }
    // widen the window by the allowed ppm deviation ("fuzzy" borders)
    const double dev_ppm = max_precursor_isotope_deviation / 1e6;
    const double fuzzy_lower_mz = strict_lower_mz * (1 - dev_ppm);
    const double fuzzy_upper_mz = strict_upper_mz * (1 + dev_ppm);
    // first find the actual precursor peak
    Size precursor_peak_idx = precursor_spec.findNearest(precursor_info.getMZ());
    const Peak1D& precursor_peak = precursor_spec[precursor_peak_idx];
    // now we get ourselves some border iterators
    const_spec_iterator lower_bound = precursor_spec.MZBegin(fuzzy_lower_mz);
    const_spec_iterator upper_bound = precursor_spec.MZEnd(precursor_info.getMZ());
    // both accumulators start with the precursor peak itself
    Peak1D::IntensityType precursor_intensity = precursor_peak.getIntensity();
    Peak1D::IntensityType total_intensity = precursor_peak.getIntensity();
    // ------------------------------------------------------------------------------
    // try to find a match for our isotopic peak on the left side
    double expected_next_mz = precursor_peak.getMZ() - charge_dist;
    while (expected_next_mz > fuzzy_lower_mz)
    {
      // find nearest peak in precursor window
      const_spec_iterator np_it = precursor_spec.MZBegin(lower_bound, expected_next_mz, upper_bound);
      // handle border cases
      // check if next peak has smaller dist
      // NOTE(review): if np_it was already the last peak, '++np_it' may reach the
      // container end; the subsequent dereference assumes this cannot happen
      // within the window bounds - confirm with MZBegin's contract.
      const_spec_iterator np_it2 = np_it;
      ++np_it;
      if (std::fabs(np_it2->getMZ() - expected_next_mz) < std::fabs(np_it->getMZ() - expected_next_mz))
      {
        np_it = np_it2;
      }
      // compute difference between found peak and expected
      double min_ppm_diff = std::fabs(np_it->getMZ() - expected_next_mz) * 1000000 / expected_next_mz;
      // check if we found an isotopic peak
      if (min_ppm_diff < max_precursor_isotope_deviation)
      {
        if (np_it->getMZ() > strict_lower_mz)
        {
          precursor_intensity += np_it->getIntensity();
        }
        else
        {
          // we're in the fuzzy area, so we will take only 50% of the given intensity
          // since we assume that the isolation window borders are not sharp
          precursor_intensity += 0.5 * np_it->getIntensity();
        }
        // update expected_next_mz
        expected_next_mz = np_it->getMZ() - charge_dist;
      }
      else
      {
        // update expected_next_mz with theoretical position
        expected_next_mz -= charge_dist;
      }
    }
    // ------------------------------------------------------------------------------
    // try to find a match for our isotopic peak on the right
    // redefine bounds
    lower_bound = precursor_spec.MZBegin(precursor_info.getMZ());
    upper_bound = precursor_spec.MZEnd(fuzzy_upper_mz);
    expected_next_mz = precursor_peak.getMZ() + charge_dist;
    while (expected_next_mz < fuzzy_upper_mz)
    {
      // find nearest peak in precursor window
      const_spec_iterator np_it = precursor_spec.MZBegin(lower_bound, expected_next_mz, upper_bound);
      // handle border cases
      // check if next peak has smaller dist (same end()-dereference caveat as above)
      const_spec_iterator np_it2 = np_it;
      ++np_it;
      if (std::fabs(np_it2->getMZ() - expected_next_mz) < std::fabs(np_it->getMZ() - expected_next_mz))
      {
        np_it = np_it2;
      }
      // compute difference between found peak and expected
      double min_ppm_diff = std::fabs(np_it->getMZ() - expected_next_mz) * 1000000 / expected_next_mz;
      // check if we found an isotopic peak
      if (min_ppm_diff < max_precursor_isotope_deviation)
      {
        if (np_it->getMZ() < strict_upper_mz)
        {
          precursor_intensity += np_it->getIntensity();
        }
        else
        {
          // we're in the fuzzy area, so we will take only 50% of the given intensity
          // since we assume that the isolation window borders are not sharp
          precursor_intensity += 0.5 * np_it->getIntensity();
        }
        // update expected_next_mz
        expected_next_mz = np_it->getMZ() + charge_dist;
      }
      else
      {
        // update expected_next_mz with theoretical position
        expected_next_mz += charge_dist;
      }
    }
    // ------------------------------------------------------------------------------
    // compute total intensity: sum ALL peaks in the (fuzzy) isolation window,
    // walking outward from the precursor peak in both directions
    int idx = static_cast<int>(precursor_peak_idx) - 1;
    while (idx >= 0 && precursor_spec[idx].getMZ() > fuzzy_lower_mz)
    {
      if (precursor_spec[idx].getMZ() > strict_lower_mz)
      {
        total_intensity += precursor_spec[idx].getIntensity();
      }
      else
      {
        // we're in the fuzzy area, so we will take only 50% of the given intensity
        // since we assume that the isolation window borders are not sharp
        total_intensity += 0.5 * precursor_spec[idx].getIntensity();
      }
      --idx;
    }
    idx = static_cast<int>(precursor_peak_idx) + 1;
    while (idx < static_cast<int>(precursor_spec.size()) && precursor_spec[idx].getMZ() < fuzzy_upper_mz)
    {
      if (precursor_spec[idx].getMZ() < strict_upper_mz)
      {
        total_intensity += precursor_spec[idx].getIntensity();
      }
      else
      {
        // we're in the fuzzy area, so we will take only 50% of the given intensity
        // since we assume that the isolation window borders are not sharp
        total_intensity += 0.5 * precursor_spec[idx].getIntensity();
      }
      ++idx;
    }
    // NOTE(review): total_intensity includes the precursor peak, so this is only
    // 0/0 if the precursor peak itself has zero intensity - confirm upstream.
    purities[precursor_idx] = precursor_intensity / total_intensity;
    precursor_idx++;
  }
  return purities;
}
// Computes precursor purities interpolated (time-weighted) between the MS1 scan
// preceding the MS2 spectrum and the following MS1 scan. Falls back to the
// preceding scan's purities when the next MS1 index is invalid, not an MS1
// spectrum, or the RT difference between the two MS1 scans is not usable.
std::vector<double> PrecursorPurity::computeInterpolatedPrecursorPurity(int ms2_spec_idx, int precursor_spec_idx, int next_ms1_spec_idx, const MSExperiment & exp, double max_precursor_isotope_deviation)
{
  const auto& ms2_spec = exp[ms2_spec_idx];
  const auto& precursor_spec = exp[precursor_spec_idx];
  // compute purity of preceding ms1 scan
  std::vector<double> early_scan_purity = computeSingleScanPrecursorPurities(ms2_spec_idx, precursor_spec_idx, exp, max_precursor_isotope_deviation);
  // Validate next_ms1_spec_idx before dereferencing
  if (next_ms1_spec_idx < 0 || next_ms1_spec_idx >= static_cast<int>(exp.size()))
  {
    // Index out of range - fall back to early scan purity
    return early_scan_purity;
  }
  const auto& next_ms1_spec = exp[next_ms1_spec_idx];
  // Validate that the next spectrum is actually MS1
  if (next_ms1_spec.getMSLevel() != 1)
  {
    // Not an MS1 spectrum - fall back to early scan purity
    return early_scan_purity;
  }
  // Calculate RT denominator and check for edge cases
  double rt_denominator = std::fabs(next_ms1_spec.getRT() - precursor_spec.getRT());
  if (rt_denominator <= 0.0 || !std::isfinite(rt_denominator))
  {
    // Invalid RT denominator - fall back to early scan purity
    return early_scan_purity;
  }
  std::vector<double> late_scan_purity = computeSingleScanPrecursorPurities(ms2_spec_idx, next_ms1_spec_idx, exp, max_precursor_isotope_deviation);
  std::vector<double> interpolated_purity;
  interpolated_purity.reserve(early_scan_purity.size());
  for (Size i = 0; i < early_scan_purity.size(); ++i)
  {
    // calculating the extrapolated, S2I value as a time weighted linear combination of the two scans
    // see: Savitski MM, Sweetman G, Askenazi M, Marto JA, Lang M, Zinn N, et al. (2011).
    // Analytical chemistry 83: 8959–67. http://www.ncbi.nlm.nih.gov/pubmed/22017476
    // std::fabs is applied to compensate for potentially negative RTs
    interpolated_purity.push_back(
      std::fabs(ms2_spec.getRT() - precursor_spec.getRT()) *
      ((late_scan_purity[i] - early_scan_purity[i]) / rt_denominator)
      + early_scan_purity[i]);
  }
  return interpolated_purity;
}
// Computes purity scores for one precursor within its isolation window of the
// given MS1 spectrum: total window intensity, intensity attributable to the
// precursor isotope pattern, matched peak count, and the leftover
// (interfering) peaks. Returns zero-valued scores for an empty window.
PrecursorPurity::PurityScores PrecursorPurity::computePrecursorPurity(const PeakSpectrum& ms1, const Precursor& pre, const double precursor_mass_tolerance, const bool precursor_mass_tolerance_unit_ppm)
{
PrecursorPurity::PurityScores score;
double target_mz = pre.getMZ();
// isolation window boundaries around the precursor m/z
double lower = target_mz - pre.getIsolationWindowLowerOffset();
double upper = target_mz + pre.getIsolationWindowUpperOffset();
int charge = abs(pre.getCharge());
if (charge == 0) charge = 1; // prevent division by zero
// absolute matching tolerance for findNearest();
// NOTE(review): the tolerance is doubled here — presumably to cover both
// sides of a peak position; confirm the intended semantics.
double precursor_tolerance_abs = precursor_mass_tolerance_unit_ppm ? (target_mz * precursor_mass_tolerance*2 * 1e-6) : precursor_mass_tolerance*2;
// copy all peaks inside the isolation window into a working spectrum
auto lower_it = ms1.MZBegin(lower);
auto upper_it = ms1.MZEnd(upper);
PeakSpectrum isolated_window;
while (lower_it != upper_it)
{
isolated_window.push_back(*lower_it);
lower_it++;
}
// total intensity in isolation window
double total_intensity(0);
double target_intensity(0);
Size target_peak_count(0);
for (const auto& peak : isolated_window)
{
total_intensity += peak.getIntensity();
}
// search for the target peak, return scores with 0-values if it is not found
if (isolated_window.empty())
{
return score;
}
// estimate a lower boundary for isotopic peaks:
// the int truncation yields the number of whole isotope steps (at this
// charge) that fit below the target m/z inside the window
int negative_isotopes((pre.getIsolationWindowLowerOffset() * charge));
double iso = -negative_isotopes;
// depending on the isolation window, the first estimated peak might be outside the window
if (target_mz + (iso * Constants::C13C12_MASSDIFF_U / charge) < lower)
{
iso++;
}
// deisotoping (try to find isotopic peaks of the precursor mass, even if the actual precursor peak is missing)
while (true) // runs as long as the next mz is within the isolation window
{
double next_peak = target_mz + (iso * Constants::C13C12_MASSDIFF_U / charge);
// stop loop when new mz is outside the isolation window
// changes through the isotope index iso
if (next_peak > upper)
{
break;
}
// matched peaks are removed from the working spectrum, so that at the end
// only interfering peaks remain in isolated_window
int next_iso_index = isolated_window.findNearest(next_peak, precursor_tolerance_abs);
if (next_iso_index != -1)
{
target_intensity += isolated_window[next_iso_index].getIntensity();
isolated_window.erase(isolated_window.begin() + next_iso_index);
target_peak_count++;
}
// always increment iso to progress the loop
iso++;
}
// fraction of the window intensity explained by the precursor isotope pattern
// (total_intensity is > 0 whenever target_intensity is, so no div-by-zero)
double rel_sig(0);
if (target_intensity > 0.0)
{
rel_sig = target_intensity / total_intensity;
}
score.total_intensity = total_intensity;
score.target_intensity = target_intensity;
score.signal_proportion = rel_sig;
score.target_peak_count = target_peak_count;
score.interfering_peak_count = isolated_window.size();
score.interfering_peaks = isolated_window;
return score;
}
PrecursorPurity::PurityScores PrecursorPurity::combinePrecursorPurities(const PrecursorPurity::PurityScores& score1, const PrecursorPurity::PurityScores& score2)
{
  // Merge two purity scores by summing their intensity and peak-count fields
  // and recomputing the signal proportion from the combined intensities.
  // Note: only the interfering peak *counts* are summed; the interfering_peaks
  // lists themselves are not merged.
  PrecursorPurity::PurityScores combined;
  combined.total_intensity = score1.total_intensity + score2.total_intensity;
  combined.target_intensity = score1.target_intensity + score2.target_intensity;
  combined.target_peak_count = score1.target_peak_count + score2.target_peak_count;
  combined.interfering_peak_count = score1.interfering_peak_count + score2.interfering_peak_count;
  // keep the default of 0 when no target intensity was observed
  if (combined.target_intensity > 0.0)
  {
    combined.signal_proportion = combined.target_intensity / combined.total_intensity;
  }
  return combined;
}
std::map<String, PrecursorPurity::PurityScores> PrecursorPurity::computePrecursorPurities(const PeakMap& spectra, double precursor_mass_tolerance, bool precursor_mass_tolerance_unit_ppm, bool ignore_missing_precursor_spectra)
{
std::map<String, PrecursorPurity::PurityScores> purityscores;
std::pair<std::map<String, PrecursorPurity::PurityScores>::iterator, bool> insert_return_value;
int spectra_size = static_cast<int>(spectra.size());
if (spectra[0].getMSLevel() != 1 && !ignore_missing_precursor_spectra)
{
OPENMS_LOG_WARN << "Warning: Input data not suitable for Precursor Purity computation. First Spectrum is not MS1. Precursor Purity info will not be calculated!\n";
return purityscores;
}
for (int i = 0; i < spectra_size; ++i)
{
if (spectra[i].getMSLevel() == 2)
{
auto parent_spectrum_it = spectra.getPrecursorSpectrum(spectra.begin()+i);
if (parent_spectrum_it == spectra.end() && !ignore_missing_precursor_spectra)
{
OPENMS_LOG_WARN << "Warning: Input data not suitable for Precursor Purity computation. An MS2 spectrum without parent spectrum detected. Precursor Purity info will not be calculated!\n";
return std::map<String, PrecursorPurity::PurityScores>();
}
if (spectra[i].getNativeID().empty())
{
OPENMS_LOG_WARN << "Warning: Input data not suitable for Precursor Purity computation. Spectrum without an ID. Precursor Purity info will not be calculated!\n";
return std::map<String, PrecursorPurity::PurityScores>();
}
// check for uniqueness of IDs by inserting initialized (0-value) scores into map
insert_return_value = purityscores.insert(std::pair<String, PrecursorPurity::PurityScores>(spectra[i].getNativeID(), PrecursorPurity::PurityScores()));
if (!insert_return_value.second)
{
OPENMS_LOG_WARN << "Warning: Input data not suitable for Precursor Purity computation. Duplicate Spectrum IDs. Precursor Purity info will not be calculated!\n";
return std::map<String, PrecursorPurity::PurityScores>();
}
}
}
#pragma omp parallel for schedule(guided)
for (int i = 0; i < spectra_size; ++i)
{
if (spectra[i].getMSLevel() == 2)
{
PrecursorPurity::PurityScores score;
auto parent_spectrum_it = spectra.getPrecursorSpectrum(spectra.begin() + i);
if (parent_spectrum_it != spectra.end())
{
score = PrecursorPurity::computePrecursorPurity((*parent_spectrum_it), spectra[i].getPrecursors()[0], precursor_mass_tolerance, precursor_mass_tolerance_unit_ppm);
}
#pragma omp critical (purityscores_access)
{
// replace the initialized values
purityscores[spectra[i].getNativeID()] = score;
}
} // end of MS2 spectrum
} // end of parallelized spectra loop
return purityscores;
} // end of function def
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/PeptideProteinResolution.cpp | .cpp | 29,800 | 795 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Julianus Pfeuffer $
// $Authors: Julianus Pfeuffer $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/PeptideProteinResolution.h>
#include <OpenMS/PROCESSING/ID/IDFilter.h>
#include <queue>
#include <unordered_set>
#include <algorithm>
using namespace OpenMS;
using namespace std;
namespace OpenMS
{
std::ostream& operator<<(std::ostream& os, const ConnectedComponent& conn_comp)
{
os << "Proteins: ";
for (std::set<Size>::const_iterator prot_it = conn_comp.prot_grp_indices.begin();
prot_it != conn_comp.prot_grp_indices.end();
++prot_it)
{
os << *prot_it << ",";
}
os << '\n';
os << "Peptides: ";
for (std::set<Size>::const_iterator pep_it = conn_comp.pep_indices.begin();
pep_it != conn_comp.pep_indices.end();
++pep_it)
{
os << *pep_it << ",";
}
return os;
}
// C'tor
PeptideProteinResolution::PeptideProteinResolution(bool statistics) :
  statistics_(statistics)
{
  // The graph mappings (group <-> peptide, accession -> group) start out
  // empty (default-constructed) and are populated by buildGraph().
}
// Greedily re-assigns shared peptides: for each peptide (best hit only), keep
// only the evidences pointing to the best-scoring indistinguishable protein
// group(s); all other evidences are removed from the PeptideHit.
// @param resolve_ties  if true, a probability tie keeps a single group only
// @param targets_first if true, target groups always win over decoy groups
// @throws Exception::MissingInformation if no indist. groups are annotated or
//         a referenced protein is missing from all groups
void PeptideProteinResolution::resolve(ProteinIdentification& protein,
PeptideIdentificationList& peptides,
bool resolve_ties,
bool targets_first)
{
vector<ProteinIdentification::ProteinGroup>& groups = protein.getIndistinguishableProteins();
// per-group flag: true if the group contains at least one decoy accession
vector<bool> indist_prot_grp_decoy(groups.size());
unordered_map<string, Size> prot_acc_to_indist_prot_grp;
if (groups.empty())
{
throw Exception::MissingInformation(
__FILE__,
__LINE__,
OPENMS_PRETTY_FUNCTION,
"No indistinguishable Groups annotated. Currently this class only resolves across groups.");
}
OPENMS_LOG_INFO << "Resolving peptides between " << protein.getHits().size() << " proteins in " << groups.size() << " indistinguishable groups.\n";
// I don't think we need to assume sortedness here
//if (!skip_sort) sort(groups.begin(), groups.end());
// collect decoy accessions to classify the groups below
std::unordered_set<std::string> decoy_accs;
for (const ProteinHit& p : protein.getHits())
{
if (p.isDecoy())
{
decoy_accs.insert(p.getAccession());
}
}
// Construct intermediate mapping of single protein accessions
// to indist. protein groups
for (vector<ProteinIdentification::ProteinGroup>::const_iterator group_it =
groups.begin();
group_it != groups.end(); ++group_it)
{
for (vector<String>::const_iterator acc_it = group_it->accessions.begin();
acc_it != group_it->accessions.end(); ++acc_it)
{
Size idx = group_it - groups.begin();
prot_acc_to_indist_prot_grp[*acc_it] = idx;
if (decoy_accs.find(*acc_it) != decoy_accs.end())
{
indist_prot_grp_decoy[idx] = true;
}
}
}
// Go through PeptideIDs
for (PeptideIdentification& pep : peptides)
{
vector<PeptideHit>& hits = pep.getHits();
if (!hits.empty())
{
// only the best hit (PSM) of each peptide ID is considered
PeptideHit& best_hit = hits[0];
const vector<PeptideEvidence>& pepev = best_hit.getPeptideEvidences();
// best-scoring (possibly tied) target and decoy groups for this peptide
set<Size> bestNonDecoyGrpTie;
set<Size> bestDecoyGrpTie;
// group index -> indices (into pepev) of evidences mapping to that group
unordered_map<Size,set<Size>> grpIdxToEvIdx;
Size ev_idx = 0;
for (vector<PeptideEvidence>::const_iterator pepev_it = pepev.begin();
pepev_it != pepev.end(); ++pepev_it, ++ev_idx)
{
String acc = pepev_it->getProteinAccession();
auto found = prot_acc_to_indist_prot_grp.find(acc);
if (found == prot_acc_to_indist_prot_grp.end())
{
throw Exception::MissingInformation(
__FILE__,
__LINE__,
OPENMS_PRETTY_FUNCTION,
"Not all proteins present in an indistinguishable group. Make sure to add them as singletons.");
}
else
{
Size prot_group_index = found->second;
auto it = grpIdxToEvIdx.emplace(prot_group_index, set<Size>());
it.first->second.insert(ev_idx);
//TODO work with a tolerance for doubles instead?
// NOTE(review): the running best is replaced when the new group's
// probability is *smaller* ('<'), while the target-vs-decoy
// comparison further down uses '>'. This looks inconsistent —
// verify which score orientation is intended here.
if (indist_prot_grp_decoy[prot_group_index])
{
if (bestDecoyGrpTie.empty() ||
groups[prot_group_index].probability < groups[*bestDecoyGrpTie.begin()].probability)
{
bestDecoyGrpTie.clear();
bestDecoyGrpTie.insert(prot_group_index);
}
else if (groups[prot_group_index].probability == groups[*bestDecoyGrpTie.begin()].probability)
{
bestDecoyGrpTie.insert(prot_group_index);
}
}
else
{
if (bestNonDecoyGrpTie.empty() ||
groups[prot_group_index].probability < groups[*bestNonDecoyGrpTie.begin()].probability)
{
bestNonDecoyGrpTie.clear();
bestNonDecoyGrpTie.insert(prot_group_index);
}
else if (groups[prot_group_index].probability == groups[*bestNonDecoyGrpTie.begin()].probability)
{
bestNonDecoyGrpTie.insert(prot_group_index);
}
}
}
}
// NOTE(review): hard-coded to false, so the
// (resolve_ties && targets_first_resolve_ties) branch below is
// currently unreachable as written.
bool targets_first_resolve_ties = false;
// the group set whose evidences will be kept for this peptide
set<Size>* toResolve;
set<Size> allGrpsSet;
// decide between the best target group(s), the best decoy group(s),
// or — on an exact probability tie — the union of both
if (bestNonDecoyGrpTie.empty())
{
toResolve = &bestDecoyGrpTie;
}
else if (bestDecoyGrpTie.empty() || targets_first)
{
toResolve = &bestNonDecoyGrpTie;
}
else if (groups[*bestNonDecoyGrpTie.begin()].probability > groups[*bestDecoyGrpTie.begin()].probability)
{
toResolve = &bestNonDecoyGrpTie;
}
else if (groups[*bestDecoyGrpTie.begin()].probability > groups[*bestNonDecoyGrpTie.begin()].probability)
{
toResolve = &bestDecoyGrpTie;
}
else // both equal
{
if (resolve_ties && targets_first_resolve_ties)
{
toResolve = &bestNonDecoyGrpTie;
}
else // take all best groups
{
vector<Size> allGrps;
merge(std::begin(bestNonDecoyGrpTie), std::end(bestNonDecoyGrpTie),
std::begin(bestDecoyGrpTie), std::end(bestDecoyGrpTie),
std::back_inserter(allGrps));
allGrpsSet = set<Size>(allGrps.begin(),allGrps.end());
toResolve = &allGrpsSet;
}
}
// collect the evidence indices to keep for the chosen group(s)
set<Size> evToKeep;
if (resolve_ties)
{
//TODO this tie resolution basically just takes the first group that occurred
evToKeep = grpIdxToEvIdx[*toResolve->begin()];
if (toResolve->size() > 1)
{
OPENMS_LOG_INFO << "Resolution: Peptide " << pep.getHits()[0].getSequence().toString() << " had groups:\n";
OPENMS_LOG_INFO << "tgt: ";
for (const auto& g : bestNonDecoyGrpTie)
{
OPENMS_LOG_INFO << g << "=" << groups[g].probability << ", ";
}
OPENMS_LOG_INFO << '\n';
OPENMS_LOG_INFO << "dec: ";
for (const auto& g : bestDecoyGrpTie)
{
OPENMS_LOG_INFO << g << "=" << groups[g].probability << ", ";
}
OPENMS_LOG_INFO << '\n';
OPENMS_LOG_INFO << "Kept: " << *toResolve->begin() << '\n';
}
}
else
{
// keep the evidences of all (tied) chosen groups
for (const auto& grp : *toResolve)
{
evToKeep.insert(grpIdxToEvIdx[grp].begin(),grpIdxToEvIdx[grp].end());
}
}
// rewrite the evidences of the best hit with the kept subset only
vector<PeptideEvidence> newEv;
newEv.reserve(evToKeep.size());
for (const auto& idx : evToKeep)
{
newEv.push_back(pepev[idx]);
}
best_hit.setPeptideEvidences(newEv);
}
else
{
OPENMS_LOG_WARN << "Warning PeptideProteinResolution: Skipping spectrum without hits.\n";
}
}
}
// Initialization of global variables (= graph)
void PeptideProteinResolution::buildGraph(ProteinIdentification& protein,
const PeptideIdentificationList& peptides, bool skip_sort)
{
  // Builds the bipartite graph between indistinguishable protein groups and
  // peptides: fills prot_acc_to_indist_prot_grp_, pep_to_indist_prot_grp_
  // and indist_prot_grp_to_pep_. Only the best hit of each peptide ID is
  // considered.
  // @throws Exception::MissingInformation if no indistinguishable groups are
  //         annotated or a referenced protein is not part of any group.
  vector<ProteinIdentification::ProteinGroup>& groups = protein.getIndistinguishableProteins();
  if (groups.empty())
  {
    throw Exception::MissingInformation(
      __FILE__,
      __LINE__,
      OPENMS_PRETTY_FUNCTION,
      "No indistinguishable Groups annotated. Currently this class only resolves across groups.");
  }
  OPENMS_LOG_INFO << "Resolving peptides between " << protein.getHits().size() << " proteins in " << groups.size() << " indistinguishable groups.\n";
  // sorting establishes the "lower index = better score" invariant that
  // resolveConnectedComponent() relies on
  if (!skip_sort) sort(groups.begin(), groups.end());
  // Map every single protein accession to the index of its indist. group.
  // (The former decoy-accession collection was removed: it was computed on
  // every call but never used in this function.)
  for (Size grp_idx = 0; grp_idx < groups.size(); ++grp_idx)
  {
    for (const String& acc : groups[grp_idx].accessions)
    {
      prot_acc_to_indist_prot_grp_[acc] = grp_idx;
    }
  }
  // Go through PeptideIDs and construct a bidirectional mapping
  for (PeptideIdentificationList::const_iterator pep_it = peptides.begin();
       pep_it != peptides.end(); ++pep_it)
  {
    Size pep_index = pep_it - peptides.begin();
    const vector<PeptideHit>& hits = pep_it->getHits();
    if (hits.empty())
    {
      OPENMS_LOG_WARN << "Warning PeptideProteinResolution: Skipping spectrum without hits.\n";
      continue;
    }
    // reference (not copy) — only the best hit is read
    const PeptideHit& best_hit = hits[0];
    for (const PeptideEvidence& ev : best_hit.getPeptideEvidences())
    {
      const String& acc = ev.getProteinAccession();
      auto found = prot_acc_to_indist_prot_grp_.find(acc);
      if (found == prot_acc_to_indist_prot_grp_.end())
      {
        throw Exception::MissingInformation(
          __FILE__,
          __LINE__,
          OPENMS_PRETTY_FUNCTION,
          "Not all proteins present in an indistinguishable group. (" + acc + " not found). Make sure to add them as singletons.");
      }
      Size prot_group_index = found->second;
      // operator[] default-inserts empty sets for new indices on both sides
      pep_to_indist_prot_grp_[pep_index].insert(prot_group_index);
      indist_prot_grp_to_pep_[prot_group_index].insert(pep_index);
    }
  }
}
// "Main" function
// Drives the resolution: repeatedly picks an unvisited protein group, extracts
// its connected component via BFS, resolves the component (modifying the
// PeptideIDs in 'peptides'), and removes the component's groups from the
// member map until the graph is exhausted. buildGraph() must be called first.
void PeptideProteinResolution::resolveGraph(ProteinIdentification& protein,
PeptideIdentificationList& peptides)
{
//Debugging
Size old_size = indist_prot_grp_to_pep_.size();
//Statistics
ConnectedComponent most_peps;
ConnectedComponent most_grps;
ConnectedComponent most_both;
// Traverse every connected component, remove visited "nodes" in each step
while (!indist_prot_grp_to_pep_.empty())
{
// NOTE(review): old_size is only updated when the difference exceeds 1,
// so single-group components are silently folded into the size reported
// for the next logged component.
if (statistics_ && (old_size - indist_prot_grp_to_pep_.size() > 1))
{
OPENMS_LOG_INFO << "resolved group of size "
<< old_size - indist_prot_grp_to_pep_.size() << " in last step "
<< endl;
old_size = indist_prot_grp_to_pep_.size();
}
// We take any (= first) protein from map that is still left,
// to start the next BFS from it
Size root_prot_grp = indist_prot_grp_to_pep_.begin()->first;
// do BFS, return connected proteins and peptides
ConnectedComponent curr_component =
PeptideProteinResolution::findConnectedComponent(root_prot_grp);
// For debugging and statistics
if (statistics_)
{
// track the components with the most groups, most peptides, and
// largest combined size
if (curr_component.prot_grp_indices.size() >
most_grps.prot_grp_indices.size())
{
most_grps = curr_component;
}
if (curr_component.pep_indices.size() >
most_peps.pep_indices.size())
{
most_peps = curr_component;
}
if ((curr_component.prot_grp_indices.size() +
curr_component.pep_indices.size()) >
(most_both.prot_grp_indices.size() +
most_both.pep_indices.size()))
{
most_both = curr_component;
}
if (curr_component.prot_grp_indices.size() > 1)
{
OPENMS_LOG_INFO << "found group: " << endl;
OPENMS_LOG_INFO << curr_component;
OPENMS_LOG_INFO << endl << "Processing ..." << endl;
}
}
// resolve shared peptides based on posterior probabilities
// -> modifies PeptideIDs in peptides
PeptideProteinResolution::resolveConnectedComponent(curr_component,
protein,
peptides);
// mark proteins of this component as visited by removing them
for (set<Size>::iterator grp_it =
curr_component.prot_grp_indices.begin();
grp_it != curr_component.prot_grp_indices.end();
++grp_it)
{
indist_prot_grp_to_pep_.erase(*grp_it);
}
}
//TODO maybe extend statistics of connected components!
if (statistics_)
{
OPENMS_LOG_INFO << endl << "Most protein groups in component:" << endl;
OPENMS_LOG_INFO << most_grps;
OPENMS_LOG_INFO << endl << "Most peptides in component:"<< endl;
OPENMS_LOG_INFO << most_peps;
OPENMS_LOG_INFO << endl << "Biggest component:" << endl;
OPENMS_LOG_INFO << most_both;
}
}
/*
* Does a BFS on the two maps (= two parts of the graph; indist. prot. groups
* and peptides), switching from one to the other in each step.
* Returns a Connected Component as set of group and peptide indices.
*/
ConnectedComponent PeptideProteinResolution::findConnectedComponent(Size& root_prot_grp)
{
  // BFS over the bipartite group/peptide graph starting at root_prot_grp;
  // returns the connected component as sets of group and peptide indices.
  ConnectedComponent conn_comp;
  // Queue entries: (true, idx) = protein-group node (use group->peptide map),
  // (false, idx) = peptide node (use peptide->group map).
  queue<pair<bool, Size> > my_queue;
  // start with given root
  my_queue.push(make_pair(true, root_prot_grp));
  while (!my_queue.empty())
  {
    const pair<bool, Size> curr_node = my_queue.front();
    my_queue.pop();
    // Look up neighbors in the map matching the node type. Taking a reference
    // avoids the per-node copy of a potentially large set that the previous
    // implementation made. operator[] keeps the original behavior of
    // default-inserting an empty set for unknown indices; the member maps
    // are not modified while 'neighbors' is iterated, so the reference stays
    // valid.
    const set<Size>& neighbors = curr_node.first
      ? indist_prot_grp_to_pep_[curr_node.second]
      : pep_to_indist_prot_grp_[curr_node.second];
    for (const Size nb : neighbors)
    {
      // Neighbors of a group are peptides and vice versa: record them in the
      // corresponding result set; insert().second tells us whether the node
      // is seen for the first time.
      const bool first_visit = curr_node.first
        ? conn_comp.pep_indices.insert(nb).second
        : conn_comp.prot_grp_indices.insert(nb).second;
      // unseen nodes are enqueued with the opposite node type
      if (first_visit)
      {
        my_queue.push(make_pair(!curr_node.first, nb));
      }
    }
  }
  return conn_comp;
}
/* TODO this does not produce correct results yet. Check again.
* Resolves connected components based on Fido probabilities and adds them
* as additional protein_groups to the output idXML.
* Thereby greedily assigns shared peptides in this component uniquely to
* the proteins of the current BEST INDISTINGUISHABLE protein group,
* ready to be used in ProteinQuantifier then.
* This is achieved by removing all other evidence from the input
* PeptideIDs and iterating until no shared peptides remain.
* In accordance with Fido only the best hit (PSM) for an ID is considered.
* Probability ties are _currently_ resolved by taking the first occurrence.
*/
/* void PeptideProteinResolution::resolveConnectedComponentTargetsFirst(
ConnectedComponent& conn_comp,
ProteinIdentification& protein,
PeptideIdentificationList& peptides,
bool targets_first)
{
// Nothing to resolve in a singleton group (will not be added to output though)
if (conn_comp.prot_grp_indices.size() == 1) return;
// Add proteins from a connected component to ambiguity groups
ProteinIdentification::ProteinGroup ambiguity_grp;
vector<ProteinIdentification::ProteinGroup>& origin_groups = protein.getIndistinguishableProteins();
// Save the max probability in this component to add it (should be first one, since groups were sorted and
// lower index means higher score and set is sorted by index)
ambiguity_grp.probability = origin_groups[*conn_comp.prot_grp_indices.begin()].probability;
for (set<Size>::iterator grp_it = conn_comp.prot_grp_indices.begin();
grp_it != conn_comp.prot_grp_indices.end();
++grp_it)
{
if (*grp_it >= origin_groups.size())
{
OPENMS_LOG_FATAL_ERROR << "Something went terribly wrong. "
"Group with index " << *grp_it << "doesn't exist. "
" ProteinPeptideResolution: Groups changed"
" after building data structures.\n";
}
vector<String> accessions = origin_groups[*grp_it].accessions;
// Put the accessions of the indist. groups into the subsuming
// ambiguity group
ambiguity_grp.accessions.insert(ambiguity_grp.accessions.end(),
accessions.begin(),
accessions.end());
if (targets_first && indist_prot_grp_td_[*grp_it].first)
{
if (statistics_)
{
OPENMS_LOG_DEBUG << "Group: ";
for (const String& s : origin_groups[*grp_it].accessions)
{
OPENMS_LOG_DEBUG << s << ", ";
}
OPENMS_LOG_DEBUG << " steals " << indist_prot_grp_to_pep_[*grp_it].size() << " peptides for itself.\n";
}
// Update all the peptides the current best point to
for (set<Size>::iterator pepid_it =
indist_prot_grp_to_pep_[*grp_it].begin();
pepid_it != indist_prot_grp_to_pep_[*grp_it].end(); ++pepid_it)
{
vector<PeptideHit> pep_id_hits = peptides[*pepid_it].getHits();
vector<PeptideEvidence> best_hit_ev =
pep_id_hits[0].getPeptideEvidences();
// go through all the evidence of this peptide and remove all
// proteins but the ones from the current indist. group
for (vector<PeptideEvidence>::iterator pepev_it = best_hit_ev.begin();
pepev_it != best_hit_ev.end();
//don't increase index, will be done by case
)
{
// if its accession is not in the current best group, remove evidence
if (find(accessions.begin(),
accessions.end(),
pepev_it->getProteinAccession()) == accessions.end())
{
// we get valid iterator from erase with shifted objects
pepev_it = best_hit_ev.erase(pepev_it);
// also erase from the mapping of this class
indist_prot_grp_to_pep_[prot_acc_to_indist_prot_grp_[pepev_it->getProteinAccession()]].erase(*pepid_it);
}
else
{ // iterate further
++pepev_it;
}
}
// Set the remaining evidences as new evidence
pep_id_hits[0].setPeptideEvidences(best_hit_ev);
peptides[*pepid_it].setHits(pep_id_hits);
}
}
if (targets_first) // we need a second run with only decoys to resolve potential remaining peptides
{
for (set<Size>::iterator grp_it = conn_comp.prot_grp_indices.begin();
grp_it != conn_comp.prot_grp_indices.end();
++grp_it)
{
if (*grp_it >= origin_groups.size())
{
OPENMS_LOG_FATAL_ERROR << "Something went terribly wrong. "
"Group with index " << *grp_it << "doesn't exist. "
" ProteinPeptideResolution: Groups changed"
" after building data structures.\n";
}
vector<String> accessions = origin_groups[*grp_it].accessions;
// Put the accessions of the indist. groups into the subsuming
// ambiguity group
ambiguity_grp.accessions.insert(ambiguity_grp.accessions.end(),
accessions.begin(),
accessions.end());
if (!indist_prot_grp_td_[*grp_it].first)
{
if (statistics_)
{
OPENMS_LOG_DEBUG << "Group: ";
for (const String& s : origin_groups[*grp_it].accessions)
{
OPENMS_LOG_DEBUG << s << ", ";
}
OPENMS_LOG_DEBUG << " steals " << indist_prot_grp_to_pep_[*grp_it].size() << " peptides for itself.\n";
}
// Update all the peptides the current best point to
for (set<Size>::iterator pepid_it =
indist_prot_grp_to_pep_[*grp_it].begin();
pepid_it != indist_prot_grp_to_pep_[*grp_it].end(); ++pepid_it)
{
vector<PeptideHit> pep_id_hits = peptides[*pepid_it].getHits();
vector<PeptideEvidence> best_hit_ev =
pep_id_hits[0].getPeptideEvidences();
// go through all the evidence of this peptide and remove all
// proteins but the ones from the current indist. group
for (vector<PeptideEvidence>::iterator pepev_it = best_hit_ev.begin();
pepev_it != best_hit_ev.end();
//don't increase index, will be done by case
)
{
// if its accession is not in the current best group, remove evidence
if (find(accessions.begin(),
accessions.end(),
pepev_it->getProteinAccession()) == accessions.end())
{
// we get valid iterator from erase with shifted objects
pepev_it = best_hit_ev.erase(pepev_it);
// also erase from the mapping of this class
indist_prot_grp_to_pep_[prot_acc_to_indist_prot_grp_[pepev_it->getProteinAccession()]].erase(*pepid_it);
}
else
{ // iterate further
++pepev_it;
}
}
// Set the remaining evidences as new evidence
pep_id_hits[0].setPeptideEvidences(best_hit_ev);
peptides[*pepid_it].setHits(pep_id_hits);
}
}
}
}
}
//Finally insert ambiguity group
protein.insertProteinGroup(ambiguity_grp);
}*/
// Resolves one connected component: groups are visited best-first (highest
// probability, ties broken by peptide count); each visited group "steals" all
// peptides still mapped to it, removing those peptides from the remaining
// groups' mappings and stripping the corresponding evidences from the
// peptides' best hits. All accessions of the component are merged into one
// ambiguity group that is appended to 'protein'.
void PeptideProteinResolution::resolveConnectedComponent(
ConnectedComponent& conn_comp,
ProteinIdentification& protein,
PeptideIdentificationList& peptides)
{
// TODO think about ignoring decoy proteins (at least when resolving ties!)
// Nothing to resolve in a singleton group (will not be added to output though)
if (conn_comp.prot_grp_indices.size() <= 1) return;
// Add proteins from a connected component to ambiguity groups
ProteinIdentification::ProteinGroup ambiguity_grp;
vector<ProteinIdentification::ProteinGroup>& origin_groups = protein.getIndistinguishableProteins();
// Save the max probability in this component to add it (should be first one, since groups were sorted and
// lower index means higher score and set is sorted by index)
size_t best_grp_index = *conn_comp.prot_grp_indices.begin();
ambiguity_grp.probability = origin_groups[best_grp_index].probability;
// copy group indices so we can reorder them for tie resolution
vector<Size> prot_grp_indices(conn_comp.prot_grp_indices.begin(), conn_comp.prot_grp_indices.end());
// groups are currently only sorted by probability.
// in the presence of ties we need to resolve them by the number of peptides.
std::sort(prot_grp_indices.begin(), prot_grp_indices.end(),
[&](const Size & a, const Size & b) -> bool
{
size_t as = indist_prot_grp_to_pep_[a].size();
size_t bs = indist_prot_grp_to_pep_[b].size();
return std::tie(origin_groups[a].probability, as) > std::tie(origin_groups[b].probability, bs);
});
// visit groups best-first; earlier groups steal peptides from later ones
for (vector<Size>::iterator grp_it = prot_grp_indices.begin();
grp_it != prot_grp_indices.end();
++grp_it)
{
if (*grp_it >= origin_groups.size())
{
OPENMS_LOG_FATAL_ERROR << "Something went terribly wrong. "
<< "Group with index " << *grp_it << "doesn't exist. "
<< " ProteinPeptideResolution: Groups changed"
<< " after building data structures.\n";
}
const vector<String>& accessions = origin_groups[*grp_it].accessions;
// Put the accessions of the indist. groups into the subsuming
// ambiguity group
ambiguity_grp.accessions.insert(ambiguity_grp.accessions.end(),
accessions.begin(),
accessions.end());
if (statistics_)
{
OPENMS_LOG_DEBUG << "Group: ";
for (const String& s : accessions)
{
OPENMS_LOG_DEBUG << s << ", ";
}
OPENMS_LOG_DEBUG << " steals " << indist_prot_grp_to_pep_[*grp_it].size() << " peptides for itself.\n";
}
// Update all the peptides the current best point to
for (set<Size>::iterator pepid_it = indist_prot_grp_to_pep_[*grp_it].begin();
pepid_it != indist_prot_grp_to_pep_[*grp_it].end(); ++pepid_it)
{
vector<PeptideHit> pep_id_hits = peptides[*pepid_it].getHits();
vector<PeptideEvidence> best_hit_ev =
pep_id_hits[0].getPeptideEvidences();
// Go through all _remaining_ proteins of the component and remove this
// peptide from their mapping (it now belongs to the current group)
vector<Size>::iterator grp_it_cont = grp_it;
++grp_it_cont;
for (; grp_it_cont != prot_grp_indices.end();
++grp_it_cont)
{
indist_prot_grp_to_pep_[*grp_it_cont].erase(*pepid_it);
}
// go through all the evidence of this peptide and remove all
// proteins but the ones from the current indist. group
for (vector<PeptideEvidence>::iterator pepev_it = best_hit_ev.begin();
pepev_it != best_hit_ev.end();
//don't increase index, will be done by case
)
{
// if its accession is not in the current best group, remove evidence
if (find(accessions.begin(),
accessions.end(),
pepev_it->getProteinAccession()) == accessions.end())
{
// we get valid iterator from erase with shifted objects
pepev_it = best_hit_ev.erase(pepev_it);
}
else
{ // iterate further
++pepev_it;
}
}
// Set the remaining evidences as new evidence
pep_id_hits[0].setPeptideEvidences(best_hit_ev);
peptides[*pepid_it].setHits(pep_id_hits);
}
}
//Finally insert ambiguity group
protein.insertProteinGroup(ambiguity_grp);
}
void PeptideProteinResolution::run(vector<ProteinIdentification>& inferred_protein_ids,
PeptideIdentificationList& inferred_peptide_ids)
{
  // Convenience wrapper: build the group/peptide graph on the first protein
  // run, resolve shared peptides, then drop proteins and prune protein groups
  // that lost all their evidence.
  PeptideProteinResolution resolver;
  resolver.buildGraph(inferred_protein_ids[0], inferred_peptide_ids);
  resolver.resolveGraph(inferred_protein_ids[0], inferred_peptide_ids);
  IDFilter::removeUnreferencedProteins(inferred_protein_ids, inferred_peptide_ids);
  // prune both the indistinguishable and the general protein groups
  IDFilter::updateProteinGroups(inferred_protein_ids[0].getIndistinguishableProteins(), inferred_protein_ids[0].getHits());
  IDFilter::updateProteinGroups(inferred_protein_ids[0].getProteinGroups(), inferred_protein_ids[0].getHits());
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/SimpleSearchEngineAlgorithm.cpp | .cpp | 32,135 | 736 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/SimpleSearchEngineAlgorithm.h>
#include <OpenMS/ANALYSIS/ID/PeptideIndexing.h>
#include <OpenMS/ANALYSIS/ID/HyperScore.h>
#include <OpenMS/CHEMISTRY/DecoyGenerator.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/CHEMISTRY/ProteaseDB.h>
#include <OpenMS/CHEMISTRY/ResidueModification.h>
#include <OpenMS/CHEMISTRY/TheoreticalSpectrumGenerator.h>
#include <OpenMS/COMPARISON/SpectrumAlignment.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CONCEPT/VersionInfo.h>
#include <OpenMS/DATASTRUCTURES/Param.h>
#include <OpenMS/DATASTRUCTURES/StringView.h>
#include <OpenMS/PROCESSING/DEISOTOPING/Deisotoper.h>
#include <OpenMS/PROCESSING/ID/IDFilter.h>
#include <OpenMS/PROCESSING/FILTERING/NLargest.h>
#include <OpenMS/PROCESSING/SCALING/Normalizer.h>
#include <OpenMS/PROCESSING/FILTERING/ThresholdMower.h>
#include <OpenMS/PROCESSING/FILTERING/WindowMower.h>
#include <OpenMS/FORMAT/FASTAFile.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/KERNEL/Peak1D.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/METADATA/SpectrumSettings.h>
#include <algorithm>
#include <map>
#ifdef _OPENMP
#include <omp.h>
#endif
using namespace std;
namespace OpenMS
{
/// Constructor: registers all user-facing parameters (with defaults, valid
/// values and section descriptions) on the DefaultParamHandler base.
SimpleSearchEngineAlgorithm::SimpleSearchEngineAlgorithm() :
  DefaultParamHandler("SimpleSearchEngineAlgorithm"),
  ProgressLogger()
{
  // --- precursor (parent ion) options ---
  defaults_.setValue("precursor:mass_tolerance", 10.0, "+/- tolerance for precursor mass.");

  std::vector<std::string> precursor_mass_tolerance_unit_valid_strings;
  precursor_mass_tolerance_unit_valid_strings.emplace_back("ppm");
  precursor_mass_tolerance_unit_valid_strings.emplace_back("Da");

  defaults_.setValue("precursor:mass_tolerance_unit", "ppm", "Unit of precursor mass tolerance +/- to the theoretical one.");
  defaults_.setValidStrings("precursor:mass_tolerance_unit", precursor_mass_tolerance_unit_valid_strings);

  defaults_.setValue("precursor:min_charge", 2, "Minimum precursor charge to be considered.");
  defaults_.setValue("precursor:max_charge", 5, "Maximum precursor charge to be considered.");
  defaults_.setSectionDescription("precursor", "Precursor (Parent Ion) Options");

  // consider one before annotated monoisotopic peak and the annotated one
  IntList isotopes = {0, 1};
  defaults_.setValue("precursor:isotopes", isotopes, "Corrects for mono-isotopic peak misassignments. (E.g.: 1 = prec. may be misassigned to first isotopic peak)");

  // --- fragment (product ion) options ---
  defaults_.setValue("fragment:mass_tolerance", 10.0, "Fragment mass tolerance +/- to the theoretical one");

  std::vector<std::string> fragment_mass_tolerance_unit_valid_strings;
  fragment_mass_tolerance_unit_valid_strings.emplace_back("ppm");
  fragment_mass_tolerance_unit_valid_strings.emplace_back("Da");

  // fixed truncated description ("Unit of fragment m")
  defaults_.setValue("fragment:mass_tolerance_unit", "ppm", "Unit of fragment mass tolerance +/- to the theoretical one.");
  defaults_.setValidStrings("fragment:mass_tolerance_unit", fragment_mass_tolerance_unit_valid_strings);
  defaults_.setSectionDescription("fragment", "Fragments (Product Ion) Options");

  // --- modification options ---
  vector<String> all_mods;
  ModificationsDB::getInstance()->getAllSearchModifications(all_mods);
  defaults_.setValue("modifications:fixed", std::vector<std::string>{"Carbamidomethyl (C)"}, "Fixed modifications, specified using UniMod (www.unimod.org) terms, e.g. 'Carbamidomethyl (C)'");
  defaults_.setValidStrings("modifications:fixed", ListUtils::create<std::string>(all_mods));
  defaults_.setValue("modifications:variable", std::vector<std::string>{"Oxidation (M)"}, "Variable modifications, specified using UniMod (www.unimod.org) terms, e.g. 'Oxidation (M)'");
  defaults_.setValidStrings("modifications:variable", ListUtils::create<std::string>(all_mods));
  defaults_.setValue("modifications:variable_max_per_peptide", 2, "Maximum number of residues carrying a variable modification per candidate peptide");
  defaults_.setSectionDescription("modifications", "Modifications Options");

  // --- enzyme / decoy options ---
  vector<String> all_enzymes;
  ProteaseDB::getInstance()->getAllNames(all_enzymes);
  defaults_.setValue("enzyme", "Trypsin", "The enzyme used for peptide digestion.");
  defaults_.setValidStrings("enzyme", ListUtils::create<std::string>(all_enzymes));

  defaults_.setValue("decoys", "false", "Should decoys be generated?");
  defaults_.setValidStrings("decoys", {"true","false"} );

  // --- PSM annotation options ---
  defaults_.setValue("annotate:PSM", std::vector<std::string>{"ALL"}, "Annotations added to each PSM.");
  defaults_.setValidStrings("annotate:PSM",
    std::vector<std::string>{
      "ALL",
      Constants::UserParam::FRAGMENT_ERROR_MEDIAN_PPM_USERPARAM,
      Constants::UserParam::PRECURSOR_ERROR_PPM_USERPARAM,
      Constants::UserParam::MATCHED_PREFIX_IONS_FRACTION,
      Constants::UserParam::MATCHED_SUFFIX_IONS_FRACTION}
    );
  defaults_.setSectionDescription("annotate", "Annotation Options");

  // --- peptide options ---
  defaults_.setValue("peptide:min_size", 7, "Minimum size a peptide must have after digestion to be considered in the search.");
  defaults_.setValue("peptide:max_size", 40, "Maximum size a peptide must have after digestion to be considered in the search (0 = disabled).");
  defaults_.setValue("peptide:missed_cleavages", 1, "Number of missed cleavages.");
  defaults_.setValue("peptide:motif", "", "If set, only peptides that contain this motif (provided as RegEx) will be considered.");
  defaults_.setSectionDescription("peptide", "Peptide Options");

  // --- reporting options ---
  defaults_.setValue("report:top_hits", 1, "Maximum number of top scoring hits per spectrum that are reported.");
  defaults_.setSectionDescription("report", "Reporting Options");

  // copy defaults into the active Param object (triggers updateMembers_())
  defaultsToParam_();
}
void SimpleSearchEngineAlgorithm::updateMembers_()
{
precursor_mass_tolerance_ = param_.getValue("precursor:mass_tolerance");
precursor_mass_tolerance_unit_ = param_.getValue("precursor:mass_tolerance_unit").toString();
precursor_min_charge_ = param_.getValue("precursor:min_charge");
precursor_max_charge_ = param_.getValue("precursor:max_charge");
precursor_isotopes_ = param_.getValue("precursor:isotopes");
fragment_mass_tolerance_ = param_.getValue("fragment:mass_tolerance");
fragment_mass_tolerance_unit_ = param_.getValue("fragment:mass_tolerance_unit").toString();
modifications_fixed_ = ListUtils::toStringList<std::string>(param_.getValue("modifications:fixed"));
set<String> fixed_unique(modifications_fixed_.begin(), modifications_fixed_.end());
if (fixed_unique.size() != modifications_fixed_.size())
{
OPENMS_LOG_WARN << "Duplicate fixed modification provided. Making them unique." << endl;
modifications_fixed_.assign(fixed_unique.begin(), fixed_unique.end());
}
modifications_variable_ = ListUtils::toStringList<std::string>(param_.getValue("modifications:variable"));
set<String> var_unique(modifications_variable_.begin(), modifications_variable_.end());
if (var_unique.size() != modifications_variable_.size())
{
OPENMS_LOG_WARN << "Duplicate variable modification provided. Making them unique." << endl;
modifications_variable_.assign(var_unique.begin(), var_unique.end());
}
modifications_max_variable_mods_per_peptide_ = param_.getValue("modifications:variable_max_per_peptide");
enzyme_ = param_.getValue("enzyme").toString();
peptide_min_size_ = param_.getValue("peptide:min_size");
peptide_max_size_ = param_.getValue("peptide:max_size");
peptide_missed_cleavages_ = param_.getValue("peptide:missed_cleavages");
peptide_motif_ = param_.getValue("peptide:motif").toString();
report_top_hits_ = param_.getValue("report:top_hits");
decoys_ = param_.getValue("decoys") == "true";
annotate_psm_ = ListUtils::toStringList<std::string>(param_.getValue("annotate:PSM"));
}
// static
// Preprocesses all MS2 spectra in-place so they are comparable against
// theoretical spectra: removes zero-intensity peaks, normalizes intensities,
// deisotopes fragments, and keeps only the most intense peaks.
// @param exp peak map holding the (MS2-only) spectra; modified in-place
// @param fragment_mass_tolerance tolerance used during deisotoping
// @param fragment_mass_tolerance_unit_ppm true if the tolerance is in ppm, false for Da
void SimpleSearchEngineAlgorithm::preprocessSpectra_(PeakMap& exp, double fragment_mass_tolerance, bool fragment_mass_tolerance_unit_ppm)
{
// filter MS2 map
// remove 0 intensities
ThresholdMower threshold_mower_filter;
threshold_mower_filter.filterPeakMap(exp);
// normalize peak intensities (spectrum-wise)
Normalizer normalizer;
normalizer.filterPeakMap(exp);
// sort by rt
exp.sortSpectra(false);
// filter settings: keep the 20 strongest peaks per 100 Th window (jumping window)
WindowMower window_mower_filter;
Param filter_param = window_mower_filter.getParameters();
filter_param.setValue("windowsize", 100.0, "The size of the sliding window along the m/z axis.");
filter_param.setValue("peakcount", 20, "The number of peaks that should be kept.");
filter_param.setValue("movetype", "jump", "Whether sliding window (one peak steps) or jumping window (window size steps) should be used.");
window_mower_filter.setParameters(filter_param);
// global cap: at most 400 peaks per spectrum
NLargest nlargest_filter = NLargest(400);
// spectra are independent, so the per-spectrum filtering is parallelized
#pragma omp parallel for default(none) shared(exp, fragment_mass_tolerance, fragment_mass_tolerance_unit_ppm, window_mower_filter, nlargest_filter)
for (SignedSize exp_index = 0; exp_index < (SignedSize)exp.size(); ++exp_index)
{
// sort by mz
exp[exp_index].sortByPosition();
// deisotope
Deisotoper::deisotopeAndSingleCharge(exp[exp_index],
fragment_mass_tolerance, fragment_mass_tolerance_unit_ppm,
1, 3, // min / max charge
false, // keep only deisotoped
3, 10, // min / max isopeaks
true); // convert fragment m/z to mono-charge
// remove noise
window_mower_filter.filterPeakSpectrum(exp[exp_index]);
nlargest_filter.filterPeakSpectrum(exp[exp_index]);
// sort (nlargest changes order)
exp[exp_index].sortByPosition();
}
}
/// Converts the raw per-spectrum AnnotatedHit_ lists into PeptideIdentification /
/// ProteinIdentification objects: trims each list to the top-n hits, recreates
/// the (modified) peptide sequences, attaches the requested PSM annotations,
/// and fills in search meta data (engine, search parameters, identifiers).
void SimpleSearchEngineAlgorithm::postProcessHits_(const PeakMap& exp,
  std::vector<std::vector<SimpleSearchEngineAlgorithm::AnnotatedHit_> >& annotated_hits,
  std::vector<ProteinIdentification>& protein_ids,
  PeptideIdentificationList& peptide_ids,
  Size top_hits,
  const ModifiedPeptideGenerator::MapToResidueType& fixed_modifications,
  const ModifiedPeptideGenerator::MapToResidueType& variable_modifications,
  Size max_variable_mods_per_peptide,
  const StringList& modifications_fixed,
  const StringList& modifications_variable,
  Int peptide_missed_cleavages,
  double precursor_mass_tolerance,
  double fragment_mass_tolerance,
  const String& precursor_mass_tolerance_unit_ppm,
  const String& fragment_mass_tolerance_unit_ppm,
  const Int precursor_min_charge,
  const Int precursor_max_charge,
  const String& enzyme,
  const String& database_name) const
{
  // remove all but top n scoring hits per spectrum
  #pragma omp parallel for default(none) shared(annotated_hits, top_hits)
  for (SignedSize scan_index = 0; scan_index < (SignedSize)annotated_hits.size(); ++scan_index)
  {
    // sort and keep the n best elements according to score
    Size topn = std::min(top_hits, annotated_hits[scan_index].size());
    std::partial_sort(annotated_hits[scan_index].begin(), annotated_hits[scan_index].begin() + topn, annotated_hits[scan_index].end(), AnnotatedHit_::hasBetterScore);
    annotated_hits[scan_index].resize(topn);
    // BUGFIX: release excess capacity of THIS spectrum's hit list; the original
    // called shrink_to_fit() on the shared outer vector inside the parallel loop
    // (a data race that also never freed the per-spectrum over-allocation)
    annotated_hits[scan_index].shrink_to_fit();
  }

  // determine which PSM annotations were requested
  bool annotation_precursor_error_ppm = std::find(annotate_psm_.begin(), annotate_psm_.end(), Constants::UserParam::PRECURSOR_ERROR_PPM_USERPARAM) != annotate_psm_.end();
  bool annotation_fragment_error_ppm = std::find(annotate_psm_.begin(), annotate_psm_.end(), Constants::UserParam::FRAGMENT_ERROR_MEDIAN_PPM_USERPARAM) != annotate_psm_.end();
  bool annotation_prefix_fraction = std::find(annotate_psm_.begin(), annotate_psm_.end(), Constants::UserParam::MATCHED_PREFIX_IONS_FRACTION) != annotate_psm_.end();
  bool annotation_suffix_fraction = std::find(annotate_psm_.begin(), annotate_psm_.end(), Constants::UserParam::MATCHED_SUFFIX_IONS_FRACTION) != annotate_psm_.end();

  // "ALL" adds all annotations
  if (std::find(annotate_psm_.begin(), annotate_psm_.end(), "ALL") != annotate_psm_.end())
  {
    annotation_precursor_error_ppm = true;
    annotation_fragment_error_ppm = true;
    annotation_prefix_fraction = true;
    annotation_suffix_fraction = true;
  }

  #pragma omp parallel for
  for (SignedSize scan_index = 0; scan_index < (SignedSize)annotated_hits.size(); ++scan_index)
  {
    if (!annotated_hits[scan_index].empty())
    {
      const MSSpectrum& spec = exp[scan_index];
      // create empty PeptideIdentification object and fill meta data
      PeptideIdentification pi{};
      pi.setSpectrumReference( spec.getNativeID());
      pi.setMetaValue("scan_index", static_cast<unsigned int>(scan_index));
      pi.setScoreType("ln(hyperscore)");
      pi.setHigherScoreBetter(true);
      double mz = spec.getPrecursors()[0].getMZ();
      pi.setRT(spec.getRT());
      pi.setMZ(mz);
      Size charge = spec.getPrecursors()[0].getCharge();

      // create full peptide hit structure from annotated hits
      vector<PeptideHit> phs;
      for (const auto& ah : annotated_hits[scan_index])
      {
        PeptideHit ph;
        ph.setCharge(charge);
        ph.setMetaValue("isotope_error", ah.isotope_error);

        // get unmodified string
        AASequence aas = AASequence::fromString(ah.sequence.getString());

        // reapply modifications (because for memory reasons we only stored the index and recreation is fast)
        vector<AASequence> all_modified_peptides;
        ModifiedPeptideGenerator::applyFixedModifications(fixed_modifications, aas);
        ModifiedPeptideGenerator::applyVariableModifications(variable_modifications, aas, max_variable_mods_per_peptide, all_modified_peptides);

        // reannotate much more memory heavy AASequence object
        AASequence fixed_and_variable_modified_peptide = all_modified_peptides[ah.peptide_mod_index];
        ph.setScore(ah.score);
        ph.setSequence(fixed_and_variable_modified_peptide);

        if (annotation_fragment_error_ppm)
        {
          // align theoretical against experimental spectrum and report the
          // median absolute fragment mass error in ppm
          TheoreticalSpectrumGenerator tsg;
          vector<pair<Size, Size> > alignment;
          MSSpectrum theoretical_spec;
          tsg.getSpectrum(theoretical_spec, fixed_and_variable_modified_peptide, 1, std::min((int)charge - 1, 2));
          SpectrumAlignment sa;
          sa.getSpectrumAlignment(alignment, theoretical_spec, spec);
          vector<double> err;
          for (const auto& match : alignment)
          {
            double fragment_error = fabs(Math::getPPM(spec[match.second].getMZ(), theoretical_spec[match.first].getMZ()));
            err.push_back(fragment_error);
          }
          double median_ppm_error(0);
          if (!err.empty()) { median_ppm_error = Math::median(err.begin(), err.end(), false); }
          ph.setMetaValue(Constants::UserParam::FRAGMENT_ERROR_MEDIAN_PPM_USERPARAM, median_ppm_error);
        }

        if (annotation_precursor_error_ppm)
        {
          // correct observed precursor m/z for the assigned isotope error before comparing
          double theo_mz = fixed_and_variable_modified_peptide.getMZ(charge);
          double ppm_difference = Math::getPPM(mz - (double)ah.isotope_error * Constants::PROTON_MASS_U / (double)charge, theo_mz);
          ph.setMetaValue(Constants::UserParam::PRECURSOR_ERROR_PPM_USERPARAM, ppm_difference);
        }

        if (annotation_prefix_fraction)
        {
          ph.setMetaValue(Constants::UserParam::MATCHED_PREFIX_IONS_FRACTION, ah.prefix_fraction);
        }

        if (annotation_suffix_fraction)
        {
          ph.setMetaValue(Constants::UserParam::MATCHED_SUFFIX_IONS_FRACTION, ah.suffix_fraction);
        }

        // store PSM
        phs.push_back(ph);
      }
      pi.setHits(phs);
      pi.sort();

      #pragma omp critical (peptide_ids_access)
      {
        //clang-tidy: seems to be a false-positive in combination with omp
        peptide_ids.push_back(std::move(pi));
      }
    }
  }

#ifdef _OPENMP
  // we need to sort the peptide_ids by scan_index in order to have the same output in the idXML-file
  if (omp_get_max_threads() > 1)
  {
    std::sort(peptide_ids.begin(), peptide_ids.end(), [](const PeptideIdentification& a, const PeptideIdentification& b)
      {
        return a.getMetaValue("scan_index") < b.getMetaValue("scan_index");
      });
  }
#endif

  // protein identifications (leave as is...)
  protein_ids = vector<ProteinIdentification>(1);
  protein_ids[0].setDateTime(DateTime::now());
  protein_ids[0].setSearchEngine("SimpleSearchEngine");
  protein_ids[0].setSearchEngineVersion(VersionInfo::getVersion());
  DateTime now = DateTime::now();
  String identifier("SSE_" + now.get());
  protein_ids[0].setIdentifier(identifier);
  for (auto & pid : peptide_ids) { pid.setIdentifier(identifier); }

  // store search parameters for provenance
  ProteinIdentification::SearchParameters search_parameters;
  search_parameters.db = database_name;
  search_parameters.charges = String(precursor_min_charge) + ":" + String(precursor_max_charge);
  ProteinIdentification::PeakMassType mass_type = ProteinIdentification::PeakMassType::MONOISOTOPIC;
  search_parameters.mass_type = mass_type;
  search_parameters.fixed_modifications = modifications_fixed;
  search_parameters.variable_modifications = modifications_variable;
  search_parameters.missed_cleavages = peptide_missed_cleavages;
  search_parameters.fragment_mass_tolerance = fragment_mass_tolerance;
  search_parameters.precursor_mass_tolerance = precursor_mass_tolerance;
  search_parameters.precursor_mass_tolerance_ppm = precursor_mass_tolerance_unit_ppm == "ppm";
  search_parameters.fragment_mass_tolerance_ppm = fragment_mass_tolerance_unit_ppm == "ppm";
  search_parameters.digestion_enzyme = *ProteaseDB::getInstance()->getEnzyme(enzyme);

  // add additional percolator features or post-processing
  StringList feature_set{"score", "isotope_error"};
  if (annotation_fragment_error_ppm) feature_set.push_back(Constants::UserParam::FRAGMENT_ERROR_MEDIAN_PPM_USERPARAM);
  if (annotation_prefix_fraction) feature_set.push_back(Constants::UserParam::MATCHED_PREFIX_IONS_FRACTION);
  if (annotation_suffix_fraction) feature_set.push_back(Constants::UserParam::MATCHED_SUFFIX_IONS_FRACTION);
  search_parameters.setMetaValue("extra_features", ListUtils::concatenate(feature_set, ","));
  search_parameters.enzyme_term_specificity = EnzymaticDigestion::SPEC_FULL;

  protein_ids[0].setSearchParameters(std::move(search_parameters));
}
/// Builds a lookup from (isotope-corrected) precursor mass to (scan index,
/// isotope shift). Spectra without exactly one precursor, with too few peaks,
/// or with an out-of-range precursor charge are skipped.
multimap<double, pair<Size, int>> mapPrecursorMassesToScans(
  const Int min_precursor_charge,
  const Int max_precursor_charge,
  const IntList &precursor_isotopes,
  const Size peptide_min_size,
  const PeakMap & spectra)
{
  multimap<double, pair<Size, int>> mass_to_scan;

  for (Size scan_index = 0; scan_index < spectra.size(); ++scan_index)
  {
    const MSSpectrum& spectrum = spectra[scan_index];
    const vector<Precursor>& precursors = spectrum.getPrecursors();

    // require exactly one precursor and a minimally informative MS2
    // (at least one peak per amino acid of the shortest allowed peptide)
    if (precursors.size() != 1 || spectrum.size() < peptide_min_size) { continue; }

    const int precursor_charge = precursors[0].getCharge();
    if (precursor_charge < min_precursor_charge || precursor_charge > max_precursor_charge) { continue; }

    const double precursor_mz = precursors[0].getMZ();

    // register one entry per considered monoisotopic misassignment
    for (const int iso : precursor_isotopes)
    {
      double precursor_mass = (double)precursor_charge * precursor_mz - (double)precursor_charge * Constants::PROTON_MASS_U;
      if (iso != 0) { precursor_mass -= iso * Constants::C13C12_MASSDIFF_U; }
      mass_to_scan.insert(make_pair(precursor_mass, make_pair(scan_index, iso)));
    }
  }
  return mass_to_scan;
}
/// Runs the complete search: loads and preprocesses MS2 spectra, digests the
/// FASTA database (optionally with generated decoys), scores all candidate
/// (modified) peptides against matching precursors with the hyperscore,
/// post-processes the hits into id data structures, and reindexes peptides
/// against proteins.
/// @param in_mzML path to the mzML file with MS2 spectra
/// @param in_db path to the FASTA protein database
/// @param protein_ids output protein identifications (single run)
/// @param peptide_ids output peptide identifications (PSMs)
/// @return EXECUTION_OK on success, otherwise an error code from indexing
SimpleSearchEngineAlgorithm::ExitCodes SimpleSearchEngineAlgorithm::search(const String& in_mzML, const String& in_db, vector<ProteinIdentification>& protein_ids, PeptideIdentificationList& peptide_ids) const
{
  boost::regex peptide_motif_regex(peptide_motif_);

  bool precursor_mass_tolerance_unit_ppm = (precursor_mass_tolerance_unit_ == "ppm");
  bool fragment_mass_tolerance_unit_ppm = (fragment_mass_tolerance_unit_ == "ppm");

  ModifiedPeptideGenerator::MapToResidueType fixed_modifications = ModifiedPeptideGenerator::getModifications(modifications_fixed_);
  ModifiedPeptideGenerator::MapToResidueType variable_modifications = ModifiedPeptideGenerator::getModifications(modifications_variable_);

  // load MS2 map (only MS level 2 is read)
  PeakMap spectra;
  FileHandler f;
  //f.setLogType(log_type_);
  PeakFileOptions options;
  options.clearMSLevels();
  options.addMSLevel(2);
  f.getOptions() = options;
  f.loadExperiment(in_mzML, spectra, {FileTypes::MZML});
  spectra.sortSpectra(true);

  startProgress(0, 1, "Filtering spectra...");
  preprocessSpectra_(spectra, fragment_mass_tolerance_, fragment_mass_tolerance_unit_ppm);
  endProgress();

  // build multimap of precursor mass to scan index
  auto multimap_mass_2_scan_index = mapPrecursorMassesToScans(precursor_min_charge_, precursor_max_charge_, precursor_isotopes_, peptide_min_size_, spectra);

  // create spectrum generator
  TheoreticalSpectrumGenerator spectrum_generator;
  Param param(spectrum_generator.getParameters());
  param.setValue("add_first_prefix_ion", "true");
  param.setValue("add_metainfo", "true");
  spectrum_generator.setParameters(param);

  // preallocate storage for PSMs
  vector<vector<AnnotatedHit_> > annotated_hits(spectra.size(), vector<AnnotatedHit_>());
  for (auto & a : annotated_hits) { a.reserve(2 * report_top_hits_); }

#ifdef _OPENMP
  // we want to do locking at the spectrum level so we get good parallelization
  vector<omp_lock_t> annotated_hits_lock(annotated_hits.size());
  for (size_t i = 0; i != annotated_hits_lock.size(); i++)
  {
    omp_init_lock(&(annotated_hits_lock[i]));
  }
#endif

  vector<FASTAFile::FASTAEntry> fasta_db;
  FASTAFile().load(in_db, fasta_db);

  // generate decoy protein sequences by reversing them
  if (decoys_)
  {
    startProgress(0, 1, "Generate decoys...");
    DecoyGenerator decoy_generator;

    // append decoy proteins
    const size_t old_size = fasta_db.size();
    fasta_db.reserve(fasta_db.size() * 2);
    for (size_t i = 0; i != old_size; ++i)
    {
      FASTAFile::FASTAEntry e = fasta_db[i];
      e.sequence = decoy_generator.reversePeptides(AASequence::fromString(e.sequence), enzyme_).toString();
      e.identifier = "DECOY_" + e.identifier;
      fasta_db.push_back(std::move(e));
    }

    // randomize order of targets and decoys to introduce no global bias in the case that
    // many targets have the same score as their decoy. (As we always take the first best scoring one)
    Math::RandomShuffler shuffler;
    shuffler.portable_random_shuffle(fasta_db.begin(), fasta_db.end());
    endProgress();
  }

  ProteaseDigestion digestor;
  digestor.setEnzyme(enzyme_);
  digestor.setMissedCleavages(peptide_missed_cleavages_);

  startProgress(0, fasta_db.size(), "Scoring peptide models against spectra...");

  // lookup for processed peptides. must be defined outside of omp section and synchronized
  set<StringView> processed_peptides;

  Size count_proteins(0), count_peptides(0);

  #pragma omp parallel for schedule(static) default(none) shared(annotated_hits, spectrum_generator, multimap_mass_2_scan_index, fixed_modifications, variable_modifications, fasta_db, digestor, processed_peptides, count_proteins, count_peptides, precursor_mass_tolerance_unit_ppm, fragment_mass_tolerance_unit_ppm, peptide_motif_regex, spectra, annotated_hits_lock)
  for (SignedSize fasta_index = 0; fasta_index < (SignedSize)fasta_db.size(); ++fasta_index)
  {
    #pragma omp atomic
    ++count_proteins;

    IF_MASTERTHREAD
    {
      setProgress(count_proteins);
    }

    vector<StringView> current_digest;
    digestor.digestUnmodified(fasta_db[fasta_index].sequence, current_digest, peptide_min_size_, peptide_max_size_);

    for (auto const & c : current_digest)
    {
      const String current_peptide = c.getString();
      // skip peptides with ambiguous amino acids
      if (current_peptide.find_first_of("XBZ") != std::string::npos)
      {
        continue;
      }

      // if a peptide motif is provided skip all peptides without match
      if (!peptide_motif_.empty() && !boost::regex_match(current_peptide, peptide_motif_regex))
      {
        continue;
      }

      bool already_processed = false;
      #pragma omp critical (processed_peptides_access)
      {
        // peptide (and all modified variants) already processed so skip it
        if (processed_peptides.find(c) != processed_peptides.end())
        {
          already_processed = true;
        }
        else
        {
          processed_peptides.insert(c);
        }
      }

      // skip peptides that have already been processed
      if (already_processed) { continue; }

      #pragma omp atomic
      ++count_peptides;

      vector<AASequence> all_modified_peptides;

      // this critical section is because ResidueDB is not thread safe and new residues are created based on the PTMs
      #pragma omp critical (residuedb_access)
      {
        AASequence aas = AASequence::fromString(current_peptide);
        ModifiedPeptideGenerator::applyFixedModifications(fixed_modifications, aas);
        ModifiedPeptideGenerator::applyVariableModifications(variable_modifications, aas, modifications_max_variable_mods_per_peptide_, all_modified_peptides);
      }

      for (SignedSize mod_pep_idx = 0; mod_pep_idx < (SignedSize)all_modified_peptides.size(); ++mod_pep_idx)
      {
        const AASequence& candidate = all_modified_peptides[mod_pep_idx];
        double current_peptide_mass = candidate.getMonoWeight();

        // determine MS2 precursors that match to the current peptide mass
        multimap<double, pair<Size, int>>::const_iterator low_it, up_it;
        if (precursor_mass_tolerance_unit_ppm) // ppm
        {
          low_it = multimap_mass_2_scan_index.lower_bound(current_peptide_mass - current_peptide_mass * precursor_mass_tolerance_ * 1e-6);
          up_it = multimap_mass_2_scan_index.upper_bound(current_peptide_mass + current_peptide_mass * precursor_mass_tolerance_ * 1e-6);
        }
        else // Dalton
        {
          low_it = multimap_mass_2_scan_index.lower_bound(current_peptide_mass - precursor_mass_tolerance_);
          up_it = multimap_mass_2_scan_index.upper_bound(current_peptide_mass + precursor_mass_tolerance_);
        }

        // no matching precursor in data
        if (low_it == up_it)
        {
          continue;
        }

        // create theoretical spectrum
        PeakSpectrum theo_spectrum;

        // add peaks for b and y ions with charge 1
        spectrum_generator.getSpectrum(theo_spectrum, candidate, 1, 1);

        // sort by mz
        theo_spectrum.sortByPosition();

        for (; low_it != up_it; ++low_it)
        {
          const Size scan_index = low_it->second.first;
          const PeakSpectrum& exp_spectrum = spectra[scan_index];
          // const int& charge = exp_spectrum.getPrecursors()[0].getCharge();
          HyperScore::PSMDetail detail;
          const double& score = HyperScore::computeWithDetail(fragment_mass_tolerance_, fragment_mass_tolerance_unit_ppm, exp_spectrum, theo_spectrum, detail);
          if (score == 0)
          {
            continue; // no hit?
          }

          // add peptide hit
          AnnotatedHit_ ah;
          ah.sequence = c;
          ah.peptide_mod_index = mod_pep_idx;
          ah.score = score;
          ah.prefix_fraction = (double)detail.matched_b_ions/(double)c.size();
          ah.suffix_fraction = (double)detail.matched_y_ions/(double)c.size();
          ah.mean_error = detail.mean_error;
          ah.isotope_error = low_it->second.second;

#ifdef _OPENMP
          omp_set_lock(&(annotated_hits_lock[scan_index]));
          {
#endif
            annotated_hits[scan_index].push_back(ah);

            // prevent vector from growing indefinitely (memory) but don't shrink the vector every time
            if (annotated_hits[scan_index].size() >= 2 * report_top_hits_)
            {
              std::partial_sort(annotated_hits[scan_index].begin(), annotated_hits[scan_index].begin() + report_top_hits_, annotated_hits[scan_index].end(), AnnotatedHit_::hasBetterScore);
              annotated_hits[scan_index].resize(report_top_hits_);
            }
#ifdef _OPENMP
          }
          omp_unset_lock(&(annotated_hits_lock[scan_index]));
#endif
        }
      }
    }
  }
  endProgress();

#ifdef _OPENMP
  // BUGFIX: free the locks immediately after the scoring loop (they are not
  // needed afterwards). Previously they were destroyed only right before the
  // final return and therefore leaked on the indexer early-return error paths.
  for (size_t i = 0; i != annotated_hits_lock.size(); i++)
  {
    omp_destroy_lock(&(annotated_hits_lock[i]));
  }
#endif

  OPENMS_LOG_INFO << "Proteins: " << count_proteins << endl;
  OPENMS_LOG_INFO << "Peptides: " << count_peptides << endl;
  OPENMS_LOG_INFO << "Processed peptides: " << processed_peptides.size() << endl;

  startProgress(0, 1, "Post-processing PSMs...");
  SimpleSearchEngineAlgorithm::postProcessHits_(spectra,
    annotated_hits,
    protein_ids,
    peptide_ids,
    report_top_hits_,
    fixed_modifications,
    variable_modifications,
    modifications_max_variable_mods_per_peptide_,
    modifications_fixed_,
    modifications_variable_,
    peptide_missed_cleavages_,
    precursor_mass_tolerance_,
    fragment_mass_tolerance_,
    precursor_mass_tolerance_unit_,
    fragment_mass_tolerance_unit_,
    precursor_min_charge_,
    precursor_max_charge_,
    enzyme_,
    in_db
    );
  endProgress();

  // add meta data on spectra file
  protein_ids[0].setPrimaryMSRunPath({in_mzML}, spectra);

  // reindex peptides to proteins
  PeptideIndexing indexer;
  Param param_pi = indexer.getParameters();
  param_pi.setValue("decoy_string", "DECOY_");
  param_pi.setValue("decoy_string_position", "prefix");
  param_pi.setValue("enzyme:name", enzyme_);
  param_pi.setValue("enzyme:specificity", "full");
  param_pi.setValue("missing_decoy_action", "silent");
  indexer.setParameters(param_pi);

  PeptideIndexing::ExitCodes indexer_exit = indexer.run(fasta_db, protein_ids, peptide_ids);

  if ((indexer_exit != PeptideIndexing::ExitCodes::EXECUTION_OK) &&
      (indexer_exit != PeptideIndexing::ExitCodes::PEPTIDE_IDS_EMPTY))
  {
    if (indexer_exit == PeptideIndexing::ExitCodes::DATABASE_EMPTY)
    {
      return ExitCodes::INPUT_FILE_EMPTY;
    }
    else if (indexer_exit == PeptideIndexing::ExitCodes::UNEXPECTED_RESULT)
    {
      return ExitCodes::UNEXPECTED_RESULT;
    }
    else
    {
      return ExitCodes::UNKNOWN_ERROR;
    }
  }

  return ExitCodes::EXECUTION_OK;
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/NUXL/NuXLParameterParsing.cpp | .cpp | 21,896 | 502 | // Copyright (c) 2002-present, The OpenMS Team -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/NUXL/NuXLParameterParsing.h>
#include <OpenMS/CHEMISTRY/ElementDB.h>
#include <OpenMS/CHEMISTRY/ResidueDB.h>
#include <OpenMS/CHEMISTRY/ResidueModification.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <algorithm>
using namespace std;
namespace OpenMS
{
// Collects the marker ions of all precursor adducts and returns them with
// (near-)duplicate masses removed, so each mass is generated only once
// during spectrum creation.
std::vector<NuXLFragmentAdductDefinition> NuXLParameterParsing::getMarkerIonsMassSet(const NuXLParameterParsing::PrecursorsToMS2Adducts& pc2adducts)
{
  std::vector<NuXLFragmentAdductDefinition> unique_ions;

  // gather every marker ion of every precursor adduct (e.g.: "UU-H2O")
  for (const auto& p2a : pc2adducts)
  {
    const auto& ions = p2a.second.marker_ions;
    unique_ions.insert(unique_ions.end(), ions.begin(), ions.end());
  }

  // order by (mass, name); the name only breaks ties between equal masses
  auto by_mass_then_name = [](const NuXLFragmentAdductDefinition& a, const NuXLFragmentAdductDefinition& b) -> bool
  {
    return std::tie(a.mass, a.name) < std::tie(b.mass, b.name);
  };
  sort(unique_ions.begin(), unique_ions.end(), by_mass_then_name);

  // collapse adjacent entries whose masses agree within 1 mDa
  auto same_mass = [](const NuXLFragmentAdductDefinition& a, const NuXLFragmentAdductDefinition& b) -> bool
  {
    return std::fabs(a.mass - b.mass) < 1e-3;
  };
  unique_ions.erase(
    unique(unique_ions.begin(), unique_ions.end(), same_mass),
    unique_ions.end());

  return unique_ions;
}
// Maps every precursor adduct (given as nucleotide formula, e.g. "AU-H2O") to
// the set of chemically feasible MS2 fragment adducts and marker ions, and
// logs which precursor adduct / cross-linked nucleotide combinations are
// theoretically indistinguishable (same precursor mass and same MS2 ion masses).
NuXLParameterParsing::PrecursorsToMS2Adducts
NuXLParameterParsing::getAllFeasibleFragmentAdducts(
  const NuXLModificationMassesResult & precursor_adducts,
  const NuXLParameterParsing::NucleotideToFragmentAdductMap & nucleotide_to_fragment_adducts,
  const set<char> & can_xl,
  const bool always_add_default_marker_ions,
  const bool default_marker_ions_RNA)
{
PrecursorsToMS2Adducts all_pc_all_feasible_adducts;
// bookkeeping for logging: precursor adduct name -> mass / empirical formula
map<String, double> pc2mass;
map<String, String> pc2ef;
using PrecursorAdductMassAndMS2Fragments = pair<double, set<double>>;
using PrecursorAdductAndXLNucleotide = pair<String, char>;
// key: (precursor mass, set of MS2 fragment masses); value: all (precursor adduct,
// cross-linked nucleotide) pairs that would produce exactly that signature
map<PrecursorAdductMassAndMS2Fragments, vector<PrecursorAdductAndXLNucleotide>> mass_frags2pc_xlnuc;
// for all distinct precursor adduct formulas/masses
for (auto const & pa : precursor_adducts.formula2mass)
{
// get all precursor nucleotide formulas matching current empirical formula/mass
const String& ef = pa.first;
const double pc_mass = pa.second;
const auto& ambiguities = precursor_adducts.mod_combinations.at(ef);
if (ambiguities.size() >= 2)
{
OPENMS_LOG_DEBUG << ambiguities.size() << " nucleotide formulas are ambiguous on the level of empirical formula: " << endl;
for (auto const & pc_adduct : ambiguities) { OPENMS_LOG_DEBUG << pc_adduct << endl; }
}
// for each ambiguous (at the level of empirical formula) precursor adduct (stored as nucleotide formula e.g.: "AU-H2O")
for (const String & pc_adduct : ambiguities)
{
// calculate feasible fragment adducts and store them for lookup
const MS2AdductsOfSinglePrecursorAdduct& feasible_adducts = getFeasibleFragmentAdducts(pc_adduct,
ef,
nucleotide_to_fragment_adducts,
can_xl,
always_add_default_marker_ions,
default_marker_ions_RNA);
all_pc_all_feasible_adducts[pc_adduct] = feasible_adducts;
pc2mass[pc_adduct] = pc_mass;
pc2ef[pc_adduct] = ef;
}
}
// print feasible fragment adducts and marker ions
for (auto const & fa : all_pc_all_feasible_adducts)
{
const String & pc = fa.first;
OPENMS_LOG_DEBUG << "Precursor adduct: " << pc << "\t" << pc2ef[pc] << "\t" << pc2mass[pc] << "\n";
// collect set of masses to detect ambiguities
set<double> mi_fragments;
for (auto const & mis : fa.second.marker_ions) { mi_fragments.insert(mis.mass); }
for (auto const & ffa : fa.second.feasible_adducts)
{
const char & nucleotide = ffa.first;
// fragment signature of this (precursor adduct, nucleotide) combination
set<double> fragments;
for (auto const & f : mi_fragments) { fragments.insert(f); } // copy over common marker ion masses
for (auto const & f : ffa.second) { fragments.insert(f.mass); } // copy over xl-ed nucleotide associated fragment masses
// we want to track if a certain MS1 mass and set of MS2 ions allow to distinguish
// (at least in theory) precursor adduct and cross-linked nucleotide
// to do so, we map mass and set of fragment ions to precursor adduct and cross-linked nucleotide
mass_frags2pc_xlnuc[{pc2mass[pc], fragments}].push_back({pc, nucleotide});
OPENMS_LOG_DEBUG << "  Cross-linkable nucleotide '" << nucleotide << "' and feasible fragment adducts:" << endl;
for (auto const & a : ffa.second)
{
OPENMS_LOG_DEBUG << "    " << a.name << "\t" << a.formula.toString() << "\t" << a.mass << "\n";
}
}
OPENMS_LOG_DEBUG << "  Marker ions." << endl;
for (auto const & ffa : fa.second.marker_ions)
{
OPENMS_LOG_DEBUG << "    " << ffa.name << "\t" << ffa.formula.toString() << "\t" << ffa.mass << "\n";
}
}
OPENMS_LOG_DEBUG << endl;
// report precursor adducts/cross-linked nucleotide combinations with same mass that produce the exact same MS2 ions as indistinguishable
// e.g., for nucleotides that only produce ribose fragments there might be a lot of overlap
for (auto const & f : mass_frags2pc_xlnuc)
{
if (f.second.size() > 1)
{
OPENMS_LOG_DEBUG << "Theoretical indistinguishable cross-link adducts detected: " << endl;
for (auto const & m : f.second)
{
OPENMS_LOG_DEBUG << "\tPrecursor: " << m.first << "\t and cross-linked nucleotide: " << m.second << endl;
for (auto const & fragment_mass : f.first.second)
{
OPENMS_LOG_DEBUG << "\tFragment mass: " << fragment_mass << endl;
}
}
}
}
return all_pc_all_feasible_adducts;
}
// Parse user-provided fragment adduct strings of the form
//   "<nucleotide>:<formula>[;<name>]"   (e.g. "U:C4H4N2O2;U'")
// into a per-nucleotide set of fragment adduct definitions.
// Every adduct name is additionally registered as an N- and C-terminal
// modification in the ModificationsDB singleton (if not already present) so
// downstream code can annotate peptides with it.
// Returns an empty map (after logging a warning) on any malformed entry.
NuXLParameterParsing::NucleotideToFragmentAdductMap
NuXLParameterParsing::getTargetNucleotideToFragmentAdducts(StringList fragment_adducts)
{
  NucleotideToFragmentAdductMap nucleotide_to_fragment_adducts;

  // sort to make processing (and thus modification registration) order deterministic
  std::sort(fragment_adducts.begin(), fragment_adducts.end());

  for (String t : fragment_adducts)
  {
    t.removeWhitespaces();
    // keep the full entry for diagnostics (t is stripped below)
    const String full_entry = t;

    EmpiricalFormula formula;
    String name;

    // format is: target_nucleotide:formula;name
    // guard the length before indexing (a 0/1-character entry would read out of bounds)
    if (t.size() < 2 || t[1] != ':')
    {
      OPENMS_LOG_WARN << "Missing ':'. Wrong format of fragment_adduct string: " << full_entry << endl;
      return NucleotideToFragmentAdductMap();
    }
    const char target_nucleotide = t[0];

    // remove target nucleotide and ':' character from t
    t = t.substr(2);

    // split into formula and (optional) name
    vector<String> fs;
    t.split(";", fs);
    if (fs.size() == 1) // no name provided so we just take the formula as name
    {
      formula = EmpiricalFormula(fs[0]);
      name = formula.toString();
    }
    else if (fs.size() == 2)
    {
      formula = EmpiricalFormula(fs[0]);
      name = fs[1];
    }
    else
    {
      OPENMS_LOG_WARN << "Wrong format of fragment_adduct string: " << full_entry << endl;
      return NucleotideToFragmentAdductMap();
    }

    NuXLFragmentAdductDefinition fad;
    fad.name = name;
    fad.formula = formula;
    fad.mass = formula.getMonoWeight();
    nucleotide_to_fragment_adducts[target_nucleotide].insert(fad);

    // register all fragment adducts as N- and C-terminal modification (if not already registered)
    if (!ModificationsDB::getInstance()->has(name))
    {
      auto c_term = std::make_unique<ResidueModification>();
      c_term->setId(name);
      c_term->setName(name);
      c_term->setFullId(name + " (C-term)");
      c_term->setTermSpecificity(ResidueModification::C_TERM);
      c_term->setDiffMonoMass(fad.mass);
      ModificationsDB::getInstance()->addModification(std::move(c_term));

      auto n_term = std::make_unique<ResidueModification>();
      n_term->setId(name);
      n_term->setName(name);
      n_term->setFullId(name + " (N-term)");
      n_term->setTermSpecificity(ResidueModification::N_TERM);
      n_term->setDiffMonoMass(fad.mass);
      ModificationsDB::getInstance()->addModification(std::move(n_term));
    }
  }

#ifdef DEBUG_OpenNuXL
  // NOTE: fixed — this debug block referenced a nonexistent variable
  // ('precursor_to_fragment_adducts') and would not compile with DEBUG_OpenNuXL set.
  for (auto const & p2fas : nucleotide_to_fragment_adducts)
  {
    for (auto const & p2fa : p2fas.second)
    {
      cout << "nucleotide:" << p2fas.first
           << " fragment adduct:" << p2fa.formula.toString()
           << " fragment adduct mass:" << p2fa.mass
           << " name:" << p2fa.name << endl;
    }
  }
#endif

  return nucleotide_to_fragment_adducts;
}
// Given a single precursor adduct (e.g. "TCA-H2O"), determine the feasible
// fragment adducts (per cross-linkable nucleotide) and the marker ions expected
// on MS2 level.
//
// @param exp_pc_adduct precursor adduct: nucleotide string plus optional losses/gains (e.g. "AU-H2O")
// @param exp_pc_formula empirical formula of the precursor adduct (empty = no adduct)
// @param nucleotide_to_fragment_adducts all fragment adducts that may arise from each (unmodified) nucleotide
// @param can_xl one-letter codes of nucleotides that can form a cross-link
// @param always_add_default_marker_ions if true, append the default nucleotide/base marker ions
// @param default_marker_ions_RNA choose RNA (true) or DNA (false) default marker ions
// @return feasible fragment adducts per cross-linked nucleotide plus a deduplicated marker ion list
MS2AdductsOfSinglePrecursorAdduct
NuXLParameterParsing::getFeasibleFragmentAdducts(const String &exp_pc_adduct,
  const String &exp_pc_formula,
  const NuXLParameterParsing::NucleotideToFragmentAdductMap &nucleotide_to_fragment_adducts,
  const set<char> &can_xl,
  const bool always_add_default_marker_ions,
  const bool default_marker_ions_RNA)
{
  OPENMS_LOG_DEBUG << "Generating fragment adducts for precursor adduct: '" << exp_pc_adduct << "'" << endl;

  MS2AdductsOfSinglePrecursorAdduct ret;

  // no precursor adduct? Then no fragment adducts or marker ions are expected.
  if (exp_pc_formula.empty()) { return ret; }

  // count nucleotides in precursor adduct (e.g.: "TCA-H2O" yields map: T->1, C->1, A->1)
  // and determine the set of cross-linkable nucleotides in the precursor adduct
  size_t nt_count(0);
  set<char> exp_pc_xl_nts; // the cross-linkable nucleotides in the precursor adduct
  map<char, Size> exp_pc_nucleotide_count; // all nucleotides in the precursor adduct (e.g., used to determine marker ions)
  for (String::const_iterator exp_pc_it = exp_pc_adduct.begin(); exp_pc_it != exp_pc_adduct.end(); ++exp_pc_it, ++nt_count)
  {
    // we are finished with nucleotides in string if first loss/gain is encountered
    if (*exp_pc_it == '+' || *exp_pc_it == '-') break;

    // count occurrence of nucleotide
    if (exp_pc_nucleotide_count.count(*exp_pc_it) == 0)
    {
      exp_pc_nucleotide_count[*exp_pc_it] = 1;
      if (can_xl.count(*exp_pc_it)) { exp_pc_xl_nts.insert(*exp_pc_it); }
    }
    else
    {
      exp_pc_nucleotide_count[*exp_pc_it]++;
    }
  }

  // check if at least one nucleotide is present that can cross-link
  const bool has_xl_nt = !exp_pc_xl_nts.empty();
  OPENMS_LOG_DEBUG << "\t" << exp_pc_adduct << " has cross-linkable nucleotide (0 = false, 1 = true): " << has_xl_nt << endl;

  // no cross-linkable nt contained in the precursor adduct? Return an empty fragment adduct definition set
  if (!has_xl_nt) { return ret; }

  // determine if there is a nucleotide/sugar/etc. that must be the cross-linked one ('d' or 'r')
  set<char> must_xl;
  for (auto c : exp_pc_xl_nts) { if (c == 'd' || c == 'r') must_xl.insert(c); }
  if (must_xl.size() >= 2)
  {
    OPENMS_LOG_WARN << "More than one nucleotide present that is marked as mandatory cross-linked ('d' or 'r')." << endl;
    return ret;
  }
  else if (must_xl.size() == 1)
  {
    // NOTE: was a stray 'cout <<' — route through the logging framework like the rest of the code
    OPENMS_LOG_INFO << "Mandatory cross-linking nt/sugar: " << *must_xl.begin() << " in precursor adduct: " << exp_pc_adduct << endl;
    exp_pc_xl_nts = must_xl;
  }
  // else: no mandatory cross-linked nts

  ///////////////////////////////////////////////////////////////////
  // HERE: at least one cross-linkable nt present in precursor adduct
  OPENMS_LOG_DEBUG << "\t" << exp_pc_adduct << " is monomer (1 = true, >1 = false): " << nt_count << endl;

  // Handle the cases of monomer or oligo nucleotide bound to the precursor.
  // This distinction is made because potential losses on the precursor only allow us to reduce the set of
  // chemically feasible fragment adducts if they are on a monomer.
  // In the case of an oligo we can't be sure if the cross-linked nucleotide or any other in the oligo had the loss.
  if (nt_count > 1) // No monomer? For every nucleotide that can be cross-linked: create all fragment adducts without restriction by losses
  {
    // for each nucleotide and potential set of fragment adducts
    for (auto const & n2fa : nucleotide_to_fragment_adducts)
    {
      const char & nucleotide = n2fa.first; // the nucleotide without any associated loss
      const set<NuXLFragmentAdductDefinition>& fragment_adducts = n2fa.second; // all potential fragment adducts that may arise from the unmodified nucleotide

      // check if nucleotide is cross-linkable and part of the precursor adduct
      if (exp_pc_xl_nts.find(nucleotide) != exp_pc_xl_nts.end())
      {
        OPENMS_LOG_DEBUG << "\t" << exp_pc_adduct << " found nucleotide: " << String(nucleotide) << " in precursor RNA." << endl;
        OPENMS_LOG_DEBUG << "\t" << exp_pc_adduct << " nucleotide: " << String(nucleotide) << " has fragment_adducts: " << fragment_adducts.size() << endl;

        // store feasible adducts associated with a cross-link with character nucleotide
        vector<NuXLFragmentAdductDefinition> faa;
        std::copy(fragment_adducts.begin(), fragment_adducts.end(), back_inserter(faa));
        ret.feasible_adducts.emplace_back(make_pair(nucleotide, faa));
      }
    }

    // Create set of marker ions for *all* nucleotides contained in the precursor adduct (including those that do not cross-link).
    // Note: the non-cross-linked nts in the precursor adduct are more likely to produce the marker ions
    // (= more fragile than the cross-linked nt).
    set<NuXLFragmentAdductDefinition> marker_ion_set;
    for (auto const & n2fa : nucleotide_to_fragment_adducts)
    {
      const char & nucleotide = n2fa.first; // the nucleotide without any associated loss
      if (exp_pc_nucleotide_count.find(nucleotide) != exp_pc_nucleotide_count.end())
      {
        marker_ion_set.insert(n2fa.second.begin(), n2fa.second.end());
      }
    }
    std::move(std::begin(marker_ion_set), std::end(marker_ion_set), std::back_inserter(ret.marker_ions));
  }
  else // nt_count == 1: monomer. Check if the neutral loss reduces the set of chemically feasible fragment adducts
  {
    for (auto const & n2fa : nucleotide_to_fragment_adducts)
    {
      const char & nucleotide = n2fa.first; // one-letter code of the nt
      set<NuXLFragmentAdductDefinition> fas = n2fa.second; // all potential fragment adducts assuming no loss on the precursor adduct

      // check if nucleotide is cross-linkable and part of the precursor adduct
      if (exp_pc_xl_nts.find(nucleotide) != exp_pc_xl_nts.end())
      {
        OPENMS_LOG_DEBUG << "\t" << exp_pc_adduct << " found nucleotide: " << String(nucleotide) << " in precursor NA adduct." << endl;
        OPENMS_LOG_DEBUG << "\t" << exp_pc_adduct << " nucleotide: " << String(nucleotide) << " has fragment_adducts: " << fas.size() << endl;

        // check chemical feasibility: subtracting the adduct must not yield a negative elemental composition
        for (auto it = fas.begin(); it != fas.end(); )
        {
          bool negative_elements = (EmpiricalFormula(exp_pc_formula) - it->formula).toString().hasSubstring("-");
          if (negative_elements) // fragment adduct can't be a subformula of the precursor adduct
          {
            it = fas.erase(it);
          }
          else
          {
            ++it; // STL erase idiom (mind the pre-increment)
          }
        }
        if (fas.empty()) continue; // no feasible fragment adducts left? continue

        // store feasible adducts associated with a cross-link with this nucleotide
        vector<NuXLFragmentAdductDefinition> faa;
        std::copy(fas.begin(), fas.end(), back_inserter(faa));
        ret.feasible_adducts.emplace_back(make_pair(nucleotide, faa));

        // We only have one nucleotide in the precursor adduct (the cross-linked one).
        // Note: marker ions of the cross-linked nucleotide are often missing or of very low intensity.
        std::copy(std::begin(fas), std::end(fas), std::back_inserter(ret.marker_ions));
      }
    }
  }

  // For chemical cross-linkers like DEB, fragments always carry DEB but marker ions might just be U or U' (and losses).
  // In that case we need to ensure that the default marker ions are added.
  if (always_add_default_marker_ions)
  {
    // Note: add the uncharged mass. Protons are added during spectrum generation.
    // helper: construct each formula once instead of twice per marker ion
    auto add_marker = [&ret](const char* formula, const char* label)
    {
      const EmpiricalFormula ef(formula);
      ret.marker_ions.emplace_back(ef, String(label), ef.getMonoWeight());
    };
    if (default_marker_ions_RNA) // TODO: check if we can derive this from target nucleotides
    {
      add_marker("C9H13N2O9P1", "U");
      add_marker("C9H14N3O8P", "C");
      add_marker("C10H14N5O8P", "G");
      add_marker("C10H14N5O7P", "A");
      add_marker("C4H4N2O2", "U'");
      add_marker("C4H5N3O", "C'");
      add_marker("C5H5N5O", "G'");
      add_marker("C5H5N5", "A'");
    }
    else // DNA
    {
      add_marker("C10H15N2O8P", "T");
      add_marker("C9H14N3O7P", "C");
      add_marker("C10H14N5O7P", "G");
      add_marker("C10H14N5O6P", "A");
      add_marker("C5H6N2O2", "T'");
      add_marker("C4H5N3O", "C'");
      add_marker("C5H5N5O", "G'");
      add_marker("C5H5N5", "A'");
    }
  }

  // Because, e.g., ribose might be a feasible fragment of any nucleotide, we keep only one version.
  // Note: sort by formula and (as tie breaker) the name
  std::sort(ret.marker_ions.begin(), ret.marker_ions.end(),
    [](NuXLFragmentAdductDefinition const & a, NuXLFragmentAdductDefinition const & b)
    {
      const String as = a.formula.toString();
      const String bs = b.formula.toString();
      return std::tie(as, a.name) < std::tie(bs, b.name);
    }
  );
  // Note: for uniqueness, we only rely on the formula (in case of tie: keeping the first = shortest name)
  auto it = std::unique(ret.marker_ions.begin(), ret.marker_ions.end(),
    [](NuXLFragmentAdductDefinition const & a, NuXLFragmentAdductDefinition const & b)
    {
      return a.formula == b.formula;
    }
  );
  ret.marker_ions.resize(std::distance(ret.marker_ions.begin(), it));

  // print feasible fragment adducts
  for (auto const & ffa : ret.feasible_adducts)
  {
    const char & nucleotide = ffa.first;
    OPENMS_LOG_DEBUG << "  Cross-linkable nucleotide '" << nucleotide << "' and feasible fragment adducts:" << endl;
    for (auto const & a : ffa.second)
    {
      OPENMS_LOG_DEBUG << "\t" << a.name << "\t" << a.formula.toString() << "\t" << a.mass << "\n";
    }
  }

  // print marker ions
  OPENMS_LOG_DEBUG << "  Marker ions:" << endl;
  for (auto const & a : ret.marker_ions)
  {
    OPENMS_LOG_DEBUG << "\t" << a.name << "\t" << a.formula.toString() << "\t" << a.mass << "\n";
  }

  return ret;
}
// Resolve a list of modification names (optionally suffixed with " (N-term)" or
// " (C-term)") against the ModificationsDB singleton and return the matching
// ResidueModification objects in input order.
vector<ResidueModification> NuXLParameterParsing::getModifications(StringList modNames)
{
  vector<ResidueModification> result;
  result.reserve(modNames.size());

  auto* mod_db = ModificationsDB::getInstance();
  for (String mod_name : modNames)
  {
    ResidueModification resolved;
    if (mod_name.hasSubstring(" (N-term)"))
    {
      // strip the terminus tag and look up with explicit N-terminal specificity
      mod_name.substitute(" (N-term)", "");
      resolved = *mod_db->getModification(mod_name, "", ResidueModification::N_TERM);
    }
    else if (mod_name.hasSubstring(" (C-term)"))
    {
      mod_name.substitute(" (C-term)", "");
      resolved = *mod_db->getModification(mod_name, "", ResidueModification::C_TERM);
    }
    else
    {
      resolved = *mod_db->getModification(mod_name);
    }
    result.push_back(resolved);
  }
  return result;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/NUXL/NuXLReport.cpp | .cpp | 39,097 | 957 | // Copyright (c) 2002-present, The OpenMS Team -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/ANALYSIS/NUXL/NuXLReport.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <boost/range/adaptor/reversed.hpp>
#include <OpenMS/ANALYSIS/ID/IDBoostGraph.h>
using namespace std;
namespace OpenMS
{
using Internal::IDBoostGraph;
// Serialize this report row as one separator-delimited line.
// Rows without an identification (no_id == true) keep the id-related and
// error/mass columns empty so the column layout matches NuXLReportRowHeader.
String NuXLReportRow::getString(const String& separator) const
{
  StringList fields;

  // retention time and (uncorrected) precursor m/z
  fields << String::number(rt, 3)
         << String::number(original_mz, 4);

  if (no_id)
  {
    // no identification: 12 empty id-related columns
    fields.insert(fields.end(), 12, "");
  }
  else
  {
    fields << accessions
           << peptide
           << NA
           << String(charge)
           << String(score)
           << String(rank)
           << best_localization_score
           << localization_scores
           << best_localization
           << String::number(peptide_weight, 4)
           << String::number(NA_weight, 4)
           << String::number(peptide_weight + NA_weight, 4);
  }

  // exported meta value columns
  for (const String& mv : meta_values) { fields << mv; }

  // marker ion intensities (scaled to percent)
  for (const auto& mi : marker_ions)
  {
    for (const auto& annotated_intensity : mi.second)
    {
      fields << String::number(annotated_intensity.second * 100.0, 2);
    }
  }

  if (no_id)
  {
    // no identification: 7 empty error/mass/annotation columns
    fields.insert(fields.end(), 7, "");
  }
  else
  {
    // precursor mass error (absolute and relative)
    fields << String::number(abs_prec_error, 4)
           << String::number(rel_prec_error, 1);
    // cross-link m/z for charge states 1-4
    fields << String::number(m_H, 4)
           << String::number(m_2H, 4)
           << String::number(m_3H, 4)
           << String::number(m_4H, 4);
    fields << fragment_annotation;
  }

  return ListUtils::concatenate(fields, separator);
}
// Build the separator-delimited header line matching NuXLReportRow::getString's
// column layout, including one column per exported meta value and per marker ion.
String NuXLReportRowHeader::getString(const String& separator, const StringList& meta_values_to_export)
{
  StringList columns = {"#RT",
                        "m/z",
                        "proteins",
                        "peptide",
                        "NA",
                        "charge",
                        "score",
                        "rank",
                        "best localization score",
                        "localization scores",
                        "best localization(s)",
                        "peptide weight",
                        "NA weight",
                        "cross-link weight"};

  for (const String& meta_name : meta_values_to_export) { columns.push_back(meta_name); }

  // marker ion fields: run the extractor on an empty spectrum only to obtain the header entries
  NuXLMarkerIonExtractor::MarkerIonsType marker_ions = NuXLMarkerIonExtractor::extractMarkerIons(PeakSpectrum(), 0.0);
  for (const auto& mi : marker_ions)
  {
    for (const auto& annotated : mi.second)
    {
      columns.push_back(String(mi.first + "_" + annotated.first));
    }
  }

  columns << "abs prec. error Da"
          << "rel. prec. error ppm"
          << "M+H"
          << "M+2H"
          << "M+3H"
          << "M+4H"
          << Constants::UserParam::FRAGMENT_ANNOTATION_USERPARAM;

  return ListUtils::concatenate(columns, separator);
}
// Build one report row per MS2 spectrum / peptide hit and, as a side effect,
// annotate each PeptideHit with mass, error and marker ion meta values.
//
// @param spectra experimental spectra; rows are produced for MS2 spectra with exactly one precursor
// @param peptide_ids identifications (matched to spectra via the "scan_index" meta value); hits are annotated in place
// @param meta_values_to_export meta value keys copied from each hit into the row (one column each)
// @param marker_ions_tolerance m/z tolerance for marker ion extraction
// @return one row per unidentified MS2 spectrum, and one row per hit (rank) for identified spectra
std::vector<NuXLReportRow> NuXLReport::annotate(const PeakMap& spectra, PeptideIdentificationList& peptide_ids, const StringList& meta_values_to_export, double marker_ions_tolerance)
{
  // map spectrum index -> index into peptide_ids
  std::map<Size, Size> map_spectra_to_id;
  for (Size i = 0; i != peptide_ids.size(); ++i)
  {
    OPENMS_PRECONDITION(!peptide_ids[i].getHits().empty(), "Error: no empty peptide ids allowed.");
    Size scan_index = (unsigned int)peptide_ids[i].getMetaValue("scan_index");
    map_spectra_to_id[scan_index] = i;
  }

  std::vector<NuXLReportRow> csv_rows;

  for (PeakMap::ConstIterator s_it = spectra.begin(); s_it != spectra.end(); ++s_it)
  {
    int scan_index = s_it - spectra.begin();
    std::vector<Precursor> precursor = s_it->getPrecursors();

    // there should be only one precursor; only MS2 spectra are reported
    if (s_it->getMSLevel() == 2 && precursor.size() == 1)
    {
      Size charge = precursor[0].getCharge();
      double mz = precursor[0].getMZ();
      NuXLMarkerIonExtractor::MarkerIonsType marker_ions = NuXLMarkerIonExtractor::extractMarkerIons(*s_it, marker_ions_tolerance);
      double rt = s_it->getRT();

      // case 1: no peptide identification: store rt, mz, charge and marker ion intensities
      if (map_spectra_to_id.find(scan_index) == map_spectra_to_id.end())
      {
        NuXLReportRow row;
        row.no_id = true;
        row.rt = rt;
        row.original_mz = mz;
        row.charge = charge;
        row.marker_ions = marker_ions;
        csv_rows.push_back(row);
        continue;
      }

      PeptideIdentification& pi = peptide_ids[map_spectra_to_id[scan_index]];
      std::vector<PeptideHit>& phs = pi.getHits();

      // case 2: identification data present for spectrum
      Size rank(0);
      for (PeptideHit& ph : phs)
      {
        ++rank;

        // BUGFIX: use a fresh row per hit. Previously one row object was reused for
        // all hits of a spectrum, so meta_values accumulated across ranks (rank-2+
        // rows got duplicated meta columns) and localization fields carried over
        // from the previous hit when the current hit lacked the meta values.
        NuXLReportRow row;

        for (const String& meta_key : meta_values_to_export)
        {
          row.meta_values.emplace_back(ph.getMetaValue(meta_key).toString());
        }

        PeptideHit::PeakAnnotation::writePeakAnnotationsString_(row.fragment_annotation, ph.getPeakAnnotations());

        // total weight = precursor NA weight + peptide weight
        // this ensures that sequences with additional reported partial loss match the total weight
        // Note that the partial loss is only relevant on the MS2 and would otherwise be added to the total weight
        String sequence_string = ph.getSequence().toString();
        const AASequence sequence = AASequence::fromString(sequence_string);
        double peptide_weight = sequence.getMonoWeight();
        String rna_name = ph.getMetaValue("NuXL:NA");
        double rna_weight = ph.getMetaValue("NuXL:NA_MASS_z0");
        int isotope_error = ph.getMetaValue("isotope_error");

        // cross-link m/z for different charge states
        double weight_z1 = (peptide_weight + rna_weight + 1.0 * Constants::PROTON_MASS_U);
        double weight_z2 = (peptide_weight + rna_weight + 2.0 * Constants::PROTON_MASS_U) / 2.0;
        double weight_z3 = (peptide_weight + rna_weight + 3.0 * Constants::PROTON_MASS_U) / 3.0;
        double weight_z4 = (peptide_weight + rna_weight + 4.0 * Constants::PROTON_MASS_U) / 4.0;

        // precursor error after correcting for the reported isotope error
        double xl_weight = peptide_weight + rna_weight;
        double theo_mz = (xl_weight + static_cast<double>(charge) * Constants::PROTON_MASS_U) / (double)charge;
        double corr_mz = mz - (double)isotope_error * Constants::PROTON_MASS_U / (double)charge;
        double absolute_difference = theo_mz - corr_mz;
        double ppm_difference = Math::getPPM(corr_mz, theo_mz);

        // concatenate protein accessions into a comma-separated string
        String protein_accessions;
        std::set<String> accs = ph.extractProteinAccessionsSet();
        for (const String& acc : accs)
        {
          if (!protein_accessions.empty()) { protein_accessions += ","; }
          protein_accessions += acc;
        }

        row.no_id = false;
        row.rt = rt;
        row.original_mz = mz;
        row.accessions = protein_accessions;
        row.NA = rna_name;
        row.peptide = ph.getSequence().toString();
        row.charge = charge;
        row.score = ph.getScore();
        row.peptide_weight = peptide_weight;
        row.NA_weight = rna_weight;
        row.xl_weight = peptide_weight + rna_weight;
        row.rank = rank;

        ph.setMetaValue("NuXL:peptide_mass_z0", DataValue(peptide_weight));
        ph.setMetaValue("NuXL:xl_mass_z0", xl_weight);

        // store marker ion intensities (in percent) as meta values on the hit
        for (const auto& mi : marker_ions)
        {
          for (const auto& annotated_intensity : mi.second)
          {
            ph.setMetaValue(mi.first + "_" + annotated_intensity.first, static_cast<double>(annotated_intensity.second * 100.0));
          }
        }

        row.marker_ions = marker_ions;
        row.abs_prec_error = absolute_difference;
        row.rel_prec_error = ppm_difference;
        row.m_H = weight_z1;
        row.m_2H = weight_z2;
        row.m_3H = weight_z3;
        row.m_4H = weight_z4;

        if (ph.metaValueExists("NuXL:best_localization_score") &&
            ph.metaValueExists("NuXL:localization_scores") &&
            ph.metaValueExists("NuXL:best_localization"))
        {
          row.best_localization_score = ph.getMetaValue("NuXL:best_localization_score");
          row.localization_scores = ph.getMetaValue("NuXL:localization_scores");
          row.best_localization = ph.getMetaValue("NuXL:best_localization");
        }

        ph.setMetaValue("NuXL:Da difference", (double)absolute_difference);
        ph.setMetaValue(Constants::UserParam::PRECURSOR_ERROR_PPM_USERPARAM, (double)ppm_difference);
        ph.setMetaValue("NuXL:z1 mass", (double)weight_z1);
        ph.setMetaValue("NuXL:z2 mass", (double)weight_z2);
        ph.setMetaValue("NuXL:z3 mass", (double)weight_z3);
        ph.setMetaValue("NuXL:z4 mass", (double)weight_z4);
        csv_rows.push_back(row);
      }
    }
  }
  return csv_rows;
}
// Cross-link efficiency per amino acid:
//   frequency of the cross-linked amino acid / frequency of that amino acid
// over all cross-link spectrum matches with a localized site (top hits only,
// decoys and non-XL hits excluded).
map<char, double> NuXLProteinReport::getCrossLinkEfficiency(const PeptideIdentificationList& peps)
{
  map<char, double> xl_site_freq;  // localized XL site counts per amino acid
  map<char, double> residue_freq;  // amino acid counts over the same peptides

  for (const PeptideIdentification& pep : peps)
  {
    const auto& hits = pep.getHits();
    if (hits.empty()) continue;

    const PeptideHit& top_hit = hits[0]; // only consider top hit
    if (top_hit.isDecoy() || top_hit.getMetaValue("NuXL:isXL") == "false") continue;

    const int site = top_hit.getMetaValue("NuXL:best_localization_position");
    if (site < 0) continue; // skip hits without a localized cross-link

    const String unmodified = top_hit.getSequence().toUnmodifiedString();
    xl_site_freq[unmodified[site]] += 1;
    for (const char residue : unmodified) { residue_freq[residue] += 1; }
  }

  // normalize both maps to relative frequencies
  double xl_total{};
  for (const auto& [residue, count] : xl_site_freq) { xl_total += count; }
  for (auto& [residue, count] : xl_site_freq) { count /= xl_total; }

  double residue_total{};
  for (const auto& [residue, count] : residue_freq) { residue_total += count; }
  for (auto& [residue, count] : residue_freq) { count /= residue_total; }

  // efficiency = relative XL frequency divided by relative residue frequency
  for (auto& [residue, count] : xl_site_freq) { count /= residue_freq[residue]; }
  return xl_site_freq;
}
// Count how often each precursor adduct occurs among the top hits.
// Hits without a "NuXL:NA" meta value are counted under "none".
map<String, size_t> NuXLProteinReport::countAdducts(const PeptideIdentificationList& peps)
{
  map<String, size_t> counts;
  for (const PeptideIdentification& pep : peps)
  {
    const auto& hits = pep.getHits();
    if (hits.empty()) continue;

    // only consider the top hit of each identification
    const String adduct = hits[0].getMetaValue("NuXL:NA", String("none"));
    ++counts[adduct];
  }
  return counts;
}
/*
Output format:
+----------------------+-----+--------+---------+------+--------------------------+---------------------+---------------------------+-----------------------+------------------------+------------------------------+------------------------------+-------------------------------+------------------------------+---------------------------+---------------------------+----------------------------------+----------------------------------+---------------+-------------------------+
| accession | AA | pos. | start | end | adducts (loc. + unique) | NT (loc. + unique) | charges (loc. + unique) | CSMs (loc. + unique) | CSMs (loc. + shared) | precursors (loc. + unique) | precursors (loc. + shared) | adducts (\wo loc. + unique) | charges (\wo loc. + unique) | CSMs (\wo loc. + unique) | CSMs (\wo loc. + shared) | precursors (\wo loc. + unique) | precursors (\wo loc. + shared) | ambiguities | peptide | peptide-XL q-value
+----------------------+-----+--------+---------+------+--------------------------+---------------------+---------------------------+-----------------------+------------------------+------------------------------+------------------------------+-------------------------------+------------------------------+---------------------------+---------------------------+----------------------------------+----------------------------------+---------------+-------------------------+
| sp|P19338|NUCL_HUMAN | P | 302 | 297 | 317 | U | U | 3 | 1 | 0 | 1 | 0 | | | 0 | 0 | 0 | 0 | | VEGTEPTTAFNLFVGNLNFNK | 0.0
| sp|P19338|NUCL_HUMAN | F | 309 | 297 | 317 | U,U-H2O1 | U | 2,3 | 3 | 0 | 3 | 0 | | | 0 | 0 | 0 | 0 | | VEGTEPTTAFNLFVGNLNFNK | 0.0
+----------------------+-----+--------+---------+------+--------------------------+---------------------+---------------------------+-----------------------+------------------------+------------------------------+------------------------------+-------------------------------+------------------------------+---------------------------+---------------------------+----------------------------------+----------------------------------+---------------+-------------------------+
*/
// Cross-link localization data for a single amino acid position in a protein.
// The protein accession and the residue position are the keys under which this
// struct is stored (see ProteinReport::aa_level_localization) and are therefore
// not duplicated here.
struct AALevelLocalization
{
  // One localized cross-link observation (from a single CSM).
  struct LocalizedXL
  {
    std::string adduct;  // precursor adduct string, e.g. "U-H2O"
    std::string NT;      // the cross-linked nucleotide
    int charge = 0;      // precursor charge state of the CSM
  };
  // Note protein accession and position are stored in ProteinReportEntry
  string AA;  // one-letter code of the amino acid at this protein position
  // note: we use a vector as entries might be duplicated and we need to obtain proper counts
  std::map<std::string, vector<LocalizedXL>> peptide2XL; // observed peptide -> adduct,NA,charge tuples
};
// Cross-link data for CSMs without a localized site: the cross-link is only
// known to lie somewhere in a peptide region (start/end in the protein, stored
// as the key in ProteinReport::region_level_localization).
struct RegionLevelLocalization
{
  // One unlocalized cross-link observation (no cross-linked nucleotide known).
  struct UnlocalizedXL
  {
    std::string adduct;  // precursor adduct string
    int charge = 0;      // precursor charge state of the CSM
  };
  // note: we use a vector as entries might be duplicated and we need to obtain proper counts
  std::map<std::string, vector<UnlocalizedXL>> peptide2unlocalizedXL; // observed peptide -> adduct,charge tuples
};
// all localization information for one protein accession
// (aggregated over all CSMs that map to this protein)
struct ProteinReport
{
  String sequence; //< the protein sequence
  size_t CSMs_of_shared_peptides = 0; // XL spectral count of shared peptides
  size_t CSMs_of_unique_peptides = 0; // XL spectral count of unique peptides
  map<size_t, AALevelLocalization> aa_level_localization; // position in protein to loc info
  map<pair<size_t, size_t>, RegionLevelLocalization> region_level_localization; // peptide start/end region -> unlocalized XL info
};
// all proteins
using ProteinsReport = map<std::string, ProteinReport>; //< protein accession to details
// File-scope cache: unmodified peptide sequence -> peptide-level XL q-value.
// Filled in getProteinReportEntries(); presumably read by the report-printing
// code below — verify. NOTE(review): mutable file-global state, not thread-safe.
std::unordered_map<String, double> peptide_seq2XLFDR;
// Collect per-protein cross-link localization information from the top hit of
// every identification.
//
// @param peps identifications; only the top hit of each is considered
// @param acc2protein_targets accession -> target ProteinHit (source of protein sequences)
// @param peptide2proteins unmodified peptide sequence -> set of protein accessions it maps to
// @return accession -> ProteinReport with AA-level (localized) and region-level (unlocalized) XL data
// Side effect: fills the file-global peptide_seq2XLFDR cache.
ProteinsReport getProteinReportEntries(
  const PeptideIdentificationList& peps,
  const map<String, ProteinHit*>& acc2protein_targets,
  const std::map<string, set<string>>& peptide2proteins
  )
{
  ProteinsReport report; // map accession to reporting data

  // go through all CSMs and collect information for individual XL sites
  for (const PeptideIdentification& pep : peps)
  {
    auto& hits = pep.getHits();
    if (hits.empty()) continue;

    const PeptideHit& ph = hits[0]; // only consider top hit
    const int best_localization = ph.getMetaValue("NuXL:best_localization_position");
    const String& NA = ph.getMetaValue("NuXL:NA"); // adduct
    const String& NT = ph.getMetaValue("NuXL:NT"); // XLed nucleotide
    const int charge = ph.getCharge();
    const AASequence& peptide_sequence = ph.getSequence();

    // get mapping of peptide sequence to protein(s)
    const std::vector<PeptideEvidence>& ph_evidences = ph.getPeptideEvidences();
    const std::string peptide_sequence_string = peptide_sequence.toUnmodifiedString();

    // cache the peptide-level FDR in the group of cross-linked peptides
    double peptide_XL_level_qvalue = (double)ph.getMetaValue(Constants::UserParam::PEPTIDE_Q_VALUE, 0.0);
    peptide_seq2XLFDR[peptide_sequence_string] = peptide_XL_level_qvalue;

    // loop over all target proteins the peptide maps to
    // (take a reference — no need to copy the whole set per CSM)
    const std::set<std::string>& proteins = peptide2proteins.at(peptide_sequence_string);
    const bool is_unique = proteins.size() == 1;
    for (const auto& acc : proteins)
    {
      // add basic protein information the first time we encounter a protein accession
      int protein_length{};
      if (auto it = report.find(acc); it == report.end())
      {
        auto& protein_sequence = acc2protein_targets.at(acc)->getSequence();
        ProteinReport pe;
        pe.sequence = protein_sequence;
        report.emplace(acc, std::move(pe)); // add to report
        protein_length = protein_sequence.size();
      }
      else
      {
        protein_length = (int)it->second.sequence.size();
      }

      // TODO: potentially could save that loop. Doesn't handle peptides mapping twice into same protein (but different position)
      const PeptideEvidence& ph_evidence = *find_if(ph_evidences.begin(), ph_evidences.end(),
        [&acc] (const PeptideEvidence& e) { return e.getProteinAccession() == acc; });

      const int peptide_start_in_protein = ph_evidence.getStart();
      if (peptide_start_in_protein < 0) continue; // TODO: can this happen?

      if (best_localization >= 0)
      { // XL was localized: record it at amino-acid resolution
        // calculate position in protein
        int xl_pos_in_protein = peptide_start_in_protein + best_localization;
        if (xl_pos_in_protein >= protein_length) continue; // TODO: can this happen?

        // create basic site information the first time this position is seen
        if (auto it = report[acc].aa_level_localization.find(xl_pos_in_protein);
          it == report[acc].aa_level_localization.end())
        {
          auto& protein_sequence = acc2protein_targets.at(acc)->getSequence();
          report[acc].aa_level_localization[xl_pos_in_protein].AA = protein_sequence[xl_pos_in_protein];
        }

        AALevelLocalization::LocalizedXL xl;
        xl.adduct = NA;
        xl.NT = NT;
        xl.charge = charge;
        report[acc].aa_level_localization[xl_pos_in_protein].peptide2XL[peptide_sequence_string].push_back(xl);
      }
      else
      { // not localized? annotate the peptide region instead
        RegionLevelLocalization::UnlocalizedXL xl;
        xl.adduct = NA;
        xl.charge = charge;
        int start = ph_evidence.getStart();
        int end = ph_evidence.getEnd();
        report[acc].region_level_localization[{start, end}].peptide2unlocalizedXL[peptide_sequence_string].push_back(xl);
      }

      // count CSMs (localized and unlocalized) per protein
      if (is_unique)
      {
        report[acc].CSMs_of_unique_peptides++;
      }
      else
      {
        report[acc].CSMs_of_shared_peptides++;
      }
    }
  }
  return report;
}
  // Writes one TSV row per peptide whose cross-link was localized to the given
  // amino-acid position of the protein, and returns the set of peptide
  // sequences printed (the caller uses this to decide which peptides still
  // need region-level output).
  //
  // @param tsv_file output file; one line appended per localized peptide
  // @param accession protein accession of the site
  // @param position cross-linked amino-acid position within the protein
  // @param aa_loc site-level localization data (amino acid + peptide->XL evidence)
  // @param peptides2unlocalizedXL lookup: peptide -> XL evidence lacking site localization (same protein)
  // @param peptide2proteins lookup: peptide -> all proteins it maps to (decides unique vs. shared)
  // @param peptides2proteins2regions lookup: peptide -> protein -> set of start/end regions
  // @param protein2proteins in/out ambiguity graph: proteins sharing a peptide get linked here
  set<string> printXLSiteDetails(
    TextFile& tsv_file,
    const std::string& accession,
    size_t position,
    const AALevelLocalization& aa_loc,
    map<string, vector<RegionLevelLocalization::UnlocalizedXL>>& peptides2unlocalizedXL,
    std::map<std::string, std::set<std::string>>& peptide2proteins,
    map<string, map<string, set<pair<size_t, size_t>>>>& peptides2proteins2regions,
    map<string, set<string>>& protein2proteins)
  {
    set<string> printed_peptides;
    // one row per localized peptide; columns shared by all rows: protein accession, AA, position
    const string line_start = accession + "\t" + aa_loc.AA + "\t" + String(position) + "\t";
    for (const auto& [peptide, localizedXLs] : aa_loc.peptide2XL)
    {
      printed_peptides.insert(peptide);
      // protein, AA, position
      String l = line_start;
      // TODO: handle all set entries (e.g., if peptide maps multiple times in same protein)
      // start and end of peptide in protein (only the first mapped region is reported)
      string region = String(peptides2proteins2regions[peptide][accession].begin()->first) + "\t" + String(peptides2proteins2regions[peptide][accession].begin()->second) +"\t";
      l += region;
      // unique = peptide maps to exactly one protein
      bool is_unique = peptide2proteins[peptide].size() == 1;
      // non-owning pointer to the unlocalized evidence of this peptide (if any)
      vector<RegionLevelLocalization::UnlocalizedXL>* unlocalized = nullptr;
      if (auto it = peptides2unlocalizedXL.find(peptide); it != peptides2unlocalizedXL.end())
      {
        unlocalized = &(it->second);
      }
      // condense localized CSM information down to sets and counts
      set<string> adduct_set, nt_set, charge_set;
      set<string> unique_peptidoforms, shared_peptidoforms; // XLs that differ in either adduct, nt or charge
      size_t unique_localized_CSM_count{};
      size_t shared_localized_CSM_count{}; // shared
      for (const auto& xls : localizedXLs)
      {
        adduct_set.insert(xls.adduct);
        nt_set.insert(xls.NT);
        charge_set.insert(String(xls.charge));
        if (is_unique)
        {
          unique_localized_CSM_count++;
          // peptidoform key: concatenation of adduct, nucleotide and charge
          unique_peptidoforms.insert(xls.adduct + xls.NT + String(xls.charge));
        }
        else
        {
          shared_localized_CSM_count++;
          shared_peptidoforms.insert(xls.adduct + xls.NT + String(xls.charge));
        }
      }
      // same condensation for evidence where the peptide was found but the site was not localized
      set<string> unlocalized_adduct_set, unlocalized_charge_set;
      set<string> unique_unlocalized_peptidoforms, shared_unlocalized_peptidoforms; // XLs that differ in either adduct, nt or charge
      size_t unique_unlocalized_CSM_count{};
      size_t shared_unlocalized_CSM_count{};
      if (unlocalized != nullptr)
      {
        for (const auto& xls : *unlocalized)
        {
          unlocalized_adduct_set.insert(xls.adduct);
          unlocalized_charge_set.insert(String(xls.charge));
          if (is_unique)
          {
            unique_unlocalized_CSM_count++;
            unique_unlocalized_peptidoforms.insert(xls.adduct + String(xls.charge));
          }
          else
          {
            shared_unlocalized_CSM_count++;
            shared_unlocalized_peptidoforms.insert(xls.adduct + String(xls.charge));
          }
        }
      }
      // print adducts, nucleotides and charge sets (localized evidence)
      l += ListUtils::concatenate(adduct_set, ",") + "\t";
      l += ListUtils::concatenate(nt_set, ",") + "\t";
      l += ListUtils::concatenate(charge_set, ",") + "\t";
      l += String(unique_localized_CSM_count) + "\t";
      l += String(shared_localized_CSM_count) + "\t";
      l += String(unique_peptidoforms.size()) + "\t"; // peptide counts
      l += String(shared_peptidoforms.size()) + "\t";
      // print adducts, nucleotides and charge sets of unlocalized peptides
      l += ListUtils::concatenate(unlocalized_adduct_set, ",") + "\t";
      l += ListUtils::concatenate(unlocalized_charge_set, ",") + "\t";
      l += String(unique_unlocalized_CSM_count) + "\t";
      l += String(shared_unlocalized_CSM_count) + "\t";
      l += String(unique_unlocalized_peptidoforms.size()) + "\t"; // peptide counts
      l += String(shared_unlocalized_peptidoforms.size()) + "\t";
      // create string with other proteins this peptide maps to (ambiguities)
      auto ambiguities = peptide2proteins[peptide];
      protein2proteins[accession].insert(ambiguities.begin(), ambiguities.end()); // note: add same protein to group as well
      ambiguities.erase(accession);
      l += ListUtils::concatenate(ambiguities, ",") + "\t";
      // add peptide sequence and sequence level FDR (in the group of XLs);
      // NOTE(review): peptide_seq2XLFDR is presumably a file-scope lookup defined
      // earlier in this file (not visible here) — confirm it is populated before this call
      l += peptide + "\t";
      l += peptide_seq2XLFDR[peptide];
      tsv_file.addLine(l);
    }
    return printed_peptides;
  }
  // Writes one TSV row per peptide of the given region that was identified as
  // cross-linked but could not be localized to a single amino acid. Only
  // peptides contained in remaining_peptides (i.e., not already printed at
  // site level) are reported; site-level columns are filled with placeholders.
  //
  // @param tsv_file output file; one line appended per printed peptide
  // @param accession protein accession of the region
  // @param region_loc region-level localization data (peptide -> unlocalized XL evidence)
  // @param remaining_peptides peptides not yet covered by site-level rows
  // @param peptide2proteins lookup: peptide -> all proteins it maps to (decides unique vs. shared)
  // @param peptides2proteins2regions lookup: peptide -> protein -> set of start/end regions
  // @param protein2proteins in/out ambiguity graph: proteins sharing a peptide get linked here
  void printXLRegionDetails(
    TextFile& tsv_file,
    const std::string& accession,
    const RegionLevelLocalization& region_loc,
    const set<string>& remaining_peptides,
    std::map<std::string, std::set<std::string>>& peptide2proteins,
    map<string, map<string, set<pair<size_t, size_t>>>>& peptides2proteins2regions,
    map<string, set<string>>& protein2proteins
  )
  {
    // one row per unlocalized peptide
    for (const auto& [peptide, unlocalizedXLs] : region_loc.peptide2unlocalizedXL)
    {
      // skip peptides already reported with site-level localization
      if (remaining_peptides.find(peptide) == remaining_peptides.end()) continue;
      // protein, AA, position (AA and position unknown -> placeholders)
      String l = accession + "\t-\t-\t";
      // TODO: handle all set entries (e.g., if peptide maps multiple times in same protein)
      // start and end of peptide in protein (only the first mapped region is reported)
      string region = String(peptides2proteins2regions[peptide][accession].begin()->first) + "\t" + String(peptides2proteins2regions[peptide][accession].begin()->second);
      l += region + "\t";
      // unique = peptide maps to exactly one protein
      bool is_unique = peptide2proteins[peptide].size() == 1;
      // condense unlocalized CSM information down to sets and counts
      set<string> unlocalized_adduct_set, unlocalized_charge_set;
      set<string> unique_unlocalized_peptidoforms, shared_unlocalized_peptidoforms; // XLs that differ in either adduct, nt or charge
      size_t unique_unlocalized_CSM_count{};
      size_t shared_unlocalized_CSM_count{};
      for (const auto& xls : unlocalizedXLs)
      {
        unlocalized_adduct_set.insert(xls.adduct);
        unlocalized_charge_set.insert(String(xls.charge));
        if (is_unique)
        {
          unique_unlocalized_CSM_count++;
          // peptidoform key: concatenation of adduct and charge
          unique_unlocalized_peptidoforms.insert(xls.adduct + String(xls.charge));
        }
        else
        {
          shared_unlocalized_CSM_count++;
          shared_unlocalized_peptidoforms.insert(xls.adduct + String(xls.charge));
        }
      }
      // localized columns are empty for region-only rows (placeholders / zero counts)
      l += "-\t-\t-\t0\t0\t0\t0\t";
      // print adducts, nucleotides and charge sets of unlocalized peptides
      l += ListUtils::concatenate(unlocalized_adduct_set, ",") + "\t";
      l += ListUtils::concatenate(unlocalized_charge_set, ",") + "\t";
      l += String(unique_unlocalized_CSM_count) + "\t";
      l += String(shared_unlocalized_CSM_count)+ "\t";
      l += String(unique_unlocalized_peptidoforms.size()) + "\t"; // peptide counts
      l += String(shared_unlocalized_peptidoforms.size()) + "\t";
      // create string with other proteins this peptide maps to (ambiguities)
      auto ambiguities = peptide2proteins[peptide];
      protein2proteins[accession].insert(ambiguities.begin(), ambiguities.end()); // note: add same protein to group as well
      ambiguities.erase(accession);
      l += ListUtils::concatenate(ambiguities, ",") + "\t";
      l += peptide + "\t"; // add peptide sequence
      // NOTE(review): peptide_seq2XLFDR is presumably a file-scope lookup defined
      // earlier in this file (not visible here) — confirm it is populated before this call
      l += peptide_seq2XLFDR[peptide];
      tsv_file.addLine(l);
    }
  }
// static
void NuXLProteinReport::mapAccessionToTDProteins(ProteinIdentification& prot_id, std::map<String, ProteinHit*>& acc2protein_targets, std::map<String, ProteinHit*>& acc2protein_decoys)
{
std::vector<ProteinHit>& proteins = prot_id.getHits();
for (ProteinHit& protein : proteins)
{
if (!protein.isDecoy())
{
acc2protein_targets[protein.getAccession()] = &protein;
}
else
{
acc2protein_decoys[protein.getAccession()] = &protein;
}
}
}
/*
map<string, set<string>> acc2grouptype = calculateXLGroupType(const ProteinsReport& report, const ProteinIdentification& prot_id)
{
map<string, string> a2g;
set<string> single_protein;
map<set<string>, size_t> indist;
size_t ind_index{};
for (const auto& pg : prot_id.getIndistinguishableProteins())
{
set<string> s(pg.accessions.begin(), pg.accessions.end());
if (auto it = indist.find(s); it == indist.end())
{
indist[s] = ind_index++;
}
}
for (const auto& [accession, pr] : report)
{
if (pr.CSMs_of_unique_peptides > 0) // at least one unique peptide -> protein has unique evidence for XL
{
a2g.insert("single protein");
}
else if (indist.find(accession) != indist.end())
{
a2g.insert("ind. protein group");
}
else
{
a2g.insert("gen. protein group");
}
}
return a2g;
}
*/
  // Writes the full NuXL protein report to tsv_file for a single search run:
  // 1) one detail row per (protein, site/region, peptide) cross-link,
  // 2) a run summary (CSM/protein counts),
  // 3) a protein summary with group types (single protein / indistinguishable /
  //    general group),
  // 4) cross-link efficiency per amino acid, and
  // 5) a precursor adduct summary.
  //
  // @param prot_ids protein identifications; exactly one run is supported
  // @param peps peptide identifications; only the top hit per spectrum is used
  // @param tsv_file output file the report lines are appended to
  void NuXLProteinReport::annotateProteinModificationForTopHits(
    vector<ProteinIdentification>& prot_ids,
    const PeptideIdentificationList& peps,
    TextFile& tsv_file)
  {
    assert(prot_ids.size() == 1); // support for one run only
    ProteinIdentification& prot_id = prot_ids[0];
    // create lookup accession -> protein (targets and decoys separately)
    map<String, ProteinHit*> acc2protein_targets, acc2protein_decoys;
    NuXLProteinReport::mapAccessionToTDProteins(prot_id, acc2protein_targets, acc2protein_decoys);
    size_t CSMs_sum{}; // total number of XLed spectra
    // map peptide sequence to protein(s) and vice versa
    std::map<std::string, std::set<std::string>> peptide2proteins;
    std::map<std::string, std::set<std::string>> protein2peptides;
    // lookup from peptide to its proteins and the region it maps to
    map<string, map<string, set<pair<size_t, size_t>>>> peptides2proteins2regions;
    // fill the lookups from the top hit of every identified spectrum
    for (const PeptideIdentification& pep : peps)
    {
      auto& hits = pep.getHits();
      if (hits.empty()) continue;
      const PeptideHit& ph = hits[0]; // only consider top hit
      auto peptide_sequence = ph.getSequence().toUnmodifiedString();
      const std::vector<PeptideEvidence>& ph_evidences = ph.getPeptideEvidences();
      ++CSMs_sum;
      for (auto& ph_evidence : ph_evidences)
      {
        const String& acc = ph_evidence.getProteinAccession();
        bool is_target = acc2protein_targets.find(acc) != acc2protein_targets.end();
        if (!is_target) continue; // skip decoys
        peptide2proteins[peptide_sequence].insert(acc);
        protein2peptides[acc].insert(peptide_sequence);
        peptides2proteins2regions[peptide_sequence][acc].insert({ph_evidence.getStart(), ph_evidence.getEnd()});
      }
    }
    // collect per-protein report entries (localizations, CSM counts, ...)
    ProteinsReport r = getProteinReportEntries(peps, acc2protein_targets, peptide2proteins);
    // copy to vector so we can sort
    vector<pair<string, ProteinReport>> report;
    copy(r.begin(), r.end(), back_inserter(report));
    r.clear();
    // sort report entries (largest number of XL PSM count first; accession as tie-breaker)
    cout << "Sorting entries... " << endl;
    std::sort(report.begin(), report.end(),
      [](const pair<string, ProteinReport> & a, const pair<string, ProteinReport> & b) -> bool
      {
        return std::tie(a.second.CSMs_of_unique_peptides, a.second.CSMs_of_shared_peptides, a.first)
          > std::tie(b.second.CSMs_of_unique_peptides, b.second.CSMs_of_shared_peptides, b.first);
      });
    // write detail table header
    cout << "Writing " << report.size() << " proteins to tsv file... " << endl;
    tsv_file.addLine(String("accession\tAA\tpos.\tstart\tend\t") +
      "adducts (loc. + unique)\tNT (loc. + unique)\tcharges (loc. + unique)\t" +
      "CSMs (loc. + unique)\tCSMs (loc. + shared)\tprecursors (loc. + unique)\tprecursors (loc. + shared)\t" +
      "adducts (\\wo loc. + unique)\tcharges (\\wo loc. + unique)\t" +
      "CSMs (\\wo loc. + unique)\tCSMs (\\wo loc. + shared)\tprecursors (\\wo loc. + unique)\tprecursors (\\wo loc. + shared)\t" +
      "ambiguities\tpeptide\tq-value (peptide seq. level)"
    );
    // ambiguity graph filled as a side effect of printing (protein -> co-occurring proteins)
    map<string, set<string>> protein2proteins;
    for (const auto& [accession, pr] : report)
    {
      // lookup to determine if and where the given peptide
      // was identified without localization and where it maps to in the protein.
      map<string, vector<RegionLevelLocalization::UnlocalizedXL>> peptides2unlocalizedXL;
      for (const auto& [start_end, region_detail] : pr.region_level_localization)
      {
        for (const auto& [peptide, xls] : region_detail.peptide2unlocalizedXL)
        {
          peptides2unlocalizedXL[peptide] = xls;
        }
      }
      // first write lines with site localizations
      set<string> printed_peptides;
      for (const auto& [position, aa_loc] : pr.aa_level_localization)
      {
        set<string> p = printXLSiteDetails(
          tsv_file,
          accession,
          position,
          aa_loc,
          peptides2unlocalizedXL,
          peptide2proteins,
          peptides2proteins2regions,
          protein2proteins
        );
        printed_peptides.insert(p.begin(), p.end());
      }
      // determine peptides/regions not yet printed (e.g., no site localization exists for those)
      set<string> all_peptides = protein2peptides.at(accession);
      set<string> remaining_peptides;
      std::set_difference(all_peptides.begin(), all_peptides.end(),
        printed_peptides.begin(), printed_peptides.end(),
        std::inserter(remaining_peptides, remaining_peptides.end()));
      // write remaining unlocalized peptides (=regions)
      for (const auto& [region, region_loc] : pr.region_level_localization)
      {
        printXLRegionDetails(
          tsv_file,
          accession,
          region_loc,
          remaining_peptides,
          peptide2proteins,
          peptides2proteins2regions,
          protein2proteins);
      }
    }
    // run summary section
    tsv_file.addLine("\n=============================================================");
    tsv_file.addLine("Run summary:");
    tsv_file.addLine("CSMs:\t" + String(CSMs_sum));
    tsv_file.addLine("Proteins:\t" + String(report.size()));
    tsv_file.addLine("\n=============================================================");
    tsv_file.addLine("Protein summary:");
    tsv_file.addLine("accession\tCSMs (unique pep.)\tCSMs (shared pep.)\tgroup type");
    // calculate indistinguishable groups
    vector<ProteinIdentification::ProteinGroup> ipg;
    if (!prot_id.getHits().empty())
    {
      PeptideIdentificationList pep_copy{peps}; // TODO: why copy needed?
      IDBoostGraph ibg{prot_id, pep_copy, true, false, false}; // only consider top hit
      ibg.calculateAndAnnotateIndistProteins(false); // only indistinguishable protein groups
      ipg = prot_id.getIndistinguishableProteins();
      std::sort(std::begin(ipg), std::end(ipg));
    }
    // calculate proteins with unique XL peptide
    set<string> accessionToUniquePeptides;
    for (const PeptideIdentification& pep : peps)
    {
      auto& hits = pep.getHits();
      if (hits.empty()) continue;
      const PeptideHit& ph = hits[0]; // only consider top hit
      const AASequence& peptide_sequence = ph.getSequence();
      const std::string peptide_sequence_string = peptide_sequence.toUnmodifiedString();
      const std::set<std::string>& proteins = peptide2proteins.at(peptide_sequence_string);
      const bool is_unique = proteins.size() == 1;
      if (is_unique)
      {
        accessionToUniquePeptides.insert(*proteins.begin());
      }
    }
    // lookup: accession -> its indistinguishable group (pointers into ipg)
    map<string, ProteinIdentification::ProteinGroup*> accessionToIndistinguishableGroup;
    for (auto& pg : ipg)
    {
      for (const auto& a : pg.accessions)
      {
        accessionToIndistinguishableGroup[a] = &pg;
      }
    }
    // print single proteins and ind. protein groups (each group only once)
    set<string> printed_ind_group;
    for (const auto& [accession, pr] : report)
    {
      if (accessionToUniquePeptides.count(accession) == 1)
      { // protein with unique peptide
        String group_type = "protein";
        tsv_file.addLine(accession + "\t" + String(pr.CSMs_of_unique_peptides)
          + "\t" + String(pr.CSMs_of_shared_peptides)
          + "\t" + group_type);
      }
      else if (auto it = accessionToIndistinguishableGroup.find(accession);
        it != accessionToIndistinguishableGroup.end())
      { // ind. protein group
        if (printed_ind_group.count(accession) == 0)
        {
          const ProteinIdentification::ProteinGroup* pg = it->second;
          String a = ListUtils::concatenate(pg->accessions, ",");
          String group_type = "ind. protein group";
          tsv_file.addLine(a + "\t" + String(pr.CSMs_of_unique_peptides)
            + "\t" + String(pr.CSMs_of_shared_peptides)
            + "\t" + group_type);
          for (auto a : pg->accessions)
          { // mark all ind. protein members as already printed
            printed_ind_group.insert(std::move(a));
          }
        }
      }
      else
      { // general protein groups (linked only via shared XL peptides)
        set<string> group_neighbors = protein2proteins[accession];
        group_neighbors.erase(accession);
        tsv_file.addLine(accession + "\t" + String(pr.CSMs_of_unique_peptides)
          + "\t" + String(pr.CSMs_of_shared_peptides)
          + "\tgen. protein group (shares XL peptides with: " + ListUtils::concatenate(group_neighbors, ",") + ")");
      }
    }
    // cross-link efficiency section
    tsv_file.addLine("\n=============================================================");
    tsv_file.addLine("Crosslink efficiency (AA freq. / AA freq. in all CSMs):");
    auto aa_xl_freq = getCrossLinkEfficiency(peps);
    for (auto& m : aa_xl_freq)
    {
      tsv_file.addLine(String(m.first) + "\t" + String(m.second));
    }
    // precursor adduct summary section (sorted by PSM count, descending)
    tsv_file.addLine("\n=============================================================");
    tsv_file.addLine("Precursor adduct summary:");
    tsv_file.addLine("Precursor adduct:\tPSMs:\tPSMs(%)");
    map<String, size_t> adduct2count = countAdducts(peps);
    vector<pair<size_t, String>> count2adduct;
    size_t total_psms{};
    for (const auto& ac : adduct2count)
    {
      count2adduct.push_back({ac.second, ac.first});
      total_psms += ac.second;
    }
    std::sort(count2adduct.begin(), count2adduct.end(),
      [](const pair<size_t, String> & a, const pair<size_t, String> & b) -> bool
      {
        return std::tie(a.first, a.second) > std::tie(b.first, b.second);
      });
    for (const auto& ca : count2adduct)
    {
      tsv_file.addLine(ca.second + "\t" + String(ca.first) + "\t" + String(100.0 * (double)ca.first / (double)total_psms));
    }
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/NUXL/NuXLFragmentIonGenerator.cpp | .cpp | 15,621 | 340 | // Copyright (c) 2002-present, The OpenMS Team -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/NUXL/NuXLFragmentIonGenerator.h>
#include <OpenMS/ANALYSIS/NUXL/NuXLFragmentAnnotationHelper.h>
#include <OpenMS/CHEMISTRY/ElementDB.h>
#include <OpenMS/CHEMISTRY/ResidueDB.h>
#include <OpenMS/CHEMISTRY/ResidueModification.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/CONCEPT/LogStream.h>
using namespace std;
namespace OpenMS
{
void NuXLFragmentIonGenerator::addMS2MarkerIons(
const std::vector<NuXLFragmentAdductDefinition> &marker_ions,
PeakSpectrum &spectrum,
PeakSpectrum::IntegerDataArray &spectrum_charge,
PeakSpectrum::StringDataArray &spectrum_annotation)
{
for (auto const & m : marker_ions)
{
const double mz = m.mass + Constants::PROTON_MASS_U;
spectrum.emplace_back(mz, 1.0);
spectrum_charge.emplace_back(1);
spectrum_annotation.emplace_back(NuXLFragmentIonGenerator::ANNOTATIONS_MARKER_ION_PREFIX + m.name); // add name (e.g., MI:U-H2O)
}
}
void NuXLFragmentIonGenerator::addSpecialLysImmonumIons(
const String& unmodified_sequence,
PeakSpectrum &spectrum,
PeakSpectrum::IntegerDataArray &spectrum_charge,
PeakSpectrum::StringDataArray &spectrum_annotation)
{
if (unmodified_sequence.has('K'))
{
const double immonium_ion2_mz = EmpiricalFormula("C5H10N1").getMonoWeight();
// only add special ios if there is not already a peak
if (spectrum.findNearest(immonium_ion2_mz, 1e-4) == -1)
{
spectrum.emplace_back(immonium_ion2_mz, 1.0);
spectrum_charge.emplace_back(1);
spectrum_annotation.emplace_back(String("iK(C5H10N1)"));
}
// usually only observed without shift (A. Stuetzer)
const double immonium_ion3_mz = EmpiricalFormula("C6H13N2O").getMonoWeight();
// only add special ios if there is not already a peak
if (spectrum.findNearest(immonium_ion3_mz, 1e-4) == -1)
{
spectrum.emplace_back(immonium_ion3_mz, 1.0);
spectrum_charge.emplace_back(1);
spectrum_annotation.emplace_back(String("iK(C6H13N2O)"));
}
}
}
void NuXLFragmentIonGenerator::addShiftedImmoniumIons(const String &unmodified_sequence,
const String &fragment_shift_name,
const double fragment_shift_mass,
PeakSpectrum &partial_loss_spectrum,
PeakSpectrum::IntegerDataArray &partial_loss_spectrum_charge,
PeakSpectrum::StringDataArray &partial_loss_spectrum_annotation)
{
if (unmodified_sequence.hasSubstring("Y"))
{
const double immonium_ion_mz = EmpiricalFormula("C8H10NO").getMonoWeight() + fragment_shift_mass;
partial_loss_spectrum.emplace_back(immonium_ion_mz, 1.0);
partial_loss_spectrum_charge.emplace_back(1);
partial_loss_spectrum_annotation.emplace_back(NuXLFragmentAnnotationHelper::getAnnotatedImmoniumIon('Y', fragment_shift_name));
}
if (unmodified_sequence.hasSubstring("W"))
{
const double immonium_ion_mz = EmpiricalFormula("C10H11N2").getMonoWeight() + fragment_shift_mass;
partial_loss_spectrum.emplace_back(immonium_ion_mz, 1.0);
partial_loss_spectrum_charge.emplace_back(1);
partial_loss_spectrum_annotation.emplace_back(NuXLFragmentAnnotationHelper::getAnnotatedImmoniumIon('W', fragment_shift_name));
}
if (unmodified_sequence.hasSubstring("F"))
{
const double immonium_ion_mz = EmpiricalFormula("C8H10N").getMonoWeight() + fragment_shift_mass;
partial_loss_spectrum.emplace_back(immonium_ion_mz, 1.0);
partial_loss_spectrum_charge.emplace_back(1);
partial_loss_spectrum_annotation.emplace_back(NuXLFragmentAnnotationHelper::getAnnotatedImmoniumIon('F', fragment_shift_name));
}
if (unmodified_sequence.hasSubstring("H"))
{
const double immonium_ion_mz = EmpiricalFormula("C5H8N3").getMonoWeight() + fragment_shift_mass;
partial_loss_spectrum.emplace_back(immonium_ion_mz, 1.0);
partial_loss_spectrum_charge.emplace_back(1);
partial_loss_spectrum_annotation.emplace_back(NuXLFragmentAnnotationHelper::getAnnotatedImmoniumIon('H', fragment_shift_name));
}
if (unmodified_sequence.hasSubstring("C"))
{
const double immonium_ion_mz = EmpiricalFormula("C2H6NS").getMonoWeight() + fragment_shift_mass;
partial_loss_spectrum.emplace_back(immonium_ion_mz, 1.0);
partial_loss_spectrum_charge.emplace_back(1);
partial_loss_spectrum_annotation.emplace_back(NuXLFragmentAnnotationHelper::getAnnotatedImmoniumIon('C', fragment_shift_name));
}
if (unmodified_sequence.hasSubstring("P"))
{
const double immonium_ion_mz = EmpiricalFormula("C4H8N").getMonoWeight() + fragment_shift_mass;
partial_loss_spectrum.emplace_back(immonium_ion_mz, 1.0);
partial_loss_spectrum_charge.emplace_back(1);
partial_loss_spectrum_annotation.emplace_back(NuXLFragmentAnnotationHelper::getAnnotatedImmoniumIon('P', fragment_shift_name));
}
if (unmodified_sequence.hasSubstring("L") || unmodified_sequence.hasSubstring("I"))
{
const double immonium_ion_mz = EmpiricalFormula("C5H12N").getMonoWeight() + fragment_shift_mass;
partial_loss_spectrum.emplace_back(immonium_ion_mz, 1.0);
partial_loss_spectrum_charge.emplace_back(1);
partial_loss_spectrum_annotation.emplace_back(NuXLFragmentAnnotationHelper::getAnnotatedImmoniumIon('L', fragment_shift_name));
}
if (unmodified_sequence.hasSubstring("K"))
{
// classical immonium ion
const double immonium_ion_mz = EmpiricalFormula("C5H13N2").getMonoWeight() + fragment_shift_mass;
partial_loss_spectrum.emplace_back(immonium_ion_mz, 1.0);
partial_loss_spectrum_charge.emplace_back(1);
partial_loss_spectrum_annotation.emplace_back(NuXLFragmentAnnotationHelper::getAnnotatedImmoniumIon('K', fragment_shift_name));
// TODO: check if only DNA specific and if also other shifts are observed
// according to A. Stuetzer mainly observed with C‘-NH3 (94.0167 Da)
const double immonium_ion2_mz = EmpiricalFormula("C5H10N1").getMonoWeight() + fragment_shift_mass;
partial_loss_spectrum.emplace_back(immonium_ion2_mz, 1.0);
partial_loss_spectrum_charge.emplace_back(1);
partial_loss_spectrum_annotation.emplace_back(String("iK(C5H10N1)" + fragment_shift_name));
// usually only observed without shift (A. Stuetzer)
const double immonium_ion3_mz = EmpiricalFormula("C6H13N2O").getMonoWeight() + fragment_shift_mass;
partial_loss_spectrum.emplace_back(immonium_ion3_mz, 1.0);
partial_loss_spectrum_charge.emplace_back(1);
partial_loss_spectrum_annotation.emplace_back(String("iK(C6H13N2O)" + fragment_shift_name));
}
if (unmodified_sequence.hasSubstring("M"))
{
{
const double immonium_ion_mz = 104.05285 + fragment_shift_mass;
partial_loss_spectrum.emplace_back(immonium_ion_mz, 1.0);
partial_loss_spectrum_charge.emplace_back(1);
partial_loss_spectrum_annotation.emplace_back(NuXLFragmentAnnotationHelper::getAnnotatedImmoniumIon('M', fragment_shift_name));
}
{
const double immonium_ion_mz = EmpiricalFormula("CH5S").getMonoWeight() + fragment_shift_mass; // methionine related fragment
partial_loss_spectrum.emplace_back(immonium_ion_mz, 1.0);
partial_loss_spectrum_charge.emplace_back(1);
partial_loss_spectrum_annotation.emplace_back(NuXLFragmentAnnotationHelper::getAnnotatedImmoniumIon('M', fragment_shift_name));
}
}
}
  /*
   * Add peaks with shifts induced by the RNA/DNA to partial_loss_spectrum:
   * - shifted immonium ions (charge 1 only) for every observable MS2 adduct
   * - shifted a-/b-/y-ion series for charge 1..precursor_charge (capped at 3),
   *   generated by offsetting the provided unshifted template spectra
   * - MS2 precursor peaks carrying the complete NA-oligo for charge 1..z
   * The result is sorted by position before returning.
   */
  void NuXLFragmentIonGenerator::generatePartialLossSpectrum(const String &unmodified_sequence,
                                        const double &fixed_and_variable_modified_peptide_weight,
                                        const String &precursor_rna_adduct,
                                        const double &precursor_rna_mass,
                                        const int &precursor_charge,
                                        const std::vector<NuXLFragmentAdductDefinition> &partial_loss_modification,
                                        const PeakSpectrum& partial_loss_template_z1,
                                        const PeakSpectrum& partial_loss_template_z2,
                                        const PeakSpectrum& partial_loss_template_z3,
                                        PeakSpectrum &partial_loss_spectrum)
  {
    // set up parallel data arrays: [0] = charge, [0] = annotation string
    partial_loss_spectrum.getIntegerDataArrays().resize(1);
    PeakSpectrum::IntegerDataArray& partial_loss_spectrum_charge = partial_loss_spectrum.getIntegerDataArrays()[0];
    partial_loss_spectrum.getStringDataArrays().resize(1); // annotation
    PeakSpectrum::StringDataArray& partial_loss_spectrum_annotation = partial_loss_spectrum.getStringDataArrays()[0];
    // for all observable MS2 adducts ...
    for (Size i = 0; i != partial_loss_modification.size(); ++i)
    {
      // get name and mass of fragment adduct
      const String& fragment_shift_name = partial_loss_modification[i].name; // e.g. U-H2O
      const double fragment_shift_mass = partial_loss_modification[i].mass;
      // ADD: shifted immonium ion peaks of charge 1 (if the amino acid is present in the sequence)
      NuXLFragmentIonGenerator::addShiftedImmoniumIons(
        unmodified_sequence,
        fragment_shift_name,
        fragment_shift_mass,
        partial_loss_spectrum,
        partial_loss_spectrum_charge,
        partial_loss_spectrum_annotation);
      // annotate generated a-,b-,y-ions with fragment shift name;
      // collected in a temporary spectrum first, then appended in one go below
      PeakSpectrum shifted_series_peaks;
      shifted_series_peaks.getStringDataArrays().resize(1); // annotation
      shifted_series_peaks.getIntegerDataArrays().resize(1); // charge
      PeakSpectrum::StringDataArray& shifted_series_annotations = shifted_series_peaks.getStringDataArrays()[0];
      PeakSpectrum::IntegerDataArray& shifted_series_charges = shifted_series_peaks.getIntegerDataArrays()[0];
      // For every charge state
      for (int z = 1; z <= precursor_charge; ++z)
      {
        // 1. add shifted peaks (m/z offset is fragment_shift_mass / z)
        // NOTE(review): the inner loop index `i` shadows the adduct loop index above
        if (z == 1)
        {
          for (Size i = 0; i != partial_loss_template_z1.size(); ++i)
          {
            Peak1D p = partial_loss_template_z1[i];
            p.setMZ(p.getMZ() + fragment_shift_mass);
            shifted_series_peaks.push_back(p);
            shifted_series_annotations.push_back(partial_loss_template_z1.getStringDataArrays()[0][i]);
            shifted_series_charges.push_back(1);
          }
        }
        else if (z == 2)
        {
          for (Size i = 0; i != partial_loss_template_z2.size(); ++i)
          {
            // currently, also contains z=1 precursor peaks which we aleardy added before
            if (partial_loss_template_z2.getIntegerDataArrays()[0][i] == 2)
            {
              Peak1D p = partial_loss_template_z2[i];
              p.setMZ(p.getMZ() + fragment_shift_mass / 2.0);
              shifted_series_peaks.push_back(p);
              shifted_series_annotations.push_back(partial_loss_template_z2.getStringDataArrays()[0][i]);
              shifted_series_charges.push_back(2);
            }
          }
        }
        else if (z == 3)
        {
          for (Size i = 0; i != partial_loss_template_z3.size(); ++i)
          {
            // currently, also contains z=1 and 2 precursor peaks which we aleardy added before
            if (partial_loss_template_z3.getIntegerDataArrays()[0][i] == 3)
            {
              Peak1D p = partial_loss_template_z3[i];
              p.setMZ(p.getMZ() + fragment_shift_mass / 3.0);
              shifted_series_peaks.push_back(p);
              shifted_series_annotations.push_back(partial_loss_template_z3.getStringDataArrays()[0][i]);
              shifted_series_charges.push_back(3);
            }
          }
        }
        else // don't consider fragment ions with charge >= 4
        {
          break;
        }
      }
      // 2. add fragment shift name to annotation of shifted peaks
      for (Size j = 0; j != shifted_series_annotations.size(); ++j)
      {
        shifted_series_annotations[j] += " " + fragment_shift_name;
      }
      // append shifted and annotated ion series to partial loss spectrum
      partial_loss_spectrum.insert(partial_loss_spectrum.end(),
                                   shifted_series_peaks.begin(), shifted_series_peaks.end());
      // std::move strings during insert
      partial_loss_spectrum_annotation.insert(
        partial_loss_spectrum_annotation.end(),
        make_move_iterator(shifted_series_annotations.begin()),
        make_move_iterator(shifted_series_annotations.end())
      );
      partial_loss_spectrum.getIntegerDataArrays()[0].insert(
        partial_loss_spectrum_charge.end(),
        shifted_series_charges.begin(),
        shifted_series_charges.end()
      );
    }
    // ADD: (mainly for ETD) MS2 precursor peaks of the MS1 adduct (total RNA) carrying peptide for all z <= precursor charge
    for (int charge = 1; charge <= static_cast<int>(precursor_charge); ++charge)
    {
      addPrecursorWithCompleteRNA_(fixed_and_variable_modified_peptide_weight,
                                   precursor_rna_adduct,
                                   precursor_rna_mass,
                                   charge,
                                   partial_loss_spectrum,
                                   partial_loss_spectrum_charge,
                                   partial_loss_spectrum_annotation);
    }
    // restore m/z order after all appends
    partial_loss_spectrum.sortByPosition();
  }
void NuXLFragmentIonGenerator::addPrecursorWithCompleteRNA_(
const double fixed_and_variable_modified_peptide_weight,
const String &precursor_rna_adduct,
const double precursor_rna_mass,
const int charge,
PeakSpectrum &partial_loss_spectrum,
MSSpectrum::IntegerDataArray &partial_loss_spectrum_charge,
MSSpectrum::StringDataArray &partial_loss_spectrum_annotation)
{
const double xl_mz = (fixed_and_variable_modified_peptide_weight + precursor_rna_mass +
static_cast<double>(charge) * Constants::PROTON_MASS_U)
/ static_cast<double>(charge);
// only add special ions if there is not already a peak
if (partial_loss_spectrum.findNearest(xl_mz, 1e-4) == -1)
{
partial_loss_spectrum.push_back(Peak1D(xl_mz, 1.0));
partial_loss_spectrum_charge.push_back(charge);
if (charge > 1)
{
partial_loss_spectrum_annotation.push_back(String("[M+")
+ String(charge) + "H+" + precursor_rna_adduct + "]");
}
else
{
partial_loss_spectrum_annotation.push_back(String("[M+H+")
+ precursor_rna_adduct + "]");
}
}
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/NUXL/NuXLMarkerIonExtractor.cpp | .cpp | 2,089 | 67 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/NUXL/NuXLMarkerIonExtractor.h>
#include <OpenMS/PROCESSING/SCALING/Normalizer.h>
using namespace std;
namespace OpenMS
{
NuXLMarkerIonExtractor::MarkerIonsType NuXLMarkerIonExtractor::extractMarkerIons(const PeakSpectrum& s, const double marker_tolerance)
{
MarkerIonsType marker_ions;
marker_ions["A"].push_back(make_pair(136.06231, 0.0));
marker_ions["A"].push_back(make_pair(330.06033, 0.0));
marker_ions["C"].push_back(make_pair(112.05108, 0.0));
marker_ions["C"].push_back(make_pair(306.04910, 0.0));
marker_ions["G"].push_back(make_pair(152.05723, 0.0));
marker_ions["G"].push_back(make_pair(346.05525, 0.0));
marker_ions["U"].push_back(make_pair(113.03509, 0.0));
marker_ions["U"].push_back(make_pair(307.03311, 0.0));
PeakSpectrum spec(s);
Normalizer normalizer;
normalizer.filterSpectrum(spec);
// for each nucleotide with marker ions
for (MarkerIonsType::iterator it = marker_ions.begin(); it != marker_ions.end(); ++it)
{
// for each marker ion of the current nucleotide
for (Size i = 0; i != it->second.size(); ++i)
{
double mz = it->second[i].first;
double max_intensity = 0;
for (PeakSpectrum::ConstIterator sit = spec.begin(); sit != spec.end(); ++sit)
{
if (sit->getMZ() + marker_tolerance < mz)
{
continue;
}
if (mz < sit->getMZ() - marker_tolerance)
{
break;
}
if (fabs(mz - sit->getMZ()) < marker_tolerance)
{
if (max_intensity < sit->getIntensity())
{
max_intensity = sit->getIntensity();
}
}
}
it->second[i].second = max_intensity;
}
}
return marker_ions;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/NUXL/NuXLFDR.cpp | .cpp | 10,182 | 287 | // Copyright (c) 2002-present, The OpenMS Team -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/NUXL/NuXLFDR.h>
#include <OpenMS/ANALYSIS/ID/FalseDiscoveryRate.h>
#include <OpenMS/PROCESSING/ID/IDFilter.h>
#include <OpenMS/FORMAT/IdXMLFile.h>
#include <OpenMS/FORMAT/TextFile.h>
#include <OpenMS/CONCEPT/Exception.h>
using namespace std;
namespace OpenMS
{
  // @brief Construct the FDR helper.
  // @param report_top_hits number of top hits reported per spectrum; values >= 2
  //        make the q-value calculations consider all hits instead of only the best
  NuXLFDR::NuXLFDR(size_t report_top_hits):
    report_top_hits_(report_top_hits)
  {
  }
void NuXLFDR::QValueAtPSMLevel(PeptideIdentificationList& peptide_ids) const
{
FalseDiscoveryRate fdr;
Param p = fdr.getParameters();
p.setValue("add_decoy_proteins", "true"); // we still want decoys in the result (e.g., to run percolator)
p.setValue("add_decoy_peptides", "true");
if (report_top_hits_ >= 2)
{
p.setValue("use_all_hits", "true");
}
fdr.setParameters(p);
fdr.apply(peptide_ids, true); // also calculate Constants::UserParam::PEPTIDE_Q_VALUE
}
void NuXLFDR::splitIntoPeptidesAndXLs(const PeptideIdentificationList& peptide_ids, PeptideIdentificationList& pep_pi, PeptideIdentificationList& xl_pi) const
{
pep_pi.clear();
xl_pi.clear();
for (const auto & pi : peptide_ids)
{
vector<PeptideHit> pep_ph, xl_ph;
for (const auto & ph : pi.getHits())
{
if (static_cast<int>(ph.getMetaValue("NuXL:isXL")) == 0)
{ // only add best hit
if (pep_ph.empty() && xl_ph.empty()) pep_ph.push_back(ph);
}
else
{
if (pep_ph.empty() && xl_ph.empty()) xl_ph.push_back(ph);
}
}
if (!pep_ph.empty()) { pep_pi.push_back(pi); pep_pi.back().setHits(pep_ph); }
if (!xl_ph.empty()) { xl_pi.push_back(pi); xl_pi.back().setHits(xl_ph); }
}
}
void NuXLFDR::mergePeptidesAndXLs(const PeptideIdentificationList& pep_pi, const PeptideIdentificationList& xl_pi, PeptideIdentificationList& peptide_ids) const
{
peptide_ids.clear();
map<String, size_t> native_id_to_id_index;
size_t id_index = 0;
for (const auto & pi : pep_pi)
{
peptide_ids.push_back(pi);
native_id_to_id_index[pi.getMetaValue("spectrum_reference")] = id_index;
++id_index;
}
for (const auto & pi : xl_pi)
{
if (native_id_to_id_index.find(pi.getMetaValue("spectrum_reference")) == native_id_to_id_index.end())
{
peptide_ids.push_back(pi);
}
else
{
// spectrum already identified
size_t index = native_id_to_id_index.at(pi.getMetaValue("spectrum_reference"));
auto hits = peptide_ids[index].getHits();
for (auto h : pi.getHits())
{
hits.push_back(h);
}
peptide_ids[index].setHits(hits);
peptide_ids[index].sort();
}
}
}
void NuXLFDR::calculatePeptideAndXLQValueAtPSMLevel(const PeptideIdentificationList& peptide_ids, PeptideIdentificationList& pep_pi, PeptideIdentificationList& xl_pi) const
{
FalseDiscoveryRate fdr;
Param p = fdr.getParameters();
p.setValue("add_decoy_proteins", "true"); // we still want decoys in the result (e.g., to run percolator)
p.setValue("add_decoy_peptides", "true");
if (report_top_hits_ >= 2)
{
p.setValue("use_all_hits", "true");
}
fdr.setParameters(p);
splitIntoPeptidesAndXLs(peptide_ids, pep_pi, xl_pi);
// calculate PSM and peptide FDRs separately
fdr.apply(xl_pi, true);
fdr.apply(pep_pi, true);
}
// Compute separate q-values for linear peptides and cross-links, tie-break equal
// q-values with a secondary score, filter both classes at the given PSM- and
// peptide-level thresholds and write the filtered results to idXML/TSV files.
// xl_PSM_qvalue_thresholds are processed from loose to strict; xl_pi is filtered
// cumulatively across iterations (intended: each pass only tightens the previous one).
void NuXLFDR::calculatePeptideAndXLQValueAndFilterAtPSMLevel(
  const vector<ProteinIdentification>& protein_ids,
  const PeptideIdentificationList& peptide_ids,
  PeptideIdentificationList& pep_pi,
  double peptide_PSM_qvalue_threshold,
  double peptide_peptide_qvalue_threshold,
  PeptideIdentificationList& xl_pi,
  vector<double> xl_PSM_qvalue_thresholds,
  vector<double> xl_peptidelevel_qvalue_thresholds,
  const String& out_idxml,
  int decoy_factor) const
{
  // calculate separate FDRs for linear peptides and cross-links
  calculatePeptideAndXLQValueAtPSMLevel(peptide_ids, pep_pi, xl_pi);
  // correct XL q-values for an inflated decoy database (decoy_factor > 1)
  if (decoy_factor != 1)
  {
    for (auto & pi : xl_pi)
    {
      for (auto & p : pi.getHits())
      {
        p.setScore(p.getScore() / (double)decoy_factor);
      }
    }
  }
  // q-values might not be unique, so we use a secondary score to break ties:
  // "svm_score" if any hit carries it, otherwise "NuXL:score"
  bool svm_score_exists = false;
  for (auto & pi : xl_pi)
  {
    if (svm_score_exists) break;
    for (auto & p : pi.getHits())
    {
      if (p.metaValueExists("svm_score"))
      {
        svm_score_exists = true;
        break;
      }
    }
  }
  const char* secondary_score = svm_score_exists ? "svm_score" : "NuXL:score";
  // determine range of the secondary score
  double max_score = -1e10;
  double min_score = 1e10;
  for (auto & pi : xl_pi)
  {
    for (auto & p : pi.getHits())
    {
      max_score = std::max(max_score, (double)p.getMetaValue(secondary_score));
      min_score = std::min(min_score, (double)p.getMetaValue(secondary_score));
    }
  }
  // Add a very small value to each q-value to break ties between hits with equal
  // q-values but different secondary scores: a high secondary score will not (or only
  // slightly) increase the q-value, lower scores increase it more.
  // Guard: if all secondary scores are identical (or there are no hits), score_range
  // is <= 0 and the normalization below would divide by zero and write NaN scores.
  const double score_range = max_score - min_score;
  if (score_range > 0.0)
  {
    for (auto & pi : xl_pi)
    {
      for (auto & p : pi.getHits())
      {
        const double small_value = (1.0 - ((double)p.getMetaValue(secondary_score) - min_score) / score_range) * 1e-5;
        p.setScore(p.getScore() + small_value);
      }
    }
  }
  // remove decoys before reporting
  IDFilter::removeDecoyHits(xl_pi);
  IDFilter::removeDecoyHits(pep_pi);
  // filter linear peptides on peptide-level q-value
  if (peptide_peptide_qvalue_threshold > 0.0 && peptide_peptide_qvalue_threshold < 1.0)
  {
    auto checkBadPeptideQValue = [&peptide_peptide_qvalue_threshold](PeptideHit& ph)->bool
    {
      return (double)ph.getMetaValue(Constants::UserParam::PEPTIDE_Q_VALUE) >= peptide_peptide_qvalue_threshold;
    }; // of lambda
    for (auto & pid : pep_pi)
    {
      vector<PeptideHit>& phs = pid.getHits();
      phs.erase(remove_if(phs.begin(), phs.end(), checkBadPeptideQValue), phs.end());
    }
  }
  // filter linear peptides on PSM-level q-value
  if (peptide_PSM_qvalue_threshold > 0.0 && peptide_PSM_qvalue_threshold < 1.0)
  {
    IDFilter::filterHitsByScore(pep_pi, peptide_PSM_qvalue_threshold);
  }
  // store peptide PSM result
  {
    vector<ProteinIdentification> tmp_prots = protein_ids;
    IDFilter::removeUnreferencedProteins(tmp_prots, pep_pi);
    IdXMLFile().store(out_idxml + String::number(peptide_PSM_qvalue_threshold, 4) + "_peptides.idXML", tmp_prots, pep_pi);
  }
  // treat disabled filtering (threshold 0.0) as 100% FDR
  std::replace(xl_PSM_qvalue_thresholds.begin(), xl_PSM_qvalue_thresholds.end(), 0.0, 1.0);
  // important: sort thresholds descending so results are generated by applying
  // increasingly stringent q-value filters on the shrinking xl_pi
  std::sort(xl_PSM_qvalue_thresholds.begin(), xl_PSM_qvalue_thresholds.end(), greater<double>());
  if (xl_PSM_qvalue_thresholds.size() != xl_peptidelevel_qvalue_thresholds.size())
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
      "q-value list for PSMs and peptides differ in size.",
      String(xl_PSM_qvalue_thresholds.size()) + "!=" + String(xl_peptidelevel_qvalue_thresholds.size()));
  }
  for (Size i = 0; i != xl_PSM_qvalue_thresholds.size(); ++i)
  {
    double xlFDR = xl_PSM_qvalue_thresholds[i];
    double xl_peptidelevel_FDR = xl_peptidelevel_qvalue_thresholds[i];
    OPENMS_LOG_INFO << "Writing XL results at xl-FDR: " << xlFDR << endl;
    // filter cross-links on peptide-level q-value
    if (xl_peptidelevel_FDR > 0.0 && xl_peptidelevel_FDR < 1.0)
    {
      auto checkBadPeptideQValue = [&xl_peptidelevel_FDR](PeptideHit& ph)->bool
      {
        return (double)ph.getMetaValue(Constants::UserParam::PEPTIDE_Q_VALUE) >= xl_peptidelevel_FDR;
      }; // of lambda
      for (auto & pid : xl_pi)
      {
        vector<PeptideHit>& phs = pid.getHits();
        phs.erase(remove_if(phs.begin(), phs.end(), checkBadPeptideQValue), phs.end());
      }
    }
    // filter cross-links on PSM-level q-value
    if (xlFDR > 0.0 && xlFDR < 1.0)
    {
      IDFilter::filterHitsByScore(xl_pi, xlFDR);
    }
    vector<ProteinIdentification> tmp_prots = protein_ids;
    IDFilter::removeUnreferencedProteins(tmp_prots, xl_pi);
    // compute coverage by cross-linked peptides (guard against an empty protein run)
    if (!tmp_prots.empty()) { tmp_prots[0].computeCoverage(xl_pi); }
    // write out XL PSM result
    IdXMLFile().store(out_idxml + String::number(xlFDR, 4) + "_XLs.idXML", tmp_prots, xl_pi);
    // write out XL protein result only for results with FDR <= 10%, otherwise we get
    // too many protein associations and large memory consumption
    if (xlFDR <= 0.1)
    {
      OPENMS_LOG_INFO << "Writing XL protein results at xl-FDR: " << xlFDR << endl;
      TextFile tsv_file;
      NuXLProteinReport::annotateProteinModificationForTopHits(tmp_prots, xl_pi, tsv_file);
      tsv_file.store(out_idxml + "proteins" + String::number(xlFDR, 4) + "_XLs.tsv");
    }
  }
}
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/NUXL/NuXLFragmentAnnotationHelper.cpp | .cpp | 4,726 | 138 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/NUXL/NuXLFragmentAnnotationHelper.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/DATASTRUCTURES/String.h>
#include <set>
#include <map>
#include <vector>
#include <algorithm>
using namespace OpenMS;
namespace OpenMS
{
String NuXLFragmentAnnotationHelper::getAnnotatedImmoniumIon(char c, const String& fragment_shift_name)
{
  // build an immonium ion label of the form "i<residue>+<shift>+"
  String annotation("i");
  annotation += c;
  annotation += '+';
  annotation += fragment_shift_name;
  annotation += '+';
  return annotation;
}
std::vector<PeptideHit::PeakAnnotation> NuXLFragmentAnnotationHelper::fragmentAnnotationDetailsToPHFA(
const String& ion_type,
const std::map<Size, std::vector<FragmentAnnotationDetail_> >& ion_annotation_details)
{
std::vector<PeptideHit::PeakAnnotation> fas;
for (const auto& ait : ion_annotation_details)
{
for (const auto& sit : ait.second)
{
PeptideHit::PeakAnnotation fa;
fa.charge = sit.charge;
fa.mz = sit.mz;
fa.intensity = sit.intensity;
if (sit.shift.empty())
{
fa.annotation = ion_type + String(ait.first) + String(fa.charge, '+');
}
else
{
const String annotation_text = ion_type + String(ait.first) + "+" + sit.shift + String(fa.charge, '+');
fa.annotation = annotation_text;
}
fas.push_back(std::move(fa));
}
}
return fas;
}
std::vector<PeptideHit::PeakAnnotation> NuXLFragmentAnnotationHelper::shiftedToPHFA(
const std::map<String,
std::set<std::pair<String, double> > >& shifted_ions)
{
std::vector<PeptideHit::PeakAnnotation> fas;
for (const auto& ait : shifted_ions)
{
for (const auto& sit : ait.second)
{
PeptideHit::PeakAnnotation fa;
fa.charge = 1;
fa.mz = sit.second;
fa.intensity = 1;
const String annotation_text = sit.first;
fa.annotation = annotation_text;
fas.push_back(std::move(fa));
}
}
return fas;
}
String NuXLFragmentAnnotationHelper::shiftedIonsToString(const std::vector<PeptideHit::PeakAnnotation>& as)
{
  // Serialize annotations as '|'-separated "(mz,intensity%,\"label\")" tuples,
  // working on a stably sorted copy so equal elements keep their relative order.
  std::vector<PeptideHit::PeakAnnotation> sorted_annotations(as);
  stable_sort(sorted_annotations.begin(), sorted_annotations.end());
  String result;
  bool first = true;
  for (const auto& a : sorted_annotations)
  {
    if (!first) { result += "|"; } // separator before every element except the first
    first = false;
    result += String("(") + String::number(a.mz, 3) + ","
      + String::number(100.0 * a.intensity, 1) + ",\""
      + a.annotation + "\")";
  }
  return result;
}
void NuXLFragmentAnnotationHelper::addShiftedPeakFragmentAnnotation_(
  const std::map<Size, std::vector<FragmentAnnotationDetail_>>& shifted_b_ions,
  const std::map<Size, std::vector<FragmentAnnotationDetail_>>& shifted_y_ions,
  const std::map<Size, std::vector<FragmentAnnotationDetail_>>& shifted_a_ions,
  const std::vector<PeptideHit::PeakAnnotation>& shifted_immonium_ions,
  const std::vector<PeptideHit::PeakAnnotation>& annotated_marker_ions,
  const std::vector<PeptideHit::PeakAnnotation>& annotated_precursor_ions,
  std::vector<PeptideHit::PeakAnnotation>& fas)
{
  // convert each non-empty ion-indexed detail map (b/y/a) and append to fas
  const std::pair<const char*, const std::map<Size, std::vector<FragmentAnnotationDetail_>>*> detail_maps[] =
  {
    {"b", &shifted_b_ions},
    {"y", &shifted_y_ions},
    {"a", &shifted_a_ions}
  };
  for (const auto& dm : detail_maps)
  {
    if (dm.second->empty()) { continue; }
    const auto converted = fragmentAnnotationDetailsToPHFA(dm.first, *dm.second);
    fas.insert(fas.end(), converted.begin(), converted.end());
  }
  // append the ready-made annotation lists (immonium, marker, precursor ions)
  const std::vector<PeptideHit::PeakAnnotation>* ready_made[] =
  {
    &shifted_immonium_ions,
    &annotated_marker_ions,
    &annotated_precursor_ions
  };
  for (const auto* annotations : ready_made)
  {
    if (!annotations->empty())
    {
      fas.insert(fas.end(), annotations->begin(), annotations->end());
    }
  }
}
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/NUXL/NuXLModificationsGenerator.cpp | .cpp | 20,934 | 559 | // Copyright (c) 2002-present, The OpenMS Team -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/NUXL/NuXLModificationsGenerator.h>
#include <OpenMS/CHEMISTRY/ElementDB.h>
#include <OpenMS/CHEMISTRY/ResidueDB.h>
#include <OpenMS/CHEMISTRY/ResidueModification.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <map>
using namespace std;
namespace OpenMS
{
//static
bool NuXLModificationsGenerator::notInSeq(const String& res_seq, const String& query)
{
// special case: empty query is in every seq -> false
if (query.empty()) { return false; }
// test all k-mers with k=size of query
for (Int l = 0; l <= (Int)res_seq.size() - (Int)query.size(); ++l)
{
String a = res_seq.substr(l, query.size());
String b = query;
sort(a.begin(), a.end());
sort(b.begin(), b.end());
if (a == b) { return false; }
}
return true;
}
//static
/**
 @brief Enumerate all precursor adduct sum formulas and masses for nucleotide cross-links.

 Parses the target nucleotides (e.g., "U=C9H13N2O9P"), the source->target nucleotide
 mappings (e.g., "U->U") and the per-nucleotide precursor modifications (e.g., "U:-H2O"),
 then enumerates all nucleotide chains up to @p max_length (with at most one modified
 nucleotide per chain) and filters them against the restriction sequence(s),
 the cross-linkable nucleotides @p can_xl and the nucleotide groups @p nt_groups.
 The returned result maps each elemental formula to its mono-isotopic mass and to the
 (possibly ambiguous) nucleotide-style formulas it represents.
*/
NuXLModificationMassesResult NuXLModificationsGenerator::initModificationMassesNA(const StringList& target_nucleotides,
const StringList& nt_groups,
const std::set<char>& can_xl,
const StringList& mappings,
const StringList& modifications,
String sequence_restriction,
bool cysteine_adduct,
Int max_length)
{
String original_sequence_restriction = sequence_restriction;
// 152 modification
const String cysteine_adduct_string("C4H8S2O2");//FIXME: why is this changed from ancestor?
const EmpiricalFormula cysteine_adduct_formula(cysteine_adduct_string); // 152 modification
NuXLModificationMassesResult result;
// parse "nucleotide=empirical formula of monophosphate"
// create map target to formula e.g., map "U" to "C10H14N5O7P"
map<String, EmpiricalFormula> map_target_to_formula;
for (auto const & s : target_nucleotides)
{
vector<String> fields;
s.split("=", fields);
map_target_to_formula[fields[0]] = EmpiricalFormula(fields[1]);
}
// read mapping of source to target
map<char, vector<char> > map_source_to_targets;
for (auto const & s : mappings)
{
vector<String> fields;
s.split("->", fields);
map_source_to_targets[fields[0][0]].push_back(fields[1][0]);
}
// extract source nucleotides based on mapping (if provided)
vector<char> source_nucleotides; // nucleotides as expected in the restriction sequence
for (auto const & s : mappings)
{
source_nucleotides.push_back(s[0]);
}
if (sequence_restriction.empty())
{
// no restriction given: generate all source-nucleotide combinations up to max_length
// and concatenate them into one artificial restriction sequence
vector<String> all_combinations;
vector<String> actual_combinations;
// add single source nucleotides to all_combinations
for (Size i = 0; i != source_nucleotides.size(); ++i)
{
all_combinations.emplace_back(source_nucleotides[i]);
actual_combinations.emplace_back(source_nucleotides[i]);
}
for (Int i = 1; i <= max_length - 1; ++i)
{
vector<String> new_combinations;
for (Size n = 0; n != source_nucleotides.size(); ++n)
{
// grow actual_combinations/ all_combinations by one nucleotide
for (Size c = 0; c != actual_combinations.size(); ++c)
{
new_combinations.emplace_back(source_nucleotides[n] + actual_combinations[c]);
all_combinations.emplace_back(source_nucleotides[n] + actual_combinations[c]);
}
}
actual_combinations = new_combinations;
}
for (Size i = 0; i != all_combinations.size(); ++i)
{
sequence_restriction += all_combinations[i];
}
}
// erase trivial cases:
// 1. identity: only A->A provided
// 2. rename: only A->X provided
// but keep cominatorial cases: e.g., both A->X and A->A provided.
for (auto sit = map_source_to_targets.begin(); sit != map_source_to_targets.end(); )
{
char source = sit->first;
char first_target = sit->second[0];
if (sit->second.size() == 1 && source == first_target) // trivial case e.g. A->A... no substitution needed
{
map_source_to_targets.erase(sit++);
}
else if (sit->second.size() == 1 && source != first_target) // simple rename e.g. A->X... simply substitute all in restriction sequence
{
sequence_restriction.substitute(source, first_target);
map_source_to_targets.erase(sit++);
}
else // multiple targets
{
++sit;
}
}
if (!map_source_to_targets.empty() && sequence_restriction.empty())
{
OPENMS_LOG_WARN << "WARNING: no restriction on sequence but multiple target nucleotides specified."
<< "May generate huge amount of sequences considered as adduct." << endl;
}
using NucleotideModificationSubFormula = pair<EmpiricalFormula, bool>; // e.g., "H2O", true
using NucleotideModification = vector<NucleotideModificationSubFormula>;
using NucleotideModifications = vector<NucleotideModification>;
// map nucleotide to list of empirical MS1 precursor losses/gains
// nucleotide->all loss/gain formulas (each composed of subformulae)->subformulae
map<String, NucleotideModifications> map_to_nucleotide_modifications;
for (String m : modifications)
{
// extract target nucleotide
if (m[1] != ':')
{
throw Exception::MissingInformation(
__FILE__,
__LINE__,
OPENMS_PRETTY_FUNCTION,
" Modifications parameter must specify nucleotide and forulas in format 'U:+H2O-H2O'.");
};
String target_nucleotide = m[0];
NucleotideModification nucleotide_modification;
m = m.substr(2); // remove nucleotide and ':' from front of string
// decompose string into subformulae:
// a '#' is inserted before every sign so the string splits into "+X"/"-X" tokens
m.substitute("-", "#-");
m.substitute("+", "#+");
vector<String> ems;
m.split("#", ems);
for (Size j = 0; j != ems.size(); ++j)
{
if (ems[j].empty()) { continue; }
bool mod_is_subtractive(false);
if (ems[j][0] == '-')
{
mod_is_subtractive = true;
ems[j].remove('-');
}
else if (ems[j][0] == '+')
{
ems[j].remove('+');
}
EmpiricalFormula ef(ems[j]);
ef.setCharge(0);
NucleotideModificationSubFormula sf = make_pair(ef, mod_is_subtractive);
nucleotide_modification.push_back(sf);
}
// add formula to target nucleotide
map_to_nucleotide_modifications[target_nucleotide].push_back(nucleotide_modification);
}
// generate all target sequences by substituting each source nucleotide by their target nucleotide(s)
StringList target_sequences;
generateTargetSequences(sequence_restriction, 0, map_source_to_targets, target_sequences);
OPENMS_LOG_INFO << "sequence(s):" << target_sequences.size() << endl;
if (!original_sequence_restriction.empty())
{
// log the (possibly truncated) target sequences for user inspection
for (Size i = 0; i != target_sequences.size(); ++i)
{
if (target_sequences[i].size() < 60)
{
OPENMS_LOG_INFO << target_sequences[i] << endl;
}
else
{
OPENMS_LOG_INFO << target_sequences[i].prefix(60) << "..." << endl;
}
}
}
{
// Append precursor modifications (e.g., "-H2O")
// to generate modified nucleotides: e.g.: "U" -> "U", "U-H2O", ...
vector<EmpiricalFormula> actual_combinations;
for (auto mit = map_target_to_formula.cbegin(); mit != map_target_to_formula.cend(); ++mit)
{
String target_nucleotide = mit->first;
OPENMS_LOG_INFO << "nucleotide: " << target_nucleotide << endl;
EmpiricalFormula target_nucleotide_formula = mit->second;
// get all precursor modifications for current nucleotide
NucleotideModifications nt_mods = map_to_nucleotide_modifications[target_nucleotide];
set<String> formulas_of_modified_nucleotide;
for (const NucleotideModification & nt_mod : nt_mods) // loop over list of nucleotide specific modifications
{
EmpiricalFormula sum_formula(target_nucleotide_formula);
String nt(target_nucleotide);
for (NucleotideModificationSubFormula const & sf : nt_mod) // loop over subformulae
{
// concatenate additive / subtractive substrings (e.g., "+H2O", "-H3PO")
EmpiricalFormula mod_ef(sf.first);
String mod(sf.first.toString());
if (sf.second)
{ // subtractive
nt += "-" + mod; // e.g., U-H2O
sum_formula = sum_formula - mod_ef; // sum formula of e.g. U-H2O
}
else
{ // additive
nt += "+" + mod; // e.g., U+H3PO4
sum_formula = sum_formula + mod_ef; // sum formula
}
}
if (formulas_of_modified_nucleotide.find(sum_formula.toString()) == formulas_of_modified_nucleotide.end())
{
actual_combinations.push_back(sum_formula);
result.mod_combinations[sum_formula.toString()].insert(nt); // add sum formula -> nucleotide
OPENMS_LOG_INFO << "\t" << "modifications: " << nt << "\t\t" << sum_formula.toString() << endl;
}
else
{
OPENMS_LOG_WARN << "WARNING:\tNucleotide + formula combination: " << nt << "\t\t" << sum_formula.toString()
<< " occured several times. Did you specify it multiple times in the ini file?. Will consider skip this entry." << endl;
}
}
}
// Generate >=1 nucleotide precursor adducts (e.g., "UU-H2O-H3PO")
// In every loop iteration, an unmodified target_nucleotide (e.g., "U", "A", ... ) is added to the chain
// The first element of the chain is an unmodified AND modified nucleotides.
// That way, at most one modified nucleotide is part of the chain
vector<EmpiricalFormula> all_combinations = actual_combinations;
for (Int i = 0; i < max_length - 1; ++i)
{
vector<EmpiricalFormula> new_combinations;
for (auto const & target_to_formula : map_target_to_formula) // loop over nucleotides (unmodified)
{
const String & target_nucleotide = target_to_formula.first;
const EmpiricalFormula & target_nucleotide_formula = target_to_formula.second;
for (EmpiricalFormula const & ac : actual_combinations) // append unmodified nucleotide to yield a (i+1)-mer
{
new_combinations.push_back(target_nucleotide_formula + ac - EmpiricalFormula("H2O")); // -H2O because of condensation reaction
all_combinations.push_back(target_nucleotide_formula + ac - EmpiricalFormula("H2O")); // " "
const auto& ambiguities = result.mod_combinations[ac.toString()];
for (auto const & s : ambiguities)
{
result.mod_combinations[all_combinations.back().toString()].insert(target_nucleotide + s);
OPENMS_LOG_DEBUG << target_nucleotide + s << endl;
}
}
}
actual_combinations = new_combinations;
}
// record the mono-isotopic mass of every generated sum formula
for (Size i = 0; i != all_combinations.size(); ++i)
{
result.formula2mass[all_combinations[i].toString()] = all_combinations[i].getMonoWeight();
}
}
OPENMS_LOG_INFO << "Filtering on restrictions... " << endl;
// Remove precursor adducts that
// 1) do not contain a cross-linkable nucleotide
// 2) or contain no cross-linkable nucleotide that is part of the restricted target sequences
// 3) exceed maximum number of nucleotides
// 4) has multiple occurances of lower-case nucleotides/sugars
// - (e.g, "d" may only occur once to model binding to deoxyribose)
// keep track if a sorted nucleotide composition and modification has already been added
// e.g. we would not add both: UC-H2O-NH3 and CU-NH5O
std::vector<pair<String, double> > unique_nucleotide_and_mod_composition;
std::vector<pair<String, String> > violates_restriction; // elemental composition, nucleotide style formula
for (const auto& [formula, mass] : result.formula2mass)
{
// remove additive or subtractive modifications from string as these are not used in string comparison
const NuXLModificationMassesResult::NucleotideFormulas& ambiguities = result.mod_combinations[formula];
for (String const & s : ambiguities)
{
String nucleotide_style_formula(s);
// get nucleotide formula without losses / gains (e.g., "U" instead of "U-H2O")
Size p1 = nucleotide_style_formula.find('-');
Size p2 = nucleotide_style_formula.find('+');
Size p = min(p1, p2);
if (p != String::npos)
{
nucleotide_style_formula = nucleotide_style_formula.prefix(p);
}
// sort nucleotides so we compare based on nucleotide composition
// e.g.: AC-H2O and CA-H2O are considered the same
std::sort(nucleotide_style_formula.begin(), nucleotide_style_formula.end());
// restrict mandatory cross-linked nts/sugars (lowercase letters) to one
// e.g., could be a sugar that MUST be cross-linked
size_t count_lower = count_if(nucleotide_style_formula.begin(), nucleotide_style_formula.end(),
[](unsigned char c) { return islower(c); });
if (count_lower >= 2)
{
violates_restriction.emplace_back(formula, s);
continue;
}
// check if nucleotide formula contains a cross-linkable amino acid
bool has_xl_nt(false);
for (auto const & c : nucleotide_style_formula) { if (can_xl.count(c) > 0) { has_xl_nt = true; break;}; }
if (!has_xl_nt)
{ // no cross-linked nucleotide => not valid
violates_restriction.emplace_back(formula, s);
continue;
}
// check if nucleotide sequence too long
if ((int)nucleotide_style_formula.size() > max_length)
{
violates_restriction.emplace_back(formula, s);
continue;
}
// check if nucleotides from more than one nt_group are present (e.g., from DNA and RNA)
Size found_in_n_groups(0);
for (const String & n : nt_groups)
{
if (nucleotide_style_formula.find_first_of(n) != string::npos) { ++found_in_n_groups; }
}
// nucleotide style formula (e.g., AATU matches to more than one group (e.g., RNA and DNA))?
if (found_in_n_groups > 1)
{
violates_restriction.push_back({formula, s});
continue;
}
// check if nucleotide is contained in at least one of the target sequences
bool containment_violated(false);
Size violation_count(0);
for (const String & current_target_seq : target_sequences)
{
if (notInSeq(current_target_seq, nucleotide_style_formula)) { ++violation_count; }
}
if (violation_count == target_sequences.size()) { containment_violated = true; }
if (containment_violated)
{
violates_restriction.push_back({formula, s}); // chemical formula, nucleotide style formula pair violates restrictions
}
// last check: if the sorted nucleotide composition string and mass have already been added
// if so, we don't need to consider that composition again
if (
find(unique_nucleotide_and_mod_composition.begin(),
unique_nucleotide_and_mod_composition.end(),
make_pair(nucleotide_style_formula, mass)) != unique_nucleotide_and_mod_composition.end())
{
violates_restriction.push_back({formula, s});
}
// record that nucleotide and mod combination has passed all filters and will be considered in further processing
unique_nucleotide_and_mod_composition.push_back({nucleotide_style_formula, mass});
}
}
// actually remove everything that violated a restriction above
for (size_t i = 0; i != violates_restriction.size(); ++i)
{
const String& chemical_formula = violates_restriction[i].first;
result.mod_combinations[chemical_formula].erase(violates_restriction[i].second);
OPENMS_LOG_DEBUG << "filtered sequence: "
<< chemical_formula
<< "\t"
<< violates_restriction[i].second << endl;
}
// standard associative-container erase idiom
for (auto mcit = result.mod_combinations.begin(); mcit != result.mod_combinations.end(); )
{
if (mcit->second.empty())
{
result.formula2mass.erase(mcit->first); // remove from mod masses
result.mod_combinations.erase(mcit++); // don't change precedence !
}
else
{
++mcit; // don't change precedence !
}
}
// Optional: add cystein (DTT) adduct
if (cysteine_adduct)
{
result.formula2mass[cysteine_adduct_formula.toString()] = cysteine_adduct_formula.getMonoWeight();
result.mod_combinations[cysteine_adduct_formula.toString()].insert(cysteine_adduct_string);
}
// output index -> empirical formula -> (ambiguous) nucleotide formulas
// nucleotide formulas which only differ in nucleotide ordering are only printed once
// e.g. 5 C19H24N7O12P1 573.122 ( AU-H1O3P1 )
double index = 1;
for (auto const & m : result.formula2mass)
{
if (cysteine_adduct && m.first == cysteine_adduct_formula.toString())
{
OPENMS_LOG_INFO << "Precursor adduct " << index++ << "\t:\t" << m.first << " " << m.second << " ( cysteine adduct )" << endl;
continue;
}
OPENMS_LOG_INFO << "Precursor adduct " << index++ << "\t:\t" << m.first << " " << m.second << " ( ";
const NuXLModificationMassesResult::NucleotideFormulas& ambiguities = result.mod_combinations[m.first];
set<String> printed;
// for all ambiguities (same empirical formula)
for (String nucleotide_style_formula : ambiguities)
{
Size p1 = nucleotide_style_formula.find('-');
Size p2 = nucleotide_style_formula.find('+');
Size p = min(p1, p2);
// sort nucleotides up to beginning of modification (first '+' or '-')
if (p != String::npos)
{
std::sort(nucleotide_style_formula.begin(), nucleotide_style_formula.begin() + p);
}
else
{
std::sort(nucleotide_style_formula.begin(), nucleotide_style_formula.end());
}
// only print ambiguous sequences once
if (printed.find(nucleotide_style_formula) == printed.end())
{
OPENMS_LOG_INFO << nucleotide_style_formula << " ";
printed.insert(nucleotide_style_formula);
}
else
{
OPENMS_LOG_DEBUG << "Same nucleotide composition generated for: " << nucleotide_style_formula
<< " will only consider it once to prevent duplicate precursor adducts." << endl;
}
}
OPENMS_LOG_INFO << ")" << endl;
}
OPENMS_LOG_INFO << "Finished generation of modification masses." << endl;
return result;
}
//static
// Recursively generate all sequences obtainable from res_seq by substituting each
// source nucleotide with any of its mapped target nucleotides (starting at param_pos).
// Only fully valid sequences — every position is either not a source nucleotide, or a
// source nucleotide that also maps to itself — are appended to target_sequences.
void NuXLModificationsGenerator::generateTargetSequences(const String& res_seq,
Size param_pos,
const map<char, vector<char> >& map_source2target,
StringList& target_sequences)
{
while (param_pos < res_seq.size())
{
// check if current character is in source 2 target map
auto target_iterator = map_source2target.find(res_seq[param_pos]);
if (target_iterator == map_source2target.end())
{
++param_pos;
}
else // yes?
{
const vector<char>& targets = target_iterator->second;
for (Size i = 0; i != targets.size(); ++i)
{
// modify sequence
String mod_seq = res_seq;
if (mod_seq[param_pos] != targets[i])
{
// recurse on the substituted copy; this invocation keeps scanning res_seq unchanged
mod_seq[param_pos] = targets[i];
generateTargetSequences(mod_seq, param_pos + 1, map_source2target, target_sequences);
}
}
++param_pos;
}
}
// check and add only valid sequences (containing only target nucleotides or nucleotides that are both source and target nucleotides)
Size count = 0;
for (Size pos = 0; pos != res_seq.size(); ++pos)
{
auto target_iterator = map_source2target.find(res_seq[pos]);
// no pure source nucleotide?
if (target_iterator == map_source2target.end())
{
count++;
}
else // check if source nucleotide is also a valid target nucleotide
{
const vector<char>& targets = target_iterator->second;
for (Size i = 0; i != targets.size(); ++i)
{
if (res_seq[pos] == targets[i]) { count++; }
}
}
}
// accept only if every position passed one of the checks above
if (count == res_seq.size())
{
target_sequences.push_back(res_seq);
}
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/NUXL/NuXLAnnotateAndLocate.cpp | .cpp | 40,751 | 856 | // Copyright (c) 2002-present, The OpenMS Team -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/NUXL/NuXLFragmentIonGenerator.h>
#include <OpenMS/ANALYSIS/NUXL/NuXLAnnotateAndLocate.h>
#include <OpenMS/ANALYSIS/NUXL/NuXLFragmentAnnotationHelper.h>
#include <OpenMS/ANALYSIS/NUXL/NuXLConstants.h>
#include <OpenMS/ANALYSIS/XLMS/OPXLSpectrumProcessingAlgorithms.h>
#include <OpenMS/CHEMISTRY/TheoreticalSpectrumGenerator.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/CONCEPT/Macros.h> // for OPENMS_PRECONDITION
#include <OpenMS/CONCEPT/LogStream.h>
#include <regex>
using namespace std;
//#define DEBUG_OpenNuXL
namespace OpenMS
{
// create total loss spectrum using new_param as template
// (theoretical peptide fragment spectrum without nucleotide adducts on the fragments;
// used as the reference for ion-centric annotation)
static PeakSpectrum createTotalLossSpectrumForAnnotations(const AASequence& fixed_and_variable_modified_peptide, size_t precursor_charge, Param new_param)
{
PeakSpectrum total_loss_spectrum;
TheoreticalSpectrumGenerator tmp_generator;
// enable a rich ion set (all precursor charges, immonium ions, losses,
// a-ions, internal fragments) for annotation purposes
new_param.setValue("add_all_precursor_charges", "true");
new_param.setValue("add_abundant_immonium_ions", "true");
new_param.setValue("add_losses", "true");
new_param.setValue("add_term_losses", "true");
new_param.setValue("add_a_ions", "true");
new_param.setValue("add_internal_fragments", "true");
tmp_generator.setParameters(new_param);
tmp_generator.getSpectrum(total_loss_spectrum, fixed_and_variable_modified_peptide, 1, precursor_charge);
const String& unmodified_sequence = fixed_and_variable_modified_peptide.toUnmodifiedString();
const bool contains_Methionine = unmodified_sequence.has('M');
if (contains_Methionine) // add mainly DEB + NM related precursor losses
{
static const double M_star_pc_loss = EmpiricalFormula("CH4S").getMonoWeight(); // methionine related loss on precursor (see OpenNuXL for scoring related code)
// add a "[M+zH]-CH4S" precursor peak for every charge state up to the precursor charge
for (size_t charge = 1; charge <= precursor_charge; ++charge)
{
String ion_name = (charge == 1) ? "[M+H]-CH4S" : "[M+" + String(charge) + "H]-CH4S";
total_loss_spectrum.getStringDataArrays()[0].push_back(ion_name);
total_loss_spectrum.getIntegerDataArrays()[NuXLConstants::IA_CHARGE_INDEX].push_back(charge);
double mono_pos = fixed_and_variable_modified_peptide.getMonoWeight(Residue::Full, charge) - M_star_pc_loss; // precursor peak
total_loss_spectrum.emplace_back(mono_pos / (double)charge, 1.0);
}
}
// add special immonium ions
NuXLFragmentIonGenerator::addSpecialLysImmonumIons(
unmodified_sequence,
total_loss_spectrum,
total_loss_spectrum.getIntegerDataArrays()[NuXLConstants::IA_CHARGE_INDEX],
total_loss_spectrum.getStringDataArrays()[0]);
total_loss_spectrum.sortByPosition(); // need to resort after adding special immonium ions
return total_loss_spectrum;
}
/**
  @brief Removes duplicate peaks based on their m/z values.

  Keeps only the first occurrence of each m/z value and drops the rest,
  together with the corresponding entries of the data arrays (via `select()`).

  @note The spectrum must be sorted by position (m/z) before calling this
  function so that duplicates are adjacent.
*/
static void removeDuplicatedPeaks(MSSpectrum& spec)
{
  if (spec.empty()) return;
  // collect the index of the first occurrence of every distinct m/z
  std::vector<Size> kept;
  kept.reserve(spec.size());
  kept.push_back(0); // the first peak is always retained
  for (Size idx = 1; idx < spec.size(); ++idx)
  {
    if (spec[idx].getMZ() != spec[kept.back()].getMZ())
    {
      kept.push_back(idx);
    }
#ifdef DEBUG_OpenNuXL
    else
    {
      // happens a lot with precursor peaks and internal ions
      std::cout << "Removing duplicate peak at m/z: " << spec[idx].getMZ() << endl;
      std::cout << spec.getStringDataArrays()[0][idx] << " - "
                << spec.getStringDataArrays()[0][kept.back()] << std::endl;
    }
#endif
  }
  // fast path: no duplicates present, nothing to rewrite
  if (kept.size() == spec.size()) return;
  spec.select(kept);
}
using MapIonIndexToFragmentAnnotation = map<Size, vector<NuXLFragmentAnnotationHelper::FragmentAnnotationDetail_> >;

/**
  @brief Ion-centric (e.g. b- and y-ion) spectrum annotation for unshifted ions
         (will later be merged with shifted ones).

  For every pair in @p alignment (theoretical peak index, experimental peak index)
  the matched theoretical ion is classified by its name and routed into the
  matching output container:
  - 'a'/'b'/'y' ions containing a neutral loss (name contains '-'): @p unshifted_loss_ions
  - 'a'/'b'/'y' ions without loss: @p unshifted_a_ions / @p unshifted_b_ions /
    @p unshifted_y_ions, keyed by the ion number parsed from the name (e.g. "y3++" -> 3)
  - precursor ions (prefix "[M+"): @p annotated_precursor_ions
  - immonium ions (prefix "i") and internal ions (uppercase first character):
    @p annotated_immonium_ions
  The experimental peak index of every annotated fragment is inserted into
  @p peak_is_annotated so the subsequent shifted-ion pass can skip peaks that
  are already explained. Peaks whose experimental charge annotation disagrees
  with the theoretical charge are skipped with a warning.

  @return flat list of peak annotations (b, y, a series, immonium/internal and
          loss ions, in that order); empty if @p exp_spectrum is empty
*/
static vector<PeptideHit::PeakAnnotation> createIonCentricFragmentAnnotationsForUnshiftedIons(
  const PeakSpectrum& total_loss_spectrum,
  const PeakSpectrum& exp_spectrum,
  const vector<pair<Size, Size>>& alignment,
  set<Size>& peak_is_annotated,
  vector<PeptideHit::PeakAnnotation>& annotated_precursor_ions,
  MapIonIndexToFragmentAnnotation& unshifted_b_ions,
  MapIonIndexToFragmentAnnotation& unshifted_y_ions,
  MapIonIndexToFragmentAnnotation& unshifted_a_ions,
  vector<PeptideHit::PeakAnnotation>& unshifted_loss_ions,
  vector<PeptideHit::PeakAnnotation>& annotated_immonium_ions
  )
{
  if (exp_spectrum.empty()) return {};

  const PeakSpectrum::StringDataArray& total_loss_annotations = total_loss_spectrum.getStringDataArrays()[0];
  const PeakSpectrum::IntegerDataArray& total_loss_charges = total_loss_spectrum.getIntegerDataArrays()[NuXLConstants::IA_CHARGE_INDEX];

  // create total loss annotations
  for (auto const & aligned : alignment)
  {
    // information on the experimental fragment in the alignment
    const Size& fragment_index = aligned.second;
    const Peak1D& fragment = exp_spectrum[fragment_index];
    const double fragment_intensity = fragment.getIntensity(); // in percent (%)
    const double fragment_mz = fragment.getMZ();
    const String& ion_name = total_loss_annotations[aligned.first];
    const int charge = total_loss_charges[aligned.first];

    if (exp_spectrum.getIntegerDataArrays()[NuXLConstants::IA_CHARGE_INDEX][fragment_index] != charge)
    {
      OPENMS_LOG_WARN << "Charges in alignment do not match. Skipping annotation of fragment " << fragment_index << " with ion " << ion_name << " at " << fragment_mz << " m/z." << endl;
      continue;
    }
    OPENMS_PRECONDITION(exp_spectrum.getIntegerDataArrays()[NuXLConstants::IA_CHARGE_INDEX][fragment_index] == charge, "Charges in alignment must match.");

    // builds a PeakAnnotation for the current experimental fragment
    auto make_annotation = [&]()
    {
      PeptideHit::PeakAnnotation fa;
      fa.mz = fragment_mz;
      fa.intensity = fragment_intensity;
      fa.charge = charge;
      fa.annotation = ion_name;
      return fa;
    };

    // define which ion names are annotated
    const char ion_type = ion_name[0];
    if (ion_type == 'y' || ion_type == 'b' || ion_type == 'a') // backbone ion series
    {
      const bool ion_has_neutral_loss = (ion_name.find_first_of('-') != string::npos);
      if (ion_has_neutral_loss) // ion with neutral loss e.g. water
      {
        unshifted_loss_ions.push_back(make_annotation());
      }
      else // no neutral loss: record detail under the ion number (e.g. "y3++" -> 3)
      {
        const Size charge_pos = ion_name.find_first_of('+'); // charge indicator at end
        const String ion_nr_string = ion_name.substr(1, charge_pos - 1);
        const Size ion_number = (Size)ion_nr_string.toInt();
        NuXLFragmentAnnotationHelper::FragmentAnnotationDetail_ d("", charge, fragment_mz, fragment_intensity);
        MapIonIndexToFragmentAnnotation& ion_map = (ion_type == 'y') ? unshifted_y_ions
                                                 : (ion_type == 'b') ? unshifted_b_ions
                                                 : unshifted_a_ions;
        ion_map[ion_number].push_back(d);
      }
      peak_is_annotated.insert(aligned.second);
    }
    else if (ion_name.hasPrefix("[M+")) // precursor ion
    {
      annotated_precursor_ions.push_back(make_annotation());
      peak_is_annotated.insert(aligned.second);
    }
    else if (ion_name.hasPrefix("i")) // immonium ion
    {
      annotated_immonium_ions.push_back(make_annotation());
      peak_is_annotated.insert(aligned.second);
    }
    else if (isupper(ion_name[0])) // internal ions
    {
      annotated_immonium_ions.push_back(make_annotation()); //TODO: add to annotated_internal_fragment_ions or rename vector
      peak_is_annotated.insert(aligned.second);
    }
  }

  // generate fragment annotation strings for unshifted ions (order: b, y, a)
  vector<PeptideHit::PeakAnnotation> fas;
  const pair<const char*, const MapIonIndexToFragmentAnnotation*> ion_series[] = {
    {"b", &unshifted_b_ions},
    {"y", &unshifted_y_ions},
    {"a", &unshifted_a_ions}};
  for (const auto& series : ion_series)
  {
    if (series.second->empty()) { continue; }
    const vector<PeptideHit::PeakAnnotation>& fas_tmp = NuXLFragmentAnnotationHelper::fragmentAnnotationDetailsToPHFA(series.first, *series.second);
    fas.insert(fas.end(), fas_tmp.begin(), fas_tmp.end());
  }
  // append immonium/internal and neutral-loss annotations (inserting empty ranges is a no-op)
  fas.insert(fas.end(), annotated_immonium_ions.begin(), annotated_immonium_ions.end());
  fas.insert(fas.end(), unshifted_loss_ions.begin(), unshifted_loss_ions.end());
  return fas;
}
// static
/**
  @brief Annotates fragment peaks of all candidate hits and localizes the cross-linked amino acid.

  For each spectrum (parallelized over scan indices) and each of its annotated hits:
  1. reapplies fixed/variable modifications to recreate the modified peptide from the stored index,
  2. generates a total-loss spectrum (peptide without NA adduct), aligns it to the experimental
     spectrum (matching charges only) and annotates unshifted a/b/y, loss, immonium, internal and
     precursor ions,
  3. for cross-linked precursors (adduct != "none"), generates a merged partial-loss spectrum
     (templates for charge 1-3 plus feasible MS2 fragment adducts and marker ions), aligns it and
     annotates the shifted ions,
  4. scores each sequence position from shifted/unshifted prefix (a/b) and suffix (y) ion ladders
     plus shifted immonium ions, and stores the best-scoring localization.

  Results are written back into each NuXLAnnotatedHit: fragment_annotations, localization_scores,
  best_localization (cross-linked residue lower-cased), best_localization_score and
  best_localization_position.

  @param exp                              experimental spectra, indexed by scan index
  @param annotated_hits                   per-scan candidate hits; updated in place
  @param mm                               precursor NA modification masses/combinations (indexed by NA_mod_index)
  @param fixed_modifications              fixed modifications to reapply
  @param variable_modifications           variable modifications to reapply
  @param max_variable_mods_per_peptide    limit used when regenerating modified peptides
  @param fragment_mass_tolerance          tolerance used for both spectrum alignments
  @param fragment_mass_tolerance_unit_ppm true: ppm, false: Da
  @param all_feasible_adducts             maps precursor NA adduct to feasible MS2 adducts and marker ions
*/
void NuXLAnnotateAndLocate::annotateAndLocate_(
const PeakMap& exp,
vector<vector<NuXLAnnotatedHit>>& annotated_hits,
const NuXLModificationMassesResult& mm,
const ModifiedPeptideGenerator::MapToResidueType& fixed_modifications,
const ModifiedPeptideGenerator::MapToResidueType& variable_modifications,
Size max_variable_mods_per_peptide,
double fragment_mass_tolerance,
bool fragment_mass_tolerance_unit_ppm,
const NuXLParameterParsing::PrecursorsToMS2Adducts & all_feasible_adducts)
{
// configure the generator once; used for both partial-loss templates and (via its
// parameters) the total-loss spectrum created per hit below
TheoreticalSpectrumGenerator partial_loss_spectrum_generator;
auto param = partial_loss_spectrum_generator.getParameters();
param.setValue("add_first_prefix_ion", "true");
param.setValue("add_abundant_immonium_ions", "false"); // we add them manually for charge 1
param.setValue("add_precursor_peaks", "true");
param.setValue("add_all_precursor_charges", "false"); // we add them manually for every charge
param.setValue("add_metainfo", "true");
param.setValue("add_a_ions", "true");
param.setValue("add_b_ions", "true");
param.setValue("add_c_ions", "false");
param.setValue("add_x_ions", "false");
param.setValue("add_y_ions", "true");
param.setValue("add_z_ions", "false");
param.setValue("add_internal_fragments", "false"); // TODO: creates too many ions with identical masses
partial_loss_spectrum_generator.setParameters(param);
// scans are independent of each other, so annotate them in parallel
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (SignedSize scan_index = 0; scan_index < (SignedSize)annotated_hits.size(); ++scan_index)
{
if (annotated_hits[scan_index].empty()) { continue; }
const PeakSpectrum & exp_spectrum = exp[scan_index];
const Size & precursor_charge = exp_spectrum.getPrecursors()[0].getCharge();
for (auto & a : annotated_hits[scan_index])
{
// get unmodified string
const String unmodified_sequence = a.sequence.getString();
// initialize result fields (defaults used when no localization is possible)
a.best_localization = unmodified_sequence;
a.best_localization_score = 0;
AASequence aas(unmodified_sequence);
// reapply modifications (because for memory reasons we only stored the index and recreation is fast)
vector<AASequence> all_modified_peptides;
ModifiedPeptideGenerator::applyFixedModifications(fixed_modifications, aas);
ModifiedPeptideGenerator::applyVariableModifications(variable_modifications, aas, max_variable_mods_per_peptide, all_modified_peptides);
// sequence with modifications - note: reannotated version requires much more memory heavy AASequence object
const AASequence& fixed_and_variable_modified_peptide = all_modified_peptides[a.peptide_mod_index];
const double fixed_and_variable_modified_peptide_weight = fixed_and_variable_modified_peptide.getMonoWeight();
// determine NA on precursor from index in map
auto mod_combinations_it = mm.mod_combinations.cbegin();
std::advance(mod_combinations_it, a.NA_mod_index);
const String precursor_na_adduct = *mod_combinations_it->second.begin(); // TODO: verify that considering only the first precursor adduct of the combination is sufficient
const double precursor_na_mass = EmpiricalFormula(mod_combinations_it->first).getMonoWeight();
// generate total loss spectrum for the fixed and variable modified peptide (without NAs) (using the settings for partial loss generation)
// but as we also add the abundant immonium ions for charge 1 and precursor ions for all charges to get a more complete annotation
// (these have previously not been used in the scoring of the total loss spectrum)
PeakSpectrum total_loss_spectrum = createTotalLossSpectrumForAnnotations(fixed_and_variable_modified_peptide, precursor_charge, partial_loss_spectrum_generator.getParameters()); // use same parameters
// first annotate total loss peaks (these give no information where the actual shift occured)
#ifdef DEBUG_OpenNuXL
OPENMS_LOG_DEBUG << "Annotating ion (total loss spectrum): " << fixed_and_variable_modified_peptide.toString() << endl;
#endif
vector<pair<Size, Size>> alignment;
// align spectra (only allow matching charges)
DataArrays::FloatDataArray ppm_error_array; // not needed here but filled by alignment
removeDuplicatedPeaks(total_loss_spectrum); // remove duplicate peaks based on m/z
OPXLSpectrumProcessingAlgorithms::getSpectrumAlignmentFastCharge(alignment,
fragment_mass_tolerance,
fragment_mass_tolerance_unit_ppm,
total_loss_spectrum,
exp_spectrum,
total_loss_spectrum.getIntegerDataArrays()[NuXLConstants::IA_CHARGE_INDEX],
exp_spectrum.getIntegerDataArrays()[NuXLConstants::IA_CHARGE_INDEX],
ppm_error_array);
// fill annotated spectrum information
set<Size> peak_is_annotated; // experimental peak index
vector<PeptideHit::PeakAnnotation> annotated_precursor_ions; // also used for shifted ones
MapIonIndexToFragmentAnnotation unshifted_b_ions, unshifted_y_ions, unshifted_a_ions;
vector<PeptideHit::PeakAnnotation> unshifted_loss_ions, annotated_immonium_ions;
auto fas = createIonCentricFragmentAnnotationsForUnshiftedIons(total_loss_spectrum, exp_spectrum, alignment, peak_is_annotated,
annotated_precursor_ions,
unshifted_b_ions,
unshifted_y_ions,
unshifted_a_ions,
unshifted_loss_ions,
annotated_immonium_ions
);
// we don't localize on non-cross-links (only annotate)
if (precursor_na_adduct == "none")
{
a.fragment_annotations = fas;
continue;
}
// ion centric (e.g. b and y-ion) spectrum annotation that records all shifts of specific ions (e.g. y5, y5 + U, y5 + C3O)
// generate all partial loss spectra (excluding the complete loss spectrum) merged into one spectrum
// 1. get all possible NA fragment shifts in the MS2 (based on the precursor RNA/DNA)
OPENMS_LOG_DEBUG << "Precursor NA adduct: " << precursor_na_adduct << endl;
const vector<NucleotideToFeasibleFragmentAdducts>& feasible_MS2_adducts = all_feasible_adducts.at(precursor_na_adduct).feasible_adducts;
if (feasible_MS2_adducts.empty()) { continue; } // should not be the case - check case of no nucleotide but base fragment ?
// 2. retrieve the (nucleotide specific) fragment adducts for the cross-linked nucleotide (annotated in main search)
auto nt_to_adducts = std::find_if(feasible_MS2_adducts.begin(), feasible_MS2_adducts.end(),
[&a](NucleotideToFeasibleFragmentAdducts const & item)
{
return (item.first == a.cross_linked_nucleotide);
});
OPENMS_POSTCONDITION(nt_to_adducts != feasible_MS2_adducts.end(), "Nucleotide not found in mapping to feasible adducts.")
vector<NuXLFragmentAdductDefinition> partial_loss_modification = nt_to_adducts->second;
// TODO: check if needed for reproduciblity
std::stable_sort(partial_loss_modification.begin(), partial_loss_modification.end());
// get marker ions (these are not specific to the cross-linked nucleotide but also depend on the whole oligo bound to the precursor)
const vector<NuXLFragmentAdductDefinition>& marker_ions = all_feasible_adducts.at(precursor_na_adduct).marker_ions;
OPENMS_LOG_DEBUG << "Marker ions used for this Precursor NA adduct: " << endl;
for (auto & fa : marker_ions)
{
OPENMS_LOG_DEBUG << fa.name << " " << fa.mass << endl;
}
// build one merged partial-loss spectrum from per-charge templates (z=1..3)
PeakSpectrum partial_loss_spectrum;
{
PeakSpectrum partial_loss_template_z1,
partial_loss_template_z2,
partial_loss_template_z3;
partial_loss_spectrum_generator.getSpectrum(partial_loss_template_z1, fixed_and_variable_modified_peptide, 1, 1);
partial_loss_template_z1.sortByPosition(); // need to resort after adding special immonium ions
partial_loss_spectrum_generator.getSpectrum(partial_loss_template_z2, fixed_and_variable_modified_peptide, 2, 2);
partial_loss_template_z2.sortByPosition(); // need to resort after adding special immonium ions
partial_loss_spectrum_generator.getSpectrum(partial_loss_template_z3, fixed_and_variable_modified_peptide, 3, 3);
partial_loss_template_z3.sortByPosition(); // need to resort after adding special immonium ions
NuXLFragmentIonGenerator::generatePartialLossSpectrum(unmodified_sequence,
fixed_and_variable_modified_peptide_weight,
precursor_na_adduct,
precursor_na_mass,
precursor_charge,
partial_loss_modification,
partial_loss_template_z1,
partial_loss_template_z2,
partial_loss_template_z3,
partial_loss_spectrum);
}
// add shifted marker ions
NuXLFragmentIonGenerator::addMS2MarkerIons(
marker_ions,
partial_loss_spectrum,
partial_loss_spectrum.getIntegerDataArrays()[NuXLConstants::IA_CHARGE_INDEX],
partial_loss_spectrum.getStringDataArrays()[0]);
partial_loss_spectrum.sortByPosition(); // need to resort after adding marker ions
// ion centric (e.g. b and y-ion) spectrum annotation that records all shifts of specific ions (e.g. y5, y5 + U, y5 + C3O)
MapIonIndexToFragmentAnnotation shifted_b_ions, shifted_y_ions, shifted_a_ions;
vector<PeptideHit::PeakAnnotation> shifted_immonium_ions, annotated_marker_ions;
vector<double> sites_sum_score(aas.size(), 0); // per-residue localization score, filled below
/////////////////
// Align partial-loss-spectrum to the experimental measured one
alignment.clear();
ppm_error_array.clear();
// remove duplicate peaks to prevent non-deterministic annotations (e.g., [M+H-H2O]+U and [M+H]+U-H2O )
removeDuplicatedPeaks(partial_loss_spectrum);
// align spectra (only allow matching charges)
OPXLSpectrumProcessingAlgorithms::getSpectrumAlignmentFastCharge(alignment,
fragment_mass_tolerance,
fragment_mass_tolerance_unit_ppm,
partial_loss_spectrum,
exp_spectrum,
partial_loss_spectrum.getIntegerDataArrays()[NuXLConstants::IA_CHARGE_INDEX],
exp_spectrum.getIntegerDataArrays()[NuXLConstants::IA_CHARGE_INDEX],
ppm_error_array);
#ifdef OPENUXL_DEBUG
for (size_t i = 0; i != exp_spectrum.size(); ++i)
{
OPENMS_LOG_DEBUG << "exp: " << exp_spectrum[i].getMZ() << "\t" << exp_spectrum.getIntegerDataArrays()[NuXLConstants::IA_CHARGE_INDEX][i] << endl;
}
#endif
const PeakSpectrum::StringDataArray& partial_loss_annotations = partial_loss_spectrum.getStringDataArrays()[0];
const PeakSpectrum::IntegerDataArray& partial_loss_charges = partial_loss_spectrum.getIntegerDataArrays()[NuXLConstants::IA_CHARGE_INDEX];
if (alignment.empty())
{
// no shifted ions matched: keep the unshifted annotations, no localization
a.fragment_annotations = fas;
continue;
}
/* uncomment to write all annotations to a file(only makes sense if a single spectrum is searched)
MSExperiment tmp_exp;
tmp_exp.addSpectrum(total_loss_spectrum);
tmp_exp.addSpectrum(partial_loss_spectrum);
MzMLFile().store("theoretical_loss_spectrum.mzML", tmp_exp);
*/
// classify every aligned shifted ion by its name and route it into the proper container
for (auto pair_it = alignment.begin(); pair_it != alignment.end(); ++pair_it)
{
// only annotate experimental peaks with shift - i.e. do not annotated complete loss peaks again
if (peak_is_annotated.find(pair_it->second) != peak_is_annotated.end()) { continue; }
// information on the experimental fragment in the alignment
const Size & fragment_index = pair_it->second;
const Peak1D & fragment = exp_spectrum[fragment_index];
const double & fragment_intensity = fragment.getIntensity(); // in percent (%)
const double & fragment_mz = fragment.getMZ();
const int & fragment_charge = exp_spectrum.getIntegerDataArrays()[NuXLConstants::IA_CHARGE_INDEX][fragment_index];
#ifdef DEBUG_OpenNuXL
OPENMS_LOG_DEBUG << "fragment_mz:" << fragment_mz << " fragment_charge:" << fragment_charge << endl;
#endif
String ion_name = partial_loss_annotations[pair_it->first];
const int charge = partial_loss_charges[pair_it->first];
#ifdef DEBUG_OpenNuXL
OPENMS_LOG_DEBUG << "theo_name:" << ion_name << " theo_charge:" << charge << endl;
#endif
// split ion name into ion part and (optional) shift part
vector<String> f;
ion_name.split(' ', f); // e.g. "y3 C3O" or just "y2"
String fragment_shift_name;
if (f.size() == 2) { fragment_shift_name = f[1]; }
String fragment_ion_name = f[0]; // e.g. y3
#ifdef DEBUG_OpenNuXL
OPENMS_LOG_DEBUG << "Annotating ion: " << ion_name << " at position: " << fragment_mz << " " << " intensity: " << fragment_intensity << endl;
#endif
// define which ion names are annotated
if (fragment_ion_name.hasPrefix("y"))
{
String ion_nr_string = fragment_ion_name;
ion_nr_string.substitute("y", "");
ion_nr_string.substitute("+", ""); // remove one or multiple '+'
auto ion_number = (Size)ion_nr_string.toInt();
NuXLFragmentAnnotationHelper::FragmentAnnotationDetail_ d(fragment_shift_name, charge, fragment_mz, fragment_intensity);
if (ion_number > 1) // trypsin doesn't cut at cross-linked amino acid
{
shifted_y_ions[ion_number].push_back(d);
}
}
else if (fragment_ion_name.hasPrefix("b"))
{
String ion_nr_string = fragment_ion_name;
ion_nr_string.substitute("b", "");
ion_nr_string.substitute("+", ""); // remove one or multiple '+'
auto ion_number = (Size)ion_nr_string.toInt();
NuXLFragmentAnnotationHelper::FragmentAnnotationDetail_ d(fragment_shift_name, charge, fragment_mz, fragment_intensity);
shifted_b_ions[ion_number].push_back(d);
}
else if (fragment_ion_name.hasPrefix("a"))
{
String ion_nr_string = fragment_ion_name;
ion_nr_string.substitute("a", "");
ion_nr_string.substitute("+", ""); // remove one or multiple '+'
auto ion_number = (Size)ion_nr_string.toInt();
NuXLFragmentAnnotationHelper::FragmentAnnotationDetail_ d(fragment_shift_name, charge, fragment_mz, fragment_intensity);
shifted_a_ions[ion_number].push_back(d);
}
else if (ion_name.hasPrefix(NuXLFragmentIonGenerator::ANNOTATIONS_MARKER_ION_PREFIX))
{
OPENMS_LOG_DEBUG << "Marker ion aligned: " << ion_name << " fragment_mz: " << fragment_mz << " fragment_charge: " << fragment_charge << endl;
if (fragment_charge == 1)
{
PeptideHit::PeakAnnotation fa;
fa.mz = fragment_mz;
fa.intensity = fragment_intensity;
fa.charge = 1;
fa.annotation = ion_name + "+";
annotated_marker_ions.push_back(fa);
}
else
{
OPENMS_LOG_ERROR << "Unexpected marker ion charge." << endl;
}
}
else if (ion_name.hasPrefix("i"))
{
OPENMS_LOG_DEBUG << "Immonium ion aligned: " << ion_name << " fragment_mz: " << fragment_mz << " fragment_charge: " << fragment_charge << endl;
if (fragment_charge == 1)
{
PeptideHit::PeakAnnotation fa;
fa.mz = fragment_mz;
fa.intensity = fragment_intensity;
fa.charge = 1;
fa.annotation = ion_name + "+";
shifted_immonium_ions.push_back(fa);
}
else
{
OPENMS_LOG_ERROR << "Unexpected immonium ion charge." << endl;
}
}
else if (ion_name.hasPrefix("[M+"))
{
PeptideHit::PeakAnnotation fa;
fa.mz = fragment_mz;
fa.intensity = fragment_intensity;
fa.charge = charge;
static const std::regex pattern(R"(\](\++)+)");
fa.annotation = std::regex_replace(ion_name, pattern, "]"); // remove charge inside string (e.g., before loss)
fa.annotation.substitute(' ', '+'); // turn gap into plus "[M+2H] U-H2O" -> "[M+2H]+U-H2O"
fa.annotation += String(charge, '+'); // add charges back at end
annotated_precursor_ions.push_back(fa);
}
else if (isupper(ion_name[0])) // shifted internal ions
{
PeptideHit::PeakAnnotation fa;
fa.mz = fragment_mz;
fa.intensity = fragment_intensity;
fa.charge = charge;
String with_plus = ion_name;
with_plus.substitute(' ', '+'); // turn "PEPT U-H2O" into "PEPT+U-H20"
fa.annotation = with_plus + String(charge, '+');
shifted_immonium_ions.push_back(fa); //TODO: add to shifted_internal_fragment_ions or rename vector
}
}
// track shifts in n- and c-term ladders (in AAs coordinates)
// n_shifts and c_shifts will contain the summed intensities over all observed shifts at that position
// the distinction allows to easily detect prefix and suffix ladders in the next step
vector<double> n_shifts(sites_sum_score.size(), 0); // vector index 0 == ion index 1
vector<double> c_shifts(sites_sum_score.size(), 0);
// sum shifted b-ion intensities per residue position (b_{i+1} ends at residue i)
for (Size i = 0; i != n_shifts.size(); ++i)
{
if (shifted_b_ions.find(i + 1) == shifted_b_ions.end()) { continue; }
for (auto& k : shifted_b_ions[i + 1]) { n_shifts[i] += k.intensity; }
}
// a-ions contribute to the same N-terminal ladder as b-ions
for (Size i = 0; i != n_shifts.size(); ++i)
{
if (shifted_a_ions.find(i + 1) == shifted_a_ions.end()) { continue; }
for (auto& k : shifted_a_ions[i + 1]) { n_shifts[i] += k.intensity; }
}
// y-ions are indexed from the C-terminus: y_{size - i} covers residue i
for (Size i = 0; i != c_shifts.size(); ++i)
{
const Size ion_index = c_shifts.size() - i;
if (shifted_y_ions.find(ion_index) == shifted_y_ions.end()) { continue; }
for (auto& k : shifted_y_ions[ion_index]) { c_shifts[i] += k.intensity; }
}
// same ladders for the unshifted ions (needed for rule 3 below)
vector<double> n_noshifts(sites_sum_score.size(), 0);
vector<double> c_noshifts(sites_sum_score.size(), 0);
for (Size i = 0; i != n_noshifts.size(); ++i)
{
if (unshifted_b_ions.find(i + 1) == unshifted_b_ions.end()) { continue; }
for (auto& k : unshifted_b_ions[i + 1]) { n_noshifts[i] += k.intensity; }
}
for (Size i = 0; i != n_noshifts.size(); ++i)
{
if (unshifted_a_ions.find(i + 1) == unshifted_a_ions.end()) { continue; }
for (auto& k : unshifted_a_ions[i + 1]) { n_noshifts[i] += k.intensity; }
}
for (Size i = 0; i != c_noshifts.size(); ++i)
{
const Size ion_index = c_noshifts.size() - i;
if (unshifted_y_ions.find(ion_index) == unshifted_y_ions.end()) { continue; }
for (auto& k : unshifted_y_ions[ion_index]) { c_noshifts[i] += k.intensity; }
}
#ifdef DEBUG_OpenNuXL
cout << "n:";
for (auto& k : n_shifts) cout << k << " ";
cout << endl;
cout << "c:";
for (auto& k : c_shifts) cout << k << " ";
cout << endl;
cout << "n0:";
for (auto& k : n_noshifts) cout << k << " ";
cout << endl;
cout << "c0:";
for (auto& k : c_noshifts) cout << k << " ";
cout << endl;
#endif
// Rules implemented:
// 1. if cross-link on AA, then the prefix or suffix ending at this AA must be shifted
// 2. if the previous AA in the prefix / suffix had a stronger shifted signal, then the current on is not the correct one
// 3. if the current AA is cross-linked, then the previous AA is not cross-linked and we should observe an unshifted prefix / suffix ion
// Sum up all intensities of shifted prefix / suffix ions
for (Size i = 0; i != sites_sum_score.size(); ++i)
{
sites_sum_score[i] = 0.0;
if (n_shifts[i] == 0 && c_shifts[i] == 0) { continue; } // no shifts? no cross-link at this AA
if (n_shifts[i] > 0)
{
// Rules apply only for a3,b3 and higher ions (because we rarely observe a1,b1 ions we can't check for Rule 3)
if (i >= 2 && n_shifts[i - 1] > n_shifts[i]) continue; // Stronger signal from shifted AA before the current one? Then skip it.
if (i >= 2 && n_noshifts[i - 1] == 0) continue; // continue if unshifted AA is missing before (left of) the shifted one.
// sum up all intensities from this position and all longer prefixes that also carry the NA
for (Size j = i; j != sites_sum_score.size(); ++j) { sites_sum_score[i] += n_shifts[j]; }
}
if (c_shifts[i] > 0)
{
// Rules apply only for y3 and higher ions (because we rarely observe y1 ions we can't check for Rule 3)
if (i < c_shifts.size()-2 && c_shifts[i + 1] > c_shifts[i]) continue; // AA after has higher intensity and also shifted? Then skip it.
if (i < c_noshifts.size()-2 && c_noshifts[i + 1] == 0) continue; // continue if unshifted AA is missing before (right of) the shifted one.
// sum up all intensities from this position and all longer suffixes that also carry the NA
for (int j = i; j >= 0; --j) { sites_sum_score[i] += c_shifts[j]; }
}
}
#ifdef DEBUG_OpenNuXL
cout << "site sum score (shifted a/b/y-ions):";
for (auto& k : sites_sum_score) cout << k << " ";
cout << endl;
#endif
#ifdef DEBUG_OpenNuXL
OPENMS_LOG_DEBUG << "Localisation based on immonium ions: ";
#endif
// add evidence from shifted immonium ions: their second character encodes the AA of origin
String aas_unmodified = aas.toUnmodifiedString();
for (Size i = 0; i != aas_unmodified.size(); ++i)
{
String origin = String(aas_unmodified[i]);
// NOTE: loop variable 'a' shadows the enclosing hit variable 'a'
for (auto& a : shifted_immonium_ions)
{
// compare origin (the AA) of immonium ion to current AA
if (a.annotation[0] == 'i' && a.annotation[1] == aas_unmodified[i])
{
#ifdef DEBUG_OpenNuXL
OPENMS_LOG_DEBUG << "\n" << a.annotation << " " << "\n";
#endif
sites_sum_score[i] += a.intensity;
}
}
}
#ifdef DEBUG_OpenNuXL
cout << "site sum score (shifted a/b/y-ions & immonium ions):";
for (auto& k : sites_sum_score) cout << k << " ";
cout << endl;
#endif
// pick the best-scoring residue(s); best_localization marks them with a lower-case letter
String best_localization = unmodified_sequence;
int best_localization_position = -1; // UNKNOWN
double best_localization_score = 0;
String localization_scores;
for (Size i = 0; i != sites_sum_score.size(); ++i)
{
if (sites_sum_score[i] > best_localization_score) { best_localization_score = sites_sum_score[i]; }
}
// build comma-separated per-residue score string and lower-case the best position(s)
for (Size i = 0; i != sites_sum_score.size(); ++i)
{
#ifdef DEBUG_OpenNuXL
OPENMS_LOG_DEBUG << String::number(100.0 * sites_sum_score[i], 2);
#endif
if (i != 0) localization_scores += ',';
if (sites_sum_score[i] > 0 )
{
localization_scores += String::number(100.0 * sites_sum_score[i], 2);
}
else
{
localization_scores += "0";
}
// tolerance of 1e-6 treats near-equal scores as ties
if (best_localization_score > 0.0 && sites_sum_score[i] >= best_localization_score - 1e-6)
{
best_localization[i] = tolower(best_localization[i]);
best_localization_position = i; // Note: check if there are situations where multiple have the same score
}
}
#ifdef DEBUG_OpenNuXL
OPENMS_LOG_DEBUG << endl;
#endif
// create annotation strings for shifted fragment ions
NuXLFragmentAnnotationHelper::addShiftedPeakFragmentAnnotation_(shifted_b_ions,
shifted_y_ions,
shifted_a_ions,
shifted_immonium_ions,
annotated_marker_ions,
annotated_precursor_ions,
fas);
// store score of best localization(s)
a.localization_scores = localization_scores;
a.best_localization = best_localization;
a.best_localization_score = best_localization_score;
a.best_localization_position = best_localization_position;
a.fragment_annotations = fas;
#ifdef DEBUG_OpenNuXL1
OPENMS_LOG_DEBUG << "Ion centric annotation: " << endl;
OPENMS_LOG_DEBUG << "unshifted b ions: " << endl;
OPENMS_LOG_DEBUG << NuXLFragmentAnnotationHelper::fragmentAnnotationDetailsToString("b", unshifted_b_ions) << endl;
OPENMS_LOG_DEBUG << "unshifted y ions: " << endl;
OPENMS_LOG_DEBUG << NuXLFragmentAnnotationHelper::fragmentAnnotationDetailsToString("y", unshifted_y_ions) << endl;
OPENMS_LOG_DEBUG << "unshifted a ions: " << endl;
OPENMS_LOG_DEBUG << NuXLFragmentAnnotationHelper::fragmentAnnotationDetailsToString("a", unshifted_a_ions) << endl;
OPENMS_LOG_DEBUG << "shifted b ions: " << endl;
OPENMS_LOG_DEBUG << NuXLFragmentAnnotationHelper::fragmentAnnotationDetailsToString("b", shifted_b_ions) << endl;
OPENMS_LOG_DEBUG << "shifted y ions: " << endl;
OPENMS_LOG_DEBUG << NuXLFragmentAnnotationHelper::fragmentAnnotationDetailsToString("y", shifted_y_ions) << endl;
OPENMS_LOG_DEBUG << "shifted a ions: " << endl;
OPENMS_LOG_DEBUG << NuXLFragmentAnnotationHelper::fragmentAnnotationDetailsToString("a", shifted_a_ions) << endl;
OPENMS_LOG_DEBUG << "shifted immonium ions: " << endl;
OPENMS_LOG_DEBUG << NuXLFragmentAnnotationHelper::shiftedIonsToString(shifted_immonium_ions) << endl;
OPENMS_LOG_DEBUG << "shifted marker ions: " << endl;
OPENMS_LOG_DEBUG << NuXLFragmentAnnotationHelper::shiftedIonsToString(annotated_marker_ions) << endl;
OPENMS_LOG_DEBUG << "shifted precursor ions: " << endl;
OPENMS_LOG_DEBUG << NuXLFragmentAnnotationHelper::shiftedIonsToString(annotated_precursor_ions) << endl;
OPENMS_LOG_DEBUG << "Localization scores: ";
OPENMS_LOG_DEBUG << localization_scores << endl;
OPENMS_LOG_DEBUG << "Localisation based on ion series and immonium ions of all observed fragments: ";
OPENMS_LOG_DEBUG << best_localization << endl;
#endif
}
}
}
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/NUXL/NuXLDeisotoper.cpp | .cpp | 16,156 | 409 | // Copyright (c) 2002-present, The OpenMS Team -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/ANALYSIS/NUXL/NuXLDeisotoper.h>
#include <OpenMS/PROCESSING/FILTERING/WindowMower.h>
#include <OpenMS/PROCESSING/FILTERING/NLargest.h>
#include <OpenMS/PROCESSING/FILTERING/ThresholdMower.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/MATH/MathFunctions.h>
namespace OpenMS
{
// static
/**
  @brief Deisotopes a (sorted, centroided) fragment spectrum and optionally converts
         isotopic clusters to single-charged monoisotopic peaks.

  The algorithm tests charge hypotheses from @p max_charge down to @p min_charge for
  each peak and extends isotopic patterns towards higher m/z. Peaks belonging to an
  accepted pattern are grouped into a "feature"; only monoisotopic peaks (and, unless
  @p keep_only_deisotoped is set, unassigned peaks) survive the final selection.
  If @p preserve_high_intensity_peaks is set, locally most-intense peaks are used as
  seeds first and are always retained; peaks below @p preserve_low_mz_peaks_threshold
  m/z are also retained.

  @param spec Input/output spectrum; must be sorted by position.
  @param fragment_tolerance Matching tolerance for isotopic peaks (<= 100 ppm or <= 0.1 Da).
  @param fragment_unit_ppm If true, @p fragment_tolerance is interpreted in ppm.
  @param min_charge / max_charge Charge hypothesis range (tested high to low).
  @param keep_only_deisotoped If true, peaks without an assigned feature are dropped.
  @param min_isopeaks / max_isopeaks Minimum/maximum pattern length (each >= 2, min <= max).
  @param make_single_charged If true, monoisotopic peaks are converted to charge 1.
  @param annotate_charge / annotate_iso_peak_count / annotate_features Add integer
         data arrays "charge" / "iso_peak_count" / "feature_number" to @p spec.
  @param use_decreasing_model Require (mostly) decreasing intensities along the pattern.
  @param start_intensity_check Isotope index at which the decreasing-intensity check starts.
  @param add_up_intensity Sum the pattern's intensities onto the monoisotopic peak.
  @param preserve_high_intensity_peaks Seed from (and always keep) locally dominant peaks.
  @param preserve_low_mz_peaks_threshold Keep all peaks below this m/z (0 disables).

  @throws Exception::IllegalArgument on out-of-range tolerance or isopeak settings.
*/
void NuXLDeisotoper::deisotopeAndSingleCharge(MSSpectrum& spec,
  double fragment_tolerance,
  bool fragment_unit_ppm,
  int min_charge,
  int max_charge,
  bool keep_only_deisotoped,
  unsigned int min_isopeaks,
  unsigned int max_isopeaks,
  bool make_single_charged,
  bool annotate_charge,
  bool annotate_iso_peak_count,
  bool use_decreasing_model,
  unsigned int start_intensity_check,
  bool add_up_intensity,
  bool annotate_features,
  bool preserve_high_intensity_peaks,
  double preserve_low_mz_peaks_threshold)
{
  OPENMS_PRECONDITION(spec.isSorted(), "Spectrum must be sorted.");

  // sanity check: overly wide tolerances would merge unrelated peaks into patterns
  if ((fragment_unit_ppm && fragment_tolerance > 100) || (!fragment_unit_ppm && fragment_tolerance > 0.1))
  {
    throw Exception::IllegalArgument(
      __FILE__,
      __LINE__,
      OPENMS_PRETTY_FUNCTION,
      "Fragment tolerance must not be greater than 100 ppm or 0.1 Da");
  }

  if (min_isopeaks < 2 || max_isopeaks < 2 || min_isopeaks > max_isopeaks)
  {
    throw Exception::IllegalArgument(__FILE__,
      __LINE__,
      OPENMS_PRETTY_FUNCTION,
      "Minimum/maximum number of isotopic peaks must be at least 2 (and min_isopeaks <= max_isopeaks).");
  }

  if (spec.empty())
  {
    return;
  }

  // indices of the integer data arrays added below (only valid if the
  // corresponding annotate_* flag is set)
  Size charge_index{};
  Size iso_peak_count_index{};
  Size feature_number_dataarray_index{};

  // reserve integer data array to store charge of peaks
  if (annotate_charge)
  {
    // expand to hold one additional integer data array to hold the charge
    spec.getIntegerDataArrays().resize(spec.getIntegerDataArrays().size() + 1);
    spec.getIntegerDataArrays().back().setName("charge");
    charge_index = spec.getIntegerDataArrays().size()-1;
  }
  // reserve integer data array to store number of isotopic peaks for each isotopic pattern
  if (annotate_iso_peak_count)
  {
    spec.getIntegerDataArrays().resize(spec.getIntegerDataArrays().size() + 1);
    spec.getIntegerDataArrays().back().setName("iso_peak_count");
    iso_peak_count_index = spec.getIntegerDataArrays().size()-1;
  }
  if (annotate_features)
  {
    spec.getIntegerDataArrays().resize(spec.getIntegerDataArrays().size() + 1);
    spec.getIntegerDataArrays().back().setName("feature_number");
    feature_number_dataarray_index = spec.getIntegerDataArrays().size() - 1;
  }

  // during discovery phase, work on a constant reference (just to make sure we do not modify spec)
  const MSSpectrum& old_spectrum = spec;

  // determine charge seeds and extend them
  // per-peak bookkeeping, indexed like old_spectrum:
  //   mono_isotopic_peak[i]   : assigned charge of peak i (0 = none)
  //   features[i]             : feature (pattern) number of peak i (-1 = unassigned)
  //   mono_iso_peak_intensity : summed pattern intensity (only used with add_up_intensity)
  //   iso_peak_count[i]       : pattern length including the monoisotopic peak
  std::vector<size_t> mono_isotopic_peak(old_spectrum.size(), 0);
  std::vector<int> features(old_spectrum.size(), -1);
  std::vector<double> mono_iso_peak_intensity(old_spectrum.size(), 0);
  std::vector<Size> iso_peak_count(old_spectrum.size(), 1);
  int feature_number = 0;

  std::vector<size_t> extensions;

  bool has_precursor_data(false);
  double precursor_mass(0);
  if (old_spectrum.getPrecursors().size() == 1)
  {
    has_precursor_data = true;
    int precursor_charge = old_spectrum.getPrecursors()[0].getCharge();
    // NOTE(review): uses Constants::PROTON_MASS here but PROTON_MASS_U further
    // down for the single-charge conversion — confirm both constants are in Da
    precursor_mass = (old_spectrum.getPrecursors()[0].getMZ() * precursor_charge) - (Constants::PROTON_MASS * precursor_charge);
  }

  MSSpectrum high_intensity_peaks;
  if (preserve_high_intensity_peaks) // TODO: document what happens with keep_only_deisotoped=true as it might add high-intensity peaks without charge (=> might lead to surprises if that option is taken)
  {
    high_intensity_peaks = spec;

    // find high intensity peaks: keep the single most intense peak in each
    // 4 Th window (jumping window) as deisotoping seed
    WindowMower window_mower_filter;
    Param filter_param = window_mower_filter.getParameters();
    filter_param.setValue("windowsize", 4.0, "The size of the sliding window along the m/z axis.");
    filter_param.setValue("peakcount", 1, "The number of peaks that should be kept.");
    filter_param.setValue("movetype", "jump", "Whether sliding window (one peak steps) or jumping window (window size steps) should be used.");
    window_mower_filter.setParameters(filter_param);

    // add peak indices so we know what is retained after filtering
    high_intensity_peaks.getIntegerDataArrays().resize(high_intensity_peaks.getIntegerDataArrays().size() + 1);
    high_intensity_peaks.getIntegerDataArrays().back().setName("index");
    for (size_t index = 0; index != high_intensity_peaks.size(); ++index)
    {
      high_intensity_peaks.getIntegerDataArrays().back().push_back(index);
    }

    // filter peaks and integer data arrays
    window_mower_filter.filterPeakSpectrum(high_intensity_peaks);

    // use high intensity peaks as seeds
    for (size_t i = 0; i != high_intensity_peaks.size(); ++i)
    {
      // the "index" data array maps back into old_spectrum
      size_t current_peak = high_intensity_peaks.getIntegerDataArrays().back()[i];

      // deconvolve with high intensity peaks as seed
      // TODO: remove duplication
      const double current_mz = old_spectrum[current_peak].getMZ();
      if (add_up_intensity)
      {
        mono_iso_peak_intensity[current_peak] = old_spectrum[current_peak].getIntensity();
      }

      for (int q = max_charge; q >= min_charge; --q) // important: test charge hypothesis from high to low
      {
        // try to extend isotopes from mono-isotopic peak
        // if extension larger then min_isopeaks possible:
        //   - save charge q in mono_isotopic_peak[]
        //   - annotate_charge all isotopic peaks with feature number
        if (features[current_peak] == -1) // only process peaks which have no assigned feature number
        {
          bool has_min_isopeaks = true;
          const double tolerance_dalton = fragment_unit_ppm ? Math::ppmToMass(fragment_tolerance, current_mz) : fragment_tolerance;

          // do not bother testing charges q (and masses m) with: m/q > precursor_mass/q (or m > precursor_mass)
          if (has_precursor_data)
          {
            double current_theo_mass = (current_mz * q) - (Constants::PROTON_MASS * q);
            if (current_theo_mass > (precursor_mass + tolerance_dalton))
            {
              continue;
            }
          }

          extensions.clear();
          extensions.push_back(current_peak); // add current peak as start of extensions

          for (unsigned int i = 1; i < max_isopeaks; ++i)
          {
            // expected position of the i-th isotopic peak for charge q
            const double expected_mz = current_mz + static_cast<double>(i) * Constants::C13C12_MASSDIFF_U / static_cast<double>(q);
            const int p = old_spectrum.findNearest(expected_mz, tolerance_dalton);
            if (p == -1) // test for missing peak
            {
              has_min_isopeaks = (i >= min_isopeaks);
              break;
            }
            else
            {
              // Possible improvement: include proper averagine model filtering
              // for now start at the peak with i = start_intensity_check to test hypothesis
              // if start_intensity_check = 0 or 1, start checking by comparing monoisotopic and second isotopic peak
              // if start_intensity_check = 2, start checking by comparing second isotopic peak with the third, etc.
              // Note: this is a common approach used in several other search engines
              if (use_decreasing_model && (i >= start_intensity_check) && (old_spectrum[p].getIntensity() > old_spectrum[extensions.back()].getIntensity()))
              {
                has_min_isopeaks = (i >= min_isopeaks);
                break;
              }

              // ratio of first isotopic peak to monoisotopic peak may not be too large otherwise it might be just a satelite peak (e.g, amidation)
              // NOTE(review): in this seed loop the two ratio checks below run
              // unconditionally, while in the all-peaks loop further down the
              // ">10" check is guarded by use_decreasing_model and the "<0.01"
              // check is absent — confirm this asymmetry is intentional
              if (i == 1
                && old_spectrum[p].getIntensity() / old_spectrum[extensions.back()].getIntensity() > 10.0)
              {
                has_min_isopeaks = (i >= min_isopeaks);
                break;
              }

              // ratio of first isotopic peak to monoisotopic peak may not be too small otherwise it might be just matching a noise peak
              if (i == 1
                && old_spectrum[p].getIntensity() / old_spectrum[extensions.back()].getIntensity() < 0.01)
              {
                has_min_isopeaks = (i >= min_isopeaks);
                break;
              }

              // averagine check passed or skipped
              extensions.push_back(p);
              if (annotate_iso_peak_count)
              {
                iso_peak_count[current_peak] = i + 1; // with "+ 1" the monoisotopic peak is counted as well
              }
            }
          }

          if (has_min_isopeaks)
          {
            // std::cout << "min peaks at " << current_mz << " " << " extensions: " << extensions.size() << std::endl;
            mono_isotopic_peak[current_peak] = q;
            for (unsigned int i = 0; i != extensions.size(); ++i)
            {
              features[extensions[i]] = feature_number;
              // monoisotopic peak intensity is already set above, add up the other intensities here
              if (add_up_intensity && (i != 0))
              {
                mono_iso_peak_intensity[current_peak] += old_spectrum[extensions[i]].getIntensity();
              }
            }
            ++feature_number;
          }
        }
      }
    }
  }

  // deisotope on all peaks (peaks already claimed by a seed's pattern keep
  // their feature number and are skipped via the features[...] == -1 test)
  for (size_t current_peak = 0; current_peak != old_spectrum.size(); ++current_peak)
  {
    const double current_mz = old_spectrum[current_peak].getMZ();
    if (add_up_intensity)
    {
      mono_iso_peak_intensity[current_peak] = old_spectrum[current_peak].getIntensity();
    }

    for (int q = max_charge; q >= min_charge; --q) // important: test charge hypothesis from high to low
    {
      // try to extend isotopes from mono-isotopic peak
      // if extension larger then min_isopeaks possible:
      //   - save charge q in mono_isotopic_peak[]
      //   - annotate_charge all isotopic peaks with feature number
      if (features[current_peak] == -1) // only process peaks which have no assigned feature number
      {
        bool has_min_isopeaks = true;
        const double tolerance_dalton = fragment_unit_ppm ? Math::ppmToMass(fragment_tolerance, current_mz) : fragment_tolerance;

        // do not bother testing charges q (and masses m) with: m/q > precursor_mass/q (or m > precursor_mass)
        if (has_precursor_data)
        {
          double current_theo_mass = (current_mz * q) - (Constants::PROTON_MASS * q);
          if (current_theo_mass > (precursor_mass + tolerance_dalton))
          {
            continue;
          }
        }

        extensions.clear();
        extensions.push_back(current_peak); // add current peak as start of extensions

        for (unsigned int i = 1; i < max_isopeaks; ++i)
        {
          const double expected_mz = current_mz + static_cast<double>(i) * Constants::C13C12_MASSDIFF_U / static_cast<double>(q);
          const int p = old_spectrum.findNearest(expected_mz, tolerance_dalton);
          if (p == -1) // test for missing peak
          {
            has_min_isopeaks = (i >= min_isopeaks);
            break;
          }
          else
          {
            // Possible improvement: include proper averagine model filtering
            // for now start at the peak with i = start_intensity_check to test hypothesis
            // if start_intensity_check = 0 or 1, start checking by comparing monoisotopic and second isotopic peak
            // if start_intensity_check = 2, start checking by comparing second isotopic peak with the third, etc.
            // Note: this is a common approach used in several other search engines
            if (use_decreasing_model && (i >= start_intensity_check) && (old_spectrum[p].getIntensity() > old_spectrum[extensions.back()].getIntensity()))
            {
              has_min_isopeaks = (i >= min_isopeaks);
              break;
            }

            // ratio of first isotopic peak to monoisotopic peak may not be too large otherwise it might be just a satelite peak (e.g, amidation)
            if (use_decreasing_model && (i == 1 && old_spectrum[p].getIntensity() / old_spectrum[extensions.back()].getIntensity() > 10.0))
            {
              has_min_isopeaks = (i >= min_isopeaks);
              break;
            }

            // averagine check passed or skipped
            extensions.push_back(p);
            if (annotate_iso_peak_count)
            {
              iso_peak_count[current_peak] = i + 1; // with "+ 1" the monoisotopic peak is counted as well
            }
          }
        }

        if (has_min_isopeaks)
        {
          mono_isotopic_peak[current_peak] = q;
          for (unsigned int i = 0; i != extensions.size(); ++i)
          {
            features[extensions[i]] = feature_number;
            // monoisotopic peak intensity is already set above, add up the other intensities here
            if (add_up_intensity && (i != 0))
            {
              mono_iso_peak_intensity[current_peak] += old_spectrum[extensions[i]].getIntensity();
            }
          }
          ++feature_number;
        }
      }
    }
  }

  if (annotate_features)
  { // assign feature indices without copy
    // qualified call into the std::vector base class — presumably
    // IntegerDataArray derives from std::vector<Int>; swap avoids copying
    spec.getIntegerDataArrays()[feature_number_dataarray_index].std::vector<Int>::swap(features);
  }

  // apply changes, i.e. select the indices which should survive
  std::vector<Size> select_idx;

  for (size_t i = 0; i != spec.size(); ++i)
  {
    Size z = mono_isotopic_peak[i]; // assigned charge (0 = no pattern found)
    if (annotate_charge)
    {
      spec.getIntegerDataArrays()[charge_index].push_back((int)z);
    }
    if (annotate_iso_peak_count)
    {
      spec.getIntegerDataArrays()[iso_peak_count_index].push_back((int)iso_peak_count[i]);
    }
    if (add_up_intensity)
    {
      spec[i].setIntensity(mono_iso_peak_intensity[i]);
    }

    if (!keep_only_deisotoped)
    { // keep all unassigned peaks
      if (features[i] < 0)
      {
        select_idx.push_back(i);
        continue;
      }
    }

    // non-monoisotopic members of a pattern are dropped
    if (z == 0)
    {
      continue;
    }

    // convert mono-isotopic peak with charge assigned by deisotoping
    if (make_single_charged)
    {
      spec[i].setMZ(spec[i].getMZ() * z - (z - 1) * Constants::PROTON_MASS_U);
    }
    select_idx.push_back(i);
  }

  if (preserve_high_intensity_peaks)
  {
    // add peak index if we want to keep that peak (linear membership test;
    // acceptable since the seed list is small after window mowing)
    for (size_t i = 0; i != high_intensity_peaks.size(); ++i)
    {
      int peak_index = high_intensity_peaks.getIntegerDataArrays().back()[i];
      if (std::find(select_idx.begin(), select_idx.end(), peak_index) == select_idx.end())
      {
        select_idx.push_back(peak_index);
      }
    }
  }

  if (preserve_low_mz_peaks_threshold > 0.0)
  {
    for (size_t i = 0; i != spec.size(); ++i)
    {
      if (spec[i].getMZ() < preserve_low_mz_peaks_threshold)
      {
        if (std::find(select_idx.begin(), select_idx.end(), i) == select_idx.end()) // not already selected
        {
          select_idx.push_back(i);
        }
      }
      else
      {
        break; // spectrum is sorted, no further peaks below the threshold
      }
    }
  }

  // properly subsets all datapoints (incl. dataArrays)
  spec.select(select_idx);
  spec.sortByPosition(); // select_idx may be out of order after the preserve steps
  return;
}
} // namespace OpenMS | C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/NUXL/NuXLFragmentAdductDefinition.cpp | .cpp | 909 | 28 | // Copyright (c) 2002-present, The OpenMS Team -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/NUXL/NuXLFragmentAdductDefinition.h>
using namespace std;
namespace OpenMS
{
// Strict weak ordering: primarily by mass, then by the formula's string
// representation, finally by name (keeps ordering deterministic for sets/maps).
bool NuXLFragmentAdductDefinition::operator<(const NuXLFragmentAdductDefinition& other) const
{
  if (mass != other.mass) { return mass < other.mass; }
  const String this_formula = formula.toString();
  const String other_formula = other.formula.toString();
  if (this_formula != other_formula) { return this_formula < other_formula; }
  return name < other.name;
}
// Two fragment adduct definitions are considered equal iff both their
// empirical formula and their name match (mass is derived and not compared).
bool NuXLFragmentAdductDefinition::operator==(const NuXLFragmentAdductDefinition& other) const
{
  return (formula == other.formula) && (name == other.name);
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/NUXL/NuXLPresets.cpp | .cpp | 6,069 | 196 | // Copyright (c) 2002-present, The OpenMS Team -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/NUXL/NuXLPresets.h>
#include <OpenMS/SYSTEM/File.h>
#include <nlohmann/json.hpp>
#include <fstream>
using json = nlohmann::json;
namespace OpenMS
{
namespace NuXLPresets
{
/**
  @brief Returns the names of all available NuXL presets.

  Reads the top-level keys of the presets JSON file. A non-empty, existing
  @p custom_presets_file takes precedence; otherwise the presets file shipped
  in the OpenMS share directory ("NUXL/nuxl_presets.json") is used.

  @param custom_presets_file Optional path to a user-supplied presets JSON file.
  @return List of preset names; empty if the file is missing or unreadable.
*/
StringList getAllPresetsNames(const String& custom_presets_file)
{
  StringList presets;

  // Determine which JSON file to use: a valid custom file wins,
  // otherwise fall back to the presets shipped with OpenMS.
  String json_path;
  if (!custom_presets_file.empty() && File::exists(custom_presets_file))
  {
    json_path = custom_presets_file;
    OPENMS_LOG_INFO << "Using custom presets file: " << json_path << std::endl;
  }
  else
  {
    json_path = File::getOpenMSDataPath() + "/NUXL/nuxl_presets.json";
  }

  // single existence check (previously the file was checked twice in a row)
  if (!File::exists(json_path))
  {
    OPENMS_LOG_WARN << "Presets file not found: " << json_path << std::endl;
    return presets;
  }
  OPENMS_LOG_INFO << "Found presets file: " << json_path << std::endl;

  try
  {
    std::ifstream file(json_path.c_str());
    json j;
    file >> j;

    // the top-level JSON object maps preset name -> preset definition;
    // collect all keys
    for (auto it = j.begin(); it != j.end(); ++it)
    {
      presets.push_back(it.key());
    }
  }
  catch (const std::exception& e)
  {
    // malformed JSON etc.: warn and return what we have (an empty list)
    OPENMS_LOG_WARN << "Error reading presets from " << json_path << ": " << e.what() << std::endl;
  }

  return presets;
}
/**
  @brief Loads a named NuXL preset from the presets JSON file into the output parameters.

  Looks up preset @p p in @p custom_presets_file (if given and existing) or in
  the default "NUXL/nuxl_presets.json" from the OpenMS share directory, and
  fills the output lists from the preset's JSON keys ("target_nucleotides",
  "mapping", "modifications", "fragment_adducts", "can_cross_link").
  Output lists are only cleared/overwritten when the corresponding key exists.

  @param p Preset name (must be one of getAllPresetsNames()).
  @param custom_presets_file Optional path to a user-supplied presets JSON file.
  @param[out] nucleotides Target nucleotides of the preset.
  @param[out] mapping Nucleotide mapping rules.
  @param[out] modifications Modification definitions.
  @param[out] fragment_adducts Fragment adduct definitions.
  @param[out] can_cross_link Nucleotides that can cross-link.

  @throws std::runtime_error if the preset name is unknown, the presets file is
          missing, or the file cannot be parsed.
*/
void getPresets(const String& p,
                const String& custom_presets_file,
                StringList& nucleotides,
                StringList& mapping,
                StringList& modifications,
                StringList& fragment_adducts,
                String& can_cross_link)
{
  StringList presets = getAllPresetsNames(custom_presets_file);

  OPENMS_LOG_INFO << "Found presets: " << presets.size() << std::endl;
  for (const String& s : presets)
  {
    OPENMS_LOG_DEBUG << s << std::endl;
  }

  // Check if preset exists
  bool found = find(presets.begin(), presets.end(), p) != presets.end();

  if (!found)
  {
    throw std::runtime_error("Error: unknown preset '" + p + "'.");
  }

  // Try to load presets from JSON file
  // (path resolution mirrors getAllPresetsNames: custom file wins if it exists)
  String json_path;
  if (!custom_presets_file.empty() && File::exists(custom_presets_file))
  {
    json_path = custom_presets_file;
  }
  else
  {
    String share_path = File::getOpenMSDataPath();
    json_path = share_path + "/NUXL/nuxl_presets.json";
  }

  if (File::exists(json_path))
  {
    try
    {
      std::ifstream file(json_path.c_str());
      json j;
      file >> j;

      // Check if the requested preset exists in the JSON file
      if (j.contains(p.c_str()))
      {
        const auto& preset = j[p.c_str()];

        // Load nucleotides
        if (preset.contains("target_nucleotides"))
        {
          nucleotides.clear();
          for (const auto& nuc : preset["target_nucleotides"])
          {
            nucleotides.push_back(nuc.get<std::string>());
          }
        }

        // Load mapping
        if (preset.contains("mapping"))
        {
          mapping.clear();
          for (const auto& map : preset["mapping"])
          {
            mapping.push_back(map.get<std::string>());
          }
        }

        // Load modifications
        if (preset.contains("modifications"))
        {
          modifications.clear();
          for (const auto& mod : preset["modifications"])
          {
            modifications.push_back(mod.get<std::string>());
          }
        }

        // Load fragment adducts
        if (preset.contains("fragment_adducts"))
        {
          fragment_adducts.clear();
          for (const auto& frag : preset["fragment_adducts"])
          {
            fragment_adducts.push_back(frag.get<std::string>());
          }
        }

        // Load can_cross_link
        if (preset.contains("can_cross_link"))
        {
          can_cross_link = preset["can_cross_link"].get<std::string>();
        }

        // Special handling for DEB and NM presets that need methionine loss
        // NOTE(review): this mutates the global ResidueDB singleton via
        // const_cast — affects all subsequent users of residue 'M'; confirm
        // this process-wide side effect is intended
        if (p.hasSubstring("DEB") || p.hasSubstring("NM"))
        {
          // add special methionine loss
          auto r_ptr = const_cast<Residue*>(ResidueDB::getInstance()->getResidue('M'));
          r_ptr->addLossFormula(EmpiricalFormula("CH4S1"));
        }

        // Preset loaded successfully, return
        OPENMS_LOG_INFO << "Using preset '" << p << "' from " << json_path << std::endl;
        return;
      }
    }
    catch (const std::exception& e)
    {
      // If there's an error reading the JSON file, throw an error
      OPENMS_LOG_WARN << "Error reading presets from " << json_path << ": " << e.what() << std::endl;
      throw std::runtime_error("Error reading presets.");
    }
  }
  else
  {
    throw std::runtime_error("Error: presets file not found.");
  }
}
// Overload that uses the default presets file
void getPresets(const String& p,
StringList& nucleotides,
StringList& mapping,
StringList& modifications,
StringList& fragment_adducts,
String& can_cross_link)
{
getPresets(p, "", nucleotides, mapping, modifications, fragment_adducts, can_cross_link);
}
}
} | C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/ConfidenceScoring.cpp | .cpp | 10,636 | 263 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hannes Roest, Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/ConfidenceScoring.h>
#include <OpenMS/FORMAT/TransformationXMLFile.h>
#include <OpenMS/OPENSWATHALGO/ALGO/Scoring.h>
#include <OpenMS/FORMAT/FeatureXMLFile.h>
#include <OpenMS/FORMAT/TraMLFile.h>
#include <boost/bimap.hpp>
#include <boost/bimap/multiset_of.hpp>
#include <numeric> // for "accumulate"
#include <ctime> // for "time" (random number seed)
#include <random>
#include <map>
using namespace std;
namespace OpenMS
{
/// Mapping: Q3 m/z <-> transition intensity (maybe not unique!)
typedef boost::bimap<double, boost::bimaps::multiset_of<double> >
BimapType;
/**
  @brief Constructor.

  @param test_mode_ If true, the random shuffler is seeded with a fixed value (0)
         so that decoy sampling is reproducible in tests; otherwise it is seeded
         with the current time.
*/
ConfidenceScoring::ConfidenceScoring(bool test_mode_)
{
  // Bug fix: the condition was inverted — test mode must use the fixed seed
  // for deterministic results, production the time-based seed.
  if (test_mode_) shuffler_ = Math::RandomShuffler(0);
  else shuffler_ = Math::RandomShuffler(time(nullptr)); // seed with current time
}
/// Randomize the list of decoy indexes (no-op when all decoys are used)
void ConfidenceScoring::chooseDecoys_()
{
  // n_decoys_ == 0 means "use every decoy", so the order does not matter
  if (n_decoys_ == 0)
  {
    return;
  }
  // Shuffling the whole index list just to draw a random sample is a bit
  // wasteful, but keeps the implementation simple.
  shuffler_.portable_random_shuffle(decoy_index_.begin(), decoy_index_.end());
}
// double rmsd_(DoubleList x, DoubleList y)
// {
// double sum_of_squares = 0;
// for (Size i = 0; i < x.size(); i++)
// {
// double diff = x[i] - y[i];
// sum_of_squares += diff * diff;
// }
// return sqrt(sum_of_squares / x.size());
// }
/// Manhattan (L1) distance: sum of absolute coordinate-wise differences.
/// Assumes both lists have the same length.
double ConfidenceScoring::manhattanDist_(DoubleList x, DoubleList y)
{
  return inner_product(x.begin(), x.end(), y.begin(), 0.0,
                       plus<double>(),
                       [](double a, double b) { return fabs(a - b); });
}
/// Look up the retention time annotated on an assay.
/// Precondition: the assay carries at least one retention time.
double ConfidenceScoring::getAssayRT_(const TargetedExperiment::Peptide& assay)
{
  OPENMS_PRECONDITION(assay.hasRetentionTime(), "More than zero RTs needed")
  const double assay_rt = assay.getRetentionTime();
  return assay_rt;
}
/// Keep only the @p n_transitions highest intensities in @p intensity_map
/// (0 = keep all) and write the survivors, ordered by m/z, into @p intensities.
void extractIntensities_(BimapType& intensity_map, Size n_transitions,
                         DoubleList& intensities)
{
  // The right view of the bimap is ordered by intensity, so its first
  // element is always the current minimum — pop until the target size.
  if (n_transitions > 0)
  {
    while (intensity_map.size() > n_transitions)
    {
      intensity_map.right.erase(intensity_map.right.begin());
    }
  }
  // Emit intensities ordered by m/z (left view); missing values may be
  // encoded as "-1", so clamp negatives to zero.
  intensities.clear();
  for (BimapType::left_map::iterator it = intensity_map.left.begin();
       it != intensity_map.left.end(); ++it)
  {
    intensities.push_back(max(0.0, it->second));
  }
}
/// Score the assay @p assay against feature data (@p feature_rt,
/// @p feature_intensities), optionally using only the specified transitions
/// (@p transition_ids).
///
/// Computes the normalized RT difference and the Manhattan distance between
/// sum-normalized feature and assay transition intensities, then combines both
/// via the GLM. Throws Exception::IllegalArgument if the feature intensities
/// are empty or the number of assay transitions does not match.
double ConfidenceScoring::scoreAssay_(const TargetedExperiment::Peptide& assay,
  double feature_rt, DoubleList& feature_intensities,
  const std::set<String>& transition_ids)
{
  // compute RT difference (both RTs in the normalized RT space):
  double assay_rt = rt_norm_(getAssayRT_(assay));
  double diff_rt = assay_rt - feature_rt;

  // collect transition intensities, keyed (and later ordered) by Q3 m/z:
  BimapType intensity_map;
  for (IntList::iterator trans_it = transition_map_[assay.id].begin();
       trans_it != transition_map_[assay.id].end(); ++trans_it)
  {
    const ReactionMonitoringTransition& transition =
      library_.getTransitions()[*trans_it];
    // for the "true" assay, we need to choose the same transitions as for the
    // feature:
    if (!transition_ids.empty() &&
        (transition_ids.count(transition.getNativeID()) == 0)) continue;
    // seems like Boost's Bimap doesn't support "operator[]"...
    intensity_map.left.insert(make_pair(transition.getProductMZ(),
                                        transition.getLibraryIntensity()));
  }
  // keep only as many assay transitions as the feature has:
  DoubleList assay_intensities;
  extractIntensities_(intensity_map, feature_intensities.size(),
                      assay_intensities);

  if (feature_intensities.empty())
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                     "Feature intensities were empty - please provide feature subordinate with intensities");
  }
  if (feature_intensities.size()!=assay_intensities.size())
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                     "Did not find a feature for each assay provided - each feature needs "
                                     "to have n subordinates with the meta-value 'native_id' set to the corresponding transition.");
  }

  // compute intensity distance on sum-normalized intensity vectors
  // (note: normalization modifies feature_intensities in place):
  OpenSwath::Scoring::normalize_sum(&feature_intensities[0],
                                    boost::numeric_cast<int>(feature_intensities.size()));
  OpenSwath::Scoring::normalize_sum(&assay_intensities[0],
                                    boost::numeric_cast<int>(assay_intensities.size()));
  double dist_int = manhattanDist_(feature_intensities,
                                   assay_intensities);

  // combine RT deviation and intensity distance via the generalized linear model:
  double score = glm_(diff_rt, dist_int);

  OPENMS_LOG_DEBUG << "\ndelta_RT: " << fabs(diff_rt)
                   << "\ndist_int: " << dist_int
                   << "\nGLM_score: " << score << endl;

  return score;
}
/// Score a feature.
///
/// Extracts the feature's (transformed, normalized) RT and its top
/// transition intensities, scores it against its "true" assay (meta value
/// "PeptideRef") and against a set of randomly chosen decoy assays, and
/// annotates the feature with "GLM_score", "local_FDR" and an overall
/// quality of (1 - local FDR).
void ConfidenceScoring::scoreFeature_(Feature& feature)
{
  // extract predictors from feature:
  double feature_rt = rt_norm_(rt_trafo_.apply(feature.getRT()));
  BimapType intensity_map; // Q3 m/z <-> subordinate intensity
  // for the "true" assay, we need to make sure we compare based on the same
  // transitions, so keep track of them:
  std::map<double, String> trans_id_map; // Q3 m/z -> transition ID
  for (vector<Feature>::iterator sub_it = feature.getSubordinates().begin();
       sub_it != feature.getSubordinates().end(); ++sub_it)
  {
    // seems like Boost's Bimap doesn't support "operator[]"...
    intensity_map.left.insert(make_pair(sub_it->getMZ(),
                                        sub_it->getIntensity()));
    trans_id_map[sub_it->getMZ()] = sub_it->getMetaValue("native_id");
  }
  // keep only the n_transitions_ highest intensities (0 = all):
  DoubleList feature_intensities;
  extractIntensities_(intensity_map, n_transitions_, feature_intensities);
  if ((n_transitions_ > 0) && (feature_intensities.size() < n_transitions_))
  {
    OPENMS_LOG_WARN << "Warning: Feature '" << feature.getUniqueId()
                    << "' contains only " << feature_intensities.size()
                    << " transitions." << endl;
  }
  // "intensity_map" now only contains the transitions we need later:
  std::set<String> transition_ids;
  for (BimapType::left_map::iterator int_it = intensity_map.left.begin();
       int_it != intensity_map.left.end(); ++int_it)
  {
    transition_ids.insert(trans_id_map[int_it->first]);
  }

  DoubleList scores; // "true" score is in "scores[0]", decoy scores follow

  // compare to "true" assay:
  String true_id = feature.getMetaValue("PeptideRef");
  OPENMS_LOG_DEBUG << "True assay (ID '" << true_id << "')" << endl;
  if (true_id.empty())
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                     "Feature does not contain meta value 'PeptideRef' (reference to assay)");
  }
  scores.push_back(scoreAssay_(library_.getPeptideByRef(true_id), feature_rt,
                               feature_intensities, transition_ids));

  // compare to decoy assays (order randomized, so taking the first
  // n_decoys_ suitable ones amounts to random sampling):
  chooseDecoys_();
  Size counter = 0;
  for (IntList::iterator decoy_it = decoy_index_.begin();
       decoy_it != decoy_index_.end(); ++decoy_it)
  {
    const TargetedExperiment::Peptide& decoy_assay =
      library_.getPeptides()[*decoy_it];

    // skip the "true" assay and assays with too few transitions:
    // TODO: maybe add an option to include assays with too few transitions?
    if ((decoy_assay.id == true_id) ||
        (transition_map_[decoy_assay.id].size() < feature_intensities.size()))
    {
      continue;
    }
    OPENMS_LOG_DEBUG << "Decoy assay " << scores.size() << " (ID '" << decoy_assay.id
                     << "')" << endl;

    scores.push_back(scoreAssay_(decoy_assay, feature_rt, feature_intensities));

    if ((n_decoys_ > 0) && (++counter >= n_decoys_)) break; // enough decoys
  }

  Size n_scores = scores.size();
  if (n_scores - 1 < n_decoys_)
  {
    OPENMS_LOG_WARN << "Warning: Feature '" << feature.getUniqueId()
                    << "': Couldn't find enough decoy assays with at least "
                    << feature_intensities.size() << " transitions. "
                    << "Scoring based on " << n_scores - 1 << " decoys." << endl;
  }

  // TODO: this warning may trigger for every feature and get annoying
  if ((n_decoys_ == 0) && (n_scores < library_.getPeptides().size()))
  {
    OPENMS_LOG_WARN << "Warning: Feature '" << feature.getUniqueId()
                    << "': Skipped some decoy assays with fewer than "
                    << feature_intensities.size() << " transitions. "
                    << "Scoring based on " << n_scores - 1 << " decoys." << endl;
  }

  // count decoy scores that are greater than the "true" score:
  counter = 0;
  for (DoubleList::iterator it = ++scores.begin(); it != scores.end(); ++it)
  {
    if (*it > scores[0]) counter++;
  }

  // annotate feature: local FDR = fraction of decoys beating the true assay
  feature.setMetaValue("GLM_score", scores[0]);
  double local_fdr = counter / (n_scores - 1.0);
  feature.setMetaValue("local_FDR", local_fdr);
  feature.setOverallQuality(1.0 - local_fdr);
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/OpenSwathHelper.cpp | .cpp | 16,299 | 403 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/OpenSwathHelper.h>
#include <random>
#include <algorithm>
#include <unordered_set>
namespace OpenMS
{
/**
  @brief Copies into @p transition_exp_used all transitions of @p targeted_exp
         whose precursor m/z falls into the window (@p lower, @p upper) while
         keeping at least @p min_upper_edge_dist from the upper window edge.

  All peptides and proteins are copied unconditionally; only the transition
  list is filtered.
*/
void OpenSwathHelper::selectSwathTransitions(const OpenMS::TargetedExperiment& targeted_exp,
  OpenMS::TargetedExperiment& transition_exp_used, double min_upper_edge_dist,
  double lower, double upper)
{
  transition_exp_used.setPeptides(targeted_exp.getPeptides());
  transition_exp_used.setProteins(targeted_exp.getProteins());
  for (Size i = 0; i < targeted_exp.getTransitions().size(); i++)
  {
    // const reference: avoids copying every transition just to test its m/z
    // (the copy only happens for transitions that are actually added)
    const ReactionMonitoringTransition& tr = targeted_exp.getTransitions()[i];
    if (lower < tr.getPrecursorMZ() && tr.getPrecursorMZ() < upper &&
        std::fabs(upper - tr.getPrecursorMZ()) >= min_upper_edge_dist)
    {
      OPENMS_LOG_DEBUG << "Adding Precursor with m/z " << tr.getPrecursorMZ() << " to swath with mz lower of " << lower << " m/z upper of " << upper;
      transition_exp_used.addTransition(tr);
    }
  }
}
// For PASEF experiments it is possible to have DIA windows with the same m/z however different IM.
// Extract from the DIA window in which the precursor is more centered across its IM.
// Unlike the function above, current implementation may not be parrelization safe
//
// Fills tr_win_map with, for each transition, the index of the best-matching
// swath map (-1 if none matches). "Best" = the window whose IM center is
// closest to the transition's precursor IM, among all windows that contain
// the precursor in both m/z (respecting min_upper_edge_dist at the upper
// edge) and IM.
void OpenSwathHelper::selectSwathTransitionsPasef(const OpenSwath::LightTargetedExperiment& transition_exp, std::vector<int>& tr_win_map,
  double min_upper_edge_dist, const std::vector< OpenSwath::SwathMap > & swath_maps)
{
  OPENMS_PRECONDITION(std::any_of(transition_exp.transitions.begin(), transition_exp.transitions.end(), [](auto i){return i.getPrecursorIM()!=-1;}), "All transitions must have a valid IM value (not -1)");

  tr_win_map.resize(transition_exp.transitions.size(), -1); // -1 = unassigned

  for (SignedSize i = 0; i < boost::numeric_cast<SignedSize>(swath_maps.size()); ++i)
  {
    for (Size k = 0; k < transition_exp.transitions.size(); k++)
    {
      const OpenSwath::LightTransition& tr = transition_exp.transitions[k];
      // If the transition falls inside the current DIA window (both in IM and m/z axis), check
      // if the window is potentially a better match for extraction than
      // the one previously stored in the map:
      if (
        swath_maps[i].imLower < tr.getPrecursorIM() && tr.getPrecursorIM() < swath_maps[i].imUpper &&
        swath_maps[i].lower < tr.getPrecursorMZ() && tr.getPrecursorMZ() < swath_maps[i].upper &&
        std::fabs(swath_maps[i].upper - tr.getPrecursorMZ()) >= min_upper_edge_dist )
      {
        if (tr_win_map[k] == -1)
        {
          // first matching window for this transition
          tr_win_map[k] = i;
        }
        else
        {
          // Check if the current window is better than the previously assigned window (across IM):
          // compare distances of the precursor IM to each window's IM midpoint
          double imOld = std::fabs(((swath_maps[ tr_win_map[k] ].imLower + swath_maps [ tr_win_map[k] ].imUpper) / 2) - tr.getPrecursorIM() );
          double imNew = std::fabs(((swath_maps[ i ].imLower + swath_maps [ i ].imUpper) / 2) - tr.getPrecursorIM() );

          if (imOld > imNew)
          {
            // current DIA window "i" is a better match
            OPENMS_LOG_DEBUG << "For Precursor " << tr.getPrecursorIM() << " Replacing Swath Map with IM center of " <<
              imOld << " with swath map of im center " << imNew << std::endl;
            tr_win_map[k] = i;
          }
        }
      }
    }
  }
}
/**
  @brief Validates that @p swath_map is a consistent SWATH/DIA map and reports
         its isolation window.

  Checks that every scan has exactly one precursor, the same MS level, and
  (within 0.1 m/z) the same precursor isolation window as the first scan.

  @param swath_map Input map; must be non-empty with precursor information.
  @param[out] lower Lower edge of the isolation window (m/z).
  @param[out] upper Upper edge of the isolation window (m/z).
  @param[out] center Precursor m/z (window center).

  @throws Exception::IllegalArgument if the map is empty or inconsistent.
*/
void OpenSwathHelper::checkSwathMap(const OpenMS::PeakMap& swath_map,
  double& lower, double& upper, double& center)
{
  if (swath_map.empty() || swath_map[0].getPrecursors().empty())
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Swath map has no Spectra");
  }
  // the first scan defines the reference isolation window
  const std::vector<Precursor>& first_prec = swath_map[0].getPrecursors();
  lower = first_prec[0].getMZ() - first_prec[0].getIsolationWindowLowerOffset();
  upper = first_prec[0].getMZ() + first_prec[0].getIsolationWindowUpperOffset();
  center = first_prec[0].getMZ();
  UInt expected_mslevel = swath_map[0].getMSLevel();

  for (Size index = 0; index < swath_map.size(); index++)
  {
    const std::vector<Precursor>& prec = swath_map[index].getPrecursors();
    if (prec.size() != 1)
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Scan " + String(index) + " does not have exactly one precursor.");
    }
    if (swath_map[index].getMSLevel() != expected_mslevel)
    {
      // fixed grammar in the error message ("if of" -> "is of")
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Scan " + String(index) + " is of a different MS level than the first scan.");
    }
    if (
      fabs(prec[0].getMZ() - first_prec[0].getMZ()) > 0.1 ||
      fabs(prec[0].getIsolationWindowLowerOffset() - first_prec[0].getIsolationWindowLowerOffset()) > 0.1 ||
      fabs(prec[0].getIsolationWindowUpperOffset() - first_prec[0].getIsolationWindowUpperOffset()) > 0.1
      )
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Scan " + String(index) + " has a different precursor isolation window than the first scan.");
    }
  }
}
/**
  @brief Copies into @p transition_exp_used the transitions of @p targeted_exp
         whose precursor m/z falls into (@p lower, @p upper) with at least
         @p min_upper_edge_dist distance to the upper window edge, together
         with only the compounds and proteins referenced by those transitions.
*/
void OpenSwathHelper::selectSwathTransitions(const OpenSwath::LightTargetedExperiment& targeted_exp,
  OpenSwath::LightTargetedExperiment& transition_exp_used, double min_upper_edge_dist,
  double lower, double upper)
{
  // 1) select transitions inside the window and remember their compound refs
  std::set<std::string> matching_compounds;
  for (const OpenSwath::LightTransition& tr : targeted_exp.transitions)
  {
    if (lower < tr.getPrecursorMZ() && tr.getPrecursorMZ() < upper &&
        std::fabs(upper - tr.getPrecursorMZ()) >= min_upper_edge_dist)
    {
      transition_exp_used.transitions.push_back(tr);
      matching_compounds.insert(tr.getPeptideRef());
    }
  }

  // 2) keep only compounds referenced by the selected transitions and
  //    collect the proteins they point to
  std::set<std::string> matching_proteins;
  for (const auto& compound : targeted_exp.compounds)
  {
    if (matching_compounds.find(compound.id) != matching_compounds.end())
    {
      transition_exp_used.compounds.push_back(compound);
      for (const auto& protein_ref : compound.protein_refs)
      {
        matching_proteins.insert(protein_ref);
      }
    }
  }

  // 3) keep only proteins referenced by the selected compounds
  for (const auto& protein : targeted_exp.proteins)
  {
    if (matching_proteins.find(protein.id) != matching_proteins.end())
    {
      transition_exp_used.proteins.push_back(protein);
    }
  }
}
std::pair<double,double> OpenSwathHelper::estimateRTRange(const OpenSwath::LightTargetedExperiment & exp)
{
if (exp.getCompounds().empty())
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Input list of targets is empty.");
}
double max = exp.getCompounds()[0].rt;
double min = exp.getCompounds()[0].rt;
for (Size i = 0; i < exp.getCompounds().size(); i++)
{
if (exp.getCompounds()[i].rt < min) min = exp.getCompounds()[i].rt;
if (exp.getCompounds()[i].rt > max) max = exp.getCompounds()[i].rt;
}
return std::make_pair(min,max);
}
  /**
    @brief Sample a subset of non-decoy compounds from a targeted experiment,
    distributed over @p bins uniform retention-time bins.

    Compounds whose sequence appears in @p priority_peptides are sampled first
    (and are never trimmed by @p top_fraction); the remaining per-bin quota of
    @p peptides_per_bin is then filled from the regular candidates. The
    returned experiment contains the picked compounds, their non-decoy
    transitions and the referenced proteins.

    With seed == 0 a non-deterministic seed (std::random_device) is used;
    any other seed makes the sampling reproducible.

    @throws Exception::IllegalArgument if fewer than 3 candidates exist.
  */
  OpenSwath::LightTargetedExperiment
  OpenSwathHelper::sampleExperiment(
    const OpenSwath::LightTargetedExperiment & exp,
    Size bins,
    Size peptides_per_bin,
    unsigned int seed,
    bool sort_by_intensity,
    double top_fraction,
    const std::unordered_set<std::string> & priority_peptides)
  {
    OPENMS_PRECONDITION(bins >= 1, "bins must be >= 1");
    OPENMS_PRECONDITION(peptides_per_bin >= 1, "peptides_per_bin must be >= 1");
    OPENMS_PRECONDITION(!sort_by_intensity || (top_fraction > 0.0 && top_fraction <= 1.0),
                        "top_fraction must be in (0,1] when sort_by_intensity is true");
    // 0) initial candidate selection: exclude decoys
    // A compound is a candidate if at least one of its transitions is a target.
    std::vector<OpenSwath::LightCompound> candidates;
    std::vector<OpenSwath::LightCompound> priority_candidates;
    std::unordered_set<String> good_ids;
    for (auto & tr : exp.getTransitions())
    {
      if (!tr.getDecoy())
        good_ids.insert(tr.getPeptideRef());
    }
    // Separate compounds into priority and regular candidates
    // (priority membership is decided by peptide *sequence*, not by id)
    for (auto & cmp : exp.getCompounds())
    {
      if (good_ids.count(cmp.id))
      {
        if (priority_peptides.count(cmp.sequence))
        {
          priority_candidates.push_back(cmp);
        }
        else
        {
          candidates.push_back(cmp);
        }
      }
    }
    // 1) optionally sort by library intensities and trim to top fraction
    if (sort_by_intensity && top_fraction > 0.0 && top_fraction <= 1.0)
    {
      // sum intensities per peptide across all transitions
      std::unordered_map<String, double> intensity_sum;
      for (auto & tr : exp.getTransitions())
      {
        if (!tr.getDecoy())
        {
          intensity_sum[tr.getPeptideRef()] += tr.library_intensity;
        }
      }
      // sort regular candidates by descending sum
      std::sort(candidates.begin(), candidates.end(), [&](auto & a, auto & b) {
        return intensity_sum[a.id] > intensity_sum[b.id];
      });
      // trim to top N% (at least one candidate is always kept)
      Size max_keep = std::max<Size>(1, static_cast<Size>(candidates.size() * top_fraction));
      candidates.resize(std::min(max_keep, candidates.size()));
      // Also sort priority candidates by intensity (but don't trim them)
      std::sort(priority_candidates.begin(), priority_candidates.end(), [&](auto & a, auto & b) {
        return intensity_sum[a.id] > intensity_sum[b.id];
      });
    }
    // Combine all available candidates for sampling
    std::vector<OpenSwath::LightCompound> all_candidates;
    all_candidates.reserve(priority_candidates.size() + candidates.size());
    all_candidates.insert(all_candidates.end(), priority_candidates.begin(), priority_candidates.end());
    all_candidates.insert(all_candidates.end(), candidates.begin(), candidates.end());
    if (all_candidates.size() < 3)
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
        "Insufficient candidates for sampling: " + String(all_candidates.size()) +
        " found, minimum 3 required for meaningful iRT calibration.");
    }
    // 2) estimate RT range using all available candidates
    double rt_min = std::numeric_limits<double>::max();
    double rt_max = std::numeric_limits<double>::lowest();
    for (auto & cmp : all_candidates)
    {
      rt_min = std::min(rt_min, cmp.rt);
      rt_max = std::max(rt_max, cmp.rt);
    }
    double bin_width = (rt_max - rt_min) / static_cast<double>(bins);
    // 3) sample priority peptides first, then fill remaining quota with uniform sampling
    std::vector<OpenSwath::LightCompound> picked;
    std::unordered_set<std::string> picked_sequences; // Track which sequences we've already picked
    // Initialize OpenMS random shuffler
    Math::RandomShuffler rshuffler;
    rshuffler.seed(seed == 0 ? std::random_device{}() : seed);
    // First pass: sample priority peptides
    if (!priority_candidates.empty())
    {
      OPENMS_LOG_INFO << "Sampling " << priority_candidates.size()
                      << " priority peptides from the input experiment" << std::endl;
      for (Size b = 0; b < bins; ++b)
      {
        // Bin boundaries: [lo, hi); the last bin's upper edge is rt_max.
        // NOTE(review): the strict `cmp.rt < hi` below excludes the compound
        // with rt == rt_max even from the last bin — confirm if intended.
        double lo = rt_min + b * bin_width;
        double hi = (b + 1 == bins ? rt_max : lo + bin_width);
        std::vector<OpenSwath::LightCompound> bucket;
        for (auto & cmp : priority_candidates)
        {
          if (cmp.rt >= lo && cmp.rt < hi && picked_sequences.find(cmp.sequence) == picked_sequences.end())
            bucket.push_back(cmp);
        }
        if (!bucket.empty())
        {
          // Shuffle, then take the first `take` entries = uniform sample.
          rshuffler.portable_random_shuffle(bucket.begin(), bucket.end());
          Size take = std::min(peptides_per_bin, bucket.size());
          for (Size i = 0; i < take; ++i)
          {
            picked.push_back(bucket[i]);
            picked_sequences.insert(bucket[i].sequence);
          }
        }
      }
      OPENMS_LOG_INFO << "Successfully sampled " << picked.size()
                      << " priority peptides" << std::endl;
    }
    // Second pass: fill remaining quota with regular sampling
    Size total_quota = bins * peptides_per_bin;
    if (picked.size() < total_quota && !candidates.empty())
    {
      OPENMS_LOG_INFO << "Filling remaining quota (" << (total_quota - picked.size())
                      << " peptides) from regular candidates" << std::endl;
      for (Size b = 0; b < bins; ++b)
      {
        double lo = rt_min + b * bin_width;
        double hi = (b + 1 == bins ? rt_max : lo + bin_width);
        // Count how many priority peptides we already have in this bin
        Size priority_in_bin = 0;
        for (auto & p : picked)
        {
          if (p.rt >= lo && p.rt < hi)
            priority_in_bin++;
        }
        // Calculate how many more we need from this bin
        Size needed = (peptides_per_bin > priority_in_bin) ? (peptides_per_bin - priority_in_bin) : 0;
        if (needed > 0)
        {
          std::vector<OpenSwath::LightCompound> bucket;
          for (auto & cmp : candidates)
          {
            if (cmp.rt >= lo && cmp.rt < hi && picked_sequences.find(cmp.sequence) == picked_sequences.end())
              bucket.push_back(cmp);
          }
          if (!bucket.empty())
          {
            rshuffler.portable_random_shuffle(bucket.begin(), bucket.end());
            Size take = std::min(needed, bucket.size());
            for (Size i = 0; i < take; ++i)
            {
              picked.push_back(bucket[i]);
              picked_sequences.insert(bucket[i].sequence);
            }
          }
        }
      }
    }
    // 4) assemble output experiment
    OpenSwath::LightTargetedExperiment out_exp;
    out_exp.compounds = picked;
    // copy matching transitions, excluding decoys if requested
    std::unordered_set<String> pep_ids;
    for (auto & cmp : picked)
      pep_ids.insert(cmp.id);
    for (auto & tr : exp.getTransitions())
    {
      if (pep_ids.count(tr.getPeptideRef()) && (!tr.getDecoy()))
        out_exp.transitions.push_back(tr);
    }
    // copy associated proteins
    std::unordered_set<String> prot_ids;
    for (auto & cmp : picked)
      for (auto & pid : cmp.protein_refs)
        prot_ids.insert(pid);
    for (auto & prot : exp.getProteins())
    {
      if (prot_ids.count(prot.id))
        out_exp.proteins.push_back(prot);
    }
    return out_exp;
  }
std::map<std::string, double> OpenSwathHelper::simpleFindBestFeature(
const OpenMS::MRMFeatureFinderScoring::TransitionGroupMapType & transition_group_map,
bool useQualCutoff, double qualCutoff)
{
std::map<std::string, double> result;
for (const auto & trgroup_it : transition_group_map)
{
if (trgroup_it.second.getFeatures().empty() ) {continue;}
// Find the feature with the highest score
auto bestf = trgroup_it.second.getBestFeature();
// Skip if we did not find a feature or do not exceed a certain quality
if (useQualCutoff && bestf.getOverallQuality() < qualCutoff )
{
continue;
}
// If we have a found a best feature, add it to the vector
String pepref = trgroup_it.second.getTransitions()[0].getPeptideRef();
result[ pepref ] = bestf.getRT();
}
return result;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/OpenSwathOSWWriter.cpp | .cpp | 40,585 | 672 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: George Rosenberger $
// $Authors: George Rosenberger $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/OpenSwathOSWWriter.h>
#include <OpenMS/FORMAT/SqliteConnector.h>
#include <sqlite3.h>
namespace OpenMS
{
  // Constructor. An empty output filename disables writing entirely
  // (doWrite_ == false, see isActive()). The run id has its sign bit
  // cleared via SqliteHelper::clearSignBit — presumably so it round-trips
  // through SQLite's signed 64-bit INTEGER storage; confirm against
  // SqliteHelper docs.
  OpenSwathOSWWriter::OpenSwathOSWWriter(const String& output_filename, const UInt64 run_id, const String& input_filename, bool uis_scores) :
    output_filename_(output_filename),
    input_filename_(input_filename),
    run_id_(Internal::SqliteHelper::clearSignBit(run_id)),
    doWrite_(!output_filename.empty()),
    enable_uis_scoring_(uis_scores)
  {}
  /// Whether this writer will produce output (true iff an output filename
  /// was provided to the constructor).
  bool OpenSwathOSWWriter::isActive() const
  {
    return doWrite_;
  }
  /// Create the OSW output database schema (RUN, FEATURE, FEATURE_MS1,
  /// FEATURE_MS2, FEATURE_PRECURSOR, FEATURE_TRANSITION tables) and insert
  /// the RUN row for this writer's run id and input filename.
  void OpenSwathOSWWriter::writeHeader()
  {
    // Open database
    SqliteConnector conn(output_filename_);
    // Create SQL structure
    // NOTE: the trailing '\' characters are redundant — adjacent string
    // literals concatenate during translation regardless — which is why the
    // one literal below without a '\' (MASSERROR_PPM) still works.
    const char * create_sql =
      "CREATE TABLE RUN(" \
      "ID INT PRIMARY KEY NOT NULL," \
      "FILENAME TEXT NOT NULL); " \
      "CREATE TABLE FEATURE(" \
      "ID INT PRIMARY KEY NOT NULL," \
      "RUN_ID INT NOT NULL," \
      "PRECURSOR_ID INT NOT NULL," \
      "EXP_RT REAL NOT NULL," \
      "EXP_IM REAL, " \
      "NORM_RT REAL NOT NULL," \
      "DELTA_RT REAL NOT NULL," \
      "LEFT_WIDTH REAL NOT NULL," \
      "RIGHT_WIDTH REAL NOT NULL," \
      "EXP_IM_LEFTWIDTH REAL," \
      "EXP_IM_RIGHTWIDTH REAL); " \
      // MS1-level scores
      "CREATE TABLE FEATURE_MS1(" \
      "FEATURE_ID INT NOT NULL," \
      "AREA_INTENSITY REAL NOT NULL," \
      "APEX_INTENSITY REAL NOT NULL," \
      "EXP_IM REAL," \
      "DELTA_IM REAL," \
      "VAR_MASSDEV_SCORE REAL NULL," \
      "VAR_MI_SCORE REAL NULL," \
      "VAR_MI_CONTRAST_SCORE REAL NULL," \
      "VAR_MI_COMBINED_SCORE REAL NULL," \
      "VAR_ISOTOPE_CORRELATION_SCORE REAL NULL," \
      "VAR_ISOTOPE_OVERLAP_SCORE REAL NULL," \
      "VAR_IM_MS1_DELTA_SCORE REAL NULL," \
      "VAR_XCORR_COELUTION REAL NULL," \
      "VAR_XCORR_COELUTION_CONTRAST REAL NULL," \
      "VAR_XCORR_COELUTION_COMBINED REAL NULL," \
      "VAR_XCORR_SHAPE REAL NULL," \
      "VAR_XCORR_SHAPE_CONTRAST REAL NULL," \
      "VAR_XCORR_SHAPE_COMBINED REAL NULL); " \
      // MS2-level scores
      "CREATE TABLE FEATURE_MS2(" \
      "FEATURE_ID INT NOT NULL," \
      "AREA_INTENSITY REAL NOT NULL," \
      "TOTAL_AREA_INTENSITY REAL NOT NULL," \
      "APEX_INTENSITY REAL NOT NULL," \
      "EXP_IM REAL," \
      "EXP_IM_LEFTWIDTH REAL," \
      "EXP_IM_RIGHTWIDTH REAL," \
      "DELTA_IM REAL," \
      "TOTAL_MI REAL NULL," \
      "VAR_BSERIES_SCORE REAL NULL," \
      "VAR_DOTPROD_SCORE REAL NULL," \
      "VAR_INTENSITY_SCORE REAL NULL," \
      "VAR_ISOTOPE_CORRELATION_SCORE REAL NULL," \
      "VAR_ISOTOPE_OVERLAP_SCORE REAL NULL," \
      "VAR_LIBRARY_CORR REAL NULL," \
      "VAR_LIBRARY_DOTPROD REAL NULL," \
      "VAR_LIBRARY_MANHATTAN REAL NULL," \
      "VAR_LIBRARY_RMSD REAL NULL," \
      "VAR_LIBRARY_ROOTMEANSQUARE REAL NULL," \
      "VAR_LIBRARY_SANGLE REAL NULL," \
      "VAR_LOG_SN_SCORE REAL NULL," \
      "VAR_MANHATTAN_SCORE REAL NULL," \
      "VAR_MASSDEV_SCORE REAL NULL," \
      "VAR_MASSDEV_SCORE_WEIGHTED REAL NULL," \
      "VAR_MI_SCORE REAL NULL," \
      "VAR_MI_WEIGHTED_SCORE REAL NULL," \
      "VAR_MI_RATIO_SCORE REAL NULL," \
      "VAR_NORM_RT_SCORE REAL NULL," \
      "VAR_XCORR_COELUTION REAL NULL," \
      "VAR_XCORR_COELUTION_WEIGHTED REAL NULL," \
      "VAR_XCORR_SHAPE REAL NULL," \
      "VAR_XCORR_SHAPE_WEIGHTED REAL NULL," \
      "VAR_YSERIES_SCORE REAL NULL," \
      "VAR_ELUTION_MODEL_FIT_SCORE REAL NULL," \
      "VAR_IM_XCORR_SHAPE REAL NULL," \
      "VAR_IM_XCORR_COELUTION REAL NULL," \
      "VAR_IM_DELTA_SCORE REAL NULL," \
      "VAR_IM_LOG_INTENSITY REAL NULL);" \
      "CREATE TABLE FEATURE_PRECURSOR(" \
      "FEATURE_ID INT NOT NULL," \
      "ISOTOPE INT NOT NULL," \
      "AREA_INTENSITY REAL NOT NULL," \
      "APEX_INTENSITY REAL NOT NULL);" \
      // Transition-level scores
      "CREATE TABLE FEATURE_TRANSITION(" \
      "FEATURE_ID INT NOT NULL," \
      "TRANSITION_ID INT NOT NULL," \
      "AREA_INTENSITY REAL NOT NULL," \
      "TOTAL_AREA_INTENSITY REAL NOT NULL," \
      "APEX_RT REAL NULL," \
      "APEX_INTENSITY REAL NOT NULL," \
      "RT_FWHM REAL NOT NULL," \
      "MASSERROR_PPM REAL NULL,"
      // (no '\' above — adjacent literal concatenation makes it unnecessary)
      "TOTAL_MI REAL NULL," \
      "VAR_INTENSITY_SCORE REAL NULL," \
      "VAR_INTENSITY_RATIO_SCORE REAL NULL," \
      "VAR_LOG_INTENSITY REAL NULL," \
      "VAR_XCORR_COELUTION REAL NULL," \
      "VAR_XCORR_SHAPE REAL NULL," \
      "VAR_LOG_SN_SCORE REAL NULL," \
      "VAR_MASSDEV_SCORE REAL NULL," \
      "VAR_MI_SCORE REAL NULL," \
      "VAR_MI_RATIO_SCORE REAL NULL," \
      "VAR_ISOTOPE_CORRELATION_SCORE REAL NULL," \
      "VAR_ISOTOPE_OVERLAP_SCORE REAL NULL, " \
      "EXP_IM REAL NULL," \
      "EXP_IM_LEFTWIDTH REAL," \
      "EXP_IM_RIGHTWIDTH REAL," \
      "DELTA_IM REAL NULL," \
      "VAR_IM_DELTA_SCORE REAL NULL," \
      "VAR_IM_LOG_INTENSITY REAL NULL," \
      // the four columns below are declared without a type — SQLite allows
      // typeless columns (they get BLOB/"none" affinity)
      "VAR_IM_XCORR_COELUTION_CONTRAST, " \
      "VAR_IM_XCORR_SHAPE_CONTRAST, " \
      "VAR_IM_XCORR_COELUTION_COMBINED, " \
      "VAR_IM_XCORR_SHAPE_COMBINED, " \
      "START_POSITION_AT_5 REAL NULL, " \
      "END_POSITION_AT_5 REAL NULL, " \
      "START_POSITION_AT_10 REAL NULL, " \
      "END_POSITION_AT_10 REAL NULL, " \
      "START_POSITION_AT_50 REAL NULL, " \
      "END_POSITION_AT_50 REAL NULL, " \
      "TOTAL_WIDTH REAL NULL, " \
      "TAILING_FACTOR REAL NULL, " \
      "ASYMMETRY_FACTOR REAL NULL, " \
      "SLOPE_OF_BASELINE REAL NULL, " \
      "BASELINE_DELTA_2_HEIGHT REAL NULL, " \
      "POINTS_ACROSS_BASELINE REAL NULL, " \
      "POINTS_ACROSS_HALF_HEIGHT REAL NULL); ";
    // Execute SQL create statement
    conn.executeStatement(create_sql);
    // Insert run_id information
    // NOTE(review): input_filename_ is interpolated unescaped into the SQL
    // string — a filename containing a single quote would break this
    // statement; consider escaping or a bound parameter.
    std::stringstream sql_run;
    sql_run << "INSERT INTO RUN (ID, FILENAME) VALUES ("
            << run_id_ << ", '"
            << input_filename_ << "'); ";
    // Execute SQL insert statement
    conn.executeStatement(sql_run.str());
  }
String OpenSwathOSWWriter::getScore(const Feature& feature, const std::string& score_name) const
{
String score = "NULL";
if (!feature.getMetaValue(score_name).isEmpty())
{
score = feature.getMetaValue(score_name).toString();
}
if (score.toLower() == "nan") score = "NULL";
if (score.toLower() == "-nan") score = "NULL";
return score;
}
std::vector<String> OpenSwathOSWWriter::getSeparateScore(const Feature& feature, const std::string& score_name) const
{
std::vector<String> separated_scores;
if (!feature.getMetaValue(score_name).isEmpty())
{
if (feature.getMetaValue(score_name).valueType() == DataValue::STRING_LIST)
{
separated_scores = feature.getMetaValue(score_name).toStringList();
}
else if (feature.getMetaValue(score_name).valueType() == DataValue::INT_LIST)
{
std::vector<int> int_separated_scores = feature.getMetaValue(score_name).toIntList();
std::transform(int_separated_scores.begin(), int_separated_scores.end(), std::back_inserter(separated_scores), [](const int& num) { return String(num); });
}
else if (feature.getMetaValue(score_name).valueType() == DataValue::DOUBLE_LIST)
{
std::vector<double> double_separated_scores = feature.getMetaValue(score_name).toDoubleList();
std::transform(double_separated_scores.begin(), double_separated_scores.end(), std::back_inserter(separated_scores), [](const double& num) { return String(num); });
}
else
{
separated_scores.push_back(feature.getMetaValue(score_name).toString());
}
}
return separated_scores;
}
String OpenSwathOSWWriter::prepareLine(const OpenSwath::LightCompound& /* pep */,
const OpenSwath::LightTransition* /* transition */,
const FeatureMap& output,
const String& id) const
{
std::stringstream sql, sql_feature, sql_feature_ms1, sql_feature_ms1_precursor, sql_feature_ms2, sql_feature_ms2_transition, sql_feature_uis_transition;
for (const auto& feature_it : output)
{
int64_t feature_id = Internal::SqliteHelper::clearSignBit(feature_it.getUniqueId()); // clear sign bit
const auto& masserror_ppm = feature_it.metaValueExists("masserror_ppm") ? getSeparateScore(feature_it, "masserror_ppm") : std::vector<String>();
const auto& subordinates = feature_it.getSubordinates();
for (Size i=0; i < subordinates.size(); i++)
{
const auto& sub_it = subordinates[i];
if (sub_it.metaValueExists("FeatureLevel") && sub_it.getMetaValue("FeatureLevel") == "MS2")
{
std::string total_mi = "NULL"; // total_mi is not guaranteed to be set
std::string masserror_ppm_query = "NULL"; // masserror_ppm is not guaranteed to be set
if (!masserror_ppm.empty())
{
masserror_ppm_query = masserror_ppm[i];
}
if (!sub_it.getMetaValue("total_mi").isEmpty())
{
total_mi = sub_it.getMetaValue("total_mi").toString();
}
bool enable_compute_peak_shape_metrics = sub_it.metaValueExists("start_position_at_5");
// Create sql query for storing transition level data, include peak shape metrics if they exist
sql_feature_ms2_transition << "INSERT INTO FEATURE_TRANSITION "
<< "(FEATURE_ID, TRANSITION_ID, AREA_INTENSITY, TOTAL_AREA_INTENSITY, APEX_INTENSITY, APEX_RT, RT_FWHM, MASSERROR_PPM, TOTAL_MI"
<< (enable_compute_peak_shape_metrics ? ", START_POSITION_AT_5, END_POSITION_AT_5, "
"START_POSITION_AT_10, END_POSITION_AT_10, START_POSITION_AT_50, END_POSITION_AT_50, "
"TOTAL_WIDTH, TAILING_FACTOR, ASYMMETRY_FACTOR, SLOPE_OF_BASELINE, BASELINE_DELTA_2_HEIGHT, "
"POINTS_ACROSS_BASELINE, POINTS_ACROSS_HALF_HEIGHT" : "")
<< ") VALUES ("
<< feature_id << ", "
<< sub_it.getMetaValue("native_id") << ", "
<< sub_it.getIntensity() << ", "
<< sub_it.getMetaValue("total_xic") << ", "
<< sub_it.getMetaValue("peak_apex_int") << ", "
<< sub_it.getMetaValue("peak_apex_position") << ", "
<< sub_it.getMetaValue("width_at_50") << ", "
<< masserror_ppm_query << ", "
<< total_mi;
if (enable_compute_peak_shape_metrics)
{
sql_feature_ms2_transition << ", "
<< sub_it.getMetaValue("start_position_at_5") << ", "
<< sub_it.getMetaValue("end_position_at_5") << ", "
<< sub_it.getMetaValue("start_position_at_10") << ", "
<< sub_it.getMetaValue("end_position_at_10") << ", "
<< sub_it.getMetaValue("start_position_at_50") << ", "
<< sub_it.getMetaValue("end_position_at_50") << ", "
<< sub_it.getMetaValue("total_width") << ", "
<< sub_it.getMetaValue("tailing_factor") << ", "
<< sub_it.getMetaValue("asymmetry_factor") << ", "
<< sub_it.getMetaValue("slope_of_baseline") << ", "
<< sub_it.getMetaValue("baseline_delta_2_height") << ", "
<< sub_it.getMetaValue("points_across_baseline") << ", "
<< sub_it.getMetaValue("points_across_half_height");
}
sql_feature_ms2_transition << "); ";
}
else if (sub_it.metaValueExists("FeatureLevel") && sub_it.getMetaValue("FeatureLevel") == "MS1" && sub_it.getIntensity() > 0.0)
{
std::vector<String> precursor_id;
OpenMS::String(sub_it.getMetaValue("native_id")).split(OpenMS::String("Precursor_i"), precursor_id);
sql_feature_ms1_precursor << "INSERT INTO FEATURE_PRECURSOR (FEATURE_ID, ISOTOPE, AREA_INTENSITY, APEX_INTENSITY) VALUES ("
<< feature_id << ", "
<< precursor_id[1] << ", "
<< sub_it.getIntensity() << ", "
<< sub_it.getMetaValue("peak_apex_int") << "); ";
}
}
// these will be missing if RT scoring is disabled
double norm_rt = -1, delta_rt = -1;
if (feature_it.metaValueExists("norm_RT") ) norm_rt = feature_it.getMetaValue("norm_RT");
if (feature_it.metaValueExists("delta_rt") ) delta_rt = feature_it.getMetaValue("delta_rt");
sql_feature << "INSERT INTO FEATURE (ID, RUN_ID, PRECURSOR_ID, EXP_RT, EXP_IM, NORM_RT, DELTA_RT, LEFT_WIDTH, RIGHT_WIDTH, EXP_IM_LEFTWIDTH, EXP_IM_RIGHTWIDTH) VALUES ("
<< feature_id << ", "
<< run_id_ << ", "
<< id << ", "
<< feature_it.getRT() << ", "
<< getScore(feature_it, "im_drift") << ", "
<< norm_rt << ", "
<< delta_rt << ", "
<< feature_it.getMetaValue("leftWidth") << ", "
<< feature_it.getMetaValue("rightWidth") << ", "
<< getScore(feature_it, "im_drift_left") << ", "
<< getScore(feature_it, "im_drift_right") << "); ";
sql_feature_ms2 << "INSERT INTO FEATURE_MS2 " \
"(FEATURE_ID, AREA_INTENSITY, TOTAL_AREA_INTENSITY, APEX_INTENSITY, EXP_IM, EXP_IM_LEFTWIDTH, EXP_IM_RIGHTWIDTH, DELTA_IM, TOTAL_MI, "\
"VAR_BSERIES_SCORE, VAR_DOTPROD_SCORE, VAR_INTENSITY_SCORE, " \
"VAR_ISOTOPE_CORRELATION_SCORE, VAR_ISOTOPE_OVERLAP_SCORE, VAR_LIBRARY_CORR, "\
"VAR_LIBRARY_DOTPROD, VAR_LIBRARY_MANHATTAN, VAR_LIBRARY_RMSD, VAR_LIBRARY_ROOTMEANSQUARE, "\
"VAR_LIBRARY_SANGLE, VAR_LOG_SN_SCORE, VAR_MANHATTAN_SCORE, VAR_MASSDEV_SCORE, VAR_MASSDEV_SCORE_WEIGHTED, "\
"VAR_MI_SCORE, VAR_MI_WEIGHTED_SCORE, VAR_MI_RATIO_SCORE, VAR_NORM_RT_SCORE, "\
"VAR_XCORR_COELUTION,VAR_XCORR_COELUTION_WEIGHTED, VAR_XCORR_SHAPE, "\
"VAR_XCORR_SHAPE_WEIGHTED, VAR_YSERIES_SCORE, VAR_ELUTION_MODEL_FIT_SCORE, "\
"VAR_IM_XCORR_SHAPE, VAR_IM_XCORR_COELUTION, VAR_IM_DELTA_SCORE, VAR_IM_LOG_INTENSITY"
<< ") VALUES ("
<< feature_id << ", "
<< feature_it.getIntensity() << ", "
<< getScore(feature_it, "total_xic") << ", "
<< getScore(feature_it, "peak_apices_sum") << ", "
<< getScore(feature_it, "im_drift") << ", "
<< getScore(feature_it, "im_drift_left") << ", "
<< getScore(feature_it, "im_drift_right") << ", "
<< getScore(feature_it, "im_delta") << ", "
<< getScore(feature_it, "total_mi") << ", "
<< getScore(feature_it, "var_bseries_score") << ", "
<< getScore(feature_it, "var_dotprod_score") << ", "
<< getScore(feature_it, "var_intensity_score") << ", "
<< getScore(feature_it, "var_isotope_correlation_score") << ", "
<< getScore(feature_it, "var_isotope_overlap_score") << ", "
<< getScore(feature_it, "var_library_corr") << ", "
<< getScore(feature_it, "var_library_dotprod") << ", "
<< getScore(feature_it, "var_library_manhattan") << ", "
<< getScore(feature_it, "var_library_rmsd") << ", "
<< getScore(feature_it, "var_library_rootmeansquare") << ", "
<< getScore(feature_it, "var_library_sangle") << ", "
<< getScore(feature_it, "var_log_sn_score") << ", "
<< getScore(feature_it, "var_manhatt_score") << ", "
<< getScore(feature_it, "var_massdev_score") << ", "
<< getScore(feature_it, "var_massdev_score_weighted") << ", "
<< getScore(feature_it, "var_mi_score") << ", "
<< getScore(feature_it, "var_mi_weighted_score") << ", "
<< getScore(feature_it, "var_mi_ratio_score") << ", "
<< getScore(feature_it, "var_norm_rt_score") << ", "
<< getScore(feature_it, "var_xcorr_coelution") << ", "
<< getScore(feature_it, "var_xcorr_coelution_weighted") << ", "
<< getScore(feature_it, "var_xcorr_shape") << ", "
<< getScore(feature_it, "var_xcorr_shape_weighted") << ", "
<< getScore(feature_it, "var_yseries_score") << ", "
<< getScore(feature_it, "var_elution_model_fit_score") << ", "
<< getScore(feature_it, "var_im_xcorr_shape") << ", "
<< getScore(feature_it, "var_im_xcorr_coelution") << ", "
<< getScore(feature_it, "var_im_delta_score") << ", "
<< getScore(feature_it, "im_log_intensity");
sql_feature_ms2 << "); ";
bool enable_ms1 = feature_it.metaValueExists("var_ms1_ppm_diff");
if (enable_ms1) // only write MS1 scores if they are present
{
sql_feature_ms1 << "INSERT INTO FEATURE_MS1 "\
"(FEATURE_ID, AREA_INTENSITY, APEX_INTENSITY, EXP_IM, DELTA_IM, "\
" VAR_MASSDEV_SCORE, VAR_IM_MS1_DELTA_SCORE, "\
" VAR_MI_SCORE, VAR_MI_CONTRAST_SCORE, VAR_MI_COMBINED_SCORE, VAR_ISOTOPE_CORRELATION_SCORE, "\
" VAR_ISOTOPE_OVERLAP_SCORE, VAR_XCORR_COELUTION, VAR_XCORR_COELUTION_CONTRAST, "\
" VAR_XCORR_COELUTION_COMBINED, VAR_XCORR_SHAPE, VAR_XCORR_SHAPE_CONTRAST, VAR_XCORR_SHAPE_COMBINED "\
") VALUES ("
<< feature_id << ", "
<< getScore(feature_it, "ms1_area_intensity") << ", "
<< getScore(feature_it, "ms1_apex_intensity") << ", "
<< getScore(feature_it, "im_ms1_drift") << ", "
<< getScore(feature_it, "im_ms1_delta") << ", "
<< getScore(feature_it, "var_ms1_ppm_diff") << ", "
<< getScore(feature_it, "var_im_ms1_delta_score") << ", "
<< getScore(feature_it, "var_ms1_mi_score") << ", "
<< getScore(feature_it, "var_ms1_mi_contrast_score") << ", "
<< getScore(feature_it, "var_ms1_mi_combined_score") << ", "
<< getScore(feature_it, "var_ms1_isotope_correlation") << ", "
<< getScore(feature_it, "var_ms1_isotope_overlap") << ", "
<< getScore(feature_it, "var_ms1_xcorr_coelution") << ", "
<< getScore(feature_it, "var_ms1_xcorr_coelution_contrast") << ", "
<< getScore(feature_it, "var_ms1_xcorr_coelution_combined") << ", "
<< getScore(feature_it, "var_ms1_xcorr_shape") << ", "
<< getScore(feature_it, "var_ms1_xcorr_shape_contrast") << ", "
<< getScore(feature_it, "var_ms1_xcorr_shape_combined") << "); ";
}
if (enable_uis_scoring_)
{
auto id_target_transition_names = getSeparateScore(feature_it, "id_target_transition_names");
auto id_target_area_intensity = getSeparateScore(feature_it, "id_target_area_intensity");
auto id_target_total_area_intensity = getSeparateScore(feature_it, "id_target_total_area_intensity");
auto id_target_apex_intensity = getSeparateScore(feature_it, "id_target_apex_intensity");
auto id_target_peak_apex_position = getSeparateScore(feature_it, "id_target_peak_apex_position");
auto id_target_peak_fwhm = getSeparateScore(feature_it, "id_target_width_at_50");
auto id_target_total_mi = getSeparateScore(feature_it, "id_target_total_mi");
auto id_target_intensity_score = getSeparateScore(feature_it, "id_target_intensity_score");
auto id_target_intensity_ratio_score = getSeparateScore(feature_it, "id_target_intensity_ratio_score");
auto id_target_log_intensity = getSeparateScore(feature_it, "id_target_ind_log_intensity");
auto id_target_ind_xcorr_coelution = getSeparateScore(feature_it, "id_target_ind_xcorr_coelution");
auto id_target_ind_xcorr_shape = getSeparateScore(feature_it, "id_target_ind_xcorr_shape");
auto id_target_ind_log_sn_score = getSeparateScore(feature_it, "id_target_ind_log_sn_score");
auto id_target_ind_massdev_score = getSeparateScore(feature_it, "id_target_ind_massdev_score");
auto id_target_ind_mi_score = getSeparateScore(feature_it, "id_target_ind_mi_score");
auto id_target_ind_mi_ratio_score = getSeparateScore(feature_it, "id_target_ind_mi_ratio_score");
auto id_target_ind_isotope_correlation = getSeparateScore(feature_it, "id_target_ind_isotope_correlation");
auto id_target_ind_isotope_overlap = getSeparateScore(feature_it, "id_target_ind_isotope_overlap");
// Ion Mobility scores
auto id_target_ind_im_drift = getSeparateScore(feature_it, "id_target_ind_im_drift");
auto id_target_ind_im_drift_left = getSeparateScore(feature_it, "id_target_ind_im_drift_left");
auto id_target_ind_im_drift_right = getSeparateScore(feature_it, "id_target_ind_im_drift_right");
auto id_target_ind_im_delta = getSeparateScore(feature_it, "id_target_ind_im_delta");
auto id_target_ind_im_delta_score = getSeparateScore(feature_it, "id_target_ind_im_delta_score");
auto id_target_ind_im_log_intensity = getSeparateScore(feature_it, "id_target_ind_im_log_intensity");
auto id_target_ind_im_contrast_coelution = getSeparateScore(feature_it, "id_target_ind_im_contrast_coelution");
auto id_target_ind_im_contrast_shape = getSeparateScore(feature_it, "id_target_ind_im_contrast_shape");
auto id_target_ind_im_sum_contrast_coelution = getSeparateScore(feature_it, "id_target_ind_im_sum_contrast_coelution");
auto id_target_ind_im_sum_contrast_shape = getSeparateScore(feature_it, "id_target_ind_im_sum_contrast_shape");
// check if there are compute_peak_shape_metrics scores
auto id_target_ind_start_position_at_5 = getSeparateScore(feature_it, "id_target_ind_start_position_at_5");
bool enable_compute_peak_shape_metrics = id_target_ind_start_position_at_5.size() > 0 && id_target_ind_start_position_at_5[0] != "0";
// get scores for peak shape metrics will just be empty vector if not present
auto start_position_at_5 = getSeparateScore(feature_it, "id_target_ind_start_position_at_5");
auto end_position_at_5 = getSeparateScore(feature_it, "id_target_ind_end_position_at_5");
auto start_position_at_10 = getSeparateScore(feature_it, "id_target_ind_start_position_at_10");
auto end_position_at_10 = getSeparateScore(feature_it, "id_target_ind_end_position_at_10");
auto start_position_at_50 = getSeparateScore(feature_it, "id_target_ind_start_position_at_50");
auto end_position_at_50 = getSeparateScore(feature_it, "id_target_ind_end_position_at_50");
auto total_width = getSeparateScore(feature_it, "id_target_ind_total_width");
auto tailing_factor = getSeparateScore(feature_it, "id_target_ind_tailing_factor");
auto asymmetry_factor = getSeparateScore(feature_it, "id_target_ind_asymmetry_factor");
auto slope_of_baseline = getSeparateScore(feature_it, "id_target_ind_slope_of_baseline");
auto baseline_delta_2_height = getSeparateScore(feature_it, "id_target_ind_baseline_delta_2_height");
auto points_across_baseline = getSeparateScore(feature_it, "id_target_ind_points_across_baseline");
auto points_across_half_height = getSeparateScore(feature_it, "id_target_ind_points_across_half_height");
if (feature_it.metaValueExists("id_target_num_transitions"))
{
int id_target_num_transitions = feature_it.getMetaValue("id_target_num_transitions");
for (int i = 0; i < id_target_num_transitions; ++i)
{
sql_feature_uis_transition << "INSERT INTO FEATURE_TRANSITION "\
"(FEATURE_ID, TRANSITION_ID, AREA_INTENSITY, TOTAL_AREA_INTENSITY, "\
" APEX_INTENSITY, APEX_RT, RT_FWHM, MASSERROR_PPM, TOTAL_MI, VAR_INTENSITY_SCORE, VAR_INTENSITY_RATIO_SCORE, "\
" VAR_LOG_INTENSITY, VAR_XCORR_COELUTION, VAR_XCORR_SHAPE, VAR_LOG_SN_SCORE, "\
" VAR_MASSDEV_SCORE, VAR_MI_SCORE, VAR_MI_RATIO_SCORE, "\
" VAR_ISOTOPE_CORRELATION_SCORE, VAR_ISOTOPE_OVERLAP_SCORE, "\
" EXP_IM, EXP_IM_LEFTWIDTH, EXP_IM_RIGHTWIDTH, DELTA_IM, VAR_IM_DELTA_SCORE, VAR_IM_LOG_INTENSITY, "\
" VAR_IM_XCORR_COELUTION_CONTRAST, VAR_IM_XCORR_SHAPE_CONTRAST, VAR_IM_XCORR_COELUTION_COMBINED, VAR_IM_XCORR_SHAPE_COMBINED "
<< (enable_compute_peak_shape_metrics ? ", START_POSITION_AT_5, END_POSITION_AT_5, "
"START_POSITION_AT_10, END_POSITION_AT_10, START_POSITION_AT_50, END_POSITION_AT_50, "
"TOTAL_WIDTH, TAILING_FACTOR, ASYMMETRY_FACTOR, SLOPE_OF_BASELINE, BASELINE_DELTA_2_HEIGHT, "
"POINTS_ACROSS_BASELINE, POINTS_ACROSS_HALF_HEIGHT" : "")
<< ") VALUES ("
<< feature_id << ", "
<< id_target_transition_names[i] << ", "
<< id_target_area_intensity[i] << ", "
<< id_target_total_area_intensity[i] << ", "
<< id_target_apex_intensity[i] << ", "
<< id_target_peak_apex_position[i] << ", "
<< id_target_peak_fwhm[i] << ", "
<< id_target_ind_massdev_score[i] << ", "
<< id_target_total_mi[i] << ", "
<< id_target_intensity_score[i] << ", "
<< id_target_intensity_ratio_score[i] << ", "
<< id_target_log_intensity[i] << ", "
<< id_target_ind_xcorr_coelution[i] << ", "
<< id_target_ind_xcorr_shape[i] << ", "
<< id_target_ind_log_sn_score[i] << ", "
<< id_target_ind_massdev_score[i] << ", "
<< id_target_ind_mi_score[i] << ", "
<< id_target_ind_mi_ratio_score[i] << ", "
<< id_target_ind_isotope_correlation[i] << ", "
<< id_target_ind_isotope_overlap[i] << ", "
<< id_target_ind_im_drift[i] << ", "
<< id_target_ind_im_drift_left[i] << ", "
<< id_target_ind_im_drift_right[i] << ", "
<< id_target_ind_im_delta[i] << ", "
<< id_target_ind_im_delta_score[i] << ", "
<< id_target_ind_im_log_intensity[i] << ", "
<< id_target_ind_im_contrast_coelution[i] << ", "
<< id_target_ind_im_contrast_shape[i] << ", "
<< id_target_ind_im_sum_contrast_coelution[i] << ", "
<< id_target_ind_im_sum_contrast_shape[i];
if (enable_compute_peak_shape_metrics)
{
sql_feature_uis_transition << ", "
<< start_position_at_5[i] << ", "
<< end_position_at_5[i] << ", "
<< start_position_at_10[i] << ", "
<< end_position_at_10[i] << ", "
<< start_position_at_50[i] << ", "
<< end_position_at_50[i] << ", "
<< total_width[i] << ", "
<< tailing_factor[i] << ", "
<< asymmetry_factor[i] << ", "
<< slope_of_baseline[i] << ", "
<< baseline_delta_2_height[i] << ", "
<< points_across_baseline[i] << ", "
<< points_across_half_height[i];
}
sql_feature_uis_transition << "); ";
}
}
auto id_decoy_transition_names = getSeparateScore(feature_it, "id_decoy_transition_names");
auto id_decoy_area_intensity = getSeparateScore(feature_it, "id_decoy_area_intensity");
auto id_decoy_total_area_intensity = getSeparateScore(feature_it, "id_decoy_total_area_intensity");
auto id_decoy_apex_intensity = getSeparateScore(feature_it, "id_decoy_apex_intensity");
auto id_decoy_peak_apex_position = getSeparateScore(feature_it, "id_decoy_peak_apex_position");
auto id_decoy_peak_fwhm = getSeparateScore(feature_it, "id_decoy_width_at_50");
auto id_decoy_total_mi = getSeparateScore(feature_it, "id_decoy_total_mi");
auto id_decoy_intensity_score = getSeparateScore(feature_it, "id_decoy_intensity_score");
auto id_decoy_intensity_ratio_score = getSeparateScore(feature_it, "id_decoy_intensity_ratio_score");
auto id_decoy_log_intensity = getSeparateScore(feature_it, "id_decoy_ind_log_intensity");
auto id_decoy_ind_xcorr_coelution = getSeparateScore(feature_it, "id_decoy_ind_xcorr_coelution");
auto id_decoy_ind_xcorr_shape = getSeparateScore(feature_it, "id_decoy_ind_xcorr_shape");
auto id_decoy_ind_log_sn_score = getSeparateScore(feature_it, "id_decoy_ind_log_sn_score");
auto id_decoy_ind_massdev_score = getSeparateScore(feature_it, "id_decoy_ind_massdev_score");
auto id_decoy_ind_mi_score = getSeparateScore(feature_it, "id_decoy_ind_mi_score");
auto id_decoy_ind_mi_ratio_score = getSeparateScore(feature_it, "id_decoy_ind_mi_ratio_score");
auto id_decoy_ind_isotope_correlation = getSeparateScore(feature_it, "id_decoy_ind_isotope_correlation");
auto id_decoy_ind_isotope_overlap = getSeparateScore(feature_it, "id_decoy_ind_isotope_overlap");
// Ion Mobility scores
auto id_decoy_ind_im_drift = getSeparateScore(feature_it, "id_decoy_ind_im_drift");
auto id_decoy_ind_im_drift_left = getSeparateScore(feature_it, "id_decoy_ind_im_drift_left");
auto id_decoy_ind_im_drift_right = getSeparateScore(feature_it, "id_decoy_ind_im_drift_right");
auto id_decoy_ind_im_delta = getSeparateScore(feature_it, "id_decoy_ind_im_delta");
auto id_decoy_ind_ind_im_delta_score = getSeparateScore(feature_it, "id_decoy_ind_im_delta_score");
auto id_decoy_ind_log_intensity = getSeparateScore(feature_it, "id_decoy_ind_im_log_intensity");
auto id_decoy_ind_im_contrast_coelution = getSeparateScore(feature_it, "id_decoy_ind_im_contrast_coelution");
auto id_decoy_ind_im_contrast_shape = getSeparateScore(feature_it, "id_decoy_ind_im_contrast_shape");
auto id_decoy_ind_im_sum_contrast_coelution = getSeparateScore(feature_it, "id_decoy_ind_im_sum_contrast_coelution");
auto id_decoy_ind_im_sum_contrast_shape = getSeparateScore(feature_it, "id_decoy_ind_im_sum_contrast_shape");
// get scores for peak shape metrics will just be empty vector if not present
auto decoy_start_position_at_5 = getSeparateScore(feature_it, "id_decoy_ind_start_position_at_5");
auto decoy_end_position_at_5 = getSeparateScore(feature_it, "id_decoy_ind_end_position_at_5");
auto decoy_start_position_at_10 = getSeparateScore(feature_it, "id_decoy_ind_start_position_at_10");
auto decoy_end_position_at_10 = getSeparateScore(feature_it, "id_decoy_ind_end_position_at_10");
auto decoy_start_position_at_50 = getSeparateScore(feature_it, "id_decoy_ind_start_position_at_50");
auto decoy_end_position_at_50 = getSeparateScore(feature_it, "id_decoy_ind_end_position_at_50");
auto decoy_total_width = getSeparateScore(feature_it, "id_decoy_ind_total_width");
auto decoy_tailing_factor = getSeparateScore(feature_it, "id_decoy_ind_tailing_factor");
auto decoy_asymmetry_factor = getSeparateScore(feature_it, "id_decoy_ind_asymmetry_factor");
auto decoy_slope_of_baseline = getSeparateScore(feature_it, "id_decoy_ind_slope_of_baseline");
auto decoy_baseline_delta_2_height = getSeparateScore(feature_it, "id_decoy_ind_baseline_delta_2_height");
auto decoy_points_across_baseline = getSeparateScore(feature_it, "id_decoy_ind_points_across_baseline");
auto decoy_points_across_half_height = getSeparateScore(feature_it, "id_decoy_ind_points_across_half_height");
if (feature_it.metaValueExists("id_decoy_num_transitions"))
{
int id_decoy_num_transitions = feature_it.getMetaValue("id_decoy_num_transitions");
for (int i = 0; i < id_decoy_num_transitions; ++i)
{
sql_feature_uis_transition << "INSERT INTO FEATURE_TRANSITION "\
"(FEATURE_ID, TRANSITION_ID, AREA_INTENSITY, TOTAL_AREA_INTENSITY, "\
" APEX_INTENSITY, APEX_RT, RT_FWHM, MASSERROR_PPM, TOTAL_MI, VAR_INTENSITY_SCORE, VAR_INTENSITY_RATIO_SCORE, "\
" VAR_LOG_INTENSITY, VAR_XCORR_COELUTION, VAR_XCORR_SHAPE, VAR_LOG_SN_SCORE, "\
" VAR_MASSDEV_SCORE, VAR_MI_SCORE, VAR_MI_RATIO_SCORE, "\
" VAR_ISOTOPE_CORRELATION_SCORE, VAR_ISOTOPE_OVERLAP_SCORE, "\
" EXP_IM, EXP_IM_LEFTWIDTH, EXP_IM_RIGHTWIDTH, DELTA_IM, VAR_IM_DELTA_SCORE, VAR_IM_LOG_INTENSITY, "\
" VAR_IM_XCORR_COELUTION_CONTRAST, VAR_IM_XCORR_SHAPE_CONTRAST, VAR_IM_XCORR_COELUTION_COMBINED, VAR_IM_XCORR_SHAPE_COMBINED "
<< (enable_compute_peak_shape_metrics ? ", START_POSITION_AT_5, END_POSITION_AT_5, "
"START_POSITION_AT_10, END_POSITION_AT_10, START_POSITION_AT_50, END_POSITION_AT_50, "
"TOTAL_WIDTH, TAILING_FACTOR, ASYMMETRY_FACTOR, SLOPE_OF_BASELINE, BASELINE_DELTA_2_HEIGHT, "
"POINTS_ACROSS_BASELINE, POINTS_ACROSS_HALF_HEIGHT" : "")
<< ") VALUES ("
<< feature_id << ", "
<< id_decoy_transition_names[i] << ", "
<< id_decoy_area_intensity[i] << ", "
<< id_decoy_total_area_intensity[i] << ", "
<< id_decoy_apex_intensity[i] << ", "
<< id_decoy_peak_apex_position[i] << ", "
<< id_decoy_peak_fwhm[i] << ", "
<< id_decoy_ind_massdev_score[i] << ", "
<< id_decoy_total_mi[i] << ", "
<< id_decoy_intensity_score[i] << ", "
<< id_decoy_intensity_ratio_score[i] << ", "
<< id_decoy_log_intensity[i] << ", "
<< id_decoy_ind_xcorr_coelution[i] << ", "
<< id_decoy_ind_xcorr_shape[i] << ", "
<< id_decoy_ind_log_sn_score[i] << ", "
<< id_decoy_ind_massdev_score[i] << ", "
<< id_decoy_ind_mi_score[i] << ", "
<< id_decoy_ind_mi_ratio_score[i] << ", "
<< id_decoy_ind_isotope_correlation[i] << ", "
<< id_decoy_ind_isotope_overlap[i] << ", "
<< id_decoy_ind_im_drift[i] << ", "
<< id_decoy_ind_im_drift_left[i] << ", "
<< id_decoy_ind_im_drift_right[i] << ", "
<< id_decoy_ind_im_delta[i] << ", "
<< id_decoy_ind_ind_im_delta_score[i] << ", "
<< id_decoy_ind_log_intensity[i] << ", "
<< id_decoy_ind_im_contrast_coelution[i] << ", "
<< id_decoy_ind_im_contrast_shape[i] << ", "
<< id_decoy_ind_im_sum_contrast_coelution[i] << ", "
<< id_decoy_ind_im_sum_contrast_shape[i];
if (enable_compute_peak_shape_metrics)
{
sql_feature_uis_transition << ", "
<< decoy_start_position_at_5[i] << ", "
<< decoy_end_position_at_5[i] << ", "
<< decoy_start_position_at_10[i] << ", "
<< decoy_end_position_at_10[i] << ", "
<< decoy_start_position_at_50[i] << ", "
<< decoy_end_position_at_50[i] << ", "
<< decoy_total_width[i] << ", "
<< decoy_tailing_factor[i] << ", "
<< decoy_asymmetry_factor[i] << ", "
<< decoy_slope_of_baseline[i] << ", "
<< decoy_baseline_delta_2_height[i] << ", "
<< decoy_points_across_baseline[i] << ", "
<< decoy_points_across_half_height[i];
}
sql_feature_uis_transition << "); ";
}
}
}
}
if (enable_uis_scoring_ && !sql_feature_uis_transition.str().empty() )
{
sql << sql_feature.str() << sql_feature_ms1.str() << sql_feature_ms1_precursor.str() << sql_feature_ms2.str() << sql_feature_uis_transition.str();
}
else
{
sql << sql_feature.str() << sql_feature_ms1.str() << sql_feature_ms1_precursor.str() << sql_feature_ms2.str() << sql_feature_ms2_transition.str();
}
return sql.str();
}
void OpenSwathOSWWriter::writeLines(const std::vector<String>& to_osw_output)
{
SqliteConnector conn(output_filename_);
conn.executeStatement("BEGIN TRANSACTION");
for (Size i = 0; i < to_osw_output.size(); i++)
{
conn.executeStatement(to_osw_output[i]);
}
conn.executeStatement("END TRANSACTION");
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/MRMRTNormalizer.cpp | .cpp | 10,205 | 280 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: George Rosenberger $
// $Authors: George Rosenberger, Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/MRMRTNormalizer.h>
#include <OpenMS/ML/REGRESSION/LinearRegression.h>
#include <OpenMS/CONCEPT/LogStream.h> // OPENMS_LOG_DEBUG
#include <OpenMS/ML/RANSAC/RANSAC.h> // RANSAC algorithm
#include <numeric>
#include <boost/math/special_functions/erf.hpp>
namespace OpenMS
{
std::vector<std::pair<double, double> > MRMRTNormalizer::removeOutliersRANSAC(
const std::vector<std::pair<double, double> >& pairs, double rsq_limit,
double coverage_limit, size_t max_iterations, double max_rt_threshold, size_t sampling_size)
{
size_t n = sampling_size;
size_t k = (size_t)max_iterations;
double t = max_rt_threshold*max_rt_threshold;
size_t d = (size_t)(coverage_limit*pairs.size());
if (n < 5)
{
throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"UnableToFit-LinearRegression-RTNormalizer", "WARNING: RANSAC: " +
String(n) + " sampled RT peptides is below limit of 5 peptides required for the RANSAC outlier detection algorithm.");
}
if (pairs.size() < 30)
{
throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"UnableToFit-LinearRegression-RTNormalizer", "WARNING: RANSAC: " +
String(pairs.size()) + " input RT peptides is below limit of 30 peptides required for the RANSAC outlier detection algorithm.");
}
Math::RANSAC<Math::RansacModelLinear> r;
std::vector<std::pair<double, double> > new_pairs = r.ransac(pairs, n, k, t, d);
double bestrsq = Math::RansacModelLinear::rm_rsq_impl(new_pairs.begin(), new_pairs.end());
if (bestrsq < rsq_limit)
{
throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"UnableToFit-LinearRegression-RTNormalizer", "WARNING: rsq: " +
String(bestrsq) + " is below limit of " +
String(rsq_limit) +
". Validate assays for RT-peptides and adjust the limit for rsq or coverage.");
}
if (new_pairs.size() < d)
{
throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"UnableToFit-LinearRegression-RTNormalizer", "WARNING: number of data points: " +
String(new_pairs.size()) +
" is below limit of " + String(d) +
". Validate assays for RT-peptides and adjust the limit for rsq or coverage.");
}
return new_pairs;
}
int MRMRTNormalizer::jackknifeOutlierCandidate_(const std::vector<double>& x, const std::vector<double>& y)
{
// Returns candidate outlier: A linear regression and rsq is calculated for
// the data points with one removed pair. The combination resulting in
// highest rsq is considered corresponding to the outlier candidate. The
// corresponding iterator position is then returned.
std::vector<double> x_tmp, y_tmp, rsq_tmp;
for (Size i = 0; i < x.size(); i++)
{
x_tmp = x;
y_tmp = y;
x_tmp.erase(x_tmp.begin() + i);
y_tmp.erase(y_tmp.begin() + i);
Math::LinearRegression lin_reg;
lin_reg.computeRegression(0.95, x_tmp.begin(), x_tmp.end(), y_tmp.begin());
rsq_tmp.push_back(lin_reg.getRSquared());
}
return max_element(rsq_tmp.begin(), rsq_tmp.end()) - rsq_tmp.begin();
}
int MRMRTNormalizer::residualOutlierCandidate_(const std::vector<double>& x, const std::vector<double>& y)
{
// Returns candidate outlier: A linear regression and residuals are calculated for
// the data points. The one with highest residual error is selected as the outlier candidate. The
// corresponding iterator position is then returned.
Math::LinearRegression lin_reg;
lin_reg.computeRegression(0.95, x.begin(), x.end(), y.begin());
std::vector<double> residuals;
for (Size i = 0; i < x.size(); i++)
{
double residual = fabs(y[i] - (lin_reg.getIntercept() + (lin_reg.getSlope() * x[i])));
residuals.push_back(residual);
}
return max_element(residuals.begin(), residuals.end()) - residuals.begin();
}
std::vector<std::pair<double, double> > MRMRTNormalizer::removeOutliersIterative(
const std::vector<std::pair<double, double> >& pairs, double rsq_limit,
double coverage_limit, bool use_chauvenet, const std::string& method)
{
if (pairs.size() < 3)
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Need at least 3 data points to remove outliers for the regression.");
}
// Removes outliers from vector of pairs until upper rsq and lower coverage limits are reached.
std::vector<double> x, y;
double confidence_interval = 0.95;
std::vector<std::pair<double, double> > pairs_corrected;
for (auto it = pairs.begin(); it != pairs.end(); ++it)
{
x.push_back(it->first);
y.push_back(it->second);
OPENMS_LOG_DEBUG << "RT Normalization pairs: " << it->first << " : " << it->second << std::endl;
}
double rsq;
rsq = 0;
while (x.size() >= coverage_limit * pairs.size() && rsq < rsq_limit)
{
Math::LinearRegression lin_reg;
lin_reg.computeRegression(confidence_interval, x.begin(), x.end(), y.begin());
rsq = lin_reg.getRSquared();
if (rsq < rsq_limit)
{
std::vector<double> residuals;
// calculate residuals
for (auto it = pairs.begin(); it != pairs.end(); ++it)
{
double intercept = lin_reg.getIntercept();
double slope = (double)lin_reg.getSlope();
residuals.push_back(fabs(it->second - (intercept + it->first * slope)));
OPENMS_LOG_DEBUG << " RT Normalization residual is " << residuals.back() << std::endl;
}
int pos;
if (method == "iter_jackknife")
{
// get candidate outlier: removal of which datapoint results in best rsq?
pos = jackknifeOutlierCandidate_(x, y);
}
else if (method == "iter_residual")
{
// get candidate outlier: removal of datapoint with largest residual?
pos = residualOutlierCandidate_(x, y);
}
else
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
String("Method ") + method + " is not a valid method for removeOutliersIterative");
}
// remove if residual is an outlier according to Chauvenet's criterion
// or if testing is turned off
OPENMS_LOG_DEBUG << " Got outlier candidate " << pos << "(" << x[pos] << " / " << y[pos] << std::endl;
if (!use_chauvenet || chauvenet(residuals, pos))
{
x.erase(x.begin() + pos);
y.erase(y.begin() + pos);
}
else
{
break;
}
}
else
{
break;
}
}
if (rsq < rsq_limit)
{
// If the rsq is below the limit, this is an indication that something went wrong!
throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"UnableToFit-LinearRegression-RTNormalizer", "WARNING: rsq: " +
String(rsq) + " is below limit of " +
String(rsq_limit) +
". Validate assays for RT-peptides and adjust the limit for rsq or coverage.");
}
for (Size i = 0; i < x.size(); i++)
{
pairs_corrected.emplace_back(x[i], y[i]);
}
#ifdef DEBUG_MRMRTNORMALIZER
std::cout << "=======STARTPOINTS=======" << std::endl;
for (std::vector<std::pair<double, double> >::iterator it = pairs_corrected.begin(); it != pairs_corrected.end(); ++it)
{
std::cout << it->first << "\t" << it->second << std::endl;
}
std::cout << "=======ENDPOINTS=======" << std::endl;
#endif
return pairs_corrected;
}
bool MRMRTNormalizer::chauvenet(const std::vector<double>& residuals, int pos)
{
double criterion = 1.0 / (2 * residuals.size());
double prob = MRMRTNormalizer::chauvenet_probability(residuals, pos);
OPENMS_LOG_DEBUG << " Chauvinet testing " << prob << " < " << criterion << std::endl;
return prob < criterion;
}
double MRMRTNormalizer::chauvenet_probability(const std::vector<double>& residuals, int pos)
{
double mean = std::accumulate(residuals.begin(), residuals.end(), 0.0) / residuals.size();
double stdev = std::sqrt(
std::inner_product(residuals.begin(), residuals.end(), residuals.begin(), 0.0
) / residuals.size() - mean * mean);
double d = fabs(residuals[pos] - mean) / stdev;
d /= pow(2.0, 0.5);
double prob = std::erfc(d);
return prob;
}
bool MRMRTNormalizer::computeBinnedCoverage(const std::pair<double,double> & rtRange,
const std::vector<std::pair<double, double> > & pairs, int nrBins,
int minPeptidesPerBin, int minBinsFilled)
{
std::vector<int> binCounter(nrBins, 0);
for (std::vector<std::pair<double, double> >::const_iterator pair_it = pairs.begin(); pair_it != pairs.end(); ++pair_it)
{
double normRT = (pair_it->second - rtRange.first) / (rtRange.second - rtRange.first); // compute a value between [0,1)
normRT *= nrBins;
int bin = (int)normRT;
if (bin >= nrBins)
{
// this should never happen, but just to make sure
std::cerr << "MRMRTNormalizer::computeBinnedCoverage : computed bin was too large (" <<
bin << "), setting it to the maximum of " << nrBins - 1 << std::endl;
bin = nrBins - 1;
}
binCounter[ bin ]++;
}
int binsFilled = 0;
for (Size i = 0; i < binCounter.size(); i++)
{
OPENMS_LOG_DEBUG <<" In bin " << i << " out of " << binCounter.size() <<
" we have " << binCounter[i] << " peptides " << std::endl;
if (binCounter[i] >= minPeptidesPerBin)
{
binsFilled++;
}
}
return (binsFilled >= minBinsFilled);
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/MRMFeaturePicker.cpp | .cpp | 493 | 14 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Douglas McCloskey, Pasquale Domenico Colaianni $
// $Authors: Douglas McCloskey, Pasquale Domenico Colaianni $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/MRMFeaturePicker.h>
namespace OpenMS
{
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/TargetedSpectraExtractor.cpp | .cpp | 45,870 | 1,133 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Douglas McCloskey, Pasquale Domenico Colaianni $
// $Authors: Douglas McCloskey, Pasquale Domenico Colaianni $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/TargetedSpectraExtractor.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/DATASTRUCTURES/String.h>
#include <OpenMS/PROCESSING/NOISEESTIMATION/SignalToNoiseEstimatorMedian.h>
#include <OpenMS/PROCESSING/SMOOTHING/GaussFilter.h>
#include <OpenMS/PROCESSING/SMOOTHING/SavitzkyGolayFilter.h>
#include <OpenMS/PROCESSING/CENTROIDING/PeakPickerHiRes.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/PROCESSING/DEISOTOPING/Deisotoper.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <OpenMS/KERNEL/RangeUtils.h>
#include <OpenMS/ANALYSIS/ID/AccurateMassSearchEngine.h>
#include <OpenMS/ANALYSIS/OPENSWATH/TransitionTSVFile.h>
namespace OpenMS
{
  // Constructor: registers this class's own defaults plus the parameter
  // subsections of the embedded smoothing / peak-picking / mass-search
  // algorithms, then synchronizes param_ with the defaults.
  TargetedSpectraExtractor::TargetedSpectraExtractor() :
    DefaultParamHandler("TargetedSpectraExtractor")
  {
    // Own parameters (rt_window, mz_tolerance, scoring weights, ...).
    getDefaultParameters(defaults_);

    // Subsections for the wrapped algorithms; values here override the
    // algorithms' own defaults.
    subsections_.emplace_back("SavitzkyGolayFilter");
    defaults_.setValue("SavitzkyGolayFilter:frame_length", 15);
    defaults_.setValue("SavitzkyGolayFilter:polynomial_order", 3);

    subsections_.emplace_back("GaussFilter");
    defaults_.setValue("GaussFilter:gaussian_width", 0.2);

    subsections_.emplace_back("PeakPickerHiRes");
    defaults_.setValue("PeakPickerHiRes:signal_to_noise", 1.0);

    // Pull in all AccurateMassSearchEngine defaults under its own prefix.
    defaults_.insert("AccurateMassSearchEngine:", AccurateMassSearchEngine().getDefaults());
    defaults_.setValue("AccurateMassSearchEngine:keep_unidentified_masses", "false");
    defaults_.setValidStrings("AccurateMassSearchEngine:keep_unidentified_masses", {"true","false"});

    // write defaults into Param object param_
    defaultsToParam_();
    updateMembers_();
  }
  // Caches all parameter values from param_ into member variables so the
  // hot paths don't pay a Param lookup per access. Called automatically by
  // DefaultParamHandler whenever parameters change.
  void TargetedSpectraExtractor::updateMembers_()
  {
    // Annotation / selection parameters.
    rt_window_ = (double)param_.getValue("rt_window");
    min_select_score_ = (double)param_.getValue("min_select_score");
    mz_tolerance_ = (double)param_.getValue("mz_tolerance");
    mz_unit_is_Da_ = param_.getValue("mz_unit_is_Da").toBool();
    // Smoothing / picking parameters.
    use_gauss_ = param_.getValue("use_gauss").toBool();
    peak_height_min_ = (double)param_.getValue("peak_height_min");
    peak_height_max_ = (double)param_.getValue("peak_height_max");
    fwhm_threshold_ = (double)param_.getValue("fwhm_threshold");
    // Spectrum scoring weights.
    tic_weight_ = (double)param_.getValue("tic_weight");
    fwhm_weight_ = (double)param_.getValue("fwhm_weight");
    snr_weight_ = (double)param_.getValue("snr_weight");
    // Spectral matching parameters.
    top_matches_to_report_ = (Size)param_.getValue("top_matches_to_report");
    min_match_score_ = (double)param_.getValue("min_match_score");
    // Transition extraction limits.
    min_fragment_mz_ = (double) param_.getValue("min_fragment_mz");
    max_fragment_mz_ = (double) param_.getValue("max_fragment_mz");
    relative_allowable_product_mass_ = (double) param_.getValue("relative_allowable_product_mass");
    // Deisotoping parameters (forwarded to Deisotoper).
    deisotoping_use_deisotoper_ = param_.getValue("deisotoping:use_deisotoper").toBool();
    deisotoping_fragment_tolerance_ = (double) param_.getValue("deisotoping:fragment_tolerance");
    deisotoping_fragment_unit_ = param_.getValue("deisotoping:fragment_unit").toString();
    deisotoping_min_charge_ = param_.getValue("deisotoping:min_charge");
    deisotoping_max_charge_ = param_.getValue("deisotoping:max_charge");
    deisotoping_min_isopeaks_ = param_.getValue("deisotoping:min_isopeaks");
    deisotoping_max_isopeaks_ = param_.getValue("deisotoping:max_isopeaks");
    deisotoping_keep_only_deisotoped_ = param_.getValue("deisotoping:keep_only_deisotoped").toBool();
    deisotoping_annotate_charge_ = param_.getValue("deisotoping:annotate_charge").toBool();
    // NOTE: key spelling "threashold" is historical; it must match the key
    // registered in getDefaultParameters().
    max_precursor_mass_threashold_ = (double) param_.getValue("max_precursor_mass_threashold");
  }
void TargetedSpectraExtractor::getDefaultParameters(Param& params) const
{
params.clear();
params.setValue(
"rt_window",
30.0,
"Precursor Retention Time window used during the annotation phase.\n"
"For each transition in the target list, annotateSpectra() looks for "
"the first spectrum whose RT time falls within the RT Window, whose "
"left and right limits are computed at each analyzed spectrum.\n"
"Also the spectrum's percursor MZ is checked against the transition MZ."
);
params.setValue(
"min_select_score",
0.7,
"Used in selectSpectra(), after the spectra have been assigned a score.\n"
"Remained transitions will have at least one spectrum assigned.\n"
"Each spectrum needs to have a score >= min_select_score_ to be valid, "
"otherwise it gets filtered out."
);
params.setMinFloat("min_select_score", 0.0);
params.setValue(
"mz_tolerance",
0.1,
"Precursor MZ tolerance used during the annotation phase.\n"
"For each transition in the target list, annotateSpectra() looks for "
"the first spectrum whose precursor MZ is close enough (+-mz_tolerance_) "
"to the transition's MZ.\n"
"Also the spectrum's precursor RT is checked against the transition RT."
);
params.setValue("mz_unit_is_Da", "true", "Unit to use for mz_tolerance_ and fwhm_threshold_: true for Da, false for ppm.");
params.setValidStrings("mz_unit_is_Da", {"false","true"});
params.setValue("use_gauss", "true", "Use Gaussian filter for smoothing (alternative is Savitzky-Golay filter)");
params.setValidStrings("use_gauss", {"false","true"});
params.setValue("peak_height_min", 0.0, "Used in pickSpectrum(), a peak's intensity needs to be >= peak_height_min_ for it to be picked.");
params.setMinFloat("peak_height_min", 0.0);
params.setValue("peak_height_max", std::numeric_limits<double>::max(), "Used in pickSpectrum(), a peak's intensity needs to be <= peak_height_max_ for it to be picked.");
params.setMinFloat("peak_height_max", 0.0);
params.setValue("fwhm_threshold", 0.0, "Used in pickSpectrum(), a peak's FWHM needs to be >= fwhm_threshold_ for it to be picked.");
params.setMinFloat("fwhm_threshold", 0.0);
params.setValue("tic_weight", 1.0, "TIC weight when scoring spectra.");
params.setMinFloat("tic_weight", 0.0);
params.setValue("fwhm_weight", 1.0, "FWHM weight when scoring spectra.");
params.setMinFloat("fwhm_weight", 0.0);
params.setValue("snr_weight", 1.0, "SNR weight when scoring spectra.");
params.setMinFloat("snr_weight", 0.0);
params.setValue(
"top_matches_to_report",
5,
"The number of matches to output from `matchSpectrum()`. "
"These will be the matches of highest scores, sorted in descending order."
);
params.setMinInt("top_matches_to_report", 1);
params.setValue(
"min_match_score",
0.8,
"Minimum score for a match to be considered valid in `matchSpectrum()`."
);
params.setMinFloat("min_match_score", 0.0);
params.setMaxFloat("min_match_score", 1.0);
params.setValue("min_fragment_mz", 0.0, "Minimal m/z of a fragment ion choosen as a transition");
params.setValue("max_fragment_mz", 2000.0, "Maximal m/z of a fragment ion choosen as a transition");
params.setValue("relative_allowable_product_mass", 10.0, "Threshold m/z of a product relatively to the precurosor m/z (can be negative)");
params.setValue("deisotoping:use_deisotoper", "false", "Use Deisotoper (if no fragment annotation is used)");
params.setValue("deisotoping:fragment_tolerance", 1.0, "Tolerance used to match isotopic peaks");
params.setValue("deisotoping:fragment_unit", "ppm", "Unit of the fragment tolerance");
params.setValidStrings("deisotoping:fragment_unit", {"ppm","Da"});
params.setValue("deisotoping:min_charge", 1, "The minimum charge considered");
params.setMinInt("deisotoping:min_charge", 1);
params.setValue("deisotoping:max_charge", 1, "The maximum charge considered");
params.setMinInt("deisotoping:max_charge", 1);
params.setValue("deisotoping:min_isopeaks", 2, "The minimum number of isotopic peaks (at least 2) required for an isotopic cluster");
params.setMinInt("deisotoping:min_isopeaks", 2);
params.setValue("deisotoping:max_isopeaks", 3, "The maximum number of isotopic peaks (at least 2) considered for an isotopic cluster");
params.setMinInt("deisotoping:max_isopeaks", 3);
params.setValue("deisotoping:keep_only_deisotoped", "false", "Only monoisotopic peaks of fragments with isotopic pattern are retained");
params.setValue("deisotoping:annotate_charge", "false", "Annotate the charge to the peaks");
params.setValue("max_precursor_mass_threashold", 10.0, "Tolerance used to set intensity to zero for peaks with mz higher than precursor mz");
}
  // Annotates MS2 spectra with the MS1 features (or their subordinates)
  // whose RT/MZ fall within the configured windows of the spectrum's
  // precursor. For every match, a copy of the spectrum named after the
  // feature's "PeptideRef" is appended to annotated_spectra and a matching
  // entry is appended to ms2_features.
  void TargetedSpectraExtractor::annotateSpectra(
    const std::vector<MSSpectrum>& spectra,
    const FeatureMap& ms1_features,
    FeatureMap& ms2_features,
    std::vector<MSSpectrum>& annotated_spectra) const
  {
    annotated_spectra.clear();
    for (const auto& spectrum : spectra)
    {
      if (spectrum.getMSLevel() == 1)
      {
        continue; // we want to annotate MS2 spectra only
      }
      const double spectrum_rt = spectrum.getRT();
      const std::vector<Precursor>& precursors = spectrum.getPrecursors();
      if (precursors.empty())
      {
        OPENMS_LOG_WARN << "annotateSpectra(): No precursor MZ found. Setting spectrum_mz to 0.\n";
      }
      // spectrum_mz == 0.0 acts as a sentinel: the MZ check is skipped below.
      const double spectrum_mz = precursors.empty() ? 0.0 : precursors.front().getMZ();
      // Lambda to check the mz/rt thresholds.
      // NOTE(review): mz_window and rt_window are both interpreted as FULL
      // window widths here (+- window/2), whereas the TargetedExperiment
      // overload of annotateSpectra() uses +- mz_tolerance_ directly and
      // honors mz_unit_is_Da_ -- confirm which convention is intended.
      auto checkRtAndMzTol = [](const double& spectrum_mz, const double& spectrum_rt,
        const double& target_mz, const double& target_rt, const double& mz_window, const double& rt_window)
      {
        const double rt_left_lim = spectrum_rt - rt_window / 2.0;
        const double rt_right_lim = spectrum_rt + rt_window / 2.0;
        const double mz_left_lim = spectrum_mz - mz_window / 2.0;
        const double mz_right_lim = spectrum_mz + mz_window / 2.0;
        if (spectrum_mz != 0.0)
        {
          return target_rt >= rt_left_lim && target_rt <= rt_right_lim && target_mz >= mz_left_lim && target_mz <= mz_right_lim;
        }
        else
        {
          // No precursor MZ available: fall back to an RT-only check.
          return target_rt >= rt_left_lim && target_rt <= rt_right_lim;
        }
      };
      // Lambda to create the feature: on an RT/MZ match, record the
      // annotated spectrum and a corresponding MS2 feature carrying the
      // identifying meta values.
      auto construct_feature = [checkRtAndMzTol, spectrum_rt, spectrum_mz, &ms2_features, &annotated_spectra, &spectrum](
        const OpenMS::Feature& feature, const double& mz_tol, const double& rt_win)
      {
        const auto& peptide_ref_s = feature.getMetaValue("PeptideRef");
        const auto& native_id_s = feature.getMetaValue("native_id");
        // check for null annotations resulting from unannotated features
        if (peptide_ref_s != "null")
        {
          const double target_mz = feature.getMZ();
          const double target_rt = feature.getRT();
          if (checkRtAndMzTol(spectrum_mz, spectrum_rt, target_mz, target_rt, mz_tol, rt_win))
          {
            OPENMS_LOG_DEBUG << "annotateSpectra(): " << peptide_ref_s << "]";
            OPENMS_LOG_DEBUG << " (target_rt: " << target_rt << ") (target_mz: " << target_mz << ")\n";
            // Spectrum copy is named after the matched feature.
            MSSpectrum annotated_spectrum = spectrum;
            annotated_spectrum.setName(peptide_ref_s);
            annotated_spectra.push_back(std::move(annotated_spectrum));
            // fill the ms2 features map
            Feature ms2_feature;
            ms2_feature.setUniqueId();
            ms2_feature.setRT(spectrum_rt);
            ms2_feature.setMZ(spectrum_mz);
            ms2_feature.setIntensity(feature.getIntensity());
            ms2_feature.setMetaValue("native_id", native_id_s);
            ms2_feature.setMetaValue("PeptideRef", peptide_ref_s);
            ms2_features.push_back(std::move(ms2_feature));
          }
        }
      };
      // Prefer subordinate features (individual adducts/traces) when
      // present; otherwise match against the top-level feature itself.
      for (const auto& feature : ms1_features)
      {
        if (!feature.getSubordinates().empty())
        {
          // iterate through the subordinate level
          for (const auto& subordinate : feature.getSubordinates())
          {
            construct_feature(subordinate, mz_tolerance_, rt_window_);
          }
        }
        else
        {
          construct_feature(feature, mz_tolerance_, rt_window_);
        }
      }
    }
  }
  // Runs AccurateMassSearchEngine over feat_map (which annotates features
  // with peptide identifications) and rebuilds feat_map_output so that each
  // identified hit becomes its own feature with a single subordinate
  // carrying the identification meta values. Features without an identifier
  // are kept only if add_unidentified_features is true.
  void TargetedSpectraExtractor::searchSpectrum(OpenMS::FeatureMap& feat_map,
    OpenMS::FeatureMap& feat_map_output, bool add_unidentified_features) const
  {
    OpenMS::AccurateMassSearchEngine ams;
    OpenMS::MzTab output;
    // Forward the "AccurateMassSearchEngine:" parameter subsection.
    ams.setParameters(param_.copy("AccurateMassSearchEngine:", true));
    ams.init();
    ams.run(feat_map, output);

    // Remake the feature map replacing the peptide hits as features/sub-features
    feat_map_output.clear();
    for (const OpenMS::Feature& feature : feat_map)
    {
      const auto& peptide_identifications = feature.getPeptideIdentifications();
      if (peptide_identifications.size())
      {
        for (const auto& ident : peptide_identifications)
        {
          for (const auto& hit : ident.getHits())
          {
            // f is the new parent feature; s is a copy of the original
            // feature that becomes its single subordinate.
            OpenMS::Feature f;
            OpenMS::Feature s = feature;
            f.setUniqueId();
            s.setUniqueId();
            // AccurateMassSearchEngine reports "null" for unmatched masses.
            if (hit.getMetaValue("identifier").toStringList().at(0) != "null")
            {
              f.setMetaValue("PeptideRef", hit.getMetaValue("identifier").toStringList().at(0));
              s.setMetaValue("PeptideRef", hit.getMetaValue("identifier").toStringList().at(0));
              // native_id is built as "<chemical_formula>;<modifications>".
              std::string native_id = hit.getMetaValue("chemical_formula").toString() + ";" + hit.getMetaValue("modifications").toString();
              s.setMetaValue("native_id", native_id);
              s.setMetaValue("identifier", hit.getMetaValue("identifier"));
              s.setMetaValue("description", hit.getMetaValue("description"));
              s.setMetaValue("modifications", hit.getMetaValue("modifications"));
              std::string adducts;
              try
              {
                // Extract adduct: the first letter stands for the actual metabolite and then everything after are the adducts up until the ";"
                // For example, M-H;1- will give -H
                std::string str = hit.getMetaValue("modifications").toString();
                std::string delimiter = ";";
                adducts = str.substr(1, str.find(delimiter) - 1);
              }
              catch (const std::exception& e)
              {
                // Malformed "modifications" string: log and keep adducts empty.
                OPENMS_LOG_ERROR << e.what();
              }
              s.setMetaValue("adducts", adducts);
              // Mass of the charge-carrying adduct: observed neutral mass
              // (mz * |charge| + mz error) minus the formula's mono weight.
              OpenMS::EmpiricalFormula chemform(hit.getMetaValue("chemical_formula").toString());
              double adduct_mass = s.getMZ() * std::abs(hit.getCharge()) + static_cast<double>(hit.getMetaValue("mz_error_Da")) - chemform.getMonoWeight();
              s.setMetaValue("dc_charge_adduct_mass", adduct_mass);
              s.setMetaValue("chemical_formula", hit.getMetaValue("chemical_formula"));
              s.setMetaValue("mz_error_ppm", hit.getMetaValue("mz_error_ppm"));
              s.setMetaValue("mz_error_Da", hit.getMetaValue("mz_error_Da"));
              s.setCharge(hit.getCharge());
              f.setSubordinates({s});
              feat_map_output.push_back(f);
            }
            else if (add_unidentified_features)
            {
              // "PeptideRef" metavalue should have been set during peak picking, but if not...
              // Use the observed m/z as a stand-in identifier.
              std::ostringstream mass_of_the_peak;
              mass_of_the_peak << s.getMZ();
              // Fill in accurateMassSearch metavalues
              DataValue identifiers(std::vector<std::string>({mass_of_the_peak.str()}));
              s.setMetaValue("identifier", identifiers);
              s.setMetaValue("description", "");
              s.setMetaValue("modifications", "");
              s.setMetaValue("adducts", "");
              s.setMetaValue("dc_charge_adduct_mass", 0);
              s.setMetaValue("chemical_formula", "");
              s.setMetaValue("mz_error_ppm", 0);
              s.setMetaValue("mz_error_Da", 0);
              // s.setCharge(hit.getCharge()); // The polarity should have been set during peak picking
              f.setSubordinates({s});
              feat_map_output.push_back(f);
            }
          }
        }
      }
    }
  }
void TargetedSpectraExtractor::annotateSpectra(
const std::vector<MSSpectrum>& spectra,
const TargetedExperiment& targeted_exp,
std::vector<MSSpectrum>& annotated_spectra,
FeatureMap& features,
const bool compute_features
) const
{
annotated_spectra.clear();
features.clear(true);
const std::vector<ReactionMonitoringTransition>& transitions = targeted_exp.getTransitions();
for (Size i = 0; i < spectra.size(); ++i)
{
const MSSpectrum& spectrum = spectra[i];
const double spectrum_rt = spectrum.getRT();
const double rt_left_lim = spectrum_rt - rt_window_ / 2.0;
const double rt_right_lim = spectrum_rt + rt_window_ / 2.0;
const std::vector<Precursor>& precursors = spectrum.getPrecursors();
if (precursors.empty())
{
OPENMS_LOG_WARN << "annotateSpectra(): No precursor MZ found. Setting spectrum_mz to 0.\n";
}
const double spectrum_mz = precursors.empty() ? 0.0 : precursors.front().getMZ();
const double mz_tolerance = mz_unit_is_Da_ ? mz_tolerance_ : mz_tolerance_ / 1e6;
// When spectrum_mz is 0, the mz check on transitions is inhibited
const double mz_left_lim = spectrum_mz ? spectrum_mz - mz_tolerance : std::numeric_limits<double>::min();
const double mz_right_lim = spectrum_mz ? spectrum_mz + mz_tolerance : std::numeric_limits<double>::max();
OPENMS_LOG_DEBUG << "annotateSpectra(): [" << i << "] (RT: " << spectrum_rt << ") (MZ: " << spectrum_mz << ")\n";
for (Size j = 0; j < transitions.size(); ++j)
{
const TargetedExperimentHelper::Peptide& peptide = targeted_exp.getPeptideByRef(transitions[j].getPeptideRef());
double target_rt = peptide.getRetentionTime();
if (peptide.getRetentionTimeUnit() == TargetedExperimentHelper::RetentionTime::RTUnit::MINUTE)
{
target_rt *= 60.0;
}
const double target_mz = transitions[j].getPrecursorMZ();
if (target_rt >= rt_left_lim && target_rt <= rt_right_lim &&
target_mz >= mz_left_lim && target_mz <= mz_right_lim)
{
OPENMS_LOG_DEBUG << "annotateSpectra(): [" << j << "][" << transitions[j].getPeptideRef() << "]";
OPENMS_LOG_DEBUG << " (target_rt: " << target_rt << ") (target_mz: " << target_mz << ")" << std::endl << '\n';
MSSpectrum annotated_spectrum = spectrum;
annotated_spectrum.setName(transitions[j].getPeptideRef());
annotated_spectra.push_back(annotated_spectrum);
if (compute_features)
{
Feature feature;
feature.setRT(spectrum_rt);
feature.setMZ(spectrum_mz);
feature.setMetaValue("transition_name", transitions[j].getPeptideRef());
features.push_back(feature);
}
}
}
}
OPENMS_LOG_DEBUG << "annotateSpectra(): (input size: " << spectra.size() << ") (annotated spectra: " << annotated_spectra.size() << ")\n\n";
}
void TargetedSpectraExtractor::annotateSpectra(
const std::vector<MSSpectrum>& spectra,
const TargetedExperiment& targeted_exp,
std::vector<MSSpectrum>& annotated_spectra
) const
{
FeatureMap features;
const bool compute_features { false };
annotateSpectra(spectra, targeted_exp, annotated_spectra, features, compute_features);
}
// Smooths `spectrum` (Gauss or Savitzky-Golay, per `use_gauss_`), peak-picks it
// with PeakPickerHiRes (FWHM reporting enabled), and filters the picked peaks
// by intensity range and minimum FWHM. If every peak fails the filter,
// `picked_spectrum` is returned empty.
// Throws Exception::IllegalArgument if the input is not sorted by position.
void TargetedSpectraExtractor::pickSpectrum(const MSSpectrum& spectrum, MSSpectrum& picked_spectrum) const
{
  if (!spectrum.isSorted())
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
      "Spectrum must be sorted by position");
  }
  // Smooth the spectrum (filter parameters come from this object's Param
  // sub-sections, so callers configure them via setParameters()).
  MSSpectrum smoothed_spectrum = spectrum;
  if (use_gauss_)
  {
    GaussFilter gauss;
    Param filter_parameters = gauss.getParameters();
    filter_parameters.update(param_.copy("GaussFilter:", true));
    gauss.setParameters(filter_parameters);
    gauss.filter(smoothed_spectrum);
  }
  else
  {
    SavitzkyGolayFilter sgolay;
    Param filter_parameters = sgolay.getParameters();
    filter_parameters.update(param_.copy("SavitzkyGolayFilter:", true));
    sgolay.setParameters(filter_parameters);
    sgolay.filter(smoothed_spectrum);
  }
  // Find initial seeds (peak picking)
  Param pepi_param = PeakPickerHiRes().getDefaults();
  pepi_param.update(param_.copy("PeakPickerHiRes:", true));
  // disable spacing constraints, since we're dealing with spectrum
  pepi_param.setValue("spacing_difference", 0.0);
  pepi_param.setValue("spacing_difference_gap", 0.0);
  // FWHM is reported in FloatDataArray 0 of the picked spectrum; the filter
  // below and downstream scoring both rely on that array.
  pepi_param.setValue("report_FWHM", "true");
  pepi_param.setValue("report_FWHM_unit", "absolute");
  picked_spectrum.clear(true);
  PeakPickerHiRes pp;
  pp.setParameters(pepi_param);
  pp.pick(smoothed_spectrum, picked_spectrum);
  std::vector<Int> peaks_pos_to_erase;
  // NOTE(review): for ppm units this divides by 1e6 without scaling by m/z —
  // confirm the threshold is meant as a fixed fraction.
  const double fwhm_threshold = mz_unit_is_Da_ ? fwhm_threshold_ : fwhm_threshold_ / 1e6;
  // Collect offending indices in DESCENDING order so the erase loop below can
  // remove them without invalidating the remaining (smaller) indices.
  for (Int i = picked_spectrum.size() - 1; i >= 0; --i)
  {
    if (picked_spectrum[i].getIntensity() < peak_height_min_ ||
        picked_spectrum[i].getIntensity() > peak_height_max_ ||
        picked_spectrum.getFloatDataArrays()[0][i] < fwhm_threshold)
    {
      peaks_pos_to_erase.push_back(i);
    }
  }
  if (peaks_pos_to_erase.size() != picked_spectrum.size()) // if not all peaks are to be removed
  {
    for (Int i : peaks_pos_to_erase) // then keep only the valid peaks (and fwhm)
    {
      // Erase the peak and its FWHM entry in lockstep to keep the arrays aligned.
      picked_spectrum.erase(picked_spectrum.begin() + i);
      picked_spectrum.getFloatDataArrays()[0].erase(picked_spectrum.getFloatDataArrays()[0].begin() + i);
    }
  }
  else // otherwise output an empty picked_spectrum
  {
    picked_spectrum.clear(true);
  }
  OPENMS_LOG_DEBUG << "pickSpectrum(): " << spectrum.getName() << " (input size: " <<
    spectrum.size() << ") (picked: " << picked_spectrum.size() << ")\n\n";
}
// Scores each annotated spectrum as a weighted sum of log10(total ion count),
// inverse average FWHM (from the matching picked spectrum), and average
// signal-to-noise ratio. Scores and their components are appended as
// FloatDataArrays 1..4 of the output spectra and, if `compute_features` is
// set, mirrored onto the corresponding features (with picked peaks as
// subordinates).
// Assumes `picked_spectra` is index-aligned with `annotated_spectra` — only
// the features/spectra size pair is validated here; TODO confirm callers
// guarantee the alignment.
// Throws Exception::InvalidSize if `compute_features` and the features map
// size differs from the number of spectra.
void TargetedSpectraExtractor::scoreSpectra(
  const std::vector<MSSpectrum>& annotated_spectra,
  const std::vector<MSSpectrum>& picked_spectra,
  FeatureMap& features,
  std::vector<MSSpectrum>& scored_spectra,
  const bool compute_features
) const
{
  scored_spectra.clear();
  scored_spectra.resize(annotated_spectra.size());
  if (compute_features && scored_spectra.size() != features.size())
  {
    throw Exception::InvalidSize(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, scored_spectra.size(), "scored_spectra size does not match features size");
  }
  for (Size i = 0; i < annotated_spectra.size(); ++i)
  {
    // Total ion count of the annotated spectrum.
    double total_tic { 0 };
    for (Size j = 0; j < annotated_spectra[i].size(); ++j)
    {
      total_tic += annotated_spectra[i][j].getIntensity();
    }
    // Average FWHM over the picked peaks (FloatDataArray 0 filled by
    // pickSpectrum()). Stays 0 if the array is absent.
    double avgFWHM { 0 };
    if (!picked_spectra[i].getFloatDataArrays().empty())
    {
      for (Size j = 0; j < picked_spectra[i].getFloatDataArrays()[0].size(); ++j)
      {
        avgFWHM += picked_spectra[i].getFloatDataArrays()[0][j];
      }
      avgFWHM /= picked_spectra[i].getFloatDataArrays()[0].size();
    }
    SignalToNoiseEstimatorMedian<MSSpectrum> sne;
    Param p;
    p.setValue("win_len", 40.0);
    p.setValue("noise_for_empty_window", 2.0);
    p.setValue("min_required_elements", 10);
    sne.setParameters(p);
    sne.init(annotated_spectra[i]);
    double avgSNR { 0 };
    for (Size j = 0; j < annotated_spectra[i].size(); ++j)
    {
      avgSNR += sne.getSignalToNoise(j);
    }
    // NOTE(review): divides by the spectrum size without an empty check, and
    // 1.0/avgFWHM below is +inf when avgFWHM is 0 (log10(0) is -inf likewise).
    // Presumably empty spectra were filtered out upstream — confirm.
    avgSNR /= annotated_spectra[i].size();
    const double log10_total_tic = log10(total_tic);
    const double inverse_avgFWHM = 1.0 / avgFWHM;
    // Final score: configurable weighted sum of the three components.
    const double score = log10_total_tic * tic_weight_ + inverse_avgFWHM * fwhm_weight_ + avgSNR * snr_weight_;
    scored_spectra[i] = annotated_spectra[i];
    // Arrays 1..4 carry the score and its components; array 0 is left as-is.
    scored_spectra[i].getFloatDataArrays().resize(5);
    scored_spectra[i].getFloatDataArrays()[1].setName("score");
    scored_spectra[i].getFloatDataArrays()[1].push_back(score);
    scored_spectra[i].getFloatDataArrays()[2].setName("log10_total_tic");
    scored_spectra[i].getFloatDataArrays()[2].push_back(log10_total_tic);
    scored_spectra[i].getFloatDataArrays()[3].setName("inverse_avgFWHM");
    scored_spectra[i].getFloatDataArrays()[3].push_back(inverse_avgFWHM);
    scored_spectra[i].getFloatDataArrays()[4].setName("avgSNR");
    scored_spectra[i].getFloatDataArrays()[4].push_back(avgSNR);
    if (compute_features)
    {
      // The intensity of a feature is (proportional to) its total ion count
      // http://www.openms.de/documentation/classOpenMS_1_1Feature.html
      features[i].setIntensity(score);
      features[i].setMetaValue("log10_total_tic", log10_total_tic);
      features[i].setMetaValue("inverse_avgFWHM", inverse_avgFWHM);
      features[i].setMetaValue("avgFWHM", avgFWHM);
      features[i].setMetaValue("avgSNR", avgSNR);
      // One subordinate per picked peak: m/z, intensity, and FWHM.
      std::vector<Feature> subordinates(picked_spectra[i].size());
      for (Size j = 0; j < picked_spectra[i].size(); ++j)
      {
        subordinates[j].setMZ(picked_spectra[i][j].getMZ());
        subordinates[j].setIntensity(picked_spectra[i][j].getIntensity());
        subordinates[j].setMetaValue("FWHM", picked_spectra[i].getFloatDataArrays()[0][j]);
      }
      features[i].setSubordinates(subordinates);
    }
  }
}
void TargetedSpectraExtractor::scoreSpectra(
const std::vector<MSSpectrum>& annotated_spectra,
const std::vector<MSSpectrum>& picked_spectra,
std::vector<MSSpectrum>& scored_spectra
) const
{
FeatureMap features;
const bool compute_features { false };
scoreSpectra(annotated_spectra, picked_spectra, features, scored_spectra, compute_features);
}
void TargetedSpectraExtractor::selectSpectra(
const std::vector<MSSpectrum>& scored_spectra,
const FeatureMap& features,
std::vector<MSSpectrum>& selected_spectra,
FeatureMap& selected_features,
const bool compute_features
) const
{
if (compute_features && scored_spectra.size() != features.size())
{
throw Exception::InvalidSize(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, scored_spectra.size(), "scored_spectra size does not match features size");
}
std::map<std::string,UInt> transition_best_spec;
for (UInt i = 0; i < scored_spectra.size(); ++i)
{
if (scored_spectra[i].getFloatDataArrays()[1][0] < min_select_score_)
{
continue;
}
const std::string& transition_name = scored_spectra[i].getName();
std::map<std::string,UInt>::const_iterator it = transition_best_spec.find(transition_name);
if (it == transition_best_spec.cend())
{
transition_best_spec.emplace(transition_name, i);
}
else if (scored_spectra[it->second].getFloatDataArrays()[1][0] <
scored_spectra[i].getFloatDataArrays()[1][0])
{
transition_best_spec.erase(transition_name);
transition_best_spec.emplace(transition_name, i);
}
}
selected_spectra.clear();
selected_features.clear(true);
for (const auto& m : transition_best_spec)
{
selected_spectra.push_back(scored_spectra[m.second]);
if (compute_features) selected_features.push_back(features[m.second]);
}
}
void TargetedSpectraExtractor::selectSpectra(
const std::vector<MSSpectrum>& scored_spectra,
std::vector<MSSpectrum>& selected_spectra
) const
{
FeatureMap dummy_features;
FeatureMap dummy_selected_features;
const bool compute_features { false };
selectSpectra(scored_spectra, dummy_features, selected_spectra, dummy_selected_features, compute_features);
}
// Full pipeline against a targeted assay library: annotate -> pick -> drop
// empty picks -> score -> select best spectrum per transition group.
void TargetedSpectraExtractor::extractSpectra(
  const MSExperiment& experiment,
  const TargetedExperiment& targeted_exp,
  std::vector<MSSpectrum>& extracted_spectra,
  FeatureMap& extracted_features,
  const bool compute_features
) const
{
  // Annotate the experiment's spectra with matching transitions.
  std::vector<MSSpectrum> annotated;
  FeatureMap features;
  annotateSpectra(experiment.getSpectra(), targeted_exp, annotated, features, compute_features);
  // Peak-pick every annotated spectrum.
  std::vector<MSSpectrum> picked(annotated.size());
  for (Size i = 0; i < annotated.size(); ++i)
  {
    pickSpectrum(annotated[i], picked[i]);
  }
  // Drop entries whose picked spectrum is empty; iterate backwards so the
  // remaining (smaller) indices stay valid while erasing.
  for (Int i = annotated.size() - 1; i >= 0; --i)
  {
    if (!picked[i].empty())
    {
      continue;
    }
    annotated.erase(annotated.begin() + i);
    picked.erase(picked.begin() + i);
    if (compute_features)
    {
      features.erase(features.begin() + i);
    }
  }
  // Score, then keep only the best spectrum per transition group.
  std::vector<MSSpectrum> scored;
  scoreSpectra(annotated, picked, features, scored, compute_features);
  selectSpectra(scored, features, extracted_spectra, extracted_features, compute_features);
}
void TargetedSpectraExtractor::extractSpectra(
const MSExperiment& experiment,
const TargetedExperiment& targeted_exp,
std::vector<MSSpectrum>& extracted_spectra
) const
{
FeatureMap extracted_features;
const bool compute_features { false };
extractSpectra(experiment, targeted_exp, extracted_spectra, extracted_features, compute_features);
}
void TargetedSpectraExtractor::extractSpectra(
const MSExperiment& experiment,
const FeatureMap& ms1_features,
std::vector<MSSpectrum>& extracted_spectra
) const
{
FeatureMap extracted_features;
extractSpectra(experiment, ms1_features, extracted_spectra, extracted_features, false);
}
void TargetedSpectraExtractor::extractSpectra(
  const MSExperiment& experiment,
  const FeatureMap& ms1_features,
  std::vector<MSSpectrum>& extracted_spectra,
  FeatureMap& extracted_features
) const
{
  // Convenience overload: feature computation is implied by the caller
  // providing an output feature map.
  extractSpectra(experiment, ms1_features, extracted_spectra, extracted_features, true);
}
void TargetedSpectraExtractor::extractSpectra(
const MSExperiment& experiment,
const FeatureMap& ms1_features,
std::vector<MSSpectrum>& extracted_spectra,
FeatureMap& extracted_features,
const bool compute_features
) const
{
// annotate spectra
std::vector<OpenMS::MSSpectrum> annotated_spectra;
OpenMS::FeatureMap ms2_features;
annotateSpectra(experiment.getSpectra(), ms1_features, ms2_features, annotated_spectra);
// pickSpectra
std::vector<MSSpectrum> picked_spectra(annotated_spectra.size());
for (Size i = 0; i < annotated_spectra.size(); ++i)
{
pickSpectrum(annotated_spectra[i], picked_spectra[i]);
}
// score and select
std::vector<OpenMS::MSSpectrum> scored_spectra;
scoreSpectra(annotated_spectra, picked_spectra, scored_spectra);
// select the best spectrum for each group of spectra having the same name
// NOTE: It maybe needed to take the top N instead of the top 1 spectra in the future
selectSpectra(scored_spectra, ms2_features, extracted_spectra, extracted_features, compute_features);
}
void TargetedSpectraExtractor::matchSpectrum(
const MSSpectrum& input_spectrum,
const Comparator& cmp,
std::vector<Match>& matches
) const
{
// TODO: remove times debug info
// std::clock_t start;
// start = std::clock();
matches.clear();
std::vector<std::pair<Size,double>> scores;
cmp.generateScores(input_spectrum, scores, min_match_score_);
// Sort the vector of scores
std::sort(scores.begin(), scores.end(),
[](const std::pair<Size,double>& a, const std::pair<Size,double>& b)
{
return a.second > b.second;
});
// Set the number of best matches to return
const Size n = std::min(top_matches_to_report_, scores.size());
// Construct a vector of n `Match`es
for (Size i = 0; i < n; ++i)
{
const Size spec_idx { scores[i].first };
const double spec_score { scores[i].second };
matches.emplace_back(cmp.getLibrary()[spec_idx], spec_score);
}
}
// Annotates each feature with the best library match for the corresponding
// spectrum (name, score, library comments); features without a match get empty
// annotations and are reported in a single warning at the end.
// NOTE: non-const because it temporarily forces `top_matches_to_report_` to 1.
// Throws Exception::InvalidSize if the spectra and features sizes differ.
void TargetedSpectraExtractor::targetedMatching(
  const std::vector<MSSpectrum>& spectra,
  const Comparator& cmp,
  FeatureMap& features
)
{
  if (spectra.size() != features.size())
  {
    throw Exception::InvalidSize(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, spectra.size(), "spectra size does not match features size");
  }
  std::vector<Size> no_matches_idx; // to keep track of those features without a match
  // Temporarily request only the single best match per spectrum.
  const Size tmp = top_matches_to_report_;
  top_matches_to_report_ = 1;
  // FIX: restore the member even if matchSpectrum() throws, so a failure does
  // not leave the object stuck with top_matches_to_report_ == 1.
  try
  {
    for (Size i = 0; i < spectra.size(); ++i)
    {
      std::vector<Match> matches;
      matchSpectrum(spectra[i], cmp, matches);
      if (!matches.empty())
      {
        features[i].setMetaValue("spectral_library_name", matches[0].spectrum.getName());
        features[i].setMetaValue("spectral_library_score", matches[0].score);
        const String& comments = matches[0].spectrum.metaValueExists("Comments") ?
          matches[0].spectrum.getMetaValue("Comments") : "";
        features[i].setMetaValue("spectral_library_comments", comments);
      }
      else
      {
        no_matches_idx.push_back(i);
        features[i].setMetaValue("spectral_library_name", "");
        features[i].setMetaValue("spectral_library_score", 0.0);
        features[i].setMetaValue("spectral_library_comments", "");
      }
    }
  }
  catch (...)
  {
    top_matches_to_report_ = tmp;
    throw;
  }
  top_matches_to_report_ = tmp;
  if (!no_matches_idx.empty())
  {
    String warn_msg = "No match was found for " + std::to_string(no_matches_idx.size()) + " `Feature`s. Indices: ";
    for (const Size idx : no_matches_idx)
    {
      warn_msg += std::to_string(idx) + " ";
    }
    // FIX: '\n' instead of std::endl — same output, no forced stream flush.
    OPENMS_LOG_WARN << '\n' << warn_msg << '\n';
  }
}
void TargetedSpectraExtractor::untargetedMatching(
const std::vector<MSSpectrum>& spectra,
const Comparator& cmp,
FeatureMap& features
)
{
features.clear(true);
std::vector<MSSpectrum> picked(spectra.size());
for (Size i = 0; i < spectra.size(); ++i)
{
pickSpectrum(spectra[i], picked[i]);
}
// remove empty picked<> spectra
for (Int i = spectra.size() - 1; i >= 0; --i)
{
if (picked[i].empty())
{
picked.erase(picked.begin() + i);
}
}
for (const MSSpectrum& spectrum : picked)
{
const std::vector<Precursor>& precursors = spectrum.getPrecursors();
if (precursors.empty())
{
OPENMS_LOG_WARN << "untargetedMatching(): No precursor MZ found. Setting spectrum_mz to 0.\n";
}
const double spectrum_mz = precursors.empty() ? 0.0 : precursors.front().getMZ();
Feature feature;
feature.setRT(spectrum.getRT());
feature.setMZ(spectrum_mz);
features.push_back(feature);
}
targetedMatching(picked, cmp, features);
}
// Builds a TargetedExperiment (proteins, peptides, transitions) from matched
// MS1 and MS2 feature maps. Entities are de-duplicated through std::maps keyed
// by their IDs; a peptide seen at several RTs accumulates all RTs. MS2
// fragments are filtered by the configured m/z window before becoming
// transitions. The result is validated with TransitionTSVFile at the end.
void TargetedSpectraExtractor::constructTransitionsList(const OpenMS::FeatureMap& ms1_features, const OpenMS::FeatureMap& ms2_features, TargetedExperiment& t_exp) const
{
  // Create a map based on PeptideRef between MS1 and MS2 features.
  // NOTE(review): stores pointers to subordinate Features — assumes
  // getSubordinates() returns a reference to storage owned by `ms2_features`
  // (which outlives this function); confirm it does not return by value.
  std::map<std::string, std::vector<const Feature*>> ms1_to_ms2;
  for (const auto& feature : ms2_features)
  {
    for (const auto& subordinate : feature.getSubordinates())
    {
      ms1_to_ms2[subordinate.getMetaValue("PeptideRef")].push_back(&subordinate);
    }
  }
  // Create individual maps for the transitions, peptides and proteins to account for the same IDs but different RTs
  std::map<std::string, ReactionMonitoringTransition> rmt_map;
  std::map<std::string, OpenMS::TargetedExperiment::Peptide> peptides_map;
  std::map<std::string, OpenMS::TargetedExperiment::Protein> proteins_map;
  for (const auto& ms1_feature : ms1_features)
  {
    std::string peptide_ref = ms1_feature.getMetaValue("PeptideRef");
    // One protein entry per PeptideRef; later duplicates are ignored.
    OpenMS::TargetedExperiment::Protein protein;
    protein.id = peptide_ref;
    protein.addMetaValues(ms1_feature);
    proteins_map.emplace(peptide_ref, protein); // OK to reject duplicate keys
    OpenMS::ReactionMonitoringTransition::RetentionTime rt_f;
    rt_f.setRT(ms1_feature.getRT());
    OpenMS::TargetedExperiment::Peptide peptide;
    peptide.id = peptide_ref;
    peptide.setChargeState(ms1_feature.getCharge());
    peptide.addMetaValues(ms1_feature);
    peptide.protein_refs.emplace_back(peptide_ref);
    peptide.rts.push_back(rt_f);
    // If the peptide already exists, only append this feature's RT to it.
    auto found_peptide = peptides_map.emplace(peptide_ref, peptide);
    if (!found_peptide.second)
    {
      peptides_map.at(peptide_ref).rts.push_back(rt_f);
    }
    // Turn each matching MS2 fragment into a transition, subject to the
    // fragment m/z window and the max product-mass-vs-precursor constraint.
    for (const auto& ms2_feature : ms1_to_ms2[peptide_ref])
    {
      auto current_mz = ms2_feature->getMZ();
      if ((current_mz > min_fragment_mz_ && current_mz < max_fragment_mz_) &&
          (current_mz < ms1_feature.getMZ() + relative_allowable_product_mass_))
      {
        OpenMS::ReactionMonitoringTransition rmt;
        rmt.setLibraryIntensity(ms2_feature->getIntensity());
        rmt.setName(ms2_feature->getMetaValue("native_id"));
        OpenMS::ReactionMonitoringTransition::RetentionTime rt_s;
        rt_s.setRT(ms2_feature->getRT());
        rmt.setRetentionTime(rt_s);
        // Unique native ID: "<peptide>_<fragment native_id>_<fragment RT>".
        std::ostringstream os;
        os << peptide_ref << "_" << ms2_feature->getMetaValue("native_id") << "_" << ms2_feature->getRT();
        rmt.setNativeID(os.str());
        rmt.setPeptideRef(peptide_ref);
        rmt.setPrecursorMZ(ms1_feature.getMZ());
        rmt.setProductMZ(ms2_feature->getMZ());
        rmt.addMetaValues(*ms2_feature);
        rmt_map.emplace(os.str(), rmt); // OK to reject duplicate keys
      }
    }
  }
  // Reconstruct the final vectors from the maps
  std::vector<TargetedExperiment::Peptide> peptides;
  for (const auto& p : peptides_map) {
    peptides.push_back(p.second);
  }
  std::vector<TargetedExperiment::Protein> proteins;
  for (const auto& p : proteins_map)
  {
    proteins.push_back(p.second);
  }
  std::vector<ReactionMonitoringTransition> rmt_vec;
  for (const auto& p : rmt_map)
  {
    rmt_vec.push_back(p.second);
  }
  t_exp.setProteins(proteins);
  t_exp.setPeptides(peptides);
  t_exp.setTransitions(rmt_vec);
  // validate
  OpenMS::TransitionTSVFile tsv_file;
  tsv_file.validateTargetedExperiment(t_exp);
}
// Merges features (and subordinates) that share the same identifier into one
// consensus feature each: RT and m/z are intensity-weighted averages,
// intensities and peak apexes are summed, and the originals are kept as
// subordinates. Results are APPENDED to `fmap_output` (it is not cleared —
// presumably intentional; confirm with callers).
// NOTE(review): any exception is logged and swallowed, so a failure yields a
// partially-filled output map.
void TargetedSpectraExtractor::mergeFeatures(const OpenMS::FeatureMap& fmap_input, OpenMS::FeatureMap& fmap_output) const
{
  try
  {
    // Pass 1: organize into a map by combining features and subordinates with the same `identifier`
    std::map<OpenMS::String, std::vector<OpenMS::Feature>> fmapmap;
    organizeMapWithSameIdentifier(fmap_input, fmapmap);
    // Pass 2: compute the consensus manually
    for (const auto& f_map : fmapmap)
    {
      // compute the total intensity for weighting
      double total_intensity = 0;
      for (const auto& f : f_map.second)
      {
        // Prefer the peak apex intensity when available, else the feature intensity.
        if (f.metaValueExists("peak_apex_int"))
          total_intensity += (double) f.getMetaValue("peak_apex_int");
        else
          total_intensity += f.getIntensity();
      }
      // compute the weighted averages
      double rt = 0.0, m = 0.0, intensity = 0.0, peak_apex_int = 0.0;
      double weighting_factor = 1.0 / f_map.second.size();// will be updated
      for (const auto& f : f_map.second)
      {
        // compute the weighting factor
        // NOTE(review): division by total_intensity — a group whose total is 0
        // would produce NaN/inf weights; presumably intensities are positive.
        if (f.metaValueExists("peak_apex_int"))
          weighting_factor = (double) f.getMetaValue("peak_apex_int") / total_intensity;
        else
          weighting_factor = f.getIntensity() / total_intensity;
        // compute the weighted averages
        rt += f.getRT() * weighting_factor;
        m += f.getMZ() * weighting_factor;
        intensity += f.getIntensity();
        if (f.metaValueExists("peak_apex_int"))
          peak_apex_int += (double) f.getMetaValue("peak_apex_int");
      }
      // make the feature map and assign subordinates
      OpenMS::Feature f;
      f.setUniqueId();
      // parse the identifier: the PeptideRef is the part before the first '_'
      std::string id_f;
      try
      {
        id_f = f_map.first.prefix('_');
      }
      catch (const std::exception& e)
      {
        // prefix() throws when no '_' is present; id_f stays empty then.
        OPENMS_LOG_ERROR << e.what();
      }
      f.setMetaValue("PeptideRef", id_f);
      f.setMZ(m);
      f.setRT(rt);
      // Polarity is taken from the first member of the group.
      f.setMetaValue("scan_polarity", f_map.second.front().getMetaValue("scan_polarity"));
      f.setIntensity(intensity);
      f.setMetaValue("peak_apex_int", peak_apex_int);
      f.setSubordinates(f_map.second);
      fmap_output.push_back(f);
    }
  }
  catch (const std::exception& e)
  {
    OPENMS_LOG_ERROR << e.what();
  }
}
// Writes the experiment's spectra to an MSP file, after optional MS2
// deisotoping and after stripping peaks beyond the precursor m/z window.
void TargetedSpectraExtractor::storeSpectraMSP(const String& filename, MSExperiment& experiment) const
{
  if (deisotoping_use_deisotoper_)
  {
    deisotopeMS2Spectra_(experiment);
  }
  removeMS2SpectraPeaks_(experiment);
  // Persist in MSP format.
  FileHandler().storeExperiment(filename, experiment, {FileTypes::MSP});
}
// Deisotopes every MS2+ spectrum in place with the configured
// `deisotoping_*` parameters; MS1 spectra are left untouched.
void TargetedSpectraExtractor::deisotopeMS2Spectra_(MSExperiment& experiment) const
{
  // Loop-invariant settings hoisted out of the loop; the `cond ? true : false`
  // anti-idiom is replaced by the comparison itself.
  const bool fragment_unit_ppm = (deisotoping_fragment_unit_ == "ppm");
  const bool make_single_charged = false; // keep original charge states
  for (MSSpectrum& spectrum : experiment.getSpectra())
  {
    if (spectrum.getMSLevel() == 1)
    {
      continue; // only MS2 and higher are deisotoped
    }
    Deisotoper::deisotopeAndSingleCharge(spectrum,
                                         deisotoping_fragment_tolerance_,
                                         fragment_unit_ppm,
                                         deisotoping_min_charge_,
                                         deisotoping_max_charge_,
                                         deisotoping_keep_only_deisotoped_,
                                         deisotoping_min_isopeaks_,
                                         deisotoping_max_isopeaks_,
                                         make_single_charged,
                                         deisotoping_annotate_charge_);
  }
}
// Removes from each MS2+ spectrum all peaks whose m/z exceeds the precursor
// m/z plus the configured ppm threshold (they are zeroed first, then swept out
// together with any other sub-1-intensity peaks).
void TargetedSpectraExtractor::removeMS2SpectraPeaks_(MSExperiment& experiment) const
{
  // remove peaks from MS2 which are at a higher mz than the precursor + tolerance
  for (auto& peakmap_it : experiment.getSpectra())
  {
    MSSpectrum& spectrum = peakmap_it;
    if (spectrum.getMSLevel() == 1)
    {
      continue;
    }
    // FIX: guard against MS2 spectra without precursor information —
    // getPrecursors()[0] on an empty vector is undefined behavior.
    const std::vector<Precursor>& precursors = spectrum.getPrecursors();
    if (precursors.empty())
    {
      OPENMS_LOG_WARN << "removeMS2SpectraPeaks_(): No precursor found; spectrum left unchanged.\n";
      continue;
    }
    // if peak mz higher than precursor mz set intensity to zero
    double prec_mz = precursors[0].getMZ();
    double mass_diff = Math::ppmToMass(max_precursor_mass_threashold_, prec_mz);
    for (auto& spec : spectrum)
    {
      if (spec.getMZ() > prec_mz + mass_diff)
      {
        spec.setIntensity(0);
      }
    }
    // Erase every peak whose intensity is outside [1, max] (the inverted
    // range), i.e. the ones zeroed above plus pre-existing near-zero peaks.
    spectrum.erase(remove_if(spectrum.begin(),
                             spectrum.end(),
                             InIntensityRange<PeakMap::PeakType>(1,
                                                                 std::numeric_limits<PeakMap::PeakType::IntensityType>::max(),
                                                                 true)),
                   spectrum.end());
  }
}
// Groups features (and their subordinates) into `fmapmap` keyed by
// "<PeptideRef>_<first identifier>_<RT>". Features lacking either the
// "PeptideRef" or "identifier" meta value are skipped.
void TargetedSpectraExtractor::organizeMapWithSameIdentifier(const OpenMS::FeatureMap& fmap_input, std::map<OpenMS::String, std::vector<OpenMS::Feature>>& fmapmap) const
{
  auto construct_feature = [&fmapmap](const OpenMS::Feature& feature)
  {
    if (feature.metaValueExists("PeptideRef") && feature.metaValueExists("identifier"))
    {
      const std::string id = std::string(feature.getMetaValue("PeptideRef")) + std::string("_") + std::string(feature.getMetaValue("identifier").toStringList().at(0));
      const std::string id_f = id + std::string("_") + std::to_string(feature.getRT());
      // PERF: operator[] default-constructs the vector on first use; the old
      // emplace(id_f, vector({feature})) built a throwaway single-element
      // vector on every call, which was discarded whenever the key existed.
      fmapmap[id_f].push_back(feature);
    }
  };
  // Visit top-level features first, then their subordinates.
  for (const OpenMS::Feature& f : fmap_input)
  {
    construct_feature(f);
    for (const OpenMS::Feature& s : f.getSubordinates())
    {
      construct_feature(s);
    }
  }
}
void TargetedSpectraExtractor::BinnedSpectrumComparator::init(const std::vector<MSSpectrum>& library, const std::map<String,DataValue>& options)
{
if (options.count("bin_size"))
{
bin_size_ = options.at("bin_size");
}
if (options.count("peak_spread"))
{
peak_spread_ = options.at("peak_spread");
}
if (options.count("bin_offset"))
{
bin_offset_ = options.at("bin_offset");
}
library_.clear();
bs_library_.clear();
for (const MSSpectrum& s : library)
{
library_.push_back(s);
bs_library_.emplace_back(s, bin_size_, false, peak_spread_, bin_offset_);
}
OPENMS_LOG_INFO << "The library contains " << bs_library_.size() << " spectra.\n";
}
}// namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/TransitionPQPFile.cpp | .cpp | 57,226 | 1,368 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: George Rosenberger $
// $Authors: George Rosenberger, Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/TransitionPQPFile.h>
#include <sqlite3.h>
#include <OpenMS/FORMAT/SqliteConnector.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <boost/range/algorithm.hpp>
#include <boost/range/algorithm_ext/erase.hpp>
#include <sstream>
#include <unordered_map>
#include <iostream>
namespace OpenMS
{
namespace Sql = Internal::SqliteHelper;
// Default construction: nothing PQP-specific to set up beyond the TSV base,
// so both special members can simply be defaulted.
TransitionPQPFile::TransitionPQPFile() = default;
TransitionPQPFile::~TransitionPQPFile() = default;
// Assembles the SQL SELECT statement that flattens a PQP (SQLite) assay
// library into TSV-like transition rows: a peptide-based query UNIONed with a
// compound (metabolite) query, both yielding the same column layout. Optional
// schema elements (drift time column, GENE table, ANNOTATION/ADDUCTS columns)
// are probed first and the query adapts to their presence. The flags recorded
// in the returned PQPSqlQueryInfo tell the caller which optional parts exist.
TransitionPQPFile::PQPSqlQueryInfo TransitionPQPFile::buildPQPSelectQuery_(sqlite3* db, bool legacy_traml_id) const
{
  PQPSqlQueryInfo info;
  // Use legacy TraML identifiers for precursors (transition_group_id) and transitions (transition_name)?
  // When legacy_traml_id=true, use TRAML_ID column (string identifiers from TraML)
  // When legacy_traml_id=false, use numeric ID column
  std::string traml_id = legacy_traml_id ? "TRAML_ID" : "ID";
  // Check for optional columns/tables
  String select_drift_time = "";
  info.drift_time_exists = SqliteConnector::columnExists(db, "PRECURSOR", "LIBRARY_DRIFT_TIME");
  if (info.drift_time_exists)
  {
    select_drift_time = ", PRECURSOR.LIBRARY_DRIFT_TIME AS drift_time ";
  }
  // Gene names (one row per peptide, ';'-concatenated) are only available if
  // the GENE table exists; the compound branch then reports 'NA'.
  String select_gene = "";
  String select_gene_null = "";
  String join_gene = "";
  info.gene_exists = SqliteConnector::tableExists(db, "GENE");
  if (info.gene_exists)
  {
    select_gene = ", GENE_AGGREGATED.GENE_NAME AS gene_name ";
    select_gene_null = ", 'NA' AS gene_name ";
    join_gene = "INNER JOIN PEPTIDE_GENE_MAPPING ON PEPTIDE.ID = PEPTIDE_GENE_MAPPING.PEPTIDE_ID " \
                "INNER JOIN " \
                "(SELECT PEPTIDE_ID, GROUP_CONCAT(GENE_NAME,';') AS GENE_NAME " \
                "FROM GENE " \
                "INNER JOIN PEPTIDE_GENE_MAPPING ON GENE.ID = PEPTIDE_GENE_MAPPING.GENE_ID "\
                "GROUP BY PEPTIDE_ID) " \
                "AS GENE_AGGREGATED ON PEPTIDE.ID = GENE_AGGREGATED.PEPTIDE_ID ";
  }
  // Older schemas may lack TRANSITION.ANNOTATION / COMPOUND.ADDUCTS; fall back
  // to empty-string columns so the result layout stays fixed.
  String select_annotation = "'' AS Annotation, ";
  bool annotation_exists = SqliteConnector::columnExists(db, "TRANSITION", "ANNOTATION");
  if (annotation_exists) select_annotation = "TRANSITION.ANNOTATION AS Annotation, ";
  String select_adducts = "'' AS Adducts, ";
  bool adducts_exists = SqliteConnector::columnExists(db, "COMPOUND", "ADDUCTS");
  if (adducts_exists) select_adducts = "COMPOUND.ADDUCTS AS Adducts, ";
  // Build peptides query
  info.select_sql = "SELECT " \
    "PRECURSOR.PRECURSOR_MZ AS precursor, " \
    "TRANSITION.PRODUCT_MZ AS product, " \
    "PRECURSOR.LIBRARY_RT AS rt_calibrated, " \
    "TRANSITION." + traml_id + " AS transition_name, " \
    "-1 AS CE, " \
    "TRANSITION.LIBRARY_INTENSITY AS library_intensity, " \
    "PRECURSOR." + traml_id + " AS group_id, " \
    "TRANSITION.DECOY AS decoy, " \
    "PEPTIDE.UNMODIFIED_SEQUENCE AS PeptideSequence, " \
    "PROTEIN_AGGREGATED.PROTEIN_ACCESSION AS ProteinName, " \
    + select_annotation + \
    "PEPTIDE.MODIFIED_SEQUENCE AS FullPeptideName, " \
    "NULL AS CompoundName, " \
    "NULL AS SMILES, " \
    "NULL AS SumFormula, " \
    "NULL AS Adducts, " \
    "PRECURSOR.CHARGE AS precursor_charge, " \
    "PRECURSOR.GROUP_LABEL AS peptide_group_label, " \
    "NULL AS label_type, " \
    "TRANSITION.CHARGE AS fragment_charge, " \
    "TRANSITION.ORDINAL AS fragment_nr, " \
    "NULL AS fragment_mzdelta, " \
    "NULL AS fragment_modification, " \
    "TRANSITION.TYPE AS fragment_type, " \
    "NULL AS uniprot_id, " \
    "TRANSITION.DETECTING AS detecting_transition, " \
    "TRANSITION.IDENTIFYING AS identifying_transition, " \
    "TRANSITION.QUANTIFYING AS quantifying_transition, " \
    "PEPTIDE_AGGREGATED.PEPTIDOFORMS AS peptidoforms " + \
    select_drift_time + \
    select_gene + \
    "FROM PRECURSOR " + \
    join_gene + \
    "INNER JOIN TRANSITION_PRECURSOR_MAPPING ON PRECURSOR.ID = TRANSITION_PRECURSOR_MAPPING.PRECURSOR_ID " \
    "INNER JOIN TRANSITION ON TRANSITION_PRECURSOR_MAPPING.TRANSITION_ID = TRANSITION.ID " \
    "INNER JOIN PRECURSOR_PEPTIDE_MAPPING ON PRECURSOR.ID = PRECURSOR_PEPTIDE_MAPPING.PRECURSOR_ID " \
    "INNER JOIN PEPTIDE ON PRECURSOR_PEPTIDE_MAPPING.PEPTIDE_ID = PEPTIDE.ID " \
    "INNER JOIN " \
    "(SELECT PEPTIDE_ID, GROUP_CONCAT(PROTEIN_ACCESSION,';') AS PROTEIN_ACCESSION " \
    "FROM PROTEIN " \
    "INNER JOIN PEPTIDE_PROTEIN_MAPPING ON PROTEIN.ID = PEPTIDE_PROTEIN_MAPPING.PROTEIN_ID "\
    "GROUP BY PEPTIDE_ID) " \
    "AS PROTEIN_AGGREGATED ON PEPTIDE.ID = PROTEIN_AGGREGATED.PEPTIDE_ID " \
    "LEFT OUTER JOIN " \
    "(SELECT TRANSITION_ID, GROUP_CONCAT(MODIFIED_SEQUENCE,'|') AS PEPTIDOFORMS " \
    "FROM TRANSITION_PEPTIDE_MAPPING "\
    "INNER JOIN PEPTIDE ON TRANSITION_PEPTIDE_MAPPING.PEPTIDE_ID = PEPTIDE.ID "\
    "GROUP BY TRANSITION_ID) "\
    "AS PEPTIDE_AGGREGATED ON TRANSITION.ID = PEPTIDE_AGGREGATED.TRANSITION_ID ";
  // Append compounds query (same column layout; peptide fields are NULL here)
  info.select_sql += "UNION SELECT " \
    "PRECURSOR.PRECURSOR_MZ AS precursor, " \
    "TRANSITION.PRODUCT_MZ AS product, " \
    "PRECURSOR.LIBRARY_RT AS rt_calibrated, " \
    "TRANSITION." + traml_id + " AS transition_name, " \
    "-1 AS CE, " \
    "TRANSITION.LIBRARY_INTENSITY AS library_intensity, " \
    "PRECURSOR." + traml_id + " AS group_id, " \
    "TRANSITION.DECOY AS decoy, " \
    "NULL AS PeptideSequence, " \
    "NULL AS ProteinName, " \
    + select_annotation + \
    "NULL AS FullPeptideName, " \
    "COMPOUND.COMPOUND_NAME AS CompoundName, " \
    "COMPOUND.SMILES AS SMILES, " \
    "COMPOUND.SUM_FORMULA AS SumFormula, " \
    + select_adducts + \
    "PRECURSOR.CHARGE AS precursor_charge, " \
    "PRECURSOR.GROUP_LABEL AS peptide_group_label, " \
    "NULL AS label_type, " \
    "TRANSITION.CHARGE AS fragment_charge, " \
    "TRANSITION.ORDINAL AS fragment_nr, " \
    "NULL AS fragment_mzdelta, " \
    "NULL AS fragment_modification, " \
    "TRANSITION.TYPE AS fragment_type, " \
    "NULL AS uniprot_id, " \
    "TRANSITION.DETECTING AS detecting_transition, " \
    "TRANSITION.IDENTIFYING AS identifying_transition, " \
    "TRANSITION.QUANTIFYING AS quantifying_transition, " \
    "NULL AS peptidoforms " +
    select_drift_time +
    select_gene_null +
    "FROM PRECURSOR " \
    "INNER JOIN TRANSITION_PRECURSOR_MAPPING ON PRECURSOR.ID = TRANSITION_PRECURSOR_MAPPING.PRECURSOR_ID " \
    "INNER JOIN TRANSITION ON TRANSITION_PRECURSOR_MAPPING.TRANSITION_ID = TRANSITION.ID " \
    "INNER JOIN PRECURSOR_COMPOUND_MAPPING ON PRECURSOR.ID = PRECURSOR_COMPOUND_MAPPING.PRECURSOR_ID " \
    "INNER JOIN COMPOUND ON PRECURSOR_COMPOUND_MAPPING.COMPOUND_ID = COMPOUND.ID; ";
  return info;
}
  /**
    @brief Populate @p transition_list from a PQP (SQLite) assay library.

    The TRANSITION row count is queried first only to size the progress bar;
    the actual data are fetched with the shared SELECT built by
    buildPQPSelectQuery_(), so the column indices used below must stay in sync
    with the column order of that query.

    @param filename Path to the PQP file (an SQLite database)
    @param transition_list Output: one TSVTransition appended per TRANSITION row
    @param legacy_traml_id Forwarded to buildPQPSelectQuery_() (affects how
           TraML-style identifiers are reconstructed in the SELECT)
  */
  void TransitionPQPFile::readPQPInput_(const char* filename, std::vector<TSVTransition>& transition_list, bool legacy_traml_id)
  {
    sqlite3 *db;
    sqlite3_stmt * cntstmt;
    sqlite3_stmt * stmt;
    startProgress(0, 1, "reading PQP file (SQL warmup)");
    // Open database
    SqliteConnector conn(filename);
    db = conn.getDB();
    // Count transitions (result is used only for progress reporting below)
    SqliteConnector::prepareStatement(db, &cntstmt, "SELECT COUNT(*) FROM TRANSITION;");
    sqlite3_step( cntstmt );
    int num_transitions = sqlite3_column_int(cntstmt, 0);
    sqlite3_finalize(cntstmt);
    // Build SQL query using shared helper
    PQPSqlQueryInfo query_info = buildPQPSelectQuery_(db, legacy_traml_id);
    // Execute SQL select statement
    SqliteConnector::prepareStatement(db, &stmt, query_info.select_sql);
    sqlite3_step(stmt);
    endProgress();
    Size progress = 0;
    startProgress(0, num_transitions, "reading PQP file");
    // Convert SQLite data to TSVTransition data structure.
    // Column 0 (precursor m/z) is declared NOT NULL in the PQP schema, so a
    // NULL column type here means sqlite3_step() has run out of rows.
    while (sqlite3_column_type(stmt, 0) != SQLITE_NULL)
    {
      setProgress(progress++);
      TSVTransition mytransition;
      Sql::extractValue<double>(&mytransition.precursor, stmt, 0);
      Sql::extractValue<double>(&mytransition.product, stmt, 1);
      Sql::extractValue<double>(&mytransition.rt_calibrated, stmt, 2);
      Sql::extractValue<std::string>(&mytransition.transition_name, stmt, 3);
      Sql::extractValue<double>(&mytransition.CE, stmt, 4);
      Sql::extractValue<double>(&mytransition.library_intensity, stmt, 5);
      Sql::extractValue<std::string>(&mytransition.group_id, stmt, 6);
      Sql::extractValue<int>((int*)&mytransition.decoy, stmt, 7);
      Sql::extractValue<std::string>(&mytransition.PeptideSequence, stmt, 8);
      // Multiple protein accessions are stored ';'-separated in one column
      String tmp_field;
      if (Sql::extractValue<std::string>(&tmp_field, stmt, 9)) tmp_field.split(';', mytransition.ProteinName);
      Sql::extractValue<std::string>(&mytransition.Annotation, stmt, 10);
      Sql::extractValue<std::string>(&mytransition.FullPeptideName, stmt, 11);
      Sql::extractValue<std::string>(&mytransition.CompoundName, stmt, 12);
      Sql::extractValue<std::string>(&mytransition.SMILES, stmt, 13);
      Sql::extractValue<std::string>(&mytransition.SumFormula, stmt, 14);
      Sql::extractValue<std::string>(&mytransition.Adducts, stmt, 15);
      Sql::extractValueIntStr(&mytransition.precursor_charge, stmt, 16);
      Sql::extractValue<std::string>(&mytransition.peptide_group_label, stmt, 17);
      Sql::extractValue<std::string>(&mytransition.label_type, stmt, 18);
      Sql::extractValueIntStr(&mytransition.fragment_charge, stmt, 19);
      Sql::extractValue<int>(&mytransition.fragment_nr, stmt, 20);
      Sql::extractValue<double>(&mytransition.fragment_mzdelta, stmt, 21);
      Sql::extractValue<int>(&mytransition.fragment_modification, stmt, 22);
      Sql::extractValue<std::string>(&mytransition.fragment_type, stmt, 23);
      if (Sql::extractValue<std::string>(&tmp_field, stmt, 24)) tmp_field.split(';', mytransition.uniprot_id);
      Sql::extractValue<int>((int*)&mytransition.detecting_transition, stmt, 25);
      Sql::extractValue<int>((int*)&mytransition.identifying_transition, stmt, 26);
      Sql::extractValue<int>((int*)&mytransition.quantifying_transition, stmt, 27);
      // Peptidoforms (IPF) are stored '|'-separated in one column
      if (Sql::extractValue<std::string>(&tmp_field, stmt, 28)) tmp_field.split('|', mytransition.peptidoforms);
      // optional attributes only present in newer file versions
      if (query_info.drift_time_exists) Sql::extractValue<double>(&mytransition.drift_time, stmt, 29);
      if (query_info.gene_exists) Sql::extractValue<std::string>(&mytransition.GeneName, stmt, 30);
      // "NA" is the placeholder written for unknown genes -> normalize to empty
      if (mytransition.GeneName == "NA") mytransition.GeneName = "";
      transition_list.push_back(mytransition);
      sqlite3_step( stmt );
    }
    endProgress();
    sqlite3_finalize(stmt);
  }
  /**
    @brief Stream a PQP file directly into an OpenSwath::LightTargetedExperiment.

    Unlike readPQPInput_(), no intermediate TSVTransition list is built: each
    SQL row is converted straight into a LightTransition, and compounds /
    proteins are deduplicated on the fly via @p compound_map / @p protein_map.
    Column indices must stay in sync with the SELECT built by
    buildPQPSelectQuery_().

    @param filename Path to the PQP file (an SQLite database)
    @param exp Output: transitions, compounds and proteins are appended here
    @param legacy_traml_id Forwarded to buildPQPSelectQuery_()
  */
  void TransitionPQPFile::streamPQPToLightTargetedExperiment_(const char* filename, OpenSwath::LightTargetedExperiment& exp, bool legacy_traml_id)
  {
    // Maps for deduplication (values are unused; only key presence matters)
    std::map<String, int> compound_map;
    std::map<String, int> protein_map;
    sqlite3 *db;
    sqlite3_stmt * cntstmt;
    sqlite3_stmt * stmt;
    startProgress(0, 1, "reading PQP file (SQL warmup)");
    // Open database
    SqliteConnector conn(filename);
    db = conn.getDB();
    // Count transitions (used only for progress reporting)
    SqliteConnector::prepareStatement(db, &cntstmt, "SELECT COUNT(*) FROM TRANSITION;");
    sqlite3_step( cntstmt );
    int num_transitions = sqlite3_column_int(cntstmt, 0);
    sqlite3_finalize(cntstmt);
    // Build SQL query using shared helper
    PQPSqlQueryInfo query_info = buildPQPSelectQuery_(db, legacy_traml_id);
    // Execute SQL select statement
    SqliteConnector::prepareStatement(db, &stmt, query_info.select_sql);
    sqlite3_step(stmt);
    endProgress();
    Size progress = 0;
    startProgress(0, num_transitions, "streaming PQP to LightTargetedExperiment");
    // Stream SQL results directly to LightTargetedExperiment.
    // Column 0 (precursor m/z) is NOT NULL in the schema, so a NULL column
    // type signals that there are no more rows.
    while (sqlite3_column_type(stmt, 0) != SQLITE_NULL)
    {
      setProgress(progress++);
      // Extract values directly into variables
      double precursor_mz = 0, product_mz = 0, rt_calibrated = 0, library_intensity = 0, drift_time = -1;
      std::string transition_name, group_id, peptide_sequence, full_peptide_name;
      std::string compound_name, smiles, sum_formula, adducts_str;
      std::string peptide_group_label, fragment_type_str, gene_name;
      int decoy = 0, precursor_charge = 0, fragment_charge = 0, fragment_nr = -1;
      int detecting = 1, identifying = 0, quantifying = 1;
      String protein_names_str, peptidoforms_str;
      Sql::extractValue<double>(&precursor_mz, stmt, 0);
      Sql::extractValue<double>(&product_mz, stmt, 1);
      Sql::extractValue<double>(&rt_calibrated, stmt, 2);
      Sql::extractValue<std::string>(&transition_name, stmt, 3);
      // Skip CE (column 4) - not used in light path
      Sql::extractValue<double>(&library_intensity, stmt, 5);
      Sql::extractValue<std::string>(&group_id, stmt, 6);
      Sql::extractValue<int>(&decoy, stmt, 7);
      Sql::extractValue<std::string>(&peptide_sequence, stmt, 8);
      Sql::extractValue<String>(&protein_names_str, stmt, 9);
      // Skip Annotation (column 10) - reconstructed from fragment info
      Sql::extractValue<std::string>(&full_peptide_name, stmt, 11);
      Sql::extractValue<std::string>(&compound_name, stmt, 12);
      Sql::extractValue<std::string>(&smiles, stmt, 13);
      Sql::extractValue<std::string>(&sum_formula, stmt, 14);
      Sql::extractValue<std::string>(&adducts_str, stmt, 15);
      Sql::extractValue<int>(&precursor_charge, stmt, 16);
      Sql::extractValue<std::string>(&peptide_group_label, stmt, 17);
      // Skip label_type (column 18) - not in PQP
      Sql::extractValue<int>(&fragment_charge, stmt, 19);
      Sql::extractValue<int>(&fragment_nr, stmt, 20);
      // Skip fragment_mzdelta (column 21)
      // Skip fragment_modification (column 22)
      Sql::extractValue<std::string>(&fragment_type_str, stmt, 23);
      // Skip uniprot_id (column 24) - not in PQP
      Sql::extractValue<int>(&detecting, stmt, 25);
      Sql::extractValue<int>(&identifying, stmt, 26);
      Sql::extractValue<int>(&quantifying, stmt, 27);
      Sql::extractValue<String>(&peptidoforms_str, stmt, 28);
      // optional attributes only present in newer file versions
      if (query_info.drift_time_exists) Sql::extractValue<double>(&drift_time, stmt, 29);
      if (query_info.gene_exists)
      {
        Sql::extractValue<std::string>(&gene_name, stmt, 30);
        // "NA" is the placeholder written for unknown genes -> normalize to empty
        if (gene_name == "NA") gene_name = "";
      }
      // Create LightTransition directly
      OpenSwath::LightTransition transition;
      transition.transition_name = transition_name;
      transition.peptide_ref = group_id;
      transition.library_intensity = library_intensity;
      transition.precursor_mz = precursor_mz;
      transition.product_mz = product_mz;
      transition.precursor_im = drift_time;
      transition.fragment_charge = static_cast<int8_t>(fragment_charge);
      transition.setDecoy(decoy != 0);
      transition.setDetectingTransition(detecting != 0);
      transition.setIdentifyingTransition(identifying != 0);
      transition.setQuantifyingTransition(quantifying != 0);
      transition.fragment_nr = static_cast<int16_t>(fragment_nr);
      transition.setFragmentType(fragment_type_str);
      if (!peptidoforms_str.empty())
      {
        // Peptidoforms (IPF) are stored '|'-separated in one column
        std::vector<String> peptidoforms_tmp;
        peptidoforms_str.split('|', peptidoforms_tmp);
        transition.peptidoforms.assign(peptidoforms_tmp.begin(), peptidoforms_tmp.end());
      }
      exp.transitions.push_back(std::move(transition));
      // Create compound if needed (first row of each precursor group wins)
      if (compound_map.find(group_id) == compound_map.end())
      {
        OpenSwath::LightCompound compound;
        compound.id = group_id;
        compound.drift_time = drift_time;
        compound.rt = rt_calibrated;
        compound.charge = precursor_charge;
        compound.peptide_group_label = peptide_group_label;
        compound.gene_name = gene_name;
        // An empty compound name distinguishes peptides from metabolites
        bool is_peptide = compound_name.empty();
        if (is_peptide)
        {
          compound.sequence = full_peptide_name.empty() ? peptide_sequence : full_peptide_name;
          if (!protein_names_str.empty())
          {
            std::vector<String> protein_names;
            protein_names_str.split(';', protein_names);
            compound.protein_refs.assign(protein_names.begin(), protein_names.end());
          }
          // Parse modifications from sequence
          String sequence = full_peptide_name.empty() ? peptide_sequence : full_peptide_name;
          if (!sequence.empty())
          {
            try
            {
              AASequence aa_sequence = AASequence::fromString(sequence);
              // Terminal modifications use the -1 / size() location convention
              if (aa_sequence.hasNTerminalModification())
              {
                OpenSwath::LightModification mod;
                mod.location = -1;
                mod.unimod_id = aa_sequence.getNTerminalModification()->getUniModRecordId();
                compound.modifications.push_back(mod);
              }
              if (aa_sequence.hasCTerminalModification())
              {
                OpenSwath::LightModification mod;
                mod.location = static_cast<int>(aa_sequence.size());
                mod.unimod_id = aa_sequence.getCTerminalModification()->getUniModRecordId();
                compound.modifications.push_back(mod);
              }
              for (Size i = 0; i != aa_sequence.size(); i++)
              {
                if (aa_sequence[i].isModified())
                {
                  OpenSwath::LightModification mod;
                  mod.location = static_cast<int>(i);
                  mod.unimod_id = aa_sequence.getResidue(i).getModification()->getUniModRecordId();
                  compound.modifications.push_back(mod);
                }
              }
            }
            catch (Exception::InvalidValue&)
            {
              // Best effort: an unparsable sequence leaves modifications empty
              OPENMS_LOG_DEBUG << "Could not parse modifications from sequence: " << sequence << std::endl;
            }
          }
        }
        else
        {
          compound.compound_name = compound_name;
          compound.sum_formula = sum_formula;
          compound.smiles = smiles;
          compound.adducts = adducts_str;
        }
        exp.compounds.push_back(std::move(compound));
        compound_map[group_id] = 0;
      }
      // Create proteins if needed (deduplicated across all rows)
      if (!protein_names_str.empty())
      {
        std::vector<String> protein_names;
        protein_names_str.split(';', protein_names);
        for (const auto& pname : protein_names)
        {
          if (protein_map.find(pname) == protein_map.end())
          {
            OpenSwath::LightProtein protein;
            protein.id = pname;
            protein.sequence = "";
            exp.proteins.push_back(std::move(protein));
            protein_map[pname] = 0;
          }
        }
      }
      sqlite3_step(stmt);
    }
    endProgress();
    sqlite3_finalize(stmt);
  }
  /**
    @brief Write a TargetedExperiment to a new PQP (SQLite) file.

    Any existing file at @p filename is deleted first. The full PQP schema is
    created (VERSION = 3), then integer indices are assigned to the unique
    sets of precursor groups, peptides, compounds, proteins and genes; those
    indices become the primary keys of the respective tables. Transition
    INSERTs are flushed in batches to bound memory, all remaining INSERTs run
    in one final transaction, and decoy flags are propagated to
    peptide/compound/protein/gene rows via UPDATE statements.

    @param filename Output path (overwritten if present)
    @param targeted_exp Experiment to serialize
  */
  void TransitionPQPFile::writePQPOutput_(const char* filename, OpenMS::TargetedExperiment& targeted_exp)
  {
    // delete file if present
    remove(filename);
    // Open database
    SqliteConnector conn(filename);
    // Create SQL structure
    const char* create_sql =
      "CREATE TABLE VERSION(" \
      "ID INT NOT NULL);" \
      // gene table
      // OpenSWATH proteomics workflows
      "CREATE TABLE GENE(" \
      "ID INT PRIMARY KEY NOT NULL," \
      "GENE_NAME TEXT NOT NULL," \
      "DECOY INT NOT NULL);" \
      // peptide_gene_mapping table
      // OpenSWATH proteomics workflows
      "CREATE TABLE PEPTIDE_GENE_MAPPING(" \
      "PEPTIDE_ID INT NOT NULL," \
      "GENE_ID INT NOT NULL);" \
      // protein table
      // OpenSWATH proteomics workflows
      "CREATE TABLE PROTEIN(" \
      "ID INT PRIMARY KEY NOT NULL," \
      "PROTEIN_ACCESSION TEXT NOT NULL," \
      "DECOY INT NOT NULL);" \
      // peptide_protein_mapping table
      // OpenSWATH proteomics workflows
      "CREATE TABLE PEPTIDE_PROTEIN_MAPPING(" \
      "PEPTIDE_ID INT NOT NULL," \
      "PROTEIN_ID INT NOT NULL);" \
      // peptide table
      // OpenSWATH proteomics workflows
      "CREATE TABLE PEPTIDE(" \
      "ID INT PRIMARY KEY NOT NULL," \
      "UNMODIFIED_SEQUENCE TEXT NOT NULL," \
      "MODIFIED_SEQUENCE TEXT NOT NULL," \
      "DECOY INT NOT NULL);" \
      // precursor_peptide_mapping table
      // OpenSWATH proteomics workflows
      "CREATE TABLE PRECURSOR_PEPTIDE_MAPPING(" \
      "PRECURSOR_ID INT NOT NULL," \
      "PEPTIDE_ID INT NOT NULL);" \
      // compound table
      // OpenSWATH metabolomics workflows
      "CREATE TABLE COMPOUND(" \
      "ID INT PRIMARY KEY NOT NULL," \
      "COMPOUND_NAME TEXT NOT NULL," \
      "SUM_FORMULA TEXT NOT NULL," \
      "SMILES TEXT NOT NULL," \
      "ADDUCTS TEXT NOT NULL," \
      "DECOY INT NOT NULL);" \
      // precursor_compound_mapping table
      // OpenSWATH metabolomics workflows
      "CREATE TABLE PRECURSOR_COMPOUND_MAPPING(" \
      "PRECURSOR_ID INT NOT NULL," \
      "COMPOUND_ID INT NOT NULL);" \
      // precursor table
      "CREATE TABLE PRECURSOR(" \
      "ID INT PRIMARY KEY NOT NULL," \
      "TRAML_ID TEXT NULL," \
      "GROUP_LABEL TEXT NULL," \
      "PRECURSOR_MZ REAL NOT NULL," \
      "CHARGE INT NULL," \
      "LIBRARY_INTENSITY REAL NULL," \
      "LIBRARY_RT REAL NULL," \
      "LIBRARY_DRIFT_TIME REAL NULL," \
      "DECOY INT NOT NULL);" \
      // transition_precursor_mapping table
      "CREATE TABLE TRANSITION_PRECURSOR_MAPPING(" \
      "TRANSITION_ID INT NOT NULL," \
      "PRECURSOR_ID INT NOT NULL);" \
      // transition_peptide_mapping table
      // IPF proteomics workflows
      "CREATE TABLE TRANSITION_PEPTIDE_MAPPING(" \
      "TRANSITION_ID INT NOT NULL," \
      "PEPTIDE_ID INT NOT NULL);" \
      // transition table
      "CREATE TABLE TRANSITION(" \
      "ID INT PRIMARY KEY NOT NULL," \
      "TRAML_ID TEXT NULL," \
      "PRODUCT_MZ REAL NOT NULL," \
      "CHARGE INT NULL," \
      "TYPE CHAR(1) NULL," \
      "ANNOTATION TEXT NULL," \
      "ORDINAL INT NULL," \
      "DETECTING INT NOT NULL," \
      "IDENTIFYING INT NOT NULL," \
      "QUANTIFYING INT NOT NULL," \
      "LIBRARY_INTENSITY REAL NULL," \
      "DECOY INT NOT NULL);";
    // Execute SQL create statement
    conn.executeStatement(create_sql);
    // Prepare insert statements
    // Index maps: *_vec collects raw keys, *_map assigns each unique key the
    // integer ID used as primary key in the corresponding table.
    std::vector<std::string> group_vec, peptide_vec, compound_vec, protein_vec;
    std::unordered_map<std::string, int > group_map, peptide_map, compound_map, protein_map, gene_map;
    std::unordered_map<int,double> precursor_mz_map;
    std::unordered_map<int,bool> precursor_decoy_map;
    // OpenSWATH: Loop through TargetedExperiment to generate index maps for peptides
    peptide_vec.reserve(targeted_exp.getPeptides().size());
    group_vec.reserve(targeted_exp.getPeptides().size() + targeted_exp.getCompounds().size());
    for (Size i = 0; i < targeted_exp.getPeptides().size(); i++)
    {
      OpenMS::TargetedExperiment::Peptide peptide = targeted_exp.getPeptides()[i];
      std::string peptide_sequence = TargetedExperimentHelper::getAASequence(peptide).toUniModString();
      peptide_vec.push_back(peptide_sequence);
      group_vec.push_back(peptide.id);
    }
    // OpenSWATH: Loop through TargetedExperiment to generate index maps for compounds
    // (group_vec holds both peptide and compound ids: both become PRECURSOR rows)
    compound_vec.reserve(targeted_exp.getCompounds().size());
    for (Size i = 0; i < targeted_exp.getCompounds().size(); i++)
    {
      OpenMS::TargetedExperiment::Compound compound = targeted_exp.getCompounds()[i];
      compound_vec.push_back(compound.id);
      group_vec.push_back(compound.id);
    }
    boost::erase(compound_vec, boost::unique<boost::return_found_end>(boost::sort(compound_vec)));
    int compound_map_idx = 0;
    for (auto const & x : compound_vec) { compound_map[x] = compound_map_idx; compound_map_idx++; }
    // OpenSWATH: Loop through TargetedExperiment to generate index maps for proteins
    for (Size i = 0; i < targeted_exp.getProteins().size(); i++)
    {
      OpenMS::TargetedExperiment::Protein protein = targeted_exp.getProteins()[i];
      protein_vec.push_back(protein.id);
    }
    // OpenSWATH: Protein set must be unique
    boost::erase(protein_vec, boost::unique<boost::return_found_end>(boost::sort(protein_vec)));
    int protein_map_idx = 0;
    for (auto const & x : protein_vec) { protein_map[x] = protein_map_idx; protein_map_idx++; }
    // OpenSWATH: Group set must be unique
    boost::erase(group_vec, boost::unique<boost::return_found_end>(boost::sort(group_vec)));
    int group_map_idx = 0;
    for (auto const & x : group_vec) { group_map[x] = group_map_idx; group_map_idx++; }
    // IPF: Loop through all transitions and generate peptidoform data structures
    // ('|'-separated list stored in the "Peptidoforms" meta value)
    for (Size i = 0; i < targeted_exp.getTransitions().size(); i++)
    {
      std::vector<String> peptidoforms;
      String(targeted_exp.getTransitions()[i].getMetaValue("Peptidoforms")).split('|', peptidoforms);
      std::copy( peptidoforms.begin(), peptidoforms.end(),
                 std::inserter( peptide_vec, peptide_vec.end() ) );
    }
    // OpenSWATH: Peptide and compound sets must be unique
    boost::erase(peptide_vec, boost::unique<boost::return_found_end>(boost::sort(peptide_vec)));
    int peptide_map_idx = 0;
    for (auto const & x : peptide_vec) { peptide_map[x] = peptide_map_idx; peptide_map_idx++; }
    {
      std::stringstream insert_transition_sql, insert_transition_peptide_mapping_sql, insert_transition_precursor_mapping_sql;
      insert_transition_sql.precision(11);
      for (Size i = 0; i < targeted_exp.getTransitions().size(); i++)
      {
        TransitionPQPFile::TSVTransition transition = convertTransition_(&targeted_exp.getTransitions()[i], targeted_exp);
        int group_set_index = group_map[transition.group_id];
        // First transition of a group determines the precursor m/z ...
        if (precursor_mz_map.find(group_set_index) == precursor_mz_map.end())
        {
          precursor_mz_map[group_set_index] = transition.precursor;
        }
        // ... and the first *detecting* transition determines the decoy flag
        if (precursor_decoy_map.find(group_set_index) == precursor_decoy_map.end())
        {
          if (transition.detecting_transition == 1)
          {
            precursor_decoy_map[group_set_index] = transition.decoy;
          }
        }
        // IPF: Generate transition-peptide mapping tables (one identification transition can map to multiple peptidoforms)
        for (Size j = 0; j < transition.peptidoforms.size(); j++)
        {
          insert_transition_peptide_mapping_sql << "INSERT INTO TRANSITION_PEPTIDE_MAPPING (TRANSITION_ID, PEPTIDE_ID) VALUES (" <<
            i << "," << peptide_map[transition.peptidoforms[j]] << "); ";
        }
        // OpenSWATH: Associate transitions with their precursors
        insert_transition_precursor_mapping_sql << "INSERT INTO TRANSITION_PRECURSOR_MAPPING (TRANSITION_ID, PRECURSOR_ID) VALUES (" <<
          i << "," << group_map[transition.group_id] << "); ";
        std::string transition_charge = "NULL"; // workaround for compounds with missing charge
        if (transition.fragment_charge != "NA")
        {
          transition_charge = transition.fragment_charge;
        }
        // OpenSWATH: Insert transition data
        insert_transition_sql << "INSERT INTO TRANSITION (ID, TRAML_ID, PRODUCT_MZ, CHARGE, TYPE, ANNOTATION, ORDINAL, " <<
          "DETECTING, IDENTIFYING, QUANTIFYING, LIBRARY_INTENSITY, DECOY) VALUES (" << i << ",'" <<
          transition.transition_name << "'," <<
          transition.product << "," <<
          transition_charge << ",'" <<
          transition.fragment_type << "','" <<
          transition.Annotation <<"'," <<
          transition.fragment_nr << "," <<
          transition.detecting_transition << "," <<
          transition.identifying_transition << "," <<
          transition.quantifying_transition << "," <<
          transition.library_intensity << "," << transition.decoy << "); ";
        // Flush accumulated INSERTs in batches to bound memory use.
        // NOTE(review): this also fires at i == 0, flushing the first row in
        // its own transaction — harmless, but differs from the light-path
        // writer which adds "&& i > 0".
        if (i % 50000 == 0)
        // if (i % 2 == 0) // for testing
        {
          conn.executeStatement("BEGIN TRANSACTION");
          conn.executeStatement(insert_transition_sql.str());
          conn.executeStatement(insert_transition_peptide_mapping_sql.str());
          conn.executeStatement(insert_transition_precursor_mapping_sql.str());
          conn.executeStatement("END TRANSACTION");
          insert_transition_sql.str("");
          insert_transition_sql.clear();
          insert_transition_peptide_mapping_sql.str("");
          insert_transition_peptide_mapping_sql.clear();
          insert_transition_precursor_mapping_sql.str("");
          insert_transition_precursor_mapping_sql.clear();
        }
      }
      // Flush whatever remains after the last full batch
      conn.executeStatement("BEGIN TRANSACTION");
      conn.executeStatement(insert_transition_sql.str());
      conn.executeStatement(insert_transition_peptide_mapping_sql.str());
      conn.executeStatement(insert_transition_precursor_mapping_sql.str());
      conn.executeStatement("END TRANSACTION");
    }
    std::stringstream insert_precursor_sql, insert_precursor_peptide_mapping, insert_precursor_compound_mapping;
    insert_precursor_sql.precision(11);
    std::vector<std::pair<int, int> > peptide_protein_map;
    std::vector<std::pair<int, int> > peptide_gene_map;
    // OpenSWATH: Prepare peptide precursor inserts
    for (Size i = 0; i < targeted_exp.getPeptides().size(); i++)
    {
      OpenMS::TargetedExperiment::Peptide peptide = targeted_exp.getPeptides()[i];
      std::string peptide_sequence = TargetedExperimentHelper::getAASequence(peptide).toUniModString();
      int group_set_index = group_map[peptide.id];
      int peptide_set_index = peptide_map[peptide_sequence];
      for (const auto& it : peptide.protein_refs)
      {
        peptide_protein_map.emplace_back(peptide_set_index, protein_map[it]);
      }
      // "NA" is the placeholder written when no gene annotation is present
      String gene_name = "NA";
      if (peptide.metaValueExists("GeneName"))
      {
        gene_name = peptide.getMetaValue("GeneName");
      }
      if (gene_map.find(gene_name) == gene_map.end()) gene_map[gene_name] = (int)gene_map.size();
      peptide_gene_map.emplace_back(peptide_set_index, gene_map[gene_name]);
      insert_precursor_sql <<
        "INSERT INTO PRECURSOR (ID, TRAML_ID, GROUP_LABEL, PRECURSOR_MZ, CHARGE, LIBRARY_INTENSITY, " <<
        "LIBRARY_DRIFT_TIME, LIBRARY_RT, DECOY) VALUES (" <<
        group_set_index << ",'" << peptide.id << "','" <<
        peptide.getPeptideGroupLabel() << "'," <<
        precursor_mz_map[group_set_index] << "," <<
        peptide.getChargeState() <<
        ",NULL," <<
        peptide.getDriftTime() << "," <<
        peptide.getRetentionTime() << "," <<
        precursor_decoy_map[group_set_index] << "); ";
      insert_precursor_peptide_mapping << "INSERT INTO PRECURSOR_PEPTIDE_MAPPING (PRECURSOR_ID, PEPTIDE_ID) VALUES (" <<
        group_set_index << "," << peptide_set_index << "); ";
    }
    // OpenSWATH: Prepare compound precursor inserts
    for (Size i = 0; i < targeted_exp.getCompounds().size(); i++)
    {
      OpenMS::TargetedExperiment::Compound compound = targeted_exp.getCompounds()[i];
      int group_set_index = group_map[compound.id];
      int compound_set_index = compound_map[compound.id];
      std::string compound_charge = "NULL"; // workaround for compounds with missing charge
      if (compound.hasCharge())
      {
        compound_charge = String(compound.getChargeState());
      }
      insert_precursor_sql << "INSERT INTO PRECURSOR (ID, TRAML_ID, GROUP_LABEL, PRECURSOR_MZ, CHARGE, LIBRARY_INTENSITY, " <<
        "LIBRARY_DRIFT_TIME, LIBRARY_RT, DECOY) VALUES (" << group_set_index
        << ",'" << compound.id << "',NULL," <<
        precursor_mz_map[group_set_index] << "," <<
        compound_charge <<
        ",NULL," <<
        compound.getDriftTime() << "," <<
        compound.getRetentionTime() << "," <<
        precursor_decoy_map[group_set_index] << "); ";
      insert_precursor_compound_mapping << "INSERT INTO PRECURSOR_COMPOUND_MAPPING (PRECURSOR_ID, COMPOUND_ID) VALUES (" <<
        group_set_index << "," << compound_set_index << "); ";
    }
    // Deduplicate the mapping pairs before writing them out
    boost::erase(peptide_protein_map, boost::unique<boost::return_found_end>(boost::sort(peptide_protein_map)));
    boost::erase(peptide_gene_map, boost::unique<boost::return_found_end>(boost::sort(peptide_gene_map)));
    // OpenSWATH: Prepare peptide-gene mapping inserts
    std::stringstream insert_peptide_gene_mapping;
    for (const auto& it : peptide_gene_map)
    {
      insert_peptide_gene_mapping << "INSERT INTO PEPTIDE_GENE_MAPPING (PEPTIDE_ID, GENE_ID) VALUES (" <<
        it.first << "," << it.second << "); ";
    }
    // OpenSWATH: Prepare gene inserts
    std::stringstream insert_gene_sql;
    for (const auto& it : gene_map)
    {
      insert_gene_sql << "INSERT INTO GENE (ID, GENE_NAME, DECOY) VALUES (" <<
        it.second << ",'" << it.first << "'," << 0 << "); ";
    }
    // OpenSWATH: Prepare peptide-protein mapping inserts
    std::stringstream insert_peptide_protein_mapping;
    for (const auto& it : peptide_protein_map)
    {
      insert_peptide_protein_mapping << "INSERT INTO PEPTIDE_PROTEIN_MAPPING (PEPTIDE_ID, PROTEIN_ID) VALUES (" <<
        it.first << "," << it.second << "); ";
    }
    // OpenSWATH: Prepare protein inserts
    std::stringstream insert_protein_sql;
    for (const auto& it : protein_map)
    {
      insert_protein_sql << "INSERT INTO PROTEIN (ID, PROTEIN_ACCESSION, DECOY) VALUES (" <<
        it.second << ",'" << it.first << "'," << 0 << "); ";
    }
    // OpenSWATH: Prepare peptide inserts
    std::stringstream insert_peptide_sql;
    for (const auto& it : peptide_map)
    {
      insert_peptide_sql << "INSERT INTO PEPTIDE (ID, UNMODIFIED_SEQUENCE, MODIFIED_SEQUENCE, DECOY) VALUES (" <<
        it.second << ",'" <<
        AASequence::fromString(it.first).toUnmodifiedString() << "','" <<
        it.first << "'," << 0 << "); ";
    }
    // OpenSWATH: Prepare compound inserts
    std::stringstream insert_compound_sql;
    for (const auto& it : compound_map)
    {
      String adducts;
      String compound_name;
      const auto& compound = targeted_exp.getCompoundByRef(it.first);
      if (compound.metaValueExists("Adducts"))
      {
        adducts = compound.getMetaValue("Adducts");
      }
      if (compound.metaValueExists("CompoundName"))
      {
        compound_name = compound.getMetaValue("CompoundName");
      }
      else
      {
        // Fall back to the compound id when no explicit name is annotated
        compound_name = compound.id;
      }
      insert_compound_sql << "INSERT INTO COMPOUND (ID, COMPOUND_NAME, SUM_FORMULA, SMILES, ADDUCTS, DECOY) VALUES (" <<
        it.second << ",'" <<
        compound_name << "','" <<
        compound.molecular_formula << "','" <<
        compound.smiles_string << "','" <<
        adducts << "'," <<
        0 << "); ";
    }
    // OpenSWATH: Prepare decoy updates — propagate the PRECURSOR decoy flag
    // down to the entities referenced by decoy precursors/peptides.
    std::stringstream update_decoys_sql;
    // Peptides
    update_decoys_sql << "UPDATE PEPTIDE SET DECOY = 1 WHERE ID IN " <<
      "(SELECT PEPTIDE.ID FROM PRECURSOR " <<
      "JOIN PRECURSOR_PEPTIDE_MAPPING ON PRECURSOR.ID = PRECURSOR_PEPTIDE_MAPPING.PRECURSOR_ID " <<
      "JOIN PEPTIDE ON PRECURSOR_PEPTIDE_MAPPING.PEPTIDE_ID = PEPTIDE.ID WHERE PRECURSOR.DECOY = 1); ";
    // Compounds
    update_decoys_sql << "UPDATE COMPOUND SET DECOY = 1 WHERE ID IN " <<
      "(SELECT COMPOUND.ID FROM PRECURSOR " <<
      "JOIN PRECURSOR_COMPOUND_MAPPING ON PRECURSOR.ID = PRECURSOR_COMPOUND_MAPPING.PRECURSOR_ID " <<
      "JOIN COMPOUND ON PRECURSOR_COMPOUND_MAPPING.COMPOUND_ID = COMPOUND.ID WHERE PRECURSOR.DECOY = 1); ";
    // Proteins
    update_decoys_sql << "UPDATE PROTEIN SET DECOY = 1 WHERE ID IN " <<
      "(SELECT PROTEIN.ID FROM PEPTIDE " <<
      "JOIN PEPTIDE_PROTEIN_MAPPING ON PEPTIDE.ID = PEPTIDE_PROTEIN_MAPPING.PEPTIDE_ID " <<
      "JOIN PROTEIN ON PEPTIDE_PROTEIN_MAPPING.PROTEIN_ID = PROTEIN.ID WHERE PEPTIDE.DECOY = 1); ";
    // Genes
    update_decoys_sql << "UPDATE GENE SET DECOY = 1 WHERE ID IN " <<
      "(SELECT GENE.ID FROM PEPTIDE " <<
      "JOIN PEPTIDE_GENE_MAPPING ON PEPTIDE.ID = PEPTIDE_GENE_MAPPING.PEPTIDE_ID " <<
      "JOIN GENE ON PEPTIDE_GENE_MAPPING.GENE_ID = GENE.ID WHERE PEPTIDE.DECOY = 1); ";
    conn.executeStatement("BEGIN TRANSACTION");
    // Execute SQL insert statement (schema version 3)
    String insert_version = "INSERT INTO VERSION (ID) VALUES (3);";
    conn.executeStatement(insert_version);
    conn.executeStatement(insert_protein_sql.str());
    conn.executeStatement(insert_peptide_protein_mapping.str());
    conn.executeStatement(insert_gene_sql.str());
    conn.executeStatement(insert_peptide_gene_mapping.str());
    conn.executeStatement(insert_peptide_sql.str());
    conn.executeStatement(insert_compound_sql.str());
    conn.executeStatement(insert_precursor_peptide_mapping.str());
    conn.executeStatement(insert_precursor_compound_mapping.str());
    conn.executeStatement(insert_precursor_sql.str());
    conn.executeStatement(update_decoys_sql.str());
    conn.executeStatement("END TRANSACTION");
  }
// public methods
std::unordered_map<std::string, std::string> TransitionPQPFile::getPQPIDToTraMLIDMap(const char* filename, std::string tableName)
{
sqlite3 *db;
sqlite3_stmt * cntstmt;
sqlite3_stmt * stmt;
std::string select_sql;
std::unordered_map<std::string, std::string> out;
// Open database
SqliteConnector conn(filename);
db = conn.getDB();
// Count Precursors
SqliteConnector::prepareStatement(db, &cntstmt, "SELECT COUNT(*) FROM " + tableName + ";");
sqlite3_step( cntstmt );
sqlite3_finalize(cntstmt);
std::string query = "SELECT ID, TRAML_ID FROM " + tableName + ";";
// Execute SQL select statement
SqliteConnector::prepareStatement(db, &stmt, query);
sqlite3_step(stmt);
while (sqlite3_column_type(stmt, 0) != SQLITE_NULL)
{
std::string traml_id, prec_id;
Sql::extractValue<std::string>(&prec_id, stmt, 0);
Sql::extractValue<std::string>(&traml_id, stmt, 1);
out[traml_id] = prec_id;
sqlite3_step( stmt );
}
return out;
}
void TransitionPQPFile::convertTargetedExperimentToPQP(const char* filename, OpenMS::TargetedExperiment& targeted_exp)
{
if (targeted_exp.containsInvalidReferences())
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Your input file contains invalid references, cannot process file.");
}
writePQPOutput_(filename, targeted_exp);
}
void TransitionPQPFile::convertPQPToTargetedExperiment(const char* filename,
OpenMS::TargetedExperiment& targeted_exp,
bool legacy_traml_id)
{
std::vector<TSVTransition> transition_list;
readPQPInput_(filename, transition_list, legacy_traml_id);
TSVToTargetedExperiment_(transition_list, targeted_exp);
}
void TransitionPQPFile::convertPQPToTargetedExperiment(const char* filename,
OpenSwath::LightTargetedExperiment& targeted_exp,
bool legacy_traml_id)
{
// Use streaming parser for memory efficiency (~5x reduction in peak memory)
streamPQPToLightTargetedExperiment_(filename, targeted_exp, legacy_traml_id);
}
void TransitionPQPFile::convertLightTargetedExperimentToPQP(const char* filename, const OpenSwath::LightTargetedExperiment& targeted_exp)
{
// delete file if present
remove(filename);
// Open database
SqliteConnector conn(filename);
// Create SQL structure (same as heavy version)
const char* create_sql =
"CREATE TABLE VERSION(" \
"ID INT NOT NULL);" \
// gene table
"CREATE TABLE GENE(" \
"ID INT PRIMARY KEY NOT NULL," \
"GENE_NAME TEXT NOT NULL," \
"DECOY INT NOT NULL);" \
// peptide_gene_mapping table
"CREATE TABLE PEPTIDE_GENE_MAPPING(" \
"PEPTIDE_ID INT NOT NULL," \
"GENE_ID INT NOT NULL);" \
// protein table
"CREATE TABLE PROTEIN(" \
"ID INT PRIMARY KEY NOT NULL," \
"PROTEIN_ACCESSION TEXT NOT NULL," \
"DECOY INT NOT NULL);" \
// peptide_protein_mapping table
"CREATE TABLE PEPTIDE_PROTEIN_MAPPING(" \
"PEPTIDE_ID INT NOT NULL," \
"PROTEIN_ID INT NOT NULL);" \
// peptide table
"CREATE TABLE PEPTIDE(" \
"ID INT PRIMARY KEY NOT NULL," \
"UNMODIFIED_SEQUENCE TEXT NOT NULL," \
"MODIFIED_SEQUENCE TEXT NOT NULL," \
"DECOY INT NOT NULL);" \
// precursor_peptide_mapping table
"CREATE TABLE PRECURSOR_PEPTIDE_MAPPING(" \
"PRECURSOR_ID INT NOT NULL," \
"PEPTIDE_ID INT NOT NULL);" \
// compound table
"CREATE TABLE COMPOUND(" \
"ID INT PRIMARY KEY NOT NULL," \
"COMPOUND_NAME TEXT NOT NULL," \
"SUM_FORMULA TEXT NOT NULL," \
"SMILES TEXT NOT NULL," \
"ADDUCTS TEXT NOT NULL," \
"DECOY INT NOT NULL);" \
// precursor_compound_mapping table
"CREATE TABLE PRECURSOR_COMPOUND_MAPPING(" \
"PRECURSOR_ID INT NOT NULL," \
"COMPOUND_ID INT NOT NULL);" \
// precursor table
"CREATE TABLE PRECURSOR(" \
"ID INT PRIMARY KEY NOT NULL," \
"TRAML_ID TEXT NULL," \
"GROUP_LABEL TEXT NULL," \
"PRECURSOR_MZ REAL NOT NULL," \
"CHARGE INT NULL," \
"LIBRARY_INTENSITY REAL NULL," \
"LIBRARY_RT REAL NULL," \
"LIBRARY_DRIFT_TIME REAL NULL," \
"DECOY INT NOT NULL);" \
// transition_precursor_mapping table
"CREATE TABLE TRANSITION_PRECURSOR_MAPPING(" \
"TRANSITION_ID INT NOT NULL," \
"PRECURSOR_ID INT NOT NULL);" \
// transition_peptide_mapping table
"CREATE TABLE TRANSITION_PEPTIDE_MAPPING(" \
"TRANSITION_ID INT NOT NULL," \
"PEPTIDE_ID INT NOT NULL);" \
// transition table
"CREATE TABLE TRANSITION(" \
"ID INT PRIMARY KEY NOT NULL," \
"TRAML_ID TEXT NULL," \
"PRODUCT_MZ REAL NOT NULL," \
"CHARGE INT NULL," \
"TYPE CHAR(1) NULL," \
"ANNOTATION TEXT NULL," \
"ORDINAL INT NULL," \
"DETECTING INT NOT NULL," \
"IDENTIFYING INT NOT NULL," \
"QUANTIFYING INT NOT NULL," \
"LIBRARY_INTENSITY REAL NULL," \
"DECOY INT NOT NULL);";
// Execute SQL create statement
conn.executeStatement(create_sql);
// Build index maps
std::vector<std::string> group_vec, peptide_vec, compound_vec, protein_vec;
std::unordered_map<std::string, int> group_map, peptide_map, compound_map, protein_map, gene_map;
std::unordered_map<int, double> precursor_mz_map;
std::unordered_map<int, bool> precursor_decoy_map;
// Loop through compounds to generate index maps
for (const auto& compound : targeted_exp.compounds)
{
group_vec.push_back(compound.id);
if (compound.isPeptide())
{
peptide_vec.push_back(compound.sequence);
}
else
{
compound_vec.push_back(compound.id);
}
}
// Loop through proteins
for (const auto& protein : targeted_exp.proteins)
{
protein_vec.push_back(protein.id);
}
// Loop through transitions and add peptidoforms
for (const auto& tr : targeted_exp.transitions)
{
for (const auto& peptidoform : tr.peptidoforms)
{
peptide_vec.push_back(peptidoform);
}
}
// Create unique sorted sets and maps
boost::erase(compound_vec, boost::unique<boost::return_found_end>(boost::sort(compound_vec)));
int compound_map_idx = 0;
for (const auto& x : compound_vec) { compound_map[x] = compound_map_idx++; }
boost::erase(protein_vec, boost::unique<boost::return_found_end>(boost::sort(protein_vec)));
int protein_map_idx = 0;
for (const auto& x : protein_vec) { protein_map[x] = protein_map_idx++; }
boost::erase(group_vec, boost::unique<boost::return_found_end>(boost::sort(group_vec)));
int group_map_idx = 0;
for (const auto& x : group_vec) { group_map[x] = group_map_idx++; }
boost::erase(peptide_vec, boost::unique<boost::return_found_end>(boost::sort(peptide_vec)));
int peptide_map_idx = 0;
for (const auto& x : peptide_vec) { peptide_map[x] = peptide_map_idx++; }
// Build compound lookup
std::map<std::string, const OpenSwath::LightCompound*> compound_lookup;
for (const auto& compound : targeted_exp.compounds)
{
compound_lookup[compound.id] = &compound;
}
// Insert transitions
{
std::stringstream insert_transition_sql, insert_transition_peptide_mapping_sql, insert_transition_precursor_mapping_sql;
insert_transition_sql.precision(11);
for (Size i = 0; i < targeted_exp.transitions.size(); i++)
{
const auto& tr = targeted_exp.transitions[i];
int group_set_index = group_map[tr.peptide_ref];
if (precursor_mz_map.find(group_set_index) == precursor_mz_map.end())
{
precursor_mz_map[group_set_index] = tr.precursor_mz;
}
if (precursor_decoy_map.find(group_set_index) == precursor_decoy_map.end())
{
if (tr.isDetectingTransition())
{
precursor_decoy_map[group_set_index] = tr.getDecoy();
}
}
// IPF: Generate transition-peptide mapping tables
for (const auto& peptidoform : tr.peptidoforms)
{
insert_transition_peptide_mapping_sql << "INSERT INTO TRANSITION_PEPTIDE_MAPPING (TRANSITION_ID, PEPTIDE_ID) VALUES (" <<
i << "," << peptide_map[peptidoform] << "); ";
}
// Associate transitions with their precursors
insert_transition_precursor_mapping_sql << "INSERT INTO TRANSITION_PRECURSOR_MAPPING (TRANSITION_ID, PRECURSOR_ID) VALUES (" <<
i << "," << group_set_index << "); ";
std::string transition_charge = "NULL";
if (tr.fragment_charge != 0)
{
transition_charge = String(static_cast<int>(tr.fragment_charge));
}
std::string fragment_type_str = tr.getFragmentType();
std::string fragment_type_char = fragment_type_str.empty() ? "" : fragment_type_str.substr(0, 1);
// Insert transition data
insert_transition_sql << "INSERT INTO TRANSITION (ID, TRAML_ID, PRODUCT_MZ, CHARGE, TYPE, ANNOTATION, ORDINAL, " <<
"DETECTING, IDENTIFYING, QUANTIFYING, LIBRARY_INTENSITY, DECOY) VALUES (" << i << ",'" <<
tr.transition_name << "'," <<
tr.product_mz << "," <<
transition_charge << ",'" <<
fragment_type_char << "','" <<
tr.getAnnotation() << "'," <<
tr.fragment_nr << "," <<
tr.isDetectingTransition() << "," <<
tr.isIdentifyingTransition() << "," <<
tr.isQuantifyingTransition() << "," <<
tr.library_intensity << "," << tr.getDecoy() << "); ";
if (i % 50000 == 0 && i > 0)
{
conn.executeStatement("BEGIN TRANSACTION");
conn.executeStatement(insert_transition_sql.str());
conn.executeStatement(insert_transition_peptide_mapping_sql.str());
conn.executeStatement(insert_transition_precursor_mapping_sql.str());
conn.executeStatement("END TRANSACTION");
insert_transition_sql.str("");
insert_transition_sql.clear();
insert_transition_peptide_mapping_sql.str("");
insert_transition_peptide_mapping_sql.clear();
insert_transition_precursor_mapping_sql.str("");
insert_transition_precursor_mapping_sql.clear();
}
}
conn.executeStatement("BEGIN TRANSACTION");
conn.executeStatement(insert_transition_sql.str());
conn.executeStatement(insert_transition_peptide_mapping_sql.str());
conn.executeStatement(insert_transition_precursor_mapping_sql.str());
conn.executeStatement("END TRANSACTION");
}
std::stringstream insert_precursor_sql, insert_precursor_peptide_mapping, insert_precursor_compound_mapping;
insert_precursor_sql.precision(11);
std::vector<std::pair<int, int>> peptide_protein_map_vec;
std::vector<std::pair<int, int>> peptide_gene_map_vec;
// Insert precursors (compounds)
for (const auto& compound : targeted_exp.compounds)
{
int group_set_index = group_map[compound.id];
if (compound.isPeptide())
{
int peptide_set_index = peptide_map[compound.sequence];
for (const auto& prot_ref : compound.protein_refs)
{
if (protein_map.find(prot_ref) != protein_map.end())
{
peptide_protein_map_vec.emplace_back(peptide_set_index, protein_map[prot_ref]);
}
}
std::string gene_name = compound.gene_name.empty() ? "NA" : compound.gene_name;
if (gene_map.find(gene_name) == gene_map.end()) gene_map[gene_name] = (int)gene_map.size();
peptide_gene_map_vec.emplace_back(peptide_set_index, gene_map[gene_name]);
insert_precursor_sql <<
"INSERT INTO PRECURSOR (ID, TRAML_ID, GROUP_LABEL, PRECURSOR_MZ, CHARGE, LIBRARY_INTENSITY, " <<
"LIBRARY_DRIFT_TIME, LIBRARY_RT, DECOY) VALUES (" <<
group_set_index << ",'" << compound.id << "','" <<
compound.peptide_group_label << "'," <<
precursor_mz_map[group_set_index] << "," <<
compound.charge <<
",NULL," <<
compound.drift_time << "," <<
compound.rt << "," <<
precursor_decoy_map[group_set_index] << "); ";
insert_precursor_peptide_mapping << "INSERT INTO PRECURSOR_PEPTIDE_MAPPING (PRECURSOR_ID, PEPTIDE_ID) VALUES (" <<
group_set_index << "," << peptide_set_index << "); ";
}
else
{
int compound_set_index = compound_map[compound.id];
std::string compound_charge = "NULL";
if (compound.charge != 0)
{
compound_charge = String(compound.charge);
}
insert_precursor_sql << "INSERT INTO PRECURSOR (ID, TRAML_ID, GROUP_LABEL, PRECURSOR_MZ, CHARGE, LIBRARY_INTENSITY, " <<
"LIBRARY_DRIFT_TIME, LIBRARY_RT, DECOY) VALUES (" << group_set_index
<< ",'" << compound.id << "',NULL," <<
precursor_mz_map[group_set_index] << "," <<
compound_charge <<
",NULL," <<
compound.drift_time << "," <<
compound.rt << "," <<
precursor_decoy_map[group_set_index] << "); ";
insert_precursor_compound_mapping << "INSERT INTO PRECURSOR_COMPOUND_MAPPING (PRECURSOR_ID, COMPOUND_ID) VALUES (" <<
group_set_index << "," << compound_set_index << "); ";
}
}
boost::erase(peptide_protein_map_vec, boost::unique<boost::return_found_end>(boost::sort(peptide_protein_map_vec)));
boost::erase(peptide_gene_map_vec, boost::unique<boost::return_found_end>(boost::sort(peptide_gene_map_vec)));
// Prepare peptide-gene mapping inserts
std::stringstream insert_peptide_gene_mapping;
for (const auto& it : peptide_gene_map_vec)
{
insert_peptide_gene_mapping << "INSERT INTO PEPTIDE_GENE_MAPPING (PEPTIDE_ID, GENE_ID) VALUES (" <<
it.first << "," << it.second << "); ";
}
// Prepare gene inserts
std::stringstream insert_gene_sql;
for (const auto& it : gene_map)
{
insert_gene_sql << "INSERT INTO GENE (ID, GENE_NAME, DECOY) VALUES (" <<
it.second << ",'" << it.first << "'," << 0 << "); ";
}
// Prepare peptide-protein mapping inserts
std::stringstream insert_peptide_protein_mapping;
for (const auto& it : peptide_protein_map_vec)
{
insert_peptide_protein_mapping << "INSERT INTO PEPTIDE_PROTEIN_MAPPING (PEPTIDE_ID, PROTEIN_ID) VALUES (" <<
it.first << "," << it.second << "); ";
}
// Prepare protein inserts
std::stringstream insert_protein_sql;
for (const auto& it : protein_map)
{
insert_protein_sql << "INSERT INTO PROTEIN (ID, PROTEIN_ACCESSION, DECOY) VALUES (" <<
it.second << ",'" << it.first << "'," << 0 << "); ";
}
// Prepare peptide inserts
std::stringstream insert_peptide_sql;
for (const auto& it : peptide_map)
{
std::string unmodified_seq;
try
{
unmodified_seq = AASequence::fromString(it.first).toUnmodifiedString();
}
catch (Exception::InvalidValue&)
{
unmodified_seq = it.first;
}
insert_peptide_sql << "INSERT INTO PEPTIDE (ID, UNMODIFIED_SEQUENCE, MODIFIED_SEQUENCE, DECOY) VALUES (" <<
it.second << ",'" <<
unmodified_seq << "','" <<
it.first << "'," << 0 << "); ";
}
// Prepare compound inserts
std::stringstream insert_compound_sql;
for (const auto& it : compound_map)
{
auto comp_it = compound_lookup.find(it.first);
std::string compound_name = it.first;
std::string sum_formula;
std::string smiles;
std::string adducts;
if (comp_it != compound_lookup.end())
{
compound_name = comp_it->second->compound_name.empty() ? it.first : comp_it->second->compound_name;
sum_formula = comp_it->second->sum_formula;
smiles = comp_it->second->smiles;
adducts = comp_it->second->adducts;
}
insert_compound_sql << "INSERT INTO COMPOUND (ID, COMPOUND_NAME, SUM_FORMULA, SMILES, ADDUCTS, DECOY) VALUES (" <<
it.second << ",'" <<
compound_name << "','" <<
sum_formula << "','" <<
smiles << "','" <<
adducts << "'," <<
0 << "); ";
}
// Prepare decoy updates
std::stringstream update_decoys_sql;
update_decoys_sql << "UPDATE PEPTIDE SET DECOY = 1 WHERE ID IN " <<
"(SELECT PEPTIDE.ID FROM PRECURSOR " <<
"JOIN PRECURSOR_PEPTIDE_MAPPING ON PRECURSOR.ID = PRECURSOR_PEPTIDE_MAPPING.PRECURSOR_ID " <<
"JOIN PEPTIDE ON PRECURSOR_PEPTIDE_MAPPING.PEPTIDE_ID = PEPTIDE.ID WHERE PRECURSOR.DECOY = 1); ";
update_decoys_sql << "UPDATE COMPOUND SET DECOY = 1 WHERE ID IN " <<
"(SELECT COMPOUND.ID FROM PRECURSOR " <<
"JOIN PRECURSOR_COMPOUND_MAPPING ON PRECURSOR.ID = PRECURSOR_COMPOUND_MAPPING.PRECURSOR_ID " <<
"JOIN COMPOUND ON PRECURSOR_COMPOUND_MAPPING.COMPOUND_ID = COMPOUND.ID WHERE PRECURSOR.DECOY = 1); ";
update_decoys_sql << "UPDATE PROTEIN SET DECOY = 1 WHERE ID IN " <<
"(SELECT PROTEIN.ID FROM PEPTIDE " <<
"JOIN PEPTIDE_PROTEIN_MAPPING ON PEPTIDE.ID = PEPTIDE_PROTEIN_MAPPING.PEPTIDE_ID " <<
"JOIN PROTEIN ON PEPTIDE_PROTEIN_MAPPING.PROTEIN_ID = PROTEIN.ID WHERE PEPTIDE.DECOY = 1); ";
update_decoys_sql << "UPDATE GENE SET DECOY = 1 WHERE ID IN " <<
"(SELECT GENE.ID FROM PEPTIDE " <<
"JOIN PEPTIDE_GENE_MAPPING ON PEPTIDE.ID = PEPTIDE_GENE_MAPPING.PEPTIDE_ID " <<
"JOIN GENE ON PEPTIDE_GENE_MAPPING.GENE_ID = GENE.ID WHERE PEPTIDE.DECOY = 1); ";
conn.executeStatement("BEGIN TRANSACTION");
// Execute SQL insert statement
String insert_version = "INSERT INTO VERSION (ID) VALUES (3);";
conn.executeStatement(insert_version);
conn.executeStatement(insert_protein_sql.str());
conn.executeStatement(insert_peptide_protein_mapping.str());
conn.executeStatement(insert_gene_sql.str());
conn.executeStatement(insert_peptide_gene_mapping.str());
conn.executeStatement(insert_peptide_sql.str());
conn.executeStatement(insert_compound_sql.str());
conn.executeStatement(insert_precursor_peptide_mapping.str());
conn.executeStatement(insert_precursor_compound_mapping.str());
conn.executeStatement(insert_precursor_sql.str());
conn.executeStatement(update_decoys_sql.str());
conn.executeStatement("END TRANSACTION");
}
}
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/MRMFeatureFinderScoring.h>
// data access
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/DataAccessHelper.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/SimpleOpenMSSpectraAccessFactory.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/MRMFeatureAccessOpenMS.h>
// peak picking & noise estimation
#include <OpenMS/ANALYSIS/OPENSWATH/MRMScoring.h>
#include <OpenMS/ANALYSIS/OPENSWATH/MRMTransitionGroupPicker.h>
// Helpers
#include <OpenMS/ANALYSIS/OPENSWATH/OpenSwathHelper.h>
#include <OpenMS/CHEMISTRY/ProteaseDigestion.h>
#include <boost/range/adaptor/map.hpp>
#include <memory>
#include <boost/foreach.hpp>
#define run_identifier "unique_run_identifier"
// Strict weak ordering on the first component of a (double, double) pair;
// the second component is ignored, so pairs with equal first values compare
// as equivalent. Suitable as a comparator for std::sort.
bool SortDoubleDoublePairFirst(const std::pair<double, double>& left, const std::pair<double, double>& right)
{
  return right.first > left.first;
}
// Post-process a single picked feature before it is written to the output:
// optionally strip convex hulls, assign a unique id, accumulate the group's
// intensity sums, and tag the feature with its MS level.
void processFeatureForOutput(OpenMS::Feature& curr_feature, bool write_convex_hull_, double
                             quantification_cutoff_, double& total_intensity, double& total_peak_apices, const std::string& ms_level)
{
  // Every reported feature needs a unique id.
  curr_feature.ensureUniqueId();

  // Convex hull points blow up the featureXML size; drop them unless requested.
  if (!write_convex_hull_)
  {
    curr_feature.getConvexHulls().clear();
  }

  // Features at or below the m/z cutoff are excluded from the group-level sums.
  if (curr_feature.getMZ() > quantification_cutoff_)
  {
    total_intensity += curr_feature.getIntensity();
    total_peak_apices += (double)curr_feature.getMetaValue("peak_apex_int");
  }

  // Record which MS level this feature was derived from.
  curr_feature.setMetaValue("FeatureLevel", ms_level);
}
namespace OpenMS
{
MRMFeatureFinderScoring::MRMFeatureFinderScoring() :
DefaultParamHandler("MRMFeatureFinderScoring"),
ProgressLogger()
{
defaults_.setValue("stop_report_after_feature", -1, "Stop reporting after feature (ordered by quality; -1 means do not stop).");
defaults_.setValue("rt_extraction_window", -1.0, "Only extract RT around this value (-1 means extract over the whole range, a value of 500 means to extract around +/- 500 s of the expected elution). For this to work, the TraML input file needs to contain normalized RT values.");
defaults_.setValue("rt_normalization_factor", 1.0, "The normalized RT is expected to be between 0 and 1. If your normalized RT has a different range, pass this here (e.g. it goes from 0 to 100, set this value to 100)");
defaults_.setValue("quantification_cutoff", 0.0, "Cutoff in m/z below which peaks should not be used for quantification any more", {"advanced"});
defaults_.setMinFloat("quantification_cutoff", 0.0);
defaults_.setValue("write_convex_hull", "false", "Whether to write out all points of all features into the featureXML", {"advanced"});
defaults_.setValidStrings("write_convex_hull", {"true","false"});
defaults_.setValue("spectrum_addition_method", "simple", "For spectrum addition, either use simple concatenation or use peak resampling", {"advanced"});
defaults_.setValidStrings("spectrum_addition_method", {"simple", "resample"});
defaults_.setValue("spectrum_merge_method_type", "fixed", "For spectrum addition, either use a fixed number of spectra or dynamically select the number of spectra to add around the peak apex based on the merge_spectra_by_peak_width_fraction.", {"advanced"});
defaults_.setValidStrings("spectrum_merge_method_type", {"fixed", "dynamic"});
defaults_.setValue("add_up_spectra", 1, "Add up spectra on the left and right around the retention time peak apex.", {"advanced"});
defaults_.setMinInt("add_up_spectra", 1);
defaults_.setValue("spacing_for_spectra_resampling", 0.005, "If spectra are to be added, use this spacing to add them up", {"advanced"});
defaults_.setMinFloat("spacing_for_spectra_resampling", 0.0);
defaults_.setValue("merge_spectra_by_peak_width_fraction", 0.15, "If spectra are to be added based on the peak width of peak, construct number of spectra to be added based on N percent of number of points of peak width.", {"advanced"});
defaults_.setMinFloat("merge_spectra_by_peak_width_fraction", 0.0001);
defaults_.setMaxFloat("merge_spectra_by_peak_width_fraction", 1.0);
defaults_.setValue("uis_threshold_sn", -1, "S/N threshold to consider identification transition (set to -1 to consider all)");
defaults_.setValue("uis_threshold_peak_area", 0, "Peak area threshold to consider identification transition (set to -1 to consider all)");
defaults_.setValue("scoring_model", "default", "Scoring model to use", {"advanced"});
defaults_.setValidStrings("scoring_model", {"default","single_transition"});
defaults_.setValue("im_extra_drift", 0.0, "Extra drift time to extract for IM scoring (as a fraction, e.g. 0.25 means 25% extra on each side)", {"advanced"});
defaults_.setMinFloat("im_extra_drift", 0.0);
defaults_.setValue("strict", "true", "Whether to error (true) or skip (false) if a transition in a transition group does not have a corresponding chromatogram.", {"advanced"});
defaults_.setValidStrings("strict", {"true","false"});
defaults_.setValue("use_ms1_ion_mobility", "true", "Performs ion mobility extraction in MS1. Set to false if MS1 spectra do not contain ion mobility", {"advanced"});
defaults_.setValue("apply_im_peak_picking", "false", "Perform peak picking on the extracted ion mobilograms. This is useful for reducing intefering signals from co-eluting analytes in the ion mobility dimension. The peak picking will take the highest peak and discard the remaining peaks for ion mobility scoring. ", {"advanced"});
defaults_.setValidStrings("apply_im_peak_picking", {"true","false"});
defaults_.insert("TransitionGroupPicker:", MRMTransitionGroupPicker().getDefaults());
defaults_.insert("DIAScoring:", DIAScoring().getDefaults());
defaults_.insert("EMGScoring:", EmgScoring().getDefaults());
// One can turn on / off each score individually
Param scores_to_use;
scores_to_use.setValue("use_shape_score", "true", "Use the shape score (this score measures the similarity in shape of the transitions using a cross-correlation)", {"advanced"});
scores_to_use.setValidStrings("use_shape_score", {"true","false"});
scores_to_use.setValue("use_coelution_score", "true", "Use the coelution score (this score measures the similarity in coelution of the transitions using a cross-correlation)", {"advanced"});
scores_to_use.setValidStrings("use_coelution_score", {"true","false"});
scores_to_use.setValue("use_rt_score", "true", "Use the retention time score (this score measure the difference in retention time)", {"advanced"});
scores_to_use.setValidStrings("use_rt_score", {"true","false"});
scores_to_use.setValue("use_library_score", "true", "Use the library score", {"advanced"});
scores_to_use.setValidStrings("use_library_score", {"true","false"});
scores_to_use.setValue("use_elution_model_score", "true", "Use the elution model (EMG) score (this score fits a gaussian model to the peak and checks the fit)", {"advanced"});
scores_to_use.setValidStrings("use_elution_model_score", {"true","false"});
scores_to_use.setValue("use_intensity_score", "true", "Use the intensity score", {"advanced"});
scores_to_use.setValidStrings("use_intensity_score", {"true","false"});
scores_to_use.setValue("use_nr_peaks_score", "true", "Use the number of peaks score", {"advanced"});
scores_to_use.setValidStrings("use_nr_peaks_score", {"true","false"});
scores_to_use.setValue("use_total_xic_score", "true", "Use the total XIC score", {"advanced"});
scores_to_use.setValidStrings("use_total_xic_score", {"true","false"});
scores_to_use.setValue("use_total_mi_score", "false", "Use the total MI score", {"advanced"});
scores_to_use.setValidStrings("use_total_mi_score", {"true","false"});
scores_to_use.setValue("use_sn_score", "true", "Use the SN (signal to noise) score", {"advanced"});
scores_to_use.setValidStrings("use_sn_score", {"true","false"});
scores_to_use.setValue("use_mi_score", "false", "Use the MI (mutual information) score", {"advanced"});
scores_to_use.setValidStrings("use_mi_score", {"true","false"});
scores_to_use.setValue("use_dia_scores", "true", "Use the DIA (SWATH) scores. If turned off, will not use fragment ion spectra for scoring.", {"advanced"});
scores_to_use.setValidStrings("use_dia_scores", {"true","false"});
scores_to_use.setValue("use_ms1_correlation", "false", "Use the correlation scores with the MS1 elution profiles", {"advanced"});
scores_to_use.setValidStrings("use_ms1_correlation", {"true","false"});
scores_to_use.setValue("use_ion_mobility_scores", "false", "Use the scores for Ion Mobility scans", {"advanced"});
scores_to_use.setValidStrings("use_ion_mobility_scores", {"true","false"});
scores_to_use.setValue("use_ms1_fullscan", "false", "Use the full MS1 scan at the peak apex for scoring (ppm accuracy of precursor and isotopic pattern)", {"advanced"});
scores_to_use.setValidStrings("use_ms1_fullscan", {"true","false"});
scores_to_use.setValue("use_ms1_mi", "false", "Use the MS1 MI score", {"advanced"});
scores_to_use.setValidStrings("use_ms1_mi", {"true","false"});
scores_to_use.setValue("use_uis_scores", "false", "Use UIS scores for peptidoform identification", {"advanced"});
scores_to_use.setValidStrings("use_uis_scores", {"true","false"});
scores_to_use.setValue("use_peak_shape_metrics", "false", "Use peak shape metrics for scoring", {"advanced"});
scores_to_use.setValue("use_ionseries_scores", "true", "Use MS2-level b/y ion-series scores for peptidoform identification", {"advanced"});
scores_to_use.setValidStrings("use_ionseries_scores", {"true","false"});
scores_to_use.setValue("use_ms2_isotope_scores", "true", "Use MS2-level isotope scores (pearson & manhattan) across product transitions (based on ID if annotated or averagine)", {"advanced"});
scores_to_use.setValidStrings("use_ms2_isotope_scores", {"true","false"});
defaults_.insert("Scores:", scores_to_use);
// write defaults into Param object param_
defaultsToParam_();
}
  // Destructor: defaulted — nothing to clean up beyond the members themselves.
  MRMFeatureFinderScoring::~MRMFeatureFinderScoring() = default;
void MRMFeatureFinderScoring::pickExperiment(const PeakMap& chromatograms,
FeatureMap& output,
const TargetedExperiment& transition_exp_,
const TransformationDescription& trafo,
const PeakMap& swath_map)
{
OpenSwath::LightTargetedExperiment transition_exp;
OpenSwathDataAccessHelper::convertTargetedExp(transition_exp_, transition_exp);
TransitionGroupMapType transition_group_map;
std::shared_ptr<PeakMap > sh_chromatograms = std::make_shared<PeakMap >(chromatograms);
std::shared_ptr<PeakMap > sh_swath_map = std::make_shared<PeakMap >(swath_map);
OpenSwath::SpectrumAccessPtr chromatogram_ptr = SimpleOpenMSSpectraFactory::getSpectrumAccessOpenMSPtr(sh_chromatograms);
OpenSwath::SpectrumAccessPtr swath_ptr = SimpleOpenMSSpectraFactory::getSpectrumAccessOpenMSPtr(sh_swath_map);
OpenSwath::SwathMap m;
m.sptr = swath_ptr;
std::vector<OpenSwath::SwathMap> swath_ptrs;
swath_ptrs.push_back(m);
pickExperiment(chromatogram_ptr, output, transition_exp, trafo, swath_ptrs, transition_group_map);
}
void MRMFeatureFinderScoring::pickExperiment(const OpenSwath::SpectrumAccessPtr& input,
FeatureMap& output,
const OpenSwath::LightTargetedExperiment& transition_exp,
const TransformationDescription& trafo,
const std::vector<OpenSwath::SwathMap>& swath_maps,
TransitionGroupMapType& transition_group_map)
{
//
// Step 1
//
// Store the peptide retention times in an intermediate map
prepareProteinPeptideMaps_(transition_exp);
// Store the proteins from the input in the output feature map
std::vector<ProteinHit> protein_hits;
for (const ProteinType& prot : transition_exp.getProteins())
{
ProteinHit prot_hit = ProteinHit();
prot_hit.setSequence(prot.sequence);
prot_hit.setAccession(prot.id);
protein_hits.push_back(prot_hit);
}
ProteinIdentification prot_id = ProteinIdentification();
prot_id.setHits(protein_hits);
prot_id.setIdentifier(run_identifier);
output.getProteinIdentifications().push_back(prot_id);
//
// Step 2
//
// Create all MRM transition groups from the individual transitions.
mapExperimentToTransitionList(input, transition_exp, transition_group_map, trafo, rt_extraction_window_);
int counter = 0;
for (const auto& trgroup : transition_group_map)
{
if (!trgroup.second.getChromatograms().empty()) {counter++; }
}
OPENMS_LOG_INFO << "Will analyse " << counter << " peptides with a total of " << transition_exp.getTransitions().size() << " transitions " << std::endl;
//
// Step 3
//
// Go through all transition groups: first create consensus features, then score them
MRMTransitionGroupPicker trgroup_picker;
Param trgroup_picker_param = param_.copy("TransitionGroupPicker:", true);
// If use_total_mi_score is defined, we need to instruct MRMTransitionGroupPicker to compute the score
if (su_.use_total_mi_score_)
{
trgroup_picker_param.setValue("compute_total_mi", "true");
}
trgroup_picker.setParameters(trgroup_picker_param);
Size progress = 0;
startProgress(0, transition_group_map.size(), "picking peaks");
for (TransitionGroupMapType::iterator trgroup_it = transition_group_map.begin(); trgroup_it != transition_group_map.end(); ++trgroup_it)
{
setProgress(++progress);
MRMTransitionGroupType& transition_group = trgroup_it->second;
if (transition_group.getChromatograms().empty() || transition_group.getTransitions().empty())
{
continue;
}
trgroup_picker.pickTransitionGroup(transition_group);
scorePeakgroups(trgroup_it->second, trafo, swath_maps, output);
}
endProgress();
//output.sortByPosition(); // if the exact same order is needed
return;
}
void MRMFeatureFinderScoring::prepareProteinPeptideMaps_(const OpenSwath::LightTargetedExperiment& transition_exp)
{
PeptideRefMap_.reserve(transition_exp.getCompounds().size());
for (Size i = 0; i < transition_exp.getCompounds().size(); i++)
{
PeptideRefMap_[transition_exp.getCompounds()[i].id] = &transition_exp.getCompounds()[i];
}
}
void MRMFeatureFinderScoring::splitTransitionGroupsDetection_(const MRMTransitionGroupType& transition_group,
MRMTransitionGroupType& transition_group_detection) const
{
std::vector<TransitionType> tr = transition_group.getTransitions();
std::vector<std::string> detecting_transitions;
for (std::vector<TransitionType>::const_iterator tr_it = tr.begin(); tr_it != tr.end(); ++tr_it)
{
if (tr_it->isDetectingTransition())
{
detecting_transitions.push_back(tr_it->getNativeID());
}
}
if (detecting_transitions.size() == transition_group.getTransitions().size())
{
transition_group_detection = transition_group;
}
else
{
transition_group_detection = transition_group.subset(detecting_transitions);
}
}
void MRMFeatureFinderScoring::splitTransitionGroupsIdentification_(const MRMTransitionGroupType& transition_group,
MRMTransitionGroupType& transition_group_identification,
MRMTransitionGroupType& transition_group_identification_decoy) const
{
std::vector<TransitionType> tr = transition_group.getTransitions();
std::vector<std::string> identifying_transitions, identifying_transitions_decoy;
for (std::vector<TransitionType>::iterator tr_it = tr.begin(); tr_it != tr.end(); ++tr_it)
{
if (tr_it->isIdentifyingTransition())
{
if (tr_it->getDecoy())
{
identifying_transitions_decoy.push_back(tr_it->getNativeID());
}
else
{
identifying_transitions.push_back(tr_it->getNativeID());
}
}
}
transition_group_identification = transition_group.subsetDependent(identifying_transitions);
transition_group_identification_decoy = transition_group.subsetDependent(identifying_transitions_decoy);
}
OpenSwath_Ind_Scores MRMFeatureFinderScoring::scoreIdentification_(MRMTransitionGroupType& trgr_ident,
MRMTransitionGroupType& trgr_detect,
OpenSwathScoring& scorer,
const size_t feature_idx,
const std::vector<std::string>& native_ids_detection,
const double det_intensity_ratio_score,
const double det_mi_ratio_score,
const std::vector<OpenSwath::SwathMap>& swath_maps,
const double drift_target,
RangeMobility& im_range) const
{
MRMFeature idmrmfeature = trgr_ident.getFeaturesMuteable()[feature_idx];
OpenSwath::IMRMFeature* idimrmfeature;
idimrmfeature = new MRMFeatureOpenMS(idmrmfeature);
std::vector<std::string> native_ids_identification;
std::vector<OpenSwath::ISignalToNoisePtr> signal_noise_estimators_identification;
for (Size i = 0; i < trgr_ident.size(); i++)
{
OpenSwath::ISignalToNoisePtr snptr(new OpenMS::SignalToNoiseOpenMS< MSChromatogram >(
trgr_ident.getChromatogram(trgr_ident.getTransitions()[i].getNativeID()),
sn_win_len_, sn_bin_count_, write_log_messages_));
if ( (snptr->getValueAtRT(idmrmfeature.getRT()) > uis_threshold_sn_)
&& (idmrmfeature.getFeature(trgr_ident.getTransitions()[i].getNativeID()).getIntensity() > uis_threshold_peak_area_))
{
signal_noise_estimators_identification.push_back(snptr);
native_ids_identification.push_back(trgr_ident.getTransitions()[i].getNativeID());
}
}
OpenSwath_Ind_Scores idscores;
if (!native_ids_identification.empty())
{
scorer.calculateChromatographicIdScores(idimrmfeature,
native_ids_identification,
native_ids_detection,
signal_noise_estimators_identification,
idscores);
std::vector<double> ind_mi_score;
if (su_.use_mi_score_)
{
ind_mi_score = idscores.ind_mi_score;
}
for (size_t i = 0; i < native_ids_identification.size(); i++)
{
idscores.ind_transition_names.emplace_back(native_ids_identification[i]);
if (idmrmfeature.getFeature(native_ids_identification[i]).getIntensity() > 0)
{
double intensity_score = double(idmrmfeature.getFeature(native_ids_identification[i]).getIntensity()) / double(idmrmfeature.getFeature(native_ids_identification[i]).getMetaValue("total_xic"));
double intensity_ratio = 0;
if (det_intensity_ratio_score > 0) { intensity_ratio = intensity_score / det_intensity_ratio_score; }
if (intensity_ratio > 1) { intensity_ratio = 1 / intensity_ratio; }
double total_mi = 0;
if (su_.use_total_mi_score_)
{
total_mi = double(idmrmfeature.getFeature(native_ids_identification[i]).getMetaValue("total_mi"));
}
double mi_ratio = 0;
if (su_.use_mi_score_ && su_.use_total_mi_score_)
{
if (det_mi_ratio_score > 0) { mi_ratio = (ind_mi_score[i] / total_mi) / det_mi_ratio_score; }
if (mi_ratio > 1) { mi_ratio = 1 / mi_ratio; }
}
idscores.ind_area_intensity.push_back(idmrmfeature.getFeature(native_ids_identification[i]).getIntensity());
idscores.ind_total_area_intensity.push_back(idmrmfeature.getFeature(native_ids_identification[i]).getMetaValue("total_xic"));
idscores.ind_intensity_score.push_back(intensity_score);
idscores.ind_apex_intensity.push_back(idmrmfeature.getFeature(native_ids_identification[i]).getMetaValue("peak_apex_int"));
idscores.ind_apex_position.push_back(idmrmfeature.getFeature(native_ids_identification[i]).getMetaValue("peak_apex_position"));
idscores.ind_fwhm.push_back(idmrmfeature.getFeature(native_ids_identification[i]).getMetaValue("width_at_50"));
idscores.ind_total_mi .push_back(total_mi);
idscores.ind_log_intensity.push_back(std::log(idmrmfeature.getFeature(native_ids_identification[i]).getIntensity()));
idscores.ind_intensity_ratio.push_back(intensity_ratio);
idscores.ind_mi_ratio.push_back(mi_ratio);
if (su_.use_peak_shape_metrics)
{
idscores.ind_start_position_at_5.push_back(idmrmfeature.getFeature(native_ids_identification[i]).getMetaValue("start_position_at_5"));
idscores.ind_end_position_at_5.push_back(idmrmfeature.getFeature(native_ids_identification[i]).getMetaValue("end_position_at_5"));
idscores.ind_start_position_at_10.push_back(idmrmfeature.getFeature(native_ids_identification[i]).getMetaValue("start_position_at_10"));
idscores.ind_end_position_at_10.push_back(idmrmfeature.getFeature(native_ids_identification[i]).getMetaValue("end_position_at_10"));
idscores.ind_start_position_at_50.push_back(idmrmfeature.getFeature(native_ids_identification[i]).getMetaValue("start_position_at_50"));
idscores.ind_end_position_at_50.push_back(idmrmfeature.getFeature(native_ids_identification[i]).getMetaValue("end_position_at_50"));
idscores.ind_total_width.push_back(idmrmfeature.getFeature(native_ids_identification[i]).getMetaValue("total_width"));
idscores.ind_tailing_factor.push_back(idmrmfeature.getFeature(native_ids_identification[i]).getMetaValue("tailing_factor"));
idscores.ind_asymmetry_factor.push_back(idmrmfeature.getFeature(native_ids_identification[i]).getMetaValue("asymmetry_factor"));
idscores.ind_slope_of_baseline.push_back(idmrmfeature.getFeature(native_ids_identification[i]).getMetaValue("slope_of_baseline"));
idscores.ind_baseline_delta_2_height.push_back(idmrmfeature.getFeature(native_ids_identification[i]).getMetaValue("baseline_delta_2_height"));
idscores.ind_points_across_baseline.push_back(idmrmfeature.getFeature(native_ids_identification[i]).getMetaValue("points_across_baseline"));
idscores.ind_points_across_half_height.push_back(idmrmfeature.getFeature(native_ids_identification[i]).getMetaValue("points_across_half_height"));
}
}
else
{
idscores.ind_area_intensity.push_back(0);
idscores.ind_total_area_intensity.push_back(0);
idscores.ind_intensity_score.push_back(0);
idscores.ind_apex_intensity.push_back(0);
idscores.ind_apex_position.push_back(0);
idscores.ind_fwhm.push_back(0);
idscores.ind_total_mi.push_back(0);
idscores.ind_log_intensity.push_back(0);
idscores.ind_intensity_ratio.push_back(0);
idscores.ind_mi_ratio.push_back(0);
if (su_.use_peak_shape_metrics)
{
idscores.ind_start_position_at_5.push_back(0);
idscores.ind_end_position_at_5.push_back(0);
idscores.ind_start_position_at_10.push_back(0);
idscores.ind_end_position_at_10.push_back(0);
idscores.ind_start_position_at_50.push_back(0);
idscores.ind_end_position_at_50.push_back(0);
idscores.ind_total_width.push_back(0);
idscores.ind_tailing_factor.push_back(0);
idscores.ind_asymmetry_factor.push_back(0);
idscores.ind_slope_of_baseline.push_back(0);
idscores.ind_baseline_delta_2_height.push_back(0);
idscores.ind_points_across_baseline.push_back(0);
idscores.ind_points_across_half_height.push_back(0);
}
}
idscores.ind_num_transitions = native_ids_identification.size();
}
}
// Compute DIA scores only on the identification transitions
bool swath_present = (!swath_maps.empty() && swath_maps[0].sptr->getNrSpectra() > 0);
if (swath_present && su_.use_dia_scores_ && !native_ids_identification.empty())
{
std::vector<double> ind_isotope_correlation, ind_isotope_overlap, ind_massdev_score, ind_im_drift, ind_im_drift_left, ind_im_drift_right, ind_im_delta, ind_im_delta_score, ind_im_log_intensity;
std::vector<double> ind_im_contrast_coelution, ind_im_contrast_shape, ind_im_sum_contrast_coelution, ind_im_sum_contrast_shape;
for (size_t i = 0; i < native_ids_identification.size(); i++)
{
OpenSwath_Scores tmp_scores;
scorer.calculateDIAIdScores(idimrmfeature,
trgr_ident.getTransition(native_ids_identification[i]),
trgr_detect,
swath_maps, im_range, diascoring_, tmp_scores, drift_target);
ind_isotope_correlation.push_back(tmp_scores.isotope_correlation);
ind_isotope_overlap.push_back(tmp_scores.isotope_overlap);
ind_massdev_score.push_back(tmp_scores.massdev_score);
// Ion mobility scores
ind_im_drift.push_back(tmp_scores.im_drift);
ind_im_drift_left.push_back(tmp_scores.im_drift_left);
ind_im_drift_right.push_back(tmp_scores.im_drift_right);
ind_im_delta.push_back(tmp_scores.im_delta);
ind_im_delta_score.push_back(tmp_scores.im_delta_score);
ind_im_log_intensity.push_back(tmp_scores.im_log_intensity);
ind_im_contrast_coelution.push_back(tmp_scores.im_ind_contrast_coelution);
ind_im_contrast_shape.push_back(tmp_scores.im_ind_contrast_shape);
ind_im_sum_contrast_coelution.push_back(tmp_scores.im_ind_sum_contrast_coelution);
ind_im_sum_contrast_shape.push_back(tmp_scores.im_ind_sum_contrast_shape);
}
idscores.ind_isotope_correlation = ind_isotope_correlation;
idscores.ind_isotope_overlap = ind_isotope_overlap;
idscores.ind_massdev_score = ind_massdev_score;
idscores.ind_im_drift = ind_im_drift;
idscores.ind_im_drift_left = ind_im_drift_left;
idscores.ind_im_drift_right = ind_im_drift_right;
idscores.ind_im_delta = ind_im_delta;
idscores.ind_im_delta_score = ind_im_delta_score;
idscores.ind_im_log_intensity = ind_im_log_intensity;
idscores.ind_im_contrast_coelution = ind_im_contrast_coelution;
idscores.ind_im_contrast_shape = ind_im_contrast_shape;
idscores.ind_im_sum_contrast_coelution = ind_im_sum_contrast_coelution;
idscores.ind_im_sum_contrast_shape = ind_im_sum_contrast_shape;
}
delete idimrmfeature;
return idscores;
}
/**
  @brief Score all candidate peak groups (MRM features) of a transition group and append the best to @p output.

  The incoming @p transition_group is first split into detection transitions and (if UIS
  scoring is enabled) identification / identification-decoy transitions. For each picked
  MRM feature the method computes S/N, RT, library, chromatographic, DIA, ion-mobility and
  (optionally) UIS scores, attaches them as feature scores/meta values, adds a
  PeptideIdentification, and finally reports the features ordered by overall quality.

  @param transition_group The transition group to score; on return it is replaced by the
         detection-only group (all data manipulation performed during scoring is kept).
  @param trafo RT normalization transformation (from experimental to normalized RT).
  @param swath_maps SWATH maps used for DIA scoring (may be empty for SRM/MRM data).
  @param output Feature map to which the scored features are appended (quality-sorted,
         truncated at stop_report_after_feature_ if set).
  @param ms1only If true, only MS1 (precursor) level scores are computed.

  @throws Exception::IllegalArgument if the peptide reference map is empty, the transition
          group has no ID, or a non-MS1-only group has no chromatograms.
*/
void MRMFeatureFinderScoring::scorePeakgroups(MRMTransitionGroupType& transition_group,
                                              const TransformationDescription& trafo,
                                              const std::vector<OpenSwath::SwathMap>& swath_maps,
                                              FeatureMap& output,
                                              bool ms1only) const
{
  if (PeptideRefMap_.empty())
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
      "Error: Peptide reference map is empty, please call prepareProteinPeptideMaps_ first.");
  }
  if (transition_group.getTransitionGroupID().empty())
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
      "Error: Transition group id is empty, please set it.");
  }
  // Split into detection and (optionally) identification / identification-decoy transitions.
  MRMTransitionGroupType transition_group_detection, transition_group_identification, transition_group_identification_decoy;
  splitTransitionGroupsDetection_(transition_group, transition_group_detection);
  if (su_.use_uis_scores)
  {
    splitTransitionGroupsIdentification_(transition_group, transition_group_identification, transition_group_identification_decoy);
  }
  std::vector<OpenSwath::ISignalToNoisePtr> signal_noise_estimators;
  std::vector<MRMFeature> feature_list;
  // get drift time upper/lower offset (this assumes that all chromatograms
  // are derived from the same precursor with the same drift time)
  RangeMobility im_range;
  double drift_target(0);
  // Helper: build an ion mobility extraction range around the precursor drift time.
  auto setDriftTarget = [](auto& prec){
    double lower_bound = prec.getDriftTime() - prec.getDriftTimeWindowLowerOffset();
    double upper_bound = prec.getDriftTime() + prec.getDriftTimeWindowUpperOffset();
    return RangeMobility(lower_bound, upper_bound);
  };
  // Take the drift time from the first fragment chromatogram if present, otherwise
  // from the first precursor chromatogram.
  if ( !transition_group_detection.getChromatograms().empty() )
  {
    auto & prec = transition_group_detection.getChromatograms()[0].getPrecursor();
    drift_target = prec.getDriftTime();
    if (drift_target > 0) im_range = setDriftTarget(prec);
  }
  else if ( !transition_group_detection.getPrecursorChromatograms().empty() )
  {
    auto & prec = transition_group_detection.getPrecursorChromatograms()[0].getPrecursor();
    drift_target = prec.getDriftTime();
    if (drift_target > 0) im_range = setDriftTarget(prec);
  }
  // currently we cannot do much about the log messages and they mostly occur in decoy transition signals
  // One S/N estimator per fragment-level chromatogram.
  for (Size k = 0; k < transition_group_detection.getChromatograms().size(); k++)
  {
    OpenSwath::ISignalToNoisePtr snptr(new OpenMS::SignalToNoiseOpenMS< MSChromatogram >(
      transition_group_detection.getChromatograms()[k], sn_win_len_, sn_bin_count_, write_log_messages_));
    signal_noise_estimators.push_back(snptr);
  }
  // skip MS1 noise estimator if we perform fragment ion analysis
  std::vector<OpenSwath::ISignalToNoisePtr> ms1_signal_noise_estimators;
  if (ms1only)
  {
    for (Size k = 0; k < transition_group_detection.getPrecursorChromatograms().size(); k++)
    {
      OpenSwath::ISignalToNoisePtr snptr(new OpenMS::SignalToNoiseOpenMS< MSChromatogram >(
        transition_group_detection.getPrecursorChromatograms()[k], sn_win_len_, sn_bin_count_, write_log_messages_));
      ms1_signal_noise_estimators.push_back(snptr);
    }
  }
  // get the expected rt value for this compound
  const PeptideType* pep = PeptideRefMap_.at(transition_group_detection.getTransitionGroupID());
  double expected_rt = pep->rt;
  // Invert the trafo to map the assay (normalized) RT into experimental RT space.
  TransformationDescription newtr = trafo;
  newtr.invert();
  expected_rt = newtr.apply(expected_rt);
  OpenSwathScoring scorer;
  scorer.initialize(rt_normalization_factor_, add_up_spectra_,
                    spacing_for_spectra_resampling_,
                    merge_spectra_by_peak_width_fraction_,
                    im_extra_drift_,
                    su_,
                    spectrum_addition_method_,
                    spectrum_merge_method_type_,
                    use_ms1_ion_mobility_,
                    apply_im_peak_picking_);
  // Used below to count missed cleavages for the peptide hit annotation.
  ProteaseDigestion pd;
  pd.setEnzyme("Trypsin");
  auto& mrmfeatures = transition_group_detection.getFeaturesMuteable();
  // Go through all peak groups (found MRM features) and score them
  // NOTE(review): parallelize only if we are not already inside a parallel region.
  // Throwing from inside the parallel loop (see below) does not propagate across
  // OpenMP threads — TODO confirm this is the intended error behavior.
#ifdef _OPENMP
  int in_parallel = omp_in_parallel();
#endif
#pragma omp parallel for if (in_parallel == 0)
  for (SignedSize feature_idx = 0; feature_idx < (SignedSize) mrmfeatures.size(); ++feature_idx)
  {
    auto& mrmfeature = mrmfeatures[feature_idx];
    OpenSwath::IMRMFeature* imrmfeature;
    imrmfeature = new MRMFeatureOpenMS(mrmfeature);
    OPENMS_LOG_DEBUG << "Scoring feature " << (mrmfeature) << " == " << mrmfeature.getMetaValue("PeptideRef") <<
      " [ expected RT " << PeptideRefMap_.at(mrmfeature.getMetaValue("PeptideRef"))->rt << " / " << expected_rt << " ]" <<
      " with " << transition_group_detection.size() << " transitions and " <<
      transition_group_detection.getChromatograms().size() << " chromatograms" << std::endl;
    int group_size = boost::numeric_cast<int>(transition_group_detection.size());
    if (group_size == 0 && !ms1only)
    {
      delete imrmfeature; // free resources before continuing
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
        "Error: Transition group " + transition_group_detection.getTransitionGroupID() +
        " has no chromatograms.");
    }
    bool swath_present = (!swath_maps.empty() && swath_maps[0].sptr->getNrSpectra() > 0);
    double xx_lda_prescore;
    double precursor_mz(-1);
    if (ms1only)
    {
      ///////////////////////////////////
      // Call the scoring for MS1 only
      ///////////////////////////////////
      OpenSwath_Scores& scores = mrmfeature.getScores();
      precursor_mz = mrmfeature.getMZ();
      // S/N scores
      OpenSwath::MRMScoring mrmscore_;
      scores.sn_ratio = mrmscore_.calcSNScore(imrmfeature, ms1_signal_noise_estimators);
      // everything below S/N 1 can be set to zero (and the log safely applied)
      if (scores.sn_ratio < 1)
      {
        scores.log_sn_score = 0;
      }
      else
      {
        scores.log_sn_score = std::log(scores.sn_ratio);
      }
      if (su_.use_sn_score_)
      {
        mrmfeature.addScore("sn_ratio", scores.sn_ratio);
        mrmfeature.addScore("var_log_sn_score", scores.log_sn_score);
        // compute subfeature log-SN values
        for (Size k = 0; k < transition_group_detection.getPrecursorChromatograms().size(); k++)
        {
          Feature & f = mrmfeature.getPrecursorFeature(transition_group_detection.getPrecursorChromatograms()[k].getNativeID());
          double sn_value = ms1_signal_noise_estimators[k]->getValueAtRT(mrmfeature.getRT());
          // clamp so the log is never negative
          if (sn_value < 1) {sn_value = 1.0;}
          f.setMetaValue("logSN", std::log(sn_value));
        }
      }
      // RT scores
      double normalized_experimental_rt = trafo.apply(imrmfeature->getRT());
      {
        // rt score is delta iRT
        double rt_score = mrmscore_.calcRTScore(*pep, normalized_experimental_rt);
        scores.normalized_experimental_rt = normalized_experimental_rt;
        scores.raw_rt_score = rt_score;
        scores.norm_rt_score = rt_score / rt_normalization_factor_;
      }
      if (su_.use_rt_score_)
      {
        mrmfeature.addScore("delta_rt", mrmfeature.getRT() - expected_rt);
        mrmfeature.addScore("assay_rt", expected_rt);
        mrmfeature.addScore("norm_RT", scores.normalized_experimental_rt);
        mrmfeature.addScore("rt_score", scores.raw_rt_score);
        mrmfeature.addScore("var_norm_rt_score", scores.norm_rt_score);
      }
      // full spectra scores
      if (ms1_map_ && ms1_map_->getNrSpectra() > 0 && mrmfeature.getMZ() > 0)
      {
        scorer.calculatePrecursorDIAScores(ms1_map_, diascoring_, precursor_mz, imrmfeature->getRT(), *pep, im_range, scores);
      }
      if (su_.use_ms1_fullscan)
      {
        mrmfeature.addScore("var_ms1_ppm_diff", scores.ms1_ppm_score);
        mrmfeature.addScore("var_ms1_isotope_correlation", scores.ms1_isotope_correlation);
        mrmfeature.addScore("var_ms1_isotope_overlap", scores.ms1_isotope_overlap);
      }
      // Negated so that, like the other scores, higher means better.
      xx_lda_prescore = -scores.calculate_lda_prescore(scores);
      if (scoring_model_ == "single_transition")
      {
        xx_lda_prescore = -scores.calculate_lda_single_transition(scores);
      }
      mrmfeature.addScore("main_var_xx_lda_prelim_score", xx_lda_prescore);
      mrmfeature.addScore("xx_lda_prelim_score", xx_lda_prescore);
      mrmfeature.setOverallQuality(xx_lda_prescore);
    }
    else //!ms1only
    {
      ///////////////////////////////////
      // Call the scoring for fragment ions
      ///////////////////////////////////
      std::vector<double> normalized_library_intensity;
      transition_group_detection.getLibraryIntensity(normalized_library_intensity);
      OpenSwath::Scoring::normalize_sum(&normalized_library_intensity[0], boost::numeric_cast<int>(normalized_library_intensity.size()));
      std::vector<std::string> native_ids_detection;
      for (Size i = 0; i < transition_group_detection.size(); i++)
      {
        std::string native_id = transition_group_detection.getTransitions()[i].getNativeID();
        native_ids_detection.push_back(native_id);
      }
      std::vector<std::string> precursor_ids;
      for (Size i = 0; i < transition_group_detection.getPrecursorChromatograms().size(); i++)
      {
        std::string precursor_id = transition_group_detection.getPrecursorChromatograms()[i].getNativeID();
        precursor_ids.push_back(precursor_id);
      }
      ///////////////////////////////////
      // Library and chromatographic scores
      OpenSwath_Scores& scores = mrmfeature.getScores();
      scorer.calculateChromatographicScores(imrmfeature, native_ids_detection, precursor_ids, normalized_library_intensity,
                                            signal_noise_estimators, scores);
      double normalized_experimental_rt = trafo.apply(imrmfeature->getRT());
      scorer.calculateLibraryScores(imrmfeature, transition_group_detection.getTransitions(), *pep, normalized_experimental_rt, scores);
      ///////////////////////////////////
      // DIA scores
      if (swath_present && su_.use_dia_scores_)
      {
        std::vector<double> masserror_ppm;
        scorer.calculateDIAScores(imrmfeature,
                                  transition_group_detection.getTransitions(),
                                  swath_maps, ms1_map_, diascoring_, *pep, scores, masserror_ppm,
                                  drift_target, im_range);
        mrmfeature.setMetaValue("masserror_ppm", masserror_ppm);
      }
      // Intensity ratio: feature intensity relative to the total XIC (guard against /0).
      double det_intensity_ratio_score = 0;
      if ((double)mrmfeature.getMetaValue("total_xic") > 0)
      {
        det_intensity_ratio_score = mrmfeature.getIntensity() / (double)mrmfeature.getMetaValue("total_xic");
      }
      ///////////////////////////////////
      // Mutual Information scores
      double det_mi_ratio_score = 0;
      if (su_.use_mi_score_ && su_.use_total_mi_score_)
      {
        if ((double)mrmfeature.getMetaValue("total_mi") > 0)
        {
          det_mi_ratio_score = scores.mi_score / (double)mrmfeature.getMetaValue("total_mi");
        }
      }
      ///////////////////////////////////
      // Unique Ion Signature (UIS) scores
      if (su_.use_uis_scores && !transition_group_identification.getTransitions().empty())
      {
        OpenSwath_Ind_Scores idscores = scoreIdentification_(transition_group_identification, transition_group_detection, scorer, feature_idx,
                                                             native_ids_detection, det_intensity_ratio_score,
                                                             det_mi_ratio_score, swath_maps,drift_target, im_range);
        mrmfeature.IDScoresAsMetaValue(false, idscores);
      }
      if (su_.use_uis_scores && !transition_group_identification_decoy.getTransitions().empty())
      {
        OpenSwath_Ind_Scores idscores = scoreIdentification_(transition_group_identification_decoy, transition_group_detection, scorer, feature_idx,
                                                             native_ids_detection, det_intensity_ratio_score,
                                                             det_mi_ratio_score, swath_maps, drift_target, im_range);
        mrmfeature.IDScoresAsMetaValue(true, idscores);
      }
      if (su_.use_coelution_score_)
      {
        mrmfeature.addScore("var_xcorr_coelution", scores.xcorr_coelution_score);
        mrmfeature.addScore("var_xcorr_coelution_weighted", scores.weighted_coelution_score);
      }
      if (su_.use_shape_score_)
      {
        mrmfeature.addScore("var_xcorr_shape", scores.xcorr_shape_score);
        mrmfeature.addScore("var_xcorr_shape_weighted", scores.weighted_xcorr_shape);
      }
      if (su_.use_library_score_)
      {
        mrmfeature.addScore("var_library_corr", scores.library_corr);
        mrmfeature.addScore("var_library_rmsd", scores.library_norm_manhattan);
        mrmfeature.addScore("var_library_sangle", scores.library_sangle);
        mrmfeature.addScore("var_library_rootmeansquare", scores.library_rootmeansquare);
        mrmfeature.addScore("var_library_manhattan", scores.library_manhattan);
        mrmfeature.addScore("var_library_dotprod", scores.library_dotprod);
      }
      if (su_.use_rt_score_)
      {
        mrmfeature.addScore("delta_rt", mrmfeature.getRT() - expected_rt);
        mrmfeature.addScore("assay_rt", expected_rt);
        mrmfeature.addScore("norm_RT", scores.normalized_experimental_rt);
        mrmfeature.addScore("rt_score", scores.raw_rt_score);
        mrmfeature.addScore("var_norm_rt_score", scores.norm_rt_score);
      }
      // TODO do we really want these intensity scores ?
      if (su_.use_intensity_score_)
      {
        if ((double)mrmfeature.getMetaValue("total_xic") > 0)
        {
          mrmfeature.addScore("var_intensity_score", mrmfeature.getIntensity() / (double)mrmfeature.getMetaValue("total_xic"));
        }
        else
        {
          mrmfeature.addScore("var_intensity_score", 0);
        }
      }
      if (su_.use_total_xic_score_) { mrmfeature.addScore("total_xic", (double)mrmfeature.getMetaValue("total_xic")); }
      if (su_.use_total_mi_score_) { mrmfeature.addScore("total_mi", (double)mrmfeature.getMetaValue("total_mi")); }
      if (su_.use_nr_peaks_score_) { mrmfeature.addScore("nr_peaks", scores.nr_peaks); }
      if (su_.use_sn_score_)
      {
        mrmfeature.addScore("sn_ratio", scores.sn_ratio);
        mrmfeature.addScore("var_log_sn_score", scores.log_sn_score);
        // compute subfeature log-SN values
        for (Size k = 0; k < transition_group_detection.getChromatograms().size(); k++)
        {
          Feature & f = mrmfeature.getFeature(transition_group_detection.getChromatograms()[k].getNativeID());
          double sn_value = signal_noise_estimators[k]->getValueAtRT(mrmfeature.getRT());
          // clamp so the log is never negative
          if (sn_value < 1) {sn_value = 1.0;}
          f.setMetaValue("logSN", std::log(sn_value));
        }
      }
      if (su_.use_mi_score_)
      {
        mrmfeature.addScore("var_mi_score", scores.mi_score);
        mrmfeature.addScore("var_mi_weighted_score", scores.weighted_mi_score);
        if (su_.use_total_mi_score_)
        {
          if (((double)mrmfeature.getMetaValue("total_mi")) > 0)
          {
            mrmfeature.addScore("var_mi_ratio_score", scores.mi_score / (double)mrmfeature.getMetaValue("total_mi"));
          }
          else
          {
            mrmfeature.addScore("var_mi_ratio_score", 0);
          }
        }
      }
      // TODO get it working with imrmfeature
      if (su_.use_elution_model_score_)
      {
        //TODO wouldn't a weighted elution model score be much better? lower intensity traces usually will not have
        // a nice profile
        scores.elution_model_fit_score = emgscoring_.calcElutionFitScore(mrmfeature, transition_group_detection);
        mrmfeature.addScore("var_elution_model_fit_score", scores.elution_model_fit_score);
      }
      // Negated so that, like the other scores, higher means better.
      xx_lda_prescore = -scores.calculate_lda_prescore(scores);
      if (scoring_model_ == "single_transition")
      {
        xx_lda_prescore = -scores.calculate_lda_single_transition(scores);
      }
      // The LDA prescore is only the main score when no SWATH data is present;
      // otherwise the SWATH LDA prescore (below) overrides the overall quality.
      if (!swath_present)
      {
        mrmfeature.addScore("main_var_xx_lda_prelim_score", xx_lda_prescore);
      }
      mrmfeature.setOverallQuality(xx_lda_prescore);
      mrmfeature.addScore("xx_lda_prelim_score", xx_lda_prescore);
      // Add the DIA scores and ion mobility scores
      if (swath_present && su_.use_dia_scores_)
      {
        if (su_.use_ms2_isotope_scores)
        {
          mrmfeature.addScore("var_isotope_correlation_score", scores.isotope_correlation);
          mrmfeature.addScore("var_isotope_overlap_score", scores.isotope_overlap);
        }
        mrmfeature.addScore("var_massdev_score", scores.massdev_score);
        mrmfeature.addScore("var_massdev_score_weighted", scores.weighted_massdev_score);
        if (su_.use_ionseries_scores)
        {
          mrmfeature.addScore("var_bseries_score", scores.bseries_score);
          mrmfeature.addScore("var_yseries_score", scores.yseries_score);
        }
        if (su_.use_ms2_isotope_scores)
        {
          mrmfeature.addScore("var_dotprod_score", scores.dotprod_score_dia);
          mrmfeature.addScore("var_manhatt_score", scores.manhatt_score_dia);
        }
        if (su_.use_ms1_correlation)
        {
          // NOTE(review): -1 appears to be the "not computed" sentinel here — TODO confirm
          if (scores.ms1_xcorr_shape_score > -1)
          {
            mrmfeature.addScore("var_ms1_xcorr_shape", scores.ms1_xcorr_shape_score);
          }
          if (scores.ms1_xcorr_coelution_score > -1)
          {
            mrmfeature.addScore("var_ms1_xcorr_coelution", scores.ms1_xcorr_coelution_score);
          }
          mrmfeature.addScore("var_ms1_xcorr_shape_contrast", scores.ms1_xcorr_shape_contrast_score);
          mrmfeature.addScore("var_ms1_xcorr_shape_combined", scores.ms1_xcorr_shape_combined_score);
          mrmfeature.addScore("var_ms1_xcorr_coelution_contrast", scores.ms1_xcorr_coelution_contrast_score);
          mrmfeature.addScore("var_ms1_xcorr_coelution_combined", scores.ms1_xcorr_coelution_combined_score);
        }
        if (su_.use_ms1_mi)
        {
          if (scores.ms1_mi_score > -1)
          {
            mrmfeature.addScore("var_ms1_mi_score", scores.ms1_mi_score);
          }
          mrmfeature.addScore("var_ms1_mi_contrast_score", scores.ms1_mi_contrast_score);
          mrmfeature.addScore("var_ms1_mi_combined_score", scores.ms1_mi_combined_score);
        }
        if (su_.use_ms1_fullscan)
        {
          mrmfeature.addScore("var_ms1_ppm_diff", scores.ms1_ppm_score);
          mrmfeature.addScore("var_ms1_isotope_correlation", scores.ms1_isotope_correlation);
          mrmfeature.addScore("var_ms1_isotope_overlap", scores.ms1_isotope_overlap);
        }
        double xx_swath_prescore = -scores.calculate_swath_lda_prescore(scores);
        mrmfeature.addScore("main_var_xx_swath_prelim_score", xx_swath_prescore);
        mrmfeature.setOverallQuality(xx_swath_prescore);
      }
      if (swath_present && su_.use_im_scores)
      {
        mrmfeature.addScore("var_im_xcorr_shape", scores.im_xcorr_shape_score);
        mrmfeature.addScore("var_im_xcorr_coelution", scores.im_xcorr_coelution_score);
        mrmfeature.addScore("var_im_delta_score", scores.im_delta_score);
        mrmfeature.addScore("var_im_ms1_delta_score", scores.im_ms1_delta_score);
        mrmfeature.addScore("im_drift", scores.im_drift); // MS2 level
        mrmfeature.addScore("im_drift_left", scores.im_drift_left); // MS2 level
        mrmfeature.addScore("im_drift_right", scores.im_drift_right); // MS2 level
        mrmfeature.addScore("im_drift_weighted", scores.im_drift_weighted); // MS2 level
        mrmfeature.addScore("im_log_intensity", scores.im_log_intensity); // MS2 level
        mrmfeature.addScore("im_ms1_drift", scores.im_ms1_drift); // MS1 level
        mrmfeature.addScore("im_ms1_delta", scores.im_ms1_delta); // MS1 level
        mrmfeature.addScore("im_delta", scores.im_delta); // MS2 level
      }
      precursor_mz = transition_group_detection.getTransitions()[0].getPrecursorMZ();
    }
    ///////////////////////////////////////////////////////////////////////////
    // add the peptide hit information to the feature
    ///////////////////////////////////////////////////////////////////////////
    PeptideIdentification pep_id_;
    PeptideHit pep_hit_;
    if (pep->getChargeState() != 0)
    {
      pep_hit_.setCharge(pep->getChargeState());
    }
    pep_hit_.setScore(xx_lda_prescore);
    if (swath_present && mrmfeature.metaValueExists("xx_swath_prelim_score"))
    {
      pep_hit_.setScore(mrmfeature.getMetaValue("xx_swath_prelim_score"));
    }
    if (pep->isPeptide() && !pep->sequence.empty())
    {
      pep_hit_.setSequence(AASequence::fromString(pep->sequence));
      // Missed cleavages = number of (tryptic) peptides the sequence digests into, minus one.
      mrmfeature.setMetaValue("missedCleavages", pd.peptideCount(pep_hit_.getSequence()) - 1);
    }
    // set protein accession numbers
    for (Size k = 0; k < pep->protein_refs.size(); k++)
    {
      PeptideEvidence pe;
      pe.setProteinAccession(pep->protein_refs[k]);
      pep_hit_.addPeptideEvidence(pe);
    }
    pep_id_.insertHit(pep_hit_);
    pep_id_.setIdentifier(run_identifier);
    mrmfeature.getPeptideIdentifications().push_back(pep_id_);
    mrmfeature.ensureUniqueId();
    mrmfeature.setMetaValue("PrecursorMZ", precursor_mz);
    prepareFeatureOutput_(mrmfeature, ms1only, pep->getChargeState());
    mrmfeature.setMetaValue("xx_swath_prelim_score", 0.0);
    // Features are appended in nondeterministic order under OpenMP; the
    // quality sort below restores a deterministic ranking.
#pragma omp critical
    feature_list.push_back(mrmfeature);
    delete imrmfeature;
  }
  // Order by quality (high to low, via reverse iterator)
  std::sort(feature_list.rbegin(), feature_list.rend(), OpenMS::Feature::OverallQualityLess());
  for (Size i = 0; i < feature_list.size(); i++)
  {
    // Optionally report only the top N features per transition group.
    if (stop_report_after_feature_ >= 0 && i >= (Size)stop_report_after_feature_) {break;}
    output.push_back(feature_list[i]);
  }
  // store all data manipulation performed on the features of the transition group
  transition_group = transition_group_detection;
}
/// Finalize a scored MRM feature for output: process its MS2 subfeatures, append
/// the MS1 (precursor) subfeatures, and overwrite the reported intensities with
/// the sums of the signal above the quantification cutoff.
void MRMFeatureFinderScoring::prepareFeatureOutput_(OpenMS::MRMFeature& mrmfeature, bool ms1only, int charge) const
{
  // Start from the MS2-level subfeatures; precursor subfeatures are appended below.
  std::vector<Feature> subordinates = mrmfeature.getFeatures();
  double ms2_intensity_sum = 0, ms2_apex_sum = 0;
  double ms1_intensity_sum = 0, ms1_apex_sum = 0;
  for (Feature& sub : subordinates)
  {
    processFeatureForOutput(sub, write_convex_hull_, quantification_cutoff_, ms2_intensity_sum, ms2_apex_sum, "MS2");
  }
  // Also append data for MS1 precursors
  std::vector<String> precursors_ids;
  mrmfeature.getPrecursorFeatureIDs(precursors_ids);
  for (const String& prec_id : precursors_ids)
  {
    Feature prec_feature = mrmfeature.getPrecursorFeature(prec_id);
    if (charge != 0)
    {
      prec_feature.setCharge(charge);
    }
    processFeatureForOutput(prec_feature, write_convex_hull_, quantification_cutoff_, ms1_intensity_sum, ms1_apex_sum, "MS1");
    if (ms1only)
    {
      // In MS1-only mode the precursor traces provide the quantified signal.
      ms2_intensity_sum += prec_feature.getIntensity();
      ms2_apex_sum += (double)prec_feature.getMetaValue("peak_apex_int");
    }
    subordinates.push_back(prec_feature);
  }
  mrmfeature.setSubordinates(subordinates); // add all the subfeatures as subordinates
  // overwrite the reported intensities with those above the m/z cutoff
  mrmfeature.setIntensity(ms2_intensity_sum);
  mrmfeature.setMetaValue("peak_apices_sum", ms2_apex_sum);
  mrmfeature.setMetaValue("ms1_area_intensity", ms1_intensity_sum);
  mrmfeature.setMetaValue("ms1_apex_intensity", ms1_apex_sum);
}
/// Cache all algorithm parameters in member variables for fast access during scoring.
/// All assignments below are independent reads from param_.
void MRMFeatureFinderScoring::updateMembers_()
{
  // General feature reporting / extraction settings
  stop_report_after_feature_ = (int)param_.getValue("stop_report_after_feature");
  rt_extraction_window_ = (double)param_.getValue("rt_extraction_window");
  rt_normalization_factor_ = (double)param_.getValue("rt_normalization_factor");
  quantification_cutoff_ = (double)param_.getValue("quantification_cutoff");
  write_convex_hull_ = param_.getValue("write_convex_hull").toBool();
  strict_ = param_.getValue("strict").toBool();
  scoring_model_ = param_.getValue("scoring_model").toString();

  // Spectrum addition / resampling settings
  add_up_spectra_ = param_.getValue("add_up_spectra");
  spectrum_addition_method_ = param_.getValue("spectrum_addition_method").toString();
  spectrum_merge_method_type_ = param_.getValue("spectrum_merge_method_type").toString();
  spacing_for_spectra_resampling_ = param_.getValue("spacing_for_spectra_resampling");
  merge_spectra_by_peak_width_fraction_ = param_.getValue("merge_spectra_by_peak_width_fraction");

  // Ion mobility settings
  im_extra_drift_ = (double)param_.getValue("im_extra_drift");
  use_ms1_ion_mobility_ = param_.getValue("use_ms1_ion_mobility").toBool();
  apply_im_peak_picking_ = param_.getValue("apply_im_peak_picking").toBool();

  // UIS (identification transition) thresholds
  uis_threshold_sn_ = param_.getValue("uis_threshold_sn");
  uis_threshold_peak_area_ = param_.getValue("uis_threshold_peak_area");

  // Signal-to-noise estimation settings (shared with the chromatogram peak picker)
  sn_win_len_ = (double)param_.getValue("TransitionGroupPicker:PeakPickerChromatogram:sn_win_len");
  sn_bin_count_ = (unsigned int)param_.getValue("TransitionGroupPicker:PeakPickerChromatogram:sn_bin_count");
  write_log_messages_ = param_.getValue("TransitionGroupPicker:PeakPickerChromatogram:write_sn_log_messages").toBool();

  // Forward the nested parameter sections to the sub-algorithms
  diascoring_.setParameters(param_.copy("DIAScoring:", true));
  emgscoring_.setFitterParam(param_.copy("EMGScoring:", true));

  // Enable/disable the individual score groups
  su_.use_coelution_score_ = param_.getValue("Scores:use_coelution_score").toBool();
  su_.use_shape_score_ = param_.getValue("Scores:use_shape_score").toBool();
  su_.use_rt_score_ = param_.getValue("Scores:use_rt_score").toBool();
  su_.use_library_score_ = param_.getValue("Scores:use_library_score").toBool();
  su_.use_elution_model_score_ = param_.getValue("Scores:use_elution_model_score").toBool();
  su_.use_intensity_score_ = param_.getValue("Scores:use_intensity_score").toBool();
  su_.use_total_xic_score_ = param_.getValue("Scores:use_total_xic_score").toBool();
  su_.use_total_mi_score_ = param_.getValue("Scores:use_total_mi_score").toBool();
  su_.use_nr_peaks_score_ = param_.getValue("Scores:use_nr_peaks_score").toBool();
  su_.use_sn_score_ = param_.getValue("Scores:use_sn_score").toBool();
  su_.use_mi_score_ = param_.getValue("Scores:use_mi_score").toBool();
  su_.use_dia_scores_ = param_.getValue("Scores:use_dia_scores").toBool();
  su_.use_im_scores = param_.getValue("Scores:use_ion_mobility_scores").toBool();
  su_.use_ms1_correlation = param_.getValue("Scores:use_ms1_correlation").toBool();
  su_.use_ms1_fullscan = param_.getValue("Scores:use_ms1_fullscan").toBool();
  su_.use_ms1_mi = param_.getValue("Scores:use_ms1_mi").toBool();
  su_.use_uis_scores = param_.getValue("Scores:use_uis_scores").toBool();
  su_.use_ionseries_scores = param_.getValue("Scores:use_ionseries_scores").toBool();
  su_.use_ms2_isotope_scores = param_.getValue("Scores:use_ms2_isotope_scores").toBool();
  su_.use_peak_shape_metrics = param_.getValue("Scores:use_peak_shape_metrics").toBool();
}
/**
  @brief Map chromatograms of an experiment to the transitions of a targeted experiment,
  building one transition group per peptide reference.

  Improvements over the previous version: the chromatogram index lookup is done once per
  transition (iterator reused instead of a second operator[] lookup), the transition group
  lookup uses a single find/insert instead of three map accesses, the loop bound reuses the
  cached chromatogram count, and the RT window variables are scoped to their use instead of
  being declared uninitialized at function scope.

  @param input Spectrum/chromatogram access to the experiment.
  @param transition_exp The targeted experiment providing the transitions.
  @param transition_group_map Output map: peptide reference -> transition group.
  @param trafo RT normalization transformation (inverted internally to map normalized RT
         back to experimental RT).
  @param rt_extraction_window If > 0, chromatograms are filtered to +/- this window around
         the de-normalized expected RT; otherwise the full chromatogram is kept.

  @throws Exception::IllegalArgument in strict mode if a transition has no chromatogram,
          a chromatogram is empty, or a transition group is internally inconsistent.
*/
void MRMFeatureFinderScoring::mapExperimentToTransitionList(const OpenSwath::SpectrumAccessPtr& input,
                                                            const TargetedExpType& transition_exp,
                                                            TransitionGroupMapType& transition_group_map,
                                                            TransformationDescription trafo,
                                                            double rt_extraction_window)
{
  // Invert the trafo: below we transform from normalized RT to experimental RT.
  trafo.invert();
  // Index all chromatograms by native ID for fast lookup.
  std::map<String, int> chromatogram_map;
  Size nr_chromatograms = input->getNrChromatograms();
  for (Size i = 0; i < nr_chromatograms; i++)
  {
    chromatogram_map[input->getChromatogramNativeID(i)] = boost::numeric_cast<int>(i);
  }
  // Iterate through all transitions and store the transition with the
  // corresponding chromatogram in the corresponding transition group
  Size progress = 0;
  startProgress(0, nr_chromatograms, "Mapping transitions to chromatograms ");
  for (Size i = 0; i < transition_exp.getTransitions().size(); i++)
  {
    // get the current transition and try to find the corresponding chromatogram
    const TransitionType* transition = &transition_exp.getTransitions()[i];
    std::map<String, int>::const_iterator chrom_idx_it = chromatogram_map.find(transition->getNativeID());
    if (chrom_idx_it == chromatogram_map.end())
    {
      OPENMS_LOG_DEBUG << "Error: Transition " + transition->getNativeID() + " from group " +
        transition->getPeptideRef() + " does not have a corresponding chromatogram" << std::endl;
      if (strict_)
      {
        throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "Error: Transition " + transition->getNativeID() + " from group " +
          transition->getPeptideRef() + " does not have a corresponding chromatogram");
      }
      continue;
    }
    //-----------------------------------
    // Retrieve chromatogram and filter it by the desired RT
    //-----------------------------------
    OpenSwath::ChromatogramPtr cptr = input->getChromatogramById(chrom_idx_it->second);
    MSChromatogram chromatogram;
    // Get the expected retention time, apply the RT-transformation
    // (which describes the normalization) and then take the difference.
    // Note that we inverted the transformation in the beginning because
    // we want to transform from normalized to real RTs here and not the
    // other way round.
    if (rt_extraction_window > 0)
    {
      double expected_rt = PeptideRefMap_[transition->getPeptideRef()]->rt;
      double de_normalized_experimental_rt = trafo.apply(expected_rt);
      double rt_max = de_normalized_experimental_rt + rt_extraction_window;
      double rt_min = de_normalized_experimental_rt - rt_extraction_window;
      OpenSwathDataAccessHelper::convertToOpenMSChromatogramFilter(chromatogram, cptr, rt_min, rt_max);
    }
    else
    {
      OpenSwathDataAccessHelper::convertToOpenMSChromatogram(cptr, chromatogram);
    }
    // Check for empty chromatograms (e.g. RT transformation is off)
    if (chromatogram.empty())
    {
      std::cerr << "Error: Could not find any points for chromatogram " + transition->getNativeID() + \
        ". Maybe your retention time transformation is off?" << std::endl;
      if (strict_)
      {
        throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "Error: Could not find any points for chromatogram " + transition->getNativeID() + \
          ". Maybe your retention time transformation is off?");
      }
    }
    // Annotate the chromatogram with precursor/product information.
    chromatogram.setMetaValue("product_mz", transition->getProductMZ());
    chromatogram.setMetaValue("precursor_mz", transition->getPrecursorMZ());
    Precursor prec; prec.setPosition(transition->getPrecursorMZ());
    Product prod; prod.setMZ(transition->getProductMZ());
    chromatogram.setPrecursor(prec);
    chromatogram.setProduct(prod);
    chromatogram.setNativeID(transition->getNativeID());
    // Create new transition group if there is none for this peptide
    // (single find/insert instead of separate find + two operator[] lookups)
    TransitionGroupMapType::iterator trgroup_it = transition_group_map.find(transition->getPeptideRef());
    if (trgroup_it == transition_group_map.end())
    {
      MRMTransitionGroupType new_transition_group;
      new_transition_group.setTransitionGroupID(transition->getPeptideRef());
      trgroup_it = transition_group_map.insert(
        TransitionGroupMapType::value_type(transition->getPeptideRef(), new_transition_group)).first;
    }
    // Now add the transition and the chromatogram to the group
    MRMTransitionGroupType& transition_group = trgroup_it->second;
    transition_group.addTransition(*transition, transition->getNativeID());
    transition_group.addChromatogram(chromatogram, chromatogram.getNativeID());
    setProgress(++progress);
  }
  endProgress();
  // The assumption is that for each transition that is in the TargetedExperiment we have exactly one chromatogram
  for (TransitionGroupMapType::iterator trgroup_it = transition_group_map.begin(); trgroup_it != transition_group_map.end(); ++trgroup_it)
  {
    if (!trgroup_it->second.isInternallyConsistent())
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Error: Could not match all transition to all chromatograms:\nFor chromatogram " + \
        trgroup_it->second.getTransitionGroupID() + " I found " + String(trgroup_it->second.getChromatograms().size()) + \
        " chromatograms but " + String(trgroup_it->second.getTransitions().size()) + " transitions.");
    }
  }
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/PeakPickerChromatogram.cpp | .cpp | 18,273 | 410 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/PeakPickerChromatogram.h>
#include <OpenMS/CONCEPT/LogStream.h>
namespace OpenMS
{
/// Default constructor: registers all peak picking parameters with their defaults
/// and configures the internal PeakPickerHiRes for chromatographic (RT-domain) data.
PeakPickerChromatogram::PeakPickerChromatogram() :
  DefaultParamHandler("PeakPickerChromatogram")
{
  // For SWATH-MS data from 5600 TripleTOF, these settings are recommended:
  //
  // sgolay_frame_length = 9 (29.7s on our data)
  // gauss_width = 30 (if even gauss is used)
  // use_gauss = false
  //
  defaults_.setValue("sgolay_frame_length", 15, "The number of subsequent data points used for smoothing.\nThis number has to be uneven. If it is not, 1 will be added.");
  defaults_.setValue("sgolay_polynomial_order", 3, "Order of the polynomial that is fitted.");
  defaults_.setValue("gauss_width", 50.0, "Gaussian width in seconds, estimated peak size.");
  defaults_.setValue("use_gauss", "true", "Use Gaussian filter for smoothing (alternative is Savitzky-Golay filter)");
  defaults_.setValidStrings("use_gauss", {"false","true"});
  defaults_.setValue("peak_width", -1.0, "Force a certain minimal peak_width on the data (e.g. extend the peak at least by this amount on both sides) in seconds. -1 turns this feature off.");
  // NOTE(review): the default (1.0) equals the value the description calls "too high" — TODO confirm intended
  defaults_.setValue("signal_to_noise", 1.0, "Signal-to-noise threshold at which a peak will not be extended any more. Note that setting this too high (e.g. 1.0) can lead to peaks whose flanks are not fully captured.");
  defaults_.setMinFloat("signal_to_noise", 0.0);
  defaults_.setValue("sn_win_len", 1000.0, "Signal to noise window length.");
  defaults_.setValue("sn_bin_count", 30, "Signal to noise bin count.");
  defaults_.setValue("write_sn_log_messages", "false", "Write out log messages of the signal-to-noise estimator in case of sparse windows or median in rightmost histogram bin");
  defaults_.setValidStrings("write_sn_log_messages", {"true","false"});
  defaults_.setValue("remove_overlapping_peaks", "false", "Try to remove overlapping peaks during peak picking");
  defaults_.setValidStrings("remove_overlapping_peaks", {"false","true"});
  defaults_.setValue("method", "corrected", "Which method to choose for chromatographic peak-picking (OpenSWATH legacy on raw data, corrected picking on smoothed chromatogram or Crawdad on smoothed chromatogram).");
  defaults_.setValidStrings("method", {"legacy","corrected","crawdad"});
  // write defaults into Param object param_
  defaultsToParam_();
  updateMembers_();
  // Configure the internal high-resolution peak picker (pp_) for chromatograms.
  // PeakPickerHiRes pp_;
  Param pepi_param = pp_.getDefaults();
  pepi_param.setValue("signal_to_noise", signal_to_noise_);
  // disable spacing constraints, since we're dealing with chromatograms
  pepi_param.setValue("spacing_difference", 0.0);
  pepi_param.setValue("spacing_difference_gap", 0.0);
  pepi_param.setValue("report_FWHM", "true");
  pepi_param.setValue("report_FWHM_unit", "absolute");
  pp_.setParameters(pepi_param);
}
void PeakPickerChromatogram::pickChromatogram(const MSChromatogram& chromatogram, MSChromatogram& picked_chrom)
{
MSChromatogram s;
pickChromatogram(chromatogram, picked_chrom, s);
}
void PeakPickerChromatogram::pickChromatogram(const MSChromatogram& chromatogram, MSChromatogram& picked_chrom, MSChromatogram& smoothed_chrom)
{
if (!chromatogram.isSorted())
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Chromatogram must be sorted by position");
}
if (chromatogram.empty())
{
OPENMS_LOG_DEBUG << " ==== Chromatogram " << chromatogram.getNativeID() << "empty. Skip picking.";
return;
}
else
{
OPENMS_LOG_DEBUG << " ==== Picking chromatogram " << chromatogram.getNativeID() <<
" with " << chromatogram.size() << " peaks (start at RT " << chromatogram[0].getRT() << " to RT " << chromatogram.back().getRT() << ") "
"using method \'" << method_ << "\'\n";
}
picked_chrom.clear(true);
// Crawdad has its own methods, so we can call the wrapper directly
if (method_ == "crawdad")
{
pickChromatogramCrawdad_(chromatogram, picked_chrom);
return;
}
// Smooth the chromatogram
smoothed_chrom = chromatogram;
if (!use_gauss_)
{
sgolay_.filter(smoothed_chrom);
}
else
{
gauss_.filter(smoothed_chrom);
}
// Find initial seeds (peak picking)
pp_.pick(smoothed_chrom, picked_chrom);
OPENMS_LOG_DEBUG << "Picked " << picked_chrom.size() << " chromatographic peaks.\n";
if (method_ == "legacy")
{
// Legacy is to use the original chromatogram for peak-detection
pickChromatogram_(chromatogram, picked_chrom);
if (remove_overlapping_)
removeOverlappingPeaks_(chromatogram, picked_chrom);
// for peak integration, we want to use the raw data
integratePeaks_(chromatogram);
}
else if (method_ == "corrected")
{
// use the smoothed chromatogram to derive the peak boundaries
pickChromatogram_(smoothed_chrom, picked_chrom);
if (remove_overlapping_)
removeOverlappingPeaks_(smoothed_chrom, picked_chrom);
// for peak integration, we want to use the raw data
integratePeaks_(chromatogram);
}
// Store the result in the picked_chromatogram
OPENMS_POSTCONDITION(picked_chrom.getFloatDataArrays().size() == 1 &&
picked_chrom.getFloatDataArrays()[IDX_FWHM].getName() == "FWHM", "Swath: PeakPicking did not deliver FWHM attributes.")
picked_chrom.getFloatDataArrays().resize(SIZE_OF_FLOATINDICES);
picked_chrom.getFloatDataArrays()[IDX_ABUNDANCE].setName("IntegratedIntensity");
picked_chrom.getFloatDataArrays()[IDX_LEFTBORDER].setName("leftWidth");
picked_chrom.getFloatDataArrays()[IDX_RIGHTBORDER].setName("rightWidth");
// just copy FWHM from initial peak picking
picked_chrom.getFloatDataArrays()[IDX_ABUNDANCE].reserve(picked_chrom.size());
picked_chrom.getFloatDataArrays()[IDX_LEFTBORDER].reserve(picked_chrom.size());
picked_chrom.getFloatDataArrays()[IDX_RIGHTBORDER].reserve(picked_chrom.size());
for (Size i = 0; i < picked_chrom.size(); i++)
{
picked_chrom.getFloatDataArrays()[IDX_ABUNDANCE].push_back(integrated_intensities_[i]);
picked_chrom.getFloatDataArrays()[IDX_LEFTBORDER].push_back((float)chromatogram[left_width_[i]].getRT());
picked_chrom.getFloatDataArrays()[IDX_RIGHTBORDER].push_back((float)chromatogram[right_width_[i]].getRT());
}
}
void PeakPickerChromatogram::pickChromatogram_(const MSChromatogram& chromatogram, MSChromatogram& picked_chrom)
{
integrated_intensities_.clear();
left_width_.clear();
right_width_.clear();
integrated_intensities_.reserve(picked_chrom.size());
left_width_.reserve(picked_chrom.size());
right_width_.reserve(picked_chrom.size());
if (signal_to_noise_ > 0.0)
{
snt_.init(chromatogram);
}
Size current_peak = 0;
for (Size i = 0; i < picked_chrom.size(); i++)
{
const double central_peak_rt = picked_chrom[i].getRT();
current_peak = findClosestPeak_(chromatogram, central_peak_rt, current_peak);
const Size min_i = current_peak;
// peak core found, now extend it to the left
Size k = 2;
while ((min_i - k + 1) > 0
//&& std::fabs(chromatogram[min_i-k].getMZ() - peak_raw_data.begin()->first) < spacing_difference*min_spacing
&& (chromatogram[min_i - k].getIntensity() < chromatogram[min_i - k + 1].getIntensity()
|| (peak_width_ > 0.0 && std::fabs(chromatogram[min_i - k].getRT() - central_peak_rt) < peak_width_))
&& (signal_to_noise_ <= 0.0 || snt_.getSignalToNoise(min_i - k) >= signal_to_noise_))
{
++k;
}
int left_idx = min_i - k + 1;
// to the right
k = 2;
while ((min_i + k) < chromatogram.size()
//&& std::fabs(chromatogram[min_i+k].getMZ() - peak_raw_data.rbegin()->first) < spacing_difference*min_spacing
&& (chromatogram[min_i + k].getIntensity() < chromatogram[min_i + k - 1].getIntensity()
|| (peak_width_ > 0.0 && std::fabs(chromatogram[min_i + k].getRT() - central_peak_rt) < peak_width_))
&& (signal_to_noise_ <= 0.0 || snt_.getSignalToNoise(min_i + k) >= signal_to_noise_) )
{
++k;
}
int right_idx = min_i + k - 1;
left_width_.push_back(left_idx);
right_width_.push_back(right_idx);
integrated_intensities_.push_back(0);
OPENMS_LOG_DEBUG << "Found peak at " << central_peak_rt << " with intensity " << picked_chrom[i].getIntensity()
<< " and borders " << chromatogram[left_width_[i]].getRT() << " " << chromatogram[right_width_[i]].getRT() <<
" (" << chromatogram[right_width_[i]].getRT() - chromatogram[left_width_[i]].getRT() << ") "
<< 0 << " weighted RT " << /* weighted_mz << */ std::endl;
}
}
#ifdef WITH_CRAWDAD
void PeakPickerChromatogram::pickChromatogramCrawdad_(const MSChromatogram& chromatogram, MSChromatogram& picked_chrom)
{
OPENMS_LOG_DEBUG << "Picking chromatogram using crawdad \n";
// copy meta data of the input chromatogram
picked_chrom.clear(true);
picked_chrom.ChromatogramSettings::operator=(chromatogram);
picked_chrom.MetaInfoInterface::operator=(chromatogram);
picked_chrom.setName(chromatogram.getName());
std::vector<double> time;
std::vector<double> intensity;
for (Size i = 0; i < chromatogram.size(); i++)
{
time.push_back(chromatogram[i].getRT());
intensity.push_back(chromatogram[i].getIntensity());
}
CrawdadWrapper crawdad_pp;
crawdad_pp.SetChromatogram(time, intensity);
std::vector<crawpeaks::SlimCrawPeak> result = crawdad_pp.CalcPeaks();
picked_chrom.getFloatDataArrays().clear();
picked_chrom.getFloatDataArrays().resize(SIZE_OF_FLOATINDICES);
picked_chrom.getFloatDataArrays()[IDX_ABUNDANCE].setName("IntegratedIntensity");
picked_chrom.getFloatDataArrays()[IDX_LEFTBORDER].setName("leftWidth");
picked_chrom.getFloatDataArrays()[IDX_RIGHTBORDER].setName("rightWidth");
for (std::vector<crawpeaks::SlimCrawPeak>::iterator it = result.begin(); it != result.end(); ++it)
{
ChromatogramPeak p;
p.setRT(chromatogram[it->peak_rt_idx].getRT());
p.setIntensity(it->peak_area); //chromatogram[it->peak_rt_idx].getIntensity() );
picked_chrom.getFloatDataArrays()[IDX_ABUNDANCE].push_back(it->peak_area);
picked_chrom.getFloatDataArrays()[IDX_LEFTBORDER].push_back(chromatogram[it->start_rt_idx].getRT());
picked_chrom.getFloatDataArrays()[IDX_RIGHTBORDER].push_back(chromatogram[it->stop_rt_idx].getRT());
OPENMS_LOG_DEBUG << "Found peak at " << p.getRT() << " and " << chromatogram[it->peak_rt_idx].getIntensity()
<< " with borders " << chromatogram[it->start_rt_idx].getRT() << " " << chromatogram[it->stop_rt_idx].getRT() << " (" << chromatogram[it->start_rt_idx].getRT() - chromatogram[it->stop_rt_idx].getRT() << ") "
<< it->peak_area << " weighted RT " << /* weighted_mz << */ std::endl;
picked_chrom.push_back(p);
}
}
#else
void PeakPickerChromatogram::pickChromatogramCrawdad_(const MSChromatogram& /* chromatogram */, MSChromatogram& /* picked_chrom */)
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"PeakPickerChromatogram was not compiled with crawdad, please choose a different algorithm!");
}
#endif
void PeakPickerChromatogram::removeOverlappingPeaks_(const MSChromatogram& chromatogram, MSChromatogram& picked_chrom)
{
if (picked_chrom.empty()) {return; }
OPENMS_LOG_DEBUG << "Remove overlapping peaks now (size " << picked_chrom.size() << ")\n";
Size current_peak = 0;
// Find overlapping peaks
for (Size i = 0; i < picked_chrom.size() - 1; i++)
{
// Check whether the current right overlaps with the next left
// See whether we can correct this and find some border between the two
// features ...
if (right_width_[i] > left_width_[i + 1])
{
const int current_left_idx = left_width_[i];
const int current_right_idx = right_width_[i];
const int next_left_idx = left_width_[i + 1];
const int next_right_idx = right_width_[i + 1];
OPENMS_LOG_DEBUG << " Found overlapping " << i << " : " << current_left_idx << " " << current_right_idx << '\n';
OPENMS_LOG_DEBUG << " -- with " << i + 1 << " : " << next_left_idx << " " << next_right_idx << '\n';
// Find the peak width and best RT
double central_peak_rt = picked_chrom[i].getPos();
double next_peak_rt = picked_chrom[i + 1].getPos();
current_peak = findClosestPeak_(chromatogram, central_peak_rt, current_peak);
Size next_peak = findClosestPeak_(chromatogram, next_peak_rt, current_peak);
// adjust the right border of the current and left border of next
Size k = 1;
while ((current_peak + k) < chromatogram.size()
&& (chromatogram[current_peak + k].getIntensity() < chromatogram[current_peak + k - 1].getIntensity()))
{
++k;
}
Size new_right_border = current_peak + k - 1;
k = 1;
while ((next_peak - k + 1) > 0
&& (chromatogram[next_peak - k].getIntensity() < chromatogram[next_peak - k + 1].getIntensity()))
{
++k;
}
Size new_left_border = next_peak - k + 1;
// assert that the peaks are now not overlapping any more ...
if (new_left_border < new_right_border)
{
OPENMS_LOG_ERROR << "Something went wrong, peaks are still overlapping!" << " - new left border " << new_left_border << " vs " << new_right_border << " -- will take the mean\n";
new_left_border = (new_left_border + new_right_border) / 2;
new_right_border = (new_left_border + new_right_border) / 2;
}
OPENMS_LOG_DEBUG << "New peak l: " << chromatogram[current_left_idx].getPos()
<< " " << chromatogram[new_right_border].getPos()
<< " int " << integrated_intensities_[i] << '\n';
OPENMS_LOG_DEBUG << "New peak r: " << chromatogram[new_left_border].getPos()
<< " " << chromatogram[next_right_idx].getPos()
<< " int " << integrated_intensities_[i + 1] << '\n';
right_width_[i] = new_right_border;
left_width_[i + 1] = new_left_border;
}
}
}
Size PeakPickerChromatogram::findClosestPeak_(const MSChromatogram& chromatogram, double target_rt, Size current_peak)
{
while (current_peak < chromatogram.size())
{
// check if we have walked past the RT of the peak
if (target_rt < chromatogram[current_peak].getRT())
{
// see which one is closer, the current one or the one before
if (current_peak > 0 &&
std::fabs(target_rt - chromatogram[current_peak - 1].getRT()) <
std::fabs(target_rt - chromatogram[current_peak].getRT()))
{
current_peak--;
}
return current_peak;
}
current_peak++;
}
return current_peak;
}
void PeakPickerChromatogram::integratePeaks_(const MSChromatogram& chromatogram)
{
for (Size i = 0; i < left_width_.size(); i++)
{
const int current_left_idx = left_width_[i];
const int current_right_idx = right_width_[i];
// Also integrate the intensities
integrated_intensities_[i] = 0;
for (int k = current_left_idx; k <= current_right_idx; k++)
{
integrated_intensities_[i] += chromatogram[k].getIntensity();
}
}
}
void PeakPickerChromatogram::updateMembers_()
{
sgolay_frame_length_ = (UInt)param_.getValue("sgolay_frame_length");
sgolay_polynomial_order_ = (UInt)param_.getValue("sgolay_polynomial_order");
gauss_width_ = (double)param_.getValue("gauss_width");
peak_width_ = (double)param_.getValue("peak_width");
signal_to_noise_ = (double)param_.getValue("signal_to_noise");
sn_win_len_ = (double)param_.getValue("sn_win_len");
sn_bin_count_ = (UInt)param_.getValue("sn_bin_count");
// TODO make list, not boolean
use_gauss_ = (bool)param_.getValue("use_gauss").toBool();
remove_overlapping_ = (bool)param_.getValue("remove_overlapping_peaks").toBool();
write_sn_log_messages_ = (bool)param_.getValue("write_sn_log_messages").toBool();
method_ = (String)param_.getValue("method").toString();
if (method_ != "crawdad" && method_ != "corrected" && method_ != "legacy")
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Method needs to be one of: crawdad, corrected, legacy");
}
Param sg_filter_parameters = sgolay_.getParameters();
sg_filter_parameters.setValue("frame_length", sgolay_frame_length_);
sg_filter_parameters.setValue("polynomial_order", sgolay_polynomial_order_);
sgolay_.setParameters(sg_filter_parameters);
Param gfilter_parameters = gauss_.getParameters();
gfilter_parameters.setValue("gaussian_width", gauss_width_);
gauss_.setParameters(gfilter_parameters);
Param snt_parameters = snt_.getParameters();
snt_parameters.setValue("win_len", sn_win_len_);
snt_parameters.setValue("bin_count", sn_bin_count_);
snt_parameters.setValue("write_log_messages", param_.getValue("write_sn_log_messages"));
snt_.setParameters(snt_parameters);
#ifndef WITH_CRAWDAD
if (method_ == "crawdad")
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"PeakPickerChromatogram was not compiled with crawdad, please choose a different algorithm!");
}
#endif
}
}
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/OpenSwathScoring.h>
#include <OpenMS/CONCEPT/Macros.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/LogStream.h>
// scoring
#include <OpenMS/ANALYSIS/OPENSWATH/OpenSwathScores.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DIAScoring.h>
#include <OpenMS/OPENSWATHALGO/ALGO/Scoring.h>
#include <OpenMS/ANALYSIS/OPENSWATH/MRMScoring.h>
#include <OpenMS/ANALYSIS/OPENSWATH/IonMobilityScoring.h>
// auxiliary
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/DataAccessHelper.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/ANALYSIS/OPENSWATH/SpectrumAddition.h>
#include <utility>
namespace OpenMS
{
  /// Constructor: sets conservative defaults for all tunable members;
  /// the actual configuration happens later via initialize().
  OpenSwathScoring::OpenSwathScoring() :
    rt_normalization_factor_(1.0),
    spacing_for_spectra_resampling_(0.005),
    add_up_spectra_(1),
    spectra_addition_method_(SpectrumAdditionMethod::ADDITION),
    spectra_merge_method_type_(SpectrumMergeMethodType::FIXED),
    im_drift_extra_pcnt_(0.0)
  {
  }
  /// Destructor (defaulted; the class owns no resources beyond its members)
  OpenSwathScoring::~OpenSwathScoring() = default;
void OpenSwathScoring::initialize(double rt_normalization_factor,
int add_up_spectra,
double spacing_for_spectra_resampling,
double merge_spectra_by_peak_width_fraction,
const double drift_extra,
const OpenSwath_Scores_Usage & su,
const std::string& spectrum_addition_method,
const std::string& spectrum_merge_method_type,
bool use_ms1_ion_mobility,
bool apply_im_peak_picking)
{
this->rt_normalization_factor_ = rt_normalization_factor;
this->add_up_spectra_ = add_up_spectra;
if (spectrum_addition_method == "simple")
{
this->spectra_addition_method_ = SpectrumAdditionMethod::ADDITION;
}
else if (spectrum_addition_method == "resample")
{
this->spectra_addition_method_ = SpectrumAdditionMethod::RESAMPLE;
}
else
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "spectrum_addition_method must be simple or resample", spectrum_addition_method);
}
if (spectrum_merge_method_type == "fixed")
{
this->spectra_merge_method_type_ = SpectrumMergeMethodType::FIXED;
}
else if (spectrum_merge_method_type == "dynamic")
{
this->spectra_merge_method_type_ = SpectrumMergeMethodType::DYNAMIC;
}
else
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "spectrum_merge_method_type must be fixed or dynamic", spectrum_merge_method_type);
}
this->im_drift_extra_pcnt_ = drift_extra;
this->spacing_for_spectra_resampling_ = spacing_for_spectra_resampling;
this->merge_spectra_by_peak_width_fraction_ = merge_spectra_by_peak_width_fraction;
this->su_ = su;
this->use_ms1_ion_mobility_ = use_ms1_ion_mobility;
this->apply_im_peak_picking_ = apply_im_peak_picking;
}
  /**
    @brief Compute all DIA (full-scan) scores for one candidate peak group.

    Fetches the MS2 spectra closest to the feature apex (either a fixed number
    'add_up_spectra_' or a number derived dynamically from the RT peak width),
    then computes — depending on the flags in su_ — ion mobility scores, mass
    deviation scores, isotope/dotproduct/manhattan scores, b/y ion series scores
    (peptides with known sequence only) and, if an MS1 map is available,
    precursor-level and MS1 ion-mobility scores.

    @param imrmfeature The feature to score (must not be null)
    @param transitions Transitions of this peak group (must be non-empty; the
           first transition supplies the precursor m/z for MS1 scoring)
    @param swath_maps SWATH maps to extract MS2 spectra from (must be non-empty)
    @param ms1_map Optional MS1 spectrum access (may be empty/null)
    @param diascoring Configured DIA scoring helper
    @param compound The assay compound (peptide or small molecule)
    @param scores Output score container
    @param masserror_ppm Output per-transition mass errors in ppm
    @param drift_target Expected ion mobility drift value
    @param im_range Ion mobility extraction range
  */
  void OpenSwathScoring::calculateDIAScores(OpenSwath::IMRMFeature* imrmfeature,
                                            const std::vector<TransitionType>& transitions,
                                            const std::vector<OpenSwath::SwathMap>& swath_maps,
                                            const OpenSwath::SpectrumAccessPtr& ms1_map,
                                            const OpenMS::DIAScoring& diascoring,
                                            const CompoundType& compound,
                                            OpenSwath_Scores& scores,
                                            std::vector<double>& masserror_ppm,
                                            const double drift_target,// TODO is this needed
                                            const RangeMobility& im_range)
  {
    OPENMS_PRECONDITION(imrmfeature != nullptr, "Feature to be scored cannot be null");
    OPENMS_PRECONDITION(transitions.size() > 0, "There needs to be at least one transition.");
    OPENMS_PRECONDITION(swath_maps.size() > 0, "There needs to be at least one swath map.");
    std::vector<double> normalized_library_intensity;
    getNormalized_library_intensities_(transitions, normalized_library_intensity);
    // automatically compute the amount of spectra to add based on the fraction of the retention time peak width, or add a fixed number of spectra
    int n_merge_spectra = 1;
    if (spectra_merge_method_type_ == SpectrumMergeMethodType::DYNAMIC)
    {
      // NOTE(review): assumes the feature carries leftWidth/rightWidth meta values — confirm for all callers
      double leftWidth = imrmfeature->getMetaValue("leftWidth");
      double rightWidth = imrmfeature->getMetaValue("rightWidth");
      double peakWidth = rightWidth - leftWidth;
      n_merge_spectra = std::max(1, static_cast<int>(std::ceil(peakWidth * merge_spectra_by_peak_width_fraction_)));
      OPENMS_LOG_DEBUG
        << "Merging " << n_merge_spectra
        << " spectra between RT peak (" << leftWidth << " - " << rightWidth
        << ") using " << merge_spectra_by_peak_width_fraction_
        << " fraction of peak width (" << peakWidth << ")." << std::endl;
    }
    else // (spectra_merge_method_type == SpectrumMergeMethodType::FIXED)
    {
      n_merge_spectra = add_up_spectra_;
    }
    // find spectrum that is closest to the apex of the peak using binary search
    std::vector<OpenSwath::SpectrumPtr> spectra = fetchSpectrumSwath(swath_maps, imrmfeature->getRT(), n_merge_spectra, im_range);
    // set the DIA parameters
    // TODO Cache these parameters
    // (locals deliberately shadow the member naming convention here)
    double dia_extract_window_ = (double)diascoring.getParameters().getValue("dia_extraction_window");
    bool dia_extraction_ppm_ = diascoring.getParameters().getValue("dia_extraction_unit") == "ppm";
    // score drift time dimension
    if ( su_.use_im_scores)
    {
      IonMobilityScoring::driftScoring(spectra, transitions, scores,
                                       drift_target, im_range,
                                       dia_extract_window_, dia_extraction_ppm_,
                                       im_drift_extra_pcnt_, apply_im_peak_picking_);
    }
    // Mass deviation score
    diascoring.dia_massdiff_score(transitions, spectra, normalized_library_intensity, im_range, scores.massdev_score, scores.weighted_massdev_score, masserror_ppm);
    //TODO this score and the next, both rely on the CoarseIsotope of the PeptideAveragine. Maybe we could
    // DIA dotproduct and manhattan score based on library intensity and sum formula if present
    if (su_.use_ms2_isotope_scores)
    {
      diascoring.score_with_isotopes(spectra, transitions, im_range, scores.dotprod_score_dia, scores.manhatt_score_dia);
      // Isotope correlation / overlap score: Is this peak part of an
      // isotopic pattern or is it the monoisotopic peak in an isotopic
      // pattern?
      // Currently this is computed for an averagine model of a peptide so its
      // not optimal for metabolites - but better than nothing, given that for
      // most fragments we don't really know their composition
      diascoring
        .dia_isotope_scores(transitions, spectra, imrmfeature, im_range, scores.isotope_correlation, scores.isotope_overlap);
    }
    // Peptide-specific scores (only useful, when product transitions are REAL fragments, e.g. not in FFID)
    // and only if sequence is known (non-empty)
    if (compound.isPeptide() && !compound.sequence.empty() && su_.use_ionseries_scores)
    {
      // Presence of b/y series score
      OpenMS::AASequence aas;
      int by_charge_state = 1; // for which charge states should we check b/y series
      OpenSwathDataAccessHelper::convertPeptideToAASequence(compound, aas);
      diascoring.dia_by_ion_score(spectra, aas, by_charge_state, im_range, scores.bseries_score, scores.yseries_score);
    }
    // MS1 extraction may ignore ion mobility if use_ms1_ion_mobility_ is off
    RangeMobility im_range_ms1;
    if (use_ms1_ion_mobility_)
    {
      im_range_ms1 = im_range;
    }
    else // do not extract across IM in MS1
    {
      im_range_ms1 = RangeMobility();
    }
    if (ms1_map && ms1_map->getNrSpectra() > 0)
    {
      double precursor_mz = transitions[0].precursor_mz;
      double rt = imrmfeature->getRT();
      calculatePrecursorDIAScores(ms1_map, diascoring, precursor_mz, rt, compound, im_range_ms1, scores);
    }
    if ( (ms1_map && ms1_map->getNrSpectra() > 0) && ( su_.use_im_scores) ) // IM MS1 scores
    {
      // NOTE(review): these two locals re-read the same DIA parameters fetched above
      double dia_extract_window_ = (double)diascoring.getParameters().getValue("dia_extraction_window");
      bool dia_extraction_ppm_ = diascoring.getParameters().getValue("dia_extraction_unit") == "ppm";
      double rt = imrmfeature->getRT();
      std::vector<OpenSwath::SpectrumPtr> ms1_spectrum = fetchSpectrumSwath(ms1_map, rt, n_merge_spectra, im_range_ms1);
      IonMobilityScoring::driftScoringMS1(ms1_spectrum,
                                          transitions, scores, drift_target, im_range_ms1, dia_extract_window_, dia_extraction_ppm_, im_drift_extra_pcnt_);
      IonMobilityScoring::driftScoringMS1Contrast(spectra, ms1_spectrum,
                                                  transitions, scores, im_range_ms1, dia_extract_window_, dia_extraction_ppm_, im_drift_extra_pcnt_);
    }
  }
  /**
    @brief Compute MS1 (precursor-level) DIA scores: ppm mass deviation and
    isotope correlation/overlap.

    The isotope scores use the exact sum formula when a sequence/formula is
    available (peptide sequence or small-molecule sum formula), and fall back
    to an averagine model otherwise. If use_ms1_ion_mobility_ is disabled, the
    ion mobility range is cleared so MS1 extraction spans all drift times.

    @param ms1_map MS1 spectrum access (no-op when null or empty)
    @param diascoring Configured DIA scoring helper
    @param precursor_mz Precursor m/z to score at
    @param rt Retention time at which to fetch MS1 spectra
    @param compound Assay compound (supplies charge and sequence/formula)
    @param im_range Ion mobility extraction range (taken by value; may be cleared)
    @param scores Output score container (ms1_ppm_score, ms1_isotope_*)
  */
  void OpenSwathScoring::calculatePrecursorDIAScores(const OpenSwath::SpectrumAccessPtr& ms1_map,
                                                     const OpenMS::DIAScoring & diascoring,
                                                     double precursor_mz,
                                                     double rt,
                                                     const CompoundType& compound,
                                                     RangeMobility im_range,
                                                     OpenSwath_Scores & scores)
  {
    // change im_range based on ms1 settings
    if (!use_ms1_ion_mobility_)
    {
      im_range.clear();
    }
    // Compute precursor-level scores:
    // - compute mass difference in ppm
    // - compute isotopic pattern score
    if (ms1_map && ms1_map->getNrSpectra() > 0)
    {
      std::vector<OpenSwath::SpectrumPtr> ms1_spectrum = fetchSpectrumSwath(ms1_map, rt, add_up_spectra_, im_range);
      diascoring.dia_ms1_massdiff_score(precursor_mz, ms1_spectrum, im_range, scores.ms1_ppm_score);
      // derive precursor charge state (get from data if possible)
      int precursor_charge = 1;
      if (compound.getChargeState() != 0)
      {
        precursor_charge = compound.getChargeState();
      }
      if (compound.isPeptide())
      {
        if (!compound.sequence.empty())
        {
          // exact isotope pattern from the peptide sequence
          diascoring.dia_ms1_isotope_scores(precursor_mz, ms1_spectrum, im_range, scores.ms1_isotope_correlation,
                                            scores.ms1_isotope_overlap,
                                            AASequence::fromString(compound.sequence).getFormula(Residue::Full, precursor_charge));
        }
        else
        {
          // no sequence: averagine approximation
          diascoring.dia_ms1_isotope_scores_averagine(precursor_mz, ms1_spectrum, precursor_charge, im_range,
                                                      scores.ms1_isotope_correlation,
                                                      scores.ms1_isotope_overlap);
        }
      }
      else
      {
        if (!compound.sequence.empty())
        {
          // small molecule with known sum formula
          EmpiricalFormula empf{compound.sequence};
          //Note: this only sets the charge to be extracted again in the following function.
          // It is not really used in EmpiricalFormula. Also the m/z of the formula is not used since
          // it is shadowed by the exact precursor_mz.
          //TODO check if charges are the same (in case the charge was actually present in the sum_formula?)
          empf.setCharge(precursor_charge);
          diascoring.dia_ms1_isotope_scores(precursor_mz, ms1_spectrum, im_range, scores.ms1_isotope_correlation,
                                            scores.ms1_isotope_overlap,
                                            empf);
        }
        else
        {
          // no formula: averagine approximation
          diascoring.dia_ms1_isotope_scores_averagine(precursor_mz, ms1_spectrum, precursor_charge, im_range,
                                                      scores.ms1_isotope_correlation,
                                                      scores.ms1_isotope_overlap);
        }
      }
    }
  }
  /**
    @brief Compute DIA scores for a single identification (UIS) transition.

    Fetches the MS2 spectra around the feature apex (fixed or dynamic count,
    analogous to calculateDIAScores), then computes averagine isotope
    correlation/overlap and the mass deviation score at the product m/z, plus
    optional ion mobility scores for the identification transition.

    @param imrmfeature Feature to score (must not be null)
    @param transition The identification transition to score
    @param trgr_detect Detection transition group used as reference for IM id scoring
    @param swath_maps SWATH maps to extract spectra from (must be non-empty)
    @param im_range Ion mobility extraction range
    @param diascoring Configured DIA scoring helper
    @param scores Output score container
    @param drift_target Expected ion mobility drift value
  */
  void OpenSwathScoring::calculateDIAIdScores(OpenSwath::IMRMFeature* imrmfeature,
                                              const TransitionType & transition,
                                              MRMTransitionGroupType& trgr_detect,
                                              const std::vector<OpenSwath::SwathMap>& swath_maps,
                                              RangeMobility& im_range,
                                              const OpenMS::DIAScoring & diascoring,
                                              OpenSwath_Scores & scores,
                                              const double drift_target)
  {
    OPENMS_PRECONDITION(imrmfeature != nullptr, "Feature to be scored cannot be null");
    OPENMS_PRECONDITION(swath_maps.size() > 0, "There needs to be at least one swath map.");
    // automatically compute the amount of spectra to add based on the fraction of the retention time peak width, or add a fixed number of spectra
    int n_merge_spectra = 1;
    if (spectra_merge_method_type_ == SpectrumMergeMethodType::DYNAMIC)
    {
      // NOTE(review): assumes the feature carries leftWidth/rightWidth meta values — confirm for all callers
      double leftWidth = imrmfeature->getMetaValue("leftWidth");
      double rightWidth = imrmfeature->getMetaValue("rightWidth");
      double peakWidth = rightWidth - leftWidth;
      n_merge_spectra = std::max(1, static_cast<int>(std::ceil(peakWidth * merge_spectra_by_peak_width_fraction_)));
      OPENMS_LOG_DEBUG
        << "Merging " << n_merge_spectra
        << " spectra between RT peak (" << leftWidth << " - " << rightWidth
        << ") using " << merge_spectra_by_peak_width_fraction_
        << " fraction of peak width (" << peakWidth << ")." << std::endl;
    }
    else // (spectra_merge_method_type == SpectrumMergeMethodType::FIXED)
    {
      n_merge_spectra = add_up_spectra_;
    }
    // find spectrum that is closest to the apex of the peak using binary search
    std::vector<OpenSwath::SpectrumPtr> spectrum = fetchSpectrumSwath(swath_maps, imrmfeature->getRT(), n_merge_spectra, im_range);
    // If no charge is given, we assume it to be 1
    int putative_product_charge = 1;
    if (transition.getProductChargeState() != 0)
    {
      putative_product_charge = transition.getProductChargeState();
    }
    // Isotope correlation / overlap score: Is this peak part of an
    // isotopic pattern or is it the monoisotopic peak in an isotopic
    // pattern?
    diascoring.dia_ms1_isotope_scores_averagine(transition.getProductMZ(),
                                                spectrum,
                                                putative_product_charge,
                                                im_range,
                                                scores.isotope_correlation,
                                                scores.isotope_overlap);
    // Mass deviation score
    diascoring.dia_ms1_massdiff_score(transition.getProductMZ(), spectrum, im_range, scores.massdev_score);
    // Drift Scoring for Identification transitions
    if (su_.use_im_scores)
    {
      OPENMS_LOG_DEBUG << "Computing IM scores for identification transition: " << transition.transition_name << " with product mz " << transition.getProductMZ() << " and precursor mz " << transition.getPrecursorMZ() << std::endl;
      // Temporary vector container for storing transition to match rest of code.
      std::vector<TransitionType> transitionVector;
      // Add the existing transition to the vector
      transitionVector.push_back(transition);
      double dia_extract_window_ = (double)diascoring.getParameters().getValue("dia_extraction_window");
      bool dia_extraction_ppm_ = diascoring.getParameters().getValue("dia_extraction_unit") == "ppm";
      IonMobilityScoring::driftIdScoring(spectrum, transitionVector, trgr_detect, scores,
                                         drift_target, im_range,
                                         dia_extract_window_, dia_extraction_ppm_,
                                         im_drift_extra_pcnt_, apply_im_peak_picking_);
    }
  }
  /**
    @brief Compute chromatogram-shape-based scores for one peak group.

    Depending on the flags in su_, computes cross-correlation coelution and
    shape scores (plain and library-intensity-weighted), MS1↔MS2 contrast and
    combined variants, the number of peaks, signal-to-noise scores and mutual
    information scores. Matrix initialization on the local MRMScoring object
    must precede the corresponding calc* calls.

    @param imrmfeature Feature to score (must not be null)
    @param native_ids Native ids of the fragment-level chromatograms
    @param precursor_ids Native ids of the precursor (MS1 isotope) chromatograms
    @param normalized_library_intensity Library intensities normalized to sum 1 (for weighted scores)
    @param signal_noise_estimators One S/N estimator per fragment chromatogram
    @param scores Output score container
  */
  void OpenSwathScoring::calculateChromatographicScores(
        OpenSwath::IMRMFeature* imrmfeature,
        const std::vector<std::string>& native_ids,
        const std::vector<std::string>& precursor_ids,
        const std::vector<double>& normalized_library_intensity,
        std::vector<OpenSwath::ISignalToNoisePtr>& signal_noise_estimators,
        OpenSwath_Scores & scores) const
  {
    OPENMS_PRECONDITION(imrmfeature != nullptr, "Feature to be scored cannot be null");
    OpenSwath::MRMScoring mrmscore_;
    // the XCorr matrix is shared by the coelution and shape scores below
    if (su_.use_coelution_score_ || su_.use_shape_score_ || (!imrmfeature->getPrecursorIDs().empty() && su_.use_ms1_correlation))
      mrmscore_.initializeXCorrMatrix(imrmfeature, native_ids);
    // XCorr score (coelution)
    if (su_.use_coelution_score_)
    {
      scores.xcorr_coelution_score = mrmscore_.calcXcorrCoelutionScore();
      scores.weighted_coelution_score = mrmscore_.calcXcorrCoelutionWeightedScore(normalized_library_intensity);
    }
    // XCorr score (shape)
    // mean over the intensities at the max of the crosscorrelation
    // FEATURE : weigh by the intensity as done by mQuest
    // FEATURE : normalize with the intensity at the peak group apex?
    if (su_.use_shape_score_)
    {
      scores.xcorr_shape_score = mrmscore_.calcXcorrShapeScore();
      scores.weighted_xcorr_shape = mrmscore_.calcXcorrShapeWeightedScore(normalized_library_intensity);
    }
    // check that the MS1 feature is present and that the MS1 correlation should be calculated
    if (!imrmfeature->getPrecursorIDs().empty() && su_.use_ms1_correlation)
    {
      // we need at least two precursor isotopes
      if (precursor_ids.size() > 1)
      {
        mrmscore_.initializeXCorrPrecursorMatrix(imrmfeature, precursor_ids);
        scores.ms1_xcorr_coelution_score = mrmscore_.calcXcorrPrecursorCoelutionScore();
        scores.ms1_xcorr_shape_score = mrmscore_.calcXcorrPrecursorShapeScore();
      }
      mrmscore_.initializeXCorrPrecursorContrastMatrix(imrmfeature, precursor_ids, native_ids); // perform cross-correlation on monoisotopic precursor
      scores.ms1_xcorr_coelution_contrast_score = mrmscore_.calcXcorrPrecursorContrastCoelutionScore();
      scores.ms1_xcorr_shape_contrast_score = mrmscore_.calcXcorrPrecursorContrastShapeScore();
      mrmscore_.initializeXCorrPrecursorCombinedMatrix(imrmfeature, precursor_ids, native_ids); // perform cross-correlation on monoisotopic precursor
      scores.ms1_xcorr_coelution_combined_score = mrmscore_.calcXcorrPrecursorCombinedCoelutionScore();
      scores.ms1_xcorr_shape_combined_score = mrmscore_.calcXcorrPrecursorCombinedShapeScore();
    }
    if (su_.use_nr_peaks_score_)
    {
      scores.nr_peaks = boost::numeric_cast<int>(imrmfeature->size());
    }
    // Signal to noise scoring
    if (su_.use_sn_score_)
    {
      scores.sn_ratio = mrmscore_.calcSNScore(imrmfeature, signal_noise_estimators);
      // everything below S/N 1 can be set to zero (and the log safely applied)
      if (scores.sn_ratio < 1)
      {
        scores.log_sn_score = 0;
      }
      else
      {
        scores.log_sn_score = std::log(scores.sn_ratio);
      }
    }
    // Mutual information scoring
    if (su_.use_mi_score_)
    {
      mrmscore_.initializeMIMatrix(imrmfeature, native_ids);
      scores.mi_score = mrmscore_.calcMIScore();
      scores.weighted_mi_score = mrmscore_.calcMIWeightedScore(normalized_library_intensity);
    }
    // check that the MS1 feature is present and that the MS1 MI should be calculated
    if (!imrmfeature->getPrecursorIDs().empty() && su_.use_ms1_mi)
    {
      // we need at least two precursor isotopes
      if (precursor_ids.size() > 1)
      {
        mrmscore_.initializeMIPrecursorMatrix(imrmfeature, precursor_ids);
        scores.ms1_mi_score = mrmscore_.calcMIPrecursorScore();
      }
      mrmscore_.initializeMIPrecursorContrastMatrix(imrmfeature, precursor_ids, native_ids);
      scores.ms1_mi_contrast_score = mrmscore_.calcMIPrecursorContrastScore();
      mrmscore_.initializeMIPrecursorCombinedMatrix(imrmfeature, precursor_ids, native_ids);
      scores.ms1_mi_combined_score = mrmscore_.calcMIPrecursorCombinedScore();
    }
  }
void OpenSwathScoring::calculateChromatographicIdScores(
OpenSwath::IMRMFeature* imrmfeature,
const std::vector<std::string>& native_ids_identification,
const std::vector<std::string>& native_ids_detection,
std::vector<OpenSwath::ISignalToNoisePtr>& signal_noise_estimators,
OpenSwath_Ind_Scores & idscores) const
{
OPENMS_PRECONDITION(imrmfeature != nullptr, "Feature to be scored cannot be null");
OpenSwath::MRMScoring mrmscore_;
mrmscore_.initializeXCorrContrastMatrix(imrmfeature, native_ids_identification, native_ids_detection);
if (su_.use_coelution_score_)
{
idscores.ind_xcorr_coelution_score = mrmscore_.calcSeparateXcorrContrastCoelutionScore();
}
if (su_.use_shape_score_)
{
idscores.ind_xcorr_shape_score = mrmscore_.calcSeparateXcorrContrastShapeScore();
}
// Signal to noise scoring
if (su_.use_sn_score_)
{
idscores.ind_log_sn_score = mrmscore_.calcSeparateSNScore(imrmfeature, signal_noise_estimators);
}
// Mutual information scoring
if (su_.use_mi_score_)
{
mrmscore_.initializeMIContrastMatrix(imrmfeature, native_ids_identification, native_ids_detection);
idscores.ind_mi_score = mrmscore_.calcSeparateMIContrastScore();
}
}
/// Compute library (spectral-library agreement) and retention-time scores
/// for a feature against the assay library entry of @p pep.
void OpenSwathScoring::calculateLibraryScores(
    OpenSwath::IMRMFeature* imrmfeature,
    const std::vector<TransitionType> & transitions,
    const CompoundType& pep,
    const double normalized_feature_rt,
    OpenSwath_Scores & scores)
{
  OPENMS_PRECONDITION(imrmfeature != nullptr, "Feature to be scored cannot be null");
  // NOTE: previous revisions built a normalized library-intensity vector and a
  // native-id vector here, but neither was ever read -- calcLibraryScore works
  // directly on the transitions. The dead code has been removed.

  if (su_.use_library_score_)
  {
    // Fills the various library agreement metrics (correlation, manhattan
    // distances, dot product, spectral angle, RMSD) in one call.
    OpenSwath::MRMScoring::calcLibraryScore(imrmfeature, transitions,
        scores.library_corr, scores.library_norm_manhattan, scores.library_manhattan,
        scores.library_dotprod, scores.library_sangle, scores.library_rootmeansquare);
  }

  // Retention time score
  if (su_.use_rt_score_)
  {
    // rt score is delta iRT
    double normalized_experimental_rt = normalized_feature_rt;
    double rt_score = OpenSwath::MRMScoring::calcRTScore(pep, normalized_experimental_rt);

    scores.normalized_experimental_rt = normalized_experimental_rt;
    scores.raw_rt_score = rt_score;
    scores.norm_rt_score = rt_score / rt_normalization_factor_;
  }
}
void OpenSwathScoring::getNormalized_library_intensities_(const std::vector<TransitionType> & transitions,
std::vector<double>& normalized_library_intensity)
{
normalized_library_intensity.clear();
for (Size i = 0; i < transitions.size(); i++)
{
normalized_library_intensity.push_back(transitions[i].getLibraryIntensity());
}
for (Size i = 0; i < normalized_library_intensity.size(); i++)
{
// the library intensity should never be below zero
if (normalized_library_intensity[i] < 0.0) { normalized_library_intensity[i] = 0.0; }
}
OpenSwath::Scoring::normalize_sum(&normalized_library_intensity[0], boost::numeric_cast<int>(normalized_library_intensity.size()));
}
SpectrumSequence OpenSwathScoring::fetchSpectrumSwath(OpenSwath::SpectrumAccessPtr swathmap, double RT, int nr_spectra_to_add, const RangeMobility& im_range)
{
  // Pull the requested number of spectra around RT from the map.
  SpectrumSequence all_spectra = swathmap->getMultipleSpectra(RT, nr_spectra_to_add);

  // ADDITION: hand back the raw vector; the summation happens later.
  if (spectra_addition_method_ == SpectrumAdditionMethod::ADDITION)
  {
    return all_spectra;
  }

  // RESAMPLE: collapse all fetched spectra into one resampled spectrum now.
  return { SpectrumAddition::addUpSpectra(all_spectra, im_range, spacing_for_spectra_resampling_, true) };
}
/// Fetch (and combine) spectra around @p RT from one or several SWATH maps.
/// Multiple maps imply SONAR data; in that case a single summed spectrum is
/// always returned.
SpectrumSequence OpenSwathScoring::fetchSpectrumSwath(std::vector<OpenSwath::SwathMap> swath_maps, double RT, int nr_spectra_to_add, const RangeMobility& im_range)
{
  OPENMS_PRECONDITION(nr_spectra_to_add >= 1, "nr_spectra_to_add must be at least 1.")
  OPENMS_PRECONDITION(!swath_maps.empty(), "swath_maps vector cannot be empty")

  // This is not SONAR data
  if (swath_maps.size() == 1)
  {
    return fetchSpectrumSwath(swath_maps[0].sptr, RT, nr_spectra_to_add, im_range);
  }
  else
  {
    // multiple SWATH maps for a single precursor -> this is SONAR data, in all cases only return a single spectrum
    if (!im_range.isEmpty()) // data is IM enhanced (comment was previously inverted)
    {
      SpectrumSequence all_spectra;
      if (spectra_addition_method_ == SpectrumAdditionMethod::ADDITION)
      {
        for (size_t i = 0; i < swath_maps.size(); ++i)
        {
          SpectrumSequence spectrumSequence = swath_maps[i].sptr->getMultipleSpectra(RT, nr_spectra_to_add, im_range.getMin(), im_range.getMax());
          // BUGFIX: the fetched spectra were previously discarded here, so the
          // final sum was taken over an empty vector.
          all_spectra.push_back(SpectrumAddition::concatenateSpectra(spectrumSequence));
        }
      }
      else // (spectra_addition_method_ == SpectrumAdditionMethod::RESAMPLE)
      {
        for (size_t i = 0; i < swath_maps.size(); ++i)
        {
          SpectrumSequence spectrumSequence = swath_maps[i].sptr->getMultipleSpectra(RT, nr_spectra_to_add, im_range.getMin(), im_range.getMax());
          all_spectra.push_back(SpectrumAddition::addUpSpectra(spectrumSequence, spacing_for_spectra_resampling_, true));
        }
      }
      return { SpectrumAddition::addUpSpectra(all_spectra, spacing_for_spectra_resampling_, true) };
    }
    else // im_range.isEmpty(): data is not IM enhanced
    {
      SpectrumSequence all_spectra;
      if (spectra_addition_method_ == SpectrumAdditionMethod::ADDITION)
      {
        for (size_t i = 0; i < swath_maps.size(); ++i)
        {
          SpectrumSequence spectrumSequence = swath_maps[i].sptr->getMultipleSpectra(RT, nr_spectra_to_add);
          all_spectra.push_back(SpectrumAddition::concatenateSpectra(spectrumSequence));
        }
      }
      else // (spectra_addition_method_ == SpectrumAdditionMethod::RESAMPLE)
      {
        for (size_t i = 0; i < swath_maps.size(); ++i)
        {
          SpectrumSequence spectrumSequence = swath_maps[i].sptr->getMultipleSpectra(RT, nr_spectra_to_add);
          all_spectra.push_back(SpectrumAddition::addUpSpectra(spectrumSequence, spacing_for_spectra_resampling_, true));
        }
      }
      return { SpectrumAddition::addUpSpectra(all_spectra, spacing_for_spectra_resampling_, true) };
    }
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/MRMIonSeries.cpp | .cpp | 23,097 | 667 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: George Rosenberger $
// $Authors: George Rosenberger $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/MRMIonSeries.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <boost/assign.hpp>
#include <boost/lexical_cast.hpp>
namespace OpenMS
{
/// Default constructor (class holds no state of its own)
MRMIonSeries::MRMIonSeries() = default;
/// Destructor
MRMIonSeries::~MRMIonSeries() = default;
/// Look up @p ionid in the ion series; returns the (annotation, m/z) pair or
/// ("unannotated", -1) if the id is unknown.
std::pair<String, double> MRMIonSeries::getIon(IonSeries& ionseries, const String& ionid)
{
  // Single lookup: the previous find() + operator[] combination searched the
  // map twice for the same key.
  auto it = ionseries.find(ionid);
  if (it != ionseries.end())
  {
    return std::make_pair(it->first, it->second);
  }
  // Unknown ion id -> sentinel pair
  return std::make_pair(String("unannotated"), -1);
}
namespace
{
  /// Extract charge from annotation string (e.g., "b8^2" -> 2, "y5" -> 1)
  int extractChargeFromAnnotation_(const String& annotation)
  {
    size_t pos = annotation.find('^');
    if (pos != std::string::npos && pos + 1 < annotation.size())
    {
      try
      {
        return annotation.substr(pos + 1).toInt();
      }
      catch (...)
      {
        // malformed charge suffix -> fall back to default
        return 1;
      }
    }
    return 1; // default charge is 1
  }

  /// Extract ordinal from annotation string (e.g., "b8^2" -> 8, "y15" -> 15);
  /// returns INT_MAX when no ordinal can be parsed.
  int extractOrdinalFromAnnotation_(const String& annotation)
  {
    if (annotation.empty() || annotation == "unannotated")
    {
      return std::numeric_limits<int>::max();
    }
    size_t i = 1;
    // BUGFIX: std::isdigit has undefined behavior for char values that are
    // negative (possible with non-ASCII input); cast to unsigned char first.
    while (i < annotation.size() && std::isdigit(static_cast<unsigned char>(annotation[i])))
    {
      ++i;
    }
    if (i > 1)
    {
      try
      {
        return annotation.substr(1, i - 1).toInt();
      }
      catch (...)
      {
        return std::numeric_limits<int>::max();
      }
    }
    return std::numeric_limits<int>::max();
  }
} // anonymous namespace
/// Find the theoretical ion whose m/z is closest to @p ProductMZ within
/// @p mz_threshold. Near-ties (within 1% of the threshold) are broken by
/// preferring lower charge states, then lower ordinals (shorter fragments).
/// Returns ("unannotated", -1) if nothing matches.
std::pair<String, double> MRMIonSeries::annotateIon(const IonSeries& ionseries, const double ProductMZ, const double mz_threshold)
{
  // make sure to only use annotated transitions and to use the theoretical MZ
  // (removed: an unused 'using namespace boost::assign;' directive)
  std::pair<String, double> ion = std::make_pair(String("unannotated"), -1);

  double closest_delta = std::numeric_limits<double>::max();
  int best_charge = std::numeric_limits<int>::max();
  int best_ordinal = std::numeric_limits<int>::max();

  // Epsilon for considering two deltas as "equal" - use fraction of mz_threshold
  const double delta_epsilon = mz_threshold * 0.01; // 1% of threshold

  for (const auto& ordinal : ionseries)
  {
    double delta = std::fabs(ordinal.second - ProductMZ);
    if (delta <= mz_threshold)
    {
      bool is_better = false;
      int this_charge = extractChargeFromAnnotation_(ordinal.first);
      int this_ordinal = extractOrdinalFromAnnotation_(ordinal.first);

      if (delta < closest_delta - delta_epsilon)
      {
        // Significantly closer - always prefer
        is_better = true;
      }
      else if (delta <= closest_delta + delta_epsilon)
      {
        // Within epsilon of current best - use tie-breaking rules
        if (this_charge < best_charge)
        {
          // Prefer lower charge state (b8^2 over b18^4)
          is_better = true;
        }
        else if (this_charge == best_charge && this_ordinal < best_ordinal)
        {
          // Same charge - prefer shorter fragment (lower ordinal)
          is_better = true;
        }
      }

      if (is_better)
      {
        closest_delta = delta;
        best_charge = this_charge;
        best_ordinal = this_ordinal;
        ion = std::make_pair(ordinal.first, ordinal.second);
      }
    }
  }
  return ion;
}
/// Convert a textual fragment annotation (e.g. "y7", "b5-18", "y3-H2O1") into
/// a CV-term based Interpretation. Precursor annotations yield a default
/// (empty) interpretation; unknown ion letters map to NonIdentified.
TargetedExperiment::Interpretation MRMIonSeries::annotationToCVTermList_(const String& annotation)
{
  TargetedExperiment::Interpretation interpretation;
  String fragment_type;
  int fragment_nr = -1;
  double fragment_loss = 0;
  // int fragment_gain = 0;
  std::vector<String> best_annotation;
  annotation.split("/", best_annotation);

  if (best_annotation[0] == "Precursor_i0" || best_annotation[0] == "MS2_Precursor_i0")
  {
    return interpretation;
  }
  else if (best_annotation[0].find("-") != std::string::npos)
  {
    // fragment with neutral loss, e.g. "b5-18" (SpectraST) or "y3-H2O1"
    std::vector<String> best_annotation_loss;
    best_annotation[0].split("-", best_annotation_loss);
    fragment_type = best_annotation_loss[0].substr(0, 1);
    fragment_nr = best_annotation_loss[0].substr(1).toInt();

    // SpectraST style neutral loss
    try
    {
      int nl = boost::lexical_cast<int>(best_annotation_loss[1]);
      fragment_loss = -1 * nl;
    }
    catch (boost::bad_lexical_cast &)
    {
      // Sum-formula style loss (e.g. "H2O1").
      // BUGFIX: this must NOT be 'static' -- the formula depends on the
      // runtime argument; a static would cache the first formula ever seen
      // and silently reuse its monoisotopic mass for all later, different
      // losses.
      const EmpiricalFormula nl_formula(best_annotation_loss[1]);
      fragment_loss = -1 * nl_formula.getMonoWeight();
    }
  }
  else if (best_annotation[0].find("+") != std::string::npos)
  {
    std::vector<String> best_annotation_gain;
    best_annotation[0].split("+", best_annotation_gain);
    fragment_type = best_annotation_gain[0].substr(0, 1);
    fragment_nr = best_annotation_gain[0].substr(1).toInt();
    // fragment_gain = String(best_annotation_gain[1]).toInt(); // fragment neutral gain is not implemented as CV term.
  }
  else
  {
    fragment_type = best_annotation[0].substr(0, 1);
    fragment_nr = best_annotation[0].substr(1).toInt();
  }

  if (fragment_nr != -1)
  {
    interpretation.rank = 1; // we only store the best interpretation
    interpretation.ordinal = fragment_nr;
  }

  if (fragment_loss < 0)
  {
    CVTerm frag_loss;
    frag_loss.setCVIdentifierRef("MS");
    frag_loss.setAccession("MS:1001524");
    frag_loss.setName("fragment neutral loss");
    frag_loss.setValue(fragment_loss);
    interpretation.addCVTerm(frag_loss);
  }

  // figure out which fragment it is
  if (fragment_type == "x")
  {
    interpretation.iontype = TargetedExperiment::IonType::XIon;
  }
  else if (fragment_type == "y")
  {
    interpretation.iontype = TargetedExperiment::IonType::YIon;
  }
  else if (fragment_type == "z")
  {
    interpretation.iontype = TargetedExperiment::IonType::ZIon;
  }
  else if (fragment_type == "a")
  {
    interpretation.iontype = TargetedExperiment::IonType::AIon;
  }
  else if (fragment_type == "b")
  {
    interpretation.iontype = TargetedExperiment::IonType::BIon;
  }
  else if (fragment_type == "c")
  {
    interpretation.iontype = TargetedExperiment::IonType::CIon;
  }
  else
  {
    interpretation.iontype = TargetedExperiment::IonType::NonIdentified;
  }
  return interpretation;
}
/// Parse the transition's "annotation" meta value (e.g. "y7^2/0.01") and
/// transfer charge state and fragment interpretation onto its product.
void MRMIonSeries::annotationToCV_(ReactionMonitoringTransition& tr)
{
  std::vector<String> parts;
  tr.getMetaValue("annotation").toString().split("/", parts);

  OpenMS::ReactionMonitoringTransition::Product product = tr.getProduct();
  String ion_annotation;
  if (parts[0].find("^") != std::string::npos)
  {
    // explicit charge given after '^'
    std::vector<String> charge_split;
    parts[0].split("^", charge_split);
    product.setChargeState(String(charge_split[1]).toInt());
    ion_annotation = charge_split[0];
  }
  else
  {
    // no charge annotation -> default to singly charged
    product.setChargeState(1);
    ion_annotation = parts[0];
  }

  TargetedExperiment::Interpretation cv_interpretation = annotationToCVTermList_(ion_annotation);
  product.resetInterpretations();
  product.addInterpretation(cv_interpretation);
  tr.setProduct(product);
}
/// Store the externally supplied @p annotation string on the transition and
/// derive the CV-term based product interpretation from it.
void MRMIonSeries::annotateTransitionCV(ReactionMonitoringTransition& tr, const String& annotation)
{
  tr.setMetaValue("annotation", annotation);
  annotationToCV_(tr); // parses the "annotation" meta value set above
}
/// Validate (and optionally re-derive) the fragment-ion annotation of a
/// transition against the theoretical ion series of @p peptide. Transitions
/// whose annotation cannot be confirmed within the m/z thresholds are marked
/// "unannotated"; otherwise annotation, product/precursor m/z and the CV-term
/// interpretation are updated on the transition.
void MRMIonSeries::annotateTransition(ReactionMonitoringTransition& tr, const TargetedExperiment::Peptide& peptide, const double precursor_mz_threshold, double product_mz_threshold, const bool enable_reannotation, const std::vector<String>& fragment_types, const std::vector<size_t>& fragment_charges, const bool enable_specific_losses, const bool enable_unspecific_losses, const int round_decPow)
{
  OPENMS_PRECONDITION(peptide.hasCharge(), "Cannot annotate transition without a peptide charge state")
  // TODO: we should not have transitions without charge states
  // OPENMS_PRECONDITION(tr.isProductChargeStateSet(), "Cannot annotate transition without a charge state")

  TargetedExperiment::Interpretation interpretation;
  OpenMS::AASequence sequence = TargetedExperimentHelper::getAASequence(peptide);

  int precursor_charge = 1; // assume default to be 1 (should always be set, see precondition)
  int fragment_charge = 1; // assume default to be 1 (should always be set, see precondition)
  if (peptide.hasCharge())
  {
    precursor_charge = peptide.getChargeState();
  }
  if (tr.isProductChargeStateSet() )
  {
    fragment_charge = tr.getProductChargeState();
  }

  double prec_pos = sequence.getMZ(precursor_charge);
  bool unannotated = false;
  std::pair<String, double> target_ion = std::make_pair(String("unannotated"), -1);
  double pos = -1;
  String ionstring;

  if (!tr.getProduct().getInterpretationList().empty())
  {
    // Validate the pre-existing interpretation against the theoretical m/z.
    interpretation = tr.getProduct().getInterpretationList()[0];
    AASequence ion;
    if (interpretation.ordinal > 0) // if ordinal is set
    {
      int ordinal = (int)interpretation.ordinal;
      // x/y/z ions are C-terminal (suffix) fragments;
      // a/b/c ions are N-terminal (prefix) fragments.
      if (interpretation.iontype == TargetedExperiment::IonType::XIon)
      {
        ion = sequence.getSuffix(ordinal);
        ionstring += "x";
        pos = ion.getMZ(fragment_charge, Residue::XIon);
      }
      else if (interpretation.iontype == TargetedExperiment::IonType::YIon)
      {
        ion = sequence.getSuffix(ordinal);
        ionstring += "y";
        pos = ion.getMZ(fragment_charge, Residue::YIon);
      }
      else if (interpretation.iontype == TargetedExperiment::IonType::ZIon)
      {
        ion = sequence.getSuffix(ordinal);
        ionstring += "z";
        pos = ion.getMZ(fragment_charge, Residue::ZIon);
      }
      else if (interpretation.iontype == TargetedExperiment::IonType::AIon)
      {
        // BUGFIX: a/b/c ions previously used getSuffix(), computing the m/z
        // of the wrong (C-terminal) fragment.
        ion = sequence.getPrefix(ordinal);
        ionstring += "a";
        pos = ion.getMZ(fragment_charge, Residue::AIon);
      }
      else if (interpretation.iontype == TargetedExperiment::IonType::BIon)
      {
        ion = sequence.getPrefix(ordinal);
        ionstring += "b";
        pos = ion.getMZ(fragment_charge, Residue::BIon);
      }
      else if (interpretation.iontype == TargetedExperiment::IonType::CIon)
      {
        ion = sequence.getPrefix(ordinal);
        ionstring += "c";
        pos = ion.getMZ(fragment_charge, Residue::CIon);
      }
      else
      {
        unannotated = true;
      }
    }
    else
    {
      unannotated = true;
    }

    // the ion type must be among the user-requested fragment types
    if (std::find(fragment_types.begin(), fragment_types.end(), ionstring) == fragment_types.end())
    {
      unannotated = true;
    }
    if (interpretation.ordinal > 0)
    {
      ionstring += String(interpretation.ordinal);
    }
    else
    {
      unannotated = true;
    }

    if (interpretation.hasCVTerm("MS:1001524") && (enable_specific_losses || enable_unspecific_losses)) // fragment ion neutral loss
    {
      double nl = interpretation.getCVTerms().at("MS:1001524")[0].getValue().toString().toDouble();
      // SpectraST style neutral losses (nl is negative, in Da)
      if (nl == -18)
      {
        ionstring += "-H2O1";
        static const EmpiricalFormula neutralloss_h2o("H2O1"); // -18 H2O loss
        pos -= neutralloss_h2o.getMonoWeight() / fragment_charge;
      }
      else if (nl == -17)
      {
        ionstring += "-H3N1";
        static const EmpiricalFormula neutralloss_nh3("H3N1"); // -17 NH3 loss
        pos -= neutralloss_nh3.getMonoWeight() / fragment_charge;
      }
      else if (nl == -36)
      {
        ionstring += "-H4O2";
        static const EmpiricalFormula neutralloss_h2oh2o("H4O2"); // -36 2 * H2O loss
        pos -= neutralloss_h2oh2o.getMonoWeight() / fragment_charge;
      }
      else if (nl == -34)
      {
        ionstring += "-H6N2";
        static const EmpiricalFormula neutralloss_nh3nh3("H6N2"); // -34 2 * NH3 loss
        pos -= neutralloss_nh3nh3.getMonoWeight() / fragment_charge;
      }
      else if (nl == -35)
      {
        ionstring += "-H5N1O1";
        static const EmpiricalFormula neutralloss_h2onh3("H5N1O1"); // -35 H2O & NH3 loss
        pos -= neutralloss_h2onh3.getMonoWeight() / fragment_charge;
      }
      else if (nl == -64)
      {
        ionstring += "-C1H4O1S1";
        static const EmpiricalFormula neutralloss_ch4so("C1H4O1S1"); // -64 CH4SO loss
        pos -= neutralloss_ch4so.getMonoWeight() / fragment_charge;
      }
      else if (nl == -80)
      {
        ionstring += "-H1O3P1";
        static const EmpiricalFormula neutralloss_hpo3("H1O3P1"); // -80 HPO3 loss
        pos -= neutralloss_hpo3.getMonoWeight() / fragment_charge;
      }
      else if (nl == -98)
      {
        ionstring += "-H3O4P1";
        static const EmpiricalFormula neutralloss_hpo3h2o("H3O4P1"); // -98 HPO3 & H2O loss
        pos -= neutralloss_hpo3h2o.getMonoWeight() / fragment_charge;
      }
      else if (nl == -45)
      {
        ionstring += "-C1H3N1O1";
        static const EmpiricalFormula neutralloss_ch3no("C1H3N1O1"); // -45 CH3NO loss
        pos -= neutralloss_ch3no.getMonoWeight() / fragment_charge;
      }
      else if (nl == -44)
      {
        ionstring += "-C1O2";
        static const EmpiricalFormula neutralloss_co2("C1O2"); // -44 CO2 loss
        pos -= neutralloss_co2.getMonoWeight() / fragment_charge;
      }
      else if (nl == -46)
      {
        ionstring += "-C1H2O2";
        static const EmpiricalFormula neutralloss_hccoh("C1H2O2"); // -46 HCOOH loss
        pos -= neutralloss_hccoh.getMonoWeight() / fragment_charge;
      }
      // Double CV term (compatible with PSI CV terms)
      else if (nl < 0)
      {
        ionstring += String(Math::roundDecimal(nl, round_decPow));
        // BUGFIX: nl is negative, so adding it subtracts the loss mass. The
        // previous 'pos -= nl / fragment_charge' *added* the loss instead,
        // inconsistent with every explicit branch above.
        pos += nl / fragment_charge;
      }
      else
      {
        unannotated = true;
      }
    }

    if (fragment_charge >= 1 &&
        std::find(fragment_charges.begin(), fragment_charges.end(), fragment_charge) != fragment_charges.end())
    {
      ionstring += "^" + String(fragment_charge);
      tr.setMetaValue("annotation", ionstring);
    }
    else
    {
      unannotated = true;
    }
  }
  else
  {
    unannotated = true;
  }

  // Optionally re-derive the annotation from the full theoretical ion series.
  if (enable_reannotation)
  {
    MRMIonSeries::IonSeries ionseries = getIonSeries(sequence, precursor_charge, fragment_types,
        fragment_charges, enable_specific_losses, enable_unspecific_losses);
    target_ion = annotateIon(ionseries, tr.getProductMZ(), product_mz_threshold);
    ionstring = target_ion.first;
    tr.setMetaValue("annotation", ionstring);
    pos = Math::roundDecimal(target_ion.second, round_decPow);
    prec_pos = Math::roundDecimal(prec_pos, round_decPow);
    tr.setProductMZ(pos);
    tr.setPrecursorMZ(prec_pos);
    if (ionstring == "unannotated")
    {
      unannotated = true;
    }
    else
    {
      annotationToCV_(tr);
      interpretation = tr.getProduct().getInterpretationList()[0];
      unannotated = false;
    }
  }

  // Accept the annotation only if both product and precursor m/z agree with
  // the theoretical values within the given thresholds.
  if (!unannotated && std::fabs(tr.getProductMZ() - pos) <= product_mz_threshold && std::fabs(tr.getPrecursorMZ() - prec_pos) <= precursor_mz_threshold)
  {
    CVTerm frag_mzdelta;
    frag_mzdelta.setCVIdentifierRef("MS");
    frag_mzdelta.setAccession("MS:1000904");
    frag_mzdelta.setName("product ion m/z delta");
    frag_mzdelta.setValue(std::fabs(Math::roundDecimal(tr.getProductMZ() - pos, round_decPow)));
    interpretation.replaceCVTerm(frag_mzdelta);
    pos = Math::roundDecimal(pos, round_decPow);
    prec_pos = Math::roundDecimal(prec_pos, round_decPow);
    tr.setProductMZ(pos);
    tr.setPrecursorMZ(prec_pos);
  }
  else
  {
    unannotated = true;
  }

  if (unannotated)
  {
    interpretation.iontype = TargetedExperiment::IonType::NonIdentified;
    tr.setMetaValue("annotation", "unannotated");
  }
  else
  {
    tr.setMetaValue("annotation", ionstring);
    annotationToCV_(tr);
  }

  OpenMS::ReactionMonitoringTransition::Product p = tr.getProduct();
  p.resetInterpretations();
  p.addInterpretation(interpretation);
  tr.setProduct(p);
}
/// Generate the theoretical fragment-ion series for @p sequence: one entry
/// per requested ion type (a/b/c/x/y/z), ordinal (1..length-1) and charge
/// (capped at the precursor charge), plus optional neutral-loss variants.
/// Keys are annotation strings (e.g. "y7^2", "b5-H2O1^1"), values are the
/// theoretical m/z rounded to @p round_decPow.
std::map<String, double> MRMIonSeries::getIonSeries(const AASequence& sequence,
    size_t precursor_charge,
    const std::vector<String>& fragment_types,
    const std::vector<size_t>& fragment_charges,
    const bool enable_specific_losses,
    const bool enable_unspecific_losses,
    const int round_decPow)
{
  // Static neutral loss formulas (the "unspecific" losses: water, ammonia, ...)
  const static EmpiricalFormula H2O = EmpiricalFormula("H2O1");
  const static EmpiricalFormula NH3 = EmpiricalFormula("H3N1");
  const static EmpiricalFormula CN2 = EmpiricalFormula("C1H2N2");
  const static EmpiricalFormula CNO = EmpiricalFormula("C1H2N1O1");

  // Static ion type mass adjustments (computed once at first call)
  static const double aion_adj = Residue::getInternalToAIon().getMonoWeight();
  static const double bion_adj = Residue::getInternalToBIon().getMonoWeight();
  static const double cion_adj = Residue::getInternalToCIon().getMonoWeight();
  static const double xion_adj = Residue::getInternalToXIon().getMonoWeight();
  static const double yion_adj = Residue::getInternalToYIon().getMonoWeight();
  static const double zion_adj = Residue::getInternalToZIon().getMonoWeight();

  std::map<String, double> ionseries;

  const Size seq_size = sequence.size();
  if (seq_size == 0)
  {
    return ionseries;
  }

  // Pre-compute cumulative internal mass from N-terminus
  // cumulative_mass[i] = sum of internal masses of residues 0..i-1
  std::vector<double> cumulative_mass(seq_size + 1, 0.0);
  for (Size i = 0; i < seq_size; ++i)
  {
    cumulative_mass[i + 1] = cumulative_mass[i] + sequence[i].getMonoWeight(Residue::Internal);
  }

  // Get terminal modification masses
  const double n_term_mod_mass = sequence.hasNTerminalModification() ?
      sequence.getNTerminalModification()->getDiffMonoMass() : 0.0;
  const double c_term_mod_mass = sequence.hasCTerminalModification() ?
      sequence.getCTerminalModification()->getDiffMonoMass() : 0.0;

  // Total internal mass for suffix ion calculations
  const double total_internal_mass = cumulative_mass[seq_size];

  // Lambda to handle neutral losses for a residue range
  // (adds one extra map entry per loss-carrying residue in the fragment)
  auto add_neutral_losses = [&](const String& ft, Size ordinal, Size start_idx, Size end_idx, double pos, size_t charge)
  {
    for (Size j = start_idx; j < end_idx; ++j)
    {
      if (sequence[j].hasNeutralLoss())
      {
        for (const auto& lit : sequence[j].getLossFormulas())
        {
          // specific losses = anything except the common H2O/NH3/CN2/CNO set
          if (enable_specific_losses && lit != H2O && lit != NH3 && lit != CN2 && lit != CNO)
          {
            ionseries[ft + String(ordinal) + "-" + lit.toString() + "^" + String(charge)] =
                Math::roundDecimal(pos - lit.getMonoWeight() / charge, round_decPow);
          }
          else if (enable_unspecific_losses && (lit == H2O || lit == NH3 || lit == CN2 || lit == CNO))
          {
            ionseries[ft + String(ordinal) + "-" + lit.toString() + "^" + String(charge)] =
                Math::roundDecimal(pos - lit.getMonoWeight() / charge, round_decPow);
          }
        }
      }
    }
  };

  for (const auto& ft : fragment_types)
  {
    for (const auto& charge : fragment_charges)
    {
      // fragments cannot carry more charge than the precursor
      if (charge > precursor_charge)
      {
        continue;
      }

      const double proton_mass_contrib = Constants::PROTON_MASS_U * static_cast<double>(charge);

      // Determine if this is a prefix (a/b/c) or suffix (x/y/z) ion
      bool is_prefix = (ft == "a" || ft == "b" || ft == "c");
      bool is_suffix = (ft == "x" || ft == "y" || ft == "z");

      if (!is_prefix && !is_suffix)
      {
        throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
            ft + " ion series for peptide sequence \"" + sequence.toString() +
            "\" with precursor charge +" + String(precursor_charge) + " could not be generated.");
      }

      // Get the appropriate ion type adjustment
      double ion_adj = 0.0;
      if (ft == "a") ion_adj = aion_adj;
      else if (ft == "b") ion_adj = bion_adj;
      else if (ft == "c") ion_adj = cion_adj;
      else if (ft == "x") ion_adj = xion_adj;
      else if (ft == "y") ion_adj = yion_adj;
      else if (ft == "z") ion_adj = zion_adj;

      for (Size i = 1; i < seq_size; ++i)
      {
        double internal_mass;
        Size start_idx, end_idx; // For neutral loss iteration

        if (is_prefix)
        {
          // Prefix ion at ordinal i: residues 0..i-1, includes N-term mod
          internal_mass = cumulative_mass[i] + n_term_mod_mass;
          start_idx = 0;
          end_idx = i;
        }
        else // is_suffix
        {
          // Suffix ion at ordinal i: residues (seq_size-i)..seq_size-1, includes C-term mod
          internal_mass = total_internal_mass - cumulative_mass[seq_size - i] + c_term_mod_mass;
          start_idx = seq_size - i;
          end_idx = seq_size;
        }

        // Compute m/z: (internal_mass + protons + ion_adjustment) / charge
        const double pos = (internal_mass + proton_mass_contrib + ion_adj) / static_cast<double>(charge);
        ionseries[ft + String(i) + "^" + String(charge)] = Math::roundDecimal(pos, round_decPow);

        // Handle neutral losses for residues in this fragment
        add_neutral_losses(ft, i, start_idx, end_idx, pos, charge);
      }
    }
  }
  return ionseries;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/OpenSwathScores.cpp | .cpp | 10,119 | 249 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/OpenSwathScores.h>
namespace OpenMS
{
double OpenSwath_Scores::get_quick_lda_score(double library_corr_,
                                             double library_norm_manhattan_,
                                             double norm_rt_score_,
                                             double xcorr_coelution_score_,
                                             double xcorr_shape_score_,
                                             double log_sn_score_) const
{
  // Quick LDA model derived from manual evaluation of 80 chromatograms;
  // average over 100 2x cross-validated runs (0.85 TPR / 0.17 FDR).
  //   true:  mean  4.2        (sd 1.055)
  //   false: mean -0.07506772 (sd 1.055)
  // Approximate filtering behaviour of a cutoff on this score:
  //   below -0.5 removes ~30% of peaks, below 0 ~50%, below 0.5 ~70%,
  //   below 1.0 ~85%, below 1.5 ~93%, below 2.0 ~97%.
  //
  // NOTE this score means "better" if it is more negative!
  // Accumulate left-to-right to keep floating point results identical.
  double lda_quick_score = library_corr_ * -0.5319046;
  lda_quick_score += library_norm_manhattan_ * 2.1643962;
  lda_quick_score += norm_rt_score_ * 8.0353047;
  lda_quick_score += xcorr_coelution_score_ * 0.1458914;
  lda_quick_score += xcorr_shape_score_ * -1.6901925;
  lda_quick_score += log_sn_score_ * -0.8002824;
  return lda_quick_score;
}
double OpenSwath_Scores::calculate_lda_prescore(const OpenSwath_Scores& scores) const
{
  // LDA average model on 100 2 x Crossvalidated runs (0.91 TPR/0.20 FDR)
  // NOTE this score means "better" if it is more negative!
  /* Historical model kept for reference:
  double xx_old_lda_prescore =
  intensity_score * -2.296679 +
  library_corr * -0.1223876 +
  library_norm_manhattan* 2.013638 +
  nr_peaks_score * 0.01683357 +
  rt_score * 0.00143999 +
  sn_score * -0.1619762 +
  total_xic_score * 0.00000003697898 +
  xcorr_coelution_score * 0.05909583 +
  xcorr_shape_score * -0.4699841;
  */
  // Accumulate left-to-right to keep floating point results identical.
  double score = scores.library_corr * -0.34664267;
  score += scores.library_norm_manhattan * 2.98700722;
  score += scores.norm_rt_score * 7.05496384;
  score += scores.xcorr_coelution_score * 0.09445371;
  score += scores.xcorr_shape_score * -5.71823862;
  score += scores.log_sn_score * -0.72989582;
  score += scores.elution_model_fit_score * 1.88443209;
  return score;
}
double OpenSwath_Scores::calculate_lda_single_transition(const OpenSwath_Scores& scores) const
{
  // Manually derived scoring model for single transition peakgroups (library
  // and cross-correlation scores are unavailable with only one transition).
  double score = scores.norm_rt_score * 7.05496384;
  score += scores.log_sn_score * -0.72989582;
  score += scores.elution_model_fit_score * -1.08443209;
  return score;
}
/// SWATH LDA pre-score: weighted sum of sub-scores; more negative is better.
/// The large comment block below preserves historical / alternative model
/// weights for reference.
double OpenSwath_Scores::calculate_swath_lda_prescore(const OpenSwath_Scores& scores) const
{
  // Swath - LDA average model on 100 2 x Crossvalidated runs (0.76 TPR/0.20 FDR) [without elution model]
  /*
  double xx_old_swath_prescore =
  intensity_score * -3.148838e+00 +
  library_corr * -7.562403e-02 +
  library_norm_manhattan * 1.786286e+00 +
  nr_peaks_score * -7.674263e-03 +
  rt_score * 1.748377e-03 +
  sn_score * -1.372636e-01 +
  total_xic_score * 7.278437e-08 +
  xcorr_coelution_score * 1.181813e-01 +
  weighted_coelution_score * -7.661783e-02 +
  xcorr_shape_score * -6.903933e-02 +
  weighted_xcorr_shape * -4.234820e-01 +
  bseries_score * -2.022380e-02 +
  massdev_score * 2.844948e-02 +
  massdev_score_weighted * 1.133209e-02 +
  yseries_score * -9.510874e-02 +
  isotope_corr * -1.619902e+00 +
  isotope_overlap * 2.890688e-01 ;
  // NOTE this score means "better" if it is more negative!
  */
  return scores.library_corr * -0.19011762 +
         scores.library_norm_manhattan * 2.47298914 +
         scores.norm_rt_score * 5.63906731 +
         scores.isotope_correlation * -0.62640133 +
         scores.isotope_overlap * 0.36006925 +
         scores.massdev_score * 0.08814003 +
         scores.xcorr_coelution_score * 0.13978311 +
         scores.xcorr_shape_score * -1.16475032 +
         scores.yseries_score * -0.19267813 +
         scores.log_sn_score * -0.61712054;

  /*
  Gold standard, best sample
  main_var_xx_swath_prelim_score  0.291440015642621
  var_bseries_score 0.0496492555026149
  var_dotprod_score -0.522561744728316
  var_elution_model_fit_score -1.99429446109581
  var_intensity_score 1.70915451039584
  var_isotope_correlation_score 0.966260829910062
  var_isotope_overlap_score -14.216079147368
  var_library_corr  0.061432632721274
  var_library_dotprod -3.79958938222036
  var_library_manhattan -1.36520528433508
  var_library_norm_manhattan  -6.44998534845163
  var_log_sn_score  -0.0389995774588385
  var_manhatt_score -0.0944805864772705
  var_massdev_score 0.0144460056621709
  var_massdev_score_weighted  -0.0494772144218002
  var_norm_rt_score -9.04596725429934
  var_xcorr_coelution -0.141763244951207
  var_xcorr_coelution_weighted  0.00261409408565438
  var_xcorr_shape 4.89741810577371
  var_xcorr_shape_weighted  0.342723332762697
  var_yseries_score -0.188316503432445

  Strep  Strep0_Repl2_R02/runlogs_mprophet.tar.gz
  main_var_xx_swath_prelim_score  0.231523019269729
  var_bseries_score -0.0488528503276347
  var_elution_model_fit_score -0.47977060647858
  var_intensity_score -0.80664074459128
  var_isotope_correlation_score 2.34488326031997
  var_isotope_overlap_score -2.14735763746488
  var_library_corr  -0.395167010986141
  var_library_norm_manhattan  -13.1295053007338
  var_log_sn_score  0.265784828465348
  var_massdev_score 0.0150193500103614
  var_massdev_score_weighted  -0.109859906028132
  var_norm_rt_score -25.7107556062008
  var_xcorr_coelution 0.244590396074410
  var_xcorr_coelution_weighted  -0.918578472543494
  var_xcorr_shape 2.18720521365230
  var_xcorr_shape_weighted  -0.815295893352108
  var_yseries_score -0.0620070175846356

  Strep10_Repl2_R02/runlogs_mprophet.tar.gz
  main_var_xx_swath_prelim_score  0.293470108599468
  var_bseries_score -0.0129641361717189
  var_elution_model_fit_score -0.44993587229358
  var_intensity_score -0.828540564651968
  var_isotope_correlation_score 2.76284687671386
  var_isotope_overlap_score -2.26460097307479
  var_library_corr  -0.445369627383142
  var_library_norm_manhattan  -13.2905041886848
  var_log_sn_score  0.224626177093898
  var_massdev_score 0.0185003919755981
  var_massdev_score_weighted  -0.0899477179756381
  var_norm_rt_score -24.4807649346717
  var_xcorr_coelution 0.218195211767293
  var_xcorr_coelution_weighted  -0.91949559943762
  var_xcorr_shape 1.77358514815991
  var_xcorr_shape_weighted  -0.616535104461374
  var_yseries_score -0.0652111196389966

  // FINAL AQUA gold standard classifier
  human
  main_var_xx_swath_prelim_score  0.4384384475524
  var_bseries_score 0.00227405501436837
  var_elution_model_fit_score -2.06412570248571
  var_intensity_score -1.26021147555789
  var_isotope_correlation_score 1.21887083303546
  var_isotope_overlap_score -1.60051046353231
  var_library_corr  -0.33958843974352
  var_library_norm_manhattan  -5.20235596662978
  var_log_sn_score  0.24021015633787
  var_massdev_score 0.0399855393620327
  var_massdev_score_weighted  -0.0907785715261295
  var_norm_rt_score -16.2155920223681
  var_xcorr_coelution 0.0805852135076143
  var_xcorr_coelution_weighted  -0.387927719728573
  var_xcorr_shape 1.885899937033
  var_xcorr_shape_weighted  2.45579580649067
  var_yseries_score 0.138306574987678

  yeast
  main_var_xx_swath_prelim_score  0.369009421609329
  var_bseries_score 0.0157508674154482
  var_elution_model_fit_score -1.67348268698707
  var_intensity_score -1.11972743418717
  var_isotope_correlation_score 1.68717154416093
  var_isotope_overlap_score -1.38410070381813
  var_library_corr  -0.454409692201745
  var_library_norm_manhattan  -6.08160902837145
  var_log_sn_score  0.157259477914274
  var_massdev_score 0.0543919580711367
  var_massdev_score_weighted  -0.137296627160332
  var_norm_rt_score -28.4381743938298
  var_xcorr_coelution 0.0256469469673884
  var_xcorr_coelution_weighted  -0.362865323100099
  var_xcorr_shape 1.88863198062243
  var_xcorr_shape_weighted  1.3518953353109
  var_yseries_score 0.115472572686466

  water
  main_var_xx_swath_prelim_score  0.174880281226536
  var_bseries_score -0.0606466737704899
  var_elution_model_fit_score -0.123252502705892
  var_intensity_score 1.91714146537607
  var_isotope_correlation_score 0.914387652486204
  var_isotope_overlap_score -1.46521560409083
  var_library_corr  -0.485498555013885
  var_library_norm_manhattan  -8.3847526088391
  var_log_sn_score  0.00644514889704832
  var_massdev_score 0.0177435175558717
  var_massdev_score_weighted  -0.0899451169038299
  var_norm_rt_score -15.1458716759687
  var_xcorr_coelution -0.370050235089866
  var_xcorr_coelution_weighted  0.21512520647974
  var_xcorr_shape 0.563413547839886
  var_xcorr_shape_weighted  -0.270773625703933
  var_yseries_score -0.0327896378737766
  */
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/DIAPrescoring.cpp | .cpp | 11,847 | 263 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Witold Wolski $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/DIAPrescoring.h>
//#include <OpenMS/OPENSWATHALGO/DATAACCESS/SpectrumHelpers.h>
#include <OpenMS/OPENSWATHALGO/DATAACCESS/TransitionHelper.h>
#include <OpenMS/OPENSWATHALGO/ALGO/StatsHelpers.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DIAHelper.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <boost/lexical_cast.hpp>
#include <iostream>
#include <algorithm>
#include <utility>
namespace OpenMS
{
void getNormalizedLibraryIntensities(
const std::vector<OpenSwath::LightTransition>& transitions,
std::vector<double>& normalizedLibraryIntensities //normalized intensities
)
{
double totalInt = 0.;
for (std::size_t i = 0; i < transitions.size(); ++i)
{
double libInt = transitions[i].getLibraryIntensity();
if (libInt < 0.)
libInt = 0.;
totalInt += libInt;
normalizedLibraryIntensities.push_back(libInt);
}
std::transform(normalizedLibraryIntensities.begin(),
normalizedLibraryIntensities.end(),
normalizedLibraryIntensities.begin(),
[totalInt](auto && PH1) { return std::divides<double>()(std::forward<decltype(PH1)>(PH1), totalInt); });
}
void getMZIntensityFromTransition(const std::vector<OpenSwath::LightTransition>& trans,
std::vector<std::pair<double, double> >& res)
{
for (std::size_t i = 0; i < trans.size(); ++i)
{
res.emplace_back(trans[i].product_mz, trans[i].library_intensity);
}
}
void DiaPrescore::operator()(const OpenSwath::SpectrumAccessPtr& swath_ptr,
OpenSwath::LightTargetedExperiment& transition_exp_used, const RangeMobility& im_range,
OpenSwath::IDataFrameWriter* ivw) const
{
//getParams();
typedef std::map<std::string, std::vector<OpenSwath::LightTransition> > Mmap;
Mmap transmap;
OpenSwath::TransitionHelper::convert(transition_exp_used, transmap);
// std::cout << "nr peptides : " << transmap.size() << std::endl;
Mmap::iterator m_begin = transmap.begin();
Mmap::iterator m_end = transmap.end();
std::vector<std::string> transitionsNames;
for (; m_begin != m_end; ++m_begin)
{
transitionsNames.push_back(m_begin->first);
}
ivw->colnames(transitionsNames);
//iterate over spectra
for (UInt i = 0; i < swath_ptr->getNrSpectra(); ++i)
{
OpenSwath::SpectrumPtr s = swath_ptr->getSpectrumById(i);
SpectrumSequence spec;
spec.push_back(s);
OpenSwath::SpectrumMeta specmeta = swath_ptr->getSpectrumMetaById(i);
std::cout << "Processing Spectrum " << i << "RT " << specmeta.RT << std::endl;
//iterate over spectra
size_t xx = 0;
Mmap::iterator beg = transmap.begin();
Mmap::iterator end = transmap.end();
std::vector<double> score1v;
std::vector<double> score2v;
for (; beg != end; ++beg, ++xx)
{
//std::cout << "analysing transition" << xx << beg->second.size()
// << " " << beg->first << std::endl;
double score1;
double score2;
//OpenSwath::LightPeptide pep;
score(spec, beg->second, im_range, score1, score2);
score1v.push_back(score1);
score2v.push_back(score2);
} //end of for loop over transitions
//std::string ispectrum = boost::lexical_cast<std::string>(i);
std::string specRT = boost::lexical_cast<std::string>(specmeta.RT);
ivw->store("score1_" + specRT, score1v);
ivw->store("score2_" + specRT, score2v);
} //end of for loop over spectra
}
  /// Computes two similarity scores between the measured spectrum and a
  /// theoretical spectrum built from the transitions:
  ///  - manhattan: Manhattan distance between sqrt-and-sum-normalized measured
  ///    and theoretical isotope-pattern intensities (with zero-weighted
  ///    pre-isotope peaks),
  ///  - dotprod: dot product against a theoretical pattern with NEGATIVELY
  ///    weighted pre-isotope peaks, rescaled to [-1, 1].
  /// @param spec measured spectrum (or spectra) to integrate intensities from
  /// @param lt transitions providing product m/z and library intensities
  /// @param im_range ion mobility range used when integrating windows
  /// @param dotprod output: rescaled dot-product score
  /// @param manhattan output: Manhattan-distance score
  void DiaPrescore::score(const SpectrumSequence& spec,
                          const std::vector<OpenSwath::LightTransition>& lt,
                          const RangeMobility& im_range,
                          double& dotprod,
                          double& manhattan) const
  {
    // NOTE(review): 'res' appears to be unused in this function.
    std::vector<std::pair<double, double> > res;
    std::vector<std::pair<double, double> > spectrumWIso, spectrumWIsoNegPreIso;
    int chg;
    // add expected isotope intensities for every transition productMZ based on averagine
    //TODO allow usage of annotated formulas from transition.compound.sum_formula
    for (const auto& transition : lt)
    {
      // default to charge 1 when the transition carries no fragment charge
      chg = 1;
      if (transition.fragment_charge != 0) chg = transition.fragment_charge;
      DIAHelpers::addSinglePeakIsotopes2Spec(transition.getProductMZ(),
                                             transition.getLibraryIntensity(),
                                             spectrumWIso,
                                             nr_isotopes_,
                                             chg);
    }
    // duplicate since we will add differently weighted preIsotope intensities
    spectrumWIsoNegPreIso.reserve(spectrumWIso.size());
    std::copy(spectrumWIso.begin(), spectrumWIso.end(), back_inserter(spectrumWIsoNegPreIso));
    UInt nrNegPeaks = 2;
    // average library intensity over all transitions
    double avgTheorTransitionInt = std::accumulate(lt.begin(),lt.end(),0.,[](double val, const OpenSwath::LightTransition& lt){return val + lt.getLibraryIntensity();});
    avgTheorTransitionInt /= lt.size();
    double negWeight = 0.5 * avgTheorTransitionInt; // how much of ONE transition should be negatively weighted at the prePeaks (distributed equally on them)
    // for every transition add either zero weighted (for manhattan) or negatively weighted (for dotprod) preIsotope intensities
    for (const auto& transition : lt)
    {
      chg = 1;
      if (transition.fragment_charge != 0) chg = transition.fragment_charge;
      DIAHelpers::addPreisotopeWeights(transition.getProductMZ(), spectrumWIso, nrNegPeaks, 0.0,
                                       Constants::C13C12_MASSDIFF_U,
                                       chg);
      DIAHelpers::addPreisotopeWeights(transition.getProductMZ(),
                                       spectrumWIsoNegPreIso,
                                       nrNegPeaks,
                                       -negWeight,
                                       Constants::C13C12_MASSDIFF_U,
                                       chg);
    }
    //sort by mz
    DIAHelpers::sortByFirst(spectrumWIso);
    DIAHelpers::sortByFirst(spectrumWIsoNegPreIso);
    // compare against the spectrum with 0 weight preIsotope peaks
    std::vector<double> mzTheor, intTheor;
    DIAHelpers::extractFirst(spectrumWIso, mzTheor);
    DIAHelpers::extractSecond(spectrumWIso, intTheor);
    std::vector<double> intExp, mzExp, imExp;
    // integrate the measured spectrum at each theoretical m/z position
    DIAHelpers::integrateWindows(spec, mzTheor, dia_extract_window_, intExp, mzExp, imExp, im_range);
    // sqrt-transform intensities to dampen dominant peaks
    std::transform(intExp.cbegin(), intExp.cend(), intExp.begin(), [](double val){return std::sqrt(val);});
    std::transform(intTheor.cbegin(), intTheor.cend(), intTheor.begin(), [](double val){return std::sqrt(val);});
    // get sum for normalization. All entries in both should be positive
    double intExpTotal = std::accumulate(intExp.cbegin(), intExp.cend(), 0.0);
    double intTheorTotal = std::accumulate(intTheor.cbegin(), intTheor.cend(), 0.0);
    OpenSwath::normalize(intExp, intExpTotal, intExp);
    OpenSwath::normalize(intTheor, intTheorTotal, intTheor);
    //TODO think about normalizing the distance by dividing by the max value 2.
    // Generally I think a combined manhattan distance is not the best feature here, since because of normalization,
    // different transitions affect each other (e.g. if one transition is missing, the other(s) get a much higher
    // normalized value and the whole distance is "penalized twice")
    // Maybe we could use two features, one for the average manhattan distance and one for matching of the total intensities to the
    // library intensities. Also maybe normalising by the max-value or the monoisotope (instead of the total sum) helps?
    manhattan = OpenSwath::manhattanDist(intExp.cbegin(), intExp.cend(), intTheor.cbegin());
    // compare against the spectrum with negative weight preIsotope peaks
    std::vector<double> intTheorNeg;
    intTheorNeg.reserve(spectrumWIsoNegPreIso.size());
    // WARNING: This was spectrumWIso and therefore with 0 preIso weights in earlier versions! Was this a bug?
    // Otherwise, we don't need the second spectrum at all.
    DIAHelpers::extractSecond(spectrumWIsoNegPreIso, intTheorNeg);
    // Sqrt does not work if we actually have negative values
    //std::transform(intTheorNeg.begin(), intTheorNeg.end(), intTheorNeg.begin(), OpenSwath::mySqrt());
    double intTheorNegEuclidNorm = OpenSwath::norm(intTheorNeg.cbegin(), intTheorNeg.cend()); // use Euclidean norm since we have negative values
    OpenSwath::normalize(intTheorNeg, intTheorNegEuclidNorm, intTheorNeg);
    // intExp is normalized already, but we can normalize again with euclidean norm to have the same norm (not sure if it makes much of a difference)
    double intExpEuclidNorm = OpenSwath::norm(intExp.cbegin(), intExp.cend());
    double intTheorEuclidNorm = OpenSwath::norm(intTheor.cbegin(), intTheor.cend());
    OpenSwath::normalize(intExp, intExpEuclidNorm, intExp);
    OpenSwath::normalize(intTheor, intTheorEuclidNorm, intTheor);
    //calculate maximum possible value and maximum negative value to rescale
    // depends on the amount of relative weight is negative
    // TODO check if it is the same amount for every spectrum, then we could leave it out.
    double negVal = (-negWeight/intTheorNegEuclidNorm) * sqrt(nrNegPeaks*lt.size());
    std::vector<double> intTheorNegBest;
    intTheorNegBest.resize(intTheorNeg.size());
    // best-case vector: positive entries scaled up, negative entries zeroed
    std::transform(intTheorNeg.begin(), intTheorNeg.end(), intTheorNegBest.begin(),
                   [&](double val){
                     if (val > 0.)
                     {
                       return val * nrNegPeaks * lt.size() * negWeight/intTheorNegEuclidNorm;
                     }
                     else
                     {
                       return 0.;
                     }
                   });
    double intTheorNegBestEuclidNorm = OpenSwath::norm(intTheorNegBest.cbegin(), intTheorNegBest.cend());
    OpenSwath::normalize(intTheorNegBest, intTheorNegBestEuclidNorm, intTheorNegBest);
    double posVal = OpenSwath::dotProd(intTheorNegBest.cbegin(), intTheorNegBest.cend(), intTheorNeg.cbegin());
    dotprod = OpenSwath::dotProd(intExp.cbegin(), intExp.cend(), intTheorNeg.cbegin());
    // linearly rescale the raw dot product from [negVal, posVal] to [-1, 1]
    //simplified: dotprod = (((dotprod - negVal) * (1. - -1.)) / (posVal - negVal)) + -1.;
    dotprod = (((dotprod - negVal) * 2.) / (posVal - negVal)) - 1.;
  }
void DiaPrescore::updateMembers_()
{
dia_extract_window_ = (double) param_.getValue(
"dia_extraction_window");
nr_isotopes_ = (int) param_.getValue("nr_isotopes");
//TODO nr_charges_ is never used???
nr_charges_ = (int) param_.getValue("nr_charges");
}
void DiaPrescore::defineDefaults()
{
defaults_.setValue("dia_extraction_window", 0.1,
"DIA extraction window in Th.");
defaults_.setMinFloat("dia_extraction_window", 0.0); //done
defaults_.setValue("nr_isotopes", 4, "nr of istopes");
defaults_.setValue("nr_charges", 4, "nr charges");
defaultsToParam_();
}
  /// Constructor setting the extraction window, isotope and charge counts directly.
  DiaPrescore::DiaPrescore(double dia_extract_window, int nr_isotopes, int nr_charges) :
    DefaultParamHandler("DIAPrescore"),
    dia_extract_window_(dia_extract_window),
    nr_isotopes_(nr_isotopes),
    nr_charges_(nr_charges)
  {
    // NOTE(review): unlike the default constructor, this overload does not call
    // defineDefaults(), so no parameter defaults are registered on the Param
    // object when constructed this way -- confirm this is intentional.
  }
  /// Default constructor; registers the default parameters
  /// (dia_extraction_window, nr_isotopes, nr_charges).
  DiaPrescore::DiaPrescore() :
    DefaultParamHandler("DIAPrescore")
  {
    defineDefaults();
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/SwathMapMassCorrection.cpp | .cpp | 33,860 | 808 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest$
// $Authors: Hannes Roest$
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/SwathMapMassCorrection.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/ML/REGRESSION/LinearRegression.h>
#include <OpenMS/ML/REGRESSION/QuadraticRegression.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/SpectrumAccessQuadMZTransforming.h>
#include <OpenMS/OPENSWATHALGO/DATAACCESS/SpectrumHelpers.h> // integrateWindow
#include <OpenMS/ANALYSIS/OPENSWATH/DIAHelper.h>
#include <fstream>
#include <algorithm>
#define SWATHMAPMASSCORRECTION_DEBUG
namespace OpenMS
{
void findBestFeature(const OpenMS::MRMFeatureFinderScoring::MRMTransitionGroupType& transition_group, double& bestRT)
{
// Find the feature with the highest score
bestRT = -1;
double highest_score = -1000;
for (const auto& mrmfeature : transition_group.getFeatures())
{
if (mrmfeature.getOverallQuality() > highest_score)
{
bestRT = mrmfeature.getRT();
highest_score = mrmfeature.getOverallQuality();
}
}
}
std::vector<OpenSwath::SwathMap> findSwathMaps(const OpenMS::MRMFeatureFinderScoring::MRMTransitionGroupType& transition_group,
const std::vector< OpenSwath::SwathMap > & swath_maps)
{
// Get the corresponding SWATH map
std::vector<OpenSwath::SwathMap> used_maps;
for (const auto& m : swath_maps)
{
if (m.lower < transition_group.getTransitions()[0].precursor_mz &&
m.upper >= transition_group.getTransitions()[0].precursor_mz)
{
used_maps.push_back(m);
}
}
return used_maps;
}
// Computes the SwathMaps for PASEF data in which windows can have the same m/z but differ by ion mobility
// NOTE: swathMap is stored as a vector to enable compatibility with downstream function calls (as SONAR) can have multiple windows
std::vector<OpenSwath::SwathMap> SwathMapMassCorrection::findSwathMapsPasef(const OpenMS::MRMFeatureFinderScoring::MRMTransitionGroupType& transition_group,
const std::vector< OpenSwath::SwathMap > & swath_maps)
{
OPENMS_PRECONDITION(transition_group.getTransitions()[0].precursor_im != -1, "All transitions must have a valid IM value (not -1)");
// Although theoretically there can be more than one map, for this case, just use the "best" map, best map is defined as the one in which the IM is closest to the center of the window
std::vector<OpenSwath::SwathMap> used_maps;
for (const auto& m : swath_maps)
{
// If precursor m/z and IM in Swath window
if (m.lower < transition_group.getTransitions()[0].precursor_mz &&
m.upper >= transition_group.getTransitions()[0].precursor_mz &&
m.imLower < transition_group.getTransitions()[0].precursor_im &&
m.imUpper >= transition_group.getTransitions()[0].precursor_im)
{
// if no other windows at this position just add it
if (used_maps.size() == 0)
{
used_maps.push_back(m);
}
else //there is another window at this position, check if the new window found is better
{
double imCenterDiffOld = std::fabs(((used_maps[0].imLower + used_maps[0].imUpper) / 2) - transition_group.getTransitions()[0].precursor_im);
double imCenterDiffNew = std::fabs(((m.imLower + m.imUpper) / 2) - transition_group.getTransitions()[0].precursor_im);
if (imCenterDiffOld > imCenterDiffNew)
{
used_maps[0] = m;
}
}
}
}
return used_maps;
}
  /// Default constructor; registers all parameters of the mass/IM calibration
  /// (extraction windows, padding factors, correction functions, debug files).
  SwathMapMassCorrection::SwathMapMassCorrection() :
    DefaultParamHandler("SwathMapMassCorrection")
  {
    // extraction windows for m/z and ion mobility (-1 disables the respective calibration)
    defaults_.setValue("mz_extraction_window", -1.0, "M/z extraction window width");
    defaults_.setValue("mz_extraction_window_ppm", "false", "Whether m/z extraction is in ppm", {"advanced"});
    defaults_.setValidStrings("mz_extraction_window_ppm", {"true","false"});
    defaults_.setValue("ms1_im_calibration", "false", "Whether to use MS1 precursor data for the ion mobility calibration (default = false, uses MS2 / fragment ions for calibration)", {"advanced"});
    defaults_.setValidStrings("ms1_im_calibration", {"true","false"});
    defaults_.setValue("im_extraction_window", -1.0, "Ion mobility extraction window width");
    // padding factors applied on top of the estimated windows (>= 1.0)
    defaults_.setValue("mz_estimation_padding_factor", 1.3, "A padding factor to multiply the estimated m/z window by. For example, a factor of 1.3 will add a 30% padding to the estimated m/z window, so if the estimated m/z window is 18, then 5.4 will be added for a total estimated m/z window of 23.4. A factor of 1.0 will not add any padding to the estimated window.");
    defaults_.setMinFloat("mz_estimation_padding_factor", 1.0);
    defaults_.setValue("im_estimation_padding_factor", 1.3, "A padding factor to multiply the estimated ion_mobility window by. For example, a factor of 1.3 will add a 30% padding to the estimated ion_mobility window, so if the estimated ion_mobility window is 0.03, then 0.009 will be added for a total estimated ion_mobility window of 0.039. A factor of 1.0 will not add any padding to the estimated window.");
    defaults_.setMinFloat("im_estimation_padding_factor", 1.0);
    // correction model selection for m/z and ion mobility
    defaults_.setValue("mz_correction_function", "none", "Type of normalization function for m/z calibration.");
    defaults_.setValidStrings("mz_correction_function", {"none","regression_delta_ppm","unweighted_regression","weighted_regression","quadratic_regression","weighted_quadratic_regression","weighted_quadratic_regression_delta_ppm","quadratic_regression_delta_ppm"});
    defaults_.setValue("im_correction_function", "linear", "Type of normalization function for IM calibration.");
    defaults_.setValidStrings("im_correction_function", {"none","linear"});
    // optional TSV debug outputs
    defaults_.setValue("debug_im_file", "", "Debug file for Ion Mobility calibration.");
    defaults_.setValue("debug_mz_file", "", "Debug file for m/z calibration.");
    // write defaults into Param object param_
    defaultsToParam_();
  }
void SwathMapMassCorrection::updateMembers_()
{
mz_extraction_window_ = (double)param_.getValue("mz_extraction_window");
mz_extraction_window_ppm_ = param_.getValue("mz_extraction_window_ppm") == "true";
ms1_im_ = param_.getValue("ms1_im_calibration") == "true";
im_extraction_window_ = (double)param_.getValue("im_extraction_window");
mz_estimation_padding_factor_ = (double)param_.getValue("mz_estimation_padding_factor");
im_estimation_padding_factor_ = (double)param_.getValue("im_estimation_padding_factor");
mz_correction_function_ = param_.getValue("mz_correction_function").toString();
im_correction_function_ = param_.getValue("im_correction_function").toString();
debug_mz_file_ = param_.getValue("debug_mz_file").toString();
debug_im_file_ = param_.getValue("debug_im_file").toString();
}
void SwathMapMassCorrection::correctIM(
const std::map<String, OpenMS::MRMFeatureFinderScoring::MRMTransitionGroupType *> & transition_group_map,
const OpenSwath::LightTargetedExperiment& targeted_exp,
const std::vector< OpenSwath::SwathMap > & swath_maps,
const bool pasef,
TransformationDescription& im_trafo)
{
bool ppm = mz_extraction_window_ppm_;
double mz_extr_window = mz_extraction_window_;
double im_extraction_win = im_extraction_window_;
double im_estimation_padding_factor = im_estimation_padding_factor_;
OPENMS_LOG_DEBUG << "SwathMapMassCorrection::correctIM " << " window " << im_extraction_win << " mz window " << mz_extr_window << " in ppm " << ppm << '\n';
if (im_extraction_win < 0)
{
return;
}
if (im_correction_function_ == "none")
{
return;
}
// if it is not none, then it must be linear
std::ofstream os_im;
if (!debug_im_file_.empty())
{
std::cout.precision(16);
os_im.open(debug_im_file_);
os_im << "mz" << "\t" << "im" << "\t" << "theo_im" << "\t" << "RT" << "\t" << "intensity\n";
os_im.precision(writtenDigits(double()));
}
std::vector<String> trgr_ids;
std::map<std::string, double> pep_im_map;
trgr_ids.reserve(transition_group_map.size());
for (const auto& trgroup_it : transition_group_map)
{
trgr_ids.push_back(trgroup_it.first);
}
for (const auto& cmp : targeted_exp.getCompounds())
{
pep_im_map[cmp.id] = cmp.drift_time;
}
TransformationDescription::DataPoints data_im;
std::vector<double> exp_im;
std::vector<double> theo_im;
// Collect MS1 IM pairs across all transition groups
std::vector<double> exp_im_ms1_all, theo_im_ms1_all;
#pragma omp parallel for
for (SignedSize k = 0; k < (SignedSize)trgr_ids.size(); k++)
{
// we need at least one feature to find the best one
auto transition_group = transition_group_map.at(trgr_ids[k]);
if (transition_group->getFeatures().empty()) continue;
// Find the feature with the highest score
double bestRT;
findBestFeature(*transition_group, bestRT);
// Get the corresponding SWATH map(s), for SONAR there will be more than one map
std::vector<OpenSwath::SwathMap> used_maps;
if (!pasef)
{
used_maps = findSwathMaps(*transition_group, swath_maps);
}
// If pasef then have to check for overlap across IM
else
{
used_maps = findSwathMapsPasef(*transition_group, swath_maps);
}
// We will collect MS1 (exp, theo) points regardless of whether ms1_im_calibration is used for fitting
std::vector<double> exp_im_ms1_local, theo_im_ms1_local;
std::vector<OpenSwath::SwathMap> ms1_maps;
for (const auto& m : swath_maps) {if (m.ms1) ms1_maps.push_back(m);}
if (used_maps.empty())
{
continue;
}
// Get the spectrum for this RT and extract raw data points for all the
// calibrating transitions (fragment m/z values) from the spectrum
// Note that we are not using light clones of the underlying data here,
// so access to the data needs to be in a critical section.
OpenSwath::SpectrumPtr sp_ms1;
OpenSwath::SpectrumPtr sp_ms2;
#pragma omp critical (fetch_spectrum)
{
RangeMobility im_range;
if (ms1_im_)
{
std::vector<OpenSwath::SpectrumPtr> fetchSpectrumArr = OpenSwathScoring().fetchSpectrumSwath(ms1_maps, bestRT, 1, im_range);
sp_ms1 = (!fetchSpectrumArr.empty()) ? fetchSpectrumArr[0] : *new(OpenSwath::SpectrumPtr);
}
else
{
std::vector<OpenSwath::SpectrumPtr> fetchSpectrumArr = OpenSwathScoring().fetchSpectrumSwath(used_maps, bestRT, 1, im_range);
sp_ms2 = (!fetchSpectrumArr.empty()) ? fetchSpectrumArr[0] : *new(OpenSwath::SpectrumPtr);
}
}
for (const auto& tr : transition_group->getTransitions())
{
if (ms1_im_) {continue;}
double intensity(0), im(0), mz(0);
RangeMZ mz_range = DIAHelpers::createMZRangePPM(tr.product_mz, mz_extr_window, ppm);
// get drift time upper/lower offset (this assumes that all chromatograms
// are derived from the same precursor with the same drift time)
auto pepref = tr.getPeptideRef();
double drift_target = pep_im_map[pepref];
RangeMobility im_range;
if (im_extraction_win != -1 ) // im_extraction_win is set
{
im_range.setMax(drift_target);
im_range.minSpanIfSingular(im_extraction_win);
}
// Check that the spectrum really has a drift time array
if (sp_ms2->getDriftTimeArray() == nullptr)
{
OPENMS_LOG_DEBUG << "Did not find a drift time array for peptide " << pepref << " at RT " << bestRT << '\n';
for (const auto& m : used_maps)
{
OPENMS_LOG_DEBUG << " -- Used maps " << m.lower << " to " << m.upper << " MS1 : " << m.ms1 << true << '\n';
}
continue;
}
DIAHelpers::integrateWindow(sp_ms2, mz, im, intensity, mz_range, im_range);
// skip empty windows
if (im <= 0)
{
continue;
}
#pragma omp critical (accum_points)
{
// store result drift time
data_im.push_back(std::make_pair(im, drift_target));
exp_im.push_back(im);
theo_im.push_back(drift_target);
if (!debug_im_file_.empty())
{
os_im << tr.precursor_mz << "\t" << im << "\t" << drift_target << "\t" << bestRT << "\t" << intensity << '\n';
}
}
OPENMS_LOG_DEBUG << tr.precursor_mz << "\t" << im << "\t" << drift_target << "\t" << bestRT << "\t" << intensity << '\n';
}
// Always collect a few MS1 IM points for window estimation (independent of ms1_im_)
// However, if there are no MS1 Maps, then we have to skip window estimation for MS1
if (!transition_group->getTransitions().empty() && !ms1_maps.empty())
{
const auto& tr0 = transition_group->getTransitions()[0];
const auto pepref0 = tr0.getPeptideRef();
const double drift_target0 = pep_im_map[pepref0];
// Define an IM window centered on the theoretical drift time of this peptide
RangeMobility im_range_ms1(drift_target0);
if (im_extraction_win != -1) im_range_ms1.minSpanIfSingular(im_extraction_win);
// Fetch the MS1 spectrum at bestRT (protect raw access in critical section)
OpenSwath::SpectrumPtr sp_ms1_collect;
#pragma omp critical (fetch_spectrum)
{
std::vector<OpenSwath::SpectrumPtr> arr_ms1 =
OpenSwathScoring().fetchSpectrumSwath(ms1_maps, bestRT, 1, im_range_ms1);
sp_ms1_collect = (!arr_ms1.empty()) ? arr_ms1[0] : OpenSwath::SpectrumPtr();
}
if (sp_ms1_collect && sp_ms1_collect->getDriftTimeArray() != nullptr)
{
double mz = 0.0, im = 0.0, intensity = 0.0;
// m/z range isn’t critical for IM; use a narrow window around precursor m/z
RangeMZ dummy = DIAHelpers::createMZRangePPM(tr0.precursor_mz, mz_extr_window, ppm);
DIAHelpers::integrateWindow(sp_ms1_collect, mz, im, intensity, dummy, im_range_ms1);
if (im > 0.0)
{
exp_im_ms1_local.push_back(im);
theo_im_ms1_local.push_back(drift_target0);
}
}
}
// Do MS1 extraction
if (!transition_group->getTransitions().empty() && ms1_im_)
{
const auto& tr = transition_group->getTransitions()[0];
double intensity(0), im(0), mz(0);
RangeMZ mz_range = DIAHelpers::createMZRangePPM(tr.precursor_mz, mz_extr_window, ppm);
// get drift time upper/lower offset (this assumes that all chromatograms
// are derived from the same precursor with the same drift time)
auto pepref = tr.getPeptideRef();
double drift_target = pep_im_map[pepref];
// do not need to check for IM because we are correcting IM
RangeMobility im_range(drift_target);
im_range.minSpanIfSingular(im_extraction_win);
// Check that the spectrum really has a drift time array
if (sp_ms1->getDriftTimeArray() == nullptr)
{
OPENMS_LOG_DEBUG << "Did not find a drift time array for peptide " << pepref << " at RT " << bestRT << '\n';
for (const auto& m : used_maps)
{
OPENMS_LOG_DEBUG << " -- Used maps " << m.lower << " to " << m.upper << " MS1 : " << m.ms1 << true << '\n';
}
continue;
}
DIAHelpers::integrateWindow(sp_ms1, mz, im, intensity, mz_range, im_range);
// skip empty windows
if (im <= 0)
{
continue;
}
#pragma omp critical (accum_points)
{
// store result drift time
data_im.push_back(std::make_pair(im, drift_target));
exp_im.push_back(im);
theo_im.push_back(drift_target);
if (!debug_im_file_.empty())
{
os_im << tr.precursor_mz << "\t" << im << "\t" << drift_target << "\t" << bestRT << "\t" << intensity << '\n';
}
}
OPENMS_LOG_DEBUG << tr.precursor_mz << "\t" << im << "\t" << drift_target << "\t" << bestRT << "\t" << intensity << '\n';
}
#pragma omp critical (accum_points)
{
exp_im_ms1_all.insert(exp_im_ms1_all.end(),
exp_im_ms1_local.begin(), exp_im_ms1_local.end());
theo_im_ms1_all.insert(theo_im_ms1_all.end(),
theo_im_ms1_local.begin(), theo_im_ms1_local.end());
}
}
if (!debug_im_file_.empty()) {os_im.close();}
if (exp_im.empty())
{
throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "UnableToFit-LinearRegression", String("Could not fit a linear model to the data (0 points)."));
}
// linear correction is default (none returns in the beginning of the function)
std::vector<double> im_regression_params;
double confidence_interval_P(0.0);
Math::LinearRegression lr;
lr.computeRegression(confidence_interval_P, exp_im.begin(), exp_im.end(), theo_im.begin()); // to convert exp_im -> theoretical im
im_regression_params.push_back(lr.getIntercept());
im_regression_params.push_back(lr.getSlope());
im_regression_params.push_back(0.0);
OPENMS_LOG_DEBUG << "# im regression parameters: Y = " << im_regression_params[0] << " + " <<
im_regression_params[1] << " X + " << im_regression_params[2] << " X^2\n";
// store IM transformation, using the selected model
im_trafo.setDataPoints(data_im);
Param model_params;
model_params.setValue("symmetric_regression", "false");
String model_type = "linear";
im_trafo.fitModel(model_type, model_params);
// Estimate MS2 ion mobility window
// Use the 0.99 quantile so the window covers ~99% of residuals, ignoring rare extremes (those that are potential outliers).
double fragment_im_window = im_trafo.estimateWindow(0.99, false, true, im_estimation_padding_factor);
setFragmentImWindow(fragment_im_window);
if (!exp_im_ms1_all.empty())
{
TransformationDescription::DataPoints ms1_points;
ms1_points.reserve(exp_im_ms1_all.size());
for (Size i = 0; i < exp_im_ms1_all.size(); ++i)
{
// (x, y) = (experimental IM, theoretical IM)
ms1_points.emplace_back(exp_im_ms1_all[i], theo_im_ms1_all[i]);
}
// Copy the fitted model; don't mutate im_trafo's datapoints
TransformationDescription im_trafo_inv = im_trafo;
im_trafo_inv.setDataPoints(ms1_points);
// Use the 0.99 quantile so the window covers ~99% of residuals, ignoring rare extremes (those that are potential outliers).
const double precursor_im_window = im_trafo_inv.estimateWindow(0.99, /*invert=*/false, /*full_window=*/true, im_estimation_padding_factor);
setPrecursorImWindow(precursor_im_window);
}
OPENMS_LOG_DEBUG << "SwathMapMassCorrection::correctIM done.\n";
}
void SwathMapMassCorrection::correctMZ(
const std::map<String, OpenMS::MRMFeatureFinderScoring::MRMTransitionGroupType *> & transition_group_map,
const OpenSwath::LightTargetedExperiment& targeted_exp,
std::vector< OpenSwath::SwathMap > & swath_maps,
const bool pasef)
{
bool ppm = mz_extraction_window_ppm_;
double mz_extr_window = mz_extraction_window_;
std::string corr_type = mz_correction_function_;
double im_extraction = im_extraction_window_;
double mz_estimation_padding_factor = mz_estimation_padding_factor_;
OPENMS_LOG_DEBUG << "SwathMapMassCorrection::correctMZ with type " << corr_type << " and window " << mz_extr_window << " in ppm " << ppm << '\n';
bool is_ppm = bool(corr_type == "quadratic_regression_delta_ppm" ||
corr_type == "weighted_quadratic_regression_delta_ppm" ||
corr_type == "regression_delta_ppm");
if (corr_type == "none")
{
return;
}
std::ofstream os;
if (!debug_mz_file_.empty())
{
std::cout.precision(16);
os.open(debug_mz_file_);
os << "mz" << "\t" << "theo_mz" << "\t" << "drift_time" << "\t" << "diff_ppm" << "\t" << "log_intensity" << "\t" << "RT\n";
os.precision(writtenDigits(double()));
}
TransformationDescription::DataPoints data_all;
std::vector<double> weights;
std::vector<double> exp_mz;
std::vector<double> theo_mz;
std::vector<double> delta_ppm;
std::map<std::string, double> pep_im_map;
for (const auto& cmp : targeted_exp.getCompounds())
{
pep_im_map[cmp.id] = cmp.drift_time;
}
// Collect MS1 residuals (Δppm) for precursor m/z window estimation
std::vector<double> delta_ppm_ms1;
// Gather MS1 maps once
std::vector<OpenSwath::SwathMap> ms1_maps;
for (const auto& m : swath_maps) if (m.ms1) ms1_maps.push_back(m);
for (auto & trgroup_it : transition_group_map)
{
// we need at least one feature to find the best one
auto transition_group = trgroup_it.second;
const auto& tr = transition_group->getTransitions()[0];
auto pepref = tr.getPeptideRef();
double drift_target = pep_im_map[pepref];
if (transition_group->getFeatures().empty()) continue;
// Find the feature with the highest score
double bestRT;
findBestFeature(*transition_group, bestRT);
// Get the corresponding SWATH map(s), for SONAR there will be more than one map
std::vector<OpenSwath::SwathMap> used_maps;
if (!pasef)
{
used_maps = findSwathMaps(*transition_group, swath_maps);
}
// If pasef then have to check for overlap across IM
else
{
used_maps = findSwathMapsPasef(*transition_group, swath_maps);
}
if (used_maps.empty())
{
continue;
}
// if ion mobility extraction window is set than extract with ion mobility
RangeMobility im_range;
if (im_extraction != -1) // ion mobility extraction is set
{
im_range.setMax(drift_target);
im_range.minSpanIfSingular(im_extraction);
}
// MS2
// Get the spectrum for this RT and extract raw data points for all the
// calibrating transitions (fragment m/z values) from the spectrum
std::vector<OpenSwath::SpectrumPtr> spArr = OpenSwathScoring().fetchSpectrumSwath(used_maps, bestRT, 1, im_range);
OpenSwath::SpectrumPtr sp = (!spArr.empty()) ? spArr[0] : *new(OpenSwath::SpectrumPtr);
for (const auto& tr : transition_group->getTransitions())
{
double mz, intensity, im;
RangeMZ mz_range = DIAHelpers::createMZRangePPM(tr.product_mz, mz_extr_window, ppm);
bool centroided = false;
// integrate spectrum at the position of the theoretical mass
DIAHelpers::integrateWindow(sp, mz, im, intensity, mz_range, im_range, centroided); // Correct using the irt_im
// skip empty windows
if (mz == -1)
{
continue;
}
// store result masses
data_all.push_back(std::make_pair(mz, tr.product_mz));
// regression weight is the log2 intensity
weights.push_back( log(intensity) / log(2.0) );
exp_mz.push_back( mz );
// y = target = theoretical
theo_mz.push_back( tr.product_mz );
double diff_ppm = (mz - tr.product_mz) * 1000000 / mz;
// y = target = delta-ppm
delta_ppm.push_back(diff_ppm);
if (!debug_mz_file_.empty())
{
os << mz << "\t" << tr.product_mz << "\t" << drift_target << "\t" << diff_ppm << "\t" << log(intensity) / log(2.0) << "\t" << bestRT << '\n';
}
OPENMS_LOG_DEBUG << mz << "\t" << tr.product_mz << "\t" << diff_ppm << "\t" << log(intensity) / log(2.0) << "\t" << bestRT << '\n';
}
// MS1 precursor processing for Δppm residuals
if (!ms1_maps.empty())
{
std::vector<OpenSwath::SpectrumPtr> spArr_ms1 =
OpenSwathScoring().fetchSpectrumSwath(ms1_maps, bestRT, 1, im_range);
OpenSwath::SpectrumPtr sp_ms1 = (!spArr_ms1.empty()) ? spArr_ms1[0] : OpenSwath::SpectrumPtr();
if (sp_ms1)
{
// Use theoretical precursor m/z for the calibrant peptide
const double theo_prec_mz = tr.precursor_mz;
RangeMZ mz_range_ms1 = DIAHelpers::createMZRangePPM(theo_prec_mz, mz_extr_window, ppm);
double mz{}, im{}, intensity{};
bool centroided = false;
DIAHelpers::integrateWindow(sp_ms1, mz, im, intensity, mz_range_ms1, im_range, centroided);
if (mz != -1) // got a signal
{
const double dppm = (mz - theo_prec_mz) / theo_prec_mz * 1e6;
delta_ppm_ms1.push_back(std::abs(dppm));
}
}
}
}
// Estimate fragment mz window
{
std::ostringstream ss;
const Size N = std::min<Size>(delta_ppm.size(), 20);
ss << "[SwathMapMassCorrection::correctMZ] MS2 residuals (first "
<< N << " of " << delta_ppm.size() << "): ";
for (Size i = 0; i < N; ++i) { if (i) ss << ", "; ss << delta_ppm[i]; }
if (delta_ppm.size() > N) ss << ", ...";
OPENMS_LOG_DEBUG << ss.str() << '\n';
}
// Use the 0.99 quantile so the window covers ~99% of residuals, ignoring rare extremes (those that are potential outliers).
double fragment_mz_window = estimateWindow(delta_ppm, 0.99, true, mz_estimation_padding_factor);
setFragmentMzWindow(fragment_mz_window);
// Estimate precursor window from MS1 residuals (full width, ppm)
if (!delta_ppm_ms1.empty())
{
std::sort(delta_ppm_ms1.begin(), delta_ppm_ms1.end());
{
std::ostringstream ss;
const Size N = std::min<Size>(delta_ppm_ms1.size(), 20);
ss << "[SwathMapMassCorrection::correctMZ] MS1 residuals (first "
<< N << " of " << delta_ppm_ms1.size() << "): ";
for (Size i = 0; i < N; ++i) { if (i) ss << ", "; ss << delta_ppm_ms1[i]; }
if (delta_ppm_ms1.size() > N) ss << ", ...";
OPENMS_LOG_DEBUG << ss.str() << '\n';
}
// Use the 0.99 quantile so the window covers ~99% of residuals, ignoring rare extremes (those that are potential outliers).
double precursor_mz_window = estimateWindow(delta_ppm_ms1, 0.99, true, mz_estimation_padding_factor);
setPrecursorMzWindow(precursor_mz_window);
}
std::vector<double> regression_params;
if (corr_type == "none" || data_all.size() < 3)
{
return;
}
else if (corr_type == "unweighted_regression")
{
double confidence_interval_P(0.0);
Math::LinearRegression lr;
lr.computeRegression(confidence_interval_P, exp_mz.begin(), exp_mz.end(), theo_mz.begin());
regression_params.push_back(lr.getIntercept());
regression_params.push_back(lr.getSlope());
regression_params.push_back(0.0);
}
else if (corr_type == "weighted_regression")
{
double confidence_interval_P(0.0);
Math::LinearRegression lr;
lr.computeRegressionWeighted(confidence_interval_P, exp_mz.begin(), exp_mz.end(), theo_mz.begin(), weights.begin());
regression_params.push_back(lr.getIntercept());
regression_params.push_back(lr.getSlope());
regression_params.push_back(0.0);
}
else if (corr_type == "quadratic_regression")
{
// Quadratic fit
Math::QuadraticRegression qr;
qr.computeRegression(exp_mz.begin(), exp_mz.end(), theo_mz.begin());
regression_params.push_back(qr.getA());
regression_params.push_back(qr.getB());
regression_params.push_back(qr.getC());
}
else if (corr_type == "weighted_quadratic_regression")
{
// Quadratic fit (weighted)
Math::QuadraticRegression qr;
qr.computeRegressionWeighted(exp_mz.begin(), exp_mz.end(), theo_mz.begin(), weights.begin());
regression_params.push_back(qr.getA());
regression_params.push_back(qr.getB());
regression_params.push_back(qr.getC());
}
else if (corr_type == "quadratic_regression_delta_ppm")
{
// Quadratic fit using ppm differences
Math::QuadraticRegression qr;
qr.computeRegression(exp_mz.begin(), exp_mz.end(), delta_ppm.begin());
regression_params.push_back(qr.getA());
regression_params.push_back(qr.getB());
regression_params.push_back(qr.getC());
}
else if (corr_type == "regression_delta_ppm")
{
// Regression fit using ppm differences
double confidence_interval_P(0.0);
Math::LinearRegression lr;
lr.computeRegression(confidence_interval_P, exp_mz.begin(), exp_mz.end(), delta_ppm.begin());
regression_params.push_back(lr.getIntercept());
regression_params.push_back(lr.getSlope());
regression_params.push_back(0.0);
}
else if (corr_type == "weighted_quadratic_regression_delta_ppm")
{
// Quadratic fit using ppm differences
Math::QuadraticRegression qr;
qr.computeRegressionWeighted(exp_mz.begin(), exp_mz.end(), delta_ppm.begin(), weights.begin());
regression_params.push_back(qr.getA());
regression_params.push_back(qr.getB());
regression_params.push_back(qr.getC());
}
else
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Unknown correction type " + corr_type);
}
OPENMS_LOG_DEBUG << "# mz regression parameters: Y = " << regression_params[0] << " + " <<
regression_params[1] << " X + " << regression_params[2] << " X^2\n";
if (!debug_mz_file_.empty()) {os.close();}
#ifdef SWATHMAPMASSCORRECTION_DEBUG
double s_ppm_before = 0;
double s_ppm_after = 0;
for (TransformationDescription::DataPoints::iterator d = data_all.begin(); d != data_all.end(); ++d)
{
double ppm_before = (d->first - d->second) * 1000000 / d->first;
double predict = d->first*d->first*regression_params[2] + d->first*regression_params[1]+regression_params[0];
double ppm_after = ( predict - d->second) * 1000000 / d->first;
if (is_ppm)
{
double new_mz = d->first - predict*d->first/1000000;
ppm_after = ( new_mz - d->second) * 1000000 / d->first;
}
s_ppm_before += std::fabs(ppm_before);
s_ppm_after += std::fabs(ppm_after);
}
OPENMS_LOG_DEBUG << "sum residual sq ppm before " << s_ppm_before << " / after " << s_ppm_after << '\n';
#endif
// Replace the swath files with a transforming wrapper.
for (SignedSize i = 0; i < boost::numeric_cast<SignedSize>(swath_maps.size()); ++i)
{
swath_maps[i].sptr = std::shared_ptr<OpenSwath::ISpectrumAccess>(
new SpectrumAccessQuadMZTransforming(swath_maps[i].sptr,
regression_params[0], regression_params[1], regression_params[2], is_ppm));
}
OPENMS_LOG_DEBUG << "SwathMapMassCorrection::correctMZ done.\n";
}
double SwathMapMassCorrection::estimateWindow(std::vector<double> residuals, double quantile, bool full_width, double padding_factor)
{
if (residuals.empty()) return 0.0;
// Ensure residuals are absolute errors
for (auto& d : residuals) d = std::abs(d);
// Adaptive half-width (Tukey k=1.5, blend from robust→raw as tail density grows 1%→10%)
// k=1.5 uses the standard Tukey upper fence (Q3 + 1.5·IQR) to cap sparse extremes proposed in Exploratory Data Analysis by John W. Tukey (1977)
// r_sparse=0.01 means if ≤1% of |residuals| exceed the fence, treat them as outliers (favor robust quantile);
// r_dense=0.10 means if ≥10% exceed the fence, tails are genuinely broad (favor raw quantile).
// These values are conservative, widely used in stats.
OpenMS::Math::AdaptiveQuantileResult adaptive_quantile_res = OpenMS::Math::adaptiveQuantile(
residuals.begin(), residuals.end(),
quantile);
const double full = (full_width ? (2.0 * adaptive_quantile_res.blended) : adaptive_quantile_res.blended) * padding_factor;
OPENMS_LOG_DEBUG
<< "[estimateWindow] n=" << residuals.size()
<< " q=" << quantile
<< " half_raw=" << adaptive_quantile_res.half_raw
<< " half_rob=" << adaptive_quantile_res.half_rob
<< " UF=" << adaptive_quantile_res.upper_fence
<< " tail_frac=" << adaptive_quantile_res.tail_fraction
<< " => half_adapt=" << adaptive_quantile_res.blended
<< " full=" << full
<< '\n';
return full;
}
  /// Returns the estimated fragment (MS2) m/z extraction window.
  double SwathMapMassCorrection::getFragmentMzWindow() const
  {
    return fragment_mz_window_;
  }
  /// Sets the fragment (MS2) m/z extraction window.
  void SwathMapMassCorrection::setFragmentMzWindow(double fragmentMzWindow)
  {
    fragment_mz_window_ = fragmentMzWindow;
  }
  /// Returns the estimated fragment (MS2) ion mobility extraction window.
  double SwathMapMassCorrection::getFragmentImWindow() const
  {
    return fragment_im_window_;
  }
  /// Sets the fragment (MS2) ion mobility extraction window.
  void SwathMapMassCorrection::setFragmentImWindow(double fragmentImWindow)
  {
    fragment_im_window_ = fragmentImWindow;
  }
  /// Returns the estimated precursor (MS1) m/z extraction window.
  double SwathMapMassCorrection::getPrecursorMzWindow() const
  {
    return precursor_mz_window_;
  }
  /// Sets the precursor (MS1) m/z extraction window.
  void SwathMapMassCorrection::setPrecursorMzWindow(double precursorMzWindow)
  {
    precursor_mz_window_ = precursorMzWindow;
  }
  /// Returns the estimated precursor (MS1) ion mobility extraction window.
  double SwathMapMassCorrection::getPrecursorImWindow() const
  {
    return precursor_im_window_;
  }
  /// Sets the precursor (MS1) ion mobility extraction window.
  void SwathMapMassCorrection::setPrecursorImWindow(double precursorImWindow)
  {
    precursor_im_window_ = precursorImWindow;
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/MRMBatchFeatureSelector.cpp | .cpp | 1,804 | 51 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Douglas McCloskey, Pasquale Domenico Colaianni $
// $Authors: Douglas McCloskey, Pasquale Domenico Colaianni $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/MRMBatchFeatureSelector.h>
#include <OpenMS/ANALYSIS/OPENSWATH/MRMFeatureSelector.h>
#include <OpenMS/KERNEL/FeatureMap.h>
namespace OpenMS
{
void MRMBatchFeatureSelector::batchMRMFeatures(
const MRMFeatureSelector& feature_selector,
const FeatureMap& features,
FeatureMap& selected_features,
const std::vector<MRMFeatureSelector::SelectorParameters>& parameters
)
{
FeatureMap input_features = features;
selected_features.clear();
for (const MRMFeatureSelector::SelectorParameters& params : parameters)
{
feature_selector.selectMRMFeature(input_features, selected_features, params);
input_features = selected_features;
}
}
  /// Convenience overload: runs batchMRMFeatures() with a
  /// MRMFeatureSelectorQMIP selector (quadratic mixed-integer programming).
  void MRMBatchFeatureSelector::batchMRMFeaturesQMIP(
    const FeatureMap& features,
    FeatureMap& selected_features,
    const std::vector<MRMFeatureSelector::SelectorParameters>& parameters
  )
  {
    MRMFeatureSelectorQMIP feature_selector;
    batchMRMFeatures(feature_selector, features, selected_features, parameters);
  }
  /// Convenience overload: runs batchMRMFeatures() with a
  /// MRMFeatureSelectorScore (score-based) selector.
  void MRMBatchFeatureSelector::batchMRMFeaturesScore(
    const FeatureMap& features,
    FeatureMap& selected_features,
    const std::vector<MRMFeatureSelector::SelectorParameters>& parameters
  )
  {
    MRMFeatureSelectorScore feature_selector;
    batchMRMFeatures(feature_selector, features, selected_features, parameters);
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/DIAScoring.cpp | .cpp | 19,349 | 437 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest, Witold Wolski $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/DIAScoring.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/CoarseIsotopePatternGenerator.h>
#include <OpenMS/FEATUREFINDER/FeatureFinderAlgorithmPickedHelperStructs.h>
#include <OpenMS/OPENSWATHALGO/ALGO/StatsHelpers.h>
#include <OpenMS/OPENSWATHALGO/DATAACCESS/SpectrumHelpers.h> // integrateWindow
#include <OpenMS/ANALYSIS/OPENSWATH/DIAHelper.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DIAPrescoring.h>
#include <OpenMS/CHEMISTRY/TheoreticalSpectrumGenerator.h>
#include <OpenMS/MATH/MathFunctions.h> // getPPM
#include <numeric>
#include <algorithm>
#include <functional>
#include <cmath> // for isnan
#include <utility>
const double C13C12_MASSDIFF_U = 1.0033548;
namespace OpenMS
{
  /// Constructor: registers all DIA scoring parameters with their defaults
  /// (extraction window/unit, centroiding, b/y-series thresholds, isotope and
  /// charge counts) and allocates the TheoreticalSpectrumGenerator used by
  /// getBYSeries (released again in the destructor).
  DIAScoring::DIAScoring() :
    DefaultParamHandler("DIAScoring")
  {
    defaults_.setValue("dia_extraction_window", 0.05, "DIA extraction window in Th or ppm.");
    defaults_.setMinFloat("dia_extraction_window", 0.0);
    defaults_.setValue("dia_extraction_unit", "Th", "DIA extraction window unit");
    defaults_.setValidStrings("dia_extraction_unit", {"Th","ppm"});
    defaults_.setValue("dia_centroided", "false", "Use centroided DIA data.");
    defaults_.setValidStrings("dia_centroided", {"true","false"});
    defaults_.setValue("dia_byseries_intensity_min", 300.0, "DIA b/y series minimum intensity to consider.");
    defaults_.setMinFloat("dia_byseries_intensity_min", 0.0);
    defaults_.setValue("dia_byseries_ppm_diff", 10.0, "DIA b/y series minimal difference in ppm to consider.");
    defaults_.setMinFloat("dia_byseries_ppm_diff", 0.0);
    defaults_.setValue("dia_nr_isotopes", 4, "DIA number of isotopes to consider.");
    defaults_.setMinInt("dia_nr_isotopes", 0);
    defaults_.setValue("dia_nr_charges", 4, "DIA number of charges to consider.");
    defaults_.setMinInt("dia_nr_charges", 0);
    defaults_.setValue("peak_before_mono_max_ppm_diff", 20.0, "DIA maximal difference in ppm to count a peak at lower m/z when searching for evidence that a peak might not be monoisotopic.");
    defaults_.setMinFloat("peak_before_mono_max_ppm_diff", 0.0);
    // write defaults into Param object param_
    defaultsToParam_();
    // for void getBYSeries
    {
      generator = new TheoreticalSpectrumGenerator();
      Param p;
      p.setValue("add_metainfo", "true",
                 "Adds the type of peaks as metainfo to the peaks, like y8+, [M-H2O+2H]++");
      generator->setParameters(p);
    }
    // for simulateSpectrumFromAASequence
    // Param p;
    // p.setValue("add_metainfo", "false",
    //            "Adds the type of peaks as metainfo to the peaks, like y8+, [M-H2O+2H]++");
    // p.setValue("add_precursor_peaks", "true", "Adds peaks of the precursor to the spectrum, which happen to occur sometimes");
    // generator->setParameters(p);
  }
  DIAScoring::~DIAScoring()
  {
    delete generator; // owned raw pointer, allocated in the constructor
  }
  /// Caches the current Param values in member variables; invoked by the
  /// DefaultParamHandler machinery whenever the parameters change.
  void DIAScoring::updateMembers_()
  {
    dia_extract_window_ = (double)param_.getValue("dia_extraction_window");
    dia_extraction_ppm_ = param_.getValue("dia_extraction_unit") == "ppm"; // true = ppm, false = Th
    dia_centroided_ = param_.getValue("dia_centroided").toBool();
    dia_byseries_intensity_min_ = (double)param_.getValue("dia_byseries_intensity_min");
    dia_byseries_ppm_diff_ = (double)param_.getValue("dia_byseries_ppm_diff");
    dia_nr_isotopes_ = (int)param_.getValue("dia_nr_isotopes");
    dia_nr_charges_ = (int)param_.getValue("dia_nr_charges");
    peak_before_mono_max_ppm_diff_ = (double)param_.getValue("peak_before_mono_max_ppm_diff");
  }
///////////////////////////////////////////////////////////////////////////
// DIA / SWATH scoring
void DIAScoring::dia_isotope_scores(const std::vector<TransitionType>& transitions, std::vector<SpectrumPtrType>& spectrum,
OpenSwath::IMRMFeature* mrmfeature, const RangeMobility& im_range, double& isotope_corr, double& isotope_overlap) const
{
isotope_corr = 0;
isotope_overlap = 0;
// first compute a map of relative intensities from the feature, then compute the score
std::map<std::string, double> intensities;
getFirstIsotopeRelativeIntensities_(transitions, mrmfeature, intensities);
diaIsotopeScoresSub_(transitions, spectrum, intensities, im_range, isotope_corr, isotope_overlap);
}
void DIAScoring::dia_massdiff_score(const std::vector<TransitionType>& transitions,
const SpectrumSequence& spectrum,
const std::vector<double>& normalized_library_intensity,
const RangeMobility& im_range,
double& ppm_score,
double& ppm_score_weighted,
std::vector<double>& diff_ppm) const
{
// Calculate the difference of the theoretical mass and the actually measured mass
ppm_score = 0;
ppm_score_weighted = 0;
diff_ppm.clear();
for (std::size_t k = 0; k < transitions.size(); k++)
{
const TransitionType& transition = transitions[k];
RangeMZ mz_range = DIAHelpers::createMZRangePPM(transition.getProductMZ(), dia_extract_window_, dia_extraction_ppm_);
double mz, intensity, im;
bool signalFound = DIAHelpers::integrateWindow(spectrum, mz, im, intensity, mz_range, im_range, dia_centroided_);
// Continue if no signal was found - we therefore don't make a statement
// about the mass difference if no signal is present.
if (!signalFound)
{
diff_ppm.push_back(-1); // if no signal is found than we set the ppm to -1
continue;
}
double ppm = Math::getPPM(mz, transition.getProductMZ());
diff_ppm.push_back(ppm);
ppm_score += std::fabs(ppm);
ppm_score_weighted += std::fabs(ppm) * normalized_library_intensity[k];
#ifdef MRMSCORING_TESTING
std::cout << " weighted int of the peak is " << mz << " diff is in ppm " << diff_ppm << " thus append " << diff_ppm * diff_ppm << " or weighted " << diff_ppm * normalized_library_intensity[k] << std::endl;
#endif
}
// FEATURE we should not punish so much when one transition is missing!
ppm_score /= transitions.size();
}
bool DIAScoring::dia_ms1_massdiff_score(double precursor_mz, const SpectrumSequence& spectrum,
const RangeMobility& im_range, double& ppm_score) const
{
ppm_score = -1;
double mz, intensity, im;
{
// Calculate the difference of the theoretical mass and the actually measured mass
RangeMZ mz_range = DIAHelpers::createMZRangePPM(precursor_mz, dia_extract_window_, dia_extraction_ppm_);
bool signalFound = DIAHelpers::integrateWindow(spectrum, mz, im, intensity, mz_range, im_range, dia_centroided_);
// Catch if no signal was found and replace it with the most extreme
// value. Otherwise, calculate the difference in ppm.
if (!signalFound)
{
ppm_score = Math::getPPMAbs(precursor_mz + mz_range.getSpan(), precursor_mz);
return false;
}
else
{
ppm_score = Math::getPPMAbs(mz, precursor_mz);
return true;
}
}
}
/// Precursor isotope scores
void DIAScoring::dia_ms1_isotope_scores(double precursor_mz, const std::vector<SpectrumPtrType>& spectrum,
RangeMobility& im_range, double& isotope_corr, double& isotope_overlap, const EmpiricalFormula& sum_formula) const
{
// although precursor_mz can be received from the empirical formula (if non-empty), the actual precursor could be
// slightly different. And also for compounds, usually the neutral sum_formula without adducts is given.
// Therefore calculate the isotopes based on the formula but place them at precursor_mz
std::vector<double> isotopes_int;
getIsotopeIntysFromExpSpec_(precursor_mz, spectrum, sum_formula.getCharge(), im_range, isotopes_int);
double max_ratio = 0;
int nr_occurrences = 0;
// calculate the scores:
// isotope correlation (forward) and the isotope overlap (backward) scores
isotope_corr = scoreIsotopePattern_(isotopes_int, sum_formula);
largePeaksBeforeFirstIsotope_(spectrum, precursor_mz, isotopes_int[0], nr_occurrences, max_ratio, im_range);
isotope_overlap = max_ratio;
}
  /// Extracts the measured intensities at the expected isotope positions
  /// (precursor_mz + iso * C13/C12 mass difference / |charge|) from the
  /// spectrum and appends them to isotopes_int (dia_nr_isotopes_ + 1 values,
  /// starting with the monoisotopic position).
  void DIAScoring::getIsotopeIntysFromExpSpec_(double precursor_mz, const SpectrumSequence& spectrum, int charge_state, const RangeMobility& im_range,
                                               std::vector<double>& isotopes_int) const
  {
    // NOTE(review): a charge_state of 0 makes abs_charge 0 and the isotope
    // spacing infinite -- presumably callers always pass non-zero charges;
    // confirm (dia_ms1_isotope_scores passes sum_formula.getCharge()).
    double abs_charge = std::fabs(static_cast<double>(charge_state));
    for (int iso = 0; iso <= dia_nr_isotopes_; ++iso)
    {
      RangeMZ mz_range = DIAHelpers::createMZRangePPM(precursor_mz + iso * C13C12_MASSDIFF_U / abs_charge, dia_extract_window_, dia_extraction_ppm_);
      // mz and im are written as a side effect; only the intensity is kept
      double mz, intensity, im;
      DIAHelpers::integrateWindow(spectrum, mz, im, intensity, mz_range, im_range, dia_centroided_);
      isotopes_int.push_back(intensity);
    }
  }
void DIAScoring::dia_ms1_isotope_scores_averagine(double precursor_mz, const SpectrumSequence& spectrum, int charge_state, RangeMobility& im_range,
double& isotope_corr, double& isotope_overlap) const
{
std::vector<double> exp_isotopes_int;
getIsotopeIntysFromExpSpec_(precursor_mz, spectrum, charge_state, im_range, exp_isotopes_int);
CoarseIsotopePatternGenerator solver(dia_nr_isotopes_ + 1);
// NOTE: this is a rough estimate of the neutral mz value since we would not know the charge carrier for negative ions
IsotopeDistribution isotope_dist = solver.estimateFromPeptideWeight(std::fabs(precursor_mz * charge_state));
double max_ratio;
int nr_occurrences;
// calculate the scores:
// isotope correlation (forward) and the isotope overlap (backward) scores
isotope_corr = scoreIsotopePattern_(exp_isotopes_int, isotope_dist);
largePeaksBeforeFirstIsotope_(spectrum, precursor_mz, exp_isotopes_int[0], nr_occurrences, max_ratio, im_range);
isotope_overlap = max_ratio;
}
void DIAScoring::dia_by_ion_score(const SpectrumSequence& spectrum,
AASequence& sequence, int charge, const RangeMobility& im_range, double& bseries_score,
double& yseries_score) const
{
bseries_score = 0;
yseries_score = 0;
OPENMS_PRECONDITION(charge > 0, "Charge is a positive integer"); // for peptides, charge should be positive
double mz, intensity, im;
std::vector<double> yseries, bseries;
OpenMS::DIAHelpers::getBYSeries(sequence, bseries, yseries, generator, charge);
for (const auto& b_ion_mz : bseries)
{
RangeMZ mz_range = DIAHelpers::createMZRangePPM(b_ion_mz, dia_extract_window_, dia_extraction_ppm_);
bool signalFound = DIAHelpers::integrateWindow(spectrum, mz, im, intensity, mz_range, im_range, dia_centroided_);
double ppmdiff = Math::getPPMAbs(mz, b_ion_mz);
if (signalFound && ppmdiff < dia_byseries_ppm_diff_ && intensity > dia_byseries_intensity_min_)
{
bseries_score++;
}
}
for (const auto& y_ion_mz : yseries)
{
RangeMZ mz_range = DIAHelpers::createMZRangePPM(y_ion_mz, dia_extract_window_, dia_extraction_ppm_);
bool signalFound = DIAHelpers::integrateWindow(spectrum, mz, im, intensity, mz_range, im_range, dia_centroided_);
double ppmdiff = Math::getPPMAbs(mz, y_ion_mz);
if (signalFound && ppmdiff < dia_byseries_ppm_diff_ && intensity > dia_byseries_intensity_min_)
{
yseries_score++;
}
}
}
  /// Computes isotope-aware dot-product and Manhattan distance scores by
  /// delegating to DiaPrescore, configured with the current extraction window,
  /// isotope count and charge count settings.
  void DIAScoring::score_with_isotopes(SpectrumSequence& spectrum, const std::vector<TransitionType>& transitions, const RangeMobility& im_range, double& dotprod, double& manhattan) const
  {
    OpenMS::DiaPrescore dp(dia_extract_window_, dia_nr_isotopes_, dia_nr_charges_);
    dp.score(spectrum, transitions, im_range, dotprod, manhattan);
  }
///////////////////////////////////////////////////////////////////////////
// Private methods
/// computes a vector of relative intensities for each feature (output to intensities)
void DIAScoring::getFirstIsotopeRelativeIntensities_(
const std::vector<TransitionType>& transitions,
OpenSwath::IMRMFeature* mrmfeature, std::map<std::string, double>& intensities) const
{
for (Size k = 0; k < transitions.size(); k++)
{
std::string native_id = transitions[k].getNativeID();
double rel_intensity = mrmfeature->getFeature(native_id)->getIntensity() / mrmfeature->getIntensity();
intensities.insert(std::pair<std::string, double>(native_id, rel_intensity));
}
}
  /// Accumulates the fragment isotope scores over all transitions: for each
  /// transition the measured isotope envelope is extracted from the spectrum,
  /// correlated with an averagine pattern (forward score, weighted by the
  /// transition's relative intensity) and checked for peaks before the
  /// monoisotopic position (backward/overlap score, also weighted).
  void DIAScoring::diaIsotopeScoresSub_(const std::vector<TransitionType>& transitions, const SpectrumSequence& spectrum,
                                        std::map<std::string, double>& intensities, //relative intensities
                                        const RangeMobility& im_range,
                                        double& isotope_corr,
                                        double& isotope_overlap) const
  {
    std::vector<double> isotopes_int;
    double max_ratio;
    int nr_occurences;
    for (Size k = 0; k < transitions.size(); k++)
    {
      isotopes_int.clear();
      const String native_id = transitions[k].getNativeID();
      double rel_intensity = intensities[native_id];
      // If no charge is given, we assume it to be 1
      int putative_fragment_charge = 1;
      if (transitions[k].fragment_charge != 0)
      {
        putative_fragment_charge = transitions[k].fragment_charge;
      }
      // collect the potential isotopes of this peak
      double abs_charge = std::fabs(static_cast<double>(putative_fragment_charge));
      for (int iso = 0; iso <= dia_nr_isotopes_; ++iso)
      {
        RangeMZ mz_range = DIAHelpers::createMZRangePPM(transitions[k].getProductMZ() + iso * C13C12_MASSDIFF_U / abs_charge, dia_extract_window_, dia_extraction_ppm_);
        double mz, intensity, im;
        DIAHelpers::integrateWindow(spectrum, mz, im, intensity, mz_range, im_range, dia_centroided_);
        isotopes_int.push_back(intensity);
      }
      // calculate the scores:
      // isotope correlation (forward) and the isotope overlap (backward) scores
      double score = scoreIsotopePattern_(isotopes_int, transitions[k].getProductMZ(), putative_fragment_charge);
      isotope_corr += score * rel_intensity;
      largePeaksBeforeFirstIsotope_(spectrum, transitions[k].getProductMZ(), isotopes_int[0], nr_occurences, max_ratio, im_range);
      // note: the *count* of large peaks (not max_ratio) is accumulated here
      isotope_overlap += nr_occurences * rel_intensity;
    }
  }
void DIAScoring::largePeaksBeforeFirstIsotope_(const SpectrumSequence& spectrum, double mono_mz, double mono_int, int& nr_occurences, double& max_ratio, const RangeMobility& im_range) const
{
double mz, intensity, im;
nr_occurences = 0;
max_ratio = 0.0;
for (int ch = 1; ch <= dia_nr_charges_; ++ch)
{
double center = mono_mz - C13C12_MASSDIFF_U / (double) ch;
RangeMZ mz_range = DIAHelpers::createMZRangePPM(center, dia_extract_window_, dia_extraction_ppm_);
bool signalFound = DIAHelpers::integrateWindow(spectrum, mz, im, intensity, mz_range, im_range, dia_centroided_);
// Continue if no signal was found - we therefore don't make a statement
// about the mass difference if no signal is present.
if (!signalFound)
{
continue;
}
// Compute ratio between the (presumed) monoisotopic peak intensity and the now found peak
double ratio;
if (mono_int != 0)
{
ratio = intensity / mono_int;
}
else
{
ratio = 0;
}
if (ratio > max_ratio) {max_ratio = ratio;}
double ddiff_ppm = std::fabs(mz - center) * 1e6 / center;
// FEATURE we should fit a theoretical distribution to see whether we really are a secondary peak
if (ratio > 1 && ddiff_ppm < peak_before_mono_max_ppm_diff_)
{
//isotope_overlap += 1.0 * rel_intensity;
nr_occurences += 1; // we count how often this happens...
#ifdef MRMSCORING_TESTING
cout << " _ overlap diff ppm " << ddiff_ppm << " and inten ratio " << ratio << " with " << mono_int << endl;
#endif
}
}
}
double DIAScoring::scoreIsotopePattern_(const std::vector<double>& isotopes_int,
double product_mz,
int putative_fragment_charge) const
{
OPENMS_PRECONDITION(putative_fragment_charge != 0, "Charge needs to be set to != 0"); // charge can be positive and negative
IsotopeDistribution isotope_dist;
// create the theoretical distribution from the peptide weight
CoarseIsotopePatternGenerator solver(dia_nr_isotopes_ + 1);
// NOTE: this is a rough estimate of the neutral mz value since we would not know the charge carrier for negative ions
isotope_dist = solver.estimateFromPeptideWeight(std::fabs(product_mz * putative_fragment_charge));
return scoreIsotopePattern_(isotopes_int, isotope_dist);
} //end of dia_isotope_corr_sub
  /// Scores a measured isotope envelope against the exact isotope distribution
  /// of the given empirical formula (dia_nr_isotopes_ + 1 isotopes).
  double DIAScoring::scoreIsotopePattern_(const std::vector<double>& isotopes_int,
                                          const EmpiricalFormula& empf) const
  {
    return scoreIsotopePattern_(isotopes_int,
                                empf.getIsotopeDistribution(CoarseIsotopePatternGenerator(dia_nr_isotopes_ + 1)));
  }
double DIAScoring::scoreIsotopePattern_(const std::vector<double>& isotopes_int,
const IsotopeDistribution& isotope_dist) const
{
typedef OpenMS::FeatureFinderAlgorithmPickedHelperStructs::TheoreticalIsotopePattern TheoreticalIsotopePattern;
TheoreticalIsotopePattern isotopes;
for (IsotopeDistribution::ConstIterator it = isotope_dist.begin(); it != isotope_dist.end(); ++it)
{
isotopes.intensity.push_back(it->getIntensity());
}
isotopes.optional_begin = 0;
isotopes.optional_end = dia_nr_isotopes_;
// scale the distribution to a maximum of 1
double max = 0.0;
for (Size i = 0; i < isotopes.intensity.size(); ++i)
{
if (isotopes.intensity[i] > max)
{
max = isotopes.intensity[i];
}
}
isotopes.max = max;
if (max == 0.) max = 1.;
for (Size i = 0; i < isotopes.intensity.size(); ++i)
{
isotopes.intensity[i] /= max;
}
isotopes.trimmed_left = 0;
// score the pattern against a theoretical one
OPENMS_POSTCONDITION(isotopes_int.size() == isotopes.intensity.size(), "Vectors for pearson correlation do not have the same size.");
double int_score = OpenSwath::cor_pearson(isotopes_int.begin(), isotopes_int.end(), isotopes.intensity.begin());
if (std::isnan(int_score))
{
int_score = 0;
}
return int_score;
} //end of dia_isotope_corr_sub
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/SpectrumAddition.cpp | .cpp | 11,893 | 314 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/SpectrumAddition.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/PROCESSING/RESAMPLING/LinearResamplerAlign.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/DataAccessHelper.h>
#include <numeric> // std::iota
namespace OpenMS
{
  /// Sorts all (parallel) data arrays of the spectrum by ascending m/z, in
  /// place, by first computing the sort permutation of the m/z array and then
  /// applying it to every data array with swaps only.
  void SpectrumAddition::sortSpectrumByMZ(OpenSwath::Spectrum& spec)
  {
    // Based off of https://stackoverflow.com/questions/1577475/c-sorting-and-keeping-track-of-indexes/43922758#43922758
    //initialize
    //std::vector<std::pair<double, Size> > sorted_indices;
    std::vector<size_t> sorted_indices(spec.getMZArray()->data.size());
    std::iota(sorted_indices.begin(), sorted_indices.end(), 0);
    // sort indexes based on comparing values in v
    // using std::stable_sort instead of std::sort
    // to avoid unnecessary index re-orderings
    // when v contains elements of equal values
    // get the ordering of the indices where mz is sorted
    OpenSwath::BinaryDataArrayPtr mzArr = spec.getMZArray();
    std::stable_sort(sorted_indices.begin(), sorted_indices.end(),
        [mzArr](size_t i1, size_t i2) {return mzArr->data[i1] < mzArr->data[i2];});
    // apply sorting across all arrays
    for (auto& da : spec.getDataArrays() )
    {
      if (da->data.empty()) continue;
      // Apply the permutation in place: positions < i already hold their
      // final values, so chase the index chain until it reaches an element
      // (>= i) that has not been placed yet, then swap it into position i.
      for (Size i = 0; i < sorted_indices.size(); ++i)
      {
        auto j = sorted_indices[i];
        while (j<i) j = sorted_indices[j];
        std::swap(da->data[i], da->data[j]);
      }
    }
    OPENMS_POSTCONDITION( std::adjacent_find(spec.getMZArray()->data.begin(),
        spec.getMZArray()->data.end(), std::greater<double>()) == spec.getMZArray()->data.end(),
        "Postcondition violated: m/z vector needs to be sorted!" )
  }
  /// Adds up a set of spectra by resampling them onto a common, equidistant
  /// m/z grid and summing the intensities at each grid point.
  ///
  /// @param all_spectra   Spectra to add (m/z + intensity arrays)
  /// @param sampling_rate Grid spacing; a negative value means "estimate it
  ///                      as the smallest observed m/z spacing in the input"
  /// @param filter_zeros  If true, zero-intensity grid points are removed
  /// @return The summed spectrum (empty if no usable input)
  OpenSwath::SpectrumPtr SpectrumAddition::addUpSpectra(const SpectrumSequence& all_spectra, double sampling_rate, bool filter_zeros)
  {
    if (all_spectra.size() == 1) return all_spectra[0];
    if (all_spectra.empty())
    {
      OpenSwath::SpectrumPtr sptr(new OpenSwath::Spectrum);
      return sptr;
    }
    // ensure first one is not empty
    // NOTE(review): if the *first* spectrum is empty, an empty result is
    // returned even when later spectra contain data -- confirm intended.
    if (all_spectra[0]->getMZArray()->data.empty() )
    {
      OpenSwath::SpectrumPtr sptr(new OpenSwath::Spectrum);
      return sptr;
    }
    // find global min and max -> use as start/endpoints for resampling
    double min = all_spectra[0]->getMZArray()->data[0];
    double max = all_spectra[0]->getMZArray()->data.back();
    double min_spacing = max - min;
    for (Size i = 0; i < all_spectra.size(); i++)
    {
      if (all_spectra[i]->getMZArray()->data.empty() )
      {
        continue;
      }
      // estimate sampling rate: smallest adjacent m/z spacing; this inner
      // loop only runs while no explicit rate was given (sampling_rate < 0)
      for (Size k = 0; k < all_spectra[i]->getMZArray()->data.size() && sampling_rate < 0; k++)
      {
        if (k > 0)
        {
          if (min_spacing > all_spectra[i]->getMZArray()->data[k] - all_spectra[i]->getMZArray()->data[k-1] )
          {
            min_spacing = all_spectra[i]->getMZArray()->data[k] - all_spectra[i]->getMZArray()->data[k-1];
          }
        }
      }
      if (all_spectra[i]->getMZArray()->data[0] < min)
      {
        min = all_spectra[i]->getMZArray()->data[0];
      }
      if (all_spectra[i]->getMZArray()->data.back() > max)
      {
        max = all_spectra[i]->getMZArray()->data.back();
      }
    }
    // in case we are asked to estimate the resampling rate
    if (sampling_rate < 0) sampling_rate = min_spacing;
    // generate the resampled peaks at positions origin+i*spacing_
    int number_resampled_points = (max - min) / sampling_rate + 1;
    OpenSwath::SpectrumPtr resampled_peak_container(new OpenSwath::Spectrum);
    resampled_peak_container->getMZArray()->data.resize(number_resampled_points);
    resampled_peak_container->getIntensityArray()->data.resize(number_resampled_points);
    std::vector<double>::iterator it = resampled_peak_container->getMZArray()->data.begin();
    int cnt = 0;
    while (it != resampled_peak_container->getMZArray()->data.end())
    {
      *it = min + cnt * sampling_rate; // set mz (intensity is zero already)
      ++it;
      ++cnt;
    }
    LinearResamplerAlign lresampler;
    // resample all spectra and add to master spectrum
    for (Size curr_sp = 0; curr_sp < all_spectra.size(); curr_sp++)
    {
      lresampler.raster(all_spectra[curr_sp]->getMZArray()->data.begin(),
                        all_spectra[curr_sp]->getMZArray()->data.end(),
                        all_spectra[curr_sp]->getIntensityArray()->data.begin(),
                        all_spectra[curr_sp]->getIntensityArray()->data.end(),
                        resampled_peak_container->getMZArray()->data.begin(),
                        resampled_peak_container->getMZArray()->data.end(),
                        resampled_peak_container->getIntensityArray()->data.begin(),
                        resampled_peak_container->getIntensityArray()->data.end()
      );
    }
    if (!filter_zeros)
    {
      OPENMS_POSTCONDITION(std::adjacent_find(resampled_peak_container->getMZArray()->data.begin(),
          resampled_peak_container->getMZArray()->data.end(), std::greater<double>()) == resampled_peak_container->getMZArray()->data.end(),
          "Postcondition violated: m/z vector needs to be sorted!" )
      return resampled_peak_container;
    }
    else
    {
      // drop grid points that received no intensity from any input spectrum
      OpenSwath::SpectrumPtr master_spectrum_filtered(new OpenSwath::Spectrum);
      for (Size i = 0; i < resampled_peak_container->getIntensityArray()->data.size(); ++i)
      {
        if (resampled_peak_container->getIntensityArray()->data[i] > 0)
        {
          master_spectrum_filtered->getIntensityArray()->data.push_back(resampled_peak_container->getIntensityArray()->data[i]);
          master_spectrum_filtered->getMZArray()->data.push_back(resampled_peak_container->getMZArray()->data[i]);
        }
      }
      OPENMS_POSTCONDITION( std::adjacent_find(master_spectrum_filtered->getMZArray()->data.begin(),
          master_spectrum_filtered->getMZArray()->data.end(), std::greater<double>()) == master_spectrum_filtered->getMZArray()->data.end(),
          "Postcondition violated: m/z vector needs to be sorted!" )
      return master_spectrum_filtered;
    }
  }
OpenSwath::SpectrumPtr SpectrumAddition::addUpSpectra(const SpectrumSequence& all_spectra,
const RangeMobility& im_range,
double sampling_rate,
bool filter_zeros)
{
OpenSwath::SpectrumPtr added_spec(new OpenSwath::Spectrum);
// If no spectra found return
if (all_spectra.empty())
{
return added_spec;
}
if (im_range.isEmpty())
{
return addUpSpectra(all_spectra, sampling_rate, filter_zeros);
}
// since resampling is not supported on 3D data first filter by drift time (if possible) and then add
// (!im_range.isEmpty())
SpectrumSequence filteredSpectra;
for (auto spec: all_spectra)
{
filteredSpectra.push_back(OpenSwath::ISpectrumAccess::filterByDrift(spec, im_range.getMin(), im_range.getMax()));
}
return addUpSpectra(filteredSpectra, sampling_rate, filter_zeros);
}
OpenSwath::SpectrumPtr SpectrumAddition::concatenateSpectra(const SpectrumSequence& all_spectra)
{
OpenSwath::SpectrumPtr added_spec(new OpenSwath::Spectrum);
// Ensure that we have the same number of data arrays as in the input spectrum
// copying the extra data arrays descriptions onto the added spectra
if (!all_spectra.empty() && all_spectra[0]->getDataArrays().size() > 2)
{
for (Size k = 2; k < all_spectra[0]->getDataArrays().size(); k++)
{
OpenSwath::BinaryDataArrayPtr tmp (new OpenSwath::BinaryDataArray());
tmp->description = all_spectra[0]->getDataArrays()[k]->description;
added_spec->getDataArrays().push_back(tmp);
}
}
// Simply concatenate all spectra together and sort in the end
for (const auto& s : all_spectra)
{
for (Size k = 0; k < s->getDataArrays().size(); k++)
{
auto& v1 = added_spec->getDataArrays()[k]->data;
auto& v2 = s->getDataArrays()[k]->data;
v1.reserve( v1.size() + v2.size() );
v1.insert( v1.end(), v2.begin(), v2.end() );
}
}
sortSpectrumByMZ(*added_spec);
return added_spec;
}
OpenMS::MSSpectrum SpectrumAddition::addUpSpectra(const std::vector<MSSpectrum>& all_spectra, double sampling_rate, bool filter_zeros)
{
OPENMS_PRECONDITION(all_spectra.empty() || all_spectra[0].getFloatDataArrays().empty(), "Can only resample spectra with 2 data dimensions (no ion mobility spectra)")
if (all_spectra.size() == 1) return all_spectra[0];
if (all_spectra.empty()) return MSSpectrum();
// ensure first one is not empty
if (all_spectra[0].empty() ) return MSSpectrum();
// find global min and max -> use as start/endpoints for resampling
double min = all_spectra[0][0].getMZ();
double max = all_spectra[0][all_spectra[0].size()-1].getMZ();
double min_spacing = max - min;
for (Size i = 0; i < all_spectra.size(); i++)
{
if (all_spectra[i].empty())
{
continue;
}
// estimate sampling rate
for (Size k = 0; k < all_spectra[i].size() && sampling_rate < 0; k++)
{
if (k > 0)
{
if (min_spacing > all_spectra[i][k].getMZ() - all_spectra[i][k-1].getMZ() )
{
min_spacing = all_spectra[i][k].getMZ() - all_spectra[i][k-1].getMZ();
}
}
if (all_spectra[i][k].getMZ() < min) min = all_spectra[i][k].getMZ();
if (all_spectra[i][k].getMZ() > max) max = all_spectra[i][k].getMZ();
}
if (all_spectra[i][0].getMZ() < min) min = all_spectra[i][0].getMZ();
if (all_spectra[i][ all_spectra[i].size() -1].getMZ() > max) max = all_spectra[i][ all_spectra[i].size() -1].getMZ();
}
// in case we are asked to estimate the resampling rate
if (sampling_rate < 0) sampling_rate = min_spacing;
// generate the resampled peaks at positions origin+i*spacing_
int number_resampled_points = (max - min) / sampling_rate + 1;
MSSpectrum resampled_peak_container;
resampled_peak_container.resize(number_resampled_points);
MSSpectrum::iterator it = resampled_peak_container.begin();
for (int i = 0; i < number_resampled_points; ++i)
{
it->setMZ(min + i * sampling_rate);
it->setIntensity(0);
++it;
}
// resample all spectra and add to master spectrum
LinearResamplerAlign lresampler;
MSSpectrum master_spectrum = resampled_peak_container;
for (Size curr_sp = 0; curr_sp < all_spectra.size(); curr_sp++)
{
MSSpectrum input_spectrum;
MSSpectrum output_spectrum = resampled_peak_container;
lresampler.raster(all_spectra[curr_sp].begin(), all_spectra[curr_sp].end(), output_spectrum.begin(), output_spectrum.end());
// add to master spectrum
for (Size i = 0; i < output_spectrum.size(); ++i)
{
master_spectrum[i].setIntensity(master_spectrum[i].getIntensity() + output_spectrum[i].getIntensity());
}
}
if (!filter_zeros)
{
return master_spectrum;
}
else
{
MSSpectrum master_spectrum_filtered;
for (Size i = 0; i < master_spectrum.size(); ++i)
{
if (master_spectrum[i].getIntensity() > 0)
{
master_spectrum_filtered.push_back(master_spectrum[i]);
}
}
return master_spectrum_filtered;
}
}
}
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Douglas McCloskey, Pasquale Domenico Colaianni $
// $Authors: Douglas McCloskey, Pasquale Domenico Colaianni $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/MRMFeatureFilter.h>
#include <OpenMS/ANALYSIS/OPENSWATH/MRMFeatureQC.h>
#include <OpenMS/ANALYSIS/QUANTITATION/AbsoluteQuantitationMethod.h>
#include <OpenMS/ANALYSIS/MRM/ReactionMonitoringTransition.h>
#include <OpenMS/ANALYSIS/TARGETED/TargetedExperiment.h>
#include <OpenMS/KERNEL/MRMFeature.h>
#include <OpenMS/KERNEL/Feature.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/DATASTRUCTURES/DefaultParamHandler.h>
#include <OpenMS/CONCEPT/LogStream.h>
namespace OpenMS
{
  // Constructor: registers this handler's name with DefaultParamHandler and
  // fills the parameter object with the filter's default parameters.
  MRMFeatureFilter::MRMFeatureFilter() :
    DefaultParamHandler("MRMFeatureFilter")
  {
    getDefaultParameters(defaults_); // populate defaults_ first ...
    defaultsToParam_(); // write defaults into Param object param_
  }
  // Destructor: no resources owned beyond what the base classes manage.
  MRMFeatureFilter::~MRMFeatureFilter() = default;
void MRMFeatureFilter::getDefaultParameters(Param& params) const
{
params.clear();
params.setValue("flag_or_filter", "flag", "Flag or Filter (i.e., remove) Components or transitions that do not pass the QC.", {"advanced"});
params.setValidStrings("flag_or_filter", {"flag","filter"});
}
  // Synchronize cached members with the current values in param_.
  void MRMFeatureFilter::updateMembers_()
  {
    // cached so the filter methods don't have to query param_ repeatedly
    flag_or_filter_ = param_.getValue("flag_or_filter").toString();
  }
void MRMFeatureFilter::FilterFeatureMap(FeatureMap& features,
const MRMFeatureQC& filter_criteria,
const TargetedExperiment& transitions
)
{
// initialize QC variables
FeatureMap features_filtered;
// iterate through each component_group/feature
for (size_t feature_it = 0; feature_it < features.size(); ++feature_it)
{
String component_group_name = (String)features.at(feature_it).getMetaValue("PeptideRef");
std::map<String, int> labels_and_transition_types = countLabelsAndTransitionTypes(features.at(feature_it), transitions);
// initialize the new feature and subordinates
std::vector<Feature> subordinates_filtered;
bool cg_qc_pass = true;
StringList cg_qc_fail_message_vec;
UInt cg_tests_count{ 0 };
// iterate through each component/sub-feature
for (size_t sub_it = 0; sub_it < features.at(feature_it).getSubordinates().size(); ++sub_it)
{
String component_name = (String)features.at(feature_it).getSubordinates().at(sub_it).getMetaValue("native_id");
bool c_qc_pass = true;
StringList c_qc_fail_message_vec;
// iterate through multi-feature/multi-sub-feature QCs/filters
// iterate through component_groups
for (size_t cg_qc_it = 0; cg_qc_it < filter_criteria.component_group_qcs.size(); ++cg_qc_it)
{
if (filter_criteria.component_group_qcs.at(cg_qc_it).component_group_name == component_group_name)
{
const double rt = features.at(feature_it).getRT();
if (!checkRange(rt,
filter_criteria.component_group_qcs.at(cg_qc_it).retention_time_l,
filter_criteria.component_group_qcs.at(cg_qc_it).retention_time_u))
{
cg_qc_pass = false;
cg_qc_fail_message_vec.push_back("retention_time");
}
const double intensity = features.at(feature_it).getIntensity();
if (!checkRange(intensity,
filter_criteria.component_group_qcs.at(cg_qc_it).intensity_l,
filter_criteria.component_group_qcs.at(cg_qc_it).intensity_u))
{
cg_qc_pass = false;
cg_qc_fail_message_vec.push_back("intensity");
}
const double quality = features.at(feature_it).getOverallQuality();
if (!checkRange(quality,
filter_criteria.component_group_qcs.at(cg_qc_it).overall_quality_l,
filter_criteria.component_group_qcs.at(cg_qc_it).overall_quality_u))
{
cg_qc_pass = false;
cg_qc_fail_message_vec.push_back("overall_quality");
}
// labels and transition counts QC
if (!checkRange(labels_and_transition_types["n_heavy"],
filter_criteria.component_group_qcs.at(cg_qc_it).n_heavy_l,
filter_criteria.component_group_qcs.at(cg_qc_it).n_heavy_u))
{
cg_qc_pass = false;
cg_qc_fail_message_vec.push_back("n_heavy");
}
if (!checkRange(labels_and_transition_types["n_light"],
filter_criteria.component_group_qcs.at(cg_qc_it).n_light_l,
filter_criteria.component_group_qcs.at(cg_qc_it).n_light_u))
{
cg_qc_pass = false;
cg_qc_fail_message_vec.push_back("n_light");
}
if (!checkRange(labels_and_transition_types["n_detecting"],
filter_criteria.component_group_qcs.at(cg_qc_it).n_detecting_l,
filter_criteria.component_group_qcs.at(cg_qc_it).n_detecting_u))
{
cg_qc_pass = false;
cg_qc_fail_message_vec.push_back("n_detecting");
}
if (!checkRange(labels_and_transition_types["n_quantifying"],
filter_criteria.component_group_qcs.at(cg_qc_it).n_quantifying_l,
filter_criteria.component_group_qcs.at(cg_qc_it).n_quantifying_u))
{
cg_qc_pass = false;
cg_qc_fail_message_vec.push_back("n_quantifying");
}
if (!checkRange(labels_and_transition_types["n_identifying"],
filter_criteria.component_group_qcs.at(cg_qc_it).n_identifying_l,
filter_criteria.component_group_qcs.at(cg_qc_it).n_identifying_u))
{
cg_qc_pass = false;
cg_qc_fail_message_vec.push_back("n_identifying");
}
if (!checkRange(labels_and_transition_types["n_transitions"],
filter_criteria.component_group_qcs.at(cg_qc_it).n_transitions_l,
filter_criteria.component_group_qcs.at(cg_qc_it).n_transitions_u))
{
cg_qc_pass = false;
cg_qc_fail_message_vec.push_back("n_transitions");
}
cg_tests_count += 9;
// ion ratio QC
for (size_t sub_it2 = 0; sub_it2 < features.at(feature_it).getSubordinates().size(); ++sub_it2)
{
String component_name2 = (String)features.at(feature_it).getSubordinates().at(sub_it2).getMetaValue("native_id");
// find the ion ratio pair
if (!filter_criteria.component_group_qcs.at(cg_qc_it).ion_ratio_pair_name_1.empty()
&& !filter_criteria.component_group_qcs.at(cg_qc_it).ion_ratio_pair_name_2.empty()
&& filter_criteria.component_group_qcs.at(cg_qc_it).ion_ratio_pair_name_1 == component_name
&& filter_criteria.component_group_qcs.at(cg_qc_it).ion_ratio_pair_name_2 == component_name2)
{
double ion_ratio = calculateIonRatio(features.at(feature_it).getSubordinates().at(sub_it), features.at(feature_it).getSubordinates().at(sub_it2), filter_criteria.component_group_qcs.at(cg_qc_it).ion_ratio_feature_name);
if (!checkRange(ion_ratio,
filter_criteria.component_group_qcs.at(cg_qc_it).ion_ratio_l,
filter_criteria.component_group_qcs.at(cg_qc_it).ion_ratio_u))
{
cg_qc_pass = false;
cg_qc_fail_message_vec.push_back("ion_ratio_pair[" + component_name + "/" + component_name2 + "]");
}
++cg_tests_count;
}
}
//std::pair<const String, std::pair<double, double>>
for (const auto& kv : filter_criteria.component_group_qcs.at(cg_qc_it).meta_value_qc)
{
bool metavalue_exists{ false };
if (!checkMetaValue(features.at(feature_it), kv.first, kv.second.first, kv.second.second, metavalue_exists))
{
cg_qc_pass = false;
cg_qc_fail_message_vec.push_back(kv.first);
}
if (metavalue_exists) ++cg_tests_count;
}
}
}
UInt c_tests_count{ 0 };
// iterate through feature/sub-feature QCs/filters
for (size_t c_qc_it = 0; c_qc_it < filter_criteria.component_qcs.size(); ++c_qc_it)
{
if (filter_criteria.component_qcs.at(c_qc_it).component_name == component_name)
{
// RT check
const double rt = features.at(feature_it).getSubordinates().at(sub_it).getRT();
if (!checkRange(rt,
filter_criteria.component_qcs.at(c_qc_it).retention_time_l,
filter_criteria.component_qcs.at(c_qc_it).retention_time_u))
{
c_qc_pass = false;
c_qc_fail_message_vec.push_back("retention_time");
}
// intensity check
double intensity = features.at(feature_it).getSubordinates().at(sub_it).getIntensity();
if (!checkRange(intensity,
filter_criteria.component_qcs.at(c_qc_it).intensity_l,
filter_criteria.component_qcs.at(c_qc_it).intensity_u))
{
c_qc_pass = false;
c_qc_fail_message_vec.push_back("intensity");
}
// overall quality check getQuality
double quality = features.at(feature_it).getSubordinates().at(sub_it).getOverallQuality();
if (!checkRange(quality,
filter_criteria.component_qcs.at(c_qc_it).overall_quality_l,
filter_criteria.component_qcs.at(c_qc_it).overall_quality_u))
{
c_qc_pass = false;
c_qc_fail_message_vec.push_back("overall_quality");
}
c_tests_count += 3;
// metaValue checks
for (auto const& kv : filter_criteria.component_qcs.at(c_qc_it).meta_value_qc)
{
bool metavalue_exists{ false };
if (!checkMetaValue(features.at(feature_it).getSubordinates().at(sub_it), kv.first, kv.second.first, kv.second.second, metavalue_exists))
{
c_qc_pass = false;
c_qc_fail_message_vec.push_back(kv.first);
}
if (metavalue_exists) ++c_tests_count;
}
}
}
const double c_score = c_tests_count ? 1.0 - c_qc_fail_message_vec.size() / (double)c_tests_count : 1.0;
features.at(feature_it).getSubordinates().at(sub_it).setMetaValue("QC_transition_score", c_score);
// Copy or Flag passing/failing subordinates
if (c_qc_pass && flag_or_filter_ == "filter")
{
subordinates_filtered.push_back(features.at(feature_it).getSubordinates().at(sub_it));
}
else if (c_qc_pass && flag_or_filter_ == "flag")
{
features.at(feature_it).getSubordinates().at(sub_it).setMetaValue("QC_transition_pass", true);
features.at(feature_it).getSubordinates().at(sub_it).setMetaValue("QC_transition_message", StringList());
}
else if (!c_qc_pass && flag_or_filter_ == "filter")
{
// do nothing
}
else if (!c_qc_pass && flag_or_filter_ == "flag")
{
features.at(feature_it).getSubordinates().at(sub_it).setMetaValue("QC_transition_pass", false);
features.at(feature_it).getSubordinates().at(sub_it).setMetaValue("QC_transition_message", getUniqueSorted(c_qc_fail_message_vec));
}
}
const double cg_score = cg_tests_count ? 1.0 - cg_qc_fail_message_vec.size() / (double)cg_tests_count : 1.0;
features.at(feature_it).setMetaValue("QC_transition_group_score", cg_score);
// Copy or Flag passing/failing Features
if (cg_qc_pass && flag_or_filter_ == "filter" && !subordinates_filtered.empty())
{
Feature feature_filtered(features.at(feature_it));
feature_filtered.setSubordinates(subordinates_filtered);
features_filtered.push_back(feature_filtered);
}
else if (cg_qc_pass && flag_or_filter_ == "filter" && subordinates_filtered.empty())
{
// do nothing
}
else if (cg_qc_pass && flag_or_filter_ == "flag")
{
features.at(feature_it).setMetaValue("QC_transition_group_pass", true);
features.at(feature_it).setMetaValue("QC_transition_group_message", StringList());
}
else if (!cg_qc_pass && flag_or_filter_ == "filter")
{
// do nothing
}
else if (!cg_qc_pass && flag_or_filter_ == "flag")
{
features.at(feature_it).setMetaValue("QC_transition_group_pass", false);
features.at(feature_it).setMetaValue("QC_transition_group_message", getUniqueSorted(cg_qc_fail_message_vec));
}
}
// replace with the filtered featureMap
if (flag_or_filter_ == "filter")
{
features = features_filtered;
}
}
void MRMFeatureFilter::FilterFeatureMapPercRSD(FeatureMap& features, const MRMFeatureQC& filter_criteria, const MRMFeatureQC& filter_values)
{
// initialize QC variables
FeatureMap features_filtered;
// iterate through each component_group/feature
for (size_t feature_it = 0; feature_it < features.size(); ++feature_it)
{
String component_group_name = (String)features.at(feature_it).getMetaValue("PeptideRef");
// initialize the new feature and subordinates
std::vector<Feature> subordinates_filtered;
bool cg_qc_pass = true;
StringList cg_qc_fail_message_vec;
UInt cg_tests_count{ 0 };
// iterate through each component/sub-feature
for (size_t sub_it = 0; sub_it < features.at(feature_it).getSubordinates().size(); ++sub_it)
{
String component_name = (String)features.at(feature_it).getSubordinates().at(sub_it).getMetaValue("native_id");
bool c_qc_pass = true;
StringList c_qc_fail_message_vec;
// iterate through multi-feature/multi-sub-feature QCs/filters
// iterate through component_groups
for (size_t cg_qc_it = 0; cg_qc_it < filter_criteria.component_group_qcs.size(); ++cg_qc_it)
{
if (filter_criteria.component_group_qcs.at(cg_qc_it).component_group_name == component_group_name)
{
if (!checkRange(filter_values.component_group_qcs.at(cg_qc_it).retention_time_u,
filter_criteria.component_group_qcs.at(cg_qc_it).retention_time_l,
filter_criteria.component_group_qcs.at(cg_qc_it).retention_time_u))
{
cg_qc_pass = false;
cg_qc_fail_message_vec.push_back("retention_time");
}
if (!checkRange(filter_values.component_group_qcs.at(cg_qc_it).intensity_u,
filter_criteria.component_group_qcs.at(cg_qc_it).intensity_l,
filter_criteria.component_group_qcs.at(cg_qc_it).intensity_u))
{
cg_qc_pass = false;
cg_qc_fail_message_vec.push_back("intensity");
}
if (!checkRange(filter_values.component_group_qcs.at(cg_qc_it).overall_quality_u,
filter_criteria.component_group_qcs.at(cg_qc_it).overall_quality_l,
filter_criteria.component_group_qcs.at(cg_qc_it).overall_quality_u))
{
cg_qc_pass = false;
cg_qc_fail_message_vec.push_back("overall_quality");
}
cg_tests_count += 3;
// ion ratio QC
if (!filter_criteria.component_group_qcs.at(cg_qc_it).ion_ratio_pair_name_1.empty()
&& !filter_criteria.component_group_qcs.at(cg_qc_it).ion_ratio_pair_name_2.empty()) {
if (!checkRange(filter_values.component_group_qcs.at(cg_qc_it).ion_ratio_u,
filter_criteria.component_group_qcs.at(cg_qc_it).ion_ratio_l,
filter_criteria.component_group_qcs.at(cg_qc_it).ion_ratio_u))
{
cg_qc_pass = false;
cg_qc_fail_message_vec.push_back("ion_ratio_pair[" + filter_criteria.component_group_qcs.at(cg_qc_it).ion_ratio_pair_name_1 + "/" + filter_criteria.component_group_qcs.at(cg_qc_it).ion_ratio_pair_name_2 + "]");
}
++cg_tests_count;
}
for (const auto& kv : filter_criteria.component_group_qcs.at(cg_qc_it).meta_value_qc)
{
if (!checkRange(filter_values.component_group_qcs.at(cg_qc_it).meta_value_qc.at(kv.first).second,
kv.second.first,
kv.second.second))
{
cg_qc_pass = false;
cg_qc_fail_message_vec.push_back(kv.first);
}
++cg_tests_count;
}
}
}
UInt c_tests_count{ 0 };
// iterate through feature/sub-feature QCs/filters
for (size_t c_qc_it = 0; c_qc_it < filter_criteria.component_qcs.size(); ++c_qc_it)
{
if (filter_criteria.component_qcs.at(c_qc_it).component_name == component_name)
{
// RT check
if (!checkRange(filter_values.component_qcs.at(c_qc_it).retention_time_u,
filter_criteria.component_qcs.at(c_qc_it).retention_time_l,
filter_criteria.component_qcs.at(c_qc_it).retention_time_u))
{
c_qc_pass = false;
c_qc_fail_message_vec.push_back("retention_time");
}
// intensity check
if (!checkRange(filter_values.component_qcs.at(c_qc_it).intensity_u,
filter_criteria.component_qcs.at(c_qc_it).intensity_l,
filter_criteria.component_qcs.at(c_qc_it).intensity_u))
{
c_qc_pass = false;
c_qc_fail_message_vec.push_back("intensity");
}
// overall quality check getQuality
if (!checkRange(filter_values.component_qcs.at(c_qc_it).overall_quality_u,
filter_criteria.component_qcs.at(c_qc_it).overall_quality_l,
filter_criteria.component_qcs.at(c_qc_it).overall_quality_u))
{
c_qc_pass = false;
c_qc_fail_message_vec.push_back("overall_quality");
}
c_tests_count += 3;
// metaValue checks
for (auto const& kv : filter_criteria.component_qcs.at(c_qc_it).meta_value_qc)
{
if (!checkRange(filter_values.component_qcs.at(c_qc_it).meta_value_qc.at(kv.first).second,
kv.second.first,
kv.second.second))
{
c_qc_pass = false;
c_qc_fail_message_vec.push_back(kv.first);
}
++c_tests_count;
}
}
}
const double c_score = c_tests_count ? 1.0 - c_qc_fail_message_vec.size() / (double)c_tests_count : 1.0;
features.at(feature_it).getSubordinates().at(sub_it).setMetaValue("QC_transition_%RSD_score", c_score);
// Copy or Flag passing/failing subordinates
if (c_qc_pass && flag_or_filter_ == "filter")
{
subordinates_filtered.push_back(features.at(feature_it).getSubordinates().at(sub_it));
}
else if (c_qc_pass && flag_or_filter_ == "flag")
{
features.at(feature_it).getSubordinates().at(sub_it).setMetaValue("QC_transition_%RSD_pass", true);
features.at(feature_it).getSubordinates().at(sub_it).setMetaValue("QC_transition_%RSD_message", StringList());
}
else if (!c_qc_pass && flag_or_filter_ == "filter")
{
// do nothing
}
else if (!c_qc_pass && flag_or_filter_ == "flag")
{
features.at(feature_it).getSubordinates().at(sub_it).setMetaValue("QC_transition_%RSD_pass", false);
features.at(feature_it).getSubordinates().at(sub_it).setMetaValue("QC_transition_%RSD_message", getUniqueSorted(c_qc_fail_message_vec));
}
}
const double cg_score = cg_tests_count ? 1.0 - cg_qc_fail_message_vec.size() / (double)cg_tests_count : 1.0;
features.at(feature_it).setMetaValue("QC_transition_group_%RSD_score", cg_score);
// Copy or Flag passing/failing Features
if (cg_qc_pass && flag_or_filter_ == "filter" && !subordinates_filtered.empty())
{
Feature feature_filtered(features.at(feature_it));
feature_filtered.setSubordinates(subordinates_filtered);
features_filtered.push_back(feature_filtered);
}
else if (cg_qc_pass && flag_or_filter_ == "filter" && subordinates_filtered.empty())
{
// do nothing
}
else if (cg_qc_pass && flag_or_filter_ == "flag")
{
features.at(feature_it).setMetaValue("QC_transition_group_%RSD_pass", true);
features.at(feature_it).setMetaValue("QC_transition_group_%RSD_message", StringList());
}
else if (!cg_qc_pass && flag_or_filter_ == "filter")
{
// do nothing
}
else if (!cg_qc_pass && flag_or_filter_ == "flag")
{
features.at(feature_it).setMetaValue("QC_transition_group_%RSD_pass", false);
features.at(feature_it).setMetaValue("QC_transition_group_%RSD_message", getUniqueSorted(cg_qc_fail_message_vec));
}
}
// replace with the filtered featureMap
if (flag_or_filter_ == "filter")
{
features = features_filtered;
}
}
void MRMFeatureFilter::FilterFeatureMapBackgroundInterference(FeatureMap& features, const MRMFeatureQC& filter_criteria, const MRMFeatureQC& filter_values)
{
// initialize QC variables
FeatureMap features_filtered;
// iterate through each component_group/feature
for (size_t feature_it = 0; feature_it < features.size(); ++feature_it)
{
String component_group_name = (String)features.at(feature_it).getMetaValue("PeptideRef");
// initialize the new feature and subordinates
std::vector<Feature> subordinates_filtered;
bool cg_qc_pass = true;
StringList cg_qc_fail_message_vec;
UInt cg_tests_count{ 0 };
// iterate through each component/sub-feature
for (size_t sub_it = 0; sub_it < features.at(feature_it).getSubordinates().size(); ++sub_it)
{
String component_name = (String)features.at(feature_it).getSubordinates().at(sub_it).getMetaValue("native_id");
bool c_qc_pass = true;
StringList c_qc_fail_message_vec;
// iterate through multi-feature/multi-sub-feature QCs/filters
// iterate through component_groups
for (size_t cg_qc_it = 0; cg_qc_it < filter_criteria.component_group_qcs.size(); ++cg_qc_it)
{
if (filter_criteria.component_group_qcs.at(cg_qc_it).component_group_name == component_group_name)
{
// intensity check
const double perc_background_interference = filter_values.component_group_qcs.at(cg_qc_it).intensity_u / features.at(feature_it).getIntensity() * 100;
if (!checkRange(perc_background_interference,
filter_criteria.component_group_qcs.at(cg_qc_it).intensity_l,
filter_criteria.component_group_qcs.at(cg_qc_it).intensity_u))
{
cg_qc_pass = false;
cg_qc_fail_message_vec.push_back("intensity");
}
++cg_tests_count;
}
}
UInt c_tests_count{ 0 };
// iterate through feature/sub-feature QCs/filters
for (size_t c_qc_it = 0; c_qc_it < filter_criteria.component_qcs.size(); ++c_qc_it)
{
if (filter_criteria.component_qcs.at(c_qc_it).component_name == component_name)
{
// intensity check
const double perc_background_interference = filter_values.component_qcs.at(c_qc_it).intensity_u / features.at(feature_it).getSubordinates().at(sub_it).getIntensity() * 100;
if (!checkRange(perc_background_interference,
filter_criteria.component_qcs.at(c_qc_it).intensity_l,
filter_criteria.component_qcs.at(c_qc_it).intensity_u))
{
c_qc_pass = false;
c_qc_fail_message_vec.push_back("intensity");
}
++c_tests_count;
}
}
const double c_score = c_tests_count ? 1.0 - c_qc_fail_message_vec.size() / (double)c_tests_count : 1.0;
features.at(feature_it).getSubordinates().at(sub_it).setMetaValue("QC_transition_%BackgroundInterference_score", c_score);
// Copy or Flag passing/failing subordinates
if (c_qc_pass && flag_or_filter_ == "filter")
{
subordinates_filtered.push_back(features.at(feature_it).getSubordinates().at(sub_it));
}
else if (c_qc_pass && flag_or_filter_ == "flag")
{
features.at(feature_it).getSubordinates().at(sub_it).setMetaValue("QC_transition_%BackgroundInterference_pass", true);
features.at(feature_it).getSubordinates().at(sub_it).setMetaValue("QC_transition_%BackgroundInterference_message", StringList());
}
else if (!c_qc_pass && flag_or_filter_ == "filter")
{
// do nothing
}
else if (!c_qc_pass && flag_or_filter_ == "flag")
{
features.at(feature_it).getSubordinates().at(sub_it).setMetaValue("QC_transition_%BackgroundInterference_pass", false);
features.at(feature_it).getSubordinates().at(sub_it).setMetaValue("QC_transition_%BackgroundInterference_message", getUniqueSorted(c_qc_fail_message_vec));
}
}
const double cg_score = cg_tests_count ? 1.0 - cg_qc_fail_message_vec.size() / (double)cg_tests_count : 1.0;
features.at(feature_it).setMetaValue("QC_transition_group_%BackgroundInterference_score", cg_score);
// Copy or Flag passing/failing Features
if (cg_qc_pass && flag_or_filter_ == "filter" && !subordinates_filtered.empty())
{
Feature feature_filtered(features.at(feature_it));
feature_filtered.setSubordinates(subordinates_filtered);
features_filtered.push_back(feature_filtered);
}
else if (cg_qc_pass && flag_or_filter_ == "filter" && subordinates_filtered.empty())
{
// do nothing
}
else if (cg_qc_pass && flag_or_filter_ == "flag")
{
features.at(feature_it).setMetaValue("QC_transition_group_%BackgroundInterference_pass", true);
features.at(feature_it).setMetaValue("QC_transition_group_%BackgroundInterference_message", StringList());
}
else if (!cg_qc_pass && flag_or_filter_ == "filter")
{
// do nothing
}
else if (!cg_qc_pass && flag_or_filter_ == "flag")
{
features.at(feature_it).setMetaValue("QC_transition_group_%BackgroundInterference_pass", false);
features.at(feature_it).setMetaValue("QC_transition_group_%BackgroundInterference_message", getUniqueSorted(cg_qc_fail_message_vec));
}
}
// replace with the filtered featureMap
if (flag_or_filter_ == "filter")
{
features = features_filtered;
}
}
void MRMFeatureFilter::EstimateDefaultMRMFeatureQCValues(const std::vector<FeatureMap>& samples, MRMFeatureQC& filter_template, const TargetedExperiment& transitions, const bool& init_template_values) const
{
// iterate through each sample and accumulate the min/max values in the samples in the filter_template
for (size_t sample_it = 0; sample_it < samples.size(); sample_it++) {
// iterate through each component_group/feature
for (size_t feature_it = 0; feature_it < samples.at(sample_it).size(); ++feature_it)
{
String component_group_name = (String)samples.at(sample_it).at(feature_it).getMetaValue("PeptideRef");
std::map<String, int> labels_and_transition_types = countLabelsAndTransitionTypes(samples.at(sample_it).at(feature_it), transitions);
// iterate through each component/sub-feature
for (size_t sub_it = 0; sub_it < samples.at(sample_it).at(feature_it).getSubordinates().size(); ++sub_it)
{
String component_name = (String)samples.at(sample_it).at(feature_it).getSubordinates().at(sub_it).getMetaValue("native_id");
// iterate through multi-feature/multi-sub-feature QCs/filters
// iterate through component_groups
for (size_t cg_qc_it = 0; cg_qc_it < filter_template.component_group_qcs.size(); ++cg_qc_it)
{
if (filter_template.component_group_qcs.at(cg_qc_it).component_group_name == component_group_name)
{
const double rt = samples.at(sample_it).at(feature_it).getRT();
if (sample_it == 0 && init_template_values) {
initRange(rt,
filter_template.component_group_qcs.at(cg_qc_it).retention_time_l,
filter_template.component_group_qcs.at(cg_qc_it).retention_time_u);
} else {
updateRange(rt,
filter_template.component_group_qcs.at(cg_qc_it).retention_time_l,
filter_template.component_group_qcs.at(cg_qc_it).retention_time_u);
}
const double intensity = samples.at(sample_it).at(feature_it).getIntensity();
if (sample_it == 0 && init_template_values) {
initRange(intensity,
filter_template.component_group_qcs.at(cg_qc_it).intensity_l,
filter_template.component_group_qcs.at(cg_qc_it).intensity_u);
} else {
updateRange(intensity,
filter_template.component_group_qcs.at(cg_qc_it).intensity_l,
filter_template.component_group_qcs.at(cg_qc_it).intensity_u);
}
const double quality = samples.at(sample_it).at(feature_it).getOverallQuality();
if (sample_it == 0 && init_template_values) {
initRange(quality,
filter_template.component_group_qcs.at(cg_qc_it).overall_quality_l,
filter_template.component_group_qcs.at(cg_qc_it).overall_quality_u);
} else {
updateRange(quality,
filter_template.component_group_qcs.at(cg_qc_it).overall_quality_l,
filter_template.component_group_qcs.at(cg_qc_it).overall_quality_u);
}
// labels and transition counts QC
if (sample_it == 0 && init_template_values) {
initRange(labels_and_transition_types["n_heavy"],
filter_template.component_group_qcs.at(cg_qc_it).n_heavy_l,
filter_template.component_group_qcs.at(cg_qc_it).n_heavy_u);
initRange(labels_and_transition_types["n_light"],
filter_template.component_group_qcs.at(cg_qc_it).n_light_l,
filter_template.component_group_qcs.at(cg_qc_it).n_light_u);
initRange(labels_and_transition_types["n_detecting"],
filter_template.component_group_qcs.at(cg_qc_it).n_detecting_l,
filter_template.component_group_qcs.at(cg_qc_it).n_detecting_u);
initRange(labels_and_transition_types["n_quantifying"],
filter_template.component_group_qcs.at(cg_qc_it).n_quantifying_l,
filter_template.component_group_qcs.at(cg_qc_it).n_quantifying_u);
initRange(labels_and_transition_types["n_identifying"],
filter_template.component_group_qcs.at(cg_qc_it).n_identifying_l,
filter_template.component_group_qcs.at(cg_qc_it).n_identifying_u);
initRange(labels_and_transition_types["n_transitions"],
filter_template.component_group_qcs.at(cg_qc_it).n_transitions_l,
filter_template.component_group_qcs.at(cg_qc_it).n_transitions_u);
} else {
updateRange(labels_and_transition_types["n_heavy"],
filter_template.component_group_qcs.at(cg_qc_it).n_heavy_l,
filter_template.component_group_qcs.at(cg_qc_it).n_heavy_u);
updateRange(labels_and_transition_types["n_light"],
filter_template.component_group_qcs.at(cg_qc_it).n_light_l,
filter_template.component_group_qcs.at(cg_qc_it).n_light_u);
updateRange(labels_and_transition_types["n_detecting"],
filter_template.component_group_qcs.at(cg_qc_it).n_detecting_l,
filter_template.component_group_qcs.at(cg_qc_it).n_detecting_u);
updateRange(labels_and_transition_types["n_quantifying"],
filter_template.component_group_qcs.at(cg_qc_it).n_quantifying_l,
filter_template.component_group_qcs.at(cg_qc_it).n_quantifying_u);
updateRange(labels_and_transition_types["n_identifying"],
filter_template.component_group_qcs.at(cg_qc_it).n_identifying_l,
filter_template.component_group_qcs.at(cg_qc_it).n_identifying_u);
updateRange(labels_and_transition_types["n_transitions"],
filter_template.component_group_qcs.at(cg_qc_it).n_transitions_l,
filter_template.component_group_qcs.at(cg_qc_it).n_transitions_u);
}
// ion ratio QC
for (size_t sub_it2 = 0; sub_it2 < samples.at(sample_it).at(feature_it).getSubordinates().size(); ++sub_it2)
{
String component_name2 = (String)samples.at(sample_it).at(feature_it).getSubordinates().at(sub_it2).getMetaValue("native_id");
// find the ion ratio pair
if (!filter_template.component_group_qcs.at(cg_qc_it).ion_ratio_pair_name_1.empty()
&& !filter_template.component_group_qcs.at(cg_qc_it).ion_ratio_pair_name_2.empty()
&& filter_template.component_group_qcs.at(cg_qc_it).ion_ratio_pair_name_1 == component_name
&& filter_template.component_group_qcs.at(cg_qc_it).ion_ratio_pair_name_2 == component_name2)
{
double ion_ratio = calculateIonRatio(samples.at(sample_it).at(feature_it).getSubordinates().at(sub_it), samples.at(sample_it).at(feature_it).getSubordinates().at(sub_it2), filter_template.component_group_qcs.at(cg_qc_it).ion_ratio_feature_name);
if (sample_it == 0 && init_template_values) {
initRange(ion_ratio,
filter_template.component_group_qcs.at(cg_qc_it).ion_ratio_l,
filter_template.component_group_qcs.at(cg_qc_it).ion_ratio_u);
} else {
updateRange(ion_ratio,
filter_template.component_group_qcs.at(cg_qc_it).ion_ratio_l,
filter_template.component_group_qcs.at(cg_qc_it).ion_ratio_u);
}
}
}
for (auto& kv : filter_template.component_group_qcs.at(cg_qc_it).meta_value_qc)
{
bool metavalue_exists{ false };
if (sample_it == 0 && init_template_values) {
updateMetaValue(samples.at(sample_it).at(feature_it), kv.first, kv.second.first, kv.second.second, metavalue_exists);
} else {
initMetaValue(samples.at(sample_it).at(feature_it), kv.first, kv.second.first, kv.second.second, metavalue_exists);
}
}
}
}
// iterate through feature/sub-feature QCs/filters
for (size_t c_qc_it = 0; c_qc_it < filter_template.component_qcs.size(); ++c_qc_it)
{
if (filter_template.component_qcs.at(c_qc_it).component_name == component_name)
{
// RT check
const double rt = samples.at(sample_it).at(feature_it).getSubordinates().at(sub_it).getRT();
if (sample_it == 0 && init_template_values) {
initRange(rt,
filter_template.component_qcs.at(c_qc_it).retention_time_l,
filter_template.component_qcs.at(c_qc_it).retention_time_u);
} else {
updateRange(rt,
filter_template.component_qcs.at(c_qc_it).retention_time_l,
filter_template.component_qcs.at(c_qc_it).retention_time_u);
}
// intensity check
double intensity = samples.at(sample_it).at(feature_it).getSubordinates().at(sub_it).getIntensity();
if (sample_it == 0 && init_template_values) {
initRange(intensity,
filter_template.component_qcs.at(c_qc_it).intensity_l,
filter_template.component_qcs.at(c_qc_it).intensity_u);
} else {
updateRange(intensity,
filter_template.component_qcs.at(c_qc_it).intensity_l,
filter_template.component_qcs.at(c_qc_it).intensity_u);
}
// overall quality check getQuality
double quality = samples.at(sample_it).at(feature_it).getSubordinates().at(sub_it).getOverallQuality();
if (sample_it == 0 && init_template_values) {
initRange(quality,
filter_template.component_qcs.at(c_qc_it).overall_quality_l,
filter_template.component_qcs.at(c_qc_it).overall_quality_u);
} else {
updateRange(quality,
filter_template.component_qcs.at(c_qc_it).overall_quality_l,
filter_template.component_qcs.at(c_qc_it).overall_quality_u);
}
// metaValue checks
for (auto& kv : filter_template.component_qcs.at(c_qc_it).meta_value_qc)
{
bool metavalue_exists{ false };
if (sample_it == 0 && init_template_values) {
initMetaValue(samples.at(sample_it).at(feature_it).getSubordinates().at(sub_it), kv.first, kv.second.first, kv.second.second, metavalue_exists);
} else {
updateMetaValue(samples.at(sample_it).at(feature_it).getSubordinates().at(sub_it), kv.first, kv.second.first, kv.second.second, metavalue_exists);
}
}
}
}
}
}
}
}
void MRMFeatureFilter::TransferLLOQAndULOQToCalculatedConcentrationBounds(const std::vector<AbsoluteQuantitationMethod>& quantitation_method, MRMFeatureQC& filter_template)
{
// Iterate through the quantitation method and update the MetaValue for `calculated_concentration` in the filter_template
for (const AbsoluteQuantitationMethod& quant_method : quantitation_method) {
if (quant_method.getLLOQ() == 0 && quant_method.getULOQ() == 0) continue;
// iterate through feature/sub-feature QCs/filters
for (size_t c_qc_it = 0; c_qc_it < filter_template.component_qcs.size(); ++c_qc_it) {
if (filter_template.component_qcs.at(c_qc_it).component_name == quant_method.getComponentName()) {
// update the lower/upper bound for the `calculated_concentration` metaValue
filter_template.component_qcs.at(c_qc_it).meta_value_qc.at("calculated_concentration").first = quant_method.getLLOQ();
filter_template.component_qcs.at(c_qc_it).meta_value_qc.at("calculated_concentration").second = quant_method.getULOQ();
}
}
}
}
void MRMFeatureFilter::EstimatePercRSD(const std::vector<FeatureMap>& samples, MRMFeatureQC& filter_template, const TargetedExperiment& transitions) const
{
// iterate through each sample and accumulate the values in the filter_values
std::vector<MRMFeatureQC> filter_values;
accumulateFilterValues(filter_values, samples, filter_template, transitions);
// Determine the AVE for each filter_template value
MRMFeatureQC filter_mean;
calculateFilterValuesMean(filter_mean, filter_values, filter_template);
// Determine the STD for each filter_template value
MRMFeatureQC filter_var;
calculateFilterValuesVar(filter_var, filter_values, filter_mean, filter_template);
// Determine the %RSD for each filter_template value
calculateFilterValuesPercRSD(filter_template, filter_mean, filter_var);
}
void MRMFeatureFilter::EstimateBackgroundInterferences(const std::vector<FeatureMap>& samples, MRMFeatureQC& filter_template, const TargetedExperiment& transitions) const
{
// iterate through each sample and accumulate the values in the filter_values
std::vector<MRMFeatureQC> filter_values;
accumulateFilterValues(filter_values, samples, filter_template, transitions);
// Determine the AVE for each filter_template value
calculateFilterValuesMean(filter_template, filter_values, filter_template);
}
std::map<String, int> MRMFeatureFilter::countLabelsAndTransitionTypes(
const Feature& component_group,
const TargetedExperiment& transitions) const
{
int n_heavy(0), n_light(0), n_quant(0), n_detect(0), n_ident(0), n_trans(0);
std::map<String, int> output;
for (size_t cg_it = 0; cg_it < component_group.getSubordinates().size(); ++cg_it)
{
// extract out the matching transition
ReactionMonitoringTransition transition;
for (size_t trans_it = 0; trans_it < transitions.getTransitions().size(); ++trans_it)
{
if (transitions.getTransitions()[trans_it].getNativeID() == component_group.getSubordinates()[cg_it].getMetaValue("native_id"))
{
transition = transitions.getTransitions()[trans_it];
break;
}
}
// count labels and transition types
String label_type = (String)component_group.getSubordinates()[cg_it].getMetaValue("LabelType");
if (label_type == "Heavy")
{
++n_heavy;
}
else if (label_type == "Light")
{
++n_light;
}
if (transition.isQuantifyingTransition())
{
++n_quant;
}
if (transition.isIdentifyingTransition())
{
++n_ident;
}
if (transition.isDetectingTransition())
{
++n_detect;
}
++n_trans;
}
// record
output["n_heavy"] = n_heavy;
output["n_light"] = n_light;
output["n_quantifying"] = n_quant;
output["n_identifying"] = n_ident;
output["n_detecting"] = n_detect;
output["n_transitions"] = n_trans;
return output;
}
double MRMFeatureFilter::calculateIonRatio(const Feature& component_1, const Feature& component_2, const String& feature_name) const
{
double ratio = 0.0;
// member feature_name access
if (feature_name == "intensity")
{
if (component_1.metaValueExists("native_id")&& component_2.metaValueExists("native_id"))
{
const double feature_1 = component_1.getIntensity();
const double feature_2 = component_2.getIntensity();
ratio = feature_1 / feature_2;
}
else if (component_1.metaValueExists("native_id"))
{
OPENMS_LOG_DEBUG << "Warning: no IS found for component " << component_1.getMetaValue("native_id") << "." << std::endl;
const double feature_1 = component_1.getIntensity();
ratio = feature_1;
}
}
// metaValue feature_name access
else
{
if (component_1.metaValueExists(feature_name)&& component_2.metaValueExists(feature_name))
{
const double feature_1 = component_1.getMetaValue(feature_name);
const double feature_2 = component_2.getMetaValue(feature_name);
ratio = feature_1 / feature_2;
}
else if (component_1.metaValueExists(feature_name))
{
OPENMS_LOG_DEBUG << "Warning: no IS found for component " << component_1.getMetaValue("native_id") << "." << std::endl;
const double feature_1 = component_1.getMetaValue(feature_name);
ratio = feature_1;
}
else
{
OPENMS_LOG_DEBUG << "Feature metaValue " << feature_name << " not found for components " << component_1.getMetaValue("native_id") << " and " << component_2.getMetaValue("native_id") << ".";
}
}
return ratio;
}
double MRMFeatureFilter::calculateRTDifference(Feature& component_1, Feature& component_2) const
{
return std::abs(component_1.getRT() - component_2.getRT());
}
double MRMFeatureFilter::calculateResolution(Feature& component_1, Feature& component_2) const
{
// Resolution = 2 * |RT2 - RT1| / (W1 + W2)
// where W is the peak width at base
// For Gaussian peaks: width at base ≈ 1.7 * FWHM
double rt_diff = std::abs(component_1.getRT() - component_2.getRT());
// Try to get width from metavalues (width_at_50 is FWHM)
double width_1 = 0.0;
double width_2 = 0.0;
if (component_1.metaValueExists("width_at_50"))
{
width_1 = static_cast<double>(component_1.getMetaValue("width_at_50")) * 1.7;
}
else
{
// Fall back to using the feature's bounding box width
width_1 = component_1.getWidth();
}
if (component_2.metaValueExists("width_at_50"))
{
width_2 = static_cast<double>(component_2.getMetaValue("width_at_50")) * 1.7;
}
else
{
width_2 = component_2.getWidth();
}
double width_sum = width_1 + width_2;
if (width_sum <= 0.0)
{
return 0.0;
}
return 2.0 * rt_diff / width_sum;
}
bool MRMFeatureFilter::checkMetaValue(
const Feature& component,
const String& meta_value_key,
const double& meta_value_l,
const double& meta_value_u,
bool& key_exists
) const
{
bool check = true;
if (component.metaValueExists(meta_value_key)) {
key_exists = true;
const double meta_value = (double)component.getMetaValue(meta_value_key);
check = checkRange(meta_value, meta_value_l, meta_value_u);
} else {
key_exists = false;
OPENMS_LOG_DEBUG << "Warning: no metaValue found for transition_id " << component.getMetaValue("native_id") << " for metaValue key " << meta_value_key << ".";
}
return check;
}
void MRMFeatureFilter::updateMetaValue(const Feature& component, const String& meta_value_key, double& meta_value_l, double& meta_value_u, bool& key_exists) const
{
if (component.metaValueExists(meta_value_key))
{
key_exists = true;
const double meta_value = (double)component.getMetaValue(meta_value_key);
updateRange(meta_value, meta_value_l, meta_value_u);
}
else
{
key_exists = false;
OPENMS_LOG_DEBUG << "Warning: no metaValue found for transition_id " << component.getMetaValue("native_id") << " for metaValue key " << meta_value_key << ".";
}
}
void MRMFeatureFilter::setMetaValue(const Feature& component, const String& meta_value_key, double& meta_value_l, double& meta_value_u, bool& key_exists) const
{
if (component.metaValueExists(meta_value_key))
{
key_exists = true;
const double meta_value = (double)component.getMetaValue(meta_value_key);
setRange(meta_value, meta_value_l, meta_value_u);
}
else
{
key_exists = false;
OPENMS_LOG_DEBUG << "Warning: no metaValue found for transition_id " << component.getMetaValue("native_id") << " for metaValue key " << meta_value_key << ".";
}
}
void MRMFeatureFilter::initMetaValue(const Feature& component, const String& meta_value_key, double& meta_value_l, double& meta_value_u, bool& key_exists) const
{
if (component.metaValueExists(meta_value_key))
{
key_exists = true;
const double meta_value = (double)component.getMetaValue(meta_value_key);
initRange(meta_value, meta_value_l, meta_value_u);
}
else
{
key_exists = false;
OPENMS_LOG_DEBUG << "Warning: no metaValue found for transition_id " << component.getMetaValue("native_id") << " for metaValue key " << meta_value_key << ".";
}
}
StringList MRMFeatureFilter::getUniqueSorted(const StringList& messages) const
{
StringList unique{ messages };
std::sort(unique.begin(), unique.end());
unique.erase(std::unique(unique.begin(), unique.end()), unique.end());
return unique;
}
  void MRMFeatureFilter::accumulateFilterValues(std::vector<MRMFeatureQC>& filter_values, const std::vector<FeatureMap>& samples, const MRMFeatureQC& filter_template, const TargetedExperiment& transitions) const
  {
    // For every sample, copy the filter template and overwrite (setRange) its
    // bounds with the values observed in that sample, then append the copy to
    // filter_values. One MRMFeatureQC per sample is produced; downstream code
    // computes means/variances over this vector.
    // iterate through each sample and accumulate the values in the filter_values
    for (size_t sample_it = 0; sample_it < samples.size(); sample_it++) {
      // start from the template so names and metaValue keys are preserved
      MRMFeatureQC filter_value = filter_template;
      // iterate through each component_group/feature
      for (size_t feature_it = 0; feature_it < samples.at(sample_it).size(); ++feature_it)
      {
        String component_group_name = (String)samples.at(sample_it).at(feature_it).getMetaValue("PeptideRef");
        // label/transition-type counts are per component group, computed once here
        std::map<String, int> labels_and_transition_types = countLabelsAndTransitionTypes(samples.at(sample_it).at(feature_it), transitions);
        // iterate through each component/sub-feature
        for (size_t sub_it = 0; sub_it < samples.at(sample_it).at(feature_it).getSubordinates().size(); ++sub_it)
        {
          String component_name = (String)samples.at(sample_it).at(feature_it).getSubordinates().at(sub_it).getMetaValue("native_id");
          // iterate through multi-feature/multi-sub-feature QCs/filters
          // iterate through component_groups
          for (size_t cg_qc_it = 0; cg_qc_it < filter_value.component_group_qcs.size(); ++cg_qc_it)
          {
            if (filter_value.component_group_qcs.at(cg_qc_it).component_group_name == component_group_name)
            {
              // group-level RT, intensity and overall quality observed in this sample
              const double rt = samples.at(sample_it).at(feature_it).getRT();
              setRange(rt,
                filter_value.component_group_qcs.at(cg_qc_it).retention_time_l,
                filter_value.component_group_qcs.at(cg_qc_it).retention_time_u);
              const double intensity = samples.at(sample_it).at(feature_it).getIntensity();
              setRange(intensity,
                filter_value.component_group_qcs.at(cg_qc_it).intensity_l,
                filter_value.component_group_qcs.at(cg_qc_it).intensity_u);
              const double quality = samples.at(sample_it).at(feature_it).getOverallQuality();
              setRange(quality,
                filter_value.component_group_qcs.at(cg_qc_it).overall_quality_l,
                filter_value.component_group_qcs.at(cg_qc_it).overall_quality_u);
              // labels and transition counts QC
              setRange(labels_and_transition_types["n_heavy"],
                filter_value.component_group_qcs.at(cg_qc_it).n_heavy_l,
                filter_value.component_group_qcs.at(cg_qc_it).n_heavy_u);
              setRange(labels_and_transition_types["n_light"],
                filter_value.component_group_qcs.at(cg_qc_it).n_light_l,
                filter_value.component_group_qcs.at(cg_qc_it).n_light_u);
              setRange(labels_and_transition_types["n_detecting"],
                filter_value.component_group_qcs.at(cg_qc_it).n_detecting_l,
                filter_value.component_group_qcs.at(cg_qc_it).n_detecting_u);
              setRange(labels_and_transition_types["n_quantifying"],
                filter_value.component_group_qcs.at(cg_qc_it).n_quantifying_l,
                filter_value.component_group_qcs.at(cg_qc_it).n_quantifying_u);
              setRange(labels_and_transition_types["n_identifying"],
                filter_value.component_group_qcs.at(cg_qc_it).n_identifying_l,
                filter_value.component_group_qcs.at(cg_qc_it).n_identifying_u);
              setRange(labels_and_transition_types["n_transitions"],
                filter_value.component_group_qcs.at(cg_qc_it).n_transitions_l,
                filter_value.component_group_qcs.at(cg_qc_it).n_transitions_u);
              // ion ratio QC: search all other subordinates for the configured pair
              for (size_t sub_it2 = 0; sub_it2 < samples.at(sample_it).at(feature_it).getSubordinates().size(); ++sub_it2)
              {
                String component_name2 = (String)samples.at(sample_it).at(feature_it).getSubordinates().at(sub_it2).getMetaValue("native_id");
                // find the ion ratio pair
                if (!filter_value.component_group_qcs.at(cg_qc_it).ion_ratio_pair_name_1.empty()
                  && !filter_value.component_group_qcs.at(cg_qc_it).ion_ratio_pair_name_2.empty()
                  && filter_value.component_group_qcs.at(cg_qc_it).ion_ratio_pair_name_1 == component_name
                  && filter_value.component_group_qcs.at(cg_qc_it).ion_ratio_pair_name_2 == component_name2)
                {
                  double ion_ratio = calculateIonRatio(samples.at(sample_it).at(feature_it).getSubordinates().at(sub_it), samples.at(sample_it).at(feature_it).getSubordinates().at(sub_it2), filter_value.component_group_qcs.at(cg_qc_it).ion_ratio_feature_name);
                  setRange(ion_ratio,
                    filter_value.component_group_qcs.at(cg_qc_it).ion_ratio_l,
                    filter_value.component_group_qcs.at(cg_qc_it).ion_ratio_u);
                }
              }
              // group-level metaValue QCs, read from the component-group feature
              for (auto& kv : filter_value.component_group_qcs.at(cg_qc_it).meta_value_qc)
              {
                // existence flag is intentionally discarded here; missing keys are only logged
                bool metavalue_exists{ false };
                setMetaValue(samples.at(sample_it).at(feature_it), kv.first, kv.second.first, kv.second.second, metavalue_exists);
              }
            }
          }
          // iterate through feature/sub-feature QCs/filters
          for (size_t c_qc_it = 0; c_qc_it < filter_value.component_qcs.size(); ++c_qc_it)
          {
            if (filter_value.component_qcs.at(c_qc_it).component_name == component_name)
            {
              // RT check
              const double rt = samples.at(sample_it).at(feature_it).getSubordinates().at(sub_it).getRT();
              setRange(rt,
                filter_value.component_qcs.at(c_qc_it).retention_time_l,
                filter_value.component_qcs.at(c_qc_it).retention_time_u);
              // intensity check
              double intensity = samples.at(sample_it).at(feature_it).getSubordinates().at(sub_it).getIntensity();
              setRange(intensity,
                filter_value.component_qcs.at(c_qc_it).intensity_l,
                filter_value.component_qcs.at(c_qc_it).intensity_u);
              // overall quality check getQuality
              double quality = samples.at(sample_it).at(feature_it).getSubordinates().at(sub_it).getOverallQuality();
              setRange(quality,
                filter_value.component_qcs.at(c_qc_it).overall_quality_l,
                filter_value.component_qcs.at(c_qc_it).overall_quality_u);
              // metaValue checks, read from the sub-feature
              for (auto& kv : filter_value.component_qcs.at(c_qc_it).meta_value_qc)
              {
                // existence flag is intentionally discarded here; missing keys are only logged
                bool metavalue_exists{ false };
                setMetaValue(samples.at(sample_it).at(feature_it).getSubordinates().at(sub_it), kv.first, kv.second.first, kv.second.second, metavalue_exists);
              }
            }
          }
        }
      }
      filter_values.push_back(filter_value);
    }
  }
void MRMFeatureFilter::zeroFilterValues(MRMFeatureQC& filter_zeros, const MRMFeatureQC& filter_template) const
{
// Create a zero filter template for subsequent AVE and STD calculations
filter_zeros = filter_template;
for (size_t cg_qc_it = 0; cg_qc_it < filter_zeros.component_group_qcs.size(); ++cg_qc_it) {
filter_zeros.component_group_qcs.at(cg_qc_it).retention_time_l = 0;
filter_zeros.component_group_qcs.at(cg_qc_it).retention_time_u = 0;
filter_zeros.component_group_qcs.at(cg_qc_it).intensity_l = 0;
filter_zeros.component_group_qcs.at(cg_qc_it).intensity_u = 0;
filter_zeros.component_group_qcs.at(cg_qc_it).overall_quality_l = 0;
filter_zeros.component_group_qcs.at(cg_qc_it).overall_quality_u = 0;
filter_zeros.component_group_qcs.at(cg_qc_it).n_heavy_l = 0;
filter_zeros.component_group_qcs.at(cg_qc_it).n_heavy_u = 0;
filter_zeros.component_group_qcs.at(cg_qc_it).n_light_l = 0;
filter_zeros.component_group_qcs.at(cg_qc_it).n_light_u = 0;
filter_zeros.component_group_qcs.at(cg_qc_it).n_detecting_l = 0;
filter_zeros.component_group_qcs.at(cg_qc_it).n_detecting_u = 0;
filter_zeros.component_group_qcs.at(cg_qc_it).n_quantifying_l = 0;
filter_zeros.component_group_qcs.at(cg_qc_it).n_quantifying_u = 0;
filter_zeros.component_group_qcs.at(cg_qc_it).n_identifying_l = 0;
filter_zeros.component_group_qcs.at(cg_qc_it).n_identifying_u = 0;
filter_zeros.component_group_qcs.at(cg_qc_it).n_transitions_l = 0;
filter_zeros.component_group_qcs.at(cg_qc_it).n_transitions_u = 0;
filter_zeros.component_group_qcs.at(cg_qc_it).ion_ratio_l = 0;
filter_zeros.component_group_qcs.at(cg_qc_it).ion_ratio_u = 0;
for (auto& kv : filter_zeros.component_group_qcs.at(cg_qc_it).meta_value_qc) {
kv.second.first = 0;
kv.second.second = 0;
}
}
for (size_t c_qc_it = 0; c_qc_it < filter_zeros.component_qcs.size(); ++c_qc_it) {
filter_zeros.component_qcs.at(c_qc_it).retention_time_l = 0;
filter_zeros.component_qcs.at(c_qc_it).retention_time_u = 0;
filter_zeros.component_qcs.at(c_qc_it).intensity_l = 0;
filter_zeros.component_qcs.at(c_qc_it).intensity_u = 0;
filter_zeros.component_qcs.at(c_qc_it).overall_quality_l = 0;
filter_zeros.component_qcs.at(c_qc_it).overall_quality_u = 0;
for (auto& kv : filter_zeros.component_qcs.at(c_qc_it).meta_value_qc) {
kv.second.first = 0;
kv.second.second = 0;
}
}
}
void MRMFeatureFilter::calculateFilterValuesMean(MRMFeatureQC& filter_mean, const std::vector<MRMFeatureQC>& filter_values, const MRMFeatureQC& filter_template) const
{
// Determine the AVE for each filter_template value
zeroFilterValues(filter_mean, filter_template);
for (const MRMFeatureQC& filter : filter_values) { // Accumulate the sum
for (size_t cg_qc_it = 0; cg_qc_it < filter_mean.component_group_qcs.size(); ++cg_qc_it) {
filter_mean.component_group_qcs.at(cg_qc_it).retention_time_l += filter.component_group_qcs.at(cg_qc_it).retention_time_l;
filter_mean.component_group_qcs.at(cg_qc_it).retention_time_u += filter.component_group_qcs.at(cg_qc_it).retention_time_u;
filter_mean.component_group_qcs.at(cg_qc_it).intensity_l += filter.component_group_qcs.at(cg_qc_it).intensity_l;
filter_mean.component_group_qcs.at(cg_qc_it).intensity_u += filter.component_group_qcs.at(cg_qc_it).intensity_u;
filter_mean.component_group_qcs.at(cg_qc_it).overall_quality_l += filter.component_group_qcs.at(cg_qc_it).overall_quality_l;
filter_mean.component_group_qcs.at(cg_qc_it).overall_quality_u += filter.component_group_qcs.at(cg_qc_it).overall_quality_u;
filter_mean.component_group_qcs.at(cg_qc_it).n_heavy_l += filter.component_group_qcs.at(cg_qc_it).n_heavy_l;
filter_mean.component_group_qcs.at(cg_qc_it).n_heavy_u += filter.component_group_qcs.at(cg_qc_it).n_heavy_u;
filter_mean.component_group_qcs.at(cg_qc_it).n_light_l += filter.component_group_qcs.at(cg_qc_it).n_light_l;
filter_mean.component_group_qcs.at(cg_qc_it).n_light_u += filter.component_group_qcs.at(cg_qc_it).n_light_u;
filter_mean.component_group_qcs.at(cg_qc_it).n_detecting_l += filter.component_group_qcs.at(cg_qc_it).n_detecting_l;
filter_mean.component_group_qcs.at(cg_qc_it).n_detecting_u += filter.component_group_qcs.at(cg_qc_it).n_detecting_u;
filter_mean.component_group_qcs.at(cg_qc_it).n_quantifying_l += filter.component_group_qcs.at(cg_qc_it).n_quantifying_l;
filter_mean.component_group_qcs.at(cg_qc_it).n_quantifying_u += filter.component_group_qcs.at(cg_qc_it).n_quantifying_u;
filter_mean.component_group_qcs.at(cg_qc_it).n_identifying_l += filter.component_group_qcs.at(cg_qc_it).n_identifying_l;
filter_mean.component_group_qcs.at(cg_qc_it).n_identifying_u += filter.component_group_qcs.at(cg_qc_it).n_identifying_u;
filter_mean.component_group_qcs.at(cg_qc_it).n_transitions_l += filter.component_group_qcs.at(cg_qc_it).n_transitions_l;
filter_mean.component_group_qcs.at(cg_qc_it).n_transitions_u += filter.component_group_qcs.at(cg_qc_it).n_transitions_u;
filter_mean.component_group_qcs.at(cg_qc_it).ion_ratio_l += filter.component_group_qcs.at(cg_qc_it).ion_ratio_l;
filter_mean.component_group_qcs.at(cg_qc_it).ion_ratio_u += filter.component_group_qcs.at(cg_qc_it).ion_ratio_u;
for (auto& kv : filter_mean.component_group_qcs.at(cg_qc_it).meta_value_qc) {
kv.second.first += filter.component_group_qcs.at(cg_qc_it).meta_value_qc.at(kv.first).first;
kv.second.second += filter.component_group_qcs.at(cg_qc_it).meta_value_qc.at(kv.first).second;
}
}
for (size_t c_qc_it = 0; c_qc_it < filter_mean.component_qcs.size(); ++c_qc_it) {
filter_mean.component_qcs.at(c_qc_it).retention_time_l += filter.component_qcs.at(c_qc_it).retention_time_l;
filter_mean.component_qcs.at(c_qc_it).retention_time_u += filter.component_qcs.at(c_qc_it).retention_time_u;
filter_mean.component_qcs.at(c_qc_it).intensity_l += filter.component_qcs.at(c_qc_it).intensity_l;
filter_mean.component_qcs.at(c_qc_it).intensity_u += filter.component_qcs.at(c_qc_it).intensity_u;
filter_mean.component_qcs.at(c_qc_it).overall_quality_l += filter.component_qcs.at(c_qc_it).overall_quality_l;
filter_mean.component_qcs.at(c_qc_it).overall_quality_u += filter.component_qcs.at(c_qc_it).overall_quality_u;
for (auto& kv : filter_mean.component_qcs.at(c_qc_it).meta_value_qc) {
kv.second.first += filter.component_qcs.at(c_qc_it).meta_value_qc.at(kv.first).first;
kv.second.second += filter.component_qcs.at(c_qc_it).meta_value_qc.at(kv.first).second;
}
}
}
for (size_t cg_qc_it = 0; cg_qc_it < filter_mean.component_group_qcs.size(); ++cg_qc_it) {// Divide by the size (performed separately due to int types...)
filter_mean.component_group_qcs.at(cg_qc_it).retention_time_l = filter_mean.component_group_qcs.at(cg_qc_it).retention_time_l / filter_values.size();
filter_mean.component_group_qcs.at(cg_qc_it).retention_time_u = filter_mean.component_group_qcs.at(cg_qc_it).retention_time_u / filter_values.size();
filter_mean.component_group_qcs.at(cg_qc_it).intensity_l = filter_mean.component_group_qcs.at(cg_qc_it).intensity_l / filter_values.size();
filter_mean.component_group_qcs.at(cg_qc_it).intensity_u = filter_mean.component_group_qcs.at(cg_qc_it).intensity_u / filter_values.size();
filter_mean.component_group_qcs.at(cg_qc_it).overall_quality_l = filter_mean.component_group_qcs.at(cg_qc_it).overall_quality_l / filter_values.size();
filter_mean.component_group_qcs.at(cg_qc_it).overall_quality_u = filter_mean.component_group_qcs.at(cg_qc_it).overall_quality_u / filter_values.size();
filter_mean.component_group_qcs.at(cg_qc_it).n_heavy_l = filter_mean.component_group_qcs.at(cg_qc_it).n_heavy_l / filter_values.size();
filter_mean.component_group_qcs.at(cg_qc_it).n_heavy_u = filter_mean.component_group_qcs.at(cg_qc_it).n_heavy_u / filter_values.size();
filter_mean.component_group_qcs.at(cg_qc_it).n_light_l = filter_mean.component_group_qcs.at(cg_qc_it).n_light_l / filter_values.size();
filter_mean.component_group_qcs.at(cg_qc_it).n_light_u = filter_mean.component_group_qcs.at(cg_qc_it).n_light_u / filter_values.size();
filter_mean.component_group_qcs.at(cg_qc_it).n_detecting_l = filter_mean.component_group_qcs.at(cg_qc_it).n_detecting_l / filter_values.size();
filter_mean.component_group_qcs.at(cg_qc_it).n_detecting_u = filter_mean.component_group_qcs.at(cg_qc_it).n_detecting_u / filter_values.size();
filter_mean.component_group_qcs.at(cg_qc_it).n_quantifying_l = filter_mean.component_group_qcs.at(cg_qc_it).n_quantifying_l / filter_values.size();
filter_mean.component_group_qcs.at(cg_qc_it).n_quantifying_u = filter_mean.component_group_qcs.at(cg_qc_it).n_quantifying_u / filter_values.size();
filter_mean.component_group_qcs.at(cg_qc_it).n_identifying_l = filter_mean.component_group_qcs.at(cg_qc_it).n_identifying_l / filter_values.size();
filter_mean.component_group_qcs.at(cg_qc_it).n_identifying_u = filter_mean.component_group_qcs.at(cg_qc_it).n_identifying_u / filter_values.size();
filter_mean.component_group_qcs.at(cg_qc_it).n_transitions_l = filter_mean.component_group_qcs.at(cg_qc_it).n_transitions_l / filter_values.size();
filter_mean.component_group_qcs.at(cg_qc_it).n_transitions_u = filter_mean.component_group_qcs.at(cg_qc_it).n_transitions_u / filter_values.size();
filter_mean.component_group_qcs.at(cg_qc_it).ion_ratio_l = filter_mean.component_group_qcs.at(cg_qc_it).ion_ratio_l / filter_values.size();
filter_mean.component_group_qcs.at(cg_qc_it).ion_ratio_u = filter_mean.component_group_qcs.at(cg_qc_it).ion_ratio_u / filter_values.size();
for (auto& kv : filter_mean.component_group_qcs.at(cg_qc_it).meta_value_qc) {
kv.second.first = filter_mean.component_group_qcs.at(cg_qc_it).meta_value_qc.at(kv.first).first / filter_values.size();
kv.second.second = filter_mean.component_group_qcs.at(cg_qc_it).meta_value_qc.at(kv.first).second / filter_values.size();
}
}
for (size_t c_qc_it = 0; c_qc_it < filter_mean.component_qcs.size(); ++c_qc_it) {
filter_mean.component_qcs.at(c_qc_it).retention_time_l = filter_mean.component_qcs.at(c_qc_it).retention_time_l / filter_values.size();
filter_mean.component_qcs.at(c_qc_it).retention_time_u = filter_mean.component_qcs.at(c_qc_it).retention_time_u / filter_values.size();
filter_mean.component_qcs.at(c_qc_it).intensity_l = filter_mean.component_qcs.at(c_qc_it).intensity_l / filter_values.size();
filter_mean.component_qcs.at(c_qc_it).intensity_u = filter_mean.component_qcs.at(c_qc_it).intensity_u / filter_values.size();
filter_mean.component_qcs.at(c_qc_it).overall_quality_l = filter_mean.component_qcs.at(c_qc_it).overall_quality_l / filter_values.size();
filter_mean.component_qcs.at(c_qc_it).overall_quality_u = filter_mean.component_qcs.at(c_qc_it).overall_quality_u / filter_values.size();
for (auto& kv : filter_mean.component_qcs.at(c_qc_it).meta_value_qc) {
kv.second.first = filter_mean.component_qcs.at(c_qc_it).meta_value_qc.at(kv.first).first / filter_values.size();
kv.second.second = filter_mean.component_qcs.at(c_qc_it).meta_value_qc.at(kv.first).second / filter_values.size();
}
}
}
void MRMFeatureFilter::calculateFilterValuesVar(MRMFeatureQC& filter_var, const std::vector<MRMFeatureQC>& filter_values, const MRMFeatureQC& filter_mean, const MRMFeatureQC& filter_template) const
{
  // Computes the sample variance of every QC attribute across filter_values
  // (squared deviation from filter_mean, divided by n - 1) and stores the
  // result in filter_var. The attribute layout is taken from filter_template.
  zeroFilterValues(filter_var, filter_template);
  // Guard: with fewer than two samples the (n - 1) denominator would be zero
  // (or wrap around to a huge unsigned value for an empty input); keep the
  // zero-initialized variances in that case.
  if (filter_values.size() < 2)
  {
    return;
  }
  // Accumulate the squared deviations from the mean.
  for (const MRMFeatureQC& filter : filter_values)
  {
    for (size_t cg_qc_it = 0; cg_qc_it < filter_var.component_group_qcs.size(); ++cg_qc_it)
    {
      auto& acc = filter_var.component_group_qcs.at(cg_qc_it);
      const auto& val = filter.component_group_qcs.at(cg_qc_it);
      const auto& mean = filter_mean.component_group_qcs.at(cg_qc_it);
      acc.retention_time_l += std::pow(val.retention_time_l - mean.retention_time_l, 2);
      acc.retention_time_u += std::pow(val.retention_time_u - mean.retention_time_u, 2);
      acc.intensity_l += std::pow(val.intensity_l - mean.intensity_l, 2);
      acc.intensity_u += std::pow(val.intensity_u - mean.intensity_u, 2);
      acc.overall_quality_l += std::pow(val.overall_quality_l - mean.overall_quality_l, 2);
      acc.overall_quality_u += std::pow(val.overall_quality_u - mean.overall_quality_u, 2);
      acc.n_heavy_l += std::pow(val.n_heavy_l - mean.n_heavy_l, 2);
      acc.n_heavy_u += std::pow(val.n_heavy_u - mean.n_heavy_u, 2);
      acc.n_light_l += std::pow(val.n_light_l - mean.n_light_l, 2);
      acc.n_light_u += std::pow(val.n_light_u - mean.n_light_u, 2);
      acc.n_detecting_l += std::pow(val.n_detecting_l - mean.n_detecting_l, 2);
      acc.n_detecting_u += std::pow(val.n_detecting_u - mean.n_detecting_u, 2);
      acc.n_quantifying_l += std::pow(val.n_quantifying_l - mean.n_quantifying_l, 2);
      acc.n_quantifying_u += std::pow(val.n_quantifying_u - mean.n_quantifying_u, 2);
      acc.n_identifying_l += std::pow(val.n_identifying_l - mean.n_identifying_l, 2);
      acc.n_identifying_u += std::pow(val.n_identifying_u - mean.n_identifying_u, 2);
      acc.n_transitions_l += std::pow(val.n_transitions_l - mean.n_transitions_l, 2);
      acc.n_transitions_u += std::pow(val.n_transitions_u - mean.n_transitions_u, 2);
      acc.ion_ratio_l += std::pow(val.ion_ratio_l - mean.ion_ratio_l, 2);
      acc.ion_ratio_u += std::pow(val.ion_ratio_u - mean.ion_ratio_u, 2);
      for (auto& kv : acc.meta_value_qc)
      {
        kv.second.first += std::pow(val.meta_value_qc.at(kv.first).first - mean.meta_value_qc.at(kv.first).first, 2);
        kv.second.second += std::pow(val.meta_value_qc.at(kv.first).second - mean.meta_value_qc.at(kv.first).second, 2);
      }
    }
    for (size_t c_qc_it = 0; c_qc_it < filter_var.component_qcs.size(); ++c_qc_it)
    {
      auto& acc = filter_var.component_qcs.at(c_qc_it);
      const auto& val = filter.component_qcs.at(c_qc_it);
      const auto& mean = filter_mean.component_qcs.at(c_qc_it);
      acc.retention_time_l += std::pow(val.retention_time_l - mean.retention_time_l, 2);
      acc.retention_time_u += std::pow(val.retention_time_u - mean.retention_time_u, 2);
      acc.intensity_l += std::pow(val.intensity_l - mean.intensity_l, 2);
      acc.intensity_u += std::pow(val.intensity_u - mean.intensity_u, 2);
      acc.overall_quality_l += std::pow(val.overall_quality_l - mean.overall_quality_l, 2);
      acc.overall_quality_u += std::pow(val.overall_quality_u - mean.overall_quality_u, 2);
      for (auto& kv : acc.meta_value_qc)
      {
        kv.second.first += std::pow(val.meta_value_qc.at(kv.first).first - mean.meta_value_qc.at(kv.first).first, 2);
        kv.second.second += std::pow(val.meta_value_qc.at(kv.first).second - mean.meta_value_qc.at(kv.first).second, 2);
      }
    }
  }
  // Divide by (n - 1). This is a separate pass (and keeps size_t arithmetic)
  // so that integer-typed members retain their original division semantics.
  const size_t n_minus_one = filter_values.size() - 1;
  for (size_t cg_qc_it = 0; cg_qc_it < filter_var.component_group_qcs.size(); ++cg_qc_it)
  {
    auto& acc = filter_var.component_group_qcs.at(cg_qc_it);
    acc.retention_time_l = acc.retention_time_l / n_minus_one;
    acc.retention_time_u = acc.retention_time_u / n_minus_one;
    acc.intensity_l = acc.intensity_l / n_minus_one;
    acc.intensity_u = acc.intensity_u / n_minus_one;
    acc.overall_quality_l = acc.overall_quality_l / n_minus_one;
    acc.overall_quality_u = acc.overall_quality_u / n_minus_one;
    acc.n_heavy_l = acc.n_heavy_l / n_minus_one;
    acc.n_heavy_u = acc.n_heavy_u / n_minus_one;
    acc.n_light_l = acc.n_light_l / n_minus_one;
    acc.n_light_u = acc.n_light_u / n_minus_one;
    acc.n_detecting_l = acc.n_detecting_l / n_minus_one;
    acc.n_detecting_u = acc.n_detecting_u / n_minus_one;
    acc.n_quantifying_l = acc.n_quantifying_l / n_minus_one;
    acc.n_quantifying_u = acc.n_quantifying_u / n_minus_one;
    acc.n_identifying_l = acc.n_identifying_l / n_minus_one;
    acc.n_identifying_u = acc.n_identifying_u / n_minus_one;
    acc.n_transitions_l = acc.n_transitions_l / n_minus_one;
    acc.n_transitions_u = acc.n_transitions_u / n_minus_one;
    acc.ion_ratio_l = acc.ion_ratio_l / n_minus_one;
    acc.ion_ratio_u = acc.ion_ratio_u / n_minus_one;
    for (auto& kv : acc.meta_value_qc)
    {
      kv.second.first = kv.second.first / n_minus_one;
      kv.second.second = kv.second.second / n_minus_one;
    }
  }
  for (size_t c_qc_it = 0; c_qc_it < filter_var.component_qcs.size(); ++c_qc_it)
  {
    auto& acc = filter_var.component_qcs.at(c_qc_it);
    acc.retention_time_l = acc.retention_time_l / n_minus_one;
    acc.retention_time_u = acc.retention_time_u / n_minus_one;
    acc.intensity_l = acc.intensity_l / n_minus_one;
    acc.intensity_u = acc.intensity_u / n_minus_one;
    acc.overall_quality_l = acc.overall_quality_l / n_minus_one;
    acc.overall_quality_u = acc.overall_quality_u / n_minus_one;
    for (auto& kv : acc.meta_value_qc)
    {
      kv.second.first = kv.second.first / n_minus_one;
      kv.second.second = kv.second.second / n_minus_one;
    }
  }
}
void MRMFeatureFilter::calculateFilterValuesPercRSD(MRMFeatureQC& filter_rsd, const MRMFeatureQC& filter_mean, const MRMFeatureQC& filter_var) const
{
  // Computes the percent relative standard deviation (%RSD = stddev / mean * 100)
  // for every QC attribute and writes the result into filter_rsd.
  // A mean of 0 yields 0 to avoid division by zero.
  auto perc_rsd = [](const double variance, const double mean) -> double
  {
    return (mean != 0) ? std::sqrt(variance) / mean * 100 : 0;
  };
  for (size_t cg_qc_it = 0; cg_qc_it < filter_rsd.component_group_qcs.size(); ++cg_qc_it)
  {
    auto& rsd = filter_rsd.component_group_qcs.at(cg_qc_it);
    const auto& mean = filter_mean.component_group_qcs.at(cg_qc_it);
    const auto& var = filter_var.component_group_qcs.at(cg_qc_it);
    rsd.retention_time_l = perc_rsd(var.retention_time_l, mean.retention_time_l);
    // BUGFIX: this previously used the *_l mean/variance for retention_time_u.
    rsd.retention_time_u = perc_rsd(var.retention_time_u, mean.retention_time_u);
    rsd.intensity_l = perc_rsd(var.intensity_l, mean.intensity_l);
    rsd.intensity_u = perc_rsd(var.intensity_u, mean.intensity_u);
    rsd.overall_quality_l = perc_rsd(var.overall_quality_l, mean.overall_quality_l);
    rsd.overall_quality_u = perc_rsd(var.overall_quality_u, mean.overall_quality_u);
    rsd.n_heavy_l = perc_rsd(var.n_heavy_l, mean.n_heavy_l);
    rsd.n_heavy_u = perc_rsd(var.n_heavy_u, mean.n_heavy_u);
    rsd.n_light_l = perc_rsd(var.n_light_l, mean.n_light_l);
    rsd.n_light_u = perc_rsd(var.n_light_u, mean.n_light_u);
    rsd.n_detecting_l = perc_rsd(var.n_detecting_l, mean.n_detecting_l);
    rsd.n_detecting_u = perc_rsd(var.n_detecting_u, mean.n_detecting_u);
    rsd.n_quantifying_l = perc_rsd(var.n_quantifying_l, mean.n_quantifying_l);
    rsd.n_quantifying_u = perc_rsd(var.n_quantifying_u, mean.n_quantifying_u);
    rsd.n_identifying_l = perc_rsd(var.n_identifying_l, mean.n_identifying_l);
    rsd.n_identifying_u = perc_rsd(var.n_identifying_u, mean.n_identifying_u);
    rsd.n_transitions_l = perc_rsd(var.n_transitions_l, mean.n_transitions_l);
    rsd.n_transitions_u = perc_rsd(var.n_transitions_u, mean.n_transitions_u);
    rsd.ion_ratio_l = perc_rsd(var.ion_ratio_l, mean.ion_ratio_l);
    rsd.ion_ratio_u = perc_rsd(var.ion_ratio_u, mean.ion_ratio_u);
    for (auto& kv : rsd.meta_value_qc)
    {
      kv.second.first = perc_rsd(var.meta_value_qc.at(kv.first).first, mean.meta_value_qc.at(kv.first).first);
      kv.second.second = perc_rsd(var.meta_value_qc.at(kv.first).second, mean.meta_value_qc.at(kv.first).second);
    }
  }
  for (size_t c_qc_it = 0; c_qc_it < filter_rsd.component_qcs.size(); ++c_qc_it)
  {
    auto& rsd = filter_rsd.component_qcs.at(c_qc_it);
    const auto& mean = filter_mean.component_qcs.at(c_qc_it);
    const auto& var = filter_var.component_qcs.at(c_qc_it);
    rsd.retention_time_l = perc_rsd(var.retention_time_l, mean.retention_time_l);
    rsd.retention_time_u = perc_rsd(var.retention_time_u, mean.retention_time_u);
    rsd.intensity_l = perc_rsd(var.intensity_l, mean.intensity_l);
    rsd.intensity_u = perc_rsd(var.intensity_u, mean.intensity_u);
    rsd.overall_quality_l = perc_rsd(var.overall_quality_l, mean.overall_quality_l);
    rsd.overall_quality_u = perc_rsd(var.overall_quality_u, mean.overall_quality_u);
    for (auto& kv : rsd.meta_value_qc)
    {
      kv.second.first = perc_rsd(var.meta_value_qc.at(kv.first).first, mean.meta_value_qc.at(kv.first).first);
      kv.second.second = perc_rsd(var.meta_value_qc.at(kv.first).second, mean.meta_value_qc.at(kv.first).second);
    }
  }
}
template <typename T>
bool MRMFeatureFilter::checkRange(const T& value, const T& value_l, const T& value_u) const
{
  // True iff value lies inside the closed interval [value_l, value_u].
  return (value >= value_l) && (value <= value_u);
}
template<typename T>
void MRMFeatureFilter::updateRange(const T& value, T& value_l, T& value_u) const
{
  // Widen [value_l, value_u] just enough to contain value.
  if (value_l > value) { value_l = value; }
  if (value_u < value) { value_u = value; }
}
template<typename T>
void MRMFeatureFilter::setRange(const T& value, T& value_l, T& value_u) const
{
  // Span the range from zero to value, whichever side of zero value is on.
  const T zero(0);
  if (value < zero)
  {
    value_l = value;
    value_u = zero;
  }
  else
  {
    value_l = zero;
    value_u = value;
  }
}
template<typename T>
void MRMFeatureFilter::initRange(const T& value, T& value_l, T& value_u) const
{
  // Collapse the range to the single point 'value'.
  value_u = value;
  value_l = value;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/MRMTransitionGroupPicker.cpp | .cpp | 8,096 | 148 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/MRMTransitionGroupPicker.h>
#include <OpenMS/PROCESSING/CENTROIDING/PeakPickerHiRes.h>
namespace OpenMS
{
// Evaluates the straight line through (x0, y0) and (x1, y1) at position x.
// No clamping is performed, so x outside [x0, x1] extrapolates.
double lin_interpolate(double x, double x0, double x1, double y0, double y1)
{
  const double slope = (y1 - y0) / (x1 - x0);
  return y0 + (x - x0) * slope;
}
/// Default constructor: registers all algorithm parameters (defaults, valid
/// strings, and documentation) with the DefaultParamHandler machinery, then
/// initializes the member variables from them via updateMembers_().
MRMTransitionGroupPicker::MRMTransitionGroupPicker() :
  DefaultParamHandler("MRMTransitionGroupPicker")
{
  // Termination criteria for feature finding.
  defaults_.setValue("stop_after_feature", -1, "Stop finding after feature (ordered by intensity; -1 means do not stop).");
  defaults_.setValue("stop_after_intensity_ratio", 0.0001, "Stop after reaching intensity ratio");
  defaults_.setValue("min_peak_width", 0.001, "Minimal peak width (s), discard all peaks below this value (-1 means no action).", {"advanced"});
  // Integration and background-subtraction behavior.
  defaults_.setValue("peak_integration", "original", "Calculate the peak area and height either the smoothed or the raw chromatogram data.", {"advanced"});
  defaults_.setValidStrings("peak_integration", {"original","smoothed"});
  defaults_.setValue("background_subtraction", "none", "Remove background from peak signal using estimated noise levels. The 'original' method is only provided for historical purposes, please use the 'exact' method and set parameters using the PeakIntegrator: settings. The same original or smoothed chromatogram specified by peak_integration will be used for background estimation.", {"advanced"})
  ;
  defaults_.setValidStrings("background_subtraction", {"none","original","exact"});
  // Peak-boundary refinement and consensus behavior.
  defaults_.setValue("recalculate_peaks", "false", "Tries to get better peak picking by looking at peak consistency of all picked peaks. Tries to use the consensus (median) peak border if the variation within the picked peaks is too large.", {"advanced"});
  defaults_.setValidStrings("recalculate_peaks", {"true","false"});
  defaults_.setValue("use_precursors", "false", "Use precursor chromatogram for peak picking (note that this may lead to precursor signal driving the peak picking)", {"advanced"});
  defaults_.setValidStrings("use_precursors", {"true","false"});
  defaults_.setValue("use_consensus", "true", "Use consensus peak boundaries when computing transition group picking (if false, compute independent peak boundaries for each transition)", {"advanced"});
  defaults_.setValidStrings("use_consensus", {"true","false"});
  defaults_.setValue("recalculate_peaks_max_z", 1.0, "Determines the maximal Z-Score (difference measured in standard deviations) that is considered too large for peak boundaries. If the Z-Score is above this value, the median is used for peak boundaries (default value 1.0).", {"advanced"});
  defaults_.setValue("minimal_quality", -10000.0, "Only if compute_peak_quality is set, this parameter will not consider peaks below this quality threshold", {"advanced"});
  defaults_.setValue("resample_boundary", 15.0, "For computing peak quality, how many extra seconds should be sample left and right of the actual peak", {"advanced"});
  // Optional quality / shape / mutual-information scoring.
  defaults_.setValue("compute_peak_quality", "false", "Tries to compute a quality value for each peakgroup and detect outlier transitions. The resulting score is centered around zero and values above 0 are generally good and below -1 or -2 are usually bad.", {"advanced"});
  defaults_.setValidStrings("compute_peak_quality", {"true","false"});
  defaults_.setValue("compute_peak_shape_metrics", "false", "Calculates various peak shape metrics (e.g., tailing) that can be used for downstream QC/QA.", {"advanced"});
  defaults_.setValidStrings("compute_peak_shape_metrics", {"true","false"});
  defaults_.setValue("compute_total_mi", "false", "Compute mutual information metrics for individual transitions that can be used for OpenSWATH/IPF scoring.", {"advanced"});
  defaults_.setValidStrings("compute_total_mi", {"true","false"});
  defaults_.setValue("boundary_selection_method", "largest", "Method to use when selecting the best boundaries for peaks.", {"advanced"});
  defaults_.setValidStrings("boundary_selection_method", {"largest","widest"});
  // Sub-tool parameter sections, forwarded in updateMembers_().
  defaults_.insert("PeakPickerChromatogram:", PeakPickerChromatogram().getDefaults());
  defaults_.insert("PeakIntegrator:", PeakIntegrator().getDefaults());
  // write defaults into Param object param_
  defaultsToParam_();
  updateMembers_();
}
// Defaulted destructor (defined out-of-line in the .cpp).
MRMTransitionGroupPicker::~MRMTransitionGroupPicker() = default;
MRMTransitionGroupPicker& MRMTransitionGroupPicker::operator=(const MRMTransitionGroupPicker& rhs)
{
  // Deliberately does not copy any state: the parameters are intentionally
  // left untouched (matching the long-standing behavior of this class).
  if (this == &rhs)
  {
    return *this;
  }
  return *this;
}
void MRMTransitionGroupPicker::updateMembers_()
{
  // Caches the current parameter values in member variables and forwards the
  // sub-tool parameter sections to the chromatogram peak picker and the peak
  // integrator. Idiom fix: C-style casts replaced by static_cast, and the
  // redundant (bool) casts on toBool() (which already returns bool) removed.
  stop_after_feature_ = static_cast<int>(param_.getValue("stop_after_feature"));
  stop_after_intensity_ratio_ = static_cast<double>(param_.getValue("stop_after_intensity_ratio"));
  peak_integration_ = param_.getValue("peak_integration").toString();
  background_subtraction_ = param_.getValue("background_subtraction").toString();
  recalculate_peaks_ = param_.getValue("recalculate_peaks").toBool();
  use_precursors_ = param_.getValue("use_precursors").toBool();
  use_consensus_ = param_.getValue("use_consensus").toBool();
  recalculate_peaks_max_z_ = static_cast<double>(param_.getValue("recalculate_peaks_max_z"));
  compute_peak_quality_ = param_.getValue("compute_peak_quality").toBool();
  compute_peak_shape_metrics_ = param_.getValue("compute_peak_shape_metrics").toBool();
  compute_total_mi_ = param_.getValue("compute_total_mi").toBool();
  min_qual_ = static_cast<double>(param_.getValue("minimal_quality"));
  min_peak_width_ = static_cast<double>(param_.getValue("min_peak_width"));
  resample_boundary_ = static_cast<double>(param_.getValue("resample_boundary"));
  boundary_selection_method_ = param_.getValue("boundary_selection_method").toString();
  // Hand the nested parameter sections down to the helper algorithms.
  picker_.setParameters(param_.copy("PeakPickerChromatogram:", true));
  pi_.setParameters(param_.copy("PeakIntegrator:", true));
}
void MRMTransitionGroupPicker::findLargestPeak(const std::vector<MSChromatogram >& picked_chroms, int& chr_idx, int& peak_idx)
{
double largest = 0.0;
for (Size k = 0; k < picked_chroms.size(); k++)
{
for (Size i = 0; i < picked_chroms[k].size(); i++)
{
if (picked_chroms[k][i].getIntensity() > largest)
{
largest = picked_chroms[k][i].getIntensity();
chr_idx = (int)k;
peak_idx = (int)i;
}
}
}
}
void MRMTransitionGroupPicker::findWidestPeakIndices(const std::vector<MSChromatogram>& picked_chroms, Int& chrom_idx, Int& point_idx) const
{
double max_width{0};
for (Size i = 0; i < picked_chroms.size(); ++i)
{
for (Size k = 0; k < picked_chroms[i].size(); ++k)
{
// Skip peaks already "consumed" by previous iterations
if (picked_chroms[i][k].getIntensity() == 0.0) {continue; }
const double left_rt = picked_chroms[i].getFloatDataArrays()[PeakPickerChromatogram::IDX_LEFTBORDER][k];
const double right_rt = picked_chroms[i].getFloatDataArrays()[PeakPickerChromatogram::IDX_RIGHTBORDER][k];
const double local_peak_width = right_rt - left_rt;
OPENMS_LOG_DEBUG << "findWidestPeakIndices(): local_peak_width=" << local_peak_width << std::endl;
if (local_peak_width > max_width)
{
max_width = local_peak_width;
chrom_idx = static_cast<Int>(i);
point_idx = static_cast<Int>(k);
OPENMS_LOG_DEBUG << "findWidestPeakIndices(): max_width=" << max_width << "; chrom_idx=" << chrom_idx << "; point_idx=" << point_idx << std::endl;
}
}
}
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/SwathQC.cpp | .cpp | 6,179 | 191 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/SwathQC.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/DataAccessHelper.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/PROCESSING/DEISOTOPING/Deisotoper.h>
#include <OpenMS/METADATA/ExperimentalSettings.h>
#include <OpenMS/PROCESSING/CENTROIDING/PeakPickerHiRes.h>
#include <nlohmann/json.hpp>
#include <cmath> // round
#include <iomanip> // setw
namespace OpenSwath
{
using namespace OpenMS;
/// Constructor.
/// @param cd_spectra Number of MS1 spectra to subsample when accumulating the
///        charge distribution (see getSpectraProcessingFunc()).
/// @param decon_ms1_mz_tol m/z tolerance forwarded to the MS1 deisotoping step.
SwathQC::SwathQC(const size_t cd_spectra, const double decon_ms1_mz_tol)
  : cd_(),
    nr_ms1_spectra_(0), // 0 == unknown; may later be set from metadata
    cd_spectra_(cd_spectra),
    decon_ms1_mz_tol_(decon_ms1_mz_tol),
    ms1_spectra_seen_(0)
{
}
std::function<void(const ExperimentalSettings&)> SwathQC::getExpSettingsFunc()
{
  // Returns a callback that caches the 'nr_ms1_spectra' meta value (if
  // present) into nr_ms1_spectra_.
  return [this](const ExperimentalSettings& es)
  {
    // Already known? Keep it. Some parsers invoke this callback during
    // parsing, when the information may still be missing.
    if (nr_ms1_spectra_ > 0)
    {
      return;
    }
    if (es.metaValueExists("nr_ms1_spectra"))
    {
      // Acts like a callback: update the member when the lambda is invoked.
      nr_ms1_spectra_ = es.getMetaValue("nr_ms1_spectra");
    }
    else
    {
      nr_ms1_spectra_ = 0;
    }
  };
}
/// Returns a callback which, for each subsampled MS1 spectrum, centroids it
/// if necessary, deisotopes it, and counts the charge annotations into the
/// member charge-distribution map cd_.
std::function<void(const MSSpectrum&)> SwathQC::getSpectraProcessingFunc()
{
  auto f = [this](const MSSpectrum& spec)
  {
    // only look at MS1 spectra (for now)
    if (spec.getMSLevel() != 1) return;
    // NOTE(review): ms1_spectra_seen_ is incremented only for spectra that
    // pass the subsampling test; isSubsampledSpectrum_() appears to treat its
    // 'idx' argument as a running index over *all* MS1 spectra, so the
    // counter could stall on the first rejected index -- confirm intent.
    if (!isSubsampledSpectrum_(nr_ms1_spectra_, cd_spectra_, ms1_spectra_seen_))
    {
      return;
    }
    ++ms1_spectra_seen_;
    PeakPickerHiRes pp;
    auto t = spec.getType(true);
    MSSpectrum tmp;
    if (t == MSSpectrum::SpectrumSettings::SpectrumType::PROFILE)
    {
      // Profile data: centroid first so the deisotoper operates on peaks.
      pp.pick(spec, tmp);
    }
    else if (t == MSSpectrum::SpectrumSettings::SpectrumType::CENTROID)
    {
      tmp = spec; // make a copy, since deisotopeAndSingleCharge() will modify
    }
    else
    {
      return; // unknown: too dangerous to analyse
    }
    if (tmp.empty())
    {
      return; // something went wrong with the spectrum after peak picking (e.g. returned empty spectrum)
    }
    // Note: this will pick up also non-peptide signals; filtering by averagine might yield better results
    Deisotoper::deisotopeAndSingleCharge(tmp, this->decon_ms1_mz_tol_, false, 1, 10, true, 3, 10, false, true);
    // The deisotoper is expected to append a "charge" integer data array;
    // verify that postcondition before reading it.
    if (tmp.getIntegerDataArrays().empty())
    {
      throw Exception::Postcondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "IntegerDataArray must not be empty!");
    }
    const auto& ida = tmp.getIntegerDataArrays().back();
    if (ida.getName() != "charge")
    {
      throw Exception::Postcondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "IntegerDataArray.back().getName() != \"charge\"");
    }
    // add charges to output map
    for (const auto& q : ida)
    {
      ++cd_[q];
    }
  };
  return f;
}
SwathQC::ChargeDistribution SwathQC::getChargeDistribution(const std::vector<SwathMap>& swath_maps, const size_t nr_samples, const double mz_tol)
{
  // Builds an MS1 charge distribution from up to nr_samples spectra taken
  // uniformly from the given SWATH maps.
  // Fix: removed the unused local 'ChargeDistribution cd;' that shadowed the
  // actual result obtained from the SwathQC instance below.
  SwathQC sq(nr_samples, mz_tol);
  sq.setNrMS1Spectra(0); // leave at 0, such that all incoming spectra are sampled
  auto f_spec = sq.getSpectraProcessingFunc();
  MSSpectrum s; // reused across iterations to avoid repeated allocation
  for (const SwathMap& m : swath_maps)
  {
    // only look at MS1 swath maps
    if (!m.ms1) continue;
    const size_t nr_spec = m.sptr->getNrSpectra();
    for (size_t i = 0; i < nr_spec; ++i)
    {
      // we do not convert all spectra from SWATHMap (hence not using the sampling build into getSpectraProcessingFunc())
      // , since this is potentially expensive, but rather only take the ones we need
      if (!isSubsampledSpectrum_(nr_spec, nr_samples, i)) continue;
      OpenMS::OpenSwathDataAccessHelper::convertToOpenMSSpectrum(m.sptr->getSpectrumById(int(i)), s);
      f_spec(s);
    }
  }
  return sq.getChargeDistribution();
}
void SwathQC::storeJSON(const OpenMS::String& filename)
{
  // Serializes the accumulated MS1 charge distribution as pretty-printed
  // JSON; throws FileNotWritable when the stream ends up in a bad state.
  nlohmann::json root;
  root["ChargeDistributionMS1"] = cd_;
  std::ofstream out_stream(filename);
  out_stream << std::setw(2) << root << std::endl;
  // Check only after writing/flushing, so a full disk is detected as well.
  if (!out_stream) // fail || bad
  {
    throw Exception::FileNotWritable(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, filename);
  }
  out_stream.close();
}
const SwathQC::ChargeDistribution& SwathQC::getChargeDistribution() const
{
  // Read-only access to the charge -> count histogram accumulated so far.
  return cd_;
}
void SwathQC::setNrMS1Spectra(size_t nr)
{
  // Set the expected total number of MS1 spectra; 0 means 'unknown', in which
  // case the subsampling logic (see isSubsampledSpectrum_()) samples everything.
  nr_ms1_spectra_ = nr;
}
bool SwathQC::isSubsampledSpectrum_(const size_t total_spec_count, const size_t subsample_count, const size_t idx)
{
  // Decide whether spectrum 'idx' belongs to a uniform subsample of
  // 'subsample_count' spectra drawn from 'total_spec_count' spectra.
  if (total_spec_count == 0)
  { // unknown total: sample every spectrum
    return true;
  }
  if (idx >= total_spec_count || subsample_count == 0)
  {
    return false;
  }
  // fractional stride guarantees an even spread over the whole index range
  const double total = (double)total_spec_count;
  const double stride = total / std::min(total, (double)subsample_count); // >= 1 by construction
  // number of (possibly fractional) strides needed to reach 'idx'
  const double n_strides = idx / stride;
  // actual sample positions are integral multiples of the stride; test the two
  // candidates bracketing 'idx'
  const double below = std::floor(n_strides) * stride;
  const double above = std::ceil(n_strides) * stride;
  return (std::lround(below) == (long)idx || std::lround(above) == (long)idx);
}
}
// Copyright (c) 2002-present, The OpenMS Team -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Justin Sing $
// $Authors: Justin Sing $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/PeakPickerMobilogram.h>
#include <iostream>
#include <iomanip> // For std::setw
#include <vector>
#include <algorithm> // For std::min_element and std::max_element
#include <fstream>
#include <iostream>
#include <random>
#include <sstream>
namespace OpenMS
{
PeakPickerMobilogram::PeakPickerMobilogram() :
  DefaultParamHandler("PeakPickerMobilogram")
  // ProgressLogger()
{
  // Register all user-facing parameters with their defaults and documentation.
  defaults_.setValue("sgolay_frame_length", 9, "The number of subsequent data points used for smoothing.\nThis number has to be uneven. If it is not, 1 will be added.");
  defaults_.setValue("sgolay_polynomial_order", 3, "Order of the polynomial that is fitted.");
  defaults_.setValue("gauss_width", 0.002, "Gaussian width in seconds, estimated peak size.");
  defaults_.setValue("use_gauss", "false", "Use Gaussian filter for smoothing (alternative is Savitzky-Golay filter)");
  defaults_.setValidStrings("use_gauss", {"false","true"});
  defaults_.setValue("peak_width", -1.0, "Force a certain minimal peak_width on the data (e.g. extend the peak at least by this amount on both sides) in seconds. -1 turns this feature off.");
  defaults_.setValue("signal_to_noise", 1.0, "Signal-to-noise threshold at which a peak will not be extended any more. Note that setting this too high (e.g. 1.0) can lead to peaks whose flanks are not fully captured.");
  defaults_.setMinFloat("signal_to_noise", 0.0);
  defaults_.setValue("sn_win_len", 1, "Signal to noise window length.");
  defaults_.setValue("sn_bin_count", 4, "Signal to noise bin count.");
  defaults_.setValue("write_sn_log_messages", "false", "Write out log messages of the signal-to-noise estimator in case of sparse windows or median in rightmost histogram bin");
  defaults_.setValidStrings("write_sn_log_messages", {"true","false"});
  defaults_.setValue("remove_overlapping_peaks", "false", "Try to remove overlapping peaks during peak picking");
  defaults_.setValidStrings("remove_overlapping_peaks", {"false","true"});
  defaults_.setValue("method", "corrected", "Which method to choose for mobilogram peak-picking (OpenSWATH legacy on raw data, corrected picking on smoothed mobilogram).");
  defaults_.setValidStrings("method", {"legacy","corrected","crawdad"});
  // write defaults into Param object param_ (also triggers updateMembers_)
  defaultsToParam_();
  updateMembers_();
  // Configure the embedded high-resolution peak picker from our own settings.
  // PeakPickerHiRes pp_;
  Param pepi_param = pp_.getDefaults();
  pepi_param.setValue("signal_to_noise", signal_to_noise_);
  // disable spacing constraints, since we're dealing with mobilograms
  pepi_param.setValue("spacing_difference", 0.0);
  pepi_param.setValue("spacing_difference_gap", 0.0);
  pepi_param.setValue("report_FWHM", "true");
  pepi_param.setValue("report_FWHM_unit", "absolute");
  pp_.setParameters(pepi_param);
}
void PeakPickerMobilogram::pickMobilogram(const Mobilogram& mobilogram, Mobilogram& picked_mobilogram)
{
Mobilogram s;
pickMobilogram(mobilogram, picked_mobilogram, s);
}
void PeakPickerMobilogram::pickMobilogram(Mobilogram mobilogram, Mobilogram& picked_mobilogram, Mobilogram& smoothed_mobilogram)
{
  // Smooth the input mobilogram, detect peak apexes with the embedded
  // PeakPickerHiRes, derive peak borders (method-dependent), integrate the raw
  // intensities, and attach borders/areas as float data arrays to
  // 'picked_mobilogram'. 'mobilogram' is taken by value since smoothing works
  // on a copy anyway.
  if (!mobilogram.isSorted())
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                     "Mobilogram needs to be sorted by position.");
  }
  if (mobilogram.empty())
  {
    OPENMS_LOG_DEBUG << " ==== Mobilogram empty. Skip picking.";
    return;
  }
  else
  {
    OPENMS_LOG_DEBUG << " ==== Picking mobilogram with " << mobilogram.size() << " peaks (start at IM " << mobilogram[0].getMobility() << " to IM " << mobilogram.back().getMobility() << ") "
      "using method \'" << method_ << "\'" << std::endl;
  }
  picked_mobilogram.clear();
  // Smooth the mobilogram (Savitzky-Golay by default, Gaussian on request)
  smoothed_mobilogram = mobilogram;
  if (!use_gauss_)
  {
    sgolay_.filter(smoothed_mobilogram);
  }
  else
  {
    gauss_.filter(smoothed_mobilogram);
  }
  // Find initial seeds (peak picking)
  pp_.pick(smoothed_mobilogram, picked_mobilogram);
  // NOTE(review): "crawdad" is accepted by the 'method' parameter but has no
  // branch here; in that case no borders are derived and stale
  // left_width_/right_width_/integrated_intensities_ from a previous call
  // would be read below -- confirm intended handling.
  if (method_ == "legacy")
  {
    // Legacy is to use the original chromatogram for peak-detection
    pickMobilogram_(mobilogram, picked_mobilogram);
    if (remove_overlapping_)
      removeOverlappingPeaks_(mobilogram, picked_mobilogram);
    // for peak integration, we want to use the raw data
    integratePeaks_(mobilogram);
  }
  else if (method_ == "corrected")
  {
    // use the smoothed chromatogram to derive the peak boundaries
    pickMobilogram_(smoothed_mobilogram, picked_mobilogram);
    if (remove_overlapping_)
      removeOverlappingPeaks_(smoothed_mobilogram, picked_mobilogram);
    // for peak integration, we want to use the raw data
    integratePeaks_(mobilogram);
  }
  // Attach per-peak metadata (area, borders as mobility values and as indices)
  picked_mobilogram.getFloatDataArrays().resize(SIZE_OF_FLOATINDICES);
  picked_mobilogram.getFloatDataArrays()[IDX_ABUNDANCE].setName("IntegratedIntensity");
  picked_mobilogram.getFloatDataArrays()[IDX_LEFTBORDER].setName("leftWidth");
  picked_mobilogram.getFloatDataArrays()[IDX_RIGHTBORDER].setName("rightWidth");
  picked_mobilogram.getFloatDataArrays()[IDX_OF_LEFTBORDER_IDX].setName("leftWidthIndex");
  picked_mobilogram.getFloatDataArrays()[IDX_OF_RIGHTBORDER_IDX].setName("rightWidthIndex");
  // just copy FWHM from initial peak picking
  picked_mobilogram.getFloatDataArrays()[IDX_ABUNDANCE].reserve(picked_mobilogram.size());
  picked_mobilogram.getFloatDataArrays()[IDX_LEFTBORDER].reserve(picked_mobilogram.size());
  picked_mobilogram.getFloatDataArrays()[IDX_RIGHTBORDER].reserve(picked_mobilogram.size());
  picked_mobilogram.getFloatDataArrays()[IDX_OF_LEFTBORDER_IDX].reserve(picked_mobilogram.size());
  picked_mobilogram.getFloatDataArrays()[IDX_OF_RIGHTBORDER_IDX].reserve(picked_mobilogram.size());
  for (Size i = 0; i < picked_mobilogram.size(); i++)
  {
    picked_mobilogram.getFloatDataArrays()[IDX_ABUNDANCE].push_back(integrated_intensities_[i]);
    picked_mobilogram.getFloatDataArrays()[IDX_LEFTBORDER].push_back((float)mobilogram[left_width_[i]].getMobility());
    picked_mobilogram.getFloatDataArrays()[IDX_RIGHTBORDER].push_back((float)mobilogram[right_width_[i]].getMobility());
    picked_mobilogram.getFloatDataArrays()[IDX_OF_LEFTBORDER_IDX].push_back(left_width_[i]);
    picked_mobilogram.getFloatDataArrays()[IDX_OF_RIGHTBORDER_IDX].push_back(right_width_[i]);
  }
}
void PeakPickerMobilogram::filterTopPeak(Mobilogram& picked_mobilogram, std::vector<Mobilogram>& mobilograms, PeakPickerMobilogram::PeakPositions& peak_pos)
{
  // Public wrapper: restrict all mobilograms to the most intense picked peak
  // and report its position via 'peak_pos'.
  peak_pos = filterTopPeak_(picked_mobilogram, mobilograms);
}
void PeakPickerMobilogram::filterTopPeak(Mobilogram& picked_mobilogram, Mobilogram& mobilogram, PeakPickerMobilogram::PeakPositions& peak_pos)
{
  // Public wrapper: restrict a single mobilogram to the most intense picked
  // peak and report its position via 'peak_pos'.
  peak_pos = filterTopPeak_(picked_mobilogram, mobilogram);
}
void PeakPickerMobilogram::pickMobilogram_(const Mobilogram& mobilogram, Mobilogram& picked_mobilogram)
{
  // For each picked apex, walk outwards on 'mobilogram' (starting from the
  // data point closest to the apex) while the intensity keeps decreasing (or
  // while a minimal peak_width_ is enforced) to determine the peak borders.
  // Fills left_width_/right_width_ (indices into 'mobilogram') and resets
  // integrated_intensities_ (actual integration happens in integratePeaks_()).
  // Precondition: 'mobilogram' is non-empty (guaranteed by pickMobilogram()).
  integrated_intensities_.clear();
  left_width_.clear();
  right_width_.clear();
  integrated_intensities_.reserve(picked_mobilogram.size());
  left_width_.reserve(picked_mobilogram.size());
  right_width_.reserve(picked_mobilogram.size());
  Size current_peak = 0;
  for (Size i = 0; i < picked_mobilogram.size(); i++)
  {
    const double central_peak_im = picked_mobilogram[i].getMobility();
    current_peak = findClosestPeak_(mobilogram, central_peak_im, current_peak);
    const Size min_i = current_peak;
    // peak core found, now extend it to the left.
    // 'min_i >= k' replaces the former '(min_i - k + 1) > 0': with unsigned
    // 'Size' the old expression wrapped around for an apex at index 0 and
    // caused an out-of-bounds read.
    Size k = 2;
    while (min_i >= k
        && (mobilogram[min_i - k].getIntensity() < mobilogram[min_i - k + 1].getIntensity()
           || (peak_width_ > 0.0 && std::fabs(mobilogram[min_i - k].getMobility() - central_peak_im) < peak_width_)))
    {
      ++k;
    }
    // clamp to the first data point (the old 'min_i - k + 1' could become -1)
    int left_idx = (min_i + 1 >= k) ? int(min_i - k + 1) : 0;
    // to the right
    k = 2;
    while ((min_i + k) < mobilogram.size()
        && (mobilogram[min_i + k].getIntensity() < mobilogram[min_i + k - 1].getIntensity()
           || (peak_width_ > 0.0 && std::fabs(mobilogram[min_i + k].getMobility() - central_peak_im) < peak_width_)))
    {
      ++k;
    }
    // clamp to the last data point ('min_i + k - 1' could run one past the end
    // for an apex at the final data point)
    int right_idx = int(std::min(min_i + k - 1, mobilogram.size() - 1));
    left_width_.push_back(left_idx);
    right_width_.push_back(right_idx);
    integrated_intensities_.push_back(0);
    OPENMS_LOG_DEBUG << "Found peak at " << central_peak_im << " with intensity " << picked_mobilogram[i].getIntensity()
      << " and borders " << mobilogram[left_width_[i]].getMobility() << " " << mobilogram[right_width_[i]].getMobility() <<
      " (" << mobilogram[right_width_[i]].getMobility() - mobilogram[left_width_[i]].getMobility() << ") "
      << 0 << " weighted IM " << /* weighted_mz << */ std::endl;
  }
}
void PeakPickerMobilogram::integratePeaks_(const Mobilogram& mobilogram)
{
for (Size i = 0; i < left_width_.size(); i++)
{
const int current_left_idx = left_width_[i];
const int current_right_idx = right_width_[i];
// Also integrate the intensities
integrated_intensities_[i] = 0;
for (int k = current_left_idx; k <= current_right_idx; k++)
{
integrated_intensities_[i] += mobilogram[k].getIntensity();
}
}
}
Size PeakPickerMobilogram::findClosestPeak_(const Mobilogram& mobilogram, double target_im, Size current_peak)
{
  // Advance 'current_peak' through the (mobility-sorted) mobilogram until the
  // data point closest to 'target_im' is found. The search resumes at
  // 'current_peak', so successive calls with increasing targets stay linear.
  // NOTE(review): if 'target_im' lies beyond the last data point, this returns
  // mobilogram.size(), i.e. a one-past-the-end index -- callers must not use
  // it for element access without checking.
  while (current_peak < mobilogram.size())
  {
    // check if we have walked past the IM of the peak
    if (target_im < mobilogram[current_peak].getMobility())
    {
      // see which one is closer, the current one or the one before
      if (current_peak > 0 &&
          std::fabs(target_im - mobilogram[current_peak - 1].getMobility()) <
          std::fabs(target_im - mobilogram[current_peak].getMobility()))
      {
        current_peak--;
      }
      return current_peak;
    }
    current_peak++;
  }
  return current_peak;
}
PeakPickerMobilogram::PeakPositions PeakPickerMobilogram::findHighestPeak_(const std::vector<double> intensities,
                                                                           const std::vector<Size> left_widths,
                                                                           const std::vector<Size> right_widths,
                                                                           const size_t im_size)
{
  // Locate the most intense picked peak and return its border/apex indices.
  // Without any picked peak, fall back to a pseudo-peak spanning the whole
  // mobilogram with its apex at the center.
  // (Parameters are by value to match the declaration in the header.)
  if (intensities.empty())
  {
    OPENMS_LOG_DEBUG << "No peaks found in mobilogram. Returning peak at center of original mobilogram." << std::endl;
    return PeakPickerMobilogram::PeakPositions{0, im_size / 2, im_size-1};
  }
  // manual scan for the first maximum (equivalent to std::max_element)
  size_t best = 0;
  for (size_t idx = 1; idx < intensities.size(); ++idx)
  {
    if (intensities[idx] > intensities[best])
    {
      best = idx;
    }
  }
  return PeakPickerMobilogram::PeakPositions{left_widths[best], best, right_widths[best]};
}
void PeakPickerMobilogram::filterPeakIntensities_(Mobilogram& mobilogram,
                                                  size_t left_index,
                                                  size_t right_index)
{
  // Reduce 'mobilogram' to the data points in [left_index, right_index]
  // (inclusive). The retained peaks are collected into a temporary first,
  // since the container cannot be erased while being read.
  std::vector<MobilityPeak1D> filtered_peaks;
  filtered_peaks.reserve(right_index - left_index + 1); // single allocation
  for (size_t i = left_index; i <= right_index; ++i)
  {
    filtered_peaks.push_back(mobilogram[i]);
  }
  // Clear existing data and replace with the filtered peaks
  mobilogram.clear();
  for (const auto& peak : filtered_peaks)
  {
    mobilogram.push_back(peak);
  }
}
void PeakPickerMobilogram::filterPeakIntensities_(std::vector<Mobilogram>& mobilograms,
                                                  size_t left_index,
                                                  size_t right_index)
{
  // Crop every mobilogram to [left_index, right_index] by delegating to the
  // single-mobilogram overload (removes the previously duplicated loop body).
  for (auto& mobilogram : mobilograms)
  {
    filterPeakIntensities_(mobilogram, left_index, right_index);
  }
}
std::vector<double> PeakPickerMobilogram::extractFloatValues_(const OpenMS::DataArrays::FloatDataArray& floatDataArray)
{
  // Copy a FloatDataArray into a plain std::vector<double>.
  std::vector<double> values;
  values.reserve(floatDataArray.size());
  for (size_t i = 0; i < floatDataArray.size(); ++i)
  {
    values.push_back(static_cast<double>(floatDataArray[i]));
  }
  return values;
}
std::vector<std::size_t> PeakPickerMobilogram::extractIntValues_(const OpenMS::DataArrays::FloatDataArray& floatDataArray)
{
  // Convert a FloatDataArray holding index values into a vector of size_t.
  std::vector<std::size_t> indices;
  indices.reserve(floatDataArray.size());
  for (size_t i = 0; i < floatDataArray.size(); ++i)
  {
    indices.push_back(static_cast<std::size_t>(floatDataArray[i]));
  }
  return indices;
}
PeakPickerMobilogram::PeakPositions PeakPickerMobilogram::filterTopPeak_(Mobilogram& picked_mobilogram, std::vector<Mobilogram>& mobilograms)
{
  // Determine the most intense picked peak and crop every mobilogram in
  // 'mobilograms' down to its borders; returns the peak position (indices
  // refer to the original, uncropped data).
  // NOTE(review): assumes 'mobilograms' is non-empty and that
  // 'picked_mobilogram' carries the float data arrays filled by
  // pickMobilogram() -- confirm callers guarantee both.
  const auto& apex_abundance_data = extractFloatValues_(picked_mobilogram.getFloatDataArrays()[IDX_ABUNDANCE]);
  const auto& leftwidth_data = extractIntValues_(picked_mobilogram.getFloatDataArrays()[IDX_OF_LEFTBORDER_IDX]);
  const auto& rightwidth_data = extractIntValues_(picked_mobilogram.getFloatDataArrays()[IDX_OF_RIGHTBORDER_IDX]);
  PeakPositions peak_pos = findHighestPeak_(apex_abundance_data, leftwidth_data, rightwidth_data, mobilograms[0].size());
  OPENMS_LOG_DEBUG << " -- filtering mobilograms for highest peak at positions " << "(" << peak_pos.left << " - " << peak_pos.right << ")" << std::endl;
  filterPeakIntensities_(mobilograms, peak_pos.left, peak_pos.right);
  return peak_pos;
}
PeakPickerMobilogram::PeakPositions PeakPickerMobilogram::filterTopPeak_(Mobilogram& picked_mobilogram, Mobilogram& mobilogram)
{
  // Determine the most intense picked peak and crop 'mobilogram' down to its
  // borders; returns the peak position (indices refer to the original data).
  auto& fda = picked_mobilogram.getFloatDataArrays();
  std::vector<double> apex_intensities = extractFloatValues_(fda[IDX_ABUNDANCE]);
  std::vector<std::size_t> left_borders = extractIntValues_(fda[IDX_OF_LEFTBORDER_IDX]);
  std::vector<std::size_t> right_borders = extractIntValues_(fda[IDX_OF_RIGHTBORDER_IDX]);
  const PeakPositions peak_pos = findHighestPeak_(apex_intensities, left_borders, right_borders, mobilogram.size());
  OPENMS_LOG_DEBUG << " -- filtering mobilogram for highest peak at positions " << "(" << peak_pos.left << " - " << peak_pos.right << ")" << std::endl;
  filterPeakIntensities_(mobilogram, peak_pos.left, peak_pos.right);
  return peak_pos;
}
void PeakPickerMobilogram::removeOverlappingPeaks_(const Mobilogram& mobilogram, Mobilogram& picked_mobilogram)
{
if (picked_mobilogram.empty()) {return; }
OPENMS_LOG_DEBUG << "Remove overlapping peaks now (size " << picked_mobilogram.size() << ")" << std::endl;
Size current_peak = 0;
// Find overlapping peaks
for (Size i = 0; i < picked_mobilogram.size() - 1; i++)
{
// Check whether the current right overlaps with the next left
// See whether we can correct this and find some border between the two
// features ...
if (right_width_[i] > left_width_[i + 1])
{
const int current_left_idx = left_width_[i];
const int current_right_idx = right_width_[i];
const int next_left_idx = left_width_[i + 1];
const int next_right_idx = right_width_[i + 1];
OPENMS_LOG_DEBUG << " Found overlapping " << i << " : " << current_left_idx << " " << current_right_idx << std::endl;
OPENMS_LOG_DEBUG << " -- with " << i + 1 << " : " << next_left_idx << " " << next_right_idx << std::endl;
// Find the peak width and best IM
double central_peak_mz = picked_mobilogram[i].getMobility();
double next_peak_mz = picked_mobilogram[i + 1].getMobility();
current_peak = findClosestPeak_(mobilogram, central_peak_mz, current_peak);
Size next_peak = findClosestPeak_(mobilogram, next_peak_mz, current_peak);
// adjust the right border of the current and left border of next
Size k = 1;
while ((current_peak + k) < mobilogram.size()
&& (mobilogram[current_peak + k].getIntensity() < mobilogram[current_peak + k - 1].getIntensity()))
{
++k;
}
Size new_right_border = current_peak + k - 1;
k = 1;
while ((next_peak - k + 1) > 0
&& (mobilogram[next_peak - k].getIntensity() < mobilogram[next_peak - k + 1].getIntensity()))
{
++k;
}
Size new_left_border = next_peak - k + 1;
// assert that the peaks are now not overlapping any more ...
if (new_left_border < new_right_border)
{
std::cerr << "Something went wrong, peaks are still overlapping!" << " - new left border " << new_left_border << " vs " << new_right_border << " -- will take the mean" << std::endl;
new_left_border = (new_left_border + new_right_border) / 2;
new_right_border = (new_left_border + new_right_border) / 2;
}
OPENMS_LOG_DEBUG << "New peak l: " << mobilogram[current_left_idx].getMobility() << " " << mobilogram[new_right_border].getMobility() << " int " << integrated_intensities_[i] << std::endl;
OPENMS_LOG_DEBUG << "New peak r: " << mobilogram[new_left_border].getMobility() << " " << mobilogram[next_right_idx].getMobility() << " int " << integrated_intensities_[i + 1] << std::endl;
right_width_[i] = new_right_border;
left_width_[i + 1] = new_left_border;
}
}
}
void PeakPickerMobilogram::updateMembers_()
{
  // Sync cached members from param_ (called by DefaultParamHandler whenever
  // parameters change) and propagate the smoothing settings to the filters.
  sgolay_frame_length_ = (UInt)param_.getValue("sgolay_frame_length");
  sgolay_polynomial_order_ = (UInt)param_.getValue("sgolay_polynomial_order");
  gauss_width_ = (double)param_.getValue("gauss_width");
  peak_width_ = (double)param_.getValue("peak_width");
  signal_to_noise_ = (double)param_.getValue("signal_to_noise");
  sn_win_len_ = (double)param_.getValue("sn_win_len");
  sn_bin_count_ = (UInt)param_.getValue("sn_bin_count");
  // TODO make list, not boolean
  use_gauss_ = (bool)param_.getValue("use_gauss").toBool();
  write_sn_log_messages_ = (bool)param_.getValue("write_sn_log_messages").toBool();
  method_ = (String)param_.getValue("method").toString();
  // forward the relevant settings to the Savitzky-Golay filter ...
  Param sg_filter_parameters = sgolay_.getParameters();
  sg_filter_parameters.setValue("frame_length", sgolay_frame_length_);
  sg_filter_parameters.setValue("polynomial_order", sgolay_polynomial_order_);
  sgolay_.setParameters(sg_filter_parameters);
  // ... and to the Gaussian filter
  Param gfilter_parameters = gauss_.getParameters();
  gfilter_parameters.setValue("gaussian_width", gauss_width_);
  gauss_.setParameters(gfilter_parameters);
}
}
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/KERNEL/Mobilogram.h>
#include <OpenMS/KERNEL/MobilityPeak1D.h>
#include <OpenMS/ANALYSIS/OPENSWATH/IonMobilityScoring.h>
#include <OpenMS/CONCEPT/Macros.h>
#include <OpenMS/CONCEPT/LogStream.h>
// scoring
#include <OpenMS/OPENSWATHALGO/ALGO/Scoring.h>
#include <OpenMS/ANALYSIS/OPENSWATH/MRMScoring.h>
// auxiliary
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/DataAccessHelper.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/ANALYSIS/OPENSWATH/SpectrumAddition.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DIAHelper.h>
#include <OpenMS/ANALYSIS/OPENSWATH/PeakPickerMobilogram.h>
// #define DEBUG_IMSCORING
namespace OpenMS
{
std::vector<double> IonMobilityScoring::computeGrid_(const std::vector< Mobilogram >& mobilograms, double eps)
{
  // Build a common ion-mobility grid: gather every IM value present in any of
  // the profiles, sort them, and merge values within 'eps' of their
  // predecessor into a single grid point.
  std::vector<double> all_im;
  for (const auto& profile : mobilograms)
  {
    all_im.reserve(all_im.size() + profile.size());
    for (const auto& peak : profile)
    {
      all_im.push_back(peak.getMobility());
    }
  }
  // sort all extracted values
  std::sort(all_im.begin(), all_im.end());
  // In some cases there are not enough datapoints available (one of the
  // transitions has no datapoints) -- the grid then stays empty.
  std::vector<double> im_grid;
  if (!all_im.empty())
  {
    im_grid.push_back(all_im.front());
    for (Size k = 1; k < all_im.size(); k++)
    {
      if (fabs(all_im[k] - all_im[k - 1]) > eps)
      {
        im_grid.push_back(all_im[k]);
      }
    }
  }
  return im_grid;
}
void IonMobilityScoring::alignToGrid_(const Mobilogram& profile,
                                      const std::vector<double>& im_grid,
                                      Mobilogram & aligned_profile,
                                      double eps,
                                      Size & max_peak_idx)
{
  // Project 'profile' onto the common grid 'im_grid': grid positions matched
  // by a profile point (within eps*10) take that point's intensity, all other
  // grid positions get zero-intensity padding. Also reports the grid index of
  // the most intense point via 'max_peak_idx'.
  auto pr_it = profile.begin();
  max_peak_idx = 0;
  double max_int = 0;
  for (Size k = 0; k < im_grid.size(); k++)
  {
    MobilityPeak1D mobi_peak;
    // In each iteration, the IM value of pr_it should be equal to or
    // larger than the master container. If it is equal, we add the current
    // data point, if it is larger we add zero and advance the counter k.
    if (pr_it != profile.end() && fabs(pr_it->getMobility() - im_grid[k] ) < eps*10)
    {
      mobi_peak.setIntensity(pr_it->getIntensity());
      mobi_peak.setMobility(pr_it->getMobility());
      ++pr_it;
    }
    else
    {
      mobi_peak.setIntensity(0.0);
      mobi_peak.setMobility(im_grid[k]);
    }
    // OPENMS_LOG_DEBUG << "grid position " << im_grid[k] << " profile position " << pr_it->first << '\n';
    // check that we did not advance past
    if (pr_it != profile.end() && (im_grid[k] - pr_it->getMobility()) > eps*10)
    {
      OPENMS_LOG_ERROR << "This should never happen, pr_it has advanced past the master container: " << im_grid[k] << " / " << pr_it->getMobility() << '\n';
      throw Exception::OutOfRange(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION);
    }
    // collect maxima
    // NOTE(review): pr_it may already have been advanced above, so this
    // compares the intensity of the NEXT profile point while recording the
    // current grid index 'k' -- confirm this off-by-one is intended.
    if (pr_it != profile.end() && pr_it->getIntensity() > max_int)
    {
      max_int = pr_it->getIntensity();
      max_peak_idx = k;
    }
    aligned_profile.push_back(mobi_peak);
  }
}
void IonMobilityScoring::extractIntensities(const std::vector< Mobilogram >& mobilograms,
                                            std::vector<std::vector<double>>& int_values)
{
  // Convert each mobilogram into a plain vector of its intensities,
  // delegating the per-mobilogram work to the single-argument overload.
  int_values.clear();
  int_values.reserve(mobilograms.size());
  for (const auto& profile : mobilograms)
  {
    int_values.emplace_back(extractIntensities(profile));
  }
}
std::vector<double> IonMobilityScoring::extractIntensities(const Mobilogram& mobilogram) {
std::vector<double> mobility_int;
mobility_int.reserve(mobilogram.size());
for (const auto& k : mobilogram)
{
mobility_int.push_back(k.getIntensity());
}
return mobility_int;
}
// compute ion mobilogram as well as im weighted average. This is based off of integrateWindows() in DIAHelper.cpp
void IonMobilityScoring::computeIonMobilogram(const SpectrumSequence& spectra,
                                              const RangeMZ& mz_range,
                                              const RangeMobility& im_range,
                                              double & im,
                                              double & intensity,
                                              Mobilogram & res,
                                              double eps)
{
  // Extract an ion mobilogram for the given m/z window across all spectra:
  // intensities falling into mz_range/im_range are binned by ion mobility
  // (bin width 'eps') and appended to 'res'. Also computes the
  // intensity-weighted average IM ('im') and the total intensity.
  // NOTE(review): 'im' and 'intensity' are accumulated onto their incoming
  // values -- callers are expected to initialize both to 0.
  // rounding multiplier for the ion mobility value
  // TODO: how to improve this -- will work up to 42949.67296
  double IM_IDX_MULT = 1/eps;
  // We need to store all values that map to the same ion mobility in the
  // same spot in the ion mobilogram (they are not sorted by ion mobility in
  // the input data), therefore create a map to map to bins.
  std::map< int, double> im_chrom;
  for (const auto& spectrum : spectra)
  {
    OPENMS_PRECONDITION(spectrum->getDriftTimeArray() != nullptr, "Cannot filter by drift time if no drift time is available.");
    OPENMS_PRECONDITION(spectrum->getMZArray()->data.size() == spectrum->getIntensityArray()->data.size(), "MZ and Intensity array need to have the same length.");
    OPENMS_PRECONDITION(spectrum->getMZArray()->data.size() == spectrum->getDriftTimeArray()->data.size(), "MZ and Drift Time array need to have the same length.");
    auto mz_arr_end = spectrum->getMZArray()->data.end();
    auto int_it = spectrum->getIntensityArray()->data.begin();
    auto im_it = spectrum->getDriftTimeArray()->data.begin();
    // this assumes that the spectra are sorted!
    auto mz_it = std::lower_bound(spectrum->getMZArray()->data.begin(), mz_arr_end, mz_range.getMin());
    // auto mz_it_end = std::lower_bound(mz_it, mz_arr_end, mz_end);
    // also advance intensity and ion mobility iterator now
    auto iterator_pos = std::distance(spectrum->getMZArray()->data.begin(), mz_it);
    std::advance(int_it, iterator_pos);
    std::advance(im_it, iterator_pos);
    // Start iteration from mz start, end iteration when mz value is larger than mz_end, only store only storing ion mobility values that are in the range
    double mz_end = mz_range.getMax();
    while ((mz_it < mz_arr_end) && (*mz_it < mz_end))
    {
      if (im_range.contains(*im_it))
      {
        intensity += (*int_it);
        im += (*int_it) * (*im_it);
        im_chrom[ int((*im_it)*IM_IDX_MULT) ] += *int_it;
      }
      ++mz_it;
      ++int_it;
      ++im_it;
    }
  }
  // compute the weighted average ion mobility
  if (intensity > 0.)
  {
    im /= intensity;
  }
  else
  {
    // no signal in the window: flag the IM as invalid
    im = -1;
    intensity = 0;
  }
  // append the binned mobilogram (bin center = bin index * eps)
  res.reserve(res.size() + im_chrom.size());
  for (const auto& k : im_chrom)
  {
    res.emplace_back(k.first / IM_IDX_MULT, k.second ); // add MobilityPeak1D(mobility, intensity)
  }
}
Mobilogram sumAlignedMobilograms(const std::vector<Mobilogram>& aligned_mobilograms)
{
  // Element-wise sum of several grid-aligned mobilograms; the mobility axis is
  // taken from the first profile.
  if (aligned_mobilograms.empty()) return {};
  OPENMS_PRECONDITION(
    std::all_of(aligned_mobilograms.begin() + 1, aligned_mobilograms.end(),
                [reference_size = aligned_mobilograms[0].size()](const Mobilogram& mobilogram) {
                  return mobilogram.size() == reference_size;
                }),
    "All Mobilograms in aligned_mobilograms must have the same size."
  );
  const Mobilogram& reference = aligned_mobilograms[0];
  Mobilogram result;
  // one allocation up front
  result.reserve(reference.size());
  for (size_t pos = 0; pos < reference.size(); ++pos)
  {
    MobilityPeak1D accum{reference[pos].getMobility(), 0.0};
    // add the intensity of every profile at this grid position
    for (const auto& profile : aligned_mobilograms)
    {
      // defensive size check kept from the original implementation
      if (pos < profile.size())
      {
        accum.setIntensity(accum.getIntensity() + profile[pos].getIntensity());
      }
    }
    result.push_back(accum);
  }
  return result;
}
/// Constructor (defaulted)
IonMobilityScoring::IonMobilityScoring() = default;
/// Destructor (defaulted)
IonMobilityScoring::~IonMobilityScoring() = default;
void IonMobilityScoring::driftScoringMS1Contrast(const SpectrumSequence& spectra, const SpectrumSequence& ms1spectrum,
                                                 const std::vector<TransitionType> & transitions,
                                                 OpenSwath_Scores & scores,
                                                 RangeMobility im_range,
                                                 const double dia_extract_window_,
                                                 const bool dia_extraction_ppm_,
                                                 const double drift_extra)
{
  // Compute precursor-vs-fragment ion mobility contrast scores: extract ion
  // mobilograms for all MS2 transitions and for the MS1 precursor, align them
  // to a common IM grid, and cross-correlate MS1 against the individual and
  // the summed MS2 profiles. Writes im_ms1_contrast_* and
  // im_ms1_sum_contrast_* into 'scores'. Returns early (scores untouched) if
  // any spectrum lacks drift time data.
  OPENMS_PRECONDITION(!spectra.empty(), "Spectra cannot be empty")
  OPENMS_PRECONDITION(!ms1spectrum.empty(), "MS1 spectrum cannot be empty")
  OPENMS_PRECONDITION(!transitions.empty(), "Need at least one transition");
  //TODO not sure what error format is best
  for (const auto& s:spectra)
  {
    if (s->getDriftTimeArray() == nullptr)
    {
      OPENMS_LOG_DEBUG << " ERROR: Drift time is missing in ion mobility spectrum!\n";
      return;
    }
  }
  for (const auto& s:ms1spectrum)
  {
    if (s->getDriftTimeArray() == nullptr)
    {
      OPENMS_LOG_DEBUG << " ERROR: Drift time is missing in MS1 ion mobility spectrum!\n";
      return;
    }
  }
  double eps = 1e-5; // eps for two grid cells to be considered equal
  // extend IM range by drift_extra
  im_range.scaleBy(drift_extra * 2. + 1); // multiple by 2 because want drift extra to be extended by that amount on either side
  // Step 1: MS2 extraction -- one ion mobilogram per transition
  std::vector< Mobilogram > ms2_mobilograms;
  for (std::size_t k = 0; k < transitions.size(); k++)
  {
    double im(0), intensity(0);
    Mobilogram res;
    const TransitionType transition = transitions[k];
    // Calculate the difference of the theoretical ion mobility and the actually measured ion mobility
    RangeMZ mz_range = DIAHelpers::createMZRangePPM(transition.getProductMZ(), dia_extract_window_, dia_extraction_ppm_);
    computeIonMobilogram(spectra, mz_range, im_range, im, intensity, res, eps);
    ms2_mobilograms.push_back(std::move(res));
  }
  // Step 2: MS1 extraction (precursor mobilogram)
  double im(0), intensity(0);
  Mobilogram ms1_profile;
  std::vector< Mobilogram > ms1_mobilograms;
  RangeMZ mz_range = DIAHelpers::createMZRangePPM(transitions[0].getPrecursorMZ(), dia_extract_window_, dia_extraction_ppm_);
  computeIonMobilogram(ms1spectrum, mz_range, im_range, im, intensity, ms1_profile, eps); // TODO: aggregate over isotopes
  // temporarily include the MS1 profile so the grid covers its IM values, too
  ms2_mobilograms.push_back(ms1_profile);
  std::vector<double> im_grid = computeGrid_(ms2_mobilograms, eps); // ensure grid is based on all profiles!
  ms2_mobilograms.pop_back();
  // Step 3: Align the IonMobilogram vectors to the grid
  std::vector< Mobilogram > aligned_ms2_mobilograms;
  for (const auto & mobilogram : ms2_mobilograms)
  {
    Mobilogram aligned_mobilogram;
    Size max_peak_idx = 0;
    alignToGrid_(mobilogram, im_grid, aligned_mobilogram, eps, max_peak_idx);
    aligned_ms2_mobilograms.push_back(aligned_mobilogram);
  }
  Mobilogram aligned_ms1_mobilograms;
  Size max_peak_idx = 0;
  alignToGrid_(ms1_profile, im_grid, aligned_ms1_mobilograms, eps, max_peak_idx);
  std::vector<double> ms1_int_values;
  ms1_int_values.reserve(aligned_ms1_mobilograms.size());
  for (const auto & k : aligned_ms1_mobilograms)
  {
    ms1_int_values.push_back(k.getIntensity());
  }
  // Step 4: MS1 contrast scores (precursor vs each fragment profile)
  std::vector< std::vector< double > > aligned_int_vec;
  extractIntensities(aligned_ms2_mobilograms, aligned_int_vec);
  {
    OpenSwath::MRMScoring mrmscore_;
    mrmscore_.initializeXCorrPrecursorContrastMatrix({ms1_int_values}, aligned_int_vec);
    OPENMS_LOG_DEBUG << "all-all: Contrast Scores : coelution precursor : " << mrmscore_.calcXcorrPrecursorContrastCoelutionScore() << " / shape precursor " <<
      mrmscore_.calcXcorrPrecursorContrastShapeScore() << '\n';
    scores.im_ms1_contrast_coelution = mrmscore_.calcXcorrPrecursorContrastCoelutionScore();
    scores.im_ms1_contrast_shape = mrmscore_.calcXcorrPrecursorContrastShapeScore();
  }
  // Step 5: contrast precursor vs summed fragment ions
  std::vector<double> fragment_values;
  fragment_values.resize(ms1_int_values.size(), 0);
  for (Size k = 0; k < fragment_values.size(); k++)
  {
    for (Size i = 0; i < aligned_int_vec.size(); i++)
    {
      fragment_values[k] += aligned_int_vec[i][k];
    }
  }
  OpenSwath::MRMScoring mrmscore_;
  // horribly broken: provides vector of length 1, but expects at least length 2 in calcXcorrPrecursorContrastCoelutionScore()
  mrmscore_.initializeXCorrPrecursorContrastMatrix({ms1_int_values}, {fragment_values});
  OPENMS_LOG_DEBUG << "Contrast Scores : coelution precursor : " << mrmscore_.calcXcorrPrecursorContrastSumFragCoelutionScore() << " / shape precursor " <<
    mrmscore_.calcXcorrPrecursorContrastSumFragShapeScore() << '\n';
  // in order to prevent assertion error call calcXcorrPrecursorContrastSumFragCoelutionScore, same as calcXcorrPrecursorContrastCoelutionScore() however different assertion
  scores.im_ms1_sum_contrast_coelution = mrmscore_.calcXcorrPrecursorContrastSumFragCoelutionScore();
  // in order to prevent assertion error call calcXcorrPrecursorContrastSumFragShapeScore(), same as calcXcorrPrecursorContrastShapeScore() however different assertion.
  scores.im_ms1_sum_contrast_shape = mrmscore_.calcXcorrPrecursorContrastSumFragShapeScore();
}
void IonMobilityScoring::driftScoringMS1(const SpectrumSequence & spectra,
                                         const std::vector<TransitionType> & transitions,
                                         OpenSwath_Scores & scores,
                                         const double drift_target,
                                         RangeMobility im_range,
                                         const double dia_extract_window_,
                                         const bool dia_extraction_ppm_,
                                         const double drift_extra)
{
  // Score the measured MS1 precursor drift time against the expected (library)
  // drift time 'drift_target'. Writes im_ms1_drift, im_ms1_delta and
  // im_ms1_delta_score into 'scores'. Returns early (scores untouched) if any
  // spectrum lacks drift time data.
  OPENMS_PRECONDITION(!spectra.empty(), "Spectra cannot be empty")
  OPENMS_PRECONDITION(!transitions.empty(), "Need at least one transition");
  // const ref: avoids copying the spectrum handles on every iteration
  // (consistent with driftScoringMS1Contrast above)
  for (const auto& s : spectra)
  {
    if (s->getDriftTimeArray() == nullptr)
    {
      OPENMS_LOG_DEBUG << " ERROR: Drift time is missing in ion mobility spectrum!\n";
      return;
    }
  }
  im_range.scaleBy(drift_extra * 2. + 1); // multiple by 2 because want drift extra to be extended by that amount on either side
  double im(0), intensity(0), mz(0);
  RangeMZ mz_range = DIAHelpers::createMZRangePPM(transitions[0].getPrecursorMZ(), dia_extract_window_, dia_extraction_ppm_);
  DIAHelpers::integrateWindow(spectra, mz, im, intensity, mz_range, im_range);
  // Record the measured ion mobility
  scores.im_ms1_drift = im;
  // Calculate the difference of the theoretical ion mobility and the actually measured ion mobility
  scores.im_ms1_delta_score = fabs(drift_target - im);
  scores.im_ms1_delta = drift_target - im;
}
// Scores fragment-ion (MS2) signals in the ion-mobility dimension: extracts
// one ion mobilogram per transition, derives drift-time deviation scores,
// optionally picks the summed mobilogram peak (reporting drift boundaries),
// and computes cross-correlation (co-elution / shape) scores across the
// grid-aligned mobilograms.
// Returns early (leaving 'scores' untouched) if any spectrum lacks drift data.
void IonMobilityScoring::driftScoring(const SpectrumSequence& spectra,
                                      const std::vector<TransitionType> & transitions,
                                      OpenSwath_Scores & scores,
                                      const double drift_target,
                                      RangeMobility im_range,
                                      const double dia_extract_window_,
                                      const bool dia_extraction_ppm_,
                                      const double drift_extra,
                                      const bool apply_im_peak_picking)
{
  OPENMS_PRECONDITION(!spectra.empty(), "Spectra cannot be empty");

  // Iterate by const reference: SpectrumPtr is a shared pointer, copying it
  // per iteration would needlessly churn the reference count.
  for (const auto& s : spectra)
  {
    if (s->getDriftTimeArray() == nullptr)
    {
      OPENMS_LOG_DEBUG << " ERROR: Drift time is missing in ion mobility spectrum!\n";
      return;
    }
  }

  double eps = 1e-5; // eps for two grid cells to be considered equal

  // Widen the mobility window by 'drift_extra' on either side (factor of 2
  // because the extension applies to both ends of the range).
  im_range.scaleBy(drift_extra * 2. + 1);

  double delta_drift = 0;
  double delta_drift_abs = 0;
  double computed_im = 0;
  double computed_im_weighted = 0;
  double sum_intensity = 0;
  int tr_used = 0;

  // Step 1: MS2 extraction -- one mobilogram per transition
  std::vector< Mobilogram > ms2_mobilograms;
  for (std::size_t k = 0; k < transitions.size(); k++)
  {
    // Take a reference: copying the transition object per iteration is wasteful.
    const TransitionType& transition = transitions[k];
    Mobilogram res;
    double im(0), intensity(0);

    RangeMZ mz_range = DIAHelpers::createMZRangePPM(transition.getProductMZ(), dia_extract_window_, dia_extraction_ppm_);
    computeIonMobilogram(spectra, mz_range, im_range, im, intensity, res, eps);
    ms2_mobilograms.push_back(std::move(res));

    // TODO what do to about those that have no signal ?
    if (intensity <= 0.0) {continue;} // note: im is -1 then

    tr_used++;
    delta_drift_abs += fabs(drift_target - im);
    delta_drift += drift_target - im;
    OPENMS_LOG_DEBUG << " -- have delta drift time " << fabs(drift_target -im ) << " with im " << im << '\n';
    computed_im += im;
    computed_im_weighted += im * intensity;
    sum_intensity += intensity;
  }

  // Average the per-transition values; -1 signals "no usable transition".
  if (tr_used != 0)
  {
    delta_drift /= tr_used;
    delta_drift_abs /= tr_used;
    computed_im /= tr_used;
    computed_im_weighted /= sum_intensity;
  }
  else
  {
    delta_drift = -1;
    delta_drift_abs = -1;
    computed_im = -1;
    computed_im_weighted = -1;
  }

  OPENMS_LOG_DEBUG << " Scoring delta drift time " << delta_drift << '\n';
  OPENMS_LOG_DEBUG << " Scoring weighted delta drift time " << computed_im_weighted << " -> get difference " << std::fabs(computed_im_weighted - drift_target)<< '\n';
  scores.im_delta_score = delta_drift_abs;
  scores.im_delta = delta_drift;
  scores.im_drift = computed_im;
  scores.im_drift_weighted = computed_im_weighted;
  scores.im_log_intensity = std::log(sum_intensity + 1);

  // Step 2: Align the IonMobilogram vectors to a common grid
  std::vector<double> im_grid = computeGrid_(ms2_mobilograms, eps);
  std::vector< Mobilogram > aligned_ms2_mobilograms;
  for (const auto & mobilogram : ms2_mobilograms)
  {
    Mobilogram aligned_mobilogram;
    Size max_peak_idx = 0;
    alignToGrid_(mobilogram, im_grid, aligned_mobilogram, eps, max_peak_idx);
    if (!aligned_mobilogram.empty()) aligned_ms2_mobilograms.push_back(std::move(aligned_mobilogram));
  }

  // Optional: pick the peak of the summed mobilogram to report drift boundaries.
  if ( apply_im_peak_picking ) {
    PeakPickerMobilogram::PeakPositions peak_pos{};
    // Ion mobilogram cannot be empty and cannot have a single point
    if ( !aligned_ms2_mobilograms.empty() && aligned_ms2_mobilograms[0].size()!=1 )
    {
      Mobilogram summed_mobilogram = sumAlignedMobilograms(aligned_ms2_mobilograms);
      PeakPickerMobilogram picker_;
      Param picker_params = picker_.getParameters();
      picker_params.setValue("method", "corrected");
      picker_.setParameters(picker_params);
      Mobilogram picked_mobilogram;
      picker_.pickMobilogram(summed_mobilogram, picked_mobilogram);
      picker_.filterTopPeak(picked_mobilogram, aligned_ms2_mobilograms, peak_pos);
      scores.im_drift_left = im_grid[peak_pos.left];
      scores.im_drift_right = im_grid[peak_pos.right];
    }
    else
    {
      scores.im_drift_left = -1;
      scores.im_drift_right = -1;
    }
  }

  // Step 3: Compute cross-correlation scores based on ion mobilograms.
  // Need at least two mobilograms for a meaningful cross-correlation.
  if (aligned_ms2_mobilograms.size() < 2)
  {
    scores.im_xcorr_coelution_score = 0;
    scores.im_xcorr_shape_score = std::numeric_limits<double>::quiet_NaN();
    return;
  }
  std::vector< std::vector< double > > aligned_int_vec;
  extractIntensities(aligned_ms2_mobilograms, aligned_int_vec);
  OpenSwath::MRMScoring mrmscore_;
  mrmscore_.initializeXCorrMatrix(aligned_int_vec);
  double xcorr_coelution_score = mrmscore_.calcXcorrCoelutionScore();
  double xcorr_shape_score = mrmscore_.calcXcorrShapeScore(); // can be nan!
  scores.im_xcorr_coelution_score = xcorr_coelution_score;
  scores.im_xcorr_shape_score = xcorr_shape_score;
}
// Scores a single identification transition in the ion-mobility dimension
// against the detection transitions of the same analyte: extracts the
// identification mobilogram, aligns it with the detection mobilograms on a
// common grid and computes contrast (co-elution / shape) scores, both
// all-vs-all and against the summed detection trace.
// Returns early (leaving 'scores' untouched) if any spectrum lacks drift data.
void IonMobilityScoring::driftIdScoring(const SpectrumSequence& spectra,
                                        const std::vector<TransitionType> & transition,
                                        MRMTransitionGroupType& trgr_detect,
                                        OpenSwath_Scores &scores,
                                        const double drift_target,
                                        RangeMobility im_range,
                                        const double dia_extract_window_,
                                        const bool dia_extraction_ppm_,
                                        const double drift_extra,
                                        const bool apply_im_peak_picking)
{
  OPENMS_PRECONDITION(!spectra.empty(), "Spectra cannot be empty");

  // Iterate by const reference: SpectrumPtr is a shared pointer, copying it
  // per iteration would needlessly churn the reference count.
  for (const auto& s : spectra)
  {
    if (s->getDriftTimeArray() == nullptr)
    {
      OPENMS_LOG_DEBUG << " ERROR: Drift time is missing in ion mobility spectrum!\n";
      return;
    }
  }

  double eps = 1e-5; // eps for two grid cells to be considered equal

  // Widen the mobility window by 'drift_extra' on either side (factor of 2
  // because the extension applies to both ends of the range).
  im_range.scaleBy(drift_extra * 2. + 1);

  Mobilogram res;
  double im(0), intensity(0);
  RangeMZ mz_range = DIAHelpers::createMZRangePPM(transition[0].getProductMZ(), dia_extract_window_, dia_extraction_ppm_);
  computeIonMobilogram(spectra, mz_range, im_range, im, intensity, res, eps);

  // Record the measured ion mobility
  scores.im_drift = im;

  // Difference between the theoretical and the actually measured ion mobility
  scores.im_delta_score = fabs(drift_target - im);
  scores.im_delta = drift_target - im;
  scores.im_log_intensity = std::log1p(intensity);

  OPENMS_LOG_DEBUG << "Identification Transition IM Scoring for " << transition[0].transition_name << " range (" << im_range.getMin() << " - " << im_range.getMax() << ") IM = " << im << " im_delta = " << drift_target - im << " int = " << intensity << " log int = " << std::log(intensity+1) << '\n';

  // Cross-Correlation of Identification against Detection Mobilogram Features
  std::vector< Mobilogram > mobilograms;

  // Step 1: MS2 detection transitions extraction
  for (std::size_t k = 0; k < trgr_detect.getTransitions().size(); k++)
  {
    double detection_im(0), detection_intensity(0);
    Mobilogram detection_mobilograms;
    // Take a reference: copying the transition object per iteration is wasteful.
    const TransitionType& detection_transition = trgr_detect.getTransitions()[k];
    RangeMZ detection_mz_range = DIAHelpers::createMZRangePPM(detection_transition.getProductMZ(), dia_extract_window_, dia_extraction_ppm_);
    computeIonMobilogram(spectra, detection_mz_range, im_range, detection_im, detection_intensity, detection_mobilograms, eps);
    mobilograms.push_back( std::move(detection_mobilograms) );
  }

  // Step 2: MS2 single identification transition extraction
  double identification_im(0), identification_intensity(0);
  Mobilogram identification_mobilogram;
  RangeMZ identification_mz_range = DIAHelpers::createMZRangePPM(transition[0].getProductMZ(), dia_extract_window_, dia_extraction_ppm_);
  computeIonMobilogram(spectra, identification_mz_range, im_range, identification_im, identification_intensity, identification_mobilogram, eps);
  // Deliberate copy (no std::move): identification_mobilogram is still needed
  // for the grid alignment below.
  mobilograms.push_back(identification_mobilogram);

  // Check to make sure IM of identification is not -1, otherwise assign 0 for scores
  if ( identification_im != -1 )
  {
    std::vector<double> im_grid = computeGrid_(mobilograms, eps); // ensure grid is based on all profiles!
    mobilograms.pop_back(); // drop the identification profile again; only detection profiles are aligned here

    // Step 3.0: Align the detection IonMobilogram vectors to the grid
    std::vector< Mobilogram > aligned_mobilograms;
    for (const auto &mobilogram : mobilograms)
    {
      Mobilogram aligned_mobilogram;
      Size max_peak_idx = 0;
      alignToGrid_(mobilogram, im_grid, aligned_mobilogram, eps, max_peak_idx);
      aligned_mobilograms.push_back(std::move(aligned_mobilogram));
    }

    // Step 3.1: Align the Identification IonMobilogram vectors to the same grid as the detection transitions
    Mobilogram aligned_identification_mobilogram;
    Size max_peak_idx = 0;
    alignToGrid_(identification_mobilogram,
                 im_grid,
                 aligned_identification_mobilogram,
                 eps,
                 max_peak_idx);

    if ( apply_im_peak_picking )
    {
      PeakPickerMobilogram::PeakPositions peak_pos{};
      PeakPickerMobilogram picker_;
      Param picker_params = picker_.getParameters();
      picker_params.setValue("method", "corrected");
      picker_.setParameters(picker_params);
      Mobilogram picked_mobilogram;
      // Ion mobilogram cannot be empty and cannot have a single point
      if ( !aligned_mobilograms.empty() && aligned_mobilograms[0].size()!=1 )
      {
        Mobilogram summed_mobilogram = sumAlignedMobilograms(aligned_mobilograms);
        picker_.pickMobilogram(summed_mobilogram, picked_mobilogram);
        picker_.filterTopPeak(picked_mobilogram, aligned_mobilograms, peak_pos);
        scores.im_drift_left = im_grid[peak_pos.left];
        scores.im_drift_right = im_grid[peak_pos.right];
      }
      else
      {
        scores.im_drift_left = -1;
        scores.im_drift_right = -1;
      }
      // Identification ion mobilogram cannot be empty and cannot have a single point
      if ( !aligned_identification_mobilogram.empty() && aligned_identification_mobilogram.size()!=1 )
      {
        picker_.filterTopPeak(picked_mobilogram, aligned_identification_mobilogram, peak_pos);
      }
    }

    std::vector< std::vector< double > > aligned_int_vec;
    extractIntensities(aligned_mobilograms, aligned_int_vec);
    std::vector< double > identification_int_values = extractIntensities(aligned_identification_mobilogram);

    // Step 4: Identification transition contrast scores (identification vs each detection trace)
    {
      OpenSwath::MRMScoring mrmscore_;
      mrmscore_.initializeXCorrPrecursorContrastMatrix({identification_int_values}, aligned_int_vec);
      OPENMS_LOG_DEBUG << "all-all: Contrast Scores : coelution identification transition : "
                       << mrmscore_.calcXcorrPrecursorContrastCoelutionScore()
                       << " / shape identification transition " <<
        mrmscore_.calcXcorrPrecursorContrastShapeScore() << '\n';
      scores.im_ind_contrast_coelution = mrmscore_.calcXcorrPrecursorContrastCoelutionScore();
      scores.im_ind_contrast_shape = mrmscore_.calcXcorrPrecursorContrastShapeScore();
    }

    // Step 5: contrast identification transition vs summed detecting transition ions
    std::vector<double> fragment_values;
    fragment_values.resize(identification_int_values.size(), 0);
    for (Size k = 0; k < fragment_values.size(); k++)
    {
      for (Size i = 0; i < aligned_int_vec.size(); i++)
      {
        fragment_values[k] += aligned_int_vec[i][k];
      }
    }

    OpenSwath::MRMScoring mrmscore_;
    // horribly broken: provides vector of length 1, but expects at least length 2 in calcXcorrPrecursorContrastCoelutionScore()
    mrmscore_.initializeXCorrPrecursorContrastMatrix({identification_int_values}, {fragment_values});
    OPENMS_LOG_DEBUG << "Contrast Scores : coelution identification transition : "
                     << mrmscore_.calcXcorrPrecursorContrastSumFragCoelutionScore()
                     << " / shape identification transition " <<
      mrmscore_.calcXcorrPrecursorContrastSumFragShapeScore() << '\n';
    // in order to prevent assertion error call calcXcorrPrecursorContrastSumFragCoelutionScore, same as calcXcorrPrecursorContrastCoelutionScore() however different assertion
    scores.im_ind_sum_contrast_coelution = mrmscore_.calcXcorrPrecursorContrastSumFragCoelutionScore();
    // in order to prevent assertion error call calcXcorrPrecursorContrastSumFragShapeScore(), same as calcXcorrPrecursorContrastShapeScore() however different assertion.
    scores.im_ind_sum_contrast_shape = mrmscore_.calcXcorrPrecursorContrastSumFragShapeScore();
  } else {
    OPENMS_LOG_DEBUG << "Identification Transition IM Scoring for " << transition[0].transition_name << " was -1. There was most likely no drift spectrum for the transition, setting cross-correlation scores to 0!\n";
    scores.im_ind_contrast_coelution = 0;
    scores.im_ind_contrast_shape = 0;
    scores.im_ind_sum_contrast_coelution = 0;
    scores.im_ind_sum_contrast_shape = 0;
  }
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/MRMFeatureQC.cpp | .cpp | 453 | 16 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Douglas McCloskey $
// $Authors: Douglas McCloskey $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/MRMFeatureQC.h>
namespace OpenMS
{
  // Intentionally empty: no out-of-line definitions are present in this
  // translation unit — presumably all MRMFeatureQC members are defined in
  // the header (TODO confirm against MRMFeatureQC.h).
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/ChromatogramExtractor.cpp | .cpp | 14,581 | 365 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/ChromatogramExtractor.h>
#include <OpenMS/ANALYSIS/OPENSWATH/OpenSwathHelper.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CONCEPT/LogStream.h>
// Logical implication (a => b). Fully parenthesized so the expansion is safe
// inside larger expressions: without the outer parentheses,
// `x && IMPLIES(a, b)` would parse as `(x && !(a)) || (b)`.
#define IMPLIES(a, b) (!(a) || (b))
namespace OpenMS
{
// Fills 'coord' for a precursor (MS1) extraction of compound/peptide 'pep'.
// Returns false (with only the default RT range and the id set) when the
// compound has no transitions.
template <typename MapT, typename PepT>
bool populateMS1Transition(MapT & pep2tr,
                           const PepT & pep,
                           ChromatogramExtractor::ExtractionCoordinates & coord)
{
  // default values for RT window (negative range)
  coord.rt_end = -1;
  coord.rt_start = 0;

  // Single lookup instead of count() followed by operator[]: avoids a second
  // map traversal and never inserts an empty entry. Also guard against an
  // (unexpected) empty transition list, which would otherwise dereference
  // element [0] of an empty vector below.
  auto tr_it = pep2tr.find(pep.id);
  if (tr_it == pep2tr.end() || tr_it->second.empty())
  {
    // Catch cases where a compound has no transitions
    OPENMS_LOG_INFO << "Warning: no transitions found for compound " << pep.id << std::endl;
    coord.id = OpenSwathHelper::computePrecursorId(pep.id, 0);
    return false;
  }

  // This is slightly awkward but the m/z of the precursor is *not*
  // stored in the precursor object but only in the transition object
  // itself. So we have to get the first transition to look it up.
  // (Take it by reference -- no need to copy the transition object.)
  const auto& transition = *(tr_it->second[0]);
  coord.mz = transition.getPrecursorMZ();

  // Set chromatogram reference id: even though we use the peptide id
  // here, it is possible that these ids overlap with the transition
  // ids, leading to bad downstream consequences (e.g. ambiguity which
  // chromatograms are precursor and which ones are fragment
  // chromatograms). This is especially problematic with pqp files
  // where peptide precursors and transitions are simply numbered and
  // are guaranteed to overlap.
  coord.id = OpenSwathHelper::computePrecursorId(pep.id, 0);
  return true;
}
// Fills 'coord' for a fragment-ion (MS2) extraction from a single transition:
// product/precursor m/z, the transition's native id, and a default
// (negative, i.e. unrestricted) RT range.
template <typename TransitionT>
void populateMS2Transition(const TransitionT & transition,
                           ChromatogramExtractor::ExtractionCoordinates & coord)
{
  // Identify the chromatogram by the transition's native id.
  coord.id = transition.getNativeID();

  // Product and precursor m/z of this transition.
  coord.mz_precursor = transition.getPrecursorMZ();
  coord.mz = transition.getProductMZ();

  // Default RT window: start > end encodes "no RT restriction".
  coord.rt_start = 0;
  coord.rt_end = -1;
}
// Resolve the peptide or compound entry a transition refers to.
// With 'do_peptides' set, the transition's PeptideRef is looked up in the
// peptide list; otherwise its CompoundRef is looked up in the compound list.
const TargetedExperimentHelper::PeptideCompound* getPeptideHelperMS2_(const OpenMS::TargetedExperiment& transition_exp_used,
                                                                      const OpenMS::ReactionMonitoringTransition& transition,
                                                                      bool do_peptides)
{
  OPENMS_PRECONDITION(IMPLIES(do_peptides, !transition.getPeptideRef().empty()), "PeptideRef cannot be empty for peptides")
  OPENMS_PRECONDITION(IMPLIES(!do_peptides, !transition.getCompoundRef().empty()), "CompoundRef cannot be empty for compounds")

  if (do_peptides)
  {
    return &transition_exp_used.getPeptideByRef(transition.getPeptideRef());
  }
  // compound case
  return &transition_exp_used.getCompoundByRef(transition.getCompoundRef());
}
// Return the i-th peptide (with 'do_peptides' set) or the i-th compound of
// the targeted experiment, as a pointer to the common PeptideCompound base.
const TargetedExperimentHelper::PeptideCompound* getPeptideHelperMS1_(const OpenMS::TargetedExperiment & transition_exp_used,
                                                                      Size i,
                                                                      bool do_peptides)
{
  OPENMS_PRECONDITION(IMPLIES(do_peptides, i < transition_exp_used.getPeptides().size()), "Index i must be smaller than the number of peptides")
  OPENMS_PRECONDITION(IMPLIES(!do_peptides, i < transition_exp_used.getCompounds().size()), "Index i must be smaller than the number of compounds")

  if (do_peptides)
  {
    return &transition_exp_used.getPeptides()[i];
  }
  // compound case
  return &transition_exp_used.getCompounds()[i];
}
// Builds one empty chromatogram plus one extraction coordinate per precursor
// (ms1 == true, with optional C13 isotope traces) or per transition
// (ms1 == false) from a light (OpenSWATH) transition library.
// Coordinates are sorted by m/z (stable) on return.
void ChromatogramExtractor::prepare_coordinates(std::vector< OpenSwath::ChromatogramPtr > & output_chromatograms,
                                                std::vector< ExtractionCoordinates > & coordinates,
                                                const OpenSwath::LightTargetedExperiment & transition_exp_used,
                                                const double rt_extraction_window,
                                                const bool ms1,
                                                const int ms1_isotopes)
{
  // hash of the peptide reference containing all transitions
  std::map<String, std::vector<const OpenSwath::LightTransition*> > pep2tr;
  for (Size i = 0; i < transition_exp_used.getTransitions().size(); i++)
  {
    String ref = transition_exp_used.getTransitions()[i].getPeptideRef();
    pep2tr[ref].push_back(&transition_exp_used.getTransitions()[i]);
  }
  // reverse map: compound id -> compound (to look up RT / drift time for a transition)
  std::map<String, const OpenSwath::LightCompound*> tr2pep;
  for (const auto & p : transition_exp_used.getCompounds()) {tr2pep[p.id] = &p;}

  // Determine iteration size:
  // When extracting MS1/precursor transitions, we iterate over compounds.
  // Otherwise (for SWATH/fragment ions), we iterate over the transitions.
  Size itersize;
  if (ms1)
  {
    itersize = transition_exp_used.getCompounds().size();
  }
  else
  {
    itersize = transition_exp_used.getTransitions().size();
  }

  for (Size i = 0; i < itersize; i++)
  {
    OpenSwath::ChromatogramPtr s(new OpenSwath::Chromatogram);
    output_chromatograms.push_back(s);

    ChromatogramExtractor::ExtractionCoordinates coord;
    OpenSwath::LightCompound pep;
    OpenSwath::LightTransition transition;

    if (ms1)
    {
      pep = transition_exp_used.getCompounds()[i];
      if (!populateMS1Transition(pep2tr, pep, coord))
      {
        // Catch cases where a compound has no transitions
        coordinates.push_back(coord);
        continue;
      }
    }
    else
    {
      transition = transition_exp_used.getTransitions()[i];
      pep = (*tr2pep[transition.getPeptideRef()]);
      populateMS2Transition(transition, coord);
    }

    // A negative window keeps the default (unrestricted) RT range.
    if (rt_extraction_window >= 0)
    {
      // if 'rt_extraction_window' is non-zero, just use the (first) RT value
      double rt = pep.rt;
      coord.rt_start = rt - rt_extraction_window / 2.0;
      coord.rt_end = rt + rt_extraction_window / 2.0;
    }
    coord.ion_mobility = pep.getDriftTime();
    coordinates.push_back(coord);

    // Additionally extract the requested number of C13 isotope traces for precursors.
    if (ms1 && ms1_isotopes > 0)
    {
      for (int k = 1; k <= ms1_isotopes; k++)
      {
        OpenSwath::ChromatogramPtr s(new OpenSwath::Chromatogram);
        output_chromatograms.push_back(s);
        ChromatogramExtractor::ExtractionCoordinates coord_new = coord;
        coord_new.id = OpenSwathHelper::computePrecursorId(pep.id, k);
        coord_new.mz = coord.mz + k * Constants::C13C12_MASSDIFF_U;
        coordinates.push_back(coord_new);
      }
    }
  }

  // sort result, use stable_sort to ensure that ordering is preserved
  std::stable_sort(coordinates.begin(), coordinates.end(), ChromatogramExtractor::ExtractionCoordinates::SortExtractionCoordinatesByMZ);
}
// Builds one empty chromatogram plus one extraction coordinate per precursor
// (ms1 == true, with optional C13 isotope traces) or per transition
// (ms1 == false) from a full TargetedExperiment transition library.
// rt_extraction_window: negative = no RT restriction, NaN = RT start/end are
// encoded in the data (two RT entries per peptide), otherwise a window
// centered on the peptide's RT. Coordinates are sorted by m/z (stable).
// Throws IllegalArgument if RT information required for the chosen mode is missing.
void ChromatogramExtractor::prepare_coordinates(std::vector< OpenSwath::ChromatogramPtr > & output_chromatograms,
                                                std::vector< ExtractionCoordinates > & coordinates,
                                                const OpenMS::TargetedExperiment & transition_exp_used,
                                                const double rt_extraction_window,
                                                const bool ms1,
                                                const int ms1_isotopes)
{
  // hash of the peptide reference containing all transitions
  typedef std::map<String, std::vector<const ReactionMonitoringTransition*> > PeptideTransitionMapType;
  PeptideTransitionMapType pep2tr;
  for (Size i = 0; i < transition_exp_used.getTransitions().size(); i++)
  {
    String ref = transition_exp_used.getTransitions()[i].getPeptideRef();
    if (ref.empty()) ref = transition_exp_used.getTransitions()[i].getCompoundRef();
    pep2tr[ref].push_back(&transition_exp_used.getTransitions()[i]);
  }

  bool have_peptides = (!transition_exp_used.getPeptides().empty());

  // Determine iteration size (nr peptides or nr transitions)
  Size itersize;
  if (ms1)
  {
    if (have_peptides)
    {
      itersize = transition_exp_used.getPeptides().size();
    }
    else
    {
      itersize = transition_exp_used.getCompounds().size();
    }
  }
  else
  {
    itersize = transition_exp_used.getTransitions().size();
  }

  for (Size i = 0; i < itersize; i++)
  {
    OpenSwath::ChromatogramPtr s(new OpenSwath::Chromatogram);
    output_chromatograms.push_back(s);

    ChromatogramExtractor::ExtractionCoordinates coord;
    const TargetedExperimentHelper::PeptideCompound* pep;
    OpenMS::ReactionMonitoringTransition transition;

    if (ms1)
    {
      pep = getPeptideHelperMS1_(transition_exp_used, i, have_peptides);
      if (!populateMS1Transition(pep2tr, *pep, coord))
      {
        // Catch cases where a compound has no transitions
        coordinates.push_back(coord);
        continue;
      }
    }
    else
    {
      transition = transition_exp_used.getTransitions()[i];
      pep = getPeptideHelperMS2_(transition_exp_used, transition, have_peptides);
      populateMS2Transition(transition, coord);
    }

    if (rt_extraction_window < 0) {} // negative window: keep the default (unrestricted) RT range
    else
    {
      if (!pep->hasRetentionTime())
      {
        // we don't have retention times -> this is only a problem if we actually
        // wanted to use the RT limit feature.
        throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                         "Error: Peptide " + pep->id + " does not have retention time information which is necessary to perform an RT-limited extraction");
      }
      else if (std::isnan(rt_extraction_window)) // if 'rt_extraction_window' is NAN, we assume that RT start/end is encoded in the data
      {
        // TODO: better use a single RT entry with start/end
        if (pep->rts.size() != 2)
        {
          throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                           "Error: Expected exactly two retention time entries for peptide '" + pep->id + "', found " + String(pep->rts.size()));
        }
        coord.rt_start = pep->rts[0].getRT();
        coord.rt_end = pep->rts[1].getRT();
      }
      else // if 'rt_extraction_window' is non-zero, just use the (first) RT value
      {
        double rt = pep->getRetentionTime();
        coord.rt_start = rt - rt_extraction_window / 2.0;
        coord.rt_end = rt + rt_extraction_window / 2.0;
      }
    }
    coord.ion_mobility = pep->getDriftTime();
    coordinates.push_back(coord);

    // BUGFIX: removed a stray "&& false" that dead-coded this branch and made
    // the 'ms1_isotopes' parameter silently ignored for this overload (the
    // LightTargetedExperiment overload extracts the isotope traces as documented).
    if (ms1 && ms1_isotopes > 0)
    {
      for (int k = 1; k <= ms1_isotopes; k++)
      {
        OpenSwath::ChromatogramPtr s(new OpenSwath::Chromatogram);
        output_chromatograms.push_back(s);
        ChromatogramExtractor::ExtractionCoordinates coord_new = coord;
        coord_new.id = OpenSwathHelper::computePrecursorId(pep->id, k);
        coord_new.mz = coord.mz + k * Constants::C13C12_MASSDIFF_U;
        coordinates.push_back(coord_new);
      }
    }
  }

  // sort result, use stable_sort to ensure that ordering is preserved
  std::stable_sort(coordinates.begin(), coordinates.end(), ChromatogramExtractor::ExtractionCoordinates::SortExtractionCoordinatesByMZ);
}
// Decide whether 'current_rt' falls outside the RT extraction window of the
// given transition. A negative window disables RT filtering entirely.
bool ChromatogramExtractor::outsideExtractionWindow_(const ReactionMonitoringTransition& transition, double current_rt,
                                                     const TransformationDescription& trafo, double rt_extraction_window)
{
  if (rt_extraction_window < 0)
  {
    return false;
  }

  // Map the expected (normalized) RT back into experimental RT space. The
  // transformation was inverted in the beginning, so trafo.apply() performs
  // the normalized -> experimental direction here.
  // Note: operator[] (not at()) is deliberate -- an unknown reference yields
  // an expected RT of 0.0, matching the original behavior.
  const double expected_rt = PeptideRTMap_[transition.getPeptideRef()];
  const double de_normalized_experimental_rt = trafo.apply(expected_rt);

  // Outside the symmetric window around the de-normalized expected RT?
  const double half_window = rt_extraction_window / 2.0;
  return (current_rt < de_normalized_experimental_rt - half_window) ||
         (current_rt > de_normalized_experimental_rt + half_window);
}
// Map a filter name to its numeric code: "tophat" -> 1, "bartlett" -> 2.
// Any other name is rejected with an IllegalArgument exception.
int ChromatogramExtractor::getFilterNr_(const String& filter)
{
  if (filter == "tophat")
  {
    return 1;
  }
  if (filter == "bartlett")
  {
    return 2;
  }
  throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                   "Filter either needs to be tophat or bartlett");
}
// Cache peptide id -> retention time for all peptides of the experiment.
// Peptides without RT are skipped silently unless an RT-limited extraction
// was requested (rt_extraction_window >= 0), in which case this throws.
void ChromatogramExtractor::populatePeptideRTMap_(OpenMS::TargetedExperiment& transition_exp, double rt_extraction_window)
{
  PeptideRTMap_.clear();
  for (const TargetedExperiment::Peptide& pep : transition_exp.getPeptides())
  {
    if (!pep.hasRetentionTime())
    {
      // Missing RT is only fatal when the RT limit feature is actually in use.
      if (rt_extraction_window >= 0)
      {
        throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                         "Error: Peptide " + pep.id + " does not have retention time information which is necessary to perform an RT-limited extraction");
      }
      continue;
    }
    PeptideRTMap_[pep.id] = pep.getRetentionTime();
  }
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/PeakIntegrator.cpp | .cpp | 5,345 | 104 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Douglas McCloskey, Pasquale Domenico Colaianni $
// $Authors: Douglas McCloskey, Pasquale Domenico Colaianni $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/PeakIntegrator.h>
namespace OpenMS
{
/// Constructor: registers this handler's default parameters (see
/// getDefaultParameters()) and copies them into the active Param object.
PeakIntegrator::PeakIntegrator() :
  DefaultParamHandler("PeakIntegrator")
{
  getDefaultParameters(defaults_);
  defaultsToParam_(); // write defaults into Param object param_
}

/// Destructor (no resources to release).
PeakIntegrator::~PeakIntegrator() = default;
/// Integrate a chromatographic peak between positions 'left' and 'right' (RT);
/// delegates to the shared implementation integratePeak_().
PeakIntegrator::PeakArea PeakIntegrator::integratePeak(const MSChromatogram& chromatogram, const double left, const double right) const
{
  return integratePeak_(chromatogram, left, right);
}

/// Iterator overload: uses the iterators' RT positions as boundaries.
PeakIntegrator::PeakArea PeakIntegrator::integratePeak(const MSChromatogram& chromatogram, MSChromatogram::ConstIterator& left, MSChromatogram::ConstIterator& right) const
{
  return integratePeak_(chromatogram, left->getRT(), right->getRT());
}

/// Spectrum variant: boundaries are m/z positions.
PeakIntegrator::PeakArea PeakIntegrator::integratePeak(const MSSpectrum& spectrum, const double left, const double right) const
{
  return integratePeak_(spectrum, left, right);
}

/// Spectrum iterator overload: uses the iterators' m/z positions as boundaries.
PeakIntegrator::PeakArea PeakIntegrator::integratePeak(const MSSpectrum& spectrum, MSSpectrum::ConstIterator& left, MSSpectrum::ConstIterator& right) const
{
  return integratePeak_(spectrum, left->getMZ(), right->getMZ());
}
/// Estimate the background (baseline) under a peak between 'left' and 'right'
/// (RT); 'peak_apex_pos' is passed through to the shared implementation
/// estimateBackground_().
PeakIntegrator::PeakBackground PeakIntegrator::estimateBackground(const MSChromatogram& chromatogram, const double left, const double right, const double peak_apex_pos) const
{
  return estimateBackground_(chromatogram, left, right, peak_apex_pos);
}

/// Iterator overload: uses the iterators' RT positions as boundaries.
PeakIntegrator::PeakBackground PeakIntegrator::estimateBackground(const MSChromatogram& chromatogram, MSChromatogram::ConstIterator& left, MSChromatogram::ConstIterator& right, const double peak_apex_pos) const
{
  return estimateBackground_(chromatogram, left->getRT(), right->getRT(), peak_apex_pos);
}

/// Spectrum variant: boundaries are m/z positions.
PeakIntegrator::PeakBackground PeakIntegrator::estimateBackground(const MSSpectrum& spectrum, const double left, const double right, const double peak_apex_pos) const
{
  return estimateBackground_(spectrum, left, right, peak_apex_pos);
}

/// Spectrum iterator overload: uses the iterators' m/z positions as boundaries.
PeakIntegrator::PeakBackground PeakIntegrator::estimateBackground(const MSSpectrum& spectrum, MSSpectrum::ConstIterator& left, MSSpectrum::ConstIterator& right, const double peak_apex_pos) const
{
  return estimateBackground_(spectrum, left->getMZ(), right->getMZ(), peak_apex_pos);
}
/// Compute peak-shape metrics for the peak between 'left' and 'right' (RT),
/// given its height and apex position; delegates to the shared implementation
/// calculatePeakShapeMetrics_().
PeakIntegrator::PeakShapeMetrics PeakIntegrator::calculatePeakShapeMetrics(const MSChromatogram& chromatogram, const double left, const double right, const double peak_height, const double peak_apex_pos) const
{
  return calculatePeakShapeMetrics_(chromatogram, left, right, peak_height, peak_apex_pos);
}

/// Iterator overload: uses the iterators' RT positions as boundaries.
PeakIntegrator::PeakShapeMetrics PeakIntegrator::calculatePeakShapeMetrics(const MSChromatogram& chromatogram, MSChromatogram::ConstIterator& left, MSChromatogram::ConstIterator& right, const double peak_height, const double peak_apex_pos) const
{
  return calculatePeakShapeMetrics_(chromatogram, left->getRT(), right->getRT(), peak_height, peak_apex_pos);
}

/// Spectrum variant: boundaries are m/z positions.
PeakIntegrator::PeakShapeMetrics PeakIntegrator::calculatePeakShapeMetrics(const MSSpectrum& spectrum, const double left, const double right, const double peak_height, const double peak_apex_pos) const
{
  return calculatePeakShapeMetrics_(spectrum, left, right, peak_height, peak_apex_pos);
}

/// Spectrum iterator overload: uses the iterators' m/z positions as boundaries.
PeakIntegrator::PeakShapeMetrics PeakIntegrator::calculatePeakShapeMetrics(const MSSpectrum& spectrum, MSSpectrum::ConstIterator& left, MSSpectrum::ConstIterator& right, const double peak_height, const double peak_apex_pos) const
{
  return calculatePeakShapeMetrics_(spectrum, left->getMZ(), right->getMZ(), peak_height, peak_apex_pos);
}
/// Reset 'params' and register the parameters supported by PeakIntegrator
/// (integration_type, baseline_type, fit_EMG) together with their default
/// values and the valid string choices.
void PeakIntegrator::getDefaultParameters(Param& params)
{
  params.clear();

  params.setValue("integration_type", INTEGRATION_TYPE_INTENSITYSUM, "The integration technique to use in integratePeak() and estimateBackground() which uses either the summed intensity, integration by Simpson's rule or trapezoidal integration.");
  params.setValidStrings("integration_type", {"intensity_sum","simpson","trapezoid"});

  params.setValue("baseline_type", BASELINE_TYPE_BASETOBASE, "The baseline type to use in estimateBackground() based on the peak boundaries. A rectangular baseline shape is computed based either on the minimal intensity of the peak boundaries, the maximum intensity or the average intensity (base_to_base).");
  params.setValidStrings("baseline_type", {"base_to_base","vertical_division","vertical_division_min","vertical_division_max"});

  params.setValue("fit_EMG", "false", "Fit the chromatogram/spectrum to the EMG peak model.");
  params.setValidStrings("fit_EMG", {"false","true"});
}
/// Cache the current parameter values in the corresponding members.
/// Called by DefaultParamHandler whenever the parameters change.
void PeakIntegrator::updateMembers_()
{
  // toString() already yields a string assignable to String; the previous
  // C-style `(String)` casts were redundant (named casts are preferred anyway).
  integration_type_ = param_.getValue("integration_type").toString();
  baseline_type_ = param_.getValue("baseline_type").toString();
  fit_EMG_ = param_.getValue("fit_EMG").toBool();
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/DIAHelper.cpp | .cpp | 19,560 | 530 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Witold Wolski, Hannes Roest $
// $Authors: Witold Wolski, Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/DIAHelper.h>
#include <OpenMS/CHEMISTRY/TheoreticalSpectrumGenerator.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/CoarseIsotopePatternGenerator.h>
#include <OpenMS/FEATUREFINDER/FeatureFinderAlgorithmPickedHelperStructs.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <utility>
#include <OpenMS/CONCEPT/LogStream.h>
namespace OpenMS::DIAHelpers
{
// Widen the interval [left, right] symmetrically by half of the extraction
// window on either side. In ppm mode each boundary is scaled relative to its
// own m/z value; in Th mode an absolute half-window is applied. The lower
// boundary is clamped at 0 because negative m/z values are invalid.
void adjustExtractionWindow(double& right, double& left, const double& mz_extract_window, const bool& mz_extraction_ppm)
{
  OPENMS_PRECONDITION(mz_extract_window > 0, "MZ extraction window needs to be larger than zero.");

  if (mz_extraction_ppm)
  {
    // relative (ppm) half-window, scaled by the respective boundary
    left -= left * mz_extract_window / 2e6;
    right += right * mz_extract_window / 2e6;
  }
  else
  {
    // absolute (Th) half-window
    const double half_window = mz_extract_window / 2.0;
    left -= half_window;
    right += half_window;
  }

  if (left < 0)
  {
    left = 0;
  }
}
// Helper for integrateWindow: accumulates (into the out-params) the sum of all
// intensities, the intensity-weighted sum of ion mobility values and the
// intensity-weighted sum of m/z values inside the requested ranges.
// No expensive division calls happen here -- the caller normalizes by the
// summed intensity afterwards.
// Assumes mz, im and intensity have already been initialized by the caller.
void integrateWindow_(const OpenSwath::SpectrumPtr& spectrum,
                      double & mz,
                      double & im,
                      double & intensity,
                      const RangeMZ & mz_range,
                      const RangeMobility & im_range,
                      bool centroided)
{
  OPENMS_PRECONDITION(spectrum != nullptr, "precondition: Spectrum cannot be nullptr");
  OPENMS_PRECONDITION(spectrum->getMZArray() != nullptr, "precondition: Cannot integrate if no m/z is available.");
  //OPENMS_PRECONDITION(!spectrum->getMZArray()->data.empty(), " precondition: Warning: Cannot integrate if spectrum is empty"); // not a hard failure: an empty spectrum is handled below with a warning
  OPENMS_PRECONDITION(std::adjacent_find(spectrum->getMZArray()->data.begin(),
        spectrum->getMZArray()->data.end(), std::greater<double>()) == spectrum->getMZArray()->data.end(),
        "Precondition violated: m/z vector needs to be sorted!" );
  OPENMS_PRECONDITION(spectrum->getMZArray()->data.size() == spectrum->getIntensityArray()->data.size(), "precondition: MZ and Intensity array need to have the same length.");

  // ion mobility specific preconditions
  //OPENMS_PRECONDITION((im_range.isEmpty()) && (spectrum->getDriftTimeArray() != nullptr), "precondition: Cannot integrate with drift time if no drift time is available."); // not a hard failure: a missing drift time array is handled below
  OPENMS_PRECONDITION((spectrum->getDriftTimeArray() == nullptr) || (spectrum->getDriftTimeArray()->data.empty()) || (spectrum->getMZArray()->data.size() == spectrum->getDriftTimeArray()->data.size()), "precondition: MZ and Drift Time array need to have the same length.");
  OPENMS_PRECONDITION(!centroided, throw Exception::NotImplemented(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION));

  if ( spectrum->getMZArray()->data.empty() )
  {
    OPENMS_LOG_WARN << "Warning: Cannot integrate if spectrum is empty" << std::endl;
    return;
  }

  // If an ion mobility range is set, then integration happens across drift
  // time as well, which requires the spectrum to carry a drift time array.
  if (!im_range.isEmpty()) // if imRange supplied, integrate across IM
  {
    if (spectrum->getDriftTimeArray() == nullptr)
    {
      throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Cannot integrate with drift time if no drift time is available");
    }
  }

  if (!centroided)
  {
    // Profile data: compute intensity-weighted sums over the m/z window.
    // TODO this is not optimal if there are two peaks in this window (e.g. if the window is too large)
    auto mz_arr_end = spectrum->getMZArray()->data.end();
    auto int_it = spectrum->getIntensityArray()->data.begin();

    // binary search for the start of the window; this assumes that the spectra are sorted!
    auto mz_it = std::lower_bound(spectrum->getMZArray()->data.begin(), mz_arr_end, mz_range.getMin());

    // also advance intensity iterator now
    auto iterator_pos = std::distance(spectrum->getMZArray()->data.begin(), mz_it);
    std::advance(int_it, iterator_pos);

    double mz_end = mz_range.getMax(); // store the maximum mz value in a double to minimize function calls
    if ( !im_range.isEmpty() ) // integrate across im as well
    {
      auto im_it = spectrum->getDriftTimeArray()->data.begin();
      // also advance ion mobility iterator now
      std::advance(im_it, iterator_pos);

      // Iterate from the window start until the m/z value exceeds mz_end;
      // only peaks whose ion mobility lies inside im_range contribute.
      while ( (mz_it != mz_arr_end) && (*mz_it < mz_end) )
      {
        if (im_range.contains(*im_it))
        {
          intensity += (*int_it);
          im += (*int_it) * (*im_it);
          mz += (*int_it) * (*mz_it);
        }
        ++mz_it;
        ++int_it;
        ++im_it;
      }
    }
    else // no ion mobility dimension requested
    {
      while ( mz_it != mz_arr_end && *mz_it < mz_end )
      {
        intensity += (*int_it);
        mz += (*int_it) * (*mz_it);
        ++mz_it;
        ++int_it;
      }
    }
  }
  else
  {
    // centroided data is not supported (also guarded by the precondition above)
    throw Exception::NotImplemented(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION);
  }
}
// Integrates one m/z window of the given width around every center in
// windowsCenter. Successful integrations append intensity / weighted m/z /
// weighted IM; failed windows are either dropped (remZero == true) or recorded
// as zero-intensity placeholders at the window center.
void integrateWindows(const OpenSwath::SpectrumPtr& spectrum,
                      const std::vector<double> & windowsCenter,
                      double width,
                      std::vector<double> & integratedWindowsIntensity,
                      std::vector<double> & integratedWindowsMZ,
                      std::vector<double> & integratedWindowsIm,
                      const RangeMobility & range_im,
                      bool remZero)
{
  double mz, intensity, im;
  for (const double center : windowsCenter)
  {
    // build the m/z window around the current center
    RangeMZ range_mz(center);
    range_mz.minSpanIfSingular(width);

    if (integrateWindow(spectrum, mz, im, intensity, range_mz, range_im, false))
    {
      integratedWindowsIntensity.push_back(intensity);
      integratedWindowsMZ.push_back(mz);
      integratedWindowsIm.push_back(im);
    }
    else if (!remZero)
    {
      // keep the empty window as a zero-intensity placeholder
      integratedWindowsIntensity.push_back(0.);
      integratedWindowsMZ.push_back(center);
      if ( !range_im.isEmpty() )
      {
        integratedWindowsIm.push_back( range_im.center() ); // average drift time
      }
      else
      {
        integratedWindowsIm.push_back(-1);
      }
    }
  }
}
// Integrates one m/z window of the given width around every center in
// windowsCenter, summing across all spectra in the sequence. Successful
// integrations append intensity / weighted m/z / weighted IM; failed windows
// are either dropped (remZero == true) or recorded as zero-intensity
// placeholders at the window center.
// Throws Exception::MissingInformation if no windows are supplied.
void integrateWindows(const SpectrumSequence& spectra,
                      const std::vector<double> & windowsCenter,
                      double width,
                      std::vector<double> & integratedWindowsIntensity,
                      std::vector<double> & integratedWindowsMZ,
                      std::vector<double> & integratedWindowsIm,
                      const RangeMobility& range_im,
                      bool remZero)
{
  if (windowsCenter.empty())
  {
    // Fix: the original had an unreachable 'return;' after this throw.
    throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No windows supplied!");
  }
  if (spectra.empty())
  {
    OPENMS_LOG_WARN << "Warning: no spectra provided" << std::endl;
    return;
  }

  double mz(-1), intensity(0), im(-1);
  for (const double center : windowsCenter)
  {
    // assemble the m/z window around the current center
    RangeMZ range_mz(center);
    range_mz.minSpanIfSingular(width);

    if (integrateWindow(spectra, mz, im, intensity, range_mz, range_im, false))
    {
      integratedWindowsIntensity.push_back(intensity);
      integratedWindowsMZ.push_back(mz);
      integratedWindowsIm.push_back(im);
    }
    else if (!remZero)
    {
      // keep the empty window as a zero-intensity placeholder
      integratedWindowsIntensity.push_back(0.);
      integratedWindowsMZ.push_back(center); // push back center of window
      if ( !range_im.isEmpty() )
      {
        integratedWindowsIm.push_back(range_im.center()); // push back average drift
      }
      else
      {
        integratedWindowsIm.push_back(-1.);
      }
    }
  }
}
// Integrates a single m/z (and optional IM) window of one spectrum.
// On success, mz and im hold intensity-weighted averages and true is returned;
// on failure (no signal) all out-params are set to sentinel values (-1/-1/0)
// and false is returned. im is -1 whenever no IM range was requested.
bool integrateWindow(const OpenSwath::SpectrumPtr& spectrum,
                     double & mz,
                     double & im,
                     double & intensity,
                     const RangeMZ & range_mz,
                     const RangeMobility & range_im,
                     bool centroided)
{
  mz = 0;
  im = 0;
  intensity = 0;

  // accumulate raw (intensity-weighted) sums
  integrateWindow_(spectrum, mz, im, intensity, range_mz, range_im, centroided);

  // no signal in the window -> sentinel values
  if (!(intensity > 0.))
  {
    im = -1;
    mz = -1;
    intensity = 0;
    return false;
  }

  // normalize the weighted sums by the total intensity
  mz /= intensity;
  im = range_im.isEmpty() ? -1 : im / intensity;
  return true;
}
// Integrates a single m/z (and optional IM) window across a sequence of
// spectra by summing the per-spectrum contributions. On success, mz and im
// hold intensity-weighted averages and true is returned; otherwise all
// out-params are set to sentinel values (-1/-1/0) and false is returned.
bool integrateWindow(const SpectrumSequence& spectra,
                     double & mz,
                     double & im,
                     double & intensity,
                     const RangeMZ & range_mz,
                     const RangeMobility & range_im,
                     bool centroided)
{
  mz = 0;
  im = 0;
  intensity = 0;

  if (spectra.empty())
  {
    OPENMS_LOG_WARN << "Warning: no spectra provided" << std::endl;
    im = -1;
    mz = -1;
    intensity = 0;
    return false;
  }

  // accumulate raw (intensity-weighted) sums over all spectra
  for (const auto& sp : spectra)
  {
    integrateWindow_(sp, mz, im, intensity, range_mz, range_im, centroided);
  }

  // no signal in the window -> sentinel values
  if (!(intensity > 0.))
  {
    im = -1;
    mz = -1;
    intensity = 0;
    return false;
  }

  // normalize the weighted sums by the total intensity
  mz /= intensity;
  im = range_im.isEmpty() ? -1 : im / intensity; // -1 if no IM requested
  return true;
}
// For SWATH: collect the theoretical b- and y-ion m/z values of a peptide
// sequence at the given charge. The TheoreticalSpectrumGenerator is passed in
// by the caller because constructing one per call would be too slow.
void getBYSeries(const AASequence& a,
                 std::vector<double>& bseries,
                 std::vector<double>& yseries,
                 TheoreticalSpectrumGenerator const * generator,
                 int charge)
{
  OPENMS_PRECONDITION(charge > 0, "For constructing b/y series we require charge being a positive integer");
  if (a.empty()) return;

  PeakSpectrum spec;
  generator->getSpectrum(spec, a, charge, charge);

  // the ion-name string data array is present whenever the sequence is non-empty
  const PeakSpectrum::StringDataArray& ion_name = spec.getStringDataArrays()[0];
  for (Size i = 0; i != spec.size(); ++i)
  {
    const char series_type = ion_name[i][0];
    if (series_type == 'y')
    {
      yseries.push_back(spec[i].getMZ());
    }
    else if (series_type == 'b')
    {
      bseries.push_back(spec[i].getMZ());
    }
  }
}
// For SWATH: collect the m/z values of all theoretical fragment peaks for a
// sequence at the given charge. The TheoreticalSpectrumGenerator is passed in
// by the caller because constructing one per call would be too slow.
void getTheorMasses(const AASequence& a,
                    std::vector<double>& masses,
                    TheoreticalSpectrumGenerator const * generator,
                    int charge)
{
  OPENMS_PRECONDITION(charge > 0, "Charge is a positive integer");
  PeakSpectrum spec;
  generator->getSpectrum(spec, a, charge, charge);
  for (const auto& peak : spec)
  {
    masses.push_back(peak.getMZ());
  }
} // end getTheorMasses
// Computes an averagine isotope distribution for a fragment at product_mz and
// appends (m/z, relative intensity) pairs to isotopes_spec. Successive
// isotopes are spaced by mannmass/charge, starting at product_mz itself.
// Fix: removed an unused local 'TheoreticalIsotopePattern isotopes' and the
// typedef that existed only to declare it.
void getAveragineIsotopeDistribution(const double product_mz,
                                     std::vector<std::pair<double, double> >& isotopes_spec,
                                     int charge,
                                     const int nr_isotopes,
                                     const double mannmass)
{
  charge = std::abs(charge);

  // create the theoretical distribution
  CoarseIsotopePatternGenerator solver(nr_isotopes);
  // Note: this is a rough estimate of the weight, usually the protons should be deducted first, left for backwards compatibility.
  auto d = solver.estimateFromPeptideWeight(product_mz * charge);

  double mass = product_mz;
  for (IsotopeDistribution::Iterator it = d.begin(); it != d.end(); ++it)
  {
    isotopes_spec.emplace_back(mass, it->getIntensity());
    mass += mannmass / charge;
  }
}
// Simulates a spectrum from an AASequence: computes the monoisotopic fragment
// masses, then expands each of them into an averagine isotope pattern.
void simulateSpectrumFromAASequence(const AASequence& aa,
                                    std::vector<double>& first_isotope_masses, //[out]
                                    std::vector<std::pair<double, double> >& isotope_masses, //[out]
                                    TheoreticalSpectrumGenerator const * generator, int charge)
{
  getTheorMasses(aa, first_isotope_masses, generator, charge);
  for (const double mono_mass : first_isotope_masses)
  {
    getAveragineIsotopeDistribution(mono_mass, isotope_masses, charge);
  }
}
/// Given an experimental spectrum of (m/z, intensity) pairs, append the
/// averagine isotope pattern of each peak (scaled by that peak's intensity)
/// to isotope_masses.
void addIsotopes2Spec(const std::vector<std::pair<double, double> >& spec,
                      std::vector<std::pair<double, double> >& isotope_masses, //[out]
                      Size nr_isotopes, int charge)
{
  for (const auto& peak : spec)
  {
    std::vector<std::pair<double, double> > isotopes;
    getAveragineIsotopeDistribution(peak.first, isotopes, charge, nr_isotopes);
    for (auto& iso : isotopes)
    {
      iso.second *= peak.second; // scale isotope intensity by the peak intensity
      isotope_masses.push_back(iso);
    }
  }
}
/// Given one experimental peak (m/z, intensity), append its averagine isotope
/// pattern (scaled by the peak intensity) to isotope_masses.
void addSinglePeakIsotopes2Spec(double mz, double ity,
                                std::vector<std::pair<double, double>>& isotope_masses, //[out]
                                Size nr_isotopes, int charge)
{
  std::vector<std::pair<double, double> > isotopes;
  getAveragineIsotopeDistribution(mz, isotopes, charge, nr_isotopes);
  for (auto& iso : isotopes)
  {
    iso.second *= ity; // scale isotope intensity by the peak intensity
    isotope_masses.push_back(iso);
  }
}
// Adds low-weight "pre-isotope" peaks at masses below each first isotope
// (nr_peaks of them, spaced by mannmass/charge) and re-sorts the spectrum by
// mass afterwards.
// Fix: 'mul' is an integer multiplier but was initialized from the double
// literal '1.' (inconsistent with the single-m/z overload below).
void addPreisotopeWeights(const std::vector<double>& first_isotope_masses,
                          std::vector<std::pair<double, double> >& isotope_spec, // output
                          UInt nr_peaks, double pre_isotope_peaks_weight, // weight of pre isotope peaks
                          double mannmass, int charge)
{
  charge = std::abs(charge);
  for (const double first_mass : first_isotope_masses)
  {
    Size mul = 1;
    for (UInt j = 0; j < nr_peaks; ++j, ++mul)
    {
      isotope_spec.emplace_back(first_mass - (mul * mannmass) / charge,
                                pre_isotope_peaks_weight);
    }
  }
  sortByFirst(isotope_spec);
}
//Add masses before first isotope
void addPreisotopeWeights(double mz,
std::vector<std::pair<double, double> >& isotope_spec, // output
UInt nr_peaks, double pre_isotope_peaks_weight, // weight of pre isotope peaks
double mannmass, int charge)
{
charge = std::abs(charge);
Size mul = 1;
for (UInt j = 0; j < nr_peaks; ++j, ++mul)
{
isotope_spec.emplace_back(mz - (mul * mannmass) / charge,
pre_isotope_peaks_weight);
}
}
/// Comparator that orders (mass, intensity) pairs by ascending mass
/// (the .first component); used to keep simulated spectra sorted by m/z.
struct MassSorter
{
  bool operator()(const std::pair<double, double>& left,
                  const std::pair<double, double>& right)
  {
    return left.first < right.first;
  }
};
// Sorts (mass, intensity) pairs in place by ascending mass.
void sortByFirst(std::vector<std::pair<double, double> >& tmp)
{
  std::sort(tmp.begin(), tmp.end(),
            [](const std::pair<double, double>& left,
               const std::pair<double, double>& right)
            {
              return left.first < right.first;
            });
}
// Appends the first component (mass) of every (mass, intensity) pair to mass.
void extractFirst(const std::vector<std::pair<double, double> >& peaks,
                  std::vector<double>& mass)
{
  mass.reserve(mass.size() + peaks.size());
  for (const auto& peak : peaks)
  {
    mass.push_back(peak.first);
  }
}
// Appends the second component (intensity) of every (mass, intensity) pair to mass.
void extractSecond(const std::vector<std::pair<double, double> >& peaks,
                   std::vector<double>& mass)
{
  mass.reserve(mass.size() + peaks.size());
  for (const auto& peak : peaks)
  {
    mass.push_back(peak.second);
  }
}
// Builds a RangeMZ centered on mzRef with a total span of
// dia_extraction_window, interpreted either in ppm (relative to mzRef)
// or in Th (absolute), depending on is_ppm.
RangeMZ createMZRangePPM(const double mzRef, const double dia_extraction_window, const bool is_ppm)
{
  RangeMZ rangeMZ(mzRef);
  const double span = is_ppm ? Math::ppmToMass(dia_extraction_window, mzRef)
                             : dia_extraction_window;
  rangeMZ.minSpanIfSingular(span);
  return rangeMZ;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/SwathWindowLoader.cpp | .cpp | 4,975 | 134 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest$
// $Authors: Hannes Roest$
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/SwathWindowLoader.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/Types.h>
#include <OpenMS/DATASTRUCTURES/String.h>
#include <OpenMS/DATASTRUCTURES/StringUtils.h>
#include <OpenMS/SYSTEM/SysInfo.h>
#include <fstream>
#include <iostream>
#include <sstream>
namespace OpenMS
{
void SwathWindowLoader::annotateSwathMapsFromFile(const std::string & filename,
    std::vector< OpenSwath::SwathMap >& swath_maps, bool do_sort, bool force)
{
  std::vector<double> swath_prec_lower_, swath_prec_upper_;
  readSwathWindows(filename, swath_prec_lower_, swath_prec_upper_);

  // Sort the SWATH maps by their upper precursor isolation boundary; the
  // windows from the annotation file are assumed to already be in this order.
  if (do_sort)
  {
    std::sort(swath_maps.begin(), swath_maps.end(), [](const OpenSwath::SwathMap& left, const OpenSwath::SwathMap& right)
    {
      return left.upper < right.upper;
    });
  }

  // Walk both lists in parallel: i indexes maps from the raw data, j indexes
  // the windows read from the annotation file; MS1 maps consume no window.
  Size i = 0, j = 0;
  for (; i < swath_maps.size(); i++)
  {
    if (swath_maps[i].ms1)
    { // skip to next map (only increase i)
      continue;
    }
    if (j >= swath_prec_lower_.size())
    {
      // more MS2 maps in the data than windows in the file
      OPENMS_LOG_FATAL_ERROR << "Trying to access annotation for SWATH map " << j
        << " but there are only " << swath_prec_lower_.size() << " windows in the"
        << " swath_windows_file. Please check your input." << std::endl;
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "The number of SWATH maps read from the raw data and from the annotation file do not match.");
    }

    OPENMS_LOG_INFO << "Re-annotate from file: SWATH " <<
      swath_maps[i].lower << " / " << swath_maps[i].upper << " (raw data) is annotated via swath_windows_file with " <<
      swath_prec_lower_[j] << " / " << swath_prec_upper_[j] << std::endl;

    // new boundaries should be smaller/equal than the original ones from the data
    if (!(swath_maps[i].lower <= swath_prec_lower_[j] && swath_prec_upper_[j] <= swath_maps[i].upper))
    {
      String err = "SWATH window #" + String(j+1) + " from swath_windows_file extends beyond the Swath window of the data."
        " Did you forget to apply the sort_swath_maps flag? (override with -force)";
      if (force)
      {
        OPENMS_LOG_ERROR << err << "\nOverridden with -force.\n";
      }
      else
      {
        throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, err);
      }
    }

    // overwrite the map's boundaries with the annotated ones
    swath_maps[i].lower = swath_prec_lower_[j];
    swath_maps[i].upper = swath_prec_upper_[j];
    j++;
  }

  // all windows from the file must have been consumed
  if (j != swath_prec_upper_.size())
  {
    OPENMS_LOG_FATAL_ERROR << "The number of SWATH maps read from the raw data (" <<
      j << ") and from the annotation file (" << swath_prec_upper_.size() << ") do not match." << std::endl;
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
        "The number of SWATH maps read from the raw data and from the annotation file do not match.");
  }
}
void SwathWindowLoader::readSwathWindows(const std::string & filename,
std::vector<double> & swath_prec_lower_,
std::vector<double> & swath_prec_upper_ )
{
std::ifstream data(filename.c_str());
String line;
std::vector<String> headerSubstrings;
double lower, upper;
// Check for presence of header
std::getline(data, line);
try
{ // If string can be successfully converted to double (excluding initial spaces) then the first line is not a header
StringUtils::split(line.trim().substitute('\t', ' '), ' ', headerSubstrings);
StringUtils::toDouble(headerSubstrings[0]);
OPENMS_LOG_INFO << "Swath Header not found" << std::endl;
}
catch (const Exception::ConversionError &)
{
OPENMS_LOG_INFO << "Read Swath window header: '" << line << std::endl;
std::getline(data, line);
}
// read the rest of the SWATH window file
do
{
std::stringstream lineStream(line);
lineStream >> lower;
lineStream >> upper;
swath_prec_lower_.push_back(lower);
swath_prec_upper_.push_back(upper);
if (!(lower < upper))
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Swath window file contains illegal ranges", line);
}
} while (std::getline(data, line));
assert(swath_prec_lower_.size() == swath_prec_upper_.size());
OPENMS_LOG_INFO << "Read Swath window file with " << swath_prec_lower_.size() << " SWATH windows." << std::endl;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/MRMDecoy.cpp | .cpp | 43,793 | 1,161 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: George Rosenberger $
// $Authors: George Rosenberger, Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/MRMDecoy.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/DataAccessHelper.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <boost/assign.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_int.hpp>
#include <boost/random/variate_generator.hpp>
#include <boost/unordered_map.hpp>
#include <unordered_set>
namespace OpenMS
{
// Registers the DefaultParamHandler parameters that control decoy generation:
// which residues stay at fixed positions, and whether the peptide N-/C-termini
// are kept in place during shuffling/reversal.
MRMDecoy::MRMDecoy() :
  DefaultParamHandler("MRMDecoy"),
  ProgressLogger()
{
  defaults_.setValue("non_shuffle_pattern", "KRP", "Residues to not shuffle (keep at a constant position when shuffling). Default is 'KPR' to not shuffle lysine, arginine and proline.");
  defaults_.setValue("keepPeptideNTerm", "true", "Whether to keep peptide N terminus constant when shuffling / reversing.", {"advanced"});
  defaults_.setValidStrings("keepPeptideNTerm", {"true","false"});
  defaults_.setValue("keepPeptideCTerm", "true", "Whether to keep peptide C terminus constant when shuffling / reversing.", {"advanced"});
  defaults_.setValidStrings("keepPeptideCTerm", {"true","false"});
  // write defaults into Param object param_
  defaultsToParam_();
}
// Caches the current parameter values in member variables.
void MRMDecoy::updateMembers_()
{
  keepN_ = param_.getValue("keepPeptideNTerm").toBool();
  keepC_ = param_.getValue("keepPeptideCTerm").toBool();
  keep_const_pattern_ = param_.getValue("non_shuffle_pattern").toString();
}
// Returns the indices of residues that must not move: optionally the N- and/or
// C-terminal residue, plus every residue matching a character of
// keep_const_pattern (one index entry per matching pattern character).
MRMDecoy::IndexType MRMDecoy::findFixedResidues(const std::string& sequence,
    bool keepN, bool keepC, const OpenMS::String& keep_const_pattern)
{
  MRMDecoy::IndexType idx;
  const size_t len = sequence.size();
  for (size_t i = 0; i < len; i++)
  {
    // terminal residues are blocked when requested
    if ( (keepN && i == 0) || (keepC && i + 1 == len))
    {
      idx.push_back(i);
      continue;
    }
    // residues listed in the pattern are blocked as well
    for (const char fixed_aa : keep_const_pattern)
    {
      if (sequence[i] == fixed_aa)
      {
        idx.push_back(i);
      }
    }
  }
  return idx;
}
// Fixed residues for reversal: only the pattern residues (K/R/P by default)
// are blocked; terminus handling is passed explicitly by reversePeptide().
MRMDecoy::IndexType MRMDecoy::findFixedResidues_(const std::string& sequence) const
{
  return MRMDecoy::findFixedResidues(sequence, false, false, keep_const_pattern_);
}
// Fixed residues for shuffling: pattern residues plus, depending on the
// keepPeptideNTerm/keepPeptideCTerm parameters, the terminal residues.
MRMDecoy::IndexType MRMDecoy::findFixedAndTermResidues_(const std::string& sequence) const
{
  return MRMDecoy::findFixedResidues(sequence, keepN_, keepC_, keep_const_pattern_);
}
float MRMDecoy::AASequenceIdentity(const String& sequence, const String& decoy) const
{
OPENMS_PRECONDITION(sequence.size() == decoy.size(), "Cannot compare two sequences of unequal length");
std::vector<char> sequence_v(sequence.begin(), sequence.end());
std::vector<char> decoy_v(decoy.begin(), decoy.end());
int running = 0;
for (Size i = 0; i < sequence_v.size(); i++)
{
if (sequence_v[i] == decoy_v[i])
{
running += 1;
}
}
double identity = (double) running / sequence_v.size();
return identity;
}
//
// Helper functions for converting between heavy and light modification formats
//
namespace detail
{
/// Convert heavy Peptide modifications to the light format, which keeps only
/// the location and the UniMod identifier of each modification.
inline std::vector<OpenSwath::LightModification> toLightMods(
  const std::vector<TargetedExperiment::Peptide::Modification>& heavy_mods)
{
  std::vector<OpenSwath::LightModification> light_mods(heavy_mods.size());
  for (Size i = 0; i < heavy_mods.size(); ++i)
  {
    light_mods[i].location = heavy_mods[i].location;
    light_mods[i].unimod_id = heavy_mods[i].unimod_id;
  }
  return light_mods;
}
/// Copy the (possibly remapped) locations from the light modifications back
/// into the corresponding heavy modifications. Both lists must be parallel.
inline void applyLocationUpdates(
  std::vector<TargetedExperiment::Peptide::Modification>& heavy_mods,
  const std::vector<OpenSwath::LightModification>& light_mods)
{
  OPENMS_PRECONDITION(heavy_mods.size() == light_mods.size(), "Modification count mismatch");
  auto light_it = light_mods.begin();
  for (auto& heavy : heavy_mods)
  {
    heavy.location = light_it->location;
    ++light_it;
  }
}
} // namespace detail
// Produces a shuffled decoy of the given peptide: converts the modifications
// to the light format, delegates the actual shuffling to shufflePeptideLight()
// and writes the shuffled sequence / remapped modification locations back
// into the (by-value) peptide, which is then returned.
OpenMS::TargetedExperiment::Peptide MRMDecoy::shufflePeptide(
  OpenMS::TargetedExperiment::Peptide peptide, const double identity_threshold, int seed,
  const int max_attempts) const
{
#ifdef DEBUG_MRMDECOY
  std::cout << " shuffle peptide " << peptide.sequence << '\n';
  seed = 41;
#endif

  // Delegate to light implementation
  auto light_mods = detail::toLightMods(peptide.mods);
  auto [new_sequence, new_mods] = shufflePeptideLight(
    peptide.sequence, light_mods, identity_threshold, seed, max_attempts);

  peptide.sequence = new_sequence;
  detail::applyLocationUpdates(peptide.mods, new_mods);

#ifdef DEBUG_MRMDECOY
  for (Size j = 0; j < peptide.mods.size(); j++)
  {
    std::cout << " position after shuffling " << peptide.mods[j].location << " mass difference " << peptide.mods[j].mono_mass_delta << '\n';
  }
#endif

  return peptide;
}
// Produces a reversed decoy of the given peptide by delegating the sequence /
// modification handling to the light implementation and copying the results
// into a heavy Peptide copy.
OpenMS::TargetedExperiment::Peptide MRMDecoy::reversePeptide(
  const OpenMS::TargetedExperiment::Peptide& peptide, const bool keepN, const bool keepC,
  const String& const_pattern)
{
  OpenMS::TargetedExperiment::Peptide reversed = peptide;

  const auto light_mods = detail::toLightMods(peptide.mods);
  const auto result = reversePeptideLight(peptide.sequence, light_mods, keepN, keepC, const_pattern);

  reversed.sequence = result.first;
  detail::applyLocationUpdates(reversed.mods, result.second);
  return reversed;
}
// Pseudo-reverse: the C-terminal residue stays fixed, everything else is
// reversed (no additional fixed-residue pattern is applied).
OpenMS::TargetedExperiment::Peptide MRMDecoy::pseudoreversePeptide_(
  const OpenMS::TargetedExperiment::Peptide& peptide) const
{
  return MRMDecoy::reversePeptide(peptide, /*keepN=*/false, /*keepC=*/true);
}
// Full reverse: no terminal residue is kept fixed and no fixed-residue
// pattern is applied.
OpenMS::TargetedExperiment::Peptide MRMDecoy::reversePeptide_(
  const OpenMS::TargetedExperiment::Peptide& peptide) const
{
  return MRMDecoy::reversePeptide(peptide, /*keepN=*/false, /*keepC=*/false);
}
// Swaps a C-terminal K <-> R (or replaces another terminal residue); the light
// implementation mutates the sequence string in place.
void MRMDecoy::switchKR(OpenMS::TargetedExperiment::Peptide& peptide) const
{
  switchKRLight(peptide.sequence);
}
// True if the peptide carries an N-terminal modification, a C-terminal
// modification, or (if checkCterminalAA) a modification on the last residue.
bool MRMDecoy::hasCNterminalMods_(const OpenMS::TargetedExperiment::Peptide& peptide, bool checkCterminalAA) const
{
  // convert to the light modification format and reuse the light check
  return hasCNterminalModsLight_(detail::toLightMods(peptide.mods),
                                 peptide.sequence.size(), checkCterminalAA);
}
// Builds the modified sequence string, e.g. "PEPT(UniMod:21)IDE": walks the
// positions -1 (N-terminus) .. sequence.size() (C-terminus), emitting the
// residue (when inside the sequence) followed by every modification that is
// anchored at that position.
String MRMDecoy::getModifiedPeptideSequence_(const OpenMS::TargetedExperiment::Peptide& pep) const
{
  const int seq_len = static_cast<int>(pep.sequence.size());
  String full_peptide_name;
  for (int loc = -1; loc <= seq_len; loc++)
  {
    if (loc > -1 && loc < seq_len)
    {
      full_peptide_name += pep.sequence[loc];
    }
    // terminal modifications live at the virtual positions -1 and seq_len
    for (const auto& mod : pep.mods)
    {
      if (mod.location == loc)
      {
        full_peptide_name += "(UniMod:" + String(mod.unimod_id) + ")";
      }
    }
  }
  return full_peptide_name;
}
//
// Light versions of decoy generation methods
// These operate directly on strings and LightModification vectors for memory efficiency
//
// Light (string-based) peptide reversal. Returns the reversed sequence plus
// the modification list with locations remapped accordingly. Residues matching
// const_pattern and, depending on keepN/keepC, the terminal residues stay at
// their original positions.
std::pair<std::string, std::vector<OpenSwath::LightModification>> MRMDecoy::reversePeptideLight(
  const std::string& sequence,
  const std::vector<OpenSwath::LightModification>& modifications,
  const bool keepN,
  const bool keepC,
  const String& const_pattern)
{
  std::string reversed = sequence;
  std::vector<OpenSwath::LightModification> reversed_mods = modifications;

  // Block tryptic residues and N-/C-terminus from reversing
  MRMDecoy::IndexType idx = MRMDecoy::findFixedResidues(sequence, keepN, keepC, const_pattern);

  // identity permutation over all positions
  std::vector<Size> peptide_index;
  for (Size i = 0; i < sequence.size(); i++)
  {
    peptide_index.push_back(i);
  }

  // Erase the fixed indices (from the back to preserve the remaining indices)
  for (IndexType::reverse_iterator it = idx.rbegin(); it != idx.rend(); ++it)
  {
    peptide_index.erase(peptide_index.begin() + *it);
  }

  // Reverse the order of the movable positions
  std::reverse(peptide_index.begin(), peptide_index.end());

  // Re-insert the fixed positions at their original places
  for (IndexType::iterator it = idx.begin(); it != idx.end(); ++it)
  {
    peptide_index.insert(peptide_index.begin() + *it, *it);
  }

  // Apply the permutation: position i of the result takes the residue that
  // was at position peptide_index[i] of the input.
  for (Size i = 0; i < peptide_index.size(); i++)
  {
    reversed[i] = sequence[peptide_index[i]];
  }

  // Relocate modifications based on index mapping
  for (auto& mod : reversed_mods)
  {
    // C- and N-terminal mods are implicitly not reversed (positions -1 and sequence.size())
    if (mod.location >= 0 && mod.location < static_cast<int>(sequence.size()))
    {
      for (Size k = 0; k < peptide_index.size(); k++)
      {
        if (static_cast<int>(peptide_index[k]) == mod.location)
        {
          mod.location = static_cast<int>(k);
          break;
        }
      }
    }
  }
  return std::make_pair(reversed, reversed_mods);
}
// Light pseudo-reverse: keep the C-terminal residue fixed, reverse everything
// else. No const_pattern is passed, mirroring the heavy pseudoreversePeptide_().
std::pair<std::string, std::vector<OpenSwath::LightModification>> MRMDecoy::pseudoreversePeptideLight_(
  const std::string& sequence,
  const std::vector<OpenSwath::LightModification>& modifications) const
{
  return MRMDecoy::reversePeptideLight(sequence, modifications, /*keepN=*/false, /*keepC=*/true);
}
// Mutates the last residue of the sequence: K becomes R, R becomes K, and any
// other residue is replaced by one drawn deterministically from a pool of
// amino acids (K, R and P excluded).
void MRMDecoy::switchKRLight(std::string& sequence)
{
  if (sequence.empty()) return;

  static const char aa_pool[] =
  {
    'A', 'N', 'D', 'C', 'E', 'Q', 'G', 'H', 'I', 'L', 'M', 'F', 'S', 'T', 'W',
    'Y', 'V'
  };
  constexpr int aa_pool_size = 17;

  char& last = sequence[sequence.size() - 1];
  if (last == 'K')
  {
    last = 'R';
  }
  else if (last == 'R')
  {
    last = 'K';
  }
  else
  {
    // Use portable FNV-1a hash of sequence to deterministically select replacement AA.
    // This ensures the same peptide always gets the same replacement regardless of
    // processing order, making results reproducible across platforms (including ARM64).
    uint64_t hash = 14695981039346656037ULL; // FNV offset basis
    for (const char c : sequence)
    {
      hash ^= static_cast<uint64_t>(c);
      hash *= 1099511628211ULL; // FNV prime
    }
    last = aa_pool[static_cast<int>(hash % aa_pool_size)];
  }
}
// True if any modification sits on the N-terminus (location -1), the
// C-terminus (location == sequence_length), or -- when checkCterminalAA is
// set -- on the last residue (location == sequence_length - 1).
bool MRMDecoy::hasCNterminalModsLight_(
  const std::vector<OpenSwath::LightModification>& modifications,
  size_t sequence_length,
  bool checkCterminalAA)
{
  const int c_term = static_cast<int>(sequence_length);
  for (const auto& mod : modifications)
  {
    const int loc = mod.location;
    if (loc == -1 || loc == c_term)
    {
      return true;
    }
    if (checkCterminalAA && loc == c_term - 1)
    {
      return true;
    }
  }
  return false;
}
// Light (string-based) peptide shuffling. Repeatedly applies a seeded
// Fisher-Yates shuffle to the movable residues until the sequence identity to
// the (possibly mutated) input drops to identity_threshold or max_attempts is
// reached. Every 10th attempt one random non-fixed residue is mutated to help
// convergence. Returns the shuffled sequence and the modifications with
// remapped locations. NOTE: the RNG call order deliberately mirrors the heavy
// implementation so both produce identical decoys for the same seed.
std::pair<std::string, std::vector<OpenSwath::LightModification>> MRMDecoy::shufflePeptideLight(
  const std::string& sequence,
  const std::vector<OpenSwath::LightModification>& modifications,
  const double identity_threshold,
  int seed,
  const int max_attempts) const
{
  if (seed == -1)
  {
    seed = time(nullptr); // non-reproducible default seed
  }
  std::string shuffled = sequence;
  std::vector<OpenSwath::LightModification> shuffled_mods = modifications;

  // Working copy of input that may be mutated during iterations
  std::string peptide_seq = sequence;

  boost::mt19937 generator(seed);
  boost::uniform_int<> uni_dist;
  boost::variate_generator<boost::mt19937&, boost::uniform_int<> > pseudoRNG(generator, uni_dist);

  // replacement pool: amino acids without K, R and P
  static std::string aa[] =
  {
    "A", "N", "D", "C", "E", "Q", "G", "H", "I", "L", "M", "F", "S", "T", "W",
    "Y", "V"
  };
  constexpr int aa_size = 17;

  int attempts = 0;
  // Loop: attempt to shuffle and check whether difference is large enough.
  // Note: compare against peptide_seq (not sequence) since peptide_seq may be mutated.
  while (AASequenceIdentity(peptide_seq, shuffled) > identity_threshold &&
         attempts < max_attempts)
  {
    // Block tryptic residues and N-/C-terminus from shuffling
    MRMDecoy::IndexType idx = findFixedAndTermResidues_(peptide_seq);
    shuffled = peptide_seq;
    shuffled_mods = modifications;

    // identity permutation over all positions
    std::vector<Size> peptide_index;
    for (Size i = 0; i < peptide_seq.size(); i++)
    {
      peptide_index.push_back(i);
    }

    // Erase the fixed indices (from the back to preserve the remaining indices)
    for (IndexType::reverse_iterator it = idx.rbegin(); it != idx.rend(); ++it)
    {
      peptide_index.erase(peptide_index.begin() + *it);
    }

    // Shuffle the movable positions (Fisher-Yates shuffle)
    if (peptide_index.begin() != peptide_index.end())
    {
      for (std::vector<Size>::iterator pI_it = peptide_index.begin() + 1; pI_it != peptide_index.end(); ++pI_it)
      {
        std::iter_swap(pI_it, peptide_index.begin() + pseudoRNG((pI_it - peptide_index.begin()) + 1));
      }
    }

    // Re-insert the fixed positions at their original places
    for (IndexType::iterator it = idx.begin(); it != idx.end(); ++it)
    {
      peptide_index.insert(peptide_index.begin() + *it, *it);
    }

    // Apply the permutation to create the new sequence
    for (Size i = 0; i < peptide_index.size(); i++)
    {
      shuffled[i] = peptide_seq[peptide_index[i]];
    }

    // Relocate modifications based on index mapping
    for (Size j = 0; j < shuffled_mods.size(); j++)
    {
      // C- and N-terminal mods are implicitly not shuffled (positions -1 and sequence.size())
      if (shuffled_mods[j].location >= 0 && shuffled_mods[j].location < static_cast<int>(peptide_seq.size()))
      {
        for (Size k = 0; k < peptide_index.size(); k++)
        {
          if (static_cast<int>(peptide_index[k]) == shuffled_mods[j].location)
          {
            shuffled_mods[j].location = static_cast<int>(k);
            break;
          }
        }
      }
    }

    ++attempts;

    // Every 10 attempts (at 9, 19, 29...), mutate a random non-fixed AA to help convergence.
    // This matches the heavy version's timing (attempts % 10 == 9).
    if (attempts % 10 == 9)
    {
      // Important: pick the new amino acid FIRST (matching heavy version's RNG call order)
      int res_pos = (pseudoRNG() % aa_size);
      // Find a position that is neither modified nor N-/C-terminal
      int pep_pos = -1;
      size_t pos_trials = 0;
      while (pep_pos < 0 && pos_trials < shuffled.size())
      {
        pep_pos = (pseudoRNG() % shuffled.size());
        // Check if position is modified or is N/C terminus
        bool is_modified = false;
        for (const auto& mod : shuffled_mods)
        {
          if (mod.location == pep_pos)
          {
            is_modified = true;
            break;
          }
        }
        if (is_modified || (pep_pos == 0) || (pep_pos == static_cast<int>(shuffled.size() - 1)))
        {
          pep_pos = -1;
        }
        else
        {
          // Mutate the amino acid
          shuffled[pep_pos] = aa[res_pos][0];
        }
        ++pos_trials;
      }
      // Important: persist the mutation for next iteration (like heavy version)
      peptide_seq = shuffled;
    }
  }
  return std::make_pair(shuffled, shuffled_mods);
}
  /**
    @brief Generate decoy assays for a whole TargetedExperiment (heavy data model).

    For every selected target peptide a decoy sequence is produced with the chosen
    @p method ("pseudo-reverse", "reverse" or "shuffle"), the decoy transitions are
    re-annotated on the decoy ion series, and peptides/proteins that end up without
    any usable transitions are pruned from the output experiment @p dec.

    @note Decoy identifiers are derived deterministically from the target
    identifiers by prefixing @p decoy_tag; this is what links decoy peptides,
    proteins and transitions together across the two passes below.
  */
  void MRMDecoy::generateDecoys(const OpenMS::TargetedExperiment& exp, OpenMS::TargetedExperiment& dec,
                                const String& method, const double aim_decoy_fraction, const bool do_switchKR,
                                const String& decoy_tag, const int max_attempts, const double identity_threshold,
                                const double precursor_mz_shift, const double product_mz_shift, const double product_mz_threshold,
                                const std::vector<String>& fragment_types, const std::vector<size_t>& fragment_charges,
                                const bool enable_specific_losses, const bool enable_unspecific_losses, const int round_decPow) const
  {
    MRMIonSeries mrmis;
    MRMDecoy::PeptideVectorType peptides, decoy_peptides;
    MRMDecoy::ProteinVectorType proteins, decoy_proteins;
    MRMDecoy::TransitionVectorType decoy_transitions;
    // Decoy proteins are plain copies of the targets with a tagged id.
    for (Size i = 0; i < exp.getProteins().size(); i++)
    {
      OpenMS::TargetedExperiment::Protein protein = exp.getProteins()[i];
      protein.id = decoy_tag + protein.id;
      proteins.push_back(protein);
    }

    std::vector<size_t> item_list, selection_list;
    item_list.reserve(exp.getPeptides().size());
    for (Size k = 0; k < exp.getPeptides().size(); k++) {item_list.push_back(k);}

    if (aim_decoy_fraction > 1.0)
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Decoy fraction needs to be less than one (values larger than one currently not supported).");
    }
    else if (aim_decoy_fraction < 1.0)
    {
      // Randomly sub-sample target indices until the requested fraction is
      // reached; the modulo wrap keeps this safe even for repeated passes.
      Math::RandomShuffler shuffler;
      shuffler.portable_random_shuffle(item_list.begin(), item_list.end());
      selection_list.reserve(aim_decoy_fraction * exp.getPeptides().size());
      Size k = 0;
      while (selection_list.size() < aim_decoy_fraction * exp.getPeptides().size())
      {
        selection_list.push_back( item_list[ k++ % item_list.size() ]);
      }
    }
    else
    {
      selection_list = item_list;
    }

    // Create an unordered_map of all precursors (modified peptide sequence + charge) to their IDs, this will be used to search if a decoy sequence is also a target sequence
    std::unordered_map<std::string, std::string> allPeptideSequences;
    for (const auto& pep_idx: selection_list)
    {
      OpenMS::TargetedExperiment::Peptide peptide = exp.getPeptides()[pep_idx];

      // create a modified peptide sequence string
      allPeptideSequences[MRMDecoy::getModifiedPeptideSequence_(peptide) + String(peptide.getChargeState()) ] = peptide.id;
    }

    std::unordered_set<String> exclusion_peptides;
    // Go through all peptides and apply the decoy method to the sequence
    // (pseudo-reverse, reverse or shuffle). Then set the peptides and proteins of the decoy
    // experiment.
    Size progress = 0;
    startProgress(0, selection_list.size(), "Generating decoy peptides");
    for (const auto& pep_idx : selection_list)
    {
      setProgress(++progress);
      OpenMS::TargetedExperiment::Peptide peptide = exp.getPeptides()[pep_idx];
      peptide.id = decoy_tag + peptide.id;
      if (!peptide.getPeptideGroupLabel().empty())
      {
        peptide.setPeptideGroupLabel(decoy_tag + peptide.getPeptideGroupLabel());
      }

      if (method == "pseudo-reverse")
      {
        // exclude peptide if it has C/N terminal modifications because we can't do a (partial) reverse
        if (MRMDecoy::hasCNterminalMods_(peptide, do_switchKR))
        {
          OPENMS_LOG_DEBUG << "[peptide] Skipping " << peptide.id << " due to C/N-terminal modifications\n";
          exclusion_peptides.insert(peptide.id);
        }
        else
        {
          peptide = MRMDecoy::pseudoreversePeptide_(peptide);
          if (do_switchKR) { switchKR(peptide); }
        }
      }
      else if (method == "reverse")
      {
        // exclude peptide if it has C/N terminal modifications because we can't do a (partial) reverse
        if (MRMDecoy::hasCNterminalMods_(peptide, false))
        {
          OPENMS_LOG_DEBUG << "[peptide] Skipping " << peptide.id << " due to C/N-terminal modifications\n";
          exclusion_peptides.insert(peptide.id);
        }
        else
        {
          peptide = MRMDecoy::reversePeptide_(peptide);
        }
      }
      else if (method == "shuffle")
      {
        peptide = MRMDecoy::shufflePeptide(peptide, identity_threshold, -1, max_attempts);
        // NOTE: the C/N-terminal check happens after shuffling here, because the
        // shuffle itself may not move terminal residues but switchKR would.
        if (do_switchKR && MRMDecoy::hasCNterminalMods_(peptide, do_switchKR))
        {
          OPENMS_LOG_DEBUG << "[peptide] Skipping " << peptide.id << " due to C/N-terminal modifications\n";
          exclusion_peptides.insert(peptide.id);
        }
        else if (do_switchKR) { switchKR(peptide); }
      }

      // Check that the decoy precursor does not happen to be a target precursor AND is not already present.
      // Use getModifiedPeptideSequence_ to match the key format used when populating allPeptideSequences above.
      const std::string peptide_key = MRMDecoy::getModifiedPeptideSequence_(peptide) + String(peptide.getChargeState());
      if (allPeptideSequences.find(peptide_key) != allPeptideSequences.end())
      {
        OPENMS_LOG_DEBUG << "[peptide] Skipping " << peptide.id << " since decoy peptide is also a target peptide or this decoy peptide is already present\n";
        exclusion_peptides.insert(peptide.id);
      }
      else
      {
        // Since this decoy will be added, add it to the precursor map so that the same decoy is not added twice
        OPENMS_LOG_DEBUG << "[peptide] adding " << peptide.id << " to master list of peptides \n";
        allPeptideSequences[peptide_key] = peptide.id;
      }

      for (Size prot_idx = 0; prot_idx < peptide.protein_refs.size(); ++prot_idx)
      {
        peptide.protein_refs[prot_idx] = decoy_tag + peptide.protein_refs[prot_idx];
      }
      peptides.push_back(peptide);
    }
    endProgress();
    dec.setPeptides(peptides); // temporary set peptides, overwrite later again!

    // hash of the peptide reference containing all transitions
    MRMDecoy::PeptideTransitionMapType peptide_trans_map;
    for (Size i = 0; i < exp.getTransitions().size(); i++)
    {
      peptide_trans_map[exp.getTransitions()[i].getPeptideRef()].push_back(&exp.getTransitions()[i]);
    }

    progress = 0;
    startProgress(0, peptide_trans_map.size(), "Generating decoy transitions");
    for (MRMDecoy::PeptideTransitionMapType::iterator pep_it = peptide_trans_map.begin();
         pep_it != peptide_trans_map.end(); ++pep_it)
    {
      setProgress(++progress);
      String peptide_ref = pep_it->first;
      String decoy_peptide_ref = decoy_tag + pep_it->first; // see above, the decoy peptide id is computed deterministically from the target id
      if (!dec.hasPeptide(decoy_peptide_ref)) { continue; }
      const TargetedExperiment::Peptide& target_peptide = exp.getPeptideByRef(peptide_ref);
      const TargetedExperiment::Peptide& decoy_peptide = dec.getPeptideByRef(decoy_peptide_ref);
      OpenMS::AASequence target_peptide_sequence = TargetedExperimentHelper::getAASequence(target_peptide);
      OpenMS::AASequence decoy_peptide_sequence = TargetedExperimentHelper::getAASequence(decoy_peptide);
      int decoy_charge = 1;
      int target_charge = 1;
      if (decoy_peptide.hasCharge()) { decoy_charge = decoy_peptide.getChargeState(); }
      if (target_peptide.hasCharge()) { target_charge = target_peptide.getChargeState(); }
      MRMIonSeries::IonSeries decoy_ionseries = mrmis.getIonSeries(decoy_peptide_sequence, decoy_charge,
                                                                   fragment_types, fragment_charges, enable_specific_losses,
                                                                   enable_unspecific_losses, round_decPow);
      MRMIonSeries::IonSeries target_ionseries = mrmis.getIonSeries(target_peptide_sequence, target_charge,
                                                                    fragment_types, fragment_charges, enable_specific_losses,
                                                                    enable_unspecific_losses, round_decPow);

      // Compute (new) decoy precursor m/z based on the K/R replacement and the AA changes in the shuffle algorithm
      double decoy_precursor_mz = decoy_peptide_sequence.getMZ(decoy_charge);
      decoy_precursor_mz += precursor_mz_shift; // fix for TOPPView: Duplicate precursor MZ is not displayed.

      for (Size i = 0; i < pep_it->second.size(); i++)
      {
        const ReactionMonitoringTransition tr = *(pep_it->second[i]);
        // Only detecting (non-decoy) target transitions are mirrored into decoys.
        if (!tr.isDetectingTransition() || tr.getDecoyTransitionType() == ReactionMonitoringTransition::DECOY)
        {
          continue;
        }
        ReactionMonitoringTransition decoy_tr = tr; // copy the target transition
        decoy_tr.setNativeID(decoy_tag + tr.getNativeID());
        decoy_tr.setDecoyTransitionType(ReactionMonitoringTransition::DECOY);
        decoy_tr.setPrecursorMZ(decoy_precursor_mz);

        // determine the current annotation for the target ion and then select
        // the appropriate decoy ion for this target transition
        std::pair<String, double> targetion = mrmis.annotateIon(target_ionseries, tr.getProductMZ(), product_mz_threshold);
        std::pair<String, double> decoyion = mrmis.getIon(decoy_ionseries, targetion.first);

        if (method == "shift")
        {
          decoy_tr.setProductMZ(decoyion.second + product_mz_shift);
        }
        else
        {
          decoy_tr.setProductMZ(decoyion.second);
        }
        decoy_tr.setPeptideRef(decoy_tag + tr.getPeptideRef());

        if (decoyion.second > 0)
        {
          decoy_transitions.push_back(decoy_tr);
        }
        else
        {
          // transition could not be annotated, remove whole peptide
          exclusion_peptides.insert(decoy_tr.getPeptideRef());
          OPENMS_LOG_DEBUG << "[peptide] Skipping " << decoy_tr.getPeptideRef() << " due to missing annotation\n";
        }
      } // end loop over transitions
    } // end loop over peptides
    endProgress();

    // Drop all transitions belonging to an excluded decoy peptide.
    decoy_transitions.erase(std::remove_if(
      decoy_transitions.begin(), decoy_transitions.end(),
      [&exclusion_peptides](const OpenMS::ReactionMonitoringTransition& tr)
      {
        return exclusion_peptides.find(tr.getPeptideRef()) != exclusion_peptides.end();
      }), decoy_transitions.end());
    dec.setTransitions(std::move(decoy_transitions));

    // Keep only peptides with surviving transitions, and remember which
    // proteins are still referenced by at least one of them.
    std::unordered_set<String> protein_ids;
    decoy_peptides.reserve(peptides.size());
    for (const auto& peptide : peptides)
    {
      // Check if peptide has any transitions left
      if (exclusion_peptides.find(peptide.id) == exclusion_peptides.end())
      {
        decoy_peptides.push_back(peptide);
        for (Size j = 0; j < peptide.protein_refs.size(); ++j)
        {
          protein_ids.insert(peptide.protein_refs[j]);
        }
      }
      else
      {
        OPENMS_LOG_DEBUG << "[peptide] Skipping " << peptide.id << " due to missing transitions\n";
      }
    }

    decoy_proteins.reserve(proteins.size());
    for (const auto& protein : proteins)
    {
      // Check if protein has any peptides left
      if (protein_ids.find(protein.id) != protein_ids.end())
      {
        decoy_proteins.push_back(protein);
      }
      else
      {
        OPENMS_LOG_DEBUG << "[protein] Skipping " << protein.id << " due to missing peptides\n";
      }
    }
    dec.setPeptides(std::move(decoy_peptides));
    dec.setProteins(std::move(decoy_proteins));
  }
  /**
    @brief Generate decoy assays from a LightTargetedExperiment (light data model).

    Mirrors generateDecoys() but operates on the light (OpenSwath) structures,
    avoiding the allocation of heavy TargetedExperiment::Peptide objects.
    Non-peptide compounds (metabolites) are copied verbatim with a tagged id.
    Peptides/proteins whose transitions could not be annotated are pruned
    from the output experiment @p dec.

    @note Duplicate detection uses the modified sequence (UniMod notation)
    plus charge, matching the key format of the heavy path.
  */
  void MRMDecoy::generateDecoysLight(const OpenSwath::LightTargetedExperiment& exp,
                                     OpenSwath::LightTargetedExperiment& dec,
                                     const String& method,
                                     const double aim_decoy_fraction,
                                     const bool do_switchKR,
                                     const String& decoy_tag,
                                     const int max_attempts,
                                     const double identity_threshold,
                                     const double precursor_mz_shift,
                                     const double product_mz_shift,
                                     const double product_mz_threshold,
                                     const std::vector<String>& fragment_types,
                                     const std::vector<size_t>& fragment_charges,
                                     const bool enable_specific_losses,
                                     const bool enable_unspecific_losses,
                                     const int round_decPow) const
  {
    MRMIonSeries mrmis;
    std::vector<OpenSwath::LightCompound> decoy_compounds;
    std::vector<OpenSwath::LightProtein> decoy_proteins;
    std::vector<OpenSwath::LightTransition> decoy_transitions;

    // Create decoy proteins
    for (const auto& protein : exp.proteins)
    {
      OpenSwath::LightProtein decoy_protein = protein;
      decoy_protein.id = decoy_tag + protein.id;
      decoy_proteins.push_back(decoy_protein);
    }

    std::vector<size_t> item_list, selection_list;
    item_list.reserve(exp.compounds.size());
    for (Size k = 0; k < exp.compounds.size(); k++) { item_list.push_back(k); }

    if (aim_decoy_fraction > 1.0)
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                       "Decoy fraction needs to be less than one");
    }
    else if (aim_decoy_fraction < 1.0)
    {
      // Randomly sub-sample compound indices until the requested fraction
      // is reached (modulo wrap mirrors the heavy path).
      Math::RandomShuffler shuffler;
      shuffler.portable_random_shuffle(item_list.begin(), item_list.end());
      selection_list.reserve(aim_decoy_fraction * exp.compounds.size());
      Size k = 0;
      while (selection_list.size() < aim_decoy_fraction * exp.compounds.size())
      {
        selection_list.push_back(item_list[k++ % item_list.size()]);
      }
    }
    else
    {
      selection_list = item_list;
    }

    // Create map of all peptide sequences to detect duplicates
    std::unordered_map<std::string, std::string> allPeptideSequences;
    for (const auto& idx : selection_list)
    {
      const auto& compound = exp.compounds[idx];
      if (compound.isPeptide())
      {
        allPeptideSequences[compound.sequence + std::to_string(compound.charge)] = compound.id;
      }
    }

    std::unordered_set<std::string> exclusion_peptides;

    // Build compound map
    std::map<std::string, const OpenSwath::LightCompound*> compound_map;
    for (const auto& compound : exp.compounds)
    {
      compound_map[compound.id] = &compound;
    }

    // Go through all compounds and generate decoys
    Size progress = 0;
    startProgress(0, selection_list.size(), "Generating decoy peptides (Light)");
    for (const auto& idx : selection_list)
    {
      setProgress(++progress);
      OpenSwath::LightCompound decoy_compound = exp.compounds[idx];
      const std::string original_id = decoy_compound.id;
      decoy_compound.id = decoy_tag + decoy_compound.id;

      // Update protein refs
      for (auto& prot_ref : decoy_compound.protein_refs)
      {
        prot_ref = decoy_tag + prot_ref;
      }

      // Update peptide_group_label
      if (!decoy_compound.peptide_group_label.empty())
      {
        decoy_compound.peptide_group_label = decoy_tag + decoy_compound.peptide_group_label;
      }

      if (!decoy_compound.isPeptide())
      {
        // For metabolites, just copy with decoy tag
        decoy_compounds.push_back(decoy_compound);
        continue;
      }

      // Parse sequence to AASequence
      OpenMS::AASequence original_sequence;
      try
      {
        original_sequence = AASequence::fromString(decoy_compound.sequence);
      }
      catch (Exception::InvalidValue&)
      {
        OPENMS_LOG_DEBUG << "[peptide] Skipping " << decoy_compound.id << " - cannot parse sequence\n";
        exclusion_peptides.insert(decoy_compound.id);
        continue;
      }

      // Use pure light-path decoy generation methods (no heavy Peptide allocation)
      std::string decoy_sequence;
      std::vector<OpenSwath::LightModification> decoy_mods;

      // Get unmodified sequence for decoy generation
      std::string unmodified_sequence = original_sequence.toUnmodifiedString();

      // Apply decoy method using light methods
      if (method == "pseudo-reverse")
      {
        if (hasCNterminalModsLight_(decoy_compound.modifications, unmodified_sequence.size(), do_switchKR))
        {
          OPENMS_LOG_DEBUG << "[peptide] Skipping " << decoy_compound.id << " due to C/N-terminal modifications\n";
          exclusion_peptides.insert(decoy_compound.id);
          continue;
        }
        auto result = pseudoreversePeptideLight_(unmodified_sequence, decoy_compound.modifications);
        decoy_sequence = result.first;
        decoy_mods = result.second;
        if (do_switchKR) { switchKRLight(decoy_sequence); }
      }
      else if (method == "reverse")
      {
        if (hasCNterminalModsLight_(decoy_compound.modifications, unmodified_sequence.size(), false))
        {
          OPENMS_LOG_DEBUG << "[peptide] Skipping " << decoy_compound.id << " due to C/N-terminal modifications\n";
          exclusion_peptides.insert(decoy_compound.id);
          continue;
        }
        // Note: heavy reversePeptide_() calls reversePeptide(peptide, false, false) with no const_pattern
        // so we must also use empty const_pattern here (the default)
        auto result = reversePeptideLight(unmodified_sequence, decoy_compound.modifications, false, false);
        decoy_sequence = result.first;
        decoy_mods = result.second;
      }
      else if (method == "shuffle")
      {
        auto result = shufflePeptideLight(unmodified_sequence, decoy_compound.modifications, identity_threshold, -1, max_attempts);
        decoy_sequence = result.first;
        decoy_mods = result.second;
        // NOTE: like the heavy path, the C/N-terminal check runs after shuffling
        // because only the subsequent K/R switch would touch the termini.
        if (do_switchKR && hasCNterminalModsLight_(decoy_mods, decoy_sequence.size(), do_switchKR))
        {
          OPENMS_LOG_DEBUG << "[peptide] Skipping " << decoy_compound.id << " due to C/N-terminal modifications\n";
          exclusion_peptides.insert(decoy_compound.id);
          continue;
        }
        else if (do_switchKR)
        {
          switchKRLight(decoy_sequence);
        }
      }
      else
      {
        decoy_sequence = unmodified_sequence;
        decoy_mods = decoy_compound.modifications;
      }

      // Build modified sequence string with UniMod notation (must be done before duplicate check)
      // This matches the Heavy path which uses getModifiedPeptideSequence_() for duplicate detection
      String full_decoy_sequence;
      // loc runs from -1 (N-terminus) to size() (C-terminus) so terminal
      // modifications are emitted before/after the residue characters.
      for (int loc = -1; loc <= static_cast<int>(decoy_sequence.size()); loc++)
      {
        if (loc > -1 && loc < static_cast<int>(decoy_sequence.size()))
        {
          full_decoy_sequence += decoy_sequence[loc];
        }
        // Add modifications at this location
        for (const auto& mod : decoy_mods)
        {
          if (mod.location == loc)
          {
            full_decoy_sequence += "(UniMod:" + String(mod.unimod_id) + ")";
          }
        }
      }
      decoy_compound.sequence = full_decoy_sequence;

      // Check for duplicates using MODIFIED sequence + charge (matching Heavy path behavior)
      // The Heavy path uses getModifiedPeptideSequence_(peptide) + charge for storing decoys,
      // which includes UniMod annotations. This ensures peptides with the same unmodified
      // sequence but different modifications are NOT considered duplicates.
      std::string decoy_key = full_decoy_sequence + String(decoy_compound.charge);
      if (allPeptideSequences.find(decoy_key) != allPeptideSequences.end())
      {
        OPENMS_LOG_DEBUG << "[peptide] Skipping " << decoy_compound.id << " since decoy peptide is also a target peptide or this decoy peptide is already present\n";
        exclusion_peptides.insert(decoy_compound.id);
        continue;
      }

      // Add to map with modified sequence (matching Heavy path line 612)
      OPENMS_LOG_DEBUG << "[peptide] adding " << decoy_compound.id << " to master list of peptides\n";
      allPeptideSequences[decoy_key] = decoy_compound.id;

      // Update modifications
      decoy_compound.modifications = decoy_mods;
      decoy_compounds.push_back(decoy_compound);
    }
    endProgress();

    // Build decoy compound map
    std::map<std::string, const OpenSwath::LightCompound*> decoy_compound_map;
    for (const auto& compound : decoy_compounds)
    {
      decoy_compound_map[compound.id] = &compound;
    }

    // Group transitions by peptide_ref
    std::map<std::string, std::vector<const OpenSwath::LightTransition*>> peptide_trans_map;
    for (const auto& tr : exp.transitions)
    {
      peptide_trans_map[tr.peptide_ref].push_back(&tr);
    }

    progress = 0;
    startProgress(0, peptide_trans_map.size(), "Generating decoy transitions (Light)");
    for (const auto& pep_it : peptide_trans_map)
    {
      setProgress(++progress);
      const std::string& peptide_ref = pep_it.first;
      std::string decoy_peptide_ref = decoy_tag + peptide_ref;

      // Check if we have this decoy compound
      auto decoy_comp_it = decoy_compound_map.find(decoy_peptide_ref);
      if (decoy_comp_it == decoy_compound_map.end())
      {
        continue;
      }

      auto target_comp_it = compound_map.find(peptide_ref);
      if (target_comp_it == compound_map.end())
      {
        continue;
      }

      const OpenSwath::LightCompound* target_compound = target_comp_it->second;
      const OpenSwath::LightCompound* decoy_compound = decoy_comp_it->second;

      if (!target_compound->isPeptide())
      {
        // For metabolites, just copy transitions with decoy tag
        for (const auto* tr : pep_it.second)
        {
          OpenSwath::LightTransition decoy_tr = *tr;
          decoy_tr.transition_name = decoy_tag + tr->transition_name;
          decoy_tr.peptide_ref = decoy_peptide_ref;
          decoy_tr.setDecoy(true);
          decoy_transitions.push_back(decoy_tr);
        }
        continue;
      }

      OpenMS::AASequence target_sequence, decoy_sequence;
      try
      {
        target_sequence = AASequence::fromString(target_compound->sequence);
        decoy_sequence = AASequence::fromString(decoy_compound->sequence);
      }
      catch (Exception::InvalidValue&)
      {
        OPENMS_LOG_DEBUG << "[transition] Skipping transitions for " << peptide_ref << " - cannot parse sequence\n";
        continue;
      }

      int target_charge = target_compound->charge > 0 ? target_compound->charge : 1;
      int decoy_charge = decoy_compound->charge > 0 ? decoy_compound->charge : 1;

      MRMIonSeries::IonSeries target_ionseries = mrmis.getIonSeries(
        target_sequence, target_charge, fragment_types, fragment_charges,
        enable_specific_losses, enable_unspecific_losses, round_decPow);
      MRMIonSeries::IonSeries decoy_ionseries = mrmis.getIonSeries(
        decoy_sequence, decoy_charge, fragment_types, fragment_charges,
        enable_specific_losses, enable_unspecific_losses, round_decPow);

      double decoy_precursor_mz = decoy_sequence.getMZ(decoy_charge) + precursor_mz_shift;

      for (const auto* tr : pep_it.second)
      {
        // Only detecting (non-decoy) target transitions are mirrored into decoys.
        if (!tr->isDetectingTransition() || tr->getDecoy())
        {
          continue;
        }

        OpenSwath::LightTransition decoy_tr = *tr;
        decoy_tr.transition_name = decoy_tag + tr->transition_name;
        decoy_tr.peptide_ref = decoy_peptide_ref;
        decoy_tr.setDecoy(true);
        decoy_tr.precursor_mz = decoy_precursor_mz;

        // Annotate and get decoy fragment m/z
        std::pair<String, double> targetion = mrmis.annotateIon(target_ionseries, tr->product_mz, product_mz_threshold);
        std::pair<String, double> decoyion = mrmis.getIon(decoy_ionseries, targetion.first);

        if (method == "shift")
        {
          decoy_tr.product_mz = decoyion.second + product_mz_shift;
        }
        else
        {
          decoy_tr.product_mz = decoyion.second;
        }

        if (decoyion.second > 0)
        {
          // Update fragment type/number/charge from annotation (format: b4^1, y10^2, etc.)
          if (!targetion.first.empty())
          {
            decoy_tr.setFragmentType(targetion.first.substr(0, 1));
            std::string num_str;
            size_t i = 1;
            for (; i < targetion.first.size() && std::isdigit(targetion.first[i]); ++i)
            {
              num_str += targetion.first[i];
            }
            if (!num_str.empty())
            {
              decoy_tr.fragment_nr = static_cast<int16_t>(std::stoi(num_str));
            }
            // Extract charge after '^'
            if (i < targetion.first.size() && targetion.first[i] == '^')
            {
              std::string charge_str;
              for (size_t j = i + 1; j < targetion.first.size() && std::isdigit(targetion.first[j]); ++j)
              {
                charge_str += targetion.first[j];
              }
              if (!charge_str.empty())
              {
                decoy_tr.fragment_charge = static_cast<int8_t>(std::stoi(charge_str));
              }
            }
          }
          decoy_transitions.push_back(decoy_tr);
        }
        else
        {
          // Transition could not be annotated, exclude whole peptide (matching Heavy path behavior)
          OPENMS_LOG_DEBUG << "[peptide] Skipping " << decoy_peptide_ref << " due to missing annotation\n";
          exclusion_peptides.insert(decoy_peptide_ref);
        }
      }
    }
    endProgress();

    // Filter out excluded peptides from transitions
    decoy_transitions.erase(
      std::remove_if(decoy_transitions.begin(), decoy_transitions.end(),
                     [&exclusion_peptides](const OpenSwath::LightTransition& tr) {
                       return exclusion_peptides.find(tr.peptide_ref) != exclusion_peptides.end();
                     }),
      decoy_transitions.end());

    // Filter compounds
    std::vector<OpenSwath::LightCompound> filtered_compounds;
    std::unordered_set<std::string> protein_ids;
    for (const auto& compound : decoy_compounds)
    {
      if (exclusion_peptides.find(compound.id) == exclusion_peptides.end())
      {
        filtered_compounds.push_back(compound);
        for (const auto& prot_ref : compound.protein_refs)
        {
          protein_ids.insert(prot_ref);
        }
      }
    }

    // Filter proteins
    std::vector<OpenSwath::LightProtein> filtered_proteins;
    for (const auto& protein : decoy_proteins)
    {
      if (protein_ids.find(protein.id) != protein_ids.end())
      {
        filtered_proteins.push_back(protein);
      }
    }

    dec.transitions = std::move(decoy_transitions);
    dec.compounds = std::move(filtered_compounds);
    dec.proteins = std::move(filtered_proteins);
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/MRMScoring.cpp | .cpp | 38,236 | 916 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/MRMScoring.h>
#include <OpenMS/OPENSWATHALGO/ALGO/StatsHelpers.h>
#include <OpenMS/DATASTRUCTURES/Matrix.h>
#include <OpenMS/DATASTRUCTURES/MatrixEigen.h>
#include <OpenMS/OPENSWATHALGO/Macros.h>
//#define MRMSCORING_TESTING
#include <algorithm>
#include <iostream>
#include <iterator>
#include <cmath> // for isnan
namespace OpenSwath
{
  /// Returns the fragment-vs-fragment cross-correlation matrix filled by
  /// one of the initializeXCorrMatrix() overloads.
  const MRMScoring::XCorrMatrixType& MRMScoring::getXCorrMatrix() const
  {
    return xcorr_matrix_;
  }
void MRMScoring::initializeXCorrMatrix(const std::vector< std::vector< double > >& data)
{
xcorr_matrix_.resize(data.size(), data.size());
xcorr_matrix_max_peak_.resize(data.size(), data.size());
xcorr_matrix_max_peak_sec_.resize(data.size(), data.size());
std::vector< std::vector< double > > tmp_data = data;
for (std::size_t i = 0; i < tmp_data.size(); i++)
{
Scoring::standardize_data(tmp_data[i]);
}
for (std::size_t i = 0; i < data.size(); i++)
{
for (std::size_t j = i; j < data.size(); j++)
{
// compute normalized cross correlation
xcorr_matrix_(i, j) = Scoring::normalizedCrossCorrelationPost(tmp_data[i], tmp_data[j], static_cast<int>(data[i].size()), 1);
auto x = Scoring::xcorrArrayGetMaxPeak(xcorr_matrix_(i, j));
xcorr_matrix_max_peak_(i, j) = std::abs(x->first);
xcorr_matrix_max_peak_sec_(i, j) = x->second;
}
}
}
  /// Returns the contrast (set1 vs set2) cross-correlation matrix filled by
  /// initializeXCorrContrastMatrix().
  const MRMScoring::XCorrMatrixType& MRMScoring::getXCorrContrastMatrix() const
  {
    return xcorr_contrast_matrix_;
  }
  /// Returns the precursor-vs-fragment cross-correlation matrix filled by
  /// one of the initializeXCorrPrecursorContrastMatrix() overloads.
  const MRMScoring::XCorrMatrixType& MRMScoring::getXCorrPrecursorContrastMatrix() const
  {
    return xcorr_precursor_contrast_matrix_;
  }
  /// Returns the combined (precursor + fragment) cross-correlation matrix
  /// filled by initializeXCorrPrecursorCombinedMatrix().
  const MRMScoring::XCorrMatrixType& MRMScoring::getXCorrPrecursorCombinedMatrix() const
  {
    return xcorr_precursor_combined_matrix_;
  }
void fillIntensityFromFeature(OpenSwath::IMRMFeature* mrmfeature, const std::vector<std::string>& ids, std::vector<std::vector<double>>& intensity)
{
intensity.resize(ids.size());
for (std::size_t i = 0; i < intensity.size(); i++)
{
MRMScoring::FeatureType fi = mrmfeature->getFeature(ids[i]);
fi->getIntensity(intensity[i]);
}
}
void fillIntensityFromPrecursorFeature(OpenSwath::IMRMFeature* mrmfeature, const std::vector<std::string>& ids, std::vector<std::vector<double>>& intensity)
{
intensity.resize(ids.size());
for (std::size_t i = 0; i < intensity.size(); i++)
{
MRMScoring::FeatureType fi = mrmfeature->getPrecursorFeature(ids[i]);
fi->getIntensity(intensity[i]);
}
}
void MRMScoring::initializeXCorrMatrix(OpenSwath::IMRMFeature* mrmfeature, const std::vector<std::string>& native_ids)
{
std::vector<std::vector<double>> intensity;
fillIntensityFromFeature(mrmfeature, native_ids, intensity);
for (std::size_t i = 0; i < intensity.size(); i++)
{
Scoring::standardize_data(intensity[i]);
}
xcorr_matrix_.resize(native_ids.size(), native_ids.size());
xcorr_matrix_max_peak_.resize(native_ids.size(), native_ids.size());
xcorr_matrix_max_peak_sec_.resize(native_ids.size(), native_ids.size());
for (std::size_t i = 0; i < native_ids.size(); i++)
{
for (std::size_t j = i; j < native_ids.size(); j++)
{
// compute normalized cross correlation
xcorr_matrix_(i, j) = Scoring::normalizedCrossCorrelationPost(intensity[i], intensity[j], static_cast<int>(intensity[i].size()), 1);
auto x = Scoring::xcorrArrayGetMaxPeak(xcorr_matrix_(i, j));
xcorr_matrix_max_peak_(i, j) = std::abs(x->first);
xcorr_matrix_max_peak_sec_(i, j) = x->second;
}
}
}
void MRMScoring::initializeXCorrContrastMatrix(OpenSwath::IMRMFeature* mrmfeature, const std::vector<std::string>& native_ids_set1, const std::vector<std::string>& native_ids_set2)
{
std::vector<std::vector<double>> intensityi, intensityj;
fillIntensityFromFeature(mrmfeature, native_ids_set1, intensityi);
for (std::size_t i = 0; i < intensityi.size(); i++)
{
Scoring::standardize_data(intensityi[i]);
}
fillIntensityFromFeature(mrmfeature, native_ids_set2, intensityj);
for (std::size_t i = 0; i < intensityj.size(); i++)
{
Scoring::standardize_data(intensityj[i]);
}
xcorr_contrast_matrix_.resize(native_ids_set1.size(), native_ids_set2.size());
xcorr_contrast_matrix_max_peak_sec_.resize(native_ids_set1.size(), native_ids_set2.size());
for (std::size_t i = 0; i < native_ids_set1.size(); i++)
{
for (std::size_t j = 0; j < native_ids_set2.size(); j++)
{
// compute normalized cross correlation
xcorr_contrast_matrix_(i, j) = Scoring::normalizedCrossCorrelationPost(intensityi[i], intensityj[j], static_cast<int>(intensityi[i].size()), 1);
auto x = Scoring::xcorrArrayGetMaxPeak(xcorr_contrast_matrix_(i, j));
xcorr_contrast_matrix_max_peak_sec_(i, j) = x->second;
}
}
}
void MRMScoring::initializeXCorrPrecursorMatrix(OpenSwath::IMRMFeature* mrmfeature, const std::vector<std::string>& precursor_ids)
{
std::vector<std::vector<double>> intensity;
fillIntensityFromPrecursorFeature(mrmfeature, precursor_ids, intensity);
for (std::size_t i = 0; i < intensity.size(); i++)
{
Scoring::standardize_data(intensity[i]);
}
xcorr_precursor_matrix_.resize(precursor_ids.size(), precursor_ids.size());
for (std::size_t i = 0; i < precursor_ids.size(); i++)
{
for (std::size_t j = i; j < precursor_ids.size(); j++)
{
// compute normalized cross correlation
xcorr_precursor_matrix_(i, j) = Scoring::normalizedCrossCorrelationPost(intensity[i], intensity[j], static_cast<int>(intensity[i].size()), 1);
}
}
}
void MRMScoring::initializeXCorrPrecursorContrastMatrix(OpenSwath::IMRMFeature* mrmfeature, const std::vector<std::string>& precursor_ids, const std::vector<std::string>& native_ids)
{
std::vector<std::vector<double>> intensityi, intensityj;
fillIntensityFromPrecursorFeature(mrmfeature, precursor_ids, intensityi);
for (std::size_t i = 0; i < intensityi.size(); i++)
{
Scoring::standardize_data(intensityi[i]);
}
fillIntensityFromFeature(mrmfeature, native_ids, intensityj);
for (std::size_t i = 0; i < intensityj.size(); i++)
{
Scoring::standardize_data(intensityj[i]);
}
xcorr_precursor_contrast_matrix_.resize(precursor_ids.size(), native_ids.size());
for (std::size_t i = 0; i < precursor_ids.size(); i++)
{
for (std::size_t j = 0; j < native_ids.size(); j++)
{
// compute normalized cross correlation
xcorr_precursor_contrast_matrix_(i, j) = Scoring::normalizedCrossCorrelationPost(intensityi[i], intensityj[j], static_cast<int>(intensityi[i].size()), 1);
}
}
}
void MRMScoring::initializeXCorrPrecursorContrastMatrix(const std::vector< std::vector< double > >& data_precursor, const std::vector< std::vector< double > >& data_fragments)
{
xcorr_precursor_contrast_matrix_.resize(data_precursor.size(), data_fragments.size());
std::vector< std::vector< double > > tmp_data_precursor = data_precursor;
std::vector< std::vector< double > > tmp_data_fragments = data_fragments;
for (std::size_t i = 0; i < tmp_data_precursor.size(); i++)
{
Scoring::standardize_data(tmp_data_precursor[i]);
}
for (std::size_t i = 0; i < tmp_data_fragments.size(); i++)
{
Scoring::standardize_data(tmp_data_fragments[i]);
}
for (std::size_t i = 0; i < data_precursor.size(); i++)
{
for (std::size_t j = 0; j < data_fragments.size(); j++)
{
// compute normalized cross correlation
xcorr_precursor_contrast_matrix_(i, j) = Scoring::normalizedCrossCorrelationPost(tmp_data_precursor[i], tmp_data_fragments[j], static_cast<int>(tmp_data_precursor[i].size()), 1);
#ifdef MRMSCORING_TESTING
std::cout << " fill xcorr_precursor_contrast_matrix_ "<< tmp_data_precursor[i].size() << " / " << tmp_data_fragments[j].size() << " : " << xcorr_precursor_contrast_matrix_[i][j].data.size() << '\n';
#endif
}
}
}
void MRMScoring::initializeXCorrPrecursorCombinedMatrix(OpenSwath::IMRMFeature* mrmfeature, const std::vector<std::string>& precursor_ids, const std::vector<std::string>& native_ids)
{
  // Build one joint set of traces: precursor traces first, fragment traces after.
  std::vector<std::vector<double>> precursor_traces, fragment_traces;
  fillIntensityFromPrecursorFeature(mrmfeature, precursor_ids, precursor_traces);
  fillIntensityFromFeature(mrmfeature, native_ids, fragment_traces);

  std::vector<std::vector<double>> traces;
  traces.reserve(precursor_traces.size() + fragment_traces.size());
  traces.insert(traces.end(), precursor_traces.begin(), precursor_traces.end());
  traces.insert(traces.end(), fragment_traces.begin(), fragment_traces.end());

  // Standardize every trace before computing cross-correlations.
  for (auto& trace : traces)
  {
    Scoring::standardize_data(trace);
  }

  // Fill only the upper triangle (j >= i) of the combined xcorr matrix.
  xcorr_precursor_combined_matrix_.resize(traces.size(), traces.size());
  for (std::size_t i = 0; i < traces.size(); i++)
  {
    for (std::size_t j = i; j < traces.size(); j++)
    {
      // compute normalized cross correlation
      xcorr_precursor_combined_matrix_(i, j) = Scoring::normalizedCrossCorrelationPost(
          traces[i], traces[j], static_cast<int>(traces[i].size()), 1);
    }
  }
}
// see /IMSB/users/reiterl/bin/code/biognosys/trunk/libs/mrm_libs/MRM_pgroup.pm
// _calc_xcorr_coelution_score
//
// for each i,j get xcorr_matrix array => find max of the crosscorrelation
// store the delta to the retention time
// return $deltascore_mean + $deltascore_stdev
double MRMScoring::calcXcorrCoelutionScore()
{
OPENSWATH_PRECONDITION(xcorr_matrix_max_peak_.rows() > 1, "Expect cross-correlation matrix of at least 2x2");
OpenSwath::mean_and_stddev msc;
for (long int i = 0; i < xcorr_matrix_max_peak_.rows(); i++)
{
for (long int j = i; j < xcorr_matrix_max_peak_.rows(); j++)
{
// first is the X value (RT), should be an int
//deltas.push_back(std::abs(Scoring::xcorrArrayGetMaxPeak(xcorr_matrix_.getValue(i, j))->first));
msc(xcorr_matrix_max_peak_(i,j));
#ifdef MRMSCORING_TESTING
std::cout << "&&_xcoel append " << std::abs(Scoring::xcorrArrayGetMaxPeak(xcorr_matrix_[i][j])->first) << '\n';
#endif
}
}
double deltas_mean = msc.mean();
double deltas_stdv = msc.sample_stddev();
double xcorr_coelution_score = deltas_mean + deltas_stdv;
return xcorr_coelution_score;
}
// Library-intensity-weighted coelution score over the stored RT deltas of
// the cross-correlation maxima.  Diagonal entries are weighted by w_i^2,
// off-diagonal pairs by 2*w_i*w_j (each unordered pair counted once).
// Assumes normalized_library_intensity has (at least) one entry per matrix row.
double MRMScoring::calcXcorrCoelutionWeightedScore(
const std::vector<double>& normalized_library_intensity)
{
OPENSWATH_PRECONDITION(xcorr_matrix_max_peak_.rows() > 1, "Expect cross-correlation matrix of at least 2x2");
#ifdef MRMSCORING_TESTING
double weights = 0;
#endif
double deltas{0};
for (long int i = 0; i < xcorr_matrix_max_peak_.rows(); i++)
{
// diagonal term: delta(i,i) weighted by the squared library intensity
deltas += (xcorr_matrix_max_peak_(i, i)//std::abs(Scoring::xcorrArrayGetMaxPeak(xcorr_matrix_.getValue(i, i))->first)
* normalized_library_intensity[i]
* normalized_library_intensity[i]);
#ifdef MRMSCORING_TESTING
// NOTE(review): this debug block references the old xcorr_matrix_ member
// and may no longer compile when MRMSCORING_TESTING is defined.
std::cout << "_xcoel_weighted " << i << " " << i << " " << Scoring::xcorrArrayGetMaxPeak(xcorr_matrix_[i][i])->first << " weight " <<
normalized_library_intensity[i] * normalized_library_intensity[i] << '\n';
weights += normalized_library_intensity[i] * normalized_library_intensity[i];
#endif
for (long int j = i + 1; j < xcorr_matrix_max_peak_.rows(); j++)
{
// first is the X value (RT), should be an int
// off-diagonal term: factor 2 accounts for the symmetric (j,i) entry
deltas += (xcorr_matrix_max_peak_(i, j)//std::abs(Scoring::xcorrArrayGetMaxPeak(xcorr_matrix_.getValue(i, j))->first)
* normalized_library_intensity[i]
* normalized_library_intensity[j] * 2);
#ifdef MRMSCORING_TESTING
std::cout << "_xcoel_weighted " << i << " " << j << " " << Scoring::xcorrArrayGetMaxPeak(xcorr_matrix_[i][j])->first << " weight " <<
normalized_library_intensity[i] * normalized_library_intensity[j] * 2 << '\n';
weights += normalized_library_intensity[i] * normalized_library_intensity[j];
#endif
}
}
#ifdef MRMSCORING_TESTING
std::cout << " all weights sum " << weights << '\n';
#endif
return deltas;
}
std::vector<double> MRMScoring::calcSeparateXcorrContrastCoelutionScore()
{
  OPENSWATH_PRECONDITION(xcorr_contrast_matrix_.rows() > 0 && xcorr_contrast_matrix_.cols() > 1, "Expect cross-correlation matrix of at least 1x2");

  // One score per row: the mean absolute RT shift of the cross-correlation
  // maximum against every column of the contrast matrix.
  std::vector<double> deltas;
  deltas.reserve(xcorr_contrast_matrix_.rows());
  for (long int i = 0; i < xcorr_contrast_matrix_.rows(); i++)
  {
    double row_sum = 0;
    for (long int j = 0; j < xcorr_contrast_matrix_.cols(); j++)
    {
      // first is the X value (RT), should be an int
      row_sum += std::abs(Scoring::xcorrArrayGetMaxPeak(xcorr_contrast_matrix_(i, j))->first);
#ifdef MRMSCORING_TESTING
std::cout << "&&_xcoel append " << xcorr_contrast_matrix_max_peak_getValue(i, j) << '\n';
#endif
    }
    deltas.push_back(row_sum / xcorr_contrast_matrix_.cols());
  }
  return deltas;
}
double MRMScoring::calcXcorrPrecursorCoelutionScore()
{
OPENSWATH_PRECONDITION(xcorr_precursor_matrix_.rows() > 1, "Expect cross-correlation matrix of at least 2x2");
OpenSwath::mean_and_stddev msc;
for (long int i = 0; i < xcorr_precursor_matrix_.rows(); i++)
{
for (long int j = i; j < xcorr_precursor_matrix_.rows(); j++)
{
// first is the X value (RT), should be an int
auto x = Scoring::xcorrArrayGetMaxPeak(xcorr_precursor_matrix_(i, j));
msc(std::abs(x->first));
#ifdef MRMSCORING_TESTING
std::cout << "&&_xcoel append " << std::abs(Scoring::xcorrArrayGetMaxPeak(xcorr_precursor_matrix_[i][j])->first) << '\n';
#endif
}
}
double deltas_mean = msc.mean();
double deltas_stdv = msc.sample_stddev();
double xcorr_coelution_score = deltas_mean + deltas_stdv;
return xcorr_coelution_score;
}
double MRMScoring::calcXcorrPrecursorContrastCoelutionScore()
{
OPENSWATH_PRECONDITION(xcorr_precursor_contrast_matrix_.rows() > 0 && xcorr_precursor_contrast_matrix_.cols() > 1, "Expect cross-correlation matrix of at least 1x2");
OpenSwath::mean_and_stddev msc;
size_t n_entries = xcorr_precursor_contrast_matrix_.size();
auto em = OpenMS::eigenView(xcorr_precursor_contrast_matrix_);
for (size_t i = 0; i < n_entries; i++)
{
// first is the X value (RT), should be an int
auto e = *(em.data() + i);
msc(std::abs(Scoring::xcorrArrayGetMaxPeak(e)->first));
#ifdef MRMSCORING_TESTING
std::cout << "&&_xcoel append " << std::abs(Scoring::xcorrArrayGetMaxPeak(xcorr_precursor_contrast_matrix_[i][j])->first) << '\n';
#endif
}
double deltas_mean = msc.mean();
double deltas_stdv = msc.sample_stddev();
double xcorr_coelution_score = deltas_mean + deltas_stdv;
return xcorr_coelution_score;
}
// Coelution score between precursor trace(s) and the summed fragment trace.
// NOTE(review): the computation is identical to
// calcXcorrPrecursorContrastCoelutionScore(); the difference lies solely in
// how xcorr_precursor_contrast_matrix_ was initialized by the caller
// (per-fragment contrast vs. summed-fragment data) -- confirm against callers.
double MRMScoring::calcXcorrPrecursorContrastSumFragCoelutionScore()
{
OPENSWATH_PRECONDITION(xcorr_precursor_contrast_matrix_.rows() > 0 && xcorr_precursor_contrast_matrix_.cols() > 0, "Expect cross-correlation matrix of at least 1x1");
OpenSwath::mean_and_stddev msc;
size_t n_entries = xcorr_precursor_contrast_matrix_.size();
// iterate the matrix storage linearly via the Eigen view
auto em = OpenMS::eigenView(xcorr_precursor_contrast_matrix_);
for (size_t i = 0; i < n_entries; i++)
{
// first is the X value (RT), should be an int
auto e = *(em.data() + i);
msc(std::abs(Scoring::xcorrArrayGetMaxPeak(e)->first));
#ifdef MRMSCORING_TESTING
std::cout << "&&_xcoel append " << std::abs(Scoring::xcorrArrayGetMaxPeak(xcorr_precursor_contrast_matrix_[i][j])->first) << '\n';
#endif
}
// score = mean + sample standard deviation of the RT deltas
double deltas_mean = msc.mean();
double deltas_stdv = msc.sample_stddev();
double xcorr_coelution_score = deltas_mean + deltas_stdv;
return xcorr_coelution_score;
}
double MRMScoring::calcXcorrPrecursorCombinedCoelutionScore()
{
OPENSWATH_PRECONDITION(xcorr_precursor_combined_matrix_.rows() > 1, "Expect cross-correlation matrix of at least 2x2");
OpenSwath::mean_and_stddev msc;
for (long int i = 0; i < xcorr_precursor_combined_matrix_.rows(); i++)
{
for (long int j = i; j < xcorr_precursor_combined_matrix_.rows(); j++)
{
// first is the X value (RT), should be an int
auto x = Scoring::xcorrArrayGetMaxPeak(xcorr_precursor_combined_matrix_(i, j));
msc(std::abs(x->first));
#ifdef MRMSCORING_TESTING
std::cout << "&&_xcoel append " << std::abs(Scoring::xcorrArrayGetMaxPeak(xcorr_precursor_combined_matrix_[i][j])->first) << '\n';
#endif
}
}
double deltas_mean = msc.mean();
double deltas_stdv = msc.sample_stddev();
double xcorr_coelution_score = deltas_mean + deltas_stdv;
return xcorr_coelution_score;
}
// see /IMSB/users/reiterl/bin/code/biognosys/trunk/libs/mrm_libs/MRM_pgroup.pm
// _calc_xcorr_shape_score
//
// for each i,j get xcorr_matrix array => find max of the crosscorrelation
// calculate whether the maximal crosscorrelation coincides with the maximal intensity
///
double MRMScoring::calcXcorrShapeScore()
{
  OPENSWATH_PRECONDITION(xcorr_matrix_max_peak_sec_.rows() > 1, "Expect cross-correlation matrix of at least 2x2");

  // Average the stored intensities (Y values) of the cross-correlation
  // maxima over the upper triangle (incl. diagonal).
  double intensity_total = 0;
  size_t count = 0;
  const long int n = xcorr_matrix_max_peak_sec_.rows();
  for (long int i = 0; i < n; i++)
  {
    for (long int j = i; j < n; j++)
    {
      // second is the Y value (intensity)
      intensity_total += xcorr_matrix_max_peak_sec_(i, j);
      ++count;
    }
  }
  return intensity_total / count;
}
// Library-intensity-weighted shape score: diagonal xcorr max intensities are
// weighted by w_i^2, off-diagonal pairs by 2*w_i*w_j (each pair counted once).
// Assumes normalized_library_intensity has (at least) one entry per matrix row.
double MRMScoring::calcXcorrShapeWeightedScore(
const std::vector<double>& normalized_library_intensity)
{
OPENSWATH_PRECONDITION(xcorr_matrix_max_peak_sec_.rows() > 1, "Expect cross-correlation matrix of at least 2x2");
// TODO (hroest) : check implementation
// see _calc_weighted_xcorr_shape_score in MRM_pgroup.pm
// -- they only multiply up the intensity once
double intensities{0};
for (long int i = 0; i < xcorr_matrix_max_peak_sec_.rows(); i++)
{
// diagonal term, weighted by the squared library intensity
intensities += (xcorr_matrix_max_peak_sec_(i, i)
* normalized_library_intensity[i]
* normalized_library_intensity[i]);
#ifdef MRMSCORING_TESTING
// NOTE(review): this debug block references the old xcorr_matrix_ member
// and may no longer compile when MRMSCORING_TESTING is defined.
std::cout << "_xcorr_weighted " << i << " " << i << " " << Scoring::xcorrArrayGetMaxPeak(xcorr_matrix_[i][i])->second << " weight " <<
normalized_library_intensity[i] * normalized_library_intensity[i] << '\n';
#endif
for (long int j = i + 1; j < xcorr_matrix_max_peak_sec_.rows(); j++)
{
// off-diagonal term; factor 2 accounts for the symmetric (j,i) entry
intensities += (xcorr_matrix_max_peak_sec_(i, j)
* normalized_library_intensity[i]
* normalized_library_intensity[j] * 2);
#ifdef MRMSCORING_TESTING
std::cout << "_xcorr_weighted " << i << " " << j << " " << Scoring::xcorrArrayGetMaxPeak(xcorr_matrix_[i][j])->second << " weight " <<
normalized_library_intensity[i] * normalized_library_intensity[j] * 2 << '\n';
#endif
}
}
return intensities;
}
// Shape score over the fragment-vs-fragment contrast matrix: sum of the
// stored max-peak intensities (Y values).
// NOTE(review): this returns the raw sum, whereas the other *ShapeScore
// variants normalize by the number of entries -- confirm that the
// un-normalized sum is intended here.
double MRMScoring::calcXcorrContrastShapeScore()
{
OPENSWATH_PRECONDITION(xcorr_contrast_matrix_max_peak_sec_.rows() > 0 && xcorr_contrast_matrix_max_peak_sec_.cols() > 1, "Expect cross-correlation matrix of at least 1x2");
auto em = OpenMS::eigenView(xcorr_contrast_matrix_max_peak_sec_);
return em.sum();
}
std::vector<double> MRMScoring::calcSeparateXcorrContrastShapeScore()
{
  OPENSWATH_PRECONDITION(xcorr_contrast_matrix_max_peak_sec_.rows() > 0 && xcorr_contrast_matrix_max_peak_sec_.cols() > 1, "Expect cross-correlation matrix of at least 1x2");

  // One shape score per row: the mean of the stored max-peak intensities
  // (Y values) across all columns.
  std::vector<double> intensities;
  intensities.reserve(xcorr_contrast_matrix_max_peak_sec_.rows());
  for (long int i = 0; i < xcorr_contrast_matrix_max_peak_sec_.rows(); i++)
  {
    double row_sum = 0;
    for (long int j = 0; j < xcorr_contrast_matrix_max_peak_sec_.cols(); j++)
    {
      // second is the Y value (intensity)
      row_sum += xcorr_contrast_matrix_max_peak_sec_(i, j);
    }
    intensities.push_back(row_sum / xcorr_contrast_matrix_max_peak_sec_.cols());
  }
  return intensities;
}
double MRMScoring::calcXcorrPrecursorShapeScore()
{
  OPENSWATH_PRECONDITION(xcorr_precursor_matrix_.rows() > 1, "Expect cross-correlation matrix of at least 2x2");

  // Sum the intensities (Y values) of the cross-correlation maxima over the
  // stored upper-triangular part of the precursor matrix.
  double intensity_sum{0};
  for (long int i = 0; i < xcorr_precursor_matrix_.rows(); i++)
  {
    for (long int j = i; j < xcorr_precursor_matrix_.cols(); j++)
    {
      intensity_sum += Scoring::xcorrArrayGetMaxPeak(xcorr_precursor_matrix_(i, j))->second;
    }
  }
  // xcorr_precursor_matrix_ is a triangle matrix: n*(n+1)/2 stored entries
  size_t element_number = xcorr_precursor_matrix_.rows() * xcorr_precursor_matrix_.rows() / 2 + (xcorr_precursor_matrix_.rows() + 1) / 2;
  return intensity_sum / element_number;
}
// Shape score between precursor trace(s) and the summed fragment trace:
// mean of the cross-correlation maximum intensities over all matrix entries.
// Requires xcorr_precursor_contrast_matrix_ to have been initialized with
// the summed-fragment data by the caller.
double MRMScoring::calcXcorrPrecursorContrastSumFragShapeScore()
{
  OPENSWATH_PRECONDITION(xcorr_precursor_contrast_matrix_.rows() > 0 && xcorr_precursor_contrast_matrix_.cols() > 0, "Expect cross-correlation matrix of at least 1x1");
  double intensities{0};
  // Iterate the matrix storage linearly via the Eigen view.
  auto em = OpenMS::eigenView(xcorr_precursor_contrast_matrix_);
  size_t n_elements = em.size();
  for (size_t i = 0; i != n_elements; ++i)
  {
    const auto& e = *(em.data() + i);
    intensities += Scoring::xcorrArrayGetMaxPeak(e)->second; // fixed stray ';;'
  }
  return intensities / static_cast<double>(n_elements); // named cast instead of C-style
}
double MRMScoring::calcXcorrPrecursorContrastShapeScore()
{
  OPENSWATH_PRECONDITION(xcorr_precursor_contrast_matrix_.rows() > 0 && xcorr_precursor_contrast_matrix_.cols() > 1, "Expect cross-correlation matrix of at least 1x2");

  // Average the intensity (Y value) of the cross-correlation maximum over
  // every precursor-vs-fragment entry, iterating the storage linearly.
  auto em = OpenMS::eigenView(xcorr_precursor_contrast_matrix_);
  const size_t n_elements = em.size();
  double intensity_sum{0};
  for (size_t idx = 0; idx != n_elements; ++idx)
  {
    intensity_sum += Scoring::xcorrArrayGetMaxPeak(*(em.data() + idx))->second;
  }
  return intensity_sum / (double)n_elements;
}
double MRMScoring::calcXcorrPrecursorCombinedShapeScore()
{
  OPENSWATH_PRECONDITION(xcorr_precursor_combined_matrix_.rows() > 1, "Expect cross-correlation matrix of at least 2x2");

  // Sum the intensities (Y values) of the cross-correlation maxima over the
  // stored upper-triangular part of the combined matrix.
  double intensity_sum{0};
  for (long int i = 0; i < xcorr_precursor_combined_matrix_.rows(); i++)
  {
    for (long int j = i; j < xcorr_precursor_combined_matrix_.cols(); j++)
    {
      intensity_sum += Scoring::xcorrArrayGetMaxPeak(xcorr_precursor_combined_matrix_(i, j))->second;
    }
  }
  // xcorr_precursor_combined_matrix_ is a triangle matrix: n*(n+1)/2 entries
  size_t element_number = xcorr_precursor_combined_matrix_.rows() * xcorr_precursor_combined_matrix_.rows() / 2 + (xcorr_precursor_combined_matrix_.rows() + 1) / 2;
  return intensity_sum / element_number;
}
// Compare experimental transition intensities against the library (assay)
// intensities using several similarity/distance measures.
// All six output parameters are overwritten.  NaN results from degenerate
// input are mapped to neutral/worst values (see below).
void MRMScoring::calcLibraryScore(OpenSwath::IMRMFeature* mrmfeature, const std::vector<TransitionType>& transitions,
double& correlation, double& norm_manhattan, double& manhattan, double& dotprod, double& spectral_angle, double& rmsd)
{
  std::vector<double> library_intensity;
  std::vector<double> experimental_intensity;
  library_intensity.reserve(transitions.size());
  experimental_intensity.reserve(transitions.size());
  std::string native_id;
  for (std::size_t k = 0; k < transitions.size(); k++)
  {
    native_id = transitions[k].getNativeID();
    double intensity = transitions[k].getLibraryIntensity();
    // the library intensity should never be below zero
    if (intensity < 0.0)
    {
      intensity = 0.0;
    }
    experimental_intensity.push_back(static_cast<double>(mrmfeature->getFeature(native_id)->getIntensity()));
    library_intensity.push_back(intensity);
  }
  OPENSWATH_PRECONDITION(library_intensity.size() == experimental_intensity.size(), "Both vectors need to have the same size");
#ifdef MRMSCORING_TESTING
  for (std::size_t k = 0; k < transitions.size(); k++)
  {
    native_id = transitions[k].getNativeID();
    std::cout << native_id << " Lib vs exp " << library_intensity[k] << " " << experimental_intensity[k] << '\n';
  }
#endif
  const unsigned int n = static_cast<unsigned int>(transitions.size());
  manhattan = OpenSwath::manhattanScoring(experimental_intensity, library_intensity);
  dotprod = OpenSwath::dotprodScoring(experimental_intensity, library_intensity);
  // .data() (instead of &v[0]) is well-defined even for empty vectors
  spectral_angle = Scoring::SpectralAngle(experimental_intensity.data(), library_intensity.data(), n);
  if (std::isnan(spectral_angle))
  {
    spectral_angle = 0.0; // degenerate input (e.g. zero vector): neutral angle
  }
  Scoring::normalize_sum(experimental_intensity.data(), n);
  Scoring::normalize_sum(library_intensity.data(), n);
  norm_manhattan = Scoring::NormalizedManhattanDist(experimental_intensity.data(), library_intensity.data(), n);
  rmsd = Scoring::RootMeanSquareDeviation(experimental_intensity.data(), library_intensity.data(), n);
  correlation = OpenSwath::cor_pearson(experimental_intensity.begin(), experimental_intensity.end(), library_intensity.begin());
  if (std::isnan(correlation))
  {
    correlation = -1.0; // undefined correlation (zero variance) -> worst score
  }
}
double MRMScoring::calcRTScore(const PeptideType& peptide, double normalized_experimental_rt)
{
  // Absolute deviation between the (normalized) observed retention time and
  // the expected library RT of the peptide.
  const double expected_rt = peptide.rt;
  // Sentinel value (<= -1000) means "no expected RT available": neutral score.
  if (expected_rt <= -1000)
  {
    return 0;
  }
  // use the transformed experimental retention time and then take the difference.
  return std::fabs(normalized_experimental_rt - expected_rt);
}
double MRMScoring::calcSNScore(OpenSwath::IMRMFeature* mrmfeature, std::vector<OpenSwath::ISignalToNoisePtr>& signal_noise_estimators)
{
  OPENSWATH_PRECONDITION(signal_noise_estimators.size() > 0, "Input S/N estimators needs to be larger than 0");
  // Defensive release-mode guard (the precondition only fires in debug builds).
  if (signal_noise_estimators.empty())
  {
    return 0;
  }
  // Average the S/N of all transitions at the feature's retention time.
  double sn_sum = 0;
  for (auto& estimator : signal_noise_estimators)
  {
    sn_sum += estimator->getValueAtRT(mrmfeature->getRT());
  }
  return sn_sum / signal_noise_estimators.size();
}
// Per-transition log S/N score at the feature's retention time.
// Values below S/N 1 are clamped to 0 so the logarithm is always safe.
std::vector<double> MRMScoring::calcSeparateSNScore(OpenSwath::IMRMFeature* mrmfeature, std::vector<OpenSwath::ISignalToNoisePtr>& signal_noise_estimators)
{
  OPENSWATH_PRECONDITION(signal_noise_estimators.size() > 0, "Input S/N estimators needs to be larger than 0");
  std::vector<double> sn_scores;
  // Defensive release-mode guard (the precondition only fires in debug builds).
  if (signal_noise_estimators.empty())
  {
    return {};
  }
  sn_scores.reserve(signal_noise_estimators.size());
  const double rt = mrmfeature->getRT();
  for (std::size_t k = 0; k < signal_noise_estimators.size(); k++)
  {
    // Query each estimator only once (the original queried it twice, which
    // doubled the work and could even disagree for stateful estimators).
    const double sn = signal_noise_estimators[k]->getValueAtRT(rt);
    // everything below S/N 1 can be set to zero (and the log safely applied)
    if (sn < 1)
    {
      sn_scores.push_back(0);
    }
    else
    {
      sn_scores.push_back(std::log(sn));
    }
  }
  return sn_scores;
}
// Read-only access to the fragment-vs-fragment mutual information matrix
// (filled by initializeMIMatrix()).
const OpenMS::Matrix<double> & MRMScoring::getMIMatrix() const
{
return mi_matrix_;
}
// Read-only access to the contrast mutual information matrix
// (filled by initializeMIContrastMatrix()).
const OpenMS::Matrix<double> & MRMScoring::getMIContrastMatrix() const
{
return mi_contrast_matrix_;
}
// Read-only access to the precursor-vs-fragment mutual information matrix
// (filled by initializeMIPrecursorContrastMatrix()).
const OpenMS::Matrix<double> & MRMScoring::getMIPrecursorContrastMatrix() const
{
return mi_precursor_contrast_matrix_;
}
// Read-only access to the combined precursor+fragment mutual information
// matrix (filled by initializeMIPrecursorCombinedMatrix()).
const OpenMS::Matrix<double> & MRMScoring::getMIPrecursorCombinedMatrix() const
{
return mi_precursor_combined_matrix_;
}
void MRMScoring::initializeMIMatrix(OpenSwath::IMRMFeature* mrmfeature, const std::vector<std::string>& native_ids)
{
  // Rank-transform every fragment ion trace, then fill the upper triangle
  // (incl. diagonal) with the ranked mutual information of each pair.
  std::vector<std::vector<double>> intensity;
  fillIntensityFromFeature(mrmfeature, native_ids, intensity);
  std::vector<std::vector<unsigned int>> rank_vec;
  const std::vector<unsigned int> max_rank_vec = Scoring::computeRankVector(intensity, rank_vec);

  const std::size_t n = native_ids.size();
  mi_matrix_.resize(n, n);
  mi_matrix_.fill(0.0); // lower triangle stays zero
  for (std::size_t i = 0; i < n; i++)
  {
    for (std::size_t j = i; j < n; j++)
    {
      // compute ranked mutual information
      mi_matrix_(i, j) = Scoring::rankedMutualInformation(rank_vec[i], rank_vec[j], max_rank_vec[i], max_rank_vec[j]);
    }
  }
}
void MRMScoring::initializeMIContrastMatrix(OpenSwath::IMRMFeature* mrmfeature, const std::vector<std::string>& native_ids_set1, const std::vector<std::string>& native_ids_set2)
{
  // Rank-transform both trace sets, then fill the full (rectangular)
  // contrast matrix with the ranked mutual information of every pair.
  std::vector<std::vector<double>> traces1, traces2;
  fillIntensityFromFeature(mrmfeature, native_ids_set1, traces1);
  fillIntensityFromFeature(mrmfeature, native_ids_set2, traces2);
  std::vector<std::vector<unsigned int>> ranks1, ranks2;
  const std::vector<unsigned int> max_ranks1 = Scoring::computeRankVector(traces1, ranks1);
  const std::vector<unsigned int> max_ranks2 = Scoring::computeRankVector(traces2, ranks2);

  mi_contrast_matrix_.resize(native_ids_set1.size(), native_ids_set2.size());
  for (std::size_t i = 0; i < native_ids_set1.size(); i++)
  {
    for (std::size_t j = 0; j < native_ids_set2.size(); j++)
    {
      // compute ranked mutual information
      mi_contrast_matrix_(i, j) = Scoring::rankedMutualInformation(ranks1[i], ranks2[j], max_ranks1[i], max_ranks2[j]);
    }
  }
}
void MRMScoring::initializeMIPrecursorMatrix(OpenSwath::IMRMFeature* mrmfeature, const std::vector<std::string>& precursor_ids)
{
  // Rank-transform every precursor trace, then fill the upper triangle
  // (incl. diagonal) with the ranked mutual information of each pair.
  std::vector<std::vector<double>> intensity;
  fillIntensityFromPrecursorFeature(mrmfeature, precursor_ids, intensity);
  std::vector<std::vector<unsigned int>> rank_vec;
  const std::vector<unsigned int> max_rank_vec = Scoring::computeRankVector(intensity, rank_vec);

  const std::size_t n = precursor_ids.size();
  mi_precursor_matrix_.resize(n, n);
  mi_precursor_matrix_.fill(0.0); // lower triangle stays zero
  for (std::size_t i = 0; i < n; i++)
  {
    for (std::size_t j = i; j < n; j++)
    {
      // compute ranked mutual information
      mi_precursor_matrix_(i, j) = Scoring::rankedMutualInformation(rank_vec[i], rank_vec[j], max_rank_vec[i], max_rank_vec[j]);
    }
  }
}
void MRMScoring::initializeMIPrecursorContrastMatrix(OpenSwath::IMRMFeature* mrmfeature, const std::vector<std::string>& precursor_ids, const std::vector<std::string>& native_ids)
{
  // Rank-transform precursor and fragment traces separately, then fill the
  // full (rectangular) precursor-vs-fragment matrix with ranked MI values.
  std::vector<std::vector<double>> precursor_traces, fragment_traces;
  fillIntensityFromPrecursorFeature(mrmfeature, precursor_ids, precursor_traces);
  fillIntensityFromFeature(mrmfeature, native_ids, fragment_traces);
  std::vector<std::vector<unsigned int>> ranks1, ranks2;
  const std::vector<unsigned int> max_ranks1 = Scoring::computeRankVector(precursor_traces, ranks1);
  const std::vector<unsigned int> max_ranks2 = Scoring::computeRankVector(fragment_traces, ranks2);

  mi_precursor_contrast_matrix_.resize(precursor_ids.size(), native_ids.size());
  for (std::size_t i = 0; i < precursor_ids.size(); i++)
  {
    for (std::size_t j = 0; j < native_ids.size(); j++)
    {
      // compute ranked mutual information
      mi_precursor_contrast_matrix_(i, j) = Scoring::rankedMutualInformation(ranks1[i], ranks2[j], max_ranks1[i], max_ranks2[j]);
    }
  }
}
// Build the symmetric mutual information matrix over the combined set of
// precursor and fragment traces (precursors first, fragments appended).
// NOTE(review): this relies on Scoring::computeRankVector() *appending* to
// rank_vec when called a second time with the same vector -- confirm in
// Scoring's implementation.
void MRMScoring::initializeMIPrecursorCombinedMatrix(OpenSwath::IMRMFeature* mrmfeature, const std::vector<std::string>& precursor_ids, const std::vector<std::string>& native_ids)
{
std::vector<std::vector<unsigned int>> rank_vec{};
std::vector<std::vector<double>> intensity;
// rank-transform the precursor traces first
fillIntensityFromPrecursorFeature(mrmfeature, precursor_ids, intensity);
std::vector<unsigned int> max_rank_vec = Scoring::computeRankVector(intensity, rank_vec);
// reuse the buffer for the fragment traces; their ranks are appended
intensity.clear();
fillIntensityFromFeature(mrmfeature, native_ids, intensity);
std::vector<unsigned int> max_rank_vec_tmp = Scoring::computeRankVector(intensity, rank_vec);
// concatenate the per-trace maximum ranks to match the combined rank_vec
max_rank_vec.reserve(max_rank_vec.size() + native_ids.size());
max_rank_vec.insert(max_rank_vec.end(), max_rank_vec_tmp.begin(), max_rank_vec_tmp.end())
double MRMScoring::calcMIScore()
{
  OPENSWATH_PRECONDITION(mi_matrix_.rows() > 1, "Expect mutual information matrix of at least 2x2");
  // Summing the full matrix equals summing the stored triangle, because the
  // lower triangle was zero-filled on initialization.
  const double mi_sum = OpenMS::eigenView(mi_matrix_).sum();
  // number of stored (upper triangular) entries: n*(n+1)/2
  size_t element_number = mi_matrix_.rows() * mi_matrix_.rows() / 2 + (mi_matrix_.rows() + 1) / 2;
  return mi_sum / element_number;
}
double MRMScoring::calcMIWeightedScore(
    const std::vector<double>& normalized_library_intensity)
{
  OPENSWATH_PRECONDITION(mi_matrix_.rows() > 1, "Expect mutual information matrix of at least 2x2");
  // Weight each pairwise MI value by the product of the two normalized
  // library intensities; off-diagonal pairs count twice (symmetric matrix).
  double weighted_sum{0};
  const long int n = mi_matrix_.rows();
  for (long int i = 0; i < n; i++)
  {
    weighted_sum += mi_matrix_(i, i)
        * normalized_library_intensity[i]
        * normalized_library_intensity[i];
#ifdef MRMSCORING_TESTING
std::cout << "_mi_weighted " << i << " " << i << " " << mi_matrix_[i][i] << " weight " <<
normalized_library_intensity[i] * normalized_library_intensity[i] << '\n';
#endif
    for (long int j = i + 1; j < n; j++)
    {
      weighted_sum += mi_matrix_(i, j)
          * normalized_library_intensity[i]
          * normalized_library_intensity[j] * 2;
#ifdef MRMSCORING_TESTING
std::cout << "_mi_weighted " << i << " " << j << " " << mi_matrix_[i][j] << " weight " <<
normalized_library_intensity[i] * normalized_library_intensity[j] * 2 << '\n';
#endif
    }
  }
  return weighted_sum;
}
double MRMScoring::calcMIPrecursorScore()
{
  OPENSWATH_PRECONDITION(mi_precursor_matrix_.rows() > 1, "Expect mutual information matrix of at least 2x2");
  // Lower triangle is zero, so summing the full view sums the stored triangle.
  const double mi_sum = OpenMS::eigenView(mi_precursor_matrix_).sum();
  // mi_precursor_matrix_ is a triangular matrix: n*(n+1)/2 stored entries
  size_t element_number = mi_precursor_matrix_.rows() * mi_precursor_matrix_.rows() / 2 + (mi_precursor_matrix_.rows() + 1) / 2;
  return mi_sum / static_cast<double>(element_number);
}
double MRMScoring::calcMIPrecursorContrastScore()
{
  OPENSWATH_PRECONDITION(mi_precursor_contrast_matrix_.rows() > 0 && mi_precursor_contrast_matrix_.cols() > 1, "Expect mutual information matrix of at least 1x2");
  // Mean mutual information over all precursor-vs-fragment pairs.
  auto em = OpenMS::eigenView(mi_precursor_contrast_matrix_);
  return em.sum() / static_cast<double>(em.size());
}
double MRMScoring::calcMIPrecursorCombinedScore()
{
  OPENSWATH_PRECONDITION(mi_precursor_combined_matrix_.rows() > 1, "Expect mutual information matrix of at least 2x2");
  // Mean mutual information over the full (symmetric) combined matrix.
  auto em = OpenMS::eigenView(mi_precursor_combined_matrix_);
  return em.sum() / static_cast<double>(em.size());
}
std::vector<double> MRMScoring::calcSeparateMIContrastScore()
{
  OPENSWATH_PRECONDITION(mi_contrast_matrix_.rows() > 0 && mi_contrast_matrix_.cols() > 1, "Expect mutual information matrix of at least 1x2");
  // One value per row: the row's mean mutual information across all columns.
  std::vector<double> mi_scores(mi_contrast_matrix_.rows());
  for (long int i = 0; i < mi_contrast_matrix_.rows(); i++)
  {
    double row_sum = 0;
    for (long int j = 0; j < mi_contrast_matrix_.cols(); j++)
    {
      row_sum += mi_contrast_matrix_(i, j);
    }
    mi_scores[i] = row_sum / mi_contrast_matrix_.cols();
  }
  return mi_scores;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/ChromatogramExtractorAlgorithm.cpp | .cpp | 12,520 | 374 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/ChromatogramExtractorAlgorithm.h>
#include <OpenMS/DATASTRUCTURES/String.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <algorithm>
#include <iostream>
namespace OpenMS
{
/// Sum all intensities inside the top-hat m/z window around @p mz in a
/// single spectrum whose m/z array is sorted ascending.
///
/// @param mz_start begin of the spectrum's m/z array
/// @param mz_it    current position; advanced past all points below @p mz and
///                 left there so the caller's next (larger) target resumes here
/// @param mz_end   end of the spectrum's m/z array
/// @param int_it   intensity iterator, kept in lock-step with @p mz_it
/// @param mz       target m/z to extract around
/// @param integrated_intensity output: sum of intensities inside the window
/// @param mz_extraction_window full window width (absolute, or ppm if @p ppm)
/// @param ppm      whether the window width is relative (ppm)
void ChromatogramExtractorAlgorithm::extract_value_tophat(
const std::vector<double>::const_iterator& mz_start,
std::vector<double>::const_iterator& mz_it,
const std::vector<double>::const_iterator& mz_end,
std::vector<double>::const_iterator& int_it,
const double mz,
double& integrated_intensity,
const double mz_extraction_window,
const bool ppm)
{
integrated_intensity = 0;
// empty spectrum: nothing to extract
if (mz_start == mz_end)
{
return;
}
// calculate extraction window
double left, right;
if (ppm)
{
// ppm window scales with the target m/z
left = mz - mz * mz_extraction_window / 2.0 * 1.0e-6;
right = mz + mz * mz_extraction_window / 2.0 * 1.0e-6;
}
else
{
left = mz - mz_extraction_window / 2.0;
right = mz + mz_extraction_window / 2.0;
}
std::vector<double>::const_iterator mz_walker;
std::vector<double>::const_iterator int_walker;
// advance the mz / int iterator until we hit the m/z value of the next transition
while (mz_it != mz_end && (*mz_it) < mz)
{
mz_it++;
int_it++;
}
// walk right and left and add to our intensity
mz_walker = mz_it;
int_walker = int_it;
// if we moved past the end of the spectrum, we need to try the last peak
// of the spectrum (it could still be within the window)
if (mz_it == mz_end)
{
--mz_walker;
--int_walker;
}
// add the current peak if it is between right and left
if ((*mz_walker) > left && (*mz_walker) < right)
{
integrated_intensity += (*int_walker);
}
// (i) Walk to the left one step and then keep walking left until we go
// outside the window. Note for the first step to the left we have to
// check for the walker becoming equal to the first data point.
mz_walker = mz_it;
int_walker = int_it;
if (mz_it != mz_start)
{
--mz_walker;
--int_walker;
// Special case: target m/z is larger than first data point but the first
// data point is inside the window.
// Then, mz_it is the second data point, mz_walker now points to the very
// first data point. If mz_it was the first data point, we already added
// it above. We still need to add this point if it is inside the window
// (while loop below will not catch it)
if (mz_walker == mz_start && (*mz_walker) > left && (*mz_walker) < right)
{
integrated_intensity += (*int_walker);
}
}
// NOTE(review): the loop below stops when the walker reaches mz_start; only
// the special case above adds the very first data point. If the walk needs
// more than one step to reach mz_start, a first point inside the window is
// not integrated -- confirm this boundary behavior is intended.
while (mz_walker != mz_start && (*mz_walker) > left && (*mz_walker) < right)
{
integrated_intensity += (*int_walker);
--mz_walker;
--int_walker;
}
// (ii) Walk to the right one step and then keep walking right until we are
// outside the window
mz_walker = mz_it;
int_walker = int_it;
if (mz_it != mz_end)
{
++mz_walker;
++int_walker;
}
while (mz_walker != mz_end && (*mz_walker) > left && (*mz_walker) < right)
{
integrated_intensity += (*int_walker);
++mz_walker;
++int_walker;
}
}
/// Ion-mobility-aware overload: sum all intensities inside the top-hat m/z
/// window around @p mz that also fall inside the ion mobility window around
/// @p im.  The spectrum is sorted by m/z; the m/z window drives the walking
/// and the IM window only gates whether a peak is added.
///
/// @param mz_start begin of the spectrum's m/z array
/// @param mz_it    current position; advanced past all points below @p mz and
///                 left there so the caller's next (larger) target resumes here
/// @param mz_end   end of the spectrum's m/z array
/// @param int_it   intensity iterator, kept in lock-step with @p mz_it
/// @param im_it    ion mobility iterator, kept in lock-step with @p mz_it
/// @param mz       target m/z to extract around
/// @param im       target ion mobility to extract around
/// @param integrated_intensity output: sum of intensities inside both windows
/// @param mz_extraction_window full m/z window width (absolute, or ppm if @p ppm)
/// @param im_extraction_window full ion mobility window width (absolute)
/// @param ppm      whether the m/z window width is relative (ppm)
void ChromatogramExtractorAlgorithm::extract_value_tophat(
const std::vector<double>::const_iterator& mz_start,
std::vector<double>::const_iterator& mz_it,
const std::vector<double>::const_iterator& mz_end,
std::vector<double>::const_iterator& int_it,
std::vector<double>::const_iterator& im_it,
const double mz,
const double im,
double& integrated_intensity,
const double mz_extraction_window,
const double im_extraction_window,
const bool ppm)
{
// Note that we have a 3D spectrum with m/z, intensity and ion mobility.
// The spectrum is sorted by m/z but we expect to have ion mobility
// information for each m/z point as well. Right now we simply filter by
// ion mobility and skip data that does not fall within the ion mobility
// window.
integrated_intensity = 0;
// empty spectrum: nothing to extract
if (mz_start == mz_end)
{
return;
}
// calculate extraction window
double left, right;
if (ppm)
{
// ppm window scales with the target m/z
left = mz - mz * mz_extraction_window / 2.0 * 1.0e-6;
right = mz + mz * mz_extraction_window / 2.0 * 1.0e-6;
}
else
{
left = mz - mz_extraction_window / 2.0;
right = mz + mz_extraction_window / 2.0;
}
// ion mobility window is always absolute
double left_im = im - im_extraction_window / 2.0;
double right_im = im + im_extraction_window / 2.0;
std::vector<double>::const_iterator mz_walker;
std::vector<double>::const_iterator im_walker;
std::vector<double>::const_iterator int_walker;
// advance the mz / int iterator until we hit the m/z value of the next transition
while (mz_it != mz_end && (*mz_it) < mz)
{
mz_it++;
im_it++;
int_it++;
}
// walk right and left and add to our intensity
mz_walker = mz_it;
im_walker = im_it;
int_walker = int_it;
// if we moved past the end of the spectrum, we need to try the last peak
// of the spectrum (it could still be within the window)
if (mz_it == mz_end)
{
--mz_walker;
--im_walker;
--int_walker;
}
// add the current peak if it is between right and left
if ((*mz_walker) > left && (*mz_walker) < right && (*im_walker) > left_im && (*im_walker) < right_im)
{
integrated_intensity += (*int_walker);
}
// (i) Walk to the left one step and then keep walking left until we go
// outside the window. Note for the first step to the left we have to
// check for the walker becoming equal to the first data point.
mz_walker = mz_it;
int_walker = int_it;
im_walker = im_it;
if (mz_it != mz_start)
{
--mz_walker;
--im_walker;
--int_walker;
// Special case: target m/z is larger than first data point but the first
// data point is inside the window.
// Then, mz_it is the second data point, mz_walker now points to the very
// first data point. If mz_it was the first data point, we already added
// it above. We still need to add this point if it is inside the window
// (while loop below will not catch it)
if (mz_walker == mz_start && (*mz_walker) > left && (*mz_walker) < right && (*im_walker) > left_im && (*im_walker) < right_im)
{
integrated_intensity += (*int_walker);
}
}
// NOTE(review): as in the m/z-only overload, the loop stops at mz_start and
// only the special case above can add the very first data point -- confirm
// this boundary behavior is intended.
while (mz_walker != mz_start && (*mz_walker) > left && (*mz_walker) < right)
{
// m/z gates the walking; ion mobility only gates the addition
if (*im_walker > left_im && *im_walker < right_im) integrated_intensity += (*int_walker);
--mz_walker;
--im_walker;
--int_walker;
}
// (ii) Walk to the right one step and then keep walking right until we are
// outside the window
mz_walker = mz_it;
im_walker = im_it;
int_walker = int_it;
if (mz_it != mz_end)
{
++im_walker;
++mz_walker;
++int_walker;
}
while (mz_walker != mz_end && (*mz_walker) > left && (*mz_walker) < right)
{
if (*im_walker > left_im && *im_walker < right_im) integrated_intensity += (*int_walker);
++mz_walker;
++im_walker;
++int_walker;
}
}
/// Extracts ion chromatograms (XICs) from all spectra of @p input.
///
/// For every spectrum, each extraction coordinate is visited in m/z order and
/// the intensity inside its m/z (and, if enabled, ion mobility) extraction
/// window is summed into one data point of the corresponding output
/// chromatogram.
///
/// @param input Spectrum access to the raw data
/// @param output One chromatogram per coordinate (must match size/order of @p extraction_coordinates)
/// @param extraction_coordinates Extraction targets; MUST be sorted by m/z
/// @param mz_extraction_window m/z window size (in Th or ppm, see @p ppm)
/// @param ppm Whether @p mz_extraction_window is given in ppm
/// @param im_extraction_window Ion mobility window size; values <= 0 disable IM extraction
/// @param filter Extraction function name, either "tophat" or "bartlett" (bartlett not implemented)
///
/// @throws Exception::IllegalArgument if output/coordinate sizes differ, if
///         coordinates are not sorted by m/z, if IM extraction is requested
///         but a spectrum has no IM array, or if the filter name is unknown
/// @throws Exception::NotImplemented for the "bartlett" filter
void ChromatogramExtractorAlgorithm::extractChromatograms(const OpenSwath::SpectrumAccessPtr& input,
    std::vector< OpenSwath::ChromatogramPtr >& output,
    const std::vector<ExtractionCoordinates>& extraction_coordinates,
    double mz_extraction_window,
    bool ppm,
    double im_extraction_window,
    const String& filter)
{
  Size input_size = input->getNrSpectra();
  if (input_size < 1)
  {
    return; // no spectra -> nothing to extract
  }

  if (output.size() != extraction_coordinates.size())
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
      "Output and extraction coordinates need to have the same size: "+ String(output.size()) + " != " + String(extraction_coordinates.size()) );
  }

  int used_filter = getFilterNr_(filter);

  // assert that they are sorted!
  if (std::adjacent_find(extraction_coordinates.begin(), extraction_coordinates.end(),
        ExtractionCoordinates::SortExtractionCoordinatesReverseByMZ) != extraction_coordinates.end())
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
      "Input to extractChromatogram needs to be sorted by m/z");
  }

  //go through all spectra
  startProgress(0, input_size, "Extracting chromatograms");
  for (Size scan_idx = 0; scan_idx < input_size; ++scan_idx)
  {
    setProgress(scan_idx);

    OpenSwath::SpectrumPtr sptr = input->getSpectrumById(scan_idx);
    OpenSwath::SpectrumMeta s_meta = input->getSpectrumMetaById(scan_idx);

    OpenSwath::BinaryDataArrayPtr mz_arr = sptr->getMZArray();
    OpenSwath::BinaryDataArrayPtr int_arr = sptr->getIntensityArray();
    // Working iterators; presumably advanced in-place by extract_value_tophat
    // as we step through the (m/z sorted) coordinates -- TODO confirm against
    // extract_value_tophat's signature.
    std::vector<double>::const_iterator mz_start = mz_arr->data.begin();
    std::vector<double>::const_iterator mz_end = mz_arr->data.end();
    std::vector<double>::const_iterator mz_it = mz_arr->data.begin();
    std::vector<double>::const_iterator int_it = int_arr->data.begin();
    std::vector<double>::const_iterator im_it;

    if (sptr->getMZArray()->data.empty())
    {
      continue; // empty spectrum, skip it
    }

    // Look for ion mobility array
    bool has_im = (im_extraction_window > 0.0);
    if (has_im)
    {
      OpenSwath::BinaryDataArrayPtr im_arr = sptr->getDriftTimeArray();
      if (im_arr != nullptr)
      {
        im_it = im_arr->data.begin();
      }
      else
      {
        // IM extraction was requested but this spectrum carries no IM data
        throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "Requested ion mobility extraction but no ion mobility array found.");
      }
    }

    // go through all transitions / chromatograms which are sorted by
    // ProductMZ. We can use this to step through the spectrum and at the
    // same time step through the transitions. We increase the peak counter
    // until we hit the next transition and then extract the signal.
    for (Size k = 0; k < extraction_coordinates.size(); ++k)
    {
      double integrated_intensity = 0;
      double current_rt = s_meta.RT;
      // Skip this coordinate if the spectrum RT lies outside its RT window
      // (an RT window is only active when rt_end > rt_start).
      if (extraction_coordinates[k].rt_end - extraction_coordinates[k].rt_start > 0 &&
           (current_rt < extraction_coordinates[k].rt_start ||
            current_rt > extraction_coordinates[k].rt_end) )
      {
        continue;
      }

      // Use IM extraction only when both a valid IM value is given for this
      // coordinate and an IM window was requested.
      const bool use_im = (extraction_coordinates[k].ion_mobility >= 0.0 && has_im);
      if (!use_im && used_filter == 1)
      {
        extract_value_tophat(mz_start, mz_it, mz_end, int_it,
                             extraction_coordinates[k].mz, integrated_intensity, mz_extraction_window, ppm);
      }
      else if (use_im && used_filter == 1)
      {
        // NOTE(review): this check is unreachable -- use_im already implies
        // ion_mobility >= 0 (see definition of use_im above).
        if (extraction_coordinates[k].ion_mobility < 0)
        {
          std::cerr << "WARNING : Drift time of ion is negative!" << std::endl;
        }
        extract_value_tophat(mz_start, mz_it, mz_end, int_it, im_it,
                             extraction_coordinates[k].mz, extraction_coordinates[k].ion_mobility,
                             integrated_intensity, mz_extraction_window, im_extraction_window, ppm);
      }
      else if (used_filter == 2)
      {
        throw Exception::NotImplemented(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION);
      }

      // One data point per spectrum per coordinate (RT, summed intensity)
      output[k]->getTimeArray()->data.push_back(current_rt);
      output[k]->getIntensityArray()->data.push_back(integrated_intensity);
    }
  }
  endProgress();
}
/// Maps the extraction-function name to its internal numeric code.
///
/// @param filter Name of the extraction function
/// @return 1 for "tophat", 2 for "bartlett"
/// @throws Exception::IllegalArgument for any other name
int ChromatogramExtractorAlgorithm::getFilterNr_(const String& filter)
{
  // Guard-clause style: check each supported filter and bail out early.
  if (filter == "tophat")
  {
    return 1;
  }
  if (filter == "bartlett")
  {
    return 2;
  }
  // Unknown filter name -> reject
  throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
    "Filter either needs to be tophat or bartlett");
}
}
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/OpenSwathWorkflow.h>
#include <cmath>
#include <unordered_map>
// OpenSwathCalibrationWorkflow
namespace OpenMS
{
/// Finds the MS1 map among the given SWATH maps and optionally loads it into memory.
///
/// If multiple maps are flagged as MS1, the last one wins (matching the
/// original behavior).
///
/// @param swath_maps All SWATH maps of the experiment
/// @param load_into_memory If true, wrap the MS1 map in an in-memory copy
/// @return Pointer to the MS1 map, or a null pointer if none is present
OpenSwath::SpectrumAccessPtr loadMS1Map(const std::vector< OpenSwath::SwathMap > & swath_maps, bool load_into_memory)
{
  OpenSwath::SpectrumAccessPtr ms1_map;
  // store reference to MS1 map for later -> note that this is *not* threadsafe!
  for (SignedSize i = 0; i < boost::numeric_cast<SignedSize>(swath_maps.size()); ++i)
  {
    if (swath_maps[i].ms1)
    {
      ms1_map = swath_maps[i].sptr;
    }
  }
  // BUGFIX: only dereference if an MS1 map was actually found -- previously
  // this crashed (null dereference) when the input contained no MS1 map.
  if (load_into_memory && ms1_map != nullptr)
  {
    // This creates an InMemory object that keeps all data in memory
    // but provides the same access functionality to the raw data as
    // any object implementing ISpectrumAccess
    ms1_map = std::make_shared<SpectrumAccessOpenMSInMemory>(*ms1_map);
  }
  return ms1_map;
}
/// Extracts iRT chromatograms and performs RT, m/z and IM calibration.
///
/// @param irt_transitions iRT assay library
/// @param swath_maps All SWATH maps (m/z recalibration is applied in place)
/// @param im_trafo Output: ion mobility transformation
/// @param min_rsq Minimal r-squared for the RT regression
/// @param min_coverage Minimal fraction of iRT peptides that must be retained
/// @param feature_finder_param Parameters for peak picking / scoring
/// @param cp_irt Chromatogram extraction parameters for the iRT assays
/// @param irt_detection_param Parameters for iRT peptide detection
/// @param calibration_param Parameters for m/z / IM calibration
/// @param irt_mzml_out Optional filename for debug output of iRT chromatograms
/// @param debug_level Debug level; > 1 forces debug chromatogram output
/// @param pasef Whether the data is PASEF (IM-aware window selection)
/// @param load_into_memory Whether to load SWATH maps into memory
/// @return The fitted RT transformation
TransformationDescription OpenSwathCalibrationWorkflow::performRTNormalization(
    const OpenSwath::LightTargetedExperiment& irt_transitions,
    std::vector< OpenSwath::SwathMap > & swath_maps,
    TransformationDescription& im_trafo,
    double min_rsq,
    double min_coverage,
    const Param& feature_finder_param,
    const ChromExtractParams& cp_irt,
    const Param& irt_detection_param,
    const Param& calibration_param,
    const String& irt_mzml_out,
    Size debug_level,
    bool pasef,
    bool load_into_memory)
{
  OPENMS_LOG_DEBUG << "performRTNormalization method starting\n";
  std::vector< OpenMS::MSChromatogram > irt_chromatograms;
  TransformationDescription trafo; // dummy
  this->simpleExtractChromatograms_(swath_maps, irt_transitions, irt_chromatograms, trafo, cp_irt, pasef, load_into_memory);

  // debug output of the iRT chromatograms
  // BUGFIX: the original declared a local 'String irt_mzml_out' inside the
  // if-block, shadowing the parameter -- the debug filename was never applied
  // and debug output was silently skipped. Use a working copy instead.
  String irt_mzml_out_file = irt_mzml_out;
  if (irt_mzml_out_file.empty() && debug_level > 1)
  {
    irt_mzml_out_file = "debug_irts.mzML";
  }
  if (!irt_mzml_out_file.empty())
  {
    try
    {
      PeakMap exp;
      exp.setChromatograms(irt_chromatograms);
      FileHandler().storeExperiment(irt_mzml_out_file, exp, {FileTypes::MZML});
    }
    catch (OpenMS::Exception::UnableToCreateFile& /*e*/)
    {
      OPENMS_LOG_DEBUG << "Error creating file " + irt_mzml_out_file + ", not writing out iRT chromatogram file" << '\n';
    }
    catch (OpenMS::Exception::BaseException& /*e*/)
    {
      OPENMS_LOG_DEBUG << "Error writing to file " + irt_mzml_out_file + ", not writing out iRT chromatogram file" << '\n';
    }
  }
  OPENMS_LOG_DEBUG << "Extracted number of chromatograms from iRT files: " << irt_chromatograms.size() << std::endl;

  // perform RT and m/z correction on the data
  TransformationDescription tr = doDataNormalization_(irt_transitions,
      irt_chromatograms, im_trafo, swath_maps,
      min_rsq, min_coverage, feature_finder_param,
      irt_detection_param, calibration_param, pasef);
  return tr;
}
/// Performs RT normalization and m/z / IM calibration from extracted iRT chromatograms.
///
/// Pipeline (stages numbered in the body): estimate the iRT RT range, pick
/// peaks in the chromatograms, pair experimental with theoretical RTs, remove
/// outliers, optionally check binned coverage, select the calibration peaks,
/// run m/z / IM correction on the SWATH maps, and finally fit the RT model.
///
/// @param targeted_exp iRT assay library
/// @param chromatograms Extracted iRT chromatograms
/// @param im_trafo Output: ion mobility transformation
/// @param swath_maps SWATH maps, recalibrated in place by SwathMapMassCorrection
/// @param min_rsq Minimal r-squared for outlier removal
/// @param min_coverage Minimal fraction of peptides that must survive outlier removal
/// @param default_ffparam Base feature finder parameters (copied and adjusted)
/// @param irt_detection_param iRT detection parameters
/// @param calibration_param Calibration parameters for SwathMapMassCorrection
/// @param pasef Whether the data is PASEF
/// @return The fitted RT transformation
/// @throws Exception::IllegalArgument on unknown outlier method, insufficient
///         binned coverage, or fewer than 2 calibration peptides
TransformationDescription OpenSwathCalibrationWorkflow::doDataNormalization_(
    const OpenSwath::LightTargetedExperiment& targeted_exp,
    const std::vector< OpenMS::MSChromatogram >& chromatograms,
    TransformationDescription& im_trafo,
    std::vector< OpenSwath::SwathMap > & swath_maps,
    double min_rsq,
    double min_coverage,
    const Param& default_ffparam,
    const Param& irt_detection_param,
    const Param& calibration_param,
    const bool pasef)
{
  OPENMS_LOG_DEBUG << "Start of doDataNormalization_ method\n";
  this->startProgress(0, 1, "Retention time normalization");

  bool estimateBestPeptides = irt_detection_param.getValue("estimateBestPeptides").toBool();
  if (estimateBestPeptides)
  {
    OPENMS_LOG_DEBUG << "Activated the 'estimateBestPeptides' option.\n";
  }

  // 1. Estimate the retention time range of the iRT peptides over all assays
  std::pair<double,double> RTRange = OpenSwathHelper::estimateRTRange(targeted_exp);
  OPENMS_LOG_DEBUG << "Detected retention time range from " << RTRange.first << " to " << RTRange.second << '\n';

  // 2. Store the peptide retention times in an intermediate map
  // (compound id -> theoretical/normalized RT)
  std::map<OpenMS::String, double> PeptideRTMap;
  for (Size i = 0; i < targeted_exp.getCompounds().size(); i++)
  {
    PeptideRTMap[targeted_exp.getCompounds()[i].id] = targeted_exp.getCompounds()[i].rt;
  }

  // 3. Pick input chromatograms to identify RT pairs from the input data
  const OpenSwath::LightTargetedExperiment& transition_exp_used = targeted_exp;

  // Change the feature finding parameters:
  //  - no RT score (since we don't know the correct retention time)
  //  - no RT window
  //  - no elution model score
  //  - no peak quality (use all peaks)
  //  - if best peptides should be used, use peak quality
  MRMFeatureFinderScoring featureFinder;
  Param feature_finder_param(default_ffparam);
  feature_finder_param.setValue("Scores:use_rt_score", "false");
  feature_finder_param.setValue("Scores:use_elution_model_score", "false");
  feature_finder_param.setValue("rt_extraction_window", -1.0);
  feature_finder_param.setValue("stop_report_after_feature", 1);
  feature_finder_param.setValue("TransitionGroupPicker:PeakPickerChromatogram:signal_to_noise", 1.0); // set to 1.0 in all cases
  feature_finder_param.setValue("TransitionGroupPicker:compute_peak_quality", "false"); // no peak quality -> take all peaks!
  if (estimateBestPeptides)
  {
    feature_finder_param.setValue("TransitionGroupPicker:compute_peak_quality", "true");
    feature_finder_param.setValue("TransitionGroupPicker:minimal_quality", irt_detection_param.getValue("InitialQualityCutoff"));
  }
  featureFinder.setParameters(feature_finder_param);

  FeatureMap featureFile; // for results
  OpenMS::MRMFeatureFinderScoring::TransitionGroupMapType transition_group_map; // for results
  std::vector<OpenSwath::SwathMap> empty_swath_maps;
  TransformationDescription empty_trafo; // empty transformation

  // Prepare the data with the chromatograms
  std::shared_ptr<PeakMap > xic_map(new PeakMap);
  xic_map->setChromatograms(chromatograms);
  OpenSwath::SpectrumAccessPtr chromatogram_ptr = OpenSwath::SpectrumAccessPtr(new OpenMS::SpectrumAccessOpenMS(xic_map));

  featureFinder.setStrictFlag(false); // TODO remove this, it should be strict (e.g. all transitions need to be present for RT norm)
  featureFinder.pickExperiment(chromatogram_ptr, featureFile, transition_exp_used, empty_trafo, empty_swath_maps, transition_group_map);

  // 4. Find most likely correct feature for each compound and add it to the
  // "pairs" vector by computing pairs of iRT and real RT.
  //
  // Note that the quality threshold will only be applied if
  // estimateBestPeptides is true
  std::vector<std::pair<double, double> > pairs; // store the RT pairs to write the output trafoXML
  std::map<std::string, double> best_features = OpenSwathHelper::simpleFindBestFeature(transition_group_map,
      estimateBestPeptides, irt_detection_param.getValue("OverallQualityCutoff"));
  OPENMS_LOG_DEBUG << "Extracted best features: " << best_features.size() << '\n';

  // Create pairs vector and store peaks
  // (holds raw pointers into transition_group_map -- valid only while that map lives)
  std::map<String, OpenMS::MRMFeatureFinderScoring::MRMTransitionGroupType *> trgrmap_allpeaks; // store all peaks above cutoff
  for (std::map<std::string, double>::iterator it = best_features.begin(); it != best_features.end(); ++it)
  {
    pairs.emplace_back(it->second, PeptideRTMap[it->first]); // pair<exp_rt, theor_rt>
    if (transition_group_map.find(it->first) != transition_group_map.end())
    {
      trgrmap_allpeaks[ it->first ] = &transition_group_map[ it->first];
    }
  }

  // 5. Perform the outlier detection
  std::vector<std::pair<double, double> > pairs_corrected;
  String outlier_method = irt_detection_param.getValue("outlierMethod").toString();
  if (outlier_method == "iter_residual" || outlier_method == "iter_jackknife")
  {
    pairs_corrected = MRMRTNormalizer::removeOutliersIterative(pairs, min_rsq, min_coverage,
        irt_detection_param.getValue("useIterativeChauvenet").toBool(), outlier_method);
  }
  else if (outlier_method == "ransac")
  {
    // First, estimate of the maximum deviation from RT that is tolerated:
    //   Because 120 min gradient can have around 4 min elution shift, we use
    //   a default value of 3 % of the gradient to find upper RT threshold (3.6 min).
    double pcnt_rt_threshold = irt_detection_param.getValue("RANSACMaxPercentRTThreshold");
    double max_rt_threshold = (RTRange.second - RTRange.first) * pcnt_rt_threshold / 100.0;

    pairs_corrected = MRMRTNormalizer::removeOutliersRANSAC(pairs, min_rsq, min_coverage,
        irt_detection_param.getValue("RANSACMaxIterations"), max_rt_threshold,
        irt_detection_param.getValue("RANSACSamplingSize"));
  }
  else if (outlier_method == "none")
  {
    pairs_corrected = pairs;
  }
  else
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
      String("Illegal argument '") + outlier_method +
      "' used for outlierMethod (valid: 'iter_residual', 'iter_jackknife', 'ransac', 'none').");
  }
  OPENMS_LOG_DEBUG << "Performed outlier detection, left with features: " << pairs_corrected.size() << '\n';

  // 6. Check whether the found peptides fulfill the binned coverage criteria
  // set by the user.
  if (estimateBestPeptides)
  {
    bool enoughPeptides = MRMRTNormalizer::computeBinnedCoverage(RTRange, pairs_corrected,
        irt_detection_param.getValue("NrRTBins"),
        irt_detection_param.getValue("MinPeptidesPerBin"),
        irt_detection_param.getValue("MinBinsFilled") );

    if (!enoughPeptides)
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
        "There were not enough bins with the minimal number of peptides");
    }
  }
  if (pairs_corrected.size() < 2)
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
      "There are less than 2 iRT normalization peptides, not enough for an RT correction.");
  }

  // 7. Select the "correct" peaks for m/z (and IM) correction (e.g. remove those not
  // part of the linear regression)
  std::map<String, OpenMS::MRMFeatureFinderScoring::MRMTransitionGroupType *> trgrmap_final; // store all peaks above cutoff
  for (const auto& it : trgrmap_allpeaks)
  {
    if (it.second->getFeatures().empty() ) {continue;}
    const MRMFeature& feat = it.second->getBestFeature();

    // Check if the current feature is in the list of pairs used for the
    // linear RT regression (using other features may result in wrong
    // calibration values).
    // Matching only by RT is not perfect but should work for most cases.
    for (Size pit = 0; pit < pairs_corrected.size(); pit++)
    {
      // RT match within 0.01 identifies the feature as an inlier of the regression
      if (fabs(feat.getRT() - pairs_corrected[pit].first ) < 1e-2)
      {
        trgrmap_final[ it.first ] = it.second;
        break;
      }
    }
  }

  // 8. Correct m/z (and IM) deviations using SwathMapMassCorrection
  // m/z correction is done with the -irt_im_extraction parameters
  SwathMapMassCorrection mc;
  mc.setParameters(calibration_param);
  mc.correctMZ(trgrmap_final, targeted_exp, swath_maps, pasef);
  mc.correctIM(trgrmap_final, targeted_exp, swath_maps, pasef, im_trafo);

  // Get estimated extraction windows (computed by the mass correction above)
  setEstimatedMzWindow(mc.getFragmentMzWindow());
  setEstimatedImWindow(mc.getFragmentImWindow());
  setEstimatedMs1MzWindow(mc.getPrecursorMzWindow());
  setEstimatedMs1ImWindow(mc.getPrecursorImWindow());

  // 9. store RT transformation, using the selected model
  TransformationDescription trafo_out;
  trafo_out.setDataPoints(pairs_corrected);
  Param model_params;
  model_params.setValue("symmetric_regression", "false");
  model_params.setValue("span", irt_detection_param.getValue("lowess:span"));
  model_params.setValue("auto_span", irt_detection_param.getValue("lowess:auto_span"));
  model_params.setValue("auto_span_min", irt_detection_param.getValue("lowess:auto_span_min"));
  model_params.setValue("auto_span_max", irt_detection_param.getValue("lowess:auto_span_max"));
  model_params.setValue("auto_span_grid", irt_detection_param.getValue("lowess:auto_span_grid"));
  model_params.setValue("num_nodes", irt_detection_param.getValue("b_spline:num_nodes"));
  String model_type = irt_detection_param.getValue("alignmentMethod").toString();
  trafo_out.fitModel(model_type, model_params);

  OPENMS_LOG_DEBUG << "Final RT mapping:\n";
  for (Size i = 0; i < pairs_corrected.size(); i++)
  {
    OPENMS_LOG_DEBUG << pairs_corrected[i].first << " " << pairs_corrected[i].second << '\n';
  }
  OPENMS_LOG_DEBUG << "End of doDataNormalization_ method\n";
  this->endProgress();
  return trafo_out;
}
/// Extracts iRT chromatograms from all (MS2) SWATH maps in parallel.
///
/// For each SWATH map the matching transitions are selected (via m/z window
/// overlap, or via best IM/m/z window match for PASEF data), chromatograms are
/// extracted and non-empty ones are appended to @p chromatograms.
///
/// @param swath_maps All SWATH maps
/// @param irt_transitions iRT assay library
/// @param chromatograms Output: extracted (non-empty) chromatograms
/// @param trafo RT transformation (inverted internally for extraction)
/// @param cp Chromatogram extraction parameters
/// @param pasef Whether to use IM-aware (PASEF) window assignment
/// @param load_into_memory Whether to load each SWATH map into memory first
/// @throws Exception::InvalidParameter if pasef is set but a transition has no IM value
void OpenSwathCalibrationWorkflow::simpleExtractChromatograms_(
    const std::vector< OpenSwath::SwathMap > & swath_maps,
    const OpenSwath::LightTargetedExperiment& irt_transitions,
    std::vector< OpenMS::MSChromatogram > & chromatograms,
    const TransformationDescription& trafo,
    const ChromExtractParams & cp,
    bool pasef,
    bool load_into_memory)
{
  TransformationDescription trafo_inverse = trafo;
  trafo_inverse.invert();

  // If this is pasef data, do chromatogram extraction beforehand in unparallel workflow
  std::vector<int> tr_win_map; // maps transition k to dia map i from which it should be extracted, only used if pasef flag is on
  if (pasef)
  {
    // Before calling this function, check to ensure that precursors actually have IM data
    for (Size k = 0; k < irt_transitions.transitions.size(); k++)
    {
      const OpenSwath::LightTransition& tr = irt_transitions.transitions[k];
      if (tr.getPrecursorIM() == -1)
      {
        throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Transition " + tr.getNativeID() + " does not have a valid IM value, this must be set to use the -pasef flag");
      }
    }
    OpenSwathHelper::selectSwathTransitionsPasef(irt_transitions, tr_win_map, cp.min_upper_edge_dist, swath_maps);
  }

  this->startProgress(0, 1, "Extract iRT chromatograms");
  // Each thread works on one SWATH map at a time; results are merged under a
  // critical section below.
#ifdef _OPENMP
#pragma omp parallel for schedule(dynamic,1)
#endif
  for (SignedSize map_idx = 0; map_idx < boost::numeric_cast<SignedSize>(swath_maps.size()); ++map_idx)
  {
    std::vector< OpenMS::MSChromatogram > tmp_chromatograms;
    if (!swath_maps[map_idx].ms1) // skip MS1
    {

      OpenSwath::LightTargetedExperiment transition_exp_used;
      if (pasef)
      {
        // Step 1.2: select transitions based on matching PRM/PASEF window (best window)
        std::set<std::string> matching_compounds;
        for (Size k = 0; k < tr_win_map.size(); k++)
        {
          if (tr_win_map[k] == map_idx)
          {
            const OpenSwath::LightTransition& tr = irt_transitions.transitions[k];
            transition_exp_used.transitions.push_back(tr);
            matching_compounds.insert(tr.getPeptideRef());
            OPENMS_LOG_DEBUG << "Adding Precursor with m/z " << tr.getPrecursorMZ() << " and IM of " << tr.getPrecursorIM() << " to swath with mz lower of " << swath_maps[map_idx].lower << " m/z upper of " << swath_maps[map_idx].upper << " im lower of " << swath_maps[map_idx].imLower << " and im upper of " << swath_maps[map_idx].imUpper << '\n';
          }
        }

        // Carry over the compounds (and their proteins) referenced by the
        // selected transitions into the reduced experiment.
        std::set<std::string> matching_proteins;
        for (Size i = 0; i < irt_transitions.compounds.size(); i++)
        {
          if (matching_compounds.find(irt_transitions.compounds[i].id) != matching_compounds.end())
          {
            transition_exp_used.compounds.push_back( irt_transitions.compounds[i] );
            for (Size j = 0; j < irt_transitions.compounds[i].protein_refs.size(); j++)
            {
              matching_proteins.insert(irt_transitions.compounds[i].protein_refs[j]);
            }
          }
        }
        for (Size i = 0; i < irt_transitions.proteins.size(); i++)
        {
          if (matching_proteins.find(irt_transitions.proteins[i].id) != matching_proteins.end())
          {
            transition_exp_used.proteins.push_back( irt_transitions.proteins[i] );
          }
        }
      }
      else
      {
        // Non-PASEF: simple selection by precursor m/z falling into the window
        OpenSwathHelper::selectSwathTransitions(irt_transitions, transition_exp_used,
            cp.min_upper_edge_dist, swath_maps[map_idx].lower, swath_maps[map_idx].upper);
      }

      if (!transition_exp_used.getTransitions().empty()) // skip if no transitions found
      {

        std::vector< OpenSwath::ChromatogramPtr > tmp_out;
        std::vector< ChromatogramExtractor::ExtractionCoordinates > coordinates;
        ChromatogramExtractor extractor;

        OpenSwath::SpectrumAccessPtr current_swath_map = swath_maps[map_idx].sptr;
        if (load_into_memory)
        {
          // This creates an InMemory object that keeps all data in memory
          current_swath_map = std::shared_ptr<SpectrumAccessOpenMSInMemory>( new SpectrumAccessOpenMSInMemory(*current_swath_map) );
        }

        prepareExtractionCoordinates_(tmp_out, coordinates, transition_exp_used, trafo_inverse, cp);
        extractor.extractChromatograms(current_swath_map, tmp_out, coordinates, cp.mz_extraction_window,
            cp.ppm, cp.im_extraction_window, cp.extraction_function);
        extractor.return_chromatogram(tmp_out, coordinates,
            transition_exp_used, SpectrumSettings(), tmp_chromatograms, false, cp.im_extraction_window);

        // Merge results into the shared output vector (one thread at a time)
#ifdef _OPENMP
#pragma omp critical (osw_write_chroms)
#endif
        {
          int nr_empty_chromatograms = 0;
          OPENMS_LOG_DEBUG << "[simple] Extracted " << tmp_chromatograms.size() << " chromatograms from SWATH map " <<
            map_idx << " with m/z " << swath_maps[map_idx].lower << " to " << swath_maps[map_idx].upper << ":\n";
          for (Size chrom_idx = 0; chrom_idx < tmp_chromatograms.size(); chrom_idx++)
          {
            // Check TIC and remove empty chromatograms (can happen if the
            // extraction window is outside the mass spectrometric acquisition
            // window).
            double tic = std::accumulate(tmp_out[chrom_idx]->getIntensityArray()->data.begin(),
                                         tmp_out[chrom_idx]->getIntensityArray()->data.end(),0.0);
            OPENMS_LOG_DEBUG << "Chromatogram " << coordinates[chrom_idx].id << " with size "
              << tmp_out[chrom_idx]->getIntensityArray()->data.size() << " and TIC " << tic << '\n';
            if (tic > 0.0)
            {
              // add the chromatogram to the output
              chromatograms.push_back(tmp_chromatograms[chrom_idx]);
            }
            else
            {
              OPENMS_LOG_DEBUG << " - Warning: Empty chromatogram " << coordinates[chrom_idx].id <<
                " detected. Will skip it!\n";
              nr_empty_chromatograms++;
            }
          }
          if (nr_empty_chromatograms > 0)
          {
            std::cerr << " - Warning: Detected " << nr_empty_chromatograms << " empty chromatograms. Will skip them!\n";
          }
        }
      }
      else
      {
        OPENMS_LOG_DEBUG << "Extracted no transitions from SWATH map " << map_idx << " with m/z " <<
          swath_maps[map_idx].lower << " to " << swath_maps[map_idx].upper << '\n';
      }
    }
  }
  this->endProgress();
}
/// Adds the signal of @p newchrom onto @p base_chrom by resampling it onto
/// the grid of @p base_chrom.
///
/// NOTE(review): when base_chrom starts out empty it is first set to a copy of
/// newchrom and then newchrom is additionally rastered onto that copy --
/// confirm that LinearResamplerAlign::raster does not double-count the very
/// first chromatogram in this case.
void OpenSwathCalibrationWorkflow::addChromatograms(MSChromatogram& base_chrom, const MSChromatogram& newchrom)
{
  // An empty base is seeded with the incoming chromatogram first.
  if (base_chrom.empty())
  {
    base_chrom = newchrom;
  }

  LinearResamplerAlign resampler;
  resampler.raster(newchrom.begin(), newchrom.end(), base_chrom.begin(), base_chrom.end());
}
/// Returns the estimated fragment-ion m/z extraction window (set during calibration).
double OpenSwathCalibrationWorkflow::getEstimatedMzWindow() const
{
  return estimated_mz_window_;
}
/// Sets the estimated fragment-ion m/z extraction window.
void OpenSwathCalibrationWorkflow::setEstimatedMzWindow(double estimatedMzWindow)
{
  estimated_mz_window_ = estimatedMzWindow;
}
/// Returns the estimated fragment-ion ion-mobility extraction window (set during calibration).
double OpenSwathCalibrationWorkflow::getEstimatedImWindow() const
{
  return estimated_im_window_;
}
/// Sets the estimated fragment-ion ion-mobility extraction window.
void OpenSwathCalibrationWorkflow::setEstimatedImWindow(double estimatedImWindow)
{
  estimated_im_window_ = estimatedImWindow;
}
/// Returns the estimated precursor (MS1) m/z extraction window (set during calibration).
double OpenSwathCalibrationWorkflow::getEstimatedMs1MzWindow() const
{
  return estimated_ms1_mz_window_;
}
/// Sets the estimated precursor (MS1) m/z extraction window.
void OpenSwathCalibrationWorkflow::setEstimatedMs1MzWindow(double estimatedMs1MzWindow)
{
  estimated_ms1_mz_window_ = estimatedMs1MzWindow;
}
/// Returns the estimated precursor (MS1) ion-mobility extraction window (set during calibration).
double OpenSwathCalibrationWorkflow::getEstimatedMs1ImWindow() const
{
  return estimated_ms1_im_window_;
}
/// Sets the estimated precursor (MS1) ion-mobility extraction window.
void OpenSwathCalibrationWorkflow::setEstimatedMs1ImWindow(double estimatedMs1ImWindow)
{
  estimated_ms1_im_window_ = estimatedMs1ImWindow;
}
}
// OpenSwathWorkflow
namespace OpenMS
{
void OpenSwathWorkflow::performExtraction(
const std::vector< OpenSwath::SwathMap > & swath_maps,
const TransformationDescription& trafo,
const ChromExtractParams & cp,
const ChromExtractParams & cp_ms1,
const Param & feature_finder_param,
const OpenSwath::LightTargetedExperiment& transition_exp,
FeatureMap& out_featureFile,
bool store_features,
OpenSwathOSWWriter & osw_writer,
Interfaces::IMSDataConsumer * chromConsumer,
int batchSize,
int ms1_isotopes,
bool load_into_memory)
{
osw_writer.writeHeader();
bool ms1_only = (swath_maps.size() == 1 && swath_maps[0].ms1);
// Compute inversion of the transformation
TransformationDescription trafo_inverse = trafo;
trafo_inverse.invert();
std::cout << "Will analyze " << transition_exp.transitions.size() << " transitions in total.\n";
int progress = 0;
this->startProgress(0, swath_maps.size(), "Extracting and scoring transitions");
// (i) Obtain precursor chromatograms (MS1) if precursor extraction is enabled
ChromExtractParams ms1_cp(cp_ms1);
if (ms1_only && !use_ms1_traces_)
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Error, you need to enable use_ms1_traces when run in MS1 mode." );
}
if (use_ms1_traces_) ms1_map_ = loadMS1Map(swath_maps, load_into_memory);
// (ii) Precursor extraction only
if (ms1_only)
{
std::vector< MSChromatogram > ms1_chromatograms;
MS1Extraction_(ms1_map_, swath_maps, ms1_chromatograms, ms1_cp,
transition_exp, trafo_inverse, ms1_only, ms1_isotopes);
FeatureMap featureFile;
std::shared_ptr<MSExperiment> empty_exp = std::shared_ptr<MSExperiment>(new MSExperiment);
const OpenSwath::LightTargetedExperiment& transition_exp_used = transition_exp;
scoreAllChromatograms_(std::vector<MSChromatogram>(), ms1_chromatograms, swath_maps, transition_exp_used,
feature_finder_param, trafo,
cp.rt_extraction_window, featureFile, osw_writer, ms1_isotopes, true);
// write features to output if so desired
std::vector< OpenMS::MSChromatogram > chromatograms;
writeOutFeaturesAndChroms_(chromatograms, ms1_chromatograms, featureFile, out_featureFile, store_features, chromConsumer);
}
// (iii) map transitions to individual DIA windows for cases where this is
// non-trivial (e.g. when there is m/z overlap and a transition could be
// extracted from more than one window
std::vector<int> tr_win_map; // maps transition k to dia map i from which it should be extracted
//
// currently not supported to do PASEF and PRM
if (prm_ & pasef_) {
std::cerr << "Setting -pasef and -matching_window_only flags simultaneously is not currently supported.\n";
throw Exception::NotImplemented(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION);
}
else if (prm_)
{
// Here we deal with overlapping PRM / DIA windows: we only want to extract
// each peptide from a single window and we assume that PRM windows are
// centered around the target peptide. We therefore select for each peptide
// the best-matching PRM / DIA window:
tr_win_map.resize(transition_exp.transitions.size(), -1);
for (SignedSize i = 0; i < boost::numeric_cast<SignedSize>(swath_maps.size()); ++i)
{
for (Size k = 0; k < transition_exp.transitions.size(); k++)
{
const OpenSwath::LightTransition& tr = transition_exp.transitions[k];
// If the transition falls inside the current PRM / DIA window, check
// if the window is potentially a better match for extraction than
// the one previously stored in the map:
if (swath_maps[i].lower < tr.getPrecursorMZ() && tr.getPrecursorMZ() < swath_maps[i].upper &&
std::fabs(swath_maps[i].upper - tr.getPrecursorMZ()) >= cp.min_upper_edge_dist)
{
if (tr_win_map[k] == -1) tr_win_map[k] = i;
if (
std::fabs(swath_maps[ tr_win_map[k] ].center - tr.getPrecursorMZ() ) >
std::fabs(swath_maps[ i ].center - tr.getPrecursorMZ() ) )
{
// current PRM / DIA window "i" is a better match
tr_win_map[k] = i;
}
}
}
}
}
else if (pasef_)
{
// For PASEF experiments it is possible to have DIA windows with the same m/z however different IM.
// Extract from the DIA window in which the precursor is more centered across its IM.
tr_win_map.resize(transition_exp.transitions.size(), -1);
for (SignedSize i = 0; i < boost::numeric_cast<SignedSize>(swath_maps.size()); ++i)
{
for (Size k = 0; k < transition_exp.transitions.size(); k++)
{
const OpenSwath::LightTransition& tr = transition_exp.transitions[k];
// If the transition falls inside the current DIA window (both in IM and m/z axis), check
// if the window is potentially a better match for extraction than
// the one previously stored in the map:
if (
swath_maps[i].imLower < tr.getPrecursorIM() && tr.getPrecursorIM() < swath_maps[i].imUpper &&
swath_maps[i].lower < tr.getPrecursorMZ() && tr.getPrecursorMZ() < swath_maps[i].upper &&
std::fabs(swath_maps[i].upper - tr.getPrecursorMZ()) >= cp.min_upper_edge_dist )
{
if (tr_win_map[k] == -1) tr_win_map[k] = i;
// Check if the current window is better than the previously assigned window (across IM)
double imOld = std::fabs(((swath_maps[ tr_win_map[k] ].imLower + swath_maps [ tr_win_map[k] ].imUpper) / 2) - tr.getPrecursorIM() );
double imNew = std::fabs(((swath_maps[ i ].imLower + swath_maps [ i ].imUpper) / 2) - tr.getPrecursorIM() );
if (imOld > imNew)
{
// current DIA window "i" is a better match
OPENMS_LOG_DEBUG << "For Precursor " << tr.getPrecursorIM() << "Replacing Swath Map with IM center of " <<
imOld << " with swath map of im center " << imNew << '\n';
tr_win_map[k] = i;
}
}
}
}
}
else {
};
// (iv) Perform extraction and scoring of fragment ion chromatograms (MS2)
// We set dynamic scheduling such that the maps are worked on in the order
// in which they were given to the program / acquired. This gives much
// better load balancing than static allocation.
#ifdef _OPENMP
#ifdef MT_ENABLE_NESTED_OPENMP
int total_nr_threads = omp_get_max_threads(); // store total number of threads we are allowed to use
if (threads_outer_loop_ > -1)
{
std::cout << "Setting up nested loop with " << std::min(threads_outer_loop_, omp_get_max_threads()) << " threads out of "<< omp_get_max_threads() << '\n';
omp_set_nested(1);
omp_set_dynamic(0);
omp_set_num_threads(std::min(threads_outer_loop_, omp_get_max_threads()) ); // use at most threads_outer_loop_ threads here
}
else
{
std::cout << "Use non-nested loop with " << total_nr_threads << " threads.\n";
}
#endif
#pragma omp parallel for schedule(dynamic,1)
#endif
for (SignedSize i = 0; i < boost::numeric_cast<SignedSize>(swath_maps.size()); ++i)
{
if (!swath_maps[i].ms1) // skip MS1
{
// Step 1: select which transitions to extract (proceed in batches)
OpenSwath::LightTargetedExperiment transition_exp_used_all;
if (!(prm_ || pasef_))
{
// Step 1.1: select transitions matching the window
OpenSwathHelper::selectSwathTransitions(transition_exp, transition_exp_used_all,
cp.min_upper_edge_dist, swath_maps[i].lower, swath_maps[i].upper);
}
else
{
// Step 1.2: select transitions based on matching PRM/PASEF window (best window)
std::set<std::string> matching_compounds;
for (Size k = 0; k < tr_win_map.size(); k++)
{
if (tr_win_map[k] == i)
{
const OpenSwath::LightTransition& tr = transition_exp.transitions[k];
transition_exp_used_all.transitions.push_back(tr);
matching_compounds.insert(tr.getPeptideRef());
OPENMS_LOG_DEBUG << "Adding Precursor with m/z " << tr.getPrecursorMZ() << " and IM of " << tr.getPrecursorIM() << " to swath with mz upper of " << swath_maps[i].upper << " im lower of " << swath_maps[i].imLower << " and im upper of " << swath_maps[i].imUpper << '\n';
}
}
std::set<std::string> matching_proteins;
for (Size i = 0; i < transition_exp.compounds.size(); i++)
{
if (matching_compounds.find(transition_exp.compounds[i].id) != matching_compounds.end())
{
transition_exp_used_all.compounds.push_back( transition_exp.compounds[i] );
for (Size j = 0; j < transition_exp.compounds[i].protein_refs.size(); j++)
{
matching_proteins.insert(transition_exp.compounds[i].protein_refs[j]);
}
}
}
for (Size i = 0; i < transition_exp.proteins.size(); i++)
{
if (matching_proteins.find(transition_exp.proteins[i].id) != matching_proteins.end())
{
transition_exp_used_all.proteins.push_back( transition_exp.proteins[i] );
}
}
}
if (!transition_exp_used_all.getTransitions().empty()) // skip if no transitions found
{
OpenSwath::SpectrumAccessPtr current_swath_map = swath_maps[i].sptr;
if (load_into_memory)
{
// This creates an InMemory object that keeps all data in memory
current_swath_map = std::shared_ptr<SpectrumAccessOpenMSInMemory>( new SpectrumAccessOpenMSInMemory(*current_swath_map) );
}
int batch_size;
if (batchSize <= 0 || batchSize >= (int)transition_exp_used_all.getCompounds().size())
{
batch_size = transition_exp_used_all.getCompounds().size();
}
else
{
batch_size = batchSize;
}
SignedSize nr_batches = (transition_exp_used_all.getCompounds().size() / batch_size);
#ifdef _OPENMP
#ifdef MT_ENABLE_NESTED_OPENMP
// If we have a multiple of threads_outer_loop_ here, then use nested
// parallelization here. E.g. if we use 8 threads for the outer loop,
// but we have a total of 24 cores available, each of the 8 threads
// will then create a team of 3 threads to work on the batches
// individually.
//
// We should avoid oversubscribing the CPUs, therefore we use integer division.
// -- see https://docs.oracle.com/cd/E19059-01/stud.10/819-0501/2_nested.html
int outer_thread_nr = omp_get_thread_num();
omp_set_num_threads(std::max(1, total_nr_threads / threads_outer_loop_) );
#pragma omp parallel for schedule(dynamic, 1)
#endif
#endif
for (SignedSize pep_idx = 0; pep_idx <= nr_batches; pep_idx++)
{
OpenSwath::SpectrumAccessPtr current_swath_map_inner = current_swath_map;
#ifdef _OPENMP
#ifdef MT_ENABLE_NESTED_OPENMP
// To ensure multi-threading safe access to the individual spectra, we
// need to use a light clone of the spectrum access (if multiple threads
// share a single filestream and call seek on it, chaos will ensue).
if (total_nr_threads / threads_outer_loop_ > 1)
{
current_swath_map_inner = current_swath_map->lightClone();
}
#endif
#pragma omp critical (osw_write_stdout)
#endif
{
std::cout << "Thread " <<
#ifdef _OPENMP
#ifdef MT_ENABLE_NESTED_OPENMP
outer_thread_nr << "_" << omp_get_thread_num() << " " <<
#else
omp_get_thread_num() << "_0 " <<
#endif
#else
"0" <<
#endif
"will analyze " << transition_exp_used_all.getCompounds().size() << " compounds and "
<< transition_exp_used_all.getTransitions().size() << " transitions "
"from SWATH " << i << " (batch " << pep_idx << " out of " << nr_batches << ")\n";
}
// Create the new, batch-size transition experiment
OpenSwath::LightTargetedExperiment transition_exp_used;
selectCompoundsForBatch_(transition_exp_used_all, transition_exp_used, batch_size, pep_idx);
// Extract MS1 chromatograms for this batch
std::vector< MSChromatogram > ms1_chromatograms;
if (ms1_map_ != nullptr)
{
OpenSwath::SpectrumAccessPtr threadsafe_ms1 = ms1_map_->lightClone();
MS1Extraction_(threadsafe_ms1, swath_maps, ms1_chromatograms, ms1_cp,
transition_exp_used, trafo_inverse, ms1_only, ms1_isotopes);
}
// Step 2.1: extract these transitions
ChromatogramExtractor extractor;
std::vector< OpenSwath::ChromatogramPtr > chrom_list;
std::vector< ChromatogramExtractor::ExtractionCoordinates > coordinates;
// Step 2.2: prepare the extraction coordinates and extract chromatograms
// chrom_list contains one entry for each fragment ion (transition) in transition_exp_used
prepareExtractionCoordinates_(chrom_list, coordinates, transition_exp_used, trafo_inverse, cp);
extractor.extractChromatograms(current_swath_map_inner, chrom_list, coordinates, cp.mz_extraction_window,
cp.ppm, cp.im_extraction_window, cp.extraction_function);
// Step 2.3: convert chromatograms back to OpenMS::MSChromatogram and write to output
PeakMap chrom_exp;
extractor.return_chromatogram(chrom_list, coordinates, transition_exp_used, SpectrumSettings(),
chrom_exp.getChromatograms(), false, cp.im_extraction_window);
// Step 3: score these extracted transitions
FeatureMap featureFile;
std::vector< OpenSwath::SwathMap > tmp = {swath_maps[i]};
tmp.back().sptr = current_swath_map_inner;
scoreAllChromatograms_(chrom_exp.getChromatograms(), ms1_chromatograms, tmp, transition_exp_used,
feature_finder_param, trafo, cp.rt_extraction_window, featureFile, osw_writer, ms1_isotopes);
// Step 4: write all chromatograms and features out into an output object / file
// (this needs to be done in a critical section since we only have one
// output file and one output map).
#pragma omp critical (osw_write_out)
{
writeOutFeaturesAndChroms_(chrom_exp.getChromatograms(), ms1_chromatograms, featureFile, out_featureFile, store_features, chromConsumer);
}
}
} // continue 2 (no continue due to OpenMP)
} // continue 1 (no continue due to OpenMP)
#pragma omp critical (progress)
this->setProgress(++progress);
}
this->endProgress();
#ifdef _OPENMP
#ifdef MT_ENABLE_NESTED_OPENMP
if (threads_outer_loop_ > -1)
{
omp_set_num_threads(total_nr_threads); // set number of available threads back to initial value
}
#endif
#endif
}
void OpenSwathWorkflow::writeOutFeaturesAndChroms_(
std::vector< OpenMS::MSChromatogram > & chromatograms,
std::vector< MSChromatogram >& ms1_chromatograms,
const FeatureMap & featureFile,
FeatureMap& out_featureFile,
bool store_features,
Interfaces::IMSDataConsumer * chromConsumer)
{
// write out MS1 chromatograms to output if so desired
for (Size j = 0; j < ms1_chromatograms.size(); j++)
{
if (ms1_chromatograms[j].empty()) continue; // skip empty chromatograms
// write MS1 chromatograms to disk
chromConsumer->consumeChromatogram( ms1_chromatograms[j] );
}
// write chromatograms to output if so desired
for (Size chrom_idx = 0; chrom_idx < chromatograms.size(); ++chrom_idx)
{
if (!chromatograms[chrom_idx].empty())
{
chromConsumer->consumeChromatogram(chromatograms[chrom_idx]);
}
}
// write features to output if so desired
if (store_features)
{
for (FeatureMap::const_iterator feature_it = featureFile.begin();
feature_it != featureFile.end(); ++feature_it)
{
out_featureFile.push_back(*feature_it);
}
for (std::vector<ProteinIdentification>::const_iterator protid_it =
featureFile.getProteinIdentifications().begin();
protid_it != featureFile.getProteinIdentifications().end();
++protid_it)
{
out_featureFile.getProteinIdentifications().push_back(*protid_it);
}
}
}
void OpenSwathWorkflowBase::MS1Extraction_(const OpenSwath::SpectrumAccessPtr& ms1_map,
const std::vector< OpenSwath::SwathMap > & /* swath_maps */,
std::vector< MSChromatogram >& ms1_chromatograms,
const ChromExtractParams& cp,
const OpenSwath::LightTargetedExperiment& transition_exp,
const TransformationDescription& trafo_inverse,
bool /* ms1_only */,
int ms1_isotopes)
{
std::vector< OpenSwath::ChromatogramPtr > chrom_list;
std::vector< ChromatogramExtractor::ExtractionCoordinates > coordinates;
OpenSwath::LightTargetedExperiment transition_exp_used = transition_exp; // copy for const correctness
ChromatogramExtractor extractor;
// prepare the extraction coordinates and extract chromatogram
prepareExtractionCoordinates_(chrom_list, coordinates, transition_exp_used, trafo_inverse, cp, true, ms1_isotopes);
extractor.extractChromatograms(ms1_map, chrom_list, coordinates, cp.mz_extraction_window,
cp.ppm, cp.im_extraction_window, cp.extraction_function);
extractor.return_chromatogram(chrom_list, coordinates, transition_exp_used,
SpectrumSettings(), ms1_chromatograms, true, cp.im_extraction_window);
}
  /**
    @brief Score all extracted chromatograms of one batch of assays.

    Builds one MRMTransitionGroup per assay (compound) from the extracted MS2
    chromatograms (and, if present, the MS1 precursor/isotope chromatograms),
    picks peak groups with MRMTransitionGroupPicker and scores them with
    MRMFeatureFinderScoring. Results are appended to @p output; if the OSW
    writer is active, serialized lines are collected and written once at the
    end inside a named OpenMP critical section.

    @param ms2_chromatograms  Extracted fragment-level chromatograms (looked up by native ID)
    @param ms1_chromatograms  Extracted precursor-level chromatograms (may be empty)
    @param swath_maps         SWATH maps passed through to the scoring
    @param transition_exp     Assay library (compounds + transitions) for this batch
    @param feature_finder_param Scoring/picking parameters
    @param trafo              RT normalization transformation (inverted locally)
    @param rt_extraction_window If > 0, chromatogram points outside expected RT +/- window are removed
    @param output             Feature map receiving scored peak groups
    @param osw_writer         OSW output writer (mutually exclusive with featureXML output)
    @param nr_ms1_isotopes    Number of MS1 isotope traces beyond the monoisotopic one
    @param ms1only            If true, score on MS1 signal only (no MS2 chromatograms expected)

    @throws Exception::IllegalArgument if a transition has no matching
            chromatogram, or if features were produced without any detecting
            transition in the assay.
  */
  void OpenSwathWorkflow::scoreAllChromatograms_(
    const std::vector< OpenMS::MSChromatogram > & ms2_chromatograms,
    const std::vector< OpenMS::MSChromatogram > & ms1_chromatograms,
    const std::vector< OpenSwath::SwathMap >& swath_maps,
    const OpenSwath::LightTargetedExperiment& transition_exp,
    const Param& feature_finder_param,
    const TransformationDescription& trafo,
    const double rt_extraction_window,
    FeatureMap& output,
    OpenSwathOSWWriter & osw_writer,
    int nr_ms1_isotopes,
    bool ms1only) const
  {
    // Inverse transformation maps normalized (library) RT back to experimental RT.
    TransformationDescription trafo_inv = trafo;
    trafo_inv.invert();
    MRMFeatureFinderScoring featureFinder;
    MRMTransitionGroupPicker trgroup_picker;
    // To ensure multi-threading safe access to the individual spectra, we
    // need to use a light clone of the spectrum access (if multiple threads
    // share a single filestream and call seek on it, chaos will ensue).
    if (use_ms1_traces_ && ms1_map_)
    {
      OpenSwath::SpectrumAccessPtr threadsafe_ms1 = ms1_map_->lightClone();
      featureFinder.setMS1Map( threadsafe_ms1 );
    }
    else if (use_ms1_traces_ && !ms1_map_)
    {
      OPENMS_LOG_WARN << "WARNING: Attempted to use MS1 traces but no MS1 map was provided: Will not use MS1 signal!\n";
    }
    // If use_total_mi_score is defined, we need to instruct MRMTransitionGroupPicker to compute the score
    Param trgroup_picker_param = feature_finder_param.copy("TransitionGroupPicker:", true);
    if ((bool)feature_finder_param.getValue("Scores:use_total_mi_score").toBool())
    {
      trgroup_picker_param.setValue("compute_total_mi", "true");
    }
    trgroup_picker.setParameters(trgroup_picker_param);
    featureFinder.setParameters(feature_finder_param);
    featureFinder.prepareProteinPeptideMaps_(transition_exp);
    // Map ms1 chromatogram id to sequence number (unordered for O(1) lookup)
    std::unordered_map<String, int> ms1_chromatogram_map;
    ms1_chromatogram_map.reserve(ms1_chromatograms.size());
    for (Size i = 0; i < ms1_chromatograms.size(); i++)
    {
      ms1_chromatogram_map[ms1_chromatograms[i].getNativeID()] = boost::numeric_cast<int>(i);
    }
    // Map chromatogram id to sequence number (unordered for O(1) lookup)
    std::unordered_map<String, int> chromatogram_map;
    chromatogram_map.reserve(ms2_chromatograms.size());
    for (Size i = 0; i < ms2_chromatograms.size(); i++)
    {
      chromatogram_map[ms2_chromatograms[i].getNativeID()] = boost::numeric_cast<int>(i);
    }
    // Map peptide id to sequence number (unordered for O(1) lookup)
    std::unordered_map<String, int> assay_peptide_map;
    assay_peptide_map.reserve(transition_exp.getCompounds().size());
    for (Size i = 0; i < transition_exp.getCompounds().size(); i++)
    {
      assay_peptide_map[transition_exp.getCompounds()[i].id] = boost::numeric_cast<int>(i);
    }
    // Map peptide id to corresponding transitions
    typedef std::map<String, std::vector< const TransitionType* > > AssayMapT;
    AssayMapT assay_map;
    // create an entry for each member (ensure there is one even if we don't
    // have any transitions for it, e.g. in the case of ms1 only)
    for (Size i = 0; i < transition_exp.getCompounds().size(); i++)
    {
      assay_map[transition_exp.getCompounds()[i].id] = std::vector< const TransitionType* >();
    }
    for (Size i = 0; i < transition_exp.getTransitions().size(); i++)
    {
      assay_map[transition_exp.getTransitions()[i].getPeptideRef()].push_back(&transition_exp.getTransitions()[i]);
    }
    std::vector<String> to_osw_output;
    ///////////////////////////////////
    // Start of main function
    // Iterating over all the assays
    ///////////////////////////////////
    for (AssayMapT::iterator assay_it = assay_map.begin(); assay_it != assay_map.end(); ++assay_it)
    {
      // Create new MRMTransitionGroup
      String id = assay_it->first;
      MRMTransitionGroupType transition_group;
      transition_group.setTransitionGroupID(id);
      double expected_rt = transition_exp.getCompounds()[ assay_peptide_map[id] ].rt;
      // 1. Go through all transitions, for each transition get
      // the chromatogram and the assay to the MRMTransitionGroup
      const TransitionType* detection_assay_it = nullptr; // store last detecting transition
      for (const TransitionType* transition : assay_it->second)
      {
        if (transition->isDetectingTransition())
        {
          detection_assay_it = transition;
        }
        // continue if we only have MS1 (we wont have any chromatograms for
        // the transitions)
        if (ms1only) {continue;}
        if (chromatogram_map.find(transition->getNativeID()) == chromatogram_map.end())
        {
          throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
              "Error, did not find chromatogram for transition " + transition->getNativeID() );
        }
        // Convert chromatogram to MSChromatogram and filter
        // (note: this is a copy on purpose — the original stays untouched)
        auto chromatogram = ms2_chromatograms[ chromatogram_map[transition->getNativeID()] ];
        chromatogram.setNativeID(transition->getNativeID());
        if (rt_extraction_window > 0)
        {
          // Keep only peaks within expected (de-normalized) RT +/- window.
          double de_normalized_experimental_rt = trafo_inv.apply(expected_rt);
          double rt_max = de_normalized_experimental_rt + rt_extraction_window;
          double rt_min = de_normalized_experimental_rt - rt_extraction_window;
          chromatogram.erase(std::remove_if(chromatogram.begin(), chromatogram.end(),
                                            [rt_min, rt_max](const ChromatogramPeak& chr)
                                            { return chr.getRT() > rt_max || chr.getRT() < rt_min; })
                             , chromatogram.end());
        }
        // Add the transition and the chromatogram to the MRMTransitionGroup
        transition_group.addTransition(*transition, transition->getNativeID());
        transition_group.addChromatogram(chromatogram, chromatogram.getNativeID());
      }
      // currently .osw and .featureXML are mutually exclusive
      if (osw_writer.isActive()) { output.clear(); }
      // 2. Set the MS1 chromatograms for the different isotopes, if available
      // (note that for 3 isotopes, we include the monoisotopic peak plus three
      // isotopic traces)
      for (int iso = 0; iso <= nr_ms1_isotopes; iso++)
      {
        String prec_id = OpenSwathHelper::computePrecursorId(transition_group.getTransitionGroupID(), iso);
        if (!ms1_chromatograms.empty() && ms1_chromatogram_map.find(prec_id) != ms1_chromatogram_map.end())
        {
          MSChromatogram chromatogram = ms1_chromatograms[ ms1_chromatogram_map[prec_id] ];
          transition_group.addPrecursorChromatogram(chromatogram, chromatogram.getNativeID());
        }
      }
      // 3. / 4. Process the MRMTransitionGroup: find peakgroups and score them
      trgroup_picker.pickTransitionGroup(transition_group);
      featureFinder.scorePeakgroups(transition_group, trafo, swath_maps, output, ms1only);
      // Ensure that a detection transition is used to derive features for output
      if (detection_assay_it == nullptr && !output.empty())
      {
        throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
            "Error, did not find any detection transition for feature " + id );
      }
      // 5. Add to the output osw if given
      if (osw_writer.isActive() && !output.empty()) // implies that detection_assay_it was set
      {
        // NOTE(review): 'pep' is unused — candidate for removal.
        const OpenSwath::LightCompound pep;
        to_osw_output.push_back(osw_writer.prepareLine(OpenSwath::LightCompound(), // not used currently: transition_exp.getCompounds()[ assay_peptide_map[id] ],
                                                       nullptr, // not used currently: detection_assay_it,
                                                       output,
                                                       id));
      }
    }
    // Only write at the very end since this is a step that needs a barrier
    if (osw_writer.isActive())
    {
#ifdef _OPENMP
#pragma omp critical (osw_write_tsv)
#endif
      {
        osw_writer.writeLines(to_osw_output);
      }
    }
  }
void OpenSwathWorkflow::selectCompoundsForBatch_(const OpenSwath::LightTargetedExperiment& transition_exp_used_all,
OpenSwath::LightTargetedExperiment& transition_exp_used, int batch_size, size_t j)
{
// compute batch start/end
size_t start = j * batch_size;
size_t end = j * batch_size + batch_size;
if (end > transition_exp_used_all.compounds.size())
{
end = transition_exp_used_all.compounds.size();
}
// Create the new, batch-size transition experiment
transition_exp_used.proteins = transition_exp_used_all.proteins;
transition_exp_used.compounds.insert(transition_exp_used.compounds.end(),
transition_exp_used_all.compounds.begin() + start, transition_exp_used_all.compounds.begin() + end);
copyBatchTransitions_(transition_exp_used.compounds, transition_exp_used_all.transitions, transition_exp_used.transitions);
}
void OpenSwathWorkflow::copyBatchTransitions_(const std::vector<OpenSwath::LightCompound>& used_compounds,
const std::vector<OpenSwath::LightTransition>& all_transitions,
std::vector<OpenSwath::LightTransition>& output)
{
std::set<std::string> selected_compounds;
for (Size i = 0; i < used_compounds.size(); i++)
{
selected_compounds.insert(used_compounds[i].id);
}
for (Size i = 0; i < all_transitions.size(); i++)
{
if (selected_compounds.find(all_transitions[i].peptide_ref) != selected_compounds.end())
{
output.push_back(all_transitions[i]);
}
}
}
void OpenSwathWorkflowBase::prepareExtractionCoordinates_(std::vector< OpenSwath::ChromatogramPtr > & chrom_list,
std::vector< ChromatogramExtractorAlgorithm::ExtractionCoordinates > & coordinates,
const OpenSwath::LightTargetedExperiment & transition_exp_used,
const TransformationDescription& trafo_inverse,
const ChromExtractParams & cp,
const bool ms1,
const int ms1_isotopes) const
{
if (cp.rt_extraction_window < 0)
{
ChromatogramExtractor::prepare_coordinates(chrom_list, coordinates, transition_exp_used, cp.rt_extraction_window, ms1, ms1_isotopes);
}
else
{
// Use an rt extraction window of 0.0 which will just write the retention time in start / end positions
// Then correct the start/end positions and add the extra_rt_extract parameter
ChromatogramExtractor::prepare_coordinates(chrom_list, coordinates, transition_exp_used, 0.0, ms1, ms1_isotopes);
for (std::vector< ChromatogramExtractor::ExtractionCoordinates >::iterator it = coordinates.begin(); it != coordinates.end(); ++it)
{
it->rt_start = trafo_inverse.apply(it->rt_start) - (cp.rt_extraction_window + cp.extra_rt_extract)/ 2.0;
it->rt_end = trafo_inverse.apply(it->rt_end) + (cp.rt_extraction_window + cp.extra_rt_extract)/ 2.0;
}
}
}
}
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Douglas McCloskey, Pasquale Domenico Colaianni, Svetlana Kutuzova $
// $Authors: Douglas McCloskey, Pasquale Domenico Colaianni, Svetlana Kutuzova $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/MRMFeatureSelector.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/DATASTRUCTURES/LPWrapper.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <algorithm>
#include <set>
namespace OpenMS
{
Int MRMFeatureSelector::addVariable_(
LPWrapper& problem,
const String& name,
const bool bounded,
const double obj,
const VariableType variableType
) const
{
const Int index = problem.addColumn();
if (bounded)
{
problem.setColumnBounds(index, 0, 1, LPWrapper::DOUBLE_BOUNDED);
}
else
{
problem.setColumnBounds(index, 0, 1, LPWrapper::UNBOUNDED);
}
problem.setColumnName(index, name);
if (variableType == VariableType::INTEGER)
{
problem.setColumnType(index, LPWrapper::INTEGER);
}
else if (variableType == VariableType::CONTINUOUS)
{
problem.setColumnType(index, LPWrapper::CONTINUOUS);
}
else
{
throw std::runtime_error("Variable type not supported\n");
}
problem.setObjective(index, obj);
return index;
}
  /// Thin wrapper around LPWrapper::addRow(): adds one linear constraint row
  /// over the given column @p indices with coefficients @p values, bounded by
  /// [@p lb, @p ub] with bound type @p param.
  void MRMFeatureSelector::addConstraint_(
    LPWrapper& problem,
    const std::vector<Int>& indices,
    const std::vector<double>& values,
    const String& name,
    const double lb,
    const double ub,
    const LPWrapper::Type param
  ) const
  {
    problem.addRow(indices, values, name, lb, ub, param);
  }
void MRMFeatureSelectorScore::optimize(
const std::vector<std::pair<double, String>>& time_to_name,
const std::map<String, std::vector<Feature>>& feature_name_map,
std::vector<String>& result,
const SelectorParameters& parameters
) const
{
result.clear();
std::set<String> variables;
LPWrapper problem;
problem.setObjectiveSense(LPWrapper::MIN);
for (const std::pair<double, String>& elem : time_to_name)
{
std::vector<Int> constraints;
for (const Feature& feature : feature_name_map.at(elem.second))
{
const String name1 = elem.second + "_" + String(feature.getUniqueId());
if (variables.count(name1) == 0)
{
const double score = computeScore_(feature, parameters.score_weights);
const Int col_idx = addVariable_(problem, name1, true, score, parameters.variable_type);
constraints.push_back(col_idx);
variables.insert(name1);
}
}
std::vector<double> constraints_values(constraints.size(), 1.0);
addConstraint_(problem, constraints, constraints_values, elem.second + "_constraint", 1.0, 1.0, LPWrapper::DOUBLE_BOUNDED);
}
LPWrapper::SolverParam param;
problem.solve(param);
for (Int c = 0; c < problem.getNumberOfColumns(); ++c)
{
if (problem.getColumnValue(c) >= parameters.optimal_threshold)
{
result.push_back(problem.getColumnName(c));
}
}
}
String MRMFeatureSelector::removeSpaces_(String str) const
{
String::iterator end_pos = std::remove(str.begin(), str.end(), ' ');
str.erase(end_pos, str.end());
return str;
}
  /**
    @brief QMIP selection: pick features by minimizing RT-deviation terms
    between neighboring components (linearized quadratic program).

    For every component (index cnt1) and each neighbor within nn_threshold
    positions, products of selection variables are linearized through
    auxiliary "QP" variables, and the absolute value of the weighted RT
    deviation (observed vs. expected RT difference) is modeled with "-ABS"
    variables that carry the objective. One "pick exactly one" constraint is
    added per component. Selected variable names are written to @p result.
  */
  void MRMFeatureSelectorQMIP::optimize(
    const std::vector<std::pair<double, String>>& time_to_name,
    const std::map<String, std::vector<Feature>>& feature_name_map,
    std::vector<String>& result,
    const SelectorParameters& parameters
  ) const
  {
    result.clear();
    std::set<String> variables; // names of columns already added to the problem
    LPWrapper problem;
    problem.setObjectiveSense(LPWrapper::MIN);
    for (Int cnt1 = 0; static_cast<Size>(cnt1) < time_to_name.size(); ++cnt1)
    {
      // Neighborhood [start_iter, stop_iter) of components considered for cnt1.
      const Size start_iter = std::max(cnt1 - parameters.nn_threshold, 0);
      const Size stop_iter = std::min(static_cast<Size>(cnt1 + parameters.nn_threshold + 1), time_to_name.size()); // assuming nn_threshold >= -1
      std::vector<Int> constraints;
      const std::vector<Feature>& feature_row1 = feature_name_map.at(time_to_name[cnt1].second);
      for (Size i = 0; i < feature_row1.size(); ++i)
      {
        // Selection variable for candidate i of component cnt1 (reused if it
        // was already created while processing an earlier neighbor).
        const String name1 = time_to_name[cnt1].second + "_" + String(feature_row1[i].getUniqueId());
        if (variables.count(name1) == 0)
        {
          constraints.push_back(addVariable_(problem, name1, true, 0, parameters.variable_type));
          variables.insert(name1);
        }
        else
        {
          constraints.push_back(problem.getColumnIndex(name1));
        }
        // Geometric-mean normalization of the multiplicative score when
        // several score weights are configured.
        double score_1 = computeScore_(feature_row1[i], parameters.score_weights);
        const Size n_score_weights = parameters.score_weights.size();
        if (n_score_weights > 1)
        {
          score_1 = std::pow(score_1, 1.0 / n_score_weights);
        }
        const Int index1 = problem.getColumnIndex(name1);
        for (Size cnt2 = start_iter; cnt2 < stop_iter; ++cnt2)
        {
          if (static_cast<Size>(cnt1) == cnt2)
          {
            continue;
          }
          const std::vector<Feature>& feature_row2 = feature_name_map.at(time_to_name[cnt2].second);
          // Closer neighbors get higher weight (if locality weighting is on).
          // NOTE(review): the distance term uses (start_iter + cnt2) - cnt1
          // although cnt2 already runs over absolute indices in
          // [start_iter, stop_iter) — verify this offset is intentional.
          const double locality_weight = parameters.locality_weight
            ? 1.0 / (parameters.nn_threshold - std::abs(static_cast<Int>(start_iter + cnt2) - cnt1) + 1)
            : 1.0;
          // Expected RT difference between the two components (from assay RTs).
          const double tr_delta_expected = time_to_name[cnt1].first - time_to_name[cnt2].first;
          for (Size j = 0; j < feature_row2.size(); ++j)
          {
            const String name2 = time_to_name[cnt2].second + "_" + String(feature_row2[j].getUniqueId());
            if (variables.count(name2) == 0)
            {
              addVariable_(problem, name2, true, 0, parameters.variable_type);
              variables.insert(name2);
            }
            // Auxiliary variables: var_qp linearizes the product of the two
            // selection variables; var_abs models |weighted RT deviation| and
            // carries objective coefficient 1.
            const String var_qp_name = time_to_name[cnt1].second + "_" + String(i) + "-" + time_to_name[cnt2].second + "_" + String(j);
            const Int index_var_qp = addVariable_(problem, var_qp_name, true, 0, VariableType::CONTINUOUS);
            const Int index_var_abs = addVariable_(problem, var_qp_name + "-ABS", false, 1, VariableType::CONTINUOUS);
            const Int index2 = problem.getColumnIndex(name2);
            double score_2 = computeScore_(feature_row2[j], parameters.score_weights);
            if (n_score_weights > 1)
            {
              score_2 = std::pow(score_2, 1.0 / n_score_weights);
            }
            // Observed RT difference of the two candidate features.
            const double tr_delta = feature_row1[i].getRT() - feature_row2[j].getRT();
            const double score = locality_weight * score_1 * score_2 * (tr_delta - tr_delta_expected);
            // Linearization: var_qp <= x1, var_qp <= x2, var_qp >= x1 + x2 - 1.
            addConstraint_(problem, {index1, index_var_qp}, {1.0, -1.0}, var_qp_name + "-QP1", 0.0, 1.0, LPWrapper::LOWER_BOUND_ONLY);
            addConstraint_(problem, {index2, index_var_qp}, {1.0, -1.0}, var_qp_name + "-QP2", 0.0, 1.0, LPWrapper::LOWER_BOUND_ONLY);
            addConstraint_(problem, {index1, index2, index_var_qp}, {1.0, 1.0, -1.0}, var_qp_name + "-QP3", 0.0, 1.0, LPWrapper::UPPER_BOUND_ONLY);
            // Absolute value: var_abs >= +score*var_qp and var_abs >= -score*var_qp.
            std::vector<Int> indices_abs = {index_var_abs, index_var_qp};
            addConstraint_(problem, indices_abs, {-1.0, score}, var_qp_name + "-obj+", -1.0, 0.0, LPWrapper::UPPER_BOUND_ONLY);
            addConstraint_(problem, indices_abs, {-1.0, -score}, var_qp_name + "-obj-", -1.0, 0.0, LPWrapper::UPPER_BOUND_ONLY);
          }
        }
      }
      // Exactly one candidate per component: sum of selection variables == 1.
      std::vector<double> constraints_values(constraints.size(), 1.0);
      addConstraint_(problem, constraints, constraints_values, time_to_name[cnt1].second + "_constraint", 1.0, 1.0, LPWrapper::DOUBLE_BOUNDED);
      // addConstraint_(problem, constraints, constraints_values, time_to_name[cnt1].second + "_constraint", 1.0, 1.0, LPWrapper::FIXED); // glpk
    }
    LPWrapper::SolverParam param;
    problem.solve(param);
    // Report only genuine selection variables (not the auxiliary QP/ABS ones)
    // whose solution value exceeds the threshold.
    for (Int c = 0; c < problem.getNumberOfColumns(); ++c)
    {
      const String name = problem.getColumnName(c);
      if (problem.getColumnValue(c) > parameters.optimal_threshold && variables.count(name))
      {
        result.push_back(name);
      }
    }
  }
void MRMFeatureSelector::constructTargTransList_(
const FeatureMap& features,
std::vector<std::pair<double, String>>& time_to_name,
std::map<String, std::vector<Feature>>& feature_name_map,
const bool select_transition_group
) const
{
time_to_name.clear();
feature_name_map.clear();
std::set<String> names;
for (const Feature& feature : features)
{
const String component_group_name = removeSpaces_(feature.getMetaValue("PeptideRef").toString());
const double assay_retention_time = feature.getMetaValue("assay_rt");
if (names.count(component_group_name) == 0)
{
time_to_name.emplace_back(assay_retention_time, component_group_name);
names.insert(component_group_name);
}
if (feature_name_map.count(component_group_name) == 0)
{
feature_name_map[component_group_name] = std::vector<Feature>();
}
feature_name_map[component_group_name].push_back(feature);
if (select_transition_group)
{
continue;
}
for (const Feature& subordinate : feature.getSubordinates())
{
const String component_name = removeSpaces_(subordinate.getMetaValue("native_id").toString());
if (names.count(component_name))
{
time_to_name.emplace_back(assay_retention_time, component_name);
names.insert(component_name);
}
if (feature_name_map.count(component_name) == 0)
{
feature_name_map[component_name] = std::vector<Feature>();
}
feature_name_map[component_name].push_back(subordinate);
}
}
}
void MRMFeatureSelector::selectMRMFeature(
const FeatureMap& features,
FeatureMap& selected_filtered,
const SelectorParameters& parameters
) const
{
selected_filtered.clear();
if (features.empty())
{
return;
}
std::vector<std::pair<double, String>> time_to_name;
std::map<String, std::vector<Feature>> feature_name_map;
constructTargTransList_(features, time_to_name, feature_name_map, parameters.select_transition_group);
sort(time_to_name.begin(), time_to_name.end());
Int window_length = parameters.segment_window_length;
Int step_length = parameters.segment_step_length;
if (window_length == -1 && step_length == -1)
{
window_length = step_length = time_to_name.size();
}
Size n_segments = time_to_name.size() / step_length;
if (time_to_name.size() % step_length)
{
++n_segments;
}
std::vector<String> result_names;
for (Size i = 0; i < n_segments; ++i)
{
const Size start = step_length * i;
const Size end = std::min(start + window_length, time_to_name.size());
const std::vector<std::pair<double, String>> time_slice(time_to_name.begin() + start, time_to_name.begin() + end);
std::vector<String> result;
optimize(time_slice, feature_name_map, result, parameters);
result_names.insert(result_names.end(), result.begin(), result.end());
}
const std::set<String> result_names_set(result_names.begin(), result_names.end());
for (const Feature& feature : features)
{
std::vector<Feature> subordinates_filtered;
for (const Feature& subordinate : feature.getSubordinates())
{
const String feature_name = parameters.select_transition_group
? removeSpaces_(feature.getMetaValue("PeptideRef").toString()) + "_" + String(feature.getUniqueId())
: removeSpaces_(subordinate.getMetaValue("native_id").toString()) + "_" + String(feature.getUniqueId());
if (result_names_set.count(feature_name))
{
subordinates_filtered.push_back(subordinate);
}
}
if (!subordinates_filtered.empty())
{
Feature feature_filtered(feature);
feature_filtered.setSubordinates(subordinates_filtered);
selected_filtered.push_back(feature_filtered);
}
}
}
double MRMFeatureSelector::computeScore_(const Feature& feature, const std::map<String, MRMFeatureSelector::LambdaScore>& score_weights) const
{
double score_1 = 1.0;
for (const std::pair<const String, LambdaScore>& score_weight : score_weights)
{
const String& metavalue_name = score_weight.first;
const LambdaScore lambda_score = score_weight.second;
if (!feature.metaValueExists(metavalue_name))
{
OPENMS_LOG_WARN << "computeScore_(): Metavalue \"" << metavalue_name << "\" not found.\n";
continue;
}
const double value = weightScore_(feature.getMetaValue(metavalue_name), lambda_score);
if (value > 0.0 && !std::isnan(value) && !std::isinf(value))
{
score_1 *= value;
}
}
return score_1;
}
double MRMFeatureSelector::weightScore_(const double score, const LambdaScore lambda_score) const
{
if (lambda_score == LambdaScore::LINEAR)
{
return score;
}
else if (lambda_score == LambdaScore::INVERSE)
{
return 1.0 / score;
}
else if (lambda_score == LambdaScore::LOG)
{
return std::log(score);
}
else if (lambda_score == LambdaScore::INVERSE_LOG)
{
return 1.0 / std::log(score);
}
else if (lambda_score == LambdaScore::INVERSE_LOG10)
{
return 1.0 / std::log10(score);
}
else
{
throw Exception::IllegalArgument(__FILE__, __LINE__, __FUNCTION__,
"`lambda_score`'s value is not handled by any current condition.");
}
}
}
| C++ |
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/MasstraceCorrelator.h>
#include <OpenMS/ANALYSIS/OPENSWATH/MRMScoring.h>
#include <OpenMS/OPENSWATHALGO/ALGO/Scoring.h>
#include <OpenMS/PROCESSING/NOISEESTIMATION/SignalToNoiseEstimatorMedian.h>
#include <OpenMS/MATH/StatisticFunctions.h>
// #define DEBUG_MASSTRACES
// #include <assert.h>
bool SortDoubleDoublePairFirst(const std::pair<double, double>& left,
const std::pair<double, double>& right);
namespace OpenMS
{
using namespace std;
using namespace OpenSwath;
  /// Constructor: registers the smoothing-related default parameters.
  /// NOTE(review): the parameter-handler name "MRMFeatureFinderScoring" looks
  /// copied from another class — confirm it is intentional.
  MasstraceCorrelator::MasstraceCorrelator() 
    : DefaultParamHandler("MRMFeatureFinderScoring"),
      ProgressLogger()
  {
    // Savitzky-Golay smoothing window (must be uneven; 1 is added otherwise).
    defaults_.setValue("sgolay_frame_length",15,"The number of subsequent data points used for smoothing.\nThis number has to be uneven. If it is not, 1 will be added.");
    defaults_.setValue("sgolay_polynomial_order",3,"Order or the polynomial that is fitted.");
    defaults_.setValue("gauss_width",50,"Gaussian width.");
    // write defaults into Param object param_
    defaultsToParam_();
  }
  /// Destructor (defaulted, defined out-of-line).
  MasstraceCorrelator::~MasstraceCorrelator() = default;
void MasstraceCorrelator::matchMassTraces_(
const MasstracePointsType& hull_points1,
const MasstracePointsType& hull_points2,
std::vector<double>& vec1, std::vector<double>& vec2, double mindiff, double padEnds)
{
Size k=0,m=0;
// If we do not pad the ends, we advance the longer array until the shorter one starts
if (!padEnds)
{
while (k<hull_points1.size() && m<hull_points2.size() )
{
if (fabs(hull_points1[k].first - hull_points2[m].first) < mindiff)
{
break;
}
else if (hull_points1[k].first > hull_points2[m].first )
{
m++;
}
else if (hull_points1[k].first < hull_points2[m].first )
{
k++;
}
}
}
while (k<hull_points1.size() && m<hull_points2.size() )
{
if (fabs(hull_points1[k].first - hull_points2[m].first) < mindiff)
{
vec1.push_back(hull_points1[k].second);
vec2.push_back(hull_points2[m].second);
m++; k++;
}
else if (hull_points1[k].first > hull_points2[m].first )
{
//need to advance m, assume that vector 1 is zero
vec1.push_back(0);
vec2.push_back(hull_points2[m].second);
m++;
}
else if (hull_points1[k].first < hull_points2[m].first )
{
//need to advance k, assume that vector 2 is zero
vec1.push_back(hull_points1[k].second);
vec2.push_back(0);
k++;
}
else
{
cout << "Error, cannot be here" << endl;
}
}
// If we do not pad the ends, we can return now
if (!padEnds) {return;}
// If one vector is not at the end, we need to copy the rest and fill up with
// zeros in the other.
while (k<hull_points1.size())
{
vec1.push_back(hull_points1[k].second);
vec2.push_back(0);
k++;
}
while (m<hull_points2.size())
{
vec1.push_back(0);
vec2.push_back(hull_points2[m].second);
m++;
}
}
void MasstraceCorrelator::scoreHullpoints(const MasstracePointsType& hull_points1, const MasstracePointsType& hull_points2,
int& lag, double& lag_intensity, double& pearson_score,
const double min_corr, const int /* max_lag */, const double mindiff)
{
std::vector<double> vec1;
std::vector<double> vec2;
matchMassTraces_(hull_points1, hull_points2, vec1, vec2, mindiff);
pearson_score = Math::pearsonCorrelationCoefficient(vec1.begin(), vec1.end(), vec2.begin(), vec2.end() );
// If the correlation is below the minimum level, we can already return at this point
if (pearson_score <= min_corr)
{
return;
}
Scoring::XCorrArrayType xcorr_array = Scoring::normalizedCrossCorrelation(vec1, vec2, vec1.size(), 1);
Scoring::XCorrArrayType::const_iterator pt = Scoring::xcorrArrayGetMaxPeak(xcorr_array);
lag = pt->first; // the lag / RT at the maximal Xcorr value =~ coelution score
lag_intensity = pt->second; // the intensity at the maximal Xcorr value =~ shape score
}
void MasstraceCorrelator::createConsensusMapCache(const ConsensusMap& map,
std::vector< MasstracePointsType >& feature_points,
std::vector< std::pair<double,double> >& max_intensities,
std::vector< double >& rt_cache)
{
startProgress(0, map.size(), "create consensus map cache");
for (Size i = 0; i < map.size(); ++i)
{
setProgress(i);
const ConsensusFeature::HandleSetType* f1_features = &map[i].getFeatures();
// get the points into a vector of pairs (RT, intensity)
MasstracePointsType f1_points;
for (ConsensusFeature::HandleSetType::iterator it = f1_features->begin(); it != f1_features->end(); ++it)
{
f1_points.push_back(std::make_pair(it->getRT(), it->getIntensity()));
}
std::sort(f1_points.begin(), f1_points.end(), SortDoubleDoublePairFirst);
feature_points.push_back(f1_points);
// find maximum intensity and store it
double max_int = 0, max_mz =0;
for (ConsensusFeature::HandleSetType::iterator it = f1_features->begin(); it != f1_features->end(); ++it)
{
if (it->getIntensity() > max_int)
{
max_int = it->getIntensity();
max_mz = it->getMZ();
}
}
max_intensities.emplace_back(max_mz, max_int);
rt_cache.push_back(map[i].getRT());
}
endProgress();
}
/// Groups co-eluting, correlated mass traces of a consensus map into MS2-level
/// "pseudo spectra". Each not-yet-used feature acts as a seed; all later
/// features whose RT apex is close enough and whose intensity profile
/// correlates with the seed are added as peaks of the seed's spectrum.
/// @param map input consensus map (one consensus feature per mass trace)
/// @param pseudo_spectra output container receiving the generated spectra
/// @param min_peak_nr spectra with size() <= min_peak_nr are discarded (strict)
/// @param min_correlation minimal Pearson correlation between two traces
/// @param max_lag maximal allowed cross-correlation lag (inclusive, symmetric)
/// @param max_rt_apex_difference maximal RT-apex distance between seed and candidate
void MasstraceCorrelator::createPseudoSpectra(const ConsensusMap& map,
    MSExperiment& pseudo_spectra,
    Size min_peak_nr, double min_correlation,
    int max_lag, double max_rt_apex_difference)
{
  // Parameters
  // double min_correlation = 0.7;
  // double max_lag = 1;
  // double max_rt_apex_difference = 3;
  // //double max_rt_apex_difference = 5000;
#ifdef DEBUG_MASSTRACES
  int opcounts = 0;
  int comparisons = 0;
  int nr_full_evals = 0;
  int nr_peaks_added = 0;
#endif
  Size j;
  double firstpoint, lastpoint, current_rt;
  double* rt_cache_ptr;
  int lag; double lag_intensity; double pearson_score;
  // cache datastructures (filled once, indexed in parallel with the map)
  std::vector< MasstracePointsType > feature_points;
  std::vector< std::pair<double,double> > max_intensities;
  std::vector< double > rt_cache;
  createConsensusMapCache(map, feature_points, max_intensities, rt_cache);
  // tracks features already assigned to a pseudo spectrum (value is unused)
  std::map<int, int> used_already;
  // go through all consensus features in the map and use
  startProgress(0, map.size(), "correlating masstraces ");
  for (Size i = 0; i < map.size(); ++i)
  {
    setProgress(i);
    // features already assigned to an earlier spectrum cannot seed a new one
    if (used_already.find(i) != used_already.end())
    {
      continue;
    }
    used_already[i] = 0;
    // Prepare a new pseudo spectrum with five parallel annotation arrays.
    // NOTE(review): the seed peak pushed below gets no entries in these float
    // data arrays, so each array has one element fewer than the peak list —
    // confirm downstream consumers expect this offset.
    MSSpectrum spectrum;
    spectrum.getFloatDataArrays().clear();
    spectrum.getFloatDataArrays().resize(5);
    spectrum.getFloatDataArrays()[0].setName("RT_apex");
    spectrum.getFloatDataArrays()[1].setName("RT_diff");
    spectrum.getFloatDataArrays()[2].setName("lag");
    spectrum.getFloatDataArrays()[3].setName("pearson_score");
    spectrum.getFloatDataArrays()[4].setName("lag_intensity");
    spectrum.setRT(rt_cache[i]);
    spectrum.setMSLevel(2);
    // create the first peak of this spectrum == seed peak (most intense handle)
    Peak1D peak;
    peak.setMZ(max_intensities[i].first);
    peak.setIntensity(max_intensities[i].second);
    spectrum.push_back(peak);
    // store the RT of the current feature and the first/last points of this feature
    firstpoint = feature_points[i].front().first;
    lastpoint = feature_points[i].back().first;
    current_rt = rt_cache[i];
    // raw pointer into rt_cache: pre-incremented below so it always tracks
    // &rt_cache[j]; stays in bounds because j < map.size() == rt_cache.size()
    rt_cache_ptr = &rt_cache[i];
    // go through all features with lower intensity in the map
    for (j = i+1; j < map.size(); ++j)
    {
      // If the center of this trace is outside the masstrace of the parent, ignore this pair.
      // If the difference between the rt_max of the two features is too large, ignore this pair.
      ++rt_cache_ptr;
      if ( fabs( (*rt_cache_ptr) - current_rt ) > max_rt_apex_difference) {continue;}
      if ( (*rt_cache_ptr) < firstpoint || (*rt_cache_ptr) > lastpoint ) {continue;}
      // We score the two vectors against each other in terms of several properties / scores
      scoreHullpoints(feature_points[i], feature_points[j], lag, lag_intensity, pearson_score, min_correlation, max_lag);
#ifdef DEBUG_MASSTRACES
      cout << j << ". Checking mass trace at RT: "<< map[j].getRT() << " m/z: " << map[j].getMZ()
           << " scores: [lag: " << lag << "] / [pearson: " << pearson_score << "]"<< endl;
      comparisons += 1;
      opcounts += feature_points[i].size() * feature_points[j].size();
      if (pearson_score > min_correlation)
      {
        nr_full_evals++;
      }
#endif
      // If all conditions are fulfilled, we add this feature as a peak. Note
      // that we need to check the pearson_score FIRST because the lag score is
      // only calculated if the pearson score is above the minimal value
      if (pearson_score > min_correlation && lag >= -max_lag && lag <= max_lag)
      {
        // mark this masstrace as used already, thus we cannot use it as a seed any more
        used_already[j] = 0;
#ifdef DEBUG_MASSTRACES
        nr_peaks_added++;
#endif
        Peak1D tmp_peak;
        tmp_peak.setMZ(max_intensities[j].first);
        tmp_peak.setIntensity(max_intensities[j].second);
        spectrum.push_back(tmp_peak);
        // annotate the added peak in the parallel float data arrays
        spectrum.getFloatDataArrays()[0].push_back(map[j].getRT());
        spectrum.getFloatDataArrays()[1].push_back(fabs(map[i].getRT() - map[j].getRT()));
        spectrum.getFloatDataArrays()[2].push_back(lag);
        spectrum.getFloatDataArrays()[3].push_back(pearson_score);
        spectrum.getFloatDataArrays()[4].push_back(lag_intensity);
      }
    }
    // keep the spectrum only if it exceeds the minimum peak count (strict '>')
    if (spectrum.size() > min_peak_nr)
    {
      pseudo_spectra.addSpectrum(spectrum);
#ifdef DEBUG_MASSTRACES
      cout << "Add spectrum " << i << " of size " << spectrum.size() << " at " << spectrum.getRT() << endl;
      cout << "===========================================================================" << endl;
#endif
    }
  }
  endProgress();
#ifdef DEBUG_MASSTRACES
  cout << "Nr operations " << opcounts << " / nr comparisons " << comparisons << " / full evaluations " << nr_full_evals << " :: nr spectra " << pseudo_spectra.size() << endl;
  cout << "Nr peaks added " << nr_peaks_added << " out of total " << map.size() << endl;
#endif
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/MRMAssay.cpp | .cpp | 88,219 | 2,209 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: George Rosenberger $
// $Authors: George Rosenberger $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/MRMAssay.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <boost/lexical_cast.hpp>
#include <map>
#include <regex>
#include <set>
#include <unordered_map>
#include <unordered_set>
using namespace std;
namespace OpenMS
{
namespace
{
  /// Sorts every FragmentSeqMap contained in an IonMapT ascending by fragment
  /// m/z (the first pair element). getMatchingPeptidoforms_ depends on this
  /// ordering for its binary search.
  void sortIonMap(MRMAssay::IonMapT& ion_map)
  {
    using MzSeqPair = std::pair<double, std::string>;
    for (auto& swath_entry : ion_map)
    {
      for (auto& seq_entry : swath_entry.second)
      {
        auto& fragments = seq_entry.second;
        std::sort(fragments.begin(), fragments.end(),
                  [](const MzSeqPair& lhs, const MzSeqPair& rhs) { return lhs.first < rhs.first; });
      }
    }
  }

  /// Sorts each IonSeries of a PeptideMapT and drops duplicate entries, so the
  /// later assay-generation passes can iterate without re-sorting.
  void sortAndDeduplicatePeptideMap(MRMAssay::PeptideMapT& peptide_map)
  {
    for (auto& map_entry : peptide_map)
    {
      auto& ion_series = map_entry.second;
      std::sort(ion_series.begin(), ion_series.end());
      auto duplicates_begin = std::unique(ion_series.begin(), ion_series.end());
      ion_series.erase(duplicates_begin, ion_series.end());
    }
  }
} // anonymous namespace
// MRMAssay holds no resources of its own; defaulted special members suffice.
MRMAssay::MRMAssay() = default;
MRMAssay::~MRMAssay() = default;
std::vector<std::string> MRMAssay::getMatchingPeptidoforms_(const double fragment_ion,
                                                            const FragmentSeqMap& ions,
                                                            const double mz_threshold)
{
  // Precondition: 'ions' is sorted ascending by m/z (see sortIonMap), which
  // allows locating the tolerance window with a binary search.
  const double window_low = fragment_ion - mz_threshold;
  const double window_high = fragment_ion + mz_threshold;

  auto current = std::lower_bound(ions.begin(), ions.end(), window_low,
      [](const std::pair<double, std::string>& entry, double value) { return entry.first < value; });

  // Collect every peptidoform whose fragment m/z falls inside the window.
  std::vector<std::string> isoforms;
  while (current != ions.end() && current->first <= window_high)
  {
    isoforms.push_back(current->second);
    ++current;
  }

  // Return a sorted, duplicate-free list of peptidoform identifiers.
  std::sort(isoforms.begin(), isoforms.end());
  isoforms.erase(std::unique(isoforms.begin(), isoforms.end()), isoforms.end());
  return isoforms;
}
int MRMAssay::getSwath_(const std::vector<std::pair<double, double> >& swathes, const double precursor_mz)
{
  if (swathes.empty())
  {
    return -1;
  }
  // Swaths are sorted by lower bound; find the first window whose lower bound
  // lies strictly above the precursor m/z. The containing window (if any) is
  // either this one or the one directly before it.
  auto above = std::upper_bound(swathes.begin(), swathes.end(), precursor_mz,
      [](double mz, const std::pair<double, double>& window) { return mz < window.first; });

  // In overlap regions the window with the HIGHER index wins — this mirrors
  // the "last match" semantics of the original linear scan.
  if (above != swathes.end() && precursor_mz >= above->first && precursor_mz <= above->second)
  {
    return static_cast<int>(above - swathes.begin());
  }
  if (above != swathes.begin())
  {
    auto below = above - 1;
    if (precursor_mz >= below->first && precursor_mz <= below->second)
    {
      return static_cast<int>(below - swathes.begin());
    }
  }
  return -1; // precursor falls outside every window
}
bool MRMAssay::isInSwath_(const std::vector<std::pair<double, double> >& swathes, const double precursor_mz, const double product_mz)
{
  const int swath_idx = getSwath_(swathes, precursor_mz);
  // Precursor outside every SWATH window: treat the transition as "in swath"
  // (callers use this to remove transitions not covered by any window).
  if (swath_idx < 0)
  {
    return true;
  }
  // Otherwise the product m/z must fall inside the precursor's window.
  const std::pair<double, double>& window = swathes[swath_idx];
  return (product_mz >= window.first) && (product_mz <= window.second);
}
/// Generates a random amino-acid sequence of the requested length, drawing
/// uniformly (modulo bias aside) from a fixed residue alphabet.
/// @param sequence_size length of the sequence to generate
/// @param pseudoRNG random generator (taken by value; it wraps a reference to
///        the underlying engine, so the engine state still advances)
/// @return random peptide sequence string
/// NOTE(review): K, R and P are absent from the alphabet — presumably
/// intentional (e.g. to avoid introducing new cleavage sites in decoys);
/// confirm before extending the list.
std::string MRMAssay::getRandomSequence_(size_t sequence_size, boost::variate_generator<boost::mt19937&, boost::uniform_int<> >
                                         pseudoRNG)
{
  static const char aa[] =
  {
    'A', 'N', 'D', 'C', 'E', 'Q', 'G', 'H', 'I', 'L', 'M', 'F', 'S', 'T', 'W',
    'Y', 'V'
  };
  // Derive the alphabet size from the array itself instead of duplicating it
  // in a hard-coded constant (previously a magic "17").
  const size_t aa_size = sizeof(aa) / sizeof(aa[0]);
  std::string peptide_sequence;
  peptide_sequence.reserve(sequence_size); // single allocation
  for (size_t i = 0; i < sequence_size; ++i)
  {
    peptide_sequence += aa[pseudoRNG() % aa_size];
  }
  return peptide_sequence;
}
std::vector<std::vector<size_t> > MRMAssay::nchoosekcombinations_(const std::vector<size_t>& n, size_t k)
{
  // Enumerate all k-element subsets of 'n' by permuting a selection mask:
  // k leading 1-bytes followed by (n.size() - k) 0-bytes. prev_permutation
  // walks through every distinct arrangement of that mask exactly once.
  std::string selector(k, 1);
  selector.resize(n.size(), 0);

  std::vector<std::vector<size_t> > combinations;
  do
  {
    std::vector<size_t> chosen;
    for (size_t idx = 0; idx < n.size(); ++idx)
    {
      if (selector[idx])
      {
        chosen.push_back(n[idx]);
      }
    }
    combinations.push_back(std::move(chosen));
  }
  while (std::prev_permutation(selector.begin(), selector.end()));

  return combinations;
}
/// Applies a modification to all given placement combinations of all given
/// sequences, producing the resulting peptidoforms. Position 0 encodes the
/// N-terminus, position size()+1 the C-terminus, everything else a 1-based
/// residue position. Combinations that would place the modification on an
/// already-modified residue or on an invalid terminus are dropped.
/// Bug fixes vs. previous version: the failure branches dereferenced
/// begin() of a potentially EMPTY terminal-modification set (undefined
/// behavior), and the N-term debug message reported the last residue instead
/// of the first.
std::vector<OpenMS::AASequence> MRMAssay::addModificationsSequences_(const std::vector<OpenMS::AASequence>& sequences, const std::vector<std::vector<size_t> >& mods_combs, const OpenMS::String& modification)
{
  std::vector<OpenMS::AASequence> modified_sequences;
  OpenMS::ModificationsDB* ptr = ModificationsDB::getInstance();
  // Terminal-specific variants of the requested modification; either set may
  // be empty when the modification has no N-/C-terminal specificity.
  std::set<const ResidueModification*> modifiable_nterm;
  ptr->searchModifications(modifiable_nterm, modification, "", ResidueModification::N_TERM);
  std::set<const ResidueModification*> modifiable_cterm;
  ptr->searchModifications(modifiable_cterm, modification, "", ResidueModification::C_TERM);
  for (std::vector<OpenMS::AASequence>::const_iterator sq_it = sequences.begin(); sq_it != sequences.end(); ++sq_it)
  {
    for (std::vector<std::vector<size_t> >::const_iterator mc_it = mods_combs.begin(); mc_it != mods_combs.end(); ++mc_it)
    {
      bool multi_mod_switch = false;     // a target position was already modified
      bool skip_invalid_mod_seq = false; // a terminal placement was invalid
      OpenMS::AASequence temp_sequence = *sq_it;
      for (std::vector<size_t>::const_iterator pos_it = mc_it->begin(); pos_it != mc_it->end(); ++pos_it)
      {
        if (*pos_it == 0) // N-terminus
        {
          // Check first to make sure the first residue is N-term modifiable
          if ( !modifiable_nterm.empty() && (temp_sequence[0].getOneLetterCode() == OpenMS::String((*modifiable_nterm.begin())->getOrigin()) || (*modifiable_nterm.begin())->getOrigin() == 'X') )
          {
            temp_sequence.setNTerminalModification(modification);
          }
          else
          {
            // Only dereference the set when it is non-empty (bug fix), and
            // report the FIRST (N-terminal) residue (bug fix).
            if (!modifiable_nterm.empty())
            {
              OPENMS_LOG_DEBUG << "[addModificationsSequences_] Skipping addition of N-Term " << OpenMS::String((*modifiable_nterm.begin())->getId()) <<
                " to first residue (" << temp_sequence[0].getOneLetterCode() << ") of peptide " << temp_sequence.toUniModString() <<
                " , because it does not match viable N-Term residue specificity (" <<
                OpenMS::String((*modifiable_nterm.begin())->getOrigin()) << ") in ModificationDB.\n";
            }
            else
            {
              OPENMS_LOG_DEBUG << "[addModificationsSequences_] Skipping addition of N-Term modification to peptide " <<
                temp_sequence.toUniModString() << " , because no N-terminal variant exists in ModificationDB.\n";
            }
            skip_invalid_mod_seq = true;
          }
        }
        else if (*pos_it == temp_sequence.size() + 1) // C-terminus
        {
          // Check first to make sure ending residue is CTerm modifiable
          if ( !modifiable_cterm.empty() && (temp_sequence.toUnmodifiedString().back() == (*modifiable_cterm.begin())->getOrigin() || (*modifiable_cterm.begin())->getOrigin() == 'X') )
          {
            temp_sequence.setCTerminalModification(modification);
          }
          else
          {
            // Only dereference the set when it is non-empty (bug fix).
            if (!modifiable_cterm.empty())
            {
              OPENMS_LOG_DEBUG << "[addModificationsSequences_] Skipping addition of C-Term " << OpenMS::String((*modifiable_cterm.begin())->getId()) <<
                " to last residue (" << temp_sequence.toUnmodifiedString().back() << ") of peptide " << temp_sequence.toUniModString() <<
                " , because it does not match viable C-Term residue specificity (" <<
                OpenMS::String((*modifiable_cterm.begin())->getOrigin()) << ") in ModificationDB.\n";
            }
            else
            {
              OPENMS_LOG_DEBUG << "[addModificationsSequences_] Skipping addition of C-Term modification to peptide " <<
                temp_sequence.toUniModString() << " , because no C-terminal variant exists in ModificationDB.\n";
            }
            skip_invalid_mod_seq = true;
          }
        }
        else // internal residue (positions are 1-based here)
        {
          if (!temp_sequence[*pos_it - 1].isModified())
          {
            temp_sequence.setModification(*pos_it - 1, modification);
          }
          else
          {
            // residue already modified: this combination is invalid
            multi_mod_switch = true;
          }
        }
      }
      if (skip_invalid_mod_seq) { continue; }
      if (!multi_mod_switch) { modified_sequences.push_back(temp_sequence); }
    }
  }
  return modified_sequences;
}
std::vector<OpenMS::AASequence> MRMAssay::generateTheoreticalPeptidoforms_(const OpenMS::AASequence& sequence)
{
  OpenMS::ModificationsDB* mod_db = ModificationsDB::getInstance();

  // Count how often each modification occurs on the input peptidoform,
  // including terminal modifications.
  std::map<OpenMS::String, size_t> mod_counts;
  if (sequence.hasNTerminalModification())
  {
    mod_counts[sequence.getNTerminalModificationName()] += 1;
  }
  if (sequence.hasCTerminalModification())
  {
    mod_counts[sequence.getCTerminalModificationName()] += 1;
  }
  for (size_t pos = 0; pos < sequence.size(); ++pos)
  {
    if (sequence[pos].isModified())
    {
      mod_counts[sequence.getResidue(pos).getModificationName()] += 1;
    }
  }

  // Start from the unmodified backbone and, for each modification, expand the
  // current set of sequences by all (n choose k) valid placements.
  std::vector<OpenMS::AASequence> sequences = {AASequence::fromString(sequence.toUnmodifiedString())};
  for (const auto& mod_count : mod_counts)
  {
    // Candidate placement positions: 0 = N-term, size()+1 = C-term,
    // 1..size() = internal residues compatible with the modification.
    std::vector<size_t> candidate_positions;
    std::set<const ResidueModification*> nterm_hits;
    mod_db->searchModifications(nterm_hits, mod_count.first, "", ResidueModification::N_TERM);
    if (!nterm_hits.empty())
    {
      candidate_positions.push_back(0);
    }
    std::set<const ResidueModification*> cterm_hits;
    mod_db->searchModifications(cterm_hits, mod_count.first, "", ResidueModification::C_TERM);
    if (!cterm_hits.empty())
    {
      candidate_positions.push_back(sequence.size() + 1);
    }
    for (size_t pos = 0; pos < sequence.size(); ++pos)
    {
      std::set<const ResidueModification*> residue_hits;
      mod_db->searchModifications(residue_hits, mod_count.first, sequence.getResidue(pos).getOneLetterCode(), ResidueModification::ANYWHERE);
      if (!residue_hits.empty())
      {
        candidate_positions.push_back(pos + 1);
      }
    }
    std::vector<std::vector<size_t> > placements = nchoosekcombinations_(candidate_positions, mod_count.second);
    sequences = addModificationsSequences_(sequences, placements, mod_count.first);
  }
  return sequences;
}
std::vector<OpenMS::AASequence> MRMAssay::generateTheoreticalPeptidoformsDecoy_(const OpenMS::AASequence& sequence, const OpenMS::AASequence& decoy_sequence)
{
  OpenMS::ModificationsDB* mod_db = ModificationsDB::getInstance();

  // Count the modifications present on the TARGET sequence; they will be
  // re-placed onto the (unmodified) decoy backbone below.
  std::map<OpenMS::String, size_t> mod_counts;
  if (sequence.hasNTerminalModification())
  {
    mod_counts[sequence.getNTerminalModificationName()] += 1;
  }
  if (sequence.hasCTerminalModification())
  {
    mod_counts[sequence.getCTerminalModificationName()] += 1;
  }
  for (size_t pos = 0; pos < sequence.size(); ++pos)
  {
    if (sequence[pos].isModified())
    {
      mod_counts[sequence.getResidue(pos).getModificationName()] += 1;
    }
  }

  // Start from the unmodified decoy backbone; placement validity is inferred
  // from the TARGET residues but applied to the decoy sequence.
  std::vector<OpenMS::AASequence> decoy_sequences;
  decoy_sequences.push_back(AASequence::fromString(decoy_sequence.toUnmodifiedString()));
  for (const auto& mod_count : mod_counts)
  {
    // Candidate positions: 0 = N-term, size()+1 = C-term, 1..size() = residues.
    std::vector<size_t> candidate_positions;
    std::set<const ResidueModification*> nterm_hits;
    mod_db->searchModifications(nterm_hits, mod_count.first, "", ResidueModification::N_TERM);
    if (!nterm_hits.empty())
    {
      candidate_positions.push_back(0);
    }
    std::set<const ResidueModification*> cterm_hits;
    mod_db->searchModifications(cterm_hits, mod_count.first, "", ResidueModification::C_TERM);
    if (!cterm_hits.empty())
    {
      candidate_positions.push_back(sequence.size() + 1);
    }
    for (size_t pos = 0; pos < sequence.size(); ++pos)
    {
      std::set<const ResidueModification*> residue_hits;
      mod_db->searchModifications(residue_hits, mod_count.first, sequence.getResidue(pos).getOneLetterCode(), ResidueModification::ANYWHERE);
      if (!residue_hits.empty())
      {
        candidate_positions.push_back(pos + 1);
      }
    }
    std::vector<std::vector<size_t> > placements = nchoosekcombinations_(candidate_positions, mod_count.second);
    decoy_sequences = addModificationsSequences_(decoy_sequences, placements, mod_count.first);
  }
  return decoy_sequences;
}
/// Step 1 of UIS assay generation: for every target peptide, enumerate all
/// alternative peptidoform localizations and their theoretical transitions,
/// filling three indices:
/// - TargetSequenceMap: swath -> unmodified sequence -> set of peptidoform strings
/// - TargetIonMap: swath -> unmodified sequence -> (fragment m/z, peptidoform) pairs
/// - TargetPeptideMap: peptide id -> (ion annotation, fragment m/z) pairs
void MRMAssay::generateTargetInSilicoMap_(const OpenMS::TargetedExperiment& exp,
                                          const std::vector<String>& fragment_types,
                                          const std::vector<size_t>& fragment_charges,
                                          bool enable_specific_losses,
                                          bool enable_unspecific_losses,
                                          bool enable_ms2_precursors,
                                          const std::vector<std::pair<double, double> >& swathes,
                                          int round_decPow,
                                          size_t max_num_alternative_localizations,
                                          SequenceMapT & TargetSequenceMap,
                                          IonMapT & TargetIonMap,
                                          PeptideMapT& TargetPeptideMap)
{
  OpenMS::MRMIonSeries mrmis;
  // Step 1: Generate target in silico peptide map containing theoretical transitions
  Size progress = 0;
  startProgress(0, exp.getPeptides().size(), "Generation of target in silico peptide map");
  for (size_t i = 0; i < exp.getPeptides().size(); ++i)
  {
    setProgress(progress++);
    TargetedExperiment::Peptide peptide = exp.getPeptides()[i];
    OpenMS::AASequence peptide_sequence = TargetedExperimentHelper::getAASequence(peptide);
    // default to singly charged precursors when no charge is annotated
    int precursor_charge = 1;
    if (peptide.hasCharge())
    {
      precursor_charge = peptide.getChargeState();
    }
    double precursor_mz = peptide_sequence.getMZ(precursor_charge);
    int precursor_swath = getSwath_(swathes, precursor_mz);
    // Compute all alternative peptidoforms compatible with ModificationsDB
    const vector<AASequence> alternative_peptide_sequences = generateTheoreticalPeptidoforms_(peptide_sequence);
    // Some permutations might be too complex, skip if threshold is reached
    if (alternative_peptide_sequences.size() > max_num_alternative_localizations)
    {
      OPENMS_LOG_DEBUG << "[uis] Peptide skipped (too many permutations possible): " << peptide.id << '\n';
      continue;
    }
    // Iterate over all peptidoforms
    for (const auto& alt_aa : alternative_peptide_sequences)
    {
      // Cache string representations to avoid repeated conversions
      const String unmod_str = alt_aa.toUnmodifiedString();
      const String mod_str = alt_aa.toString();
      // Append peptidoform to index
      TargetSequenceMap[precursor_swath][unmod_str].insert(mod_str);
      // Generate theoretical ion series
      auto ionseries = mrmis.getIonSeries(alt_aa, precursor_charge,
          fragment_types, fragment_charges, enable_specific_losses,
          enable_unspecific_losses);
      if (enable_ms2_precursors)
      {
        // Add precursor to theoretical transitions
        double prec_mz = Math::roundDecimal(precursor_mz, round_decPow);
        TargetIonMap[precursor_swath][unmod_str].emplace_back(prec_mz, mod_str);
        TargetPeptideMap[peptide.id].emplace_back("MS2_Precursor_i0", prec_mz);
      }
      // Iterate over all theoretical transitions
      for (const auto& im_it : ionseries)
      {
        // Append transition to indices to find interfering transitions
        double fragment_mz = Math::roundDecimal(im_it.second, round_decPow);
        TargetIonMap[precursor_swath][unmod_str].emplace_back(fragment_mz, mod_str);
        TargetPeptideMap[peptide.id].emplace_back(im_it.first, fragment_mz);
      }
    }
  }
  endProgress();
}
/// Step 2a of UIS assay generation: derives one decoy sequence per unmodified
/// target sequence. The decoy starts as a random sequence of the same length;
/// residues carrying (terminal) modifications in any target peptidoform are
/// then copied over from the target, so the decoy stays modifiable at the same
/// positions.
/// @param shuffle_seed RNG seed; -1 seeds from the current time
void MRMAssay::generateDecoySequences_(const SequenceMapT& TargetSequenceMap,
                                       std::map<String, String>& DecoySequenceMap, int shuffle_seed)
{
  // Step 2a: Generate decoy sequences that share peptidoform properties with targets
  if (shuffle_seed == -1)
  {
    shuffle_seed = time(nullptr);
  }
  boost::mt19937 generator(shuffle_seed);
  boost::uniform_int<> uni_dist;
  boost::variate_generator<boost::mt19937&, boost::uniform_int<> > pseudoRNG(generator, uni_dist);
  Size progress = 0;
  startProgress(0, TargetSequenceMap.size(), "Target-decoy mapping");
  std::string decoy_peptide_string;
  // Iterate over swathes
  for (const auto& sm_it : TargetSequenceMap)
  {
    setProgress(progress++);
    // Iterate over each unmodified peptide sequence in current SWATH
    for (const auto& ta_it : sm_it.second)
    {
      // Get a random unmodified peptide sequence as base for later modification;
      // reuse the existing decoy if the target sequence was seen before
      if (DecoySequenceMap[ta_it.first].empty())
      {
        decoy_peptide_string = getRandomSequence_(ta_it.first.size(), pseudoRNG);
      }
      else
      {
        decoy_peptide_string = DecoySequenceMap[ta_it.first];
      }
      // Iterate over all target peptidoforms for the current unmodified
      // peptide sequence and replace decoy residues with modified target
      // residues
      // NOTE(review): residue replacement indexes the TARGET sequence with
      // decoy-string positions — assumes target and decoy have equal length
      // (true for decoys built by getRandomSequence_ above); confirm for
      // pre-populated DecoySequenceMap entries.
      for (const auto & se_it : ta_it.second)
      {
        OpenMS::AASequence seq = AASequence::fromString(se_it);
        if (seq.hasNTerminalModification())
        {
          decoy_peptide_string = decoy_peptide_string.replace(0, 1, seq.getSubsequence(0, 1).toUnmodifiedString());
        }
        if (seq.hasCTerminalModification())
        {
          decoy_peptide_string = decoy_peptide_string.replace(decoy_peptide_string.size() - 1, 1, seq.getSubsequence(decoy_peptide_string.size() - 1, 1).toUnmodifiedString());
        }
        for (size_t i = 0; i < seq.size(); ++i)
        {
          if (seq[i].isModified())
          {
            decoy_peptide_string = decoy_peptide_string.replace(i, 1, seq.getSubsequence(i, 1).toUnmodifiedString());
          }
        }
        DecoySequenceMap[ta_it.first] = decoy_peptide_string;
      }
    }
  }
  endProgress();
}
/// Step 2b of UIS assay generation: for every target peptide that survived
/// step 1, build the corresponding decoy peptide (same properties, decoy
/// sequence from DecoySequenceMap) and index its theoretical transitions in
/// DecoyIonMap / DecoyPeptideMap. TargetDecoyMap records the target-id ->
/// decoy-peptide association.
void MRMAssay::generateDecoyInSilicoMap_(const OpenMS::TargetedExperiment& exp,
                                         const std::vector<String>& fragment_types,
                                         const std::vector<size_t>& fragment_charges,
                                         bool enable_specific_losses,
                                         bool enable_unspecific_losses,
                                         bool enable_ms2_precursors,
                                         const std::vector<std::pair<double, double> >& swathes,
                                         int round_decPow,
                                         TargetDecoyMapT& TargetDecoyMap,
                                         PeptideMapT& TargetPeptideMap,
                                         std::map<String, String>& DecoySequenceMap,
                                         IonMapT & DecoyIonMap,
                                         PeptideMapT& DecoyPeptideMap)
{
  MRMIonSeries mrmis;
  // Step 2b: Generate decoy in silico peptide map containing theoretical transitions
  Size progress = 0;
  startProgress(0, exp.getPeptides().size(), "Generation of decoy in silico peptide map");
  for (size_t i = 0; i < exp.getPeptides().size(); ++i)
  {
    setProgress(progress++);
    TargetedExperiment::Peptide peptide = exp.getPeptides()[i];
    // default to singly charged precursors when no charge is annotated
    int precursor_charge = 1;
    if (peptide.hasCharge())
    {
      precursor_charge = peptide.getChargeState();
    }
    // Skip if target peptide is not in map, e.g. permutation threshold was reached
    if (TargetPeptideMap.find(peptide.id) == TargetPeptideMap.end())
    {
      continue;
    }
    OpenMS::AASequence peptide_sequence = TargetedExperimentHelper::getAASequence(peptide);
    // Note: the decoy is indexed by the TARGET precursor m/z / swath
    double precursor_mz = peptide_sequence.getMZ(precursor_charge);
    int precursor_swath = getSwath_(swathes, precursor_mz);
    // Copy properties of target peptide to decoy and get sequence from map;
    // the decoy keeps the target's id, so peptide.id and decoy_peptide.id
    // below address the same DecoyPeptideMap entry
    TargetedExperiment::Peptide decoy_peptide = peptide;
    decoy_peptide.sequence = DecoySequenceMap[peptide.sequence];
    TargetDecoyMap[peptide.id] = decoy_peptide;
    OpenMS::AASequence decoy_peptide_sequence = TargetedExperimentHelper::getAASequence(decoy_peptide);
    // Compute all alternative peptidoforms compatible with ModificationsDB
    // Infers residue specificity from target sequence but is applied to decoy sequence
    const vector<AASequence> alternative_decoy_peptide_sequences = generateTheoreticalPeptidoformsDecoy_(peptide_sequence, decoy_peptide_sequence);
    // Iterate over all peptidoforms
    for (const auto& alt_aa : alternative_decoy_peptide_sequences)
    {
      // Cache string representations to avoid repeated conversions
      const String unmod_str = alt_aa.toUnmodifiedString();
      const String mod_str = alt_aa.toString();
      // Generate theoretical ion series
      MRMIonSeries::IonSeries ionseries = mrmis.getIonSeries(alt_aa, precursor_charge, // use same charge state as target
                                                             fragment_types, fragment_charges, enable_specific_losses, enable_unspecific_losses);
      if (enable_ms2_precursors)
      {
        // Add precursor to theoretical transitions
        double prec_mz = Math::roundDecimal(precursor_mz, round_decPow);
        DecoyIonMap[precursor_swath][unmod_str].emplace_back(prec_mz, mod_str);
        DecoyPeptideMap[peptide.id].emplace_back("MS2_Precursor_i0", prec_mz);
      }
      // Iterate over all theoretical transitions
      for (const auto& im_it : ionseries)
      {
        // Append transition to indices to find interfering transitions
        double fragment_mz = Math::roundDecimal(im_it.second, round_decPow);
        DecoyIonMap[precursor_swath][unmod_str].emplace_back(fragment_mz, mod_str);
        DecoyPeptideMap[decoy_peptide.id].emplace_back(im_it.first, fragment_mz);
      }
    }
  }
  endProgress();
}
/// Step 3 of UIS assay generation: turns every indexed target transition into
/// a ReactionMonitoringTransition, annotated with the set of peptidoforms it
/// maps to (within mz_threshold). Transitions matching no peptidoform are
/// dropped.
void MRMAssay::generateTargetAssays_(const OpenMS::TargetedExperiment& exp,
                                     TransitionVectorType& transitions,
                                     double mz_threshold,
                                     const std::vector<std::pair<double, double> >& swathes,
                                     int round_decPow,
                                     const PeptideMapT& TargetPeptideMap,
                                     const IonMapT & TargetIonMap)
{
  MRMIonSeries mrmis;
  // Step 3: Generate target identification transitions
  Size progress = 0;
  startProgress(0, TargetPeptideMap.size(), "Generation of target identification transitions");
  // Iterate over all target peptides
  int transition_index = 0; // running index, used only for unique transition names
  for (const auto& pep_it : TargetPeptideMap)
  {
    setProgress(progress++);
    TargetedExperiment::Peptide peptide = exp.getPeptideByRef(pep_it.first);
    // default to singly charged precursors when no charge is annotated
    int precursor_charge = 1;
    if (peptide.hasCharge())
    {
      precursor_charge = peptide.getChargeState();
    }
    AASequence peptide_sequence = TargetedExperimentHelper::getAASequence(peptide);
    int target_precursor_swath = getSwath_(swathes, peptide_sequence.getMZ(precursor_charge));
    // Iterate over all transitions (already sorted and deduplicated by sortAndDeduplicatePeptideMap)
    for (const auto& tr : pep_it.second)
    {
      // Compute the set of peptidoforms mapping to this transition
      vector<string> isoforms = getMatchingPeptidoforms_(
          tr.second, TargetIonMap.at(target_precursor_swath).at(peptide_sequence.toUnmodifiedString()), mz_threshold);
      // Check that transition maps to at least one peptidoform
      if (!isoforms.empty())
      {
        ReactionMonitoringTransition trn;
        trn.setDetectingTransition(false);
        trn.setMetaValue("insilico_transition", "true");
        trn.setPrecursorMZ(Math::roundDecimal(peptide_sequence.getMZ(precursor_charge), round_decPow));
        trn.setProductMZ(tr.second);
        trn.setPeptideRef(peptide.id);
        mrmis.annotateTransitionCV(trn, tr.first);
        trn.setIdentifyingTransition(true);
        trn.setQuantifyingTransition(false);
        // Set transition name containing mapping to peptidoforms with potential peptidoforms enumerated in brackets
        String identifier = String(transition_index) + "_" + String("UIS") + \
          "_{" + ListUtils::concatenate(isoforms, "|") + "}_" + \
          String(trn.getPrecursorMZ()) + "_" + String(trn.getProductMZ()) + "_" +
          String(peptide.getRetentionTime()) + "_" + tr.first;
        trn.setName(identifier);
        trn.setNativeID(identifier);
        trn.setMetaValue("Peptidoforms", ListUtils::concatenate(isoforms, "|"));
        OPENMS_LOG_DEBUG << "[uis] Transition " << trn.getNativeID() << '\n';
        // Append transition
        transitions.push_back(trn);
      }
      // incremented for every indexed transition, kept or dropped
      transition_index++;
    }
    OPENMS_LOG_DEBUG << "[uis] Peptide " << peptide.id << '\n';
  }
  endProgress();
}
/// Step 4 of UIS assay generation: builds decoy identification transitions
/// analogous to generateTargetAssays_, but additionally drops any decoy
/// transition whose m/z also matches a target peptidoform (overlap check
/// against TargetIonMap). Precursor m/z is taken from the TARGET sequence.
void MRMAssay::generateDecoyAssays_(const OpenMS::TargetedExperiment& exp,
                                    TransitionVectorType& transitions,
                                    double mz_threshold,
                                    const std::vector<std::pair<double, double> >& swathes,
                                    int round_decPow,
                                    const PeptideMapT& DecoyPeptideMap,
                                    std::map<String, TargetedExperiment::Peptide>& TargetDecoyMap,
                                    const IonMapT& DecoyIonMap,
                                    const IonMapT& TargetIonMap)
{
  MRMIonSeries mrmis;
  // Step 4: Generate decoy identification transitions
  Size progress = 0;
  startProgress(0, DecoyPeptideMap.size(), "Generation of decoy identification transitions");
  // Iterate over all decoy peptides
  int transition_index = 0; // running index, used only for unique transition names
  for (const auto & decoy_pep_it : DecoyPeptideMap)
  {
    setProgress(progress++);
    const TargetedExperiment::Peptide& target_peptide = exp.getPeptideByRef(decoy_pep_it.first);
    // default to singly charged precursors when no charge is annotated
    int precursor_charge = 1;
    if (target_peptide.hasCharge())
    {
      precursor_charge = target_peptide.getChargeState();
    }
    AASequence target_peptide_sequence = TargetedExperimentHelper::getAASequence(target_peptide);
    int target_precursor_swath = getSwath_(swathes, target_peptide_sequence.getMZ(precursor_charge));
    TargetedExperiment::Peptide decoy_peptide = TargetDecoyMap[decoy_pep_it.first];
    OpenMS::AASequence decoy_peptide_sequence = TargetedExperimentHelper::getAASequence(decoy_peptide);
    // Iterate over all transitions (already sorted and deduplicated by sortAndDeduplicatePeptideMap)
    for (const auto& decoy_tr : decoy_pep_it.second)
    {
      // Check mapping of transitions to other peptidoforms
      vector<string> decoy_isoforms = getMatchingPeptidoforms_(
          decoy_tr.second, DecoyIonMap.at(target_precursor_swath).at(decoy_peptide_sequence.toUnmodifiedString()), mz_threshold);
      // Check that transition maps to at least one peptidoform
      if (!decoy_isoforms.empty())
      {
        ReactionMonitoringTransition trn;
        trn.setDecoyTransitionType(ReactionMonitoringTransition::DECOY);
        trn.setDetectingTransition(false);
        trn.setMetaValue("insilico_transition", "true");
        trn.setPrecursorMZ(Math::roundDecimal(target_peptide_sequence.getMZ(precursor_charge), round_decPow));
        trn.setProductMZ(decoy_tr.second);
        trn.setPeptideRef(decoy_peptide.id);
        mrmis.annotateTransitionCV(trn, decoy_tr.first);
        trn.setIdentifyingTransition(true);
        trn.setQuantifyingTransition(false);
        // Set transition name containing mapping to peptidoforms with potential peptidoforms enumerated in brackets
        String identifier = String(transition_index) + "_" + String("UISDECOY") +
          "_{" + ListUtils::concatenate(decoy_isoforms, "|") + "}_" +
          String(trn.getPrecursorMZ()) + "_" + String(trn.getProductMZ()) + "_" +
          String(decoy_peptide.getRetentionTime()) + "_" + decoy_tr.first;
        trn.setName(identifier);
        trn.setNativeID(identifier);
        trn.setMetaValue("Peptidoforms", ListUtils::concatenate(decoy_isoforms, "|"));
        OPENMS_LOG_DEBUG << "[uis] Decoy transition " << trn.getNativeID() << '\n';
        // Check if decoy transition is overlapping with target transition
        vector<string> target_isoforms_overlap = getMatchingPeptidoforms_(
            decoy_tr.second, TargetIonMap.at(target_precursor_swath).at(target_peptide_sequence.toUnmodifiedString()), mz_threshold);
        if (!target_isoforms_overlap.empty())
        {
          // NOTE(review): this 'continue' also skips the transition_index++
          // below (unlike generateTargetAssays_, which always increments) —
          // confirm whether the index reuse for the next transition is intended
          OPENMS_LOG_DEBUG << "[uis] Skipping overlapping decoy transition " << trn.getNativeID() << '\n';
          continue;
        }
        else
        {
          // Append transition
          transitions.push_back(trn);
        }
      }
      transition_index++;
    }
  }
  endProgress();
}
/// Re-annotates all transitions of an experiment against theoretical ion
/// series: precursor and product m/z are snapped to their theoretical values
/// and CV terms are (re)set. Transitions that cannot be annotated within the
/// given thresholds are removed from the experiment.
void MRMAssay::reannotateTransitions(OpenMS::TargetedExperiment& exp,
                                     double precursor_mz_threshold,
                                     double product_mz_threshold,
                                     const std::vector<String>& fragment_types,
                                     const std::vector<size_t>& fragment_charges,
                                     bool enable_specific_losses,
                                     bool enable_unspecific_losses,
                                     int round_decPow)
{
  PeptideVectorType peptides;
  ProteinVectorType proteins;
  TransitionVectorType transitions;
  OpenMS::MRMIonSeries mrmis;
  // hash of the peptide reference containing all transitions
  MRMAssay::PeptideTransitionMapType peptide_trans_map;
  for (Size i = 0; i < exp.getTransitions().size(); i++)
  {
    peptide_trans_map[exp.getTransitions()[i].getPeptideRef()].push_back(&exp.getTransitions()[i]);
  }
  Size progress = 0;
  startProgress(0, exp.getTransitions().size(), "Annotating transitions");
  for (MRMAssay::PeptideTransitionMapType::iterator pep_it = peptide_trans_map.begin();
       pep_it != peptide_trans_map.end(); ++pep_it)
  {
    String peptide_ref = pep_it->first;
    TargetedExperiment::Peptide target_peptide = exp.getPeptideByRef(peptide_ref);
    OpenMS::AASequence target_peptide_sequence = TargetedExperimentHelper::getAASequence(target_peptide);
    // default to singly charged precursors when no charge is annotated
    int precursor_charge = 1;
    if (target_peptide.hasCharge()) {precursor_charge = target_peptide.getChargeState();}
    // theoretical ion series shared by all transitions of this peptide
    MRMIonSeries::IonSeries target_ionseries = mrmis.getIonSeries(
        target_peptide_sequence, precursor_charge, fragment_types,
        fragment_charges, enable_specific_losses,
        enable_unspecific_losses, round_decPow);
    // Generate theoretical precursor m/z
    double precursor_mz = target_peptide_sequence.getMZ(precursor_charge);
    precursor_mz = Math::roundDecimal(precursor_mz, round_decPow);
    for (Size i = 0; i < pep_it->second.size(); i++)
    {
      setProgress(++progress);
      ReactionMonitoringTransition tr = *(pep_it->second[i]);
      // Annotate transition from theoretical ion series
      std::pair<String, double> targetion = mrmis.annotateIon(target_ionseries, tr.getProductMZ(), product_mz_threshold);
      // Ensure that precursor m/z is within threshold
      if (std::fabs(tr.getPrecursorMZ() - precursor_mz) > precursor_mz_threshold)
      {
        targetion.first = "unannotated";
      }
      // Set precursor m/z to theoretical value
      tr.setPrecursorMZ(precursor_mz);
      // Set product m/z to theoretical value
      tr.setProductMZ(targetion.second);
      // Skip unannotated transitions from previous step
      if (targetion.first == "unannotated")
      {
        OPENMS_LOG_DEBUG << "[unannotated] Skipping " << target_peptide_sequence.toString()
                         << " PrecursorMZ: " << tr.getPrecursorMZ() << " ProductMZ: " << tr.getProductMZ()
                         << " " << tr.getMetaValue("annotation") << '\n';
        continue;
      }
      else
      {
        OPENMS_LOG_DEBUG << "[selected] " << target_peptide_sequence.toString() << " PrecursorMZ: " << tr.getPrecursorMZ() << " ProductMZ: " << tr.getProductMZ() << " " << tr.getMetaValue("annotation") << '\n';
      }
      // Set CV terms
      mrmis.annotateTransitionCV(tr, targetion.first);
      // Add reference to parent precursor
      tr.setPeptideRef(target_peptide.id);
      // Append transition
      transitions.push_back(tr);
    }
  }
  endProgress();
  // replace the experiment's transitions with the annotated subset
  exp.setTransitions(std::move(transitions));
}
/// @brief Removes transitions that are unannotated, self-interfering, or out of range.
///
/// A transition is dropped if (in this order):
/// - its primary interpretation is flagged as NonIdentified (unannotated),
/// - its product m/z falls into the SWATH window that isolates its own
///   precursor (self-interference), or
/// - its product m/z lies outside [lower_mz_limit, upper_mz_limit].
///
/// @param exp Experiment whose transition list is filtered in-place
/// @param lower_mz_limit Lower product m/z boundary (inclusive)
/// @param upper_mz_limit Upper product m/z boundary (inclusive)
/// @param swathes SWATH isolation windows; may be empty to skip the swath check
void MRMAssay::restrictTransitions(OpenMS::TargetedExperiment& exp, double lower_mz_limit, double upper_mz_limit, const std::vector<std::pair<double, double> >& swathes)
{
  TransitionVectorType transitions;
  // Reserve capacity to avoid reallocations (upper bound: all transitions pass the filter)
  transitions.reserve(exp.getTransitions().size());
  Size progress = 0;
  startProgress(0, exp.getTransitions().size(), "Restricting transitions");
  for (Size i = 0; i < exp.getTransitions().size(); ++i)
  {
    setProgress(++progress);
    // const reference: avoid a deep copy per iteration; push_back below copies once
    const ReactionMonitoringTransition& tr = exp.getTransitions()[i];
    const TargetedExperiment::Peptide& target_peptide = exp.getPeptideByRef(tr.getPeptideRef());
    OpenMS::AASequence target_peptide_sequence = TargetedExperimentHelper::getAASequence(target_peptide);
    // Skip transitions whose primary interpretation is unannotated
    if (!tr.getProduct().getInterpretationList().empty() &&
        tr.getProduct().getInterpretationList()[0].iontype == TargetedExperiment::IonType::NonIdentified)
    {
      OPENMS_LOG_DEBUG << "[unannotated] Skipping " << target_peptide_sequence
                       << " PrecursorMZ: " << tr.getPrecursorMZ() << " ProductMZ: " << tr.getProductMZ()
                       << " " << tr.getMetaValue("annotation") << '\n';
      continue;
    }
    // Skip transitions whose product m/z falls into the swath window that
    // also isolates their precursor (self-interference)
    if (!swathes.empty() && MRMAssay::isInSwath_(swathes, tr.getPrecursorMZ(), tr.getProductMZ()))
    {
      OPENMS_LOG_DEBUG << "[swath] Skipping " << target_peptide_sequence << " PrecursorMZ: " << tr.getPrecursorMZ() << " ProductMZ: " << tr.getProductMZ() << '\n';
      continue;
    }
    // Skip transitions outside the allowed product m/z range
    if (tr.getProductMZ() < lower_mz_limit || tr.getProductMZ() > upper_mz_limit)
    {
      OPENMS_LOG_DEBUG << "[mz_limit] Skipping " << target_peptide_sequence << " PrecursorMZ: " << tr.getPrecursorMZ() << " ProductMZ: " << tr.getProductMZ() << '\n';
      continue;
    }
    // Keep the transition
    transitions.push_back(tr);
  }
  exp.setTransitions(std::move(transitions));
  endProgress();
}
/// @brief Selects the most intense transitions per precursor as detecting transitions.
///
/// Precursors with fewer than @p min_transitions transitions are dropped
/// entirely; otherwise at most @p max_transitions non-decoy transitions with
/// the highest library intensities are kept and flagged as detecting
/// transitions. Peptides without remaining transitions and proteins without
/// remaining peptides are removed as well.
///
/// @param exp Experiment filtered in-place (transitions, peptides, proteins)
/// @param min_transitions Minimum number of transitions a precursor must have
/// @param max_transitions Maximum number of transitions kept per precursor
void MRMAssay::detectingTransitions(OpenMS::TargetedExperiment& exp, int min_transitions, int max_transitions)
{
  PeptideVectorType peptides;
  ProteinVectorType proteins;
  TransitionVectorType transitions;
  std::unordered_set<String> peptide_ids;  // peptides that retain >= 1 transition
  std::unordered_set<String> protein_ids;  // proteins referenced by retained peptides
  std::map<String, TransitionVectorType> transitions_map;
  // Group transitions by their precursor (peptide) reference for easy access.
  // Note: operator[] default-constructs missing entries, no existence check needed.
  for (Size i = 0; i < exp.getTransitions().size(); ++i)
  {
    const ReactionMonitoringTransition& tr = exp.getTransitions()[i];
    transitions_map[tr.getPeptideRef()].push_back(tr);
  }
  // Reserve capacity to avoid reallocations (upper bounds)
  transitions.reserve(exp.getTransitions().size());
  peptides.reserve(exp.getPeptides().size());
  proteins.reserve(exp.getProteins().size());
  Size progress = 0;
  startProgress(0, transitions_map.size() + exp.getPeptides().size() + exp.getProteins().size(), "Select detecting transitions");
  for (auto& m : transitions_map)
  {
    setProgress(++progress);
    // Drop precursors that do not reach the required minimum number of transitions
    if (m.second.size() < static_cast<Size>(min_transitions))
    {
      continue;
    }
    // Collect all reference transition intensities of this precursor
    std::vector<double> library_intensity;
    library_intensity.reserve(m.second.size());
    for (const auto& tr : m.second)
    {
      library_intensity.push_back(tr.getLibraryIntensity());
    }
    // Sort by intensity (descending) and keep only the top max_transitions
    // values for determining the intensity threshold
    std::sort(library_intensity.begin(), library_intensity.end(), std::greater<double>());
    if (static_cast<Size>(max_transitions) < library_intensity.size())
    {
      library_intensity.resize(max_transitions);
    }
    // Set of top intensities for O(log n) membership tests
    std::set<double> top_intensities(library_intensity.begin(), library_intensity.end());
    // Keep non-decoy transitions whose intensity is among the top ones; if
    // several transitions share the same intensity, 'kept' still enforces
    // the max_transitions restriction.
    Size kept = 0;
    for (const auto& tr_candidate : m.second)
    {
      if (top_intensities.find(tr_candidate.getLibraryIntensity()) == top_intensities.end() ||
          tr_candidate.getDecoyTransitionType() == ReactionMonitoringTransition::DECOY ||
          kept >= static_cast<Size>(max_transitions))
      {
        continue;
      }
      ReactionMonitoringTransition tr = tr_candidate;
      // Set meta value tag for detecting transition
      tr.setDetectingTransition(true);
      ++kept;
      // Append transition and remember its precursor id
      transitions.push_back(tr);
      peptide_ids.insert(tr.getPeptideRef());
    }
  }
  // Keep only peptides that still have transitions; index their proteins
  for (const auto& peptide : exp.getPeptides())
  {
    setProgress(++progress);
    if (peptide_ids.find(peptide.id) != peptide_ids.end())
    {
      peptides.push_back(peptide);
      for (const auto& protein_ref : peptide.protein_refs)
      {
        protein_ids.insert(protein_ref);
      }
    }
    else
    {
      OPENMS_LOG_DEBUG << "[peptide] Skipping " << peptide.id << '\n';
    }
  }
  // Keep only proteins that still have peptides
  for (const auto& protein : exp.getProteins())
  {
    setProgress(++progress);
    if (protein_ids.find(protein.id) != protein_ids.end())
    {
      proteins.push_back(protein);
    }
    else
    {
      OPENMS_LOG_DEBUG << "[protein] Skipping " << protein.id << '\n';
    }
  }
  exp.setTransitions(std::move(transitions));
  exp.setPeptides(std::move(peptides));
  exp.setProteins(std::move(proteins));
  endProgress();
}
/// @brief Generates UIS (unique ion signature) identification transitions for the IPF workflow.
///
/// Steps:
/// 1. Enumerate all target peptidoforms and their theoretical transitions.
/// 2. Append target identification transitions.
/// 3. Unless disabled, generate decoy sequences sharing the targets'
///    peptidoform properties and their theoretical transitions.
/// 4. Append decoy identification transitions.
///
/// @param exp Experiment whose transition list is extended in-place
/// @param fragment_types Fragment ion types to consider (e.g. "b", "y")
/// @param fragment_charges Fragment charge states to consider
/// @param enable_specific_losses Include residue-specific neutral losses
/// @param enable_unspecific_losses Include unspecific neutral losses
/// @param enable_ms2_precursors Add MS2-level precursor transitions
/// @param mz_threshold m/z tolerance when matching transitions
/// @param swathes SWATH isolation windows (precursor m/z ranges)
/// @param round_decPow Decimal power used for m/z rounding
/// @param max_num_alternative_localizations Upper bound on peptidoforms per peptide
/// @param shuffle_seed Seed for decoy sequence shuffling
/// @param disable_decoy_transitions If true, only target transitions are generated
void MRMAssay::uisTransitions(OpenMS::TargetedExperiment& exp,
                              const std::vector<String>& fragment_types,
                              const std::vector<size_t>& fragment_charges,
                              bool enable_specific_losses,
                              bool enable_unspecific_losses,
                              bool enable_ms2_precursors,
                              double mz_threshold,
                              const std::vector<std::pair<double, double> >& swathes,
                              int round_decPow,
                              size_t max_num_alternative_localizations,
                              int shuffle_seed,
                              bool disable_decoy_transitions)
{
  OpenMS::MRMIonSeries mrmis;
  TransitionVectorType transitions = exp.getTransitions();
  // Temporary indices for fast access:
  // TargetIonMap & DecoyIonMap: product m/z of all peptidoforms, used to find interfering transitions
  IonMapT TargetIonMap, DecoyIonMap;
  // TargetPeptideMap & DecoyPeptideMap: theoretical transitions of all peptidoforms
  PeptideMapT TargetPeptideMap, DecoyPeptideMap;
  // TargetSequenceMap, DecoySequenceMap & TargetDecoyMap: link targets and UIS decoys
  SequenceMapT TargetSequenceMap;
  std::map<String, String> DecoySequenceMap;
  std::map<String, TargetedExperiment::Peptide> TargetDecoyMap;
  // Step 1: Generate target in silico peptide map containing theoretical transitions
  generateTargetInSilicoMap_(exp, fragment_types, fragment_charges, enable_specific_losses, enable_unspecific_losses, enable_ms2_precursors, swathes, round_decPow, max_num_alternative_localizations, TargetSequenceMap, TargetIonMap, TargetPeptideMap);
  // Sort TargetIonMap by m/z for binary search in getMatchingPeptidoforms_
  sortIonMap(TargetIonMap);
  // Sort and deduplicate TargetPeptideMap to avoid repeated sorting in generateTargetAssays_
  sortAndDeduplicatePeptideMap(TargetPeptideMap);
  // Reserve capacity for new UIS transitions (estimate: ~6 transitions per peptide, x2 for decoys)
  size_t estimated_new_transitions = TargetPeptideMap.size() * 6 * (disable_decoy_transitions ? 1 : 2);
  transitions.reserve(transitions.size() + estimated_new_transitions);
  // Step 2: Generate target identification transitions
  generateTargetAssays_(exp, transitions, mz_threshold, swathes, round_decPow, TargetPeptideMap, TargetIonMap);
  if (!disable_decoy_transitions)
  {
    // Step 3a: Generate decoy sequences that share peptidoform properties with targets
    generateDecoySequences_(TargetSequenceMap, DecoySequenceMap, shuffle_seed);
    // Step 3b: Generate decoy in silico peptide map containing theoretical transitions
    generateDecoyInSilicoMap_(exp, fragment_types, fragment_charges, enable_specific_losses, enable_unspecific_losses, enable_ms2_precursors, swathes, round_decPow, TargetDecoyMap, TargetPeptideMap, DecoySequenceMap, DecoyIonMap, DecoyPeptideMap);
    // Sort DecoyIonMap by m/z for binary search in getMatchingPeptidoforms_
    sortIonMap(DecoyIonMap);
    // Sort and deduplicate DecoyPeptideMap to avoid repeated sorting in generateDecoyAssays_
    sortAndDeduplicatePeptideMap(DecoyPeptideMap);
    // Step 4: Generate decoy identification transitions
    generateDecoyAssays_(exp, transitions, mz_threshold, swathes, round_decPow, DecoyPeptideMap, TargetDecoyMap, DecoyIonMap, TargetIonMap);
  }
  // Move: avoids copying the (potentially large) transition list,
  // consistent with the other methods in this file
  exp.setTransitions(std::move(transitions));
}
/// @brief Keeps per compound only the top @p max_transitions most intense transitions.
///
/// Compounds with fewer than @p min_transitions transitions are removed
/// entirely, unless they are decoys (a decoy's first transition exempts the
/// whole group from the minimum). Compounds left without transitions are
/// removed from the experiment.
///
/// @param exp Experiment filtered in-place (transitions and compounds)
/// @param min_transitions Minimum number of transitions per compound (non-decoy)
/// @param max_transitions Maximum number of transitions kept per compound
void MRMAssay::filterMinMaxTransitionsCompound(OpenMS::TargetedExperiment& exp, int min_transitions, int max_transitions)
{
  CompoundVectorType compounds;
  std::unordered_set<String> compound_ids;  // compounds that retain >= 1 transition
  TransitionVectorType transitions;
  std::map<String, TransitionVectorType> transitions_map;
  // Group transitions by compound reference for easy access.
  // Note: operator[] default-constructs missing entries, no existence check needed.
  for (Size i = 0; i < exp.getTransitions().size(); ++i)
  {
    const ReactionMonitoringTransition& tr = exp.getTransitions()[i];
    transitions_map[tr.getCompoundRef()].push_back(tr);
  }
  // Reserve capacity to avoid reallocations (upper bounds)
  transitions.reserve(exp.getTransitions().size());
  compounds.reserve(exp.getCompounds().size());
  for (auto& m : transitions_map)
  {
    // Require the minimum number of transitions, except for decoy precursors
    if (m.second.size() < static_cast<Size>(min_transitions) &&
        m.second[0].getDecoyTransitionType() != ReactionMonitoringTransition::DECOY)
    {
      continue;
    }
    // Collect all reference transition intensities of this precursor
    std::vector<double> library_intensity;
    library_intensity.reserve(m.second.size());
    for (const auto& tr : m.second)
    {
      library_intensity.push_back(tr.getLibraryIntensity());
    }
    // Sort by intensity (descending) and keep only the top max_transitions
    // values for determining the intensity threshold
    std::sort(library_intensity.begin(), library_intensity.end(), std::greater<double>());
    if (static_cast<Size>(max_transitions) < library_intensity.size())
    {
      library_intensity.resize(max_transitions);
    }
    // Set of top intensities for O(log n) membership tests
    std::set<double> top_intensities(library_intensity.begin(), library_intensity.end());
    // Keep transitions whose intensity is among the top ones; if several
    // transitions share the same intensity, 'kept' still enforces the
    // max_transitions restriction.
    Size kept = 0;
    for (const auto& tr_candidate : m.second)
    {
      if (top_intensities.find(tr_candidate.getLibraryIntensity()) == top_intensities.end() ||
          kept >= static_cast<Size>(max_transitions))
      {
        continue;
      }
      ReactionMonitoringTransition tr = tr_candidate;
      // Set meta value tag for detecting transition
      tr.setDetectingTransition(true);
      ++kept;
      // Append transition and remember its compound id (O(1) set insert
      // instead of a linear scan over a vector)
      transitions.push_back(tr);
      compound_ids.insert(tr.getCompoundRef());
    }
  }
  // Keep only compounds that still have transitions
  for (Size i = 0; i < exp.getCompounds().size(); ++i)
  {
    const TargetedExperiment::Compound& compound = exp.getCompounds()[i];
    if (compound_ids.find(compound.id) != compound_ids.end())
    {
      compounds.push_back(compound);
    }
    else
    {
      OPENMS_LOG_DEBUG << "[compound] Skipping " << compound.id << " - not enough transitions."<< '\n';
    }
  }
  exp.setTransitions(std::move(transitions));
  exp.setCompounds(std::move(compounds));
}
/// @brief Removes decoy compounds (and their transitions) that lack a matching target.
///
/// Decoy compound ids carry a "_decoy" infix, e.g.
/// '0_CompoundName_decoy_[M+H]+_448_0'; removing the infix yields the id of
/// the corresponding target compound. Decoys whose reconstructed target id
/// does not exist are removed from both the compound and the transition list.
///
/// Bug fix: the previous implementation detected decoys by the substring
/// "decoy" but erased at the position of "_decoy". For an id containing
/// "decoy" without the underscore, String::erase(npos, ...) threw
/// std::out_of_range. Such ids are now treated as targets instead.
///
/// @param exp Experiment filtered in-place (compounds and transitions)
void MRMAssay::filterUnreferencedDecoysCompound(OpenMS::TargetedExperiment &exp)
{
  vector<TargetedExperiment::Compound> compounds = exp.getCompounds();
  vector<std::string> descriptions_targets;
  vector<std::string> descriptions_decoys;
  vector<std::string> difference_target_decoys;
  vector<std::pair<std::string, std::string>> reference_decoys;
  vector<std::string> single_decoy_id;
  const String decoy_suffix = "_decoy";
  for (const auto &it : compounds)
  {
    // Extract the potential target id by stripping the '_decoy' infix
    const size_t suffix_pos = it.id.find(decoy_suffix);
    if (suffix_pos != std::string::npos)
    {
      String potential_target = it.id;
      potential_target.erase(suffix_pos, decoy_suffix.size());
      descriptions_decoys.emplace_back(potential_target);
      reference_decoys.emplace_back(it.id, potential_target);
    }
    else
    {
      descriptions_targets.emplace_back(it.id);
    }
  }
  // Determine reconstructed target ids that do not actually exist
  // (set_difference requires both ranges to be sorted)
  std::sort(descriptions_targets.begin(), descriptions_targets.end());
  std::sort(descriptions_decoys.begin(), descriptions_decoys.end());
  std::set_difference(descriptions_decoys.begin(),
                      descriptions_decoys.end(),
                      descriptions_targets.begin(),
                      descriptions_targets.end(),
                      std::inserter(difference_target_decoys, difference_target_decoys.begin()));
  // Translate the orphaned target ids back to their decoy ids
  for (const auto &it : difference_target_decoys)
  {
    auto iter = std::find_if(reference_decoys.begin(),
                             reference_decoys.end(),
                             [&it](const std::pair<String, String> &element) { return element.second == it; });
    if (iter != reference_decoys.end())
    {
      single_decoy_id.emplace_back(iter->first);
    }
  }
  // Remove decoy compounds due to missing targets
  vector<TargetedExperiment::Compound> filtered_compounds;
  for (const auto& it : compounds)
  {
    if (std::find(single_decoy_id.begin(), single_decoy_id.end(), it.id) != single_decoy_id.end())
    {
      OPENMS_LOG_DEBUG << "The decoy " << it.id << " was filtered due to missing a respective target.\n";
    }
    else
    {
      filtered_compounds.push_back(it);
    }
  }
  // Remove decoy transitions due to missing targets
  vector<ReactionMonitoringTransition> filtered_transitions;
  for (const auto& it : exp.getTransitions())
  {
    if (std::find(single_decoy_id.begin(), single_decoy_id.end(), it.getCompoundRef()) != single_decoy_id.end())
    {
      OPENMS_LOG_DEBUG << "The decoy " << it.getCompoundRef()
                       << " was filtered due to missing a respective target.\n";
    }
    else
    {
      filtered_transitions.push_back(it);
    }
  }
  exp.setCompounds(filtered_compounds);
  exp.setTransitions(filtered_transitions);
}
// =====================================================================
// Light (memory-efficient) versions of the above methods
// =====================================================================
/// @brief Light (memory-efficient) variant of reannotateTransitions().
///
/// Re-annotates each transition against the theoretical ion series of its
/// precursor peptide, replaces experimental precursor/product m/z by their
/// theoretical values, and parses fragment type, number and charge from the
/// annotation string. Transitions that cannot be annotated (or whose
/// precursor m/z deviates more than @p precursor_mz_threshold from theory)
/// are dropped. Non-peptide compounds and unparseable sequences are kept
/// unchanged.
///
/// @param exp Light experiment whose transitions are rewritten in-place
/// @param precursor_mz_threshold Max allowed precursor m/z deviation
/// @param product_mz_threshold Max allowed product m/z deviation for annotation
/// @param fragment_types Fragment ion types to consider
/// @param fragment_charges Fragment charge states to consider
/// @param enable_specific_losses Include residue-specific neutral losses
/// @param enable_unspecific_losses Include unspecific neutral losses
/// @param round_decPow Decimal power used for m/z rounding
void MRMAssay::reannotateTransitionsLight(OpenSwath::LightTargetedExperiment& exp,
                                          double precursor_mz_threshold,
                                          double product_mz_threshold,
                                          const std::vector<String>& fragment_types,
                                          const std::vector<size_t>& fragment_charges,
                                          bool enable_specific_losses,
                                          bool enable_unspecific_losses,
                                          int round_decPow)
{
  std::vector<OpenSwath::LightTransition> transitions;
  OpenMS::MRMIonSeries mrmis;
  // Compound lookup by id for quick access
  std::unordered_map<std::string, const OpenSwath::LightCompound*> compound_map;
  for (const auto& compound : exp.compounds)
  {
    compound_map[compound.id] = &compound;
  }
  // Group transitions by peptide_ref so the theoretical ion series is
  // computed only once per precursor
  std::map<std::string, std::vector<OpenSwath::LightTransition*>> peptide_trans_map;
  for (auto& tr : exp.transitions)
  {
    peptide_trans_map[tr.peptide_ref].push_back(&tr);
  }
  Size progress = 0;
  startProgress(0, exp.transitions.size(), "Annotating transitions (Light)");
  for (auto& pep_it : peptide_trans_map)
  {
    const std::string& peptide_ref = pep_it.first;
    // Look up the compound for this precursor
    auto comp_it = compound_map.find(peptide_ref);
    if (comp_it == compound_map.end() || !comp_it->second->isPeptide())
    {
      // Non-peptide compounds (metabolomics): keep transitions as-is
      for (auto* tr : pep_it.second)
      {
        setProgress(++progress);
        transitions.push_back(*tr);
      }
      continue;
    }
    const OpenSwath::LightCompound* compound = comp_it->second;
    OpenMS::AASequence target_peptide_sequence;
    try
    {
      target_peptide_sequence = AASequence::fromString(compound->sequence);
    }
    catch (Exception::InvalidValue&)
    {
      // Can't parse sequence, keep transitions as-is
      for (auto* tr : pep_it.second)
      {
        setProgress(++progress);
        transitions.push_back(*tr);
      }
      continue;
    }
    // Fall back to charge 1 if the compound carries no positive charge
    int precursor_charge = compound->charge > 0 ? compound->charge : 1;
    MRMIonSeries::IonSeries target_ionseries = mrmis.getIonSeries(
        target_peptide_sequence, precursor_charge, fragment_types,
        fragment_charges, enable_specific_losses,
        enable_unspecific_losses, round_decPow);
    // Generate theoretical precursor m/z
    double precursor_mz = target_peptide_sequence.getMZ(precursor_charge);
    precursor_mz = Math::roundDecimal(precursor_mz, round_decPow);
    for (auto* tr : pep_it.second)
    {
      setProgress(++progress);
      // Annotate transition from theoretical ion series
      std::pair<String, double> targetion = mrmis.annotateIon(target_ionseries, tr->product_mz, product_mz_threshold);
      // Reject the annotation if the precursor m/z deviates too much from theory
      if (std::fabs(tr->precursor_mz - precursor_mz) > precursor_mz_threshold)
      {
        targetion.first = "unannotated";
      }
      // Skip unannotated transitions
      if (targetion.first == "unannotated")
      {
        OPENMS_LOG_DEBUG << "[unannotated] Skipping " << target_peptide_sequence.toString()
                         << " PrecursorMZ: " << tr->precursor_mz << " ProductMZ: " << tr->product_mz << '\n';
        continue;
      }
      OpenSwath::LightTransition new_tr = *tr;
      // Replace experimental m/z values by theoretical ones
      new_tr.precursor_mz = precursor_mz;
      new_tr.product_mz = targetion.second;
      // Parse fragment type/number/charge from the annotation string.
      // Format examples: "y7", "b3^1", "y5^2", "y5-H2O1"
      const std::string& annotation = targetion.first;
      if (!annotation.empty())
      {
        new_tr.setFragmentType(annotation.substr(0, 1));
        // Extract fragment number (digits following the ion type letter).
        // Cast to unsigned char: std::isdigit() is undefined for negative
        // char values.
        std::string num_str;
        size_t i = 1;
        for (; i < annotation.size() && std::isdigit(static_cast<unsigned char>(annotation[i])); ++i)
        {
          num_str += annotation[i];
        }
        if (!num_str.empty())
        {
          new_tr.fragment_nr = static_cast<int16_t>(std::stoi(num_str));
        }
        // Extract charge after '^'
        if (i < annotation.size() && annotation[i] == '^')
        {
          std::string charge_str;
          for (size_t j = i + 1; j < annotation.size() && std::isdigit(static_cast<unsigned char>(annotation[j])); ++j)
          {
            charge_str += annotation[j];
          }
          if (!charge_str.empty())
          {
            new_tr.fragment_charge = static_cast<int8_t>(std::stoi(charge_str));
          }
        }
      }
      transitions.push_back(new_tr);
    }
  }
  endProgress();
  exp.transitions = std::move(transitions);
}
/// @brief Light (memory-efficient) variant of restrictTransitions().
///
/// Removes transitions whose product m/z falls into the SWATH window that
/// isolates their own precursor (self-interference) or lies outside
/// [lower_mz_limit, upper_mz_limit].
///
/// @param exp Light experiment whose transitions are filtered in-place
/// @param lower_mz_limit Lower product m/z boundary (inclusive)
/// @param upper_mz_limit Upper product m/z boundary (inclusive)
/// @param swathes SWATH isolation windows; may be empty to skip the swath check
void MRMAssay::restrictTransitionsLight(OpenSwath::LightTargetedExperiment& exp,
                                        double lower_mz_limit,
                                        double upper_mz_limit,
                                        const std::vector<std::pair<double, double> >& swathes)
{
  std::vector<OpenSwath::LightTransition> transitions;
  // Reserve capacity to avoid reallocations (upper bound: all transitions pass the filter)
  transitions.reserve(exp.transitions.size());
  // Note: the previous implementation built a compound lookup map here that
  // was never used; it has been removed.
  Size progress = 0;
  startProgress(0, exp.transitions.size(), "Restricting transitions (Light)");
  for (const auto& tr : exp.transitions)
  {
    setProgress(++progress);
    // Skip transitions whose product m/z falls into the swath window that
    // also isolates their precursor (self-interference)
    if (!swathes.empty() && MRMAssay::isInSwath_(swathes, tr.precursor_mz, tr.product_mz))
    {
      OPENMS_LOG_DEBUG << "[swath] Skipping PrecursorMZ: " << tr.precursor_mz << " ProductMZ: " << tr.product_mz << '\n';
      continue;
    }
    // Skip transitions outside the allowed product m/z range
    if (tr.product_mz < lower_mz_limit || tr.product_mz > upper_mz_limit)
    {
      OPENMS_LOG_DEBUG << "[mz_limit] Skipping PrecursorMZ: " << tr.precursor_mz << " ProductMZ: " << tr.product_mz << '\n';
      continue;
    }
    // Keep the transition
    transitions.push_back(tr);
  }
  exp.transitions = std::move(transitions);
  endProgress();
}
/// @brief Light (memory-efficient) variant of detectingTransitions().
///
/// Keeps, per precursor, at most @p max_transitions non-decoy transitions
/// with the highest library intensities (precursors with fewer than
/// @p min_transitions transitions are dropped entirely) and flags them as
/// detecting transitions. Compounds without remaining transitions and
/// proteins without remaining compounds are removed as well.
///
/// @param exp Light experiment filtered in-place (transitions, compounds, proteins)
/// @param min_transitions Minimum number of transitions a precursor must have
/// @param max_transitions Maximum number of transitions kept per precursor
void MRMAssay::detectingTransitionsLight(OpenSwath::LightTargetedExperiment& exp,
                                         int min_transitions,
                                         int max_transitions)
{
  std::vector<OpenSwath::LightTransition> transitions;
  std::vector<OpenSwath::LightCompound> compounds;
  std::vector<OpenSwath::LightProtein> proteins;
  std::unordered_set<std::string> peptide_ids;   // compounds retaining >= 1 transition
  std::unordered_set<std::string> protein_list;  // proteins referenced by retained compounds
  // Group transitions by peptide_ref
  std::map<std::string, std::vector<OpenSwath::LightTransition>> transitions_map;
  for (const auto& tr : exp.transitions)
  {
    transitions_map[tr.peptide_ref].push_back(tr);
  }
  // Reserve capacity to avoid reallocations (upper bounds)
  transitions.reserve(exp.transitions.size());
  compounds.reserve(exp.compounds.size());
  proteins.reserve(exp.proteins.size());
  Size progress = 0;
  startProgress(0, transitions_map.size() + exp.compounds.size() + exp.proteins.size(), "Select detecting transitions (Light)");
  for (auto& m : transitions_map)
  {
    setProgress(++progress);
    // Drop precursors that do not reach the required minimum number of transitions
    if (m.second.size() < static_cast<size_t>(min_transitions))
    {
      continue;
    }
    // Collect all reference transition intensities of this precursor
    std::vector<double> library_intensity;
    library_intensity.reserve(m.second.size());
    for (const auto& tr : m.second)
    {
      library_intensity.push_back(tr.library_intensity);
    }
    // Sort by intensity (descending) and keep only the top max_transitions
    // values for determining the intensity threshold
    std::sort(library_intensity.begin(), library_intensity.end(), std::greater<double>());
    if (static_cast<size_t>(max_transitions) < library_intensity.size())
    {
      library_intensity.resize(max_transitions);
    }
    // Set of top intensities for O(log n) membership tests
    std::set<double> top_intensities(library_intensity.begin(), library_intensity.end());
    // Keep non-decoy transitions among the most intense ones; if several
    // transitions share the same intensity, 'kept' still enforces the
    // max_transitions restriction.
    size_t kept = 0;
    for (auto& tr : m.second)
    {
      if (top_intensities.find(tr.library_intensity) != top_intensities.end() &&
          !tr.getDecoy() &&
          kept < static_cast<size_t>(max_transitions))
      {
        tr.setDetectingTransition(true);
        transitions.push_back(tr);
        peptide_ids.insert(tr.peptide_ref);
        ++kept;
      }
    }
  }
  // Note: the previous implementation built an unused compound lookup map
  // here; it has been removed.
  // Keep only compounds that still have transitions; index their proteins
  for (const auto& compound : exp.compounds)
  {
    setProgress(++progress);
    if (peptide_ids.find(compound.id) != peptide_ids.end())
    {
      compounds.push_back(compound);
      for (const auto& protein_ref : compound.protein_refs)
      {
        protein_list.insert(protein_ref);
      }
    }
  }
  // Keep only proteins that still have compounds
  for (const auto& protein : exp.proteins)
  {
    setProgress(++progress);
    if (protein_list.find(protein.id) != protein_list.end())
    {
      proteins.push_back(protein);
    }
  }
  exp.transitions = std::move(transitions);
  exp.compounds = std::move(compounds);
  exp.proteins = std::move(proteins);
  endProgress();
}
// =====================================================================
// Light (memory-efficient) versions of IPF methods
// =====================================================================
namespace
{
/// Rebuilds an AASequence (including modifications) from a LightCompound.
/// Two input flavours are supported:
/// 1. PQP input: compound.sequence already carries the full modified
///    sequence (e.g. "AALIGS(UniMod:21)LGSIFK")
/// 2. TraML input (via DataAccessHelper): compound.sequence is the plain
///    sequence and the modifications live in compound.modifications
OpenMS::AASequence getAASequenceFromLightCompound(const OpenSwath::LightCompound& compound)
{
  OpenMS::AASequence sequence = AASequence::fromString(compound.sequence);
  // Determine whether the parsed sequence already carries modifications
  // (PQP case): terminal modifications first, then each residue.
  bool already_modified = sequence.hasNTerminalModification() || sequence.hasCTerminalModification();
  for (Size pos = 0; !already_modified && pos < sequence.size(); ++pos)
  {
    already_modified = sequence[pos].isModified();
  }
  if (already_modified)
  {
    // PQP case: modifications were parsed from the string, nothing left to do
    return sequence;
  }
  // TraML case: attach the modifications stored on the LightCompound
  OpenMS::ModificationsDB* mod_db = OpenMS::ModificationsDB::getInstance();
  for (const auto& mod : compound.modifications)
  {
    if (mod.unimod_id < 0)
    {
      // Invalid modification entry, skip it
      continue;
    }
    const String mod_name = "UniMod:" + String(mod.unimod_id);
    const int seq_len = static_cast<int>(sequence.size());
    if (mod.location == -1)
    {
      // N-terminal modification
      const ResidueModification* res_mod = mod_db->getModification(mod_name, "", ResidueModification::N_TERM);
      if (res_mod)
      {
        sequence.setNTerminalModification(res_mod);
      }
    }
    else if (mod.location == seq_len)
    {
      // C-terminal modification
      const ResidueModification* res_mod = mod_db->getModification(mod_name, "", ResidueModification::C_TERM);
      if (res_mod)
      {
        sequence.setCTerminalModification(res_mod);
      }
    }
    else if (mod.location >= 0 && mod.location < seq_len)
    {
      // Internal residue modification
      const String residue = String(sequence[mod.location].getOneLetterCode());
      const ResidueModification* res_mod = mod_db->getModification(mod_name, residue, ResidueModification::ANYWHERE);
      if (res_mod)
      {
        sequence.setModification(mod.location, res_mod);
      }
    }
  }
  return sequence;
}
} // anonymous namespace
/// @brief Generates the target in silico peptidoform maps for the light
/// (memory-efficient) UIS/IPF workflow.
///
/// For every peptide compound in @p exp, all alternative modification
/// localizations (peptidoforms) are enumerated and their theoretical fragment
/// ions computed. Three indices are filled:
/// - @p TargetSequenceMap : swath index -> unmodified sequence -> set of modified sequences
/// - @p TargetIonMap      : swath index -> unmodified sequence -> (fragment m/z, modified sequence)
/// - @p TargetPeptideMap  : compound id -> (ion annotation, fragment m/z)
///
/// Non-peptide compounds, compounds with unparseable sequences, and peptides
/// exceeding @p max_num_alternative_localizations peptidoforms are skipped.
void MRMAssay::generateTargetInSilicoMapLight_(const OpenSwath::LightTargetedExperiment& exp,
const std::vector<String>& fragment_types,
const std::vector<size_t>& fragment_charges,
bool enable_specific_losses,
bool enable_unspecific_losses,
bool enable_ms2_precursors,
const std::vector<std::pair<double, double> >& swathes,
int round_decPow,
size_t max_num_alternative_localizations,
SequenceMapT& TargetSequenceMap,
IonMapT& TargetIonMap,
PeptideMapT& TargetPeptideMap)
{
OpenMS::MRMIonSeries mrmis;
Size progress = 0;
startProgress(0, exp.compounds.size(), "Generation of target in silico peptide map (Light)");
for (const auto& compound : exp.compounds)
{
setProgress(progress++);
// Skip non-peptide compounds (metabolites)
if (!compound.isPeptide())
{
continue;
}
// Reconstruct AASequence from compound (including modifications)
OpenMS::AASequence peptide_sequence;
try
{
peptide_sequence = getAASequenceFromLightCompound(compound);
}
catch (Exception::BaseException&)
{
OPENMS_LOG_DEBUG << "[uis] Skipping compound (cannot parse sequence): " << compound.id << '\n';
continue;
}
// Fall back to charge 1 if the compound carries no positive charge
int precursor_charge = compound.charge > 0 ? compound.charge : 1;
double precursor_mz = peptide_sequence.getMZ(precursor_charge);
// SWATH window index of the precursor; first-level key of the sequence/ion maps
int precursor_swath = getSwath_(swathes, precursor_mz);
// Compute all alternative peptidoforms compatible with ModificationsDB
const std::vector<AASequence> alternative_peptide_sequences = generateTheoreticalPeptidoforms_(peptide_sequence);
// Some permutations might be too complex, skip if threshold is reached
if (alternative_peptide_sequences.size() > max_num_alternative_localizations)
{
OPENMS_LOG_DEBUG << "[uis] Peptide skipped (too many permutations possible): " << compound.id << '\n';
continue;
}
// Iterate over all peptidoforms
for (const auto& alt_aa : alternative_peptide_sequences)
{
// Cache string representations to avoid repeated conversions
const String unmod_str = alt_aa.toUnmodifiedString();
const String mod_str = alt_aa.toString();
// Append peptidoform to index
TargetSequenceMap[precursor_swath][unmod_str].insert(mod_str);
// Generate theoretical ion series
// NOTE(review): unlike the heavy variant, round_decPow is not passed to
// getIonSeries() here; fragment m/z values are rounded explicitly below --
// confirm the default rounding inside getIonSeries() is acceptable.
auto ionseries = mrmis.getIonSeries(alt_aa, precursor_charge,
fragment_types, fragment_charges, enable_specific_losses,
enable_unspecific_losses);
if (enable_ms2_precursors)
{
// Add precursor to theoretical transitions
double prec_mz = Math::roundDecimal(precursor_mz, round_decPow);
TargetIonMap[precursor_swath][unmod_str].emplace_back(prec_mz, mod_str);
TargetPeptideMap[compound.id].emplace_back("MS2_Precursor_i0", prec_mz);
}
// Iterate over all theoretical transitions
for (const auto& im_it : ionseries)
{
// Append transition to indices to find interfering transitions
double fragment_mz = Math::roundDecimal(im_it.second, round_decPow);
TargetIonMap[precursor_swath][unmod_str].emplace_back(fragment_mz, mod_str);
TargetPeptideMap[compound.id].emplace_back(im_it.first, fragment_mz);
}
}
}
endProgress();
}
/// @brief Generates the decoy in silico peptidoform maps for the light
/// (memory-efficient) UIS/IPF workflow.
///
/// For every target peptide compound that has a decoy sequence in
/// @p DecoySequenceMap, decoy peptidoforms are generated (using the target as
/// template) and their theoretical fragment ions are stored in
/// @p DecoyIonMap and @p DecoyPeptideMap. The decoy compound itself is
/// recorded in @p TargetDecoyMap under the TARGET compound id; decoys are
/// distinguished from targets solely by the decoy flag on the transitions.
/// Compounds absent from @p TargetPeptideMap (e.g. skipped earlier for too
/// many permutations) are ignored.
void MRMAssay::generateDecoyInSilicoMapLight_(const OpenSwath::LightTargetedExperiment& exp,
const std::vector<String>& fragment_types,
const std::vector<size_t>& fragment_charges,
bool enable_specific_losses,
bool enable_unspecific_losses,
bool enable_ms2_precursors,
const std::vector<std::pair<double, double> >& swathes,
int round_decPow,
TargetDecoyMapLightT& TargetDecoyMap,
const PeptideMapT& TargetPeptideMap,
const std::map<String, String>& DecoySequenceMap,
IonMapT& DecoyIonMap,
PeptideMapT& DecoyPeptideMap)
{
OpenMS::MRMIonSeries mrmis;
Size progress = 0;
startProgress(0, exp.compounds.size(), "Generation of decoy in silico peptide map (Light)");
for (const auto& compound : exp.compounds)
{
setProgress(progress++);
// Skip non-peptide compounds (metabolites)
if (!compound.isPeptide())
{
continue;
}
// Skip compounds that are not in target peptide map (e.g., those with too many permutations)
if (TargetPeptideMap.find(compound.id) == TargetPeptideMap.end())
{
continue;
}
// Reconstruct AASequence from compound (including modifications)
OpenMS::AASequence peptide_sequence;
try
{
peptide_sequence = getAASequenceFromLightCompound(compound);
}
catch (Exception::BaseException&)
{
continue;
}
String unmodified_sequence = peptide_sequence.toUnmodifiedString();
// Check if we have a decoy sequence for this target
auto decoy_it = DecoySequenceMap.find(unmodified_sequence);
if (decoy_it == DecoySequenceMap.end())
{
continue;
}
// Create decoy compound - keep same id as target (same as heavy version)
// Decoy transitions reference target compound ids since no decoy compounds
// are added to exp.compounds. Decoys are distinguished by the decoy flag.
OpenSwath::LightCompound decoy_compound = compound;
// Build decoy sequence with modifications
AASequence decoy_sequence = AASequence::fromString(decoy_it->second);
// Generate theoretical peptidoforms for decoy using target as template
const std::vector<AASequence> alternative_peptide_sequences =
generateTheoreticalPeptidoformsDecoy_(peptide_sequence, decoy_sequence);
// Fall back to charge 1 if the compound carries no positive charge
int precursor_charge = compound.charge > 0 ? compound.charge : 1;
// Use TARGET peptide's precursor for SWATH index (same as heavy version)
double target_precursor_mz = peptide_sequence.getMZ(precursor_charge);
int precursor_swath = getSwath_(swathes, target_precursor_mz);
// Iterate over all peptidoforms
for (const auto& alt_aa : alternative_peptide_sequences)
{
// Cache string representations to avoid repeated conversions
const String unmod_str = alt_aa.toUnmodifiedString();
const String mod_str = alt_aa.toString();
// Generate theoretical ion series
// NOTE(review): unlike the heavy variant, round_decPow is not passed to
// getIonSeries() here; fragment m/z values are rounded explicitly below --
// confirm the default rounding inside getIonSeries() is acceptable.
auto ionseries = mrmis.getIonSeries(alt_aa, precursor_charge,
fragment_types, fragment_charges, enable_specific_losses,
enable_unspecific_losses);
if (enable_ms2_precursors)
{
// Use TARGET precursor m/z (same as heavy version)
double prec_mz = Math::roundDecimal(target_precursor_mz, round_decPow);
DecoyIonMap[precursor_swath][unmod_str].emplace_back(prec_mz, mod_str);
// Use TARGET compound id as key (same as heavy version)
DecoyPeptideMap[compound.id].emplace_back("MS2_Precursor_i0", prec_mz);
}
for (const auto& im_it : ionseries)
{
double fragment_mz = Math::roundDecimal(im_it.second, round_decPow);
DecoyIonMap[precursor_swath][unmod_str].emplace_back(fragment_mz, mod_str);
// Use TARGET compound id as key (same as heavy version)
DecoyPeptideMap[compound.id].emplace_back(im_it.first, fragment_mz);
}
}
// Store decoy compound in map
// NOTE(review): this entry is created even if alternative_peptide_sequences
// is empty (no decoy transitions generated) -- confirm downstream code
// tolerates TargetDecoyMap entries without matching DecoyPeptideMap entries.
decoy_compound.sequence = alternative_peptide_sequences.empty() ? decoy_it->second : alternative_peptide_sequences[0].toString();
TargetDecoyMap[compound.id] = decoy_compound;
}
endProgress();
}
/// Generates UIS (identifying) target transitions for the light (compact) data model.
/// For every peptide in @p TargetPeptideMap, its pre-computed fragment list is matched
/// against the (pre-sorted) @p TargetIonMap of the peptide's SWATH window; every match
/// becomes an identifying LightTransition annotated with the peptidoforms it can
/// discriminate. New transitions are appended to @p transitions.
void MRMAssay::generateTargetAssaysLight_(const OpenSwath::LightTargetedExperiment& exp,
                                          std::vector<OpenSwath::LightTransition>& transitions,
                                          double mz_threshold,
                                          const std::vector<std::pair<double, double> >& swathes,
                                          int round_decPow,
                                          const PeptideMapT& TargetPeptideMap,
                                          const IonMapT& TargetIonMap)
{
  // Build compound lookup map (compound id -> compound) for O(1) access
  std::unordered_map<std::string, const OpenSwath::LightCompound*> compound_map;
  for (const auto& compound : exp.compounds)
  {
    compound_map[compound.id] = &compound;
  }
  Size progress = 0;
  startProgress(0, TargetPeptideMap.size(), "Generation of target identification transitions (Light)");
  // running index makes the generated transition names unique across calls
  size_t transition_idx = transitions.size();
  for (const auto& pep_it : TargetPeptideMap)
  {
    setProgress(progress++);
    const String& peptide_id = pep_it.first;
    auto comp_it = compound_map.find(peptide_id);
    if (comp_it == compound_map.end())
    {
      continue;
    }
    const OpenSwath::LightCompound* compound = comp_it->second;
    // Reconstruct AASequence from compound (including modifications)
    OpenMS::AASequence peptide_sequence;
    try
    {
      peptide_sequence = getAASequenceFromLightCompound(*compound);
    }
    catch (Exception::BaseException&)
    {
      // sequence could not be reconstructed (e.g. unknown modification) -> skip peptide
      continue;
    }
    int precursor_charge = compound->charge > 0 ? compound->charge : 1;
    double precursor_mz = peptide_sequence.getMZ(precursor_charge);
    int precursor_swath = getSwath_(swathes, precursor_mz);
    String unmod_str = peptide_sequence.toUnmodifiedString();
    // Check if we have data for this SWATH/sequence combination
    auto swath_it = TargetIonMap.find(precursor_swath);
    if (swath_it == TargetIonMap.end())
    {
      continue;
    }
    auto seq_it = swath_it->second.find(unmod_str);
    if (seq_it == swath_it->second.end())
    {
      continue;
    }
    // Iterate over transitions (already sorted and deduplicated by sortAndDeduplicatePeptideMap)
    for (const auto& ion : pep_it.second)
    {
      const std::string& annotation = ion.first;
      double product_mz = ion.second;
      // Find matching peptidoforms
      std::vector<std::string> matching_pfs = getMatchingPeptidoforms_(
          product_mz, seq_it->second, mz_threshold);
      if (matching_pfs.empty())
      {
        continue;
      }
      // Create light transition
      OpenSwath::LightTransition tr;
      tr.transition_name = String(transition_idx++) + "_UIS_" + peptide_id + "_" +
          String(Math::roundDecimal(precursor_mz, round_decPow)) + "_" +
          String(Math::roundDecimal(product_mz, round_decPow));
      tr.peptide_ref = peptide_id;
      tr.precursor_mz = precursor_mz;
      tr.product_mz = product_mz;
      tr.library_intensity = -1.0; // Placeholder for IPF identifying transitions
      tr.setDecoy(false);
      tr.setDetectingTransition(false);
      tr.setIdentifyingTransition(true);
      tr.setQuantifyingTransition(false);
      tr.peptidoforms = matching_pfs;
      // Parse fragment type/number/charge from annotation to populate compact fields
      // Annotation format: b4^1, y10^2, etc.
      if (!annotation.empty() && annotation != "MS2_Precursor_i0")
      {
        tr.setFragmentType(annotation.substr(0, 1));
        std::string num_str;
        size_t i = 1;
        // cast to unsigned char: calling std::isdigit with a plain (possibly
        // negative) char is undefined behavior
        for (; i < annotation.size() && std::isdigit(static_cast<unsigned char>(annotation[i])); ++i)
        {
          num_str += annotation[i];
        }
        if (!num_str.empty())
        {
          tr.fragment_nr = static_cast<int16_t>(std::stoi(num_str));
        }
        // Extract charge after '^'
        if (i < annotation.size() && annotation[i] == '^')
        {
          std::string charge_str;
          for (size_t j = i + 1; j < annotation.size() && std::isdigit(static_cast<unsigned char>(annotation[j])); ++j)
          {
            charge_str += annotation[j];
          }
          if (!charge_str.empty())
          {
            tr.fragment_charge = static_cast<int8_t>(std::stoi(charge_str));
          }
        }
      }
      transitions.push_back(std::move(tr));
    }
  }
  endProgress();
}
/// Generates UIS (identifying) decoy transitions for the light (compact) data model.
/// @p DecoyPeptideMap is keyed by TARGET compound ids (mirroring the heavy code path);
/// decoy fragments are looked up in the TARGET precursor's SWATH window, and any decoy
/// transition whose m/z also matches a target fragment of the same peptide is skipped.
/// New transitions are appended to @p transitions.
void MRMAssay::generateDecoyAssaysLight_(const OpenSwath::LightTargetedExperiment& exp,
                                         std::vector<OpenSwath::LightTransition>& transitions,
                                         double mz_threshold,
                                         const std::vector<std::pair<double, double> >& swathes,
                                         int round_decPow,
                                         const PeptideMapT& DecoyPeptideMap,
                                         const TargetDecoyMapLightT& TargetDecoyMap,
                                         const IonMapT& DecoyIonMap,
                                         const IonMapT& TargetIonMap)
{
  // Build compound lookup map for targets
  std::unordered_map<std::string, const OpenSwath::LightCompound*> compound_map;
  for (const auto& compound : exp.compounds)
  {
    compound_map[compound.id] = &compound;
  }
  Size progress = 0;
  startProgress(0, DecoyPeptideMap.size(), "Generation of decoy identification transitions (Light)");
  // running index makes the generated transition names unique across calls
  size_t transition_idx = transitions.size();
  for (const auto& pep_it : DecoyPeptideMap)
  {
    setProgress(progress++);
    // DecoyPeptideMap keys are TARGET compound ids (same as heavy version)
    const String& target_id = pep_it.first;
    auto decoy_comp_it = TargetDecoyMap.find(target_id);
    if (decoy_comp_it == TargetDecoyMap.end())
    {
      continue;
    }
    const OpenSwath::LightCompound& decoy_compound = decoy_comp_it->second;
    auto target_comp_it = compound_map.find(target_id);
    if (target_comp_it == compound_map.end())
    {
      continue;
    }
    const OpenSwath::LightCompound* target_compound = target_comp_it->second;
    OpenMS::AASequence decoy_sequence;
    OpenMS::AASequence target_sequence;
    try
    {
      // Decoy sequence is already a full modified string from generateDecoyInSilicoMapLight_
      decoy_sequence = AASequence::fromString(decoy_compound.sequence);
      // Target sequence needs to be reconstructed from compound + modifications
      target_sequence = getAASequenceFromLightCompound(*target_compound);
    }
    catch (Exception::BaseException&)
    {
      // either sequence could not be parsed/reconstructed -> skip this decoy
      continue;
    }
    int precursor_charge = decoy_compound.charge > 0 ? decoy_compound.charge : 1;
    double decoy_precursor_mz = decoy_sequence.getMZ(precursor_charge);
    double target_precursor_mz = target_sequence.getMZ(precursor_charge);
    // Use TARGET precursor swath for lookups - same as heavy version
    // (DecoyIonMap was populated using target swath in generateDecoyInSilicoMapLight_)
    int target_precursor_swath = getSwath_(swathes, target_precursor_mz);
    String decoy_unmod_str = decoy_sequence.toUnmodifiedString();
    String target_unmod_str = target_sequence.toUnmodifiedString();
    // Check if we have decoy ion data (use TARGET swath, not decoy swath)
    auto decoy_swath_it = DecoyIonMap.find(target_precursor_swath);
    if (decoy_swath_it == DecoyIonMap.end())
    {
      continue;
    }
    auto decoy_seq_it = decoy_swath_it->second.find(decoy_unmod_str);
    if (decoy_seq_it == decoy_swath_it->second.end())
    {
      continue;
    }
    // Iterate over transitions (already sorted and deduplicated by sortAndDeduplicatePeptideMap)
    for (const auto& ion : pep_it.second)
    {
      const std::string& annotation = ion.first;
      double product_mz = ion.second;
      // Find matching decoy peptidoforms
      std::vector<std::string> matching_pfs = getMatchingPeptidoforms_(
          product_mz, decoy_seq_it->second, mz_threshold);
      if (matching_pfs.empty())
      {
        continue;
      }
      // Check for overlap with target transitions
      auto target_swath_it = TargetIonMap.find(target_precursor_swath);
      if (target_swath_it != TargetIonMap.end())
      {
        auto target_seq_it = target_swath_it->second.find(target_unmod_str);
        if (target_seq_it != target_swath_it->second.end())
        {
          std::vector<std::string> target_overlap = getMatchingPeptidoforms_(
              product_mz, target_seq_it->second, mz_threshold);
          if (!target_overlap.empty())
          {
            // Skip this decoy transition as it overlaps with target
            continue;
          }
        }
      }
      // Create light transition
      OpenSwath::LightTransition tr;
      tr.transition_name = String(transition_idx++) + "_UISDECOY_" + decoy_compound.id + "_" +
          String(Math::roundDecimal(decoy_precursor_mz, round_decPow)) + "_" +
          String(Math::roundDecimal(product_mz, round_decPow));
      tr.peptide_ref = decoy_compound.id;
      tr.precursor_mz = decoy_precursor_mz;
      tr.product_mz = product_mz;
      tr.library_intensity = -1.0; // Placeholder for IPF identifying transitions
      tr.setDecoy(true);
      tr.setDetectingTransition(false);
      tr.setIdentifyingTransition(true);
      tr.setQuantifyingTransition(false);
      tr.peptidoforms = matching_pfs;
      // Parse fragment type/number/charge from annotation to populate compact fields
      // Annotation format: b4^1, y10^2, etc.
      if (!annotation.empty() && annotation != "MS2_Precursor_i0")
      {
        tr.setFragmentType(annotation.substr(0, 1));
        std::string num_str;
        size_t i = 1;
        // cast to unsigned char: calling std::isdigit with a plain (possibly
        // negative) char is undefined behavior
        for (; i < annotation.size() && std::isdigit(static_cast<unsigned char>(annotation[i])); ++i)
        {
          num_str += annotation[i];
        }
        if (!num_str.empty())
        {
          tr.fragment_nr = static_cast<int16_t>(std::stoi(num_str));
        }
        // Extract charge after '^'
        if (i < annotation.size() && annotation[i] == '^')
        {
          std::string charge_str;
          for (size_t j = i + 1; j < annotation.size() && std::isdigit(static_cast<unsigned char>(annotation[j])); ++j)
          {
            charge_str += annotation[j];
          }
          if (!charge_str.empty())
          {
            tr.fragment_charge = static_cast<int8_t>(std::stoi(charge_str));
          }
        }
      }
      transitions.push_back(std::move(tr));
    }
  }
  endProgress();
}
/// Entry point for UIS (identifying) transition generation on the light data model:
/// builds the target in-silico fragment maps, derives identifying target transitions,
/// optionally derives matching decoy transitions, and appends everything to @p exp.
void MRMAssay::uisTransitionsLight(OpenSwath::LightTargetedExperiment& exp,
                                   const std::vector<String>& fragment_types,
                                   const std::vector<size_t>& fragment_charges,
                                   bool enable_specific_losses,
                                   bool enable_unspecific_losses,
                                   bool enable_ms2_precursors,
                                   double mz_threshold,
                                   const std::vector<std::pair<double, double> >& swathes,
                                   int round_decPow,
                                   size_t max_num_alternative_localizations,
                                   int shuffle_seed,
                                   bool disable_decoy_transitions)
{
  // Working maps for the target side (same std::string-keyed types as the heavy code path)
  SequenceMapT target_sequences;
  IonMapT target_ions;
  PeptideMapT target_peptides;
  std::vector<OpenSwath::LightTransition> uis_transitions;

  // Step 1: build the target in-silico fragment map
  generateTargetInSilicoMapLight_(exp, fragment_types, fragment_charges,
      enable_specific_losses, enable_unspecific_losses, enable_ms2_precursors,
      swathes, round_decPow, max_num_alternative_localizations,
      target_sequences, target_ions, target_peptides);

  // m/z-sorted ion map enables binary search in getMatchingPeptidoforms_
  sortIonMap(target_ions);
  // pre-sorted/deduplicated peptide map avoids repeated sorting in generateTargetAssaysLight_
  sortAndDeduplicatePeptideMap(target_peptides);

  // Reserve capacity up front (estimate: ~6 transitions per peptide; doubled when decoys follow)
  size_t estimated_capacity = target_peptides.size() * 6;
  if (!disable_decoy_transitions)
  {
    estimated_capacity *= 2;
  }
  uis_transitions.reserve(estimated_capacity);

  // Step 2: target identification transitions
  generateTargetAssaysLight_(exp, uis_transitions, mz_threshold, swathes,
      round_decPow, target_peptides, target_ions);

  if (!disable_decoy_transitions)
  {
    // Step 3: decoy sequences and decoy in-silico map
    std::map<String, String> decoy_sequences;
    IonMapT decoy_ions;
    PeptideMapT decoy_peptides;
    TargetDecoyMapLightT target_decoy_map;
    // The heavy-path generateDecoySequences_ only operates on a SequenceMapT, so it is reused here
    generateDecoySequences_(target_sequences, decoy_sequences, shuffle_seed);
    generateDecoyInSilicoMapLight_(exp, fragment_types, fragment_charges,
        enable_specific_losses, enable_unspecific_losses, enable_ms2_precursors,
        swathes, round_decPow, target_decoy_map, target_peptides,
        decoy_sequences, decoy_ions, decoy_peptides);
    // same sorting/deduplication as on the target side, for generateDecoyAssaysLight_
    sortIonMap(decoy_ions);
    sortAndDeduplicatePeptideMap(decoy_peptides);
    // Step 4: decoy identification transitions
    generateDecoyAssaysLight_(exp, uis_transitions, mz_threshold, swathes,
        round_decPow, decoy_peptides, target_decoy_map, decoy_ions, target_ions);
  }

  // Append the newly generated transitions to the experiment
  exp.transitions.reserve(exp.transitions.size() + uis_transitions.size());
  for (auto& new_tr : uis_transitions)
  {
    exp.transitions.push_back(std::move(new_tr));
  }
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/TransitionTSVFile.cpp | .cpp | 78,945 | 2,001 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/TransitionTSVFile.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/DataAccessHelper.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/CHEMISTRY/ResidueDB.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/FORMAT/TextFile.h>
#include <utility>
namespace OpenMS
{
// Primary template: copies the column named @p header_name from @p tmp_line into
// @p value (after stripping quote and comma characters via String::remove).
// Returns true iff the column exists in @p header_dict.
template<class T>
bool extractName(T& value, const std::string& header_name,
                 const std::vector<std::string>& tmp_line,
                 const std::map<std::string, int>& header_dict)
{
  const auto col = header_dict.find(header_name);
  if (col == header_dict.end())
  {
    return false;
  }
  value = tmp_line[col->second];
  // strip quoting and stray separator characters
  value = value.remove('"');
  value = value.remove('\'');
  value = value.remove(',');
  return true;
}
// Specialization for int: parses the (non-empty) column as an integer.
// Returns false if the column is absent or empty; @p value is left untouched then.
template<>
bool extractName<int>(int& value, const std::string& header_name,
                      const std::vector<std::string>& tmp_line,
                      const std::map<std::string, int>& header_dict)
{
  const auto col = header_dict.find(header_name);
  if (col == header_dict.end())
  {
    return false;
  }
  const String field(tmp_line[col->second]);
  if (field.empty())
  {
    return false;
  }
  value = field.toInt();
  return true;
}
// Specialization for double: parses the (non-empty) column as a floating-point value.
// Returns false if the column is absent or empty; @p value is left untouched then.
template<>
bool extractName<double>(double& value, const std::string& header_name,
                         const std::vector<std::string>& tmp_line,
                         const std::map<std::string, int>& header_dict)
{
  const auto col = header_dict.find(header_name);
  if (col == header_dict.end())
  {
    return false;
  }
  const String field(tmp_line[col->second]);
  if (field.empty())
  {
    return false;
  }
  value = field.toDouble();
  return true;
}
// Specialization for bool: accepts "1"/"TRUE" (any case) and "0"/"FALSE" (any case).
// Returns false if the column is absent, empty, or not a recognized boolean token;
// @p value is only written on success.
template<>
bool extractName<bool>(bool& value, const std::string& header_name,
                       const std::vector<std::string>& tmp_line,
                       const std::map<std::string, int>& header_dict)
{
  const auto col = header_dict.find(header_name);
  if (col == header_dict.end())
  {
    return false;
  }
  OpenMS::String field = tmp_line[col->second];
  if (field.empty())
  {
    return false;
  }
  if (field == "1" || field.toUpper() == "TRUE")
  {
    value = true;
    return true;
  }
  if (field == "0" || field.toUpper() == "FALSE")
  {
    value = false;
    return true;
  }
  return false;
}
/// Default constructor: registers this reader's parameters with DefaultParamHandler,
/// then copies the defaults into param_ and caches them in member variables.
TransitionTSVFile::TransitionTSVFile() :
  DefaultParamHandler("TransitionTSVFile")
{
  defaults_.setValue("retentionTimeInterpretation", "iRT", "How to interpret the provided retention time (the retention time column can either be interpreted to be in iRT, minutes or seconds)", {"advanced"});
  defaults_.setValidStrings("retentionTimeInterpretation", {"iRT","seconds","minutes"});
  defaults_.setValue("override_group_label_check", "false", "Override an internal check that assures that all members of the same PeptideGroupLabel have the same PeptideSequence (this ensures that only different isotopic forms of the same peptide can be grouped together in the same label group). Only turn this off if you know what you are doing.", {"advanced"});
  defaults_.setValidStrings("override_group_label_check", {"true","false"});
  defaults_.setValue("force_invalid_mods", "false", "Force reading even if invalid modifications are encountered (OpenMS may not recognize the modification)", {"advanced"});
  defaults_.setValidStrings("force_invalid_mods", {"true","false"});
  // write defaults into Param object param_
  defaultsToParam_();
  // cache the parameter values in the corresponding member variables
  updateMembers_();
}
// Defaulted destructor: all members clean up through their own destructors.
TransitionTSVFile::~TransitionTSVFile() = default;
/// Re-reads the cached member variables from param_; invoked by the
/// DefaultParamHandler machinery whenever parameters change.
void TransitionTSVFile::updateMembers_()
{
  retentionTimeInterpretation_ = param_.getValue("retentionTimeInterpretation").toString();
  override_group_label_check_ = param_.getValue("override_group_label_check").toBool();
  force_invalid_mods_ = param_.getValue("force_invalid_mods").toBool();
}
// Canonical TSV column names, in order. NOTE(review): presumably this is the
// header written on TSV export — confirm against the writer code (not visible
// here). The reader additionally accepts several alias headers per column
// (see readUnstructuredTSVInput_).
const std::vector<std::string> TransitionTSVFile::header_names_ =
{
  "PrecursorMz",
  "ProductMz",
  "PrecursorCharge",
  "ProductCharge",
  "LibraryIntensity",
  "NormalizedRetentionTime",
  "PeptideSequence",
  "ModifiedPeptideSequence",
  "PeptideGroupLabel",
  "LabelType",
  "CompoundName",
  "SumFormula",
  "SMILES",
  "Adducts",
  "ProteinId",
  "UniprotId",
  "GeneName",
  "FragmentType",
  "FragmentSeriesNumber",
  "Annotation",
  "CollisionEnergy",
  "PrecursorIonMobility",
  "TransitionGroupId",
  "TransitionId",
  "Decoy",
  "DetectingTransition",
  "IdentifyingTransition",
  "QuantifyingTransition",
  "Peptidoforms"
};
void TransitionTSVFile::getTSVHeader_(const std::string& line, char& delimiter, std::map<std::string, int>& header_dict) const
{
std::string tmp;
std::vector<std::string> header;
Size min_header_size = 4;
const std::array<char, 3> possible_delimiters = {'\t', ';', ','};
for (auto possible_delimiter : possible_delimiters)
{
header.clear();
std::stringstream lineStream(line);
delimiter = possible_delimiter;
while (std::getline(lineStream, tmp, delimiter))
{
String tmp2(tmp);
tmp2 = tmp2.remove('"');
tmp2 = tmp2.remove('\'');
tmp2 = tmp2.remove(',');
header.push_back(tmp2);
}
if (header.size() >= min_header_size)
{
break; // found the delimiter, got the correct header
}
}
for (Size i = 0; i < header.size(); i++)
{
header_dict[header[i]] = i;
}
char txt_delimiter = delimiter;
if (txt_delimiter == '\t')
{
txt_delimiter = 't';
}
// could not determine the delimiter correctly
if (header.size() < min_header_size)
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Determined your csv/tsv file to have delimiter '" + (String)txt_delimiter +
"', but the parsed header has only " + (String)header.size() + " fields instead of the minimal " +
(String)min_header_size + ". Please check your input file.");
}
}
/// Parses a transition list from an unstructured TSV/CSV (or headerless SpectraST
/// .mrm) file into TSVTransition records.
///
/// @param filename path to the input file
/// @param filetype FileTypes::MRM selects the fixed SpectraST column layout (no
///        header line); any other type expects a header line whose delimiter is
///        auto-detected by getTSVHeader_()
/// @param transition_list output vector; one entry per accepted input row
/// @throws Exception::IllegalArgument if a required column is missing or a row's
///         field count does not match the header
void TransitionTSVFile::readUnstructuredTSVInput_(const char* filename, FileTypes::Type filetype, std::vector<TSVTransition>& transition_list)
{
  std::ifstream data(filename);
  std::string line;
  std::string tmp;
  // read header
  std::vector<std::string> tmp_line;
  std::map<std::string, int> header_dict;
  char delimiter = ',';
  // SpectraST MRM Files do not have a header
  if (filetype == FileTypes::MRM)
  {
    // fixed SpectraST .mrm column layout
    delimiter = '\t';
    header_dict["SpectraSTBestSample"] = 0;
    header_dict["SpectraSTmaxNumUsed/totNumUsed"] = 1;
    header_dict["SpectraSTpI"] = 2;
    header_dict["PrecursorMz"] = 3;
    header_dict["SpectraSTRetentionTime"] = 4;
    header_dict["ProductMz"] = 5;
    header_dict["LibraryIntensity"] = 6;
    header_dict["SpectraSTAnnotation"] = 7;
    header_dict["FragmentCharge"] = 8;
    header_dict["SpectraSTFullPeptideName"] = 9;
    header_dict["SpectraSTUnknown"] = 10;
    header_dict["SpectraSTNumberOfProteinsMappedTo"] = 11;
    header_dict["ProteinName"] = 12;
  }
  // Read header for TSV input
  else
  {
    TextFile::getLine(data, line);
    getTSVHeader_(line, delimiter, header_dict);
  }
  bool spectrast_legacy = false; // we will check below if SpectraST was run in legacy (<5.0) mode or if the RT normalization was forgotten.
  int cnt = 0;
  while (TextFile::getLine(data, line)) // make sure line endings are handled correctly
  {
    line.push_back(delimiter); // avoid losing last column if it is empty
    std::stringstream lineStream(line);
    while (std::getline(lineStream, tmp, delimiter)) // default getline is fine here, we only want to split the line
    {
      tmp_line.push_back(tmp);
    }
    cnt++;
#ifdef TRANSITIONTSVREADER_TESTING
    for (Size i = 0; i < tmp_line.size(); i++)
    {
      std::cout << "line " << i << " " << tmp_line[i] << '\n';
    }
    for (const auto& iter : header_dict)
    {
      std::cout << "header " << iter.first << " " << iter.second << '\n';
    }
#endif
    if (tmp_line.size() != header_dict.size())
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "Error reading the file on line " + String(cnt) + ": length of the header and length of the line" +
          " do not match: " + String(tmp_line.size()) + " != " + String(header_dict.size()));
    }
    TSVTransition mytransition;
    bool skip_transition = false; // skip unannotated transitions in SpectraST MRM files
    //// Required columns (they are guaranteed to be present, see getTSVHeader_)
    // PrecursorMz
    mytransition.precursor = String(tmp_line[header_dict["PrecursorMz"]]).toDouble();
    // ProductMz
    // Each "!extractName(...) && !extractName(...)" chain below tries the known
    // alias headers for a column in priority order; && short-circuits so later
    // aliases are only consulted when earlier ones were absent.
    if (!extractName<double>(mytransition.product, "ProductMz", tmp_line, header_dict) &&
        !extractName<double>(mytransition.product, "FragmentMz", tmp_line, header_dict)) // Spectronaut
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "Expected a header named ProductMz or FragmentMz but found none");
    }
    // LibraryIntensity
    if (!extractName<double>(mytransition.library_intensity, "LibraryIntensity", tmp_line, header_dict) &&
        !extractName<double>(mytransition.library_intensity, "RelativeIntensity", tmp_line, header_dict) && // Spectronaut
        !extractName<double>(mytransition.library_intensity, "RelativeFragmentIntensity", tmp_line, header_dict)) // Spectronaut
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "Expected a header named LibraryIntensity or RelativeFragmentIntensity but found none");
    }
    //// Additional columns for both proteomics and metabolomics
    // NormalizedRetentionTime
    if (!extractName<double>(mytransition.rt_calibrated, "RetentionTimeCalculatorScore", tmp_line, header_dict) && // Skyline
        !extractName<double>(mytransition.rt_calibrated, "iRT", tmp_line, header_dict) && // Spectronaut
        !extractName<double>(mytransition.rt_calibrated, "NormalizedRetentionTime", tmp_line, header_dict) &&
        !extractName<double>(mytransition.rt_calibrated, "RetentionTime", tmp_line, header_dict) &&
        !extractName<double>(mytransition.rt_calibrated, "Tr_recalibrated", tmp_line, header_dict))
    {
      if (header_dict.find("SpectraSTRetentionTime") != header_dict.end())
      {
        // SpectraST encodes RT as "raw(iRT)"; may also flag legacy mode
        spectrastRTExtract(tmp_line[header_dict["SpectraSTRetentionTime"]], mytransition.rt_calibrated, spectrast_legacy);
      }
      else
      {
        throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
            "Expected a header named RetentionTime, NormalizedRetentionTime, iRT, RetentionTimeCalculatorScore, Tr_recalibrated or SpectraSTRetentionTime but found none");
      }
    }
    // PrecursorCharge
    // The void(...) casts below deliberately discard the bool result of the
    // alias chains: these columns are optional, so absence is not an error.
    void(!extractName(mytransition.precursor_charge, "PrecursorCharge", tmp_line, header_dict) &&
         !extractName(mytransition.precursor_charge, "Charge", tmp_line, header_dict)); // charge is assumed to be the charge of the precursor
    void(!extractName(mytransition.fragment_type, "FragmentType", tmp_line, header_dict) &&
         !extractName(mytransition.fragment_type, "FragmentIonType", tmp_line, header_dict)); // Skyline
    void(!extractName(mytransition.fragment_charge, "FragmentCharge", tmp_line, header_dict) &&
         !extractName(mytransition.fragment_charge, "ProductCharge", tmp_line, header_dict));
    void(!extractName<int>(mytransition.fragment_nr, "FragmentSeriesNumber", tmp_line, header_dict) &&
         !extractName<int>(mytransition.fragment_nr, "FragmentNumber", tmp_line, header_dict) &&
         !extractName<int>(mytransition.fragment_nr, "FragmentIonOrdinal", tmp_line, header_dict));
    void(extractName<double>(mytransition.drift_time, "PrecursorIonMobility", tmp_line, header_dict));
    void(extractName<double>(mytransition.fragment_mzdelta, "FragmentMzDelta", tmp_line, header_dict));
    void(extractName<int>(mytransition.fragment_modification, "FragmentModification", tmp_line, header_dict));
    //// Proteomics
    extractName(mytransition.GeneName, "GeneName", tmp_line, header_dict);
    String proteins;
    void(!extractName(proteins, "ProteinName", tmp_line, header_dict) &&
         !extractName(proteins, "ProteinId", tmp_line, header_dict)); // Spectronaut
    if (proteins != "NA" && !proteins.empty())
    {
      // multiple protein accessions are ';'-separated
      proteins.split(';', mytransition.ProteinName);
    }
    void(extractName(mytransition.peptide_group_label, "PeptideGroupLabel", tmp_line, header_dict));
    void(extractName(mytransition.label_type, "LabelType", tmp_line, header_dict));
    void(!extractName(mytransition.PeptideSequence, "PeptideSequence", tmp_line, header_dict) &&
         !extractName(mytransition.PeptideSequence, "Sequence", tmp_line, header_dict) && // Skyline
         !extractName(mytransition.PeptideSequence, "StrippedSequence", tmp_line, header_dict)); // Spectronaut
    void(!extractName(mytransition.FullPeptideName, "FullUniModPeptideName", tmp_line, header_dict) &&
         !extractName(mytransition.FullPeptideName, "FullPeptideName", tmp_line, header_dict) &&
         !extractName(mytransition.FullPeptideName, "ModifiedSequence", tmp_line, header_dict) && // Spectronaut
         !extractName(mytransition.FullPeptideName, "ModifiedPeptideSequence", tmp_line, header_dict));
    //// IPF
    String peptidoforms;
    void(!extractName<bool>(mytransition.detecting_transition, "detecting_transition", tmp_line, header_dict) &&
         !extractName<bool>(mytransition.detecting_transition, "DetectingTransition", tmp_line, header_dict));
    void(!extractName<bool>(mytransition.identifying_transition, "identifying_transition", tmp_line, header_dict) &&
         !extractName<bool>(mytransition.identifying_transition, "IdentifyingTransition", tmp_line, header_dict));
    void(!extractName<bool>(mytransition.quantifying_transition, "quantifying_transition", tmp_line, header_dict) &&
         !extractName<bool>(mytransition.quantifying_transition, "QuantifyingTransition", tmp_line, header_dict) &&
         !extractName<bool>(mytransition.quantifying_transition, "Quantitative", tmp_line, header_dict)); // Skyline
    void(extractName(peptidoforms, "Peptidoforms", tmp_line, header_dict));
    // peptidoforms are '|'-separated
    peptidoforms.split('|', mytransition.peptidoforms);
    //// Targeted Metabolomics
    void(extractName(mytransition.CompoundName, "CompoundName", tmp_line, header_dict));
    void(extractName(mytransition.SumFormula, "SumFormula", tmp_line, header_dict));
    void(extractName(mytransition.SMILES, "SMILES", tmp_line, header_dict));
    void(extractName(mytransition.Adducts, "Adducts", tmp_line, header_dict));
    //// Meta
    void(extractName(mytransition.Annotation, "Annotation", tmp_line, header_dict));
    // UniprotId
    String uniprot_ids;
    void(!extractName(uniprot_ids, "UniprotId", tmp_line, header_dict) &&
         !extractName(uniprot_ids, "UniprotID", tmp_line, header_dict));
    if (uniprot_ids != "NA" && !uniprot_ids.empty())
    {
      uniprot_ids.split(';', mytransition.uniprot_id);
    }
    void(!extractName<double>(mytransition.CE, "CE", tmp_line, header_dict) &&
         !extractName<double>(mytransition.CE, "CollisionEnergy", tmp_line, header_dict));
    // Decoy
    void(!extractName<bool>(mytransition.decoy, "decoy", tmp_line, header_dict) &&
         !extractName<bool>(mytransition.decoy, "Decoy", tmp_line, header_dict) &&
         !extractName<bool>(mytransition.decoy, "IsDecoy", tmp_line, header_dict));
    if (header_dict.find("SpectraSTAnnotation") != header_dict.end())
    {
      // may flag the row for skipping when the annotation cannot be parsed
      skip_transition = spectrastAnnotationExtract(tmp_line[header_dict["SpectraSTAnnotation"]], mytransition);
    }
    //// Generate Group IDs
    // SpectraST
    if (filetype == FileTypes::MRM)
    {
      // SpectraST full peptide names look like "PEPTIDE/2" (sequence/charge)
      std::vector<String> substrings;
      String(tmp_line[header_dict["SpectraSTFullPeptideName"]]).split("/", substrings);
      AASequence peptide = AASequence::fromString(substrings[0]);
      mytransition.FullPeptideName = peptide.toString();
      mytransition.PeptideSequence = peptide.toUnmodifiedString();
      mytransition.precursor_charge = substrings[1];
      mytransition.transition_name = String(cnt);
      mytransition.group_id = mytransition.FullPeptideName + String("_") + String(mytransition.precursor_charge);
    }
    // Generate transition_group_id and transition_name if not defined
    else
    {
      // Use TransitionId if available, else generate from attributes
      if (!extractName(mytransition.transition_name, "transition_name", tmp_line, header_dict) &&
          !extractName(mytransition.transition_name, "TransitionName", tmp_line, header_dict) &&
          !extractName(mytransition.transition_name, "TransitionId", tmp_line, header_dict))
      {
        mytransition.transition_name = String(cnt);
      }
      // Use TransitionGroupId if available, else generate from attributes
      if (!extractName(mytransition.group_id, "transition_group_id", tmp_line, header_dict) &&
          !extractName(mytransition.group_id, "TransitionGroupId", tmp_line, header_dict) &&
          !extractName(mytransition.group_id, "TransitionGroupName", tmp_line, header_dict))
      {
        mytransition.group_id = AASequence::fromString(mytransition.FullPeptideName).toString() + String("_") + String(mytransition.precursor_charge);
      }
    }
    cleanupTransitions_(mytransition);
    if (!skip_transition)
    {
      transition_list.push_back(mytransition);
    }
#ifdef TRANSITIONTSVREADER_TESTING
    std::cout << mytransition.precursor << '\n';
    std::cout << mytransition.product << '\n';
    std::cout << mytransition.rt_calibrated << '\n';
    std::cout << mytransition.transition_name << '\n';
    std::cout << mytransition.CE << '\n';
    std::cout << mytransition.library_intensity << '\n';
    std::cout << mytransition.group_id << '\n';
    std::cout << mytransition.decoy << '\n';
    std::cout << mytransition.PeptideSequence << '\n';
    std::cout << mytransition.ProteinName << '\n';
    std::cout << mytransition.Annotation << '\n';
    std::cout << mytransition.FullPeptideName << '\n';
    std::cout << mytransition.precursor_charge << '\n';
    std::cout << mytransition.peptide_group_label << '\n';
    std::cout << mytransition.fragment_charge << '\n';
    std::cout << mytransition.fragment_nr << '\n';
    std::cout << mytransition.fragment_mzdelta << '\n';
    std::cout << mytransition.fragment_modification << '\n';
    std::cout << mytransition.fragment_type << '\n';
    std::cout << mytransition.uniprot_id << '\n';
#endif
    tmp_line.clear();
  }
  if (spectrast_legacy && retentionTimeInterpretation_ == "iRT")
  {
    std::cout << "Warning: SpectraST was not run in RT normalization mode but the converted list was interpreted to have iRT units. Check whether you need to adapt the parameter -algorithm:retentionTimeInterpretation. You can ignore this warning if you used a legacy SpectraST 4.0 file.\n";
  }
}
/// Extracts a retention time from a SpectraST RT field.
/// If SpectraST was run in RT normalization mode, the field looks like "3887.50(57.30)":
/// 3887.50 is the non-normalized RT of the individual/consensus run, 57.30 the
/// normalized iRT — the parenthesized iRT is extracted into @p value.
/// Without parentheses the whole field is taken as the raw RT and
/// @p spectrast_legacy is set to flag legacy (<5.0) / non-normalized output.
void TransitionTSVFile::spectrastRTExtract(const String& str_inp, double & value, bool & spectrast_legacy)
{
  // single-character find overloads avoid constructing temporary search strings
  size_t start_position = str_inp.find('(');
  if (start_position != std::string::npos)
  {
    ++start_position;
    const size_t end_position = str_inp.find(')');
    if (end_position != std::string::npos)
    {
      value = String(str_inp.substr(start_position, end_position - start_position)).toDouble();
    }
    // NOTE(review): if '(' is present but ')' is missing, 'value' is left
    // untouched (pre-existing behavior) — confirm this is intended
  }
  else
  {
    // SpectraST was run without RT Normalization mode
    spectrast_legacy = true;
    value = str_inp.toDouble();
  }
}
/// Parses a SpectraST fragment-ion annotation string into the fragment fields of
/// @p mytransition (fragment_type, fragment_nr, fragment_charge,
/// fragment_modification, fragment_mzdelta); the raw string is kept in Annotation.
/// Example input: "y13^2/0.000,b16-18^2/-0.013,y7-45/0.000" — only the first
/// (best) annotation is parsed; its format is
/// <type><nr>[(+|-)<neutral loss>][^<charge>]/<m/z deviation>.
/// @return true if the transition should be SKIPPED (annotation not a plain
///         sequence ion: immonium/precursor/isotope/internal/unknown), false otherwise
bool TransitionTSVFile::spectrastAnnotationExtract(const String& str_inp, TSVTransition & mytransition)
{
  // Parses SpectraST fragment ion annotations
  // Example: y13^2/0.000,b16-18^2/-0.013,y7-45/0.000
  // Important: m2:8 are not yet supported! See SpectraSTPeakList::annotateInternalFragments for further information
  mytransition.Annotation = str_inp;
  std::vector<String> all_fragment_annotations;
  str_inp.split(",", all_fragment_annotations);
  // only plain b/y-type annotations are parseable; anything containing the
  // characters below denotes a different ion class and is rejected
  if (all_fragment_annotations[0].find("[") == std::string::npos && // non-unique peak annotation
      all_fragment_annotations[0].find("]") == std::string::npos && // non-unique peak annotation
      all_fragment_annotations[0].find("I") == std::string::npos && // immonium ion
      all_fragment_annotations[0].find("p") == std::string::npos && // precursor ion
      all_fragment_annotations[0].find("i") == std::string::npos && // isotope ion
      all_fragment_annotations[0].find("m") == std::string::npos &&
      all_fragment_annotations[0].find("?") == std::string::npos
     )
  {
    // split "y13^2/0.000" into annotation ("y13^2") and m/z deviation ("0.000")
    std::vector<String> best_fragment_annotation_with_deviation;
    all_fragment_annotations[0].split("/", best_fragment_annotation_with_deviation);
    String best_fragment_annotation = best_fragment_annotation_with_deviation[0];
    if (best_fragment_annotation.find("^") != std::string::npos)
    {
      // explicit charge suffix "^z"
      std::vector<String> best_fragment_annotation_charge;
      best_fragment_annotation.split("^", best_fragment_annotation_charge);
      mytransition.fragment_charge = String(best_fragment_annotation_charge[1]);
      best_fragment_annotation = best_fragment_annotation_charge[0];
    }
    else
    {
      mytransition.fragment_charge = 1; // assume 1 (most frequent charge state)
    }
    if (best_fragment_annotation.find("-") != std::string::npos)
    {
      // neutral loss, e.g. "b16-18": type 'b', number 16, modification -18
      std::vector<String> best_fragment_annotation_modification;
      best_fragment_annotation.split("-", best_fragment_annotation_modification);
      mytransition.fragment_type = best_fragment_annotation_modification[0].substr(0, 1);
      mytransition.fragment_nr = String(best_fragment_annotation_modification[0].substr(1)).toInt();
      mytransition.fragment_modification = -1 * String(best_fragment_annotation_modification[1]).toInt();
    }
    else if (best_fragment_annotation.find("+") != std::string::npos)
    {
      // neutral gain, e.g. "y7+45": type 'y', number 7, modification +45
      std::vector<String> best_fragment_annotation_modification;
      best_fragment_annotation.split("+", best_fragment_annotation_modification);
      mytransition.fragment_type = best_fragment_annotation_modification[0].substr(0, 1);
      mytransition.fragment_nr = String(best_fragment_annotation_modification[0].substr(1)).toInt();
      mytransition.fragment_modification = String(best_fragment_annotation_modification[1]).toInt();
    }
    else
    {
      // unmodified fragment, e.g. "y13"
      mytransition.fragment_type = best_fragment_annotation.substr(0, 1);
      mytransition.fragment_nr = String(best_fragment_annotation.substr(1)).toInt();
      mytransition.fragment_modification = 0;
    }
    mytransition.fragment_mzdelta = String(best_fragment_annotation_with_deviation[1]).toDouble();
  }
  else
  {
    // The fragment ion could not be annotated and will likely not be used for detection transitions;
    // we thus skip it and reduce the size of the output TraML.
    return true;
  }
  return false;
}
void TransitionTSVFile::cleanupTransitions_(TSVTransition& mytransition)
{
  // FullPeptideName may carry the precursor charge appended after a slash
  // (e.g. "PEPTIDE/2"); if so, split it into sequence and charge fields.
  std::vector<String> name_parts;
  mytransition.FullPeptideName.split("/", name_parts);
  if (name_parts.size() == 2)
  {
    mytransition.FullPeptideName = name_parts[0];
    mytransition.precursor_charge = name_parts[1];
  }
}
void TransitionTSVFile::TSVToTargetedExperiment_(std::vector<TSVTransition>& transition_list, OpenMS::TargetedExperiment& exp)
{
// For the CV terms, see
// http://psidev.cvs.sourceforge.net/viewvc/psidev/psi/psi-ms/mzML/controlledVocabulary/psi-ms.obo
typedef std::vector<OpenMS::TargetedExperiment::Compound> CompoundVectorType;
CompoundVectorType compounds;
PeptideVectorType peptides;
ProteinVectorType proteins;
std::map<String, int> peptide_map;
std::map<String, int> compound_map;
std::map<String, int> protein_map;
resolveMixedSequenceGroups_(transition_list);
Size progress = 0;
startProgress(0, transition_list.size(), "conversion to internal data representation");
for (auto tr_it = transition_list.begin(); tr_it != transition_list.end(); ++tr_it)
{
ReactionMonitoringTransition rm_trans;
createTransition_(tr_it, rm_trans);
exp.addTransition(rm_trans);
// check whether we need a new peptide
if (peptide_map.find(tr_it->group_id) == peptide_map.end() &&
compound_map.find(tr_it->group_id) == compound_map.end() )
{
// should we make a peptide or a compound ?
if (tr_it->isPeptide())
{
OpenMS::TargetedExperiment::Peptide peptide;
createPeptide_(tr_it, peptide);
peptides.push_back(peptide);
peptide_map[peptide.id] = 0;
}
else
{
OpenMS::TargetedExperiment::Compound compound;
createCompound_(tr_it, compound);
compounds.push_back(compound);
compound_map[compound.id] = 0;
}
}
// check whether we need new proteins
for (size_t i = 0; i < tr_it->ProteinName.size(); ++i)
{
if (tr_it->isPeptide() && protein_map.find(tr_it->ProteinName[i]) == protein_map.end())
{
OpenMS::TargetedExperiment::Protein protein;
String protein_name = tr_it->ProteinName[i];
String uniprot_id = "";
if (tr_it->uniprot_id.size() == tr_it->ProteinName.size())
{
uniprot_id = tr_it->uniprot_id[i];
}
createProtein_(protein_name, uniprot_id, protein);
proteins.push_back(protein);
protein_map[tr_it->ProteinName[i]] = 0;
}
}
setProgress(progress++);
}
endProgress();
exp.setCompounds(compounds);
exp.setPeptides(peptides);
exp.setProteins(proteins);
OPENMS_POSTCONDITION(exp.getTransitions().size() == transition_list.size(), "Input and output list need to have equal size.")
}
/// Stream a transition TSV/CSV (or SpectraST MRM) file line-by-line directly
/// into an OpenSwath::LightTargetedExperiment, reusing a single TSVTransition
/// buffer per line instead of materializing the full transition list.
/// @param filename path of the input file
/// @param filetype FileTypes::MRM selects the fixed, header-less SpectraST
///        column layout with tab delimiter; anything else reads a header line
/// @param exp output experiment; transitions, compounds and proteins are appended
/// @throws Exception::IllegalArgument on malformed lines or missing required columns
void TransitionTSVFile::streamTSVToLightTargetedExperiment_(const char* filename, FileTypes::Type filetype, OpenSwath::LightTargetedExperiment& exp)
{
  // Maps for deduplication (group id -> emitted, protein name -> emitted)
  std::map<String, int> compound_map;
  std::map<String, int> protein_map;
  // Map for detecting mixed sequence groups inline (peptide_group_label -> first sequence seen)
  std::map<String, String> label_to_sequence;

  std::ifstream data(filename);
  std::string line;
  std::string tmp;

  // Parse header: column name -> column index
  std::vector<std::string> tmp_line;
  std::map<std::string, int> header_dict;
  char delimiter = ',';

  // SpectraST MRM Files do not have a header; use their fixed column layout
  if (filetype == FileTypes::MRM)
  {
    delimiter = '\t';
    header_dict["SpectraSTBestSample"] = 0;
    header_dict["SpectraSTmaxNumUsed/totNumUsed"] = 1;
    header_dict["SpectraSTpI"] = 2;
    header_dict["PrecursorMz"] = 3;
    header_dict["SpectraSTRetentionTime"] = 4;
    header_dict["ProductMz"] = 5;
    header_dict["LibraryIntensity"] = 6;
    header_dict["SpectraSTAnnotation"] = 7;
    header_dict["FragmentCharge"] = 8;
    header_dict["SpectraSTFullPeptideName"] = 9;
    header_dict["SpectraSTUnknown"] = 10;
    header_dict["SpectraSTNumberOfProteinsMappedTo"] = 11;
    header_dict["ProteinName"] = 12;
  }
  else
  {
    // Consume the header line and let getTSVHeader_ also determine the delimiter
    TextFile::getLine(data, line);
    getTSVHeader_(line, delimiter, header_dict);
  }
  bool spectrast_legacy = false; // flag set by spectrastRTExtract for legacy RT formats
  int cnt = 0;                   // 1-based data line counter (also fallback transition name)
  Size progress = 0;

  // Count lines for progress (seek back after)
  // NOTE: this reads the file a second time, purely for progress reporting.
  Size total_lines = 0;
  {
    std::ifstream count_stream(filename);
    std::string count_line;
    while (TextFile::getLine(count_stream, count_line)) { ++total_lines; }
    if (total_lines > 0) --total_lines; // subtract header
  }
  startProgress(0, total_lines, "streaming TSV to LightTargetedExperiment");

  // Single TSVTransition buffer - reused for each line
  TSVTransition mytransition;
  while (TextFile::getLine(data, line))
  {
    line.push_back(delimiter); // avoid losing last column if it is empty

    // Tokenize the line into fields
    std::stringstream lineStream(line);
    tmp_line.clear();
    while (std::getline(lineStream, tmp, delimiter))
    {
      tmp_line.push_back(tmp);
    }
    cnt++;

    // Every data line must have exactly as many fields as the header
    if (tmp_line.size() != header_dict.size())
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "Error reading the file on line " + String(cnt) + ": length of the header and length of the line" +
          " do not match: " + String(tmp_line.size()) + " != " + String(header_dict.size()));
    }

    // Reset mytransition to defaults
    mytransition = TSVTransition();
    bool skip_transition = false;

    //// Required columns (several legacy column-name aliases are accepted)
    mytransition.precursor = String(tmp_line[header_dict["PrecursorMz"]]).toDouble();
    if (!extractName<double>(mytransition.product, "ProductMz", tmp_line, header_dict) &&
        !extractName<double>(mytransition.product, "FragmentMz", tmp_line, header_dict))
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "Expected a header named ProductMz or FragmentMz but found none");
    }
    if (!extractName<double>(mytransition.library_intensity, "LibraryIntensity", tmp_line, header_dict) &&
        !extractName<double>(mytransition.library_intensity, "RelativeIntensity", tmp_line, header_dict) &&
        !extractName<double>(mytransition.library_intensity, "RelativeFragmentIntensity", tmp_line, header_dict))
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "Expected a header named LibraryIntensity or RelativeFragmentIntensity but found none");
    }

    //// Additional columns; retention time is required under one of many names
    if (!extractName<double>(mytransition.rt_calibrated, "RetentionTimeCalculatorScore", tmp_line, header_dict) &&
        !extractName<double>(mytransition.rt_calibrated, "iRT", tmp_line, header_dict) &&
        !extractName<double>(mytransition.rt_calibrated, "NormalizedRetentionTime", tmp_line, header_dict) &&
        !extractName<double>(mytransition.rt_calibrated, "RetentionTime", tmp_line, header_dict) &&
        !extractName<double>(mytransition.rt_calibrated, "Tr_recalibrated", tmp_line, header_dict))
    {
      if (header_dict.find("SpectraSTRetentionTime") != header_dict.end())
      {
        spectrastRTExtract(tmp_line[header_dict["SpectraSTRetentionTime"]], mytransition.rt_calibrated, spectrast_legacy);
      }
      else
      {
        throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
            "Expected a header named RetentionTime, NormalizedRetentionTime, iRT, RetentionTimeCalculatorScore, Tr_recalibrated or SpectraSTRetentionTime but found none");
      }
    }
    // The void(...) wrappers below discard the bool result of the
    // short-circuited alias chains; these columns are all optional.
    void(!extractName(mytransition.precursor_charge, "PrecursorCharge", tmp_line, header_dict) &&
         !extractName(mytransition.precursor_charge, "Charge", tmp_line, header_dict));
    void(!extractName(mytransition.fragment_type, "FragmentType", tmp_line, header_dict) &&
         !extractName(mytransition.fragment_type, "FragmentIonType", tmp_line, header_dict));
    void(!extractName(mytransition.fragment_charge, "FragmentCharge", tmp_line, header_dict) &&
         !extractName(mytransition.fragment_charge, "ProductCharge", tmp_line, header_dict));
    void(!extractName<int>(mytransition.fragment_nr, "FragmentSeriesNumber", tmp_line, header_dict) &&
         !extractName<int>(mytransition.fragment_nr, "FragmentNumber", tmp_line, header_dict) &&
         !extractName<int>(mytransition.fragment_nr, "FragmentIonOrdinal", tmp_line, header_dict));
    void(extractName<double>(mytransition.drift_time, "PrecursorIonMobility", tmp_line, header_dict));
    void(extractName<double>(mytransition.fragment_mzdelta, "FragmentMzDelta", tmp_line, header_dict));
    void(extractName<int>(mytransition.fragment_modification, "FragmentModification", tmp_line, header_dict));

    //// Proteomics
    extractName(mytransition.GeneName, "GeneName", tmp_line, header_dict);
    String proteins;
    void(!extractName(proteins, "ProteinName", tmp_line, header_dict) &&
         !extractName(proteins, "ProteinId", tmp_line, header_dict));
    if (proteins != "NA" && !proteins.empty())
    {
      // Multiple protein accessions are semicolon-separated
      proteins.split(';', mytransition.ProteinName);
    }
    void(extractName(mytransition.peptide_group_label, "PeptideGroupLabel", tmp_line, header_dict));
    void(extractName(mytransition.label_type, "LabelType", tmp_line, header_dict));
    void(!extractName(mytransition.PeptideSequence, "PeptideSequence", tmp_line, header_dict) &&
         !extractName(mytransition.PeptideSequence, "Sequence", tmp_line, header_dict) &&
         !extractName(mytransition.PeptideSequence, "StrippedSequence", tmp_line, header_dict));
    void(!extractName(mytransition.FullPeptideName, "FullUniModPeptideName", tmp_line, header_dict) &&
         !extractName(mytransition.FullPeptideName, "FullPeptideName", tmp_line, header_dict) &&
         !extractName(mytransition.FullPeptideName, "ModifiedSequence", tmp_line, header_dict) &&
         !extractName(mytransition.FullPeptideName, "ModifiedPeptideSequence", tmp_line, header_dict));

    //// IPF (inference of peptidoforms) transition flags
    String peptidoforms;
    void(!extractName<bool>(mytransition.detecting_transition, "detecting_transition", tmp_line, header_dict) &&
         !extractName<bool>(mytransition.detecting_transition, "DetectingTransition", tmp_line, header_dict));
    void(!extractName<bool>(mytransition.identifying_transition, "identifying_transition", tmp_line, header_dict) &&
         !extractName<bool>(mytransition.identifying_transition, "IdentifyingTransition", tmp_line, header_dict));
    void(!extractName<bool>(mytransition.quantifying_transition, "quantifying_transition", tmp_line, header_dict) &&
         !extractName<bool>(mytransition.quantifying_transition, "QuantifyingTransition", tmp_line, header_dict) &&
         !extractName<bool>(mytransition.quantifying_transition, "Quantitative", tmp_line, header_dict));
    void(extractName(peptidoforms, "Peptidoforms", tmp_line, header_dict));
    peptidoforms.split('|', mytransition.peptidoforms); // pipe-separated list

    //// Targeted Metabolomics
    void(extractName(mytransition.CompoundName, "CompoundName", tmp_line, header_dict));
    void(extractName(mytransition.SumFormula, "SumFormula", tmp_line, header_dict));
    void(extractName(mytransition.SMILES, "SMILES", tmp_line, header_dict));
    void(extractName(mytransition.Adducts, "Adducts", tmp_line, header_dict));

    //// Meta
    void(extractName(mytransition.Annotation, "Annotation", tmp_line, header_dict));
    String uniprot_ids;
    void(!extractName(uniprot_ids, "UniprotId", tmp_line, header_dict) &&
         !extractName(uniprot_ids, "UniprotID", tmp_line, header_dict));
    if (uniprot_ids != "NA" && !uniprot_ids.empty())
    {
      uniprot_ids.split(';', mytransition.uniprot_id);
    }
    void(!extractName<double>(mytransition.CE, "CE", tmp_line, header_dict) &&
         !extractName<double>(mytransition.CE, "CollisionEnergy", tmp_line, header_dict));
    void(!extractName<bool>(mytransition.decoy, "decoy", tmp_line, header_dict) &&
         !extractName<bool>(mytransition.decoy, "Decoy", tmp_line, header_dict) &&
         !extractName<bool>(mytransition.decoy, "IsDecoy", tmp_line, header_dict));

    // SpectraST annotation parsing may decide the transition is unusable
    if (header_dict.find("SpectraSTAnnotation") != header_dict.end())
    {
      skip_transition = spectrastAnnotationExtract(tmp_line[header_dict["SpectraSTAnnotation"]], mytransition);
    }

    //// Generate Group IDs
    if (filetype == FileTypes::MRM)
    {
      // MRM rows encode "SEQUENCE/charge" in the full peptide name column
      // NOTE(review): substrings[1] assumes a '/' is always present — verify for malformed input
      std::vector<String> substrings;
      String(tmp_line[header_dict["SpectraSTFullPeptideName"]]).split("/", substrings);
      AASequence peptide = AASequence::fromString(substrings[0]);
      mytransition.FullPeptideName = peptide.toString();
      mytransition.PeptideSequence = peptide.toUnmodifiedString();
      mytransition.precursor_charge = substrings[1];
      mytransition.transition_name = String(cnt);
      mytransition.group_id = mytransition.FullPeptideName + String("_") + String(mytransition.precursor_charge);
    }
    else
    {
      if (!extractName(mytransition.transition_name, "transition_name", tmp_line, header_dict) &&
          !extractName(mytransition.transition_name, "TransitionName", tmp_line, header_dict) &&
          !extractName(mytransition.transition_name, "TransitionId", tmp_line, header_dict))
      {
        // Fall back to the line number as transition name
        mytransition.transition_name = String(cnt);
      }
      if (!extractName(mytransition.group_id, "transition_group_id", tmp_line, header_dict) &&
          !extractName(mytransition.group_id, "TransitionGroupId", tmp_line, header_dict) &&
          !extractName(mytransition.group_id, "TransitionGroupName", tmp_line, header_dict))
      {
        // Derive a group id from the (modified) sequence and precursor charge
        // NOTE(review): AASequence::fromString may throw if FullPeptideName is empty/unparseable — confirm upstream guarantees
        mytransition.group_id = AASequence::fromString(mytransition.FullPeptideName).toString() + String("_") + String(mytransition.precursor_charge);
      }
    }
    cleanupTransitions_(mytransition);

    if (skip_transition)
    {
      setProgress(progress++);
      continue;
    }

    // --- Inline mixed sequence group detection ---
    // Same policy as resolveMixedSequenceGroups_, applied on the fly:
    // a peptide_group_label must map to a single peptide sequence.
    String effective_label = mytransition.peptide_group_label;
    if (!effective_label.empty())
    {
      auto it = label_to_sequence.find(effective_label);
      if (it == label_to_sequence.end())
      {
        // First time seeing this label - record the sequence
        label_to_sequence[effective_label] = mytransition.PeptideSequence;
      }
      else if (!it->second.empty() && !mytransition.PeptideSequence.empty() && it->second != mytransition.PeptideSequence)
      {
        // Conflict detected!
        if (override_group_label_check_)
        {
          OPENMS_LOG_WARN << "Warning: Found multiple peptide sequences for peptide label group " << effective_label <<
            ". Since 'override_group_label_check' is on, nothing will be changed.\n";
        }
        else
        {
          OPENMS_LOG_WARN << "Warning: Found multiple peptide sequences for peptide label group " << effective_label <<
            ". This is most likely an error and to fix this, a new peptide label group will be inferred - " <<
            "to override this decision, please use the override_group_label_check parameter.\n";
          effective_label = mytransition.group_id;
        }
      }
    }

    // --- Create LightTransition ---
    OpenSwath::LightTransition transition;
    transition.transition_name = mytransition.transition_name;
    transition.peptide_ref = mytransition.group_id;
    transition.library_intensity = mytransition.library_intensity;
    transition.precursor_mz = mytransition.precursor;
    transition.product_mz = mytransition.product;
    transition.precursor_im = mytransition.drift_time;
    transition.fragment_charge = 0; // 0 = unknown/unset
    if (!mytransition.fragment_charge.empty() && mytransition.fragment_charge != "NA")
    {
      transition.fragment_charge = static_cast<int8_t>(mytransition.fragment_charge.toInt());
    }
    transition.setDecoy(mytransition.decoy);
    transition.setDetectingTransition(mytransition.detecting_transition);
    transition.setIdentifyingTransition(mytransition.identifying_transition);
    transition.setQuantifyingTransition(mytransition.quantifying_transition);
    transition.fragment_nr = static_cast<int16_t>(mytransition.fragment_nr);
    transition.setFragmentType(mytransition.fragment_type);
    transition.peptidoforms.assign(mytransition.peptidoforms.begin(), mytransition.peptidoforms.end());
    exp.transitions.push_back(std::move(transition));

    // --- Create compound if needed (first occurrence of this group id) ---
    if (compound_map.find(mytransition.group_id) == compound_map.end())
    {
      OpenSwath::LightCompound compound;
      compound.id = mytransition.group_id;
      compound.drift_time = mytransition.drift_time;
      compound.rt = mytransition.rt_calibrated;
      compound.charge = 0; // 0 = unknown/unset
      if (!mytransition.precursor_charge.empty() && mytransition.precursor_charge != "NA")
      {
        compound.charge = mytransition.precursor_charge.toInt();
      }
      compound.peptide_group_label = effective_label; // Use potentially corrected label
      compound.gene_name = mytransition.GeneName;
      if (mytransition.isPeptide())
      {
        // Prefer the modified (UniMod) sequence when available
        compound.sequence = mytransition.FullPeptideName.empty() ? mytransition.PeptideSequence : mytransition.FullPeptideName;
        compound.protein_refs.assign(mytransition.ProteinName.begin(), mytransition.ProteinName.end());
        // Parse modifications from FullPeptideName if available
        String sequence = mytransition.FullPeptideName.empty() ? mytransition.PeptideSequence : mytransition.FullPeptideName;
        try
        {
          AASequence aa_sequence = AASequence::fromString(sequence);
          if (aa_sequence.hasNTerminalModification())
          {
            OpenSwath::LightModification mod;
            mod.location = -1; // convention: -1 denotes the N-terminus
            mod.unimod_id = aa_sequence.getNTerminalModification()->getUniModRecordId();
            compound.modifications.push_back(mod);
          }
          if (aa_sequence.hasCTerminalModification())
          {
            OpenSwath::LightModification mod;
            mod.location = static_cast<int>(aa_sequence.size()); // convention: size() denotes the C-terminus
            mod.unimod_id = aa_sequence.getCTerminalModification()->getUniModRecordId();
            compound.modifications.push_back(mod);
          }
          for (Size i = 0; i != aa_sequence.size(); i++)
          {
            if (aa_sequence[i].isModified())
            {
              OpenSwath::LightModification mod;
              mod.location = static_cast<int>(i);
              mod.unimod_id = aa_sequence.getResidue(i).getModification()->getUniModRecordId();
              compound.modifications.push_back(mod);
            }
          }
        }
        catch (Exception::InvalidValue&)
        {
          // Unparseable sequence: keep the compound but drop modifications
          OPENMS_LOG_DEBUG << "Could not parse modifications from sequence: " << sequence << '\n';
        }
      }
      else
      {
        compound.compound_name = mytransition.CompoundName;
        compound.sum_formula = mytransition.SumFormula;
      }
      compound.label_type = mytransition.label_type;
      compound.smiles = mytransition.SMILES;
      compound.adducts = mytransition.Adducts;
      exp.compounds.push_back(std::move(compound));
      compound_map[mytransition.group_id] = 0;
    }

    // --- Create proteins if needed (first occurrence of each name) ---
    for (Size i = 0; i < mytransition.ProteinName.size(); ++i)
    {
      if (mytransition.isPeptide() && protein_map.find(mytransition.ProteinName[i]) == protein_map.end())
      {
        OpenSwath::LightProtein protein;
        protein.id = mytransition.ProteinName[i];
        protein.sequence = "";
        if (i < mytransition.uniprot_id.size())
        {
          protein.uniprot_id = mytransition.uniprot_id[i];
        }
        exp.proteins.push_back(std::move(protein));
        protein_map[mytransition.ProteinName[i]] = 0;
      }
    }
    setProgress(progress++);
  }
  endProgress();
}
void TransitionTSVFile::resolveMixedSequenceGroups_(std::vector<TransitionTSVFile::TSVTransition>& transition_list) const
{
  // Bucket pointers to all transitions that carry a non-empty peptide group label.
  std::map<String, std::vector<TSVTransition*> > grouped;
  for (auto& transition : transition_list)
  {
    if (!transition.peptide_group_label.empty())
    {
      grouped[transition.peptide_group_label].push_back(&transition);
    }
  }

  // Within one label group, every member must share the first member's
  // peptide sequence; a mismatch almost certainly indicates broken input.
  for (auto& group : grouped)
  {
    String reference_sequence;
    if (!group.second.empty())
    {
      reference_sequence = group.second.front()->PeptideSequence;
    }
    for (auto* transition : group.second)
    {
      if (reference_sequence.empty() || transition->PeptideSequence == reference_sequence)
      {
        continue; // consistent (or nothing to compare against)
      }
      if (override_group_label_check_)
      {
        // User opted out of the fix; only warn.
        OPENMS_LOG_WARN << "Warning: Found multiple peptide sequences for peptide label group " << group.first <<
          ". Since 'override_group_label_check' is on, nothing will be changed.\n";
      }
      else
      {
        // Repair: give the offending transition its own group id as label.
        OPENMS_LOG_WARN << "Warning: Found multiple peptide sequences for peptide label group " << group.first <<
          ". This is most likely an error and to fix this, a new peptide label group will be inferred - " <<
          "to override this decision, please use the override_group_label_check parameter.\n";
        transition->peptide_group_label = transition->group_id;
      }
    }
  }
}
void TransitionTSVFile::createTransition_(std::vector<TSVTransition>::iterator& tr_it, OpenMS::ReactionMonitoringTransition& rm_trans)
{
// the following attributes will be stored as meta values (userParam):
// - annotation (as by SpectraST)
// the following attributes will be stored as CV values (CV):
// - collision energy
// - library intensity (product ion intensity)
// - decoy / target transition (binary MS:1002007 or MS:1002008)
// the following attributes will be stored as attributes:
// - id (native id)
// the following attributes will be stored in sub-tags:
// - Precursor:
// * target precursor mass isolation window [Q1] (CV Param)
// - Product:
// * charge state (CV Param)
// * target product mass isolation window [Q3] (CV Param)
// - Interpretation (only best)
// * Fragment number (number in series) (CV Param)
// * Fragment type (which series) (CV Param)
rm_trans.setNativeID(tr_it->transition_name);
rm_trans.setPrecursorMZ(tr_it->precursor);
rm_trans.setProductMZ(tr_it->product);
if (tr_it->isPeptide())
{
rm_trans.setPeptideRef(tr_it->group_id);
}
else
{
rm_trans.setCompoundRef(tr_it->group_id);
}
rm_trans.setLibraryIntensity(tr_it->library_intensity);
if (!tr_it->fragment_charge.empty() && tr_it->fragment_charge != "NA")
{
OpenMS::ReactionMonitoringTransition::Product p = rm_trans.getProduct();
p.setChargeState(tr_it->fragment_charge.toInt());
rm_trans.setProduct(p);
}
// add interpretation
OpenMS::ReactionMonitoringTransition::Product p = rm_trans.getProduct();
TargetedExperiment::Interpretation interpretation;
// check if we have any information about the interpretation
bool interpretation_set = false;
if (tr_it->fragment_nr != -1 ||
tr_it->fragment_mzdelta != -1 ||
tr_it->fragment_modification < 0 ||
!tr_it->fragment_type.empty() )
{
interpretation_set = true;
}
if (tr_it->fragment_nr != -1)
{
interpretation.rank = 1; // we only store the best interpretation
}
if (tr_it->fragment_nr != -1)
{
interpretation.ordinal = tr_it->fragment_nr;
}
if (tr_it->fragment_mzdelta != -1)
{
CVTerm frag_mzdelta;
frag_mzdelta.setCVIdentifierRef("MS");
frag_mzdelta.setAccession("MS:1000904");
frag_mzdelta.setName("product ion m/z delta");
frag_mzdelta.setValue(tr_it->fragment_mzdelta);
interpretation.addCVTerm(frag_mzdelta);
}
if (tr_it->fragment_modification < 0)
{
CVTerm frag_loss;
frag_loss.setCVIdentifierRef("MS");
frag_loss.setAccession("MS:1001524");
frag_loss.setName("fragment neutral loss");
frag_loss.setValue(tr_it->fragment_modification);
interpretation.addCVTerm(frag_loss);
}
// figure out which fragment it is
if (tr_it->fragment_type == "v")
{
CVTerm ion;
ion.setCVIdentifierRef("MS");
ion.setAccession("MS:1001237");
ion.setName("frag: v ion");
interpretation.addCVTerm(ion);
}
else if (tr_it->fragment_type == "w")
{
CVTerm ion;
ion.setCVIdentifierRef("MS");
ion.setAccession("MS:1001238");
ion.setName("frag: w ion");
interpretation.addCVTerm(ion);
}
else if (tr_it->fragment_type == "x")
{
interpretation.iontype = TargetedExperiment::IonType::XIon;
}
else if (tr_it->fragment_type == "y")
{
interpretation.iontype = TargetedExperiment::IonType::YIon;
}
else if (tr_it->fragment_type == "z")
{
interpretation.iontype = TargetedExperiment::IonType::ZIon;
}
else if (tr_it->fragment_type == "a")
{
interpretation.iontype = TargetedExperiment::IonType::AIon;
}
else if (tr_it->fragment_type == "b")
{
interpretation.iontype = TargetedExperiment::IonType::BIon;
}
else if (tr_it->fragment_type == "c")
{
interpretation.iontype = TargetedExperiment::IonType::CIon;
}
else if (tr_it->fragment_type == "d")
{
CVTerm ion;
ion.setCVIdentifierRef("MS");
ion.setAccession("MS:1001236");
ion.setName("frag: d ion");
interpretation.addCVTerm(ion);
}
else if (tr_it->fragment_type == "unknown")
{
// unknown means that we should write CV Term "1001240"
interpretation.iontype = TargetedExperiment::IonType::NonIdentified;
}
else if (tr_it->fragment_type.empty())
{
// empty means that we have no information whatsoever
interpretation.iontype = TargetedExperiment::IonType::Unannotated;
}
else
{
interpretation.iontype = TargetedExperiment::IonType::NonIdentified;
}
// don't add empty interpretations
if (interpretation_set)
{
p.addInterpretation(interpretation);
}
rm_trans.setProduct(p);
// add collision energy
if (tr_it->CE > 0.0)
{
CVTerm CE;
CE.setCVIdentifierRef("MS");
CE.setAccession("MS:1000045"); // collision energy
CE.setName("collision energy");
CE.setValue(tr_it->CE);
rm_trans.addCVTerm(CE);
}
if (!tr_it->decoy)
{
rm_trans.setDecoyTransitionType(ReactionMonitoringTransition::TARGET);
}
else
{
rm_trans.setDecoyTransitionType(ReactionMonitoringTransition::DECOY);
}
if (!tr_it->Annotation.empty())
{
rm_trans.setMetaValue("annotation", tr_it->Annotation);
}
rm_trans.setDetectingTransition(tr_it->detecting_transition);
rm_trans.setIdentifyingTransition(tr_it->identifying_transition);
rm_trans.setQuantifyingTransition(tr_it->quantifying_transition);
if (!tr_it->peptidoforms.empty())
{
rm_trans.setMetaValue("Peptidoforms", ListUtils::concatenate(tr_it->peptidoforms, "|"));
}
}
void TransitionTSVFile::createProtein_(String protein_name, const String& uniprot_id, OpenMS::TargetedExperiment::Protein& protein)
{
  // Stored as attributes: id.
  // Stored as CV values: UniProt accession number (if available).
  protein.id = std::move(protein_name);
  if (uniprot_id.empty())
  {
    return; // no accession to attach
  }
  // Attach the UniProt accession as a CV term.
  CVTerm acc;
  acc.setCVIdentifierRef("MS");
  acc.setAccession("MS:1000885"); // Accession number for a specific protein in a database.
  acc.setName("protein accession");
  acc.setValue(OpenMS::DataValue(uniprot_id));
  protein.addCVTerm(acc);
}
void TransitionTSVFile::interpretRetentionTime_(std::vector<TargetedExperiment::RetentionTime>& retention_times, const OpenMS::DataValue& rt_value)
{
  // Wrap the raw RT value in a RetentionTime object and annotate its
  // type/unit according to the user-selected interpretation
  // (retentionTimeInterpretation_: "iRT", "seconds" or "minutes").
  TargetedExperiment::RetentionTime retention_time;
  retention_time.setRT(rt_value);

  const bool is_seconds = (retentionTimeInterpretation_ == "seconds");
  const bool is_minutes = (retentionTimeInterpretation_ == "minutes");
  if (retentionTimeInterpretation_ == "iRT")
  {
    // Normalized retention time: no unit is set.
    retention_time.retention_time_type = TargetedExperimentHelper::RetentionTime::RTType::IRT;
  }
  else if (is_seconds || is_minutes)
  {
    retention_time.retention_time_type = TargetedExperimentHelper::RetentionTime::RTType::LOCAL;
    retention_time.retention_time_unit = is_seconds ?
      TargetedExperimentHelper::RetentionTime::RTUnit::SECOND :
      TargetedExperimentHelper::RetentionTime::RTUnit::MINUTE;
  }
  retention_times.push_back(retention_time);
}
/// Convert one TSVTransition into a TargetedExperiment::Peptide, including
/// RT, charge, meta values and per-residue modifications parsed from the
/// UniMod sequence string.
/// @param tr_it transition providing the peptide-level fields
/// @param peptide output peptide (id, sequence, mods, rts, ... are filled in)
/// @throws Exception::IllegalArgument if the modified sequence cannot be
///         parsed and force_invalid_mods_ is off
void TransitionTSVFile::createPeptide_(std::vector<TSVTransition>::const_iterator tr_it, OpenMS::TargetedExperiment::Peptide& peptide)
{
  // the following attributes will be stored as meta values (userParam):
  // - full_peptide_name (full unimod peptide name)
  // the following attributes will be stored as CV values (CV):
  // - retention time
  // - charge state
  // - group label
  // the following attributes will be stored as attributes:
  // - id
  // - sequence
  peptide.id = tr_it->group_id;
  peptide.sequence = tr_it->PeptideSequence;

  // per peptide user params
  peptide.setMetaValue("full_peptide_name", tr_it->FullPeptideName);
  if (!tr_it->label_type.empty())
  {
    peptide.setMetaValue("LabelType", tr_it->label_type);
  }
  if (!tr_it->GeneName.empty())
  {
    peptide.setMetaValue("GeneName", tr_it->GeneName);
  }
  if (!tr_it->SumFormula.empty())
  {
    peptide.setMetaValue("SumFormula", tr_it->SumFormula);
  }

  // per peptide CV terms
  peptide.setPeptideGroupLabel(tr_it->peptide_group_label);
  if (!tr_it->precursor_charge.empty() && tr_it->precursor_charge != "NA")
  {
    peptide.setChargeState(tr_it->precursor_charge.toInt());
  }

  // add retention time for the peptide (type/unit set by interpretRetentionTime_)
  std::vector<TargetedExperiment::RetentionTime> retention_times;
  OpenMS::DataValue rt_value(tr_it->rt_calibrated);
  interpretRetentionTime_(retention_times, rt_value);
  peptide.rts = retention_times;

  // add ion mobility drift time (negative values mean "not set")
  if (tr_it->drift_time >= 0.0)
  {
    peptide.setDriftTime(tr_it->drift_time);
  }

  // Try to parse full UniMod string including modifications. If the string
  // is not parseable (e.g. contains invalid modifications), we can force
  // reading and only parse the "naked" sequence.
  // Note: If the user did not provide a modified sequence string, we will
  // fall back to the "naked" sequence by default.
  std::vector<TargetedExperiment::Peptide::Modification> mods;
  AASequence aa_sequence;
  String sequence = tr_it->FullPeptideName;
  if (sequence.empty()) sequence = tr_it->PeptideSequence;
  try
  {
    aa_sequence = AASequence::fromString(sequence);
  } catch (Exception::InvalidValue & e)
  {
    if (force_invalid_mods_)
    {
      // fallback: parse the "naked" peptide sequence which should always work
      OPENMS_LOG_DEBUG << "Invalid sequence when parsing '" << tr_it->FullPeptideName << "'\n";
      aa_sequence = AASequence::fromString(tr_it->PeptideSequence);
    }
    else
    {
      // hard error: surface the problem and abort the read
      OPENMS_LOG_DEBUG << "Invalid sequence when parsing '" << tr_it->FullPeptideName << "'\n";
      std::cerr << "Error while reading file (use 'force_invalid_mods' parameter to override): " << e.what() << '\n';
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                       "Invalid input, cannot parse: " + tr_it->FullPeptideName);
    }
  }
  peptide.protein_refs = tr_it->ProteinName;

  // check if the naked peptide sequence is equal to the unmodified AASequence
  if (peptide.sequence != aa_sequence.toUnmodifiedString())
  {
    if (force_invalid_mods_)
    {
      // something is wrong, return and do not try and add any modifications
      // (this also skips the postcondition below, which would fail)
      return;
    }
    OPENMS_LOG_WARN << "Warning: The peptide sequence " << peptide.sequence << " and the full peptide name " << aa_sequence <<
      " are not equal. Please check your input.\n";
    OPENMS_LOG_WARN << "(use force_invalid_mods to override)\n";
  }

  // Unfortunately, we cannot store an AASequence here but have to work with
  // the TraML modification object.
  // In TraML, the modification the AA starts with residue 1 but the
  // OpenMS objects start with zero -> we start counting with zero here
  // and the TraML handler will add 1 when storing the file.
  {
    // N-terminal modification is stored at location -1
    if (aa_sequence.hasNTerminalModification())
    {
      const ResidueModification& rmod = *(aa_sequence.getNTerminalModification());
      addModification_(mods, -1, rmod);
    }
    // C-terminal modification is stored at location == sequence length
    if (aa_sequence.hasCTerminalModification())
    {
      const ResidueModification& rmod = *(aa_sequence.getCTerminalModification());
      addModification_(mods, aa_sequence.size(), rmod);
    }
    // per-residue modifications at their zero-based positions
    for (Size i = 0; i != aa_sequence.size(); i++)
    {
      if (aa_sequence[i].isModified())
      {
        const ResidueModification& rmod = *(aa_sequence.getResidue(i).getModification());
        addModification_(mods, i, rmod);
      }
    }
  }
  peptide.mods = mods;

  OPENMS_POSTCONDITION(aa_sequence.toUnmodifiedString() == peptide.sequence,
                       (String("Internal error: the sequences of the naked and modified peptide sequence are unequal(")
                       + aa_sequence.toUnmodifiedString() + " != " + peptide.sequence).c_str())
}
void TransitionTSVFile::createCompound_(std::vector<TSVTransition>::const_iterator tr_it, OpenMS::TargetedExperiment::Compound& compound)
{
  // Stored as meta values (userParam): CompoundName, Adducts, LabelType.
  // Stored as attributes: id, sum formula, SMILES, charge, retention time.
  compound.id = tr_it->group_id;
  compound.molecular_formula = tr_it->SumFormula;
  compound.smiles_string = tr_it->SMILES;
  compound.setMetaValue("CompoundName", tr_it->CompoundName);
  if (!tr_it->Adducts.empty())
  {
    compound.setMetaValue("Adducts", tr_it->Adducts);
  }
  // Label type may not be meaningful for compounds, but is preserved if given.
  if (!tr_it->label_type.empty())
  {
    compound.setMetaValue("LabelType", tr_it->label_type);
  }
  // Ion mobility drift time (negative values mean "not set").
  if (tr_it->drift_time >= 0.0)
  {
    compound.setDriftTime(tr_it->drift_time);
  }
  if (!tr_it->precursor_charge.empty() && tr_it->precursor_charge != "NA")
  {
    compound.setChargeState(tr_it->precursor_charge.toInt());
  }
  // Retention time, interpreted according to the configured RT semantics.
  std::vector<TargetedExperiment::RetentionTime> rts;
  interpretRetentionTime_(rts, OpenMS::DataValue(tr_it->rt_calibrated));
  compound.rts = rts;
}
void TransitionTSVFile::addModification_(std::vector<TargetedExperiment::Peptide::Modification>& mods,
                                         int location,
                                         const ResidueModification& rmod)
{
  // Translate a ResidueModification into its TraML representation and append
  // it. Location convention (set by callers): -1 = N-terminus,
  // sequence length = C-terminus, otherwise zero-based residue index.
  TargetedExperiment::Peptide::Modification mod;
  mod.location = location;
  mod.mono_mass_delta = rmod.getDiffMonoMass();
  mod.avg_mass_delta = rmod.getDiffAverageMass();
  // NOTE: -1 if not found in UniMod (e.g. user-defined modifications)
  mod.unimod_id = rmod.getUniModRecordId();
  mods.push_back(std::move(mod));
}
  /// Converts a single ReactionMonitoringTransition into a flat TSVTransition row.
  /// Peptide- or compound-level attributes (RT, charge, labels, proteins, ...)
  /// are pulled in from the referenced entry of @p targeted_exp.
  /// Fields default to -1 / "" / "NA" to mean "not available" in the TSV output.
  TransitionTSVFile::TSVTransition TransitionTSVFile::convertTransition_(const ReactionMonitoringTransition* it, OpenMS::TargetedExperiment& targeted_exp)
  {
    TSVTransition mytransition;
    mytransition.precursor = it->getPrecursorMZ();
    mytransition.product = it->getProductMZ();
    // "not available" defaults (overwritten below where data exists)
    mytransition.rt_calibrated = -1;
    mytransition.fragment_type = "";
    mytransition.fragment_nr = -1;
    mytransition.fragment_charge = "NA";
    if (!it->getPeptideRef().empty())
    {
      // proteomics case: transition references a peptide
      const OpenMS::TargetedExperiment::Peptide& pep = targeted_exp.getPeptideByRef(it->getPeptideRef());
      mytransition.group_id = it->getPeptideRef();
#ifdef TRANSITIONTSVREADER_TESTING
      OPENMS_LOG_DEBUG << "Peptide rts empty " <<
        pep.rts.empty() << " or no cv term " << pep.getRetentionTime() << '\n';
#endif
      if (pep.hasRetentionTime())
      {
        mytransition.rt_calibrated = pep.getRetentionTime();
      }
      mytransition.PeptideSequence = pep.sequence;
      mytransition.GeneName = "NA";
      if (!pep.protein_refs.empty())
      {
        // collect all protein accessions and (where annotated) their UniProt ids
        for (auto & prot_ref : pep.protein_refs)
        {
          const OpenMS::TargetedExperiment::Protein& prot = targeted_exp.getProteinByRef(prot_ref);
          mytransition.ProteinName.push_back(prot.id);
          if (prot.hasCVTerm("MS:1000885"))
          {
            // MS:1000885: protein accession (UniProt id)
            mytransition.uniprot_id.push_back(prot.getCVTerms().at("MS:1000885")[0].getValue().toString());
          }
        }
      }
      // full sequence including modifications in UniMod notation
      mytransition.FullPeptideName = TargetedExperimentHelper::getAASequence(pep).toUniModString();
      mytransition.drift_time = -1;
      if (pep.getDriftTime() >= 0.0)
      {
        mytransition.drift_time = pep.getDriftTime();
      }
      mytransition.precursor_charge = "NA";
      if (pep.hasCharge())
      {
        mytransition.precursor_charge = String(pep.getChargeState());
      }
      mytransition.peptide_group_label = "NA";
      if (!pep.getPeptideGroupLabel().empty())
      {
        mytransition.peptide_group_label = pep.getPeptideGroupLabel();
      }
      if (pep.metaValueExists("LabelType"))
      {
        mytransition.label_type = pep.getMetaValue("LabelType").toString();
      }
      if (pep.metaValueExists("GeneName"))
      {
        mytransition.GeneName = pep.getMetaValue("GeneName").toString();
      }
    }
    else if (!it->getCompoundRef().empty())
    {
      // metabolomics case: transition references a compound
      const OpenMS::TargetedExperiment::Compound& compound = targeted_exp.getCompoundByRef(it->getCompoundRef());
      mytransition.group_id = it->getCompoundRef();
      if (compound.hasRetentionTime())
      {
        mytransition.rt_calibrated = compound.getRetentionTime();
      }
      mytransition.drift_time = -1;
      if (compound.getDriftTime() >= 0.0)
      {
        mytransition.drift_time = compound.getDriftTime();
      }
      mytransition.precursor_charge = "NA";
      if (compound.hasCharge())
      {
        mytransition.precursor_charge = String(compound.getChargeState());
      }
      // get metabolomics specific terms
      mytransition.SumFormula = compound.molecular_formula;
      mytransition.SMILES = compound.smiles_string;
      if (compound.metaValueExists("CompoundName"))
      {
        mytransition.CompoundName = compound.getMetaValue("CompoundName");
      }
      if (compound.metaValueExists("Adducts"))
      {
        mytransition.Adducts = compound.getMetaValue("Adducts");
      }
    }
    else
    {
      // Error?
      // NOTE(review): a transition without peptide or compound reference is
      // silently left with default values here -- confirm this is intended.
    }
    if (it->isProductChargeStateSet())
    {
      mytransition.fragment_charge = String(it->getProductChargeState());
    }
    const auto & product = it->getProduct();
    for (const auto& int_it : product.getInterpretationList())
    {
      // only report first / best interpretation
      if (int_it.rank == 1 || product.getInterpretationList().size() == 1)
      {
        if (int_it.ordinal != 0) mytransition.fragment_nr = int_it.ordinal;
        // map the ion type enum to its TSV string token
        // (NOTE(review): "H20" below uses the digit zero; this matches the
        // enum names and the strings the reader expects, so do not "fix" it)
        switch (int_it.iontype)
        {
          case Residue::AIon:
            mytransition.fragment_type = "a";
            break;
          case Residue::BIon:
            mytransition.fragment_type = "b";
            break;
          case Residue::CIon:
            mytransition.fragment_type = "c";
            break;
          case Residue::XIon:
            mytransition.fragment_type = "x";
            break;
          case Residue::YIon:
            mytransition.fragment_type = "y";
            break;
          case Residue::ZIon:
            mytransition.fragment_type = "z";
            break;
          case Residue::Zp1Ion:
            mytransition.fragment_type = "z.";
            break;
          case Residue::Zp2Ion:
            mytransition.fragment_type = "z'";
            break;
          case Residue::Precursor:
            mytransition.fragment_type = "prec";
            break;
          case Residue::BIonMinusH20:
            mytransition.fragment_type = "b-H20";
            break;
          case Residue::YIonMinusH20:
            mytransition.fragment_type = "y-H20";
            break;
          case Residue::BIonMinusNH3:
            mytransition.fragment_type = "b-NH3";
            break;
          case Residue::YIonMinusNH3:
            mytransition.fragment_type = "y-NH3";
            break;
          case Residue::NonIdentified:
            mytransition.fragment_type = "unknown";
            break;
          case Residue::Unannotated:
            // means no annotation and no input cvParam - to write out a cvParam, use Residue::NonIdentified
            mytransition.fragment_type = "";
            break;
          // invalid values
          case Residue::Full: break;
          case Residue::Internal: break;
          case Residue::NTerminal: break;
          case Residue::CTerminal: break;
          case Residue::SizeOfResidueType: break;
        }
      }
    }
    mytransition.transition_name = it->getNativeID();
    mytransition.CE = -1;
    // MS:1000045: collision energy
    if (it->hasCVTerm("MS:1000045"))
    {
      mytransition.CE = it->getCVTerms().at("MS:1000045")[0].getValue().toString().toDouble();
    }
    mytransition.library_intensity = -1;
    // values <= -100 are treated as "library intensity not set"
    if (it->getLibraryIntensity() > -100)
    {
      mytransition.library_intensity = it->getLibraryIntensity();
    }
    mytransition.decoy = false;
    if (it->getDecoyTransitionType() == ReactionMonitoringTransition::TARGET)
    {
      mytransition.decoy = false;
    }
    else if (it->getDecoyTransitionType() == ReactionMonitoringTransition::DECOY)
    {
      mytransition.decoy = true;
    }
    mytransition.Annotation = "NA";
    if (it->metaValueExists("annotation"))
    {
      mytransition.Annotation = it->getMetaValue("annotation").toString();
    }
    if (it->metaValueExists("Peptidoforms"))
    {
      // pipe-separated list of peptidoforms
      String(it->getMetaValue("Peptidoforms")).split('|', mytransition.peptidoforms);
    }
    mytransition.detecting_transition = it->isDetectingTransition();
    mytransition.identifying_transition = it->isIdentifyingTransition();
    mytransition.quantifying_transition = it->isQuantifyingTransition();
    return mytransition;
  }
void TransitionTSVFile::writeTSVOutput_(const char* filename, OpenMS::TargetedExperiment& targeted_exp)
{
std::vector<TSVTransition> mytransitions;
Size progress = 0;
startProgress(0, targeted_exp.getTransitions().size(), "writing OpenSWATH Transition List TSV file");
for (const auto& tr : targeted_exp.getTransitions())
{
mytransitions.push_back(convertTransition_(&tr, targeted_exp));
setProgress(progress++);
}
endProgress();
// start writing
std::ofstream os(filename);
os.precision(writtenDigits(double()));
for (Size i = 0; i < header_names_.size(); i++)
{
os << header_names_[i];
if (i != header_names_.size() - 1)
{
os << "\t";
}
}
os << '\n';
for (const auto& it : mytransitions)
{
String line;
line +=
(String)it.precursor + "\t"
+ (String)it.product + "\t"
+ (String)it.precursor_charge + "\t"
+ (String)it.fragment_charge + "\t"
+ (String)it.library_intensity + "\t"
+ (String)it.rt_calibrated + "\t"
+ (String)it.PeptideSequence + "\t"
+ (String)it.FullPeptideName + "\t"
+ (String)it.peptide_group_label + "\t"
+ (String)it.label_type + "\t"
+ (String)it.CompoundName + "\t"
+ (String)it.SumFormula + "\t"
+ (String)it.SMILES + "\t"
+ (String)it.Adducts + "\t"
+ ListUtils::concatenate(it.ProteinName, ";") + "\t"
+ ListUtils::concatenate(it.uniprot_id, ";") + "\t"
+ (String)it.GeneName + "\t"
+ (String)it.fragment_type + "\t"
+ (String)it.fragment_nr + "\t"
+ (String)it.Annotation + "\t"
+ (String)it.CE + "\t"
+ (String)it.drift_time + "\t"
+ (String)it.group_id + "\t"
+ (String)it.transition_name + "\t"
+ (String)it.decoy + "\t"
+ (String)it.detecting_transition + "\t"
+ (String)it.identifying_transition + "\t"
+ (String)it.quantifying_transition + "\t"
+ ListUtils::concatenate(it.peptidoforms, "|");
os << line << '\n';
}
os.close();
}
  // public methods
  /// Writes @p targeted_exp to @p filename as an OpenSWATH transition list TSV.
  /// @throws Exception::IllegalArgument if the experiment contains invalid
  ///         peptide/protein/compound references (nothing is written then).
  void TransitionTSVFile::convertTargetedExperimentToTSV(const char* filename, OpenMS::TargetedExperiment& targeted_exp)
  {
    if (targeted_exp.containsInvalidReferences())
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "Your input file contains invalid references, cannot process file.");
    }
    writeTSVOutput_(filename, targeted_exp);
  }
  /// Reads a transition list file (@p filetype selects TSV/mrm/...) and builds
  /// a full TargetedExperiment from the parsed rows.
  void TransitionTSVFile::convertTSVToTargetedExperiment(const char* filename, FileTypes::Type filetype, OpenMS::TargetedExperiment& targeted_exp)
  {
    std::vector<TSVTransition> transition_list;
    readUnstructuredTSVInput_(filename, filetype, transition_list);
    TSVToTargetedExperiment_(transition_list, targeted_exp);
  }
  /// Reads a transition list file directly into the light (OpenSwath) data
  /// model, avoiding the intermediate heavy TargetedExperiment.
  void TransitionTSVFile::convertTSVToTargetedExperiment(const char* filename, FileTypes::Type filetype, OpenSwath::LightTargetedExperiment& targeted_exp)
  {
    // Use streaming parser for memory efficiency (~5x reduction in peak memory)
    streamTSVToLightTargetedExperiment_(filename, filetype, targeted_exp);
  }
  /// Sanity check on a TargetedExperiment.
  /// @throws Exception::IllegalArgument on duplicate or dangling references.
  void TransitionTSVFile::validateTargetedExperiment(const OpenMS::TargetedExperiment& targeted_exp)
  {
    if (targeted_exp.containsInvalidReferences())
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "Invalid input, contains duplicate or invalid references");
    }
  }
  /// Writes a LightTargetedExperiment as an OpenSWATH transition list TSV.
  /// Column order and the "NA"/empty-field conventions deliberately mirror the
  /// heavy path (writeTSVOutput_) so both writers produce comparable files.
  void TransitionTSVFile::convertLightTargetedExperimentToTSV(const char* filename, const OpenSwath::LightTargetedExperiment& targeted_exp)
  {
    // Build maps for quick lookup
    std::map<std::string, const OpenSwath::LightCompound*> compound_map;
    for (const auto& compound : targeted_exp.compounds)
    {
      compound_map[compound.id] = &compound;
    }
    std::map<std::string, const OpenSwath::LightProtein*> protein_map;
    for (const auto& protein : targeted_exp.proteins)
    {
      protein_map[protein.id] = &protein;
    }
    // start writing
    std::ofstream os(filename);
    os.precision(writtenDigits(double()));
    // Write header
    for (Size i = 0; i < header_names_.size(); i++)
    {
      os << header_names_[i];
      if (i != header_names_.size() - 1)
      {
        os << "\t";
      }
    }
    os << '\n';
    Size progress = 0;
    startProgress(0, targeted_exp.transitions.size(), "writing OpenSWATH Transition List TSV file");
    for (const auto& tr : targeted_exp.transitions)
    {
      setProgress(progress++);
      // Get associated compound (may be missing; fields then stay at defaults)
      const OpenSwath::LightCompound* compound = nullptr;
      auto comp_it = compound_map.find(tr.peptide_ref);
      if (comp_it != compound_map.end())
      {
        compound = comp_it->second;
      }
      // Build protein names and uniprot ids
      std::vector<String> protein_names;
      std::vector<String> uniprot_ids;
      if (compound != nullptr)
      {
        for (const auto& prot_ref : compound->protein_refs)
        {
          protein_names.push_back(prot_ref);
          auto prot_it = protein_map.find(prot_ref);
          if (prot_it != protein_map.end() && !prot_it->second->uniprot_id.empty())
          {
            uniprot_ids.push_back(prot_it->second->uniprot_id);
          }
        }
      }
      // charge 0 means "unknown" in the light model
      String precursor_charge = "NA";
      if (compound != nullptr && compound->charge != 0)
      {
        precursor_charge = String(compound->charge);
      }
      String fragment_charge = "NA";
      if (tr.fragment_charge != 0)
      {
        fragment_charge = String(tr.fragment_charge);
      }
      String peptide_group_label;
      String label_type;
      String compound_name;
      String sum_formula;
      String smiles;
      String adducts;
      String gene_name;
      String peptide_sequence;
      String full_peptide_name;
      double rt_calibrated = -1;
      double drift_time = -1;
      if (compound != nullptr)
      {
        peptide_group_label = compound->peptide_group_label;
        label_type = compound->label_type;
        gene_name = compound->gene_name;
        rt_calibrated = compound->rt;
        drift_time = compound->drift_time;
        if (compound->isPeptide())
        {
          full_peptide_name = compound->sequence;
          // Extract unmodified sequence
          try
          {
            peptide_sequence = AASequence::fromString(compound->sequence).toUnmodifiedString();
          }
          catch (Exception::InvalidValue&)
          {
            // fall back to the raw (possibly modified) sequence string
            peptide_sequence = compound->sequence;
          }
        }
        else
        {
          compound_name = compound->compound_name;
          sum_formula = compound->sum_formula;
          smiles = compound->smiles;
          adducts = compound->adducts;
        }
      }
      // Use precursor_im from transition if compound drift_time not set
      if (drift_time < 0 && tr.precursor_im >= 0)
      {
        drift_time = tr.precursor_im;
      }
      String annotation = tr.getAnnotation();
      if (annotation.empty())
      {
        annotation = "NA";
      }
      // Default empty fields to "NA" to match heavy-path output
      if (peptide_group_label.empty())
      {
        peptide_group_label = "NA";
      }
      if (gene_name.empty())
      {
        gene_name = "NA";
      }
      // Note: label_type, compound_name, sum_formula, smiles, adducts are left empty if not set
      // (for consistency with heavy path output)
      if (peptide_sequence.empty())
      {
        peptide_sequence = "NA";
      }
      if (full_peptide_name.empty())
      {
        full_peptide_name = "NA";
      }
      // Join protein names (use "NA" if empty) and uniprot ids (leave empty if not set, for consistency with heavy path)
      String protein_names_str = protein_names.empty() ? "NA" : ListUtils::concatenate(protein_names, ";");
      String uniprot_ids_str = ListUtils::concatenate(uniprot_ids, ";");
      String line;
      line +=
        String(tr.precursor_mz) + "\t"
        + String(tr.product_mz) + "\t"
        + precursor_charge + "\t"
        + fragment_charge + "\t"
        + String(tr.library_intensity) + "\t"
        + String(rt_calibrated) + "\t"
        + peptide_sequence + "\t"
        + full_peptide_name + "\t"
        + peptide_group_label + "\t"
        + label_type + "\t"
        + compound_name + "\t"
        + sum_formula + "\t"
        + smiles + "\t"
        + adducts + "\t"
        + protein_names_str + "\t"
        + uniprot_ids_str + "\t"
        + gene_name + "\t"
        + String(tr.getFragmentType()) + "\t"
        + String(tr.fragment_nr) + "\t"
        + annotation + "\t"
        + String(-1.0) + "\t" // CE not stored in Light
        + String(drift_time) + "\t"
        + tr.peptide_ref + "\t"
        + tr.transition_name + "\t"
        + String(tr.getDecoy()) + "\t"
        + String(tr.isDetectingTransition()) + "\t"
        + String(tr.isIdentifyingTransition()) + "\t"
        + String(tr.isQuantifyingTransition()) + "\t"
        + ListUtils::concatenate(std::vector<String>(tr.peptidoforms.begin(), tr.peptidoforms.end()), "|");
      os << line << '\n';
    }
    endProgress();
    os.close();
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/DATAACCESS/SpectrumAccessQuadMZTransforming.cpp | .cpp | 2,063 | 62 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/SpectrumAccessQuadMZTransforming.h>
#include <utility>
namespace OpenMS
{
  /// Constructor: wraps @p sptr and stores the quadratic m/z transformation
  /// coefficients (mz' = a + b*mz + c*mz^2). If @p ppm is true the polynomial
  /// predicts a ppm deviation instead of an absolute m/z (see getSpectrumById).
  SpectrumAccessQuadMZTransforming::SpectrumAccessQuadMZTransforming(
      OpenSwath::SpectrumAccessPtr sptr,
      double a, double b, double c, bool ppm) :
    SpectrumAccessTransforming(std::move(sptr)),
    a_(a),
    b_(b),
    c_(c),
    ppm_(ppm)
  {}
  SpectrumAccessQuadMZTransforming::~SpectrumAccessQuadMZTransforming() = default;
  std::shared_ptr<OpenSwath::ISpectrumAccess> SpectrumAccessQuadMZTransforming::lightClone() const
  {
    // Create a light clone of *this by initializing a new
    // SpectrumAccessQuadMZTransforming with a light clone of the underlying
    // SpectrumAccess object and the parameters.
    return std::shared_ptr<SpectrumAccessQuadMZTransforming>(
        new SpectrumAccessQuadMZTransforming(sptr_->lightClone(), a_, b_, c_, ppm_));
  }
OpenSwath::SpectrumPtr SpectrumAccessQuadMZTransforming::getSpectrumById(int id)
{
OpenSwath::SpectrumPtr s = sptr_->getSpectrumById(id);
for (size_t i = 0; i < s->getMZArray()->data.size(); i++)
{
// mz = a + b * mz + c * mz^2
double predict =
a_ +
b_ * s->getMZArray()->data[i] +
c_ * s->getMZArray()->data[i] * s->getMZArray()->data[i];
// If ppm is true, we predicted the ppm deviation, not the actual new mass
if (ppm_)
{
s->getMZArray()->data[i] = s->getMZArray()->data[i] - predict*s->getMZArray()->data[i]/1000000;
}
else
{
s->getMZArray()->data[i] = predict;
}
}
return s;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/DATAACCESS/SpectrumAccessSqMass.cpp | .cpp | 6,888 | 223 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/SpectrumAccessSqMass.h>
#include <algorithm> // std::lower_bound, std::upper_bound, std::sort
namespace OpenMS
{
  /// Constructor
  // Wraps an sqMass file handler; with an empty selection all spectra are visible.
  SpectrumAccessSqMass::SpectrumAccessSqMass(const OpenMS::Internal::MzMLSqliteHandler& handler) :
    handler_(handler)
  {}
  // Constructor restricting access to the spectra at the given file indices.
  SpectrumAccessSqMass::SpectrumAccessSqMass(const OpenMS::Internal::MzMLSqliteHandler& handler, const std::vector<int> & indices) :
    handler_(handler),
    sidx_(indices)
  {}
SpectrumAccessSqMass::SpectrumAccessSqMass(const SpectrumAccessSqMass& sp, const std::vector<int>& indices) :
handler_(sp.handler_)
{
if (indices.empty())
{
sidx_ = sp.sidx_;
}
else if (sp.sidx_.empty())
{
sidx_ = indices;
}
else
{
// we only want to select a subset of the currently selected indices
for (Size k = 0; k < indices.size(); k++)
{
if (indices[k] >= (int)sp.sidx_.size()) throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
String("Error creating SpectrumAccessSqMass with an index ") + indices[k] + " that exceeds the number of available data " + sp.sidx_.size());
sidx_.push_back( sp.sidx_[ indices[k] ] );
}
}
}
  /// Destructor
  SpectrumAccessSqMass::~SpectrumAccessSqMass() = default;
  /// Copy constructor
  // Shares the handler and copies only the (small) index selection.
  SpectrumAccessSqMass::SpectrumAccessSqMass(const SpectrumAccessSqMass & rhs) :
    handler_(rhs.handler_),
    sidx_(rhs.sidx_)
  {
  }
  /// Light clone operator (actual data will not get copied)
  std::shared_ptr<OpenSwath::ISpectrumAccess> SpectrumAccessSqMass::lightClone() const
  {
    return std::shared_ptr<SpectrumAccessSqMass>(new SpectrumAccessSqMass(*this));
  }
OpenSwath::SpectrumPtr SpectrumAccessSqMass::getSpectrumById(int id)
{
std::vector<int> indices;
if (sidx_.empty())
{
indices.push_back(id);
}
else
{
indices.push_back(sidx_[id]);
}
// read MSSpectra and prepare for conversion
std::vector<MSSpectrum> tmp_spectra;
handler_.readSpectra(tmp_spectra, indices, false);
const MSSpectrumType& spectrum = tmp_spectra[0];
OpenSwath::BinaryDataArrayPtr intensity_array(new OpenSwath::BinaryDataArray);
OpenSwath::BinaryDataArrayPtr mz_array(new OpenSwath::BinaryDataArray);
for (MSSpectrumType::const_iterator it = spectrum.begin(); it != spectrum.end(); ++it)
{
mz_array->data.push_back(it->getMZ());
intensity_array->data.push_back(it->getIntensity());
}
OpenSwath::SpectrumPtr sptr(new OpenSwath::Spectrum);
sptr->setMZArray(mz_array);
sptr->setIntensityArray(intensity_array);
return sptr;
}
OpenSwath::SpectrumMeta SpectrumAccessSqMass::getSpectrumMetaById(int id) const
{
std::vector<int> indices;
if (sidx_.empty())
{
indices.push_back(id);
}
else
{
indices.push_back(sidx_[id]);
}
// read MSSpectra and prepare for conversion
std::vector<MSSpectrum> tmp_spectra;
handler_.readSpectra(tmp_spectra, indices, false);
const MSSpectrumType& spectrum = tmp_spectra[0];
OpenSwath::SpectrumMeta m;
m.id = spectrum.getNativeID();
m.RT = spectrum.getRT();
m.ms_level = spectrum.getMSLevel();
return m;
}
void SpectrumAccessSqMass::getAllSpectra(std::vector< OpenSwath::SpectrumPtr > & spectra, std::vector< OpenSwath::SpectrumMeta > & spectra_meta) const
{
// read MSSpectra and prepare for conversion
std::vector<MSSpectrum> tmp_spectra;
if (sidx_.empty())
{
MSExperiment exp;
{
handler_.readExperiment(exp, false);
}
tmp_spectra = exp.getSpectra();
}
else
{
handler_.readSpectra(tmp_spectra, sidx_, false);
}
spectra.reserve(tmp_spectra.size());
spectra_meta.reserve(tmp_spectra.size());
for (Size k = 0; k < tmp_spectra.size(); k++)
{
const MSSpectrumType& spectrum = tmp_spectra[k];
OpenSwath::BinaryDataArrayPtr intensity_array(new OpenSwath::BinaryDataArray);
OpenSwath::BinaryDataArrayPtr mz_array(new OpenSwath::BinaryDataArray);
for (MSSpectrumType::const_iterator it = spectrum.begin(); it != spectrum.end(); ++it)
{
mz_array->data.push_back(it->getMZ());
intensity_array->data.push_back(it->getIntensity());
}
OpenSwath::SpectrumMeta m;
m.id = spectrum.getNativeID();
m.RT = spectrum.getRT();
m.ms_level = spectrum.getMSLevel();
spectra_meta.push_back(m);
OpenSwath::SpectrumPtr sptr(new OpenSwath::Spectrum);
sptr->setMZArray(mz_array);
sptr->setIntensityArray(intensity_array);
spectra.push_back(sptr);
}
}
  /// Returns the (external) indices of all spectra within [RT - deltaRT, RT + deltaRT].
  std::vector<std::size_t> SpectrumAccessSqMass::getSpectraByRT(double RT, double deltaRT) const
  {
    OPENMS_PRECONDITION(deltaRT >= 0, "Delta RT needs to be a positive number");
    // the handler returns file-level spectrum indices
    std::vector<std::size_t> res = handler_.getSpectraIndicesbyRT(RT, deltaRT, sidx_);
    if (sidx_.empty())
    {
      return res;
    }
    else
    {
      // we need to map the resulting indices back to the external indices
      // NOTE(review): this remapping is O(|res| * |sidx_|); fine for small
      // selections, consider a lookup table if selections grow large.
      std::vector<std::size_t> res_mapped;
      for (Size k = 0; k < res.size(); k++)
      {
        for (Size s_it = 0; s_it < sidx_.size(); s_it++)
        {
          if (res[k] == (size_t)sidx_[s_it]) {res_mapped.push_back(s_it);}
        }
      }
      return res_mapped;
    }
  }
size_t SpectrumAccessSqMass::getNrSpectra() const
{
size_t res;
if (sidx_.empty())
{
res = handler_.getNrSpectra();
}
else
{
res = sidx_.size();
}
return res;
}
  // Chromatogram access is not supported for sqMass-backed spectrum access.
  OpenSwath::ChromatogramPtr SpectrumAccessSqMass::getChromatogramById(int /* id */)
  {
    throw Exception::NotImplemented(__FILE__,__LINE__,OPENMS_PRETTY_FUNCTION);
  }
size_t SpectrumAccessSqMass::getNrChromatograms() const
{
size_t res;
// TODO: currently chrom indices are not supported
res = handler_.getNrChromatograms();
return res;
}
  // Chromatogram access is not supported for sqMass-backed spectrum access.
  std::string SpectrumAccessSqMass::getChromatogramNativeID(int /* id */) const
  {
    throw Exception::NotImplemented(__FILE__,__LINE__,OPENMS_PRETTY_FUNCTION);
  }
} //end namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/DATAACCESS/SpectrumAccessOpenMS.cpp | .cpp | 6,713 | 194 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/SpectrumAccessOpenMS.h>
#include <utility>
namespace OpenMS
{
SpectrumAccessOpenMS::SpectrumAccessOpenMS(std::shared_ptr<MSExperimentType> ms_experiment)
{
// store shared pointer to the actual MSExperiment
ms_experiment_ = std::move(ms_experiment);
}
SpectrumAccessOpenMS::~SpectrumAccessOpenMS() = default;
  // Copy constructor: both objects share the same underlying MSExperiment.
  SpectrumAccessOpenMS::SpectrumAccessOpenMS(const SpectrumAccessOpenMS & rhs) :
    ms_experiment_(rhs.ms_experiment_)
  {
    // this only copies the pointers and not the actual data ...
  }
  // Light clone: shares the experiment data with *this (see copy constructor).
  std::shared_ptr<OpenSwath::ISpectrumAccess> SpectrumAccessOpenMS::lightClone() const
  {
    return std::shared_ptr<SpectrumAccessOpenMS>(new SpectrumAccessOpenMS(*this));
  }
OpenSwath::SpectrumPtr SpectrumAccessOpenMS::getSpectrumById(int id)
{
OPENMS_PRECONDITION(id >= 0, "Id needs to be larger than zero");
OPENMS_PRECONDITION(id < (int)getNrSpectra(), "Id cannot be larger than number of spectra");
const MSSpectrumType& spectrum = (*ms_experiment_)[id];
OpenSwath::BinaryDataArrayPtr intensity_array(new OpenSwath::BinaryDataArray);
OpenSwath::BinaryDataArrayPtr mz_array(new OpenSwath::BinaryDataArray);
mz_array->data.reserve(spectrum.size());
intensity_array->data.reserve(spectrum.size());
for (const auto& it : spectrum)
{
mz_array->data.push_back(it.getMZ());
intensity_array->data.push_back(it.getIntensity());
}
OpenSwath::SpectrumPtr sptr(new OpenSwath::Spectrum);
sptr->setMZArray(mz_array);
sptr->setIntensityArray(intensity_array);
for (const auto& fda : spectrum.getFloatDataArrays() )
{
OpenSwath::BinaryDataArrayPtr tmp(new OpenSwath::BinaryDataArray);
tmp->data.reserve(fda.size());
for (const auto& val : fda)
{
tmp->data.push_back(val);
}
tmp->description = fda.getName();
sptr->getDataArrays().push_back(tmp);
}
for (const auto& ida : spectrum.getIntegerDataArrays() )
{
OpenSwath::BinaryDataArrayPtr tmp(new OpenSwath::BinaryDataArray);
tmp->data.reserve(ida.size());
for (const auto& val : ida)
{
tmp->data.push_back(val);
}
tmp->description = ida.getName();
sptr->getDataArrays().push_back(tmp);
}
return sptr;
}
  /// Returns RT and MS level for spectrum @p id.
  /// NOTE(review): unlike SpectrumAccessSqMass::getSpectrumMetaById, this does
  /// not populate meta.id with the native ID -- confirm whether that is intended.
  OpenSwath::SpectrumMeta SpectrumAccessOpenMS::getSpectrumMetaById(int id) const
  {
    OPENMS_PRECONDITION(id >= 0, "Id needs to be larger than zero");
    OPENMS_PRECONDITION(id < (int)getNrSpectra(), "Id cannot be larger than number of spectra");
    OpenSwath::SpectrumMeta meta;
    meta.RT = (*ms_experiment_)[id].getRT();
    meta.ms_level = (*ms_experiment_)[id].getMSLevel();
    return meta;
  }
OpenSwath::ChromatogramPtr SpectrumAccessOpenMS::getChromatogramById(int id)
{
OPENMS_PRECONDITION(id >= 0, "Id needs to be larger than zero");
OPENMS_PRECONDITION(id < (int)getNrChromatograms(), "Id cannot be larger than number of chromatograms");
const MSChromatogramType& chromatogram = ms_experiment_->getChromatograms()[id];
OpenSwath::BinaryDataArrayPtr intensity_array(new OpenSwath::BinaryDataArray);
OpenSwath::BinaryDataArrayPtr rt_array(new OpenSwath::BinaryDataArray);
rt_array->data.reserve(chromatogram.size());
intensity_array->data.reserve(chromatogram.size());
for (const auto& it : chromatogram)
{
rt_array->data.push_back(it.getRT());
intensity_array->data.push_back(it.getIntensity());
}
OpenSwath::ChromatogramPtr cptr(new OpenSwath::Chromatogram);
cptr->setTimeArray(rt_array);
cptr->setIntensityArray(intensity_array);
for (const auto& fda : chromatogram.getFloatDataArrays() )
{
OpenSwath::BinaryDataArrayPtr tmp(new OpenSwath::BinaryDataArray);
tmp->data.reserve(fda.size());
for (const auto& val : fda)
{
tmp->data.push_back(val);
}
tmp->description = fda.getName();
cptr->getDataArrays().push_back(tmp);
}
for (const auto& ida : chromatogram.getIntegerDataArrays() )
{
OpenSwath::BinaryDataArrayPtr tmp(new OpenSwath::BinaryDataArray);
tmp->data.reserve(ida.size());
for (const auto& val : ida)
{
tmp->data.push_back(val);
}
tmp->description = ida.getName();
cptr->getDataArrays().push_back(tmp);
}
return cptr;
}
  /// Returns the indices of all spectra with RT in [RT - deltaRT, RT + deltaRT].
  /// NOTE(review): the upper bound is inclusive (<=) here, while the cached
  /// variant (SpectrumAccessOpenMSCached) uses a strict < -- confirm which
  /// boundary behavior is intended and align the two.
  std::vector<std::size_t> SpectrumAccessOpenMS::getSpectraByRT(double RT, double deltaRT) const
  {
    OPENMS_PRECONDITION(deltaRT >= 0, "Delta RT needs to be a positive number");
    // we first perform a search for the spectrum that is past the
    // beginning of the RT domain. Then we add this spectrum and try to add
    // further spectra as long as they are below RT + deltaRT.
    std::vector<std::size_t> result;
    auto spectrum = ms_experiment_->RTBegin(RT - deltaRT);
    if (spectrum == ms_experiment_->end()) return result;
    result.push_back(std::distance(ms_experiment_->begin(), spectrum));
    spectrum++;
    while (spectrum != ms_experiment_->end() && spectrum->getRT() <= RT + deltaRT)
    {
      result.push_back(spectrum - ms_experiment_->begin());
      spectrum++;
    }
    return result;
  }
  // Number of chromatograms stored in the wrapped experiment.
  size_t SpectrumAccessOpenMS::getNrChromatograms() const
  {
    return ms_experiment_->getChromatograms().size();
  }
ChromatogramSettings SpectrumAccessOpenMS::getChromatogramMetaInfo(int id) const
{
OPENMS_PRECONDITION(id >= 0, "Id needs to be larger than zero");
OPENMS_PRECONDITION(id < (int)getNrChromatograms(), "Id cannot be larger than number of spectra");
return ms_experiment_->getChromatograms()[id];
}
std::string SpectrumAccessOpenMS::getChromatogramNativeID(int id) const
{
OPENMS_PRECONDITION(id >= 0, "Id needs to be larger than zero");
OPENMS_PRECONDITION(id < (int)getNrChromatograms(), "Id cannot be larger than number of spectra");
return ms_experiment_->getChromatograms()[id].getNativeID();
}
  // Number of spectra in the wrapped experiment.
  size_t SpectrumAccessOpenMS::getNrSpectra() const
  {
    return ms_experiment_->size();
  }
  // Returns the meta information (settings) of spectrum @p id.
  // The spectrum is sliced down to its SpectrumSettings base on return.
  SpectrumSettings SpectrumAccessOpenMS::getSpectraMetaInfo(int id) const
  {
    OPENMS_PRECONDITION(id >= 0, "Id needs to be larger than zero");
    OPENMS_PRECONDITION(id < (int)getNrSpectra(), "Id cannot be larger than number of spectra");
    return (*ms_experiment_)[id];
  }
} //end namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/DATAACCESS/SpectrumAccessTransforming.cpp | .cpp | 1,579 | 60 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/SpectrumAccessTransforming.h>
#include <utility>
namespace OpenMS
{
  // Base class for decorators that transform the data of a wrapped
  // SpectrumAccess object; takes (shared) ownership of the wrapped accessor.
  SpectrumAccessTransforming::SpectrumAccessTransforming(OpenSwath::SpectrumAccessPtr sptr) :
    sptr_(std::move(sptr))
  {}
  SpectrumAccessTransforming::~SpectrumAccessTransforming() = default;
  // The following methods simply forward to the wrapped accessor (sptr_);
  // derived transforming classes override only the ones they modify.
  size_t SpectrumAccessTransforming::getNrChromatograms() const
  {
    return sptr_->getNrChromatograms();
  }
  OpenSwath::SpectrumPtr SpectrumAccessTransforming::getSpectrumById(int id)
  {
    return sptr_->getSpectrumById(id);
  }
  OpenSwath::SpectrumMeta SpectrumAccessTransforming::getSpectrumMetaById(int id) const
  {
    return sptr_->getSpectrumMetaById(id);
  }
  std::vector<std::size_t> SpectrumAccessTransforming::getSpectraByRT(double RT, double deltaRT) const
  {
    return sptr_->getSpectraByRT(RT, deltaRT);
  }
  size_t SpectrumAccessTransforming::getNrSpectra() const
  {
    return sptr_->getNrSpectra();
  }
  OpenSwath::ChromatogramPtr SpectrumAccessTransforming::getChromatogramById(int id)
  {
    return sptr_->getChromatogramById(id);
  }
  std::string SpectrumAccessTransforming::getChromatogramNativeID(int id) const
  {
    return sptr_->getChromatogramNativeID(id);
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/DATAACCESS/SpectrumAccessOpenMSCached.cpp | .cpp | 5,387 | 138 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/SpectrumAccessOpenMSCached.h>
#include <OpenMS/FORMAT/HANDLERS/CachedMzMLHandler.h>
namespace OpenMS
{
  // Opens the cached mzML file; peak data stays on disk and is read on demand.
  SpectrumAccessOpenMSCached::SpectrumAccessOpenMSCached(const String& filename) :
    CachedmzML(filename)
  {
  }
  SpectrumAccessOpenMSCached::~SpectrumAccessOpenMSCached() = default;
  SpectrumAccessOpenMSCached::SpectrumAccessOpenMSCached(const SpectrumAccessOpenMSCached & rhs) :
    CachedmzML(rhs)
  {
    // this only copies the indices and meta-data
  }
  // Light clone: duplicates only indices and meta-data (see copy constructor).
  std::shared_ptr<OpenSwath::ISpectrumAccess> SpectrumAccessOpenMSCached::lightClone() const
  {
    return std::shared_ptr<SpectrumAccessOpenMSCached>(new SpectrumAccessOpenMSCached(*this));
  }
  /// Reads the peak data of spectrum @p id from the cached file on disk.
  /// @throws Exception::ParseError if the stream cannot seek to the spectrum.
  OpenSwath::SpectrumPtr SpectrumAccessOpenMSCached::getSpectrumById(int id)
  {
    OPENMS_PRECONDITION(id >= 0, "Id needs to be larger than zero");
    OPENMS_PRECONDITION(id < (int)getNrSpectra(), "Id cannot be larger than number of spectra");
    // ms_level and rt are filled by readSpectrumFast but not returned here;
    // callers needing meta data should use getSpectrumMetaById()
    int ms_level = -1;
    double rt = -1.0;
    if ( !ifs_.seekg(spectra_index_[id]) )
    {
      std::cerr << "Error while reading spectrum " << id << " - seekg created an error when trying to change position to " << spectra_index_[id] << "." << std::endl;
      std::cerr << "Maybe an invalid position was supplied to seekg, this can happen for example when reading large files (>2GB) on 32bit systems." << std::endl;
      throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "Error while changing position of input stream pointer.", filename_cached_);
    }
    OpenSwath::SpectrumPtr sptr(new OpenSwath::Spectrum);
    sptr->getDataArrays() = Internal::CachedMzMLHandler::readSpectrumFast(ifs_, ms_level, rt);
    return sptr;
  }
  /// Returns RT and MS level from the in-memory meta experiment (no disk access).
  /// NOTE(review): meta.id (native ID) is not populated here, unlike the
  /// SpectrumAccessSqMass variant -- confirm whether that is intended.
  OpenSwath::SpectrumMeta SpectrumAccessOpenMSCached::getSpectrumMetaById(int id) const
  {
    OPENMS_PRECONDITION(id >= 0, "Id needs to be larger than zero");
    OPENMS_PRECONDITION(id < (int)getNrSpectra(), "Id cannot be larger than number of spectra");
    OpenSwath::SpectrumMeta meta;
    meta.RT = meta_ms_experiment_[id].getRT();
    meta.ms_level = meta_ms_experiment_[id].getMSLevel();
    return meta;
  }
  /// Reads the data of chromatogram @p id from the cached file on disk.
  /// @throws Exception::ParseError if the stream cannot seek to the chromatogram.
  OpenSwath::ChromatogramPtr SpectrumAccessOpenMSCached::getChromatogramById(int id)
  {
    OPENMS_PRECONDITION(id >= 0, "Id needs to be larger than zero");
    OPENMS_PRECONDITION(id < (int)getNrChromatograms(), "Id cannot be larger than number of chromatograms");
    if ( !ifs_.seekg(chrom_index_[id]) )
    {
      std::cerr << "Error while reading chromatogram " << id << " - seekg created an error when trying to change position to " << chrom_index_[id] << "." << std::endl;
      std::cerr << "Maybe an invalid position was supplied to seekg, this can happen for example when reading large files (>2GB) on 32bit systems." << std::endl;
      throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "Error while changing position of input stream pointer.", filename_cached_);
    }
    OpenSwath::ChromatogramPtr cptr(new OpenSwath::Chromatogram);
    cptr->getDataArrays() = Internal::CachedMzMLHandler::readChromatogramFast(ifs_);
    return cptr;
  }
  /// Returns the indices of all spectra with RT in [RT - deltaRT, RT + deltaRT).
  /// NOTE(review): the upper bound is exclusive (<) here, while
  /// SpectrumAccessOpenMS uses an inclusive <= -- confirm which boundary
  /// behavior is intended and align the two.
  std::vector<std::size_t> SpectrumAccessOpenMSCached::getSpectraByRT(double RT, double deltaRT) const
  {
    OPENMS_PRECONDITION(deltaRT >= 0, "Delta RT needs to be a positive number");
    // we first perform a search for the spectrum that is past the
    // beginning of the RT domain. Then we add this spectrum and try to add
    // further spectra as long as they are below RT + deltaRT.
    std::vector<std::size_t> result;
    auto spectrum = meta_ms_experiment_.RTBegin(RT - deltaRT);
    if (spectrum == meta_ms_experiment_.end()) return result;
    result.push_back(std::distance(meta_ms_experiment_.begin(), spectrum));
    spectrum++;
    while (spectrum != meta_ms_experiment_.end() && spectrum->getRT() < RT + deltaRT)
    {
      result.push_back(spectrum - meta_ms_experiment_.begin());
      spectrum++;
    }
    return result;
  }
  size_t SpectrumAccessOpenMSCached::getNrSpectra() const
  {
    // Number of spectra is taken from the meta experiment, not the cache file.
    return meta_ms_experiment_.size();
  }
SpectrumSettings SpectrumAccessOpenMSCached::getSpectraMetaInfo(int id) const
{
return meta_ms_experiment_[id];
}
  size_t SpectrumAccessOpenMSCached::getNrChromatograms() const
  {
    // Number of chromatograms is taken from the meta experiment.
    return meta_ms_experiment_.getChromatograms().size();
  }
ChromatogramSettings SpectrumAccessOpenMSCached::getChromatogramMetaInfo(int id) const
{
OPENMS_PRECONDITION(id >= 0, "Id needs to be larger than zero");
OPENMS_PRECONDITION(id < (int)getNrChromatograms(), "Id cannot be larger than number of spectra");
return meta_ms_experiment_.getChromatograms()[id];
}
std::string SpectrumAccessOpenMSCached::getChromatogramNativeID(int id) const
{
OPENMS_PRECONDITION(id >= 0, "Id needs to be larger than zero");
OPENMS_PRECONDITION(id < (int)getNrChromatograms(), "Id cannot be larger than number of spectra");
return meta_ms_experiment_.getChromatograms()[id].getNativeID();
}
} //end namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/DATAACCESS/DataAccessHelper.cpp | .cpp | 11,452 | 318 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/DataAccessHelper.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
namespace OpenMS
{
void OpenSwathDataAccessHelper::convertToOpenMSSpectrum(const OpenSwath::SpectrumPtr& sptr, OpenMS::MSSpectrum & spectrum)
{
std::vector<double>::const_iterator mz_it = sptr->getMZArray()->data.begin();
std::vector<double>::const_iterator int_it = sptr->getIntensityArray()->data.begin();
if (!spectrum.empty()) spectrum.clear(false);
Peak1D p;
spectrum.reserve(sptr->getMZArray()->data.size());
for (; mz_it != sptr->getMZArray()->data.end(); ++mz_it, ++int_it)
{
p.setMZ(*mz_it);
p.setIntensity(*int_it);
spectrum.push_back(p);
}
}
OpenSwath::SpectrumPtr OpenSwathDataAccessHelper::convertToSpectrumPtr(const OpenMS::MSSpectrum & spectrum)
{
OpenSwath::SpectrumPtr sptr(new OpenSwath::Spectrum);
OpenSwath::BinaryDataArrayPtr intensity_array = sptr->getIntensityArray();
OpenSwath::BinaryDataArrayPtr mz_array = sptr->getMZArray();
mz_array->data.reserve(spectrum.size());
intensity_array->data.reserve(spectrum.size());
for (MSSpectrum::const_iterator it = spectrum.begin(); it != spectrum.end(); ++it)
{
mz_array->data.push_back(it->getMZ());
intensity_array->data.push_back(it->getIntensity());
}
return sptr;
}
OpenSwath::ChromatogramPtr OpenSwathDataAccessHelper::convertToChromatogramPtr(const OpenMS::MSChromatogram & chromatogram)
{
OpenSwath::ChromatogramPtr cptr(new OpenSwath::Chromatogram);
OpenSwath::BinaryDataArrayPtr intensity_array = cptr->getIntensityArray();
OpenSwath::BinaryDataArrayPtr rt_array = cptr->getTimeArray();
rt_array->data.reserve(chromatogram.size());
intensity_array->data.reserve(chromatogram.size());
for (MSChromatogram::const_iterator it = chromatogram.begin(); it != chromatogram.end(); ++it)
{
rt_array->data.push_back(it->getRT());
intensity_array->data.push_back(it->getIntensity());
}
return cptr;
}
void OpenSwathDataAccessHelper::convertToOpenMSChromatogram(const OpenSwath::ChromatogramPtr& cptr, OpenMS::MSChromatogram & chromatogram)
{
std::vector<double>::const_iterator rt_it = cptr->getTimeArray()->data.begin();
std::vector<double>::const_iterator int_it = cptr->getIntensityArray()->data.begin();
if (!chromatogram.empty()) chromatogram.clear(false);
ChromatogramPeak peak;
chromatogram.reserve(cptr->getTimeArray()->data.size());
for (; rt_it != cptr->getTimeArray()->data.end(); ++rt_it, ++int_it)
{
peak.setRT(*rt_it);
peak.setIntensity(*int_it);
chromatogram.push_back(peak);
}
}
void OpenSwathDataAccessHelper::convertToOpenMSChromatogramFilter(OpenMS::MSChromatogram & chromatogram,
const OpenSwath::ChromatogramPtr& cptr,
double rt_min,
double rt_max)
{
std::vector<double>::const_iterator rt_it = cptr->getTimeArray()->data.begin();
std::vector<double>::const_iterator int_it = cptr->getIntensityArray()->data.begin();
ChromatogramPeak peak;
chromatogram.clear(false);
chromatogram.reserve(cptr->getTimeArray()->data.size());
for (; rt_it != cptr->getTimeArray()->data.end(); ++rt_it, ++int_it)
{
if (*rt_it < rt_min || *rt_it > rt_max)
{
continue;
}
peak.setRT(*rt_it);
peak.setIntensity(*int_it);
chromatogram.push_back(peak);
}
}
void OpenSwathDataAccessHelper::convertTargetedExp(const OpenMS::TargetedExperiment & transition_exp_, OpenSwath::LightTargetedExperiment & transition_exp)
{
//copy proteins
for (Size i = 0; i < transition_exp_.getProteins().size(); i++)
{
OpenSwath::LightProtein p;
p.id = transition_exp_.getProteins()[i].id;
transition_exp.proteins.push_back(p);
}
//copy peptides and store as compounds
for (Size i = 0; i < transition_exp_.getPeptides().size(); i++)
{
OpenSwath::LightCompound p;
OpenSwathDataAccessHelper::convertTargetedCompound(transition_exp_.getPeptides()[i], p);
transition_exp.compounds.push_back(p);
}
//copy compounds and store as compounds
for (Size i = 0; i < transition_exp_.getCompounds().size(); i++)
{
OpenSwath::LightCompound c;
OpenSwathDataAccessHelper::convertTargetedCompound(transition_exp_.getCompounds()[i], c);
transition_exp.compounds.push_back(c);
}
//mapping of transitions
for (const auto& transition : transition_exp_.getTransitions())
{
OpenSwath::LightTransition t;
t.transition_name = transition.getNativeID();
t.product_mz = transition.getProductMZ();
t.precursor_mz = transition.getPrecursorMZ();
t.library_intensity = transition.getLibraryIntensity();
t.peptide_ref = transition.getPeptideRef();
// If compound is a peptide, get the ion mobility information from the compound
if (!t.peptide_ref.empty())
{
OpenSwath::LightCompound p = transition_exp.getPeptideByRef(t.peptide_ref);
t.precursor_im = p.getDriftTime();
}
// try compound ref
else // (t.peptide_ref.empty())
{
t.peptide_ref = transition.getCompoundRef();
}
if (transition.isProductChargeStateSet())
{
t.fragment_charge = static_cast<int8_t>(transition.getProductChargeState());
}
t.setDecoy(false);
// legacy
#if 1
const auto& cv_terms = transition.getCVTerms();
if (cv_terms.find("decoy") != cv_terms.end() && cv_terms.at("decoy")[0].getValue().toString() == "1" )
{
t.setDecoy(true);
}
else if (cv_terms.find("MS:1002007") != cv_terms.end()) // target SRM transition
{
t.setDecoy(false);
}
else if (cv_terms.find("MS:1002008") != cv_terms.end()) // decoy SRM transition
{
t.setDecoy(true);
}
else if (cv_terms.find("MS:1002007") != cv_terms.end() && cv_terms.find("MS:1002008") != cv_terms.end()) // both == illegal
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Transition " + t.transition_name + " cannot be target and decoy at the same time.");
}
else
#endif
if (transition.getDecoyTransitionType() == ReactionMonitoringTransition::UNKNOWN ||
transition.getDecoyTransitionType() == ReactionMonitoringTransition::TARGET)
{
// assume its target
t.setDecoy(false);
}
else if (transition.getDecoyTransitionType() == ReactionMonitoringTransition::DECOY)
{
t.setDecoy(true);
}
t.setDetectingTransition(transition.isDetectingTransition());
t.setIdentifyingTransition(transition.isIdentifyingTransition());
t.setQuantifyingTransition(transition.isQuantifyingTransition());
transition_exp.transitions.push_back(t);
}
}
  // Converts a targeted-experiment peptide into the light-weight OpenSwath
  // compound representation: id, RT (normalized to seconds), ion mobility,
  // charge, sequence, meta values, protein references and modifications.
  void OpenSwathDataAccessHelper::convertTargetedCompound(const TargetedExperiment::Peptide& pep, OpenSwath::LightCompound & p)
  {
    OpenSwath::LightModification light_mod;
    p.id = pep.id;
    if (pep.hasRetentionTime())
    {
      p.rt = pep.getRetentionTime();
      if (pep.getRetentionTimeUnit() == TargetedExperimentHelper::RetentionTime::RTUnit::MINUTE)
      {
        // library RT stored in minutes -> convert to seconds
        p.rt = 60 * pep.getRetentionTime();
      }
    }
    p.setDriftTime(pep.getDriftTime());
    if (pep.hasCharge())
    {
      p.charge = pep.getChargeState();
    }
    p.sequence = pep.sequence;
    p.peptide_group_label = pep.getPeptideGroupLabel();
    if (pep.metaValueExists("GeneName"))
    {
      p.gene_name = (std::string)pep.getMetaValue("GeneName");
    }
    // Is it potentially a metabolomics compound
    if (pep.metaValueExists("SumFormula"))
    {
      p.sum_formula = (std::string)pep.getMetaValue("SumFormula");
    }
    if (pep.metaValueExists("CompoundName"))
    {
      p.compound_name = (std::string)pep.getMetaValue("CompoundName");
    }
    p.protein_refs.clear();
    if (!pep.protein_refs.empty())
    {
      p.protein_refs.insert( p.protein_refs.begin(), pep.protein_refs.begin(), pep.protein_refs.end() );
    }
    // Mapping of peptide modifications (don't do this for metabolites...)
    if (p.isPeptide())
    {
      OpenMS::AASequence aa_sequence = TargetedExperimentHelper::getAASequence(pep);
      if (aa_sequence.hasNTerminalModification())
      {
        const ResidueModification& rmod = *(aa_sequence.getNTerminalModification());
        light_mod.location = -1; // location -1 marks the N-terminus
        light_mod.unimod_id = rmod.getUniModRecordId();
        p.modifications.push_back(light_mod);
      }
      if (aa_sequence.hasCTerminalModification())
      {
        const ResidueModification& rmod = *(aa_sequence.getCTerminalModification());
        light_mod.location = boost::numeric_cast<int>(aa_sequence.size()); // one past the last residue marks the C-terminus
        light_mod.unimod_id = rmod.getUniModRecordId();
        p.modifications.push_back(light_mod);
      }
      for (Size i = 0; i != aa_sequence.size(); i++)
      {
        if (aa_sequence[i].isModified())
        {
          // search the residue in the modification database (if the sequence is valid, we should find it)
          const ResidueModification& rmod = *(aa_sequence.getResidue(i).getModification());
          light_mod.location = boost::numeric_cast<int>(i);
          light_mod.unimod_id = rmod.getUniModRecordId();
          p.modifications.push_back(light_mod);
        }
      }
    }
  }
void OpenSwathDataAccessHelper::convertTargetedCompound(const TargetedExperiment::Compound& compound, OpenSwath::LightCompound & comp)
{
comp.id = compound.id;
if (compound.hasRetentionTime())
{
comp.rt = compound.getRetentionTime();
if (compound.getRetentionTimeUnit() == TargetedExperimentHelper::RetentionTime::RTUnit::MINUTE)
{
comp.rt = 60 * compound.getRetentionTime();
}
}
comp.setDriftTime(compound.getDriftTime());
if (compound.hasCharge())
{
comp.charge = compound.getChargeState();
}
comp.sum_formula = (std::string)compound.molecular_formula;
if (compound.metaValueExists("CompoundName"))
{
comp.compound_name = (std::string)compound.getMetaValue("CompoundName");
}
}
void OpenSwathDataAccessHelper::convertPeptideToAASequence(const OpenSwath::LightCompound & peptide, AASequence & aa_sequence)
{
OPENMS_PRECONDITION(peptide.isPeptide(), "Function needs peptide, not metabolite")
aa_sequence = AASequence::fromString(peptide.sequence);
for (const auto & it : peptide.modifications)
{
if (it.unimod_id != -1)
{
TargetedExperimentHelper::setModification(it.location,
int(peptide.sequence.size()),
"UniMod:" + String(it.unimod_id), aa_sequence);
}
}
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/DATAACCESS/SimpleOpenMSSpectraAccessFactory.cpp | .cpp | 1,914 | 58 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest, Witold Wolski$
// $Authors: Hannes Roest, Witold Wolski$
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/SimpleOpenMSSpectraAccessFactory.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/SpectrumAccessOpenMS.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/SpectrumAccessOpenMSCached.h>
namespace OpenMS
{
bool SimpleOpenMSSpectraFactory::isExperimentCached(const std::shared_ptr<PeakMap>& exp)
{
for (std::size_t i = 0; i < exp->getSpectra().size(); ++i)
{
for (std::size_t j = 0; j < exp->getSpectra()[i].getDataProcessing().size(); j++)
{
if (exp->getSpectra()[i].getDataProcessing()[j]->metaValueExists("cached_data"))
{
return true;
}
}
}
for (std::size_t i = 0; i < exp->getChromatograms().size(); ++i)
{
for (std::size_t j = 0; j < exp->getChromatograms()[i].getDataProcessing().size(); j++)
{
if (exp->getChromatograms()[i].getDataProcessing()[j]->metaValueExists("cached_data"))
{
return true;
}
}
}
return false;
}
OpenSwath::SpectrumAccessPtr SimpleOpenMSSpectraFactory::getSpectrumAccessOpenMSPtr(const std::shared_ptr<PeakMap>& exp)
{
bool is_cached = SimpleOpenMSSpectraFactory::isExperimentCached(exp);
if (is_cached)
{
OpenSwath::SpectrumAccessPtr experiment(new OpenMS::SpectrumAccessOpenMSCached(exp->getLoadedFilePath()));
return experiment;
}
else
{
OpenSwath::SpectrumAccessPtr experiment(new OpenMS::SpectrumAccessOpenMS(exp));
return experiment;
}
}
}//end Namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/DATAACCESS/SpectrumAccessOpenMSInMemory.cpp | .cpp | 4,825 | 125 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/SpectrumAccessOpenMSInMemory.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/SpectrumAccessSqMass.h>
#include <algorithm> // std::lower_bound, std::upper_bound, std::sort
namespace OpenMS
{
SpectrumAccessOpenMSInMemory::SpectrumAccessOpenMSInMemory(OpenSwath::ISpectrumAccess & origin)
{
// special case: we can grab the data directly (and fast)
if (dynamic_cast<SpectrumAccessSqMass*> (&origin))
{
SpectrumAccessSqMass* tmp = dynamic_cast<SpectrumAccessSqMass*> (&origin);
tmp->getAllSpectra(spectra_, spectra_meta_);
}
else
{
for (Size i = 0; i < origin.getNrSpectra(); ++i)
{
spectra_.push_back( origin.getSpectrumById(i) );
spectra_meta_.push_back( origin.getSpectrumMetaById(i) );
}
for (Size i = 0; i < origin.getNrChromatograms(); ++i)
{
chromatograms_.push_back( origin.getChromatogramById(i) );
chromatogram_ids_.push_back( origin.getChromatogramNativeID(i) );
}
}
OPENMS_POSTCONDITION(spectra_.size() == spectra_meta_.size(), "Spectra and meta data needs to match")
OPENMS_POSTCONDITION(chromatogram_ids_.size() == chromatograms_.size(), "Chromatograms and meta data needs to match")
}
  // Default destructor: all members release their resources via their own
  // destructors (shared pointers / vectors).
  SpectrumAccessOpenMSInMemory::~SpectrumAccessOpenMSInMemory() = default;
  // Shallow copy: the spectrum/chromatogram shared pointers are copied, so
  // the underlying data arrays are shared with rhs.
  SpectrumAccessOpenMSInMemory::SpectrumAccessOpenMSInMemory(const SpectrumAccessOpenMSInMemory & rhs) :
    spectra_(rhs.spectra_),
    spectra_meta_(rhs.spectra_meta_),
    chromatograms_(rhs.chromatograms_),
    chromatogram_ids_(rhs.chromatogram_ids_)
  {
    // this only copies the pointers and not the actual data ...
  }
std::shared_ptr<OpenSwath::ISpectrumAccess> SpectrumAccessOpenMSInMemory::lightClone() const
{
return std::shared_ptr<SpectrumAccessOpenMSInMemory>(new SpectrumAccessOpenMSInMemory(*this));
}
  // Returns the (shared) spectrum data for the given index.
  OpenSwath::SpectrumPtr SpectrumAccessOpenMSInMemory::getSpectrumById(int id)
  {
    OPENMS_PRECONDITION(id >= 0, "Id needs to be larger than zero");
    OPENMS_PRECONDITION(id < (int)getNrSpectra(), "Id cannot be larger than number of spectra");
    return spectra_[id];
  }
  // Returns the meta data (RT, MS level, ...) for the given spectrum index.
  OpenSwath::SpectrumMeta SpectrumAccessOpenMSInMemory::getSpectrumMetaById(int id) const
  {
    OPENMS_PRECONDITION(id >= 0, "Id needs to be larger than zero");
    OPENMS_PRECONDITION(id < (int)getNrSpectra(), "Id cannot be larger than number of spectra");
    return spectra_meta_[id];
  }
std::vector<std::size_t> SpectrumAccessOpenMSInMemory::getSpectraByRT(double RT, double deltaRT) const
{
OPENMS_PRECONDITION(deltaRT >= 0, "Delta RT needs to be a positive number");
// we first perform a search for the spectrum that is past the
// beginning of the RT domain. Then we add this spectrum and try to add
// further spectra as long as they are below RT + deltaRT.
std::vector<std::size_t> result;
OpenSwath::SpectrumMeta s;
s.RT = RT - deltaRT;
auto spectrum = std::lower_bound(spectra_meta_.begin(), spectra_meta_.end(), s, OpenSwath::SpectrumMeta::RTLess());
if (spectrum == spectra_meta_.end()) return result;
result.push_back(std::distance(spectra_meta_.begin(), spectrum));
++spectrum;
while (spectrum->RT < RT + deltaRT && spectrum != spectra_meta_.end())
{
result.push_back(std::distance(spectra_meta_.begin(), spectrum));
++spectrum;
}
return result;
}
  // Number of spectra held in memory.
  size_t SpectrumAccessOpenMSInMemory::getNrSpectra() const
  {
    OPENMS_PRECONDITION(spectra_.size() == spectra_meta_.size(), "Spectra and meta data needs to match")
    return spectra_.size();
  }
  // Returns the (shared) chromatogram data for the given index.
  OpenSwath::ChromatogramPtr SpectrumAccessOpenMSInMemory::getChromatogramById(int id)
  {
    OPENMS_PRECONDITION(id >= 0, "Id needs to be larger than zero");
    OPENMS_PRECONDITION(id < (int)getNrChromatograms(), "Id cannot be larger than number of chromatograms");
    return chromatograms_[id];
  }
  // Number of chromatograms held in memory.
  size_t SpectrumAccessOpenMSInMemory::getNrChromatograms() const
  {
    OPENMS_PRECONDITION(chromatogram_ids_.size() == chromatograms_.size(), "Chromatograms and meta data needs to match")
    return chromatograms_.size();
  }
std::string SpectrumAccessOpenMSInMemory::getChromatogramNativeID(int id) const
{
OPENMS_PRECONDITION(id >= 0, "Id needs to be larger than zero");
OPENMS_PRECONDITION(id < (int)getNrChromatograms(), "Id cannot be larger than number of spectra");
return chromatogram_ids_[id];
}
} //end namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/OPENSWATH/DATAACCESS/MRMFeatureAccessOpenMS.cpp | .cpp | 4,585 | 139 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/MRMFeatureAccessOpenMS.h>
#include <OpenMS/KERNEL/MSChromatogram.h>
#include <OpenMS/ANALYSIS/MRM/ReactionMonitoringTransition.h>
namespace OpenMS
{
MRMFeatureOpenMS::MRMFeatureOpenMS(MRMFeature& mrmfeature) :
mrmfeature_(mrmfeature)
{
std::vector<String> ids;
mrmfeature.getFeatureIDs(ids);
for (std::vector<String>::iterator it = ids.begin(); it != ids.end(); ++it)
{
std::shared_ptr<FeatureOpenMS> ptr = std::shared_ptr<FeatureOpenMS>(new FeatureOpenMS(mrmfeature.getFeature(*it)));
features_[*it] = ptr;
}
std::vector<String> p_ids;
mrmfeature.getPrecursorFeatureIDs(p_ids);
for (std::vector<String>::iterator it = p_ids.begin(); it != p_ids.end(); ++it)
{
std::shared_ptr<FeatureOpenMS> ptr = std::shared_ptr<FeatureOpenMS>(new FeatureOpenMS(mrmfeature.getPrecursorFeature(*it)));
precursor_features_[*it] = ptr;
}
}
  // Non-owning adapter: keeps a raw pointer to the wrapped feature, so the
  // feature must outlive this object.
  FeatureOpenMS::FeatureOpenMS(Feature& feature)
  {
    feature_ = &feature; // store raw ptr to the feature
  }
  // Default destructor: the wrapped feature is not owned and not deleted.
  FeatureOpenMS::~FeatureOpenMS() = default;
void FeatureOpenMS::getRT(std::vector<double>& rt) const
{
OPENMS_PRECONDITION(feature_->getConvexHulls().size() == 1, "There needs to exactly one convex hull per feature.");
ConvexHull2D::PointArrayType data_points = feature_->getConvexHulls()[0].getHullPoints();
for (ConvexHull2D::PointArrayType::iterator it = data_points.begin(); it != data_points.end(); ++it)
{
rt.push_back(it->getX());
}
}
void FeatureOpenMS::getIntensity(std::vector<double>& intens) const
{
OPENMS_PRECONDITION(feature_->getConvexHulls().size() == 1, "There needs to exactly one convex hull per feature.");
ConvexHull2D::PointArrayType data_points = feature_->getConvexHulls()[0].getHullPoints();
for (ConvexHull2D::PointArrayType::iterator it = data_points.begin(); it != data_points.end(); ++it)
{
intens.push_back(it->getY());
}
}
  // Overall intensity of the wrapped feature.
  float FeatureOpenMS::getIntensity() const
  {
    return feature_->getIntensity();
  }
  // Retention time (apex) of the wrapped feature.
  double FeatureOpenMS::getRT() const
  {
    return feature_->getRT();
  }
  // Default destructor: adapters are held by shared_ptr and clean up themselves.
  MRMFeatureOpenMS::~MRMFeatureOpenMS() = default;
  // Returns the adapter for the transition-level feature with this native id.
  std::shared_ptr<OpenSwath::IFeature> MRMFeatureOpenMS::getFeature(std::string nativeID)
  {
    OPENMS_PRECONDITION(features_.find(nativeID) != features_.end(), "Feature needs to exist");
    // NOTE(review): operator[] default-inserts a null pointer for an unknown
    // id; only the (debug-build) precondition guards against that.
    return std::static_pointer_cast<OpenSwath::IFeature>(features_[nativeID]);
  }
  // Returns the adapter for the precursor-level feature with this native id.
  std::shared_ptr<OpenSwath::IFeature> MRMFeatureOpenMS::getPrecursorFeature(std::string nativeID)
  {
    OPENMS_PRECONDITION(precursor_features_.find(nativeID) != precursor_features_.end(), "Precursor feature needs to exist");
    // NOTE(review): operator[] default-inserts a null pointer for an unknown
    // id; only the (debug-build) precondition guards against that.
    return std::static_pointer_cast<OpenSwath::IFeature>(precursor_features_[nativeID]);
  }
std::vector<std::string> MRMFeatureOpenMS::getNativeIDs() const
{
std::vector<std::string> v;
for (std::map<std::string, std::shared_ptr<FeatureOpenMS> >::const_iterator it = features_.begin(); it != features_.end(); ++it)
{
v.push_back(it->first);
}
return v;
}
std::vector<std::string> MRMFeatureOpenMS::getPrecursorIDs() const
{
std::vector<std::string> v;
for (std::map<std::string, std::shared_ptr<FeatureOpenMS> >::const_iterator it = precursor_features_.begin(); it != precursor_features_.end(); ++it)
{
v.push_back(it->first);
}
return v;
}
  // Overall intensity of the wrapped MRM feature.
  float MRMFeatureOpenMS::getIntensity() const
  {
    return mrmfeature_.getIntensity();
  }
  // Retention time of the wrapped MRM feature.
  double MRMFeatureOpenMS::getRT() const
  {
    return mrmfeature_.getRT();
  }
  // Looks up a meta value of the wrapped MRM feature, converted to double.
  double MRMFeatureOpenMS::getMetaValue(std::string name) const
  {
    return mrmfeature_.getMetaValue(name);
  }
  // Number of transition-level features (precursor features not counted).
  size_t MRMFeatureOpenMS::size() const
  {
    return features_.size();
  }
  // default instances
  // NOTE(review): file-scope globals constructed at static-initialization
  // time; presumably they serve as shared default objects for the templated
  // adapters above -- confirm against their users before changing.
  MSSpectrum spec;
  MSChromatogram chrom;
  SignalToNoiseOpenMS< MSSpectrum> spec_signal_to_noise_openms(spec, 1.0, 3, true);
  SignalToNoiseOpenMS< MSChromatogram > chrom_signal_to_noise_openms(chrom, 1.0, 3, true);
  MRMTransitionGroup<MSSpectrum, ReactionMonitoringTransition> trgroup;
  TransitionGroupOpenMS<MSSpectrum, ReactionMonitoringTransition> default_transition_group_openms(trgroup);
} //end namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/DigestionEnzymeDB.cpp | .cpp | 489 | 16 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Xiao Liang $
// $Authors: Xiao Liang, Chris Bielow $
// --------------------------------------------------------------------------
//
#include <OpenMS/CHEMISTRY/DigestionEnzymeDB.h>
// implementation of template class is in the header file
namespace OpenMS
{
  // Intentionally empty: DigestionEnzymeDB is a class template, so its entire
  // implementation lives in the header.
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/ResidueModification.cpp | .cpp | 23,664 | 804 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Andreas Bertsch $
// --------------------------------------------------------------------------
//
#include <OpenMS/CHEMISTRY/ResidueModification.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/CHEMISTRY/Residue.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <cmath>
#include <iostream>
#include <utility>
using namespace std;
namespace OpenMS
{
  // Default constructor: initializes all scalar members to their "unset"
  // values.
  ResidueModification::ResidueModification() :
    unimod_record_id_(-1), // -1 == no UniMod record assigned
    term_spec_(ResidueModification::ANYWHERE),
    origin_('X'), // 'X' == no specific residue (see setFullId)
    classification_(ResidueModification::ARTIFACT),
    average_mass_(0.0),
    mono_mass_(0.0),
    diff_average_mass_(0.0),
    diff_mono_mass_(0.0),
    neutral_loss_mono_masses_(0),
    neutral_loss_average_masses_(0)
  {
  }
  // Lexicographic ordering over all members (via tuple comparison); keep the
  // field list in sync with operator==.
  bool ResidueModification::operator<(const ResidueModification& rhs) const
  {
    return std::tie(
      id_,
      full_id_,
      psi_mod_accession_,
      unimod_record_id_,
      full_name_,
      name_,
      term_spec_,
      origin_,
      classification_,
      average_mass_,
      mono_mass_,
      diff_average_mass_,
      diff_mono_mass_,
      formula_,
      diff_formula_,
      synonyms_,
      neutral_loss_diff_formulas_,
      neutral_loss_mono_masses_,
      neutral_loss_average_masses_
    ) < std::tie(
      rhs.id_,
      rhs.full_id_,
      rhs.psi_mod_accession_,
      rhs.unimod_record_id_,
      rhs.full_name_,
      rhs.name_,
      rhs.term_spec_,
      rhs.origin_,
      rhs.classification_,
      rhs.average_mass_,
      rhs.mono_mass_,
      rhs.diff_average_mass_,
      rhs.diff_mono_mass_,
      rhs.formula_,
      rhs.diff_formula_,
      rhs.synonyms_,
      rhs.neutral_loss_diff_formulas_,
      rhs.neutral_loss_mono_masses_,
      rhs.neutral_loss_average_masses_
    );
  }
bool ResidueModification::operator==(const ResidueModification& rhs) const
{
return id_ == rhs.id_ &&
full_id_ == rhs.full_id_ &&
psi_mod_accession_ == rhs.psi_mod_accession_ &&
unimod_record_id_ == rhs.unimod_record_id_ &&
full_name_ == rhs.full_name_ &&
name_ == rhs.name_ &&
term_spec_ == rhs.term_spec_ &&
origin_ == rhs.origin_ &&
classification_ == rhs.classification_ &&
average_mass_ == rhs.average_mass_ &&
mono_mass_ == rhs.mono_mass_ &&
diff_average_mass_ == rhs.diff_average_mass_ &&
diff_mono_mass_ == rhs.diff_mono_mass_ &&
formula_ == rhs.formula_ &&
diff_formula_ == rhs.diff_formula_ &&
synonyms_ == rhs.synonyms_ &&
neutral_loss_diff_formulas_ == rhs.neutral_loss_diff_formulas_ &&
neutral_loss_mono_masses_ == rhs.neutral_loss_mono_masses_ &&
neutral_loss_average_masses_ == rhs.neutral_loss_average_masses_;
}
  // Defined as the negation of operator==.
  bool ResidueModification::operator!=(const ResidueModification& rhs) const
  {
    return !(*this == rhs);
  }
  // Default destructor: all members manage their own resources.
  ResidueModification::~ResidueModification() = default;
  // Sets the (short) identifier of the modification.
  void ResidueModification::setId(const String& id)
  {
    id_ = id;
  }
  // Returns the (short) identifier of the modification.
  const String& ResidueModification::getId() const
  {
    return id_;
  }
  // Sets the full id; when called with an empty argument, generates it from
  // the short id plus a specificity suffix, e.g. "Oxidation (M)" or
  // "Acetyl (N-term)". Throws MissingInformation if the short id is unset.
  void ResidueModification::setFullId(const String& full_id)
  {
    if (full_id.empty())
    {
      if (id_.empty())
      {
        throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Cannot create full ID for modification with missing (short) ID.");
      }
      String specificity;
      if (term_spec_ != ResidueModification::ANYWHERE)
      {
        specificity = getTermSpecificityName(); // "C-term" or "N-term"
      }
      // terminal modification restricted to one residue: "N-term M" etc.
      if (!specificity.empty() && (origin_ != 'X'))
      {
        specificity += " " + String(origin_);
      }
      else if (specificity.empty())
      {
        specificity = origin_; // shouldn't be "X" in this case
      }
      full_id_ = id_ + " (" + specificity + ")";
    }
    else
    {
      full_id_ = full_id;
    }
  }
  // Returns the full id (short id plus specificity).
  const String& ResidueModification::getFullId() const
  {
    return full_id_;
  }
  // Sets the PSI-MOD accession (e.g. "MOD:00046").
  void ResidueModification::setPSIMODAccession(const String& id)
  {
    psi_mod_accession_ = id;
  }
  // Returns the PSI-MOD accession.
  const String& ResidueModification::getPSIMODAccession() const
  {
    return psi_mod_accession_;
  }
  // Sets the numeric UniMod record id (-1 == none).
  void ResidueModification::setUniModRecordId(const Int& id)
  {
    unimod_record_id_ = id;
  }
  // Returns the numeric UniMod record id (-1 == none).
  const Int& ResidueModification::getUniModRecordId() const
  {
    return unimod_record_id_;
  }
  // Returns "UniMod:<id>", or an empty string if no record id is set.
  const String ResidueModification::getUniModAccession() const
  {
    if (unimod_record_id_ < 0) return "";
    return String("UniMod:") + unimod_record_id_; // return copy of temp object
  }
  // Sets the full (descriptive) name of the modification.
  void ResidueModification::setFullName(const String& full_name)
  {
    full_name_ = full_name;
  }
  // Returns the full (descriptive) name.
  const String& ResidueModification::getFullName() const
  {
    return full_name_;
  }
  // Sets the short name of the modification.
  void ResidueModification::setName(const String& name)
  {
    name_ = name;
  }
  // Returns the short name.
  const String& ResidueModification::getName() const
  {
    return name_;
  }
  // Sets the terminal specificity; rejects the sentinel value
  // NUMBER_OF_TERM_SPECIFICITY with InvalidValue.
  void ResidueModification::setTermSpecificity(TermSpecificity term_spec)
  {
    if (term_spec == NUMBER_OF_TERM_SPECIFICITY)
    {
      throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Not a valid terminal specificity", String(term_spec));
    }
    term_spec_ = term_spec;
  }
void ResidueModification::setTermSpecificity(const String& term_spec)
{
if (term_spec == "C-term")
{
term_spec_ = C_TERM;
}
else if (term_spec == "N-term")
{
term_spec_ = N_TERM;
}
else if (term_spec == "none")
{
term_spec_ = ANYWHERE;
}
else if (term_spec == "Protein N-term")
{
term_spec_ = PROTEIN_N_TERM;
}
else if (term_spec == "Protein C-term")
{
term_spec_ = PROTEIN_C_TERM;
}
else
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Not a valid terminal specificity", term_spec);
}
}
  // Returns the terminal specificity of this modification.
  ResidueModification::TermSpecificity ResidueModification::getTermSpecificity() const
  {
    return term_spec_;
  }
  // Returns the textual name for a terminal specificity; when called with the
  // default sentinel NUMBER_OF_TERM_SPECIFICITY, reports this object's own
  // specificity.
  String ResidueModification::getTermSpecificityName(TermSpecificity term_spec) const
  {
    if (term_spec == NUMBER_OF_TERM_SPECIFICITY)
    {
      term_spec = term_spec_;
    }
    switch (term_spec)
    {
      case C_TERM: return "C-term";
      case N_TERM: return "N-term";
      case PROTEIN_C_TERM: return "Protein C-term";
      case PROTEIN_N_TERM: return "Protein N-term";
      case ANYWHERE: return "none";
      default: break; // shouldn't happen
    }
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No name for this terminal specificity", String(term_spec));
  }
void ResidueModification::setOrigin(char origin)
{
if ((origin >= 'A') && (origin <= 'Y') && (origin != 'B') && (origin != 'J'))
{
origin_ = origin;
}
else if ((origin >= 'a') && (origin <= 'y') && (origin != 'b') && (origin != 'j'))
{
origin_ = toupper(origin);
}
else
{
String msg = "Modification '" + id_ + "': origin must be a letter from A to Y, excluding B and J.";
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, msg, String(origin));
}
}
  // Returns the origin residue ('X' if unspecific).
  char ResidueModification::getOrigin() const
  {
    return origin_;
  }
  // Sets the source classification directly from the enum.
  void ResidueModification::setSourceClassification(SourceClassification classification)
  {
    classification_ = classification;
  }
void ResidueModification::setSourceClassification(const String& classification)
{
String c = classification;
c.toLower();
if (c == "artifact" || c == "artefact") // unimod uses Artefact (BE) not Artifact (AE)
{
classification_ = ARTIFACT;
return;
}
if (c == "natural")
{
classification_ = NATURAL;
return;
}
if (c == "hypothetical")
{
classification_ = HYPOTHETICAL;
return;
}
if (c == "post-translational")
{
classification_ = POSTTRANSLATIONAL;
return;
}
if (c == "multiple")
{
classification_ = MULTIPLE;
return;
}
if (c == "chemical derivative")
{
classification_ = CHEMICAL_DERIVATIVE;
return;
}
if (c == "isotopic label")
{
classification_ = ISOTOPIC_LABEL;
return;
}
if (c == "pre-translational")
{
classification_ = PRETRANSLATIONAL;
return;
}
if (c == "other glycosylation")
{
classification_ = OTHER_GLYCOSYLATION;
return;
}
if (c == "n-linked glycosylation")
{
classification_ = NLINKED_GLYCOSYLATION;
return;
}
if (c == "aa substitution")
{
classification_ = AA_SUBSTITUTION;
return;
}
if (c == "other")
{
classification_ = OTHER;
return;
}
if (c == "non-standard residue")
{
classification_ = NONSTANDARD_RESIDUE;
return;
}
if (c == "co-translational")
{
classification_ = COTRANSLATIONAL;
return;
}
if (c == "o-linked glycosylation")
{
classification_ = OLINKED_GLYCOSYLATION;
return;
}
classification_ = UNKNOWN;
//cerr << "ResidueModification: Unknown source classification '" << classification << "'" << endl;
return;
}
  // Returns the source classification of this modification.
  ResidueModification::SourceClassification ResidueModification::getSourceClassification() const
  {
    return classification_;
  }
  // Returns the textual name for a source classification; when called with
  // the default sentinel NUMBER_OF_SOURCE_CLASSIFICATIONS, reports this
  // object's own classification. UNKNOWN maps to an empty string.
  String ResidueModification::getSourceClassificationName(SourceClassification classification) const
  {
    if (classification == NUMBER_OF_SOURCE_CLASSIFICATIONS)
    {
      classification = classification_;
    }
    switch (classification)
    {
      case ARTIFACT: return "Artefact"; // return Artefact (BE) not Artifcat (AE)
      case NATURAL: return "Natural";
      case HYPOTHETICAL: return "Hypothetical";
      case POSTTRANSLATIONAL: return "Post-translational";
      case MULTIPLE: return "Multiple";
      case CHEMICAL_DERIVATIVE: return "Chemical derivative";
      case ISOTOPIC_LABEL: return "Isotopic label";
      case PRETRANSLATIONAL: return "Pre-translational";
      case OTHER_GLYCOSYLATION: return "Other glycosylation";
      case NLINKED_GLYCOSYLATION: return "N-linked glycosylation";
      case AA_SUBSTITUTION: return "AA substitution";
      case OTHER: return "Other";
      case NONSTANDARD_RESIDUE: return "Non-standard residue";
      case COTRANSLATIONAL: return "Co-translational";
      case OLINKED_GLYCOSYLATION: return "O-linked glycosylation";
      case UNKNOWN: return "";
      default: return "Unknown";
    }
  }
// --- plain accessors for masses, formulas and synonyms ---

/// sets the (isotope-averaged) mass of the modified residue
void ResidueModification::setAverageMass(double mass)
{
  average_mass_ = mass;
}
double ResidueModification::getAverageMass() const
{
  return average_mass_;
}
/// sets the mono-isotopic mass of the modified residue
void ResidueModification::setMonoMass(double mass)
{
  mono_mass_ = mass;
}
double ResidueModification::getMonoMass() const
{
  return mono_mass_;
}
/// sets the average mass difference introduced by the modification
void ResidueModification::setDiffAverageMass(double mass)
{
  diff_average_mass_ = mass;
}
double ResidueModification::getDiffAverageMass() const
{
  return diff_average_mass_;
}
/// sets the mono-isotopic mass difference introduced by the modification
void ResidueModification::setDiffMonoMass(double mass)
{
  diff_mono_mass_ = mass;
}
double ResidueModification::getDiffMonoMass() const
{
  return diff_mono_mass_;
}
/// sets the (textual) formula
void ResidueModification::setFormula(const String& formula)
{
  formula_ = formula;
}
const String& ResidueModification::getFormula() const
{
  return formula_;
}
/// sets the elemental composition difference introduced by the modification
void ResidueModification::setDiffFormula(const EmpiricalFormula& diff_formula)
{
  diff_formula_ = diff_formula;
}
const EmpiricalFormula& ResidueModification::getDiffFormula() const
{
  return diff_formula_;
}
/// adds a single synonym (duplicates are ignored by the set)
void ResidueModification::addSynonym(const String& synonym)
{
  synonyms_.insert(synonym);
}
/// replaces all synonyms
void ResidueModification::setSynonyms(const set<String>& synonyms)
{
  synonyms_ = synonyms;
}
const set<String>& ResidueModification::getSynonyms() const
{
  return synonyms_;
}
// --- accessors for neutral-loss formulas and masses ---

void ResidueModification::setNeutralLossDiffFormulas(const vector<EmpiricalFormula>& diff_formulas)
{
  neutral_loss_diff_formulas_ = diff_formulas;
}
const vector<EmpiricalFormula>& ResidueModification::getNeutralLossDiffFormulas() const
{
  return neutral_loss_diff_formulas_;
}
/// takes the vector by value and moves it into place (cheap for rvalue callers)
void ResidueModification::setNeutralLossMonoMasses(vector<double> mono_masses)
{
  neutral_loss_mono_masses_ = std::move(mono_masses);
}
vector<double> ResidueModification::getNeutralLossMonoMasses() const
{
  return neutral_loss_mono_masses_;
}
/// takes the vector by value and moves it into place (cheap for rvalue callers)
void ResidueModification::setNeutralLossAverageMasses(vector<double> average_masses)
{
  neutral_loss_average_masses_ = std::move(average_masses);
}
vector<double> ResidueModification::getNeutralLossAverageMasses() const
{
  return neutral_loss_average_masses_;
}
/// True if at least one neutral-loss formula is registered and the first one
/// is not charged (a charged "loss" does not count as a neutral loss).
bool ResidueModification::hasNeutralLoss() const
{
  if (neutral_loss_diff_formulas_.empty())
  {
    return false;
  }
  return !neutral_loss_diff_formulas_.front().isCharged();
}
/// A user-defined modification carries a full id but no (database) id.
bool ResidueModification::isUserDefined() const
{
  const bool no_db_id = id_.empty();
  const bool has_full_id = !full_id_.empty();
  return no_db_id && has_full_id;
}
/// Creates (or looks up) a user-defined modification from a mass string.
/// @param mod         textual mass used to build the id, e.g. "12345.6" -> "[12345.6]"
/// @param mass        mono-isotopic (delta) mass of the modification
/// @param delta_mass  if true, @p mass is a difference mass; otherwise an absolute mass
/// @param specificity terminal specificity of the new modification
/// @param residue     origin residue; required for non-terminal modifications only
/// @return pointer to the registered modification (owned by ModificationsDB)
/// @throws Exception::InvalidValue if a non-terminal mod is requested without a residue
const ResidueModification* ResidueModification::createUnknownFromMassString(const String& mod,
                                                                            const double mass,
                                                                            const bool delta_mass,
                                                                            const TermSpecificity specificity,
                                                                            const Residue* residue)
{
  ModificationsDB* mod_db = ModificationsDB::getInstance();
  // -----------------------------------
  // Dealing with an unknown modification
  // -----------------------------------
  // Notes on mass calculation: AASequence::getMonoWeight uses DiffMonoMass
  // for its calculation of C/N-terminal modification mass and it uses
  // getMonoWeight(Residue::Internal) for each Residue. The Residue weight is
  // set when adding a modification using setModification_

  // Helper for the two terminal cases, which differ only in the id prefix
  // (".n"/".c") and the internal->terminal weight correction. Looks up an
  // existing mod by its full id, or creates one and transfers ownership to
  // ModificationsDB.
  auto make_terminal_mod = [&](const String& id_prefix, const double term_weight) -> const ResidueModification*
  {
    const String residue_name = "[" + mod + "]";
    const String residue_id = id_prefix + residue_name;
    if (mod_db->has(residue_id))
    {
      Size mod_idx = mod_db->findModificationIndex(residue_id);
      return mod_db->getModification(mod_idx);
    }
    unique_ptr<ResidueModification> new_mod(new ResidueModification);
    new_mod->setFullId(residue_id); // setting FullId but not Id makes it a user-defined mod
    new_mod->setFullName(residue_name); // display name
    new_mod->setTermSpecificity(specificity);
    // set masses
    if (delta_mass)
    {
      new_mod->setMonoMass(mass + term_weight);
      new_mod->setDiffMonoMass(mass);
    }
    else
    {
      new_mod->setMonoMass(mass);
      new_mod->setDiffMonoMass(mass - term_weight);
    }
    return mod_db->addModification(std::move(new_mod));
  };

  if (specificity == ResidueModification::N_TERM || specificity == ResidueModification::PROTEIN_N_TERM)
  {
    return make_terminal_mod(".n", Residue::getInternalToNTerm().getMonoWeight());
  }
  if (specificity == ResidueModification::C_TERM || specificity == ResidueModification::PROTEIN_C_TERM)
  {
    return make_terminal_mod(".c", Residue::getInternalToCTerm().getMonoWeight());
  }

  // non-terminal modification: needs a residue to anchor it
  if (residue == nullptr)
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Cannot create non-terminal mod without origin AA residue.", "nullptr");
  }
  const String modification_name = "[" + mod + "]";
  const String residue_id = String(residue->getOneLetterCode()) + modification_name; // e.g. N[12345.6]
  if (mod_db->has(residue_id))
  {
    Size mod_idx = mod_db->findModificationIndex(residue_id);
    return mod_db->getModification(mod_idx);
  }
  // create new modification
  unique_ptr<ResidueModification> new_mod(new ResidueModification);
  new_mod->setFullId(residue_id); // setting FullId but not Id makes it a user-defined mod
  new_mod->setFullName(modification_name); // display name
  // We will set origin to make sure the same modification will be used for
  // the same AA
  new_mod->setOrigin(residue->getOneLetterCode()[0]);
  // set masses
  if (delta_mass)
  {
    new_mod->setMonoMass(mass + residue->getMonoWeight());
    new_mod->setAverageMass(mass + residue->getAverageWeight());
    new_mod->setDiffMonoMass(mass);
  }
  else
  {
    new_mod->setMonoMass(mass);
    new_mod->setAverageMass(mass);
    new_mod->setDiffMonoMass(mass - residue->getMonoWeight());
  }
  return mod_db->addModification(std::move(new_mod));
}
/// Combines @p base with all modifications in @p addons into one user-defined
/// modification whose diff mono mass is the sum of all parts.
/// @param base                 existing modification (may be nullptr)
/// @param addons               additional modifications to merge in
/// @param allow_unknown_masses if false, merging onto an already user-defined mod is a no-op
/// @param residue              residue the merged mod is attached to (checked for matching origin)
/// @return merged modification, @p base if merging is refused, or nullptr if nothing was given
/// @throws Exception::Precondition on mismatched term specificity or origin
const ResidueModification* ResidueModification::combineMods(const ResidueModification* base,
                                                            const std::set<const ResidueModification*>& addons,
                                                            bool allow_unknown_masses,
                                                            const Residue* residue)
{
  if (addons.empty() && base == nullptr)
  {
    return nullptr;
  }
  if (base != nullptr && base->isUserDefined() && !allow_unknown_masses)
  { // do not apply anything to an already merged mod
    OPENMS_LOG_INFO << "Note: Invalid merge operation on already merged/user-defined modification!\n";
    return base;
  }
  const ResidueModification* mod_merged = base;
  std::set<const ResidueModification*>::const_iterator it_addon = addons.begin();
  if (base == nullptr) // base is empty: use the first addon as the base instead
  {
    mod_merged = *it_addon;
    ++it_addon; // start adding at the second element
  }
  if (it_addon == addons.end()) return mod_merged; // only one mod in total given
  // compute combined mass; iterating from 'it_addon' (not from addons.begin())
  // avoids double-counting the first addon when it already serves as the base
  double new_mass{ mod_merged->getDiffMonoMass() };
  for (; it_addon != addons.end(); ++it_addon)
  { // build a new mod, combining the existing and the new one
    const ResidueModification* mod_new = *it_addon;
    if (mod_merged->getTermSpecificity() != mod_new->getTermSpecificity())
    {
      throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Modifications to be merged do not have the same term specificity: " + mod_merged->getTermSpecificityName() + " != " + mod_new->getTermSpecificityName());
    }
    if (mod_merged->getOrigin() != mod_new->getOrigin())
    {
      throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("Modifications to be merged do not have the same origin: ") + mod_merged->getOrigin() + " != " + mod_new->getOrigin());
    }
    new_mass += mod_new->getDiffMonoMass();
  }
  // sanity check: mods and residue need same origin
  if (mod_merged->getTermSpecificity() == ANYWHERE && residue != nullptr && residue->getOneLetterCode()[0] != mod_merged->getOrigin())
  {
    throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("Modification and residue do not have the same origin: ") + mod_merged->getOrigin() + " != " + residue->getOneLetterCode());
  }
  // create (or look up) a user-defined modification representing the sum
  const ResidueModification* mod_sum =
    ResidueModification::createUnknownFromMassString(getDiffMonoMassString(new_mass),
                                                     new_mass,
                                                     true,
                                                     mod_merged->getTermSpecificity(),
                                                     residue);
  return mod_sum;
}
/// Compact string representation: origin amino acid (or '.' for terminal
/// mods), followed by "(id)" for database mods, the full name as fallback, or
/// a bracketed mass for user-defined mods.
/// @throws Exception::InvalidValue if the modification is in an inconsistent state
String ResidueModification::toString() const
{
  String ret;
  if (term_spec_ != ANYWHERE) ret = "."; // terminal mods are anchored with a dot
  else ret = origin_; // otherwise prefix with the origin amino acid
  if (!id_.empty())
  { // database-registered mod, e.g. "M(Oxidation)"
    ret.reserve(id_.size() + 3);
    ret += "(";
    ret += id_;
    ret += ")";
    return ret;
  }
  if (!getFullName().empty())
  { // using this is questionable ... it will not work for default mods (which do have id_ defined, so this case will never happen... but still)
    ret += getFullName();
    return ret;
  }
  if (isUserDefined()) // id_ empty but full_id_ is present
  { // user-defined modification: prefer the delta mass, fall back to the absolute mass
    if (diff_mono_mass_ != 0.0)
    {
      ret += getDiffMonoMassWithBracket(diff_mono_mass_);
    }
    else if (mono_mass_ != 0.0)
    {
      ret += getMonoMassWithBracket(mono_mass_);
    }
    else
    {
      throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Residue has an invalid user-defined modification. This is a bug. Please report it!", "");
    }
    return ret;
  }
  throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "ResidueModification is in an invalid state. This is a bug. Please report it!", "");
}
/// Formats a delta mass with an explicit sign, e.g. "+15.994915" / "-18.010565".
String ResidueModification::getDiffMonoMassString(const double diff_mono_mass)
{
  String result = (diff_mono_mass < 0.0) ? "-" : "+";
  result += std::fabs(diff_mono_mass);
  return result;
}
/// Formats a delta mass with sign and enclosing brackets, e.g. "[+15.994915]".
String ResidueModification::getDiffMonoMassWithBracket(const double diff_mono_mass)
{
  String bracketed = "[";
  bracketed += getDiffMonoMassString(diff_mono_mass);
  bracketed += "]";
  return bracketed;
}
/// Formats an absolute mono mass with enclosing brackets, e.g. "[12345.6]".
/// @throws Exception::InvalidValue for negative masses, since a leading '-'
/// would make the result indistinguishable from a delta-mass string
String ResidueModification::getMonoMassWithBracket(const double mono_mass)
{
  if (mono_mass < 0.0)
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Modification has negative mono mass. Cannot distinguish between delta masses due to '-'!", String(mono_mass));
  }
  String bracketed = "[";
  bracketed += mono_mass;
  bracketed += "]";
  return bracketed;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/ResidueDB.cpp | .cpp | 22,171 | 523 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Andreas Bertsch, Jang Jang Jin, Timo Sachsenberg$
// --------------------------------------------------------------------------
//
#include <OpenMS/CHEMISTRY/ResidueDB.h>
#include <OpenMS/CHEMISTRY/ResidueModification.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/CHEMISTRY/Residue.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <iostream>
using namespace std;
namespace OpenMS
{
/// Constructor: populates the database with the built-in residues.
ResidueDB::ResidueDB()
{
  initResidues_();
}
/// Returns the process-wide singleton instance.
/// NOTE: the instance is allocated with 'new' into a function-local static
/// pointer and never deleted, so ~ResidueDB() does not run at program exit.
ResidueDB* ResidueDB::getInstance()
{
  static ResidueDB* db_ = new ResidueDB(); // Meyers' singleton -> thread safe
  return db_;
}
ResidueDB::~ResidueDB()
{
  // free memory: the DB owns all (modified and unmodified) residues it registered
  for (auto& r : const_residues_) { delete r; }
  for (auto& r : const_modified_residues_) { delete r; }
}
/// Looks up a residue by name, three/one-letter code, or synonym.
/// @throws Exception::InvalidValue if @p name is empty or unknown
const Residue* ResidueDB::getResidue(const String& name) const
{
  // reject empty lookups early, before taking the lock
  if (name.empty())
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No residue specified.", "");
  }
  const Residue* r{};
  // residue_names_ can be modified concurrently (getModifiedResidue adds
  // entries), so the lookup is guarded by the named critical section
  #pragma omp critical (ResidueDB)
  {
    auto it = residue_names_.find(name);
    if (it != residue_names_.end())
    {
      r = it->second;
    }
  }
  // throwing must happen outside of the critical section (OpenMP requirement)
  if (r == nullptr)
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Residue not found: ", name);
  }
  return r;
}
/// Looks up a residue by its one-letter code; returns nullptr for unknown codes.
const Residue* ResidueDB::getResidue(const unsigned char& one_letter_code) const
{
  //TODO why does this not throw but the String version does??
  //no lock required here because read only and array is initialized in thread-safe constructor
  return residue_by_one_letter_code_[one_letter_code];
}
/// Number of unmodified residues currently registered.
Size ResidueDB::getNumberOfResidues() const
{
  // copy the size out under the lock; residues may be added concurrently
  Size count{0};
  #pragma omp critical (ResidueDB)
  {
    count = const_residues_.size();
  }
  return count;
}
/// Number of modified residues currently registered.
Size ResidueDB::getNumberOfModifiedResidues() const
{
  Size count{0};
  #pragma omp critical (ResidueDB)
  {
    count = const_modified_residues_.size();
  }
  return count;
}
/// Returns all residues belonging to the given residue set (e.g. "Natural20").
/// Returns an empty set (and prints a message) if the set name is unknown.
const set<const Residue*> ResidueDB::getResidues(const String& residue_set) const
{
  set<const Residue*> s;
  #pragma omp critical (ResidueDB)
  {
    auto it = residues_by_set_.find(residue_set);
    if (it != residues_by_set_.end())
    {
      s = it->second;
    }
  }
  if (s.empty())
  {
    // NOTE(review): reports to stdout via cout; OPENMS_LOG_WARN would be more
    // consistent with the rest of the library -- confirm before changing
    cout << "Residue set cannot be found: '" + residue_set + "'" << endl;
  }
  return s;
}
/// One-time initialization hook called from the constructor.
void ResidueDB::initResidues_()
{
  buildResidues_();
}
/// Registers a residue in the appropriate containers, depending on whether it
/// carries a modification.
void ResidueDB::addResidue_(Residue* r)
{
  if (r->isModified())
  { // modified residues get their own registry and name lookup
    const_modified_residues_.insert(r);
    addModifiedResidueNames_(r);
  }
  else
  { // plain residues go into the main registry and name lookup
    const_residues_.insert(r);
    addResidueNames_(r);
  }
}
/// True if a residue is registered under the given name/code/synonym.
bool ResidueDB::hasResidue(const String& res_name) const
{
  bool present = false;
  #pragma omp critical (ResidueDB)
  {
    present = (residue_names_.count(res_name) > 0);
  }
  return present;
}
/// True if the given pointer is one of the residues owned by this database.
bool ResidueDB::hasResidue(const Residue* residue) const
{
  bool present = false;
  #pragma omp critical (ResidueDB)
  {
    present = (const_residues_.count(residue) > 0) ||
              (const_modified_residues_.count(residue) > 0);
  }
  return present;
}
/// Builds the static table of the 20 natural amino acids plus special and
/// ambiguous residues (Sec, Pyl, Asx, Glx, Xle, Xaa) and assigns each to its
/// residue sets. Constructor arguments per residue: name, three-letter code,
/// one-letter code, formula, then several physico-chemical parameters
/// (presumably pKa-related and fragmentation propensities -- see the Residue
/// class for their exact meaning), and a set of synonyms.
void ResidueDB::buildResidues_()
{
  Residue* alanine = new Residue("Alanine", "Ala", "A", EmpiricalFormula("C3H7NO2"), 2.35, 9.87, -1.00, 0.00, 881.82, 0.00, set<String>{"L-Alanine", "alanine", "Alanin", "alanin", "ALA"});
  insertResidueAndAssociateWithResidueSet_(alanine, {"All","Natural20","Natural19WithoutI","Natural19WithoutL","Natural19J","AmbiguousWithoutX","Ambiguous","AllNatural"});
  Residue* cysteine = new Residue("Cysteine", "Cys", "C", EmpiricalFormula("C3H7NO2S"), 1.92, 10.70, 8.18, 0.00, 0.12, 880.99, set<String>{"CYS", "Cystine"});
  insertResidueAndAssociateWithResidueSet_(cysteine, {"All","Natural20","Natural19WithoutI","Natural19WithoutL","Natural19J","AmbiguousWithoutX","Ambiguous","AllNatural"});
  // residues that can lose water/ammonia during fragmentation get explicit loss formulas
  Residue* aspartate = new Residue("Aspartate", "Asp", "D", EmpiricalFormula("C4H7NO4"), 1.99, 9.90, 3.90, 784.0, 880.02, -0.63, set<String>{"ASP"});
  aspartate->addLossName("water");
  aspartate->addLossFormula(EmpiricalFormula("H2O"));
  insertResidueAndAssociateWithResidueSet_(aspartate, {"All","Natural20","Natural19WithoutI","Natural19WithoutL","Natural19J","AmbiguousWithoutX","Ambiguous","AllNatural"} );
  Residue* glutamate = new Residue ( "Glutamate", "Glu", "E", EmpiricalFormula("C5H9NO4"), 2.10, 9.47, 4.07, 790.0, 880.10, -0.39, set<String>{"GLU"});
  glutamate->addLossName("water");
  glutamate->addLossFormula(EmpiricalFormula("H2O"));
  glutamate->addNTermLossName("water");
  glutamate->addNTermLossFormula(EmpiricalFormula("H2O"));
  insertResidueAndAssociateWithResidueSet_(glutamate, {"All","Natural20","Natural19WithoutI","Natural19WithoutL","Natural19J","AmbiguousWithoutX","Ambiguous","AllNatural"} );
  Residue* phenylalanine = new Residue ( "Phenylalanine", "Phe", "F", EmpiricalFormula( "C9H11NO2"), 2.20, 9.31, -1.0, 0.00, 881.08, 0.03, set<String>{"PHE"});
  insertResidueAndAssociateWithResidueSet_(phenylalanine, {"All","Natural20","Natural19WithoutI","Natural19WithoutL","Natural19J","AmbiguousWithoutX","Ambiguous","AllNatural"} );
  Residue* glycine = new Residue ( "Glycine", "Gly", "G", EmpiricalFormula( "C2H5NO2"), 2.35, 9.78, -1.0, 0.00, 881.17, 0.92, set<String>{"GLY"} );
  insertResidueAndAssociateWithResidueSet_(glycine, {"All","Natural20","Natural19WithoutI","Natural19WithoutL","Natural19J","AmbiguousWithoutX","Ambiguous","AllNatural"} );
  Residue* histidin = new Residue ( "Histidine", "His", "H", EmpiricalFormula( "C6H9N3O2"), 1.80, 9.33, 6.04, 927.84, 881.27, -0.19, set<String>{"HIS"});
  insertResidueAndAssociateWithResidueSet_(histidin, {"All","Natural20","Natural19WithoutI","Natural19WithoutL","Natural19J","AmbiguousWithoutX","Ambiguous","AllNatural"});
  Residue* isoleucine = new Residue ( "Isoleucine", "Ile", "I", EmpiricalFormula( "C6H13NO2"), 2.32, 9.76, -1.0, 0.00, 880.99, -1.17, set<String>{"ILE"});
  insertResidueAndAssociateWithResidueSet_(isoleucine, {"All","Natural20","Natural19WithoutL","Natural19J","AmbiguousWithoutX","Ambiguous","AllNatural"});
  Residue* lysine = new Residue ( "Lysine", "Lys", "K", EmpiricalFormula( "C6H14N2O2"), 2.16, 9.06, 10.54, 926.74, 880.06, -0.71, set<String>{ "LYS"});
  lysine->addLossName("ammonia");
  lysine->addLossFormula(EmpiricalFormula("NH3"));
  insertResidueAndAssociateWithResidueSet_(lysine, {"All","Natural20","Natural19WithoutI","Natural19WithoutL","Natural19J","AmbiguousWithoutX","Ambiguous","AllNatural"} );
  Residue* leucine = new Residue ( "Leucine", "Leu", "L", EmpiricalFormula( "C6H13NO2"), 2.33, 9.74, -1.0, 0.00, 881.88, -0.09, set<String>{ "LEU"});
  insertResidueAndAssociateWithResidueSet_(leucine, {"All","Natural20","Natural19WithoutI","Natural19J","AmbiguousWithoutX","Ambiguous","AllNatural"} );
  Residue* methionine = new Residue ( "Methionine", "Met", "M", EmpiricalFormula( "C5H11NO2S"), 2.13, 9.28, -1.0, 830.0, 881.38, 0.30, set<String>{ "MET"});
  insertResidueAndAssociateWithResidueSet_(methionine, {"All","Natural20","Natural19WithoutI","Natural19WithoutL","Natural19J","AmbiguousWithoutX","Ambiguous","AllNatural"} );
  Residue* asparagine = new Residue ( "Asparagine", "Asn", "N", EmpiricalFormula( "C4H8N2O3"), 2.14, 8.72, -1.0, 864.94, 881.18, 1.56, set<String>{ "ASN"});
  asparagine->addLossName("ammonia");
  asparagine->addLossFormula(EmpiricalFormula("NH3"));
  insertResidueAndAssociateWithResidueSet_(asparagine, {"All","Natural20","Natural19WithoutI","Natural19WithoutL","Natural19J","AmbiguousWithoutX","Ambiguous","AllNatural"});
  Residue* proline = new Residue ( "Proline", "Pro", "P", EmpiricalFormula( "C5H9NO2"), 1.95, 10.64, -1.0, 0.00, 881.25, 11.75, set<String>{ "PRO"});
  insertResidueAndAssociateWithResidueSet_(proline, {"All","Natural20","Natural19WithoutI","Natural19WithoutL","Natural19J","AmbiguousWithoutX","Ambiguous","AllNatural"} );
  Residue* glutamine = new Residue ( "Glutamine", "Gln", "Q", EmpiricalFormula( "C5H10N2O3"), 2.17, 9.13, -1.0, 865.25, 881.50, 4.10, set<String>{ "GLN"});
  glutamine->addLossName("ammonia");
  glutamine->addLossFormula(EmpiricalFormula("NH3"));
  glutamine->addNTermLossName("water");
  glutamine->addNTermLossFormula(EmpiricalFormula("H2O"));
  insertResidueAndAssociateWithResidueSet_(glutamine, {"All","Natural20","Natural19WithoutI","Natural19WithoutL","Natural19J","AmbiguousWithoutX","Ambiguous","AllNatural"} );
  Residue* arginine = new Residue ( "Arginine", "Arg", "R", EmpiricalFormula( "C6H14N4O2"), 1.82, 8.99, 12.48, 1000.0, 882.98, 6.28, set<String>{ "ARG"});
  arginine->addLossName("ammonia");
  arginine->addLossFormula(EmpiricalFormula("NH3"));
  arginine->addLossName("");
  arginine->addLossFormula(EmpiricalFormula("NHCNH"));
  arginine->addLossName("");
  arginine->addLossFormula(EmpiricalFormula("CONH2"));
  insertResidueAndAssociateWithResidueSet_(arginine, {"All","Natural20","Natural19WithoutI","Natural19WithoutL","Natural19J","AmbiguousWithoutX","Ambiguous","AllNatural"} );
  Residue* selenocysteine = new Residue ( "Selenocysteine", "Sec", "U", EmpiricalFormula( "C3H7NO2Se"), 0.00, 0.00, 5.73, 0.00, 880.99, 0.12, set<String>{ "SEC"});
  insertResidueAndAssociateWithResidueSet_(selenocysteine, {"All","AmbiguousWithoutX","Ambiguous","AllNatural"});
  Residue* serine = new Residue ( "Serine", "Ser", "S", EmpiricalFormula( "C3H7NO3"), 2.19, 9.21, -1.0, 775.0, 881.08, 0.98, set<String>{ "SER"});
  serine->addLossName("water");
  serine->addLossFormula(EmpiricalFormula("H2O"));
  insertResidueAndAssociateWithResidueSet_(serine, {"All","Natural20","Natural19WithoutI","Natural19WithoutL","Natural19J","AmbiguousWithoutX","Ambiguous","AllNatural"} );
  Residue* threonine = new Residue ( "Threonine", "Thr", "T", EmpiricalFormula( "C4H9NO3"), 2.09, 9.10, -1.0, 780.0, 881.14, 1.21, set<String>{ "THR"});
  threonine->addLossName("water");
  threonine->addLossFormula(EmpiricalFormula("H2O"));
  insertResidueAndAssociateWithResidueSet_(threonine, {"All","Natural20","Natural19WithoutI","Natural19WithoutL","Natural19J","AmbiguousWithoutX","Ambiguous","AllNatural"} );
  Residue* valine = new Residue ( "Valine", "Val", "V", EmpiricalFormula( "C5H11NO2"), 2.39, 9.74, -1.0, 0.0, 881.17, -0.90, set<String>{ "VAL"});
  insertResidueAndAssociateWithResidueSet_(valine, {"All","Natural20","Natural19WithoutI","Natural19WithoutL","Natural19J","AmbiguousWithoutX","Ambiguous","AllNatural"} );
  Residue* tryptophan = new Residue ( "Tryptophan", "Trp", "W", EmpiricalFormula( "C11H12N2O2"), 2.46, 9.41, -1.0, 909.53, 881.31, 0.10, set<String>{ "TRP"});
  insertResidueAndAssociateWithResidueSet_(tryptophan, {"All","Natural20","Natural19WithoutI","Natural19WithoutL","Natural19J","AmbiguousWithoutX","Ambiguous","AllNatural"} );
  Residue* tyrosine = new Residue ( "Tyrosine", "Tyr", "Y", EmpiricalFormula( "C9H11NO3"), 2.20, 9.21, 10.46, 790.0, 881.20, -0.38, set<String>{ "TYR" });
  insertResidueAndAssociateWithResidueSet_(tyrosine, {"All","Natural20","Natural19WithoutI","Natural19WithoutL","Natural19J","AmbiguousWithoutX","Ambiguous","AllNatural"} );
  Residue* pyrrolysine = new Residue ( "Pyrrolysine", "Pyr", "O", EmpiricalFormula( "C12H21N3O3"), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, set<String>{ "PYR"});
  insertResidueAndAssociateWithResidueSet_( pyrrolysine, {"All","AmbiguousWithoutX","Ambiguous","AllNatural"});
  // ambiguous residues: B (Asn/Asp), Z (Gln/Glu), J (Ile/Leu), X (unknown)
  Residue* asparagine_aspartate = new Residue ( "Asparagine/Aspartate", "Asx", "B", EmpiricalFormula(""), 0.00, 0.00, -1.0, 0.00, 0.00, 0.00, set<String>{ "ASX" });
  insertResidueAndAssociateWithResidueSet_( asparagine_aspartate , {"All","AmbiguousWithoutX","Ambiguous"});
  Residue* glutamine_glutamate = new Residue ( "Glutamine/Glutamate", "Glx", "Z", EmpiricalFormula(""), 0.00, 0.00, -1.0, 0.00, 0.00, 0.00, set<String>{ "GLX"});
  insertResidueAndAssociateWithResidueSet_(glutamine_glutamate, {"All","AmbiguousWithoutX","Ambiguous"} );
  Residue* isoleucine_leucine = new Residue ( "Isoleucine/Leucine", "Xle", "J", EmpiricalFormula( "C6H13NO2"), 0.00, 0.00, -1.0, 0.00, 880.99, -1.17, set<String>{ "XLE"});
  insertResidueAndAssociateWithResidueSet_( isoleucine_leucine, {"All","AmbiguousWithoutX","Ambiguous"});
  Residue* unspecified_unknown = new Residue ( "Unspecified/Unknown", "Xaa", "X", EmpiricalFormula(""), 0.00, 0.00, -1.0, 0.00, 0.00, 0.00, set<String>{ "XAA", "Unk"});
  insertResidueAndAssociateWithResidueSet_(unspecified_unknown, {"All","Ambiguous"} );
}
/// Registers a freshly created residue: tags it with the given residue sets,
/// indexes it by set, by one-letter code and by all of its names/synonyms.
/// Not guarded by the critical section; in this file it is only called from
/// buildResidues_() during (thread-safe) construction.
void ResidueDB::insertResidueAndAssociateWithResidueSet_(Residue* res_ptr, const StringList& residue_sets)
{
  // record set membership both on the residue and in the global set registry
  for (const String& s : residue_sets)
  {
    res_ptr->addResidueSet(s);
    residue_sets_.insert(s);
  }
  // index by set name (iterates the residue's own sets, which now include all of residue_sets)
  for (const String& s : res_ptr->getResidueSets())
  {
    residues_by_set_[s].insert(res_ptr);
  }
  const_residues_.insert(res_ptr);
  // constant-time lookup table by one-letter code
  residue_by_one_letter_code_[static_cast<unsigned char>(res_ptr->getOneLetterCode()[0])] = res_ptr;
  addResidueNames_(res_ptr);
}
/// Returns (a copy of) the names of all known residue sets.
const set<String> ResidueDB::getResidueSets() const
{
  set<String> result;
  // copy out under the lock to avoid racing concurrent modifications
  #pragma omp critical (ResidueDB)
  {
    result = residue_sets_;
  }
  return result;
}
void ResidueDB::addModifiedResidueNames_(const Residue* r)
{
// get all modification names
vector<String> mod_names;
const ResidueModification* mod = r->getModification();
if (!mod->getId().empty()) mod_names.push_back(mod->getId()); // for user-defined mods
mod_names.push_back(mod->getFullName());
mod_names.push_back(mod->getFullId());
for (const String& s : mod->getSynonyms())
{
mod_names.push_back(s);
}
vector<String> names;
// add name to lookup
if (!r->getName().empty())
{
names.push_back(r->getName());
}
// add all synonyms to lookup
for (const String & s : r->getSynonyms())
{
names.push_back(s);
}
for (const String& n : names)
{
if (n.empty()) continue;
for (const String& m : mod_names)
{
if (m.empty()) continue;
residue_mod_names_[n][m] = r;
}
}
}
void ResidueDB::addResidueNames_(const Residue* r)
{
// add name to residue_names_
residue_names_[r->getName()] = r;
// add tree letter code to residue_names_
if (!r->getThreeLetterCode().empty())
{
residue_names_[r->getThreeLetterCode()] = r;
}
// add one letter code to residue_names_
if (!r->getOneLetterCode().empty())
{
residue_names_[r->getOneLetterCode()] = r;
}
// add all synonyms to residue_names_
for (const String& s : r->getSynonyms())
{
if (!s.empty())
{
residue_names_[s] = r;
}
}
}
/// Convenience overload: resolves the modification by name, then delegates to
/// the (residue, full id) overload.
const Residue* ResidueDB::getModifiedResidue(const String& modification)
{
  // ModificationsDB::getModification throws if 'modification' is unknown
  const ResidueModification* mod = ModificationsDB::getInstance()->getModification(modification, "", ResidueModification::ANYWHERE);
  const Residue* origin = getResidue(mod->getOrigin());
  return getModifiedResidue(origin, mod->getFullId());
}
/// Returns the residue carrying the given modification, creating and
/// registering it on first use. Thread-safe via the named critical section.
/// @throws Exception::InvalidValue if the residue or the modification is unknown
const Residue* ResidueDB::getModifiedResidue(const Residue* residue, const String& modification)
{
  OPENMS_PRECONDITION(!modification.empty(), "Modification cannot be empty")
  // search if the mod already exists
  const String & res_name = residue->getName();
  Residue* res{};
  bool residue_found(true), mod_found(true);
  #pragma omp critical (ResidueDB)
  {
    // Perform a single lookup of the residue name in our database, we assume
    // that if it is present in residue_mod_names_ then we have seen it
    // before and can directly grab it. If its not present, we may have as
    // unmodified residue in residue_names_ but need to create a new entry as
    // modified residue. If the residue itself is unknown, we will throw (see
    // below).
    const auto& rm_entry = residue_mod_names_.find(res_name);
    if (rm_entry == residue_mod_names_.end())
    {
      if (residue_names_.find(res_name) == residue_names_.end())
      {
        residue_found = false;
      }
    }
    if (residue_found)
    {
      const ResidueModification* mod{};
      try
      {
        static const ModificationsDB* mdb = ModificationsDB::getInstance();
        if (modification.hasSubstring("-term "))
        {
          // handle terminal modifications of format: "MOD_NAME (Protein {N|C}-term RESIDUE_NAME)"
          if (modification.hasSubstring("Protein N-term"))
          {
            mod = mdb->getModification(modification, residue->getOneLetterCode(), ResidueModification::PROTEIN_N_TERM);
          }
          else if (modification.hasSubstring("Protein C-term"))
          {
            mod = mdb->getModification(modification, residue->getOneLetterCode(), ResidueModification::PROTEIN_C_TERM);
          }
          // handle terminal modifications of format: "MOD_NAME ({N|C}-term RESIDUE_NAME)"
          else if (modification.hasSubstring("N-term"))
          {
            mod = mdb->getModification(modification, residue->getOneLetterCode(), ResidueModification::N_TERM);
          }
          else if (modification.hasSubstring("C-term"))
          {
            mod = mdb->getModification(modification, residue->getOneLetterCode(), ResidueModification::C_TERM);
          }
          // NOTE(review): if none of the four cases above matches, 'mod' stays
          // nullptr while 'mod_found' remains true -- verify this path cannot
          // be reached with a string containing "-term " but none of the markers
        }
        else
        {
          mod = mdb->getModification(modification, residue->getOneLetterCode(), ResidueModification::ANYWHERE);
        }
      }
      catch (...)
      {
        // getModification throws for unknown modifications; remember and
        // re-throw outside of the critical section
        mod_found = false;
      }
      // check if modification in ResidueDB
      if (mod_found)
      {
        // check if modified residue is already present in ResidueDB
        bool found = false;
        if (rm_entry != residue_mod_names_.end())
        {
          // user-defined mods are keyed by full id, database mods by id
          const String& id = mod->getId().empty() ? mod->getFullId() : mod->getId();
          const auto& inner = rm_entry->second.find(id);
          if (inner != rm_entry->second.end())
          {
            res = const_cast<Residue*>(inner->second);
            found = true;
          }
        }
        if (!found)
        {
          // create and register this modified residue
          res = new Residue(*residue_names_.at(res_name));
          res->setModification(mod);
          addResidue_(res);
        }
      }
    }
  }
  // throwing (uncaught) exceptions needs to happen outside of critical section (see OpenMP reference manual)
  if (!residue_found)
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Residue not found: ", res_name);
  }
  else if (!mod_found)
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Modification not found: ", modification);
  }
  return res;
}
/// Overload taking an already resolved modification pointer; same
/// lookup-or-create logic as the String overload above.
/// @throws Exception::InvalidValue if the residue is unknown
const Residue* ResidueDB::getModifiedResidue(const Residue* residue, const ResidueModification* mod)
{
  OPENMS_PRECONDITION(mod != nullptr, "Mod cannot be nullptr")
  OPENMS_PRECONDITION(mod->getTermSpecificity() == ResidueModification::ANYWHERE, "Mod's term specificity needs to be ANYWHERE to attach it to Residues");
  OPENMS_PRECONDITION(mod->getOrigin() == residue->getOneLetterCode()[0], "Mod's AA origin needs to match residues one-letter-code");
  // search if the mod already exists
  const String & res_name = residue->getName();
  Residue* res{};
  bool residue_found(true);
  #pragma omp critical (ResidueDB)
  {
    // Perform a single lookup of the residue name in our database, we assume
    // that if it is present in residue_mod_names_ then we have seen it
    // before and can directly grab it. If its not present, we may have as
    // unmodified residue in residue_names_ but need to create a new entry as
    // modified residue. If the residue itself is unknown, we will throw (see
    // below).
    const auto& rm_entry = residue_mod_names_.find(res_name);
    if (rm_entry == residue_mod_names_.end())
    {
      if (residue_names_.find(res_name) == residue_names_.end())
      {
        residue_found = false;
      }
    }
    if (residue_found)
    {
      // check if modification in ResidueDB
      // NOTE(review): 'mod' is asserted non-null in the precondition above, so
      // this check is redundant in release builds; kept for safety
      if (mod != nullptr)
      {
        // check if modified residue is already present in ResidueDB
        bool found = false;
        if (rm_entry != residue_mod_names_.end())
        {
          // user-defined mods are keyed by full id, database mods by id
          const String& id = mod->getId().empty() ? mod->getFullId() : mod->getId();
          const auto& inner = rm_entry->second.find(id);
          if (inner != rm_entry->second.end())
          {
            res = const_cast<Residue*>(inner->second);
            found = true;
          }
        }
        if (!found)
        {
          // create and register this modified residue
          res = new Residue(*residue_names_.at(res_name));
          res->setModification(mod);
          addResidue_(res);
        }
      }
    }
  }
  // throwing (uncaught) exceptions needs to happen outside of critical section (see OpenMP reference manual)
  if (!residue_found)
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Residue not found: ", res_name);
  }
  return res;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/EnzymaticDigestion.cpp | .cpp | 17,218 | 512 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow, Xiao Liang $
// $Authors: Marc Sturm, Chris Bielow, Jeremi Maciejewski $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/EnzymaticDigestion.h>
#include <OpenMS/CHEMISTRY/ProteaseDB.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/DATASTRUCTURES/StringView.h>
#include <OpenMS/SYSTEM/File.h>
#include <boost/regex.hpp>
using namespace std;
namespace OpenMS
{
// Display names for the Specificity enum, indexed by the enum's numeric value.
// The repeated "unknown" entries appear to pad reserved slots between the
// first three values and no-cterm/no-nterm -- confirm against the enum
// declaration in the header.
const std::string EnzymaticDigestion::NamesOfSpecificity[] = {"none", "semi", "full", "unknown", "unknown", "unknown", "unknown", "unknown", "no-cterm", "no-nterm"};
// special enzyme names with dedicated handling in the digestion logic
const std::string EnzymaticDigestion::NoCleavage = "no cleavage";
const std::string EnzymaticDigestion::UnspecificCleavage = "unspecific cleavage";
/// Default construction: Trypsin, full specificity, no missed cleavages; the
/// enzyme's cleavage-site regex is compiled once up front.
EnzymaticDigestion::EnzymaticDigestion() :
  missed_cleavages_(0),
  enzyme_(ProteaseDB::getInstance()->getEnzyme("Trypsin")), // @TODO: keep trypsin as default?
  re_(new boost::regex(enzyme_->getRegEx())),
  specificity_(SPEC_FULL)
{
}
/// Copy constructor: deep-copies the compiled regex (it is not shared).
EnzymaticDigestion::EnzymaticDigestion(const EnzymaticDigestion& rhs) :
  missed_cleavages_(rhs.missed_cleavages_),
  enzyme_(rhs.enzyme_),
  re_(new boost::regex(*rhs.re_)),
  specificity_(rhs.specificity_)
{
}
/// Copy assignment. Self-assignment is safe: the new regex is constructed
/// from *rhs.re_ before reset() releases the old one.
EnzymaticDigestion& EnzymaticDigestion::operator=(const EnzymaticDigestion& rhs)
{
  missed_cleavages_ = rhs.missed_cleavages_;
  enzyme_ = rhs.enzyme_;
  re_.reset(new boost::regex(*rhs.re_));
  specificity_ = rhs.specificity_;
  return *this;
}
EnzymaticDigestion::~EnzymaticDigestion() = default;
/// number of missed cleavages tolerated during digestion
Size EnzymaticDigestion::getMissedCleavages() const
{
  return missed_cleavages_;
}
void EnzymaticDigestion::setMissedCleavages(Size missed_cleavages)
{
  missed_cleavages_ = missed_cleavages;
}
/// sets the digestion enzyme and recompiles its cleavage-site regex
void EnzymaticDigestion::setEnzyme(const DigestionEnzyme* enzyme)
{
  enzyme_ = enzyme;
  re_.reset(new boost::regex(enzyme_->getRegEx()));
}
String EnzymaticDigestion::getEnzymeName() const
{
  return enzyme_->getName();
}
// Maps a specificity name (see NamesOfSpecificity) back to its enum value;
// returns SPEC_UNKNOWN if the name does not match any known specificity.
EnzymaticDigestion::Specificity EnzymaticDigestion::getSpecificityByName(const String& name)
{
  Size idx = 0;
  while (idx < SIZE_OF_SPECIFICITY)
  {
    if (NamesOfSpecificity[idx] == name)
    {
      return Specificity(idx);
    }
    ++idx;
  }
  return SPEC_UNKNOWN;
}
// Returns the currently configured cleavage specificity.
EnzymaticDigestion::Specificity EnzymaticDigestion::getSpecificity() const
{
return specificity_;
}
// Sets the cleavage specificity (full, semi, none, ...).
void EnzymaticDigestion::setSpecificity(Specificity spec)
{
specificity_ = spec;
}
/*
Returns the cleavage positions inside sequence[start, end), with 'start'
itself always reported as the first element (so the result is never empty).
The end of the (sub)sequence is NOT included. Negative/out-of-range bounds
are clamped to the sequence. Callers rely on the "start + internal sites"
convention (see countInternalCleavageSites() and digestAfterTokenize_()).
*/
std::vector<int> EnzymaticDigestion::tokenize_(const String& sequence, int start, int end) const
{
std::vector<int> positions;
// set proper boundaries
start = std::max(0, start);
if (end < 0 || end > (int)sequence.size())
end = (int)sequence.size();
if (enzyme_->getRegEx() != "()") // if it's not "no cleavage"
{
// -1 makes the token iterator yield the substrings BETWEEN regex matches;
// advancing 'start' by each token's length walks from one cut site to the
// next. NOTE(review): this assumes the cleavage regex matches zero-width
// positions (lookahead assertions), otherwise match lengths would be
// skipped — TODO confirm against the enzyme regex definitions.
boost::sregex_token_iterator i(sequence.begin() + start, sequence.begin() + end, *re_, -1);
boost::sregex_token_iterator j;
while (i != j)
{
positions.push_back(start); // first push 'start' (usually 0), then all the real cleavage sites
start += (int)i->length();
++i;
}
}
else
{
positions.push_back(start);
}
return positions;
}
/*
Generates all semi-specific digestion products from a sorted list of cleavage
positions (which must include the sequence start and end). For every position
that is NOT a cleavage site, fragments are emitted that extend forward to each
of the next (missed_cleavages_ + 1) cleavage sites, and symmetrically backward
from the sequence end. Products violating the length bounds are counted and
returned (not emitted).
NOTE(review): pairs stored in 'output' here are (start, end) positions,
unlike digestAfterTokenize_() which stores (start, length) — confirm that
callers expect this convention.
*/
Size EnzymaticDigestion::semiSpecificDigestion_(const std::vector<int>& cleavage_positions, std::vector<std::pair<Size, Size>>& output, Size min_length, Size max_length) const
{
// Too few cleavage sites - should be at least sequence start and end
if (cleavage_positions.size() < 2)
{
// NOTE(review): constructing a String from int iterators casts each position
// to a char, so the exception payload is likely garbled — consider formatting
// the positions as numbers instead (same below).
String value(cleavage_positions.begin(), cleavage_positions.end());
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
String("Too few cleavage positions - at least sequence start and end positions are required."), value);
}
// cleavage_positions has to be sorted
if (! is_sorted(cleavage_positions.begin(), cleavage_positions.end()))
{
String value(cleavage_positions.begin(), cleavage_positions.end());
throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
String("Vector of cleavage positions (cleavage_positions) is not sorted, but it should be."));
}
int mc = missed_cleavages_;
Size wrong = 0;
// For every position, add an "extension" of it until next cleavage, in both directions.
// This is done by iterating through sequence from both ends at the same time.
// Warning: cleavage_positions array has to be sorted.
// Warning: this assumes first and last cleavages to be sequence termini.
const int first_c = cleavage_positions.front(); // First cleavage
const int last_c = cleavage_positions.back(); // Last cleavage
const int lgth = last_c - first_c;
// Lambda checking min & max conditions and adding to output
// (captures 'output' and the rejection counter 'wrong' by reference)
auto variant = [&output, &wrong, min_length, max_length](Size x, Size y)
{
if (min_length <= y - x &&
max_length >= y - x)
{
output.emplace_back(x, y);
} else ++wrong;
};
auto fwd = cleavage_positions.cbegin();
auto bwd = cleavage_positions.crbegin(); // Reverse iterator!
// skip the sequence termini; the loop below starts one residue inside
++fwd; ++bwd;
// Iterate from both sides of sequence (by calculating shift from start/end)
for (int shift = 1;shift < lgth;++shift)
{
// Forward position is not a cleavage site
if (first_c+shift != *fwd)
{
// For every cleavage site ahead try to add variant extented until it.
for (int i=0;
i < (mc + 1); // 1 cleavage is always included
++i)
{
variant(first_c + shift, *(fwd+i)); // Extend until current cleavage
if (*(fwd+i) == last_c) break; // This was the last cleavage
}
} else ++fwd; // Prepare for encountering next cleavage site
// Backwards position
if (last_c - shift != *bwd)
{
for (int i=0;
i < (mc + 1);
++i)
{
variant(*(bwd+i), last_c - shift); // Reverse iterator!
if (*(bwd+i) == first_c) break;
}
} else ++bwd;
}
return wrong;
}
// Counts the internal cleavage sites of 'sequence' for the current enzyme.
// tokenize_() always reports the sequence start as its first entry, so the
// number of internal sites is one less than the number of reported positions.
Size EnzymaticDigestion::countInternalCleavageSites(const String& sequence) const
{
  const std::vector<int> sites = tokenize_(sequence);
  return sites.size() - 1;
}
// Public validity check for a digestion product; delegates to isValidProduct_()
// with the XTandem N-terminal-methionine rule and random D|P cleavage disabled.
bool EnzymaticDigestion::isValidProduct(const String& sequence, int pos, int length, bool ignore_missed_cleavages) const
{
return isValidProduct_(sequence, pos, length, ignore_missed_cleavages, false, false);
}
// Applies a caller-supplied predicate to the number of internal cleavage
// sites (missed cleavages) found in 'sequence' and returns its verdict.
bool EnzymaticDigestion::filterByMissedCleavages(const String& sequence, const std::function<bool(Int)>& filter) const
{
  const int num_mc = static_cast<int>(countInternalCleavageSites(sequence));
  return filter(num_mc);
}
/*
Checks whether the fragment sequence[pos, pos+length) is a valid digestion
product under the current enzyme, specificity and missed-cleavage settings.
- allow_nterm_protein_cleavage: XTandem rule — a peptide starting at position
  1 or 2 after an N-terminal 'M' (or 'MX') is treated as starting at 0.
- allow_random_asp_pro_cleavage: additionally accept a cut between D and P.
Returns false (with a warning) for out-of-bounds or empty fragments.
*/
bool EnzymaticDigestion::isValidProduct_(const String& sequence, int pos, int length, bool ignore_missed_cleavages, bool allow_nterm_protein_cleavage, bool allow_random_asp_pro_cleavage) const
{
// for XTandem specific rules (see https://github.com/OpenMS/OpenMS/issues/2497)
// M or MX at the N-terminus might have been cleaved off
if (allow_nterm_protein_cleavage && (pos <= 2) && (sequence[0] == 'M'))
{ // reset the peptide to full length on N-terminus
length += pos;
pos = 0;
}
const int seq_size = (int)sequence.size();
if (pos >= seq_size)
{
OPENMS_LOG_WARN << "Warning: start of fragment (" << pos << ") is beyond end of sequence '" << sequence << "'!" << endl;
return false;
}
if (pos + length > seq_size)
{
OPENMS_LOG_WARN << "Warning: end of fragment (" << (pos + length) << ") is beyond end of sequence '" << sequence << "'!" << endl;
return false;
}
if (length == 0 || sequence.empty())
{
OPENMS_LOG_WARN << "Warning: fragment and sequence must not be empty!" << endl;
return false;
}
// ignore specificity and missed cleavage settings for unspecific cleavage
if (enzyme_->getName() == UnspecificCleavage)
{
return true;
}
const int end = pos + length; // past-the-end index into sequence of last fragment position
if (specificity_ == SPEC_NONE)
{ // we don't care about terminal ends
if (ignore_missed_cleavages)
return true;
// tokenize_ is really slow, so reduce work by working on substring:
const std::vector<int> cleavage_positions = tokenize_(sequence, pos, end); // has 'pos' as first site
return (cleavage_positions.size() - 1) <= missed_cleavages_;
}
if (specificity_ == SPEC_FULL && enzyme_->getName() == NoCleavage && allow_random_asp_pro_cleavage == false)
{ // we want them to be exactly match
return pos == 0 && (int)sequence.size() == end;
}
// either SPEC_SEMI or SPEC_FULL
bool spec_c = false, spec_n = false;
// tokenize_ is really slow, so reduce work by working on substring with +-2 chars margin:
const std::vector<int> cleavage_positions = tokenize_(sequence, pos - 2, end + 2); // has max(0,pos-2) as first site
//
// test each terminal end of the fragment
//
// left end (N-term for peptides):
if (std::find(cleavage_positions.begin(), cleavage_positions.end(), pos) != cleavage_positions.end())
{ // '0' is included in cleavage_positions, so starting fragments will be found as well
spec_n = true;
}
// pos is > 0 at this point, so [pos-1] is valid
// (pos == 0 is always present in cleavage_positions and taken by the branch above)
else if (allow_random_asp_pro_cleavage && (sequence[pos - 1] == 'D') && (sequence[pos] == 'P'))
{
spec_n = true;
}
// right end (C-term for peptides):
if (end == seq_size)
{ // full length match (end of sequence is not in cleavage_positions)
spec_c = true;
}
else if (std::find(cleavage_positions.rbegin(), cleavage_positions.rend(), end) != cleavage_positions.rend())
{ // use rbegin() since we expect this to be the correct hit
spec_c = true;
}
// end < seq_size here (the full-length case was handled above), so [end] is valid
else if (allow_random_asp_pro_cleavage && (sequence[end - 1] == 'D') && (sequence[end] == 'P'))
{
spec_c = true;
}
if ((spec_n && spec_c) || // full spec
((specificity_ == SPEC_SEMI) && (spec_n || spec_c))) // semi spec
{
if (ignore_missed_cleavages)
{
return true;
}
return countMissedCleavages_(cleavage_positions, pos, end) <= missed_cleavages_;
}
return false;
}
// Counts the cleavage positions lying strictly inside (seq_start, seq_end),
// i.e. the missed cleavages contained in a fragment with those borders.
Size EnzymaticDigestion::countMissedCleavages_(const std::vector<int>& cleavage_positions, Size seq_start, Size seq_end) const
{
  const int lower = (int)seq_start;
  const int upper = (int)seq_end;
  Size missed(0);
  for (std::vector<int>::const_iterator it = cleavage_positions.begin(); it != cleavage_positions.end(); ++it)
  {
    if ((lower < *it) && (*it < upper))
    {
      ++missed;
    }
  }
  return missed;
}
/**
 * @brief Converts cleavage positions into (start index, length) pairs of digestion products.
 *
 * @param fragment_positions Positions from tokenize_(): the sequence start followed by the
 *        internal cleavage sites (the sequence end is implicit, not listed)
 * @param sequence The digested sequence (only its length is used here)
 * @param output Receives one (start index, length) pair per accepted product
 * @param min_length Minimum product length (inclusive)
 * @param max_length Maximum product length (inclusive)
 * @return Number of products rejected for violating the length constraints
 */
Size EnzymaticDigestion::digestAfterTokenize_(const std::vector<int>& fragment_positions, const StringView& sequence, std::vector<std::pair<Size, Size>>& output, Size min_length,
                                              Size max_length) const
{
  const Size count = fragment_positions.size();
  Size wrong_size(0);
  Size l(0); // length of the current candidate product
  // no cleavage sites? return full string
  // (defensive: tokenize_() always reports at least the start position)
  if (count == 0)
  {
    if (sequence.size() >= min_length && sequence.size() <= max_length)
    {
      // bugfix: store (start, length) like every other product below;
      // previously this stored (0, size() - 1), truncating the last residue
      output.emplace_back(0, sequence.size());
    }
    return wrong_size;
  }
  // products between consecutive cleavage sites (no missed cleavages)
  for (Size i = 1; i != count; ++i)
  {
    l = fragment_positions[i] - fragment_positions[i - 1];
    if (l >= min_length && l <= max_length)
    {
      output.emplace_back(fragment_positions[i - 1], l);
    }
    else
    {
      ++wrong_size;
    }
  }
  // add last cleavage product (need to add because end is not a cleavage site)
  l = sequence.size() - fragment_positions[count - 1];
  if (l >= min_length && l <= max_length)
  {
    output.emplace_back(fragment_positions[count - 1], l);
  }
  else
  {
    ++wrong_size;
  }
  // generate fragments spanning i missed cleavages
  for (Size i = 1; ((i <= missed_cleavages_) && (i < count)); ++i)
  {
    for (Size j = 1; j < count - i; ++j)
    {
      l = fragment_positions[j + i] - fragment_positions[j - 1];
      if (l >= min_length && l <= max_length)
      {
        output.emplace_back(fragment_positions[j - 1], l);
      }
      else
      {
        ++wrong_size;
      }
    }
    // add last cleavage product of this missed-cleavage level
    l = sequence.size() - fragment_positions[count - i - 1];
    if (l >= min_length && l <= max_length)
    {
      output.emplace_back(fragment_positions[count - i - 1], l);
    }
    else
    {
      ++wrong_size;
    }
  }
  return wrong_size;
}
/**
 * @brief Converts cleavage positions into StringView digestion products.
 *
 * @param fragment_positions Positions from tokenize_(): the sequence start followed by the
 *        internal cleavage sites (the sequence end is implicit, not listed)
 * @param sequence The digested sequence the views point into
 * @param output Receives one StringView per accepted product
 * @param min_length Minimum product length (inclusive)
 * @param max_length Maximum product length (inclusive)
 * @return Number of products rejected for violating the length constraints
 */
Size EnzymaticDigestion::digestAfterTokenize_(const std::vector<int>& fragment_positions, const StringView& sequence, std::vector<StringView>& output, Size min_length, Size max_length) const
{
  const Size n_sites = fragment_positions.size();
  Size rejected(0);
  // emit the product starting at 'begin' with length 'len' if it passes the length filter
  auto emit = [&](Size begin, Size len)
  {
    if (len >= min_length && len <= max_length)
    {
      output.push_back(sequence.substr(begin, len));
    }
    else
    {
      ++rejected;
    }
  };
  // no cleavage sites? the full sequence is the only candidate
  if (n_sites == 0)
  {
    if (sequence.size() >= min_length && sequence.size() <= max_length)
    {
      output.push_back(sequence);
    }
    return rejected;
  }
  // products between consecutive cleavage sites (no missed cleavages)
  for (Size i = 1; i != n_sites; ++i)
  {
    emit(fragment_positions[i - 1], fragment_positions[i] - fragment_positions[i - 1]);
  }
  // last product (the sequence end is not a cleavage site)
  emit(fragment_positions[n_sites - 1], sequence.size() - fragment_positions[n_sites - 1]);
  // products spanning i missed cleavages
  for (Size i = 1; ((i <= missed_cleavages_) && (i < n_sites)); ++i)
  {
    for (Size j = 1; j < n_sites - i; ++j)
    {
      emit(fragment_positions[j - 1], fragment_positions[j + i] - fragment_positions[j - 1]);
    }
    // last product of this missed-cleavage level
    emit(fragment_positions[n_sites - i - 1], sequence.size() - fragment_positions[n_sites - i - 1]);
  }
  return rejected;
}
/**
 * @brief Digests an unmodified sequence, returning products as StringViews.
 *
 * @param sequence Sequence to digest (the output views point into it)
 * @param output Receives the digestion products (cleared first)
 * @param min_length Minimum product length (inclusive)
 * @param max_length Maximum product length (inclusive); 0 disables the upper bound
 * @return Number of products rejected for violating the length constraints
 */
Size EnzymaticDigestion::digestUnmodified(const StringView& sequence, std::vector<StringView>& output, Size min_length, Size max_length) const
{
  // initialization
  output.clear();
  // disable max length filter by setting to maximum length
  if (max_length == 0 || max_length > sequence.size())
  {
    max_length = sequence.size();
  }
  // Unspecific cleavage:
  // For unspecific cleavage every site is a cutting position.
  // All substrings of length min_size..max_size are generated.
  if (enzyme_->getName() == UnspecificCleavage)
  {
    // bugfix: with min_length > max_length (e.g. min_length > sequence length)
    // the unsigned arithmetic below underflowed; no substring can qualify
    if (min_length > max_length)
    {
      return 0;
    }
    output.reserve(sequence.size() * (max_length - min_length + 1));
    for (Size i = 0; i <= sequence.size() - min_length; ++i)
    {
      const Size right = std::min(i + max_length, sequence.size());
      for (Size j = i + min_length; j <= right; ++j)
      {
        output.emplace_back(sequence.substr(i, j - i));
      }
    }
    return 0;
  }
  // naive cleavage sites
  std::vector<int> fragment_positions = tokenize_(sequence.getString());
  return digestAfterTokenize_(fragment_positions, sequence, output, min_length, max_length);
}
/**
 * @brief Digests an unmodified sequence, returning products as (start index, length) pairs.
 *
 * @param sequence Sequence to digest
 * @param output Receives one (start index, length) pair per product (cleared first)
 * @param min_length Minimum product length (inclusive)
 * @param max_length Maximum product length (inclusive); 0 disables the upper bound
 * @return Number of products rejected for violating the length constraints
 */
Size EnzymaticDigestion::digestUnmodified(const StringView& sequence, std::vector<std::pair<Size, Size>>& output, Size min_length, Size max_length) const
{
  // initialization
  output.clear();
  // disable max length filter by setting to maximum length
  if (max_length == 0 || max_length > sequence.size())
  {
    max_length = sequence.size();
  }
  // Unspecific cleavage:
  // For unspecific cleavage every site is a cutting position.
  // All substrings of length min_size..max_size are generated.
  if (enzyme_->getName() == UnspecificCleavage)
  {
    // bugfix: with min_length > max_length (e.g. min_length > sequence length)
    // the unsigned arithmetic below underflowed; no substring can qualify
    if (min_length > max_length)
    {
      return 0;
    }
    output.reserve(sequence.size() * (max_length - min_length + 1));
    for (Size i = 0; i <= sequence.size() - min_length; ++i)
    {
      const Size right = std::min(i + max_length, sequence.size());
      for (Size j = i + min_length; j <= right; ++j)
      {
        output.emplace_back(i, j - i);
      }
    }
    return 0;
  }
  // naive cleavage sites
  std::vector<int> fragment_positions = tokenize_(sequence.getString());
  return digestAfterTokenize_(fragment_positions, sequence, output, min_length, max_length);
}
} // namespace OpenMS
| C++ |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.