keyword stringclasses 7 values | repo_name stringlengths 8 98 | file_path stringlengths 4 244 | file_extension stringclasses 29 values | file_size int64 0 84.1M | line_count int64 0 1.6M | content stringlengths 1 84.1M ⌀ | language stringclasses 14 values |
|---|---|---|---|---|---|---|---|
3D | OpenMS/OpenMS | doc/code_examples/ExamplePlugin.py | .py | 2,277 | 61 | #!/usr/bin/env python
import argparse
from pyopenms import Param, ParamXMLFile, MSExperiment, MzMLFile
'''
The tool needs basic parameter parsing. The following parameters are required to be supported:
-write_ini <filename> : create the ini file for the tool
-ini <filename> : the ini file to load
-in <filename> : the input file
-out <filename> : the output file
-no_progress : flag to disable output to command line
'''
def main():
parser = argparse.ArgumentParser(description="Test tool for prototyping TOPPView-plugins")
parser.add_argument("-write_ini", help="Writes ini to specified path and exits.")
parser.add_argument("-ini", help="The ini file to load")
parser.add_argument("-in", help="The input file")
parser.add_argument("-out", help="The output file")
parser.add_argument("-no_progress", action="store_false", help="Turn of output to the command line")
args = vars(parser.parse_args())
ini_path = args["write_ini"]
# create ini at path
if ini_path is not None:
# create the default parameters
param = Param()
# this will create the param structure that is mandatory for all plugins
param.initPluginParam("ExamplePlugin", "0.0.1")
# the valid input fileformats have to be added like this
param.setValidStrings("ExamplePlugin:1:in", [b"*.mzML"])
# additional parameters can be added like this
param.setValue("ExamplePlugin:1:Number", 0, "This is an additional numeric parameter")
param.setValue("ExamplePlugin:1:Text", "example 1", "This is an additional text parameter")
param.setValidStrings("ExamplePlugin:1:Text", [b"option 1", b"option 2", b"option 3"])
param.setValue("ExamplePlugin:1:Required", "", "This is an additional required parameter", [b"required"])
# write them to the given filepath
file = ParamXMLFile()
file.store(ini_path, param)
exit()
# this is an example for loading and saving an experiment from a mzML file
MzML = MzMLFile()
exp = MSExperiment()
input_file = args["in"]
output_file = args["out"]
MzML.load(input_file, exp)
# we save it here without doing anything
MzML.store(output_file, exp)
if __name__ == "__main__":
main()
| Python |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_FileIO_mzML.cpp | .cpp | 1,097 | 35 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
#include <OpenMS/FORMAT/IndexedMzMLFileLoader.h>
#include <OpenMS/KERNEL/OnDiscMSExperiment.h>
#include <OpenMS/SYSTEM/File.h>
#include <OpenMS/openms_data_path.h> // exotic header for path to tutorial data
#include <iostream>
using namespace OpenMS;
using namespace std;
int main(int argc, const char** argv)
{
auto file_mzXML = OPENMS_DOC_PATH + String("/code_examples/data/Tutorial_FileIO_indexed.mzML");
IndexedMzMLFileLoader imzml;
// load data from an indexed MzML file
OnDiscPeakMap map;
imzml.load(file_mzXML, map);
// get the first spectrum in memory, do some constant (non-changing) data processing
MSSpectrum s = map.getSpectrum(0);
std::cout << "There are " << map.getNrSpectra() << " spectra in the input file." << std::endl;
std::cout << "The first spectrum has " << s.size() << " peaks." << std::endl;
// store the (unmodified) data in a different file
imzml.store("Tutorial_FileIO_output.mzML", map);
} // end of main
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_Residue.cpp | .cpp | 1,305 | 48 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
//! [doxygen_snippet_Residue]
#include <OpenMS/CHEMISTRY/ResidueDB.h>
#include <OpenMS/CHEMISTRY/Residue.h>
#include <OpenMS/CHEMISTRY/AASequence.h>
#include <iostream>
using namespace OpenMS;
using namespace std;
int main()
{
// get ResidueDB singleton
ResidueDB const * res_db = ResidueDB::getInstance();
// query Lysine
Residue const * lys = res_db->getResidue("Lysine");
cout << lys->getName() << " "
<< lys->getThreeLetterCode() << " "
<< lys->getOneLetterCode() << " "
<< lys->getFormula().toString() << " "
<< lys->getAverageWeight() << " "
<< lys->getMonoWeight() << endl;
// one letter code query of Arginine
Residue const * arg = res_db->getResidue('R');
cout << arg->getName() << " "
<< arg->getFormula().toString() << " "
<< arg->getMonoWeight() << endl;
// construct a AASequence object, query a residue
// and output some of its properties
AASequence aas = AASequence::fromString("DEFIANGER");
cout << aas[3].getName() << " "
<< aas[3].getFormula().toString() << " "
<< aas[3].getMonoWeight() << endl;
return 0;
} //end of main
//! [doxygen_snippet_Residue]
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_Clustering.cpp | .cpp | 2,578 | 76 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
#include <OpenMS/ML/CLUSTERING/ClusterAnalyzer.h>
#include <OpenMS/ML/CLUSTERING/ClusterHierarchical.h>
#include <OpenMS/ML/CLUSTERING/CompleteLinkage.h>
#include <OpenMS/ML/CLUSTERING/SingleLinkage.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <algorithm>
#include <iostream>
#include <random>
#include <vector>
using namespace OpenMS;
using namespace std;
/// A functor, which provides a similarity value for two entities (here: doubles), in range [0, 1)
class LowLevelComparator
{
public:
double operator()(const double first, const double second) const
{
// we just use a linear distance between them, i.e. the closer the values, the more similar they are
auto distance = std::fabs(first - second);
if (distance > 1) { throw Exception::InvalidRange(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION); }
return 1 - distance;
}
}; // end of LowLevelComparator
Int main()
{
// data
vector<double> data;
#if 1 // manual data
data = {0.01, 0.02, 0.7, 0.3, 0.31};
#else // random data
const auto N = 5;
std::mt19937 rng; // default constructed, seeded with fixed seed
std::uniform_real_distribution<> dis(0.0, 1.0); // uniform values between [0, 1)
std::generate_n(back_inserter(data), N, [&]() { return dis(rng); });
#endif
// print raw data to console
std::cout << "raw data: ";
for_each(data.begin(), data.end(), [](auto elem) { std::cout << elem << ' '; });
std::cout << '\n';
// determines the distance between two data points
LowLevelComparator llc;
SingleLinkage sl;
// or try:
//CompleteLinkage sl;
vector<BinaryTreeNode> tree;
DistanceMatrix<float> dist; // will be filled
ClusterHierarchical ch;
ch.setThreshold(1); // maximal distance between clusters; default threshold = 1, i.e. full clustering
// note: not all methods support a threshold, e.g. SingleLinkage requires t = 1.
// do clustering.
// Note: There are other overloads of this function for clustering spectra
ch.cluster<double, LowLevelComparator>(data, llc, sl, tree, dist);
// depending on the cluster method, the distance matrix may have shrunken, e.g. for complete linkage to the point where clustering was stopped
std::cout << "distance matrix:\n" << dist << "\n\n";
ClusterAnalyzer ca;
std::cout << "binary tree in Newick format (numbers are indices into the data)";
std::cout << ca.newickTree(tree) << std::endl;
return 0;
} // end of main
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_AASequence.cpp | .cpp | 2,340 | 58 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
//! [doxygen_snippet_AASequence]
// This script calculates the mass-to-charge ratio of a 2+ charged b-ion and full peptide from a hardcoded sequence.
#include <OpenMS/CHEMISTRY/AASequence.h>
#include <iostream>
using namespace OpenMS;
using namespace std;
int main()
{
// generate AASequence object from String
const String s = "DEFIANGER";
AASequence peptide1 = AASequence::fromString(s);
// ... or generate AASequence object from string literal
AASequence peptide2 = AASequence::fromString("PEPTIDER");
// extract prefix and suffix of the first/last AA residues
AASequence prefix(peptide1.getPrefix(2)); // "PE"
AASequence suffix(peptide1.getSuffix(3)); // "DER"
cout << peptide1.toString() << " " << prefix << " " << suffix << endl;
// create chemically modified peptide
AASequence peptide_meth_ox = AASequence::fromString("PEPTIDESEKUEM(Oxidation)CER");
cout << peptide_meth_ox.toString() << " --> unmodified: " << peptide_meth_ox.toUnmodifiedString() << endl;
// mass of the full, uncharged peptide
double peptide_mass_mono = peptide_meth_ox.getMonoWeight();
cout << "Monoisotopic mass of the uncharged, full peptide: " << peptide_mass_mono << endl;
double peptide_mass_avg = peptide_meth_ox.getAverageWeight();
cout << "Average mass of the uncharged, full peptide: " << peptide_mass_avg << endl;
// mass of the 2+ charged b-ion with the given sequence
double ion_mass_b3_2plus = peptide_meth_ox.getPrefix(3).getMonoWeight(Residue::BIon, 2);
cout << "Mass of the doubly positively charged b3-ion: " << ion_mass_b3_2plus << endl;
// mass-to-charge ratio (m/z) of the 2+ charged b-ion and full peptide with the given sequence
cout << "Mass-to-charge of the doubly positively charged b3-ion: " << peptide_meth_ox.getPrefix(3).getMZ(2, Residue::BIon) << endl;
cout << "Mass-to-charge of the doubly positively charged peptide: " << peptide_meth_ox.getMZ(2) << endl;
// count AA's to get a frequency table
std::map<String, Size> aa_freq;
peptide_meth_ox.getAAFrequencies(aa_freq);
cout << "Number of Proline (P) residues in '" << peptide_meth_ox.toString() << "' is " << aa_freq['P'] << endl;
return 0;
}
//! [doxygen_snippet_AASequence]
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_MSChromatogram.cpp | .cpp | 864 | 36 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//! [doxygen_snippet_MSChromatogram]
#include <OpenMS/KERNEL/ChromatogramPeak.h>
#include <OpenMS/KERNEL/MSChromatogram.h>
#include <OpenMS/METADATA/ChromatogramSettings.h>
using namespace OpenMS;
using namespace std;
int main()
{
// create a chromatogram
MSChromatogram chromatogram;
// fill it with metadata information
chromatogram.setNativeID("transition_300.9_188.0");
chromatogram.getProduct().setMZ(188.0);
chromatogram.getPrecursor().setMZ(300.9);
// fill chromatogram with peaks
ChromatogramPeak peak;
peak.setIntensity(1.0);
for (float rt = 200.0; rt >= 100; rt -= 1.0)
{
peak.setRT(rt);
chromatogram.push_back(peak);
}
return 0;
} // end of main
//! [doxygen_snippet_MSChromatogram]
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_TOPP.cpp | .cpp | 5,857 | 176 | //! [doxygen_snippet_TOPPexample]
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
// --------------------------------------------------------------------------
// $Maintainer: Oliver Alka $
// $Authors: Oliver Alka $
// This file is ONLY used for code snippets in the developer tutorial
// --------------------------------------------------------------------------
//! [doxygen_snippet_Includes]
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/FORMAT/FASTAFile.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FORMAT/FileTypes.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
//! [doxygen_snippet_Includes]
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
// Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_DatabaseFilter DatabaseFilter
@brief The DatabaseFilter tool filters a protein database in fasta format according to one or multiple filtering criteria.
The resulting database is written as output. Depending on the reporting method (method="whitelist" or "blacklist") only entries are retained that
passed all filters ("whitelist) or failed at least one filter ("blacklist").
Implemented filter criteria:
ID: Filter database according to the set of proteinIDs contained in an identification file (idXML, mzIdentML)
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_DatabaseFilter.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_DatabaseFilter.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPDatabaseFilter : public TOPPBase
{
public:
TOPPDatabaseFilter():
TOPPBase("DatabaseFilter", "Filters a protein database (FASTA format) based on identified proteins", false) // false: mark as unofficial tool
{
}
protected:
//! [doxygen_snippet_Register]
void registerOptionsAndFlags_() override
{
registerInputFile_("in", "<file>", "", "Input FASTA file, containing a protein database.");
setValidFormats_("in", {"fasta"});
registerInputFile_("id", "<file>", "", "Input file containing identified peptides and proteins.");
setValidFormats_("id", {"idXML", "mzid"});
registerStringOption_("method", "<choice>", "whitelist", "Switch between white-/blacklisting of protein IDs", false);
setValidStrings_("method", {"whitelist", "blacklist"});
registerOutputFile_("out", "<file>", "", "Output FASTA file where the reduced database will be written to.");
setValidFormats_("out", {"fasta"});
}
//! [doxygen_snippet_Register]
//! [doxygen_snippet_Functionality_1]
void filterByProteinAccessions_(const vector<FASTAFile::FASTAEntry>& db,
const PeptideIdentificationList& peptide_identifications,
bool whitelist,
vector<FASTAFile::FASTAEntry>& db_new)
{
set<String> id_accessions;
for (const auto& pep_id : peptide_identifications)
{
for (const auto& hit : pep_id.getHits())
{
for (const auto& ev : hit.getPeptideEvidences())
{
const String& id_accession = ev.getProteinAccession();
id_accessions.insert(id_accession);
}
}
}
//! [doxygen_snippet_Functionality_1]
OPENMS_LOG_INFO << "Number of Protein IDs: " << id_accessions.size() << endl;
//! [doxygen_snippet_Functionality_2]
for (const auto entry : db)
{
const String& fasta_accession = entry.identifier;
const bool found = id_accessions.find(fasta_accession) != id_accessions.end();
if ((found && whitelist) || (! found && ! whitelist)) // either found in the whitelist or not found in the blacklist
{
db_new.push_back(entry);
}
}
//! [doxygen_snippet_Functionality_2]
}
ExitCodes main_(int, const char**) override
{
//! [doxygen_snippet_InputParam]
//-------------------------------------------------------------
// parsing parameters
//-------------------------------------------------------------
String in(getStringOption_("in"));
String ids(getStringOption_("id"));
String method(getStringOption_("method"));
bool whitelist = (method == "whitelist");
String out(getStringOption_("out"));
//! [doxygen_snippet_InputParam]
//-------------------------------------------------------------
// reading input
//-------------------------------------------------------------
//! [doxygen_snippet_InputRead]
vector<FASTAFile::FASTAEntry> db;
FASTAFile().load(in, db);
//! [doxygen_snippet_InputRead]
vector<ProteinIdentification> protein_identifications;
PeptideIdentificationList peptide_identifications;
FileHandler().loadIdentifications(ids, protein_identifications, peptide_identifications);
OPENMS_LOG_INFO << "Identifications: " << ids.size() << endl;
// run filter
vector<FASTAFile::FASTAEntry> db_new;
filterByProteinAccessions_(db, peptide_identifications, whitelist, db_new);
//-------------------------------------------------------------
// writing output
//-------------------------------------------------------------
OPENMS_LOG_INFO << "Database entries (before / after): " << db.size() << " / " << db_new.size() << endl;
//! [doxygen_snippet_output]
FASTAFile().store(out, db_new);
//! [doxygen_snippet_output]
return EXECUTION_OK;
}
};
int main(int argc, const char** argv)
{
TOPPDatabaseFilter tool;
OPENMS_LOG_FATAL_ERROR << "THIS IS TEST CODE AND SHOULD NEVER BE RUN OUTSIDE OF TESTING" << endl;
tool.main(argc, argv);
return 0;
}
/// @endcond
//! [doxygen_snippet_TOPPexample]
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_TheoreticalSpectrumGenerator.cpp | .cpp | 1,780 | 57 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
//! [doxygen_snippet_TSG]
#include <OpenMS/CHEMISTRY/TheoreticalSpectrumGenerator.h>
#include <OpenMS/CHEMISTRY/AASequence.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <iostream>
using namespace OpenMS;
using namespace std;
int main()
{
// initialize a TheoreticalSpectrumGenerator
TheoreticalSpectrumGenerator tsg;
// get current parameters
// in this case default parameters, since we have not changed any yet
Param tsg_settings = tsg.getParameters();
// with default parameters, only b- and y-ions are generated,
// so we will add a-ions
tsg_settings.setValue("add_a_ions", "true");
// store ion types for each peak
tsg_settings.setValue("add_metainfo", "true");
// set the changed parameters for the TSG
tsg.setParameters(tsg_settings);
PeakSpectrum theoretical_spectrum;
// initialize peptide to be fragmented
AASequence peptide = AASequence::fromString("DEFIANGER");
// generate a-, b- and y- ion spectrum of the peptide
// with all fragment charges from 1 to 2
tsg.getSpectrum(theoretical_spectrum, peptide, 1, 2);
// output of masses and meta information (ion-types) of some peaks
const PeakSpectrum::StringDataArray& ion_types = theoretical_spectrum.getStringDataArrays().at(0);
cout << "Mass of second peak: " << theoretical_spectrum[1].getMZ()
<< " | Ion type of second peak: " << ion_types[1] << endl;
cout << "Mass of tenth peak: " << theoretical_spectrum[9].getMZ()
<< " | Ion type of tenth peak: " << ion_types[9] << endl;
return 0;
} //end of main
//! [doxygen_snippet_TSG]
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_FileIO_Consumer.cpp | .cpp | 1,663 | 49 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
#include <OpenMS/FORMAT/MzMLFile.h>
#include <OpenMS/FORMAT/DATAACCESS/MSDataWritingConsumer.h>
#include <OpenMS/SYSTEM/File.h>
#include <OpenMS/openms_data_path.h> // exotic header for path to tutorial data
#include <iostream>
using namespace OpenMS;
using namespace std;
class TICWritingConsumer : public MSDataWritingConsumer
{
// Inheriting from MSDataWritingConsumer allows to change the data before
// they are written to disk (to "filename") using the processSpectrum_ and
// processChromatogram_ functions.
public:
double TIC {};
int nr_spectra {};
// Create new consumer
TICWritingConsumer(const String& filename) : MSDataWritingConsumer(filename)
{}
// Add a data processing step for spectra before they are written to disk
void processSpectrum_(MSDataWritingConsumer::SpectrumType & s) override
{
for (const auto& p : s) TIC += p.getIntensity();
nr_spectra++;
}
// Empty chromatogram data processing
void processChromatogram_(MSDataWritingConsumer::ChromatogramType& /* c */) override {}
};
int main(int argc, const char** argv)
{
auto file_mzXML = OPENMS_DOC_PATH + String("/code_examples/data/Tutorial_FileIO_indexed.mzML");
// Create the consumer, set output file name, transform
TICWritingConsumer consumer("Tutorial_FileIO_output.mzML");
MzMLFile().transform(file_mzXML, &consumer);
std::cout << "There are " << consumer.nr_spectra << " spectra in the input file.\n";
std::cout << "The total ion current is " << consumer.TIC << std::endl;
} //end of main
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_MorphologicalFilter.cpp | .cpp | 934 | 34 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
#include <OpenMS/PROCESSING/BASELINE/MorphologicalFilter.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/openms_data_path.h> // exotic header for path to tutorial data
#include <iostream>
using namespace OpenMS;
using namespace std;
int main(int argc, const char** argv)
{
auto tutorial_data_path = OPENMS_DOC_PATH + String("/code_examples/");
PeakMap exp;
FileHandler().loadExperiment(tutorial_data_path + "/data/Tutorial_MorphologicalFilter.mzML", exp);
Param parameters;
parameters.setValue("struc_elem_length", 1.0);
parameters.setValue("struc_elem_unit", "Thomson");
parameters.setValue("method", "tophat");
MorphologicalFilter mf;
mf.setParameters(parameters);
mf.filterExperiment(exp);
return 0;
} // end of main
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_Element.cpp | .cpp | 2,385 | 64 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
//! [doxygen_snippet_Element]
#include <OpenMS/CHEMISTRY/Element.h>
#include <OpenMS/CHEMISTRY/ElementDB.h>
#include <OpenMS/DATASTRUCTURES/StringListUtils.h>
#include <iostream>
#include <iomanip>
using namespace OpenMS;
using namespace std;
Int main()
{
const ElementDB& db = *ElementDB::getInstance();
// extract carbon element from ElementDB
// .getResidue("C") would work as well
const Element& carbon = *db.getElement("Carbon");
// output name, symbol, monoisotopic weight and average weight
cout << carbon.getName() << " " << carbon.getSymbol() << " " << carbon.getMonoWeight() << " " << carbon.getAverageWeight() << endl;
if (db.hasElement("foo")) { std::cout << "worth a try..."; }
// get all elements currently known; you can also get them by atomic number or symbols:
const auto all_elements_name = db.getNames();
const auto all_elements_AN = db.getAtomicNumbers();
const auto all_elements_symbols = db.getSymbols();
std::cout << "We currently know of: " << all_elements_name.size() << " elements (incl. isotopes)\n"
<< " with: " << all_elements_AN.size() << " different atomic numbers (linking to the monoisotopic isotope)\n"
<< " and: " << all_elements_symbols.size() << " different symbols\n\n";
std::cout << "\nLet's find all hydrogen isotopes:\n";
for (const auto e : all_elements_name)
{
// all hydrogens have AN == 1
if (e.second->getAtomicNumber() == 1)
{
std::cout << " --> " << std::setw(30) << e.first
<< " Symbol: " << std::setw(5) << e.second->getSymbol()
<< " AN: " << std::setw(3) << e.second->getAtomicNumber()
<< " mono-weight: " << std::setw(14)<< e.second->getMonoWeight() << "\n";
}
}
std::cout << "\nLets print all monoisotopic elements:\n";
for (const auto e : all_elements_AN)
{
std::cout << std::setw(30) << e.first
<< " Symbol: " << std::setw(5) << e.second->getSymbol()
<< " AN: " << std::setw(3) << e.second->getAtomicNumber()
<< " mono-weight: " << std::setw(14)<< e.second->getMonoWeight() << "\n";
}
} // end of main
//! [doxygen_snippet_Element]
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_MSSpectrum.cpp | .cpp | 1,121 | 44 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Petra Gutenbrunner $
// $Authors: Petra Gutenbrunner $
// --------------------------------------------------------------------------
//! [doxygen_snippet_MSSpectrum]
#include <OpenMS/KERNEL/MSSpectrum.h>
using namespace OpenMS;
using namespace std;
int main()
{
// Create spectrum
MSSpectrum spectrum;
Peak1D peak;
for (float mz = 1500.0; mz >= 500; mz -= 100.0)
{
peak.setMZ(mz);
spectrum.push_back(peak);
}
// Sort the peaks according to ascending mass-to-charge ratio
spectrum.sortByPosition();
// Iterate over spectrum of those peaks between 800 and 1000 Thomson
for (auto it = spectrum.MZBegin(800.0); it != spectrum.MZEnd(1000.0); ++it)
{
cout << it->getMZ() << endl;
}
// Access a peak by index
cout << spectrum[1].getMZ() << " " << spectrum[1].getIntensity() << endl;
// ... and many more
return 0;
}
//! [doxygen_snippet_MSSpectrum]
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_GaussFilter.cpp | .cpp | 861 | 33 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
#include <OpenMS/PROCESSING/SMOOTHING/GaussFilter.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/SYSTEM/File.h>
#include <OpenMS/openms_data_path.h> // exotic header for path to tutorial data
#include <iostream>
using namespace OpenMS;
using namespace std;
int main(int argc, const char** argv)
{
auto file_gauss = OPENMS_DOC_PATH + String("/code_examples/data/Tutorial_GaussFilter.mzML");
PeakMap exp;
FileHandler().loadExperiment(file_gauss, exp, {FileTypes::MZML});
GaussFilter g;
Param param;
param.setValue("gaussian_width", 1.0);
g.setParameters(param);
g.filterExperiment(exp);
return 0;
} //end of main
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_FeatureFinder.cpp | .cpp | 601 | 25 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
#include <OpenMS/FEATUREFINDER/FeatureFinderAlgorithmPicked.h>
using namespace OpenMS;
using namespace std;
Int main()
{
FeatureFinderAlgorithmPicked ff;
// ... set parameters (e.g. from INI file)
Param parameters;
// ... set input data (e.g. from mzML file)
PeakMap input;
// ... set output data structure
FeatureMap output;
// ... set user-specified seeds, if needed
FeatureMap seeds;
ff.run(input, output, parameters, seeds);
} //end of main
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_DRange.cpp | .cpp | 1,475 | 39 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
#include <OpenMS/DATASTRUCTURES/DRange.h>
#include <iostream>
using namespace OpenMS;
Int main()
{
// A D-dimensional range, without units;
// Note: if you want something more modern with dimensions for RT, m/z, intensity and mobility, then use RangeManager.
//
// You can use any dimension you like; for D=2 and D=3 there are some convenience overloads though, especially for C'tors
// a 2-dimensional, i.e. [x_min..x_max, y_min..y_max], range
DRange<2> range;
range.setMin(DPosition<2>(2.0, 3.0)); // for (x_min, y_min)
range.setMax(DPosition<2>(4.0, 5.0)); // for (x_max, y_max)
std::cout << "values:\n" << range; // prints [2..4, 3..5]
// Note: the class maintains the invariant min<=max for each dimension
// Thus, setting a 'min' which is larger than the current 'max', also adjusts 'max' to the same value
range.setMin(DPosition<2>(10.0, 2.0)); // for (x_max, y_max)
std::cout << "\nadjusted max:\n" << range; // prints [10..10, 2..5]
// you can also set each dimension's min/max: 0 = X, 1 = Y
range.setDimMinMax(0, {0.6, 6.6});
std::cout << "\nnew X range:\n" << range;
// print values using a custom format
for (UInt i = 0; i < range.DIMENSION; ++i)
{
std::cout << "DIM " << i << ": " << range.minPosition()[i] << " ... " << range.maxPosition()[i] << '\n';
}
} //end of main
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_RangeManager.cpp | .cpp | 809 | 34 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
#include <OpenMS/KERNEL/FeatureMap.h>
#include <iostream>
using namespace OpenMS;
using namespace std;
Int main()
{
FeatureMap map;
Feature feature;
feature.setIntensity(461.3f);
feature.setRT(15.0);
feature.setMZ(571.3);
map.push_back(feature);
feature.setIntensity(12213.5f);
feature.setRT(23.3);
feature.setMZ(1311.3);
map.push_back(feature);
//calculate the ranges
map.updateRanges();
cout << "Int: " << map.getMinIntensity() << " - " << map.getMaxIntensity() << endl;
cout << "RT: " << map.getMinRT() << " - " << map.getMaxRT() << endl;
cout << "m/z: " << map.getMinMZ() << " - " << map.getMaxMZ() << endl;
return 0;
} //end of main
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_GUI_ListEditor.cpp | .cpp | 357 | 18 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
#include <QApplication>
#include <OpenMS/VISUAL/ListEditor.h>
using namespace OpenMS;
int main(int argc, char ** argv)
{
QApplication app(argc, argv);
ListEditor listeditor;
listeditor.show();
return app.exec();
}
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_Labeled.cpp | .cpp | 1,216 | 34 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
#include <OpenMS/CONCEPT/Types.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithmLabeled.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/openms_data_path.h> // exotic header for path to tutorial data
using namespace OpenMS;
using namespace std;
int main(int argc, const char** argv)
{
vector<FeatureMap> maps(1, FeatureMap{});
FileHandler().loadFeatures(OPENMS_DOC_PATH + String("/code_examples/data/Tutorial_Labeled.featureXML"), maps[0], {FileTypes::FEATUREXML});
ConsensusMap out;
out.getColumnHeaders()[0].filename = "data/Tutorial_Labeled.mzML";
out.getColumnHeaders()[0].size = maps[0].size();
out.getColumnHeaders()[0].label = "light";
out.getColumnHeaders()[1].filename = "data/Tutorial_Labeled.mzML";
out.getColumnHeaders()[1].size = maps[0].size();
out.getColumnHeaders()[1].label = "heavy";
FeatureGroupingAlgorithmLabeled algorithm;
// ... set parameters
algorithm.group(maps, out);
FileHandler().storeConsensusFeatures("Tutorial_Labeled.consensusXML", out, {FileTypes::CONSENSUSXML});
return 0;
} //end of main
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_Enzyme.cpp | .cpp | 1,742 | 62 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
//! [doxygen_snippet_Enzyme]
#include <OpenMS/CHEMISTRY/AASequence.h>
#include <OpenMS/CHEMISTRY/ProteaseDigestion.h>
#include <vector>
#include <iostream>
using namespace OpenMS;
using namespace std;
int main()
{
ProteaseDigestion protease;
// in this example, we don't produce peptides with missed cleavages
protease.setMissedCleavages(0);
// output the number of tryptic peptides (no cut before proline)
protease.setEnzyme("Trypsin");
cout << protease.peptideCount(AASequence::fromString("ACKPDE")) << " "
<< protease.peptideCount(AASequence::fromString("ACRPDEKA"))
<< endl;
// digest C-terminally amidated peptide
vector<AASequence> products;
auto aa_seq = AASequence::fromString("ARCDRE.(Amidated)");
protease.digest(aa_seq, products);
// output digestion products
std::cout << "digesting " << aa_seq.toString() << " into:\n";
for (const AASequence& p : products)
{
cout << "--> " << p.toString() << "\n";
}
cout << endl;
// allow many miss-cleavages
protease.setMissedCleavages(10);
protease.digest(aa_seq, products);
// output digestion products
std::cout << "digesting " << aa_seq.toString() << " with 10 MCs into:\n";
for (const AASequence& p : products)
{
cout << "--> " << p.toString() << "\n";
}
cout << endl;
// verify an infix of a protein is a digestion product:
String peptide = "FFFRAAA";
cout << "Is '" << peptide.prefix(4) << "' a valid digestion product of '" << peptide << "'? "
<< std::boolalpha << protease.isValidProduct(peptide, 0, 4); // yes it is!
}
//! [doxygen_snippet_Enzyme]
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_EmpiricalFormula.cpp | .cpp | 1,246 | 42 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
//! [doxygen_snippet_EmpiricalFormula]
#include <OpenMS/CHEMISTRY/EmpiricalFormula.h>
#include <OpenMS/CHEMISTRY/ElementDB.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/CoarseIsotopePatternGenerator.h>
#include <iostream>
using namespace OpenMS;
using namespace std;
Int main()
{
EmpiricalFormula methanol("CH3OH"), water("H2O");
// sum up empirical formulae
EmpiricalFormula sum = methanol + water;
// get element from ElementDB
const Element * carbon = ElementDB::getInstance()->getElement("Carbon");
// output number of carbon atoms and average weight
cout << "Formula: " << sum
<< "\n average weight: " << sum.getAverageWeight()
<< "\n # of Carbons: " << sum.getNumberOf(carbon);
// extract the isotope distribution
IsotopeDistribution iso_dist = sum.getIsotopeDistribution(CoarseIsotopePatternGenerator(3));
std::cout << "\n\nCoarse isotope distribution of " << sum << ": \n";
for (const auto& it : iso_dist)
{
cout << "m/z: " << it.getMZ() << " abundance: " << it.getIntensity() << endl;
}
} //end of main
//! [doxygen_snippet_EmpiricalFormula]
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_IdentificationClasses.cpp | .cpp | 4,285 | 128 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Petra Gutenbrunner $
// $Authors: Petra Gutenbrunner $
// --------------------------------------------------------------------------
//! [doxygen_snippet_Identification]
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <OpenMS/METADATA/ProteinIdentification.h>
#include <OpenMS/METADATA/PeptideHit.h>
#include <OpenMS/DATASTRUCTURES/String.h>
#include <OpenMS/CHEMISTRY/AASequence.h>
#include <OpenMS/FORMAT/IdXMLFile.h>
#include <iostream>
using namespace OpenMS;
using namespace std;
// Demonstrates building ProteinIdentification/PeptideIdentification objects
// with hits, and iterating over them.
// Fixes: removed the unused local 'date_string'; renamed the iteration
// variable of the peptide loop so it no longer shadows the 'peptide_id'
// object created above.
int main()
{
  // Create new protein identification object corresponding to a single search
  // Each ProteinIdentification object stores a vector of protein hits
  vector<ProteinHit> protein_hits;
  ProteinHit protein_hit;
  protein_hit.setAccession("MyAccession");
  protein_hit.setSequence("PEPTIDEPEPTIDEPEPTIDEPEPTIDER");
  protein_hit.setScore(1.0);
  protein_hits.push_back(protein_hit);
  ProteinIdentification protein_id;
  protein_id.setIdentifier("Identifier");
  protein_id.setHits(protein_hits);
  // record the time of the search
  DateTime now = DateTime::now();
  protein_id.setDateTime(now);
  // Example of possible search parameters
  ProteinIdentification::SearchParameters search_parameters;
  search_parameters.db = "database";
  search_parameters.charges = "+2";
  protein_id.setSearchParameters(search_parameters);
  // Some search engine meta data
  protein_id.setSearchEngineVersion("v1.0.0");
  protein_id.setSearchEngine("SearchEngine");
  protein_id.setScoreType("HyperScore");
  vector<ProteinIdentification> protein_ids;
  protein_ids.push_back(protein_id);
  // Iterate over protein identifications and protein hits
  for (const auto& prot : protein_ids)
  {
    for (const auto& hit : prot.getHits())
    {
      cout << "Protein hit accession: " << hit.getAccession() << '\n';
      cout << "Protein hit sequence: " << hit.getSequence() << '\n';
      cout << "Protein hit score: " << hit.getScore() << '\n';
    }
  }
  // Create new peptide identifications
  PeptideIdentificationList peptide_ids;
  PeptideIdentification peptide_id;
  peptide_id.setRT(1243.56);
  peptide_id.setMZ(440.0);
  peptide_id.setScoreType("ScoreType");
  peptide_id.setHigherScoreBetter(false);
  peptide_id.setIdentifier("Identifier");
  // define additional meta value for the peptide identification
  peptide_id.setMetaValue("AdditionalMetaValue", "Value");
  // add PeptideHit to a PeptideIdentification
  vector<PeptideHit> peptide_hits;
  PeptideHit peptide_hit;
  peptide_hit.setScore(1.0);
  peptide_hit.setRank(1);
  peptide_hit.setCharge(2);
  peptide_hit.setSequence(AASequence::fromString("DLQM(Oxidation)TQSPSSLSVSVGDR"));
  peptide_hits.push_back(peptide_hit);
  // add second best PeptideHit to the PeptideIdentification
  peptide_hit.setScore(1.5);
  peptide_hit.setRank(2);
  peptide_hit.setCharge(2);
  peptide_hit.setSequence(AASequence::fromString("QLDM(Oxidation)TQSPSSLSVSVGDR"));
  peptide_hits.push_back(peptide_hit);
  // add PeptideHit to PeptideIdentification
  peptide_id.setHits(peptide_hits);
  // add PeptideIdentification
  peptide_ids.push_back(peptide_id);
  // We could now store the identification data in an idXML file
  // FileHandler().storeIdentifications(outfile, protein_ids, peptide_ids);
  // And load it back with
  // FileHandler().loadIdentifications(outfile, protein_ids, peptide_ids);
  // Iterate over PeptideIdentification
  for (const auto& pep_id : peptide_ids)
  {
    // Peptide identification values
    cout << "Peptide ID m/z: " << pep_id.getMZ() << '\n';
    cout << "Peptide ID rt: " << pep_id.getRT() << '\n';
    cout << "Peptide ID score type: " << pep_id.getScoreType() << '\n';
    // PeptideHits
    for (const auto& scored_hit : pep_id.getHits())
    {
      cout << " - Peptide hit rank: " << scored_hit.getRank() << '\n';
      cout << " - Peptide hit sequence: " << scored_hit.getSequence().toString() << '\n';
      cout << " - Peptide hit score: " << scored_hit.getScore() << '\n';
    }
  }
}
//! [doxygen_snippet_Identification]
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_Logger.cpp | .cpp | 1,451 | 46 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ProgressLogger.h>
#include <OpenMS/DATASTRUCTURES/String.h>
using namespace OpenMS;
using namespace std;
// just a placeholder for the computations you would do in real code
void someFunction()
{
  // intentionally empty: stands in for the real per-iteration work
}
// Demonstrates command-line progress logging with ProgressLogger.
// Fix: dropped the unused main parameters (the previous
// 'int main(int argc, const char** argv)' is also not a standard-conforming
// signature for main due to 'const char**').
int main()
{
  //! [doxygen_snippet_Logger]
  ProgressLogger progresslogger;
  progresslogger.setLogType(ProgressLogger::CMD); // output to the terminal (std::cout)
  // Note: within a TOPP tool, you can use
  //       progresslogger.setLogType(TOPPBase::log_type_);
  // to set the log-type (automatically set via commandline options)
  const int progress_steps = 200;
  // set start progress (0) and end (ms_run.size() = the number of spectra)
  progresslogger.startProgress(0, progress_steps, "Doing some calculation...");
  for (int i = 0; i < progress_steps; ++i) // in real code, iterate over some datastructure, e.g. an MSExperiments' spectra
  {
    // update progress
    progresslogger.setProgress(i);
    // do the actual calculations and processing ...
    someFunction();
  }
  progresslogger.endProgress();
  //! [doxygen_snippet_Logger]
}
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_Param.cpp | .cpp | 809 | 28 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
#include <OpenMS/CONCEPT/Types.h>
#include <OpenMS/DATASTRUCTURES/Param.h>
#include <iostream>
using namespace OpenMS;
using namespace std;
Int main()
{
Param param;
param.setValue("file:name", "test.xml");
param.setValue("file:size(MB)", 572.3);
param.setValue("file:data:min_int", 0);
param.setValue("file:data:max_int", 16459);
cout << "Name : " << (string)(param.getValue("file:name")) << endl;
cout << "Size : " << (float)(param.getValue("file:size(MB)")) << endl;
cout << "Min int: " << (UInt)(param.getValue("file:data:min_int")) << endl;
cout << "Max int: " << (UInt)(param.getValue("file:data:max_int")) << endl;
return 0;
} //end of main
| C++ |
3D | OpenMS/OpenMS | doc/doxygen/parameters/TOPPDocumenter.cpp | .cpp | 10,954 | 321 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Marc Sturm, Mathias Walzer, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/ToolHandler.h>
#include <OpenMS/SYSTEM/File.h>
#include <OpenMS/FORMAT/XMLFile.h>
#include <OpenMS/FORMAT/ParamXMLFile.h>
#include <OpenMS/SYSTEM/ExternalProcess.h>
#include <iostream>
#include <fstream>
#include <sstream>
using namespace std;
using namespace OpenMS;
using namespace Internal;
void convertINI2HTML(const Param& p, ostream& os)
{
// the .css file is included via the Header.html (see doc/doxygen/common/Header.html)
// TODO add some general description on how to handle subsections, what each column means, what the tags mean, etc.
os << "<div class=\"ini_global\">\n";
os << "<div class=\"legend\">\n";
os << "<b>Legend:</b><br>\n";
os << " <div class=\"item item_required\">required parameter</div>\n";
os << " <div class=\"item item_advanced\">advanced parameter</div>\n";
os << "</div>\n";
Param::ParamIterator it = p.begin();
String indentation = " ";
while (it != p.end())
{
string key = it.getName();
//write opened/closed nodes
const std::vector<Param::ParamIterator::TraceInfo>& trace = it.getTrace();
for (std::vector<Param::ParamIterator::TraceInfo>::const_iterator it2 = trace.begin(); it2 != trace.end(); ++it2)
{
if (it2->opened) //opened node
{
String d = it2->description;
d.substitute("\n", "<br>");
os << indentation
<< R"(<div class="node"><span class="node_name">)"
// TODO replace/remove weird "(TOPPAS) instance 1" nodes that only confuse people.
<< (String().fillLeft('+', (UInt) indentation.size() / 2) + it2->name)
<< "</span><span class=\"node_description\">"
<< (d)
<< "</span></div>"
<< "\n";
indentation += " ";
}
else //closed node
{
indentation.resize(indentation.size() - 2);
//os << indentation << "</div>" << "\n";
}
}
//write item
String s_attr;
String s_req;
if (it->tags.find("advanced") != it->tags.end())
s_attr += " item_advanced"; // optionally add advanced class
if (it->tags.find("required") != it->tags.end())
s_req += " item_required"; // optionally add required class
ParamValue::ValueType value_type = it->value.valueType();
//write opening tag
os << indentation
<< "<div class=\"item"
<< s_attr
<< "\"><span class=\"item_name"
<< s_req
<< "\" style=\"padding-left:"
<< indentation.size() * 4
<< "px;\">"
<< (it->name)
<< "</span><span class=\"item_value\">"
<< it->value.toString()
<< "</span>"
<< "\n";
//replace all critical characters in description
String d = it->description;
d.substitute("\n", "<br>");
os << "<span class=\"item_description\">" << (d) << "</span>";
//tags
String list;
for (auto tag_it = it->tags.begin(); tag_it != it->tags.end(); ++tag_it)
{
if (*tag_it == "advanced")
continue; // do not list "advanced" or "required" (this is done by color coding)
if (*tag_it == "required")
continue;
if (!list.empty())
list += ", ";
list += *tag_it;
}
os << "<span class=\"item_tags\">" << (list) << "</span>";
//restrictions
String restrictions = "";
switch (value_type)
{
case ParamValue::INT_VALUE:
case ParamValue::INT_LIST:
{
// TODO think about doing the same infinity replacement
// for default values. A single ":" looks weird.
bool min_set = (it->min_int != -numeric_limits<Int>::max());
bool max_set = (it->max_int != numeric_limits<Int>::max());
if (max_set || min_set)
{
if (min_set)
restrictions += String(it->min_int);
else
restrictions += "-∞"; // infinity symbol
restrictions += ':';
if (max_set)
restrictions += String(it->max_int);
else
restrictions += "∞";
}
}
break;
case ParamValue::DOUBLE_VALUE:
case ParamValue::DOUBLE_LIST:
{
bool min_set = (it->min_float != -numeric_limits<double>::max());
bool max_set = (it->max_float != numeric_limits<double>::max());
if (max_set || min_set)
{
if (min_set)
restrictions += String(it->min_float);
else
restrictions += "-∞"; // infinity symbol
restrictions += ':';
if (max_set)
restrictions += String(it->max_float);
else
restrictions += "∞";
}
}
break;
case ParamValue::STRING_VALUE:
case ParamValue::STRING_LIST:
if (!it->valid_strings.empty())
{
// make sure browsers can word wrap with additional whitespace
// TODO: If param name is *modification* just add a link to
// a page with all modifications otherwise you get a HUGE list.
// Also think about a different separator, in case the restrictions have commas.
restrictions.concatenate(it->valid_strings.begin(), it->valid_strings.end(), ", ");
}
else if (value_type == ParamValue::STRING_VALUE)
{
// Issue #8475: Flag parameters are written as type="bool" in INI, which loads
// as STRING_VALUE with no valid_strings. Detect these by checking if value is boolean.
String val = it->value.toString();
if (val == "true" || val == "false")
{
restrictions = "(flag)";
}
}
break;
default:
break;
}
if (restrictions.empty())
restrictions = " "; // create content, such that the cell gets an underline
os << "<span class=\"item_restrictions\">" << restrictions << "</span>";
os << "</div>"; // end div item
++it;
}
os << "</div>\n"; // end global div
}
// Runs every tool in 'tools' with '--help' and stores the console output as
// 'output/<prefix><toolname>.cli'; afterwards lets each tool write its INI
// file (-write_ini) and converts it to 'output/<prefix><toolname>.html' via
// convertINI2HTML().
// @param tools            map of tool name -> tool description
// @param prefix           file name prefix for generated files (e.g. "TOPP_")
// @param binary_directory directory containing the tool executables
// @return true if errors occurred for ANY tool (note the inverted meaning!)
bool generate(const ToolListType& tools, const String& prefix, const String& binary_directory)
{
  // Add an environment variable (used by each TOPP tool to determine width of help text (see TOPPBase))
  qputenv("COLUMNS", "110");
  // Add Global environment variable to suppress stty errors
  qputenv("TERM", "dumb");
  qputenv("STTY", "/bin/true");
  bool errors_occured = false;
  for (ToolListType::const_iterator it = tools.begin(); it != tools.end(); ++it)
  {
    String command = binary_directory + it->first;
#if defined(__APPLE__)
    // GUI tools live inside an .app bundle on macOS
    if (it->first == "TOPPView" || it->first == "TOPPAS")
    {
      command = binary_directory + it->first + ".app/Contents/MacOS/" + it->first;
    }
#endif
#ifdef OPENMS_WINDOWSPLATFORM
    command += ".exe"; // otherwise File::exists() will fail
#endif
    // the tool's --help output is captured in this .cli file
    ofstream f((String("output/") + prefix + it->first + ".cli").c_str());
    if (!File::exists(command))
    {
      stringstream ss;
      ss << "Errors occurred while generating the command line documentation for " << it->first << "!" << endl;
      ss << "Tool could not be found at '" << command << "'\n " << command << endl;
      f << ss.str();
      cerr << ss.str();
      errors_occured = true;
      f.close();
      continue;
    }
    else
    {
      // run '<tool> --help' and stream stdout/stderr into the .cli file
      ExternalProcess ep([&](const String& s) { f << s; },
                         [&](const String& s) { f << s; });
      String error_msg;
      if (ep.run(command.toQString(), QStringList() << "--help", "", false, error_msg, ExternalProcess::IO_MODE::READ_WRITE)
          != ExternalProcess::RETURNSTATE::SUCCESS)
      { // error while generation cli docu
        // re-run once more to capture the tool's output into the error report
        stringstream ss;
        ss << "Errors occurred while generating the command line documentation for " << it->first << "!" << endl;
        ss << "Output was: \n";
        ep.setCallbacks([&](const String& s) { ss << s; }, [&](const String& s) { ss << s; });
        ep.run(command.toQString(), QStringList() << "--help", "", false, error_msg, ExternalProcess::IO_MODE::READ_WRITE);
        ss << "\nCommand line was: \n " << command << endl;
        f << ss.str();
        cerr << ss.str();
        errors_occured = true;
        f.close();
        continue;
      }
    }
    f.close();
    //////
    // get the INI file and convert it into HTML
    //////
    if (it->first != "GenericWrapper" && // does not support -write_ini without a type
        it->first != "TOPPView" && // do not support -write_ini
        it->first != "TOPPAS")
    {
      String tmp_file = File::getTempDirectory() + "/" + File::getUniqueName() + "_" + it->first + ".ini";
      const auto ini_command_args = QStringList() << "-write_ini" << tmp_file.toQString();
      // NOTE(review): 'f' was closed above, so these lambdas write to a closed
      // stream (console output of the -write_ini run is discarded); presumably
      // intentional — confirm.
      ExternalProcess ep([&](const String& s) { f << s; }, [&](const String& s) { f << s; });
      String error_msg;
      if (ep.run(command.toQString(), ini_command_args, "", false, error_msg,
                 ExternalProcess::IO_MODE::READ_WRITE)
          != ExternalProcess::RETURNSTATE::SUCCESS
          || ! File::exists(tmp_file))
      { // error while generation cli docu
        std::cerr << "Errors occurred while writing ini file for " << it->first << "!" << std::endl;
        std::cerr << "Command line was: \n " << command << ini_command_args.join(" ").toStdString() << std::endl;
        errors_occured = true;
        continue;
      }
      // load content of written ini file
      Param p;
      ParamXMLFile pf;
      pf.load(tmp_file, p);
      File::remove(tmp_file);
      // render the parameter tree as HTML next to the .cli file
      ofstream f_html((String("output/") + prefix + it->first + ".html").c_str());
      convertINI2HTML(p, f_html);
      f_html.close();
    }
  }
  return errors_occured;
}
int main(int argc, char** argv)
{
  // exactly one argument is expected: the directory with the TOPP binaries
  if (argc != 2)
  {
    cerr << "Please specify the path where the TOPP binaries are located." << endl;
    return EXIT_FAILURE;
  }
  String binary_directory = String(argv[1]).ensureLastChar('/');
  if (!File::exists(binary_directory))
  {
    cerr << "The given binary directory does not exist. Aborting." << endl;
    return EXIT_FAILURE;
  }
  // collect the TOPP tools, including GenericWrapper (it can be called with
  // --help without error, even though it has a type)
  ToolListType topp_tools = ToolHandler::getTOPPToolList(true);
  // TOPPView/TOPPAS are documented too, but generate() skips -write_ini for them
  topp_tools["TOPPView"] = Internal::ToolDescription();
  topp_tools["TOPPAS"] = Internal::ToolDescription();
  if (generate(topp_tools, "TOPP_", binary_directory))
  {
    // errors occurred while generating the TOPP CLI docu .. tell the user
    cerr << "Errors occurred while generating the command line documentation for some of the TOPP tools." << endl;
    return EXIT_FAILURE;
  }
  return EXIT_SUCCESS;
}
| C++ |
3D | OpenMS/OpenMS | doc/doxygen/parameters/DefaultParamHandlerDocumenter.cpp | .cpp | 20,032 | 482 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: $
// $Authors: Marc Sturm $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/DECHARGING/FeatureDeconvolution.h>
#include <OpenMS/ANALYSIS/DECHARGING/MetaboliteFeatureDeconvolution.h>
#include <OpenMS/ANALYSIS/ID/AScore.h>
#include <OpenMS/ANALYSIS/ID/AccurateMassSearchEngine.h>
#include <OpenMS/ANALYSIS/ID/BasicProteinInferenceAlgorithm.h>
#include <OpenMS/ANALYSIS/ID/BayesianProteinInferenceAlgorithm.h>
#include <OpenMS/ANALYSIS/ID/ConsensusIDAlgorithm.h>
#include <OpenMS/ANALYSIS/ID/ConsensusIDAlgorithmAverage.h>
#include <OpenMS/ANALYSIS/ID/ConsensusIDAlgorithmBest.h>
#include <OpenMS/ANALYSIS/ID/ConsensusIDAlgorithmPEPIons.h>
#include <OpenMS/ANALYSIS/ID/ConsensusIDAlgorithmPEPMatrix.h>
#include <OpenMS/ANALYSIS/ID/ConsensusIDAlgorithmRanks.h>
#include <OpenMS/ANALYSIS/ID/ConsensusIDAlgorithmWorst.h>
#include <OpenMS/ANALYSIS/ID/FIAMSDataProcessor.h>
#include <OpenMS/ANALYSIS/ID/FalseDiscoveryRate.h>
#include <OpenMS/ANALYSIS/ID/IDDecoyProbability.h>
#include <OpenMS/ANALYSIS/ID/IDMapper.h>
#include <OpenMS/ANALYSIS/ID/IDRipper.h>
#include <OpenMS/ANALYSIS/ID/PeptideIndexing.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureDistance.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithmKD.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithmLabeled.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithmQT.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithmUnlabeled.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/LabeledPairFinder.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentAlgorithmIdentification.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentAlgorithmPoseClustering.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentAlgorithmTreeGuided.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/PoseClusteringAffineSuperimposer.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/PoseClusteringShiftSuperimposer.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/QTClusterFinder.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/StablePairFinder.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DIAPrescoring.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DIAScoring.h>
#include <OpenMS/ANALYSIS/OPENSWATH/MRMDecoy.h>
#include <OpenMS/ANALYSIS/OPENSWATH/MRMFeatureFilter.h>
#include <OpenMS/ANALYSIS/OPENSWATH/MRMFeatureFinderScoring.h>
#include <OpenMS/ANALYSIS/OPENSWATH/MRMTransitionGroupPicker.h>
#include <OpenMS/ANALYSIS/OPENSWATH/PeakIntegrator.h>
#include <OpenMS/ANALYSIS/OPENSWATH/PeakPickerChromatogram.h>
#include <OpenMS/ANALYSIS/OPENSWATH/PeakPickerMobilogram.h>
#include <OpenMS/ANALYSIS/OPENSWATH/TransitionPQPFile.h>
#include <OpenMS/ANALYSIS/OPENSWATH/TransitionTSVFile.h>
#include <OpenMS/ANALYSIS/QUANTITATION/IsobaricChannelExtractor.h>
#include <OpenMS/ANALYSIS/QUANTITATION/IsobaricQuantifier.h>
#include <OpenMS/ANALYSIS/QUANTITATION/ItraqEightPlexQuantitationMethod.h>
#include <OpenMS/ANALYSIS/QUANTITATION/ItraqFourPlexQuantitationMethod.h>
#include <OpenMS/ANALYSIS/QUANTITATION/PeptideAndProteinQuant.h>
#include <OpenMS/ANALYSIS/QUANTITATION/TMTEighteenPlexQuantitationMethod.h>
#include <OpenMS/ANALYSIS/QUANTITATION/TMTSixPlexQuantitationMethod.h>
#include <OpenMS/ANALYSIS/QUANTITATION/TMTSixteenPlexQuantitationMethod.h>
#include <OpenMS/ANALYSIS/QUANTITATION/TMTTenPlexQuantitationMethod.h>
#include <OpenMS/ML/SVM/SimpleSVM.h>
#include <OpenMS/APPLICATIONS/MapAlignerBase.h>
#include <OpenMS/CHEMISTRY/MASSDECOMPOSITION/MassDecompositionAlgorithm.h>
#include <OpenMS/CHEMISTRY/NucleicAcidSpectrumGenerator.h>
#include <OpenMS/CHEMISTRY/SimpleTSGXLMS.h>
#include <OpenMS/CHEMISTRY/SpectrumAnnotator.h>
#include <OpenMS/CHEMISTRY/TheoreticalSpectrumGenerator.h>
#include <OpenMS/CHEMISTRY/TheoreticalSpectrumGeneratorXLMS.h>
#include <OpenMS/COMPARISON/BinnedSharedPeakCount.h>
#include <OpenMS/COMPARISON/BinnedSpectralContrastAngle.h>
#include <OpenMS/COMPARISON/BinnedSpectrumCompareFunctor.h>
#include <OpenMS/COMPARISON/BinnedSumAgreeingIntensities.h>
#include <OpenMS/COMPARISON/PeakAlignment.h>
#include <OpenMS/COMPARISON/PeakSpectrumCompareFunctor.h>
#include <OpenMS/COMPARISON/SpectrumAlignment.h>
#include <OpenMS/COMPARISON/SpectrumAlignmentScore.h>
#include <OpenMS/COMPARISON/SpectrumCheapDPCorr.h>
#include <OpenMS/COMPARISON/SpectrumPrecursorComparator.h>
#include <OpenMS/COMPARISON/SteinScottImproveScore.h>
#include <OpenMS/COMPARISON/ZhangSimilarityScore.h>
#include <OpenMS/PROCESSING/BASELINE/MorphologicalFilter.h>
#include <OpenMS/FEATUREFINDER/ElutionPeakDetection.h>
#include <OpenMS/FEATUREFINDER/FeatureFindingMetabo.h>
#include <OpenMS/FEATUREFINDER/MassTraceDetection.h>
#include <OpenMS/PROCESSING/NOISEESTIMATION/SignalToNoiseEstimator.h>
#include <OpenMS/PROCESSING/NOISEESTIMATION/SignalToNoiseEstimatorMeanIterative.h>
#include <OpenMS/PROCESSING/NOISEESTIMATION/SignalToNoiseEstimatorMedian.h>
#include <OpenMS/PROCESSING/SMOOTHING/GaussFilter.h>
#include <OpenMS/PROCESSING/SMOOTHING/LowessSmoothing.h>
#include <OpenMS/PROCESSING/SMOOTHING/SavitzkyGolayFilter.h>
#include <OpenMS/PROCESSING/RESAMPLING/LinearResampler.h>
#include <OpenMS/PROCESSING/FILTERING/NLargest.h>
#include <OpenMS/PROCESSING/SCALING/Normalizer.h>
#include <OpenMS/PROCESSING/SPECTRAMERGING/SpectraMerger.h>
#include <OpenMS/PROCESSING/SCALING/SqrtScaler.h>
#include <OpenMS/PROCESSING/FILTERING/ThresholdMower.h>
#include <OpenMS/PROCESSING/FILTERING/WindowMower.h>
#include <OpenMS/FORMAT/MSPFile.h>
#include <OpenMS/FORMAT/MascotGenericFile.h>
#include <OpenMS/FORMAT/MascotRemoteQuery.h>
#include <OpenMS/MATH/MISC/EmgGradientDescent.h>
#include <OpenMS/MATH/STATISTICS/PosteriorErrorProbabilityModel.h>
#include <OpenMS/QC/DBSuitability.h>
#include <OpenMS/FEATUREFINDER/BaseModel.h>
#include <OpenMS/FEATUREFINDER/BiGaussFitter1D.h>
#include <OpenMS/FEATUREFINDER/BiGaussModel.h>
#include <OpenMS/FEATUREFINDER/EGHTraceFitter.h>
#include <OpenMS/FEATUREFINDER/ElutionModelFitter.h>
#include <OpenMS/FEATUREFINDER/EmgFitter1D.h>
#include <OpenMS/FEATUREFINDER/EmgModel.h>
#include <OpenMS/FEATUREFINDER/ExtendedIsotopeFitter1D.h>
#include <OpenMS/FEATUREFINDER/ExtendedIsotopeModel.h>
#include <OpenMS/FEATUREFINDER/FeatureFinderAlgorithmMetaboIdent.h>
#include <OpenMS/FEATUREFINDER/FeatureFinderAlgorithmPicked.h>
#include <OpenMS/FEATUREFINDER/Fitter1D.h>
#include <OpenMS/FEATUREFINDER/GaussFitter1D.h>
#include <OpenMS/FEATUREFINDER/GaussModel.h>
#include <OpenMS/FEATUREFINDER/GaussTraceFitter.h>
#include <OpenMS/FEATUREFINDER/InterpolationModel.h>
#include <OpenMS/FEATUREFINDER/IsotopeFitter1D.h>
#include <OpenMS/FEATUREFINDER/IsotopeModel.h>
#include <OpenMS/FEATUREFINDER/MaxLikeliFitter1D.h>
#include <OpenMS/FEATUREFINDER/MultiplexDeltaMassesGenerator.h>
#include <OpenMS/FEATUREFINDER/TraceFitter.h>
#include <OpenMS/PROCESSING/CENTROIDING/PeakPickerHiRes.h>
#include <OpenMS/PROCESSING/CENTROIDING/PeakPickerIterative.h>
// those are only added if GUI is enabled
#ifdef WITH_GUI
#include <QApplication>
#include <OpenMS/VISUAL/PlotCanvas.h>
#include <OpenMS/VISUAL/Plot1DCanvas.h>
#include <OpenMS/VISUAL/Plot2DCanvas.h>
#include <OpenMS/VISUAL/Plot3DCanvas.h>
#include <OpenMS/VISUAL/SpectraIDViewTab.h>
#include <OpenMS/VISUAL/APPLICATIONS/TOPPASBase.h>
#include <OpenMS/VISUAL/APPLICATIONS/TOPPViewBase.h>
#endif
#include <fstream>
using namespace std;
using namespace OpenMS;
// this weird piece of code is required to avoid the following linker errors in VS2019
/*
Error LNK2001 unresolved external symbol "public: virtual void * __cdecl OpenMS::MSExperiment::`scalar deleting destructor'(unsigned int)" (??_GMSExperiment@OpenMS@@UEAAPEAXI@Z) DefaultParamHandlerDocumenter C:\dev\openms_test_build19\doc\DefaultParamHandlerDocumenter.obj 1
Error LNK2019 unresolved external symbol "public: virtual void * __cdecl OpenMS::MSExperiment::`vector deleting destructor'(unsigned int)" (??_EMSExperiment@OpenMS@@UEAAPEAXI@Z) referenced in function "[thunk]:public: virtual void * __cdecl OpenMS::MSExperiment::`vector deleting destructor'`adjustor{72}' (unsigned int)" (??_EMSExperiment@OpenMS@@WEI@EAAPEAXI@Z) DefaultParamHandlerDocumenter C:\dev\openms_test_build19\doc\DefaultParamHandlerDocumenter.obj 1
see https://stackoverflow.com/a/74235019/1913074
Alternatively, define ~MSExperiment(){}; instead of using ' = default;'
*/
void foo()
{
  // never called at runtime: only forces instantiation of MSExperiment's
  // destructor to work around the VS2019 linker errors described above
  auto p = new MSExperiment();
  delete p;
}
//**********************************************************************************
//Helper method - use this method to generate the actual parameter documentation
//**********************************************************************************
// Writes the default parameters of a DefaultParamHandler-derived class as an
// HTML table to 'output/OpenMS_<class_name>.parameters' (the file is later
// included by the doxygen documentation).
// @param class_name name used to build the output file name
// @param param      default parameter tree of the class
// @param table_only if true, omit the leading sentence and the trailing notes
void writeParameters(const String& class_name, const Param& param, bool table_only = false)
{
  const String filename = String("output/OpenMS_") + class_name + ".parameters";
  ofstream f(filename.c_str());
  if (!f)
  {
    std::cerr << "Cannot open file '" << filename << "'. Check for invalid characters in filename and permissions.\n";
    exit(1);
  }
  if (!table_only)
  {
    f << "<B>Parameters of this class are:</B><BR><BR>\n";
  }
  f << R"(<table class="doxtable" border="1" width="100%" cellpadding="4">)" << endl;
  f << "<tr><th>Name</th><th>Type</th><th>Default</th><th>Restrictions</th><th>Description</th></tr>" << endl;
  String type, description, restrictions;
  for (Param::ParamIterator it = param.begin(); it != param.end(); ++it)
  {
    // determine the type column and the min/max or valid-string restrictions
    // NOTE(review): 'type' is not reset per iteration, so a value type not
    // handled below would carry over the previous row's type — presumably all
    // documented parameters are int/float/string; confirm.
    restrictions = "";
    if (it->value.valueType() == ParamValue::INT_VALUE || it->value.valueType() == ParamValue::INT_LIST)
    {
      type = "int";
      if (it->value.valueType() == ParamValue::INT_LIST)
      {
        type += " list";
      }
      //restrictions
      bool first = true;
      if (it->min_int != -(numeric_limits<Int>::max)())
      {
        restrictions += String("min: ") + it->min_int;
        first = false;
      }
      if (it->max_int != (numeric_limits<Int>::max)())
      {
        if (!first)
        {
          restrictions += ' ';
        }
        restrictions += String("max: ") + it->max_int;
      }
    }
    else if (it->value.valueType() == ParamValue::DOUBLE_VALUE || it->value.valueType() == ParamValue::DOUBLE_LIST)
    {
      type = "float";
      if (it->value.valueType() == ParamValue::DOUBLE_LIST)
        type += " list";
      //restrictions
      bool first = true;
      if (it->min_float != -(numeric_limits<double>::max)())
      {
        restrictions += String("min: ") + it->min_float;
        first = false;
      }
      if (it->max_float != (numeric_limits<double>::max)())
      {
        if (!first)
          restrictions += ' ';
        restrictions += String("max: ") + it->max_float;
      }
    }
    else if (it->value.valueType() == ParamValue::STRING_VALUE || it->value.valueType() == ParamValue::STRING_LIST)
    {
      type = "string";
      if (it->value.valueType() == ParamValue::STRING_LIST)
        type += " list";
      //restrictions
      if (!it->valid_strings.empty())
      {
        String valid_strings;
        valid_strings.concatenate(it->valid_strings.begin(), it->valid_strings.end(), ", ");
        restrictions += valid_strings;
      }
    }
    if (restrictions == "")
    {
      restrictions = " ";
    }
    //replace #, @ and newline in description
    // (two-step substitution avoids touching already-inserted characters:
    //  the net effect is '@' -> '@@' and '#' -> '@#', which doxygen shows literally)
    description = param.getDescription(it.getName());
    description.substitute("@", "XXnot_containedXX");
    description.substitute("XXnot_containedXX", "@@");
    description.substitute("#", "XXnot_containedXX");
    description.substitute("XXnot_containedXX", "@#");
    description.substitute("\n", "<BR>");
    //create tooltips for sections if they are documented
    String name = it.getName();
    vector<String> parts;
    name.split(':', parts);
    String prefix = "";
    for (Size i = 0; i + 1 < parts.size(); ++i)
    {
      // 'prefix' is the section path up to (and including) part i
      if (i == 0)
      {
        prefix = parts[i];
      }
      else
      {
        prefix = prefix + ":" + parts[i];
      }
      String docu = param.getSectionDescription(prefix);
      if (docu != "")
      {
        parts[i] = String("<span title=\"") + docu + "\">" + parts[i] + "</span>";
      }
    }
    if (parts.size() != 0)
    {
      name.concatenate(parts.begin(), parts.end(), ":");
    }
    //replace # and @ in values (same doxygen escaping as for the description)
    String value = it->value.toString(true);
    value.substitute("@", "XXnot_containedXX");
    value.substitute("XXnot_containedXX", "@@");
    value.substitute("#", "XXnot_containedXX");
    value.substitute("XXnot_containedXX", "@#");
    //make the advanced parameters cursive, the normal ones bold
    String style = "b";
    if (it->tags.count("advanced") == 1)
      style = "i";
    //final output
    f << "<tr>\n"
      << " <td style=\"vertical-align:top\"><" << style << ">" << name << "</" << style << "></td>\n"
      << " <td style=\"vertical-align:top\">" << type << "</td><td style=\"vertical-align:top\">" << value << "</td>\n"
      << " <td style=\"vertical-align:top\">" << restrictions << "</td><td style=\"vertical-align:top\">" << description << "</td>\n"
      << "</tr>\n";
  }
  f << "</table>" << "\n";
  if (!table_only)
  {
    f << "<br>" << "\n"
      << "<b>Note:</b>" << "\n"
      << "<UL style=\"margin-top:0px;\">" << "\n"
      << " <LI> If a section name is documented, the documentation is displayed as tooltip." << "\n"
      << " <LI> Advanced parameter names are italic." << "\n"
      << "</UL>" << "\n";
  }
  f.close();
}
//**********************************************************************************
//Helper macros that can be used for easy classes
//**********************************************************************************
// For classes that have a default-constructor, simply use this macro with the
// class name
#define DOCME(class_name) \
writeParameters("" # class_name, class_name().getDefaults());
// For class templates and classes without default constructor use this macro
// with (1.) the class name and (2.) a class instance.
#define DOCME2(class_template_name, instantiation) \
writeParameters("" # class_template_name, (instantiation).getDefaults());
//**********************************************************************************
//Main method - add your class here
//**********************************************************************************
int main(int argc, char** argv)
{
// Documentation generator: dumps the default parameter set of (nearly) every
// parameter-bearing OpenMS class via writeParameters(), so the doxygen build
// can include per-class parameter tables. To document a new class, add a
// DOCME/DOCME2 line below.
//////////////////////////////////
// Simple cases (default-constructible classes)
//////////////////////////////////
DOCME(AScore);
DOCME(BasicProteinInferenceAlgorithm);
DOCME(BayesianProteinInferenceAlgorithm);
DOCME(TransitionPQPFile);
DOCME(BiGaussFitter1D);
DOCME(BiGaussModel);
DOCME(BinnedSharedPeakCount);
DOCME(BinnedSpectralContrastAngle);
DOCME(BinnedSumAgreeingIntensities);
DOCME(ConsensusIDAlgorithmAverage);
DOCME(ConsensusIDAlgorithmBest);
DOCME(ConsensusIDAlgorithmPEPIons);
DOCME(ConsensusIDAlgorithmPEPMatrix);
DOCME(ConsensusIDAlgorithmRanks);
DOCME(ConsensusIDAlgorithmWorst);
DOCME(DBSuitability);
DOCME(DiaPrescore);
DOCME(DIAScoring);
DOCME(ElutionModelFitter);
DOCME(EmgFitter1D);
DOCME(EmgGradientDescent) // no ';' needed — the macro supplies it
DOCME(EmgModel);
DOCME(ExtendedIsotopeFitter1D);
DOCME(ExtendedIsotopeModel);
DOCME(FalseDiscoveryRate);
DOCME(FeatureDeconvolution);
DOCME(FeatureDistance);
DOCME(FeatureFinderAlgorithmMetaboIdent);
DOCME(ElutionPeakDetection);
DOCME(FeatureFindingMetabo);
DOCME(FeatureGroupingAlgorithmLabeled);
DOCME(FeatureGroupingAlgorithmQT);
DOCME(FeatureGroupingAlgorithmKD);
DOCME(FeatureGroupingAlgorithmUnlabeled);
DOCME(MapAlignmentAlgorithmIdentification);
DOCME(MapAlignmentAlgorithmTreeGuided);
DOCME(MassTraceDetection);
DOCME(FIAMSDataProcessor);
DOCME(GaussFilter);
DOCME(GaussFitter1D);
DOCME(GaussModel);
DOCME(IDMapper);
DOCME(IDRipper);
DOCME(InterpolationModel);
DOCME(IsotopeFitter1D);
DOCME(IsotopeModel);
DOCME(TMTSixPlexQuantitationMethod);
DOCME(TMTTenPlexQuantitationMethod);
DOCME(TMTSixteenPlexQuantitationMethod);
DOCME(TMTEighteenPlexQuantitationMethod);
DOCME(ItraqEightPlexQuantitationMethod);
DOCME(ItraqFourPlexQuantitationMethod);
DOCME(LabeledPairFinder);
DOCME(LinearResampler);
DOCME(MSPFile);
DOCME(MapAlignmentAlgorithmPoseClustering);
DOCME(SpectrumAnnotator);
DOCME(TheoreticalSpectrumGeneratorXLMS);
DOCME(MRMDecoy);
DOCME(MetaboliteFeatureDeconvolution);
DOCME(MRMFeatureFilter);
DOCME(MRMFeatureFinderScoring);
DOCME(MRMTransitionGroupPicker);
DOCME(MultiplexDeltaMassesGenerator);
DOCME(NucleicAcidSpectrumGenerator);
DOCME(NLargest);
DOCME(Normalizer);
DOCME(PeakAlignment);
DOCME(PeakIntegrator);
DOCME(PeakPickerHiRes);
DOCME(PeakPickerIterative);
DOCME(PeakPickerChromatogram);
DOCME(PeakPickerMobilogram);
DOCME(PeptideIndexing);
DOCME(PoseClusteringAffineSuperimposer);
DOCME(PoseClusteringShiftSuperimposer);
DOCME(QTClusterFinder);
DOCME(SavitzkyGolayFilter);
DOCME(LowessSmoothing);
DOCME(SimpleSVM);
DOCME(StablePairFinder);
DOCME(SpectrumAlignment);
DOCME(SpectrumAlignmentScore);
DOCME(SpectrumCheapDPCorr);
DOCME(SpectrumPrecursorComparator);
DOCME(SqrtScaler);
DOCME(SteinScottImproveScore);
DOCME(SpectraMerger);
DOCME(TheoreticalSpectrumGenerator);
DOCME(ThresholdMower);
DOCME(TransitionTSVFile);
DOCME(IDDecoyProbability);
DOCME(WindowMower);
DOCME(ZhangSimilarityScore);
DOCME(MorphologicalFilter);
DOCME(MassDecompositionAlgorithm);
DOCME(MascotRemoteQuery);
DOCME(MascotGenericFile);
DOCME(Fitter1D);
DOCME(PeptideAndProteinQuant);
DOCME(SimpleTSGXLMS);
// workarounds for documenting model parameters in MapAligners:
// (documented via the model defaults rather than the aligner class itself)
writeParameters("MapAlignerIdentificationModel", MapAlignerBase::getModelDefaults("interpolated"), true);
writeParameters("MapAlignerPoseClusteringModel", MapAlignerBase::getModelDefaults("linear"), true);
writeParameters("MapRTTransformerModel", MapAlignerBase::getModelDefaults("none"), true);
//////////////////////////////////
// More complicated cases (abstract bases / no default constructor)
//////////////////////////////////
// ConsensusIDAlgorithm...: abstract base classes, get param. from subclass:
DOCME2(ConsensusIDAlgorithm, (ConsensusIDAlgorithmBest()));
DOCME2(ConsensusIDAlgorithmIdentity, (ConsensusIDAlgorithmBest()));
DOCME2(ConsensusIDAlgorithmSimilarity, (ConsensusIDAlgorithmBest()));
DOCME2(FeatureFinderAlgorithmPicked, (FeatureFinderAlgorithmPicked()));
DOCME2(SignalToNoiseEstimatorMeanIterative, SignalToNoiseEstimatorMeanIterative<>());
DOCME2(SignalToNoiseEstimatorMedian, SignalToNoiseEstimatorMedian<>());
DOCME2(SignalToNoiseEstimator, SignalToNoiseEstimatorMedian<>()); //SignalToNoiseEstimator is a base class, get parameters from subclass SignalToNoiseEstimatorMedian
DOCME2(GaussTraceFitter, (GaussTraceFitter()));
DOCME2(EGHTraceFitter, (EGHTraceFitter()));
DOCME2(TraceFitter, (GaussTraceFitter())); //TraceFitter is an abstract base class, get parameters from subclass GaussTraceFitter
DOCME2(BinnedSpectrumCompareFunctor, (BinnedSharedPeakCount())); //BaseModel is a base class, get parameters from subclass BinnedSharedPeakCount
// isobaric quantitation classes need a quantitation method instance:
ItraqFourPlexQuantitationMethod itraq4;
DOCME2(IsobaricChannelExtractor, (IsobaricChannelExtractor(&itraq4)))
DOCME2(IsobaricQuantifier, (IsobaricQuantifier(&itraq4)))
DOCME2(PosteriorErrorProbabilityModel, Math::PosteriorErrorProbabilityModel());
// handle GUI documentation separately (only built when GUI support is enabled)
#ifdef WITH_GUI
// some classes require a QApplication
QApplication app(argc, argv);
DOCME(TOPPASBase);
DOCME2(TOPPViewBase, TOPPViewBase(TOPPViewBase::TOOL_SCAN::SKIP_SCAN));
DOCME2(PlotCanvas, Plot1DCanvas(Param())); // PlotCanvas is abstract; use Plot1DCanvas defaults
DOCME2(Plot1DCanvas, Plot1DCanvas(Param()));
DOCME2(Plot2DCanvas, Plot2DCanvas(Param()));
DOCME2(Plot3DCanvas, Plot3DCanvas(Param()));
DOCME2(SpectraIDViewTab, SpectraIDViewTab(Param()));
#endif
return 0;
}
| C++ |
3D | OpenMS/OpenMS | doc/doxygen/test/Doxygen_Warning_Checker.cpp | .cpp | 2,282 | 64 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <iostream>
#include <string>
#include <fstream>
int main(int argc, char** argv)
{
  // Scan a doxygen error log and fail (exit 1) if it contains any warning
  // that is not on the whitelist of known-benign messages below.
  if (argc != 3)
  {
    std::cerr << "Usage:\n " << argv[0] << " <path to doxygen-error.log> <doxygen version to print>\n";
    return 1;
  }

  std::cout << "Note: Please make sure to run the 'doc' target before running this test, so the 'doxygen-error.log' is up to date.\n";
  // Echo the doxygen version — handy in CI/CD when local output differs and
  // you don't want to dig through CI logs to find the version used.
  std::cout << "Doxygen version: " << argv[2] << std::endl;

  std::ifstream log_stream(argv[1]);
  if (!log_stream)
  {
    std::cerr << "Error: File '" << argv[1] << "' cannot be opened.\n";
    return 1;
  }

  std::cout << "Opening '" << argv[1] << "' to check for doxygen errors...\n"
            << "----------- ERRORS/WARNINGS -----------------" << std::endl;

  int inspected = 0; // non-empty lines seen
  int critical = 0;  // lines that are real (non-whitelisted) warnings
  std::string line;
  while (std::getline(log_stream, line))
  {
    if (line.empty()) continue;
    ++inspected;
    // Whitelist of non-critical warnings:
    //
    // 1) Dot graph: we deliberately do not render huge graphs (unreadable),
    //    so skip: "warning: Included by graph for 'PeptideIdentification.h' not generated, too many nodes (68), threshold is 50. Consider increasing DOT_GRAPH_MAX_NODES."
    if (line.find("Consider increasing DOT_GRAPH_MAX_NODES") != std::string::npos) continue;
    // 2) ...
    // ...
    // anything else counts as a real warning — show it (for CI/CD logs)
    std::cerr << line << '\n';
    ++critical;
  }

  std::cout << "---------------------------------------------" << std::endl;
  std::cout << "Skipped over " << inspected - critical << " lines with unavoidable warnings";
  if (critical)
  {
    std::cerr << "\n\nFound " << critical << " Doxygen errors. See above. Please fix them.\n";
    return 1;
  }
  return 0;
}
3D | OpenMS/OpenMS | src/topp/QCExtractor.cpp | .cpp | 5,950 | 170 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Mathias Walzer $
// $Author: Mathias Walzer $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/FORMAT/CsvFile.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/DATASTRUCTURES/String.h>
#include <OpenMS/FORMAT/QcMLFile.h>
#include <QByteArray>
#include <QFile>
#include <QString>
#include <QFileInfo>
//~ #include <QIODevice>
#include <iostream>
#include <fstream>
#include <vector>
#include <map>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_QCExtractor QCExtractor
@brief Extracts a table attachment of a given quality parameter from a qcML file as tabular (text) format.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=3> → QCExtractor →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=2> @ref TOPP_QCEmbedder </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_QCShrinker </td>
</tr>
</table>
</CENTER>
If there is a table attached to a given qp that is needed as a single file, e.g. for easy input to plotting software, this can be extracted to a tabular (text) format.
- @p qp defines the qp name to which the table is attached;
- @p run the file that defined the run under which the qp for the attachment is aggregated as mzML file. The file is only used to extract the run name from the file name.
- @p name if no file for the run was given (or if the target qp is contained in a set), at least a name of the target run/set containing the qp for the attachment has to be given.
- @p set/run if the target qp is contained in a set, this has to be set here;
Output is in csv format (see parameter @p out_csv) which can be easily parsed by many programs.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_QCExtractor.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_QCExtractor.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPQCExtractor :
  public TOPPBase
{
public:
  TOPPQCExtractor():
    TOPPBase("QCExtractor", "Extracts a table attachment to a given qc parameter.",
             true, {{ "Walzer M, Pernas LE, Nasso S, Bittremieux W, Nahnsen S, Kelchtermans P, Martens, L", "qcML: An Exchange Format for Quality Control Metrics from Mass Spectrometry Experiments", "Molecular & Cellular Proteomics 2014; 13(8)" , "10.1074/mcp.M113.035907"}})
  {
  }

protected:
  /// Register the command line parameters of this tool.
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Input qcml file");
    setValidFormats_("in", ListUtils::create<String>("qcML"));
    registerStringOption_("qp", "<string>", "", "Target attachment qp.");
    registerInputFile_("run", "<file>", "", "The file that defined the run under which the qp for the attachment is aggregated as mzML file. The file is only used to extract the run name from the file name.", false);
    setValidFormats_("run", ListUtils::create<String>("mzML"));
    registerStringOption_("name", "<string>", "", "If no file for the run was given (or if the target qp is contained in a set), at least a name of the target run/set containing the qp for the attachment has to be given.", false);
    registerOutputFile_("out_csv", "<file>", "", "Output csv formatted table.");
    setValidFormats_("out_csv", ListUtils::create<String>("csv"));
  }

  /// Extracts the table attached to the target qp and writes it as csv.
  /// @return EXECUTION_OK on success, ILLEGAL_PARAMETERS when run/set cannot be
  ///         resolved, CANNOT_WRITE_OUTPUT_FILE when the csv cannot be opened.
  ExitCodes main_(int, const char**) override
  {
    //-------------------------------------------------------------
    // parsing parameters
    //-------------------------------------------------------------
    String in = getStringOption_("in");
    String csv = getStringOption_("out_csv");
    String target_qp = getStringOption_("qp");
    String target_run = getStringOption_("name");
    String target_file = getStringOption_("run");

    //-------------------------------------------------------------
    // reading input
    //------------------------------------------------------------
    // an mzML file name ('run') takes precedence over the plain 'name'
    if (!target_file.empty())
    {
      target_run = QFileInfo(QString::fromStdString(target_file)).baseName();
    }

    QcMLFile qcmlfile;
    qcmlfile.load(in);

    if (target_run.empty())
    {
      // no run given: only unambiguous if the file contains exactly one run
      std::vector<String> run_names;
      qcmlfile.getRunNames(run_names);
      if (run_names.size() == 1)
      {
        target_run = run_names.front();
      }
      else
      {
        cerr << "Error: You have to give at least one of the following parameters (in ascending precedence): name, run. Aborting!" << endl;
        return ILLEGAL_PARAMETERS;
      }
    }

    String csv_str = "";
    if (target_qp == "set id") // pseudo-qp: export the ID statistics of a whole set
    {
      if (qcmlfile.existsSet(target_run, true))
      {
        csv_str = qcmlfile.exportIDstats(target_run);
      }
      else
      {
        cerr << "Error: You have to specify an existing set for this qp. " << target_run << " seems not to exist. Aborting!" << endl;
        return ILLEGAL_PARAMETERS;
      }
    }
    else
    {
      //TODO warn when target_run is empty or not present in qcml
      csv_str = qcmlfile.exportAttachment(target_run, target_qp);
    }

    ofstream fout(csv.c_str());
    if (!fout) // previously a write failure went unnoticed
    {
      cerr << "Error: Could not open output file '" << csv << "'. Aborting!" << endl;
      return CANNOT_WRITE_OUTPUT_FILE;
    }
    fout << csv_str << endl;
    fout.close();
    return EXECUTION_OK;
    //~ TODO export table containing all given qp
  }

};
int main(int argc, const char** argv)
{
TOPPQCExtractor tool;
return tool.main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/QCEmbedder.cpp | .cpp | 9,899 | 272 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Mathias Walzer $
// $Author: Mathias Walzer $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/FORMAT/CsvFile.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/DATASTRUCTURES/String.h>
// TODO this is currently needed for attachments
#include <OpenMS/FORMAT/QcMLFile.h>
#include <OpenMS/FORMAT/ControlledVocabulary.h>
#include <OpenMS/SYSTEM/File.h>
#include <OpenMS/CONCEPT/UniqueIdGenerator.h>
#include <QByteArray>
#include <QFile>
#include <QString>
#include <QFileInfo>
//~ #include <QIODevice>
#include <iostream>
#include <fstream>
#include <vector>
#include <map>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_QCEmbedder QCEmbedder
@brief This application is used to embed tables or plots generated externally as attachments to existing quality parameters in qcML files.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=3> → QCEmbedder →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_QCExporter </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_QCMerger </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_CometAdapter </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_QCShrinker </td>
</tr>
</table>
</CENTER>
If there is additional data from external tools for a certain quality parameter (qp) in the qcML file at @p in, it can be attached in tabular (csv) format or as a png image file.
If no corresponding quality parameter is present, an empty one will be generated with the name of "default set name"/"default mzML file".
- @p qp_att_acc defines the qp cv accession of the qp to which the table/image is attached.
- @p cv_acc defines the cv accession of the attachment.
- @p run the file that defined the run under which the qp for the attachment is aggregated as mzML file. The file is only used to extract the run name from the file name.
- @p name if no file for the run was given (or if the target qp is contained in a set), at least a name of the target run/set containing the qp for the attachment has to be given.
- @p plot if a plot image is to be attached to a qp, this has to be specified here.
- @p table if a table is to be attached to a qp, this has to be specified here.
Output is in qcML format (see parameter @p out) which can be viewed directly in a modern browser (chromium, firefox, safari).
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_QCEmbedder.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_QCEmbedder.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPQCEmbedder :
  public TOPPBase
{
public:
  TOPPQCEmbedder() :
    TOPPBase("QCEmbedder", "Attaches a table or an image to a given qc parameter.", false, {{ "Walzer M, Pernas LE, Nasso S, Bittremieux W, Nahnsen S, Kelchtermans P, Martens, L", "qcML: An Exchange Format for Quality Control Metrics from Mass Spectrometry Experiments", "Molecular & Cellular Proteomics 2014; 13(8)" , "10.1074/mcp.M113.035907"}})
  {
  }

protected:
  /// Register the command line parameters of this tool.
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Input qcml file", false);
    setValidFormats_("in", ListUtils::create<String>("qcML"));
    registerStringOption_("qp_att_acc", "<string>", "", "Defines the qp cv accession of the qp to which the table/image is attached.", false);
    registerStringOption_("cv_acc", "<string>", "", "Defines the cv accession of the attachment.");
    registerInputFile_("run", "<file>", "", "The file that defined the run under which the qp for the attachment is aggregated as mzML file. The file is only used to extract the run name from the file name.", false);
    setValidFormats_("run", ListUtils::create<String>("mzML"));
    registerStringOption_("name", "<String>", "", "If no file for the run was given (or if the target qp is contained in a set), at least a name of the target run/set containing the qp for the attachment has to be given.", false);
    registerInputFile_("plot", "<file>", "", "If a plot image is to be attached to a qp, this has to be specified here.", false);
    setValidFormats_("plot", ListUtils::create<String>("PNG"));
    registerInputFile_("table", "<file>", "", "If a table is to be attached to a qp, this has to be specified here.", false);
    setValidFormats_("table", ListUtils::create<String>("csv"));
    registerOutputFile_("out", "<file>", "", "Output extended qcML file");
    setValidFormats_("out", ListUtils::create<String>("qcML"));
  }

  /// Attaches either a base64-encoded png plot or a csv table to the target
  /// quality parameter of a run (preferred) or set in the given qcML file.
  /// @return EXECUTION_OK on success, ILLEGAL_PARAMETERS on any input problem.
  ExitCodes main_(int, const char**) override
  {
    //-------------------------------------------------------------
    // parsing parameters
    //-------------------------------------------------------------
    String in = getStringOption_("in");
    String out = getStringOption_("out");
    String target_qp = getStringOption_("qp_att_acc");
    String target_acc = getStringOption_("cv_acc");
    String target_run = getStringOption_("name");
    String target_file = getStringOption_("run");
    String plot_file = getStringOption_("plot");
    String tab = getStringOption_("table");

    //-------------------------------------------------------------
    // fetch vocabularies (needed to resolve the attachment accession)
    //------------------------------------------------------------
    ControlledVocabulary cv;
    cv.loadFromOBO("PSI-MS", File::find("/CV/psi-ms.obo"));
    cv.loadFromOBO("QC", File::find("/CV/qc-cv.obo"));
    cv.loadFromOBO("QC", File::find("/CV/qc-cv-legacy.obo"));

    //-------------------------------------------------------------
    // reading input
    //------------------------------------------------------------
    // an mzML file name ('run') takes precedence over the plain 'name'
    if (!target_file.empty())
    {
      target_run = QFileInfo(QString::fromStdString(target_file)).baseName();
    }

    QcMLFile qcmlfile;
    if (!in.empty())
    {
      qcmlfile.load(in);
    }

    if (target_run.empty())
    {
      // no run given: only unambiguous if the file contains exactly one run
      std::vector<String> run_names;
      qcmlfile.getRunNames(run_names);
      if (run_names.size() == 1)
      {
        target_run = run_names.front();
      }
      else
      {
        cerr << "Error: You have to give at least one of the following parameters (in ascending precedence): name, run. Aborting!" << endl;
        return ILLEGAL_PARAMETERS;
      }
    }

    // read the (optional) plot image and base64-encode it for embedding
    QFile f(plot_file.c_str());
    String plot_b64;
    if (f.open(QIODevice::ReadOnly))
    {
      QByteArray ba = f.readAll();
      f.close();
      plot_b64 = String(ba.toBase64().toStdString());
    }

    // previously this case silently wrote the output without any attachment
    if (plot_b64.empty() && tab.empty())
    {
      cerr << "Error: Nothing valid to attach. Aborting!" << endl;
      return ILLEGAL_PARAMETERS;
    }

    QcMLFile::Attachment at;
    at.cvAcc = target_acc;
    at.id = String(UniqueIdGenerator::getUniqueId());
    at.cvRef = "QC"; //TODO assign right cv reference

    // resolve the attachment's name from its cv accession (shared by both
    // the plot and the table branch — was duplicated before)
    try
    {
      const ControlledVocabulary::CVTerm& term = cv.getTerm(target_acc);
      at.name = term.name; ///< Name
      //~ at.unitRef; //TODO MIME type
      //~ at.unitAcc;
    }
    catch (...)
    {
      cerr << "Error: You have to give the accession of an existing cv term. Aborting!" << endl;
      return ILLEGAL_PARAMETERS;
    }

    if (!plot_b64.empty()) // a plot takes precedence over a table
    {
      at.binary = plot_b64;
    }
    else // table attachment: first row is the header, rest are data rows
    {
      CsvFile csv_file(tab);
      if (csv_file.rowCount() > 1)
      {
        StringList header;
        csv_file.getRow(0, header);
        for (Size col = 0; col < header.size(); ++col)
        {
          at.colTypes.push_back(header[col]);
        }
        for (Size row = 1; row < csv_file.rowCount(); ++row)
        {
          StringList fields;
          csv_file.getRow(row, fields);
          if (fields.size() != at.colTypes.size()) // reject ragged tables
          {
            cerr << "Error: Row " << row << " of the table does not fit the header. Aborting!" << endl;
            return ILLEGAL_PARAMETERS;
          }
          at.tableRows.push_back(std::vector<String>(fields.begin(), fields.end()));
        }
      }
    }

    // attach to a run-level qp first; fall back to a set-level qp
    std::vector<String> ids;
    qcmlfile.existsRunQualityParameter(target_run, target_qp, ids);
    if (!ids.empty())
    {
      at.qualityRef = ids.front();
      qcmlfile.addRunAttachment(target_run, at);
    }
    else
    {
      qcmlfile.existsSetQualityParameter(target_run, target_qp, ids);
      if (!ids.empty())
      {
        at.qualityRef = ids.front();
        qcmlfile.addSetAttachment(target_run, at);
      }
      else
      {
        cerr << "Error: You have to give the accession of an existing cv term to attach to. Aborting!" << endl;
        return ILLEGAL_PARAMETERS;
      }
    }

    qcmlfile.store(out);
    return EXECUTION_OK;
  }

};
int main(int argc, const char** argv)
{
TOPPQCEmbedder tool;
return tool.main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/QCMerger.cpp | .cpp | 7,383 | 206 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Mathias Walzer $
// $Author: Mathias Walzer $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/FORMAT/CsvFile.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/DATASTRUCTURES/String.h>
#include <OpenMS/FORMAT/QcMLFile.h>
#include <QByteArray>
#include <QFile>
#include <QString>
#include <QFileInfo>
#include <OpenMS/MATH/MathFunctions.h>
#include <OpenMS/MATH/StatisticFunctions.h>
//~ #include <QIODevice>
#include <algorithm>
#include <fstream>
#include <vector>
#include <map>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_QCMerger QCMerger
@brief Merges two qcml files together.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=3> → QCMerger →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=2> @ref TOPP_QCCalculator </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_QCShrinker </td>
</tr>
</table>
</CENTER>
The two or more given files (see parameter @p in) are merged. If a run/set exists in several files, the quality parameters of these are merged as well.
Several runs from qcml files can be comprised in a set.
- @p setname If the runs of the given input files are to be comprised in a set, this will be the name of the set.
Output is in qcML format (see parameter @p out) which can be viewed directly in a modern browser (chromium, firefox, safari).
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_QCMerger.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_QCMerger.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPQCMerger :
public TOPPBase
{
public:
TOPPQCMerger() :
TOPPBase("QCMerger", "Merges two qcml files together.",
true, {{ "Walzer M, Pernas LE, Nasso S, Bittremieux W, Nahnsen S, Kelchtermans P, Martens, L", "qcML: An Exchange Format for Quality Control Metrics from Mass Spectrometry Experiments", "Molecular & Cellular Proteomics 2014; 13(8)" , "10.1074/mcp.M113.035907"}})
{
}
protected:
void registerOptionsAndFlags_() override
{
registerInputFileList_("in", "<files>", StringList(), "List of qcml files to be merged.");
setValidFormats_("in", ListUtils::create<String>("qcML"));
registerOutputFile_("out", "<file>", "", "Output extended/reduced qcML file");
setValidFormats_("out",ListUtils::create<String>("qcML"));
registerStringOption_("setname", "<string>", "", "Use only when all given qcml files belong to one set, which will be held under the given name.", false);
}
void addBoxPlotQPs(std::map<String,String> nums, std::map<String,String> nams, String set, QcMLFile& qcmlfile)
{
for (std::map<String, String >::const_iterator it = nums.begin(); it != nums.end(); ++it)
{
QcMLFile::QualityParameter qp;
qp.name = nams[it->first]; ///< Name
qp.id = set + it->first; ///< Identifier
qp.cvRef = "QC"; ///< cv reference
qp.cvAcc = it->first;
qp.value = it->second;
qcmlfile.addSetQualityParameter(set, qp);
}
}
ExitCodes main_(int, const char **) override
{
//-------------------------------------------------------------
// parsing parameters
//-------------------------------------------------------------
StringList in_files = getStringList_("in");
String out = getStringOption_("out");
String setname = getStringOption_("setname");
//-------------------------------------------------------------
// reading input
//-------------------------------------------------------------
QcMLFile qcmlfile;
if (!setname.empty())
{
qcmlfile.registerSet(setname,setname,std::set< String >());
}
for (Size i = 0; i < in_files.size(); ++i)
{
QcMLFile tmpfile;
tmpfile.load(in_files[i]);
qcmlfile.merge(tmpfile,setname);
}
if (!setname.empty())
{
// // make #ms2 set stats
// std::vector<String> ms2nums_strings;
// qcmlfile.collectSetParameter(setname,"QC:0000007", ms2nums_strings);
// std::vector<Int> ms2nums;
// for (std::vector<String>::iterator it = ms2nums_strings.begin(); it != ms2nums_strings.end(); ++it) //transform is too ugly and errorprone
// {
// ms2nums.push_back(it->toInt());
// }
// std::sort(ms2nums.begin(), ms2nums.end());
// if (ms2nums.size()>0)
// {
// std::map<String,String> nums;
// std::map<String,String> nams;
// //~ min,q1,q2,q3,max
// nums["QC:0000043"] = String(ms2nums.front());
// nams["QC:0000043"] = "min ms2 number";
// nums["QC:0000044"] = String(OpenMS::Math::quantile1st(ms2nums.begin(), ms2nums.end(),true));
// nams["QC:0000044"] = "Q1 ms2 number";
// nums["QC:0000045"] = String(OpenMS::Math::median(ms2nums.begin(), ms2nums.end(), true));
// nams["QC:0000045"] = "Q2 ms2 number";
// nums["QC:0000046"] = String(OpenMS::Math::quantile3rd(ms2nums.begin(), ms2nums.end(),true));
// nams["QC:0000046"] = "Q3 ms2 number";
// nums["QC:0000047"] = String(ms2nums.back());
// nams["QC:0000047"] = "max ms2 number";
// addBoxPlotQPs(nums, nams, setname, qcmlfile);
// }
// // make #id-psm set stats
// std::vector<String> idnums_strings;
// qcmlfile.collectSetParameter(setname,"QC:0000029", idnums_strings);
// std::vector<Int> idnums;
// for (std::vector<String>::iterator it = idnums_strings.begin(); it != idnums_strings.end(); ++it) //transform is too ugly and errorprone
// {
// idnums.push_back(it->toInt());
// }
// std::sort(idnums.begin(), idnums.end());
// if (idnums.size()>0)
// {
// std::map<String,String> nums;
// std::map<String,String> nams;
// //~ min,q1,q2,q3,max
// nums["QC:0000053"] = String(idnums.front());
// nams["QC:0000053"] = "min id numbers";
// nums["QC:0000054"] = String(OpenMS::Math::quantile1st(idnums.begin(), idnums.end()));
// nams["QC:0000054"] = "Q1 id numbers";
// nums["QC:0000055"] = String(OpenMS::Math::median(idnums.begin(), idnums.end()));
// nams["QC:0000055"] = "Q2 id numbers";
// nums["QC:0000056"] = String(OpenMS::Math::quantile3rd(idnums.begin(), idnums.end()));
// nams["QC:0000056"] = "Q3 id numbers";
// nums["QC:0000057"] = String(idnums.back());
// nams["QC:0000057"] = "max id number";
// addBoxPlotQPs(nums, nams, setname, qcmlfile);
// }
}
qcmlfile.store(out);
return EXECUTION_OK;
}
};
int main(int argc, const char ** argv)
{
TOPPQCMerger tool;
return tool.main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/ImageCreator.cpp | .cpp | 15,071 | 416 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Clemens Groepl, Timo Sachsenberg$
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/PROCESSING/RESAMPLING/LinearResampler.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/KERNEL/RangeUtils.h>
#include <OpenMS/ML/INTERPOLATION/BilinearInterpolation.h>
#include <OpenMS/VISUAL/MultiGradient.h>
#include <QtGui/QImage>
#include <QtGui/QPainter>
using namespace OpenMS;
using namespace OpenMS::Math;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_ImageCreator ImageCreator
@brief Transforms an LC-MS map into a png image.
The input is first resampled into a matrix using bilinear forward resampling.
Then the content of the matrix is written to an image file.
The output has a uniform spacing in both dimensions regardless of the input.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_ImageCreator.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_ImageCreator.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPImageCreator :
public TOPPBase
{
public:
TOPPImageCreator() :
// registers tool name/description with TOPPBase; third argument presumably
// the 'official tool' flag — confirm against TOPPBase's constructor
TOPPBase("ImageCreator",
"Transforms an LC-MS map into an image.", false),
out_formats_({"png", "jpg", "bmp", "tiff", "ppm"}) // all in lower case!
{
}
protected:
const StringList out_formats_; ///< valid output formats for image
/// Marks position (x, y) in @p image with a small marker of the given color.
/// @param size marker style: 1 = the single pixel itself, 2 = the 4 direct
///        neighbors (plus-shape, center not drawn), 3 = an 8-pixel diamond ring.
/// Pixels falling outside the image are clipped.
void addPoint_(int x, int y, QImage& image, QColor color = Qt::black,
               Size size = 2)
{
  int h = image.height(), w = image.width();
  vector<int> xs(1, x), ys(1, y);
  if (size == 2)
  {
    int xtemp[] = {x - 1, x, x, x + 1};
    int ytemp[] = {y, y - 1, y + 1, y};
    xs = vector<int>(xtemp, xtemp + 4);
    ys = vector<int>(ytemp, ytemp + 4);
  }
  else if (size == 3)
  {
    int xtemp[] = {x - 2, x - 1, x - 1, x, x, x + 1, x + 1, x + 2};
    int ytemp[] = {y, y + 1, y - 1, y + 2, y - 2, y + 1, y - 1, y};
    xs = vector<int>(xtemp, xtemp + 8);
    ys = vector<int>(ytemp, ytemp + 8);
  }
  for (Size i = 0; i < xs.size(); ++i)
  {
    int xi = xs[i], yi = ys[i];
    // 0 is a valid pixel coordinate: the previous check used 'xi > 0' / 'yi > 0',
    // which dropped marker pixels on the top/left image border (off-by-one)
    if ((xi >= 0) && (xi < w) && (yi >= 0) && (yi < h))
    {
      image.setPixel(xi, yi, color.rgb());
    }
  }
}
/// Draws the rectangular outline of a feature's bounding box into @p image.
/// Coordinates are pixel positions (rt on the x-axis, mz on the y-axis).
void addFeatureBox_(int lower_mz, int lower_rt, int upper_mz, int upper_rt, QImage& image, QColor color = Qt::black)
{
  // stack-allocated QPainter: RAII guarantees end()/cleanup on scope exit
  // (was a raw new/delete pair, which would leak if drawRect threw)
  QPainter painter(&image);
  painter.setPen(color);
  painter.drawRect(QRect(lower_rt, lower_mz, upper_rt - lower_rt, upper_mz - lower_mz));
}
// Marks the precursor location of every MS2 spectrum in the image.
// The marker position is derived from the precursor m/z and the RT of the
// preceding MS1 spectrum, scaled into image pixel coordinates.
// 'transpose' swaps which axis carries RT vs m/z.
void markMS2Locations_(PeakMap& exp, QImage& image, bool transpose,
QColor color, Size size)
{
// scale factors: pixels per RT / m/z unit, depending on orientation
double xcoef = image.width(), ycoef = image.height();
if (transpose)
{
xcoef /= exp.getMaxRT() - exp.getMinRT();
ycoef /= exp.getMaxMZ() - exp.getMinMZ();
}
else
{
xcoef /= exp.getMaxMZ() - exp.getMinMZ();
ycoef /= exp.getMaxRT() - exp.getMinRT();
}
for (PeakMap::Iterator spec_iter = exp.begin();
spec_iter != exp.end(); ++spec_iter)
{
if (spec_iter->getMSLevel() == 2)
{
// NOTE(review): assumes every MS2 spectrum has at least one precursor and a
// resolvable precursor spectrum — no guard against empty getPrecursors() or
// an invalid iterator from getPrecursorSpectrum(); confirm inputs are sane
double mz = spec_iter->getPrecursors()[0].getMZ();
double rt = exp.getPrecursorSpectrum(spec_iter)->getRT();
int x, y;
if (transpose)
{
x = int(xcoef * (rt - exp.getMinRT()));
y = int(ycoef * (exp.getMaxMZ() - mz)); // y axis is flipped (max at top)
}
else
{
x = int(xcoef * (mz - exp.getMinMZ()));
y = int(ycoef * (exp.getMaxRT() - rt));
}
addPoint_(x, y, image, color, size); //mark MS2
}
}
}
/**
  @brief Draw bounding boxes and center points of all features onto @p image.

  @param feature_map Features to draw
  @param exp Peak map providing the global RT/m/z ranges for coordinate mapping
  @param image Image to draw into (modified in place)
  @param transpose If true, RT runs along x and m/z along y (top-down); otherwise vice versa
  @param color Color of the bounding boxes (centers are always marked in black)
*/
void markFeatureLocations_(FeatureMap& feature_map, PeakMap& exp, QImage& image, bool transpose, QColor color)
{
  // scale factors: pixels per RT second / per Th, depending on orientation
  double xcoef = image.width(), ycoef = image.height();
  if (transpose)
  {
    xcoef /= exp.getMaxRT() - exp.getMinRT();
    ycoef /= exp.getMaxMZ() - exp.getMinMZ();
  }
  else
  {
    xcoef /= exp.getMaxMZ() - exp.getMinMZ();
    ycoef /= exp.getMaxRT() - exp.getMinRT();
  }
  for (FeatureMap::Iterator feat_iter = feature_map.begin();
       feat_iter != feature_map.end(); ++feat_iter)
  {
    const ConvexHull2D convex_hull = feat_iter->getConvexHull();
    DBoundingBox<2> box = convex_hull.getBoundingBox();
    double rt = feat_iter->getRT();
    double mz = feat_iter->getMZ();
    double lower_mz = box.minY();
    double lower_rt = box.minX();
    double upper_mz = box.maxY();
    double upper_rt = box.maxX();
    int lx, ly, ux, uy, cx, cy; // box corners and center, in pixels
    if (transpose)
    {
      lx = int(xcoef * (lower_rt - exp.getMinRT()));
      ly = int(ycoef * (exp.getMaxMZ() - lower_mz));
      ux = int(xcoef * (upper_rt - exp.getMinRT()));
      uy = int(ycoef * (exp.getMaxMZ() - upper_mz));
      cx = int(xcoef * (rt - exp.getMinRT()));
      // BUGFIX: was 'ycoef * (mz - lower_mz)', i.e. an offset *within* the
      // feature, not an image coordinate; use the same top-down m/z mapping
      // as ly/uy (and markMS2Locations_):
      cy = int(ycoef * (exp.getMaxMZ() - mz));
    }
    else
    {
      lx = int(xcoef * (lower_mz - exp.getMinMZ()));
      ly = int(ycoef * (exp.getMaxRT() - lower_rt));
      ux = int(xcoef * (upper_mz - exp.getMinMZ()));
      uy = int(ycoef * (exp.getMaxRT() - upper_rt));
      cx = int(xcoef * (mz - exp.getMinMZ()));
      cy = int(ycoef * (exp.getMaxRT() - rt));
    }
    addFeatureBox_(ly, lx, uy, ux, image, color);
    addPoint_(cx, cy, image, Qt::black); // mark center
  }
}
/// Register the tool's command-line parameters.
/// Note: registration order defines the order in --help and the INI file.
void registerOptionsAndFlags_() override
{
  // input/output files
  registerInputFile_("in", "<file>", "", "input file ");
  setValidFormats_("in", {"mzML"});
  registerInputFile_("in_featureXML", "<file>", "", "input file ", false);
  setValidFormats_("in_featureXML", {"featureXML"});
  registerOutputFile_("out", "<file>", "", "output file");
  setValidFormats_("out", out_formats_, false);
  // explicit image format; overrides the format derived from the 'out' suffix
  registerStringOption_("out_type", "<file type>", "", "The image format. Set this if you want to force a format not reflected by the 'out' filename.", false);
  setValidStrings_("out_type", out_formats_);
  // optional RT/m/z windows to restrict the data that gets rendered
  registerStringOption_("rt", "[min]:[max]", ":", "Retention time range to extract [s]", false);
  registerStringOption_("mz", "[min]:[max]", ":", "Mass-to-charge range to extract", false);
  // image dimensions; 0 means "derive from the data"
  registerIntOption_("width", "<number>", 1024, "Number of pixels in m/z dimension.\nIf 0, one pixel per Th.", false);
  setMinInt_("width", 0);
  registerIntOption_("height", "<number>", 1024, "Number of pixels in RT dimension.\nIf 0, one pixel per spectrum.", false);
  setMinInt_("height", 0);
  // rendering options (colors, intensity mapping, orientation)
  registerStringOption_("background_color", "<color>", "#FFFFFF", "Background color e.g.: \"#FF0000\" to choose red as background color", false);
  registerStringOption_("feature_color", "<color>", "#000000", "Feature color e.g.: \"#00FF00\" to choose green as feature color", false);
  registerStringOption_("gradient", "<gradient>", "", "Intensity gradient that defines colors for the range between 0 and 100.\n"
                                                      "Example: '0,#FFFFFF;50,#FF0000;100,#000000'", false);
  registerDoubleOption_("max_intensity", "<int>", 0, "Maximum peak intensity used to determine range for colors.\n"
                                                     "If 0, this is determined from the data.", false);
  registerFlag_("log_intensity", "Apply logarithm to intensity values");
  registerFlag_("transpose", "Flag to transpose the resampled matrix (RT vs. m/z).\n"
                             "Per default, dimensions run bottom-up in RT and left-right in m/z.");
  // MS2 precursor marks
  registerFlag_("precursors", "Mark locations of MS2 precursors.\n");
  registerStringOption_("precursor_color", "<color>", "#000000", "Color for precursor marks (color code or word, e.g. 'black') (requires 'precursors' flag to be active)", false);
  registerIntOption_("precursor_size", "<number>", 2,
                     "Size of the precursor marks (requires 'precursors' flag to be active)", false);
  setMinInt_("precursor_size", 1);
  setMaxInt_("precursor_size", 3);
}
/**
  @brief Load peak data (+ optional features), resample it to an intensity
  matrix, map intensities to colors and save the result as an image.

  @return EXECUTION_OK on success, ILLEGAL_PARAMETERS for an unresolvable
  output format, CANNOT_WRITE_OUTPUT_FILE if saving the image fails.
*/
ExitCodes main_(int, const char**) override
{
  //----------------------------------------------------------------
  // load data
  //----------------------------------------------------------------
  String in = getStringOption_("in");
  String in_featureXML = getStringOption_("in_featureXML");
  String out = getStringOption_("out");
  String format = getStringOption_("out_type");
  if (format.trim().empty()) // get from filename
  {
    try
    {
      format = out.suffix('.');
    }
    catch (Exception::ElementNotFound& /*e*/)
    {
      format = "nosuffix";
    }
    // an explicit 'out_type' was already validated against out_formats_;
    // only a suffix-derived format needs checking here
    if (!ListUtils::contains(out_formats_, format.toLower()))
    {
      OPENMS_LOG_ERROR << "No explicit image output format was provided via 'out_type', and the suffix ('" << format << "') does not resemble a valid type. Please fix one of them." << std::endl;
      return ILLEGAL_PARAMETERS;
    }
  }
  // parse optional RT / m/z windows; swap if user gave them reversed
  const double init = numeric_limits<double>::max();
  double rt_min = -init, rt_max = init, mz_min = -init, mz_max = init;
  bool filter_rt = parseRange_(getStringOption_("rt"), rt_min, rt_max);
  if (rt_min > rt_max) swap(rt_min, rt_max);
  bool filter_mz = parseRange_(getStringOption_("mz"), mz_min, mz_max);
  if (mz_min > mz_max) swap(mz_min, mz_max);
  bool show_precursors = getFlag_("precursors");
  PeakMap exp;
  FileHandler f;
  if (filter_rt) f.getOptions().setRTRange(DRange<1>(rt_min, rt_max));
  if (filter_mz) f.getOptions().setMZRange(DRange<1>(mz_min, mz_max));
  if (!show_precursors) f.getOptions().setMSLevels({1}); // only MS1 needed
  f.loadExperiment(in, exp, {FileTypes::MZML}, log_type_);
  if (filter_mz && show_precursors)
  {
    // MS2 spectra were not filtered by precursor m/z, remove them now:
    auto predicate =
      InPrecursorMZRange<MSSpectrum>(mz_min, mz_max, true);
    exp.getSpectra().erase(remove_if(exp.begin(), exp.end(), predicate),
                           exp.end());
  }
  exp.updateRanges();
  // image dimensions; 0 means "derive from the data"
  Size rows = getIntOption_("height"), cols = getIntOption_("width");
  if (rows == 0) rows = exp.size(); // one pixel per spectrum
  if (cols == 0) cols = UInt(ceil(  // one pixel per Th
    exp.spectrumRanges().byMSLevel(1).getMaxMZ() - exp.spectrumRanges().byMSLevel(1).getMinMZ()));
  //----------------------------------------------------------------
  // Do the actual resampling
  BilinearInterpolation<double, double> bilip;
  bilip.getData().resize(rows, cols);
  bilip.getData().fill(0.0);
  if (!getFlag_("transpose"))
  {
    // scans run bottom-up:
    bilip.setMapping_0(0, exp.spectrumRanges().byMSLevel(1).getMaxRT(), rows - 1, exp.spectrumRanges().byMSLevel(1).getMinRT());
    // peaks run left-right:
    bilip.setMapping_1(0, exp.spectrumRanges().byMSLevel(1).getMinMZ(), cols - 1, exp.spectrumRanges().byMSLevel(1).getMaxMZ());
    for (PeakMap::Iterator spec_iter = exp.begin();
         spec_iter != exp.end(); ++spec_iter)
    {
      if (spec_iter->getMSLevel() != 1) continue;
      for (PeakMap::SpectrumType::ConstIterator peak1_iter =
             spec_iter->begin(); peak1_iter != spec_iter->end();
           ++peak1_iter)
      {
        bilip.addValue(spec_iter->getRT(), peak1_iter->getMZ(),
                       peak1_iter->getIntensity());
      }
    }
  }
  else // transpose
  {
    // spectra run bottom-up:
    bilip.setMapping_0(0, exp.getMaxMZ(), rows - 1, exp.getMinMZ());
    // scans run left-right:
    bilip.setMapping_1(0, exp.getMinRT(), cols - 1, exp.getMaxRT());
    for (PeakMap::Iterator spec_iter = exp.begin();
         spec_iter != exp.end(); ++spec_iter)
    {
      if (spec_iter->getMSLevel() != 1) continue;
      for (PeakMap::SpectrumType::ConstIterator peak1_iter =
             spec_iter->begin(); peak1_iter != spec_iter->end();
           ++peak1_iter)
      {
        bilip.addValue(peak1_iter->getMZ(), spec_iter->getRT(),
                       peak1_iter->getIntensity());
      }
    }
  }
  //----------------------------------------------------------------
  //create and store image
  int scans = (int) bilip.getData().rows();
  int peaks = (int) bilip.getData().cols();
  bool use_log = getFlag_("log_intensity");
  // pick the intensity-to-color gradient: user-supplied > log default > linear default
  MultiGradient gradient;
  String gradient_str = getStringOption_("gradient");
  if (!gradient_str.empty())
  {
    gradient.fromString(String("Linear|") + gradient_str);
  }
  else if (use_log)
  {
    gradient = MultiGradient::getDefaultGradientLogarithmicIntensityMode();
  }
  else
  {
    gradient = MultiGradient::getDefaultGradientLinearIntensityMode();
  }
  QImage image(peaks, scans, QImage::Format_RGB32);
  string s = getStringOption_("background_color");
  QColor background_color(s.c_str());
  string feature_color_string = getStringOption_("feature_color");
  QColor feature_color(feature_color_string.c_str());
  { // initialize the canvas; RAII painter (was: raw new/delete).
    // BUGFIX: previously 'Qt::SolidPattern' was passed to fillRect(), which
    // converts to a *black* QBrush and ignores the pen that had been set to
    // 'background_color'; fill with the actual background color instead
    // (every pixel is overwritten below, so output is unchanged).
    QPainter painter(&image);
    painter.fillRect(0, 0, peaks, scans, QBrush(background_color));
  }
  double factor = getDoubleOption_("max_intensity");
  if (factor == 0)
  {
    factor = bilip.getData().maxValue();
  }
  // with a user-supplied gradient, we need to logarithmize explicitly;
  // by default, the gradient itself is adjusted to the log-scale:
  use_log &= !gradient_str.empty();
  if (use_log) factor = std::log(factor);
  factor /= 100.0; // gradient positions run from 0 to 100
  for (int i = 0; i < scans; ++i)
  {
    for (int j = 0; j < peaks; ++j)
    {
      double value = bilip.getData()(i, j);
      if (use_log) value = std::log(value);
      if (value > 1e-4)
      {
        image.setPixel(j, i, gradient.interpolatedColorAt(value / factor).rgb());
      }
      else
      { // (near-)zero intensity -> background
        image.setPixel(j, i, background_color.rgb());
      }
    }
  }
  if (show_precursors)
  {
    markMS2Locations_(exp, image, getFlag_("transpose"),
                      getStringOption_("precursor_color").toQString(),
                      Size(getIntOption_("precursor_size")));
  }
  if (!in_featureXML.empty())
  {
    FeatureMap feature_map;
    FileHandler().loadFeatures(in_featureXML, feature_map, {FileTypes::FEATUREXML});
    markFeatureLocations_(feature_map, exp, image, getFlag_("transpose"), feature_color);
  }
  if (image.save(out.toQString(), format.c_str())) return EXECUTION_OK;
  else return CANNOT_WRITE_OUTPUT_FILE;
}
};
/// Program entry point: delegate to the TOPP tool framework.
int main(int argc, const char** argv)
{
  return TOPPImageCreator().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/MzMLSplitter.cpp | .cpp | 6,576 | 189 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/SYSTEM/File.h>
#include <QFile>
#include <iomanip>
#include <sstream>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
// Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_MzMLSplitter MzMLSplitter
@brief Splits an mzML file into multiple parts
This utility will split an input mzML file into @e N parts, with an approximately equal number of spectra and chromatograms in each part.
@e N is set by the parameter @p parts; optionally only spectra (parameter @p no_chrom) or only chromatograms (parameter @p no_spec) can be transferred to the output.
Alternatively to setting the number of parts directly, a target maximum file size for the parts can be specified (parameters @p size and @p unit).
The number of parts is then calculated by dividing the original file size by the target and rounding up.
Note that the resulting parts may actually be bigger than the target size (due to meta data that is included in every part) or
that more parts than necessary may be produced (if spectra or chromatograms are removed via @p no_spec/@p no_chrom).
This tool cannot be used as part of a TOPPAS workflow, because the number of output files is variable.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_MzMLSplitter.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_MzMLSplitter.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
/// TOPP tool that splits an mzML file into N parts with approximately equal
/// numbers of spectra and chromatograms; N is given directly ('parts') or
/// derived from a target file size ('size'/'unit').
class TOPPMzMLSplitter : public TOPPBase
{
public:
  TOPPMzMLSplitter() : TOPPBase("MzMLSplitter", "Splits an mzML file into multiple parts")
  {
  }

protected:
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Input file");
    // brace-init lists instead of ListUtils::create<String>(...), for
    // consistency with the other TOPP tools
    setValidFormats_("in", {"mzML"});
    registerOutputPrefix_("out", "<prefix>", "", "Prefix for output files ('_part1of2.mzML' etc. will be appended; default: same as 'in' without the file extension)", false);
    registerIntOption_("parts", "<num>", 1, "Number of parts to split into (takes precedence over 'size' if set)", false);
    setMinInt_("parts", 1);
    registerIntOption_("size", "<num>", 0, "Approximate upper limit for resulting file sizes (in 'unit')", false);
    setMinInt_("size", 0);
    registerStringOption_("unit", "<choice>", "MB", "Unit for 'size' (base 1024)", false);
    setValidStrings_("unit", {"KB", "MB", "GB"});
    // @TODO:
    // registerFlag_("precursor", "Make sure precursor spectra end up in the same part as their fragment spectra");
    registerFlag_("no_chrom", "Remove chromatograms, keep only spectra.");
    registerFlag_("no_spec", "Remove spectra, keep only chromatograms.");
  }

  ExitCodes main_(int, const char**) override
  {
    String in = getStringOption_("in"), out = getStringOption_("out");
    if (out.empty())
    { // default output prefix: input filename without extension
      out = FileHandler::stripExtension(in);
    }
    bool no_chrom = getFlag_("no_chrom"), no_spec = getFlag_("no_spec");
    if (no_chrom && no_spec)
    { // both flags together would produce empty output files
      writeLogError_("Error: 'no_chrom' and 'no_spec' cannot be used together");
      return ILLEGAL_PARAMETERS;
    }
    Size parts = getIntOption_("parts"), size = getIntOption_("size");
    if (parts == 1)
    { // 'parts' left at default -> derive the number of parts from 'size'
      if (size == 0)
      {
        writeLogError_("Error: Higher value for parameter 'parts' or 'size' required");
        return ILLEGAL_PARAMETERS;
      }
      QFile mzml_file(in.toQString());
      // use float here to avoid too many decimals in output below:
      float total_size = mzml_file.size();
      String unit = getStringOption_("unit");
      if (unit == "KB")
        total_size /= 1024;
      else if (unit == "MB")
        total_size /= (1024 * 1024);
      else
        total_size /= (1024 * 1024 * 1024); // "GB"
      writeLogInfo_("File size: " + String(total_size) + " " + unit);
      parts = ceil(total_size / size); // round up: parts may not exceed 'size'
    }
    writeLogInfo_("Splitting file into " + String(parts) + " parts...");
    PeakMap experiment;
    FileHandler().loadExperiment(in, experiment, {FileTypes::MZML}, log_type_);
    // move spectra/chromatograms out of the experiment; the emptied
    // 'experiment' keeps all meta data and serves as template for each part
    vector<MSSpectrum> spectra;
    vector<MSChromatogram> chromatograms;
    if (no_spec)
    {
      experiment.getSpectra().clear();
    }
    else
    {
      experiment.getSpectra().swap(spectra);
    }
    if (no_chrom)
    {
      experiment.getChromatograms().clear();
    }
    else
    {
      experiment.getChromatograms().swap(chromatograms);
    }
    writeLogInfo_("Total spectra: " + String(spectra.size()));
    writeLogInfo_("Total chromatograms: " + String(chromatograms.size()));
    Size spec_start = 0, chrom_start = 0;
    Size width = String(parts).size(); // zero-pad part numbers to equal width
    for (Size counter = 1; counter <= parts; ++counter)
    {
      ostringstream out_name;
      out_name << out << "_part" << setw(width) << setfill('0') << counter << "of" << parts << ".mzML";
      PeakMap part = experiment; // copy meta data only (data was moved out)
      addDataProcessing_(part, getProcessingInfo_(DataProcessing::FILTERING));
      // distribute the remaining items evenly over the remaining parts:
      Size remaining = parts - counter + 1;
      Size n_spec = ceil((spectra.size() - spec_start) / double(remaining));
      if (n_spec > 0)
      {
        part.reserveSpaceSpectra(n_spec);
        for (Size i = spec_start; i < spec_start + n_spec; ++i)
        {
          part.addSpectrum(std::move(spectra[i]));
        }
      }
      spec_start += n_spec;
      Size n_chrom = ceil((chromatograms.size() - chrom_start) / double(remaining));
      if (n_chrom > 0)
      {
        part.reserveSpaceChromatograms(n_chrom);
        for (Size i = chrom_start; i < chrom_start + n_chrom; ++i)
        {
          part.addChromatogram(std::move(chromatograms[i]));
        }
      }
      chrom_start += n_chrom;
      writeLogInfo_("Part " + String(counter) + ": " + String(n_spec) + " spectra, " + String(n_chrom) + " chromatograms");
      FileHandler().storeExperiment(out_name.str(), part, {FileTypes::MZML}, log_type_);
    }
    return EXECUTION_OK;
  }
};
/// Program entry point: delegate to the TOPP tool framework.
int main(int argc, const char** argv)
{
  return TOPPMzMLSplitter().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/StaticModification.cpp | .cpp | 8,150 | 216 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/CHEMISTRY/ResidueDB.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <vector>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_StaticModification StaticModification
@brief Applies a set of modifications to all PeptideIDs in an idXML file.
Given peptide sequences from an idXML file, this TOPP tool applies a set of static (i.e. unconditional)
modifications to all AA's of a peptide sequences which have a matching origin (i.e. amino acid), and to the C/N-term.
The modifications supported are the usual ones from UniMod.
The user can provide modification(s) explicitly, e.g. <em>Carbamidomethyl (C)</em>, or use predefined sets.
Predefined sets:
- N15 (a.k.a. 15N) -- assumes all AAs contain heavy nitrogen (20 modifications in total)
Explicit modifications and predefined sets can be combined.
Modifications already present on an AA/Terminus of the input will not be applied again.
If more than one modification is to be applied to an AA/Terminus, annotation using single name is not sufficient anymore and the summed delta-mass
has to be used. Modifications are not applied to AAs which already contain an unspecified delta-mass in the input.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_StaticModification.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_StaticModification.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
/// TOPP tool that applies a set of static (unconditional) modifications to
/// all peptide hits in an idXML file; supports explicit UniMod modifications
/// and predefined sets (currently "N15").
class StaticModification :
  public TOPPBase
{
public:
  StaticModification() :
    TOPPBase("StaticModification", "Applies a set of modifications to all PeptideIDs in an idXML file.")
  {
  }

protected:
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Input: identification results");
    setValidFormats_("in", { "idXML" });
    registerOutputFile_("out", "<file>", "", "Output: identification results with modifications applied");
    setValidFormats_("out", { "idXML" });
    registerStringList_("mods","<list>", StringList(), "List of manual modifications, specified using Unimod (www.unimod.org) terms, e.g. 'Carbamidomethyl (C)' or 'Oxidation (M)'.", false);
    vector<String> all_mods;
    ModificationsDB::getInstance()->getAllSearchModifications(all_mods);
    setValidStrings_("mods", all_mods);
    registerStringOption_("presets", "<name>", "none", "Add predefined sets, as shortcut to manually specifying a lot of modifications.", false);
    setValidStrings_("presets", { "none", "N15" });
  }

  /// insert a mod into a container @p sink and report it to commandline if its new
  void insertMod(const ResidueModification* p_mod, std::set<const ResidueModification*>& sink)
  {
    std::pair<std::set<const ResidueModification*>::iterator, bool> ret = sink.insert(p_mod);
    if (ret.second == true)
    { // entry is new
      OPENMS_LOG_INFO << " " << p_mod->getFullId() << "\n";
    }
  }

  /// insert a mod into the per-origin (amino acid) map @p sink
  void insertMod(const ResidueModification* p_mod, const char origin, std::map<char, std::set<const ResidueModification*>>& sink)
  {
    insertMod(p_mod, sink[origin]);
  }

  ExitCodes main_(int, const char**) override
  {
    String in = getStringOption_("in");
    String out = getStringOption_("out");
    StringList s_mods = getStringList_("mods");
    String sets = getStringOption_("presets");
    StringList s_mods_predef;
    if (sets == "N15")
    { // heavy nitrogen on all 20 amino acids
      s_mods_predef = {"Label:15N(1) (A)", "Label:15N(1) (C)", "Label:15N(1) (D)", "Label:15N(1) (E)",
                       "Label:15N(1) (F)", "Label:15N(1) (G)", "Label:15N(1) (I)", "Label:15N(1) (L)",
                       "Label:15N(1) (M)", "Label:15N(1) (P)", "Label:15N(1) (S)", "Label:15N(1) (T)",
                       "Label:15N(1) (V)", "Label:15N(1) (Y)", "Label:15N(2) (K)", "Label:15N(2) (N)",
                       "Label:15N(2) (Q)", "Label:15N(2) (W)", "Label:15N(3) (H)", "Label:15N(4) (R)"};
    }
    // merge both string sets
    s_mods.insert(s_mods.end(), s_mods_predef.begin(), s_mods_predef.end());
    // convert to ResidueModifications, sorted by where they attach:
    std::map<char, std::set<const ResidueModification*>> mods_anywhere;
    std::set<const ResidueModification*> mods_nterm;
    std::set<const ResidueModification*> mods_cterm;
    ModificationsDB* mod_DB = ModificationsDB::getInstance();
    ResidueDB* res_DB = ResidueDB::getInstance();
    if (s_mods.empty())
    {
      OPENMS_LOG_ERROR << "Error: no modifications given. The tool would not change the output."
                       << " This is probably not what you wanted. Use the '-force' flag if you really really want no change in the output." << std::endl;
      if (!getFlag_("force")) return ExitCodes::ILLEGAL_PARAMETERS;
      OPENMS_LOG_ERROR << "Ok, you used the force. Computing ... nothing..." << std::endl;
    }
    OPENMS_LOG_INFO << "Using the following modifications to annotate PepHits:\n";
    for (const auto& s_mod : s_mods)
    {
      auto p_mod = mod_DB->getModification(s_mod, "");
      switch (p_mod->getTermSpecificity())
      {
        case ResidueModification::TermSpecificity::C_TERM:
        case ResidueModification::TermSpecificity::PROTEIN_C_TERM:
          insertMod(p_mod, mods_cterm);
          break;
        case ResidueModification::TermSpecificity::N_TERM:
        case ResidueModification::TermSpecificity::PROTEIN_N_TERM:
          insertMod(p_mod, mods_nterm);
          break;
        case ResidueModification::TermSpecificity::ANYWHERE:
          insertMod(p_mod, p_mod->getOrigin(), mods_anywhere);
          break;
        default:
          throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                        "Modification has invalid term specificity.",
                                        String(ResidueModification::TermSpecificity::NUMBER_OF_TERM_SPECIFICITY));
      }
    }
    OPENMS_LOG_INFO << "\n";
    // load data
    std::vector<ProteinIdentification> prot_ids;
    PeptideIdentificationList pep_ids;
    FileHandler().loadIdentifications(in, prot_ids, pep_ids, {FileTypes::IDXML});
    // apply mod to all PeptideHits
    for (auto& id : pep_ids)
    {
      for (auto& hit : id.getHits())
      {
        AASequence seq = hit.getSequence();
        if (seq.empty()) continue; // avoid invalid access
        // N-Term mods:
        if (!mods_nterm.empty())
        {
          seq.setNTerminalModification(ResidueModification::combineMods(seq.getNTerminalModification(), mods_nterm, false));
        }
        // C-Term mods:
        // BUGFIX: this block used 'mods_nterm' before (copy-paste error);
        // 'mods_cterm' was populated above but never applied
        if (!mods_cterm.empty())
        {
          seq.setCTerminalModification(ResidueModification::combineMods(seq.getCTerminalModification(), mods_cterm, false));
        }
        // AA-mods
        for (Size i = 0; i < seq.size(); ++i)
        {
          const char code = seq[i].getOneLetterCode()[0];
          // get all mods for this origin; use find() instead of operator[]
          // to avoid copying the set and default-inserting empty entries
          const auto it_mods = mods_anywhere.find(code);
          if (it_mods == mods_anywhere.end() || it_mods->second.empty()) continue; // nothing to apply
          auto mod_new = ResidueModification::combineMods(seq[i].getModification(), it_mods->second, false, &seq[i]);
          auto res_new = res_DB->getModifiedResidue(mod_new->getFullId());
          seq.setModification(i, res_new);
        } // end AA
        // write back result
        hit.setSequence(seq);
      }
    }
    FileHandler().storeIdentifications(out, prot_ids, pep_ids, {FileTypes::IDXML});
    return EXECUTION_OK;
  }
};
/// Program entry point: delegate to the TOPP tool framework.
int main(int argc, const char** argv)
{
  return StaticModification().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/FileInfo.cpp | .cpp | 88,950 | 2,312 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Lars Nilse $
// $Authors: Marc Sturm, Clemens Groepl, Lars Nilse, Chris Bielow $
// --------------------------------------------------------------------------
#include <boost/iostreams/device/null.hpp>
#include <boost/iostreams/filtering_stream.hpp>
#include <OpenMS/config.h>
#include <OpenMS/ANALYSIS/OPENSWATH/TransitionPQPFile.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/DATASTRUCTURES/ListUtilsIO.h> // for operator<< on StringList
#include <OpenMS/DATASTRUCTURES/StringListUtils.h>
#include <OpenMS/FORMAT/ConsensusXMLFile.h>
#include <OpenMS/FORMAT/FASTAFile.h>
#include <OpenMS/FORMAT/FeatureXMLFile.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FORMAT/FileTypes.h>
#include <OpenMS/FORMAT/HANDLERS/IndexedMzMLHandler.h>
#include <OpenMS/FORMAT/IdXMLFile.h>
#include <OpenMS/FORMAT/MzDataFile.h>
#include <OpenMS/FORMAT/MzIdentMLFile.h>
#include <OpenMS/FORMAT/MzMLFile.h>
#include <OpenMS/FORMAT/MzTabFile.h>
#include <OpenMS/FORMAT/MzXMLFile.h>
#include <OpenMS/FORMAT/PeakTypeEstimator.h>
#include <OpenMS/FORMAT/PepXMLFile.h>
#include <OpenMS/FORMAT/TransformationXMLFile.h>
#include <OpenMS/IONMOBILITY/FAIMSHelper.h>
#include <OpenMS/KERNEL/Feature.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/SYSTEM/SysInfo.h>
#include <unordered_map>
#include <iomanip>
#include <map>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
// Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_FileInfo FileInfo
@brief Shows basic information about the data in an %OpenMS readable file.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=2> → FileInfo →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> any tool operating on MS peak data @n (in mzML format) </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> none ; console or text file</td>
</tr>
</table>
</CENTER>
This tool can show basic information about the data in different file types, such as raw peak, featureXML and consensusXML files. It can
- show information about the data range of a file (m/z, RT, ion mobility, intensity)
- show a statistical summary for intensities, qualities, feature widths, precursor charges, activation methods
- show an overview of the metadata
- validate several XML formats against their XML schema
- check for corrupt data in a file (e.g., duplicate spectra)
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_FileInfo.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_FileInfo.html
In order to enrich the resulting data of your analysis pipeline or to quickly compare different outcomes of your pipeline you can invoke the aforementioned information of your input data and (intermediary) results.
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
namespace OpenMS
{
  /// Iterate over all items in @p data and use the @p lam callback to extract
  /// a charge from each item (lam returns false to skip an item); print the
  /// resulting charge histogram to @p os (human-readable) and @p os_tsv (TSV).
  template <class CONTAINER, typename LAMBDA>
  void printChargeDistribution(const CONTAINER& data, LAMBDA lam, ostream& os, ostream& os_tsv, const String& header = "Charge")
  {
    std::map<Int, UInt> charges;
    Int q = 0; // initialized: avoids UB if a callback reports true without assigning
    for (const auto& item : data)
    {
      if (lam(item, q)) ++charges[q];
    }
    os << header << " distribution:"
       << '\n';
    for (const auto& ch : charges)
    {
      os << " charge " << ch.first << ": " << ch.second << "x\n";
      os_tsv << "general: charge distribution: charge: "
             << ch.first << '\t'
             << ch.second << '\n';
    }
    os << '\n';
  } // (stray ';' after function definition removed)

  /// helper struct bundling identification data from one input file
  struct IdData
  {
    String identifier;                      // source identifier (e.g. run/file)
    vector<ProteinIdentification> proteins;
    PeptideIdentificationList peptides;
  };

  /// Write SomeStatistics to a stream.
  template <class T>
  static ostream &operator<<(ostream &os, const Math::SummaryStatistics<T> &rhs)
  {
    return os << " num. of values: " << rhs.count << '\n'
              << " mean: " << rhs.mean << '\n'
              << " minimum: " << rhs.min << '\n'
              << " lower quartile: " << rhs.lowerq << '\n'
              << " median: " << rhs.median << '\n'
              << " upper quartile: " << rhs.upperq << '\n'
              << " maximum: " << rhs.max << '\n'
              << " variance: " << rhs.variance << '\n';
  }
} // namespace
class TOPPFileInfo : public TOPPBase
{
public:
/// Constructor: registers tool name and the one-line description shown in --help.
TOPPFileInfo() : TOPPBase("FileInfo", "Shows basic information about the file, such as data ranges and file type.")
{
}
protected:
/// Register the tool's command-line parameters.
/// Note: registration order defines the order in --help and the INI file.
void registerOptionsAndFlags_() override
{
  // all input formats this tool can inspect; shared by 'in' and 'in_type'
  StringList in_types = { "mzData", "mzXML", "mzML", "sqMass", "dta", "dta2d", "mgf", "featureXML", "consensusXML", "idXML", "pepXML", "mzTab", "fid", "mzid", "trafoXML", "fasta", "pqp" };
  registerInputFile_("in", "<file>", "", "input file");
  setValidFormats_("in", in_types);
  registerStringOption_("in_type", "<type>", "", "input file type -- default: determined from file extension or content", false);
  setValidStrings_("in_type", in_types);
  // optional report outputs (default: print to the command line)
  registerOutputFile_("out", "<file>", "", "Optional output file. If left out, the output is written to the command line.", false);
  setValidFormats_("out", {"txt"});
  registerOutputFile_("out_tsv", "<file>", "", "Second optional output file. Tab separated flat text file.", false, true);
  setValidFormats_("out_tsv", {"tsv"});
  // single-letter flags selecting which report sections / checks to run
  registerFlag_("m", "Show meta information about the whole experiment");
  registerFlag_("p", "Shows data processing information");
  registerFlag_("s", "Computes a five-number statistics of intensities, qualities, and widths");
  registerFlag_("d", "Show detailed listing of all spectra and chromatograms (peak files only)");
  registerFlag_("c", "Check for corrupt data in the file (peak files only)");
  registerFlag_("v", "Validate the file only (for mzML, mzData, mzXML, featureXML, idXML, consensusXML, pepXML)");
  registerFlag_("i", "Check whether a given mzML file contains valid indices (conforming to the indexedmzML standard)");
}
// Forward declare the specialized version for MSExperiment to avoid compiler errors
// template <>
// void writeRangesHumanReadable_<MSExperiment>(const MSExperiment& map, ostream &os);
/// Print the RT, m/z, (optionally) ion-mobility and intensity ranges of
/// @p map to @p os in human-readable form; empty ranges print "<none>".
/// The mobility line is only emitted for Map types deriving from RangeMobility.
template <class Map>
void writeRangesHumanReadable_(const Map& map, ostream &os)
{
  if (map.RangeRT::isEmpty())
  {
    // BUGFIX: the newline was accidentally embedded in the string literal
    // ("Ranges:'\n' retention time: ..."); stream it as a real line break:
    os << "Ranges:" << '\n'
       << " retention time: <none> .. <none> sec (<none> min)\n";
  }
  else
  {
    os << "Ranges:" << '\n'
       << " retention time: " << String::number(map.getMinRT(), 2) << " .. " << String::number(map.getMaxRT(), 2) << " sec ("
       << String::number((map.getMaxRT() - map.getMinRT()) / 60, 1) << " min)\n";
  }
  if (map.RangeMZ::isEmpty())
  {
    os << " mass-to-charge: <none> .. <none>\n";
  }
  else
  {
    os << " mass-to-charge: " << String::number(map.getMinMZ(), 2) << " .. " << String::number(map.getMaxMZ(), 2) << '\n';
  }
  if constexpr (std::is_base_of<RangeMobility, Map>())
  {
    if (map.RangeMobility::isEmpty())
    {
      os << " ion mobility: <none> .. <none>\n";
    }
    else
    {
      os << " ion mobility: " << String::number(map.getMinMobility(), 2) << " .. " << String::number(map.getMaxMobility(), 2) << '\n';
    }
  }
  if (map.RangeIntensity::isEmpty())
  {
    os << " intensity: <none> .. <none>\n\n";
  }
  else
  {
    os << " intensity: " << String::number(map.getMinIntensity(), 2) << " .. " << String::number(map.getMaxIntensity(), 2) << "\n\n";
  }
}
void writeRangesHumanReadable_(const MSExperiment& exp, ostream &os)
{
// 1. Display Combined Ranges (same format as before for backward compatibility)
os << "Combined Ranges (spectra + chromatograms):" << '\n';
// Use the combinedRanges() accessor
if (exp.combinedRanges().RangeRT::isEmpty())
{
os << " retention time: <none> .. <none> sec (<none> min)\n";
}
else
{
os << " retention time: " << String::number(exp.combinedRanges().getMinRT(), 2) << " .. "
<< String::number(exp.combinedRanges().getMaxRT(), 2) << " sec ("
<< String::number((exp.combinedRanges().getMaxRT() - exp.combinedRanges().getMinRT()) / 60, 1) << " min)\n";
}
// Display m/z range
if (exp.combinedRanges().RangeMZ::isEmpty())
{
os << " mass-to-charge: <none> .. <none>\n";
}
else
{
os << " mass-to-charge: " << String::number(exp.combinedRanges().getMinMZ(), 2) << " .. "
<< String::number(exp.combinedRanges().getMaxMZ(), 2) << '\n';
}
// Display mobility range if present
if (exp.combinedRanges().RangeMobility::isEmpty())
{
os << " ion mobility: <none> .. <none>\n";
}
else
{
os << " ion mobility: " << String::number(exp.combinedRanges().getMinMobility(), 2) << " .. "
<< String::number(exp.combinedRanges().getMaxMobility(), 2) << '\n';
}
// Display intensity range
if (exp.combinedRanges().RangeIntensity::isEmpty())
{
os << " intensity: <none> .. <none>\n\n";
}
else
{
os << " intensity: " << String::number(exp.combinedRanges().getMinIntensity(), 2) << " .. "
<< String::number(exp.combinedRanges().getMaxIntensity(), 2) << "\n\n";
}
// 2. Display Spectrum Ranges (overall)
os << "Spectrum Ranges:" << '\n';
// Use the spectrumRanges() accessor with MS level 0 for overall ranges
const auto& spec_ranges = exp.spectrumRanges();
if (spec_ranges.RangeRT::isEmpty())
{
os << " retention time: <none> .. <none> sec (<none> min)\n";
}
else
{
os << " retention time: " << String::number(spec_ranges.getMinRT(), 2) << " .. "
<< String::number(spec_ranges.getMaxRT(), 2) << " sec ("
<< String::number((spec_ranges.getMaxRT() - spec_ranges.getMinRT()) / 60, 1) << " min)\n";
}
// Display m/z range
if (spec_ranges.RangeMZ::isEmpty())
{
os << " mass-to-charge: <none> .. <none>\n";
}
else
{
os << " mass-to-charge: " << String::number(spec_ranges.getMinMZ(), 2) << " .. "
<< String::number(spec_ranges.getMaxMZ(), 2) << '\n';
}
// Display mobility range if present
if (spec_ranges.RangeMobility::isEmpty())
{
os << " ion mobility: <none> .. <none>\n";
}
else
{
os << " ion mobility: " << String::number(spec_ranges.getMinMobility(), 2) << " .. "
<< String::number(spec_ranges.getMaxMobility(), 2) << '\n';
}
// Display intensity range
if (spec_ranges.RangeIntensity::isEmpty())
{
os << " intensity: <none> .. <none>\n\n";
}
else
{
os << " intensity: " << String::number(spec_ranges.getMinIntensity(), 2) << " .. "
<< String::number(spec_ranges.getMaxIntensity(), 2) << "\n\n";
}
// 3. Display Spectrum Ranges per MS Level
std::set<UInt> ms_levels = exp.spectrumRanges().getMSLevels();
for (UInt ms_level : ms_levels)
{
os << "MS Level " << ms_level << " Ranges:" << '\n';
const auto& level_ranges = exp.spectrumRanges().byMSLevel(ms_level);
// Output RT range for this MS level
if (level_ranges.RangeRT::isEmpty())
{
os << " retention time: <none> .. <none> sec (<none> min)\n";
}
else
{
os << " retention time: " << String::number(level_ranges.getMinRT(), 2) << " .. "
<< String::number(level_ranges.getMaxRT(), 2) << " sec ("
<< String::number((level_ranges.getMaxRT() - level_ranges.getMinRT()) / 60, 1) << " min)\n";
}
// Display m/z range for this MS level
if (level_ranges.RangeMZ::isEmpty())
{
os << " mass-to-charge: <none> .. <none>\n";
}
else
{
os << " mass-to-charge: " << String::number(level_ranges.getMinMZ(), 2) << " .. "
<< String::number(level_ranges.getMaxMZ(), 2) << '\n';
}
// Display mobility range for this MS level if present
if (level_ranges.RangeMobility::isEmpty())
{
os << " ion mobility: <none> .. <none>\n";
}
else
{
os << " ion mobility: " << String::number(level_ranges.getMinMobility(), 2) << " .. "
<< String::number(level_ranges.getMaxMobility(), 2) << '\n';
}
// Display intensity range for this MS level
if (level_ranges.RangeIntensity::isEmpty())
{
os << " intensity: <none> .. <none>\n\n";
}
else
{
os << " intensity: " << String::number(level_ranges.getMinIntensity(), 2) << " .. "
<< String::number(level_ranges.getMaxIntensity(), 2) << "\n\n";
}
}
// 4. Display Chromatogram Ranges
os << "Chromatogram Ranges:" << '\n';
const auto& chrom_ranges = exp.chromatogramRanges();
if (chrom_ranges.RangeRT::isEmpty())
{
os << " retention time: <none> .. <none> sec (<none> min)\n";
}
else
{
os << " retention time: " << String::number(chrom_ranges.getMinRT(), 2) << " .. "
<< String::number(chrom_ranges.getMaxRT(), 2) << " sec ("
<< String::number((chrom_ranges.getMaxRT() - chrom_ranges.getMinRT()) / 60, 1) << " min)\n";
}
// Display m/z range for chromatograms
if (chrom_ranges.RangeMZ::isEmpty())
{
os << " mass-to-charge: <none> .. <none>\n";
}
else
{
os << " mass-to-charge: " << String::number(chrom_ranges.getMinMZ(), 2) << " .. "
<< String::number(chrom_ranges.getMaxMZ(), 2) << '\n';
}
// Display intensity range for chromatograms
if (chrom_ranges.RangeIntensity::isEmpty())
{
os << " intensity: <none> .. <none>\n\n";
}
else
{
os << " intensity: " << String::number(chrom_ranges.getMinIntensity(), 2) << " .. "
<< String::number(chrom_ranges.getMaxIntensity(), 2) << "\n\n";
}
}
template <class Map>
void writeRangesMachineReadable_(const Map& map, ostream &os)
{
if (!map.RangeRT::isEmpty())
{
os << "general: ranges: retention time: min" << '\t' << String::number(map.getMinRT(), 2) << '\n'
<< "general: ranges: retention time: max" << '\t' << String::number(map.getMaxRT(), 2) << '\n';
}
else
{
os << "general: ranges: retention time: min" << '\t' << "<none>" << '\n'
<< "general: ranges: retention time: max" << '\t' << "<none>" << '\n';
}
if (!map.RangeMZ::isEmpty())
{
os << "general: ranges: mass-to-charge: min" << '\t' << String::number(map.getMinMZ(), 2) << '\n'
<< "general: ranges: mass-to-charge: max" << '\t' << String::number(map.getMaxMZ(), 2) << '\n';
}
else
{
os << "general: ranges: mass-to-charge: min" << '\t' << "<none>" << '\n'
<< "general: ranges: mass-to-charge: max" << '\t' << "<none>" << '\n';
}
if constexpr (std::is_base_of < RangeMobility, Map>())
{
if (!map.RangeMobility::isEmpty())
{
os << "general: ranges: ion-mobility: min" << '\t' << String::number(map.getMinMobility(), 2) << '\n'
<< "general: ranges: ion-mobility: max" << '\t' << String::number(map.getMaxMobility(), 2) << '\n';
}
}
if (!map.RangeIntensity::isEmpty())
{
os << "general: ranges: intensity: min"
<< '\t' << String::number(map.getMinIntensity(), 2) << '\n'
<< "general: ranges: intensity: max"
<< '\t' << String::number(map.getMaxIntensity(), 2) << '\n';
}
else
{
os << "general: ranges: intensity: min" << '\t' << "<none>" << '\n'
<< "general: ranges: intensity: max" << '\t' << "<none>" << '\n';
}
}
void writeRangesMachineReadable_(const MSExperiment& exp, ostream &os)
{
// 1. Combined Ranges
if (!exp.combinedRanges().RangeRT::isEmpty())
{
os << "general: combined ranges: retention time: min" << '\t' << String::number(exp.combinedRanges().getMinRT(), 2) << '\n'
<< "general: combined ranges: retention time: max" << '\t' << String::number(exp.combinedRanges().getMaxRT(), 2) << '\n';
}
else
{
os << "general: combined ranges: retention time: min" << '\t' << "<none>" << '\n'
<< "general: combined ranges: retention time: max" << '\t' << "<none>" << '\n';
}
if (!exp.combinedRanges().RangeMZ::isEmpty())
{
os << "general: combined ranges: mass-to-charge: min" << '\t' << String::number(exp.combinedRanges().getMinMZ(), 2) << '\n'
<< "general: combined ranges: mass-to-charge: max" << '\t' << String::number(exp.combinedRanges().getMaxMZ(), 2) << '\n';
}
else
{
os << "general: combined ranges: mass-to-charge: min" << '\t' << "<none>" << '\n'
<< "general: combined ranges: mass-to-charge: max" << '\t' << "<none>" << '\n';
}
if (!exp.combinedRanges().RangeMobility::isEmpty())
{
os << "general: combined ranges: ion-mobility: min" << '\t' << String::number(exp.combinedRanges().getMinMobility(), 2) << '\n'
<< "general: combined ranges: ion-mobility: max" << '\t' << String::number(exp.combinedRanges().getMaxMobility(), 2) << '\n';
}
if (!exp.combinedRanges().RangeIntensity::isEmpty())
{
os << "general: combined ranges: intensity: min" << '\t' << String::number(exp.combinedRanges().getMinIntensity(), 2) << '\n'
<< "general: combined ranges: intensity: max" << '\t' << String::number(exp.combinedRanges().getMaxIntensity(), 2) << '\n';
}
else
{
os << "general: combined ranges: intensity: min" << '\t' << "<none>" << '\n'
<< "general: combined ranges: intensity: max" << '\t' << "<none>" << '\n';
}
// 2. Spectrum Ranges (overall)
const auto& spec_ranges = exp.spectrumRanges();
if (!spec_ranges.RangeRT::isEmpty())
{
os << "general: spectrum ranges: retention time: min" << '\t' << String::number(spec_ranges.getMinRT(), 2) << '\n'
<< "general: spectrum ranges: retention time: max" << '\t' << String::number(spec_ranges.getMaxRT(), 2) << '\n';
}
else
{
os << "general: spectrum ranges: retention time: min" << '\t' << "<none>" << '\n'
<< "general: spectrum ranges: retention time: max" << '\t' << "<none>" << '\n';
}
// Similar code for m/z, mobility, intensity for spectrum ranges
if (!spec_ranges.RangeMZ::isEmpty())
{
os << "general: spectrum ranges: mass-to-charge: min" << '\t' << String::number(spec_ranges.getMinMZ(), 2) << '\n'
<< "general: spectrum ranges: mass-to-charge: max" << '\t' << String::number(spec_ranges.getMaxMZ(), 2) << '\n';
}
else
{
os << "general: spectrum ranges: mass-to-charge: min" << '\t' << "<none>" << '\n'
<< "general: spectrum ranges: mass-to-charge: max" << '\t' << "<none>" << '\n';
}
if (!spec_ranges.RangeMobility::isEmpty())
{
os << "general: spectrum ranges: ion-mobility: min" << '\t' << String::number(spec_ranges.getMinMobility(), 2) << '\n'
<< "general: spectrum ranges: ion-mobility: max" << '\t' << String::number(spec_ranges.getMaxMobility(), 2) << '\n';
}
if (!spec_ranges.RangeIntensity::isEmpty())
{
os << "general: spectrum ranges: intensity: min" << '\t' << String::number(spec_ranges.getMinIntensity(), 2) << '\n'
<< "general: spectrum ranges: intensity: max" << '\t' << String::number(spec_ranges.getMaxIntensity(), 2) << '\n';
}
else
{
os << "general: spectrum ranges: intensity: min" << '\t' << "<none>" << '\n'
<< "general: spectrum ranges: intensity: max" << '\t' << "<none>" << '\n';
}
// 3. MS Level-specific Ranges
std::set<UInt> ms_levels = exp.spectrumRanges().getMSLevels();
for (UInt ms_level : ms_levels)
{
const auto& level_ranges = exp.spectrumRanges().byMSLevel(ms_level);
if (!level_ranges.RangeRT::isEmpty())
{
os << "general: MS" << ms_level << " ranges: retention time: min" << '\t' << String::number(level_ranges.getMinRT(), 2) << '\n'
<< "general: MS" << ms_level << " ranges: retention time: max" << '\t' << String::number(level_ranges.getMaxRT(), 2) << '\n';
}
else
{
os << "general: MS" << ms_level << " ranges: retention time: min" << '\t' << "<none>" << '\n'
<< "general: MS" << ms_level << " ranges: retention time: max" << '\t' << "<none>" << '\n';
}
// Similar code for other dimensions
if (!level_ranges.RangeMZ::isEmpty())
{
os << "general: MS" << ms_level << " ranges: mass-to-charge: min" << '\t' << String::number(level_ranges.getMinMZ(), 2) << '\n'
<< "general: MS" << ms_level << " ranges: mass-to-charge: max" << '\t' << String::number(level_ranges.getMaxMZ(), 2) << '\n';
}
else
{
os << "general: MS" << ms_level << " ranges: mass-to-charge: min" << '\t' << "<none>" << '\n'
<< "general: MS" << ms_level << " ranges: mass-to-charge: max" << '\t' << "<none>" << '\n';
}
if (!level_ranges.RangeMobility::isEmpty())
{
os << "general: MS" << ms_level << " ranges: ion-mobility: min" << '\t' << String::number(level_ranges.getMinMobility(), 2) << '\n'
<< "general: MS" << ms_level << " ranges: ion-mobility: max" << '\t' << String::number(level_ranges.getMaxMobility(), 2) << '\n';
}
if (!level_ranges.RangeIntensity::isEmpty())
{
os << "general: MS" << ms_level << " ranges: intensity: min" << '\t' << String::number(level_ranges.getMinIntensity(), 2) << '\n'
<< "general: MS" << ms_level << " ranges: intensity: max" << '\t' << String::number(level_ranges.getMaxIntensity(), 2) << '\n';
}
else
{
os << "general: MS" << ms_level << " ranges: intensity: min" << '\t' << "<none>" << '\n'
<< "general: MS" << ms_level << " ranges: intensity: max" << '\t' << "<none>" << '\n';
}
}
// 4. Chromatogram Ranges
const auto& chrom_ranges = exp.chromatogramRanges();
if (!chrom_ranges.RangeRT::isEmpty())
{
os << "general: chromatogram ranges: retention time: min" << '\t' << String::number(chrom_ranges.getMinRT(), 2) << '\n'
<< "general: chromatogram ranges: retention time: max" << '\t' << String::number(chrom_ranges.getMaxRT(), 2) << '\n';
}
else
{
os << "general: chromatogram ranges: retention time: min" << '\t' << "<none>" << '\n'
<< "general: chromatogram ranges: retention time: max" << '\t' << "<none>" << '\n';
}
// Similar code for m/z and intensity for chromatogram ranges
if (!chrom_ranges.RangeMZ::isEmpty())
{
os << "general: chromatogram ranges: mass-to-charge: min" << '\t' << String::number(chrom_ranges.getMinMZ(), 2) << '\n'
<< "general: chromatogram ranges: mass-to-charge: max" << '\t' << String::number(chrom_ranges.getMaxMZ(), 2) << '\n';
}
else
{
os << "general: chromatogram ranges: mass-to-charge: min" << '\t' << "<none>" << '\n'
<< "general: chromatogram ranges: mass-to-charge: max" << '\t' << "<none>" << '\n';
}
if (!chrom_ranges.RangeIntensity::isEmpty())
{
os << "general: chromatogram ranges: intensity: min" << '\t' << String::number(chrom_ranges.getMinIntensity(), 2) << '\n'
<< "general: chromatogram ranges: intensity: max" << '\t' << String::number(chrom_ranges.getMaxIntensity(), 2) << '\n';
}
else
{
os << "general: chromatogram ranges: intensity: min" << '\t' << "<none>" << '\n'
<< "general: chromatogram ranges: intensity: max" << '\t' << "<none>" << '\n';
}
}
template <class T>
void writeSummaryStatisticsMachineReadable_(const Math::SummaryStatistics<T> &stats, ostream &os, String title)
{
os << "statistics: " << title << ": num. of values" << '\t' << stats.count << '\n'
<< "statistics: " << title << ": mean" << '\t' << stats.mean << '\n'
<< "statistics: " << title << ": minimum" << '\t' << stats.min << '\n'
<< "statistics: " << title << ": lower quartile" << '\t' << stats.lowerq << '\n'
<< "statistics: " << title << ": median" << '\t' << stats.median << '\n'
<< "statistics: " << title << ": upper quartile" << '\t' << stats.upperq << '\n'
<< "statistics: " << title << ": maximum" << '\t' << stats.max << '\n'
<< "statistics: " << title << ": variance" << '\t' << stats.variance << '\n';
}
ExitCodes outputTo_(ostream &os, ostream &os_tsv)
{
//-------------------------------------------------------------
// Parameter handling
//-------------------------------------------------------------
// File names
String in = getStringOption_("in");
// File type
FileHandler fh;
FileTypes::Type in_type = FileTypes::nameToType(getStringOption_("in_type"));
if (in_type == FileTypes::UNKNOWN)
{
in_type = FileHandler::getType(in);
writeDebug_(String("Input file type: ") + FileTypes::typeToName(in_type), 2);
}
if (in_type == FileTypes::UNKNOWN)
{
writeLogError_("Error: Could not determine input file type!");
return PARSE_ERROR;
}
os << '\n'
<< "-- General information --"
<< '\n'
<< '\n'
<< "File name: " << in << '\n'
<< "File type: " << FileTypes::typeToName(in_type) << '\n';
os_tsv << "general: file name"
<< '\t' << in << '\n'
<< "general: file type"
<< '\t' << FileTypes::typeToName(in_type) << '\n';
PeakMap exp;
FeatureMap feat;
ConsensusMap cons;
IdData id_data;
//-------------------------------------------------------------
// Validation
//-------------------------------------------------------------
if (getFlag_("v"))
{
bool valid = true;
os << '\n'
<< "Validating " << FileTypes::typeToName(in_type) << " file";
switch (in_type)
{
case FileTypes::MZDATA:
os << " against XML schema version " << MzDataFile().getVersion() << '\n';
valid = MzDataFile().isValid(in, os);
break;
case FileTypes::MZML:
os << " against XML schema version " << MzMLFile().getVersion() << '\n';
valid = MzMLFile().isValid(in, os);
break;
case FileTypes::FEATUREXML:
os << " against XML schema version " << FeatureXMLFile().getVersion() << '\n';
valid = FeatureXMLFile().isValid(in, os);
break;
case FileTypes::IDXML:
os << " against XML schema version " << IdXMLFile().getVersion() << '\n';
valid = IdXMLFile().isValid(in, os);
break;
case FileTypes::MZIDENTML:
os << " against XML schema version " << MzIdentMLFile().getVersion() << '\n';
valid = MzIdentMLFile().isValid(in, os);
break;
case FileTypes::CONSENSUSXML:
os << " against XML schema version " << ConsensusXMLFile().getVersion() << '\n';
valid = ConsensusXMLFile().isValid(in, os);
break;
case FileTypes::MZXML:
os << " against XML schema version " << MzXMLFile().getVersion() << '\n';
valid = MzXMLFile().isValid(in, os);
break;
case FileTypes::PEPXML:
os << " against XML schema version " << PepXMLFile().getVersion() << '\n';
valid = PepXMLFile().isValid(in, os);
break;
case FileTypes::TRANSFORMATIONXML:
os << " against XML schema version " << TransformationXMLFile().getVersion() << '\n';
valid = TransformationXMLFile().isValid(in, os);
break;
default:
os << '\n'
<< "Aborted: Validation of this file type is not supported!"
<< '\n';
return EXECUTION_OK;
}
if (valid)
{
os << "Success - the file is valid!"
<< '\n';
}
else
{
os << "Failed - errors are listed above!"
<< '\n';
}
// semantic validation:
if ((in_type == FileTypes::MZML) || (in_type == FileTypes::MZDATA))
{
if (!valid)
{
os << '\n'
<< "Semantic validation is not performed due to previous errors!"
<< '\n';
}
else
{
os << '\n'
<< "Semantically validating " << FileTypes::typeToName(in_type)
<< " file";
if (in_type == FileTypes::MZDATA)
os << " (EXPERIMENTAL)";
os << ":"
<< '\n';
StringList errors, warnings;
if (in_type == FileTypes::MZML)
{
valid = MzMLFile().isSemanticallyValid(in, errors, warnings);
}
else
{
valid = MzDataFile().isSemanticallyValid(in, errors, warnings);
}
for (Size i = 0; i < warnings.size(); ++i)
{
os << "Warning: " << warnings[i] << '\n';
}
for (Size i = 0; i < errors.size(); ++i)
{
os << "Error: " << errors[i] << '\n';
}
if (valid)
{
os << "Success - the file is semantically valid!"
<< '\n';
}
else
{
os << "Failed - errors are listed above!"
<< '\n';
}
}
}
return EXECUTION_OK;
}
//-------------------------------------------------------------
// Validation of indices
//-------------------------------------------------------------
if (getFlag_("i"))
{
if (in_type != FileTypes::MZML)
{
writeLogError_("Error: Can only validate indices for mzML files");
printUsage_();
return ILLEGAL_PARAMETERS;
}
std::cout << "Checking mzML file for valid indices ... " << std::endl;
Internal::IndexedMzMLHandler ifile;
ifile.openFile(in);
if (ifile.getParsingSuccess())
{
// Validate that we can access each single spectrum and chromatogram
for (int i = 0; i < (int)ifile.getNrSpectra(); i++)
{
OpenMS::Interfaces::SpectrumPtr p = ifile.getSpectrumById(i);
}
for (int i = 0; i < (int)ifile.getNrChromatograms(); i++)
{
OpenMS::Interfaces::ChromatogramPtr p = ifile.getChromatogramById(i);
}
std::cout << "Found a valid indexed mzML XML File with " << ifile.getNrSpectra() << " spectra and " << ifile.getNrChromatograms() << " chromatograms." << std::endl
<< std::endl;
}
else
{
std::cout << "Could not detect a valid index for the mzML file " << in << "\nEither the index is not present or is not correct." << std::endl;
return ILLEGAL_PARAMETERS;
}
}
//-------------------------------------------------------------
// Content statistics
//-------------------------------------------------------------
std::map<String, int> meta_names;
if (in_type == FileTypes::FASTA)
{
vector<FASTAFile::FASTAEntry> entries;
FASTAFile file;
file.setLogType(log_type_);
SysInfo::MemUsage mu;
// loading input
file.load(in, entries);
std::cout << "\n\n" << mu.delta("loading FASTA") << std::endl;
std::map<char, int> residue_counts; // required for default construction of non-existing keys
size_t number_of_residues = 0;
Size dup_header(0);
Size dup_seq(0);
typedef std::unordered_map<size_t, vector<ptrdiff_t> > SHashmap;
SHashmap m_headers;
SHashmap m_seqs;
// Collect sequence lengths for statistics
std::vector<size_t> sequence_lengths;
sequence_lengths.reserve(entries.size());
// lambda to count residues matching given characters in frequency table
auto count_residues = [](const auto& residue_counts, std::string_view which) {
size_t count = 0;
for (char a : which)
{
auto it = residue_counts.find(a);
if (it != residue_counts.end()) { count += it->second; }
}
return count;
};
size_t seq_has_ambiguous = 0;
// Ambiguity codes for amino acids
const std::string_view AA_AMBIGUOUS_BXZJ = "BZXbzxJj"; // B=Asx, Z=Glx, X=unknown, J=Leu/Ile
const std::string_view AA_AMBIGUOUS_BXZ = "BZXbzx";
// IUPAC nucleotide codes (standard + ambiguity codes)
// Standard: A, C, G, T, U; Ambiguity: N (any), R, Y, S, W, K, M, B, D, H, V
const std::string_view NUCLEOTIDE_CHARS = "ACGTUNacgtunRYSWKMBDHVryswkmbdhv";
const std::string_view NA_AMBIGUOUS = "NRYSWKMBDHVnryswkmbdhv";
// Detect if sequences are nucleic acid (vs amino acid)
// If ANY character is not a valid nucleotide code, it's an amino acid sequence
bool is_nucleic_acid = true;
for (const auto& entry : entries)
{
for (char c : entry.sequence)
{
if (NUCLEOTIDE_CHARS.find(c) == std::string_view::npos)
{
is_nucleic_acid = false;
break;
}
}
if (!is_nucleic_acid) break;
}
std::hash<string> s_hash;
for (auto loopiter = entries.begin(); loopiter != entries.end(); ++loopiter)
{
{
size_t id_hash = s_hash(loopiter->identifier);
auto it_id = m_headers.find(id_hash);
if (it_id != m_headers.end())
{ // hash matches ... test the real indices to make sure
vector<ptrdiff_t>::const_iterator iter = find_if(it_id->second.begin(), it_id->second.end(), [&loopiter, &entries](const ptrdiff_t& idx) { return entries[idx].headerMatches(*loopiter); });
if (iter != it_id->second.end())
{
os << "Warning: Duplicate header, #" << std::distance(entries.begin(), loopiter) << ", ID: " << loopiter->identifier << " = #" << *iter << ", ID: " << entries[*iter].identifier << '\n';
++dup_header;
}
}
// add our own hash
m_headers[id_hash] = { std::distance(entries.begin(), loopiter) };
}
{
size_t id_seq = s_hash(loopiter->sequence);
auto it_id = m_seqs.find(id_seq);
if (it_id != m_seqs.end())
{ // hash matches ... test the real indices to make sure
vector<ptrdiff_t>::const_iterator iter = find_if(it_id->second.begin(), it_id->second.end(), [&loopiter, &entries](const ptrdiff_t& idx) { return entries[idx].sequenceMatches(*loopiter); });
if (iter != it_id->second.end())
{
os << "Warning: Duplicate sequence, #" << std::distance(entries.begin(), loopiter) << ", ID: " << loopiter->identifier << " == #" << *iter << ", ID: " << entries[*iter].identifier << '\n';
++dup_seq;
}
}
// add our own hash
m_seqs[id_seq] = { std::distance(entries.begin(), loopiter) };
}
// Collect sequence length for statistics
sequence_lengths.push_back(loopiter->sequence.size());
// Count before to detect if this sequence has ambiguous residues
const auto count_ambig_before = is_nucleic_acid
? count_residues(residue_counts, NA_AMBIGUOUS)
: count_residues(residue_counts, AA_AMBIGUOUS_BXZJ);
// count residues
for (char a : loopiter->sequence)
{
++residue_counts[a];
}
// did this sequence contain ambiguous residues?
const auto count_ambig_after = is_nucleic_acid
? count_residues(residue_counts, NA_AMBIGUOUS)
: count_residues(residue_counts, AA_AMBIGUOUS_BXZJ);
if (count_ambig_before != count_ambig_after)
{
++seq_has_ambiguous;
}
number_of_residues += loopiter->sequence.size();
}
// Labels depend on sequence type
const char* residue_type = is_nucleic_acid ? "nucleotide" : "amino acid";
const char* residue_type_cap = is_nucleic_acid ? "Nucleotide" : "Amino acid";
os << '\n';
os << "Number of sequences : " << entries.size() << '\n';
// Sequence length distribution statistics
if (!sequence_lengths.empty())
{
std::sort(sequence_lengths.begin(), sequence_lengths.end());
size_t len_min = sequence_lengths.front();
size_t len_max = sequence_lengths.back();
double len_median = Math::median(sequence_lengths.begin(), sequence_lengths.end(), true);
double len_q1 = Math::quantile1st(sequence_lengths.begin(), sequence_lengths.end(), true);
double len_q3 = Math::quantile3rd(sequence_lengths.begin(), sequence_lengths.end(), true);
os << "Sequence length distribution:\n";
os << " Minimum : " << len_min << '\n';
os << " 25%ile : " << len_q1 << '\n';
os << " Median : " << len_median << '\n';
os << " 75%ile : " << len_q3 << '\n';
os << " Maximum : " << len_max << '\n';
}
os << "Number of sequences with ambiguous " << residue_type << "s: " << seq_has_ambiguous << " ("
<< Math::percentOf(seq_has_ambiguous, entries.size(), 2) << "%)\n";
os << "# duplicated headers : " << dup_header << " (" << Math::percentOf(dup_header, entries.size(), 2) << "%)\n";
os << "# duplicated sequences: " << dup_seq << " (" << Math::percentOf(dup_seq, entries.size(), 2) << "%) [by exact string matching]\n";
os << "Total " << residue_type << "s : " << number_of_residues << "\n\n";
os << residue_type_cap << " counts:\n";
for (const auto& [residue, count] : residue_counts)
{
os << " " << residue << ":\t" << count << '\n';
}
// Ambiguous residue counts
if (is_nucleic_acid)
{
size_t amb_N = residue_counts['N'] + residue_counts['n'];
size_t amb_all = count_residues(residue_counts, NA_AMBIGUOUS);
os << "Ambiguous nucleotides (N) : " << amb_N << " (" << Math::percentOf(amb_N, number_of_residues, 2) << "%)\n";
os << "All IUPAC ambiguity codes : " << amb_all << " (" << Math::percentOf(amb_all, number_of_residues, 2) << "%)\n\n";
}
else
{
size_t amb = count_residues(residue_counts, AA_AMBIGUOUS_BXZ);
size_t amb_J = count_residues(residue_counts, AA_AMBIGUOUS_BXZJ);
os << "Ambiguous amino acids (B/Z/X) : " << amb << " (" << Math::percentOf(amb, number_of_residues, 2) << "%)\n";
os << " (B/Z/X/J): " << amb_J << " (" << Math::percentOf(amb_J, number_of_residues, 2) << "%)\n\n";
}
}
else if (in_type == FileTypes::FEATUREXML) //features
{
FileHandler ff;
ff.getFeatOptions().setLoadConvexHull(false); // CH's currently not needed here
ff.getFeatOptions().setLoadSubordinates(false); // SO's currently not needed here
SysInfo::MemUsage mu;
// reading input
ff.loadFeatures(in, feat, {FileTypes::FEATUREXML});
std::cout << "\n\n" << mu.delta("loading featureXML") << std::endl;
feat.updateRanges();
os << "Number of features: " << feat.size() << '\n'
<< '\n';
os_tsv << "general: number of features" << '\t'
<< feat.size() << '\n';
writeRangesHumanReadable_(feat, os);
writeRangesMachineReadable_(feat, os_tsv);
// Charge distribution and TIC
std::map<Int, UInt> charges;
std::map<size_t, UInt> numberofids;
double tic = 0.0;
Size assigned_ids = 0;
for (Size i = 0; i < feat.size(); ++i)
{
++charges[feat[i].getCharge()];
tic += feat[i].getIntensity();
const PeptideIdentificationList &peptide_ids = feat[i].getPeptideIdentifications();
++numberofids[peptide_ids.size()];
assigned_ids += peptide_ids.size();
}
os << "Total ion current in features: " << tic << '\n';
os_tsv << "general: total ion current in features" << '\t'
<< tic << '\n';
os << '\n';
printChargeDistribution(feat, [](const Feature& f, Int& q) { q = f.getCharge(); return true;}, os, os_tsv);
os << "Distribution of peptide identifications (IDs) per feature:\n";
for (auto it = numberofids.begin(); it != numberofids.end(); ++it)
{
os << " " << it->first << " IDs: " << it->second << '\n';
os_tsv << "general: distribution of peptide identifications (IDs) per feature: IDs: "
<< it->first << '\t'
<< it->second << '\n';
}
os << '\n'
<< "Assigned peptide identifications: " << assigned_ids << '\n';
os_tsv << "general: assigned peptide identifications" << '\t'
<< assigned_ids << '\n';
os << "Unassigned peptide identifications: " << feat.getUnassignedPeptideIdentifications().size() << '\n';
os_tsv << "general: unassigned peptide identifications" << '\t'
<< feat.getUnassignedPeptideIdentifications().size() << '\n';
}
else if (in_type == FileTypes::CONSENSUSXML) //consensus features
{
SysInfo::MemUsage mu;
// reading input
FileHandler().loadConsensusFeatures(in, cons, {FileTypes::CONSENSUSXML});
std::cout << "\n\n" << mu.delta("loading consensusXML") << std::endl;
cons.updateRanges();
map<Size, UInt> num_consfeat_of_size;
map<Size, UInt> num_consfeat_of_size_with_id;
Size assigned_ids = 0;
map<pair<String, UInt>, vector<int> > seq_charge2map_occurence;
for (const ConsensusFeature& cm : cons)
{
++num_consfeat_of_size[cm.size()];
const auto& pids = cm.getPeptideIdentifications();
assigned_ids += pids.size();
if (!pids.empty())
{
++num_consfeat_of_size_with_id[cm.size()];
// count how often a peptide/charge pair has been observed in the different maps
const vector<PeptideHit>& phits = pids[0].getHits();
if (!phits.empty())
{
const String s = phits[0].getSequence().toString();
const int z = phits[0].getCharge();
if (seq_charge2map_occurence[make_pair(s,z)].empty())
{
seq_charge2map_occurence[make_pair(s,z)] = vector<int>(cons.getColumnHeaders().size(), 0);
}
// assign id to all dimensions in the consensus feature
for (auto const & f : cm.getFeatures())
{
Size map_index = f.getMapIndex();
seq_charge2map_occurence[make_pair(s,z)][map_index] += 1;
}
}
}
}
// now at the level of peptides (different charges and modifications are counted separately)
// to get a number independent of potential alignment/link errors
// Note:
// we determine the size of a consensus feature we would obtain if we would link just be sequence and charge
// and we sum up all sub features for these consensus feature that has at least one id
// (as we assume that the ID is transfered to all sub features)
map<Size, Size> num_aggregated_consfeat_of_size_with_id;
map<Size, Size> num_aggregated_feat_of_size_with_id;
for (auto & a : seq_charge2map_occurence)
{
const vector<int>& occurrences = a.second;
UInt n(0); // dimensions with at least one peptide id assigned
UInt f(0); // number of subfeatures with a least one peptide id assigned
for (int i : occurrences)
{
if (i != 0) ++n;
f += i;
}
num_aggregated_consfeat_of_size_with_id[n] += 1;
num_aggregated_feat_of_size_with_id[n] += f;
}
if (num_consfeat_of_size.empty())
{
os << '\n'
<< "Number of consensus features: 0"
<< '\n';
os << "No consensus features found, map is empty!"
<< "\n\n";
}
else
{
Size field_width = num_consfeat_of_size.rbegin()->first / 10 + 1;
os << '\n'
<< "Number of consensus features:"
<< '\n';
Size number_features{0};
Size number_cons_features_with_id{0};
Size number_features_with_id{0};
for (auto it = num_consfeat_of_size.rbegin(); it != num_consfeat_of_size.rend(); ++it)
{
const Size csize = it->first;
const Size nfeatures = it->first * it->second;
const Size nc_with_id = num_consfeat_of_size_with_id[it->first];
number_features += nfeatures;
number_features_with_id += csize * nc_with_id;
number_cons_features_with_id += nc_with_id;
os << " of size " << setw(field_width) << csize << ": " << it->second
<< "\t (features: " << nfeatures << " )"
<< "\t with at least one ID: " << nc_with_id
<< "\t (features: " << csize * nc_with_id << " )"
<< '\n';
}
auto ci = num_aggregated_consfeat_of_size_with_id.rbegin();
auto fi = num_aggregated_feat_of_size_with_id.rbegin();
for (; ci != num_aggregated_consfeat_of_size_with_id.rend(); ++ci, ++fi)
{
const Size csize = ci->first;
const Size nconsfeatures = ci->second;
const Size nfeatures = fi->second;
os << " peptides (with different mod. and charge) observed in " << setw(field_width) << csize << " maps: " << nconsfeatures
<< "\t (features: " << nfeatures << " )"
<< '\n';
}
os << " total consensus features: " << cons.size()
<< " with at least one ID: " << number_cons_features_with_id << '\n'
<< " total features: " << number_features
<< " with at least one ID: " << string(field_width, ' ') << number_features_with_id
<< '\n';
writeRangesHumanReadable_(cons, os);
writeRangesMachineReadable_(cons, os_tsv);
}
// file descriptions
const ConsensusMap::ColumnHeaders& descs = cons.getColumnHeaders();
if (!descs.empty())
{
os << "File descriptions:"
<< '\n';
for (ConsensusMap::ColumnHeaders::const_iterator it = descs.begin(); it != descs.end(); ++it)
{
os << " " << it->second.filename << ":"
<< '\n'
<< " identifier: " << it->first << '\n'
<< " label: " << it->second.label << '\n'
<< " size: " << it->second.size << '\n';
}
os << '\n';
}
os << "Assigned peptide identifications: " << assigned_ids << '\n';
os_tsv << "general: assigned peptide identifications" << '\t'
<< assigned_ids << '\n';
os << "Unassigned peptide identifications: " << cons.getUnassignedPeptideIdentifications().size() << '\n';
os_tsv << "general: unassigned peptide identifications" << '\t'
<< cons.getUnassignedPeptideIdentifications().size() << '\n';
}
else if (in_type == FileTypes::IDXML || in_type == FileTypes::MZIDENTML) //identifications
{
UInt spectrum_count(0);
Size peptide_hit_count(0);
UInt runs_count(0);
Size protein_hit_count(0);
set<String> peptides;
set<String> peptides_ignore_mods;
set<String> proteins;
Size modified_peptide_count(0);
std::map<String, int> mod_counts;
vector<uint16_t> peptide_length;
// reading input
SysInfo::MemUsage mu;
if (in_type == FileTypes::MZIDENTML)
{
FileHandler().loadIdentifications(in, id_data.proteins, id_data.peptides, {FileTypes::MZIDENTML});
}
else
{
IdXMLFile().load(in, id_data.proteins, id_data.peptides, id_data.identifier);
}
std::cout << "\n\n" << mu.delta("loading idXML") << std::endl;
// export metadata to second output stream
os_tsv << "general: database"
<< '\t' << id_data.proteins[0].getSearchParameters().db << '\n'
<< "general: database version"
<< '\t' << id_data.proteins[0].getSearchParameters().db_version << '\n'
<< "general: taxonomy"
<< '\t' << id_data.proteins[0].getSearchParameters().taxonomy << '\n';
// calculations
Size average_peptide_hits{0}; // average number of hits per spectrum (ignoring the empty ones)
for (Size i = 0; i < id_data.peptides.size(); ++i)
{
if (!id_data.peptides[i].empty())
{
++spectrum_count;
average_peptide_hits += id_data.peptides[i].getHits().size();
peptide_hit_count += id_data.peptides[i].getHits().size();
const vector<PeptideHit> &temp_hits = id_data.peptides[i].getHits();
// collect stats about modifications from TOP HIT!
if (temp_hits[0].getSequence().isModified())
{
++modified_peptide_count;
const AASequence& aa = temp_hits[0].getSequence();
if (aa.hasCTerminalModification())
{
++mod_counts[aa.getCTerminalModificationName()];
}
if (aa.hasNTerminalModification())
{
++mod_counts[aa.getNTerminalModificationName()];
}
for (Size ia = 0; ia < aa.size(); ++ia)
{
if (aa[ia].isModified())
{
++mod_counts[aa[ia].getModification()->getFullId()];
}
}
}
for (Size j = 0; j < temp_hits.size(); ++j)
{
peptides.insert(temp_hits[j].getSequence().toString());
peptides_ignore_mods.insert(temp_hits[j].getSequence().toUnmodifiedString());
peptide_length.push_back((uint16_t)temp_hits[j].getSequence().size());
}
}
}
set<pair<String, String>> search_engines;
for (Size i = 0; i < id_data.proteins.size(); ++i)
{
++runs_count;
protein_hit_count += id_data.proteins[i].getHits().size();
const vector<ProteinHit> &temp_hits = id_data.proteins[i].getHits();
for (Size j = 0; j < temp_hits.size(); ++j)
{
proteins.insert(temp_hits[j].getAccession());
}
// collect all search engines which generated the data
search_engines.emplace(id_data.proteins[i].getSearchEngine(), id_data.proteins[i].getSearchEngineVersion());
}
if (peptide_length.empty())
{ // avoid invalid-range exception when computing mean()
peptide_length.push_back(0);
}
os << "Search Engine(s):\n";
for (const auto& se : search_engines)
{
os << " " << se.first << " (version: " << se.second << ")\n";
}
os << "Number of:"
<< '\n';
os << " runs: " << runs_count << '\n';
os << " protein hits: " << protein_hit_count << '\n';
os << " non-redundant protein hits: " << proteins.size() << '\n';
os << " (only hits that differ in the accession)"
<< '\n';
os << '\n';
os << " matched spectra: " << spectrum_count << '\n';
os << " peptide sequences: " << peptides_ignore_mods.size() << '\n';
os << " PSMs / spectrum (ignoring unidentified spectra): " << average_peptide_hits / std::max(1, (Int)spectrum_count) << '\n';
os << " peptide hits: " << peptide_hit_count << " (avg. length: " << Math::round(Math::mean(peptide_length.begin(), peptide_length.end())) << ")\n";
os << " modified top-hits: " << modified_peptide_count << "/" << spectrum_count << (spectrum_count > 0 ? String(" (") + Math::round(modified_peptide_count * 1000.0 / spectrum_count) / 10 + "%)" : "") << '\n';
os << " non-redundant peptide hits: " << peptides.size() << '\n';
os << " (only hits that differ in sequence and/or modifications)"
<< '\n';
for (std::map<String, int>::const_iterator it = mod_counts.begin(); it != mod_counts.end(); ++it)
{
if (it != mod_counts.begin())
{
os << ", ";
}
else
{
os << " Modification count (top-hits only): ";
}
os << it->first << " " << it->second;
}
for (const auto& se : search_engines)
{
os_tsv << "general: search engine" << '\t' << se.first << '\t' << "(version: " << se.second << ")" << '\n';
}
os_tsv << "general: num. of runs" << '\t' << runs_count << '\n';
os_tsv << "general: num. of protein hits" << '\t' << protein_hit_count << '\n';
os_tsv << "general: num. of non-redundant protein hits (only hits that differ in the accession)"
<< '\t' << proteins.size() << '\n';
os_tsv << "general: num. of matched spectra" << '\t' << spectrum_count << '\n';
os_tsv << "general: num. of peptide hits" << '\t' << peptide_hit_count << '\n';
os_tsv << "general: num. of modified top-hits" << '\t' << modified_peptide_count << '\n';
os_tsv << "general: num. of non-redundant peptide hits (only hits that differ in sequence and/or modifications): "
<< '\t' << peptides.size() << '\n';
}
else if (in_type == FileTypes::PEPXML)
{
os << "\nFor pepXML files, only validation against the XML schema is implemented at this point."
<< '\n';
}
else if (in_type == FileTypes::MZTAB)
{
MzTab mztab;
MzTabFile().load(in, mztab);
os << "mzTab-version: " << mztab.getMetaData().mz_tab_version.get() << '\n'
<< "mzTab-mode: " << mztab.getMetaData().mz_tab_mode.get() << '\n'
<< "mzTab-type: " << mztab.getMetaData().mz_tab_type.get() << '\n'
<< "number of PSMs: " << mztab.getNumberOfPSMs() << '\n'
<< "number of peptides: " << mztab.getPeptideSectionRows().size() << '\n'
<< "number of proteins: " << mztab.getProteinSectionRows().size() << '\n'
<< "number of oligonucleotides: " << mztab.getOligonucleotideSectionRows().size() << '\n'
<< "number of OSMs: " << mztab.getOSMSectionRows().size() << '\n'
<< "number of small molecules: " << mztab.getSmallMoleculeSectionRows().size() << '\n'
<< "number of nucleic acids: " << mztab.getNucleicAcidSectionRows().size() << '\n';
}
else if (in_type == FileTypes::TRANSFORMATIONXML)
{
TransformationDescription trafo;
FileHandler().loadTransformations(in, trafo, true, {FileTypes::TRANSFORMATIONXML});
os << "\nTransformation model: " << trafo.getModelType() << '\n';
trafo.printSummary(os);
}
else if (in_type == FileTypes::PQP)
{
TargetedExperiment targeted_exp;
TransitionPQPFile pqp_reader;
pqp_reader.setLogType(log_type_);
pqp_reader.convertPQPToTargetedExperiment(in.c_str(), targeted_exp, true);
os << targeted_exp.getSummary();
}
else // peaks
{
SysInfo::MemUsage mu;
fh.loadExperiment(in, exp, {in_type}, log_type_, false, false);
// update range information and retrieve which MS levels were recorded
exp.updateRanges();
// report memory consumption
OPENMS_LOG_INFO << "\n\n" << mu.delta("loading MS data") << std::endl;
os << '\n';
os << "Instrument: " << exp.getInstrument().getName() << '\n';
for (const auto& ma : exp.getInstrument().getMassAnalyzers())
{
os << " Mass Analyzer: " << MassAnalyzer::NamesOfAnalyzerType[static_cast<size_t>(ma.getType())] << " (resolution: " << ma.getResolution() << ")\n";
}
os << '\n';
const vector<UInt>& levels = exp.getMSLevels();
os << "MS levels: " << ListUtils::concatenate(levels, ", ") << '\n';
// basic info
os << "Total number of peaks: " << exp.getSize() << '\n'; // count ALL peaks (also chromatographic)
os << "Number of spectra: " << exp.size() << '\n'
<< '\n';
os_tsv << "number of spectra"
<< '\t' << exp.size() << '\n'
<< "total number of peaks"
<< '\t' << exp.getSize() << '\n';
writeRangesHumanReadable_(exp, os);
writeRangesMachineReadable_(exp, os_tsv);
// check if the meta data indicates that this is peak data
// and count how many spectra per MS level there are
map<Size, UInt> level_annotated_picked;
map<Size, UInt> level_estimated_picked;
struct ChAM
{
Size mslevel;
Precursor::ActivationMethod am;
bool operator<(const ChAM& rhs) const
{
return std::tie(mslevel, am) < std::tie(rhs.mslevel, rhs.am);
}
};
map<ChAM, Size> act_method_counts;
map<Size, UInt> counts;
for (const auto& spectrum : exp)
{
const Size level = spectrum.getMSLevel();
++counts[level]; // count MS level
for (auto const& pc : spectrum.getPrecursors())
{
for (auto const& am : pc.getActivationMethods())
{
++act_method_counts[{level, am}];
}
}
// annotate peak type (profile / centroided) from meta data
if (level_annotated_picked.count(level) == 0)
{
level_annotated_picked[level] = static_cast<UInt>(spectrum.getType(false));
}
// estimate peak type once for every level (take a spectrum with enough peaks for stable estimation)
if (level_estimated_picked.count(level) == 0 && spectrum.size() > 10)
{
level_estimated_picked[level] = static_cast<UInt>(PeakTypeEstimator::estimateType(spectrum.begin(), spectrum.end()));
}
}
// output
if (!counts.empty())
{
os << "Number of spectra per MS level:"
<< '\n';
for (auto it = counts.begin(); it != counts.end(); ++it)
{
os << " level " << it->first << ": " << it->second << '\n';
os_tsv << "number of MS" << it->first << " spectra"
<< '\t' << it->second << '\n';
}
os << '\n';
}
// write peak types (centroided / profile mode)
os << "Peak type from metadata (or estimated from data)\n";
for (const auto& l : levels)
{
os << " level " << l << ": "
<< SpectrumSettings::NamesOfSpectrumType[level_annotated_picked[l]] << " ("
<< SpectrumSettings::NamesOfSpectrumType[level_estimated_picked[l]] << ")\n";
os_tsv << "peak type metadata [annotation, estimate]" << '\t' << SpectrumSettings::NamesOfSpectrumType[level_annotated_picked[l]] << '\t' << SpectrumSettings::NamesOfSpectrumType[level_estimated_picked[l]] << '\n';
}
os << '\n';
os << "Activation methods\n";
for (const auto& am : act_method_counts)
{
os << " MS-Level " << am.first.mslevel << " & " << Precursor::NamesOfActivationMethodShort[static_cast<size_t>(am.first.am)] << " (" << Precursor::NamesOfActivationMethod[static_cast<size_t>(am.first.am)] << "): " << am.second << '\n';
os_tsv << "activation methods (mslevel, method, count)" << '\t' << am.first.mslevel << '\t' << Precursor::NamesOfActivationMethodShort[static_cast<size_t>(am.first.am)] << '\t' << am.second << '\n';
}
os << '\n';
printChargeDistribution(exp,
[](const MSSpectrum& spec, int& q) {
for (const auto& pc: spec.getPrecursors())
{
q = pc.getCharge();
return true;
}
return false;
},
os,
os_tsv,
"Precursor charge");
// show meta data array names
for (MSSpectrum& pm : exp)
{
for (Size i = 0; i < pm.getFloatDataArrays().size(); ++i)
{
++meta_names[pm.getFloatDataArrays()[i].getName()];
}
for (Size i = 0; i < pm.getIntegerDataArrays().size(); ++i)
{
++meta_names[pm.getIntegerDataArrays()[i].getName()];
}
for (Size i = 0; i < pm.getStringDataArrays().size(); ++i)
{
++meta_names[pm.getStringDataArrays()[i].getName()];
}
}
if (!meta_names.empty())
{
// nice formatting:
Size max_length = 0;
for (std::map<String, int>::const_iterator it = meta_names.begin(); it != meta_names.end(); ++it)
{
if (it->first.size() > max_length)
{
max_length = it->first.size();
}
}
os << "Meta data array:"
<< '\n';
for (std::map<String, int>::const_iterator it = meta_names.begin(); it != meta_names.end(); ++it)
{
String padding(max_length - it->first.size(), ' ');
os << " " << it->first << ": " << padding << it->second << " spectra"
<< '\n';
}
os << '\n';
}
auto cvs = FAIMSHelper::getCompensationVoltages(exp);
if (!cvs.empty())
{
os << "IM (FAIMS_CV): ";
StringList cvs_sl;
for (double cv : cvs)
{
cvs_sl.push_back(String(cv));
}
os << cvs_sl << "\n\n";
}
// some chromatogram information
if (!exp.getChromatograms().empty())
{
os << "Number of chromatograms: " << exp.getChromatograms().size() << '\n';
os_tsv << "number of chromatograms"
<< '\t' << exp.getChromatograms().size() << '\n';
Size num_chrom_peaks(0);
std::map<ChromatogramSettings::ChromatogramType, Size> chrom_types;
for (const MSChromatogram& ms : exp.getChromatograms())
{
num_chrom_peaks += ms.size();
++chrom_types[ms.getChromatogramType()];
}
os << "Number of chromatographic peaks: " << num_chrom_peaks << '\n'
<< '\n';
os_tsv << "number of chromatographic peaks" << '\t' << num_chrom_peaks << '\n';
os << "Number of chromatograms per type: "
<< '\n';
for (std::map<ChromatogramSettings::ChromatogramType, Size>::const_iterator it = chrom_types.begin(); it != chrom_types.end(); ++it)
{
os << String(" ") + ChromatogramSettings::ChromatogramNames[static_cast<size_t>(it->first)] + ": "
<< it->second << '\n';
}
if (getFlag_("d") && chrom_types.find(ChromatogramSettings::ChromatogramType::SELECTED_REACTION_MONITORING_CHROMATOGRAM) != chrom_types.end())
{
os << '\n'
<< " -- Detailed chromatogram listing -- "
<< '\n';
os << "\nSelected Reaction Monitoring Transitions:"
<< '\n';
os << "Q1 Q3 RT_begin RT_end name comment"
<< '\n';
for (const MSChromatogram& ms : exp.getChromatograms())
{
if (ms.getChromatogramType() == ChromatogramSettings::ChromatogramType::SELECTED_REACTION_MONITORING_CHROMATOGRAM)
{
os << ms.getPrecursor().getMZ() << " " << ms.getProduct().getMZ() << " " << ms.front().getRT() << " " << ms.back().getRT() << " " << ms.getName() << " " << ms.getComment() << '\n';
}
}
}
}
// Detailed listing of scans
if (getFlag_("d") && !exp.empty())
{
os << '\n'
<< "-- Detailed spectrum listing --"
<< '\n';
UInt count = 0;
for (auto const& spectrum : exp)
{
++count;
os << '\n'
<< "Spectrum " << count << ":"
<< '\n'
<< " mslevel: " << spectrum.getMSLevel() << '\n'
<< " scanMode: " << InstrumentSettings::NamesOfScanMode[static_cast<size_t>(spectrum.getInstrumentSettings().getScanMode())] << '\n'
<< " peaks: " << spectrum.size() << '\n'
<< " RT: " << spectrum.getRT() << '\n'
<< " m/z: ";
if (!spectrum.empty())
{
os << spectrum.begin()->getMZ() << " .. " << spectrum.rbegin()->getMZ() << '\n';
}
if (spectrum.getDriftTimeUnit() != DriftTimeUnit::NONE)
{
os << " IM: " << spectrum.getDriftTime() << ' '
<< spectrum.getDriftTimeUnitAsString()
<< '\n';
}
os << "Precursors: " << spectrum.getPrecursors().size() << '\n';
auto pc_count = UInt{0};
for (auto const& pc : spectrum.getPrecursors())
{
os << "Precursor[" << pc_count << "]\n"
<< " charge: " << pc.getCharge() << '\n'
<< " mz: " << pc.getMZ() << '\n'
<< " activation methods: \n";
for (auto const& am : pc.getActivationMethods())
{
os << " " << Precursor::NamesOfActivationMethodShort[static_cast<size_t>(am)] << " (" << Precursor::NamesOfActivationMethod[static_cast<size_t>(am)] << ")\n";
}
os << '\n';
++pc_count;
}
}
}
// Check for corrupt data
if (getFlag_("c"))
{
os << '\n'
<< "-- Checking for corrupt data --"
<< '\n'
<< '\n';
// RTs sorted?
if (!exp.isSorted(false))
{
os << "Error: Spectrum retention times are not sorted in ascending order"
<< '\n';
}
vector<double> ms1_rts;
ms1_rts.reserve(exp.size());
for (Size s = 0; s < exp.size(); ++s)
{
// ms level = 0
if (exp[s].getMSLevel() == 0)
{
os << "Error: MS-level 0 in spectrum (RT: " << exp[s].getRT() << ")"
<< '\n';
}
//scan size = 0
if (exp[s].empty())
{
os << "Warning: No peaks in spectrum (RT: " << exp[s].getRT() << ")"
<< '\n';
}
//duplicate meta data array names
std::map<String, int> names;
for (Size m = 0; m < exp[s].getFloatDataArrays().size(); ++m)
{
String name = exp[s].getFloatDataArrays()[m].getName();
if (names.find(name) != names.end())
{
os << "Error: Duplicate meta data array name '" << name << "' in spectrum (RT: " << exp[s].getRT() << ")"
<< '\n';
}
else
{
names[name] = 0;
}
}
for (Size m = 0; m < exp[s].getIntegerDataArrays().size(); ++m)
{
String name = exp[s].getIntegerDataArrays()[m].getName();
if (names.find(name) != names.end())
{
os << "Error: Duplicate meta data array name '" << name << "' in spectrum (RT: " << exp[s].getRT() << ")"
<< '\n';
}
else
{
names[name] = 0;
}
}
for (Size m = 0; m < exp[s].getStringDataArrays().size(); ++m)
{
String name = exp[s].getStringDataArrays()[m].getName();
if (names.find(name) != names.end())
{
os << "Error: Duplicate meta data array name '" << name << "' in spectrum (RT: " << exp[s].getRT() << ")"
<< '\n';
}
else
{
names[name] = 0;
}
}
//duplicate scans (part 1)
if (exp[s].getMSLevel() == 1)
{
ms1_rts.push_back(exp[s].getRT());
}
}
//duplicate scans (part 2)
sort(ms1_rts.begin(), ms1_rts.end());
for (Size i = 1; i < ms1_rts.size(); ++i)
{
if (ms1_rts[i - 1] == ms1_rts[i])
os << "Error: Duplicate spectrum retention time: " << ms1_rts[i] << '\n';
}
//check peaks
for (Size s = 0; s < exp.size(); ++s)
{
//peaks sorted?
if (!exp[s].isSorted())
{
os << "Error: Peak m/z positions are not sorted in ascending order in spectrum (RT: " << exp[s].getRT() << ")"
<< '\n';
}
vector<double> mzs;
mzs.reserve(exp[s].size());
for (Size p = 0; p < exp[s].size(); ++p)
{
//negative intensity
if (exp[s][p].getIntensity() < 0.0)
{
os << "Warning: Negative peak intensity peak (RT: " << exp[s].getRT() << " MZ: " << exp[s][p].getMZ() << " intensity: " << exp[s][p].getIntensity() << ")"
<< '\n';
}
//duplicate m/z (part 1)
mzs.push_back(exp[s][p].getMZ());
}
//duplicate m/z (part 2)
sort(mzs.begin(), mzs.end());
for (Size i = 1; i < mzs.size(); ++i)
{
if (mzs[i - 1] == mzs[i])
os << "Error: Duplicate peak m/z " << mzs[i] << " in spectrum (RT: " << exp[s].getRT() << ")"
<< '\n';
}
}
}
}
//-------------------------------------------------------------
// meta information
//-------------------------------------------------------------
if (getFlag_("m") || !getStringOption_("out_tsv").empty())
{
//basic info
os << '\n'
<< "-- Meta information --"
<< '\n'
<< '\n';
if (in_type == FileTypes::FEATUREXML) //features
{
os << "Document ID: " << feat.getIdentifier() << '\n'
<< '\n';
os_tsv << "meta: document ID" << '\t'
<< feat.getIdentifier() << '\n';
}
else if (in_type == FileTypes::CONSENSUSXML) //consensus features
{
os << "Document ID: " << cons.getIdentifier() << '\n'
<< '\n';
}
else if (in_type == FileTypes::IDXML) //identifications
{
os << "Document ID: " << id_data.identifier << '\n'
<< '\n';
os_tsv << "meta: document ID" << '\t'
<< id_data.identifier;
}
else if (in_type == FileTypes::PEPXML)
{
// TODO
}
else if (in_type == FileTypes::FASTA)
{
// TODO
}
else //peaks
{
os << "Document ID: " << exp.getIdentifier() << '\n'
<< "Date: " << exp.getDateTime().get() << '\n';
os_tsv << "document id"
<< '\t' << exp.getIdentifier() << '\n'
<< "date"
<< '\t' << exp.getDateTime().get() << '\n';
//basic info
os << '\n'
<< "Sample:"
<< '\n'
<< " name: " << exp.getSample().getName() << '\n'
<< " organism: " << exp.getSample().getOrganism() << '\n'
<< " comment: " << exp.getSample().getComment() << '\n';
os_tsv << "sample name"
<< '\t' << exp.getSample().getName() << '\n'
<< "sample organism"
<< '\t' << exp.getSample().getOrganism() << '\n'
<< "sample comment"
<< '\t' << exp.getSample().getComment() << '\n';
//instrument info
os << '\n'
<< "Instrument:"
<< '\n'
<< " name: " << exp.getInstrument().getName() << '\n'
<< " model: " << exp.getInstrument().getModel() << '\n'
<< " vendor: " << exp.getInstrument().getVendor() << '\n'
<< " ion source(s): ";
os_tsv << "instrument name"
<< '\t' << exp.getInstrument().getName() << '\n'
<< "instrument model"
<< '\t' << exp.getInstrument().getModel() << '\n'
<< "instrument vendor"
<< '\t' << exp.getInstrument().getVendor() << '\n';
for (Size i = 0; i < exp.getInstrument().getIonSources().size(); ++i)
{
os << IonSource::NamesOfIonizationMethod[static_cast<size_t>(exp.getInstrument().getIonSources()[i].getIonizationMethod())];
if (i != exp.getInstrument().getIonSources().size() - 1)
{
os << ", ";
}
}
os << '\n'
<< " mass analyzer(s): ";
for (Size i = 0; i < exp.getInstrument().getMassAnalyzers().size(); ++i)
{
os << MassAnalyzer::NamesOfAnalyzerType[static_cast<size_t>(exp.getInstrument().getMassAnalyzers()[i].getType())];
if (i != exp.getInstrument().getMassAnalyzers().size() - 1)
{
os << ", ";
}
}
os << '\n'
<< " detector(s): ";
for (Size i = 0; i < exp.getInstrument().getIonDetectors().size(); ++i)
{
os << IonDetector::NamesOfType[static_cast<size_t>(exp.getInstrument().getIonDetectors()[i].getType())];
if (i != exp.getInstrument().getIonDetectors().size() - 1)
os << ", ";
}
os << '\n'
<< '\n';
//contact persons
for (Size i = 0; i < exp.getContacts().size(); ++i)
{
os << "Contact person:"
<< '\n'
<< " first name: " << exp.getContacts()[i].getFirstName() << '\n'
<< " last name: " << exp.getContacts()[i].getLastName() << '\n'
<< " email: " << exp.getContacts()[i].getEmail() << '\n'
<< '\n';
}
}
}
//-------------------------------------------------------------
// data processing
//-------------------------------------------------------------
if (getFlag_("p"))
{
//basic info
os << '\n'
<< "-- Data processing information --"
<< '\n'
<< '\n';
//get data processing info
vector<DataProcessing> dp;
if (in_type == FileTypes::FEATUREXML) //features
{
dp = feat.getDataProcessing();
}
else if (in_type == FileTypes::CONSENSUSXML) //consensus features
{
dp = cons.getDataProcessing();
}
else if (in_type == FileTypes::IDXML) //identifications
{
}
else if (in_type == FileTypes::PEPXML)
{
// TODO
}
else if (in_type == FileTypes::FASTA)
{
}
else //peaks
{
if (!exp.empty())
{
os << "Note: The data is taken from the first spectrum!"
<< '\n'
<< '\n';
for (Size i = 0; i < exp[0].getDataProcessing().size(); i++)
{
dp.push_back(*exp[0].getDataProcessing()[i].get());
}
}
}
//print data
if (dp.empty())
{
os << "No information about data processing available!"
<< '\n'
<< '\n';
}
else
{
for (Size i = 0; i < dp.size(); ++i)
{
os << "Processing " << (i + 1) << ":"
<< '\n';
os << " software name: " << dp[i].getSoftware().getName() << '\n';
os << " software version: " << dp[i].getSoftware().getVersion() << '\n';
os << " completion time: " << dp[i].getCompletionTime().get() << '\n';
os_tsv << "data processing: " << (i + 1)
<< ": software name" << '\t'
<< dp[i].getSoftware().getName() << '\n';
os_tsv << "data processing: " << (i + 1)
<< ": software version" << '\t'
<< dp[i].getSoftware().getVersion() << '\n';
os_tsv << "data processing: " << (i + 1)
<< ": completion time" << '\t'
<< dp[i].getCompletionTime().get() << '\n';
os << " actions: ";
os_tsv << "data processing: " << (i + 1)
<< ": actions" << '\t';
for (set<DataProcessing::ProcessingAction>::const_iterator it = dp[i].getProcessingActions().begin();
it != dp[i].getProcessingActions().end(); ++it)
{
if (it != dp[i].getProcessingActions().begin())
{
os << ", ";
os_tsv << ", ";
}
os << DataProcessing::NamesOfProcessingAction[*it];
os_tsv << DataProcessing::NamesOfProcessingAction[*it];
}
os << '\n'
<< '\n';
os_tsv << '\n';
}
}
}
//-------------------------------------------------------------
// statistics
//-------------------------------------------------------------
if (getFlag_("s"))
{
os << '\n'
<< "-- Statistics --"
<< '\n'
<< '\n';
if (in_type == FileTypes::FEATUREXML) //features
{
Size size = feat.size();
vector<double> intensities(size);
vector<double> overall_qualities(size);
vector<double> mz_qualities(size);
vector<double> rt_qualities(size);
vector<double> peak_widths(size);
Size idx = 0;
for (const Feature& fm : feat)
{
intensities[idx] = fm.getIntensity();
overall_qualities[idx] = fm.getOverallQuality();
rt_qualities[idx] = fm.getQuality(Feature::RT);
mz_qualities[idx] = fm.getQuality(Feature::MZ);
peak_widths[idx] = fm.getWidth();
++idx;
}
Math::SummaryStatistics<vector<double>> intensities_summary;
intensities_summary = Math::SummaryStatistics<vector<double>>(intensities);
os.precision(writtenDigits<>(Feature::IntensityType()));
os << "Intensities:" << '\n' << intensities_summary << '\n';
os_tsv.precision(writtenDigits<>(Feature::IntensityType()));
writeSummaryStatisticsMachineReadable_(intensities_summary, os_tsv, "intensities");
Math::SummaryStatistics<vector<double>> peak_widths_summary;
peak_widths_summary = Math::SummaryStatistics<vector<double>>(peak_widths);
os.precision(writtenDigits<>(Feature::QualityType()));
os << "Feature FWHM in RT dimension:" << '\n' << peak_widths_summary << '\n';
os_tsv.precision(writtenDigits<>(Feature::QualityType()));
writeSummaryStatisticsMachineReadable_(peak_widths_summary, os_tsv, "feature FWHM in RT dimension");
Math::SummaryStatistics<vector<double>> overall_qualities_summary;
overall_qualities_summary = Math::SummaryStatistics<vector<double>>(overall_qualities);
os.precision(writtenDigits<>(Feature::QualityType()));
os << "Overall qualities:" << '\n' << overall_qualities_summary << '\n';
os_tsv.precision(writtenDigits<>(Feature::QualityType()));
writeSummaryStatisticsMachineReadable_(overall_qualities_summary, os_tsv, "overall qualities");
Math::SummaryStatistics<vector<double>> rt_qualities_summary;
rt_qualities_summary = Math::SummaryStatistics<vector<double>>(rt_qualities);
os.precision(writtenDigits<>(Feature::QualityType()));
os << "Qualities in retention time dimension:" << '\n' << rt_qualities_summary << '\n';
os_tsv.precision(writtenDigits<>(Feature::QualityType()));
writeSummaryStatisticsMachineReadable_(rt_qualities_summary, os_tsv, "qualities in retention time dimension");
Math::SummaryStatistics<vector<double>> mz_qualities_summary;
mz_qualities_summary = Math::SummaryStatistics<vector<double>>(mz_qualities);
os.precision(writtenDigits<>(Feature::QualityType()));
os << "Qualities in mass-to-charge dimension:" << '\n' << mz_qualities_summary << '\n';
os_tsv.precision(writtenDigits<>(Feature::QualityType()));
writeSummaryStatisticsMachineReadable_(mz_qualities_summary, os_tsv, "qualities in mass-to-charge dimension");
}
else if (in_type == FileTypes::CONSENSUSXML) //consensus features
{
Size size = cons.size();
vector<double> intensities;
intensities.reserve(size);
vector<double> qualities(size);
qualities.reserve(size);
vector<double> widths(size);
widths.reserve(size);
vector<double> rt_delta_by_elems;
vector<double> rt_aad_by_elems;
vector<double> rt_aad_by_cfs;
rt_aad_by_cfs.reserve(size);
vector<double> mz_delta_by_elems;
vector<double> mz_aad_by_elems;
vector<double> mz_aad_by_cfs;
mz_aad_by_cfs.reserve(size);
vector<double> it_delta_by_elems;
vector<double> it_aad_by_elems;
vector<double> it_aad_by_cfs;
it_aad_by_cfs.reserve(size);
for (const ConsensusFeature& cm : cons)
{
double rt_aad = 0;
double mz_aad = 0;
double it_aad = 0;
intensities.push_back(cm.getIntensity());
qualities.push_back(cm.getQuality());
widths.push_back(cm.getWidth());
for (ConsensusFeature::HandleSetType::const_iterator hs_iter = cm.begin();
hs_iter != cm.end(); ++hs_iter)
{
double rt_diff = hs_iter->getRT() - cm.getRT();
rt_delta_by_elems.push_back(rt_diff);
if (rt_diff < 0)
{
rt_diff = -rt_diff;
}
rt_aad_by_elems.push_back(rt_diff);
rt_aad += rt_diff;
double mz_diff = hs_iter->getMZ() - cm.getMZ();
mz_delta_by_elems.push_back(mz_diff);
if (mz_diff < 0)
{
mz_diff = -mz_diff;
}
mz_aad_by_elems.push_back(mz_diff);
mz_aad += mz_diff;
double it_ratio = hs_iter->getIntensity() / (cm.getIntensity() > 0 ? cm.getIntensity() : 1.);
it_delta_by_elems.push_back(it_ratio);
if (it_ratio < 1.)
{
it_ratio = 1. / it_ratio;
}
it_aad_by_elems.push_back(it_ratio);
it_aad += it_ratio;
}
if (!cm.empty())
{
rt_aad /= cm.size();
mz_aad /= cm.size();
it_aad /= cm.size();
} // otherwise rt_aad etc. are 0 anyway
rt_aad_by_cfs.push_back(rt_aad);
mz_aad_by_cfs.push_back(mz_aad);
it_aad_by_cfs.push_back(it_aad);
}
os.precision(writtenDigits(ConsensusFeature::IntensityType()));
os << "Intensities of consensus features:"
<< '\n'
<< Math::SummaryStatistics<vector<double>>(intensities) << '\n';
os.precision(writtenDigits(ConsensusFeature::QualityType()));
os << "Qualities of consensus features:"
<< '\n'
<< Math::SummaryStatistics<vector<double>>(qualities) << '\n';
os.precision(writtenDigits(ConsensusFeature::CoordinateType()));
os << "Retention time differences (\"element - center\", weight 1 per element):"
<< '\n'
<< Math::SummaryStatistics<vector<double>>(rt_delta_by_elems) << '\n';
os << "Absolute retention time differences (\"|element - center|\", weight 1 per element):"
<< '\n'
<< Math::SummaryStatistics<vector<double>>(rt_aad_by_elems) << '\n';
os << "Average absolute differences of retention time within consensus features (\"|element - center|\", weight 1 per consensus features):"
<< '\n'
<< Math::SummaryStatistics<vector<double>>(rt_aad_by_cfs) << '\n';
os.precision(writtenDigits(ConsensusFeature::CoordinateType()));
os << "Mass-to-charge differences (\"element - center\", weight 1 per element):"
<< '\n'
<< Math::SummaryStatistics<vector<double>>(mz_delta_by_elems) << '\n';
os << "Absolute differences of mass-to-charge (\"|element - center|\", weight 1 per element):"
<< '\n'
<< Math::SummaryStatistics<vector<double>>(mz_aad_by_elems) << '\n';
os << "Average absolute differences of mass-to-charge within consensus features (\"|element - center|\", weight 1 per consensus features):"
<< '\n'
<< Math::SummaryStatistics<vector<double>>(mz_aad_by_cfs) << '\n';
os.precision(writtenDigits(ConsensusFeature::IntensityType()));
os << "Intensity ratios (\"element / center\", weight 1 per element):"
<< '\n'
<< Math::SummaryStatistics<vector<double>>(it_delta_by_elems) << '\n';
os << "Relative intensity error (\"max{(element / center), (center / element)}\", weight 1 per element):"
<< '\n'
<< Math::SummaryStatistics<vector<double>>(it_aad_by_elems) << '\n';
os << "Average relative intensity error within consensus features (\"max{(element / center), (center / element)}\", weight 1 per consensus features):"
<< '\n'
<< Math::SummaryStatistics<vector<double>>(it_aad_by_cfs) << '\n';
}
else if (in_type == FileTypes::IDXML) //identifications
{
//TODO
}
else if (in_type == FileTypes::FASTA)
{
}
else if (in_type == FileTypes::PEPXML)
{
// TODO
}
else //peaks
{
//copy intensities of MS-level 1 peaks
exp.updateRanges();
Size size = exp.getSize();
vector<double> intensities;
intensities.reserve(size);
for (PeakMap::const_iterator spec = exp.begin(); spec != exp.end(); ++spec)
{
if (spec->getMSLevel() != 1)
{
continue;
}
for (PeakMap::SpectrumType::const_iterator it = spec->begin(); it != spec->end(); ++it)
{
intensities.push_back(it->getIntensity());
}
}
sort(intensities.begin(), intensities.end());
os.precision(writtenDigits(Peak1D::IntensityType()));
os << "Intensities:"
<< '\n'
<< Math::SummaryStatistics<vector<double>>(intensities) << '\n';
//Statistics for meta information
for (std::map<String, int>::const_iterator it = meta_names.begin(); it != meta_names.end(); ++it)
{
String name = it->first;
vector<double> m_values;
for (PeakMap::const_iterator spec = exp.begin(); spec != exp.end(); ++spec)
{
for (Size meta = 0; meta < spec->getFloatDataArrays().size(); ++meta)
{
if (spec->getFloatDataArrays()[meta].getName() != name)
continue;
for (Size peak = 0; peak < spec->getFloatDataArrays()[meta].size(); ++peak)
{
m_values.push_back(spec->getFloatDataArrays()[meta][peak]);
}
}
for (Size meta = 0; meta < spec->getIntegerDataArrays().size(); ++meta)
{
if (spec->getIntegerDataArrays()[meta].getName() != name)
{
continue;
}
for (Size peak = 0; peak < spec->getIntegerDataArrays()[meta].size(); ++peak)
{
m_values.push_back(spec->getIntegerDataArrays()[meta][peak]);
}
}
}
os << "Meta data: " << name << '\n'
<< Math::SummaryStatistics<vector<double>>(m_values) << '\n';
}
}
}
os << '\n'
<< '\n';
return EXECUTION_OK;
}
ExitCodes main_(int, const char **) override
{
String out = getStringOption_("out");
String out_tsv = getStringOption_("out_tsv");
ofstream os;
ofstream os_tsv;
boost::iostreams::filtering_ostream os_filt;
boost::iostreams::filtering_ostream os_tsv_filt;
if (out.empty())
{
os_filt.push(getGlobalLogInfo());
}
else
{
os.open(out);
if (!os)
{
throw Exception::FileNotWritable(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, out);
}
os_filt.push(os);
}
if (out_tsv.empty())
{
os_tsv_filt.push(boost::iostreams::null_sink());
}
else
{
os_tsv.open(out_tsv);
if (!os_tsv)
{
throw Exception::FileNotWritable(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, out_tsv);
}
os_tsv_filt.push(os_tsv);
}
return outputTo_(os_filt, os_tsv_filt);
}
};
int main(int argc, const char **argv)
{
TOPPFileInfo tool;
return tool.main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/MapStatistics.cpp | .cpp | 18,763 | 505 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Oliver Kohlbacher $
// $Authors: Oliver Kohlbacher $
// --------------------------------------------------------------------------
#include <OpenMS/config.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FORMAT/FileTypes.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/DATASTRUCTURES/StringListUtils.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <QtCore/QString>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <iomanip> // setw
#include <map>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
// Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_MapStatistics MapStatistics
@brief Extract extended statistics on the features of a map for quality control.
<center>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=2> → MapStatistics →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> FeatureFinder, FeatureMatcher</td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> - </td>
</tr>
</table>
</center>
This tool computes some basic statistics on the features of a map
that are frequently used for quality control.
Information displayed includes:
- show information about the data range of a file (m/z, RT, intensity)
- show a statistical summary for intensities, qualities, feature widths
- break down the statistics for fractions of the map
- total ion current included in the features as a function of RT
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_MapStatistics.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_MapStatistics.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
namespace OpenMS
{
  /// Append the seven summary values (mean, stddev, min, max, median,
  /// lower quartile, upper quartile) of @p stats to @p result.
  template<class T>
  static vector<double>& operator<<(vector<double>& result, const Math::SummaryStatistics<T>& stats)
  {
    result.insert(result.end(),
                  {stats.mean,
                   sqrt(stats.variance),
                   stats.min,
                   stats.max,
                   stats.median,
                   stats.lowerq,
                   stats.upperq});
    return result;
  }

  /// Pretty-print a summary (mean, stddev, median, min, max), one value per line.
  template<class T>
  static ostream& operator<<(ostream& os, const Math::SummaryStatistics<T>& rhs)
  {
    os << " mean: " << rhs.mean << endl;
    os << " stddev: " << sqrt(rhs.variance) << endl;
    os << " median: " << rhs.median << endl;
    os << " min: " << rhs.min << endl;
    os << " max: " << rhs.max << endl;
    return os;
  }
}
/// TOPP tool that reports quality-control statistics for feature and
/// consensus maps (data ranges, charge distribution, per-slice summaries).
class TOPPMapStatistics :
  public TOPPBase
{
public:
  TOPPMapStatistics() :
    TOPPBase("MapStatistics", "Extract extended statistics on the features of a map for quality control.")
  {
  }

  /// Compute statistics for the features map[begin, end).
  /// @return 43 values: the TIC followed by six 7-number summaries
  ///         (mean, stddev, min, max, median, lower/upper quartile) for
  ///         intensity, m/z, width, overall quality, RT quality and m/z quality.
  vector<double> sliceStatistics(const FeatureMap& map, Size begin, Size end) const
  {
    // For an empty or invalid slice, return a zero-filled row (43 zeros, NOT
    // an empty vector) so the column count of the output table stays constant.
    if (end <= begin || end > map.size())
    {
      return vector<double>(43);
    }
    Size size = end - begin;
    vector<double> intensities(size);
    vector<double> peak_widths(size);
    vector<double> mz(size);
    vector<double> overall_qualities(size);
    vector<double> mz_qualities(size);
    vector<double> rt_qualities(size);
    double tic = 0.0;
    for (Size i = begin; i < end; ++i)
    {
      intensities[i - begin] = map[i].getIntensity();
      mz[i - begin] = map[i].getMZ();
      peak_widths[i - begin] = map[i].getWidth();
      rt_qualities[i - begin] = map[i].getQuality(Feature::RT);
      mz_qualities[i - begin] = map[i].getQuality(Feature::MZ);
      overall_qualities[i - begin] = map[i].getOverallQuality();
      tic += map[i].getIntensity();
    }
    vector<double> results;
    results.reserve(43); // 6 7-number stats + tic
    results.push_back(tic);
    results << Math::SummaryStatistics< vector<double> >(intensities);
    results << Math::SummaryStatistics< vector<double> >(mz);
    results << Math::SummaryStatistics< vector<double> >(peak_widths);
    results << Math::SummaryStatistics< vector<double> >(overall_qualities);
    results << Math::SummaryStatistics< vector<double> >(rt_qualities);
    results << Math::SummaryStatistics< vector<double> >(mz_qualities);
    return results;
  }

protected:
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Input file");
    setValidFormats_("in", ListUtils::create<String>("featureXML,consensusXML"));
    registerStringOption_("in_type", "<type>", "", "Input file type -- default: determined from file extension or content", false);
    setValidStrings_("in_type", ListUtils::create<String>("featureXML,consensusXML"));
    registerOutputFile_("out", "<file>", "", "Optional output txt file. If empty, the output is written to the command line.", false);
    setValidFormats_("out", ListUtils::create<String>("txt"));
    registerIntOption_("n", "<n>", 4, // 4 slices is the default
                       "Report separate statistics for each of n RT slices of the map.",
                       false, false);
    setMinInt_("n", 1);
    setMaxInt_("n", 100);
    registerFlag_("m", "Show meta information about the whole experiment");
    registerFlag_("p", "Shows data processing information");
    registerFlag_("s", "Computes a summary statistics of intensities, qualities, and widths");
  }

  /// Write all requested reports for the input map to @p os.
  ExitCodes outputTo(ostream& os)
  {
    //-------------------------------------------------------------
    // Parameter handling
    //-------------------------------------------------------------

    // File names
    String in = getStringOption_("in");

    // File type: take the user-supplied type, fall back to auto-detection
    FileHandler fh;
    FileTypes::Type in_type = FileTypes::nameToType(getStringOption_("in_type"));
    if (in_type == FileTypes::UNKNOWN)
    {
      in_type = fh.getType(in);
      writeDebug_(String("Input file type: ") + FileTypes::typeToName(in_type), 2);
    }
    if (in_type == FileTypes::UNKNOWN)
    {
      writeLogError_("Error: Could not determine input file type!");
      return PARSE_ERROR;
    }

    FeatureMap feat;
    ConsensusMap cons;
    if (in_type == FileTypes::FEATUREXML) //features
    {
      FileHandler().loadFeatures(in, feat, {FileTypes::FEATUREXML});
      feat.updateRanges();
    }
    else if (in_type == FileTypes::CONSENSUSXML) //consensus features
    {
      FileHandler().loadConsensusFeatures(in, cons, {FileTypes::CONSENSUSXML});
      cons.updateRanges();
    }

    //-------------------------------------------------------------
    // meta information
    //-------------------------------------------------------------
    if (getFlag_("m"))
    {
      os << endl
         << "-- General information --" << endl
         << endl
         << "file name: " << in << endl
         << "file type: " << FileTypes::typeToName(in_type) << endl;

      //basic info
      os << endl
         << "-- Meta information --" << endl
         << endl;

      if (in_type == FileTypes::FEATUREXML) //features
      {
        os << "Document id : " << feat.getIdentifier() << endl << endl;
      }
      else if (in_type == FileTypes::CONSENSUSXML) //consensus features
      {
        os << "Document id : " << cons.getIdentifier() << endl << endl;
      }
    }

    //-------------------------------------------------------------
    // data processing
    //-------------------------------------------------------------
    if (getFlag_("p"))
    {
      //basic info
      os << endl
         << "-- Data processing information --" << endl
         << endl;

      //get data processing info
      vector<DataProcessing> dp;
      if (in_type == FileTypes::FEATUREXML) //features
      {
        dp = feat.getDataProcessing();
      }
      else if (in_type == FileTypes::CONSENSUSXML) //consensus features
      {
        dp = cons.getDataProcessing();
      }
      int i = 0;
      for (const DataProcessing& data : dp)
      {
        os << "Data processing " << i << endl;
        os << "\tcompletion_time: " << data.getCompletionTime().getDate() << 'T' << data.getCompletionTime().getTime() << endl;
        os << "\tsoftware name: " << data.getSoftware().getName() << " version " << data.getSoftware().getVersion() << endl;
        for (const DataProcessing::ProcessingAction& pa : data.getProcessingActions())
        {
          os << "\t\tprocessing action: " << DataProcessing::NamesOfProcessingAction[pa] << endl;
        }
        i++;
      }
    }

    //-------------------------------------------------------------
    // statistics
    //-------------------------------------------------------------
    if (getFlag_("s"))
    {
      //-------------------------------------------------------------
      // Content statistics
      //-------------------------------------------------------------
      if (in_type == FileTypes::FEATUREXML) //features
      {
        os << "Number of features: " << feat.size() << endl
           << endl
           << "Ranges:" << endl
           << "  retention time:  " << String::number(feat.getMinRT(), 2) << " : " << String::number(feat.getMaxRT(), 2) << endl
           << "  mass-to-charge:  " << String::number(feat.getMinMZ(), 2) << " : " << String::number(feat.getMaxMZ(), 2) << endl
           << "  intensity:       " << String::number(feat.getMinIntensity(), 2) << " : " << String::number(feat.getMaxIntensity(), 2) << endl
           << endl;

        // Charge distribution
        std::map<UInt, UInt> charges;
        for (Size i = 0; i < feat.size(); ++i)
        {
          charges[feat[i].getCharge()]++;
        }
        os << "Charge distribution" << endl;
        for (std::map<UInt, UInt>::const_iterator it = charges.begin();
             it != charges.end(); ++it)
        {
          os << "charge " << it->first << ": " << it->second << endl;
        }
      }
      else if (in_type == FileTypes::CONSENSUSXML) //consensus features
      {
        map<Size, UInt> num_consfeat_of_size;
        for (ConsensusMap::const_iterator cmit = cons.begin();
             cmit != cons.end(); ++cmit)
        {
          ++num_consfeat_of_size[cmit->size()];
        }

        os << endl << "Number of consensus features:" << endl;
        for (map<Size, UInt>::reverse_iterator i = num_consfeat_of_size.rbegin(); i != num_consfeat_of_size.rend(); ++i)
        {
          os << "  of size " << setw(2) << i->first << ": " << setw(6) << i->second << endl;
        }
        os << "  total:      " << setw(6) << cons.size() << endl << endl;

        os << "Ranges:" << endl
           << "  retention time:  " << String::number(cons.getMinRT(), 2) << " : " << String::number(cons.getMaxRT(), 2) << endl
           << "  mass-to-charge:  " << String::number(cons.getMinMZ(), 2) << " : " << String::number(cons.getMaxMZ(), 2) << endl
           << "  intensity:       " << String::number(cons.getMinIntensity(), 2) << " : " << String::number(cons.getMaxIntensity(), 2) << endl;

        // file descriptions
        const ConsensusMap::ColumnHeaders& descs = cons.getColumnHeaders();
        if (!descs.empty())
        {
          os << endl <<
            "File descriptions:" << endl;
          for (ConsensusMap::ColumnHeaders::const_iterator it = descs.begin(); it != descs.end(); ++it)
          {
            os << "  - " << it->second.filename << endl
               << "    identifier: " << it->first << endl
               << "    label     : " << it->second.label << endl
               << "    size      : " << it->second.size << endl;
          }
        }
      }
      os << endl
         << "-- Summary Statistics --" << endl
         << endl;
    }

    if (in_type == FileTypes::FEATUREXML) //features
    {
      feat.sortByRT();

      vector<double> slice_stats;
      Size n = getIntOption_("n");

      Size begin = 0;
      Size end = 0;
      os << "#slice\tRT_begin\tRT_end\tnumber_of_features\ttic\t"
         << "int_mean\tint_stddev\tint_min\tint_max\tint_median\tint_lowerq\tint_upperq\t"
         << "mz_mean\tmz_stddev\tmz_min\tmz_max\tmz_median\tmz_lowerq\tmz_upperq\t"
         << "width_mean\twidth_stddev\twidth_min\twidth_max\twidth_median\twidth_lowerq\twidth_upperq\t"
         << "qual_mean\tqual_stddev\tqual_min\tqual_max\tqual_median\tqual_lowerq\tqual_upperq\t"
         << "rt_qual_mean\trt_qual_stddev\trt_qual_min\trt_qual_max\trt_qual_median\trt_qual_lowerq\trt_qual_upperq\t"
         << "mz_qual_mean\tmz_qual_stddev\tmz_qual_min\tmz_qual_max\tmz_qual_median\tmz_qual_lowerq\tmz_qual_upperq"
         << endl;

      double rt_begin = 0.0;
      // Guard: feat.back() below would be undefined behavior on an empty map.
      if (!feat.empty())
      {
        for (Size slice = 0; slice < n; ++slice)
        {
          // Determine slice boundaries.
          double rt_end = feat.back().getRT() / (double)n * (slice + 1);
          for (end = begin; end < feat.size() && feat[end].getRT() < rt_end; ++end) {}

          // Compute statistics on all features in this slice.
          slice_stats = sliceStatistics(feat, begin, end);

          // Write the beginning and end of the slices to the output as well as the slice index.
          os << slice << "\t" << rt_begin << "\t" << rt_end << "\t" << end - begin << "\t";

          // Write the statistics as a line of an csv file
          copy(slice_stats.begin(), slice_stats.end(), ostream_iterator<double>(os, "\t"));
          os << endl;

          begin = end;
          rt_begin = rt_end;
        }
      }
    }
    else if (in_type == FileTypes::CONSENSUSXML) //consensus features
    {
      Size size = cons.size();

      // BUGFIX: qualities/widths were previously constructed with 'size' zero
      // elements and then appended to in the loop below, which doubled their
      // length and skewed the summary statistics. All per-feature vectors are
      // now declared empty and only reserved.
      vector<double> intensities;
      intensities.reserve(size);
      vector<double> qualities;
      qualities.reserve(size);
      vector<double> widths;
      widths.reserve(size);

      vector<double> rt_delta_by_elems;
      vector<double> rt_aad_by_elems;
      vector<double> rt_aad_by_cfs;
      rt_aad_by_cfs.reserve(size);

      vector<double> mz_delta_by_elems;
      vector<double> mz_aad_by_elems;
      vector<double> mz_aad_by_cfs;
      mz_aad_by_cfs.reserve(size);

      vector<double> it_delta_by_elems;
      vector<double> it_aad_by_elems;
      vector<double> it_aad_by_cfs;
      it_aad_by_cfs.reserve(size);

      for (const ConsensusFeature& cm : cons)
      {
        double rt_aad = 0;
        double mz_aad = 0;
        double it_aad = 0;
        intensities.push_back(cm.getIntensity());
        qualities.push_back(cm.getQuality());
        widths.push_back(cm.getWidth());
        for (const FeatureHandle& hs : cm)
        {
          double rt_diff = hs.getRT() - cm.getRT();
          rt_delta_by_elems.push_back(rt_diff);
          if (rt_diff < 0)
          {
            rt_diff = -rt_diff;
          }
          rt_aad_by_elems.push_back(rt_diff);
          rt_aad += rt_diff;
          double mz_diff = hs.getMZ() - cm.getMZ();
          mz_delta_by_elems.push_back(mz_diff);
          if (mz_diff < 0)
          {
            mz_diff = -mz_diff;
          }
          mz_aad_by_elems.push_back(mz_diff);
          mz_aad += mz_diff;
          // Intensity is compared as a ratio; guard against division by zero.
          double it_ratio = hs.getIntensity() / (cm.getIntensity() ? cm.getIntensity() : 1.);
          it_delta_by_elems.push_back(it_ratio);
          if (it_ratio < 1.)
          {
            it_ratio = 1. / it_ratio;
          }
          it_aad_by_elems.push_back(it_ratio);
          it_aad += it_ratio;
        }
        if (!cm.empty())
        {
          rt_aad /= cm.size();
          mz_aad /= cm.size();
          it_aad /= cm.size();
        } // otherwise rt_aad etc. are 0 anyway
        rt_aad_by_cfs.push_back(rt_aad);
        mz_aad_by_cfs.push_back(mz_aad);
        it_aad_by_cfs.push_back(it_aad);
      }

      os.precision(writtenDigits(ConsensusFeature::IntensityType()));
      os << "Intensities of consensus features:" << endl << Math::SummaryStatistics< vector<double> >(intensities) << endl;

      os.precision(writtenDigits(ConsensusFeature::QualityType()));
      os << "Qualities of consensus features:" << endl << Math::SummaryStatistics< vector<double> >(qualities) << endl;

      os.precision(writtenDigits(ConsensusFeature::CoordinateType()));
      os << "Retention time differences ( element-center, weight 1 per element):" << endl << Math::SummaryStatistics< vector<double> >(rt_delta_by_elems) << endl;
      os << "Absolute retention time differences ( |element-center|, weight 1 per element):" << endl << Math::SummaryStatistics< vector<double> >(rt_aad_by_elems) << endl;
      os << "Average absolute differences of retention time within consensus features ( |element-center|, weight 1 per consensus features):" << endl << Math::SummaryStatistics< vector<double> >(rt_aad_by_cfs) << endl;

      os.precision(writtenDigits(ConsensusFeature::CoordinateType()));
      os << "Mass-to-charge differences ( element-center, weight 1 per element):" << endl << Math::SummaryStatistics< vector<double> >(mz_delta_by_elems) << endl;
      os << "Absolute differences of mass-to-charge ( |element-center|, weight 1 per element):" << endl << Math::SummaryStatistics< vector<double> >(mz_aad_by_elems) << endl;
      os << "Average absolute differences of mass-to-charge within consensus features ( |element-center|, weight 1 per consensus features):" << endl << Math::SummaryStatistics< vector<double> >(mz_aad_by_cfs) << endl;

      os.precision(writtenDigits(ConsensusFeature::IntensityType()));
      os << "Intensity ratios ( element/center, weight 1 per element):" << endl << Math::SummaryStatistics< vector<double> >(it_delta_by_elems) << endl;
      os << "Relative intensity error ( max{(element/center),(center/element)}, weight 1 per element):" << endl << Math::SummaryStatistics< vector<double> >(it_aad_by_elems) << endl;
      os << "Average relative intensity error within consensus features ( max{(element/center),(center/element)}, weight 1 per consensus features):" << endl << Math::SummaryStatistics< vector<double> >(it_aad_by_cfs) << endl;
    }

    return EXECUTION_OK;
  }

  ExitCodes main_(int, const char**) override
  {
    String out = getStringOption_("out");

    //output to command line
    if (out.empty())
    {
      return outputTo(cout);
    }
    //output to file
    else
    {
      ofstream os(out.c_str());
      return outputTo(os);
    }
  }

};
// Tool entry point: delegate straight to the TOPP framework.
int main(int argc, const char** argv)
{
  return TOPPMapStatistics().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/GNPSExport.cpp | .cpp | 10,224 | 175 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Dorrestein Lab - University of California San Diego - https://dorresteinlab.ucsd.edu/$
// $Authors: Abinesh Sarvepalli and Louis Felix Nothias$
// $Contributors: Fabian Aicheler and Oliver Alka from Oliver Kohlbacher's group at Tubingen University$
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/FORMAT/GNPSMetaValueFile.h>
#include <OpenMS/FORMAT/GNPSMGFFile.h>
#include <OpenMS/FORMAT/GNPSQuantificationFile.h>
#include <OpenMS/ANALYSIS/ID/IonIdentityMolecularNetworking.h>
#include <OpenMS/KERNEL/ConsensusMap.h>
#include <OpenMS/FORMAT/FileHandler.h>
using namespace OpenMS;
using namespace std;
//----------------------------------------------------------
// Doxygen docu
//----------------------------------------------------------
/**
@page TOPP_GNPSExport GNPSExport
@brief Export MS/MS data in .MGF format for GNPS (http://gnps.ucsd.edu).
GNPS (Global Natural Products Social Molecular Networking, http://gnps.ucsd.edu) is an open-access knowledge base for community-wide organization and sharing of raw, processed or identified tandem mass (MS/MS) spectrometry data. The GNPS web-platform makes it possible to perform spectral library search against public MS/MS spectral libraries, as well as to perform various data analysis such as MS/MS molecular networking, network annotation propagation, and the Dereplicator-based annotation. The GNPS manuscript is available here: https://www.nature.com/articles/nbt.3597
This tool was developed for the Feature Based Molecular Networking (FBMN) (https://ccms-ucsd.github.io/GNPSDocumentation/featurebasedmolecularnetworking/) and Ion Identity Molecular Networking (IIMN) (https://ccms-ucsd.github.io/GNPSDocumentation/fbmn-iin/) workflows.
Please cite:
Nothias, L.-F., Petras, D., Schmid, R. et al. [Feature-based molecular networking in the GNPS analysis environment](https://www.nature.com/articles/s41592-020-0933-6). Nat. Methods 17, 905–908 (2020).
In brief, after running an OpenMS metabolomics pipeline, the <b>GNPSExport</b> TOPP tool can be used on the consensusXML file and the mzML files to generate the files needed for FBMN and IIMN.
Those files are:
- A <b>MS/MS spectral data file</b> (.MGF format).
- A <b>feature quantification table</b> (.TXT format). (https://ccms-ucsd.github.io/GNPSDocumentation/featurebasedmolecularnetworking/#feature-quantification-table)
- A <b>supplementary pairs table</b> (.CSV format) required for IIMN. (https://ccms-ucsd.github.io/GNPSDocumentation/fbmn-iin/#supplementary-pairs)
- A <b>meta value table</b> (.TSV format). (https://ccms-ucsd.github.io/GNPSDocumentation/metadata/)
A representative OpenMS-GNPS workflow would use the following OpenMS TOPP tools sequentially:
- Input mzML files
- Run the @ref TOPP_FeatureFinderMetabo tool on the mzML files.
- Run MetaboliteAdductDecharger on the featureXML files (optional, for Ion Identity Molecular Networking).
- Run the @ref TOPP_MapAlignerPoseClustering tool on the featureXML files.
@code
MapAlignerPoseClustering -in FFM_inputFile0.featureXML FFM_inputFile1.featureXML -out MapAlignerPoseClustering_inputFile0.featureXML MapAlignerPoseClustering_inputFile1.featureXML -trafo_out MapAlignerPoseClustering_inputFile0.trafoXML MapAlignerPoseClustering_inputFile1.trafoXML
@endcode
- Run the @ref TOPP_MapRTTransformer tool on the mzML files to transform retention times based on the feature map alignment by @ref TOPP_MapAlignerPoseClustering.
@code
MapRTTransformer -in inputFile0.mzML -out MapRTTransformer_inputFile0.mzML -trafo_in MapAlignerPoseClustering_inputFile0.trafoXML
MapRTTransformer -in inputFile1.mzML -out MapRTTransformer_inputFile1.mzML -trafo_in MapAlignerPoseClustering_inputFile1.trafoXML
@endcode
- Run the @ref TOPP_IDMapper tool on the featureXML and mzML files.
@code
IDMapper -id emptyfile.idXML -in MapAlignerPoseClustering_inputFile0.featureXML -spectra:in MapRTTransformer_inputFile0.mzML -out IDMapper_inputFile0.featureXML
IDMapper -id emptyfile.idXML -in MapAlignerPoseClustering_inputFile1.featureXML -spectra:in MapRTTransformer_inputFile1.mzML -out IDMapper_inputFile1.featureXML
@endcode
- Run the @ref TOPP_MetaboliteAdductDecharger tool on the featureXML files.
- Run the @ref TOPP_FeatureLinkerUnlabeledKD tool or FeatureLinkerUnlabeledQT, on the featureXML files and output a consensusXML file.
@code
FeatureLinkerUnlabeledKD -in IDMapper_inputFile0.featureXML IDMapper_inputFile1.featureXML -out FeatureLinkerUnlabeledKD.consensusXML
@endcode
- Run the @ref TOPP_FileFilter on the consensusXML file to keep only consensusElements with at least MS/MS scan (peptide identification).
@code
FileFilter -id:remove_unannotated_features -in FeatureLinkerUnlabeledKD.consensusXML -out FileFilter.consensusXML
@endcode
- Run the @ref TOPP_GNPSExport on the "filtered consensusXML file" to export an .MGF file. For each consensusElement in the consensusXML file, the GNPSExport command produces one representative consensus MS/MS spectrum (named peptide annotation in OpenMS jargon) which is appended in the MS/MS spectral file (.MGF file).
(Note that the parameters for the spectral file generation are defined in the GNPSExport INI parameters file, available here: https://ccms-ucsd.github.io/GNPSDocumentation/openms_gnpsexport/GNPSExport.ini
@code
GNPSExport -in_cm filtered.consensusXML -in_mzml MapRTTransformer_inputFile0.mzML MapRTTransformer_inputFile1.mzML -out GNPSExport_output.mgf -out_quantification FeatureQuantificationTable.txt -out_pairs SupplementaryPairsTable.csv -out_meta_values MetaValues.tsv
@endcode
- Upload your files to GNPS and run the Feature-Based Molecular Networking workflow. Instructions can be found here: https://ccms-ucsd.github.io/GNPSDocumentation/featurebasedmolecularnetworking/
The GitHub page for the ProteoSAFe workflow and the OpenMS python wrappers is available here: https://github.com/Bioinformatic-squad-DorresteinLab/openms-gnps-workflow
An online version of the OpenMS-GNPS pipeline for FBMN running on CCMS server (http://proteomics.ucsd.edu/) is available here: https://ccms-ucsd.github.io/GNPSDocumentation/featurebasedmolecularnetworking-with-openms/
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_GNPSExport.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_GNPSExport.html
*/
/// TOPP tool that exports a consensus map (plus the original mzML files) into
/// the file set required by the GNPS FBMN / IIMN workflows.
class TOPPGNPSExport : public TOPPBase
{
public:
  TOPPGNPSExport() : TOPPBase(
    "GNPSExport",
    "Export representative consensus MS/MS scan per consensusElement into a .MGF file format.\nSee the documentation on https://ccms-ucsd.github.io/GNPSDocumentation/featurebasedmolecularnetworking-with-openms",
    true,
    {
      {
        "Nothias L.F. et al.", // authors
        "Feature-based Molecular Networking in the GNPS Analysis Environment", // title
        "bioRxiv 812404 (2019)", // when_where
        "10.1101/812404" // doi
      }
    }
  ) {}

protected:
  /// Register input/output files and forward the GNPSMGFFile algorithm defaults.
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in_cm", "<file>", "", "Input consensusXML file containing only consensusElements with \"peptide\" annotations.");
    setValidFormats_("in_cm", {"consensusXML"});
    registerInputFileList_("in_mzml", "<files>", ListUtils::create<String>(""), "Original mzml files containing the ms2 spectra (aka peptide annotation). \nMust be in order that the consensusXML file maps the original mzML files.");
    setValidFormats_("in_mzml", {"mzML"});

    registerOutputFile_("out", "<file>", "", "Output MGF file.");
    setValidFormats_("out", {"mgf"});
    registerOutputFile_("out_quantification", "<file>", "", "Output feature quantification table.");
    setValidFormats_("out_quantification", {"txt"});
    registerOutputFile_("out_pairs", "<file>", "", "Output supplementary pairs table for IIMN.", false);
    setValidFormats_("out_pairs", {"csv"});
    registerOutputFile_("out_meta_values", "<file>", "", "Output meta value file.", false);
    setValidFormats_("out_meta_values", {"tsv"});

    addEmptyLine_();
    registerFullParam_(GNPSMGFFile().getDefaults());
  }

  /// Load the consensus map, annotate it for IIMN if applicable, and write
  /// the MGF file plus any optional GNPS tables that were requested.
  ExitCodes main_(int, const char**) override
  {
    // Gather all user-supplied file paths.
    const String consensus_path = getStringOption_("in_cm");
    const StringList mzml_paths = getStringList_("in_mzml");
    const String mgf_out = getStringOption_("out");
    const String quant_out = getStringOption_("out_quantification");
    const String pairs_out = getStringOption_("out_pairs");
    const String meta_out = getStringOption_("out_meta_values");

    // Load the ConsensusMap once; it drives the optional exports below.
    ConsensusMap consensus;
    FileHandler().loadConsensusFeatures(consensus_path, consensus, {FileTypes::CONSENSUSXML});

    // If any feature carries an IIMN_LINKED_GROUPS annotation, prepare the
    // whole map for Ion Identity Molecular Networking.
    bool needs_iimn_annotation = false;
    for (const auto& feature : consensus)
    {
      if (feature.metaValueExists(Constants::UserParam::IIMN_LINKED_GROUPS))
      {
        needs_iimn_annotation = true;
        break;
      }
    }
    if (needs_iimn_annotation)
    {
      IonIdentityMolecularNetworking::annotateConsensusMap(consensus);
    }

    // Write the representative MS/MS spectra to the MGF file.
    GNPSMGFFile mgf_writer;
    mgf_writer.setLogType(log_type_);
    mgf_writer.setParameters(getParam_()); // copy tool parameter to library class/algorithm
    mgf_writer.store(consensus_path, mzml_paths, mgf_out);

    // Optional outputs, each only written when a path was given.
    if (!pairs_out.empty())
    {
      IonIdentityMolecularNetworking::writeSupplementaryPairTable(consensus, pairs_out);
    }
    if (!quant_out.empty())
    {
      GNPSQuantificationFile::store(consensus, quant_out);
    }
    if (!meta_out.empty())
    {
      GNPSMetaValueFile::store(consensus, meta_out);
    }

    return EXECUTION_OK;
  }
};
// the actual main functioned needed to create an executable
// Tool entry point: delegate straight to the TOPP framework.
int main(int argc, const char** argv)
{
  return TOPPGNPSExport().main(argc, argv);
}
| C++ |
3D | OpenMS/OpenMS | src/topp/ProteinInference.cpp | .cpp | 11,953 | 294 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Julianus Pfeuffer $
// $Authors: Andreas Bertsch, Julianus Pfeuffer $
// --------------------------------------------------------------------------
#include <algorithm>
#include <OpenMS/ANALYSIS/ID/BasicProteinInferenceAlgorithm.h>
#include <OpenMS/ANALYSIS/ID/ConsensusMapMergerAlgorithm.h>
#include <OpenMS/ANALYSIS/ID/FalseDiscoveryRate.h>
#include <OpenMS/ANALYSIS/ID/IDMergerAlgorithm.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/CONCEPT/VersionInfo.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/SYSTEM/StopWatch.h>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_ProteinInference ProteinInference
@brief Computes a protein identification score based on an aggregation of scores of identified peptides.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=4> → ProteinInterference →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_CometAdapter (or other ID engines)</td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=3> @ref TOPP_PeptideIndexer </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_FalseDiscoveryRate </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_IDFilter </td>
</tr>
</table>
</CENTER>
This tool counts and aggregates the scores of peptide sequences that match a protein accession. Only the top PSM for a peptide is used.
By default it also annotates the number of peptides used for the calculation (metavalue "nr_found_peptides") and
can be used for further filtering. 0 probability peptides are counted but ignored in aggregation method "multiplication".
@note Currently mzIdentML (mzid) is not directly supported as an input/output format of this tool. Convert mzid files to/from idXML using @ref TOPP_IDFileConverter if necessary.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_ProteinInference.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_ProteinInference.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPProteinInference :
public TOPPBase
{
public:
/// Constructor: registers the tool name and description with the TOPP framework.
TOPPProteinInference() :
  TOPPBase("ProteinInference", "Protein inference based on an aggregation of the scores of the identified peptides.")
{}
protected:
/// Register command-line options: input/output files, run merging behavior,
/// protein-level FDR settings, and the sub-parameters of the merging and
/// inference algorithms (exposed as "Merging:" and "Algorithm:" sections).
void registerOptionsAndFlags_() override
{
  //TODO allow consensusXML version
  registerInputFileList_("in", "<file>", StringList(), "input file(s)");
  setValidFormats_("in", ListUtils::create<String>("idXML,consensusXML"));
  registerOutputFile_("out", "<file>", "", "output file");
  setValidFormats_("out", ListUtils::create<String>("idXML,consensusXML"));
  registerStringOption_("out_type", "<file>", "", "output file type", false);
  setValidStrings_("out_type", ListUtils::create<String>("idXML,consensusXML"));

  //TODO add function to merge based on replicates only. Needs additional exp. design file then.
  registerStringOption_("merge_runs", "<choice>", "all",
                        "If your idXML contains multiple runs, merge them beforehand? Otherwise performs inference separately per run.", false);
  setValidStrings_("merge_runs", ListUtils::create<String>("no,all"));

  // Protein-level target/decoy FDR calculation (optional post-processing step)
  registerStringOption_("protein_fdr",
                        "<option>",
                        "false",
                        "Additionally calculate the target-decoy FDR on protein-level after inference", false, false);
  setValidStrings_("protein_fdr", {"true","false"});
  registerStringOption_("conservative_fdr",
                        "<option>",
                        "true",
                        "Use (D+1)/(T) instead of (D+1)/(T+D) for reporting protein FDRs.", false, true);
  setValidStrings_("conservative_fdr", {"true","false"});
  registerStringOption_("picked_fdr",
                        "<option>",
                        "true",
                        "Use picked protein FDRs.", false, true);
  setValidStrings_("picked_fdr", {"true","false"});
  registerStringOption_("picked_decoy_string",
                        "<decoy_string>",
                        "",
                        "If using picked protein FDRs, which decoy string was used? Leave blank for auto-detection.", false, true);
  registerStringOption_("picked_decoy_prefix",
                        "<option>",
                        "prefix",
                        "If using picked protein FDRs, was the decoy string a prefix or suffix? Ignored during auto-detection.", false, true);
  setValidStrings_("picked_decoy_prefix", {"prefix","suffix"});

  // If we support more psms per spectrum, it should be done in the Algorithm class first
  /*registerIntOption_("nr_psms_per_spectrum", "<choice>", 1,
      "The number of top scoring PSMs per spectrum to consider. 0 means all.", false);
  setMinInt_("nr_psms_per_spectrum", 0);*/

  addEmptyLine_();

  // Expose the full parameter sets of the underlying algorithms as
  // dedicated INI-file subsections.
  Param merger_with_subsection;
  merger_with_subsection.insert("Merging:", IDMergerAlgorithm().getDefaults());
  registerFullParam_(merger_with_subsection);

  Param algo_with_subsection;
  algo_with_subsection.insert("Algorithm:", BasicProteinInferenceAlgorithm().getDefaults());
  registerFullParam_(algo_with_subsection);
}
/// Runs protein inference: optionally merges ID runs, aggregates peptide scores
/// to protein scores, optionally computes protein-level FDR, and stores the
/// result either as consensusXML (if the input was consensusXML) or as idXML.
/// @return EXECUTION_OK on success, ILLEGAL_PARAMETERS on invalid input/output combinations.
ExitCodes main_(int, const char**) override
{
  StopWatch sw;
  sw.start();

  StringList in = getStringList_("in");
  if (in.empty())
  { // guard: 'in[0]' is accessed below (previously indexed before the emptiness check)
    OPENMS_LOG_FATAL_ERROR << "Error: No input files given.\n";
    return ILLEGAL_PARAMETERS;
  }

  // Merging if specifically asked or multiple files given. If you want to not merge
  // and use multiple files, use a loop
  bool merge_runs = getStringOption_("merge_runs") == "all" || in.size() > 1;
  String out = getStringOption_("out");
  String out_type = getStringOption_("out_type");

  // load identifications
  OPENMS_LOG_INFO << "Loading input..." << std::endl;
  FileTypes::Type in_type = FileHandler::getType(in[0]);

  if (in_type == FileTypes::CONSENSUSXML) //----------- ConsensusXML -----------
  {
    if (FileHandler::getTypeByFileName(out) != FileTypes::CONSENSUSXML &&
        FileTypes::nameToType(out_type) != FileTypes::CONSENSUSXML)
    {
      OPENMS_LOG_FATAL_ERROR << "Error: Running on consensusXML requires output as consensusXML. Please change the "
                                "output type.\n";
      return ILLEGAL_PARAMETERS; // was: fall-through, which wrote consensusXML content under the wrong output type
    }
    if (in.size() > 1)
    {
      OPENMS_LOG_FATAL_ERROR << "Error: Multiple inputs only supported for idXML\n";
      return ILLEGAL_PARAMETERS; // was: fall-through, which silently ignored all but the first input
    }

    ConsensusMapMergerAlgorithm cmerge;
    ConsensusMap cmap;
    FileHandler().loadConsensusFeatures(in[0], cmap, {FileTypes::CONSENSUSXML});
    OPENMS_LOG_INFO << "Loading input took " << sw.toString() << std::endl;
    // reset() keeps the watch running (as in the idXML branch below);
    // clear() would stop it and the next toString() would report zero
    sw.reset();

    OPENMS_LOG_INFO << "Merging IDs across runs..." << std::endl;
    cmerge.mergeAllIDRuns(cmap);
    OPENMS_LOG_INFO << "Merging IDs across runs took " << sw.toString() << std::endl;
    sw.reset();

    OPENMS_LOG_INFO << "Aggregating protein scores..." << std::endl;
    BasicProteinInferenceAlgorithm pi;
    pi.setParameters(getParam_().copy("Algorithm:", true));
    pi.run(cmap, cmap.getProteinIdentifications()[0], true);
    OPENMS_LOG_INFO << "Aggregating protein scores took " << sw.toString() << std::endl;
    sw.clear(); // stop timing here; restarted right before storing

    bool calc_protFDR = getStringOption_("protein_fdr") == "true";
    if (calc_protFDR)
    {
      OPENMS_LOG_INFO << "Calculating target-decoy q-values..." << std::endl;
      FalseDiscoveryRate fdr;
      Param fdrparam = fdr.getParameters();
      fdrparam.setValue("conservative", getStringOption_("conservative_fdr"));
      fdrparam.setValue("add_decoy_proteins","true");
      fdr.setParameters(fdrparam);
      if (getStringOption_("picked_fdr") == "true")
      {
        fdr.applyPickedProteinFDR(cmap.getProteinIdentifications()[0], getStringOption_("picked_decoy_string"), getStringOption_("picked_decoy_prefix") == "prefix");
      }
      else
      {
        fdr.applyBasic(cmap.getProteinIdentifications()[0], true);
      }
    }

    OPENMS_LOG_INFO << "Storing output..." << std::endl;
    sw.start();
    // write output
    FileHandler().storeConsensusFeatures(out, cmap, {FileTypes::CONSENSUSXML});
    OPENMS_LOG_INFO << "Storing output took " << sw.toString() << std::endl;
    sw.stop();
  }
  else //----------- IdXML --------------------------
  {
    vector<ProteinIdentification> inferred_protein_ids{1};
    PeptideIdentificationList inferred_peptide_ids;
    FileHandler f;
    if (merge_runs)
    {
      //TODO allow keep_best_pepmatch_only option during merging (Peptide-level datastructure would help a lot,
      // otherwise you need to build a map of peptides everytime you want to quickly check if the peptide is already
      // present)
      //TODO allow experimental design aware merging
      IDMergerAlgorithm merger{String("all_merged")};
      merger.setParameters(getParam_().copy("Merging:", true));
      for (const auto& idfile : in)
      {
        vector<ProteinIdentification> protein_ids;
        PeptideIdentificationList peptide_ids;
        f.loadIdentifications(idfile, protein_ids, peptide_ids, {FileTypes::IDXML});
        merger.insertRuns(std::move(protein_ids), std::move(peptide_ids));
      }
      merger.returnResultsAndClear(inferred_protein_ids[0], inferred_peptide_ids);
    }
    else
    {
      f.loadIdentifications(in[0], inferred_protein_ids, inferred_peptide_ids, {FileTypes::IDXML});
    }
    OPENMS_LOG_INFO << "Loading input took " << sw.toString() << std::endl;
    sw.reset();

    // groups will be reannotated or scores will not make sense anymore -> delete
    inferred_protein_ids[0].getIndistinguishableProteins().clear();

    OPENMS_LOG_INFO << "Aggregating protein scores..." << std::endl;
    BasicProteinInferenceAlgorithm pi;
    pi.setParameters(getParam_().copy("Algorithm:", true));
    pi.run(inferred_peptide_ids, inferred_protein_ids);
    OPENMS_LOG_INFO << "Aggregating protein scores took " << sw.toString() << std::endl;
    sw.clear();

    bool calc_protFDR = getStringOption_("protein_fdr") == "true";
    if (calc_protFDR)
    {
      OPENMS_LOG_INFO << "Calculating target-decoy q-values..." << std::endl;
      FalseDiscoveryRate fdr;
      Param fdrparam = fdr.getParameters();
      fdrparam.setValue("conservative", getStringOption_("conservative_fdr"));
      fdrparam.setValue("add_decoy_proteins","true");
      fdr.setParameters(fdrparam);
      if (getStringOption_("picked_fdr") == "true")
      {
        fdr.applyPickedProteinFDR(inferred_protein_ids[0], getStringOption_("picked_decoy_string"), getStringOption_("picked_decoy_prefix") == "prefix");
      }
      else
      {
        fdr.applyBasic(inferred_protein_ids[0], true);
      }
    }

    OPENMS_LOG_INFO << "Storing output..." << std::endl;
    sw.start();
    // write output
    FileHandler().storeIdentifications(out, inferred_protein_ids, inferred_peptide_ids, {FileTypes::IDXML});
    OPENMS_LOG_INFO << "Storing output took " << sw.toString() << std::endl;
    sw.stop();
  }
  return EXECUTION_OK;
}
};
/// Tool entry point: delegates argument handling to TOPPBase.
int main(int argc, const char** argv)
{
  return TOPPProteinInference().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/HighResPrecursorMassCorrector.cpp | .cpp | 10,852 | 215 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg, Oliver Alka $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/DATASTRUCTURES/DataValue.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/FORMAT/FeatureXMLFile.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <OpenMS/PROCESSING/CENTROIDING/PeakPickerHiRes.h>
#include <OpenMS/PROCESSING/CALIBRATION/PrecursorCorrection.h>
#include <fstream>
#include <iostream>
#include <string>
#include <algorithm>
#include <iomanip>
using namespace OpenMS;
using namespace std;
/**
@page TOPP_HighResPrecursorMassCorrector HighResPrecursorMassCorrector
@brief Corrects the precursor mz of high resolution data.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN= "middle" ROWSPAN=2> → HighResPrecursorMassCorrector →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_PeakPickerHiRes </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_CometAdapter (or other ID engines) </td>
</tr>
</table>
</CENTER>
This tool performs precursor m/z correction on picked (=centroided) high resolution data.
Three methods are available: 'nearest_peak', 'highest_intensity_peak' and 'feature'.
- nearest_peak: Use nearest centroided MS1 peak for precursor mass correction.
- highest_intensity_peak: Use highest intensity centroided MS1 peak in a given mass range for precursor mass correction.
- feature: Use features for precursor mass correction, which allows for charge correction.
The method highest_intensity_peak searches in a specific m/z-window of the precursor information for the peak with the highest intensity.
Suggested value: 1/maximal expected charge. E.g. maximal expected charge 5, m/z-window = +/- 0.2 Da
See the corresponding parameter subsection for details.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_HighResPrecursorMassCorrector.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_HighResPrecursorMassCorrector.html
*/
/// @cond TOPPCLASSES
#include <OpenMS/PROCESSING/CALIBRATION/PrecursorCorrection.h>
/// TOPP tool correcting precursor m/z (and optionally charge) of centroided MS data
/// using one of three methods: nearest MS1 peak, highest-intensity MS1 peak, or features.
class TOPPHiResPrecursorMassCorrector :
  public TOPPBase
{
public:
  TOPPHiResPrecursorMassCorrector() :
    TOPPBase("HighResPrecursorMassCorrector",
             "Corrects the precursor mass and charge determined by the instrument software.")
  {
  }

protected:
  /// Registers in/out files and the parameters of the three correction methods
  /// ('feature', 'nearest_peak', 'highest_intensity_peak').
  void registerOptionsAndFlags_() override
  {
    // input files
    registerInputFile_("in", "<file>", "", "Input file (centroided data)");
    setValidFormats_("in", ListUtils::create<String>("mzML"));

    registerOutputFile_("out", "<file>", "", "Output file");
    setValidFormats_("out", ListUtils::create<String>("mzML"));

    registerTOPPSubsection_("feature", "Use features for precursor mass correction.");
    registerInputFile_("feature:in", "<file>", "", "Features used to correct precursor masses.", false);
    setValidFormats_("feature:in", ListUtils::create<String>("featureXML"));
    registerDoubleOption_("feature:mz_tolerance", "<num>", 5.0, "The precursor mass tolerance. Used to determine matching to feature mass traces.", false);
    registerStringOption_("feature:mz_tolerance_unit", "<choice>", "ppm", "Unit of precursor mass tolerance", false);
    setValidStrings_("feature:mz_tolerance_unit", ListUtils::create<String>("Da,ppm"));
    registerDoubleOption_("feature:rt_tolerance", "<num>", 0.0, "Additional retention time tolerance added to feature boundaries.", false);
    registerIntOption_("feature:max_trace", "<num>", 2, "Maximum isotopic peak offset from the monoisotopic peak to consider for correction. For example, with max_trace=2, the tool can reassign a precursor to the monoisotopic peak (M+0) even if it was originally assigned to M+1 or M+2 (corrections of approximately -1 or -2 Da). To allow larger corrections (e.g., -3 Da), increase this value accordingly.", false, true);
    registerFlag_("feature:believe_charge", "Assume precursor charge to be correct.");
    registerFlag_("feature:keep_original", "Make a copy of the precursor and MS2 (true) or discard the original (false).");
    registerFlag_("feature:assign_all_matching", "Correct a precursor using all matching features (true) or only the nearest (false). Only evaluated if copies are created (feature:keep_original).");

    registerTOPPSubsection_("nearest_peak", "Use nearest centroided MS1 peak for precursor mass correction.");
    registerDoubleOption_("nearest_peak:mz_tolerance", "<num>", 0.0, "The precursor mass tolerance to find the closest MS1 peak. (Disable method by setting value to 0.0)", false);
    registerStringOption_("nearest_peak:mz_tolerance_unit", "<choice>", "ppm", "Unit of precursor mass tolerance", false);
    setValidStrings_("nearest_peak:mz_tolerance_unit", ListUtils::create<String>("Da,ppm"));

    // typo fixed: "certrain" -> "certain"
    registerTOPPSubsection_("highest_intensity_peak", "Use centroided MS1 peak with the highest intensity in a certain mass range - for precursor mass correction");
    registerDoubleOption_("highest_intensity_peak:mz_tolerance", "<num>", 0.0, "The precursor mass tolerance to find the highest intensity MS1 peak. Suggested value 1/max. expected charge. (Disable method by setting value to 0.0)", false);
    registerStringOption_("highest_intensity_peak:mz_tolerance_unit", "<choice>", "ppm", "Unit of precursor mass tolerance", false);
    setValidStrings_("highest_intensity_peak:mz_tolerance_unit", ListUtils::create<String>("Da,ppm"));

    registerOutputFile_("out_csv", "<file>", "", "Optional CSV output file for results on 'nearest_peak' or 'highest_intensity_peak' algorithm (see corresponding subsection) containing columns: " + ListUtils::concatenate(ListUtils::create<String>(PrecursorCorrection::csv_header), ", ") + ".", false);
    setValidFormats_("out_csv", ListUtils::create<String>("csv"));
  }

  /// Loads the experiment, applies the requested correction method(s) and stores
  /// the corrected experiment (plus an optional CSV with per-precursor deltas).
  /// @return EXECUTION_OK, or MISSING_PARAMETERS if no correction method is enabled.
  ExitCodes main_(int, const char **) override
  {
    const string in_mzml(getStringOption_("in"));
    const string in_feature(getStringOption_("feature:in"));
    const string out_mzml(getStringOption_("out"));
    const string out_csv = getStringOption_("out_csv");

    const double mz_tolerance = getDoubleOption_("feature:mz_tolerance");
    const bool mz_unit_ppm = getStringOption_("feature:mz_tolerance_unit") == "ppm" ? true : false;
    const double rt_tolerance = getDoubleOption_("feature:rt_tolerance");
    const int max_trace = getIntOption_("feature:max_trace");

    bool keep_original = getFlag_("feature:keep_original");
    bool assign_all_matching = getFlag_("feature:assign_all_matching");
    bool believe_charge = getFlag_("feature:believe_charge");

    const double nearest_peak_mz_tolerance = getDoubleOption_("nearest_peak:mz_tolerance");
    const bool nearest_peak_ppm = getStringOption_("nearest_peak:mz_tolerance_unit") == "ppm" ? true : false;

    const double highest_intensity_peak_mz_tolerance = getDoubleOption_("highest_intensity_peak:mz_tolerance");
    const bool highest_intensity_peak_ppm = getStringOption_("highest_intensity_peak:mz_tolerance_unit") == "ppm" ? true : false;

    PeakMap exp;
    FileHandler().loadExperiment(in_mzml, exp, {FileTypes::MZML}, log_type_);

    cout << setprecision(12);

    // collected per-precursor correction results (filled by the peak-based methods)
    vector<double> deltaMZs;
    vector<double> mzs;
    vector<double> rts;

    if ((nearest_peak_mz_tolerance <= 0.0) && (highest_intensity_peak_mz_tolerance <= 0.0) && in_feature.empty())
    {
      OPENMS_LOG_ERROR << "No method for PC correction requested. Either provide featureXML input files or set 'nearest_peak:mz_tolerance' > 0 or specify a 'highest_intensity_peak:mz_tolerance' > 0" << std::endl;
      return MISSING_PARAMETERS;
    }

    // perform correction to closest MS1 peak
    // note: 'highest_intensity_peak' takes precedence if both tolerances are set
    set<Size> corrected_to_nearest_peak;
    if (nearest_peak_mz_tolerance > 0.0 && highest_intensity_peak_mz_tolerance <= 0.0)
    {
      corrected_to_nearest_peak = PrecursorCorrection::correctToNearestMS1Peak(exp, nearest_peak_mz_tolerance, nearest_peak_ppm, deltaMZs, mzs, rts);
    }

    // perform correction to highest intensity MS1 peak
    set<Size> corrected_to_highest_intensity_peak;
    if (highest_intensity_peak_mz_tolerance > 0.0)
    {
      corrected_to_highest_intensity_peak = PrecursorCorrection::correctToHighestIntensityMS1Peak(exp, highest_intensity_peak_mz_tolerance, highest_intensity_peak_ppm, deltaMZs, mzs, rts);
    }

    // perform correction to closest feature (also corrects charge if not disabled)
    set<Size> corrected_to_nearest_feature;
    if (!in_feature.empty())
    {
      FeatureMap features;
      FileHandler().loadFeatures(in_feature, features, {}, log_type_);
      corrected_to_nearest_feature = PrecursorCorrection::correctToNearestFeature(features, exp, rt_tolerance, mz_tolerance, mz_unit_ppm, believe_charge, keep_original, assign_all_matching, max_trace, debug_level_);
    }

    FileHandler().storeExperiment(out_mzml, exp, {FileTypes::MZML},log_type_);

    if (!out_csv.empty())
    {
      if (nearest_peak_mz_tolerance > 0.0 && highest_intensity_peak_mz_tolerance <= 0.0)
      {
        OPENMS_LOG_INFO << "Corrected " << corrected_to_nearest_peak.size() << " precursor to a MS1 peak." << endl;
      }
      else if (highest_intensity_peak_mz_tolerance > 0.0)
      {
        OPENMS_LOG_INFO << "Corrected " << corrected_to_highest_intensity_peak.size() << " precursor to a MS1 peak." << endl;
      }
      else
      { // feature-only run: the peak-based collectors were never filled
        OPENMS_LOG_WARN << "Output file 'out_csv': No data collected since neither 'nearest_peak' nor 'highest_intensity_peak' correction was enabled. CSV will be empty." << endl;
      }
      PrecursorCorrection::writeHist(out_csv, deltaMZs, mzs, rts);
    }

    if (!in_feature.empty())
    {
      OPENMS_LOG_INFO << "Corrected " << corrected_to_nearest_feature.size() << " precursors to a feature." << endl;
    }
    return EXECUTION_OK;
  }
};
/// Tool entry point: delegates argument handling to TOPPBase.
int main(int argc, const char** argv)
{
  return TOPPHiResPrecursorMassCorrector().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/EICExtractor.cpp | .cpp | 21,279 | 526 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/config.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/DATASTRUCTURES/StringListUtils.h>
#include <OpenMS/DATASTRUCTURES/ListUtilsIO.h>
#include <OpenMS/FORMAT/EDTAFile.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/KERNEL/MSChromatogram.h>
#include <OpenMS/PROCESSING/NOISEESTIMATION/SignalToNoiseEstimatorMedian.h>
#include <OpenMS/PROCESSING/SMOOTHING/GaussFilter.h>
#include <OpenMS/PROCESSING/CENTROIDING/PeakPickerHiRes.h>
#include <OpenMS/SYSTEM/File.h>
#include <OpenMS/ANALYSIS/OPENSWATH/PeakIntegrator.h>
#include <functional>
#include <numeric>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_EICExtractor EICExtractor
@brief Extracts EICs from an MS experiment, in order to quantify analytes at a given position
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=2> → EICExtractor →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_FileConverter</td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> statistical tools, e.g., Excel, R, ... </td>
</tr>
</table>
</CENTER>
Use this instead of FeatureFinder, if you have bad features which are not recognized (much noise etc)
or if you want to quantify non-peptides.
The input EDTA file specifies where to search for signal in RT and m/z.
Retention time is in seconds [s]. A third intensity column is ignored but needs to be present.
Example (replace space separator with <TAB>):<br>
@code
RT m/z int
19.2 431.85 0
21.1 678.77 0
25.7 660.76 0
59.2 431.85 0
@endcode
RT positions can also be automatically generated using the 'auto-RT' functionality, which can be enabled by the flag 'auto_rt:enabled'.
All EDTA input lines with negative RT and some m/z values are replaced by 'n' other lines, where the m/z value is identical
and the RT column is replaced by of the 'n' RT estimates as discovered by the auto-RT functionality.
This allows you to specify only the expected m/z positions and let the auto-RT function handle the RT positions.
Info: auto-RT positions are only generated once from the FIRST mzML input file. All other mzML input files are expected to
have similar RT positions!
To debug auto-RT, you can specify an mzML output file (see 'auto_rt:out_debug_TIC' option) which will contain four single spectra which represent:
1. the TIC (of the first mzML input file)
2. the smoothed version of #1
3. the signal/noise (S/N) ratio of #2
4. the centroided version of #2 (not #3!)
Since you can specify the smoothing aggressiveness using 'auto_rt:FHWM' and
the minimum S/N threshold for centroiding using 'auto_rt:SNThreshold', this should give you all the information needed to set the best parameters which fit your data.
Sensible default thresholds have been chosen though, such that adaption should only be required in extreme cases.
The intensity reported is the MAXIMUM intensity of all peaks each within the given tolerances for this row's position.
As output, one file in text format is given. It contains the actual RT and m/z positions of the data,
as well as RT delta (in [s]) and m/z delta (in ppm) from the expected position as specified in the EDTA file or as found by the auto-RT feature.
<pre>
RT - expected RT position (in [s])
mz - expected m/z position
RTobs - RT position (in [s]) of the quantified entity
dRT - RT delta (in [s]) to the input RT value (as specified in input file or as computed by the auto-rt heuristic)
mzobs - m/z position of the quantified entity
dppm - m/z delta (in parts-per-million) to the input m/z value (as specified in input file)
intensity - intensity quantification (height of centroided peak); this is an average over multiple scans, thus does usually not correspond to the maximum peak intensity
area - area of EIC (trapezoid integration)
</pre>
Each input experiment gives rise to the two RT and mz columns plus additional five columns (starting from RTobs) for each input file.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_EICExtractor.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_EICExtractor.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
struct HeaderInfo
{
explicit HeaderInfo(const String& filename)
{
header_description = "-- empty --";
TextFile tf;
tf.load(filename);
String content;
content.concatenate(tf.begin(), tf.end(), ";");
String search = "$$ Sample Description:";
Size pos = content.find(search);
if (pos != std::string::npos)
{
pos += search.size();
Size pos_end = content.find("$$", pos);
if (pos_end != std::string::npos)
{
String tmp = content.substr(pos, pos_end - pos - 1);
if (!tmp.trim().empty()) header_description = tmp;
//std::cerr << "Header info is: " << header_description << std::endl;
}
}
}
String header_description;
String filename;
};
/// TOPP tool extracting EICs at positions given in an EDTA file (or auto-detected
/// RT peaks) and reporting intensity/area per compound and input file as CSV.
class TOPPEICExtractor :
  public TOPPBase
{
public:
  TOPPEICExtractor() :
    TOPPBase("EICExtractor", "Extracts intensities from dedicated positions in a LC/MS map")
  {
  }

  /// Registers input/output files, extraction tolerances and the auto-RT parameters.
  void registerOptionsAndFlags_() override
  {
    registerInputFileList_("in", "<file>", ListUtils::create<String>(""), "Input raw data file");
    setValidFormats_("in", ListUtils::create<String>("mzML"));
    registerInputFileList_("in_header", "<file>", ListUtils::create<String>(""), "[for Waters data only] Read additional information from _HEADER.TXT. Provide one for each raw input file.", false);
    setValidFormats_("in_header", ListUtils::create<String>("txt"));
    registerInputFile_("pos", "<file>", "", "Input config file stating where to find signal");
    setValidFormats_("pos", ListUtils::create<String>("edta"));
    registerDoubleOption_("rt_tol", "", 3, "RT tolerance in [s] for finding max peak (whole RT range around RT middle)", false, false);
    registerDoubleOption_("mz_tol", "", 10, "m/z tolerance in [ppm] for finding a peak", false, false);
    registerIntOption_("rt_collect", "", 1, "# of scans up & down in RT from highest point for ppm estimation in result", false, false);

    registerTOPPSubsection_("auto_rt", "Parameters for automatic detection of injection RT peaks (no need to specify them in 'pos' input file)");
    registerFlag_("auto_rt:enabled", "Automatically detect injection peaks from TIC and quantify all m/z x RT combinations.");
    // note: the parameter name 'FHWM' is a historic typo, kept for backwards compatibility
    registerDoubleOption_("auto_rt:FHWM", "<FWHM [s]>", 5, "Expected full width at half-maximum of each raw RT peak in [s]. Gaussian smoothing filter with this width is applied to TIC.", false, true);
    registerDoubleOption_("auto_rt:SNThreshold", "<S/N>", 5, "S/N threshold for a smoothed raw peak to pass peak picking. Higher thresholds will result in less peaks.", false, true);
    registerOutputFile_("auto_rt:out_debug_TIC", "<file>", "", "Optional output file (for first input) containing the smoothed TIC, S/N levels and picked RT positions", false, true);
    setValidFormats_("auto_rt:out_debug_TIC", ListUtils::create<String>("mzML"));

    registerStringOption_("out_separator", "<sep>", ",", "Separator character for output CSV file.", false, true);
    //setValidStrings_("out_separator", ListUtils::create<String>(",!\t! ", '!')); // comma not allowed as valid string

    registerOutputFile_("out", "<file>", "", "Output quantitation file (multiple columns for each input compound)");
    setValidFormats_("out", ListUtils::create<String>("csv"));
  }

  /// Converts a spectrum back into a chromatogram (m/z axis holds RT), for debug output.
  MSChromatogram toChromatogram(const MSSpectrum& in) // for debugging
  {
    MSChromatogram out;
    for (const auto& peak : in)
    {
      out.emplace_back(peak.getMZ(), peak.getIntensity());
    }
    out.setChromatogramType(ChromatogramSettings::ChromatogramType::SELECTED_ION_CURRENT_CHROMATOGRAM);
    return out;
  }

  /// For each input mzML: optionally detects RT peaks from the TIC (first file only),
  /// then extracts the maximum peak and the integrated EIC area around each target
  /// position and appends the result columns; finally writes one CSV with all files.
  ExitCodes main_(int, const char**) override
  {
    //-------------------------------------------------------------
    // parameter handling
    //-------------------------------------------------------------
    StringList in = getStringList_("in");
    String edta = getStringOption_("pos");
    String out = getStringOption_("out");
    String out_sep = getStringOption_("out_separator");
    String out_TIC_debug = getStringOption_("auto_rt:out_debug_TIC");

    StringList in_header = getStringList_("in_header");

    // number of out_debug_TIC files and input files must be identical
    /*if (out_TIC_debug.size() > 0 && in.size() != out_TIC_debug.size())
    {
      OPENMS_LOG_FATAL_ERROR << "Error: number of input file 'in' and auto_rt:out_debug_TIC files must be identical!" << std::endl;
      return ILLEGAL_PARAMETERS;
    }*/

    // number of header files and input files must be identical
    if (!in_header.empty() && in.size() != in_header.size())
    {
      OPENMS_LOG_FATAL_ERROR << "Error: number of input file 'in' and 'in_header' files must be identical!" << std::endl;
      return ILLEGAL_PARAMETERS;
    }

    if (!getFlag_("auto_rt:enabled") && !out_TIC_debug.empty())
    {
      OPENMS_LOG_FATAL_ERROR << "Error: TIC output file requested, but auto_rt is not enabled! Either do not request the file or switch on 'auto_rt:enabled'." << std::endl;
      return ILLEGAL_PARAMETERS;
    }

    double rttol = getDoubleOption_("rt_tol");
    double mztol = getDoubleOption_("mz_tol");
    Size rt_collect = getIntOption_("rt_collect");

    //-------------------------------------------------------------
    // loading input
    //-------------------------------------------------------------
    FileHandler mzml_file;
    PeakFileOptions options;
    options.clearMSLevels();
    options.addMSLevel(1); // only MS1 spectra are needed for EIC extraction
    mzml_file.getOptions() = options;

    PeakMap exp; // unused 'exp_pp' removed
    FileHandler ed;
    ConsensusMap cm;
    ed.loadConsensusFeatures(edta, cm);

    StringList tf_single_header0, tf_single_header1, tf_single_header2; // header content, for each column

    std::vector<String> vec_single; // one line for each compound, multiple columns per experiment
    vec_single.resize(cm.size());

    PeakIntegrator peak_integrator; // for raw signal integration
    auto pi_param = peak_integrator.getDefaults();
    pi_param.setValue("integration_type", "trapezoid");
    peak_integrator.setParameters(pi_param);

    for (Size fi = 0; fi < in.size(); ++fi)
    {
      // load raw data
      mzml_file.loadExperiment(in[fi], exp, {FileTypes::MZML}, log_type_);
      exp.sortSpectra(true);

      if (exp.empty())
      {
        OPENMS_LOG_WARN << "The given file does not contain any conventional peak data, but might"
                           " contain chromatograms. This tool currently cannot handle them, sorry." << std::endl;
        return INCOMPATIBLE_INPUT_DATA;
      }

      // try to detect RT peaks (only for the first input file -- all others should align!)
      // cm.size() might change in here...
      if (getFlag_("auto_rt:enabled") && fi == 0)
      {
        ConsensusMap cm_local = cm; // we might have different RT peaks for each map if 'auto_rt' is enabled
        cm.clear(false); // reset global list (about to be filled)

        // compute TIC
        MSChromatogram tic = exp.calculateTIC();
        // 'tics*' spectra store RT in the m/z dimension (GaussFilter/PeakPicker require MSSpectrum input)
        MSSpectrum tics, tic_gf, tics_pp, tics_sn;
        for (Size ic = 0; ic < tic.size(); ++ic)
        { // rewrite Chromatogram to MSSpectrum (GaussFilter requires it)
          Peak1D peak;
          peak.setMZ(tic[ic].getRT());
          peak.setIntensity(tic[ic].getIntensity());
          tics.push_back(peak);
        }
        // smooth (no PP_CWT here due to efficiency reasons -- large FWHM take longer!)
        double fwhm = getDoubleOption_("auto_rt:FHWM");
        GaussFilter gf;
        Param p = gf.getParameters();
        p.setValue("gaussian_width", fwhm * 2); // wider than FWHM, just to be sure we have a fully smoothed peak. Merging two peaks is unlikely
        p.setValue("use_ppm_tolerance", "false");
        gf.setParameters(p);
        tic_gf = tics;
        gf.filter(tic_gf);

        // pick peaks
        PeakPickerHiRes pp;
        p = pp.getParameters();
        p.setValue("signal_to_noise", getDoubleOption_("auto_rt:SNThreshold"));
        pp.setParameters(p);
        pp.pick(tic_gf, tics_pp);

        if (!tics_pp.empty())
        {
          OPENMS_LOG_INFO << "Found " << tics_pp.size() << " auto-rt peaks at: ";
          for (Size ipp = 0; ipp != tics_pp.size(); ++ipp)
          {
            OPENMS_LOG_INFO << " " << tics_pp[ipp].getMZ();
          }
        }
        else
        {
          OPENMS_LOG_INFO << "Found no auto-rt peaks. Change threshold parameters!";
        }
        OPENMS_LOG_INFO << std::endl;

        if (!out_TIC_debug.empty()) // if debug file was given
        { // store intermediate steps for debug
          PeakMap out_debug;
          out_debug.addChromatogram(toChromatogram(tics));
          out_debug.addChromatogram(toChromatogram(tic_gf));

          SignalToNoiseEstimatorMedian<MSSpectrum> snt;
          snt.init(tics);
          for (Size is = 0; is < tics.size(); ++is)
          {
            Peak1D peak;
            peak.setMZ(tic[is].getPos()); // 'tic' and 'tics' hold the same positions (see conversion above)
            peak.setIntensity(snt.getSignalToNoise(is));
            tics_sn.push_back(peak);
          }
          out_debug.addChromatogram(toChromatogram(tics_sn));

          out_debug.addChromatogram(toChromatogram(tics_pp));
          // get rid of "native-id" missing warning
          for (Size id = 0; id < out_debug.size(); ++id) out_debug[id].setNativeID(String("spectrum=") + id);

          mzml_file.storeExperiment(out_TIC_debug, out_debug,{FileTypes::MZML});
          OPENMS_LOG_DEBUG << "Storing debug AUTO-RT: " << out_TIC_debug << std::endl;
        }

        // add target EICs: for each m/z with no/negative RT, add all combinations of that m/z with auto-RTs
        // duplicate m/z entries will be ignored!
        // all other lines with positive RT values are copied unaffected
        //do not allow doubles
        std::set<double> mz_doubles;
        for (ConsensusFeature& cf : cm_local)
        {
          if (cf.getRT() < 0)
          {
            if (mz_doubles.find(cf.getMZ()) == mz_doubles.end())
            {
              mz_doubles.insert(cf.getMZ());
            }
            else
            {
              OPENMS_LOG_INFO << "Found duplicate m/z entry (" << cf.getMZ() << ") for auto-rt. Skipping ..." << std::endl;
              continue;
            }

            // unused local 'cm_RT_multiplex' removed
            for (const Peak1D& pk : tics_pp)
            {
              ConsensusFeature f = cf;
              f.setRT(pk.getMZ()); // 'tics_pp' stores RT in the m/z dimension (see conversion above)
              cm.push_back(f);
            }
          }
          else
          { // default feature with no auto-rt
            OPENMS_LOG_INFO << "copying feature with RT " << cf.getRT() << std::endl;
            cm.push_back(cf);
          }
        }

        // resize, since we have more positions now
        vec_single.resize(cm.size());
      }

      // search for each EIC and add up
      Int not_found(0); // unused local 'quant' removed

      String description;
      if (fi < in_header.size())
      {
        HeaderInfo info(in_header[fi]);
        description = info.header_description;
      }

      if (fi == 0)
      { // two additional columns for first file (theoretical RT and m/z)
        tf_single_header0 << "" << "";
        tf_single_header1 << "" << "";
        tf_single_header2 << "RT" << "mz";
      }

      // 5 entries for each input file
      tf_single_header0 << File::basename(in[fi]) << "" << "" << "" << "" << "";
      tf_single_header1 << description << "" << "" << "" << "" << "";
      tf_single_header2 << "RTobs" << "dRT" << "mzobs" << "dppm" << "intensity" << "area";

      for (Size i = 0; i < cm.size(); ++i)
      {
        //std::cerr << "Rt" << cm[i].getRT() << " mz: " << cm[i].getMZ() << " R " << cm[i].getMetaValue("rank") << "\n";
        double mz_da = mztol * cm[i].getMZ() / 1e6; // mz tolerance in Dalton
        PeakMap::ConstAreaIterator it = exp.areaBeginConst(cm[i].getRT() - rttol / 2,
                                                           cm[i].getRT() + rttol / 2,
                                                           cm[i].getMZ() - mz_da,
                                                           cm[i].getMZ() + mz_da);
        Peak2D max_peak;
        max_peak.setIntensity(0);
        max_peak.setRT(cm[i].getRT());
        max_peak.setMZ(cm[i].getMZ());
        map<double, double> rt_highest;
        for (; it != exp.areaEndConst(); ++it)
        {
          // extract intensity of highest peak
          if (max_peak.getIntensity() < it->getIntensity())
          {
            max_peak.setIntensity(it->getIntensity());
            max_peak.setRT(it.getRT());
            max_peak.setMZ(it->getMZ());
          }
          // take maximum only for each RT
          if (rt_highest[it.getRT()] < it->getIntensity()) rt_highest[it.getRT()] = it->getIntensity();
        }
        // copy to EIC for area integration
        MSChromatogram eic;
        eic.reserve(rt_highest.size());
        for (const auto& rt_int : rt_highest)
        {
          ChromatogramPeak p;
          p.setRT(rt_int.first);
          p.setIntensity(rt_int.second);
          // std::cout << rt_int.first << "\t" << rt_int.second << std::endl; // for debugging. output a single chromatogram
          eic.push_back(std::move(p));
        }
        PeakIntegrator::PeakArea peak_area = peak_integrator.integratePeak(eic, max_peak.getRT() - rttol / 2, max_peak.getRT() + rttol / 2);

        double ppm = 0; // observed m/z offset

        if (max_peak.getIntensity() == 0)
        {
          ++not_found;
        }
        else
        {
          // take median for m/z found
          std::vector<double> mz;
          PeakMap::Iterator itm = exp.RTBegin(max_peak.getRT());
          SignedSize low = std::min<SignedSize>(std::distance(exp.begin(), itm), rt_collect);
          SignedSize high = std::min<SignedSize>(std::distance(itm, exp.end()) - 1, rt_collect);
          PeakMap::AreaIterator itt = exp.areaBegin((itm - low)->getRT() - 0.01, (itm + high)->getRT() + 0.01, cm[i].getMZ() - mz_da, cm[i].getMZ() + mz_da);
          for (; itt != exp.areaEnd(); ++itt)
          {
            mz.push_back(itt->getMZ());
            //std::cerr << "ppm: " << itt.getRT() << " " << itt->getMZ() << " " << itt->getIntensity() << std::endl;
          }

          if ((SignedSize)mz.size() > (low + high + 1)) OPENMS_LOG_WARN << "Compound " << i << " has overlapping peaks [" << mz.size() << "/" << low + high + 1 << "]" << std::endl;

          if (!mz.empty())
          {
            double avg_mz = std::accumulate(mz.begin(), mz.end(), 0.0) / double(mz.size());
            //std::cerr << "avg: " << avg_mz << "\n";
            ppm = (avg_mz - cm[i].getMZ()) / cm[i].getMZ() * 1e6;
          }
        }

        // appending the second column set requires separator
        String append_sep = (fi == 0 ? "" : out_sep);

        vec_single[i] += append_sep; // new line
        if (fi == 0)
        {
          vec_single[i] += String(cm[i].getRT()) + out_sep +
                           String(cm[i].getMZ()) + out_sep;
        }
        vec_single[i] += String(max_peak.getRT()) + out_sep +
                         String(max_peak.getRT() - cm[i].getRT()) + out_sep +
                         String(max_peak.getMZ()) + out_sep +
                         String(ppm) + out_sep +
                         String(max_peak.getIntensity()) + out_sep +
                         String(peak_area.area);
      }

      if (not_found)
      {
        OPENMS_LOG_INFO << "Missing peaks for " << not_found << " compounds in file '" << in[fi] << "'.\n";
      }
    }

    //-------------------------------------------------------------
    // create header
    //-------------------------------------------------------------
    vec_single.insert(vec_single.begin(), ListUtils::concatenate(tf_single_header2, out_sep));
    vec_single.insert(vec_single.begin(), ListUtils::concatenate(tf_single_header1, out_sep));
    vec_single.insert(vec_single.begin(), ListUtils::concatenate(tf_single_header0, out_sep));

    //-------------------------------------------------------------
    // writing output
    //-------------------------------------------------------------
    TextFile tf;
    for (const auto& v : vec_single)
    {
      tf.addLine(v);
    }
    tf.store(out);

    return EXECUTION_OK;
  }
};
/// Tool entry point: constructs the TOPP tool and delegates all
/// command-line parsing and execution to TOPPBase::main().
int main(int argc, const char** argv)
{
  return TOPPEICExtractor().main(argc, argv);
}
/// @endcond
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/ANALYSIS/ID/PeptideProteinResolution.h>
#include <OpenMS/ANALYSIS/QUANTITATION/PeptideAndProteinQuant.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FORMAT/FileTypes.h>
#include <OpenMS/FORMAT/SVOutStream.h>
#include <OpenMS/PROCESSING/ID/IDFilter.h>
#include <OpenMS/SYSTEM/File.h>
#include <OpenMS/FORMAT/MzTabFile.h>
#include <OpenMS/FORMAT/MzTab.h>
#include <OpenMS/METADATA/ExperimentalDesign.h>
#include <OpenMS/FORMAT/ExperimentalDesignFile.h>
#include <cmath>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_ProteinQuantifier ProteinQuantifier
@brief Compute peptide and protein abundances from annotated feature/consensus maps or from identification results.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> potential predecessor tools </th>
<td VALIGN="middle" ROWSPAN=3> &rarr; ProteinQuantifier &rarr;</td>
<th ALIGN = "center"> potential successor tools </th>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_IDMapper </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=2> external tools @n e.g. for statistical analysis</td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_FeatureLinkerUnlabeled @n (or another feature grouping tool) </td>
</tr>
</table>
</CENTER>
Reference:\n
Weisser <em>et al.</em>: <a href="https://doi.org/10.1021/pr300992u">An automated pipeline for high-throughput label-free quantitative proteomics</a> (J. Proteome Res., 2013, PMID: 23391308).
<B>Input: featureXML or consensusXML</B>
Quantification is based on the intensity values of the features in the input files. Feature intensities are first accumulated to peptide abundances, according to the peptide identifications annotated to the features/feature groups. Then, abundances of the peptides of a protein are aggregated to compute the protein abundance.
The peptide-to-protein step uses the (e.g. 3) most abundant proteotypic peptides per protein to compute the protein abundances. This is a general version of the "top 3 approach" (but only for relative quantification) described in:\n
Silva <em>et al.</em>: Absolute quantification of proteins by LCMS<sup>E</sup>: a virtue of parallel MS acquisition (Mol. Cell. Proteomics, 2006, PMID: 16219938).
Only features/feature groups with unambiguous peptide annotation are used for peptide quantification. It is possible to resolve ambiguities before applying ProteinQuantifier using one of several equivalent mechanisms in OpenMS: @ref TOPP_IDConflictResolver, @ref TOPP_ConsensusID (algorithm @p best), or @ref TOPP_FileFilter (option @p id:keep_best_score_id).
Similarly, only proteotypic peptides (i.e. those matching to exactly one protein) are used for protein quantification <em>by default</em>. Peptide/protein IDs from multiple identification runs can be handled, but will not be differentiated (i.e. protein accessions for a peptide will be accumulated over all identification runs). See section "Optional input: Protein inference/grouping results" below for exceptions to this.
Peptides with the same sequence, but with different modifications are quantified separately on the peptide level, but treated as one peptide for the protein quantification (i.e. the contributions of differently-modified variants of the same peptide are accumulated).
<B>Input: idXML</B>
Quantification based on identification results uses spectral counting, i.e. the abundance of each peptide is the number of times that peptide was identified from an MS2 spectrum (considering only the best hit per spectrum). Different identification runs in the input are treated as different samples; this makes it possible to quantify several related samples at once by merging the corresponding idXML files with @ref TOPP_IDMerger. Depending on the presence of multiple runs, output format and applicable parameters are the same as for featureXML and consensusXML, respectively.
The notes above regarding quantification on the protein level and the treatment of modifications also apply to idXML input. In particular, this means that the settings @p top 0 and @p aggregate @p sum should be used to get the "classical" spectral counting quantification on the protein level (where all identifications of all peptides of a protein are summed up).
<B>Optional input: Protein inference/grouping results</B>
By default only proteotypic peptides (i.e. those matching to exactly one protein) are used for protein quantification. However, this limitation can be overcome: Protein inference results for the whole sample set can be supplied with the @p protein_groups option (or included in a featureXML input). In that case, the peptide-to-protein references from that file are used (rather than those from @p in), and groups of indistinguishable proteins will be quantified. Each reported protein quantity then refers to the total for the respective group.
In order for everything to work correctly, it is important that the protein inference results come from the same identifications that were used to annotate the quantitative data. We suggest to use the OpenMS tool ProteinInference @ref TOPP_ProteinInference.
More information below the parameter specification.
@note Currently mzIdentML (mzid) is not directly supported as an input/output format of this tool. Convert mzid files to/from idXML using @ref TOPP_IDFileConverter if necessary.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_ProteinQuantifier.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_ProteinQuantifier.html
<B>Output format</B>
The output files produced by this tool have a table format, with columns as described below:
<b>Protein output</b> (one protein/set of indistinguishable proteins per line):
- @b protein: Protein accession(s) (as in the annotations in the input file; separated by "/" if more than one).
- @b n_proteins: Number of indistinguishable proteins quantified (usually "1").
- @b protein_score: Protein score, e.g. ProteinProphet probability (if available).
- @b n_peptides: Number of proteotypic peptides observed for this protein (or group of indistinguishable proteins) across all samples. Note that not necessarily all of these peptides contribute to the protein abundance (depending on parameter @p top).
- @b abundance: Computed protein abundance. For consensusXML input, there will be one column per sample ("abundance_sample1", "abundance_sample2", etc.).
<b>Peptide output</b> (one peptide or - if @p best_charge_and_fraction is set - one charge state and fraction of a peptide per line):
- @b peptide: Peptide sequence. Only peptides that occur in unambiguous annotations of features are reported.
- @b protein: Protein accession(s) for the peptide (separated by "/" if more than one).
- @b n_proteins: Number of proteins this peptide maps to. (Same as the number of accessions in the previous column.)
- @b charge: Charge state quantified in this line. "0" (for "all charges") unless @p best_charge_and_fraction was set.
- @b abundance: Computed abundance for this peptide. If the charge in the preceding column is 0, this is the total abundance of the peptide over all charge states; otherwise, it is only the abundance observed for the indicated charge (in this case, there may be more than one line for the peptide sequence). Again, for consensusXML input, there will be one column per sample ("abundance_sample1", "abundance_sample2", etc.). Also for consensusXML, the reported values are already normalized if @p consensus:normalize was set.
<B>Protein quantification examples</B>
While quantification on the peptide level is fairly straight-forward, a number of options influence quantification on the protein level - especially for consensusXML input. The three parameters @p top:N, @p top:include_all and @p consensus:fix_peptides determine which peptides are used to quantify proteins in different samples.
As an example, consider a protein with four proteotypic peptides. Each peptide is detected in a subset of three samples, as indicated in the table below. The peptides are ranked by abundance (1: highest, 4: lowest; assuming for simplicity that the order is the same in all samples).
<CENTER>
<table>
<tr>
<td></td>
<td ALIGN="center" BGCOLOR="#EBEBEB"> sample 1 </td>
<td ALIGN="center" BGCOLOR="#EBEBEB"> sample 2 </td>
<td ALIGN="center" BGCOLOR="#EBEBEB"> sample 3 </td>
</tr>
<tr>
<td ALIGN="center" BGCOLOR="#EBEBEB"> peptide 1 </td>
<td ALIGN="center"> X </td>
<td></td>
<td ALIGN="center"> X </td>
</tr>
<tr>
<td ALIGN="center" BGCOLOR="#EBEBEB"> peptide 2 </td>
<td ALIGN="center"> X </td>
<td ALIGN="center"> X </td>
<td></td>
</tr>
<tr>
<td ALIGN="center" BGCOLOR="#EBEBEB"> peptide 3 </td>
<td ALIGN="center"> X </td>
<td ALIGN="center"> X </td>
<td ALIGN="center"> X </td>
</tr>
<tr>
<td ALIGN="center" BGCOLOR="#EBEBEB"> peptide 4 </td>
<td ALIGN="center"> X </td>
<td ALIGN="center"> X </td>
<td></td>
</tr>
</table>
</CENTER>
Different parameter combinations lead to different quantification scenarios, as shown here:
<CENTER>
<table>
<tr>
<td ALIGN="center" BGCOLOR="#EBEBEB" COLSPAN=3> @b parameters \n "*": no effect in this case </td>
<td ALIGN="center" BGCOLOR="#EBEBEB" COLSPAN=3> <b>peptides used for quantification</b> \n "(...)": not quantified here because ... </td>
<td ALIGN="center" VALIGN="middle" BGCOLOR="#EBEBEB" ROWSPAN=2> explanation </td>
</tr>
<tr>
<td ALIGN="center" BGCOLOR="#EBEBEB"> @p top </td>
<td ALIGN="center" BGCOLOR="#EBEBEB"> @p include_all </td>
<td ALIGN="center" BGCOLOR="#EBEBEB"> @p c.:fix_peptides </td>
<td ALIGN="center" BGCOLOR="#EBEBEB"> sample 1 </td>
<td ALIGN="center" BGCOLOR="#EBEBEB"> sample 2 </td>
<td ALIGN="center" BGCOLOR="#EBEBEB"> sample 3 </td>
</tr>
<tr>
<td ALIGN="center"> 0 </td>
<td ALIGN="center"> * </td>
<td ALIGN="center"> no </td>
<td ALIGN="center"> 1, 2, 3, 4 </td>
<td ALIGN="center"> 2, 3, 4 </td>
<td ALIGN="center"> 1, 3 </td>
<td> all peptides </td>
</tr>
<tr>
<td ALIGN="center"> 1 </td>
<td ALIGN="center"> * </td>
<td ALIGN="center"> no </td>
<td ALIGN="center"> 1 </td>
<td ALIGN="center"> 2 </td>
<td ALIGN="center"> 1 </td>
<td> single most abundant peptide </td>
</tr>
<tr>
<td ALIGN="center"> 2 </td>
<td ALIGN="center"> * </td>
<td ALIGN="center"> no </td>
<td ALIGN="center"> 1, 2 </td>
<td ALIGN="center"> 2, 3 </td>
<td ALIGN="center"> 1, 3 </td>
<td> two most abundant peptides </td>
</tr>
<tr>
<td ALIGN="center"> 3 </td>
<td ALIGN="center"> no </td>
<td ALIGN="center"> no </td>
<td ALIGN="center"> 1, 2, 3 </td>
<td ALIGN="center"> 2, 3, 4 </td>
<td ALIGN="center"> (too few peptides) </td>
<td> three most abundant peptides </td>
</tr>
<tr>
<td ALIGN="center"> 3 </td>
<td ALIGN="center"> yes </td>
<td ALIGN="center"> no </td>
<td ALIGN="center"> 1, 2, 3 </td>
<td ALIGN="center"> 2, 3, 4 </td>
<td ALIGN="center"> 1, 3 </td>
<td> three or fewer most abundant peptides </td>
</tr>
<tr>
<td ALIGN="center"> 4 </td>
<td ALIGN="center"> no </td>
<td ALIGN="center"> * </td>
<td ALIGN="center"> 1, 2, 3, 4 </td>
<td ALIGN="center"> (too few peptides) </td>
<td ALIGN="center"> (too few peptides) </td>
<td> four most abundant peptides </td>
</tr>
<tr>
<td ALIGN="center"> 4 </td>
<td ALIGN="center"> yes </td>
<td ALIGN="center"> * </td>
<td ALIGN="center"> 1, 2, 3, 4 </td>
<td ALIGN="center"> 2, 3, 4 </td>
<td ALIGN="center"> 1, 3 </td>
<td> four or fewer most abundant peptides </td>
</tr>
<tr>
<td ALIGN="center"> 0 </td>
<td ALIGN="center"> * </td>
<td ALIGN="center"> yes </td>
<td ALIGN="center"> 3 </td>
<td ALIGN="center"> 3 </td>
<td ALIGN="center"> 3 </td>
<td> all peptides present in every sample </td>
</tr>
<tr>
<td ALIGN="center"> 1 </td>
<td ALIGN="center"> * </td>
<td ALIGN="center"> yes </td>
<td ALIGN="center"> 3 </td>
<td ALIGN="center"> 3 </td>
<td ALIGN="center"> 3 </td>
<td> single peptide present in most samples </td>
</tr>
<tr>
<td ALIGN="center"> 2 </td>
<td ALIGN="center"> no </td>
<td ALIGN="center"> yes </td>
<td ALIGN="center"> 1, 3 </td>
<td ALIGN="center"> (peptide 1 missing) </td>
<td ALIGN="center"> 1, 3 </td>
<td> two peptides present in most samples </td>
</tr>
<tr>
<td ALIGN="center"> 2 </td>
<td ALIGN="center"> yes </td>
<td ALIGN="center"> yes </td>
<td ALIGN="center"> 1, 3 </td>
<td ALIGN="center"> 3 </td>
<td ALIGN="center"> 1, 3 </td>
<td> two or fewer peptides present in most samples </td>
</tr>
<tr>
<td ALIGN="center"> 3 </td>
<td ALIGN="center"> no </td>
<td ALIGN="center"> yes </td>
<td ALIGN="center"> 1, 2, 3 </td>
<td ALIGN="center"> (peptide 1 missing) </td>
<td ALIGN="center"> (peptide 2 missing) </td>
<td> three peptides present in most samples </td>
</tr>
<tr>
<td ALIGN="center"> 3 </td>
<td ALIGN="center"> yes </td>
<td ALIGN="center"> yes </td>
<td ALIGN="center"> 1, 2, 3 </td>
<td ALIGN="center"> 2, 3 </td>
<td ALIGN="center"> 1, 3 </td>
<td> three or fewer peptides present in most samples </td>
</tr>
</table>
</CENTER>
<B>Further considerations for parameter selection</B>
With @p best_charge_and_fraction and @p aggregate, there is a trade-off between comparability of protein abundances within a sample and of abundances for the same protein across different samples.\n
Setting @p best_charge_and_fraction may increase reproducibility between samples, but will distort the proportions of protein abundances within a sample. The reason is that ionization properties vary between peptides, but should remain constant across samples. Filtering by charge state can help to reduce the impact of feature detection differences between samples.\n
For @p aggregate, there is a qualitative difference between @p (intensity weighted) mean/median and @p sum in the effect that missing peptide abundances have (only if @p include_all is set or @p top is 0): @p (intensity weighted) mean and @p median ignore missing cases, averaging only present values. If low-abundant peptides are not detected in some samples, the computed protein abundances for those samples may thus be too optimistic. @p sum implicitly treats missing values as zero, so this problem does not occur and comparability across samples is ensured. However, with @p sum the total number of peptides ("summands") available for a protein may affect the abundances computed for it (depending on @p top), so results within a sample may become unproportional.
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPProteinQuantifier :
public TOPPBase
{
public:
/// Constructor: registers the tool with TOPPBase. All class-type members
/// default-construct; only the plain bool 'spectral_counting_' needs an
/// explicit initializer.
TOPPProteinQuantifier() :
  TOPPBase("ProteinQuantifier", "Compute peptide and protein abundances"),
  spectral_counting_(false)
{
}
protected:
  // shorthands for the result/statistics types of the quantification algorithm:
  typedef PeptideAndProteinQuant::PeptideQuant PeptideQuant;
  typedef PeptideAndProteinQuant::ProteinQuant ProteinQuant;
  typedef PeptideAndProteinQuant::SampleAbundances SampleAbundances;
  typedef PeptideAndProteinQuant::Statistics Statistics;
  typedef ProteinIdentification::ProteinGroup ProteinGroup;
  Param algo_params_; // parameters for PeptideAndProteinQuant algorithm
  ProteinIdentification proteins_; // protein inference results (proteins)
  PeptideIdentificationList peptides_; // protein inference results (peptides)
  ConsensusMap::ColumnHeaders columns_headers_; // information about experimental design
  bool spectral_counting_; // quantification based on spectral counting?
void registerOptionsAndFlags_() override
{
registerInputFile_("in", "<file>", "", "Input file");
setValidFormats_("in", ListUtils::create<String>("featureXML,consensusXML,idXML"));
registerInputFile_("protein_groups", "<file>", "", "Protein inference results for the identification runs that were used to annotate the input (e.g. via the ProteinInference tool).\nInformation about indistinguishable proteins will be used for protein quantification.", false);
setValidFormats_("protein_groups", ListUtils::create<String>("idXML"));
registerInputFile_("design", "<file>", "", "input file containing the experimental design", false);
setValidFormats_("design", ListUtils::create<String>("tsv"));
// output
registerOutputFile_("out", "<file>", "", "Output file for protein abundances", false);
setValidFormats_("out", ListUtils::create<String>("csv"));
registerOutputFile_("peptide_out", "<file>", "", "Output file for peptide abundances", false);
setValidFormats_("peptide_out", ListUtils::create<String>("csv"));
registerOutputFile_("mztab", "<file>", "", "Output file (mzTab)", false);
setValidFormats_("mztab", ListUtils::create<String>("mzTab"));
// algorithm parameters:
addEmptyLine_();
Param temp = PeptideAndProteinQuant().getParameters();
registerFullParam_(temp);
registerStringOption_("greedy_group_resolution", "<choice>", "false", "Pre-process identifications with greedy resolution of shared peptides based on the protein group probabilities. (Only works with an idXML file given as protein_groups parameter).", false);
setValidStrings_("greedy_group_resolution", ListUtils::create<String>("true,false"));
registerFlag_("ratios", "Add the log2 ratios of the abundance values to the output. Format: log_2(x_0/x_0) <sep> log_2(x_1/x_0) <sep> log_2(x_2/x_0) ...", false);
registerFlag_("ratiosSILAC", "Add the log2 ratios for a triple SILAC experiment to the output. Only applicable to consensus maps of exactly three sub-maps. Format: log_2(heavy/light) <sep> log_2(heavy/middle) <sep> log_2(middle/light)", false);
registerStringOption_("file_and_channel_level_output", "<choice>", "false", "Output protein abundances with detailed file+channel level headers (similar to detailed peptide output). When enabled, protein output will show abundance_filename_channel columns instead of abundance_N.", false);
setValidStrings_("file_and_channel_level_output", {"true","false"});
registerTOPPSubsection_("format", "Output formatting options");
registerStringOption_("format:separator", "<sep>", "", "Character(s) used to separate fields; by default, the 'tab' character is used", false);
registerStringOption_("format:quoting", "<method>", "double", "Method for quoting of strings: 'none' for no quoting, 'double' for quoting with doubling of embedded quotes,\n'escape' for quoting with backslash-escaping of embedded quotes", false);
setValidStrings_("format:quoting", ListUtils::create<String>("none,double,escape"));
registerStringOption_("format:replacement", "<x>", "_", "If 'quoting' is 'none', used to replace occurrences of the separator in strings before writing", false);
}
/// Write a table of peptide results.
/// One row per peptide (or per charge state and fraction if the
/// 'best_charge_and_fraction' parameter is set). Abundance columns follow
/// the file/channel order of the experimental design 'ed'; exactly one
/// value is written per column so the rows stay aligned with the header.
void writePeptideTable_(SVOutStream& out, const PeptideQuant& quant, const ExperimentalDesign& ed)
{
  ExperimentalDesign::MSFileSection msfile_section = ed.getMSFileSection();
  // Extract the Spectra Filepath column from the design
  // (path and extension stripped, keyed by fraction group):
  map<UInt64, String> design_filenames;
  for (ExperimentalDesign::MSFileSectionEntry const& f : msfile_section)
  {
    const String fn = FileHandler::stripExtension(File::basename(f.path));
    design_filenames[f.fraction_group] = fn;
  }
  // write header:
  out << "peptide" << "protein" << "n_proteins" << "charge";
  for (const auto& [fraction_group, filename] : design_filenames)
  {
    for (Size c = 1; c <= ed.getNumberOfLabels(); ++c)
    {
      out << "abundance|" + filename + "|ch" + String(c);
    }
  }
  out << "fraction" << endl;
  bool best_charge_and_fraction = algo_params_.getValue("best_charge_and_fraction") == "true";
  for (auto const& q : quant) // loop over sequence->peptide data
  {
    if (q.second.total_abundances.empty())
    {
      continue; // not quantified
    }
    StringList accessions;
    for (String acc : q.second.accessions)
    {
      // '/' is used as separator when concatenating below:
      accessions.push_back(acc.substitute('/', '_'));
    }
    String protein = ListUtils::concatenate(accessions, "/");
    if (best_charge_and_fraction)
    {
      // write individual abundances (one line for each charge state and fraction):
      for (auto const& fa : q.second.abundances) // loop over fractions
      {
        const Size fraction = fa.first;
        auto& filename_to_chargemap = fa.second; // filename -> (charge -> (channel -> abundance))
        // determine all charge states the peptide was quantified in, over all files:
        std::set<Int64> charge_of_peptide;
        for (const auto& filenames : filename_to_chargemap)
        {
          for (const auto& [charge, channel_abundances] : filenames.second)
          {
            charge_of_peptide.insert(charge);
          }
        }
        for (Int64 charge : charge_of_peptide)
        {
          // write peptide sequence, protein, number of accessions, and charge:
          out << q.first.toString() << protein << accessions.size() << charge;
          // fill file + channel/label columns; note: we need to use the order
          // given in the experimental design file:
          for (const auto& file : design_filenames)
          {
            const String& filename = file.second;
            for (Size c = 1; c <= ed.getNumberOfLabels(); ++c)
            {
              // Always write exactly one value per file/channel column
              // (0.0 if nothing was quantified) to keep rows aligned with
              // the header. (The previous implementation skipped the column
              // entirely when the file or the channel was missing, which
              // shifted all following values in the row.)
              double abundance = 0.0;
              auto file_it = filename_to_chargemap.find(filename);
              if (file_it != filename_to_chargemap.end())
              {
                auto charge_it = file_it->second.find(charge);
                if (charge_it != file_it->second.end())
                {
                  auto channel_it = charge_it->second.find(c);
                  if (channel_it != charge_it->second.end())
                  {
                    abundance = channel_it->second;
                  }
                }
              }
              out << abundance;
            }
          }
          out << fraction << endl; // output fraction
        }
      }
    }
    else
    {
      // write total abundances (accumulated over all charge states and fractions):
      out << q.first.toString() << protein << accessions.size() << 0;
      for (size_t sample_id = 0; sample_id < ed.getNumberOfSamples(); ++sample_id)
      {
        // write abundance for the sample if it exists, 0 otherwise:
        SampleAbundances::const_iterator pos = q.second.total_abundances.find(sample_id);
        out << (pos != q.second.total_abundances.end() ? pos->second : 0.0);
      }
      out << "all" << endl;
    }
  }
}
/// Write header for protein table based on output format options:
/// per-sample abundance columns by default, or per file+channel columns if
/// 'channel_level_output' is set; optional ratio/SILAC-ratio columns.
void writeProteinTableHeader_(SVOutStream& out, const ExperimentalDesign& ed,
                              const map<UInt64, map<UInt64, String>>& design_group_fraction_filename,
                              UInt64 n_files, bool channel_level_output, bool print_ratios, bool print_SILACratios)
{
  // fixed columns:
  out << "protein" << "n_proteins" << "protein_score" << "n_peptides";
  if (channel_level_output)
  {
    OPENMS_LOG_INFO << "Writing detailed protein output for " << design_group_fraction_filename.size()
                    << " fraction groups "
                    << n_files << " files and "
                    << ed.getNumberOfLabels() << " channels." << std::endl;
    // Use detailed file+channel headers
    for (const auto& [fraction_group, filename_map] : design_group_fraction_filename) // ordered by fraction_group identifier
    {
      for (const auto& [fraction, filename] : filename_map)
      {
        for (Size c = 1; c <= ed.getNumberOfLabels(); ++c)
        {
          out << "abundance|" + filename + "|ch" + String(c);
        }
      }
    }
  }
  else if (ed.getNumberOfSamples() <= 1)
  {
    out << "abundance"; // single sample -> single abundance column
  }
  else
  {
    // Get sample condition names from experimental design sample section
    const auto& sample_section = ed.getSampleSection();
    // use the tool's log facility (not std::cout, which bypasses the
    // logging configuration used everywhere else in this tool):
    OPENMS_LOG_INFO << "Writing protein output for " << ed.getNumberOfSamples()
                    << " samples." << std::endl;
    for (Size i = 0; i < ed.getNumberOfSamples(); ++i) // samples are 0-indexed
    {
      String sample_condition = "unknown";
      if (sample_section.hasFactor("MSstats_Condition"))
      {
        sample_condition = sample_section.getFactorValue(i, "MSstats_Condition");
      }
      out << "abundance_sample" + String(i + 1) + "[" + sample_condition + "]";
    }
    // TODO MULTIPLEXING: check if correct
    // if ratios-flag is set, print log2-ratios. ratio_1 <sep> ratio_x ....
    if (print_ratios)
    {
      for (Size i = 0; i < ed.getNumberOfSamples(); ++i)
      {
        out << "ratio_" + String(i + 1);
      }
    }
    // if ratiosSILAC-flag is set, print SILAC log2-ratios, only if three samples
    if (print_SILACratios && ed.getNumberOfSamples() == 3)
    {
      for (Size i = 0; i < ed.getNumberOfSamples(); ++i)
      {
        out << "SILACratio_" + String(i + 1);
      }
    }
  }
  out << endl;
}
/// Write a table of protein results.
/// One row per protein (or group of indistinguishable proteins, if inference
/// results are available). Abundance columns are per sample, or per file and
/// channel if 'file_and_channel_level_output' is set; optional log2 ratio
/// columns follow. Missing abundances are written as 0.0 so every row has
/// the same number of columns as the header.
void writeProteinTable_(SVOutStream& out, const ProteinQuant& quant, const ExperimentalDesign& ed)
{
  const bool print_ratios = getFlag_("ratios");
  const bool print_SILACratios = getFlag_("ratiosSILAC");
  const bool channel_level_output = (getStringOption_("file_and_channel_level_output") == "true");
  ExperimentalDesign::MSFileSection msfile_section = ed.getMSFileSection();
  // Extract the Spectra Filepath column from the design
  // (path/extension stripped, keyed by fraction group and fraction):
  map<UInt64, map<UInt64, String>> design_group_fraction_filename;
  UInt64 n_files = 0;
  for (ExperimentalDesign::MSFileSectionEntry const& f : msfile_section)
  {
    const String fn = FileHandler::stripExtension(File::basename(f.path));
    design_group_fraction_filename[f.fraction_group][f.fraction] = fn;
    n_files++;
  }
  // Write table header
  writeProteinTableHeader_(out, ed, design_group_fraction_filename, n_files,
                           channel_level_output, print_ratios, print_SILACratios);
  // mapping: accession of leader -> (accessions of grouped proteins, score)
  map<String, pair<StringList, double> > leader_to_group;
  if (!proteins_.getIndistinguishableProteins().empty())
  {
    // const ref: no need to copy each group (we only read from it)
    for (const auto& group : proteins_.getIndistinguishableProteins())
    {
      StringList& accessions = leader_to_group[group.accessions[0]].first;
      accessions = group.accessions;
      for (auto& acc : accessions)
      {
        acc.substitute('/', '_'); // to allow concatenation later
      }
      leader_to_group[group.accessions[0]].second = group.probability;
    }
  }
  for (auto const& q : quant) // for each protein quantification
  {
    if (q.second.total_abundances.empty())
    {
      continue; // not quantified
    }
    // safe lookup of a sample's total abundance (0.0 if not quantified):
    auto abundance_of = [&q](size_t sample_id) {
      SampleAbundances::const_iterator pos = q.second.total_abundances.find(sample_id);
      return (pos != q.second.total_abundances.end()) ? pos->second : 0.0;
    };
    if (leader_to_group.empty())
    {
      out << q.first << 1;
      if (proteins_.getHits().empty())
      {
        out << 0;
      }
      else
      {
        // guard: findHit may return the end iterator if the accession has
        // no protein hit - dereferencing it unconditionally was UB:
        vector<ProteinHit>::iterator pos = proteins_.findHit(q.first);
        out << (pos != proteins_.getHits().end() ? pos->getScore() : 0.0);
      }
    }
    else
    {
      pair<StringList, double>& group = leader_to_group[q.first];
      out << ListUtils::concatenate(group.first, '/') << group.first.size()
          << group.second;
    }
    out << q.second.peptide_abundances.size(); // number of peptides observed
    if (channel_level_output)
    {
      // Write detailed abundances (file+channel level).
      // We loop over the filenames in the design file, as this is the order we expect in the output.
      for (const auto& [group_id, fraction_to_filename_map] : design_group_fraction_filename)
      {
        for (auto [fraction, design_filename] : fraction_to_filename_map)
        {
          // NOTE(review): 'design_filename' was already stripped of path and
          // extension when the map was built above; this second strip is a
          // no-op unless filenames contain extra dots - confirm intended.
          design_filename = FileHandler::stripExtension(File::basename(design_filename));
#ifdef DEBUG_PROTEINQUANTIFIER
          std::cout
            << "Experimental design: fraction group: " << group_id
            << ", filename: '" << design_filename
            << "', fraction: " << fraction
            << " of the experimental design." << std::endl;
#endif
          // for each file in the design, fill the channels quantity
          for (Size c = 1; c <= ed.getNumberOfLabels(); ++c)
          {
            double channel_quantity{};
            const auto& filename_to_channel_map = q.second.channel_level_abundances;
            if (auto file_level_it = filename_to_channel_map.find(design_filename);
                file_level_it != filename_to_channel_map.end())
            {
              // Found the file, now search for the channel
              if (auto channel_it = file_level_it->second.find(c);
                  channel_it != file_level_it->second.end())
              {
                channel_quantity = channel_it->second; // there should be only one entry per file+channel
              }
            }
            out << channel_quantity; // Always output a value (0.0 if no data found) to maintain CSV structure
          }
        }
      }
    }
    else
    {
      for (size_t sample_id = 0; sample_id < ed.getNumberOfSamples(); ++sample_id)
      {
        // write abundance for the sample if it exists, 0 otherwise:
        out << abundance_of(sample_id);
      }
    }
    // if ratios-flag is set, print log2-ratios: ab1/ab0, ab2/ab0, ..., ab'n/ab0
    if (print_ratios)
    {
      const double log2 = log(2.0);
      const double ref_abundance = abundance_of(0);
      out << 0; // = log(1) / log2
      for (size_t sample_id = 1; sample_id < ed.getNumberOfSamples(); ++sample_id)
      {
        const double abundance = abundance_of(sample_id);
        // write 0 unless both abundances are present and non-zero; the
        // previous implementation dereferenced 'find(0)' without checking
        // (UB if the reference sample was missing) and could emit inf/nan:
        out << ((abundance > 0.0) && (ref_abundance > 0.0) ?
                log(abundance / ref_abundance) / log2 : 0.0);
      }
    }
    // if ratiosSILAC-flag is set, print log2-SILAC ratios (triple SILAC only):
    if (print_SILACratios && ed.getNumberOfSamples() == 3)
    {
      const double light = abundance_of(0);
      const double middle = abundance_of(1);
      const double heavy = abundance_of(2);
      const double log2 = log(2.0);
      // same guard as above: 0 instead of UB/inf/nan for missing values
      out << ((heavy > 0.0) && (light > 0.0) ? log(heavy / light) / log2 : 0.0)
          << ((heavy > 0.0) && (middle > 0.0) ? log(heavy / middle) / log2 : 0.0)
          << ((middle > 0.0) && (light > 0.0) ? log(middle / light) / log2 : 0.0);
    }
    out << endl;
  }
}
/// Write comment lines ('#'-prefixed) before a peptide/protein table:
/// input file, the algorithm parameters relevant for this output, and (for
/// label-free multi-sample data) the file-to-column mapping.
void writeComments_(SVOutStream& out, const ExperimentalDesign& ed, const bool proteins = true)
{
  String what = (proteins ? "Protein" : "Peptide");
  bool old = out.modifyStrings(false); // don't quote the comment lines
  bool is_ibaq = algo_params_.getValue("method") == "iBAQ";
  out << "# " + what + " abundances computed from file '" +
    getStringOption_("in") + "'" << endl;
  // collect only the parameters that actually influenced this output:
  StringList relevant_params;
  if (proteins) // parameters relevant only for protein output
  {
    relevant_params.push_back("method");
    if (!is_ibaq) // 'top' settings don't apply to iBAQ
    {
      relevant_params.push_back("top:N");
      Size top = algo_params_.getValue("top:N");
      if (top != 1)
      {
        relevant_params.push_back("top:aggregate");
        if (top != 0)
        {
          relevant_params.push_back("top:include_all");
        }
      }
    }
  }
  relevant_params.push_back("best_charge_and_fraction"); // also for peptide output
  if (ed.getNumberOfSamples() > 1) // flags only for consensusXML input
  {
    relevant_params.push_back("consensus:normalize");
    if (proteins)
    {
      relevant_params.push_back("consensus:fix_peptides");
    }
  }
  String params;
  for (const String& str : relevant_params)
  {
    String value = algo_params_.getValue(str).toString();
    if (value != "false") // skip unset flags
    {
      params += str + "=" + value + ", ";
    }
  }
  if (params.empty())
  {
    params = "(none)";
  }
  else
  {
    params.resize(params.size() - 2); // remove trailing ", "
  }
  out << "# Parameters (relevant only): " + params << endl;
  // for label-free multi-sample data, document which input file belongs to
  // which abundance column:
  if (ed.getNumberOfSamples() > 1 && ed.getNumberOfLabels() == 1)
  {
    String desc = "# Files/samples associated with abundance values below: ";
    const auto& ms_section = ed.getMSFileSection();
    map<String, String> sample_id_to_filename;
    for (const auto& e : ms_section)
    {
      // sample IDs should be 0, ..., n_samples-1
      // (removed unused locals 'ed_label'/'ed_sample' from the old version)
      sample_id_to_filename[e.sample] = FileHandler::stripExtension(File::basename(e.path));
    }
    for (Size i = 0; i < ed.getNumberOfSamples(); ++i)
    {
      if (i > 0)
      {
        desc += ", ";
      }
      desc += String(i + 1) + ": '" + sample_id_to_filename[String(i)] + "'";
    }
    out << desc << endl;
  }
  out.modifyStrings(old); // restore previous quoting behavior
}
/// Write a summary of processing statistics (spectra/features, peptides,
/// proteins) to the log stream.
/// @param stats Counters collected by the quantification algorithm.
void writeStatistics_(const Statistics& stats)
{
OPENMS_LOG_INFO << "\nProcessing summary - number of...";
// Spectral counting mode (idXML input): only spectrum/peptide counts apply.
if (spectral_counting_)
{
OPENMS_LOG_INFO << "\n...spectra: " << stats.total_features << " identified"
<< "\n...peptides: " << stats.quant_peptides
<< " identified and quantified (considering best hits only)";
}
else // feature-based quantification: also report the annotation breakdown
{
OPENMS_LOG_INFO << "\n...features: " << stats.quant_features
<< " used for quantification, " << stats.total_features
<< " total (" << stats.blank_features << " no annotation, "
<< stats.ambig_features << " ambiguous annotation)"
<< "\n...peptides: " << stats.quant_peptides
<< " quantified, " << stats.total_peptides
<< " identified (considering best hits only)";
}
// Protein-level statistics are only reported if protein output was requested.
if (!getStringOption_("out").empty())
{
bool include_all = algo_params_.getValue("top:include_all") == "true";
Size top_n = algo_params_.getValue("top:N");
OPENMS_LOG_INFO << "\n...proteins/protein groups: " << stats.quant_proteins
<< " quantified";
// For 'top:N' > 1, report proteins with fewer than N peptides; the phrasing
// differs depending on whether those proteins were still counted as
// quantified ("incl. ...") or excluded (", ...").
if (top_n > 1)
{
if (include_all)
{
OPENMS_LOG_INFO << " (incl. ";
}
else
{
OPENMS_LOG_INFO << ", ";
}
OPENMS_LOG_INFO << stats.too_few_peptides << " with fewer than " << top_n
<< " peptides";
if (stats.n_samples > 1)
{
OPENMS_LOG_INFO << " in every sample";
}
if (include_all)
{
OPENMS_LOG_INFO << ")";
}
}
}
OPENMS_LOG_INFO << endl;
}
/// Determine the experimental design for idXML input: an explicit design file
/// takes precedence; otherwise the design is derived from the ID runs.
ExperimentalDesign getExperimentalDesignIds_(const String & design_file, const vector<ProteinIdentification> & proteins)
{
  // No design file provided: infer the design from the identification runs.
  if (design_file.empty())
  {
    return ExperimentalDesign::fromIdentifications(proteins);
  }
  // Load the user-supplied experimental design file.
  // TODO FRACTIONS: check if ed sane
  return ExperimentalDesignFile::load(design_file, false);
}
/// Determine the experimental design for featureXML input: an explicit design
/// file takes precedence; otherwise the design is derived from the feature map.
ExperimentalDesign getExperimentalDesignFeatureMap_(const String & design_file, const FeatureMap & fm)
{
  // No design file provided: infer the design from the feature map.
  if (design_file.empty())
  {
    return ExperimentalDesign::fromFeatureMap(fm);
  }
  // Load the user-supplied experimental design file.
  // TODO FRACTIONS: check if ed sane
  return ExperimentalDesignFile::load(design_file, false);
}
/// Determine the experimental design for consensusXML input: an explicit design
/// file takes precedence; otherwise the design is inferred from the map.
ExperimentalDesign getExperimentalDesignConsensusMap_(const String & design_file, const ConsensusMap & cm)
{
  // No design file provided: fall back to inference from the consensus map.
  if (design_file.empty())
  {
    OPENMS_LOG_INFO << "No design file given. Trying to infer from consensus map." << std::endl;
    return ExperimentalDesign::fromConsensusMap(cm);
  }
  // Load the user-supplied experimental design file.
  // TODO FRACTIONS: check if ed sane
  return ExperimentalDesignFile::load(design_file, false);
}
/// Process FeatureXML input and perform quantification.
/// Loads the feature map, determines the experimental design, optionally picks
/// up protein inference results stored in the file, then quantifies peptides
/// and proteins.
ExperimentalDesign processFeatureXMLInput_(const String& in, const String& design_file,
                                           PeptideAndProteinQuant& quantifier)
{
  FeatureMap feature_map;
  FileHandler().loadFeatures(in, feature_map, {FileTypes::FEATUREXML});
  columns_headers_[0].filename = in;
  ExperimentalDesign design = getExperimentalDesignFeatureMap_(design_file, feature_map);
  // Use protein inference results embedded in the featureXML, unless an
  // external inference file ('protein_groups') was supplied by the user.
  const auto& prot_runs = feature_map.getProteinIdentifications();
  const bool single_run_with_hits = (prot_runs.size() == 1) && !prot_runs[0].getHits().empty();
  if (getStringOption_("protein_groups").empty() && single_run_with_hits)
  {
    proteins_ = prot_runs[0];
  }
  quantifier.readQuantData(feature_map, design);
  quantifier.quantifyPeptides(peptides_);
  quantifier.quantifyProteins(proteins_);
  return design;
}
/// Process IdXML input and perform quantification (by spectral counting, since
/// no intensity data is available in identification-only input).
ExperimentalDesign processIdXMLInput_(const String& in, const String& design_file,
                                      PeptideAndProteinQuant& quantifier)
{
  spectral_counting_ = true; // ID-only input -> quantify via spectral counting
  vector<ProteinIdentification> prot_runs;
  PeptideIdentificationList pep_ids;
  FileHandler().loadIdentifications(in, prot_runs, pep_ids, {FileTypes::IDXML});
  // No file names available here: label each column with search engine + date.
  for (Size run = 0; run < prot_runs.size(); ++run)
  {
    columns_headers_[run].filename = prot_runs[run].getSearchEngine() + "_" + prot_runs[run].getDateTime().toString();
  }
  ExperimentalDesign design = getExperimentalDesignIds_(design_file, prot_runs);
  // Use protein inference results from the idXML, unless an external
  // inference file ('protein_groups') was supplied by the user.
  const bool single_run_with_hits = (prot_runs.size() == 1) && !prot_runs[0].getHits().empty();
  if (getStringOption_("protein_groups").empty() && single_run_with_hits)
  {
    proteins_ = prot_runs[0];
  }
  quantifier.readQuantData(prot_runs, pep_ids, design);
  quantifier.quantifyPeptides(peptides_);
  quantifier.quantifyProteins(proteins_);
  return design;
}
/// Process ConsensusXML input and perform quantification.
/// Loads the consensus map, determines the experimental design, optionally
/// picks up protein inference results stored in the file, quantifies peptides
/// and proteins, and (if requested) exports the results to mzTab.
/// @return The experimental design used for quantification.
ExperimentalDesign processConsensusXMLInput_(const String& in, const String& design_file, const String& mztab,
PeptideAndProteinQuant& quantifier)
{
ConsensusMap consensus;
FileHandler().loadConsensusFeatures(in, consensus, {FileTypes::CONSENSUSXML});
columns_headers_ = consensus.getColumnHeaders();
ExperimentalDesign ed = getExperimentalDesignConsensusMap_(design_file, consensus);
bool inference_in_cxml = false;
// protein inference results in the consensusXML or from external ID-only file?
// (only use the embedded run if no external 'protein_groups' file was given)
if (getStringOption_("protein_groups").empty() &&
(consensus.getProteinIdentifications().size() == 1) &&
consensus.getProteinIdentifications()[0].hasInferenceData())
{
proteins_ = consensus.getProteinIdentifications()[0];
inference_in_cxml = true;
}
quantifier.readQuantData(consensus, ed);
quantifier.quantifyPeptides(peptides_);
quantifier.quantifyProteins(proteins_);
// write mzTab file
if (!mztab.empty())
{
// annotate quants to protein(groups) for easier export in mzTab
auto const & protein_quants = quantifier.getProteinResults();
quantifier.annotateQuantificationsToProteins(protein_quants, proteins_);
// Make the (annotated) inference run the first protein identification of the
// map: insert it if it came from an external file, otherwise swap it back
// into the slot it was originally read from.
if (!inference_in_cxml)
{
auto& prots = consensus.getProteinIdentifications();
prots.insert(prots.begin(), proteins_); // insert inference information as first protein identification
}
else
{
std::swap(consensus.getProteinIdentifications()[0], proteins_);
}
// fill MzTab with meta data and quants annotated in identification data structure
const bool report_unmapped(true);
const bool report_unidentified_features(false);
const bool report_subfeatures(false);
// third argument: whether the first ID run was added externally (see above)
MzTabFile().store(mztab,
consensus,
!inference_in_cxml,
report_unidentified_features,
report_unmapped,
report_subfeatures);
}
return ed;
}
/// Tool entry point: read parameters, optionally load external protein
/// inference data, dispatch to the input-type-specific processing routine,
/// and write peptide/protein tables plus statistics.
ExitCodes main_(int, const char**) override
{
// -- parameter handling --
String in = getStringOption_("in");
String out = getStringOption_("out");
String peptide_out = getStringOption_("peptide_out");
String mztab = getStringOption_("mztab");
String design_file = getStringOption_("design");
bool greedy_group_resolution = getStringOption_("greedy_group_resolution") == "true";
// at least one of the two table outputs must be requested
if (out.empty() && peptide_out.empty())
{
throw Exception::RequiredParameterNotGiven(__FILE__, __LINE__,
OPENMS_PRETTY_FUNCTION,
"out/peptide_out");
}
String protein_groups = getStringOption_("protein_groups");
if (!protein_groups.empty()) // read protein inference data
{
vector<ProteinIdentification> proteins;
FileHandler().loadIdentifications(protein_groups, proteins, peptides_, {FileTypes::IDXML});
// indistinguishable protein groups are required for group-level output
if (proteins.empty() ||
proteins[0].getIndistinguishableProteins().empty())
{
throw Exception::MissingInformation(
__FILE__,
__LINE__,
OPENMS_PRETTY_FUNCTION,
"No information on indistinguishable protein groups found in file '" + protein_groups + "'");
}
proteins_ = proteins[0]; // inference data is attached to first ID run
// optionally resolve shared peptides greedily before quantification
if (greedy_group_resolution)
{
PeptideProteinResolution ppr{};
ppr.buildGraph(proteins_, peptides_);
ppr.resolveGraph(proteins_, peptides_);
}
}
FileTypes::Type in_type = FileHandler::getType(in);
PeptideAndProteinQuant quantifier;
// merge tool parameters into the algorithm's defaults
algo_params_ = quantifier.getParameters();
Logger::LogStream nirvana; // avoid parameter update messages
algo_params_.update(getParam_(), false, nirvana);
quantifier.setParameters(algo_params_);
// iBAQ works only with feature intensity values in consensusXML or featureXML files
if (algo_params_.getValue("method") == "iBAQ" && in.hasSuffix("idXML"))
{
throw Exception::InvalidParameter(__FILE__, __LINE__,
OPENMS_PRETTY_FUNCTION,
"Invalid input: idXML for iBAQ, only consensusXML or featureXML are valid");
}
// iBAQ can only quantify proteins
if (algo_params_.getValue("method") == "iBAQ" && !peptide_out.empty())
{
throw Exception::InvalidParameter(__FILE__, __LINE__,
OPENMS_PRETTY_FUNCTION,
"Invalid output: peptide_out can not be set when using iBAQ");
}
ExperimentalDesign ed;
// Process input based on file type
if (in_type == FileTypes::FEATUREXML)
{
ed = processFeatureXMLInput_(in, design_file, quantifier);
}
else if (in_type == FileTypes::IDXML)
{
ed = processIdXMLInput_(in, design_file, quantifier);
}
else // consensusXML
{
ed = processConsensusXMLInput_(in, design_file, mztab, quantifier);
}
// output:
// configure the separated-value output format (separator/replacement/quoting)
String separator = getStringOption_("format:separator");
String replacement = getStringOption_("format:replacement");
String quoting = getStringOption_("format:quoting");
if (separator.empty())
{
separator = "\t";
}
String::QuotingMethod quoting_method;
if (quoting == "none")
{
quoting_method = String::NONE;
}
else if (quoting == "double")
{
quoting_method = String::DOUBLE;
}
else
{
quoting_method = String::ESCAPE;
}
// write the peptide abundance table (if requested)
if (!peptide_out.empty())
{
ofstream outstr(peptide_out.c_str());
SVOutStream output(outstr, separator, replacement, quoting_method);
writeComments_(output, ed, false);
writePeptideTable_(output, quantifier.getPeptideResults(), ed);
outstr.close();
}
// write the protein abundance table (if requested)
if (!out.empty())
{
ofstream outstr(out.c_str());
SVOutStream output(outstr, separator, replacement, quoting_method);
writeComments_(output, ed);
writeProteinTable_(output, quantifier.getProteinResults(), ed);
outstr.close();
}
writeStatistics_(quantifier.getStatistics());
return EXECUTION_OK;
}
};
int main(int argc, const char** argv)
{
TOPPProteinQuantifier t;
return t.main(argc, argv);
}
/// @endcond | C++ |
3D | OpenMS/OpenMS | src/topp/SiriusExport.cpp | .cpp | 6,294 | 139 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Axel Walter, Oliver Alka $
// $Authors: Axel Walter, Oliver Alka, Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/SiriusExportAlgorithm.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
using namespace OpenMS;
//----------------------------------------------------------
// Doxygen docu
//----------------------------------------------------------
/**
@page TOPP_SiriusExport SiriusExport
@brief De novo metabolite identification.
Exports an input file for SIRIUS, a tool for metabolomics data analysis with several subtools, each serving specific purposes:
- SIRIUS: Identify molecular formula for each compound individually using fragmentation trees and isotope patterns. Output from this tool can be used to generate an OpenSwathAssay library with the AssayGeneratorMetabo TOPP tool.
- CSI:FingerID: This subtool is dedicated to predicting molecular structures based on tandem mass spectrometry (MS/MS) data. It utilizes a fragmentation tree approach for the annotation of fragment spectra.
- CANOPUS: Predict compound categories for each compound individually based on its predicted molecular fingerprint (CSI:FingerID) using CANOPUS.
- Passatutto: Compute decoy spectra based on the fragmentation trees of the given input spectra. If no molecular formula is provided in the input, the top scoring computed formula is used. Required to include decoys in an OpenSwathWorkflow assay library generated by the AssayGeneratorMetabo TOPP tool.
Sirius can be found at https://bio.informatik.uni-jena.de/software/sirius/
<B>Internal procedure in SiriusExport</B>
1. Input mzML (and optional featureXML) \n
Make sure to input the matching mzML and featureXML files in the correct order.\n
2. Preprocessing \n
By providing a featureXML, the feature information can be used for feature mapping. \n
Sirius will then process the internally merged MS2 spectra allocated to one feature (instead of all available MS2). \n
To reduce the feature space even further a masstrace filter can be set. \n
Additional adduct information can be provided using a featureXML from the MetaboliteAdductDecharger or AccurateMassSearch. \n
3. Parsed by SiriusMSConverter into (sirius internal) .ms format
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_SiriusExport.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_SiriusExport.html
*/
/// @cond TOPPCLASSES
/// TOPP tool that exports (optionally feature-linked) MS data to the internal
/// SIRIUS .ms input format.
class TOPPSiriusExport :
  public TOPPBase
{
public:
  TOPPSiriusExport() :
    TOPPBase("SiriusExport",
             "Metabolite identification using single and tandem mass spectrometry", true,
             {
               {"Kai Duehrkop and Sebastian Boecker",
                "Fragmentation trees reloaded",
                "J Cheminform; 2016",
                "10.1186/s13321-016-0116-8"},
               {"Kai Duehrkop, Huibin Shen, Marvin Meusel, Juho Rousu, and Sebastian Boecker",
                "Searching molecular structure databases with tandem mass spectra using CSI:FingerID",
                "Proceedings of the National Academy of Sciences; 2015",
                "10.1073/pnas.1509788112"}
             })
  {}

protected:
  /// Register input/output files and expose all algorithm parameters.
  void registerOptionsAndFlags_() override
  {
    registerInputFileList_("in", "<file(s)>", StringList(), "MzML Input file(s)");
    setValidFormats_("in", ListUtils::create<String>("mzML"));
    registerInputFileList_("in_featureinfo", "<file(s)>", StringList(), "FeatureXML input with feature and adduct information", false);
    setValidFormats_("in_featureinfo", ListUtils::create<String>("featureXML"));
    registerOutputFile_("out", "<file>", "", "Internal SIRIUS .ms format after OpenMS preprocessing");
    setValidFormats_("out", ListUtils::create<String>("ms"));
    registerOutputFile_("out_compoundinfo", "<file>", "", "File (.tsv) with information on processed compounds which are associated with a feature. Required for AssayGeneratorMetaboSirius tool.", false);
    setValidFormats_("out_compoundinfo", ListUtils::create<String>("tsv"));
    addEmptyLine_();
    // expose all parameters of the export algorithm on the tool's command line
    registerFullParam_(SiriusExportAlgorithm().getDefaults());
  }

  /// Tool entry point: validate input pairing and run the export algorithm.
  ExitCodes main_(int, const char **) override
  {
    //-------------------------------------------------------------
    // Parsing parameters
    //-------------------------------------------------------------
    StringList mzML_files = getStringList_("in");
    StringList featureXML_files = getStringList_("in_featureinfo");
    String out_ms = getStringOption_("out");
    String out_compoundinfo = getStringOption_("out_compoundinfo");
    SiriusExportAlgorithm algorithm;
    algorithm.setParameters(getParam_().copySubset(SiriusExportAlgorithm().getDefaults()));
    //-------------------------------------------------------------
    // Check input
    //-------------------------------------------------------------
    // mzML and featureXML files must be paired one-to-one (if any are given)
    if (!featureXML_files.empty() && mzML_files.size() != featureXML_files.size())
    {
      throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                          "Number of .mzML do not match to the number of .featureXML files. \n Please check and provide the corresponding files.");
    }
    // compound info requires feature maps; warn (but proceed) if none were given
    if (!out_compoundinfo.empty() && featureXML_files.empty())
    {
      OPENMS_LOG_WARN << "A compound info output file was specified but no feature maps provided. The resulting table will be empty." << std::endl;
    }
    // 'feature_only' preprocessing is impossible without feature maps
    if (algorithm.isFeatureOnly() && featureXML_files.empty())
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                       "No feature maps provided but preprocessing:feature_only flag set. Please provide featureXML files.");
    }
    algorithm.run(mzML_files,
                  featureXML_files,
                  out_ms,
                  out_compoundinfo);
    return EXECUTION_OK;
  }
};
int main(int argc, const char ** argv)
{
TOPPSiriusExport tool;
return tool.main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/OpenSwathConfidenceScoring.cpp | .cpp | 8,771 | 192 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hannes Roest, Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/ConfidenceScoring.h>
#include <iostream> // for "cout"
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/FORMAT/FileHandler.h>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_OpenSwathConfidenceScoring OpenSwathConfidenceScoring
@brief Computes confidence scores for OpenSwath results.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> potential predecessor tools </td>
<td VALIGN="middle" ROWSPAN=2> → OpenSwathConfidenceScoring →</td>
<th ALIGN = "center"> potential successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_OpenSwathAnalyzer </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_OpenSwathFeatureXMLToTSV </td>
</tr>
</table>
</CENTER>
This is an implementation of the SRM scoring algorithm described in:
Malmstroem, L.; Malmstroem, J.; Selevsek, N.; Rosenberger, G. & Aebersold, R.:\n
<a href="https://doi.org/10.1021/pr200844d">Automated workflow for large-scale selected reaction monitoring experiments.</a>\n
J. Proteome Res., 2012, 11, 1644-1653
It has been adapted for the scoring of OpenSwath results.
The algorithm compares SRM/MRM features (peak groups) to assays and computes scores for the agreements. Every feature is compared not only to the "true" assay that was used to acquire the corresponding ion chromatograms, but also to a number (parameter @p decoys) of unrelated - but real - assays selected at random from the assay library (parameter @p lib). This serves to establish a background distribution of scores, against which the significance of the "true" score can be evaluated. The final confidence value of a feature is the local false discovery rate (FDR), calculated as the fraction of decoy assays that score higher than the "true" assay against the feature. In the output feature map, every feature is annotated with its local FDR in the meta value "local_FDR" (a "userParam" element in the featureXML), and its overall quality is set to "1 - local_FDR".
The agreement of a feature and an assay is assessed based on the difference in retention time (RT) and on the deviation of relative transition intensities. The score @e S is computed using a binomial generalized linear model (GLM) of the form:
@f[
S = \frac{1}{1 + \exp(-(a + b \cdot \Delta_{RT}^2 + c \cdot d_{int}))}
@f]
The meanings of the model terms are as follows:
@f$ \Delta_{RT} @f$: Observed retention times are first mapped to the scale of the assays (parameter @p trafo), then all RTs are scaled to the range 0 to 100 (based on the lowest/highest RT in the assay library). @f$ \Delta_{RT} @f$ is the absolute difference of the scaled RTs; note that this is squared in the scoring model.
@f$ d_{int} @f$: To compute the intensity distance, the @e n (advanced
parameter @p transitions) most intensive transitions of the feature are
selected. For comparing against the "true" assay, the same transitions are
considered; otherwise, the same number of most intensive transitions from
the decoy assay. Transition intensities are scaled to a total of 1 per
feature/assay and are ordered by the product (Q3) m/z value. Then the
Manhattan distance of the intensity vectors is calculated (Malmstroem et
al. used the RMSD instead, which has been replaced here to be independent
of the number of transitions).
@f$ a, b, c @f$: Model coefficients, stored in the advanced parameters @p GLM:intercept, @p GLM:delta_rt, and @p GLM:dist_int. The default values were estimated based on the training dataset used in the Malmstroem et al. study, reprocessed with the OpenSwath pipeline.
In addition to the local FDRs, the scores of features against their "true" assays are recorded in the output - in the meta value "GLM_score" of the respective feature.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_OpenSwathConfidenceScoring.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_OpenSwathConfidenceScoring.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
/// TOPP tool that computes confidence scores (local FDRs) for OpenSwath
/// results by comparing features against true and decoy assays.
class TOPPOpenSwathConfidenceScoring :
  public TOPPBase
{
public:
  /// Constructor
  TOPPOpenSwathConfidenceScoring() :
    TOPPBase("OpenSwathConfidenceScoring", "Compute confidence scores for OpenSwath results")
  {
  }

  /// Docu in base class
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Input file (OpenSwath results)");
    setValidFormats_("in", ListUtils::create<String>("featureXML"));
    registerInputFile_("lib", "<file>", "", "Assay library");
    setValidFormats_("lib", ListUtils::create<String>("traML"));
    registerOutputFile_("out", "<file>", "",
                        "Output file (results with confidence scores)");
    setValidFormats_("out", ListUtils::create<String>("featureXML"));
    registerInputFile_("trafo", "<file>", "", "Retention time transformation",
                       false);
    setValidFormats_("trafo", ListUtils::create<String>("trafoXML"));
    registerIntOption_("decoys", "<number>", 1000, "Number of decoy assays to "
                       "select from the library for every true assay (0 for "
                       "\"all\")", false);
    setMinInt_("decoys", 0);
    registerIntOption_("transitions", "<number>", 6, "Number of transitions "
                       "per feature to consider (highest intensities first; "
                       "0 for \"all\")", false);
    setMinInt_("transitions", 0);
    registerTOPPSubsection_("GLM",
                            "Parameters of the binomial GLM");
    registerDoubleOption_("GLM:intercept", "<value>", 3.87333466,
                          "Intercept term", false, true);
    registerDoubleOption_("GLM:delta_rt", "<value>", -0.02898629, "Coefficient "
                          "of retention time difference", false, true);
    registerDoubleOption_("GLM:dist_int", "<value>", -7.75880768,
                          "Coefficient of intensity distance", false, true);
  }

  /// Docu in base class
  ExitCodes main_(int, const char**) override
  {
    // NOTE: these are function locals; they previously carried member-style
    // trailing underscores ('library_', 'n_decoys_', ...), which was misleading.
    OPENMS_LOG_DEBUG << "Reading parameters..." << endl;
    String in = getStringOption_("in");
    String lib = getStringOption_("lib");
    String out = getStringOption_("out");
    String trafo = getStringOption_("trafo");
    Size n_decoys = getIntOption_("decoys");           // decoys per true assay (0 = all)
    Size n_transitions = getIntOption_("transitions"); // transitions per feature (0 = all)

    OPENMS_LOG_DEBUG << "Loading input files..." << endl;
    FeatureMap features;
    FileHandler().loadFeatures(in, features, {FileTypes::FEATUREXML});
    TargetedExperiment library; // assay library
    FileHandler().loadTransitions(lib, library, {FileTypes::TRAML});
    TransformationDescription rt_trafo; // maps measured RTs to assay RTs
    if (trafo.empty())
    {
      OPENMS_LOG_WARN << "Warning: You have not supplied an RT transformation file "
                      << "(parameter 'trafo'). You should be sure that the retention "
                      << "times of your features ('in') and library ('lib') are on "
                      << "the same scale." << endl;
    }
    else
    {
      FileHandler().loadTransformations(trafo, rt_trafo, true, {FileTypes::TRANSFORMATIONXML});
      if (rt_trafo.getModelType() == "none") // fit a linear model now
      {
        rt_trafo.fitModel("linear");
      }
    }
    // set up the scoring algorithm and annotate the feature map in place
    ConfidenceScoring scoring(test_mode_);
    scoring.setLogType(log_type_);
    scoring.initialize(library, n_decoys, n_transitions, rt_trafo);
    scoring.initializeGlm(getDoubleOption_("GLM:intercept"), getDoubleOption_("GLM:delta_rt"), getDoubleOption_("GLM:dist_int"));
    scoring.scoreMap(features);

    OPENMS_LOG_DEBUG << "Storing results..." << endl;
    addDataProcessing_(features,
                       getProcessingInfo_(DataProcessing::DATA_PROCESSING));
    FileHandler().storeFeatures(out, features, {FileTypes::FEATUREXML});
    return EXECUTION_OK;
  }
};
int main(int argc, const char** argv)
{
TOPPOpenSwathConfidenceScoring t;
return t.main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/TICCalculator.cpp | .cpp | 14,319 | 421 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/config.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/FORMAT/DATAACCESS/MSDataWritingConsumer.h>
#include <OpenMS/FORMAT/HANDLERS/CachedMzMLHandler.h>
#include <OpenMS/FORMAT/OPTIONS/PeakFileOptions.h>
#include <OpenMS/FORMAT/CachedMzML.h>
#include <OpenMS/FORMAT/FileTypes.h>
//TODO add support for indexed mzml to handler
#include <OpenMS/FORMAT/IndexedMzMLFileLoader.h>
#include <OpenMS/FORMAT/MzMLFile.h>
#include <OpenMS/INTERFACES/IMSDataConsumer.h>
#include <OpenMS/KERNEL/OnDiscMSExperiment.h>
#include <OpenMS/SYSTEM/SysInfo.h>
#include <numeric>
using namespace OpenMS;
using namespace std;
using namespace OpenSwath;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_TICCalculator TICCalculator
@brief Calculates the TIC of a raw mass spectrometric file.
This class was developed to benchmark multiple methods inside OpenMS for
reading raw mass spectrometric data. It can be used for benchmarking these
different methods as well as benchmarking external tools. Of course you can
also calculate the TIC with this tool.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_TICCalculator.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_TICCalculator.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
/// Data consumer that accumulates the total ion current (TIC) plus spectrum
/// and peak counts while spectra are streamed through it.
class TICConsumer :
  public Interfaces::IMSDataConsumer
{
  typedef PeakMap MapType;
  typedef MapType::SpectrumType SpectrumType;
  typedef MapType::ChromatogramType ChromatogramType;

public:
  double TIC;        ///< accumulated total ion current over all consumed spectra
  int nr_spectra;    ///< number of spectra consumed so far
  long int nr_peaks; ///< number of peaks (data points) consumed so far

  /// Create a new consumer with all counters reset to zero.
  TICConsumer() :
    TIC(0.0),
    nr_spectra(0), // fixed: was initialized with the double literal 0.0
    nr_peaks(0)
  {}

  /// Add all peak intensities of @p s to the TIC and update the counters.
  void consumeSpectrum(SpectrumType & s) override
  {
    for (const auto& peak : s)
    {
      TIC += peak.getIntensity();
    }
    nr_peaks += s.size();
    nr_spectra++;
  }

  // Chromatograms and meta data do not contribute to the TIC; ignore them.
  void consumeChromatogram(ChromatogramType& /* c */) override {}
  void setExpectedSize(Size /* expectedSpectra */, Size /* expectedChromatograms */) override {}
  void setExperimentalSettings(const ExperimentalSettings& /* exp */) override {}
};
/**
@brief Abstraction of a std::ifstream
Useful for parallel access to the file when each thread is given its own
instance of this class. Each thread will then have its own file stream and
access the file independently.
*/
class FileAbstraction
{
public:
  /// Open an independent binary input stream on the given file.
  explicit FileAbstraction(std::string filename) :
    filename_(std::move(filename))
  {
    ifs_.open(filename_.c_str(), std::ios::binary);
  }

  /// Copy constructor: opens a fresh, independent stream on the same file
  /// (this is what gives each OpenMP thread its own file handle).
  /// Fixed: the copy must also open in binary mode like the primary
  /// constructor; a text-mode stream would corrupt reads of the binary
  /// cached-mzML data on platforms with newline translation.
  FileAbstraction(const FileAbstraction& source) :
    filename_(source.filename_)
  {
    ifs_.open(filename_.c_str(), std::ios::binary);
  }

  /// Access to the underlying stream
  std::ifstream & getStream()
  {
    return ifs_;
  }

private:
  std::string filename_; ///< path of the opened file
  std::ifstream ifs_;    ///< stream owned exclusively by this instance
};
class TOPPTICCalculator :
public TOPPBase
{
public:
TOPPTICCalculator() :
TOPPBase("TICCalculator",
"Calculates the TIC from a mass spectrometric raw file (useful for benchmarking).",
true)
{
}
protected:
void registerOptionsAndFlags_() override
{
registerInputFile_("in", "<file>", "", "Input file to convert.");
registerStringOption_("in_type", "<type>", "", "Input file type -- default: determined from file extension or content\n", false);
String formats("mzData,mzXML,mzML,cachedMzML,dta,dta2d,mgf,featureXML,consensusXML,ms2,fid,tsv,peplist,kroenik,edta");
setValidFormats_("in", ListUtils::create<String>(formats));
setValidStrings_("in_type", ListUtils::create<String>(formats));
registerStringOption_("read_method", "<method>", "regular", "Method to read the file", false);
String method("regular,indexed,indexed_parallel,streaming,cached,cached_parallel");
setValidStrings_("read_method", ListUtils::create<String>(method));
registerStringOption_("loadData", "<method>", "true", "Whether to actually load and decode the binary data (or whether to skip decoding the binary data)", false);
String loadData("true,false");
setValidStrings_("loadData", ListUtils::create<String>(loadData));
}
ExitCodes main_(int, const char**) override
{
//-------------------------------------------------------------
// parameter handling
//-------------------------------------------------------------
//input file names
String in = getStringOption_("in");
String read_method = getStringOption_("read_method");
bool load_data = getStringOption_("loadData") == "true";
if (read_method == "streaming")
{
std::cout << "Read method: streaming" << std::endl;
// Create the consumer, set output file name, transform
TICConsumer consumer;
MzMLFile mzml;
mzml.setLogType(log_type_);
PeakFileOptions opt = mzml.getOptions();
opt.setFillData(load_data); // whether to actually load any data
opt.setSkipXMLChecks(true); // save time by not checking base64 strings for whitespaces
opt.setMaxDataPoolSize(100);
opt.setAlwaysAppendData(false);
mzml.setOptions(opt);
mzml.transform(in, &consumer, true, true);
std::cout << "There are " << consumer.nr_spectra << " spectra and " << consumer.nr_peaks << " peaks in the input file." << std::endl;
std::cout << "The total ion current is " << consumer.TIC << std::endl;
size_t after;
SysInfo::getProcessMemoryConsumption(after);
std::cout << " Memory consumption after " << after << std::endl;
}
else if (read_method == "regular")
{
std::cout << "Read method: regular" << std::endl;
MzMLFile mzml;
mzml.setLogType(log_type_);
PeakFileOptions opt = mzml.getOptions();
opt.setFillData(load_data); // whether to actually load any data
opt.setSkipXMLChecks(true); // save time by not checking base64 strings for whitespaces
mzml.setOptions(opt);
PeakMap map;
mzml.load(in, map);
double TIC = 0.0;
long int nr_peaks = 0;
for (Size i =0; i < map.size(); i++)
{
nr_peaks += map[i].size();
for (Size j = 0; j < map[i].size(); j++)
{
TIC += map[i][j].getIntensity();
}
}
std::cout << "There are " << map.size() << " spectra and " << nr_peaks << " peaks in the input file." << std::endl;
std::cout << "The total ion current is " << TIC << std::endl;
size_t after;
SysInfo::getProcessMemoryConsumption(after);
std::cout << " Memory consumption after " << after << std::endl;
}
else if (read_method == "indexed")
{
std::cout << "Read method: indexed" << std::endl;
IndexedMzMLFileLoader imzml;
// load data from an indexed MzML file
OnDiscPeakMap map;
imzml.load(in, map);
double TIC = 0.0;
long int nr_peaks = 0;
if (load_data)
{
for (Size i =0; i < map.getNrSpectra(); i++)
{
OpenMS::Interfaces::SpectrumPtr sptr = map.getSpectrumById(i);
nr_peaks += sptr->getIntensityArray()->data.size();
TIC += std::accumulate(sptr->getIntensityArray()->data.begin(), sptr->getIntensityArray()->data.end(), 0.0);
}
}
std::cout << "There are " << map.getNrSpectra() << " spectra and " << nr_peaks << " peaks in the input file." << std::endl;
std::cout << "The total ion current is " << TIC << std::endl;
size_t after;
SysInfo::getProcessMemoryConsumption(after);
std::cout << " Memory consumption after " << after << std::endl;
}
else if (read_method == "indexed_parallel")
{
std::cout << "Read method: indexed (parallel)" << std::endl;
IndexedMzMLFileLoader imzml;
PeakFileOptions opt = imzml.getOptions();
opt.setFillData(load_data); // whether to actually load any data
imzml.setOptions(opt);
// load data from an indexed MzML file
OnDiscPeakMap map;
map.openFile(in, true);
map.setSkipXMLChecks(true);
double TIC = 0.0;
long nr_peaks = 0;
if (load_data)
{
// firstprivate means that each thread has its own instance of the
// variable, each copy initialized with the initial value
#pragma omp parallel for firstprivate(map) reduction(+: TIC, nr_peaks)
for (SignedSize i =0; i < (SignedSize)map.getNrSpectra(); i++)
{
OpenMS::Interfaces::SpectrumPtr sptr = map.getSpectrumById(i);
nr_peaks += sptr->getIntensityArray()->data.size();
TIC += std::accumulate(sptr->getIntensityArray()->data.begin(), sptr->getIntensityArray()->data.end(), 0.0);
}
}
std::cout << "There are " << map.getNrSpectra() << " spectra and " << nr_peaks << " peaks in the input file." << std::endl;
std::cout << "The total ion current is " << TIC << std::endl;
size_t after;
SysInfo::getProcessMemoryConsumption(after);
std::cout << " Memory consumption after " << after << std::endl;
}
else if (read_method == "cached")
{
std::cout << "Read method: cached" << std::endl;
// Special handling of cached mzML as input types:
// we expect two paired input files which we should read into exp
std::vector<String> split_out;
in.split(".cachedMzML", split_out);
if (split_out.size() != 2)
{
OPENMS_LOG_ERROR << "Cannot deduce base path from input '" << in <<
"' (note that '.cachedMzML' should only occur once as the final ending)" << std::endl;
return ILLEGAL_PARAMETERS;
}
String in_meta = split_out[0] + ".mzML";
MzMLFile f;
f.setLogType(log_type_);
Internal::CachedMzMLHandler cache;
cache.createMemdumpIndex(in);
const std::vector<std::streampos> spectra_index = cache.getSpectraIndex();
std::ifstream ifs_;
ifs_.open(in.c_str(), std::ios::binary);
double TIC = 0.0;
long int nr_peaks = 0;
for (Size i=0; i < spectra_index.size(); ++i)
{
BinaryDataArrayPtr mz_array(new BinaryDataArray);
BinaryDataArrayPtr intensity_array(new BinaryDataArray);
int ms_level = -1;
double rt = -1.0;
ifs_.seekg(spectra_index[i]);
Internal::CachedMzMLHandler::readSpectrumFast(mz_array, intensity_array, ifs_, ms_level, rt);
nr_peaks += intensity_array->data.size();
for (Size j = 0; j < intensity_array->data.size(); j++)
{
TIC += intensity_array->data[j];
}
}
std::cout << "There are " << spectra_index.size() << " spectra and " << nr_peaks << " peaks in the input file." << std::endl;
std::cout << "The total ion current is " << TIC << std::endl;
size_t after;
SysInfo::getProcessMemoryConsumption(after);
std::cout << " Memory consumption after " << after << std::endl;
}
else if (read_method == "cached_parallel")
{
std::cout << "Read method: cached parallel" << std::endl;
// Special handling of cached mzML as input types:
// we expect two paired input files which we should read into exp
std::vector<String> split_out;
in.split(".cachedMzML", split_out);
if (split_out.size() != 2)
{
OPENMS_LOG_ERROR << "Cannot deduce base path from input '" << in <<
"' (note that '.cachedMzML' should only occur once as the final ending)" << std::endl;
return ILLEGAL_PARAMETERS;
}
String in_meta = split_out[0] + ".mzML";
MzMLFile f;
f.setLogType(log_type_);
Internal::CachedMzMLHandler cache;
cache.createMemdumpIndex(in);
const std::vector<std::streampos> spectra_index = cache.getSpectraIndex();
FileAbstraction filestream(in);
double TIC = 0.0;
long nr_peaks = 0;
#pragma omp parallel for firstprivate(filestream) reduction(+: TIC, nr_peaks)
for (SignedSize i=0; i < (SignedSize)spectra_index.size(); ++i)
{
BinaryDataArrayPtr mz_array(new BinaryDataArray);
BinaryDataArrayPtr intensity_array(new BinaryDataArray);
int ms_level = -1;
double rt = -1.0;
// we only change the position of the thread-local filestream
filestream.getStream().seekg(spectra_index[i]);
Internal::CachedMzMLHandler::readSpectrumFast(mz_array, intensity_array, filestream.getStream(), ms_level, rt);
nr_peaks += intensity_array->data.size();
TIC += std::accumulate(intensity_array->data.begin(), intensity_array->data.end(), 0.0);
}
std::cout << "There are " << spectra_index.size() << " spectra and " << nr_peaks << " peaks in the input file." << std::endl;
std::cout << "The total ion current is " << TIC << std::endl;
size_t after;
SysInfo::getProcessMemoryConsumption(after);
std::cout << " Memory consumption after " << after << std::endl;
}
return EXECUTION_OK;
}
};
int main(int argc, const char** argv)
{
  // Print usage if used without arguments; propagate the tool's exit code
  // (the previous version always returned 0, hiding tool failures from callers).
  if (argc == 1)
  {
    TOPPTICCalculator tool;
    return tool.main(argc, argv);
  }

  // Add -test at the end of the arguments in order to avoid calling the OpenMS
  // server for usage statistics (and thus making the benchmark slower)
  char testflag[] = "-test";
  std::vector<const char*> newArgs(argc + 1); // one extra slot for the appended flag
  for (int arg = 0; arg < argc; ++arg)
  {
    newArgs[arg] = argv[arg];
  }
  newArgs[argc] = testflag;

  TOPPTICCalculator tool;
  size_t before {0};
  size_t after {0};
  SysInfo::getProcessMemoryConsumption(before);
  std::cout << " Memory consumption before " << before << std::endl;
  // vector::data() replaces the former C-style cast of &newArgs[0]
  const int exit_code = tool.main(argc + 1, newArgs.data());
  SysInfo::getProcessMemoryConsumption(after);
  std::cout << " Memory consumption final " << after << std::endl;
  return exit_code;
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/MetaboliteAdductDecharger.cpp | .cpp | 6,986 | 179 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Fabian Aicheler $
// $Authors: Fabian Aicheler $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/SYSTEM/StopWatch.h>
#include <OpenMS/ANALYSIS/DECHARGING/MetaboliteFeatureDeconvolution.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/ConsensusMap.h>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_MetaboliteAdductDecharger MetaboliteAdductDecharger
@brief Decharges a feature map by clustering charge variants of metabolites to zero-charge entities.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=2> → MetaboliteAdductDecharger →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_FeatureFinderMetabo </td>
</tr>
</table>
</CENTER>
The Decharger uses an ILP approach to group charge variants of the same metabolite, which
usually occur in ESI ionization mode. The resulting zero-charge metabolites, which are defined by RT and mass,
are written to consensusXML. Intensities of charge variants are summed up. The position of the zero charge
variant is the average of all clustered metabolites in each dimension (m and RT). For clustered metabolites,
the reported m/z is thus their neutral mass. For unclusted features with known charge, a default adduct
(protonation for positive mode, deprotonation for negative mode) is assumed to compute the neutral mass.
For unclustered features without known charge, m/z zero is reported.
It is also possible to include adducted species to the charge ladders (see 'potential_adducts' parameter).
Via this mechanism it is also possible to use this tool to find pairs/triples/quadruples/... in labeled data (by specifing the mass
tag weight as an adduct). If mass tags induce an RT shift (e.g. deuterium labeled data) you can also specify this also in the adduct list.
This will allow to tighten the RT search window, thus reducing false positive results.
This tool is derived from the method described in the following publication:
Bielow C, Ruzek S, Huber CG, Reinert K. Optimal decharging and clustering of charge ladders generated in ESI-MS. J Proteome Res 2010; 9: 2688.<br>
DOI: 10.1021/pr100177k
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_MetaboliteAdductDecharger.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_MetaboliteAdductDecharger.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class UTILMetaboliteAdductDecharger :
  virtual public TOPPBase
{
public:
  UTILMetaboliteAdductDecharger() :
    TOPPBase("MetaboliteAdductDecharger", "Decharges and merges different feature charge variants of the same metabolite.")
  {
  }

protected:
  /// Registers the command line parameters (input feature map, optional outputs, algorithm subsection).
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "input file ");
    setValidFormats_("in", ListUtils::create<String>("featureXML"));
    registerOutputFile_("out_cm", "<file>", "", "output consensus map", false);
    registerOutputFile_("out_fm", "<file>", "", "output feature map", false);
    registerOutputFile_("outpairs", "<file>", "", "output file", false);
    setValidFormats_("out_fm", ListUtils::create<String>("featureXML"));
    setValidFormats_("out_cm", ListUtils::create<String>("consensusXML"));
    setValidFormats_("outpairs", ListUtils::create<String>("consensusXML"));
    addEmptyLine_();
    registerSubsection_("algorithm", "Feature decharging algorithm section");
  }

  /// Exposes the defaults of MetaboliteFeatureDeconvolution as the only 'algorithm' subsection.
  Param getSubsectionDefaults_(const String& /*section*/) const override
  {
    MetaboliteFeatureDeconvolution deconv;
    Param defaults;
    defaults.insert("MetaboliteFeatureDeconvolution:", deconv.getParameters());
    return defaults;
  }

  ExitCodes main_(int, const char**) override
  {
    // --- parameter handling ---
    const String in = getStringOption_("in");
    const String out_fm = getStringOption_("out_fm");
    const String out_cm = getStringOption_("out_cm");
    const String out_pairs = getStringOption_("outpairs");

    MetaboliteFeatureDeconvolution deconv;
    Param const& algo_param = getParam_().copy("algorithm:MetaboliteFeatureDeconvolution:", true);
    writeDebug_("Parameters passed to MetaboliteAdductDecharger", algo_param, 3);
    deconv.setParameters(algo_param);

    // --- load input feature map ---
    writeDebug_("Loading input file", 1);
    FeatureMap fm_in;
    FeatureMap fm_out;
    FileHandler().loadFeatures(in, fm_in, {FileTypes::FEATUREXML});

    // --- decharging / clustering of charge variants ---
    ConsensusMap consensus;       // zero-charge entities
    ConsensusMap consensus_pairs; // clustered charge-variant pairs
    StopWatch watch;
    watch.start();
    deconv.compute(fm_in, fm_out, consensus, consensus_pairs);
    watch.stop();

    // --- write output ---
    writeDebug_("Saving output files", 1);
    // every column header points back at the single input file
    for (auto& header : consensus.getColumnHeaders())
    {
      header.second.filename = in;
    }
    for (auto& header : consensus_pairs.getColumnHeaders())
    {
      header.second.filename = in;
    }
    // annotate output with data processing info
    addDataProcessing_(fm_out, getProcessingInfo_(DataProcessing::CHARGE_DECONVOLUTION));
    addDataProcessing_(consensus, getProcessingInfo_(DataProcessing::CHARGE_DECONVOLUTION));
    addDataProcessing_(consensus_pairs, getProcessingInfo_(DataProcessing::CHARGE_DECONVOLUTION));
    FileHandler fh;
    if (!out_cm.empty())
    {
      fh.storeConsensusFeatures(out_cm, consensus, {FileTypes::CONSENSUSXML});
    }
    if (!out_pairs.empty())
    {
      fh.storeConsensusFeatures(out_pairs, consensus_pairs, {FileTypes::CONSENSUSXML});
    }
    if (!out_fm.empty())
    {
      fh.storeFeatures(out_fm, fm_out, {FileTypes::FEATUREXML});
    }
    return EXECUTION_OK;
  }
};
int main(int argc, const char** argv)
{
  // construct the tool and hand over the command line
  return UTILMetaboliteAdductDecharger().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/QCExporter.cpp | .cpp | 7,524 | 207 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Mathias Walzer $
// $Author: Mathias Walzer $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/FORMAT/CsvFile.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/DATASTRUCTURES/String.h>
#include <OpenMS/FORMAT/QcMLFile.h>
#include <OpenMS/FORMAT/ControlledVocabulary.h>
#include <OpenMS/SYSTEM/File.h>
#include <QByteArray>
#include <QFile>
#include <QString>
#include <QFileInfo>
//~ #include <QIODevice>
#include <iostream>
#include <fstream>
#include <vector>
#include <map>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_QCExporter QCExporter
@brief Will extract several quality parameter from several run/sets from a qcML file into a tabular (text) format - counterpart to QCImporter.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=2> → QCExporter →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> ? </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_QCEmbedder </td>
</tr>
</table>
</CENTER>
The data contained as values of the qp of a qcML file at @p in can be exported in tabluar (csv) format.
- @p names The name of the target runs or sets to be exported from. If empty, from all will be exported.
- @p mapping The mapping of the exported table's headers to the according qp cvs. The first row is considered containing the headers as for the exported the table. The second row is considered the according qp cv accessions of the qp to be exported.
Output is in csv format (see parameter @p out_csv) which can be easily viewed/parsed by many programs.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_QCExporter.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_QCExporter.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPQCExporter :
  public TOPPBase
{
public:
  TOPPQCExporter() :
    TOPPBase("QCExporter", "Will extract several qp from several run/sets in a tabular format.",
             true, {{ "Walzer M, Pernas LE, Nasso S, Bittremieux W, Nahnsen S, Kelchtermans P, Martens, L", "qcML: An Exchange Format for Quality Control Metrics from Mass Spectrometry Experiments", "Molecular & Cellular Proteomics 2014; 13(8)" , "10.1074/mcp.M113.035907"}})
  {
  }

protected:
  /// Registers the input qcML file, the run/set names to export, the header-to-CV mapping file and the csv output.
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Input qcml file");
    setValidFormats_("in", ListUtils::create<String>("qcML"));
    registerStringList_("names", "<names>", StringList(), "The name of the target runs or sets to be exported from. If empty, from all will be exported.", false);
    registerInputFile_("mapping", "<file>", "", "The mapping of the exported table's headers to the according qp cvs. The first row is considered containing the headers as for the exported the table. The second row is considered the according qp cv accessions of the qp to be exported.", true);
    setValidFormats_("mapping", ListUtils::create<String>("csv"));
    registerOutputFile_("out_csv", "<file>", "", "Output csv formatted quality parameter.");
    setValidFormats_("out_csv", ListUtils::create<String>("csv"));
  }

  ExitCodes main_(int, const char**) override
  {
    //-------------------------------------------------------------
    // parsing parameters
    //-------------------------------------------------------------
    String in = getStringOption_("in");
    String csv = getStringOption_("out_csv");
    StringList names = getStringList_("names");
    String mappi = getStringOption_("mapping");

    // load the controlled vocabularies needed to resolve qp accessions/names
    ControlledVocabulary cv;
    cv.loadFromOBO("PSI-MS", File::find("/CV/psi-ms.obo"));
    cv.loadFromOBO("QC", File::find("/CV/qc-cv.obo"));
    cv.loadFromOBO("QC", File::find("/CV/qc-cv-legacy.obo"));

    //-------------------------------------------------------------
    // reading input
    //------------------------------------------------------------
    QcMLFile qcmlfile;
    qcmlfile.load(in);

    if (!mappi.empty()) // 'mapping' is a required parameter, so this should always hold
    {
      CsvFile map_file(mappi);
      // first row: table headers as they should appear in the output;
      // second row: the corresponding qp CV accessions (or names)
      if (map_file.rowCount() < 2)
      {
        cerr << "Error: You have to give a mapping of your table (first row is the header of table and second row is the according qc). Aborting!" << endl;
        return ILLEGAL_PARAMETERS;
      }
      StringList header, according;
      map_file.getRow(0, header);
      map_file.getRow(1, according);
      if (header.size() != according.size())
      {
        cerr << "Error: You have to give a mapping of your table (first row is the header of table and second row is the according qc). Aborting!" << endl;
        return ILLEGAL_PARAMETERS;
      }

      // resolve CV names to accessions for entries that are not already known accessions
      for (Size i = 0; i < according.size(); ++i)
      {
        if (!cv.exists(according[i]))
        {
          try
          {
            const ControlledVocabulary::CVTerm& term = cv.getTermByName(according[i]);
            header[i] = term.name;
            according[i] = term.id;
          }
          catch (...)
          {
            cerr << "Error: You have to specify a correct cv with accession or name in col " << String(i) << ". Aborting!" << endl;
            return ILLEGAL_PARAMETERS;
          }
        }
      }

      // no names given: export from all runs
      if (names.empty())
      {
        std::vector<String> ns;
        qcmlfile.getRunIDs(ns); //n.b. names are ids
        names = StringList(ns); //TODO also sets
      }

      // assemble the csv: one header line plus one line per run
      String csv_str = ListUtils::concatenate(header, ",");
      csv_str += '\n';
      for (Size i = 0; i < names.size(); ++i)
      {
        csv_str += qcmlfile.exportQPs(names[i], according);
        csv_str += '\n';
      }

      ofstream fout(csv.c_str());
      // fix: previously the stream was never checked, silently producing no output
      // when the path was not writable
      if (!fout)
      {
        cerr << "Error: Could not open output file '" << csv << "'. Aborting!" << endl;
        return CANNOT_WRITE_OUTPUT_FILE;
      }
      fout << csv_str << endl;
      fout.close();
    }
    return EXECUTION_OK;
  }
};
int main(int argc, const char** argv)
{
  // construct the tool and hand over the command line
  return TOPPQCExporter().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/IDMapper.cpp | .cpp | 13,540 | 250 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Marc Sturm, Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/IDMapper.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/FORMAT/ConsensusXMLFile.h>
#include <OpenMS/FORMAT/FeatureXMLFile.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FORMAT/FileTypes.h>
#include <OpenMS/FORMAT/IdXMLFile.h>
#include <OpenMS/FORMAT/MzIdentMLFile.h>
#include <OpenMS/config.h>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
// Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_IDMapper IDMapper
@brief Assigns protein/peptide identifications to features or consensus features.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> potential predecessor tools </td>
<td VALIGN="middle" ROWSPAN=3> → IDMapper →</td>
<th ALIGN = "center"> potential successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_CometAdapter (or other ID engines) </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_ConsensusID </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN="center" ROWSPAN=1> @ref TOPP_IDFilter </td>
<td VALIGN="middle" ALIGN="center" ROWSPAN=1> @ref TOPP_MapAlignerIdentification </td>
</tr>
</table>
</CENTER>
The mapping is based on retention times and mass-to-charge values. Roughly, a peptide identification is assigned to a (consensus) feature if its position lies within the boundaries of the feature
or close enough to the feature centroid. Peptide identifications that don't match anywhere are still recorded in the resulting map, as "unassigned peptides". Protein identifications are annotated
to the whole map, i.e. not to any particular (consensus) feature.
In all cases, tolerance in RT and m/z dimension is applied according to the parameters @p rt_tolerance and @p mz_tolerance. Tolerance is understood as "plus or minus x", so the matching range is
actually increased by twice the tolerance value.
If several features or consensus features overlap the position of a peptide identification (taking the allowed tolerances into account), the identification is annotated to all of them.
<B>Annotation of feature maps (featureXML input):</B>\n
If @em all features have at least one convex hull, peptide positions are matched against the bounding boxes of the convex hulls (of individual mass traces, if available) by default.
If not, the positions of the feature centroids are used. The respective coordinates of the centroids are also used for matching (in place of the corresponding ranges from the bounding boxes)
if @p feature:use_centroid_rt or @p feature:use_centroid_mz are true.
<B>Annotation of consensus maps (consensusXML input):</B>\n
Peptide positions are always matched against centroid positions. By default, the consensus centroids are used. However, if @p consensus:use_subelements is set, the centroids of sub-features are
considered instead. In this case, a peptide identification is mapped to a consensus feature if any of its sub-features matches.
@note Currently mzIdentML (mzid) is not directly supported as an input/output format of this tool. Convert mzid files to/from idXML using @ref TOPP_IDFileConverter if necessary.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_IDMapper.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_IDMapper.html
On the peptide side, two sources for m/z values are possible (see parameter @p mz_reference): 1. m/z of the precursor of the MS2 spectrum that gave rise to the peptide identification;
2. theoretical masses computed from the amino acid sequences of peptide hits.
(When using theoretical masses, make sure that peptide modifications were identified correctly. OpenMS currently "forgets" mass shifts that it can't assign to modifications - if that
happens, masses computed from peptide sequences will be off.)
@deprecated The parameter handling of this tool has been reworked. For greater consistency with other tools, the parameters @p rt_delta and @p mz_delta have been renamed to @p rt_tolerance
and @p mz_tolerance. The possible values of the @p mz_reference parameter have also been renamed. The default value of @p mz_tolerance has been increased from 1 ppm to a more realistic 20 ppm.\n
Most importantly, the @p use_centroids parameter from previous versions has been split into two parameters, @p feature:use_centroid_rt and @p feature:use_centroid_mz. In OpenMS 1.6, peptide
identifications would be matched only against monoisotopic mass traces of features if @p mz_reference was @p PeptideMass; otherwise, all mass traces would be used. This implicit behaviour has
been abandoned, you can now explicitly control it with the @p feature:use_centroid_mz parameter. @p feature:use_centroid_mz does not take into account m/z deviations in the monoisotopic mass
trace, but this can be compensated by increasing @p mz_tolerance. The new implementation should work correctly even if the monoisotopic mass trace itself was not detected.
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPIDMapper : public TOPPBase
{
public:
  TOPPIDMapper() : TOPPBase("IDMapper", "Assigns protein/peptide identifications to features or consensus features.")
  {
  }

protected:
  /// Registers I/O files plus the matching-tolerance options; numeric defaults are pulled
  /// from the IDMapper algorithm itself so tool and library cannot drift apart.
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("id", "<file>", "", "Protein/peptide identifications file");
    setValidFormats_("id", ListUtils::create<String>("mzid,idXML"));
    registerInputFile_("in", "<file>", "", "Feature map/consensus map file");
    setValidFormats_("in", ListUtils::create<String>("featureXML,consensusXML"));
    registerOutputFile_("out", "<file>", "", "Output file (the format depends on the input file format).");
    setValidFormats_("out", ListUtils::create<String>("featureXML,consensusXML"));
    addEmptyLine_();
    IDMapper mapper;
    Param p = mapper.getParameters();
    registerDoubleOption_("rt_tolerance", "<value>", p.getValue("rt_tolerance"),
                          "RT tolerance (in seconds) for the matching of peptide identifications and (consensus) features.\nTolerance is understood as 'plus or minus x', so the matching range "
                          "increases by twice the given value.",
                          false);
    setMinFloat_("rt_tolerance", 0.0);
    registerDoubleOption_("mz_tolerance", "<value>", p.getValue("mz_tolerance"),
                          "m/z tolerance (in ppm or Da) for the matching of peptide identifications and (consensus) features.\nTolerance is understood as 'plus or minus x', so the matching range "
                          "increases by twice the given value.",
                          false);
    setMinFloat_("mz_tolerance", 0.0);
    registerStringOption_("mz_measure", "<choice>", p.getEntry("mz_measure").valid_strings[0], "Unit of 'mz_tolerance'.", false);
    setValidStrings_("mz_measure", ListUtils::toStringList<std::string>(p.getEntry("mz_measure").valid_strings));
    registerStringOption_(
      "mz_reference", "<choice>", p.getEntry("mz_reference").valid_strings[1],
      "Source of m/z values for peptide identifications. If 'precursor', the precursor-m/z from the idXML is used. If 'peptide',\nmasses are computed from the sequences of peptide hits; in this "
      "case, an identification matches if any of its hits matches.\n('peptide' should be used together with 'feature:use_centroid_mz' to avoid false-positive matches.)",
      false);
    setValidStrings_("mz_reference", ListUtils::toStringList<std::string>(p.getEntry("mz_reference").valid_strings));
    registerFlag_("ignore_charge", "For feature/consensus maps: Assign an ID independently of whether its charge state matches that of the (consensus) feature.", true);
    addEmptyLine_();
    registerTOPPSubsection_("feature", "Additional options for featureXML input");
    registerStringOption_("feature:use_centroid_rt", "<choice>", "false", "Use the RT coordinates of the feature centroids for matching, instead of the RT ranges of the features/mass traces.", false);
    setValidStrings_("feature:use_centroid_rt", ListUtils::create<String>("true,false"));
    registerStringOption_("feature:use_centroid_mz", "<choice>", "true",
                          "Use the m/z coordinates of the feature centroids for matching, instead of the m/z ranges of the features/mass traces.\n(If you choose 'peptide' as 'mz_reference', you "
                          "should usually set this flag to avoid false-positive matches.)",
                          false);
    setValidStrings_("feature:use_centroid_mz", ListUtils::create<String>("true,false"));
    addEmptyLine_();
    registerTOPPSubsection_("consensus", "Additional options for consensusXML input");
    registerFlag_("consensus:use_subelements", "Match using RT and m/z of sub-features instead of consensus RT and m/z. A consensus feature matches if any of its sub-features matches.");
    registerFlag_("consensus:annotate_ids_with_subelements", "Store the map index of the sub-feature in the peptide ID.", true);
    registerTOPPSubsection_("spectra", "Additional options for mzML input");
    registerInputFile_("spectra:in", "<file>", "", "MS run used to annotated unidentified spectra to features or consensus features.", false);
    setValidFormats_("spectra:in", ListUtils::create<String>("mzML"));
  }

  ExitCodes main_(int, const char**) override
  {
    //----------------------------------------------------------------
    // load ids
    //----------------------------------------------------------------
    String id = getStringOption_("id");
    vector<ProteinIdentification> protein_ids;
    PeptideIdentificationList peptide_ids;
    FileHandler().loadIdentifications(id, protein_ids, peptide_ids, {FileTypes::IDXML, FileTypes::MZIDENTML});
    String in = getStringOption_("in");
    String spectra = getStringOption_("spectra:in");
    String out = getStringOption_("out");
    // fix: 'in_type' was previously first computed from the ID file and immediately
    // overwritten — a dead store plus a redundant file-type sniff; determine it once from 'in'
    const FileTypes::Type in_type = FileHandler::getType(in);

    //----------------------------------------------------------------
    // create mapper
    //----------------------------------------------------------------
    IDMapper mapper;
    Param p = mapper.getParameters();
    p.setValue("rt_tolerance", getDoubleOption_("rt_tolerance"));
    p.setValue("mz_tolerance", getDoubleOption_("mz_tolerance"));
    p.setValue("mz_measure", getStringOption_("mz_measure"));
    p.setValue("mz_reference", getStringOption_("mz_reference"));
    p.setValue("ignore_charge", getFlag_("ignore_charge") ? "true" : "false");
    mapper.setParameters(p);

    //----------------------------------------------------------------
    // consensusXML
    //----------------------------------------------------------------
    if (in_type == FileTypes::CONSENSUSXML)
    {
      FileHandler consensusFile;
      ConsensusMap map;
      consensusFile.loadConsensusFeatures(in, map, {FileTypes::CONSENSUSXML});
      PeakMap exp;
      if (!spectra.empty())
      {
        FileHandler().loadExperiment(spectra, exp, {FileTypes::MZML}, log_type_);
      }
      bool measure_from_subelements = getFlag_("consensus:use_subelements");
      bool annotate_ids_with_subelements = getFlag_("consensus:annotate_ids_with_subelements");
      mapper.annotate(map, peptide_ids, protein_ids, measure_from_subelements, annotate_ids_with_subelements, exp);
      // annotate output with data processing info
      addDataProcessing_(map, getProcessingInfo_(DataProcessing::IDENTIFICATION_MAPPING));
      // sort list of peptide identifications in each consensus feature by map index
      map.sortPeptideIdentificationsByMapIndex();
      consensusFile.storeConsensusFeatures(out, map, {FileTypes::CONSENSUSXML});
    }

    //----------------------------------------------------------------
    // featureXML
    //----------------------------------------------------------------
    if (in_type == FileTypes::FEATUREXML)
    {
      FeatureMap map;
      FileHandler featureFile;
      featureFile.loadFeatures(in, map, {FileTypes::FEATUREXML});
      PeakMap exp;
      if (!spectra.empty())
      {
        FileHandler().loadExperiment(spectra, exp, {FileTypes::MZML}, log_type_);
      }
      mapper.annotate(map, peptide_ids, protein_ids, (getStringOption_("feature:use_centroid_rt") == "true"), (getStringOption_("feature:use_centroid_mz") == "true"), exp);
      // annotate output with data processing info
      addDataProcessing_(map, getProcessingInfo_(DataProcessing::IDENTIFICATION_MAPPING));
      featureFile.storeFeatures(out, map, {FileTypes::FEATUREXML});
    }
    return EXECUTION_OK;
  }
};
int main(int argc, const char** argv)
{
  // construct the tool and hand over the command line
  return TOPPIDMapper().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/Digestor.cpp | .cpp | 11,266 | 293 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Nico Pfeifer, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/AhoCorasickAmbiguous.h> // for AA's
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/CHEMISTRY/ProteaseDB.h>
#include <OpenMS/CHEMISTRY/ProteaseDigestion.h>
#include <OpenMS/FORMAT/FASTAFile.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/METADATA/ProteinIdentification.h>
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_int_distribution.hpp>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_Digestor Digestor
@brief Digests a protein database in-silico.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=2> → Digestor →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> none (FASTA input) </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_IDFilter (peptide blacklist)</td>
</tr>
</table>
</CENTER>
This application is used to digest a protein database to get all
peptides given a cleavage enzyme.
The output can be used e.g. as a blacklist filter input to @ref TOPP_IDFilter, to remove certain peptides.
@note Currently mzIdentML (mzid) is not directly supported as an input/output format of this tool. Convert mzid files to/from idXML using @ref TOPP_IDFileConverter if necessary.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_Digestor.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_Digestor.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPDigestor :
public TOPPBase
{
public:
  /// Sets the tool name and the one-line description shown by the TOPP framework (e.g. in --help).
  TOPPDigestor() :
    TOPPBase("Digestor", "Digests a protein database in-silico.")
  {
  }
protected:
void registerOptionsAndFlags_() override
{
registerInputFile_("in", "<file>", "", "input file");
setValidFormats_("in", {"fasta"});
registerOutputFile_("out", "<file>", "", "Output file (peptides)");
setValidFormats_("out", {"idXML", "fasta"});
registerStringOption_("out_type", "<type>", "", "Set this if you cannot control the filename of 'out', e.g., in TOPPAS.", false);
setValidStrings_("out_type", {"idXML", "fasta"});
registerIntOption_("missed_cleavages", "<number>", 1, "The number of allowed missed cleavages", false);
setMinInt_("missed_cleavages", 0);
registerIntOption_("min_length", "<number>", 6, "Minimum length of peptide", false);
registerIntOption_("max_length", "<number>", 40, "Maximum length of peptide", false);
vector<String> all_enzymes;
ProteaseDB::getInstance()->getAllNames(all_enzymes);
registerStringOption_("enzyme", "<string>", "Trypsin", "The type of digestion enzyme", false);
setValidStrings_("enzyme", all_enzymes);
registerTOPPSubsection_("FASTA", "Options for FASTA output files");
registerStringOption_("FASTA:ID", "<option>", "parent", "Identifier to use for each peptide: copy from parent protein (parent); a consecutive number (number); parent ID + consecutive number (both)", false);
setValidStrings_("FASTA:ID", {"parent", "number", "both"});
registerStringOption_("FASTA:description", "<option>", "remove", "Keep or remove the (possibly lengthy) FASTA header description. Keeping it can increase resulting FASTA file significantly.", false);
setValidStrings_("FASTA:description", {"remove", "keep"});
registerFlag_("replace_ambiguous",
"Replace ambiguous amino acids with a random unambiguous amino acid. This is useful for generating an output file that "
"mimics a search engine result (since they usually do not contain ambiguous amino acids).",
false);
}
enum FASTAID {PARENT, NUMBER, BOTH};
ExitCodes main_(int, const char**) override
{
vector<ProteinIdentification> protein_identifications;
PeptideIdentificationList identifications;
PeptideIdentification peptide_identification;
DateTime date_time = DateTime::now();
String date_time_string = date_time.get();
peptide_identification.setIdentifier("In-silico_digestion" + date_time_string);
ProteinIdentification protein_identification;
protein_identifications.push_back(ProteinIdentification());
//-------------------------------------------------------------
// parsing parameters
//-------------------------------------------------------------
String inputfile_name = getStringOption_("in");
String outputfile_name = getStringOption_("out");
FASTAID FASTA_ID = getStringOption_("FASTA:ID") == "parent" ? PARENT : (getStringOption_("FASTA:ID") == "number" ? NUMBER : BOTH);
bool keep_FASTA_desc = (getStringOption_("FASTA:description") == "keep");
// output file type
FileHandler fh;
FileTypes::Type out_type = FileTypes::nameToType(getStringOption_("out_type"));
if (out_type == FileTypes::UNKNOWN)
{
out_type = fh.getTypeByFileName(outputfile_name);
writeDebug_(String("Output file type: ") + FileTypes::typeToName(out_type), 2);
}
if (out_type == FileTypes::UNKNOWN)
{
OPENMS_LOG_ERROR << ("Error: Could not determine output file type!") << std::endl;
return PARSE_ERROR;
}
Size min_size = getIntOption_("min_length");
Size max_size = getIntOption_("max_length");
Size missed_cleavages = getIntOption_("missed_cleavages");
bool has_FASTA_output = (out_type == FileTypes::FASTA);
//-------------------------------------------------------------
// reading input
//-------------------------------------------------------------
FASTAFile ff;
ff.readStart(inputfile_name);
if (has_FASTA_output) ff.writeStart(outputfile_name);
//-------------------------------------------------------------
// calculations
//-------------------------------------------------------------
// This should be updated if more cleavage enzymes are available
ProteinIdentification::SearchParameters search_parameters;
String enzyme = getStringOption_("enzyme");
ProteaseDigestion digestor;
digestor.setEnzyme(enzyme);
digestor.setMissedCleavages(missed_cleavages);
search_parameters.digestion_enzyme = *ProteaseDB::getInstance()->getEnzyme(enzyme);
PeptideHit temp_peptide_hit;
PeptideEvidence temp_pe;
protein_identifications[0].setSearchParameters(search_parameters);
protein_identifications[0].setDateTime(date_time);
protein_identifications[0].setSearchEngine("In-silico digestion");
protein_identifications[0].setIdentifier("In-silico_digestion" + date_time_string);
Size dropped_by_length(0); // stats for removing candidates
Size fasta_out_count(0);
FASTAFile::FASTAEntry fe;
boost::random::mt19937 gen; // for cross-platform reproducibility
boost::random::uniform_int_distribution<> rng_X(0, AA::unambiguousAACount() - 1); // roll for unambiguous AA (boost uses a closed interval, i.e. [0, 21] for 22 unambiguous AA's)
boost::random::uniform_int_distribution<> rng_2(0, 1); // coin flip
while (ff.readNext(fe))
{
if (!has_FASTA_output)
{
ProteinHit temp_protein_hit;
temp_protein_hit.setSequence(fe.sequence);
temp_protein_hit.setAccession(fe.identifier);
protein_identifications[0].insertHit(temp_protein_hit);
temp_pe.setProteinAccession(fe.identifier);
temp_peptide_hit.setPeptideEvidences(vector<PeptideEvidence>(1, temp_pe));
}
std::vector<std::pair<size_t, size_t>> current_digest;
if (enzyme == "none")
{
current_digest.emplace_back(0, fe.sequence.size());
}
else
{
dropped_by_length += digestor.digest(AASequence::fromString(fe.sequence), current_digest, min_size, max_size);
}
String id = fe.identifier;
for (auto [pep_start, pep_end] : current_digest)
{
if (getFlag_("replace_ambiguous"))
{
for (auto pos = pep_start; pos < pep_end; ++pos)
{ // look at all AA's of the peptide, and replace if ambiguous
switch (fe.sequence[pos])
{
case 'B': // asparagine or aspartic acid
fe.sequence[pos] = rng_2(gen) ? 'N' : 'D';
break;
case 'Z': // glutamine or glutamic acid
fe.sequence[pos] = rng_2(gen) ? 'Q' : 'E';
break;
case 'J': // leucine or isoleucine
fe.sequence[pos] = rng_2(gen) ? 'L' : 'I';
break;
case 'X': // any amino acid
fe.sequence[pos] = AA::fromIndex(rng_X(gen)).toChar();
break;
default:
break; // do nothing for other residues
}
}
}
if (!has_FASTA_output)
{
temp_peptide_hit.setSequence(AASequence(fe.sequence.substr(pep_start, pep_end-pep_start)));
peptide_identification.insertHit(temp_peptide_hit);
identifications.push_back(peptide_identification);
peptide_identification.setHits(std::vector<PeptideHit>()); // clear
}
else // for FASTA file output
{
++fasta_out_count;
switch (FASTA_ID)
{
case PARENT: break;
case NUMBER: id = String(fasta_out_count); break;
case BOTH: id = fe.identifier + "_" + String(fasta_out_count); break;
}
ff.writeNext(FASTAFile::FASTAEntry(id, keep_FASTA_desc ? fe.description : "", fe.sequence.substr(pep_start, pep_end - pep_start)));
}
}
}
//-------------------------------------------------------------
// writing output
//-------------------------------------------------------------
if (has_FASTA_output)
{
ff.writeEnd();
}
else
{
FileHandler().storeIdentifications(outputfile_name,
protein_identifications,
identifications,
{FileTypes::IDXML});
}
Size pep_remaining_count = (has_FASTA_output ? fasta_out_count : identifications.size());
OPENMS_LOG_INFO << "Statistics:\n"
<< " file: " << inputfile_name << "\n"
<< " total #peptides after digestion: " << pep_remaining_count + dropped_by_length << "\n"
<< " removed #peptides (length restrictions): " << dropped_by_length << "\n"
<< " remaining #peptides: " << pep_remaining_count << std::endl;
return EXECUTION_OK;
}
};
int main(int argc, const char** argv)
{
  // Instantiate the tool and delegate command line handling to TOPPBase.
  return TOPPDigestor().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/OpenSwathMzMLFileCacher.cpp | .cpp | 10,408 | 300 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/FORMAT/CachedMzML.h>
#include <OpenMS/FORMAT/HANDLERS/CachedMzMLHandler.h>
#include <OpenMS/FORMAT/SqMassFile.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FORMAT/FileTypes.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/CONCEPT/ProgressLogger.h>
// TODO move transform to handler
#include <OpenMS/FORMAT/MzMLFile.h>
#include <fstream>
#include <OpenMS/FORMAT/DATAACCESS/MSDataWritingConsumer.h>
#include <OpenMS/FORMAT/DATAACCESS/MSDataCachedConsumer.h>
#include <OpenMS/FORMAT/DATAACCESS/MSDataSqlConsumer.h>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_OpenSwathMzMLFileCacher OpenSwathMzMLFileCacher
  @brief Serializes the spectra and/or chromatograms of an mzML file
This class will serialize a spectra and/or chromatogram mzML file and store
it in a binary format that contains ONLY the spectra and chromatogram data
(no metadata).
This is implemented using the write_memdump and read_memdump functions.
For reading there are 2 options
- read the whole file into the OpenMS datastructures
- read only an index (read_memdump_idx) of the spectra and chromatograms and then use
random-access to retrieve a specific spectra from the disk (read_memdump_spectra)
@note This tool is experimental!
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_OpenSwathMzMLFileCacher.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_OpenSwathMzMLFileCacher.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPOpenSwathMzMLFileCacher
  : public TOPPBase,
    public ProgressLogger
{
public:
  TOPPOpenSwathMzMLFileCacher()
    : TOPPBase("OpenSwathMzMLFileCacher","Caches the spectra and chromatogram data of an mzML to disk.")
  {
  }

  typedef PeakMap MapType;

protected:
  /// Register the command line parameters of this tool.
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in","<file>","","Input mzML file");
    registerStringOption_("in_type", "<type>", "", "input file type -- default: determined from file extension or content\n", false);
    String formats("mzML,sqMass");
    setValidFormats_("in", ListUtils::create<String>(formats));
    setValidStrings_("in_type", ListUtils::create<String>(formats));
    formats = "mzML,sqMass";
    registerOutputFile_("out", "<file>", "", "Output file");
    setValidFormats_("out", ListUtils::create<String>(formats));
    registerStringOption_("out_type", "<type>", "", "Output file type -- default: determined from file extension or content\nNote: that not all conversion paths work or make sense.", false);
    setValidStrings_("out_type", ListUtils::create<String>(formats));
    //registerStringOption_("out_meta","<file>","","output file", false);
    //setValidFormats_("out_meta",ListUtils::create<String>("mzML"));
    registerFlag_("convert_back", "Convert back to mzML");
    registerStringOption_("lossy_compression", "<type>", "true", "Use numpress compression to achieve optimally small file size (attention: may cause small loss of precision; only for mzML data).", false);
    setValidStrings_("lossy_compression", ListUtils::create<String>("true,false"));
    registerStringOption_("full_meta", "<type>", "true", "Write full meta information into sqMass file (may require large amounts of memory)", false);
    setValidStrings_("full_meta", ListUtils::create<String>("true,false"));
    registerDoubleOption_("lossy_mass_accuracy", "<error>", -1.0, "Desired (absolute) m/z accuracy for lossy compression (e.g. use 0.0001 for a mass accuracy of 0.2 ppm at 500 m/z, default uses -1.0 for maximal accuracy).", false, true);
    registerFlag_("process_lowmemory", "Whether to process the file on the fly without loading the whole file into memory first (only for conversions of mzXML/mzML to mzML).\nNote: this flag will prevent conversion from spectra to chromatograms.", true);
    registerIntOption_("lowmem_batchsize", "<number>", 500, "The batch size of the low memory conversion", false, true);
    setMinInt_("lowmem_batchsize", 0);
  }

  ExitCodes main_(int , const char**) override
  {
    // 'out' doubles as the metadata file name; the raw data goes to '<out>.cached'
    String out_meta = getStringOption_("out");
    String out_cached = out_meta + ".cached";
    bool convert_back = getFlag_("convert_back");
    bool process_lowmemory = getFlag_("process_lowmemory");
    int batchSize = (int)getIntOption_("lowmem_batchsize");
    bool full_meta = (getStringOption_("full_meta") == "true");
    bool lossy_compression = (getStringOption_("lossy_compression") == "true");
    double mass_acc = getDoubleOption_("lossy_mass_accuracy");
    FileHandler fh;
    //input file type
    String in = getStringOption_("in");
    String in_cached = in + ".cached";
    FileTypes::Type in_type = FileTypes::nameToType(getStringOption_("in_type"));
    if (in_type == FileTypes::UNKNOWN)
    { // no explicit type given -> guess from file extension or content
      in_type = fh.getType(in);
      writeDebug_(String("Input file type: ") + FileTypes::typeToName(in_type), 2);
    }
    if (in_type == FileTypes::UNKNOWN)
    {
      writeLogError_("Error: Could not determine input file type!");
      return PARSE_ERROR;
    }
    //output file names and types
    String out = getStringOption_("out");
    FileTypes::Type out_type = FileTypes::nameToType(getStringOption_("out_type"));
    if (out_type == FileTypes::UNKNOWN)
    {
      out_type = fh.getTypeByFileName(out);
    }
    if (out_type == FileTypes::UNKNOWN)
    {
      writeLogError_("Error: Could not determine output file type!");
      return PARSE_ERROR;
    }
    // --- direct conversions between mzML and sqMass ---
    if (in_type == FileTypes::SQMASS && out_type == FileTypes::MZML)
    { // sqMass -> mzML: load fully, then write
      MapType exp;
      SqMassFile sqfile;
      sqfile.load(in, exp);
      FileHandler().storeExperiment(out, exp, {FileTypes::MZML}, log_type_);
      return EXECUTION_OK;
    }
    else if (in_type == FileTypes::MZML && out_type == FileTypes::SQMASS && process_lowmemory)
    { // mzML -> sqMass, streaming: data is pushed into the SQL consumer batch-wise
      MSDataSqlConsumer consumer(out, batchSize, full_meta, lossy_compression, mass_acc);
      MzMLFile f;
      PeakFileOptions opt = f.getOptions();
      opt.setMaxDataPoolSize(batchSize);
      f.setOptions(opt);
      f.transform(in, &consumer, true, true);
      return EXECUTION_OK;
    }
    else if (in_type == FileTypes::MZML && out_type == FileTypes::MZML && process_lowmemory)
    { // mzML -> mzML pass-through, streaming (e.g. to re-write with an index).
      // NOTE: PlainMSDataWritingConsumer writes mzML, so this branch must only be
      // reached for mzML input/output (the previous condition/file type combination
      // 'SQMASS -> SQMASS' with an mzML consumer was inconsistent).
      PlainMSDataWritingConsumer consumer(out);
      consumer.getOptions().setWriteIndex(true);
      MzMLFile f;
      f.transform(in, &consumer, true, true);
      return EXECUTION_OK;
    }
    else if (in_type == FileTypes::MZML && out_type == FileTypes::SQMASS)
    { // mzML -> sqMass: load fully, then write with the configured compression
      SqMassFile::SqMassConfig config;
      config.write_full_meta = full_meta;
      config.use_lossy_numpress = lossy_compression;
      config.linear_fp_mass_acc = mass_acc;
      SqMassFile sqfile;
      sqfile.setConfig(config);
      MapType exp;
      FileHandler().loadExperiment(in, exp, {FileTypes::MZML}, log_type_);
      sqfile.store(out, exp);
      return EXECUTION_OK;
    }
    // --- caching to / restoring from the binary 'cached mzML' format ---
    if (!convert_back)
    {
      if (process_lowmemory)
      { // stream spectra into the cache consumer; 'exp' only collects metadata
        MapType exp;
        MzMLFile f;
        f.setLogType(log_type_);
        MSDataCachedConsumer consumer(out_cached, true);
        PeakFileOptions opt = f.getOptions();
        opt.setMaxDataPoolSize(batchSize);
        f.setOptions(opt);
        f.transform(in, &consumer, exp, false, false);
        Internal::CachedMzMLHandler cacher;
        cacher.setLogType(log_type_);
        cacher.writeMetadata(exp, out_meta, true);
      }
      else
      { // load everything, then dump raw data + metadata
        MapType exp;
        Internal::CachedMzMLHandler cacher;
        cacher.setLogType(log_type_);
        FileHandler().loadExperiment(in, exp, {FileTypes::MZML}, log_type_);
        cacher.writeMemdump(exp, out_cached);
        cacher.writeMetadata(exp, out_meta, true);
      }
    }
    else
    { // convert back: merge the cached raw data into the metadata-only mzML
      MapType meta_exp;
      Internal::CachedMzMLHandler cacher;
      MapType exp_reading;
      cacher.setLogType(log_type_);
      FileHandler().loadExperiment(in,meta_exp, {FileTypes::MZML}, log_type_);
      cacher.readMemdump(exp_reading, in_cached);
      std::cout << " read back, got " << exp_reading.size() << " spectra " << exp_reading.getChromatograms().size() << " chromats " << std::endl;
      { // remove the 'cached_data' marker from all data processing entries
        for (Size i=0; i<meta_exp.size(); ++i)
        {
          for (Size j = 0; j < meta_exp[i].getDataProcessing().size(); j++)
          {
            if (meta_exp[i].getDataProcessing()[j]->metaValueExists("cached_data"))
            {
              meta_exp[i].getDataProcessing()[j]->removeMetaValue("cached_data");
            }
          }
        }
        for (Size i=0; i < meta_exp.getNrChromatograms(); ++i)
        {
          for (Size j = 0; j < meta_exp.getChromatogram(i).getDataProcessing().size(); j++)
          {
            if (meta_exp.getChromatogram(i).getDataProcessing()[j]->metaValueExists("cached_data"))
            {
              meta_exp.getChromatogram(i).getDataProcessing()[j]->removeMetaValue("cached_data");
            }
          }
        }
      }
      if (meta_exp.size() != exp_reading.size())
      { // bail out instead of indexing 'meta_exp' out of range below
        std::cerr << " Both experiments need to have the same size!";
        return ILLEGAL_PARAMETERS;
      }
      for (Size i=0; i<exp_reading.size(); ++i)
      {
        for (Size j = 0; j < exp_reading[i].size(); j++)
        {
          meta_exp[i].push_back(exp_reading[i][j]);
        }
      }
      std::vector<MSChromatogram > chromatograms = exp_reading.getChromatograms();
      std::vector<MSChromatogram > old_chromatograms = meta_exp.getChromatograms();
      for (Size i=0; i<chromatograms.size(); ++i)
      {
        for (Size j = 0; j < chromatograms[i].size(); j++)
        {
          old_chromatograms[i].push_back(chromatograms[i][j]);
        }
      }
      meta_exp.setChromatograms(old_chromatograms);
      FileHandler().storeExperiment(out_meta,meta_exp, {FileTypes::MZML}, log_type_);
    }
    return EXECUTION_OK;
  }
};
int main(int argc, const char** argv)
{
  // Instantiate the tool and delegate command line handling to TOPPBase.
  return TOPPOpenSwathMzMLFileCacher().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/JSONExporter.cpp | .cpp | 2,266 | 89 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/FORMAT/OMSFile.h>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_JSONExporter JSONExporter
@brief Converts .oms (SQLite) files to JSON
<CENTER>
<table>
<tr>
<th ALIGN = "center"> potential predecessor tools </td>
<td VALIGN="middle" ROWSPAN=2> → JSONExporter →</td>
<th ALIGN = "center"> potential successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_IDFileConverter </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> external tools</td>
</tr>
</table>
</CENTER>
...
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_JSONExporter.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_JSONExporter.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class JSONExporter: public TOPPBase
{
public:
  JSONExporter(): TOPPBase("JSONExporter", "Exports .oms (SQLite) files in JSON format")
  {
  }

protected:
  /// Declare the input (.oms) and output (.json) file parameters.
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Input file");
    setValidFormats_("in", {"oms"});
    registerOutputFile_("out", "<file>", "", "Output file");
    setValidFormats_("out", {"json"});
  }

  /// Main function: the actual conversion is delegated to OMSFile::exportToJSON().
  ExitCodes main_(int, const char**) override
  {
    //-------------------------------------------------------------
    // parameter handling
    //-------------------------------------------------------------
    const String input_path = getStringOption_("in");
    const String output_path = getStringOption_("out");
    OMSFile oms_file(log_type_);
    oms_file.exportToJSON(input_path, output_path);
    return EXECUTION_OK;
  }
};
int main(int argc, const char** argv)
{
  // Instantiate the tool and delegate command line handling to TOPPBase.
  return JSONExporter().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/IsobaricAnalyzer.cpp | .cpp | 13,487 | 280 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
// the available quantitation methods
#include <OpenMS/ANALYSIS/QUANTITATION/IsobaricQuantitationMethod.h>
#include <OpenMS/ANALYSIS/QUANTITATION/ItraqFourPlexQuantitationMethod.h>
#include <OpenMS/ANALYSIS/QUANTITATION/ItraqEightPlexQuantitationMethod.h>
#include <OpenMS/ANALYSIS/QUANTITATION/TMTSixPlexQuantitationMethod.h>
#include <OpenMS/ANALYSIS/QUANTITATION/TMTTenPlexQuantitationMethod.h>
#include <OpenMS/ANALYSIS/QUANTITATION/TMTElevenPlexQuantitationMethod.h>
#include <OpenMS/ANALYSIS/QUANTITATION/TMTSixteenPlexQuantitationMethod.h>
#include <OpenMS/ANALYSIS/QUANTITATION/TMTEighteenPlexQuantitationMethod.h>
#include <OpenMS/ANALYSIS/QUANTITATION/IsobaricChannelExtractor.h>
#include <OpenMS/ANALYSIS/QUANTITATION/IsobaricQuantifier.h>
#include <OpenMS/FORMAT/ConsensusXMLFile.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <memory> // for std::unique_ptr
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_IsobaricAnalyzer IsobaricAnalyzer
@brief Extracts and normalizes isobaric labeling information from an LC-MS/MS experiment.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=3> → IsobaricAnalyzer →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_PeakPickerHiRes </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=2> @ref TOPP_IDMapper</td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_FileFilter </td>
</tr>
</table>
</CENTER>
The input MSn spectra have to be in centroid mode for the tool to work properly. Use e.g. @ref TOPP_PeakPickerHiRes to perform centroiding of profile data, if necessary.
This tool currently supports iTRAQ 4-plex and 8-plex, and TMT 6-plex, 10-plex, 11-plex, 16-plex, and 18-plex as labeling methods.
It extracts the isobaric reporter ion intensities from centroided MS2 or MS3 data (MSn), then performs isotope correction and stores the resulting quantitation in a consensus map,
in which each consensus feature represents one relevant MSn scan (e.g. HCD; see parameters @p select_activation and @p min_precursor_intensity).
The MS level for quantification is chosen automatically, i.e. if MS3 is present, MS2 will be ignored.
For intensity, the closest non-zero m/z signal to the theoretical position is taken as reporter ion abundance.
The position (RT, m/z) of the consensus centroid is the precursor position in MS1 (from the MS2 spectrum);
the consensus sub-elements correspond to the theoretical channel m/z (with m/z values of 113-121 Th for iTRAQ and 126-131 Th for TMT, respectively).
For all labeling techniques, the search radius (@p reporter_mass_shift) should be set as small as possible, to avoid picking up false-positive ions as reporters.
Usually, Orbitraps deliver precision of about 0.0001 Th at this low mass range. Low intensity reporters might have a slightly higher deviation.
By default, the mass range is set to ~0.002 Th, which should be sufficient for all instruments (~15 ppm).
The tool will throw an Exception if you set it below 0.0001 Th (~0.7ppm).
The tool will also throw an Exception if you set @p reporter_mass_shift > 0.003 Th for TMT-10plex and TMT-11plex, since this could
lead to ambiguities with neighbouring channels (which are ~0.006 Th apart in most cases).
For quality control purposes, the tool reports the median distance between the theoretical vs. observed reporter ion peaks in each channel.
The search radius is fixed to 0.5 Th (regardless of the user defined search radius). This allows to track calibration issues.
For TMT-10plex, these results are automatically omitted if they could be confused with a neighbouring channel, i.e.
exceed the tolerance to a neighbouring channel with the same nominal mass (C/N channels).
If the distance is too large, you might have a m/z calibration problem (see @ref TOPP_InternalCalibration).
@note If none of the reporter ions can be detected in an MSn scan, a consensus feature will still be generated,
but the intensities of the overall feature and of all its sub-elements will be zero.
(If desired, such features can be removed by applying an intensity filter in @ref TOPP_FileFilter.)
However, if the spectrum is completely empty (no ions whatsoever), no consensus feature will be generated.
Isotope correction is done using non-negative least squares (NNLS), i.e.:@n
Minimize ||Ax - b||, subject to x >= 0, where b is the vector of observed reporter intensities (with "contaminating" isotope species),
A is a correction matrix (as supplied by the manufacturer of the labeling kit) and x is the desired vector of corrected (real) reporter intensities.
Other software tools solve this problem by using an inverse matrix multiplication, but this can yield entries in x which are negative.
In a real sample, this solution cannot possibly be true, so usually negative values (= negative reporter intensities) are set to zero.
However, a negative result usually means that noise was not properly accounted for in the calculation.
We thus use NNLS to get a non-negative solution, without the need to truncate negative values.
In the (usual) case that inverse matrix multiplication yields only positive values, our NNLS will give the exact same optimal solution.
The correction matrices can be found (and changed) in the INI file (parameter @p correction_matrix of the corresponding labeling method).
However, these matrices for both 4-plex and 8-plex iTRAQ are now stable, and every kit delivered should have the same isotope correction values.
Thus, there should be no need to change them, but feel free to compare the values in the INI file with your kit's certificate.
For TMT (6-plex and 10-plex) the values have to be adapted for each kit: Modify the correction matrix according to the data in the product data sheet of your charge:
<pre>
Data sheet:
  Mass Tag Reporter Ion -2 -1 Monoisotopic +1 +2
126 126.12776 0.0% 0.0% 100% 5.0% 0.0%
127N 127.124761 0.0% 0.2% 100% 4.6% 0.0%
...
</pre>
Corresponding correction matrix:
<pre>
[0.0/0.0/5.0/0.0,
0.0/0.2/4.6/0.0,
...
</pre>
After the quantitation, you may want to annotate the consensus features with corresponding peptide identifications,
obtained from an identification pipeline. Use @ref TOPP_IDMapper to perform the annotation, but make sure to set
suitably small RT and m/z tolerances for the mapping. Since the positions of the consensus features reported here
are taken from the precursor of the MS2 (also if quant was done in MS3), it should be possible to achieve a
perfect one-to-one matching of every identification (from MS2) to a single consensus feature.
Note that quantification will be solely on peptide level after this stage. In order to obtain protein quantities,
you can use @ref TOPP_TextExporter to obtain a simple text format which you can feed to other software tools (e.g., R),
or you can apply @ref TOPP_ProteinQuantifier.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_IsobaricAnalyzer.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_IsobaricAnalyzer.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPIsobaricAnalyzer :
  public TOPPBase
{
private:
  /// Available quantitation methods, keyed by their internal method name (e.g. "itraq4plex")
  std::map<String, std::unique_ptr<IsobaricQuantitationMethod>> quant_methods_;
  /// Human-readable display name per internal method name (e.g. "iTRAQ 4-plex")
  std::map<String, String> quant_method_names_;

  /// Take ownership of a quantitation method and record its display name.
  void addMethod_(std::unique_ptr<IsobaricQuantitationMethod> ptr, std::string name)
  {
    std::string internal_name = ptr->getMethodName();
    quant_methods_[internal_name] = std::move(ptr);
    quant_method_names_[internal_name] = name;
  }

public:
  TOPPIsobaricAnalyzer() :
    TOPPBase("IsobaricAnalyzer", "Calculates isobaric quantitative values for peptides")
  {
    addMethod_(make_unique<ItraqFourPlexQuantitationMethod>(), "iTRAQ 4-plex");
    addMethod_(make_unique<ItraqEightPlexQuantitationMethod>(), "iTRAQ 8-plex");
    addMethod_(make_unique<TMTSixPlexQuantitationMethod>(), "TMT 6-plex");
    addMethod_(make_unique<TMTTenPlexQuantitationMethod>(), "TMT 10-plex");
    addMethod_(make_unique<TMTElevenPlexQuantitationMethod>(), "TMT 11-plex");
    addMethod_(make_unique<TMTSixteenPlexQuantitationMethod>(), "TMT 16-plex");
    addMethod_(make_unique<TMTEighteenPlexQuantitationMethod>(), "TMT 18-plex");
  }

protected:
  /// Register the command line parameters of this tool.
  void registerOptionsAndFlags_() override
  {
    // initialize with the first available type
    registerStringOption_("type", "<mode>", quant_methods_.begin()->first, "Isobaric Quantitation method used in the experiment.", false);
    StringList valid_types;
    for (const auto& qm : quant_methods_)
    {
      valid_types.push_back(qm.first);
    }
    setValidStrings_("type", valid_types);
    registerInputFile_("in", "<file>", "", "input raw/picked data file ");
    setValidFormats_("in", {"mzML"});
    registerOutputFile_("out", "<file>", "", "output consensusXML file with quantitative information");
    setValidFormats_("out", {"consensusXML"});
    registerSubsection_("extraction", "Parameters for the channel extraction.");
    registerSubsection_("quantification", "Parameters for the peptide quantification.");
    // one algorithm subsection per supported labeling method
    for (const auto& qm : quant_methods_)
    {
      registerSubsection_(qm.second->getMethodName(), String("Algorithm parameters for ") + quant_method_names_[qm.second->getMethodName()]);
    }
  }

  /// Provide the default parameters for each registered subsection.
  Param getSubsectionDefaults_(const String& section) const override
  {
    // any concrete method works as a placeholder to obtain the generic defaults
    ItraqFourPlexQuantitationMethod temp_quant;
    if (section == "extraction")
    {
      return IsobaricChannelExtractor(&temp_quant).getParameters();
    }
    else if (section == "quantification")
    {
      return IsobaricQuantifier(&temp_quant).getParameters();
    }
    else
    {
      if (const auto it = quant_methods_.find(section); it == quant_methods_.end())
      { // should not happen
        throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Invalid subsection " + section);
      }
      else
      {
        return it->second->getParameters();
      }
    }
  }

  ExitCodes main_(int, const char**) override
  {
    //-------------------------------------------------------------
    // parameter handling
    //-------------------------------------------------------------
    String in = getStringOption_("in");
    String out = getStringOption_("out");
    //-------------------------------------------------------------
    // loading input
    //-------------------------------------------------------------
    PeakMap exp;
    FileHandler().loadExperiment(in, exp, {FileTypes::MZML}, log_type_);
    //-------------------------------------------------------------
    // init quant method
    //-------------------------------------------------------------
    // use at() instead of operator[]: an unexpected 'type' must not silently
    // insert a null entry into the map ('type' is guarded by setValidStrings_)
    const auto& quant_method = quant_methods_.at(getStringOption_("type"));
    // set the parameters for this method
    quant_method->setParameters(getParam_().copy(quant_method->getMethodName() + ":", true));
    //-------------------------------------------------------------
    // calculations
    //-------------------------------------------------------------
    Param extract_param(getParam_().copy("extraction:", true));
    IsobaricChannelExtractor channel_extractor(quant_method.get());
    channel_extractor.setParameters(extract_param);
    ConsensusMap consensus_map_raw, consensus_map_quant;
    // extract channel information
    channel_extractor.extractChannels(exp, consensus_map_raw);
    IsobaricQuantifier quantifier(quant_method.get());
    Param quant_param(getParam_().copy("quantification:", true));
    quantifier.setParameters(quant_param);
    quantifier.quantify(consensus_map_raw, consensus_map_quant);
    //-------------------------------------------------------------
    // writing output
    //-------------------------------------------------------------
    //annotate output with data processing info
    addDataProcessing_(consensus_map_quant, getProcessingInfo_(DataProcessing::QUANTITATION));
    // add filename references
    for (auto& column : consensus_map_quant.getColumnHeaders())
    {
      column.second.filename = in;
    }
    // drop features that carry neither IDs nor any reporter signal
    const auto empty_feat = [](const ConsensusFeature& c){return c.getPeptideIdentifications().empty() && c.metaValueExists("all_empty") && c.getMetaValue("all_empty") == "true";};
    consensus_map_quant.erase(remove_if(consensus_map_quant.begin(), consensus_map_quant.end(), empty_feat), consensus_map_quant.end());
    consensus_map_quant.ensureUniqueId();
    FileHandler().storeConsensusFeatures(out, consensus_map_quant);
    return EXECUTION_OK;
  }
};
/// Program entry point: all command-line handling is delegated to the TOPP framework.
int main(int argc, const char** argv)
{
  return TOPPIsobaricAnalyzer().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/IDConflictResolver.cpp | .cpp | 5,200 | 139 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hendrik Weisser, Lucia Espona $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <OpenMS/ANALYSIS/ID/IDConflictResolverAlgorithm.h>
#include <algorithm>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
// Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_IDConflictResolver IDConflictResolver
@brief Resolves ambiguous annotations of features with peptide identifications.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> potential predecessor tools </th>
<td VALIGN="middle" ROWSPAN=3> &rarr; IDConflictResolver &rarr;</td>
<th ALIGN = "center"> potential successor tools </th>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_IDMapper </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_TextExporter </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN="center" ROWSPAN=1> @ref TOPP_FeatureLinkerUnlabeled @n (or another feature grouping algorithm) </td>
<td VALIGN="middle" ALIGN="center" ROWSPAN=1> @ref TOPP_ProteinQuantifier </td>
</tr>
</table>
</CENTER>
The peptide identifications are filtered so that only one identification
with a single hit (with the best score) is associated to each feature. (If
two IDs have the same best score, either one of them may be selected.)
The filtered identifications are added to the vector of unassigned peptides
and are also reduced to a single best hit.
This step may be useful before applying @ref TOPP_ProteinQuantifier
"ProteinQuantifier", because features with ambiguous annotation are not
considered for the quantification.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_IDConflictResolver.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_IDConflictResolver.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPIDConflictResolver :
  public TOPPBase
{
public:
  TOPPIDConflictResolver() :
    TOPPBase("IDConflictResolver", "Resolves ambiguous annotations of features with peptide identifications")
  {
  }

protected:
  /// Register input/output files and the between-feature resolution mode.
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Input file (data annotated with identifications)");
    setValidFormats_("in", {"featureXML", "consensusXML"});
    registerOutputFile_("out", "<file>", "", "Output file (data with one peptide identification per feature)");
    setValidFormats_("out", {"featureXML", "consensusXML"});
    registerStringOption_("resolve_between_features", "<resolve_between_features>", "off", "A map may contain multiple features with both identical (possibly modified i.e. not stripped) sequence and charge state. The feature with the 'highest intensity' is very likely the most reliable one. When switched on, the filter removes the sequence annotation from the lower intensity features, thereby resolving the multiplicity. Only the most reliable features for each (possibly modified i.e. not stripped) sequence maintain annotated with this peptide sequence.", false);
    setValidStrings_("resolve_between_features", {"off", "highest_intensity"});
  }

  /// Load the input map (feature or consensus), resolve ID conflicts, and store the result.
  ExitCodes main_(int, const char **) override
  {
    const String in = getStringOption_("in");
    const String out = getStringOption_("out");
    // whether lower-intensity duplicates of a sequence/charge should lose their annotation
    const bool resolve_by_intensity = (getStringOption_("resolve_between_features") == "highest_intensity");

    if (FileHandler::getType(in) == FileTypes::FEATUREXML)
    {
      FeatureMap feature_map;
      FileHandler().loadFeatures(in, feature_map, {FileTypes::FEATUREXML});
      IDConflictResolverAlgorithm::resolve(feature_map);
      if (resolve_by_intensity)
      {
        IDConflictResolverAlgorithm::resolveBetweenFeatures(feature_map);
      }
      addDataProcessing_(feature_map, getProcessingInfo_(DataProcessing::FILTERING));
      FileHandler().storeFeatures(out, feature_map, {FileTypes::FEATUREXML});
    }
    else // consensusXML input
    {
      ConsensusMap consensus_map;
      FileHandler().loadConsensusFeatures(in, consensus_map, {FileTypes::CONSENSUSXML});
      IDConflictResolverAlgorithm::resolve(consensus_map);
      if (resolve_by_intensity)
      {
        IDConflictResolverAlgorithm::resolveBetweenFeatures(consensus_map);
      }
      addDataProcessing_(consensus_map, getProcessingInfo_(DataProcessing::FILTERING));
      FileHandler().storeConsensusFeatures(out, consensus_map, {FileTypes::CONSENSUSXML});
    }
    return EXECUTION_OK;
  }
};
/// Program entry point: all command-line handling is delegated to the TOPP framework.
int main(int argc, const char ** argv)
{
  return TOPPIDConflictResolver().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/FeatureFinderCentroided.cpp | .cpp | 12,656 | 366 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Clemens Groepl, Marc Sturm $
// --------------------------------------------------------------------------
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/KERNEL/RangeUtils.h>
#include <OpenMS/FEATUREFINDER/FeatureFinderAlgorithmPicked.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/IONMOBILITY/IMDataConverter.h>
#include <OpenMS/SYSTEM/File.h>
#include <OpenMS/PROCESSING/FEATURE/FeatureOverlapFilter.h>
#include <cmath>
#include <limits>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_FeatureFinderCentroided FeatureFinderCentroided
@brief The feature detection application for quantitation (centroided).
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=3> → FeatureFinderCentroided →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_PeakPickerHiRes </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_FeatureLinkerUnlabeled @n (or another feature grouping tool) </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_SeedListGenerator </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_MapAlignerPoseClustering @n (or another alignment tool) </td>
</tr>
</table>
</CENTER>
Reference:\n
Weisser <em>et al.</em>: <a href="https://doi.org/10.1021/pr300992u">An automated pipeline for high-throughput label-free quantitative proteomics</a> (J. Proteome Res., 2013, PMID: 23391308).
This module identifies "features" in a LC/MS map. By feature, we understand a peptide in an MS sample that
reveals a characteristic isotope distribution over time. The algorithm
computes positions in RT and m/z dimension and a charge estimate
of each peptide.
The algorithm identifies pronounced regions of the data around so-called <tt>seeds</tt>.
The user can provide a list of seeds (e.g. from an identification run of MS/MS spectra) or the algorithm can compute seeds itself.
In the next step, we iteratively fit a model of the isotope profile and the retention time to
the initial seed data points. Data points with a low probability under this model are removed from the
feature region. The intensity of the feature is then given by the sum of the data points included
in its regions.
How to find suitable parameters and details of the different algorithms implemented are described
in the "TOPP tutorial" (on https://openms.readthedocs.io/).
Specialized tools are available for some experimental techniques: @ref TOPP_IsobaricAnalyzer.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_FeatureFinderCentroided.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_FeatureFinderCentroided.html
For the parameters of the algorithm section see the algorithms documentation: @n
@ref OpenMS::FeatureFinderAlgorithmPicked "centroided" @n
In the following table you can find example values of the most important parameters for
different instrument types. @n These parameters are not valid for all instruments of that type,
but can be used as a starting point for finding suitable parameters.
<b>'centroided' algorithm</b>:
<table>
<tr>
<td> </td>
<td><b>Q-TOF</b></td>
<td><b>LTQ Orbitrap</b></td>
</tr>
<tr>
<td><b>intensity:bins</b></td>
<td>10</td>
<td>10</td>
</tr>
<tr>
<td><b>mass_trace:mz_tolerance</b></td>
<td>0.02</td>
<td>0.004</td>
</tr>
<tr>
<td><b>isotopic_pattern:mz_tolerance</b></td>
<td>0.04</td>
<td>0.005</td>
</tr>
</table>
For the @em centroided algorithm centroided data is needed. In order to create centroided data from profile data use the @ref TOPP_PeakPickerHiRes.
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
/**
  @brief TOPP tool wrapper around FeatureFinderAlgorithmPicked.

  Loads centroided MS1 spectra, optionally splits them by FAIMS compensation
  voltage (CV), runs the picked feature finder per CV group, optionally merges
  features found at different CVs, and writes a featureXML file.
*/
class TOPPFeatureFinderCentroided :
  public TOPPBase
{
public:
  TOPPFeatureFinderCentroided() :
    TOPPBase("FeatureFinderCentroided",
             "Detects two-dimensional features in LC-MS data.",
             true,
             {
               Citation{ "Sturm M",
                         "A novel feature detection algorithm for centroided data",
                         "Dissertation, 2010-09-15, p.37 ff",
                         "https://publikationen.uni-tuebingen.de/xmlui/bitstream/handle/10900/49453/pdf/Dissertation_Marc_Sturm.pdf"
               },
               Citation {"Weisser H", "An automated pipeline for high-throughput label-free quantitative proteomics",
                         "J. Proteome Res., 2013, PMID: 23391308", "https://doi.org/10.1021/pr300992u"}
             })
  {}

protected:
  /// Registers input/output files, an optional seed list, and the FAIMS merge switch.
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "input file");
    setValidFormats_("in", ListUtils::create<String>("mzML"));
    registerOutputFile_("out", "<file>", "", "output file");
    setValidFormats_("out", ListUtils::create<String>("featureXML"));
    registerInputFile_("seeds", "<file>", "", "User specified seed list", false);
    setValidFormats_("seeds", ListUtils::create<String>("featureXML"));
    addEmptyLine_();
    registerStringOption_("faims_merge_features", "<true/false>", "true",
                          "For FAIMS data with multiple compensation voltages: Merge features representing the same analyte "
                          "detected at different CV values into a single feature. Only features with DIFFERENT FAIMS CV values "
                          "are merged (same CV = different analytes). Has no effect on non-FAIMS data.", false);
    setValidStrings_("faims_merge_features", {"true", "false"});
    addEmptyLine_();
    registerSubsection_("algorithm", "Algorithm section");
  }

  /// Provides the defaults of the wrapped algorithm for the "algorithm" INI subsection.
  Param getSubsectionDefaults_(const String& ) const override
  {
    return FeatureFinderAlgorithmPicked().getDefaultParameters();
  }

  /// Main workflow: load -> (FAIMS split) -> feature finding -> (FAIMS merge) -> store.
  ExitCodes main_(int, const char**) override
  {
    //input file names
    String in = getStringOption_("in");
    String out = getStringOption_("out");

    // prevent loading of fragment spectra (only MS level 1 is read)
    PeakFileOptions options;
    options.setMSLevels(vector<Int>(1, 1));

    // filter out zero (and negative) intensities
    using RP_TYPE = DRange<1>::PositionType;
    options.setIntensityRange({std::numeric_limits<RP_TYPE>::min(), RP_TYPE::maxPositive()});

    // reading input data
    FileHandler f;
    f.getOptions() = options;
    PeakMap exp;
    f.loadExperiment(in, exp, {FileTypes::MZML}, log_type_);
    exp.updateRanges();
    if (exp.getSpectra().empty())
    {
      throw OpenMS::Exception::FileEmpty(__FILE__, __LINE__, __FUNCTION__, "Error: No MS1 spectra in input file.");
    }

    // determine type of spectral data (profile or centroided);
    // the type of the first spectrum is taken as representative for the whole run
    SpectrumSettings::SpectrumType spectrum_type = exp[0].getType();
    if (spectrum_type == SpectrumSettings::SpectrumType::PROFILE)
    {
      if (!getFlag_("force"))
      {
        throw OpenMS::Exception::IllegalArgument(__FILE__, __LINE__, __FUNCTION__, "Error: Profile data provided but centroided spectra expected. To enforce processing of the data set the -force flag.");
      }
    }

    //load seeds (optional; used to steer feature detection towards identified positions)
    FeatureMap seeds;
    if (!getStringOption_("seeds").empty())
    {
      FileHandler().loadFeatures(getStringOption_("seeds"), seeds, {FileTypes::FEATUREXML});
    }

    // get parameters specific for the feature finder
    Param feafi_param = getParam_().copy("algorithm:", true);
    writeDebug_("Parameters passed to FeatureFinder", feafi_param, 3);

    //-------------------------------------------------------------
    // Split by FAIMS CV (returns single NaN-keyed element for non-FAIMS data)
    //-------------------------------------------------------------
    auto faims_groups = IMDataConverter::splitByFAIMSCV(std::move(exp));
    // non-FAIMS data yields exactly one group keyed with NaN
    const bool has_faims = faims_groups.size() > 1 || !std::isnan(faims_groups[0].first);
    if (has_faims)
    {
      OPENMS_LOG_INFO << "FAIMS data detected with " << faims_groups.size() << " compensation voltage(s)." << endl;
    }

    // A map for the resulting features (accumulated over all CV groups)
    FeatureMap features;

    // Process each FAIMS CV group (or single group for non-FAIMS data)
    for (auto& [group_cv, faims_group] : faims_groups)
    {
      if (has_faims)
      {
        OPENMS_LOG_INFO << "Processing FAIMS CV group: " << group_cv << " V (" << faims_group.size() << " spectra)" << endl;
      }

      // Filter seeds for this FAIMS CV group (if FAIMS data and seeds provided)
      FeatureMap seeds_cv;
      if (has_faims && !seeds.empty())
      {
        for (const auto& seed : seeds)
        {
          if (seed.metaValueExists(Constants::UserParam::FAIMS_CV))
          {
            double seed_cv = seed.getMetaValue(Constants::UserParam::FAIMS_CV);
            // 0.01 V tolerance for floating-point CV comparison
            if (std::abs(seed_cv - group_cv) < 0.01)
            {
              seeds_cv.push_back(seed);
            }
          }
          else
          {
            // Seeds without FAIMS_CV annotation - include in all groups (backward compatibility)
            seeds_cv.push_back(seed);
          }
        }
      }
      else
      {
        seeds_cv = seeds;
      }

      // Setup FeatureFinder for this group
      FeatureFinderAlgorithmPicked ff;
      //ff.setLogType(log_type_); TODO
      // A map for features from this group
      FeatureMap features_cv;
      // Apply the feature finder
      ff.run(faims_group, features_cv, feafi_param, seeds_cv);

      // Annotate features with FAIMS CV (if FAIMS data) and add to results
      for (auto& feat : features_cv)
      {
        if (has_faims)
        {
          feat.setMetaValue(Constants::UserParam::FAIMS_CV, group_cv);
        }
        features.push_back(feat);
      }
    }

    if (has_faims)
    {
      OPENMS_LOG_INFO << "Combined " << features.size() << " features from all FAIMS CV groups." << endl;
      // Optionally merge features representing the same analyte at different CV values
      if (getStringOption_("faims_merge_features") == "true")
      {
        Size before_merge = features.size();
        // NOTE(review): 5.0 / 0.05 look like RT (s) and m/z merge tolerances - confirm
        // against FeatureOverlapFilter::mergeFAIMSFeatures documentation
        FeatureOverlapFilter::mergeFAIMSFeatures(features, 5.0, 0.05);
        OPENMS_LOG_INFO << "FAIMS feature merge: " << before_merge << " -> " << features.size()
                        << " features (merged " << (before_merge - features.size()) << ")" << endl;
      }
    }

    // Set primary MS run path and ensure unique IDs
    if (getFlag_("test"))
    {
      // if test mode set, add file without path so we can compare it
      features.setPrimaryMSRunPath({"file://" + File::basename(in)});
    }
    else
    {
      features.setPrimaryMSRunPath({in});
    }
    features.ensureUniqueId();
    features.applyMemberFunction(&UniqueIdInterface::setUniqueId);

    // DEBUG: dump all meta values of annotated features
    if (debug_level_ > 10)
    {
      for (const Feature& ft : features)
      {
        if (!ft.isMetaEmpty())
        {
          vector<String> keys;
          ft.getKeys(keys);
          OPENMS_LOG_INFO << "Feature " << ft.getUniqueId() << endl;
          for (Size i = 0; i < keys.size(); i++)
          {
            OPENMS_LOG_INFO << "  " << keys[i] << " = " << ft.getMetaValue(keys[i]) << endl;
          }
        }
      }
    }

    //-------------------------------------------------------------
    // writing files
    //-------------------------------------------------------------
    //annotate output with data processing info
    addDataProcessing_(features, getProcessingInfo_(DataProcessing::QUANTITATION));

    // write features to user specified output file
    FileHandler map_file;

    // Remove detailed convex hull information and subordinate features
    // (unless requested otherwise) to reduce file size of feature files
    // unless debugging is turned on.
    if (debug_level_ < 5)
    {
      for (Feature& ft : features)
      {
        ft.getConvexHull().expandToBoundingBox();
        for (Size i = 0; i < ft.getConvexHulls().size(); ++i)
        {
          ft.getConvexHulls()[i].expandToBoundingBox();
        }
        ft.getSubordinates().clear();
      }
    }
    map_file.storeFeatures(out, features, {FileTypes::FEATUREXML});

    return EXECUTION_OK;
  }
};
/// Program entry point: all command-line handling is delegated to the TOPP framework.
int main(int argc, const char** argv)
{
  return TOPPFeatureFinderCentroided().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/MapNormalizer.cpp | .cpp | 4,153 | 152 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: $
// $Authors: Marc Sturm $
// --------------------------------------------------------------------------
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_MapNormalizer MapNormalizer
@brief Normalizes peak intensities to the percentage of the maximum intensity in the HPLC-MS map.
<center>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=2> → MapNormalizer →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_PeakPickerHiRes </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> any tool operating on MS peak data @n (in mzML format)</td>
</tr>
</table>
</center>
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_MapNormalizer.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_MapNormalizer.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
/**
  @brief TOPP tool that scales MS1 peak intensities to percent of the map's maximum.

  Every MS1 peak intensity is divided by (max_intensity / 100), so the most
  intense peak becomes 100. Fragment spectra (MS level >= 2) are left untouched.
*/
class TOPPMapNormalizer :
  public TOPPBase
{
public:
  TOPPMapNormalizer() :
    TOPPBase("MapNormalizer", "Normalizes peak intensities in an MS run.")
  {
  }

protected:
  /// Registers the mzML input and output file parameters.
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "input file ");
    setValidFormats_("in", ListUtils::create<String>("mzML"));
    registerOutputFile_("out", "<file>", "", "output file ");
    setValidFormats_("out", ListUtils::create<String>("mzML"));
  }

  /// Loads the experiment, rescales MS1 intensities, and stores the result.
  ExitCodes main_(int, const char **) override
  {
    //-------------------------------------------------------------
    // parameter handling
    //-------------------------------------------------------------
    String in = getStringOption_("in");
    String out = getStringOption_("out");

    //-------------------------------------------------------------
    // loading input
    //-------------------------------------------------------------
    PeakMap exp;
    FileHandler f;
    f.loadExperiment(in, exp, {FileTypes::MZML});

    //-------------------------------------------------------------
    // calculations
    //-------------------------------------------------------------
    // determine the global maximum peak intensity
    exp.updateRanges();
    const double max_intensity = exp.getMaxIntensity();
    if (max_intensity <= 0.0)
    {
      // Guard against division by zero: an empty or all-zero map would
      // otherwise produce inf/NaN intensities in the output.
      writeLogWarn_("Warning: maximum peak intensity is zero; skipping normalization.");
    }
    else
    {
      const double scale = max_intensity / 100.0; // divisor mapping the maximum to 100
      for (MSSpectrum& ms : exp)
      {
        if (ms.getMSLevel() < 2) // only MS1 spectra are normalized
        {
          for (Peak1D& pk : ms)
          {
            pk.setIntensity(pk.getIntensity() / scale);
          }
        }
      }
    }

    /// @todo add chromatogram support for normalization, e.g. for MRM stuff (Andreas)
    /*
    vector<MSChromatogram > chroms = exp.getChromatograms();
    double sum(0);
    for (vector<MSChromatogram >::iterator it = chroms.begin(); it != chroms.end(); ++it)
    {
      for (MSChromatogram::Iterator it2 = it->begin(); it2 != it->end(); ++it2)
      {
        sum += it2->getIntensity();
      }
    }
    for (vector<MSChromatogram >::iterator it = chroms.begin(); it != chroms.end(); ++it)
    {
      for (MSChromatogram::Iterator it2 = it->begin(); it2 != it->end(); ++it2)
      {
        it2->setIntensity(it2->getIntensity() / sum * 1000000.0);
      }
    }
    exp.setChromatograms(chroms);
    */

    //-------------------------------------------------------------
    // writing output
    //-------------------------------------------------------------
    //annotate output with data processing info
    addDataProcessing_(exp, getProcessingInfo_(DataProcessing::NORMALIZATION));
    f.storeExperiment(out, exp, {FileTypes::MZML});

    return EXECUTION_OK;
  }
};
/// Program entry point: all command-line handling is delegated to the TOPP framework.
int main(int argc, const char ** argv)
{
  return TOPPMapNormalizer().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/ProteomicsLFQ.cpp | .cpp | 78,877 | 1,830 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/BasicProteinInferenceAlgorithm.h>
#include <OpenMS/ANALYSIS/ID/BayesianProteinInferenceAlgorithm.h>
#include <OpenMS/ANALYSIS/ID/FalseDiscoveryRate.h>
#include <OpenMS/ANALYSIS/ID/IDBoostGraph.h>
#include <OpenMS/ANALYSIS/ID/IDConflictResolverAlgorithm.h>
#include <OpenMS/ANALYSIS/ID/IDScoreSwitcherAlgorithm.h>
#include <OpenMS/ANALYSIS/ID/PeptideIndexing.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/ConsensusMapNormalizerAlgorithmMedian.h>
#include <OpenMS/ANALYSIS/ID/ConsensusMapMergerAlgorithm.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithmQT.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentAlgorithmIdentification.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentAlgorithmTreeGuided.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentTransformer.h>
#include <OpenMS/ANALYSIS/QUANTITATION/PeptideAndProteinQuant.h>
#include <OpenMS/ANALYSIS/QUANTITATION/DDAWorkflowCommons.h>
#include <OpenMS/APPLICATIONS/MapAlignerBase.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/DATASTRUCTURES/CalibrationData.h>
#include <OpenMS/PROCESSING/CALIBRATION/InternalCalibration.h>
#include <OpenMS/PROCESSING/CALIBRATION/MZTrafoModel.h>
#include <OpenMS/PROCESSING/CALIBRATION/PrecursorCorrection.h>
#include <OpenMS/FEATUREFINDER/FeatureFindingMetabo.h>
#include <OpenMS/FEATUREFINDER/MassTraceDetection.h>
#include <OpenMS/PROCESSING/ID/IDFilter.h>
#include <OpenMS/PROCESSING/FILTERING/ThresholdMower.h>
#include <OpenMS/FORMAT/ExperimentalDesignFile.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FORMAT/MSstatsFile.h>
#include <OpenMS/FORMAT/MzTabFile.h>
#include <OpenMS/FORMAT/PeakTypeEstimator.h>
#include <OpenMS/FORMAT/TriqlerFile.h>
#include <OpenMS/KERNEL/ConversionHelper.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/KERNEL/MassTrace.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/METADATA/ExperimentalDesign.h>
#include <OpenMS/METADATA/SpectrumMetaDataLookup.h>
#include <OpenMS/SYSTEM/File.h>
#include <OpenMS/FEATUREFINDER/FeatureFinderIdentificationAlgorithm.h>
#include <OpenMS/FEATUREFINDER/FeatureFinderMultiplexAlgorithm.h>
#include <OpenMS/PROCESSING/CENTROIDING/PeakPickerHiRes.h>
#include <OpenMS/ML/SVM/SimpleSVM.h>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_ProteomicsLFQ ProteomicsLFQ
ProteomicsLFQ performs label-free quantification of peptides and proteins. @n
Input: @n
- Spectra in mzML format
- Identifications in idXML or mzIdentML format with posterior error probabilities
as score type.
To generate those we suggest to run:
1. PeptideIndexer to annotate target and decoy information.
2. PSMFeatureExtractor to annotate percolator features.
3. PercolatorAdapter tool (score_type = 'q-value', -post-processing-tdc)
4. IDFilter (pep:score = 0.01) to filter PSMs at 1% FDR
- An experimental design file: @n
(see @ref OpenMS::ExperimentalDesign "ExperimentalDesign" for details) @n
- A protein database in with appended decoy sequences in FASTA format @n
(e.g., generated by the OpenMS DecoyDatabase tool) @n
Processing: @n
ProteomicsLFQ has different methods to extract features: ID-based (targeted only), or both ID-based and untargeted.
1. The first method uses targeted feature dectection using RT and m/z information derived from identification data to extract features.
Note: only identifications found in a particular MS run are used to extract features in the same run.
No transfer of IDs (match between runs) is performed.
2. The second method adds untargeted feature detection to obtain quantities from unidentified features.
Transfer of Ids (match between runs) is performed by transfering feature identifications to coeluting, unidentified features with similar mass and RT in other runs.
@b FAIMS (Field Asymmetric Ion Mobility Spectrometry): @n
FAIMS data is automatically detected based on compensation voltage (CV) annotations in the mzML file.
The data is split by CV and processed separately for each voltage group during feature detection.
Features representing the same analyte detected at different CV values are merged automatically.
The merged features are then aligned and linked across runs based on RT and m/z.
No special preparation of the input mzML file is required.
Normalization: @n
- For feature-intensity-based quantification with multiple runs, ProteomicsLFQ automatically applies median normalization
to the consensus features (using simple median scaling).
- Normalization is DISABLED when MSstats output (-out_msstats) or Triqler output (-out_triqler) is requested,
as these tools perform their own normalization.
- Normalization is also DISABLED for spectral counting quantification.
Output:
- mzTab file with analysis results
- MSstats file with analysis results for statistical downstream analysis in MSstats
- ConsensusXML file for visualization and further processing in OpenMS
Potential scripts to perform the search can be found under src/tests/topp/ProteomicsLFQTestScripts
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_ProteomicsLFQ.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_ProteomicsLFQ.html
**/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class ProteomicsLFQ :
public TOPPBase
{
public:
  /// Default constructor: registers the tool's name and description with the TOPP framework.
  ProteomicsLFQ() :
    TOPPBase("ProteomicsLFQ", "A standard proteomics LFQ pipeline.")
  {
  }
protected:
void registerOptionsAndFlags_() override
{
registerInputFileList_("in", "<file list>", StringList(), "Input files");
setValidFormats_("in", ListUtils::create<String>("mzML"));
registerInputFileList_("ids", "<file list>", StringList(),
"Identifications filtered at PSM level (e.g., q-value < 0.01)."
"And annotated with PEP as main score.\n"
"We suggest using:\n"
"1. PSMFeatureExtractor to annotate percolator features.\n"
"2. PercolatorAdapter tool (score_type = 'q-value', -post-processing-tdc)\n"
"3. IDFilter (pep:score = 0.05)\n"
"To obtain well calibrated PEPs and an initial reduction of PSMs\n"
"ID files must be provided in same order as spectra files.");
setValidFormats_("ids", ListUtils::create<String>("idXML,mzId"));
registerInputFile_("design", "<file>", "", "design file", false);
setValidFormats_("design", ListUtils::create<String>("tsv"));
registerInputFile_("fasta", "<file>", "", "fasta file", false);
setValidFormats_("fasta", ListUtils::create<String>("fasta"));
registerOutputFile_("out", "<file>", "", "output mzTab file");
setValidFormats_("out", ListUtils::create<String>("mzTab"));
registerOutputFile_("out_msstats", "<file>", "", "output MSstats input file", false, false);
setValidFormats_("out_msstats", ListUtils::create<String>("csv"));
registerOutputFile_("out_triqler", "<file>", "", "output Triqler input file", false, false);
setValidFormats_("out_triqler", ListUtils::create<String>("tsv"));
registerOutputFile_("out_cxml", "<file>", "", "output consensusXML file", false, false);
setValidFormats_("out_cxml", ListUtils::create<String>("consensusXML"));
registerDoubleOption_("proteinFDR", "<threshold>", 0.05, "Protein FDR threshold (0.05=5%).", false);
setMinFloat_("proteinFDR", 0.0);
setMaxFloat_("proteinFDR", 1.0);
//TODO test rigorously
registerStringOption_("picked_proteinFDR", "<choice>", "false", "Use a picked protein FDR?", false);
setValidStrings_("picked_proteinFDR", {"true","false"});
registerDoubleOption_("psmFDR", "<threshold>", 1.0, "FDR threshold for sub-protein level (e.g. 0.05=5%). Use -FDR_type to choose the level. Cutoff is applied at the highest level."
" If Bayesian inference was chosen, it is equivalent with a peptide FDR", false);
setMinFloat_("psmFDR", 0.0);
setMaxFloat_("psmFDR", 1.0);
registerStringOption_("FDR_type", "<threshold>", "PSM", "Sub-protein FDR level. PSM, PSM+peptide (best PSM q-value).", false);
setValidStrings_("FDR_type", {"PSM", "PSM+peptide"});
//TODO expose all parameters of the inference algorithms (e.g. aggregation methods etc.)?
registerStringOption_("protein_inference", "<option>", "aggregation",
"Infer proteins:\n"
"aggregation = aggregates all peptide scores across a protein (using the best score) \n"
"bayesian = computes a posterior probability for every protein based on a Bayesian network.\n"
" Note: 'bayesian' only uses and reports the best PSM per peptide.",
false, true);
setValidStrings_("protein_inference", ListUtils::create<String>("aggregation,bayesian"));
registerStringOption_("protein_quantification", "<option>", "unique_peptides",
"Quantify proteins based on:\n"
"unique_peptides = use peptides mapping to single proteins or a group of indistinguishable proteins"
"(according to the set of experimentally identified peptides).\n"
"strictly_unique_peptides = use peptides mapping to a unique single protein only.\n"
"shared_peptides = use shared peptides only for its best group (by inference score)", false, true);
setValidStrings_("protein_quantification", ListUtils::create<String>("unique_peptides,strictly_unique_peptides,shared_peptides"));
registerStringOption_("quantification_method", "<option>",
"feature_intensity",
"feature_intensity: MS1 signal.\n"
"spectral_counting: PSM counts.", false, false);
setValidStrings_("quantification_method", ListUtils::create<String>("feature_intensity,spectral_counting"));
registerStringOption_("targeted_only", "<option>", "false",
"true: Only ID based quantification.\n"
"false: include unidentified features so they can be linked to identified ones (=match between runs).", false, false);
setValidStrings_("targeted_only", ListUtils::create<String>("true,false"));
registerDoubleOption_("feature_with_id_min_score", "<p-value>", 0.0, "The minimum probability (e.g.: 0.25) an identified (=id targeted) feature must have to be kept for alignment and linking (0=no filter).", false, true);
setMinFloat_("feature_with_id_min_score", 0.0);
setMaxFloat_("feature_with_id_min_score", 1.0);
registerDoubleOption_("feature_without_id_min_score", "<p-value>", 0.0, "The minimum probability (e.g.: 0.75) an unidentified feature must have to be kept for alignment and linking (0=no filter).", false, true);
setMinFloat_("feature_without_id_min_score", 0.0);
setMaxFloat_("feature_without_id_min_score", 1.0);
registerStringOption_("mass_recalibration", "<option>", "false", "Mass recalibration.", false, true);
setValidStrings_("mass_recalibration", ListUtils::create<String>("true,false"));
registerStringOption_("alignment_order", "<option>", "star", "If star, aligns all maps to the reference with most IDs.", false, true);
setValidStrings_("alignment_order", ListUtils::create<String>("star")); // TODO: fix and reenable tree guided
registerStringOption_("keep_feature_top_psm_only", "<option>", "true", "If false, also keeps lower ranked PSMs that have the top-scoring"
" sequence as a candidate per feature in the same file.", false, true);
setValidStrings_("keep_feature_top_psm_only", ListUtils::create<String>("true,false"));
registerTOPPSubsection_("Seeding", "Parameters for seeding of untargeted features");
registerDoubleOption_("Seeding:intThreshold", "<threshold>", 1e4, "Peak intensity threshold applied in seed detection.", false, true);
registerStringOption_("Seeding:charge", "<minChg:maxChg>", "2:5", "Charge range considered for untargeted feature seeds.", false, true); //TODO infer from IDs?
registerDoubleOption_("Seeding:traceRTTolerance", "<tolerance(sec)>", 3.0, "Combines all spectra in the tolerance window to stabilize identification of isotope patterns. Controls sensitivity (low value) vs. specificity (high value) of feature seeds.", false, true); //TODO infer from average MS1 cycle time?
/// TODO: think about export of quality control files (qcML?)
Param pp_defaults = PeakPickerHiRes().getDefaults();
for (const auto& s : {"report_FWHM", "report_FWHM_unit", "SignalToNoise:win_len", "SignalToNoise:bin_count", "SignalToNoise:min_required_elements", "SignalToNoise:write_log_messages"} )
{
pp_defaults.addTag(s, "advanced");
}
Param ffi_defaults = FeatureFinderIdentificationAlgorithm().getDefaults();
ffi_defaults.setValue("svm:samples", 10000); // restrict number of samples for training
ffi_defaults.setValue("svm:log2_C", DoubleList({-2.0, 5.0, 15.0}));
ffi_defaults.setValue("svm:log2_gamma", DoubleList({-3.0, -1.0, 2.0}));
ffi_defaults.setValue("svm:min_prob", 0.9); // keep only feature candidates with > 0.9 probability of correctness
// hide entries
for (const auto& s : {"svm:samples", "svm:log2_C", "svm:log2_gamma", "svm:min_prob", "svm:no_selection", "svm:xval_out", "svm:kernel", "svm:xval", "candidates_out", "extract:n_isotopes", "model:type"} )
{
ffi_defaults.addTag(s, "advanced");
}
ffi_defaults.remove("detect:peak_width"); // set from data
Param ma_defaults = MapAlignmentAlgorithmTreeGuided().getDefaults();
ma_defaults.setValue("align_algorithm:max_rt_shift", 0.1);
ma_defaults.setValue("align_algorithm:use_unassigned_peptides", "false");
ma_defaults.setValue("align_algorithm:use_feature_rt", "true");
// hide entries
for (const auto& s : {"align_algorithm:use_unassigned_peptides", "align_algorithm:use_feature_rt",
"align_algorithm:score_cutoff", "align_algorithm:min_score"} )
{
ma_defaults.addTag(s, "advanced");
}
//Param fl_defaults = FeatureGroupingAlgorithmKD().getDefaults();
Param fl_defaults = FeatureGroupingAlgorithmQT().getDefaults();
fl_defaults.setValue("distance_MZ:max_difference", 10.0);
fl_defaults.setValue("distance_MZ:unit", "ppm");
fl_defaults.setValue("distance_MZ:weight", 5.0);
fl_defaults.setValue("distance_intensity:weight", 0.1);
fl_defaults.setValue("use_identifications", "true");
fl_defaults.remove("distance_RT:max_difference"); // estimated from data
for (const auto& s : {"distance_MZ:weight", "distance_intensity:weight", "use_identifications", "ignore_charge", "ignore_adduct"} )
{
fl_defaults.addTag(s, "advanced");
}
Param pq_defaults = PeptideAndProteinQuant().getDefaults();
// overwrite algorithm default, so we export everything (important for copying back MSstats results)
pq_defaults.setValue("top:include_all", "true");
pq_defaults.addTag("top:include_all", "advanced");
// combine parameters of the individual algorithms
Param combined;
combined.insert("Centroiding:", pp_defaults);
combined.insert("PeptideQuantification:", ffi_defaults);
combined.insert("Alignment:", ma_defaults);
combined.insert("Linking:", fl_defaults);
combined.insert("ProteinQuantification:", pq_defaults);
registerFullParam_(combined);
}
/**
  @brief Loads a raw MS file, centroids MS1 spectra and corrects precursor m/z values.

  Loads the mzML file @p mz_file, removes MS2 peak data (only precursor information is kept),
  centroids the spectra with PeakPickerHiRes (configured from the "Centroiding:" subsection of
  the tool parameters), and then shifts each precursor m/z to the highest-intensity MS1 peak
  within an absolute 0.01 m/z window. Summary statistics (median / MAD in ppm) of the applied
  corrections are written to the log.

  @param mz_file Path to the input mzML file (loaded from disk inside this method)
  @param ms_centroided Output: the centroided experiment (MS1 centroided, MS2 peaks removed)
  @return EXECUTION_OK on success, INCOMPATIBLE_INPUT_DATA if the file contains no spectra
*/
ExitCodes centroidAndCorrectPrecursors_(const String & mz_file, MSExperiment & ms_centroided)
{
  Param pp_param = getParam_().copy("Centroiding:", true);
  writeDebug_("Parameters passed to PeakPickerHiRes algorithm", pp_param, 3);
  // create scope for raw data, so it is properly freed (Note: clear() is not sufficient)
  // load raw file
  PeakMap ms_raw;
  FileHandler().loadExperiment(mz_file, ms_raw, {FileTypes::MZML}, log_type_);
  // drop float/int data arrays attached to spectra; they are not needed downstream
  ms_raw.clearMetaDataArrays();
  ms_raw.updateRanges();
  if (ms_raw.empty())
  {
    OPENMS_LOG_WARN << "The given file does not contain any spectra.";
    return INCOMPATIBLE_INPUT_DATA;
  }
  // remove MS2 peak data and check if spectra are sorted
  //TODO can we load just MS1 or do we need precursor information?
  for (auto & spec : ms_raw)
  {
    if (spec.getMSLevel() == 2)
    {
      spec.clear(false); // delete MS2 peaks
    }
    if (!spec.isSorted())
    {
      // peak picking and precursor correction below require m/z-sorted peaks
      spec.sortByPosition();
      writeLogInfo_("Info: Sorted peaks by m/z.");
    }
  }
  //-------------------------------------------------------------
  // Centroiding of MS1
  //-------------------------------------------------------------
  PeakPickerHiRes pp;
  pp.setLogType(log_type_);
  pp.setParameters(pp_param);
  pp.pickExperiment(ms_raw, ms_centroided, true);
  //-------------------------------------------------------------
  // HighRes Precursor Mass Correction
  //-------------------------------------------------------------
  std::vector<double> deltaMZs, mzs, rts;
  std::set<Size> corrected_to_highest_intensity_peak = PrecursorCorrection::correctToHighestIntensityMS1Peak(
    ms_centroided,
    0.01, // check if we can estimate this from data (here it is given in m/z not ppm)
    false, // is ppm = false
    deltaMZs,
    mzs,
    rts
  );
  writeLogInfo_("Info: Corrected " + String(corrected_to_highest_intensity_peak.size()) + " precursors.");
  if (!deltaMZs.empty())
  {
    // report correction statistics in ppm (signed and absolute) to judge calibration quality
    vector<double> deltaMZs_ppm, deltaMZs_ppmabs;
    for (Size i = 0; i != deltaMZs.size(); ++i)
    {
      deltaMZs_ppm.push_back(Math::getPPM(mzs[i], mzs[i] + deltaMZs[i]));
      deltaMZs_ppmabs.push_back(Math::getPPMAbs(mzs[i], mzs[i] + deltaMZs[i]));
    }
    double median = Math::median(deltaMZs_ppm.begin(), deltaMZs_ppm.end());
    double MAD = Math::MAD(deltaMZs_ppm.begin(), deltaMZs_ppm.end(), median);
    double median_abs = Math::median(deltaMZs_ppmabs.begin(), deltaMZs_ppmabs.end());
    double MAD_abs = Math::MAD(deltaMZs_ppmabs.begin(), deltaMZs_ppmabs.end(), median_abs);
    writeLogInfo_("Precursor correction:\n  median        = "
      + String(median) + " ppm  MAD = " + String(MAD)
      + "\n  median (abs.) = " + String(median_abs)
      + " ppm  MAD = " + String(MAD_abs));
  }
  return EXECUTION_OK;
}
// aligns the feature maps
/**
  @brief Aligns the retention times of multiple feature maps and fits transformation models.

  For the "star" alignment order, MapAlignmentAlgorithmIdentification aligns all maps to a
  reference determined from the data; otherwise MapAlignmentAlgorithmTreeGuided is used.
  After alignment, a model (default "b_spline") is fitted to each transformation that has
  more than 10 data points, and per-map RT difference percentiles before/after alignment
  are logged.

  @param feature_maps Feature maps to align (only aligned if there are at least two)
  @param transformations Output: one RT transformation per feature map
  @return Maximum RT shift (seconds) after transformation, clamped to [120, 600];
          0 if fewer than two maps were given
  @throws Exception::MissingInformation if alignment fails and -force is not set
*/
double align_(
  vector<FeatureMap> & feature_maps,
  vector<TransformationDescription>& transformations
)
{
  if (feature_maps.size() > 1) // do we have several maps to align / link?
  {
    Param mat_param = getParam_().copy("Alignment:", true);
    writeDebug_("Parameters passed to MapAlignmentAlgorithms", mat_param, 3);

    Param model_params = MapAlignerBase::getModelDefaults("b_spline");
    String model_type = model_params.getValue("type").toString();
    model_params = model_params.copy(model_type + ":", true);

    try
    {
      if (getStringOption_("alignment_order") == "star")
      {
        // Determine reference from data, otherwise a change in order of input files
        // leads to slightly different results
        const int reference_index(-1); // set no reference (determine from data)
        Param ma_param = mat_param.copy("align_algorithm:", true);
        writeDebug_("Parameters passed to MapAlignerIdentification", ma_param, 3);
        MapAlignmentAlgorithmIdentification aligner;
        aligner.setLogType(log_type_);
        aligner.setParameters(ma_param);
        aligner.align(feature_maps, transformations, reference_index);
      }
      else //tree-guided
      {
        MapAlignmentAlgorithmTreeGuided aligner;
        aligner.setLogType(log_type_);
        aligner.setParameters(mat_param);
        aligner.align(feature_maps, transformations);
      }
    }
    catch (Exception::MissingInformation& err)
    {
      if (getFlag_("force"))
      {
        OPENMS_LOG_ERROR
          << "Error: alignment failed. Details:\n" << err.what()
          << "\nProcessing will continue using 'identity' transformations."
          << endl;
        model_type = "identity";
        transformations.resize(feature_maps.size());
      }
      else throw;
    }

    // find model parameters (if model_type == "identity" the fit is a NOP):
    vector<TransformationDescription::TransformationStatistics> alignment_stats;
    for (TransformationDescription & t : transformations)
    {
      // BUGFIX: previously logged an empty String() instead of the actual number of points
      writeDebug_("Using " + String(t.getDataPoints().size()) + " points in fit.", 1);
      if (t.getDataPoints().size() > 10)
      {
        t.fitModel(model_type, model_params);
      }
      t.printSummary(getGlobalLogDebug());
      alignment_stats.emplace_back(t.getStatistics());
    }

    // determine maximum RT shift after transformation that includes all high confidence IDs
    using TrafoStat = TransformationDescription::TransformationStatistics;
    for (auto & s : alignment_stats)
    {
      OPENMS_LOG_INFO << "Alignment differences (second) for percentiles (before & after): " << endl;
      OPENMS_LOG_INFO << ListUtils::concatenate(s.percents,"%\t") << "%" << endl;
      OPENMS_LOG_INFO << "before alignment:" << endl;
      for (const auto& p : s.percents)
      {
        OPENMS_LOG_INFO << (int)s.percentiles_before[p] << "\t";
      }
      OPENMS_LOG_INFO << endl;
      OPENMS_LOG_INFO << "after alignment:" << endl;
      for (const auto& p : s.percents)
      {
        OPENMS_LOG_INFO << (int)s.percentiles_after[p] << "\t";
      }
      OPENMS_LOG_INFO << endl;
    }

    // worst-case (100th percentile) residual RT difference across all maps
    double max_alignment_diff = std::max_element(alignment_stats.begin(), alignment_stats.end(),
      [](TrafoStat a, TrafoStat b)
      { return a.percentiles_after[100] < b.percentiles_after[100]; })->percentiles_after[100];
    // sometimes, very good alignments might lead to bad overall performance. Choose 2 minutes as minimum.
    OPENMS_LOG_INFO << "Max alignment difference (seconds): " << max_alignment_diff << endl;
    max_alignment_diff = std::max(max_alignment_diff, 120.0); // minimum 2 minutes
    max_alignment_diff = std::min(max_alignment_diff, 600.0); // maximum 10 minutes
    return max_alignment_diff;
  }
  return 0;
}
/**
  @brief Applies the computed RT transformations to the corresponding feature maps (in place).

  Maps and transformations are matched by index. Transformation failures
  (Exception::IllegalArgument) are logged as warnings and processing continues.
  With debug_level_ > 666 each transformation is additionally written to a trafoXML file.
*/
void transform_(
  vector<FeatureMap>& feature_maps,
  vector<TransformationDescription>& transformations
)
{
  // nothing to transform for a single map or when no transformations were computed
  if (feature_maps.size() <= 1 || transformations.empty()) { return; }

  for (Size map_idx = 0; map_idx < feature_maps.size(); ++map_idx)
  {
    try
    {
      MapAlignmentTransformer::transformRetentionTimes(feature_maps[map_idx],
                                                       transformations[map_idx]);
    }
    catch (Exception::IllegalArgument& e)
    {
      OPENMS_LOG_WARN << e.what() << endl;
    }

    if (debug_level_ > 666)
    {
      // plot with e.g.:
      // Rscript ../share/OpenMS/SCRIPTS/plot_trafo.R debug_trafo_1.trafoXML debug_trafo_1.pdf
      FileHandler().storeTransformations("debug_trafo_" + String(map_idx) + ".trafoXML", transformations[map_idx], {FileTypes::TRANSFORMATIONXML});
    }
  }
}
//-------------------------------------------------------------
// Link all features of this fraction
//-------------------------------------------------------------
/// this method will only be used during requantification.
/// Groups corresponding features across the maps of one fraction into a consensus map.
/// The RT grouping tolerance is derived from the maximum alignment error and the median FWHM.
void link_(
  vector<FeatureMap> & feature_maps,
  double median_fwhm,
  double max_alignment_diff,
  ConsensusMap & consensus_fraction
)
{
  //since requantification only happens with 2+ maps, we do not need to check/skip,
  //in case of a singleton fraction. Would throw an exception in linker.group
  Param fl_param = getParam_().copy("Linking:", true);
  writeDebug_("Parameters passed to feature grouping algorithm", fl_param, 3);
  writeDebug_("Linking: " + String(feature_maps.size()) + " features.", 1);

  // grouping tolerance = max alignment error + median FWHM
  const double rt_tolerance = 2.0 * max_alignment_diff + 2.0 * median_fwhm;
  fl_param.setValue("distance_RT:max_difference", rt_tolerance);

  FeatureGroupingAlgorithmQT grouper;
  grouper.setParameters(fl_param);
  /*
  FeatureGroupingAlgorithmKD grouper;
  fl_param.setValue("warp:rt_tol", rt_tolerance);
  fl_param.setValue("link:rt_tol", rt_tolerance);
  fl_param.setValue("link:mz_tol", 10.0);
  fl_param.setValue("mz_unit", "ppm");
  grouper.setParameters(fl_param);
  */
  grouper.group(feature_maps, consensus_fraction);

  OPENMS_LOG_INFO << "Size of consensus fraction: " << consensus_fraction.size() << endl;
  assert(!consensus_fraction.empty());
}
/// Align and link.
/// For two or more maps: align RTs, apply the transformations, then group features
/// into @p consensus_fraction. A single map is converted to a consensus map directly.
void alignAndLink_(
  vector<FeatureMap> & feature_maps,
  ConsensusMap & consensus_fraction,
  vector<TransformationDescription>& transformations,
  const double median_fwhm)
{
  if (feature_maps.size() <= 1)
  {
    // only one feature map: no alignment or linking needed
    MapConversion::convert(0, feature_maps.back(), consensus_fraction);
    return;
  }

  const double max_alignment_diff = align_(feature_maps, transformations);
  transform_(feature_maps, transformations);
  link_(feature_maps,
        median_fwhm,
        max_alignment_diff,
        consensus_fraction);
}
/// determine in which runs of the current fraction a peptide was quantified
/// returns map sequence+charge -> map index in consensus map that have non-zero quant values
/// @param cons Consensus map of the current fraction
/// @return For every (peptide sequence, charge) pair, a vector with one counter per
///         consensus map column recording how often the pair was observed in that map.
map<pair<String, UInt>, vector<int> > getPeptideOccurrence_(const ConsensusMap &cons)
{
  // NOTE: the former consensus-feature size histograms were computed here but never
  // read anywhere; they have been removed as dead work.
  map<pair<String, UInt>, vector<int> > seq_charge2map_occurence;
  const Size n_columns = cons.getColumnHeaders().size(); // hoisted loop invariant
  for (const ConsensusFeature& cfeature : cons)
  {
    const auto& pids = cfeature.getPeptideIdentifications();
    if (pids.empty()) { continue; }
    // count how often a peptide/charge pair has been observed in the different maps
    const vector<PeptideHit>& phits = pids[0].getHits();
    if (phits.empty()) { continue; }
    const String s = phits[0].getSequence().toString();
    const int z = phits[0].getCharge();
    vector<int>& occurrences = seq_charge2map_occurence[make_pair(s,z)];
    if (occurrences.empty())
    {
      occurrences = vector<int>(n_columns, 0);
    }
    // assign id to all dimensions in the consensus feature
    for (auto const & f : cfeature.getFeatures())
    {
      Size map_index = f.getMapIndex();
      occurrences[map_index] += 1;
    }
  }
  return seq_charge2map_occurence;
}
/// Validates that the ID file contains exactly one protein identification run and
/// that this run references at most one MS file (merged runs are unsupported).
/// @return EXECUTION_OK if valid, INCOMPATIBLE_INPUT_DATA otherwise (errors are logged)
ExitCodes checkSingleRunPerID_(const vector<ProteinIdentification>& protein_ids, const String& id_file_abs_path)
{
  if (protein_ids.size() != 1)
  {
    OPENMS_LOG_FATAL_ERROR << "Exactly one protein identification run must be annotated in " << id_file_abs_path << endl;
    return ExitCodes::INCOMPATIBLE_INPUT_DATA;
  }

  StringList run_paths;
  protein_ids[0].getPrimaryMSRunPath(run_paths);

  switch (run_paths.size())
  {
    case 0:
      // tolerated, but may cause trouble when matching IDs to spectra later
      OPENMS_LOG_WARN << "Warning: No mzML origin annotated in ID file. This can lead to errors or unexpected behaviour later: " << id_file_abs_path << endl;
      break;
    case 1:
      break; // exactly one referenced MS run - the expected case
    default:
      OPENMS_LOG_FATAL_ERROR << "ProteomicsLFQ does not support merged ID runs. ID file: " << id_file_abs_path << endl;
      return ExitCodes::INCOMPATIBLE_INPUT_DATA;
  }
  return EXECUTION_OK;
}
/// Switches all peptide ID scores to a Posterior Error Probability (PEP) score type.
/// @return EXECUTION_OK on success, INCOMPATIBLE_INPUT_DATA if no PEP score is available
ExitCodes switchScoreType_(PeptideIdentificationList& peptide_ids, const String& id_file_abs_path)
{
  // Check if score types are valid. TODO
  try
  {
    IDScoreSwitcherAlgorithm score_switcher;
    Size n_switched = 0;
    score_switcher.switchToGeneralScoreType(peptide_ids, IDScoreSwitcherAlgorithm::ScoreType::PEP, n_switched);
  }
  catch (Exception::MissingInformation&)
  {
    // no PEP score present in the IDs - the workflow cannot continue with this file
    OPENMS_LOG_FATAL_ERROR << "ProteomicsLFQ expects a Posterior Error Probability score in all Peptide IDs. ID file: " << id_file_abs_path << endl;
    return ExitCodes::INCOMPATIBLE_INPUT_DATA;
  }
  return EXECUTION_OK;
}
/**
  @brief Loads an idXML file and prepares its identifications for quantification.

  Performed steps (in order):
  1. Load protein/peptide IDs and check that exactly one ID run referencing one MS file is present.
  2. If a FASTA database is given, re-index peptides (also records the detected decoy string/prefix
     in member state).
  3. Switch peptide scores to PEP and keep only the best hit per peptide ID.
  4. Collect the run's fixed/variable modifications into the given (global) sets.
  5. Strip peptide-hit meta values except score-/q-value-/Luciphor-/target_decoy-related ones.
  6. Overwrite the primary MS run path with @p mz_file, annotate fraction(-group) meta values,
     and make the run identifier unique per fraction group/fraction.
  7. Reannotate missing spectrum references by RT matching against the MS file if needed.

  @param id_file_abs_path Absolute path of the idXML file to load
  @param mz_file MS file provided at the same input position (becomes the primary MS run path)
  @param in_db FASTA database path; empty disables re-indexing
  @param fraction_group Fraction group annotated on the protein run
  @param fraction Fraction annotated on the protein run
  @param protein_ids Output: loaded and cleaned protein identifications
  @param peptide_ids Output: loaded and cleaned peptide identifications
  @param fixed_modifications In/out: global set of fixed modifications (adds to)
  @param variable_modifications In/out: global set of variable modifications (adds to)
  @return EXECUTION_OK, or an error code from validation / re-indexing / score switching
*/
ExitCodes loadAndCleanupIDFile_(
  const String& id_file_abs_path,
  const String& mz_file,
  const String& in_db,
  const Size& fraction_group,
  const Size& fraction,
  vector<ProteinIdentification>& protein_ids,
  PeptideIdentificationList& peptide_ids,
  set<String>& fixed_modifications,  // adds to
  set<String>& variable_modifications)  // adds to
{
  const String& mz_file_abs_path = File::absolutePath(mz_file);
  FileHandler().loadIdentifications(id_file_abs_path, protein_ids, peptide_ids, {FileTypes::IDXML}, log_type_);

  ExitCodes e = checkSingleRunPerID_(protein_ids, id_file_abs_path);
  if (e != EXECUTION_OK) return e;

  // Re-index
  if (!in_db.empty())
  {
    PeptideIndexing indexer;
    Param param_pi = indexer.getParameters();
    param_pi.setValue("missing_decoy_action", "silent");
    param_pi.setValue("write_protein_sequence", "true");
    param_pi.setValue("write_protein_description", "true");
    indexer.setParameters(param_pi);

    // stream data in fasta file
    FASTAContainer<TFI_File> fasta_db(in_db);
    PeptideIndexing::ExitCodes indexer_exit = indexer.run(fasta_db, protein_ids, peptide_ids);

    // remember how decoys are marked in this database (used elsewhere in the tool)
    picked_decoy_string_ = indexer.getDecoyString();
    picked_decoy_prefix_ = indexer.isPrefix();

    // map indexer exit codes to tool exit codes (empty peptide IDs are tolerated)
    if ((indexer_exit != PeptideIndexing::ExitCodes::EXECUTION_OK) &&
        (indexer_exit != PeptideIndexing::ExitCodes::PEPTIDE_IDS_EMPTY))
    {
      if (indexer_exit == PeptideIndexing::ExitCodes::DATABASE_EMPTY)
      {
        return INPUT_FILE_EMPTY;
      }
      else if (indexer_exit == PeptideIndexing::ExitCodes::UNEXPECTED_RESULT)
      {
        return UNEXPECTED_RESULT;
      }
      else
      {
        return UNKNOWN_ERROR;
      }
    }
  }

  e = switchScoreType_(peptide_ids, id_file_abs_path);
  if (e != EXECUTION_OK) return e;

  // TODO we could think about removing this limitation but it gets complicated quickly
  IDFilter::keepBestPeptideHits(peptide_ids, false);  // strict = false

  // add to the (global) set of fixed and variable modifications
  const vector<String>& var_mods = protein_ids[0].getSearchParameters().variable_modifications;
  const vector<String>& fixed_mods = protein_ids[0].getSearchParameters().fixed_modifications;
  std::copy(var_mods.begin(), var_mods.end(), std::inserter(variable_modifications, variable_modifications.begin()));
  std::copy(fixed_mods.begin(), fixed_mods.end(), std::inserter(fixed_modifications, fixed_modifications.end()));

  // delete meta info to free some space
  for (PeptideIdentification & pid : peptide_ids)
  {
    // we currently can't clear the PeptideIdentification meta data
    // because the spectrum_reference is stored in the meta value (which it probably shouldn't)
    // TODO: pid.clearMetaInfo(); if we move it to the PeptideIdentification structure
    for (PeptideHit & ph : pid.getHits())
    {
      // TODO: we only have super inefficient meta value removal
      vector<String> keys;
      ph.getKeys(keys);
      for (const auto& k : keys)
      {
        // keep only scores, q-values, Luciphor FLR values and the target/decoy annotation
        if (!(k.hasSubstring("_score")
          || k.hasSubstring("q-value")
          || k.hasPrefix("Luciphor_global_flr")
          || k == "target_decoy")  // keep target_decoy information for QC
        )
        {
          ph.removeMetaValue(k);
        }
      }
      // we only clear selected metavalues
      //ph.clearMetaInfo();
    }
  }

  ///////////////////////////////////////////////////////
  // annotate experimental design
  // check and reannotate mzML file in ID
  StringList id_msfile_ref;
  protein_ids[0].getPrimaryMSRunPath(id_msfile_ref);
  // fix other problems like missing MS run path annotations
  if (id_msfile_ref.empty())
  {
    OPENMS_LOG_WARN << "MS run path not set in ID file: " << id_file_abs_path << endl
                    << "Resetting reference to MS file provided at same input position." << endl;
  }
  else if (id_msfile_ref.size() == 1)
  {
    // Check if the annotated primary MS run filename matches the mzML filename (comparison by base name)
    const String& in_bn = FileHandler::stripExtension(File::basename(mz_file_abs_path));
    const String& id_primaryMSRun_bn = FileHandler::stripExtension(File::basename(id_msfile_ref[0]));

    if (in_bn != id_primaryMSRun_bn)  // mismatch between annotation in ID file and provided mzML file
    {
      OPENMS_LOG_WARN << "MS run path referenced from ID file does not match MS file at same input position: " << id_file_abs_path << endl
                      << "Resetting reference to MS file provided at same input position." << endl;
    }
  }
  else
  {
    OPENMS_LOG_WARN << "Multiple MS files referenced from ID file: " << id_file_abs_path << endl
                    << "Resetting reference to MS file provided at same input position." << endl;
  }
  // in all cases: make the provided MS file the authoritative primary run path
  id_msfile_ref = StringList{mz_file};
  protein_ids[0].setPrimaryMSRunPath(id_msfile_ref);
  protein_ids[0].setMetaValue("fraction_group", fraction_group);
  protein_ids[0].setMetaValue("fraction", fraction);

  // update identifiers to make them unique
  // fixes some bugs related to users splitting the original mzML and id files before running the analysis
  // in that case these files might have the same identifier
  const String old_identifier = protein_ids[0].getIdentifier();
  const String new_identifier = old_identifier + "_" + String(fraction_group) + "F" + String(fraction);
  protein_ids[0].setIdentifier(new_identifier);
  for (PeptideIdentification & p : peptide_ids)
  {
    if (p.getIdentifier() == old_identifier)
    {
      p.setIdentifier(new_identifier);
    }
    else
    {
      OPENMS_LOG_WARN << "Peptide ID identifier found not present in the protein ID" << endl;
    }
  }

  // detect whether any peptide ID lacks a (non-empty) spectrum reference
  bool missing_spec_ref(false);
  for (const PeptideIdentification & pid : peptide_ids)
  {
    if (!pid.metaValueExists(Constants::UserParam::SPECTRUM_REFERENCE)
      || pid.getMetaValue(Constants::UserParam::SPECTRUM_REFERENCE).toString().empty())
    {
      missing_spec_ref = true;
      break;
    }
  }
  // reannotate spectrum references if missing
  if (missing_spec_ref)
  {
    OPENMS_LOG_WARN << "Warning: Identification file " << id_file_abs_path << " contains IDs without meta value for the spectrum native id.\n"
                       "OpenMS will try to reannotate them by matching retention times between ID and spectra." << endl;

    SpectrumMetaDataLookup::addMissingSpectrumReferences(
      peptide_ids,
      mz_file_abs_path,
      true);
  }

  return EXECUTION_OK;
}
void printMetaValues(const FeatureMap& tmp)
{
// extract meta value keys from the first element (which might be a normal or OffsetPeptide -> extract only the common ones)
std::vector<String> keys;
tmp[0].getKeys(keys);
if (auto it = std::find(keys.begin(), keys.end(), "OffsetPeptide"); it != keys.end())
{
keys.erase(it); // remove the string
}
for (const auto& k : keys) std::cout << k << '\t';
std::cout << "OffsetPeptide" << std::endl;
for (auto & f : tmp)
{
for (const auto& k : keys)
{
std::cout << f.getMetaValue(k) << '\t';
}
if (f.metaValueExists("OffsetPeptide"))
{
std::cout << "true";
}
else
{
std::cout << "false";
}
std::cout << endl;
}
}
ExitCodes quantifyFraction_(
const pair<unsigned int, std::vector<String> > & ms_files,
const map<String, String>& mzfile2idfile,
const String& in_db,
double median_fwhm,
ConsensusMap & consensus_fraction,
set<String>& fixed_modifications,
set<String>& variable_modifications)
{
vector<TransformationDescription> transformations;
vector<FeatureMap> feature_maps;
const Size fraction = ms_files.first;
// debug output
writeDebug_("Processing fraction number: " + String(fraction) + "\nFiles: ", 1);
for (String const & mz_file : ms_files.second) { writeDebug_(mz_file, 1); }
// for sanity checks we collect the primary MS run basenames as well as the ones stored in the ID files (below)
StringList id_MS_run_ref;
StringList in_MS_run = ms_files.second;
// for each MS file of current fraction (e.g., all MS files that measured the n-th fraction)
Size fraction_group{1};
for (String const & mz_file : ms_files.second)
{
writeDebug_("Processing file: " + mz_file, 1);
// centroid spectra (if in profile mode) and correct precursor masses
MSExperiment ms_centroided;
{
ExitCodes e = centroidAndCorrectPrecursors_(mz_file, ms_centroided);
if (e != EXECUTION_OK) { return e; }
}
// load and clean identification data associated with MS run
vector<ProteinIdentification> protein_ids;
PeptideIdentificationList peptide_ids;
const String& mz_file_abs_path = File::absolutePath(mz_file);
const String& id_file_abs_path = File::absolutePath(mzfile2idfile.at(mz_file_abs_path));
{
ExitCodes e = loadAndCleanupIDFile_(id_file_abs_path, mz_file, in_db, fraction_group, fraction, protein_ids, peptide_ids, fixed_modifications, variable_modifications);
if (e != EXECUTION_OK) return e;
}
// Annotate peptide IDs with FAIMS CV from spectrum data (required for proper per-CV filtering in FFI)
// Safe to call on non-FAIMS data - returns false and does nothing if no FAIMS data present
SpectrumMetaDataLookup::addMissingFAIMSToPeptideIDs(peptide_ids, ms_centroided);
StringList id_msfile_ref;
protein_ids[0].getPrimaryMSRunPath(id_msfile_ref);
id_MS_run_ref.push_back(id_msfile_ref[0]);
//-------------------------------------------------------------
// Internal Calibration of spectra peaks and precursor peaks with high-confidence IDs
//-------------------------------------------------------------
if (getStringOption_("mass_recalibration") == "true")
{
String debug_output_basename = (debug_level_ > 666) ? id_file_abs_path : "";
DDAWorkflowCommons::recalibrateMS1(ms_centroided, peptide_ids, debug_output_basename);
}
//////////////////////////////////////////
// Chromatographic parameter estimation
//////////////////////////////////////////
median_fwhm = DDAWorkflowCommons::estimateMedianChromatographicFWHM(ms_centroided);
OPENMS_LOG_INFO << "Median chromatographic FWHM: " << median_fwhm << std::endl;
//-------------------------------------------------------------
// Feature detection
//-------------------------------------------------------------
// Run MTD before FFM
// create empty feature map and annotate MS file
FeatureMap seeds;
seeds.setPrimaryMSRunPath({mz_file});
const bool targeted_only = getStringOption_("targeted_only") != "false";
if (!targeted_only)
{
// TODO: infer min/max charge from ID data
DDAWorkflowCommons::calculateSeeds(ms_centroided, getDoubleOption_("Seeding:intThreshold"), seeds, median_fwhm, 2, 5);
if (debug_level_ > 666)
{
FileHandler().storeFeatures("debug_seeds_fraction_" + String(ms_files.first) + "_" + String(fraction_group) + ".featureXML", seeds, {FileTypes::FEATUREXML}, log_type_);
}
}
/////////////////////////////////////////////////
// Run FeatureFinderIdentification
FeatureFinderIdentificationAlgorithm ffi;
ffi.getMSData().swap(ms_centroided);
ffi.getProgressLogger().setLogType(log_type_);
Param ffi_param = getParam_().copy("PeptideQuantification:", true);
ffi_param.setValue("detect:peak_width", 5.0 * median_fwhm);
ffi_param.setValue("debug", debug_level_); // pass down debug level
double feature_with_id_min_score = getDoubleOption_("feature_with_id_min_score");
double feature_without_id_min_score = getDoubleOption_("feature_without_id_min_score");
const bool filter_by_quant_scores = (feature_with_id_min_score > 0.0) && (targeted_only ||(feature_without_id_min_score > 0.0));
if (filter_by_quant_scores)
{
OPENMS_LOG_INFO << "Adding offset peptides as quant. decoys." << std::endl;
ffi_param.setValue("add_mass_offset_peptides", 10.005); // create mass offset peptides (aka quant. decoys). Uses same value as SAGE.
}
ffi.setParameters(ffi_param);
writeDebug_("Parameters passed to FeatureFinderIdentification algorithm", ffi_param, 3);
FeatureMap fm;
{
// These containers must be empty because we may be using
// seeds. The variables are not used by this code but
// required by the `run` call.
vector<ProteinIdentification> ext_protein_ids;
PeptideIdentificationList ext_peptide_ids;
ffi.run(peptide_ids,
protein_ids,
ext_peptide_ids,
ext_protein_ids,
fm, // fills fm
seeds,
mz_file);
}
if (filter_by_quant_scores)
{
// SVM table for training
// We want to filter out wrongly quantified peptides and noise. Later we want to also filter out wrongly transfered ones.
SimpleSVM::PredictorMap predictors;
map<Size, double> labels;
size_t current_row = 0;
size_t quant_target{}, quant_decoy{};
// randomize selection
Math::RandomShuffler shuffler;
std::vector<size_t> randomized_indices(fm.size());
std::iota(randomized_indices.begin(), randomized_indices.end(), 0);
for (auto & i : randomized_indices)
{
const auto& f = fm[i]; // select random feature
predictors["var_library_sangle"].push_back(f.getMetaValue("var_library_sangle"));
predictors["var_xcorr_shape"].push_back(f.getMetaValue("var_xcorr_shape"));
predictors["total_xic"].push_back(f.getMetaValue("total_xic"));
predictors["var_elution_model_fit_score"].push_back(f.getMetaValue("var_elution_model_fit_score"));
bool is_offset = f.metaValueExists("OffsetPeptide");
bool has_id = !f.getPeptideIdentifications().empty(); // offset peptides also have no id
if (is_offset)
{
if (quant_decoy < 1000) // max 1000 for training
{
labels[current_row] = 0.0;
++quant_decoy;
}
}
else
{
// only consider features with ID annotated
if (has_id && quant_target < 1000)
{
labels[current_row] = 1.0;
++quant_target;
}
}
++current_row;
}
if (quant_decoy > 4 && quant_target > 4) // less than 4 will make the SVM to error out
{
SimpleSVM svm;
Param svm_param = svm.getParameters();
svm_param.setValue("kernel", "linear");
svm_param.setValue("log2_C", ListUtils::create<double>("-5,-1,1,5,7,11,15"));
svm_param.setValue("log2_p", ListUtils::create<double>("-15,-9,-6,-3.32192809489,0,3.32192809489,6,9,15"));
svm.setParameters(svm_param);
svm.setup(predictors, labels);
vector<SimpleSVM::Prediction> predictions;
OPENMS_LOG_INFO << "Predicting class probabilities:" << endl;
svm.predict(predictions);
std::map<String, double> feature_weights;
svm.getFeatureWeights(feature_weights);
// assign quant probabilities to feature
size_t current_row{};
for (auto & i : randomized_indices) // traverse features in same order as before
{
auto& f = fm[i];
f.setMetaValue("p_quant", (double)predictions[current_row].probabilities[1]); // set probability of being a peptide feature (not a MassOffset decoy)
++current_row;
}
OPENMS_LOG_DEBUG << "Feature weights:" << endl;
for (const auto& m : feature_weights)
{
OPENMS_LOG_DEBUG << "weights: " << m.first << "\t" << m.second << endl;
}
}
// printMetaValues(tmp);
if (quant_decoy > 4 && quant_target > 4)
{
// remove offset (peptides+untargeted), and non-offset peptides and untargeted features if they don't pass the score threshold
size_t removed_non_offset_with_id{}, removed_non_offset_without_id{}, removed_offset{}, total_offset{}, total_non_offset_with_id{}, total_non_offset_without_id{};
fm.erase(std::remove_if(fm.begin(), fm.end(),
[&](const Feature& f)
{
double quant_score = f.getMetaValue("p_quant");
bool is_offset = f.metaValueExists("OffsetPeptide");
bool has_id = !f.getPeptideIdentifications().empty(); // offset peptides also have no id
bool untargeted_feature = !is_offset && !has_id;
bool is_feature_with_id = !is_offset && has_id;
// count offset (peptides+untargeted), and non-offset peptides and untargeted features
// and remove if they don't pass the score threshold
if (is_feature_with_id) // peptide but non-offset
{
++total_non_offset_with_id;
if (quant_score < feature_with_id_min_score)
{
++removed_non_offset_with_id;
return true;
}
}
else if (untargeted_feature) // seed but non-offset
{
++total_non_offset_without_id;
if (quant_score < feature_without_id_min_score)
{
++removed_non_offset_without_id;
return true;
}
}
else // offset
{
++total_offset;
if (quant_score < feature_without_id_min_score)
{
++removed_offset;
return true;
}
}
return false;
}),
fm.end());
// clean up by removing all OffsetPeptide features (TODO: maybe keep for transfer FDR)
fm.erase(std::remove_if(fm.begin(), fm.end(),
[](const Feature& f){return f.metaValueExists("OffsetPeptide");}),
fm.end());
std::cout << "Removed quant. targets with id (features with id) because of low quantification score: "
<< (double)removed_non_offset_with_id << " of " << total_non_offset_with_id << "\t ( "
<< (double)removed_non_offset_with_id/total_non_offset_with_id * 100.0 << "% )"
<< std::endl;
std::cout << "Removed quant. targets with id (features without id) because of low quantification score: "
<< (double)removed_non_offset_without_id << " of " << total_non_offset_without_id << "\t ( "
<< (double)removed_non_offset_without_id/total_non_offset_without_id * 100.0 << "% )"
<< std::endl;
std::cout << "Removed quant. decoys (offset features) because of low quantification score: "
<< (double)removed_offset << " of " << total_offset << "\t ( "
<< (double)removed_offset/total_offset * 100.0 << "% )"
<< std::endl;
}
}
// free parts of feature map not needed for further processing (e.g., subfeatures...)
unordered_set<String> keep_meta = {"OffsetPeptide"}; // meta values to keep (all others will be removed) TODO: keep FWHM etc. for QC
for (auto & f : fm)
{
std::vector<String> keys;
f.getKeys(keys);
for (const auto& k : keys)
{
if (auto it = keep_meta.find(k); it == keep_meta.end()) // not none of the meta value to keep? then delete
{
f.removeMetaValue(k);
}
}
f.setSubordinates({});
f.setConvexHulls({});
}
IDConflictResolverAlgorithm::resolve(fm,
getStringOption_("keep_feature_top_psm_only") == "false"); // keep only best peptide per feature per file
feature_maps.emplace_back(std::move(fm));
if (debug_level_ > 666)
{
FileHandler().storeFeatures("debug_fraction_" + String(ms_files.first) + "_" + String(fraction_group) + ".featureXML", feature_maps.back(), {FileTypes::FEATUREXML}, log_type_);
}
if (debug_level_ > 10000)
{
FileHandler().storeExperiment("debug_fraction_" + String(ms_files.first) + "_" + String(fraction_group) + "_chroms.mzML", ffi.getChromatograms(), {FileTypes::MZML}, log_type_);
}
++fraction_group;
}
// validate file lists (use only basename and ignore extension)
auto validation_result = File::validateMatchingFileNames(in_MS_run, id_MS_run_ref, true, true);
switch(validation_result)
{
case File::MatchingFileListsStatus::SET_MISMATCH:
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"MS run path reference in ID files and spectra filenames differ.");
break;
case File::MatchingFileListsStatus::ORDER_MISMATCH:
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"MS run path reference in ID files and spectra filenames match but order differs.");
break;
case File::MatchingFileListsStatus::MATCH:
writeLogInfo_("ID files reference same names as spectra files.");
break;
}
// Align all features of this fraction
alignAndLink_(feature_maps, consensus_fraction, transformations, median_fwhm);
// add dataprocessing
if (feature_maps.size() > 1)
{
addDataProcessing_(consensus_fraction,
getProcessingInfo_(DataProcessing::ALIGNMENT));
addDataProcessing_(consensus_fraction,
getProcessingInfo_(DataProcessing::FEATURE_GROUPING));
}
////////////////////////////////////////////////////////////
// Annotate experimental design in consensus map
////////////////////////////////////////////////////////////
Size j(0);
// for each MS file (as provided in the experimental design)
const auto& path_label_to_sampleidx = design_.getPathLabelToSampleMapping(true);
for (String const & mz_file : ms_files.second)
{
const Size curr_fraction_group = j + 1;
consensus_fraction.getColumnHeaders()[j].label = "label-free";
consensus_fraction.getColumnHeaders()[j].filename = mz_file;
consensus_fraction.getColumnHeaders()[j].unique_id = feature_maps[j].getUniqueId();
consensus_fraction.getColumnHeaders()[j].setMetaValue("fraction", fraction);
consensus_fraction.getColumnHeaders()[j].setMetaValue("fraction_group", curr_fraction_group);
const auto& sample_index = path_label_to_sampleidx.at({File::basename(mz_file), 1});
const auto& sample_name = design_.getSampleSection().getSampleName(sample_index);
consensus_fraction.getColumnHeaders()[j].setMetaValue("sample_name", sample_name);
++j;
}
// assign unique ids
consensus_fraction.applyMemberFunction(&UniqueIdInterface::setUniqueId);
// sort list of peptide identifications in each consensus feature by map index
consensus_fraction.sortPeptideIdentificationsByMapIndex();
if (debug_level_ >= 666)
{
FileHandler().storeConsensusFeatures("debug_fraction_" + String(ms_files.first) + ".consensusXML", consensus_fraction, {FileTypes::CONSENSUSXML}, log_type_);
writeDebug_("to produce a consensus map with: " + String(consensus_fraction.getColumnHeaders().size()) + " columns.", 1);
}
//-------------------------------------------------------------
// ID conflict resolution
//-------------------------------------------------------------
IDConflictResolverAlgorithm::resolve(consensus_fraction, true);
//-------------------------------------------------------------
// ConsensusMap normalization (basic median scaling)
//-------------------------------------------------------------
// Note: This normalization is applied automatically for feature-intensity-based quantification
// when multiple runs are provided. It uses simple median scaling to make sample medians equal.
// Normalization is DISABLED when MSstats or Triqler output is requested, as these tools
// perform their own normalization.
// This is independent of the -ProteinQuantification:consensus:normalize parameter, which
// controls an additional normalization step at the peptide quantification level (default: false).
if (getStringOption_("out_msstats").empty()
&& getStringOption_("out_triqler").empty()) // only normalize if no MSstats/Triqler output is generated
{
ConsensusMapNormalizerAlgorithmMedian::normalizeMaps(
consensus_fraction,
ConsensusMapNormalizerAlgorithmMedian::NM_SCALE,
"",
"");
}
return EXECUTION_OK;
}
/**
  @brief Runs protein inference, FDR estimation/filtering and protein-group bookkeeping on the merged consensus map.

  All ID runs in @p consensus are first merged into a SINGLE protein run. Inference is then performed either
  with the simple aggregation-based algorithm or with the Bayesian algorithm (tool option "protein_inference").
  Afterwards, protein-level (and optionally peptide/PSM-level) FDR is computed and applied, dangling references
  and protein groups are cleaned up, and finally protein coverage and observed modifications are annotated.

  @param consensus Merged quantitative results of all fractions; identification data is modified in place.
  @param fixed_modifications Fixed modifications of the search; excluded when computing observed modifications.
  @return EXECUTION_OK on success.
  @throws Exception::MissingInformation if no protein hits remain after FDR filtering.
*/
ExitCodes inferProteinGroups_(ConsensusMap& consensus,
  const set<String>& fixed_modifications)
{
  // since we don't require an index as input but need to calculate e.g., coverage we reindex here (fast)
  //-------------------------------------------------------------
  // Protein inference
  //-------------------------------------------------------------
  // TODO: This needs to be rewritten to work directly on the quant data.
  // of course we need to provide options to keep decoys and unassigned PSMs all the way through quantification.
  // TODO: Think about ProteinInference on IDs only merged per condition

  // Interpret tool options:
  //  - "strictly_unique_peptides" disables annotation of indistinguishable protein groups
  //  - "shared_peptides" additionally enables greedy resolution of peptides shared between groups
  bool groups = getStringOption_("protein_quantification") != "strictly_unique_peptides";
  bool bayesian = getStringOption_("protein_inference") == "bayesian";
  bool greedy_group_resolution = getStringOption_("protein_quantification") == "shared_peptides";

  // Study-wide inference operates on a single merged ID run.
  ConsensusMapMergerAlgorithm cmerge;
  // The following will result in a SINGLE protein run for the whole consensusMap.
  cmerge.mergeAllIDRuns(consensus);

  if (!bayesian) // simple aggregation
  {
    BasicProteinInferenceAlgorithm bpia;
    auto bpiaparams = bpia.getParameters();
    bpiaparams.setValue("annotate_indistinguishable_groups", groups ? "true" : "false");
    bpiaparams.setValue("greedy_group_resolution", greedy_group_resolution ? "true" : "false");
    bpia.setParameters(bpiaparams);
    // TODO parameterize if unassigned IDs without feature should contribute?
    bpia.run(consensus, consensus.getProteinIdentifications()[0], true);
  }
  else // if (bayesian)
  {
    BayesianProteinInferenceAlgorithm bayes;
    auto bayesparams = bayes.getParameters();
    // We need all PSMs to collect all possible modifications, to do spectral counting and to do PSM FDR.
    // In theory, if none is needed we can save memory. For quantification,
    // we basically discard peptide+PSM information from inference and use the info from the cMaps.
    bayesparams.setValue("keep_best_PSM_only", "false");
    bayes.setParameters(bayesparams);
    //bayesian inference automatically annotates groups, therefore remove them later
    bayes.inferPosteriorProbabilities(consensus, greedy_group_resolution);
    if (!groups)
    {
      // should be enough to just clear the groups. Only indistinguishable will be annotated above.
      consensus.getProteinIdentifications()[0].getIndistinguishableProteins().clear();
    }
  }

  // TODO think about order of greedy resolution, FDR calc and filtering

  //-------------------------------------------------------------
  // Protein (and additional peptide?) FDR
  //-------------------------------------------------------------
  const double max_fdr = getDoubleOption_("proteinFDR");
  const bool picked = getStringOption_("picked_proteinFDR") == "true";
  //TODO use new FDR_type parameter
  const double max_psm_fdr = getDoubleOption_("psmFDR");
  FalseDiscoveryRate fdr;
  // if decoys are quantified, keep them in the FDR-annotated output as well
  if (getFlag_("PeptideQuantification:quantify_decoys"))
  {
    Param fdr_param = fdr.getParameters();
    fdr_param.setValue("add_decoy_peptides", "true");
    fdr_param.setValue("add_decoy_proteins", "true");
    fdr.setParameters(fdr_param);
  }
  // ensure that only one final inference result is generated for now
  assert(consensus.getProteinIdentifications().size() == 1);
  auto& overall_proteins = consensus.getProteinIdentifications()[0];

  // protein-level FDR: classic target-decoy or picked (uses decoy string/prefix members)
  if (!picked)
  {
    fdr.applyBasic(overall_proteins);
  }
  else
  {
    fdr.applyPickedProteinFDR(overall_proteins, picked_decoy_string_, picked_decoy_prefix_);
  }

  bool pepFDR = getStringOption_("FDR_type") == "PSM+peptide";
  //TODO Think about the implications of mixing PSMs from different files and searches.
  // Score should be PEPs here. We could extract the original search scores, depending on preprocessing. PEPs allow some normalization but will
  // disregard the absolute score differences between runs (i.e. if scores in one run are all lower than the ones in another run,
  // do you want to filter them out preferably or do you say: this was a faulty run, if the decoys are equally bad, I want the
  // best targets to be treated like the best targets from the other runs, even if the absolute match scores are much lower).
  if (pepFDR)
  {
    fdr.applyBasicPeptideLevel(consensus, true);
  }
  else
  {
    fdr.applyBasic(consensus, true);
  }

  if (!getFlag_("PeptideQuantification:quantify_decoys"))
  { // FDR filtering removed all decoy proteins -> update references and remove all unreferenced (decoy) PSMs
    IDFilter::removeDanglingProteinReferences(consensus, true);
    IDFilter::removeUnreferencedProteins(consensus, true); // if we don't filter peptides for now, we don't need this
    IDFilter::updateProteinGroups(overall_proteins.getIndistinguishableProteins(),
      overall_proteins.getHits());
    IDFilter::updateProteinGroups(overall_proteins.getProteinGroups(),
      overall_proteins.getHits());
  }

  // FDR filtering
  if (max_psm_fdr < 1.) // PSM level
  {
    // filter both assigned (per consensus feature) and unassigned PSMs by the FDR threshold
    for (auto& f : consensus)
    {
      IDFilter::filterHitsByScore(f.getPeptideIdentifications(), max_psm_fdr);
    }
    IDFilter::filterHitsByScore(consensus.getUnassignedPeptideIdentifications(), max_psm_fdr);
  }

  if (max_fdr < 1.) // protein level
  {
    IDFilter::filterHitsByScore(overall_proteins, max_fdr);
  }

  // after any filtering step, references and groups must be brought back into a consistent state
  if (max_fdr < 1. || !getFlag_("PeptideQuantification:quantify_decoys"))
  {
    IDFilter::removeDanglingProteinReferences(consensus, true);
  }
  if (max_psm_fdr < 1.)
  {
    IDFilter::removeUnreferencedProteins(consensus, true);
  }
  if (max_fdr < 1. || max_psm_fdr < 1. || !getFlag_("PeptideQuantification:quantify_decoys"))
  {
    IDFilter::updateProteinGroups(overall_proteins.getIndistinguishableProteins(), overall_proteins.getHits());
    IDFilter::updateProteinGroups(overall_proteins.getProteinGroups(), overall_proteins.getHits());
  }

  if (overall_proteins.getHits().empty())
  {
    throw Exception::MissingInformation(
      __FILE__,
      __LINE__,
      OPENMS_PRETTY_FUNCTION,
      "No proteins left after FDR filtering. Please check the log and adjust your settings.");
  }

  // do we only want to keep strictly unique peptides (e.g., no groups)?
  // This filters for the VERY initially computed theoretical uniqueness calculated by PeptideIndexer
  // which also means that e.g., target+decoy peptides are not unique
  if (!greedy_group_resolution && !groups)
  {
    for (auto& f : consensus)
    {
      IDFilter::keepUniquePeptidesPerProtein(f.getPeptideIdentifications());
    }
    IDFilter::keepUniquePeptidesPerProtein(consensus.getUnassignedPeptideIdentifications());
  }

  // compute coverage (sequence was annotated during PeptideIndexing)
  // TODO: do you really want to compute coverage from unquantified peptides also?
  overall_proteins.computeCoverage(consensus, true);

  // TODO: this might not be correct if only the best peptidoform is kept
  // determine observed modifications (exclude fixed mods)
  overall_proteins.computeModifications(consensus, StringList(fixed_modifications.begin(), fixed_modifications.end()), true);

  return EXECUTION_OK;
}
/**
  @brief Tool entry point: parameter validation, experimental-design handling, per-fraction quantification
  (feature-intensity or spectral counting), protein inference/FDR, peptide & protein quantification and export
  (mzTab, optionally consensusXML/MSstats/Triqler).

  @return EXECUTION_OK on success, or the exit code of a failing sub-step.
  @throws Exception::FileNotFound / Exception::InvalidParameter on inconsistent inputs.
*/
ExitCodes main_(int, const char **) override
{
  //-------------------------------------------------------------
  // Parameter handling
  //-------------------------------------------------------------

  // Read tool parameters
  StringList in = getStringList_("in");
  String out = getStringOption_("out");
  String out_msstats = getStringOption_("out_msstats");
  String out_triqler = getStringOption_("out_triqler");
  StringList in_ids = getStringList_("ids");
  String design_file = getStringOption_("design");
  String in_db = getStringOption_("fasta");

  // Validate parameters: spectra and ID files must correspond one-to-one
  if (in.size() != in_ids.size())
  {
    throw Exception::FileNotFound(__FILE__, __LINE__,
      OPENMS_PRETTY_FUNCTION, "Number of spectra file (" + String(in.size()) + ") must match number of ID files (" + String(in_ids.size()) + ").");
  }

  // MSstats/Triqler export is only defined for feature-intensity-based quantification
  if (getStringOption_("quantification_method") == "spectral_counting")
  {
    if (!out_msstats.empty())
    {
      throw Exception::FileNotFound(__FILE__, __LINE__,
        OPENMS_PRETTY_FUNCTION, "MSstats export for spectral counting data not supported. Please remove output file.");
    }
    if (!out_triqler.empty())
    {
      throw Exception::FileNotFound(__FILE__, __LINE__,
        OPENMS_PRETTY_FUNCTION, "Triqler export for spectral counting data not supported. Please remove output file.");
    }
  }

  //-------------------------------------------------------------
  // Experimental design: read or generate default
  //-------------------------------------------------------------
  if (!design_file.empty())
  { // load from file
    design_ = ExperimentalDesignFile::load(design_file, false);
  }
  else
  {
    // no design given: generate a trivial one (one sample per file, single fraction, label-free)
    OPENMS_LOG_INFO << "No experimental design file provided.\n"
                    << "Assuming a label-free experiment without fractionation.\n"
                    << endl;

    TextFile design_table;
    design_table.addLine("Fraction_Group\tFraction\tSpectra_Filepath\tLabel\tSample\tMSstats_Condition\tMSstats_BioReplicate");
    Size count{1};
    for (String & s : in)
    {
      design_table.addLine(String(count) + "\t1\t" + s +"\t1\tSample" + String(count) + "\t" + String(count)+ "\t" + String(count));
      ++count;
    }
    design_ = ExperimentalDesignFile::load(design_table, "--no design file--", false);
  }

  // some sanity checks
  // extract basenames from experimental design and input files
  const auto& pl2fg = design_.getPathLabelToFractionGroupMapping(true);
  set<String> ed_basenames;
  for (const auto& p : pl2fg)
  {
    const String& filename = p.first.first;
    ed_basenames.insert(filename);
  }

  set<String> in_basenames;
  for (const auto & f : in)
  {
    const String& in_bn = File::basename(f);
    in_basenames.insert(in_bn);
  }

  // every input file must occur in the design (input may be a subset of the design)
  if (!std::includes(ed_basenames.begin(), ed_basenames.end(), in_basenames.begin(), in_basenames.end()))
  {
    throw Exception::InvalidParameter(__FILE__, __LINE__,
      OPENMS_PRETTY_FUNCTION, "Spectra file basenames provided as input need to match a subset of the experimental design file basenames.");
  }

  // restrict the design to the files actually passed in
  Size nr_filtered = design_.filterByBasenames(in_basenames);
  if (nr_filtered > 0)
  {
    OPENMS_LOG_WARN << "WARNING: " << nr_filtered << " files from experimental design were not passed as mzMLs. Continuing with subset if the fractions still match." << std::endl;
  }

  if (design_.getNumberOfLabels() != 1)
  {
    throw Exception::InvalidParameter(__FILE__, __LINE__,
      OPENMS_PRETTY_FUNCTION, "Experimental design is not label-free as it contains multiple labels.");
  }
  if (!design_.sameNrOfMSFilesPerFraction())
  {
    OPENMS_LOG_WARN << "WARNING: Different number of fractions for different samples provided. Support maybe limited in ProteomicsLFQ." << std::endl;
  }

  std::map<unsigned int, std::vector<String> > frac2ms = design_.getFractionToMSFilesMapping();

  // experimental design file could contain URLs etc. that we want to overwrite with the actual input files
  for (auto & fraction_ms_files : frac2ms)
  {
    for (auto & s : fraction_ms_files.second)
    { // for all ms files of current fraction number
      // if basename in experimental design matches to basename in input file
      // overwrite experimental design to point to existing file (and only if they were different)
      if (auto it = std::find_if(in.begin(), in.end(),
          [&s] (const String& in_filename) { return File::basename(in_filename) == File::basename(s); }); // basename matches?
        it != in.end() && s != *it) // and differ?
      {
        OPENMS_LOG_INFO << "Path of spectra files differ between experimental design (1) and input (2). Using the path of the input file as "
                        << "we know this file exists on the file system: '" << *it << "' vs. '" << s << "'" << endl;
        s = *it; // overwrite filename in design with filename in input files
      }
    }
  }

  for (auto & f : frac2ms)
  {
    writeDebug_("Fraction " + String(f.first) + ":", 10);
    for (const String & s : f.second)
    {
      writeDebug_("MS file: " + s, 10);
    }
  }

  // Map between mzML file and corresponding id file
  // We assume that these are provided in the exact same order.
  map<String, String> mzfile2idfile = DDAWorkflowCommons::mapMzML2Ids(in, in_ids);
  map<String, String> idfile2mzfile = DDAWorkflowCommons::mapId2MzMLs(mzfile2idfile);

  // TODO maybe check if mzMLs in experimental design match to mzMLs passed as in parameter
  // IF both are present

  Param pep_param = getParam_().copy("Posterior Error Probability:", true);
  writeDebug_("Parameters passed to PEP algorithm", pep_param, 3);

  // TODO: inference parameter
  Param pq_param = getParam_().copy("ProteinQuantification:", true);
  writeDebug_("Parameters passed to PeptideAndProteinQuant algorithm", pq_param, 3);

  Param com_param = getParam_().copy("algorithm:common:", true);
  writeDebug_("Common parameters passed to both sub-algorithms (mtd and epd)", com_param, 3);

  // collected while loading/cleaning the ID files; needed later for inference
  set<String> fixed_modifications, variable_modifications;

  //-------------------------------------------------------------
  // Loading input
  //-------------------------------------------------------------
  ConsensusMap consensus;

  //-------------------------------------------------------------
  // feature-based quantifications
  //-------------------------------------------------------------
  if (getStringOption_("quantification_method") == "feature_intensity")
  {
    OPENMS_LOG_INFO << "Performing feature intensity-based quantification." << endl;
    double median_fwhm(0);
    for (auto const & ms_files : frac2ms) // for each fraction->ms file(s)
    {
      ConsensusMap consensus_fraction; // quantitative result for this fraction identifier
      ExitCodes e = quantifyFraction_(
        ms_files,
        mzfile2idfile,
        in_db,
        median_fwhm,
        consensus_fraction,
        fixed_modifications,
        variable_modifications);
      if (e != EXECUTION_OK) { return e; }
      consensus.appendColumns(consensus_fraction); // append consensus map calculated for this fraction number
    } // end of scope of fraction related data

    consensus.sortByPosition();
    consensus.sortPeptideIdentificationsByMapIndex();

    if (debug_level_ >= 666)
    {
      FileHandler().storeConsensusFeatures("debug_after_normalization.consensusXML", consensus, {FileTypes::CONSENSUSXML}, log_type_);
    }
  }
  else if (getStringOption_("quantification_method") == "spectral_counting")
  {
    OPENMS_LOG_INFO << "Performing spectral counting-based quantification." << endl;

    // init consensus map with basic experimental design information
    consensus.setExperimentType("label-free");
    auto& all_protein_ids = consensus.getProteinIdentifications();
    auto& all_peptide_ids = consensus.getUnassignedPeptideIdentifications();
    Size run_index(0);
    for (auto const & ms_files : frac2ms) // for each fraction->ms file(s) e.g.: Fraction1->FileA,FileB,FileC
    {
      const Size& fraction = ms_files.first;
      // debug output
      writeDebug_("Processing fraction number: " + String(fraction) + "\nFiles: ", 1);
      for (String const & mz_file : ms_files.second) { writeDebug_(mz_file, 1); }

      // for sanity checks we collect the primary MS run basenames as well as the ones stored in the ID files (below)
      StringList id_MS_run_ref;
      StringList in_MS_run = ms_files.second;

      // for each MS file of current fraction (e.g., all MS files that measured the n-th fraction)
      Size fraction_group{1};
      for (String const & mz_file : ms_files.second)
      {
        // load and clean identification data associated with MS run
        vector<ProteinIdentification> protein_ids;
        PeptideIdentificationList peptide_ids;
        const String& mz_file_abs_path = File::absolutePath(mz_file);
        const String& id_file_abs_path = File::absolutePath(mzfile2idfile.at(mz_file_abs_path));
        {
          ExitCodes e = loadAndCleanupIDFile_(id_file_abs_path, mz_file, in_db, fraction_group, fraction, protein_ids, peptide_ids, fixed_modifications, variable_modifications);
          if (e != EXECUTION_OK) return e;
        }

        StringList id_msfile_ref;
        protein_ids[0].getPrimaryMSRunPath(id_msfile_ref);
        id_MS_run_ref.push_back(id_msfile_ref[0]);

        // append the ProteinIdentification run (contains backlink to MS file) and the PeptideIdentifications (PSMs for this fraction and MS run) to the list of UnassignedPeptideIdentifications
        all_protein_ids.emplace_back(std::move(protein_ids[0]));
        all_peptide_ids.insert(all_peptide_ids.end(),
          std::make_move_iterator(peptide_ids.begin()),
          std::make_move_iterator(peptide_ids.end()));
      }

      ////////////////////////////////////////////////////////////
      // Annotate experimental design in consensus map
      ////////////////////////////////////////////////////////////
      Size j(0);
      // for each MS file (as provided in the experimental design)
      const auto& path_label_to_sampleidx = design_.getPathLabelToSampleMapping(true);
      for (String const & mz_file : ms_files.second)
      {
        const Size curr_fraction_group = j + 1;
        consensus.getColumnHeaders()[run_index].label = "label-free";
        consensus.getColumnHeaders()[run_index].filename = mz_file;
        consensus.getColumnHeaders()[run_index].unique_id = 1 + run_index;
        consensus.getColumnHeaders()[run_index].setMetaValue("fraction", fraction);
        consensus.getColumnHeaders()[run_index].setMetaValue("fraction_group", curr_fraction_group);
        consensus.getColumnHeaders()[run_index].setMetaValue("sample_name", design_.getSampleSection().getSampleName(path_label_to_sampleidx.at({File::basename(mz_file),1})));
        ++j;
        ++run_index;
      }
    }
  }

  //-------------------------------------------------------------
  // ID related algorithms
  //-------------------------------------------------------------
  ExitCodes e = inferProteinGroups_(consensus, fixed_modifications);
  if (e != EXECUTION_OK) return e;

  // clean up references (assigned and unassigned)
  IDFilter::removeUnreferencedProteins(consensus, true);

  // only keep best scoring ID for each consensus feature
  IDConflictResolverAlgorithm::resolve(consensus);

  //-------------------------------------------------------------
  // Peptide quantification
  //-------------------------------------------------------------
  PeptideAndProteinQuant quantifier;

  // TODO Why is there no easy quantifier.run(consensus,[inference_prot_ids]) function??
  if (getStringOption_("quantification_method") == "feature_intensity")
  {
    quantifier.setParameters(pq_param);
    quantifier.readQuantData(consensus, design_);
  }
  else if (getStringOption_("quantification_method") == "spectral_counting")
  {
    // spectral counting: sum up all PSMs per peptide, no normalization
    pq_param.setValue("top:aggregate", "sum");
    pq_param.setValue("top:N", 0); // all
    pq_param.setValue("consensus:normalize", "false");
    quantifier.setParameters(pq_param);
    quantifier.readQuantData(
      consensus.getProteinIdentifications(),
      consensus.getUnassignedPeptideIdentifications(),
      design_);
  }

  // nothing to filter. everything in consensus should be uptodate with inference.
  // on peptide level it does not annotate anything anyway
  quantifier.quantifyPeptides();

  //-------------------------------------------------------------
  // Protein quantification
  //-------------------------------------------------------------
  // Should always be there by now, even if just singletons
  ProteinIdentification& inferred_proteins = consensus.getProteinIdentifications()[0];
  if (inferred_proteins.getIndistinguishableProteins().empty())
  {
    throw Exception::MissingInformation(
      __FILE__,
      __LINE__,
      OPENMS_PRETTY_FUNCTION,
      "No information on indistinguishable protein groups found.");
  }

  quantifier.quantifyProteins(inferred_proteins);
  auto const & protein_quants = quantifier.getProteinResults();
  if (protein_quants.empty())
  {
    OPENMS_LOG_WARN << "Warning: No proteins were quantified." << endl;
  }

  //-------------------------------------------------------------
  // Export of MzTab file as final output
  //-------------------------------------------------------------

  // Annotate quants to protein(groups) for easier export in mzTab
  // Note: we keep protein groups that have not been quantified
  quantifier.annotateQuantificationsToProteins(
    protein_quants, inferred_proteins, false);

  // For correctness, we would need to set the run reference in the pepIDs of the consensusXML all to the first run then
  // And probably make sure that peptides that correspond to filtered out proteins are not producing errors
  // e.g. by removing them with a Filter beforehand.

  consensus.resolveUniqueIdConflicts(); // TODO: find out if this is still needed
  if (!getStringOption_("out_cxml").empty())
  {
    // Note: idXML and consensusXML doesn't support writing quantification at protein groups
    // (they are nevertheless stored and passed to mzTab for proper export)
    FileHandler().storeConsensusFeatures(getStringOption_("out_cxml"), consensus, {FileTypes::CONSENSUSXML}, log_type_);
  }

  // Fill MzTab with meta data and quants annotated in identification data structure
  const bool report_unidentified_features(false);
  const bool report_unmapped(true); //TODO we should make a distinction from unassigned after conflict resolution and unassigned because unmappable
  const bool report_subfeatures(false);
  const bool report_unidentified_spectra(false);
  const bool report_not_only_best_psm_per_spectrum(false);
  MzTabFile().store(out,
    consensus,
    false, // first run is inference but also a properly merged run, so we don't need the hack
    report_unidentified_features,
    report_unmapped,
    report_subfeatures,
    report_unidentified_spectra,
    report_not_only_best_psm_per_spectrum);

  if (!out_msstats.empty())
  {
    IDFilter::removeEmptyIdentifications(consensus); // MzTab stream exporter currently doesn't support IDs with empty hits.
    MSstatsFile msstats;

    // TODO: add a helper method to quickly check if experimental design file contain the right columns
    // (and put this at start of tool)

    // shrink protein runs to the one containing the inference data
    consensus.getProteinIdentifications().resize(1);

    msstats.storeLFQ(
      out_msstats,
      consensus,
      design_,
      StringList(),
      false, //lfq
      "MSstats_BioReplicate",
      "MSstats_Condition",
      "max");
  }

  if (!out_triqler.empty())
  {
    TriqlerFile tf;

    // shrink protein runs to the one containing the inference data
    consensus.getProteinIdentifications().resize(1);

    // Triqler expects PEP scores; convert in place
    IDScoreSwitcherAlgorithm switcher;
    Size c = 0;
    switcher.switchToGeneralScoreType(consensus, IDScoreSwitcherAlgorithm::ScoreType::PEP, c);
    tf.storeLFQ(
      out_triqler,
      consensus,
      design_,
      StringList(),
      "MSstats_Condition" // TODO: choose something more generic like "Condition" for both MSstats and Triqler export
    );
  }

  return EXECUTION_OK;
}
// Decoy accession marker passed to FalseDiscoveryRate::applyPickedProteinFDR() in inferProteinGroups_();
// presumably determined while loading/cleaning the ID files -- set outside this chunk, TODO confirm
String picked_decoy_string_;
// Whether picked_decoy_string_ marks decoys as an accession prefix (true) rather than a suffix
bool picked_decoy_prefix_ = true;
// Experimental design (file/fraction/sample mapping); loaded from the design file or auto-generated in main_()
ExperimentalDesign design_;
};
int main(int argc, const char ** argv)
{
ProteomicsLFQ tool;
return tool.main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/PeakPickerHiRes.cpp | .cpp | 8,820 | 271 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Eva Lange $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/FORMAT/FileHandler.h>
// TODO: remove this include once possible; MzMLFile is currently still needed for transform()
#include <OpenMS/FORMAT/MzMLFile.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/PROCESSING/CENTROIDING/PeakPickerHiRes.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/FORMAT/DATAACCESS/MSDataWritingConsumer.h>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_PeakPickerHiRes PeakPickerHiRes
@brief A tool for peak detection in profile data. Executes the peak picking with @ref OpenMS::PeakPickerHiRes "high_res" algorithm.
<center>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=4> → PeakPickerHiRes →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_BaselineFilter </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=3> any tool operating on MS peak data @n (in mzML format)</td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_NoiseFilterGaussian </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_NoiseFilterSGolay </td>
</tr>
</table>
</center>
Reference:\n
Weisser <em>et al.</em>: <a href="https://doi.org/10.1021/pr300992u">An automated pipeline for high-throughput label-free quantitative proteomics</a> (J. Proteome Res., 2013, PMID: 23391308).
The conversion of the "raw" ion count data acquired
by the machine into peak lists for further processing
is usually called peak picking or centroiding. The choice of the algorithm
should mainly depend on the resolution of the data.
As the name implies, the @ref OpenMS::PeakPickerHiRes "high_res"
algorithm is fit for high resolution (Orbitrap or FTICR) data.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_PeakPickerHiRes.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_PeakPickerHiRes.html
For the parameters of the algorithm section see the algorithm documentation: @ref OpenMS::PeakPickerHiRes "PeakPickerHiRes"
Be aware that applying the algorithm to already picked data results in an error message and program exit or corrupted output data.
Advanced users may skip the check for already centroided data using the flag "-force" (useful e.g. if spectrum annotations in the data files are wrong).
In the following table, you can find example values of the most important algorithm parameters for
different instrument types. @n These parameters are not valid for all instruments of that type,
but can be used as a starting point for finding suitable parameters.
<table>
<tr BGCOLOR="#EBEBEB">
<td> </td>
<td><b>Q-TOF</b></td>
<td><b>LTQ Orbitrap</b></td>
</tr>
<tr>
<td BGCOLOR="#EBEBEB"><b>signal_to_noise</b></td>
<td>2</td>
<td>0</td>
</tr>
</table>
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
/// TOPP tool wrapper around the PeakPickerHiRes algorithm (centroiding of profile spectra).
class TOPPPeakPickerHiRes :
  public TOPPBase
{
public:
  TOPPPeakPickerHiRes() :
    TOPPBase("PeakPickerHiRes", "Finds mass spectrometric peaks in profile mass spectra.")
  {
  }

protected:
  /**
    @brief Helper class for the Low Memory peak-picking

    Consumes spectra/chromatograms one at a time, picks them and streams the
    result to the output file without holding the whole experiment in memory.
  */
  class PPHiResMzMLConsumer :
    public MSDataWritingConsumer
  {
public:
    /// @param filename output mzML path; @param pp configured picker (copied into the consumer)
    PPHiResMzMLConsumer(String filename, const PeakPickerHiRes& pp) :
      MSDataWritingConsumer(std::move(filename)),
      // initialize members directly instead of default-constructing and assigning in the body
      pp_(pp),
      ms_levels_(pp.getParameters().getValue("ms_levels").toIntVector())
    {
    }

    /// Picks a single spectrum in place (skips already-centroided spectra in auto mode,
    /// or spectra whose MS level is not selected).
    void processSpectrum_(MapType::SpectrumType& s) override
    {
      if (ms_levels_.empty()) //auto mode
      {
        if (s.getType() == SpectrumSettings::SpectrumType::CENTROID)
        {
          return;
        }
      }
      else if (!ListUtils::contains(ms_levels_, s.getMSLevel()))
      {
        return;
      }

      MapType::SpectrumType sout;
      pp_.pick(s, sout);
      s = std::move(sout);
    }

    /// Picks a single chromatogram in place.
    void processChromatogram_(MapType::ChromatogramType & c) override
    {
      MapType::ChromatogramType c_out;
      pp_.pick(c, c_out);
      c = std::move(c_out);
    }

  private:
    PeakPickerHiRes pp_;          ///< picker instance (copy; pick() is not const-callable here)
    std::vector<Int> ms_levels_;  ///< MS levels to pick; empty means auto mode
  };

  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "input profile data file ");
    setValidFormats_("in", ListUtils::create<String>("mzML"));
    registerOutputFile_("out", "<file>", "", "output peak file ");
    setValidFormats_("out", ListUtils::create<String>("mzML"));

    registerStringOption_("processOption", "<name>", "inmemory", "Whether to load all data and process them in-memory or whether to process the data on the fly (lowmemory) without loading the whole file into memory first", false, true);
    setValidStrings_("processOption", ListUtils::create<String>("inmemory,lowmemory"));

    registerSubsection_("algorithm", "Algorithm parameters section");
  }

  /// Expose the algorithm's own defaults as the "algorithm" INI subsection.
  Param getSubsectionDefaults_(const String & /*section*/) const override
  {
    return PeakPickerHiRes().getDefaults();
  }

  /// Streaming (low-memory) variant: transform the input file spectrum-by-spectrum.
  ExitCodes doLowMemAlgorithm(const PeakPickerHiRes& pp)
  {
    ///////////////////////////////////
    // Create the consumer object, add data processing
    ///////////////////////////////////
    PPHiResMzMLConsumer pp_consumer(out, pp);
    pp_consumer.addDataProcessing(getProcessingInfo_(DataProcessing::PEAK_PICKING));

    ///////////////////////////////////
    // Create new MSDataReader and set our consumer
    ///////////////////////////////////
    MzMLFile mz_data_file;
    mz_data_file.setLogType(log_type_);
    mz_data_file.transform(in, &pp_consumer);

    return EXECUTION_OK;
  }

  ExitCodes main_(int, const char **) override
  {
    //-------------------------------------------------------------
    // parameter handling
    //-------------------------------------------------------------
    in = getStringOption_("in");
    out = getStringOption_("out");
    String process_option = getStringOption_("processOption");

    Param pepi_param = getParam_().copy("algorithm:", true);
    writeDebug_("Parameters passed to PeakPickerHiRes", pepi_param, 3);

    PeakPickerHiRes pp;
    pp.setLogType(log_type_);
    pp.setParameters(pepi_param);

    if (process_option == "lowmemory")
    {
      return doLowMemAlgorithm(pp);
    }

    //-------------------------------------------------------------
    // loading input
    //-------------------------------------------------------------
    PeakMap ms_exp_raw;
    FileHandler().loadExperiment(in, ms_exp_raw, {FileTypes::MZML}, log_type_);

    if (ms_exp_raw.empty() && ms_exp_raw.getChromatograms().empty())
    {
      OPENMS_LOG_WARN << "The given file does not contain any conventional peak data, but might"
                         " contain chromatograms. This tool currently cannot handle them, sorry.";
      return INCOMPATIBLE_INPUT_DATA;
    }

    //check if spectra are sorted
    for (Size i = 0; i < ms_exp_raw.size(); ++i)
    {
      if (!ms_exp_raw[i].isSorted())
      {
        writeLogError_("Error: Not all spectra are sorted according to peak m/z positions. Use FileFilter to sort the input!");
        return INCOMPATIBLE_INPUT_DATA;
      }
    }

    //check if chromatograms are sorted
    for (Size i = 0; i < ms_exp_raw.getChromatograms().size(); ++i)
    {
      if (!ms_exp_raw.getChromatogram(i).isSorted())
      {
        writeLogError_("Error: Not all chromatograms are sorted according to peak m/z positions. Use FileFilter to sort the input!");
        return INCOMPATIBLE_INPUT_DATA;
      }
    }

    //-------------------------------------------------------------
    // pick
    //-------------------------------------------------------------
    PeakMap ms_exp_peaks;
    // by default refuse to pick already-centroided data; "-force" skips this check
    bool check_spectrum_type = !getFlag_("force");
    pp.pickExperiment(ms_exp_raw, ms_exp_peaks, check_spectrum_type);

    //-------------------------------------------------------------
    // writing output
    //-------------------------------------------------------------

    //annotate output with data processing info
    addDataProcessing_(ms_exp_peaks, getProcessingInfo_(DataProcessing::PEAK_PICKING));

    FileHandler().storeExperiment(out, ms_exp_peaks, {FileTypes::MZML});

    return EXECUTION_OK;
  }

  // parameters
  String in;   ///< input mzML path (also read by doLowMemAlgorithm)
  String out;  ///< output mzML path
};
/// Command-line entry point: instantiate the tool and delegate to the TOPP framework.
int main(int argc, const char ** argv)
{
  return TOPPPeakPickerHiRes().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/MRMPairFinder.cpp | .cpp | 12,275 | 314 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Andreas Bertsch $
// --------------------------------------------------------------------------
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/KERNEL/Feature.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <map>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_MRMPairFinder MRMPairFinder
@brief Util which can be used to evaluate pairs of MRM experiments
@experimental This software is experimental and might contain bugs!
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_MRMPairFinder.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_MRMPairFinder.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
// simple helper struct which stores
// a SILAC pair, with m/z value rt
// simple helper struct which stores one SILAC transition pair:
// the fragment m/z of the light and heavy variant plus the expected retention time.
// Members are zero-initialized to avoid reading indeterminate values if a field
// is ever left unassigned.
struct SILAC_pair
{
  double mz_light{0.0}; // fragment m/z of the light-labeled variant
  double mz_heavy{0.0}; // fragment m/z of the heavy-labeled variant
  double rt{0.0};       // expected retention time (seconds)
};
// helper struct which stores the
// SILAC_pair which it is matched to
// helper struct which couples a matched Feature with the index of the
// SILAC_pair it was assigned to
struct MatchedFeature
{
  Feature f;  // copy of the matched feature
  Size idx;   // index into the SILAC_pair vector

  MatchedFeature(const Feature& matched_feature, Size pair_index) :
    f(matched_feature),
    idx(pair_index)
  {
  }
};
// This struct store quantitation for one scan
// for fast access to defined pair
// stores the quantitation of one XIC pair (light/heavy intensity)
// together with the index of the SILAC_pair it belongs to
struct SILACQuantitation
{
  double light_intensity;  // intensity of the light variant
  double heavy_intensity;  // intensity of the heavy variant
  Size idx;                // index into the SILAC_pair vector

  SILACQuantitation(double light, double heavy, Size pair_index) :
    light_intensity(light),
    heavy_intensity(heavy),
    idx(pair_index)
  {
  }
};
/**
  @brief TOPP tool that matches light/heavy MRM features against a user-supplied pair file
  and reports intensity ratios per pair.

  Fixes over the previous revision:
  - the best-pair search never updated @p best_deviation, so the *last* pair within
    tolerance won instead of the one with the smallest m/z deviation;
  - the absolute deviation of the ratios was computed over an (almost always) empty
    range because the normalization by the summed intensity was accidentally applied
    to the iterator offset instead of the absdev result;
  - typo in the "out" parameter description ("were" -> "where").
*/
class TOPPMRMPairFinder :
  public TOPPBase
{
public:
  TOPPMRMPairFinder() :
    TOPPBase("MRMPairFinder", "Util which can be used to evaluate labeled pair ratios on MRM features.")
  {
  }

protected:
  /// Register input/output files and the matching tolerances.
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Input featureXML file containing the features of the MRM experiment spectra.");
    setValidFormats_("in", ListUtils::create<String>("featureXML"));
    registerInputFile_("pair_in", "<file>", "", "Pair-file in the format: prec-m/z-light prec-m/z-heavy frag-m/z-light frag-m/z-heavy rt");
    setValidFormats_("pair_in", ListUtils::create<String>("csv"));
    registerOutputFile_("out", "<file>", "", "Output consensusXML file where the pairs of the features will be written to.");
    setValidFormats_("out", ListUtils::create<String>("consensusXML"));
    registerOutputFile_("feature_out", "<file>", "", "Output featureXML file, only written if given, skipped otherwise.", false);
    setValidFormats_("feature_out", ListUtils::create<String>("featureXML"));
    registerDoubleOption_("mass_tolerance", "<tolerance>", 0.01, "Precursor mass tolerance which is used for the pair finding and the matching of the given pair m/z values to the features.", false, true);
    setMinFloat_("mass_tolerance", 0.0);
    registerDoubleOption_("RT_tolerance", "<tolerance>", 200, "Maximal deviation in RT dimension in seconds a feature can have when comparing to the RT values given in the pair file.", false, true);
    setMinFloat_("RT_tolerance", 0.0);
    registerDoubleOption_("RT_pair_tolerance", "<tolerance>", 5, "Maximal deviation in RT dimension in seconds the two partners of a pair is allowed to have.", false, true);
    setMinFloat_("RT_pair_tolerance", 0.0);
  }

  ExitCodes main_(int, const char**) override
  {
    //-------------------------------------------------------------
    // parsing parameters
    //-------------------------------------------------------------
    String in(getStringOption_("in"));
    String out(getStringOption_("out"));
    String feature_out(getStringOption_("feature_out"));
    String pair_in(getStringOption_("pair_in"));
    double mass_tolerance(getDoubleOption_("mass_tolerance"));
    double RT_tolerance(getDoubleOption_("RT_tolerance"));
    double RT_pair_tolerance(getDoubleOption_("RT_pair_tolerance"));

    //-------------------------------------------------------------
    // reading input
    //-------------------------------------------------------------
    FeatureMap all_mrm_features;
    FileHandler().loadFeatures(in, all_mrm_features, {FileTypes::FEATUREXML});

    // read pair file: one SILAC_pair per line, grouped by light and heavy precursor m/z
    ifstream is(pair_in.c_str());
    String line;
    std::map<double, std::map<double, vector<SILAC_pair> > > pairs;
    while (getline(is, line))
    {
      line.trim();
      if (line.empty() || line[0] == '#') // skip blank lines and comments
      {
        continue;
      }
      vector<String> split;
      line.split(' ', split);
      if (split.empty()) // fall back to tab-separated columns
      {
        line.split('\t', split);
      }
      if (split.size() != 5)
      {
        cerr << "missformated line ('" << line << "') should be (space separated) 'prec-m/z-light prec-m/z-heavy frag-m/z-light frag-m/z-heavy rt'" << endl;
        continue;
      }
      SILAC_pair p;
      double prec_mz_light = split[0].toDouble();
      double prec_mz_heavy = split[1].toDouble();
      p.mz_light = split[2].toDouble();
      p.mz_heavy = split[3].toDouble();
      p.rt = split[4].toDouble();
      pairs[prec_mz_light][prec_mz_heavy].push_back(p);
    }
    is.close();

    //-------------------------------------------------------------
    // calculations
    //-------------------------------------------------------------
    ConsensusMap results_map;
    results_map.getColumnHeaders()[0].label = "light";
    results_map.getColumnHeaders()[0].filename = in;
    results_map.getColumnHeaders()[1].label = "heavy";
    results_map.getColumnHeaders()[1].filename = in;

    // collect the different MRM XIC pairs for each SILAC pair as quantlets,
    // then calculate the ratio over the quantlets and some statistics
    FeatureMap all_features;
    for (std::map<double, std::map<double, vector<SILAC_pair> > >::const_iterator it1 = pairs.begin(); it1 != pairs.end(); ++it1)
    {
      for (std::map<double, vector<SILAC_pair> >::const_iterator it2 = it1->second.begin(); it2 != it1->second.end(); ++it2)
      {
        vector<SILACQuantitation> quantlets;
        writeDebug_("Analyzing SILAC pair: " + String(it1->first) + " <-> " + String(it2->first), 3);
        Size idx = 0;
        for (vector<SILAC_pair>::const_iterator pit = it2->second.begin(); pit != it2->second.end(); ++pit, ++idx)
        {
          // collect all features compatible with the light resp. heavy transition
          FeatureMap feature_map_light, feature_map_heavy;
          for (FeatureMap::const_iterator it = all_mrm_features.begin(); it != all_mrm_features.end(); ++it)
          {
            if (fabs((double)it->getMetaValue("MZ") - it1->first) < mass_tolerance &&
                fabs(it->getMZ() - pit->mz_light) < mass_tolerance &&
                fabs(it->getRT() - pit->rt) < RT_tolerance)
            {
              feature_map_light.push_back(*it);
            }
            if (fabs((double)it->getMetaValue("MZ") - it2->first) < mass_tolerance &&
                fabs(it->getMZ() - pit->mz_heavy) < mass_tolerance &&
                fabs(it->getRT() - pit->rt) < RT_tolerance)
            {
              feature_map_heavy.push_back(*it);
            }
          }

          // search if feature maps to m/z value of pair
          vector<MatchedFeature> light, heavy;
          for (FeatureMap::const_iterator fit = feature_map_light.begin(); fit != feature_map_light.end(); ++fit)
          {
            all_features.push_back(*fit);
            light.push_back(MatchedFeature(*fit, idx));
          }
          for (FeatureMap::const_iterator fit = feature_map_heavy.begin(); fit != feature_map_heavy.end(); ++fit)
          {
            all_features.push_back(*fit);
            heavy.push_back(MatchedFeature(*fit, idx));
          }

          if (!heavy.empty() && !light.empty())
          {
            writeDebug_("Finding best feature pair out of " + String(light.size()) + " light and " + String(heavy.size()) + " heavy matching features.", 1);
            // now find "good" matches, i.e. the pair with the smallest m/z deviation
            Feature best_light, best_heavy;
            double best_deviation(numeric_limits<double>::max());
            Size best_idx(it2->second.size()); // sentinel: "no pair found yet"
            for (vector<MatchedFeature>::const_iterator fit1 = light.begin(); fit1 != light.end(); ++fit1)
            {
              for (vector<MatchedFeature>::const_iterator fit2 = heavy.begin(); fit2 != heavy.end(); ++fit2)
              {
                if (fit1->idx != fit2->idx || fabs(fit1->f.getRT() - fit2->f.getRT()) > RT_pair_tolerance)
                {
                  continue;
                }
                double deviation(0);
                deviation = fabs(fit1->f.getMZ() - it2->second[fit1->idx].mz_light) +
                            fabs(fit2->f.getMZ() - it2->second[fit2->idx].mz_heavy);
                if (deviation < best_deviation && deviation < mass_tolerance)
                {
                  // remember the winning deviation; without this update the last
                  // pair within tolerance would win instead of the closest one
                  best_deviation = deviation;
                  best_light = fit1->f;
                  best_heavy = fit2->f;
                  best_idx = fit1->idx;
                }
              }
            }

            if (best_idx == it2->second.size()) // no valid pair found
            {
              continue;
            }

            // build a consensus feature averaging the light/heavy coordinates
            ConsensusFeature SILAC_feature;
            SILAC_feature.setMZ((best_light.getMZ() + best_heavy.getMZ()) / 2.0);
            SILAC_feature.setRT((best_light.getRT() + best_heavy.getRT()) / 2.0);
            SILAC_feature.insert(0, best_light);
            SILAC_feature.insert(1, best_heavy);
            results_map.push_back(SILAC_feature);

            quantlets.push_back(SILACQuantitation(best_light.getIntensity(), best_heavy.getIntensity(), best_idx));
            writeDebug_("Ratio of XIC: " + String(best_heavy.getIntensity() / best_light.getIntensity()) + " " + String(best_light.getMZ()) + " <-> " + String(best_heavy.getMZ()) + " @" + String(SILAC_feature.getRT()) + " RT-heavy=" + String(best_heavy.getRT()) + ", RT-light=" + String(best_light.getRT()) + ", RT-diff=" + String(best_heavy.getRT() - best_light.getRT()) +
                        " avg. int " + String((best_heavy.getIntensity() + best_light.getIntensity()) / 2.0), 1);
          }
        }

        writeDebug_("Quantitation of pair " + String(it1->first) + " <-> " + String(it2->first) + " (#XIC pairs for quantation=" + String(quantlets.size()) + ")", 1);
        if (quantlets.empty())
        {
          continue;
        }

        // simply add up all intensities and calculate the final ratio
        double light_sum(0), heavy_sum(0);
        vector<double> light_ints, heavy_ints, ratios;
        for (vector<SILACQuantitation>::const_iterator qit1 = quantlets.begin(); qit1 != quantlets.end(); ++qit1)
        {
          light_sum += qit1->light_intensity;
          light_ints.push_back(qit1->light_intensity);
          heavy_sum += qit1->heavy_intensity;
          heavy_ints.push_back(qit1->heavy_intensity);
          // intensity-weighted ratio: h/l weighted by the pair's total intensity
          ratios.push_back(qit1->heavy_intensity / qit1->light_intensity * (qit1->heavy_intensity + qit1->light_intensity));
        }

        // absolute deviation of the weighted ratios, normalized by the total intensity;
        // previously the division by (light_sum + heavy_sum) was applied to the iterator
        // offset, which made absdev operate on an (almost always) empty range
        double absdev_ratios = Math::absdev(ratios.begin(), ratios.end()) / (light_sum + heavy_sum);
        cout << "Ratio: " << it1->first << " <-> " << it2->first << " @ " << it2->second.begin()->rt << " s, ratio(h/l) " << heavy_sum / light_sum << " +/- " << absdev_ratios << " " << "(#XIC-pairs for quantation: " + String(ratios.size()) + " )" << endl;
      }
    }

    //-------------------------------------------------------------
    // writing output
    //-------------------------------------------------------------
    if (!feature_out.empty())
    {
      FileHandler().storeFeatures(feature_out, all_features, {FileTypes::FEATUREXML});
    }
    writeDebug_("Writing output", 1);
    FileHandler().storeConsensusFeatures(out, results_map, {FileTypes::CONSENSUSXML});

    return EXECUTION_OK;
  }
};
/// Command-line entry point: instantiate the tool and delegate to the TOPP framework.
int main(int argc, const char** argv)
{
  return TOPPMRMPairFinder().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/FeatureLinkerLabeled.cpp | .cpp | 3,436 | 100 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Marc Sturm, Clemens Groepl, Steffen Sass $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithmLabeled.h>
#include "FeatureLinkerBase.cpp"
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_FeatureLinkerLabeled FeatureLinkerLabeled
@brief Groups corresponding isotope-labeled features in a feature map.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> potential predecessor tools </td>
<td VALIGN="middle" ROWSPAN=3> → FeatureLinkerLabeled →</td>
<th ALIGN = "center"> potential successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=2> @ref TOPP_FeatureFinderCentroided @n (or another feature detection algorithm) </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_ProteinQuantifier </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_TextExporter </td>
</tr>
</table>
</CENTER>
This tool provides an algorithm for grouping corresponding features in isotope-labeled experiments. For more details and algorithm-specific parameters (set in the ini file) see "Detailed Description" in the @ref OpenMS::FeatureGroupingAlgorithmLabeled "algorithm documentation".
FeatureLinkerLabeled takes one feature map (featureXML file) and stores the corresponding features in a consensus map (consensusXML file). Feature maps can be created from MS experiments (peak data) using one of the FeatureFinder TOPP tools.
@see @ref TOPP_FeatureLinkerUnlabeled @ref TOPP_FeatureLinkerUnlabeledQT
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_FeatureLinkerLabeled.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_FeatureLinkerLabeled.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
/**
  @brief TOPP tool wrapper around FeatureGroupingAlgorithmLabeled.

  Registers the featureXML input / consensusXML output plus the algorithm
  parameter subsection and delegates all work to the shared base-class driver.
*/
class TOPPFeatureLinkerLabeled :
  public TOPPFeatureLinkerBase
{
public:
  TOPPFeatureLinkerLabeled() :
    TOPPFeatureLinkerBase("FeatureLinkerLabeled", "Groups corresponding isotope-labeled features in a feature map.")
  {
  }

protected:
  /// Register I/O options; algorithm parameters live in their own INI subsection.
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Input file", true);
    setValidFormats_("in", ListUtils::create<String>("featureXML"));
    registerOutputFile_("out", "<file>", "", "Output file", true);
    setValidFormats_("out", ListUtils::create<String>("consensusXML"));
    registerSubsection_("algorithm", "Algorithm parameters section");
  }

  /// Expose the algorithm's defaults as the "algorithm" subsection.
  Param getSubsectionDefaults_(const String & /*section*/) const override
  {
    FeatureGroupingAlgorithmLabeled grouping;
    return grouping.getParameters();
  }

  /// Run the shared linker driver in "labeled" mode (single input file).
  ExitCodes main_(int, const char **) override
  {
    FeatureGroupingAlgorithmLabeled grouping;
    return TOPPFeatureLinkerBase::common_main_(&grouping, true);
  }
};
/// Command-line entry point: instantiate the tool and delegate to the TOPP framework.
int main(int argc, const char ** argv)
{
  return TOPPFeatureLinkerLabeled().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/FeatureLinkerBase.cpp | .cpp | 12,092 | 354 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Marc Sturm, Clemens Groepl, Steffen Sass $
// --------------------------------------------------------------------------
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FORMAT/FileTypes.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithm.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <OpenMS/CONCEPT/ProgressLogger.h>
#include <OpenMS/METADATA/ExperimentalDesign.h>
#include <OpenMS/FORMAT/ExperimentalDesignFile.h>
#include <OpenMS/KERNEL/ConversionHelper.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <iomanip> // setw
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_FeatureLinkerBase FeatureLinkerBase
@brief Base class for different FeatureLinker tools.
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
/**
  @brief Shared driver for all FeatureLinker TOPP tools.

  Derived tools (labeled/unlabeled variants) instantiate a concrete
  FeatureGroupingAlgorithm and call common_main_(), which handles I/O,
  optional experimental-design/fraction support, grouping and output.

  Fixes over the previous revision (user-facing messages only):
  - "consensusXML als input" -> "consensusXML as input" (stray German word);
  - missing space in the map_index warning ("assign aconsistent" -> "assign a consistent").
*/
class TOPPFeatureLinkerBase :
  public TOPPBase,
  public ProgressLogger
{
public:
  TOPPFeatureLinkerBase(String name, String description, bool official = true) :
    TOPPBase(name, description, official)
  {
  }

protected:
  void registerOptionsAndFlags_() override // only for "unlabeled" algorithms!
  {
    registerInputFileList_("in", "<files>", ListUtils::create<String>(""), "input files separated by blanks", true);
    setValidFormats_("in", ListUtils::create<String>("featureXML,consensusXML"));
    registerOutputFile_("out", "<file>", "", "Output file", true);
    setValidFormats_("out", ListUtils::create<String>("consensusXML"));
    registerInputFile_("design", "<file>", "", "input file containing the experimental design", false);
    setValidFormats_("design", ListUtils::create<String>("tsv"));
    addEmptyLine_();
    registerFlag_("keep_subelements", "For consensusXML input only: If set, the sub-features of the inputs are transferred to the output.");
  }

  /**
    @brief Load the inputs, run @p algorithm on them and store the resulting consensus map.

    @param algorithm concrete grouping algorithm (owned by the caller)
    @param labeled if true, a single featureXML is expected ("in" is a single-file option)
    @return EXECUTION_OK on success, ILLEGAL_PARAMETERS on inconsistent inputs
  */
  ExitCodes common_main_(FeatureGroupingAlgorithm * algorithm,
                         bool labeled = false)
  {
    //-------------------------------------------------------------
    // parameter handling
    //-------------------------------------------------------------
    StringList ins;
    if (labeled)
    {
      ins.push_back(getStringOption_("in"));
    }
    else
    {
      ins = getStringList_("in");
    }
    String out = getStringOption_("out");

    //-------------------------------------------------------------
    // check for valid input
    //-------------------------------------------------------------
    // check if all input files have the correct type (mixing featureXML and consensusXML is not allowed)
    FileTypes::Type file_type = FileHandler::getType(ins[0]);
    for (Size i = 0; i < ins.size(); ++i)
    {
      if (FileHandler::getType(ins[i]) != file_type)
      {
        writeLogError_("Error: All input files must be of the same type!");
        return ILLEGAL_PARAMETERS;
      }
    }

    //-------------------------------------------------------------
    // set up algorithm
    //-------------------------------------------------------------
    Param algorithm_param = getParam_().copy("algorithm:", true);
    writeDebug_("Used algorithm parameters", algorithm_param, 3);
    algorithm->setParameters(algorithm_param);

    //-------------------------------------------------------------
    // perform grouping
    //-------------------------------------------------------------
    // load input
    ConsensusMap out_map;
    StringList ms_run_locations;

    String design_file;
    // TODO: support design in labeled feature linker
    if (!labeled)
    {
      design_file = getStringOption_("design");
    }

    if (file_type == FileTypes::CONSENSUSXML && !design_file.empty())
    {
      writeLogError_("Error: Using fractionated design with consensusXML as input is not supported!");
      return ILLEGAL_PARAMETERS;
    }

    if (file_type == FileTypes::FEATUREXML)
    {
      OPENMS_LOG_INFO << "Linking " << ins.size() << " featureXMLs." << endl;
      //-------------------------------------------------------------
      // Extract (optional) fraction identifiers and associate with featureXMLs
      //-------------------------------------------------------------

      // determine map of fractions to MS files
      map<unsigned, vector<String>> frac2files;
      if (!design_file.empty())
      {
        // parse design file and determine fractions
        ExperimentalDesign ed = ExperimentalDesignFile::load(design_file, false);

        // determine if design defines more than one fraction
        frac2files = ed.getFractionToMSFilesMapping();
        writeDebug_(String("Grouping ") + String(ed.getNumberOfFractions()) + " fractions.", 3);

        // check if all fractions have the same number of MS runs associated
        if (!ed.sameNrOfMSFilesPerFraction())
        {
          writeLogError_("Error: Number of runs must match for every fraction!");
          return ILLEGAL_PARAMETERS;
        }
      }
      else // no design file given
      {
        for (Size i = 0; i != ins.size(); ++i)
        {
          frac2files[1].emplace_back(String("file") + String(i)); // associate each run with fraction 1
        }
      }

      vector<FeatureMap > maps(ins.size());
      FileHandler f;
      FeatureFileOptions param = f.getFeatOptions();
      // to save memory don't load convex hulls and subordinates
      param.setLoadSubordinates(false);
      param.setLoadConvexHull(false);
      f.setFeatOptions(param);

      Size progress = 0;
      setLogType(ProgressLogger::CMD);
      startProgress(0, ins.size(), "reading input");
      for (Size i = 0; i < ins.size(); ++i)
      {
        FeatureMap tmp;
        f.loadFeatures(ins[i], tmp, {FileTypes::FEATUREXML});

        StringList ms_runs;
        tmp.getPrimaryMSRunPath(ms_runs);

        // associate mzML file with map i in consensusXML
        if (ms_runs.size() > 1 || ms_runs.empty())
        {
          OPENMS_LOG_WARN << "Exactly one MS run should be associated with a FeatureMap. "
                          << ms_runs.size()
                          << " provided." << endl;
        }
        else
        {
          out_map.getColumnHeaders()[i].filename = ms_runs.front();
        }
        out_map.getColumnHeaders()[i].size = tmp.size();
        out_map.getColumnHeaders()[i].unique_id = tmp.getUniqueId();

        // copy over information on the primary MS run
        ms_run_locations.insert(ms_run_locations.end(), ms_runs.begin(), ms_runs.end());

        // to save memory, remove convex hulls, subordinates:
        for (Feature& ft : tmp)
        {
          String adduct;
          String group;
          // exception: adduct information is preserved across the meta-info wipe
          if (ft.metaValueExists(Constants::UserParam::DC_CHARGE_ADDUCTS))
          {
            adduct = ft.getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS);
          }
          if (ft.metaValueExists(Constants::UserParam::ADDUCT_GROUP))
          {
            group = ft.getMetaValue(Constants::UserParam::ADDUCT_GROUP);
          }
          ft.getSubordinates().clear();
          ft.getConvexHulls().clear();
          ft.clearMetaInfo();
          if (!adduct.empty())
          {
            ft.setMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS, adduct);
          }
          if (!group.empty())
          {
            // NOTE(review): value was read under ADDUCT_GROUP but is restored under the
            // literal key "Group" — confirm downstream consumers expect this key
            ft.setMetaValue("Group", group);
          }
        }
        maps[i] = tmp;
        maps[i].updateRanges();

        setProgress(progress++);
      }
      endProgress();

      // exception for "labeled" algorithms: copy file descriptions
      if (labeled)
      {
        out_map.getColumnHeaders()[1] = out_map.getColumnHeaders()[0];
        out_map.getColumnHeaders()[0].label = "light";
        out_map.getColumnHeaders()[1].label = "heavy";
        ms_run_locations.push_back(ms_run_locations[0]);
      }

      ////////////////////////////////////////////////////
      // invoke feature grouping algorithm

      if (frac2files.size() == 1) // group one fraction
      {
        algorithm->group(maps, out_map);
      }
      else // group multiple fractions
      {
        writeDebug_(String("Stored in ") + String(maps.size()) + " maps.", 3);
        for (Size i = 1; i <= frac2files.size(); ++i)
        {
          vector<FeatureMap> fraction_maps;
          // TODO FRACTIONS: here we assume that the order of featureXML is from fraction 1..n
          // we should check if these are shuffled and error / warn
          // NOTE(review): the index always starts at 0 for every fraction — verify this is
          // intended and not supposed to advance across fractions
          for (size_t feature_map_index = 0; feature_map_index != frac2files[i].size(); ++feature_map_index)
          {
            fraction_maps.push_back(maps[feature_map_index]);
          }
          algorithm->group(fraction_maps, out_map);
        }
      }
    }
    else
    {
      //TODO isn't it better to have this option/functionality in the FeatureGroupingAlgorithm class?
      // Otherwise everyone has to remember e.g. to annotate the old map_index etc.
      bool keep_subelements = getFlag_("keep_subelements");
      vector<ConsensusMap> maps(ins.size());
      FileHandler f;
      for (Size i = 0; i < ins.size(); ++i)
      {
        f.loadConsensusFeatures(ins[i], maps[i], {FileTypes::CONSENSUSXML});
        maps[i].updateRanges();
        // copy over information on the primary MS run
        StringList ms_runs;
        maps[i].getPrimaryMSRunPath(ms_runs);
        ms_run_locations.insert(ms_run_locations.end(), ms_runs.begin(), ms_runs.end());

        if (keep_subelements)
        {
          // preserve the original map_index so sub-features stay traceable after regrouping
          auto saveOldMapIndex =
            [](PeptideIdentification &p)
            {
              if (p.metaValueExists("map_index"))
              {
                p.setMetaValue("old_map_index", p.getMetaValue("map_index"));
              }
              else
              {
                OPENMS_LOG_WARN << "Warning: map_index not found in PeptideID. The tool will not be able to assign a "
                                   "consistent one. Check the settings of previous tools." << std::endl;
              }
            };
          maps[i].applyFunctionOnPeptideIDs(saveOldMapIndex, true);
        }
      }
      // group
      algorithm->group(maps, out_map);

      // set file descriptions:
      if (!keep_subelements)
      {
        for (Size i = 0; i < ins.size(); ++i)
        {
          out_map.getColumnHeaders()[i].filename = ins[i];
          out_map.getColumnHeaders()[i].size = maps[i].size();
          out_map.getColumnHeaders()[i].unique_id = maps[i].getUniqueId();
        }
      }
      else
      {
        // components of the output map are not the input maps themselves, but
        // the components of the input maps:
        algorithm->transferSubelements(maps, out_map);
      }
    }

    // assign unique ids
    out_map.applyMemberFunction(&UniqueIdInterface::setUniqueId);

    // annotate output with data processing info
    addDataProcessing_(out_map,
                       getProcessingInfo_(DataProcessing::FEATURE_GROUPING));

    // sort list of peptide identifications in each consensus feature by map index
    out_map.sortPeptideIdentificationsByMapIndex();

    // write output
    FileHandler().storeConsensusFeatures(out, out_map, {FileTypes::CONSENSUSXML});

    // some statistics
    map<Size, UInt> num_consfeat_of_size;
    for (const ConsensusFeature& cf : out_map)
    {
      ++num_consfeat_of_size[cf.size()];
    }

    OPENMS_LOG_INFO << "Number of consensus features:" << endl;
    for (map<Size, UInt>::reverse_iterator i = num_consfeat_of_size.rbegin();
         i != num_consfeat_of_size.rend(); ++i)
    {
      OPENMS_LOG_INFO << "  of size " << setw(2) << i->first << ": " << setw(6)
                      << i->second << endl;
    }
    OPENMS_LOG_INFO << "  total:      " << setw(6) << out_map.size() << endl;

    return EXECUTION_OK;
  }
};
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/FeatureFinderLFQ.cpp | .cpp | 8,489 | 214 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Mark Ivanov, Timo Sachsenberg $
// --------------------------------------------------------------------------
/**
* @page TOPP_FeatureFinderLFQ FeatureFinderLFQ
*
* @brief Feature detection for LC-MS1 data with ion mobility support (EXPERIMENTAL)
*
* <B>Note: This tool is experimental and under active development. The interface and behavior may change.</B>
*
* This TOPP tool is a C++ reimplementation of the Biosaur2 feature detection algorithm.
* It detects peptide features in centroided LC-MS1 data (with optional profile mode support) through:
* 1. Grouping peaks across scans into "hills" (continuous m/z traces)
* 2. Splitting hills at valley points to separate co-eluting species
* 3. Detecting isotope patterns based on expected mass differences and intensity correlations
* 4. Calculating comprehensive feature properties (m/z, RT, intensity, charge state)
*
* <B>Key Features:</B>
* - FAIMS compensation voltage grouping for FAIMS-enabled instruments
* - Ion mobility-aware processing for PASEF/TIMS data (2D centroiding in m/z and ion mobility space)
* - TOF-specific intensity filtering for time-of-flight instruments
* - Automatic mass calibration to improve detection accuracy
* - Profile mode support via PeakPickerHiRes centroiding
* - Export to featureXML and Biosaur2-compatible TSV formats
*
* The tool closely mirrors the Python reference implementation to ensure reproducible results
* and exposes all core parameters through the INI file for fine-tuning. Besides the mandatory
* featureXML output, optional TSV exports for both the peptide features and raw hills can be
* enabled for quality control and downstream analysis.
*
* <B>Reference:</B>
* Abdrakhimov, et al. Biosaur: An open-source Python software for liquid chromatography-mass
* spectrometry peptide feature detection with ion mobility support.
* Rapid Communications in Mass Spectrometry, 2022. https://doi.org/10.1002/rcm.9045
*
* <B>The command line parameters of this tool are:</B>
* @verbinclude TOPP_FeatureFinderLFQ.cli
* <B>INI file documentation of this tool:</B>
* @htmlinclude TOPP_FeatureFinderLFQ.html
*/
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/FEATUREFINDER/Biosaur2Algorithm.h>
#include <OpenMS/FORMAT/FeatureXMLFile.h>
#include <OpenMS/FORMAT/MzMLFile.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/SYSTEM/StopWatch.h>
#include <OpenMS/SYSTEM/File.h>
#include <vector>
using namespace OpenMS;
using namespace std;
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
/**
* @brief TOPP wrapper that exposes the Biosaur2 feature detection pipeline on the command line.
*
* The tool handles mzML I/O, forwards the full set of @ref OpenMS::Biosaur2Algorithm parameters and optionally writes TSV exports for
* features and hills. All algorithmic heavy lifting is performed by the reusable Biosaur2Algorithm class so the TOPP wrapper remains
* focused on parameter handling and reporting.
*/
/**
 * @brief TOPP wrapper that exposes the Biosaur2 feature detection pipeline on the command line.
 *
 * The tool handles mzML I/O, forwards the full set of @ref OpenMS::Biosaur2Algorithm parameters and optionally writes TSV exports for
 * features and hills. All algorithmic heavy lifting is performed by the reusable Biosaur2Algorithm class so the TOPP wrapper remains
 * focused on parameter handling and reporting.
 *
 * Fix over the previous revision: when deriving the default hills file name from "out",
 * only the extension of the file-name component is stripped; previously a dot inside a
 * directory name (e.g. "results.d/output") truncated the whole path.
 */
class TOPPFeatureFinderLFQ final :
  public TOPPBase
{
public:
  TOPPFeatureFinderLFQ() :
    TOPPBase("FeatureFinderLFQ", "Feature detection for LC-MS1 data (EXPERIMENTAL)", false)
  {
  }

protected:
  /// Declare command-line options and forward the algorithm defaults into the INI namespace.
  void registerOptionsAndFlags_() override
  {
    const Param& defaults = algorithm_.getDefaults();
    registerInputFile_("in", "<file>", "", "Input mzML file (centroided data)");
    setValidFormats_("in", ListUtils::create<String>("mzML"));
    registerOutputFile_("out", "<file>", "", "Output featureXML file");
    setValidFormats_("out", ListUtils::create<String>("featureXML"));
    registerOutputFile_("out_tsv", "<file>", "", "Optional: output TSV file (Biosaur2 format)", false);
    setValidFormats_("out_tsv", ListUtils::create<String>("tsv"));
    registerOutputFile_("out_hills", "<file>", "", "Optional: write detected hills to TSV", false);
    setValidFormats_("out_hills", ListUtils::create<String>("tsv"));
    registerFlag_("write_hills", "Force writing of hills file even if no output path was provided", false);
    registerFullParam_(defaults);
  }

  /**
   * @brief Execute the Biosaur2 workflow for the configured inputs/outputs.
   *
   * The function loads the requested mzML file, instantiates Biosaur2Algorithm with the user-defined parameters and takes care of
   * exporting featureXML, TSV and hill diagnostics. Meta information such as primary MS run paths and DataProcessing entries are annotated.
   */
  ExitCodes main_(int, const char**) override
  {
    //-------------------------------------------------------------
    // parameter handling
    //-------------------------------------------------------------
    String in = getStringOption_("in");
    String out = getStringOption_("out");
    String out_tsv = getStringOption_("out_tsv");
    String out_hills = getStringOption_("out_hills");
    bool write_hills_flag = getFlag_("write_hills");

    Param algo_param = getParam_().copySubset(algorithm_.getDefaults());
    algorithm_.setParameters(algo_param);

    ProgressLogger progresslogger;
    progresslogger.setLogType(log_type_);
    StopWatch stopwatch;

    progresslogger.startProgress(0, 1, "Loading input mzML");
    stopwatch.start();
    MzMLFile mzml_file;
    PeakFileOptions options;
    options.clearMSLevels();
    options.addMSLevel(1); // only load MS1 level for feature finding
    mzml_file.setOptions(options);
    mzml_file.load(in, algorithm_.getMSData());
    progresslogger.setProgress(1);
    progresslogger.endProgress();
    stopwatch.stop();
    OPENMS_LOG_INFO << "Loaded input file in " << stopwatch.toString() << endl;

    FeatureMap feature_map;
    vector<Biosaur2Algorithm::Hill> hills;
    vector<Biosaur2Algorithm::PeptideFeature> peptide_features;

    stopwatch.reset();
    progresslogger.startProgress(0, 1, "Preprocessing and feature finding");
    stopwatch.start();
    algorithm_.run(feature_map, hills, peptide_features);
    progresslogger.setProgress(1);
    progresslogger.endProgress();
    stopwatch.stop();

    // in test mode, use a stable file URI so test output is reproducible across machines
    String primary_path = getFlag_("test") ? ("file://" + File::basename(in)) : in;
    feature_map.setPrimaryMSRunPath({primary_path}, algorithm_.getMSData());
    addDataProcessing_(feature_map, getProcessingInfo_(DataProcessing::QUANTITATION));
    OPENMS_LOG_INFO << "Preprocessing and feature finding took " << stopwatch.toString() << endl;

    stopwatch.reset();
    progresslogger.startProgress(0, 1, "Writing featureXML output");
    stopwatch.start();
    FeatureXMLFile feature_file;
    feature_file.store(out, feature_map);
    progresslogger.setProgress(1);
    progresslogger.endProgress();
    stopwatch.stop();
    OPENMS_LOG_INFO << "Wrote " << peptide_features.size() << " features to: " << out << endl;
    OPENMS_LOG_INFO << "Writing featureXML took " << stopwatch.toString() << endl;

    if (write_hills_flag || !out_hills.empty())
    {
      stopwatch.reset();
      String hills_file = out_hills;
      if (hills_file.empty())
      {
        // derive "<out without extension>.hills.tsv"; strip only an extension of the
        // file-name component, not a dot occurring inside a directory name
        String base = out;
        Size sep_pos = base.find_last_of("/\\");
        Size dot_pos = base.find_last_of('.');
        if (dot_pos != String::npos && (sep_pos == String::npos || dot_pos > sep_pos))
        {
          base = base.substr(0, dot_pos);
        }
        hills_file = base + ".hills.tsv";
      }
      progresslogger.startProgress(0, 1, "Writing hills TSV");
      stopwatch.start();
      algorithm_.writeHills(hills, hills_file);
      progresslogger.setProgress(1);
      progresslogger.endProgress();
      stopwatch.stop();
      OPENMS_LOG_INFO << "Writing hills TSV took " << stopwatch.toString() << endl;
    }

    if (!out_tsv.empty())
    {
      stopwatch.reset();
      progresslogger.startProgress(0, 1, "Writing feature TSV");
      stopwatch.start();
      algorithm_.writeTSV(peptide_features, out_tsv);
      progresslogger.setProgress(1);
      progresslogger.endProgress();
      stopwatch.stop();
      OPENMS_LOG_INFO << "Writing feature TSV took " << stopwatch.toString() << endl;
    }

    return EXECUTION_OK;
  }

private:
  Biosaur2Algorithm algorithm_;
};
int main(int argc, const char** argv)
{
TOPPFeatureFinderLFQ tool;
return tool.main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/DatabaseSuitability.cpp | .cpp | 14,237 | 297 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Tom Waschischeck $
// $Authors: Tom Waschischeck $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/ANALYSIS/ID/FalseDiscoveryRate.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/PROCESSING/ID/IDFilter.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/QC/DBSuitability.h>
#include <OpenMS/QC/Ms2IdentificationRate.h>
#include <OpenMS/QC/PSMExplainedIonCurrent.h>
#include <algorithm>
#include <cmath>
#include <cstdio>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
// Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_DatabaseSuitability DatabaseSuitability
@brief Calculates the suitability of a database which was used a for peptide identification search. Also reports the quality of LC-MS spectra.
*/
#ifdef OPENMS_HASDOXYGENDOT
/*
@dot
digraph sample_workflow {
node [ style="solid,filled", color=black, fillcolor=grey90, width=1.5, fixedsize=true, shape=square, fontname=Helvetica, fontsize=10 ];
edge [ style="solid" ];
rankdir="LR";
splines=ortho;
mzml [ label="mzML file(s)" shape=oval fillcolor=white group=1];
db [ label="database in question" shape=oval fillcolor=white ];
novor [ label="NovorAdapter" URL="\ref OpenMS::NovorAdapter" group=2];
id_filter [ label="IDFilter" URL="\ref OpenMS::IDFilter" group=2];
id_convert [ label="IDFileConverter" URL="\ref OpenMS::IDFileConverter" group=2];
decoy_db [ label="DecoyDatabase" URL="\ref OpenMS::DecoyDatabase" group=2];
comet [ label="CometAdapter" URL="\ref OpenMS::CometAdapter" group=1];
pep_ind [ label="PeptideIndexer" URL="\ref OpenMS::PeptideIndexer" group=1];
db_suit [ label="DatabaseSuitability" fillcolor="#6F42C1" fontcolor=white group=3];
tsv [ label="optional\ntsv output" shape=oval fillcolor=white group=3];
{rank = same; db_suit; decoy_db;}
mzml -> novor;
mzml -> comet;
comet -> pep_ind;
pep_ind -> db_suit [ xlabel="in_id" fontsize=10 ];
novor -> id_filter;
id_filter -> id_convert;
id_convert -> db_suit [ xlabel="novo_database" fontsize=10 ];
id_convert -> decoy_db;
decoy_db -> db [ dir=back ];
db_suit -> db [ dir=back xlabel="database" fontsize=10 ];
decoy_db -> comet;
mzml -> db_suit [ xlabel="in_spec" fontsize=10 ];
novor -> db_suit [ xlabel="in_novo" fontsize=10 ];
db_suit -> tsv;
}
@enddot
*/
#endif
/**
The metric this tool uses to determine the suitability of a database is based on a de novo model. Therefore it is crucial that your workflow is set up the right way. Above you can see an example.@n
Most importantly the peptide identification search needs to be done with a combination of the database in question and a de novo "database".@n
To generate the de novo "database":
- @ref TOPP_NovorAdapter calculates de novo sequences.
- @ref TOPP_IDFilter can filter out unwanted ones.
- @ref TOPP_IDFileConverter generates the de novo fasta file.
For re-ranking all cases where a peptide hit only found in the de novo "database" scores above a peptide hit found in the actual database are checked. In all these cases the cross-correlation scores of those peptide hits are compared. If they are similar enough, the database hit will be re-ranked to be on top of the de novo hit. You can control how much of cases with similar scores will be re-ranked by using the @p reranking_cutoff_percentile.@n
For this to work it is important @ref TOPP_PeptideIndexer ran before. However it is also crucial that no FDR was performed. This tool does this itself and will crash if a q-value is found. You can still control the FDR that you want to establish using the corresponding flag.
@note For identification search the recommended search engine is Comet because the Comet cross-correlation score is recommended for re-ranking.@n
If you use other search engines re-ranking will be turned off automatically. You can still enforce re-ranking by using the 'force' flag.@n
In this case the tool will use the default score of your search engine. This can result in undefined behaviour. Be warned.@n
The results are written directly into the console. But you can provide an optional tsv output file where the most important results will be exported to.
This tool uses the metrics and algorithms first presented in:@n
<em>Assessing protein sequence database suitability using de novo sequencing. Molecular & Cellular Proteomics. January 1, 2020; 19, 1: 198-208. doi:10.1074/mcp.TIR119.001752.@n
Richard S. Johnson, Brian C. Searle, Brook L. Nunn, Jason M. Gilmore, Molly Phillips, Chris T. Amemiya, Michelle Heck, Michael J. MacCoss.</em>
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_DatabaseSuitability.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_DatabaseSuitability.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
// Citation for the method implemented by this tool; passed to the TOPPBase
// constructor so it is shown in the tool help. Declared 'const' (was mutable):
// it is only ever read, and const also gives it internal linkage.
const Citation c = {"Richard S. Johnson, Brian C. Searle, Brook L. Nunn, Jason M. Gilmore, Molly Phillips, Chris T. Amemiya, Michelle Heck, Michael J. MacCoss",
                    "Assessing protein sequence database suitability using de novo sequencing",
                    "Molecular & Cellular Proteomics. January 1, 2020; 19, 1: 198-208",
                    "10.1074/mcp.TIR119.001752"};
/// TOPP tool computing how suitable a protein database was for a peptide
/// identification search (see the tool documentation above for the required
/// workflow). Besides the suitability score it reports spectral quality
/// (MS2 id rate of high-scoring de novo sequences) and the explained ion current.
class DatabaseSuitability :
  public TOPPBase
{
public:
  DatabaseSuitability() :
    TOPPBase("DatabaseSuitability", "Computes a suitability score for a database which was used for a peptide identification search. Also reports the quality of LC-MS spectra.", true, {c})
  {
  }

protected:
  // this function will be used to register the tool parameters
  // it gets automatically called on tool execution
  /// Defaults for the 'algorithm' subsection come directly from DBSuitability.
  Param getSubsectionDefaults_(const String& /*section*/) const override
  {
    return DBSuitability().getDefaults();
  }

  /// Registers input/output files and the de novo score threshold.
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in_id", "<file>", "", "Input idXML file from a peptide identification search with a combined database. PeptideIndexer is needed, FDR is forbidden.");
    setValidFormats_("in_id", { "idXML" });
    registerInputFile_("in_spec", "<file>", "", "Input MzML file used for the peptide identification");
    setValidFormats_("in_spec", {"mzML"});
    registerInputFile_("in_novo", "<file>", "", "Input idXML file containing de novo peptides (unfiltered)");
    setValidFormats_("in_novo", { "idXML" });
    registerInputFile_("database", "<file>", "", "Input FASTA file of the database in question");
    setValidFormats_("database", { "FASTA" });
    registerInputFile_("novo_database", "<file>", "", "Input deNovo sequences derived from MzML given in 'in_spec' concatenated to one FASTA entry");
    setValidFormats_("novo_database", { "FASTA" });
    registerOutputFile_("out", "<file>", "", "Optional tsv output containing database suitability information as well as spectral quality.", false);
    setValidFormats_("out", {"tsv"});
    registerDoubleOption_("novo_threshold", "double", 60, "Minimum score a de novo sequence has to have to be defined as 'correct'. The default of 60 is proven to be a good estimate for sequences generated by Novor.", false, true);
    setMinFloat_("novo_threshold", 0);
    registerSubsection_("algorithm", "Parameter section for the suitability calculation algorithm");
  }

  // the main_ function is called after all parameters are read
  /// Loads spectra, combined-search ids, de novo ids and both FASTA files, then
  /// computes spectral quality, explained ion current and the suitability score.
  /// Results are logged; optionally a key/value tsv file is written.
  ExitCodes main_(int, const char**) override
  {
    //-------------------------------------------------------------
    // parsing parameters
    //-------------------------------------------------------------
    String in_id = getStringOption_("in_id");
    String in_spec = getStringOption_("in_spec");
    String in_novo = getStringOption_("in_novo");
    String db = getStringOption_("database");
    String novo_db = getStringOption_("novo_database");
    String out = getStringOption_("out");
    double novo_threshold = getDoubleOption_("novo_threshold");

    //-------------------------------------------------------------
    // reading input
    //-------------------------------------------------------------
    FileHandler m;
    PeakFileOptions op;
    op.setMSLevels({2});// only ms2
    m.setOptions(op);
    PeakMap exp;
    m.loadExperiment(in_spec, exp, {FileTypes::MZML});
    FileHandler x;
    vector<ProteinIdentification> prot_ids;
    PeptideIdentificationList pep_ids;
    x.loadIdentifications(in_id, prot_ids, pep_ids, {FileTypes::IDXML});
    // search parameters of the first run are needed below; abort early if absent
    if (prot_ids.empty())
    {
      OPENMS_LOG_ERROR << "No ProteinIdentifications found in idXML given in 'in_id'. Aborting!" << endl;
      return ILLEGAL_PARAMETERS;
    }
    vector<ProteinIdentification> novo_prots;
    PeptideIdentificationList novo_peps;
    x.loadIdentifications(in_novo, novo_prots, novo_peps, {FileTypes::IDXML});
    FASTAFile f;
    vector<FASTAFile::FASTAEntry> database;
    f.load(db, database);
    vector<FASTAFile::FASTAEntry> novo_database;
    f.load(novo_db, novo_database);

    //-------------------------------------------------------------
    // calculations
    //-------------------------------------------------------------
    // keep the pre-filter count for reporting, then drop low-scoring de novo ids
    Size total_number_novo_seqs = novo_peps.size();
    IDFilter::filterHitsByScore(novo_peps, novo_threshold);
    // collect unique top-hit sequences; reject input that is not Novor-scored
    set<AASequence> unique_novo;
    for (const auto& pep_id : novo_peps)
    {
      if (pep_id.getScoreType() != "novorscore")
      {
        OPENMS_LOG_ERROR << in_novo << " contains at least one identification without a novorscore! Make sure this file contains only deNovo sequences." << endl;
        return INPUT_FILE_CORRUPT;
      }
      if (pep_id.getHits().empty())
      {
        continue;
      }
      unique_novo.insert(pep_id.getHits()[0].getSequence());
    }
    // spectral quality = MS2 identification rate of the (filtered) de novo ids
    Ms2IdentificationRate q;
    q.compute(novo_peps, exp, true);
    Ms2IdentificationRate::IdentificationRateData spectral_quality = q.getResults()[0];
    QCBase::SpectraMap mapping;
    mapping.calculateMap(exp);
    // PSMExplainedIonCurrent needs FDR applied, but DBSuitability below requires
    // the FDR-free ids, so the FDR is run on a copy
    PeptideIdentificationList copy_ids(pep_ids); //unattractive solution for now
    FalseDiscoveryRate fdr;
    fdr.apply(copy_ids);
    PSMExplainedIonCurrent eic;
    eic.compute(copy_ids, prot_ids[0].getSearchParameters(), exp, mapping);
    PSMExplainedIonCurrent::Statistics eic_result = eic.getResults()[0];
    // suitability computation consumes pep_ids (moved; do not use afterwards)
    DBSuitability s;
    Param p = getParam_().copy("algorithm:", true);
    s.setParameters(p);
    s.compute(std::move(pep_ids), exp, database, novo_database, prot_ids[0].getSearchParameters());
    DBSuitability::SuitabilityData suit = s.getResults()[0];

    //-------------------------------------------------------------
    // writing output
    //-------------------------------------------------------------
    OPENMS_LOG_INFO << suit.num_top_db << " / " << (suit.num_top_db + suit.num_top_novo) << " top hits were found in the database." << endl;
    OPENMS_LOG_INFO << suit.num_top_novo << " / " << (suit.num_top_db + suit.num_top_novo) << " top hits were only found in the concatenated de novo peptide." << endl;
    OPENMS_LOG_INFO << suit.getCorrectedNovoHits() << " top deNovo hits after correction." << endl;
    OPENMS_LOG_INFO << suit.num_interest << " times scored a de novo hit above a database hit. Of those times " << suit.num_re_ranked << " top de novo hits where re-ranked using a decoy cut-off of: " << suit.cut_off << "." << endl;
    OPENMS_LOG_INFO << "database suitability [0, 1]: " << suit.suitability << endl;
    OPENMS_LOG_INFO << "database suitability after correction: " << suit.getCorrectedSuitability() << endl << endl;
    OPENMS_LOG_INFO << spectral_quality.num_peptide_identification << " / " << total_number_novo_seqs << " de novo sequences are high scoring. Of those " << unique_novo.size() << " are unique." << endl;
    OPENMS_LOG_INFO << spectral_quality.num_ms2_spectra << " ms2 spectra found" << endl;
    OPENMS_LOG_INFO << "spectral quality (id rate of high scoring de novo sequences) [0, 1]: " << spectral_quality.identification_rate << endl << endl;
    OPENMS_LOG_INFO << "avg. explained ion current [0, 1]: " << eic_result.average_correctness << " - variance: " << eic_result.variance_correctness << endl << endl;
    // optional tab-separated key/value export of the main results
    if (!out.empty())
    {
      OPENMS_LOG_INFO << "Writing output to: " << out << endl
                      << endl;
      std::ofstream os(out);
      if (!os.is_open())
      {
        OPENMS_LOG_ERROR << "Output file given in 'out' isn't writable." << endl;
        return CANNOT_WRITE_OUTPUT_FILE;
      }
      os.precision(writtenDigits(double()));
      os << "key\tvalue\n";
      os << "#top_db_hits\t" << suit.num_top_db << "\n";
      os << "#top_novo_hits\t" << suit.num_top_novo << "\n";
      os << "decoy_cut_off\t" << suit.cut_off << "\n";
      os << "correction_factor\t" << suit.getCorrectionFactor() << "\n";
      os << "#corrected_novo_hits\t" << suit.getCorrectedNovoHits() << "\n";
      os << "db_suitability\t" << suit.suitability << "\n";
      os << "corrected_suitability\t" << suit.getCorrectedSuitability() << "\n";
      os << "no_rerank_suitability\t" << suit.suitability_no_rerank << "\n";
      os << "corrected_no_rerank_suitability\t" << suit.suitability_corr_no_rerank << "\n";
      os << "#total_novo_seqs\t" << total_number_novo_seqs << "\n";
      os << "#high_scoring_novo_seqs\t" << spectral_quality.num_peptide_identification << "\n";
      os << "#unique_high_scoring_novo_seqs\t" << unique_novo.size() << "\n";
      os << "#ms2_spectra\t" << spectral_quality.num_ms2_spectra << "\n";
      os << "spectral_quality\t" << spectral_quality.identification_rate << "\n";
      os << "avg_EIC\t" << eic_result.average_correctness << "\n";
      os << "EIC_variance\t" << eic_result.variance_correctness << "\n";
      os.close();
    }
    return EXECUTION_OK;
  }
};
// the actual main function needed to create an executable
int main(int argc, const char** argv)
{
DatabaseSuitability tool;
return tool.main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/CVInspector.cpp | .cpp | 17,366 | 425 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Marc Sturm $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/DATASTRUCTURES/CVMappings.h>
#include <OpenMS/DATASTRUCTURES/CVMappingTerm.h>
#include <OpenMS/DATASTRUCTURES/StringListUtils.h>
#include <OpenMS/FORMAT/CVMappingFile.h>
#include <OpenMS/FORMAT/ControlledVocabulary.h>
#include <OpenMS/FORMAT/TextFile.h>
#include <map>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_CVInspector CVInspector
@brief A tool for visualization and validation of PSI mapping and CV files.
This tool is used to validate the correct use of mapping files and CV files.
It can also generate a HTML representation of mapping file and CV.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_CVInspector.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_CVInspector.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
/// TOPP tool for validating PSI CVMapping files against OBO controlled vocabularies
/// and (optionally) rendering the mapping as an interactive HTML page.
/// Fix vs. previous revision: the generated HTML emitted a stray '</TD>'
/// (" </TD></TD></TR>") when closing a term row; only one <TD> is open at that
/// point, so the row is now closed with " </TD></TR>".
class TOPPCVInspector :
  public TOPPBase
{
public:
  TOPPCVInspector() :
    TOPPBase("CVInspector", "Visualize and validate PSI mapping and CV files.")
  {
  }

protected:
  /// Registers the CV/mapping input files, the ignore list and the optional HTML output.
  void registerOptionsAndFlags_() override
  {
    registerInputFileList_("cv_files", "<files>", StringList(), "List of ontology files in OBO format.");
    setValidFormats_("cv_files", ListUtils::create<String>("obo"));
    registerStringList_("cv_names", "<names>", StringList(), "List of identifiers (one for each ontology file).");
    registerInputFile_("mapping_file", "<file>", "", "Mapping file in CVMapping (XML) format.");
    setValidFormats_("mapping_file", ListUtils::create<String>("XML"));
    registerStringList_("ignore_cv", "<list>", ListUtils::create<String>("UO,PATO,BTO"), "A list of CV identifiers which should be ignored.", false);
    registerOutputFile_("html", "<file>", "", "Writes an HTML version of the mapping file with annotated CV terms", false);
    setValidFormats_("html", ListUtils::create<String>("HTML"));
  }

  /// Recursively appends one HTML line per child of 'accession' to 'file',
  /// indenting by 4 spaces per tree level and annotating each term with its
  /// description (as tooltip), obsolete flag, value type, units and binary-array types.
  void writeTermTree_(const String& accession, const ControlledVocabulary& cv, TextFile& file, UInt indent)
  {
    const ControlledVocabulary::CVTerm& term = cv.getTerm(accession);
    for (set<String>::const_iterator it = term.children.begin(); it != term.children.end(); ++it)
    {
      const ControlledVocabulary::CVTerm& child_term = cv.getTerm(*it);
      String subterm_line;
      for (Size i = 0; i < 4 * indent; ++i) subterm_line += " ";
      String description = child_term.description;
      if (!child_term.synonyms.empty())
      {
        description += String(" -- Synonyms: '") + ListUtils::concatenate(child_term.synonyms, ", ") + "'";
      }
      subterm_line += "- <span title=\"" + description + "\">" + child_term.id + " ! " + child_term.name + "</span>";
      // collect decorations shown in grey after the term name
      StringList tags;
      if (child_term.obsolete)
      {
        tags.push_back("<font color=darkred>obsolete</font>");
      }
      if (child_term.xref_type != ControlledVocabulary::CVTerm::XRefType::NONE)
      {
        tags.push_back("value-type=" + ControlledVocabulary::CVTerm::getXRefTypeName(child_term.xref_type));
      }
      if (!child_term.units.empty())
      {
        StringList units;
        for (set<String>::const_iterator u_it = child_term.units.begin(); u_it != child_term.units.end(); ++u_it)
        {
          units.push_back(*u_it + "!" + cv.getTerm(*u_it).name);
        }
        tags.push_back(String("units=") + ListUtils::concatenate(units, ","));
      }
      if (!child_term.xref_binary.empty())
      {
        StringList types;
        for (StringList::const_iterator u_it = child_term.xref_binary.begin(); u_it != child_term.xref_binary.end(); ++u_it)
        {
          types.push_back(*u_it + "!" + cv.getTerm(*u_it).name);
        }
        tags.push_back(String("binary-array-types=") + ListUtils::concatenate(types, ","));
      }
      if (!tags.empty())
      {
        subterm_line += String("<FONT color=\"grey\"> (") + ListUtils::concatenate(tags, ", ") + ")</FONT>";
      }
      file.addLine(subterm_line + "<BR>");
      writeTermTree_(child_term.id, cv, file, indent + 1);
    }
  }

  /// Loads all CVs and the mapping file. If '-html' is given, writes the HTML
  /// report and returns; otherwise prints allowed terms per mapping rule and the
  /// CV terms that no rule permits.
  ExitCodes main_(int, const char**) override
  {
    StringList cv_files = getStringList_("cv_files");
    StringList cv_names = getStringList_("cv_names");
    if (cv_files.size() != cv_names.size())
    {
      cerr << "Error: You have to specify an identifier for each CV file. Aborting!" << endl;
      return ILLEGAL_PARAMETERS;
    }

    // load cv terms
    ControlledVocabulary cv;
    for (Size i = 0; i < cv_files.size(); ++i)
    {
      cv.loadFromOBO(cv_names[i], cv_files[i]);
    }
    auto terms = cv.getTerms();
    // load mappings from mapping file
    String mapping_file = getStringOption_("mapping_file");
    CVMappings mappings;
    CVMappingFile().load(mapping_file, mappings);

    //store HTML version of mapping and CV
    if (!getStringOption_("html").empty())
    {
      TextFile file;
      file.addLine("<HTML>");
      file.addLine(" <HEAD>");
      file.addLine(" <TITLE>CV mapping file</TITLE>");
      file.addLine(" <SCRIPT language=javascript type='text/javascript'>");
      file.addLine(" function toggleDiv(layer_ref,force_state) ");
      file.addLine(" {");
      file.addLine(" if (document.getElementById(layer_ref).style.display=='none' || force_state=='true')");
      file.addLine(" {");
      file.addLine(" document.getElementById(layer_ref).style.display = 'block';");
      file.addLine(" }");
      file.addLine(" else if (document.getElementById(layer_ref).style.display=='block' || force_state=='false')");
      file.addLine(" {");
      file.addLine(" document.getElementById(layer_ref).style.display = 'none';");
      file.addLine(" }");
      file.addLine(" }");
      file.addLine(" </SCRIPT>");
      file.addLine(" </HEAD>");
      file.addLine(" <BODY>");
      //count the number of terms and add button to expand/collapse all terms
      Int term_count = 0;
      for (vector<CVMappingRule>::const_iterator it = mappings.getMappingRules().begin(); it != mappings.getMappingRules().end(); ++it)
      {
        for (vector<CVMappingTerm>::const_iterator tit = it->getCVTerms().begin(); tit != it->getCVTerms().end(); ++tit)
        {
          ++term_count;
        }
      }
      // build one javascript call that toggles every term div at once
      String expand_all = " <a href=\"javascript:toggleDiv('div0','true')";
      String collapse_all = " <a href=\"javascript:toggleDiv('div0','false')";
      for (Int i = 1; i < term_count; ++i)
      {
        expand_all += String(";toggleDiv('div") + i + "','true')";
        collapse_all += String(";toggleDiv('div") + i + "','false')";
      }
      file.addLine(expand_all + "\">Expand all</a><BR>");
      file.addLine(collapse_all + "\">Collapse all</a>");
      file.addLine(" <TABLE width=100% border=0>");
      term_count = -1;
      for (vector<CVMappingRule>::const_iterator it = mappings.getMappingRules().begin(); it != mappings.getMappingRules().end(); ++it)
      {
        //create rule line
        file.addLine(" <TR><TD colspan=\"2\"><HR></TD></TR>");
        file.addLine(String(" <TR><TD>Identifier:</TD><TD><B>") + it->getIdentifier() + "</B></TD></TR>");
        file.addLine(String(" <TR><TD>Element:</TD><TD><B>") + it->getElementPath() + "</B></TD></TR>");
        if (it->getRequirementLevel() == CVMappingRule::MUST)
        {
          file.addLine(" <TR><TD>Requirement level:</TD><TD><FONT color=\"red\">MUST</FONT></TD></TR>");
        }
        else if (it->getRequirementLevel() == CVMappingRule::SHOULD)
        {
          file.addLine(" <TR><TD>Requirement level:</TD><TD><FONT color=\"orange\">SHOULD</FONT></TD></TR>");
        }
        else if (it->getRequirementLevel() == CVMappingRule::MAY)
        {
          file.addLine(" <TR><TD>Requirement level:</TD><TD><FONT color=\"green\">MAY</FONT></TD></TR>");
        }
        if (it->getCombinationsLogic() == CVMappingRule::AND)
        {
          file.addLine(" <TR><TD>Combination logic:</TD><TD><FONT color=\"red\">AND</FONT></TD></TR>");
        }
        else if (it->getCombinationsLogic() == CVMappingRule::XOR)
        {
          file.addLine(" <TR><TD>Combination logic:</TD><TD><FONT color=\"orange\">XOR</FONT></TD></TR>");
        }
        else if (it->getCombinationsLogic() == CVMappingRule::OR)
        {
          file.addLine(" <TR><TD>Combination logic:</TD><TD><FONT color=\"green\">OR</FONT></TD></TR>");
        }
        //create table with terms
        for (vector<CVMappingTerm>::const_iterator tit = it->getCVTerms().begin(); tit != it->getCVTerms().end(); ++tit)
        {
          //create term line
          String term_line = String(" <TR><TD valign=\"top\">Term:</TD><TD>");
          if (tit->getAllowChildren())
          {
            ++term_count;
            term_line += String("<a href=\"javascript:toggleDiv('div") + term_count + "','')\" style=\"text-decoration:none\" >+</a> ";
          }
          else
          {
            term_line += String(" ");
          }
          //add Term accession, name and description (as popup)
          if (cv.exists(tit->getAccession()))
          {
            const ControlledVocabulary::CVTerm& child_term = cv.getTerm(tit->getAccession());
            String description = child_term.description;
            if (!child_term.synonyms.empty())
            {
              description += String(" -- Synonyms: '") + ListUtils::concatenate(child_term.synonyms, ", ") + "'";
            }
            term_line += "<span title=\"" + description + "\">";
          }
          term_line += tit->getAccession() + " ! " + tit->getTermName();
          if (cv.exists(tit->getAccession()))
          {
            term_line += "</span>";
            //check if term accession and term name correspond to the CV
            const ControlledVocabulary::CVTerm& main_term = cv.getTerm(tit->getAccession());
            if (main_term.name != tit->getTermName())
            {
              cerr << "Warning: Accession '" << tit->getAccession() << "' and name '" << tit->getTermName() << "' do not match. Name should be '" << main_term.name << "'." << endl;
            }
          }
          //tags
          StringList tags;
          if (!tit->getUseTerm())
          {
            tags.push_back("children only");
          }
          if (tit->getIsRepeatable())
          {
            tags.push_back("repeatable");
          }
          if (cv.exists(tit->getAccession()))
          {
            const ControlledVocabulary::CVTerm& term = cv.getTerm(tit->getAccession());
            if (term.obsolete)
            {
              tags.push_back("<font color=darkred>obsolete</font>");
            }
            if (term.xref_type != ControlledVocabulary::CVTerm::XRefType::NONE)
            {
              tags.push_back("value-type=" + ControlledVocabulary::CVTerm::getXRefTypeName(term.xref_type));
            }
            if (!term.units.empty())
            {
              StringList units;
              for (set<String>::const_iterator u_it = term.units.begin(); u_it != term.units.end(); ++u_it)
              {
                units.push_back(*u_it + "!" + cv.getTerm(*u_it).name);
              }
              tags.push_back(String("units=") + ListUtils::concatenate(units, ","));
            }
            if (!term.xref_binary.empty())
            {
              StringList types;
              for (StringList::const_iterator u_it = term.xref_binary.begin(); u_it != term.xref_binary.end(); ++u_it)
              {
                types.push_back(*u_it + "!" + cv.getTerm(*u_it).name);
              }
              tags.push_back(String("binary-array-types=") + ListUtils::concatenate(types, ","));
            }
          }
          if (!tags.empty())
          {
            term_line += String("<FONT color=\"grey\"> (") + ListUtils::concatenate(tags, ", ") + ")</FONT>";
          }
          file.addLine(term_line);

          // check whether we need the whole tree, or just the term itself
          if (tit->getAllowChildren())
          {
            file.addLine(String(" <div id=\"div") + term_count + R"(" style="display: none">)");
            if (cv.exists(tit->getAccession()))
            {
              writeTermTree_(tit->getAccession(), cv, file, 1);
              //BEGIN - THIS IS NEEDED FOR WRITING PARSERS ONLY
              /*
              set<String> allowed_terms;
              cv.getAllChildTerms(allowed_terms, tit->getAccession());
              for (set<String>::const_iterator atit=allowed_terms.begin(); atit!=allowed_terms.end(); ++atit)
              {
                const ControlledVocabulary::CVTerm& child_term = cv.getTerm(*atit);
                String parser_string = String("os << \"<cvParam cvRef=\\\"MS\\\" accession=\\\"") + child_term.id + "\\\" name=\\\"" + child_term.name + "\\\"";
                for (Size i=0; i<child_term.unparsed.size(); ++i)
                {
                  //TODO this does not work anymore. The type is now stored as a member
                  if (child_term.unparsed[i].hasSubstring("value-type:xsd\\:int") || child_term.unparsed[i].hasSubstring("value-type:xsd\\:float") || child_term.unparsed[i].hasSubstring("value-type:xsd\\:string"))
                  {
                    parser_string += " value=\\\"\" << << \"\\\"";
                  }
                }
                parser_string += "/>\\n\";<BR>";
                file.push_back(parser_string);
              }*/
            }
            else
            {
              file.addLine(" - Missing terms, CV not loaded...");
              cerr << "Warning: no child terms for " << tit->getAccession() << " found!" << endl;
            }
            file.addLine(" </div>");
            // close the term cell and its row (previously emitted a stray '</TD>')
            file.addLine(" </TD></TR>");
          }
        }
      }
      file.addLine(" </TABLE>");
      file.addLine(" </BODY>");
      file.addLine("</HTML>");
      file.store(getStringOption_("html"));
      return EXECUTION_OK;
    }

    // iterate over all mapping rules and store the mentioned terms
    StringList ignore_namespaces = getStringList_("ignore_cv");
    set<String> ignore_cv_list;
    for (StringList::const_iterator it = ignore_namespaces.begin(); it != ignore_namespaces.end(); ++it)
    {
      ignore_cv_list.insert(*it);
    }
    set<String> used_terms;
    for (vector<CVMappingRule>::const_iterator it = mappings.getMappingRules().begin(); it != mappings.getMappingRules().end(); ++it)
    {
      set<String> allowed_terms;
      // iterate over all allowed terms
      for (vector<CVMappingTerm>::const_iterator tit = it->getCVTerms().begin(); tit != it->getCVTerms().end(); ++tit)
      {
        // check whether the term itself it allowed, or only its children
        if (tit->getUseTerm())
        {
          allowed_terms.insert(tit->getAccession());
        }
        // check whether we need the whole tree, or just the term itself
        if (tit->getAllowChildren())
        {
          // check whether we want to ignore this term
          if (!(tit->getAccession().has(':') && ignore_cv_list.find(tit->getAccession().prefix(':')) != ignore_cv_list.end()))
          {
            cv.getAllChildTerms(allowed_terms, tit->getAccession());
          }
          // also add the term itself to the used_terms, because all the children are allowed
          used_terms.insert(tit->getAccession());
        }
      }
      // print the allowed terms for the rule
      cout << "MappingRule: id=" << it->getIdentifier() << ", elementPath=" << it->getElementPath() << ", #terms=" << it->getCVTerms().size() << endl;
      for (set<String>::const_iterator ait = allowed_terms.begin(); ait != allowed_terms.end(); ++ait)
      {
        cout << *ait << " " << terms[*ait].name << endl;
      }
      used_terms.insert(allowed_terms.begin(), allowed_terms.end());
    }

    // find unused terms, which CANNOT be used in the XML due to the mapping file
    set<String> unused_terms;
    for (const auto& [acc, _] : terms)
    {
      if (used_terms.find(acc) == used_terms.end())
      {
        unused_terms.insert(acc);
      }
    }
    cout << "\n\nCVTerms which are unused in the mapping file and therefore MUST NOT be used in an instance document" << endl;
    for (const auto& acc : unused_terms)
    {
      cout << acc << " " << terms[acc].name;
      // print also parent names
      for (const auto& parent : terms[acc].parents)
      {
        cout << " " << terms[parent].id << " " << terms[parent].name;
      }
      cout << endl;
    }
    return EXECUTION_OK;
  }
};
int main(int argc, const char** argv)
{
TOPPCVInspector tool;
return tool.main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/FLASHDeconv.cpp | .cpp | 20,879 | 444 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Kyowon Jeong, Jihyung Kim $
// $Authors: Kyowon Jeong, Jihyung Kim $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/TOPDOWN/DeconvolvedSpectrum.h>
#include <OpenMS/ANALYSIS/TOPDOWN/FLASHDeconvAlgorithm.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/FORMAT/FLASHDeconvFeatureFile.h>
#include <OpenMS/FORMAT/FLASHDeconvSpectrumFile.h>
#include <OpenMS/FORMAT/FileTypes.h>
#include <OpenMS/FORMAT/MzMLFile.h>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
// Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_FLASHDeconv FLASHDeconv
@brief FLASHDeconv performs ultrafast deconvolution of top-down proteomics MS datasets.
FLASHDeconv takes an mzML file as input and outputs deconvolved feature list (.tsv) and
deconvolved spectra files (.tsv, .mzML, .msalign, .feature).
FLASHDeconv uses SpectralDeconvolution for spectral level deconvolution and MassFeatureTrace to detect mass features.
For MSn spectra, the precursor masses (not peak m/zs) are determined by tracking MSn-1 spectra deconvolution information.
See https://openms.de/FLASHDeconv for more information.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_FLASHDeconv.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_FLASHDeconv.html
*/
class TOPPFLASHDeconv : public TOPPBase
{
public:
  /// Constructor: registers the tool name, description, official-tool status and
  /// the FLASHDeconv publication (shown in the tool help) with TOPPBase.
  TOPPFLASHDeconv():
    TOPPBase("FLASHDeconv",
             "Ultra-fast high-quality deconvolution enables online processing of top-down MS data",
             true,
             {Citation {"Jeong K, Kim J, Gaikwad M et al.", "FLASHDeconv: Ultrafast, High-Quality Feature Deconvolution for Top-Down Proteomics",
                        "Cell Syst 2020 Feb 26;10(2):213-218.e6", "10.1016/j.cels.2020.01.003"}})
  {
  }
protected:
// this function will be used to register the tool parameters
// it gets automatically called on tool execution
void registerOptionsAndFlags_() override
{
registerInputFile_("in", "<file>", "", "Input file in mzML format. ");
setValidFormats_("in", ListUtils::create<String>("mzML"));
registerOutputFile_("out", "<file>", "", "Default output tsv file containing deconvolved features");
setValidFormats_("out", ListUtils::create<String>("tsv"));
registerOutputFile_(
"out_spec1", "<file>", "",
"Output tsv file for deconvolved MS1 spectra. Use -out_spec2, ..., -out_spec4 for MS2, ..., MS4 spectra.", false);
setValidFormats_("out_spec1", ListUtils::create<String>("tsv"));
registerOutputFile_("out_spec2", "<file>", "", "Output TSV files for deconvolved MS2 spectra.", false, true);
setValidFormats_("out_spec2", ListUtils::create<String>("tsv"));
registerOutputFile_("out_spec3", "<file>", "", "Output TSV files for deconvolved MS3 spectra.", false, true);
setValidFormats_("out_spec3", ListUtils::create<String>("tsv"));
registerOutputFile_("out_spec4", "<file>", "", "Output TSV files for deconvolved MS4 spectra.", false, true);
setValidFormats_("out_spec4", ListUtils::create<String>("tsv"));
registerOutputFile_("out_mzml", "<file>", "", "Output mzML file containing deconvolved spectra (for all MS levels).", false);
setValidFormats_("out_mzml", ListUtils::create<String>("mzML"));
registerOutputFile_("out_quant", "<file>", "", "Output tsv file with isobaric quantification results for MS2 spectra.", false);
setValidFormats_("out_quant", ListUtils::create<String>("tsv"));
registerOutputFile_("out_annotated_mzml", "<file>", "",
"Output annotated mzML file with monoisotopic mass, charge, and isotope index metadata for peaks. Unannotated peaks are also retained without metadata.",
false);
setValidFormats_("out_annotated_mzml", ListUtils::create<String>("mzML"));
registerOutputFile_(
"out_msalign1", "<file>", "",
"Output msalign (TopFD and ProMex compatible) file for MS1 deconvolved spectra. Ensure filename ends with ms1.msalign for TopPIC GUI compatibility (e.g., result_ms1.msalign; refer to TopPIC input formats).",
false);
setValidFormats_("out_msalign1", ListUtils::create<String>("msalign"), false);
registerOutputFile_("out_msalign2", "<file>", "",
"Output msalign (TopFD and ProMex compatible) file for MS2 deconvolved spectra. Ensure filename ends with ms2.msalign for TopPIC GUI compatibility (e.g., result_ms2.msalign; refer to TopPIC input formats).",
false, true);
setValidFormats_("out_msalign2", ListUtils::create<String>("msalign"), false);
registerOutputFile_("out_feature1", "<file>", "",
"Output feature file (TopFD compatible) for MS1 spectra. It is needed for TopPIC feature intensity output (refer to TopPIC input formats).",
false);
setValidFormats_("out_feature1", ListUtils::create<String>("feature"), false);
registerOutputFile_("out_feature2", "<file>", "",
"Output feature file (TopFD compatible) for MS2 spectra. It is needed for TopPIC feature intensity output (refer to TopPIC input formats).",
false, true);
setValidFormats_("out_feature2", ListUtils::create<String>("feature"), false);
registerFlag_("keep_empty_out", "Retain empty output files (e.g., *.tsv files with no features).");
registerIntOption_("mzml_mass_charge", "<0:uncharged 1: +1 charged -1: -1 charged>", 0,
"Charge state of deconvolved masses in mzML output specified by -out_mzml.", false, true);
setMinInt_("mzml_mass_charge", -1);
setMaxInt_("mzml_mass_charge", 1);
registerFlag_("write_detail",
"Include detailed peak information (m/z, intensity, charge, isotope index) for each deconvolved mass in the output spectrum tsv files specified by out_spec* options.",
false);
registerDoubleOption_("min_mz", "<m/z value>", -1.0, "Specify the minimum m/z values for peaks considered during deconvolution. Negative values disable the threshold.", false, true);
registerDoubleOption_("max_mz", "<m/z value>", -1.0, "Specify the maximum m/z values for peaks considered during deconvolution. Negative values disable the threshold.", false, true);
registerDoubleOption_("min_rt", "<RT value>", -1.0, "Specify the minimum retention time (in minutes) for spectra considered during deconvolution. Negative values disable the threshold.", false, true);
registerDoubleOption_("max_rt", "<RT value>", -1.0, "Specify the maximum retention time (in minutes) for spectra considered during deconvolution. Negative values disable the threshold.", false, true);
registerIntOption_("max_ms_level", "<MS level>", -1, "Set the maximum MS level (inclusive) for deconvolution. Negative values disable the threshold.", false, true);
registerSubsection_("FD", "FLASHDeconv algorithm parameters");
registerSubsection_("SD", "Spectral deconvolution parameters");
registerSubsection_("ft", "Feature tracing parameters");
registerSubsection_("iq", "Isobaric quantification parameters");
}
/// Returns default parameters for each subsection.
/// FLASHDeconvAlgorithm stores all parameters in a single Param object with prefixes (SD:, ft:, iq:).
/// This function splits them into separate subsections for a cleaner CLI interface:
/// -FD:* -> top-level algorithm parameters (after removing SD:, ft:, iq: prefixed params)
/// -SD:* -> spectral deconvolution parameters
/// -ft:* -> feature tracing parameters
/// -iq:* -> isobaric quantification parameters
Param getSubsectionDefaults_(const String& prefix) const override
{
auto fd_param = FLASHDeconvAlgorithm().getDefaults();
if (prefix == "FD")
{
// Remove nested subsection params to get only top-level algorithm parameters
fd_param.removeAll("SD:");
fd_param.removeAll("ft:");
fd_param.removeAll("iq:");
return fd_param;
}
else if (prefix == "SD")
{
return fd_param.copy("SD:", true); // Extract SD:* params, strip prefix
}
else if (prefix == "ft")
{
return fd_param.copy("ft:", true); // Extract ft:* params, strip prefix
}
else if (prefix == "iq")
{
return fd_param.copy("iq:", true); // Extract iq:* params, strip prefix
}
else { throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Unknown subsection", prefix); }
}
// the main_ function is called after all parameters are read
ExitCodes main_(int, const char**) override
{
OPENMS_LOG_INFO << "Initializing ... " << endl;
//-------------------------------------------------------------
// parsing parameters
//-------------------------------------------------------------
String in_file = getStringOption_("in");
String out_file = getStringOption_("out");
bool keep_empty_out = getFlag_("keep_empty_out");
auto out_spec_file
= StringList {getStringOption_("out_spec1"), getStringOption_("out_spec2"), getStringOption_("out_spec3"), getStringOption_("out_spec4")};
auto out_topfd_file = StringList {getStringOption_("out_msalign1"), getStringOption_("out_msalign2")};
auto out_topfd_feature_file = StringList {getStringOption_("out_feature1"), getStringOption_("out_feature2")};
String out_mzml_file = getStringOption_("out_mzml");
String out_anno_mzml_file = getStringOption_("out_annotated_mzml");
String out_quant_file = getStringOption_("out_quant");
bool write_detail = getFlag_("write_detail");
int mzml_charge = getIntOption_("mzml_mass_charge");
double min_mz = getDoubleOption_("min_mz");
double max_mz = getDoubleOption_("max_mz");
double min_rt = getDoubleOption_("min_rt") * 60.0;
double max_rt = getDoubleOption_("max_rt") * 60.0;
int max_ms_level = getIntOption_("max_ms_level");
std::map<uint, int> per_ms_level_spec_count;
std::map<uint, int> per_ms_level_deconv_spec_count;
std::map<uint, int> per_ms_level_mass_count;
FLASHDeconvAlgorithm fd;
// Reassemble parameters from CLI subsections back into format expected by FLASHDeconvAlgorithm.
// getSubsectionDefaults_() split the params for CLI display; here we merge them back:
// FD:* params -> inserted without prefix (top-level algorithm params)
// SD:*, ft:*, iq:* params -> inserted with prefix preserved (nested params)
Param fd_param;
Param tmp_fd_param = getParam_().copy("FD:", true); // copy FD:* params, strip "FD:" prefix
fd_param.insert("", tmp_fd_param);
bool report_decoy = tmp_fd_param.getValue("report_FDR") != "false";
tmp_fd_param = getParam_().copy("SD:", false); // copy SD:* params, keep "SD:" prefix
fd_param.insert("", tmp_fd_param);
DoubleList tols = tmp_fd_param.getValue("SD:tol");
tmp_fd_param = getParam_().copy("ft:", false); // copy ft:* params, keep "ft:" prefix
fd_param.insert("", tmp_fd_param);
tmp_fd_param = getParam_().copy("iq:", false); // copy iq:* params, keep "iq:" prefix
fd_param.insert("", tmp_fd_param);
fd.setParameters(fd_param);
//-------------------------------------------------------------
// reading input
//-------------------------------------------------------------
constexpr double MAX_RANGE_VALUE = 1e7; // effectively unlimited upper bound for RT/m/z ranges
MSExperiment map;
MzMLFile mzml;
// reading mzMLs with m/z and rt criteria.
PeakFileOptions opt = mzml.getOptions();
if (min_rt > 0 || max_rt > 0)
{
if (min_rt > 0 && max_rt < 0) max_rt = MAX_RANGE_VALUE;
opt.setRTRange(DRange<1> {min_rt, max_rt});
}
if (min_mz > 0 || max_mz > 0)
{
if (min_mz > 0 && max_mz < 0) max_mz = MAX_RANGE_VALUE;
opt.setMZRange(DRange<1> {min_mz, max_mz});
}
if (max_ms_level > 0)
{
IntList ms_levels;
for (int msl = 1; msl <= max_ms_level; msl++)
ms_levels.push_back(msl);
opt.setMSLevels(ms_levels);
}
mzml.setLogType(log_type_);
mzml.setOptions(opt);
mzml.load(in_file, map);
std::vector<DeconvolvedSpectrum> deconvolved_spectra;
std::vector<FLASHHelperClasses::MassFeature> deconvolved_features;
std::map<int, double> scan_rt_map;
// Run FLASHDeconvAlgorithm here!
OPENMS_LOG_INFO << "Processing : " << in_file << endl;
fd.run(map, deconvolved_spectra, deconvolved_features);
tols = fd.getTolerances();
// collect statistics for information
for (const auto& it : map)
{
uint ms_level = it.getMSLevel();
if (per_ms_level_spec_count.find(ms_level) == per_ms_level_spec_count.end()) per_ms_level_spec_count[ms_level] = 0;
per_ms_level_spec_count[ms_level]++;
}
for (const auto& deconvolved_spectrum : deconvolved_spectra)
{
uint ms_level = deconvolved_spectrum.getOriginalSpectrum().getMSLevel();
scan_rt_map[deconvolved_spectrum.getScanNumber()] = deconvolved_spectrum.getOriginalSpectrum().getRT();
if (deconvolved_spectrum.empty()) continue;
if (per_ms_level_deconv_spec_count.find(ms_level) == per_ms_level_deconv_spec_count.end()) per_ms_level_deconv_spec_count[ms_level] = 0;
if (per_ms_level_mass_count.find(ms_level) == per_ms_level_mass_count.end()) per_ms_level_mass_count[ms_level] = 0;
per_ms_level_deconv_spec_count[ms_level]++;
per_ms_level_mass_count[ms_level] += (int)deconvolved_spectrum.size();
}
for (const auto& val : per_ms_level_deconv_spec_count)
{
OPENMS_LOG_INFO << "So far, FLASHDeconv found " << per_ms_level_mass_count[val.first] << " masses in " << val.second << " MS" << val.first
<< " spectra out of " << per_ms_level_spec_count[val.first] << endl;
}
if (! deconvolved_features.empty()) { OPENMS_LOG_INFO << "Mass tracer found " << deconvolved_features.size() << " features" << endl; }
OPENMS_LOG_INFO << "FLASHDeconv run complete. Now writing the results in output files ..." << endl;
// Write output files
// default feature deconvolution tsv output
if (keep_empty_out || ! deconvolved_features.empty())
{
OPENMS_LOG_INFO << "writing feature tsv ..." << endl;
ofstream out_stream;
out_stream.open(out_file);
if (!out_stream)
{
OPENMS_LOG_FATAL_ERROR << "Error: Could not open output file '" << out_file << "'" << endl;
return CANNOT_WRITE_OUTPUT_FILE;
}
FLASHDeconvFeatureFile::writeHeader(out_stream, report_decoy);
FLASHDeconvFeatureFile::writeFeatures(deconvolved_features, in_file, out_stream, report_decoy);
out_stream.close();
}
// Per ms level spectrum deconvolution tsv output
// Check if any spectrum output file is specified
auto has_any_output = [](const StringList& files) {
for (const auto& f : files) if (!f.empty()) return true;
return false;
};
if (has_any_output(out_spec_file))
{
std::vector<ofstream> out_spec_streams = std::vector<ofstream>(out_spec_file.size());
for (Size i = 0; i < out_spec_file.size(); i++)
{
if (out_spec_file[i].empty() || (! keep_empty_out && per_ms_level_deconv_spec_count.find(i + 1) == per_ms_level_deconv_spec_count.end()))
continue;
OPENMS_LOG_INFO << "writing spectrum tsv for MS level " << (i + 1) << " ..." << endl;
out_spec_streams[i].open(out_spec_file[i]);
if (!out_spec_streams[i])
{
OPENMS_LOG_FATAL_ERROR << "Error: Could not open output file '" << out_spec_file[i] << "'" << endl;
return CANNOT_WRITE_OUTPUT_FILE;
}
FLASHDeconvSpectrumFile::writeDeconvolvedMassesHeader(out_spec_streams[i], i + 1, write_detail, report_decoy);
}
for (const auto& deconvolved_spectrum : deconvolved_spectra)
{
uint ms_level = deconvolved_spectrum.getOriginalSpectrum().getMSLevel();
if (ms_level > out_spec_file.size() || out_spec_file[ms_level - 1].empty()) continue;
FLASHDeconvSpectrumFile::writeDeconvolvedMasses(deconvolved_spectrum, out_spec_streams[ms_level - 1], in_file, fd.getAveragine(), fd.getDecoyAveragine(),
tols[ms_level - 1], write_detail, report_decoy, fd.getNoiseDecoyWeight());
}
for (Size i = 0; i < out_spec_file.size(); i++)
{
if (out_spec_file[i].empty() || (! keep_empty_out && per_ms_level_deconv_spec_count.find(i + 1) == per_ms_level_deconv_spec_count.end()))
continue;
out_spec_streams[i].close();
}
}
// mzML output
if (! out_anno_mzml_file.empty() || ! out_mzml_file.empty())
{
FLASHDeconvSpectrumFile::writeMzML(map, deconvolved_spectra, out_mzml_file, out_anno_mzml_file, mzml_charge, tols);
}
// isobaric quantification output
if (! out_quant_file.empty())
{
OPENMS_LOG_INFO << "writing quantification tsv ..." << endl;
ofstream out_quant_stream;
out_quant_stream.open(out_quant_file);
if (!out_quant_stream)
{
OPENMS_LOG_FATAL_ERROR << "Error: Could not open output file '" << out_quant_file << "'" << endl;
return CANNOT_WRITE_OUTPUT_FILE;
}
FLASHDeconvSpectrumFile::writeIsobaricQuantification(out_quant_stream, deconvolved_spectra);
out_quant_stream.close();
}
// topFD feature output - it should be at the end since zero feature IDs are redefined for TopPIC feature indices.
if (has_any_output(out_topfd_feature_file))
{
std::vector<ofstream> out_topfd_feature_streams;
out_topfd_feature_streams = std::vector<ofstream>(out_topfd_feature_file.size());
for (Size i = 0; i < out_topfd_feature_file.size(); i++)
{
if (out_topfd_feature_file[i].empty()
|| (! keep_empty_out && per_ms_level_deconv_spec_count.find(i + 1) == per_ms_level_deconv_spec_count.end()))
continue;
OPENMS_LOG_INFO << "writing topfd *.feature for MS level " << (i + 1) << " ..." << endl;
out_topfd_feature_streams[i].open(out_topfd_feature_file[i]);
if (!out_topfd_feature_streams[i])
{
OPENMS_LOG_FATAL_ERROR << "Error: Could not open output file '" << out_topfd_feature_file[i] << "'" << endl;
return CANNOT_WRITE_OUTPUT_FILE;
}
FLASHDeconvFeatureFile::writeTopFDFeatureHeader(out_topfd_feature_streams[i], i + 1);
FLASHDeconvFeatureFile::writeTopFDFeatures(deconvolved_spectra, deconvolved_features, scan_rt_map, in_file, out_topfd_feature_streams[i],
i + 1);
out_topfd_feature_streams[i].close();
}
}
// topFD msalign output
if (has_any_output(out_topfd_file))
{
auto out_topfd_streams = std::vector<ofstream>(out_topfd_file.size());
for (Size i = 0; i < out_topfd_file.size(); i++)
{
if (out_topfd_file[i].empty() || (! keep_empty_out && per_ms_level_deconv_spec_count.find(i + 1) == per_ms_level_deconv_spec_count.end()))
continue;
OPENMS_LOG_INFO << "writing topfd *.msalign for MS level " << (i + 1) << " ..." << endl;
out_topfd_streams[i].open(out_topfd_file[i]);
if (!out_topfd_streams[i])
{
OPENMS_LOG_FATAL_ERROR << "Error: Could not open output file '" << out_topfd_file[i] << "'" << endl;
return CANNOT_WRITE_OUTPUT_FILE;
}
FLASHDeconvSpectrumFile::writeTopFDHeader(out_topfd_streams[i], getParam_().copy("SD:", true));
}
for (const auto& deconvolved_spectrum : deconvolved_spectra)
{
uint ms_level = deconvolved_spectrum.getOriginalSpectrum().getMSLevel();
if (ms_level > out_topfd_file.size() || out_topfd_file[ms_level - 1].empty()) continue;
FLASHDeconvSpectrumFile::writeTopFD(deconvolved_spectrum, out_topfd_streams[ms_level - 1], in_file, 1,
per_ms_level_deconv_spec_count.begin()->first, false, false);
}
for (Size i = 0; i < out_topfd_file.size(); i++)
{
if (out_topfd_file[i].empty() || (! keep_empty_out && per_ms_level_deconv_spec_count.find(i + 1) == per_ms_level_deconv_spec_count.end()))
continue;
out_topfd_streams[i].close();
}
}
return EXECUTION_OK;
}
};
/// Executable entry point: delegate argument handling and execution to the TOPP framework.
int main(int argc, const char** argv)
{
  return TOPPFLASHDeconv().main(argc, argv);
}
3D | OpenMS/OpenMS | src/topp/FeatureFinderMetabo.cpp | .cpp | 18,477 | 473 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Erhan Kenar, Holger Franken $
// --------------------------------------------------------------------------
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/IONMOBILITY/FAIMSHelper.h>
#include <OpenMS/IONMOBILITY/IMDataConverter.h>
#include <OpenMS/IONMOBILITY/IMTypes.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/KERNEL/MassTrace.h>
#include <OpenMS/FEATUREFINDER/MassTraceDetection.h>
#include <OpenMS/FEATUREFINDER/ElutionPeakDetection.h>
#include <OpenMS/FEATUREFINDER/FeatureFindingMetabo.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/SYSTEM/File.h>
#include <OpenMS/PROCESSING/FEATURE/FeatureOverlapFilter.h>
#include <limits>
#include <set>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_FeatureFinderMetabo FeatureFinderMetabo
@brief FeatureFinderMetabo assembles metabolite features from singleton mass traces.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=3> → FeatureFinderMetabo →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_PeakPickerHiRes </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=2> @ref TOPP_TextExporter</td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_PeakPickerHiRes </td>
</tr>
</table>
</CENTER>
Mass traces alone would allow for further analysis such as metabolite ID or
statistical evaluation. However, in general, monoisotopic mass traces are
accompanied by satellite C13 peaks and thus may render the analysis more
difficult. FeatureFinderMetabo fulfills a further data reduction step by
assembling compatible mass traces to metabolite features (that is, all mass
traces originating from one metabolite). To this end, multiple metabolite
hypotheses are formulated and scored according to how well differences in RT (optional),
m/z or intensity ratios match to those of theoretical isotope patterns.
If the raw data scans contain the scan polarity information, it is stored as
meta value "scan_polarity" in the output file.
Mass trace clustering can be done using either 13C distances or a linear model (Kenar et al) -- see parameter 'ffm:mz_scoring_13C'.
Generally, for lipidomics, use 13C, since lipids contain a lot of 13C.
For general metabolites, the linear model is usually more appropriate.
To decide what is better, the total number of features can be used as indirect measure
- the lower(!) the better (since more mass traces are assembled into single features).
Detailed information is stored in the featureXML output: it contains meta-values for each feature about the
mass trace differences (inspectable via TOPPView). If you want this in a tabular format, use TextExporter, i.e.,
@code
TextExporter.exe -feature:add_metavalues 1 -in <ff_metabo.featureXML> -out <ff_metabo.csv>
@endcode
By default, the linear model is used.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_FeatureFinderMetabo.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_FeatureFinderMetabo.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
/// TOPP tool: assembles metabolite features from centroided (LC-)MS data via
/// mass trace detection, optional elution peak detection, and feature finding.
/// FAIMS data is split by compensation voltage and each CV group is processed separately.
class TOPPFeatureFinderMetabo :
  public TOPPBase
{
public:
  TOPPFeatureFinderMetabo() :
    TOPPBase("FeatureFinderMetabo", "Assembles metabolite features from centroided (LC-)MS data using the mass trace approach.")
  {
  }

protected:
  /**
   * @brief Process a single PeakMap (or FAIMS CV group) through the full FFMetabo pipeline
   * @param[in,out] ms_peakmap Input peak map (will be modified - sorted)
   * @param[in] common_param Common parameters
   * @param[in] mtd_param MassTraceDetection parameters
   * @param[in] epd_param ElutionPeakDetection parameters
   * @param[in] ffm_param FeatureFindingMetabo parameters
   * @param[out] feat_map Output feature map
   * @param[out] feat_chromatograms Output chromatograms (if enabled)
   * @return True on success, false on error
   */
  bool processOneGroup_(PeakMap& ms_peakmap,
                        const Param& common_param,
                        Param mtd_param,
                        Param epd_param,
                        Param ffm_param,
                        FeatureMap& feat_map,
                        std::vector<std::vector<OpenMS::MSChromatogram>>& feat_chromatograms)
  {
    // sort spectra (by RT) and the peaks within each spectrum (by m/z)
    ms_peakmap.sortSpectra(true);

    vector<MassTrace> m_traces;

    //-------------------------------------------------------------
    // configure and run mass trace detection
    //-------------------------------------------------------------
    MassTraceDetection mtdet;
    mtd_param.insert("", common_param);
    mtd_param.remove("chrom_fwhm"); // not used by MassTraceDetection
    mtdet.setParameters(mtd_param);
    mtdet.run(ms_peakmap, m_traces);

    //-------------------------------------------------------------
    // configure and run elution peak detection
    //-------------------------------------------------------------
    std::vector<MassTrace> m_traces_final;
    if (epd_param.getValue("enabled").toBool())
    {
      std::vector<MassTrace> splitted_mtraces;
      epd_param.remove("enabled"); // "enabled" is a tool-level switch, not an algorithm parameter
      epd_param.insert("", common_param);
      epd_param.remove("noise_threshold_int");
      ElutionPeakDetection epdet;
      epdet.setParameters(epd_param);
      epdet.detectPeaks(m_traces, splitted_mtraces);
      if (epdet.getParameters().getValue("width_filtering") == "auto")
      {
        m_traces_final.clear();
        epdet.filterByPeakWidth(splitted_mtraces, m_traces_final);
      }
      else
      {
        m_traces_final = splitted_mtraces;
      }
    }
    else
    {
      // EPD disabled (e.g. direct injection): use raw traces, but estimate FWHM
      m_traces_final = m_traces;
      for (Size i = 0; i < m_traces_final.size(); ++i)
      {
        m_traces_final[i].estimateFWHM(false);
      }
      if (ffm_param.getValue("use_smoothed_intensities").toBool())
      {
        OPENMS_LOG_WARN << "Without EPD, smoothing is not supported. Setting 'use_smoothed_intensities' to false!" << std::endl;
        ffm_param.setValue("use_smoothed_intensities", "false");
      }
    }

    //-------------------------------------------------------------
    // configure and run feature finding
    //-------------------------------------------------------------
    ffm_param.insert("", common_param);
    ffm_param.remove("noise_threshold_int");
    ffm_param.remove("chrom_peak_snr");
    FeatureFindingMetabo ffmet;
    ffmet.setParameters(ffm_param);
    ffmet.run(m_traces_final, feat_map, feat_chromatograms);

    // sanity check: all input traces must be accounted for by the output features
    // (unless single traces were deliberately removed)
    Size trace_count(0);
    for (Size i = 0; i < feat_map.size(); ++i)
    {
      OPENMS_PRECONDITION(feat_map[i].metaValueExists(Constants::UserParam::NUM_OF_MASSTRACES),
                          "MetaValue 'num_of_masstraces' missing from FFMetabo output!");
      trace_count += (Size) feat_map[i].getMetaValue(Constants::UserParam::NUM_OF_MASSTRACES);
    }
    if (trace_count != m_traces_final.size())
    {
      if (!ffm_param.getValue("remove_single_traces").toBool())
      {
        OPENMS_LOG_ERROR << "FF-Metabo: Internal error. Not all mass traces have been assembled to features!" << std::endl;
        return false;
      }
      else
      {
        OPENMS_LOG_INFO << "FF-Metabo: " << (m_traces_final.size() - trace_count) << " unassembled traces have been removed." << std::endl;
      }
    }

    OPENMS_LOG_INFO << "-- FF-Metabo stats --\n"
                    << "Input traces:    " << m_traces_final.size() << "\n"
                    << "Output features: " << feat_map.size() << " (total trace count: " << trace_count << ")" << std::endl;
    return true;
  }

  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Centroided mzML file");
    setValidFormats_("in", ListUtils::create<String>("mzML"));
    registerOutputFile_("out", "<file>", "", "FeatureXML file with metabolite features");
    setValidFormats_("out", ListUtils::create<String>("featureXML"));
    registerOutputFile_("out_chrom", "<file>", "", "Optional mzML file with chromatograms", false);
    setValidFormats_("out_chrom", ListUtils::create<String>("mzML"));
    addEmptyLine_();
    registerStringOption_("faims_merge_features", "<true/false>", "true",
                          "For FAIMS data with multiple compensation voltages: Merge features representing the same analyte "
                          "detected at different CV values into a single feature. Only features with DIFFERENT FAIMS CV values "
                          "are merged (same CV = different analytes). Has no effect on non-FAIMS data.", false);
    setValidStrings_("faims_merge_features", {"true", "false"});
    addEmptyLine_();
    registerSubsection_("algorithm", "Algorithm parameters section");
  }

  /// Assembles the default parameters of all sub-algorithms into subsections
  /// common:, mtd:, epd: and ffm:; parameters shared between sub-algorithms
  /// live in 'common' and are removed from the individual subsections.
  Param getSubsectionDefaults_(const String& /*section*/) const override
  {
    Param combined;

    Param p_com;
    p_com.setValue("noise_threshold_int", 10.0, "Intensity threshold below which peaks are regarded as noise.");
    p_com.setValue("chrom_peak_snr", 3.0, "Minimum signal-to-noise a mass trace should have.");
    p_com.setValue("chrom_fwhm", 5.0, "Expected chromatographic peak width (in seconds).");
    combined.insert("common:", p_com);
    combined.setSectionDescription("common", "Common parameters for all other subsections");

    Param p_mtd = MassTraceDetection().getDefaults();
    p_mtd.remove("noise_threshold_int");
    p_mtd.remove("chrom_peak_snr");
    combined.insert("mtd:", p_mtd);
    combined.setSectionDescription("mtd", "Mass Trace Detection parameters");

    Param p_epd;
    p_epd.setValue("enabled", "true", "Enable splitting of isobaric mass traces by chromatographic peak detection. Disable for direct injection.");
    p_epd.setValidStrings("enabled", {"true","false"});
    p_epd.insert("", ElutionPeakDetection().getDefaults());
    p_epd.remove("chrom_peak_snr");
    p_epd.remove("chrom_fwhm");
    // fixed: the epd subsection was inserted twice here (duplicate insert removed)
    combined.insert("epd:", p_epd);
    combined.setSectionDescription("epd", "Elution Profile Detection (to separate isobaric Mass Traces by elution time).");

    Param p_ffm = FeatureFindingMetabo().getDefaults();
    p_ffm.remove("chrom_fwhm");
    p_ffm.remove("report_chromatograms"); // controlled via -out_chrom, not a user-facing algorithm parameter
    combined.insert("ffm:", p_ffm);
    combined.setSectionDescription("ffm", "FeatureFinder parameters (assembling mass traces to charged features)");

    return combined;
  }

  ExitCodes main_(int, const char**) override
  {
    //-------------------------------------------------------------
    // parameter handling
    //-------------------------------------------------------------
    String in = getStringOption_("in");
    String out = getStringOption_("out");
    String out_chrom = getStringOption_("out_chrom");

    //-------------------------------------------------------------
    // loading input (MS1 spectra only)
    //-------------------------------------------------------------
    FileHandler mz_data_file;
    PeakMap ms_peakmap;
    std::vector<Int> ms_level(1, 1);
    mz_data_file.getOptions().setMSLevels(ms_level);
    mz_data_file.loadExperiment(in, ms_peakmap, {FileTypes::MZML}, log_type_);

    if (ms_peakmap.empty())
    {
      OPENMS_LOG_WARN << "The given file does not contain any conventional peak data, but might"
                         " contain chromatograms. This tool currently cannot handle them, sorry.";
      return INCOMPATIBLE_INPUT_DATA;
    }

    // determine type of spectral data (profile or centroided)
    SpectrumSettings::SpectrumType spectrum_type = ms_peakmap[0].getType();
    if (spectrum_type == SpectrumSettings::SpectrumType::PROFILE)
    {
      if (!getFlag_("force"))
      {
        throw OpenMS::Exception::FileEmpty(__FILE__, __LINE__, __FUNCTION__,
                                           "Error: Profile data provided but centroided spectra expected. To enforce processing of the data set the -force flag.");
      }
    }

    //-------------------------------------------------------------
    // set parameters
    //-------------------------------------------------------------
    Param common_param = getParam_().copy("algorithm:common:", true);
    writeDebug_("Common parameters passed to sub-algorithms (mtd and ffm)", common_param, 3);

    Param mtd_param = getParam_().copy("algorithm:mtd:", true);
    writeDebug_("Parameters passed to MassTraceDetection", mtd_param, 3);

    Param epd_param = getParam_().copy("algorithm:epd:", true);
    writeDebug_("Parameters passed to ElutionPeakDetection", epd_param, 3);

    Param ffm_param = getParam_().copy("algorithm:ffm:", true);
    writeDebug_("Parameters passed to FeatureFindingMetabo", ffm_param, 3);

    // chromatogram reporting is derived from whether -out_chrom was given
    String report_chromatograms = out_chrom.empty() ? "false" : "true";
    ffm_param.setValue("report_chromatograms", report_chromatograms);

    // Store polarities before potentially moving ms_peakmap (needed later for output annotation)
    set<IonSource::Polarity> polarities;
    for (const auto& spec : ms_peakmap)
    {
      polarities.insert(spec.getInstrumentSettings().getPolarity());
    }

    //-------------------------------------------------------------
    // Split by FAIMS CV (returns single NaN-keyed element for non-FAIMS data)
    //-------------------------------------------------------------
    auto faims_groups = IMDataConverter::splitByFAIMSCV(std::move(ms_peakmap));
    const bool has_faims = faims_groups.size() > 1 || !std::isnan(faims_groups[0].first);

    if (has_faims)
    {
      OPENMS_LOG_INFO << "FAIMS data detected with " << faims_groups.size() << " compensation voltage(s)." << endl;
    }

    FeatureMap feat_map;
    std::vector<std::vector<OpenMS::MSChromatogram>> feat_chromatograms;

    // Process each FAIMS CV group (or single group for non-FAIMS data)
    for (auto& [group_cv, faims_group] : faims_groups)
    {
      if (has_faims)
      {
        OPENMS_LOG_INFO << "Processing FAIMS CV group: " << group_cv << " V (" << faims_group.size() << " spectra)" << endl;
      }

      // Process this group
      FeatureMap feat_map_cv;
      std::vector<std::vector<OpenMS::MSChromatogram>> feat_chromatograms_cv;
      if (!processOneGroup_(faims_group, common_param, mtd_param, epd_param, ffm_param,
                            feat_map_cv, feat_chromatograms_cv))
      {
        if (has_faims)
        {
          OPENMS_LOG_ERROR << "Processing failed for FAIMS CV " << group_cv << " V. Aborting." << endl;
        }
        return UNEXPECTED_RESULT;
      }

      // Annotate features with FAIMS CV (if FAIMS data) and add to combined results
      for (auto& feat : feat_map_cv)
      {
        if (has_faims)
        {
          feat.setMetaValue(Constants::UserParam::FAIMS_CV, group_cv);
        }
        feat_map.push_back(feat);
      }
      // Combine chromatograms
      for (auto& chrom_group : feat_chromatograms_cv)
      {
        feat_chromatograms.push_back(std::move(chrom_group));
      }
    }

    if (has_faims)
    {
      OPENMS_LOG_INFO << "Combined " << feat_map.size() << " features from all FAIMS CV groups." << endl;

      // Optionally merge features representing the same analyte at different CV values
      if (getStringOption_("faims_merge_features") == "true")
      {
        Size before_merge = feat_map.size();
        FeatureOverlapFilter::mergeFAIMSFeatures(feat_map, 5.0, 0.05);
        OPENMS_LOG_INFO << "FAIMS feature merge: " << before_merge << " -> " << feat_map.size()
                        << " features (merged " << (before_merge - feat_map.size()) << ")" << endl;
      }
    }

    // filter features with zero intensity (this can happen if the FWHM is zero (bc of overly skewed shape) and no peaks end up being summed up)
    auto intensity_zero = [](const Feature& f) { return f.getIntensity() == 0; };
    feat_map.erase(remove_if(feat_map.begin(), feat_map.end(), intensity_zero), feat_map.end());

    // store chromatograms
    if (!out_chrom.empty())
    {
      if (feat_chromatograms.size() == feat_map.size())
      {
        MSExperiment out_exp;
        for (Size i = 0; i < feat_chromatograms.size(); ++i)
        {
          for (Size j = 0; j < feat_chromatograms[i].size(); ++j)
          {
            out_exp.addChromatogram(feat_chromatograms[i][j]);
          }
        }
        FileHandler().storeExperiment(out_chrom, out_exp, {FileTypes::MZML});
      }
      else
      {
        // fixed: feature and chromatogram counts were swapped in this message
        OPENMS_LOG_ERROR << "FF-Metabo: Internal error. The number of features (" << feat_map.size() << ") and chromatograms (" << feat_chromatograms.size() << ") are different! Aborting." << std::endl;
        return UNEXPECTED_RESULT;
      }
    }

    // store ionization mode of spectra (useful for post-processing by AccurateMassSearch tool)
    // Note: polarities were collected before ms_peakmap was potentially moved
    if (!feat_map.empty() && !polarities.empty())
    {
      StringList sl_pols;
      for (const auto& pol : polarities)
      {
        sl_pols.push_back(IonSource::polarityToString(pol));
      }
      feat_map[0].setMetaValue("scan_polarity", ListUtils::concatenate(sl_pols, ";"));
    }

    //-------------------------------------------------------------
    // writing output
    //-------------------------------------------------------------

    // ensure unique IDs for the combined feature map
    feat_map.ensureUniqueId();

    // annotate output with data processing info
    addDataProcessing_(feat_map, getProcessingInfo_(DataProcessing::QUANTITATION));

    // annotate "spectra_data" metavalue
    // Note: use simple form since ms_peakmap may have been moved for FAIMS processing
    if (getFlag_("test"))
    {
      // if test mode set, add file without path so we can compare it
      feat_map.setPrimaryMSRunPath({"file://" + File::basename(in)});
    }
    else
    {
      feat_map.setPrimaryMSRunPath({in});
    }

    FileHandler().storeFeatures(out, feat_map, {FileTypes::FEATUREXML});

    return EXECUTION_OK;
  }
};
/// Tool entry point: constructs the tool and delegates command-line handling to TOPPBase::main()
int main(int argc, const char** argv)
{
TOPPFeatureFinderMetabo tool;
return tool.main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/FileFilter.cpp | .cpp | 74,415 | 1,604 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Marc Sturm, Lars Nilse, Chris Bielow, Hendrik Brauer $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/COMPARISON/ZhangSimilarityScore.h>
#include <OpenMS/CONCEPT/EnumHelpers.h>
#include <OpenMS/DATASTRUCTURES/String.h>
#include <OpenMS/DATASTRUCTURES/StringListUtils.h>
#include <OpenMS/FORMAT/ConsensusXMLFile.h>
#include <OpenMS/FORMAT/FeatureXMLFile.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FORMAT/FileTypes.h>
#include <OpenMS/FORMAT/IdXMLFile.h>
#include <OpenMS/FORMAT/MSNumpressCoder.h>
#include <OpenMS/KERNEL/ChromatogramTools.h>
#include <OpenMS/KERNEL/ConsensusMap.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/KERNEL/RangeUtils.h>
#include <OpenMS/PROCESSING/NOISEESTIMATION/SignalToNoiseEstimatorMedian.h>
#include <algorithm>
#include <memory>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_FileFilter FileFilter
@brief Extracts portions of the data from an mzML, featureXML or consensusXML file.
<center>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </th>
<td VALIGN="middle" ROWSPAN=2> &rarr; FileFilter &rarr;</td>
<th ALIGN = "center"> pot. successor tools </th>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> any tool yielding output @n in mzML, featureXML @n or consensusXML format</td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> any tool that profits on reduced input </td>
</tr>
</table>
</center>
With this tool it is possible to extract m/z, retention time and intensity ranges from an input file
and to write all data that lies within the given ranges to an output file.
The retention time filtering using `-rt from:to` can be performed in three different modes using the '-rt_block_mode' flag:
- 'as_is' (default): Uses the exact RT range given in '-rt' for RT filtering
- 'extend_to_preserve_full_cycle': Extends the given RT range (if required) such that consecutive blocks of spectra (i.e. a cycle) are kept intact. A block of spectra means that consecutive MS levels (1,2,3,4,....) constitute one block and a new block starts with the lowest MS level in the data
- 'shrink_to_preserve_full_cycle': Same as above but only takes full blocks inside the given RT range (no extension of given RT boundaries)
Depending on the input file type, additional specific operations are possible:
- mzML
- extract spectra of a certain MS level
- filter by signal-to-noise estimation
- filter by scan mode of the spectra
- filter by scan polarity of the spectra
- filter by activation method of the spectra
- filter by collision energy of the spectra
- filter by isolation window width of the spectra
- select/remove zoom scans
- remove chromatograms, meta data arrays, and empty spectra
- remove MS2 scans whose precursor matches identifications (from an idXML file in 'id:blacklist')
- featureXML
- filter by feature charge
- filter by feature size (number of subordinate features)
- filter by overall feature quality
- consensusXML
- filter by size (number of elements in consensus features)
- filter by consensus feature charge
- filter by map (extracts specified maps and re-evaluates consensus centroid)@n e.g. FileFilter -map 2 3 5 -in file1.consensusXML -out file2.consensusXML@n If a single map is specified, the feature itself can be extracted.@n e.g. FileFilter -map 5 -in file1.consensusXML -out file2.featureXML
- featureXML / consensusXML:
- remove items with a certain meta value annotation. Allowing for >, < and = comparisons. List types are compared by length, not content. Integer, Double and String are compared using their built-in operators.
- filter sequences, e.g. "LYSNLVER" or the modification "(Phospho)"@n e.g. FileFilter -id:sequences_whitelist Phospho -in file1.consensusXML -out file2.consensusXML
- filter accessions, e.g. "sp|P02662|CASA1_BOVIN"
- remove features with annotations
- remove features without annotations
- remove unassigned peptide identifications
- filter id with best score of features with multiple peptide identifications@n e.g. FileFilter -id:remove_unannotated_features -id:remove_unassigned_ids -id:keep_best_score_id -in file1.featureXML -out file2.featureXML
- remove features with id clashes (different sequences mapped to one feature)
The priority of the id-flags is (decreasing order): remove_annotated_features / remove_unannotated_features -> remove_clashes -> keep_best_score_id -> sequences_whitelist / accessions_whitelist
MS2 and higher spectra can be filtered according to precursor m/z (see 'peak_options:pc_mz_range'). This flag can be combined with 'rt' range to filter precursors by RT and m/z.
If you want to extract an MS1 region with untouched MS2 spectra included, you will need to split the dataset by MS level, then use the 'mz' option for MS1 data and 'peak_options:pc_mz_range' for MS2 data. Afterwards merge the two files again. RT can be filtered at any step.
@note For filtering peptide/protein identification data, see the @ref TOPP_IDFilter tool.
@note Currently mzIdentML (mzid) is not directly supported as an input/output format of this tool. Convert mzid files to/from idXML using @ref TOPP_IDFileConverter if necessary.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_FileFilter.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_FileFilter.html
For the parameters of the S/N algorithm section see the class documentation there: @n
@ref OpenMS::SignalToNoiseEstimatorMedian "peak_options:sn"@n
*/
//-------------------------------------------------------------
// RT Cut Mode definitions
//-------------------------------------------------------------
/// How the '-rt' range interacts with spectrum blocks (cycles); a block is a run of
/// consecutive MS levels starting with the lowest MS level present in the data.
enum class RTBlockMode
{
AS_IS,             ///< use the RT range exactly as given in '-rt'
FULL_CYCLE_EXTEND, ///< extend the RT range (if required) so partially covered blocks are kept whole
FULL_CYCLE_SHRINK  ///< shrink the RT range so only blocks fully inside it are kept
};
// Parameter names for 'rt_block_mode'; indexed by the underlying value of RTBlockMode
const std::array<std::string, 3> RT_BLOCK_MODE_NAMES = {
"as_is",
"extend_to_preserve_full_cycle",
"shrink_to_preserve_full_cycle"
};
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPFileFilter :
public TOPPBase
{
public:
/// Registers the tool name and one-line description with the TOPP framework
TOPPFileFilter() :
TOPPBase("FileFilter", "Extracts or manipulates portions of data from peak, feature or consensus-feature files.")
{
}
private:
static bool sequenceIsWhiteListed_(const AASequence& peptide_hit_sequence,
const StringList& whitelist,
const String& sequence_comparison_method)
{
const String& sequence_str = peptide_hit_sequence.toString();
const String& sequence_unmodified_str = peptide_hit_sequence.toUnmodifiedString();
if (sequence_comparison_method == "substring")
{
for (const String & s : whitelist)
{
if (sequence_str.hasSubstring(s) || sequence_unmodified_str.hasSubstring(s))
{
return true;
}
}
}
else if (sequence_comparison_method == "exact")
{
for (const String & s : whitelist)
{
if (sequence_str == s || sequence_unmodified_str == s)
{
return true;
}
}
}
else
{
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Invalid sequence comparison method given: '" + sequence_comparison_method + "'");
}
return false;
}
/// Rewrites the charge of every precursor in 'e' whose charge equals 'charge_in' to 'charge_out'
static void replacePrecursorCharge(MSExperiment& e, int charge_in, int charge_out)
{
  for (auto& spectrum : e.getSpectra())
  {
    for (auto& precursor : spectrum.getPrecursors())
    {
      if (precursor.getCharge() != charge_in)
      {
        continue; // leave all other charges untouched
      }
      precursor.setCharge(charge_out);
    }
  }
}
/**
  @brief Decide whether a feature passes the ID-based filters; may prune its peptide IDs.

  Flags are applied in decreasing priority:
  remove_annotated_features / remove_unannotated_features -> remove_clashes ->
  keep_best_score_id -> sequences/accessions whitelists.

  @param feature The feature to check; if @p keep_best_score_id is set, its peptide
                 identifications are reduced to the single best-scoring one (side effect)
  @return true if the feature should be kept, false if it should be removed

  Note: unlike the previous version, PeptideIdentifications without any hits are
  skipped safely instead of dereferencing an invalid iterator.
*/
static bool checkPeptideIdentification_(BaseFeature& feature,
                                        const bool remove_annotated_features,
                                        const bool remove_unannotated_features,
                                        const StringList& sequences,
                                        const String& sequence_comparison_method,
                                        const StringList& accessions,
                                        const bool keep_best_score_id,
                                        const bool remove_clashes)
{
  //flag: remove_annotated_features and non-empty peptideIdentifications
  if (remove_annotated_features && !feature.getPeptideIdentifications().empty())
  {
    return false;
  }
  //flag: remove_unannotated_features and no peptideIdentifications
  if (remove_unannotated_features && feature.getPeptideIdentifications().empty())
  {
    return false;
  }
  //flag: remove_clashes -- all peptide hits across all IDs must agree on a single sequence
  if (remove_clashes && !feature.getPeptideIdentifications().empty())
  {
    bool have_reference = false;
    String reference_sequence;
    for (const PeptideIdentification& pep : feature.getPeptideIdentifications())
    {
      for (const PeptideHit& pep_hit : pep.getHits())
      {
        if (!have_reference)
        { // remember the first sequence encountered (guards against IDs without hits)
          reference_sequence = pep_hit.getSequence().toString();
          have_reference = true;
        }
        else if (pep_hit.getSequence().toString() != reference_sequence)
        {
          return false; // clash: different sequences mapped to one feature
        }
      }
    }
  }
  //flag: keep_best_score_id -- keep only the peptide identification with the best score
  if (keep_best_score_id && !feature.getPeptideIdentifications().empty())
  {
    PeptideIdentification best = feature.getPeptideIdentifications().front();
    for (const PeptideIdentification& pep : feature.getPeptideIdentifications())
    {
      for (const PeptideHit& pep_hit : pep.getHits())
      {
        if (best.getHits().empty() || // guard: current best has no hits to compare against
            (pep.isHigherScoreBetter() && pep_hit.getScore() > best.getHits().front().getScore()) ||
            (!pep.isHigherScoreBetter() && pep_hit.getScore() < best.getHits().front().getScore()))
        {
          best = pep;
        }
      }
    }
    feature.setPeptideIdentifications(PeptideIdentificationList(1, best));
    // not filtering sequences or accessions
    if (sequences.empty() && accessions.empty())
    {
      return true;
    }
  }
  //flag: sequences or accessions whitelists
  if (!sequences.empty() || !accessions.empty())
  {
    bool sequence_match = false;
    bool accession_match = false;
    //loop over all peptideIdentifications
    for (const PeptideIdentification& pep_id : feature.getPeptideIdentifications())
    {
      //loop over all peptideHits
      for (const PeptideHit& pep_hit : pep_id.getHits())
      {
        if (sequenceIsWhiteListed_(pep_hit.getSequence(), sequences, sequence_comparison_method))
        {
          sequence_match = true;
        }
        // check all protein accessions of this hit against the whitelist (substring match)
        const set<String> protein_accessions = pep_hit.extractProteinAccessionsSet();
        for (const String& protein_accession : protein_accessions)
        {
          for (const String& white_listed : accessions)
          {
            if (protein_accession.hasSubstring(white_listed))
            {
              accession_match = true;
            }
          }
        }
      }
    }
    if (!sequences.empty() && !accessions.empty())
    {
      return sequence_match && accession_match; // both whitelists given: require both
    }
    if (!sequences.empty())
    {
      return sequence_match;
    }
    return accession_match;
  }
  return true;
}
/**
 * @brief Apply RT filtering with block-aware modes
 *
 * A "block" (cycle) is a run of consecutive spectra starting at the lowest MS level
 * present in the data. Depending on @p rt_block_mode, the RT boundaries are moved so
 * that only complete blocks remain, and the experiment is reloaded from its file with
 * the corrected RT range and full peak data.
 *
 * @param[in,out] exp The MSExperiment to filter; will be reloaded with actual data (or cleared if no block fits)
 * @param[in,out] f The filehandler with filtering options; its RT range and fill-data settings are modified
 * @param[in] rt_l Lower RT bound
 * @param[in] rt_u Upper RT bound
 * @param[in] rt_block_mode The RT cutting mode (RTBlockMode enum)
 */
void applyRTBlockFiltering(PeakMap& exp, FileHandler& f, double rt_l, double rt_u, RTBlockMode rt_block_mode)
{
  if (rt_block_mode == RTBlockMode::AS_IS)
  {
    return; // plain RT filtering was already applied on load; nothing block-aware to do
  }
  if (exp.empty()) return;
  auto min_ms_level = *exp.getMSLevels().begin(); // lowest MS level marks the start of a block
  exp.sortSpectra(false); // only by RT, not by m/z
  auto first_spec = exp.RTBegin(rt_l);
  auto last_spec = exp.RTBegin(rt_u);
  if (rt_block_mode == RTBlockMode::FULL_CYCLE_EXTEND)
  {
    if (first_spec == exp.end())
    { // rt_l lies beyond the last spectrum; there is nothing to keep
      OPENMS_LOG_WARN << "RTBlockMode: the range [" << rt_l << ", " << rt_u << "] lies beyond the last spectrum. Result is empty.\n";
      exp.clear(true);
      return;
    }
    while (first_spec != exp.begin() && first_spec->getMSLevel() != min_ms_level) --first_spec;
    while (last_spec != exp.end() && last_spec->getMSLevel() != min_ms_level) ++last_spec;
    if (last_spec == exp.begin())
    { // rt_u lies before the first block; there is nothing to keep
      OPENMS_LOG_WARN << "RTBlockMode: the range [" << rt_l << ", " << rt_u << "] lies before the first spectrum. Result is empty.\n";
      exp.clear(true);
      return;
    }
    // 'last_spec' now points to the start of a block, but we want it to point to the end of the previous block
    --last_spec;
  }
  else if (rt_block_mode == RTBlockMode::FULL_CYCLE_SHRINK)
  {
    // first_spec may be inside the last block of exp... and will end up as first_spec == exp.end()
    while (first_spec != exp.end() && first_spec->getMSLevel() != min_ms_level) ++first_spec;
    // last_spec points somewhere inside a block which contains 'rt_u'; let's move left to the start of this block
    while (last_spec != exp.end() && // if this happens, rt_u is beyond the last spectrum and we don't need to shrink
           last_spec != exp.begin() && last_spec->getMSLevel() != min_ms_level)
      --last_spec;
    if (last_spec == exp.begin())
    { // no complete block starts before 'rt_u'
      OPENMS_LOG_WARN << "RTBlockMode: there is no full block in the range [" << rt_l << ", " << rt_u << "]. Result is empty. Please extend RT range or use another block strategy.\n";
      exp.clear(true);
      return;
    }
    // 'last_spec' now points to the start of the invalid block, but we want it to point to the end of the previous block
    --last_spec;
    // in some cases, there was no full block inside [rt_l, rt_u]
    if (first_spec >= last_spec)
    {
      OPENMS_LOG_WARN << "RTBlockMode: there is no full block in the range [" << rt_l << ", " << rt_u << "]. Result is empty. Please extend RT range or use another block strategy.\n";
      exp.clear(true);
      return;
    }
  }
  OPENMS_LOG_INFO << "RTBlockMode: RT range changed from [" << rt_l << ", " << rt_u << "] to [" << first_spec->getRT() << " (MS " << first_spec->getMSLevel() << "), " << last_spec->getRT() << " (MS " << last_spec->getMSLevel() << ")]\n";
  assert(first_spec != exp.end());
  assert(last_spec != exp.end());
  // RT filtering uses a half-open interval [min, max), we thus need to move last_spec a tad to the right
  double rt_u_new = last_spec->getRT();
  if (last_spec == --exp.end())
  {// last_spec was the last spectrum in exp; we need to extend the upper RT boundary a bit to include it
    rt_u_new += 1.0;
  }
  else
  {
    rt_u_new = (rt_u_new + (last_spec + 1)->getRT()) / 2; // take midpoint to next spectrum
  }
  // reload with data and corrected rt range
  f.getOptions().setRTRange(DRange<1>(first_spec->getRT(), rt_u_new));
  f.getOptions().setFillData(true);
  auto filename = exp.getLoadedFilePath();
  f.loadExperiment(filename, exp);
}
protected:
typedef PeakMap MapType;
void registerOptionsAndFlags_() override
{
std::vector<String> formats = ListUtils::create<String>("mzML,featureXML,consensusXML");
registerInputFile_("in", "<file>", "", "Input file");
setValidFormats_("in", formats);
registerStringOption_("in_type", "<type>", "", "Input file type -- default: determined from file extension or content", false);
setValidStrings_("in_type", formats);
registerOutputFile_("out", "<file>", "", "Output file");
setValidFormats_("out", formats);
registerStringOption_("out_type", "<type>", "", "Output file type -- default: determined from file extension or content", false);
setValidStrings_("out_type", formats);
registerStringOption_("rt", "[min]:[max]", ":", "Retention time range to extract [s]", false);
registerStringOption_("rt_block_mode", "<mode>", RT_BLOCK_MODE_NAMES[(int)RTBlockMode::AS_IS], String("RT filtering mode: '") + RT_BLOCK_MODE_NAMES[(int)RTBlockMode::AS_IS] + "' uses RT range as given in '-rt'; '" + RT_BLOCK_MODE_NAMES[(int)RTBlockMode::FULL_CYCLE_EXTEND] + "' extends RT range to keep complete spectrum blocks intact, '" + RT_BLOCK_MODE_NAMES[(int)RTBlockMode::FULL_CYCLE_SHRINK] + "' only keeps complete blocks within the given RT range", false);
setValidStrings_("rt_block_mode", StringList(RT_BLOCK_MODE_NAMES.begin(), RT_BLOCK_MODE_NAMES.end()));
registerStringOption_("mz", "[min]:[max]", ":", "m/z range to extract (applies to ALL ms levels!)", false);
registerStringOption_("int", "[min]:[max]", ":", "Intensity range to extract", false);
registerFlag_("sort", "Sorts the output according to RT and m/z.");
registerTOPPSubsection_("peak_options", "Peak data options");
registerDoubleOption_("peak_options:sn", "<s/n ratio>", 0, "Write peaks with S/N > 'sn' values only", false);
registerIntList_("peak_options:rm_pc_charge", "i j ...", IntList(), "Remove MS(2) spectra with these precursor charges. All spectra without precursor are kept!", false);
registerStringOption_("peak_options:pc_mz_range", "[min]:[max]", ":", "MSn (n>=2) precursor filtering according to their m/z value. Do not use this flag in conjunction with 'mz', unless you want to actually remove peaks in spectra (see 'mz'). RT filtering is covered by 'rt' and compatible with this flag.", false);
registerDoubleList_("peak_options:pc_mz_list", "mz_1 mz_2 ...", DoubleList(), "List of m/z values. If a precursor window covers ANY of these values, the corresponding MS/MS spectrum will be kept.", false);
registerIntList_("peak_options:level", "i j ...", ListUtils::create<Int>("1,2,3"), "MS levels to extract", false);
registerFlag_("peak_options:sort_peaks", "Sorts the peaks according to m/z");
registerFlag_("peak_options:no_chromatograms", "No conversion to space-saving real chromatograms, e.g. from SRM scans");
registerFlag_("peak_options:remove_chromatograms", "Removes chromatograms stored in a file");
registerFlag_("peak_options:remove_empty", "Removes spectra and chromatograms without peaks.");
registerFlag_("peak_options:remove_metadataarrays", "Remove all binary data (e.g. ion mobility), except m/z and intensity.");
registerStringOption_("peak_options:mz_precision", "32 or 64", 64, "Store base64 encoded m/z data using 32 or 64 bit precision", false);
setValidStrings_("peak_options:mz_precision", ListUtils::create<String>("32,64"));
registerStringOption_("peak_options:int_precision", "32 or 64", 32, "Store base64 encoded intensity data using 32 or 64 bit precision", false);
setValidStrings_("peak_options:int_precision", ListUtils::create<String>("32,64"));
registerStringOption_("peak_options:indexed_file", "true or false", "true", "Whether to add an index to the file when writing", false);
setValidStrings_("peak_options:indexed_file", ListUtils::create<String>("true,false"));
registerStringOption_("peak_options:zlib_compression", "true or false", "false", "Whether to store data with zlib compression (lossless compression)", false);
setValidStrings_("peak_options:zlib_compression", ListUtils::create<String>("true,false"));
registerTOPPSubsection_("peak_options:numpress", "Numpress compression for peak data");
registerStringOption_("peak_options:numpress:masstime", "<compression_scheme>", "none", "Apply MS Numpress compression algorithms in m/z or rt dimension (recommended: linear)", false);
setValidStrings_("peak_options:numpress:masstime", MSNumpressCoder::NamesOfNumpressCompression, (int)MSNumpressCoder::SIZE_OF_NUMPRESSCOMPRESSION);
registerDoubleOption_("peak_options:numpress:lossy_mass_accuracy", "<error>", -1.0, "Desired (absolute) m/z accuracy for lossy compression (e.g. use 0.0001 for a mass accuracy of 0.2 ppm at 500 m/z, default uses -1.0 for maximal accuracy).", false, true);
registerStringOption_("peak_options:numpress:intensity", "<compression_scheme>", "none", "Apply MS Numpress compression algorithms in intensity dimension (recommended: slof or pic)", false);
setValidStrings_("peak_options:numpress:intensity", MSNumpressCoder::NamesOfNumpressCompression, (int)MSNumpressCoder::SIZE_OF_NUMPRESSCOMPRESSION);
registerStringOption_("peak_options:numpress:float_da", "<compression_scheme>", "none", "Apply MS Numpress compression algorithms for the float data arrays (recommended: slof or pic)", false);
setValidStrings_("peak_options:numpress:float_da", MSNumpressCoder::NamesOfNumpressCompression, (int)MSNumpressCoder::SIZE_OF_NUMPRESSCOMPRESSION);
registerTOPPSubsection_("spectra", "Remove spectra or select spectra (removing all others) with certain properties");
registerFlag_("spectra:remove_zoom", "Remove zoom (enhanced resolution) scans");
registerStringOption_("spectra:remove_mode", "<mode>", "", "Remove scans by scan mode", false);
setValidStrings_("spectra:remove_mode", InstrumentSettings::NamesOfScanMode, (int)static_cast<size_t>(InstrumentSettings::ScanMode::SIZE_OF_SCANMODE));
addEmptyLine_();
registerStringOption_("spectra:remove_activation", "<activation>", "", "Remove MSn scans where any of its precursors features a certain activation method", false);
setValidStrings_("spectra:remove_activation", Precursor::NamesOfActivationMethod, (int)Precursor::ActivationMethod::SIZE_OF_ACTIVATIONMETHOD);
registerStringOption_("spectra:remove_collision_energy", "[min]:[max]", ":", "Remove MSn scans with a collision energy in the given interval", false);
registerStringOption_("spectra:remove_isolation_window_width", "[min]:[max]", ":", "Remove MSn scans whose isolation window width is in the given interval", false);
addEmptyLine_();
registerFlag_("spectra:select_zoom", "Select zoom (enhanced resolution) scans");
registerStringOption_("spectra:select_mode", "<mode>", "", "Selects scans by scan mode\n", false);
setValidStrings_("spectra:select_mode", InstrumentSettings::NamesOfScanMode, (int)static_cast<size_t>(InstrumentSettings::ScanMode::SIZE_OF_SCANMODE));
registerStringOption_("spectra:select_activation", "<activation>", "", "Retain MSn scans where any of its precursors features a certain activation method", false);
setValidStrings_("spectra:select_activation", Precursor::NamesOfActivationMethod, (int)Precursor::ActivationMethod::SIZE_OF_ACTIVATIONMETHOD);
registerStringOption_("spectra:select_collision_energy", "[min]:[max]", ":", "Select MSn scans with a collision energy in the given interval", false);
registerStringOption_("spectra:select_isolation_window_width", "[min]:[max]", ":", "Select MSn scans whose isolation window width is in the given interval", false);
addEmptyLine_();
registerStringOption_("spectra:select_polarity", "<polarity>", "", "Retain MSn scans with a certain scan polarity", false);
setValidStrings_("spectra:select_polarity", IonSource::NamesOfPolarity, static_cast<int>(IonSource::Polarity::SIZE_OF_POLARITY));
registerTOPPSubsection_("spectra:blackorwhitelist", "Black or white listing of of MS2 spectra by spectral similarity");
registerInputFile_("spectra:blackorwhitelist:file", "<file>", "", "Input file containing MS2 spectra that should be retained or removed from the mzML file!\n"
"Matching tolerances are taken from 'spectra:blackorwhitelist:similarity_threshold|rt|mz' options.\n", false);
setValidFormats_("spectra:blackorwhitelist:file", ListUtils::create<String>("mzML"));
registerDoubleOption_("spectra:blackorwhitelist:similarity_threshold", "<similarity>", -1, "Similarity threshold when matching MS2 spectra. (-1 = disabled).", false);
registerDoubleOption_("spectra:blackorwhitelist:rt", "tolerance", 0.01, "Retention tolerance [s] when matching precursor positions. (-1 = disabled)", false);
registerDoubleOption_("spectra:blackorwhitelist:mz", "tolerance", 0.01, "m/z tolerance [Th] when matching precursor positions. (-1 = disabled)", false);
registerStringOption_("spectra:blackorwhitelist:use_ppm_tolerance", "", "false", "If ppm tolerance should be used. Otherwise Da are used.", false, false);
registerStringOption_("spectra:blackorwhitelist:blacklist", "", "true", "True: remove matched MS2. False: retain matched MS2 spectra. Other levels are kept", false, false);
setValidStrings_("spectra:blackorwhitelist:blacklist", ListUtils::create<String>("false,true"));
setMinFloat_("spectra:blackorwhitelist:similarity_threshold", -1.0);
setMaxFloat_("spectra:blackorwhitelist:similarity_threshold", 1.0);
registerStringOption_("spectra:replace_pc_charge", "in_charge:out_charge", ":", "Replaces in_charge with out_charge in all precursors.", false, false);
addEmptyLine_();
registerTOPPSubsection_("feature", "Feature data options");
registerStringOption_("feature:q", "[min]:[max]", ":", "Overall quality range to extract [0:1]", false);
addEmptyLine_();
registerTOPPSubsection_("consensus", "Consensus feature data options");
registerIntList_("consensus:map", "i j ...", ListUtils::create<Int>(""), "Non-empty list of maps to be extracted from a consensus (indices are 0-based).", false);
registerFlag_("consensus:map_and", "Consensus features are kept only if they contain exactly one feature from each map (as given above in 'map')");
// black and white listing
registerTOPPSubsection_("consensus:blackorwhitelist", "Black or white listing of of MS2 spectra by consensus features");
registerStringOption_("consensus:blackorwhitelist:blacklist", "", "true", "True: remove matched MS2. False: retain matched MS2 spectra. Other levels are kept", false, false);
setValidStrings_("consensus:blackorwhitelist:blacklist", ListUtils::create<String>("false,true"));
registerInputFile_("consensus:blackorwhitelist:file", "<file>", "", "Input file containing consensus features whose corresponding MS2 spectra should be removed from the mzML file!\n"
"Matching tolerances are taken from 'consensus:blackorwhitelist:rt' and 'consensus:blackorwhitelist:mz' options.\n"
"If consensus:blackorwhitelist:maps is specified, only these will be used.\n", false);
setValidFormats_("consensus:blackorwhitelist:file", ListUtils::create<String>("consensusXML"));
registerIntList_("consensus:blackorwhitelist:maps", "i j ...", ListUtils::create<Int>(""), "Maps used for black/white list filtering", false);
registerDoubleOption_("consensus:blackorwhitelist:rt", "tolerance", 60.0, "Retention tolerance [s] for precursor to consensus feature position", false);
registerDoubleOption_("consensus:blackorwhitelist:mz", "tolerance", 0.01, "m/z tolerance [Th] for precursor to consensus feature position", false);
registerStringOption_("consensus:blackorwhitelist:use_ppm_tolerance", "", "false", "If ppm tolerance should be used. Otherwise Da are used.", false, false);
setValidStrings_("consensus:blackorwhitelist:use_ppm_tolerance", ListUtils::create<String>("false,true"));
setMinFloat_("consensus:blackorwhitelist:rt", 0);
setMinFloat_("consensus:blackorwhitelist:mz", 0);
addEmptyLine_();
registerTOPPSubsection_("f_and_c", "Feature & Consensus data options");
registerStringOption_("f_and_c:charge", "[min]:[max]", ":", "Charge range to extract", false);
registerStringOption_("f_and_c:size", "[min]:[max]", ":", "Size range to extract", false);
registerStringList_("f_and_c:remove_meta", "<name> 'lt|eq|gt' <value>", StringList(), "Expects a 3-tuple (=3 entries in the list), i.e. <name> 'lt|eq|gt' <value>; the first is the name of meta value, followed by the comparison operator (equal, less or greater) and the value to compare to. All comparisons are done after converting the given value to the corresponding data value type of the meta value (for lists, this simply compares length, not content!)!", false);
registerFlag_("f_and_c:remove_hull", "Remove hull from features.", false);
addEmptyLine_();
// XXX: Change description
registerTOPPSubsection_("id", "ID options. The Priority of the id-flags is: remove_annotated_features / remove_unannotated_features -> remove_clashes -> keep_best_score_id -> sequences_whitelist / accessions_whitelist");
registerFlag_("id:remove_clashes", "Remove features with id clashes (different sequences mapped to one feature)", true);
registerFlag_("id:keep_best_score_id", "in case of multiple peptide identifications, keep only the id with best score");
registerStringList_("id:sequences_whitelist", "<sequence>", StringList(), "Keep only features containing whitelisted substrings, e.g. features containing LYSNLVER or the modification (Oxidation). To control comparison method used for whitelisting, see 'id:sequence_comparison_method'.", false);
registerStringOption_("id:sequence_comparison_method", "substring|exact", "substring", "Comparison method used to determine if a feature is whitelisted.", false, true);
registerStringList_("id:accessions_whitelist", "<accessions>", StringList(), "keep only features with white listed accessions, e.g. sp|P02662|CASA1_BOVIN", false);
// XXX: Proper description of this parameter.
setValidStrings_("id:sequence_comparison_method", ListUtils::create<String>("substring,exact"));
registerFlag_("id:remove_annotated_features", "Remove features with annotations");
registerFlag_("id:remove_unannotated_features", "Remove features without annotations");
registerFlag_("id:remove_unassigned_ids", "Remove unassigned peptide identifications");
registerInputFile_("id:blacklist", "<file>", "", "Input file containing MS2 identifications whose corresponding MS2 spectra should be removed from the mzML file!\n"
"Matching tolerances are taken from 'id:rt' and 'id:mz' options.\n"
"This tool will require all IDs to be matched to an MS2 spectrum, and quit with error otherwise. Use 'id:blacklist_imperfect' to allow for mismatches.", false);
setValidFormats_("id:blacklist", ListUtils::create<String>("idXML"));
registerDoubleOption_("id:rt", "tolerance", 0.1, "Retention tolerance [s] for precursor to id position", false);
registerDoubleOption_("id:mz", "tolerance", 0.001, "m/z tolerance [Th] for precursor to id position", false);
setMinFloat_("id:rt", 0);
setMinFloat_("id:mz", 0);
registerFlag_("id:blacklist_imperfect", "Allow for mismatching precursor positions (see 'id:blacklist')");
addEmptyLine_();
registerSubsection_("algorithm", "S/N algorithm section");
}
/// Provides the default parameters for the 'algorithm' subsection (S/N estimation settings)
Param getSubsectionDefaults_(const String& /*section*/) const override
{
  Param defaults;
  SignalToNoiseEstimatorMedian<MapType::SpectrumType> sn_estimator;
  defaults.insert("SignalToNoise:", sn_estimator.getParameters());
  return defaults;
}
/// Decides whether 'mi' passes (i.e. is KEPT by) the 'f_and_c:remove_meta' filter.
/// @param mi        The item's meta info
/// @param meta_info 3-tuple: meta value name, comparison operator ('lt', 'eq' or 'gt'), value to compare against
/// @return true if the meta value is absent or the removal criterion does NOT hold; false if the item is to be removed
/// @throws Exception::IllegalArgument if the comparison operator is none of 'lt', 'eq', 'gt'
bool checkMetaOk(const MetaInfoInterface& mi, const StringList& meta_info)
{
if (!mi.metaValueExists(meta_info[0]))
{
return true; // not having the meta value means passing the test
}
// convert the user-given string to the stored meta value's type, so DataValue's
// comparison operators compare like with like (lists compare by length, not content)
DataValue v_data = mi.getMetaValue(meta_info[0]);
DataValue v_user;
if (v_data.valueType() == DataValue::STRING_VALUE)
{
v_user = String(meta_info[2]);
}
else if (v_data.valueType() == DataValue::INT_VALUE)
{
v_user = String(meta_info[2]).toInt();
}
else if (v_data.valueType() == DataValue::DOUBLE_VALUE)
{
v_user = String(meta_info[2]).toDouble();
}
else if (v_data.valueType() == DataValue::STRING_LIST)
{
v_user = (StringList)ListUtils::create<String>(meta_info[2]);
}
else if (v_data.valueType() == DataValue::INT_LIST)
{
v_user = ListUtils::create<Int>(meta_info[2]);
}
else if (v_data.valueType() == DataValue::DOUBLE_LIST)
{
v_user = ListUtils::create<double>(meta_info[2]);
}
else if (v_data.valueType() == DataValue::EMPTY_VALUE)
{
v_user = DataValue::EMPTY;
}
// the item passes (true) only if the removal criterion does NOT match, hence the negations
if (meta_info[1] == "lt")
{
return !(v_data < v_user);
}
else if (meta_info[1] == "eq")
{
return !(v_data == v_user);
}
else if (meta_info[1] == "gt")
{
return !(v_data > v_user);
}
else
{
writeLogError_("Internal Error. Meta value filtering got invalid comparison operator ('" + meta_info[1] + "'), which should have been caught before! Aborting!");
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Illegal meta value filtering operator!");
}
}
/// Main entry point: parses all filtering parameters, then dispatches on the input
/// file type (mzML / featureXML / consensusXML), applies the requested filters and
/// writes the result.
/// @return EXECUTION_OK on success; ILLEGAL_PARAMETERS on bad parameters;
///         INCOMPATIBLE_INPUT_DATA on an unknown input file type
ExitCodes main_(int, const char**) override
{
//-------------------------------------------------------------
// parameter handling
//-------------------------------------------------------------
//input file name and type
String in = getStringOption_("in");
FileHandler fh;
FileTypes::Type in_type = fh.getType(in);
//only use flag in_type, if the in_type cannot be determined by file
if (in_type == FileTypes::UNKNOWN)
{
in_type = FileTypes::nameToType(getStringOption_("in_type"));
writeDebug_(String("Input file type: ") + FileTypes::typeToName(in_type), 2);
}
//output file name and type
String out = getStringOption_("out");
FileTypes::Type out_type = fh.getTypeByFileName(out);
//only use flag out_type, if the out_type cannot be determined by file
if (out_type == FileTypes::UNKNOWN)
{
out_type = FileTypes::nameToType(getStringOption_("out_type"));
writeDebug_(String("Output file type: ") + FileTypes::typeToName(out_type), 2);
}
//use in_type as out_type, if out_type cannot be determined by file or out_type flag
if (out_type == FileTypes::UNKNOWN)
{
out_type = in_type;
writeDebug_(String("Output file type: ") + FileTypes::typeToName(out_type), 2);
}
bool no_chromatograms(getFlag_("peak_options:no_chromatograms"));
//ranges
double mz_l, mz_u, rt_l, rt_u, it_l, it_u, charge_l, charge_u, size_l, size_u, q_l, q_u, pc_left, pc_right, select_collision_l, remove_collision_l, select_collision_u, remove_collision_u, select_isolation_width_l, remove_isolation_width_l, select_isolation_width_u, remove_isolation_width_u, replace_pc_charge_in, replace_pc_charge_out;
//initialize ranges
// lower bounds start at -max, upper bounds at +max; these extreme values double
// as "option was not set" sentinels further below (e.g. for collision energy)
mz_l = rt_l = it_l = charge_l = size_l = q_l = pc_left = select_collision_l = remove_collision_l = select_isolation_width_l = remove_isolation_width_l = replace_pc_charge_in = -1 * numeric_limits<double>::max();
mz_u = rt_u = it_u = charge_u = size_u = q_u = pc_right = select_collision_u = remove_collision_u = select_isolation_width_u = remove_isolation_width_u = replace_pc_charge_out = numeric_limits<double>::max();
String rt = getStringOption_("rt");
RTBlockMode rt_block_mode = (RTBlockMode)Helpers::indexOf(RT_BLOCK_MODE_NAMES, getStringOption_("rt_block_mode"));
String mz = getStringOption_("mz");
String pc_mz_range = getStringOption_("peak_options:pc_mz_range");
String it = getStringOption_("int");
IntList levels = getIntList_("peak_options:level");
IntList maps = getIntList_("consensus:map");
double sn = getDoubleOption_("peak_options:sn");
String charge = getStringOption_("f_and_c:charge");
String size = getStringOption_("f_and_c:size");
String q = getStringOption_("feature:q");
String remove_collision_energy = getStringOption_("spectra:remove_collision_energy");
String select_collision_energy = getStringOption_("spectra:select_collision_energy");
String remove_isolation_width = getStringOption_("spectra:remove_isolation_window_width");
String select_isolation_width = getStringOption_("spectra:select_isolation_window_width");
String replace_pc_charge = getStringOption_("spectra:replace_pc_charge");
int mz32 = getStringOption_("peak_options:mz_precision").toInt();
int int32 = getStringOption_("peak_options:int_precision").toInt();
bool indexed_file = getStringOption_("peak_options:indexed_file") == "true";
bool zlib_compression = getStringOption_("peak_options:zlib_compression") == "true";
//-----------------------------------
// MS Numpress options
//-----------------------------------
MSNumpressCoder::NumpressConfig npconfig_mz, npconfig_int, npconfig_fda;
npconfig_mz.estimate_fixed_point = true; // critical
npconfig_int.estimate_fixed_point = true; // critical
npconfig_fda.estimate_fixed_point = true; // critical
// npconfig_mz.numpressErrorTolerance = getDoubleOption_("peak_options:numpress:masstime_error");
// npconfig_int.numpressErrorTolerance = getDoubleOption_("peak_options:numpress:intensity_error");
// npconfig_fda.numpressErrorTolerance = getDoubleOption_("peak_options:numpress:intensity_error");
npconfig_mz.setCompression(getStringOption_("peak_options:numpress:masstime"));
npconfig_int.setCompression(getStringOption_("peak_options:numpress:intensity"));
npconfig_fda.setCompression(getStringOption_("peak_options:numpress:float_da"));
double mass_acc = getDoubleOption_("peak_options:numpress:lossy_mass_accuracy");
npconfig_mz.linear_fp_mass_acc = mass_acc; // set the desired mass accuracy
//-----------------------------------
// ID-filtering parameters
//-----------------------------------
bool remove_annotated_features = getFlag_("id:remove_annotated_features");
bool remove_unannotated_features = getFlag_("id:remove_unannotated_features");
bool remove_unassigned_ids = getFlag_("id:remove_unassigned_ids");
StringList sequences = getStringList_("id:sequences_whitelist");
String sequence_comparison_method = getStringOption_("id:sequence_comparison_method");
StringList accessions = getStringList_("id:accessions_whitelist");
bool keep_best_score_id = getFlag_("id:keep_best_score_id");
bool remove_clashes = getFlag_("id:remove_clashes");
bool remove_hulls = getFlag_("f_and_c:remove_hull");
// convert bounds to numbers
// parseRange_ only overwrites the bounds that are actually given, so unset
// sides keep their +-max sentinel values
try
{
//rt
parseRange_(rt, rt_l, rt_u);
//mz
parseRange_(mz, mz_l, mz_u);
//mz precursor
parseRange_(pc_mz_range, pc_left, pc_right);
//int
parseRange_(it, it_l, it_u);
//charge (features only)
parseRange_(charge, charge_l, charge_u);
//size (features and consensus features only)
parseRange_(size, size_l, size_u);
//overall quality (features only)
parseRange_(q, q_l, q_u);
//remove collision energy
parseRange_(remove_collision_energy, remove_collision_l, remove_collision_u);
//select collision energy
parseRange_(select_collision_energy, select_collision_l, select_collision_u);
//remove isolation window width
parseRange_(remove_isolation_width, remove_isolation_width_l, remove_isolation_width_u);
//select isolation window width
parseRange_(select_isolation_width, select_isolation_width_l, select_isolation_width_u);
//parse precursor charge from in to out
parseRange_(replace_pc_charge, replace_pc_charge_in, replace_pc_charge_out);
}
catch (Exception::ConversionError& ce)
{
writeLogError_(String("Error: Invalid boundary given: ") + ce.what() + ". Aborting!");
printUsage_();
return ILLEGAL_PARAMETERS;
}
// sort by RT and m/z
bool sort = getFlag_("sort");
writeDebug_("Sorting output data: " + String(sort), 3);
// handle remove_meta
// expected form: <meta name> <lt|eq|gt> <value>; validated here, evaluated in checkMetaOk
StringList meta_info = getStringList_("f_and_c:remove_meta");
bool remove_meta_enabled = (!meta_info.empty());
if (remove_meta_enabled && meta_info.size() != 3)
{
writeLogError_("Error: Param 'f_and_c:remove_meta' has invalid number of arguments. Expected 3, got " + String(meta_info.size()) + ". Aborting!");
printUsage_();
return ILLEGAL_PARAMETERS;
}
if (remove_meta_enabled && !(meta_info[1] == "lt" || meta_info[1] == "eq" || meta_info[1] == "gt"))
{
writeLogError_("Error: Param 'f_and_c:remove_meta' has invalid second argument. Expected one of 'lt', 'eq' or 'gt'. Got '" + meta_info[1] + "'. Aborting!");
printUsage_();
return ILLEGAL_PARAMETERS;
}
if (in_type == FileTypes::MZML)
{
//-------------------------------------------------------------
// loading input
//-------------------------------------------------------------
FileHandler f;
// Only apply RT filtering at load time if using exact mode
if (rt_block_mode == RTBlockMode::AS_IS)
{
f.getOptions().setRTRange(DRange<1>(rt_l, rt_u));
}
else
{ // do not load data (yet)
// NOTE(review): block modes presumably need all spectra present to detect RT
// blocks, so peak data loading is deferred to applyRTBlockFiltering — confirm
f.getOptions().setFillData(false);
}
f.getOptions().setMZRange(DRange<1>(mz_l, mz_u));
f.getOptions().setIntensityRange(DRange<1>(it_l, it_u));
f.getOptions().setMSLevels(levels);
// set precision options
if (mz32 == 32)
{
f.getOptions().setMz32Bit(true);
}
else if (mz32 == 64)
{
f.getOptions().setMz32Bit(false);
}
if (int32 == 32)
{
f.getOptions().setIntensity32Bit(true);
}
else if (int32 == 64)
{
f.getOptions().setIntensity32Bit(false);
}
// set writing index (e.g. indexedmzML)
f.getOptions().setWriteIndex(indexed_file);
f.getOptions().setCompression(zlib_compression);
// numpress compression
f.getOptions().setNumpressConfigurationMassTime(npconfig_mz);
f.getOptions().setNumpressConfigurationIntensity(npconfig_int);
f.getOptions().setNumpressConfigurationFloatDataArray(npconfig_fda);
MapType exp;
f.loadExperiment(in, exp, {FileTypes::MZML}, log_type_);
// Apply RT filtering with block-aware modes if not using exact mode
if (rt_block_mode != RTBlockMode::AS_IS && ! rt.empty())
{
applyRTBlockFiltering(exp, f, rt_l, rt_u, rt_block_mode);
}
// remove spectra with meta values:
if (remove_meta_enabled)
{
// collect the surviving spectra in a temporary map, then swap them back in;
// clear(false) keeps the experiment-level meta data
MapType exp_tmp;
for (MapType::ConstIterator it = exp.begin(); it != exp.end(); ++it)
{
if (checkMetaOk(*it, meta_info)) exp_tmp.addSpectrum(*it);
}
exp.clear(false);
exp.getSpectra().insert(exp.begin(), exp_tmp.begin(), exp_tmp.end());
}
if (!no_chromatograms)
{
// convert the spectra chromatograms to real chromatograms
ChromatogramTools chrom_tools;
chrom_tools.convertSpectraToChromatograms(exp, true);
}
bool remove_chromatograms(getFlag_("peak_options:remove_chromatograms"));
if (remove_chromatograms)
{
exp.setChromatograms(vector<MSChromatogram >());
}
bool remove_empty = getFlag_("peak_options:remove_empty");
if (remove_empty)
{
auto& spectra = exp.getSpectra();
spectra.erase(
remove_if(spectra.begin(), spectra.end(), [](const MSSpectrum & s){ return s.empty();} )
,spectra.end());
auto& chroms = exp.getChromatograms();
chroms.erase(
remove_if(chroms.begin(), chroms.end(), [](const MSChromatogram & c){ return c.empty();} )
,chroms.end());
}
bool remove_metadataarrays = getFlag_("peak_options:remove_metadataarrays");
if (remove_metadataarrays)
{
for (MapType::SpectrumType& spec : exp.getSpectra())
{
spec.getFloatDataArrays().clear();
spec.getStringDataArrays().clear();
spec.getIntegerDataArrays().clear();
}
}
//-------------------------------------------------------------
// calculations
//-------------------------------------------------------------
// remove forbidden precursor charges
IntList rm_pc_charge = getIntList_("peak_options:rm_pc_charge");
if (!rm_pc_charge.empty())
{
std::erase_if(exp.getSpectra(), HasPrecursorCharge<MapType::SpectrumType>(rm_pc_charge, false));
}
// remove precursors out of certain m/z range for all spectra with a precursor (MS2 and above)
if (!pc_mz_range.empty())
{
std::erase_if(exp.getSpectra(), InPrecursorMZRange<MapType::SpectrumType>(pc_left, pc_right, true));
}
// keep MS/MS spectra whose precursors cover at least of the given m/z values
std::vector<double> vec_mz = getDoubleList_("peak_options:pc_mz_list");
if (!vec_mz.empty())
{
std::erase_if(exp.getSpectra(), IsInIsolationWindow<MapType::SpectrumType>(vec_mz, true));
}
// remove by scan mode (might be a lot of spectra)
String remove_mode = getStringOption_("spectra:remove_mode");
if (!remove_mode.empty())
{
writeDebug_("Removing mode: " + remove_mode, 3);
for (Size i = 0; i < static_cast<size_t>(InstrumentSettings::ScanMode::SIZE_OF_SCANMODE); ++i)
{
if (InstrumentSettings::NamesOfScanMode[i] == remove_mode)
{
std::erase_if(exp.getSpectra(), HasScanMode<MapType::SpectrumType>(static_cast<Int>(i)));
}
}
}
//select by scan mode (might be a lot of spectra)
String select_mode = getStringOption_("spectra:select_mode");
if (!select_mode.empty())
{
writeDebug_("Selecting mode: " + select_mode, 3);
for (Size i = 0; i < static_cast<size_t>(InstrumentSettings::ScanMode::SIZE_OF_SCANMODE); ++i)
{
if (InstrumentSettings::NamesOfScanMode[i] == select_mode)
{
std::erase_if(exp.getSpectra(), HasScanMode<MapType::SpectrumType>(static_cast<Int>(i), true));
}
}
}
//remove by activation mode (might be a lot of spectra)
String remove_activation = getStringOption_("spectra:remove_activation");
if (!remove_activation.empty())
{
writeDebug_("Removing scans with activation mode: " + remove_activation, 3);
for (Size i = 0; i < static_cast<Size>(Precursor::ActivationMethod::SIZE_OF_ACTIVATIONMETHOD); ++i)
{
if (Precursor::NamesOfActivationMethod[i] == remove_activation)
{
exp.getSpectra().erase(remove_if(exp.begin(), exp.end(), HasActivationMethod<MapType::SpectrumType>(ListUtils::create<String>(remove_activation))), exp.end());
}
}
}
//select by activation mode
String select_activation = getStringOption_("spectra:select_activation");
if (!select_activation.empty())
{
writeDebug_("Selecting scans with activation mode: " + select_activation, 3);
for (Size i = 0; i < static_cast<Size>(Precursor::ActivationMethod::SIZE_OF_ACTIVATIONMETHOD); ++i)
{
if (Precursor::NamesOfActivationMethod[i] == select_activation)
{
exp.getSpectra().erase(remove_if(exp.begin(), exp.end(), HasActivationMethod<MapType::SpectrumType>(ListUtils::create<String>(select_activation), true)), exp.end());
}
}
}
//select by scan polarity
String select_polarity = getStringOption_("spectra:select_polarity");
if (!select_polarity.empty())
{
writeDebug_("Selecting polarity: " + select_polarity, 3);
IonSource::Polarity pol = IonSource::toPolarity(select_polarity);
exp.getSpectra().erase(remove_if(exp.begin(), exp.end(), HasScanPolarity<MapType::SpectrumType>(pol, true)), exp.end());
}
//remove zoom scans (might be a lot of spectra)
if (getFlag_("spectra:remove_zoom"))
{
writeDebug_("Removing zoom scans", 3);
exp.getSpectra().erase(remove_if(exp.begin(), exp.end(), IsZoomSpectrum<MapType::SpectrumType>()), exp.end());
}
if (getFlag_("spectra:select_zoom"))
{
writeDebug_("Selecting zoom scans", 3);
exp.getSpectra().erase(remove_if(exp.begin(), exp.end(), IsZoomSpectrum<MapType::SpectrumType>(true)), exp.end());
}
//remove based on collision energy
// the +-max sentinel comparison detects whether the user actually set a bound
if (remove_collision_l != -1 * numeric_limits<double>::max() || remove_collision_u != numeric_limits<double>::max())
{
writeDebug_(String("Removing collision energy scans in the range: ") + remove_collision_l + ":" + remove_collision_u, 3);
exp.getSpectra().erase(remove_if(exp.begin(), exp.end(), IsInCollisionEnergyRange<PeakMap::SpectrumType>(remove_collision_l, remove_collision_u)), exp.end());
}
if (select_collision_l != -1 * numeric_limits<double>::max() || select_collision_u != numeric_limits<double>::max())
{
writeDebug_(String("Selecting collision energy scans in the range: ") + select_collision_l + ":" + select_collision_u, 3);
exp.getSpectra().erase(remove_if(exp.begin(), exp.end(), IsInCollisionEnergyRange<PeakMap::SpectrumType>(select_collision_l, select_collision_u, true)), exp.end());
}
//remove based on isolation window size
if (remove_isolation_width_l != -1 * numeric_limits<double>::max() || remove_isolation_width_u != numeric_limits<double>::max())
{
writeDebug_(String("Removing isolation windows with width in the range: ") + remove_isolation_width_l + ":" + remove_isolation_width_u, 3);
exp.getSpectra().erase(remove_if(exp.begin(), exp.end(), IsInIsolationWindowSizeRange<PeakMap::SpectrumType>(remove_isolation_width_l, remove_isolation_width_u)), exp.end());
}
if (select_isolation_width_l != -1 * numeric_limits<double>::max() || select_isolation_width_u != numeric_limits<double>::max())
{
writeDebug_(String("Selecting isolation windows with width in the range: ") + select_isolation_width_l + ":" + select_isolation_width_u, 3);
exp.getSpectra().erase(remove_if(exp.begin(), exp.end(), IsInIsolationWindowSizeRange<PeakMap::SpectrumType>(select_isolation_width_l, select_isolation_width_u, true)), exp.end());
}
// reannoate precursor charge if both range values are set
if (replace_pc_charge_in != -1 * numeric_limits<double>::max() && replace_pc_charge_out != numeric_limits<double>::max())
{
replacePrecursorCharge(exp, (int)replace_pc_charge_in, (int)replace_pc_charge_out);
}
//remove empty scans
std::erase_if(exp.getSpectra(), IsEmptySpectrum<MapType::SpectrumType>());
//sort
if (sort)
{
exp.sortSpectra(true);
if (getFlag_("peak_options:sort_peaks"))
{
OPENMS_LOG_INFO << "Info: Using 'peak_options:sort_peaks' in combination with 'sort' is redundant, since 'sort' implies 'peak_options:sort_peaks'." << std::endl;
}
}
else if (getFlag_("peak_options:sort_peaks"))
{
for (Size i = 0; i < exp.size(); ++i)
{
exp[i].sortByPosition();
}
}
// calculate S/N values and delete data points below S/N threshold
if (sn > 0)
{
SignalToNoiseEstimatorMedian<MapType::SpectrumType> snm;
Param const& dc_param = getParam_().copy("algorithm:SignalToNoise:", true);
snm.setParameters(dc_param);
for (auto& spec : exp)
{
snm.init(spec);
// zero out low-S/N peaks first, then erase everything below intensity 1
for (Size i = 0; i != spec.size(); ++i)
{
if (snm.getSignalToNoise(i) < sn) spec[i].setIntensity(0);
}
spec.erase(remove_if(spec.begin(), spec.end(), InIntensityRange<MapType::PeakType>(1, numeric_limits<MapType::PeakType::IntensityType>::max(), true)), spec.end());
}
}
//
String id_blacklist = getStringOption_("id:blacklist");
if (!id_blacklist.empty())
{
OPENMS_LOG_INFO << "Filtering out MS2 spectra from raw file using blacklist ..." << std::endl;
bool blacklist_imperfect = getFlag_("id:blacklist_imperfect");
int ret = filterByBlackList(exp, id_blacklist, blacklist_imperfect, getDoubleOption_("id:rt"), getDoubleOption_("id:mz"));
if (ret != EXECUTION_OK)
{
return (ExitCodes)ret;
}
}
// check if filtering by consensus feature is enabled
String consensus_blackorwhitelist = getStringOption_("consensus:blackorwhitelist:file");
if (!consensus_blackorwhitelist.empty())
{
OPENMS_LOG_INFO << "Filtering MS2 spectra from raw file using consensus features ..." << std::endl;
IntList il = getIntList_("consensus:blackorwhitelist:maps");
set<UInt64> maps(il.begin(), il.end());
double rt_tol = getDoubleOption_("consensus:blackorwhitelist:rt");
double mz_tol = getDoubleOption_("consensus:blackorwhitelist:mz");
bool is_ppm = getStringOption_("consensus:blackorwhitelist:use_ppm_tolerance") == "false" ? false : true;
bool is_blacklist = getStringOption_("consensus:blackorwhitelist:blacklist") == "true" ? true : false;
ConsensusMap consensus_map;
FileHandler cxml_file;
cxml_file.loadConsensusFeatures(consensus_blackorwhitelist, consensus_map);
consensus_map.sortByMZ();
int ret = filterByBlackOrWhiteList(is_blacklist, exp, consensus_map, rt_tol, mz_tol, is_ppm, maps);
if (ret != EXECUTION_OK)
{
return (ExitCodes)ret;
}
}
// filter spectra if they occur in spectra:blackorwhitelist:file
// (determined by comparing rt/mz/similarity)
String lib_file_name = getStringOption_("spectra:blackorwhitelist:file");
if (!lib_file_name.empty())
{
OPENMS_LOG_INFO << "Filtering MS2 spectra based on precursor rt, mz, and spectral similarity ..." << std::endl;
double tol_rt = getDoubleOption_("spectra:blackorwhitelist:rt");
double tol_mz = getDoubleOption_("spectra:blackorwhitelist:mz");
double tol_sim = getDoubleOption_("spectra:blackorwhitelist:similarity_threshold");
bool is_ppm = getStringOption_("spectra:blackorwhitelist:use_ppm_tolerance") == "true" ? true : false;
bool is_blacklist = getStringOption_("spectra:blackorwhitelist:blacklist") == "true" ? true : false;
PeakMap lib_file;
FileHandler().loadExperiment(lib_file_name, lib_file, {FileTypes::MZML}, log_type_);
int ret = filterByBlackOrWhiteList(is_blacklist, exp, lib_file, tol_rt, tol_mz, tol_sim, is_ppm);
if (ret != EXECUTION_OK)
{
return (ExitCodes)ret;
}
}
//-------------------------------------------------------------
// writing output
//-------------------------------------------------------------
//annotate output with data processing info
addDataProcessing_(exp, getProcessingInfo_(DataProcessing::FILTERING));
f.storeExperiment(out, exp,{FileTypes::MZML}, log_type_);
}
else if (in_type == FileTypes::FEATUREXML || in_type == FileTypes::CONSENSUSXML)
{
bool meta_ok = true; // assume true by default (as meta might not be checked below)
if (in_type == FileTypes::FEATUREXML)
{
//-------------------------------------------------------------
// loading input
//-------------------------------------------------------------
FeatureMap feature_map;
FileHandler f;
//f.setLogType(log_type_);
// this does not work yet implicitly - not supported by FeatureXMLFile
f.getFeatOptions().setRTRange(DRange<1>(rt_l, rt_u));
f.getFeatOptions().setMZRange(DRange<1>(mz_l, mz_u));
f.getFeatOptions().setIntensityRange(DRange<1>(it_l, it_u));
f.loadFeatures(in, feature_map);
//-------------------------------------------------------------
// calculations
//-------------------------------------------------------------
//copy all properties
FeatureMap map_sm = feature_map;
//.. but delete feature information
map_sm.clear(false);
// only keep charge ch_l:ch_u (WARNING: feature files without charge information have charge=0, see Ctor of KERNEL/Feature.h)
for (Feature& fm : feature_map)
{
// ranges are re-checked here manually since FeatureXMLFile ignores the load options (see above)
bool const rt_ok = f.getFeatOptions().getRTRange().encloses(DPosition<1>(fm.getRT()));
bool const mz_ok = f.getFeatOptions().getMZRange().encloses(DPosition<1>(fm.getMZ()));
bool const int_ok = f.getFeatOptions().getIntensityRange().encloses(DPosition<1>(fm.getIntensity()));
bool const charge_ok = ((charge_l <= fm.getCharge()) && (fm.getCharge() <= charge_u));
bool const size_ok = ((size_l <= fm.getSubordinates().size()) && (fm.getSubordinates().size() <= size_u));
bool const q_ok = ((q_l <= fm.getOverallQuality()) && (fm.getOverallQuality() <= q_u));
if (remove_hulls) fm.getConvexHulls().clear();
if (rt_ok && mz_ok && int_ok && charge_ok && size_ok && q_ok)
{
if (remove_meta_enabled)
{
meta_ok = checkMetaOk(fm, meta_info);
}
bool const annotation_ok = checkPeptideIdentification_(fm, remove_annotated_features, remove_unannotated_features, sequences, sequence_comparison_method, accessions, keep_best_score_id, remove_clashes);
if (annotation_ok && meta_ok) map_sm.push_back(fm);
}
}
//delete unassigned PeptideIdentifications
if (remove_unassigned_ids)
{
map_sm.getUnassignedPeptideIdentifications().clear();
}
//update minimum and maximum position/intensity
map_sm.updateRanges();
// sort if desired
if (sort)
{
map_sm.sortByPosition();
}
//-------------------------------------------------------------
// writing output
//-------------------------------------------------------------
//annotate output with data processing info
addDataProcessing_(map_sm, getProcessingInfo_(DataProcessing::FILTERING));
f.storeFeatures(out, map_sm, {FileTypes::FEATUREXML});
}
else if (in_type == FileTypes::CONSENSUSXML)
{
//-------------------------------------------------------------
// loading input
//-------------------------------------------------------------
ConsensusMap consensus_map;
FileHandler f;
//f.setLogType(log_type_);
f.getOptions().setRTRange(DRange<1>(rt_l, rt_u));
f.getOptions().setMZRange(DRange<1>(mz_l, mz_u));
f.getOptions().setIntensityRange(DRange<1>(it_l, it_u));
f.loadConsensusFeatures(in, consensus_map);
//-------------------------------------------------------------
// calculations
//-------------------------------------------------------------
// copy all properties
ConsensusMap consensus_map_filtered = consensus_map;
//.. but delete feature information
// resize(0) keeps the map-level meta data (headers, protein IDs, ...)
consensus_map_filtered.resize(0);
for (ConsensusFeature& cm : consensus_map)
{
const bool charge_ok = ((charge_l <= cm.getCharge()) && (cm.getCharge() <= charge_u));
const bool size_ok = ((cm.size() >= size_l) && (cm.size() <= size_u));
if (charge_ok && size_ok)
{
// this is expensive, so evaluate after everything else passes the test
if (remove_meta_enabled)
{
meta_ok = checkMetaOk(cm, meta_info);
}
const bool annotation_ok = checkPeptideIdentification_(cm, remove_annotated_features, remove_unannotated_features, sequences, sequence_comparison_method, accessions, keep_best_score_id, remove_clashes);
if (annotation_ok && meta_ok) consensus_map_filtered.push_back(cm);
}
}
//delete unassigned PeptideIdentifications
if (remove_unassigned_ids)
{
consensus_map_filtered.getUnassignedPeptideIdentifications().clear();
}
//update minimum and maximum position/intensity
consensus_map_filtered.updateRanges();
// sort if desired
if (sort)
{
consensus_map_filtered.sortByPosition();
}
if (out_type == FileTypes::FEATUREXML)
{
if (maps.size() == 1) // When extracting a feature map from a consensus map, only one map ID should be specified. Hence 'maps' should contain only one integer.
{
FeatureMap feature_map_filtered;
FileHandler ff;
for (ConsensusMap::Iterator cm_it = consensus_map_filtered.begin(); cm_it != consensus_map_filtered.end(); ++cm_it)
{
for (ConsensusFeature::HandleSetType::const_iterator fh_iter = cm_it->getFeatures().begin(); fh_iter != cm_it->getFeatures().end(); ++fh_iter)
{
if ((int)fh_iter->getMapIndex() == maps[0])
{
// only RT/MZ/intensity/charge are carried over to the new feature
Feature feature;
feature.setRT(fh_iter->getRT());
feature.setMZ(fh_iter->getMZ());
feature.setIntensity(fh_iter->getIntensity());
feature.setCharge(fh_iter->getCharge());
feature_map_filtered.push_back(feature);
}
}
}
//-------------------------------------------------------------
// writing output
//-------------------------------------------------------------
//annotate output with data processing info
addDataProcessing_(feature_map_filtered, getProcessingInfo_(DataProcessing::FILTERING));
feature_map_filtered.applyMemberFunction(&UniqueIdInterface::setUniqueId);
ff.storeFeatures(out, feature_map_filtered, {FileTypes::FEATUREXML});
}
else
{
writeLogError_("Error: When extracting a feature map from a consensus map, only one map ID should be specified. The 'map' parameter contains more than one. Aborting!");
printUsage_();
return ILLEGAL_PARAMETERS;
}
}
else if (out_type == FileTypes::CONSENSUSXML)
{
// generate new consensuses with features that appear in the 'maps' list
ConsensusMap cm_new; // new consensus map
for (IntList::iterator map_it = maps.begin(); map_it != maps.end(); ++map_it)
{
cm_new.getColumnHeaders()[*map_it].filename = consensus_map_filtered.getColumnHeaders()[*map_it].filename;
cm_new.getColumnHeaders()[*map_it].size = consensus_map_filtered.getColumnHeaders()[*map_it].size;
cm_new.getColumnHeaders()[*map_it].unique_id = consensus_map_filtered.getColumnHeaders()[*map_it].unique_id;
}
cm_new.setProteinIdentifications(consensus_map_filtered.getProteinIdentifications());
// map_and: keep a consensus only if features from ALL requested maps are present
const bool and_connective = getFlag_("consensus:map_and");
for (ConsensusFeature& cm : consensus_map_filtered) // iterate over consensuses in the original consensus map
{
ConsensusFeature consensus_feature_new(cm); // new consensus feature
consensus_feature_new.clear();
ConsensusFeature::HandleSetType::const_iterator fh_it = cm.getFeatures().begin();
ConsensusFeature::HandleSetType::const_iterator fh_it_end = cm.getFeatures().end();
for (; fh_it != fh_it_end; ++fh_it) // iterate over features in consensus
{
if (ListUtils::contains(maps, fh_it->getMapIndex()))
{
consensus_feature_new.insert(*fh_it);
}
}
if ((!consensus_feature_new.empty() && !and_connective) || (consensus_feature_new.size() == maps.size() && and_connective)) // add the consensus to the consensus map only if it is non-empty
{
consensus_feature_new.computeConsensus(); // evaluate position of the consensus
cm_new.push_back(consensus_feature_new);
}
}
// assign unique ids
cm_new.applyMemberFunction(&UniqueIdInterface::setUniqueId);
//-------------------------------------------------------------
// writing output
//-------------------------------------------------------------
if (maps.empty())
{
// no map selection requested: write the filtered map as-is
//annotate output with data processing info
addDataProcessing_(consensus_map_filtered, getProcessingInfo_(DataProcessing::FILTERING));
f.storeConsensusFeatures(out, consensus_map_filtered, {FileTypes::CONSENSUSXML});
}
else
{
//annotate output with data processing info
addDataProcessing_(cm_new, getProcessingInfo_(DataProcessing::FILTERING));
f.storeConsensusFeatures(out, cm_new, {FileTypes::CONSENSUSXML});
}
}
}
else
{
writeLogError_("Error: Unknown output file type given. Aborting!");
printUsage_();
return ILLEGAL_PARAMETERS;
}
}
else
{
writeLogError_("Error: Unknown input file type given. Aborting!");
printUsage_();
return INCOMPATIBLE_INPUT_DATA;
}
return EXECUTION_OK;
}
/// Removes MS2 spectra whose precursor position matches an identification from the
/// given idXML blacklist within the given RT/m/z tolerances (both in absolute units).
/// @param exp experiment to filter (modified in place)
/// @param id_blacklist path to the idXML file with the IDs to blacklist
/// @param blacklist_imperfect if true, IDs without a matching spectrum only produce a warning instead of an error
/// @param rt_tol absolute RT tolerance for matching precursors to IDs
/// @param mz_tol absolute m/z tolerance for matching precursors to IDs
/// @return EXECUTION_OK on success; INCOMPATIBLE_INPUT_DATA if IDs lack RT/m/z;
///         UNEXPECTED_RESULT if not all IDs were matched and 'blacklist_imperfect' is not set
ExitCodes filterByBlackList(MapType& exp, const String& id_blacklist, bool blacklist_imperfect, double rt_tol, double mz_tol)
{
vector<ProteinIdentification> protein_ids;
PeptideIdentificationList peptide_ids;
FileHandler().loadIdentifications(id_blacklist, protein_ids, peptide_ids);
// translate idXML entries into something more handy
typedef std::vector<Peak2D> IdType;
IdType ids; // use Peak2D since it has sorting operators already
for (Size i = 0; i < peptide_ids.size(); ++i)
{
if (!(peptide_ids[i].hasRT() && peptide_ids[i].hasMZ()))
{
OPENMS_LOG_ERROR << "Identifications given in 'id:blacklist' are missing RT and/or MZ coordinates. Cannot do blacklisting without. Quitting." << std::endl;
return INCOMPATIBLE_INPUT_DATA;
}
Peak2D p;
p.setRT(peptide_ids[i].getRT());
p.setMZ(peptide_ids[i].getMZ());
ids.push_back(p);
}
// sort by RT so the candidate window per precursor can be found via binary search
std::sort(ids.begin(), ids.end(), Peak2D::RTLess());
set<Size> blacklist_idx; // indices of spectra to remove
set<Size> ids_covered;   // indices of IDs that matched at least one spectrum
for (Size i = 0; i != exp.size(); ++i)
{
if (exp[i].getMSLevel() == 2)
{
if (!exp[i].getPrecursors().empty())
{
// NOTE(review): only the first precursor of each MS2 spectrum is considered
double pc_rt = exp[i].getRT();
double pc_mz = exp[i].getPrecursors()[0].getMZ();
IdType::iterator p_low = std::lower_bound(ids.begin(), ids.end(), pc_rt - rt_tol, Peak2D::RTLess());
IdType::iterator p_high = std::lower_bound(ids.begin(), ids.end(), pc_rt + rt_tol, Peak2D::RTLess());
// if precursor is out of the whole range, then p_low==p_high == (begin()||end())
// , thus the following loop will not run
for (IdType::iterator id_it = p_low; id_it != p_high; ++id_it) // RT already checked.. now check m/z
{
if (pc_mz - mz_tol < id_it->getMZ() && id_it->getMZ() < pc_mz + mz_tol)
{
blacklist_idx.insert(i);
ids_covered.insert(std::distance(ids.begin(), id_it));
// no break, since we might cover more IDs here
}
}
}
}
}
OPENMS_LOG_INFO << "Removing " << blacklist_idx.size() << " MS2 spectra." << endl;
if (ids_covered.size() != ids.size())
{
if (!blacklist_imperfect)
{
OPENMS_LOG_ERROR << "Covered only " << ids_covered.size() << "/" << ids.size() << " IDs. Check if your input files (raw + ids) match and if your tolerances ('rt' and 'mz') are set properly.\n"
<< "If you are sure unmatched ids are ok, set the 'id:blacklist_imperfect' flag!" << std::endl;
return UNEXPECTED_RESULT;
}
else
{
OPENMS_LOG_WARN << "Covered only " << ids_covered.size() << "/" << ids.size() << " IDs. Check if your input files (raw + ids) match and if your tolerances ('rt' and 'mz') are set properly.\n"
<< "Remove the 'id:blacklist_imperfect' flag of you want this to be an error!" << std::endl;
}
}
// rebuild the experiment without the blacklisted spectra;
// clear(false) keeps the experiment-level meta data
PeakMap exp2 = exp;
exp2.clear(false);
for (Size i = 0; i != exp.size(); ++i)
{
if (blacklist_idx.find(i) ==
blacklist_idx.end())
{
exp2.addSpectrum(exp[i]);
}
}
exp = exp2;
return EXECUTION_OK;
}
/// Black-/whitelists MS2 spectra in 'exp' against the feature positions of a consensus map.
/// A spectrum "matches" if its precursor lies within 'rt_tol'/'mz_tol' of any feature
/// handle (optionally restricted to the maps in 'map_ids'; empty set = all maps).
/// @param is_blacklist true: drop matched MS2 spectra; false: keep only matched MS2 spectra (non-MS2 spectra are always kept)
/// @param unit_ppm if true, 'mz_tol' is interpreted in ppm of the precursor m/z
/// @return EXECUTION_OK
ExitCodes filterByBlackOrWhiteList(bool is_blacklist, MapType& exp, const ConsensusMap& consensus_map, double rt_tol, double mz_tol, bool unit_ppm, std::set<UInt64> map_ids)
{
  // Collect the (RT, m/z) anchor points of all relevant feature handles.
  std::vector<Peak2D> anchors;
  for (const ConsensusFeature& cf : consensus_map)
  {
    for (const FeatureHandle& handle : cf)
    {
      const UInt64 idx = handle.getMapIndex();
      if (!map_ids.empty() && map_ids.find(idx) == map_ids.end())
      {
        continue; // handle belongs to a map we were told to ignore
      }
      Peak2D anchor;
      anchor.setMZ(handle.getMZ());
      anchor.setRT(handle.getRT());
      anchors.push_back(anchor);
    }
  }
  // Sort by RT so the RT window per precursor can be located via binary search.
  std::sort(anchors.begin(), anchors.end(), Peak2D::RTLess());
  // Indices of MS2 spectra whose precursor matches at least one anchor point.
  std::set<Size> matched;
  for (Size scan = 0; scan != exp.size(); ++scan)
  {
    if (exp[scan].getMSLevel() != 2) continue;
    if (exp[scan].getPrecursors().empty()) continue;
    const double pc_mz = exp[scan].getPrecursors()[0].getMZ();
    const double pc_rt = exp[scan].getRT(); // use rt of MS2
    auto window_begin = std::lower_bound(anchors.begin(), anchors.end(), pc_rt - rt_tol, Peak2D::RTLess());
    auto window_end = std::lower_bound(anchors.begin(), anchors.end(), pc_rt + rt_tol, Peak2D::RTLess());
    const double mz_window = unit_ppm ? pc_mz * 1e-6 * mz_tol : mz_tol;
    // If the precursor RT is outside all anchors, window_begin == window_end
    // and the loop below does nothing.
    for (auto a = window_begin; a != window_end; ++a) // RT already matches; still check m/z
    {
      if (pc_mz - mz_window < a->getMZ() && a->getMZ() < pc_mz + mz_window)
      {
        matched.insert(scan);
        // keep scanning: one spectrum may match several features
      }
    }
  }
  // Rebuild the experiment, keeping spectra according to the list mode.
  PeakMap result;
  result.getExperimentalSettings() = (ExperimentalSettings)exp.getExperimentalSettings(); // copy meta data
  for (Size scan = 0; scan != exp.size(); ++scan)
  {
    const bool listed = (matched.find(scan) != matched.end());
    // blacklist: drop listed spectra; whitelist: keep all non-MS2 plus listed MS2
    const bool keep = is_blacklist ? !listed : (exp[scan].getMSLevel() != 2 || listed);
    if (keep)
    {
      result.addSpectrum(exp[scan]);
    }
  }
  exp = result;
  return EXECUTION_OK;
}
/// Black-/whitelists MS2 spectra in 'exp' by comparing precursor RT, precursor m/z and
/// spectral similarity against the spectra of a library file.
/// A negative 'rt_tol'/'mz_tol' disables the respective check; 'sim_tol' <= -1 disables
/// the similarity check.
/// @param is_blacklist true: drop matched MS2 spectra; false: keep only matched MS2 spectra (non-MS2 spectra are always kept)
/// @param unit_ppm if true, 'mz_tol' is interpreted in ppm of the precursor m/z
/// @return EXECUTION_OK
ExitCodes filterByBlackOrWhiteList(bool is_blacklist, PeakMap& exp, const PeakMap& lib_file, double rt_tol, double mz_tol, double sim_tol, bool unit_ppm)
{
  const bool enable_mz_check = (mz_tol >= 0);
  const bool enable_rt_check = (rt_tol >= 0);
  const bool enable_sim_check = (sim_tol > -1);
  // use make_unique instead of raw 'new (Type)' — exception-safe, no naked owning pointer
  std::unique_ptr<PeakSpectrumCompareFunctor> comp_function = std::make_unique<ZhangSimilarityScore>();
  set<Size> list_idx;
  for (auto const & lib_spectrum : lib_file)
  {
    if (!lib_spectrum.getPrecursors().empty())
    {
      // extract precursor positions from query file
      double lib_mz = lib_spectrum.getPrecursors()[0].getMZ();
      double lib_rt = lib_spectrum.getRT();
      // look-up matching spectra in input file (TODO: use KD-tree)
      int exp_index = -1;
      for (auto const & exp_spectrum : exp)
      {
        // keep track of current spectrum index
        ++exp_index;
        // TODO: extend to other MS levels and multiple precursors
        if (exp_spectrum.getMSLevel() != 2 || exp_spectrum.getPrecursors().empty())
        {
          continue;
        }
        // skip if m/z's don't match
        const double pc_mz = exp_spectrum.getPrecursors()[0].getMZ();
        const double mz_tol_da = unit_ppm ? pc_mz * 1e-6 * mz_tol : mz_tol;
        if (enable_mz_check && fabs(pc_mz - lib_mz) > mz_tol_da)
        {
          continue;
        }
        // skip if rt's don't match
        const double pc_rt = exp_spectrum.getRT();
        if (enable_rt_check && fabs(pc_rt - lib_rt) > rt_tol)
        {
          continue;
        }
        // compute the similarity only once (previously computed twice: once for the
        // check and once more for the debug output)
        const double similarity = (*comp_function)(exp_spectrum, lib_spectrum);
        // skip if not similar enough
        if (enable_sim_check && similarity < sim_tol)
        {
          continue;
        }
        writeDebug_("Similarity score: " + String(similarity), 10);
        // we have matching spectra
        list_idx.insert(exp_index);
      }
    }
  }
  // create new experiment
  PeakMap exp2 = exp; // copy meta data
  exp2.clear(false); // clear spectra
  for (Size i = 0; i != exp.size(); ++i)
  {
    // don't need to sort list as it is increasing
    if (is_blacklist)
    {
      // blacklist: add all spectra not contained in list
      if (list_idx.find(i) == list_idx.end())
      {
        exp2.addSpectrum(exp[i]);
      }
    }
    else // whitelist: add all non-MS2 spectra + matched MS2 spectra
    {
      if (exp[i].getMSLevel() != 2 || list_idx.find(i) != list_idx.end())
      {
        exp2.addSpectrum(exp[i]);
      }
    }
  }
  exp = exp2;
  return EXECUTION_OK;
}
};
int main(int argc, const char** argv)
{
  // Instantiate the tool and hand control to the TOPPBase command-line entry point.
  return TOPPFileFilter().main(argc, argv);
}
/// @endcond
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg, Petra Gutenbrunner $
// $Authors: David Wojnar, Timo Sachsenberg, Petra Gutenbrunner $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/ANALYSIS/ID/IDMapper.h>
#include <OpenMS/ANALYSIS/ID/AScore.h>
#include <OpenMS/METADATA/SpectrumMetaDataLookup.h>
using namespace OpenMS;
using namespace std;
/**
@page TOPP_PhosphoScoring PhosphoScoring
@brief Tool to score phosphorylation sites of peptides.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=2> → PhosphoScoring →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_CometAdapter (or other ID engines) </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_PeptideIndexer </td>
</tr>
</table>
</CENTER>
This tool performs phosphorylation analysis and site localization. Input files are an LC-MS/MS
data file as well as the corresponding identification file. Firstly, the peptide identifications
are mapped onto the spectra. Secondly, the tool uses an implementation of the Ascore according to
Beausoleil <em>et al.</em> in order to localize the most probable phosphorylation sites.
For details, see:\n
Beausoleil <em>et al.</em>: <a href="https://doi.org/10.1038/nbt1240">A probability-based
approach for high-throughput protein phosphorylation analysis and site localization</a>
(Nat. Biotechnol., 2006, PMID: 16964243).
In the output the score of the peptide hit describes the peptide score, which is a weighted
average of all ten scores of the selected peptide sequence. For each phosphorylation site an
individual Ascore was calculated and listed as meta value of the peptide hit (e.g. AScore_1,
AScore_2).
    The Ascore results of this TOPP tool differ from the results of the Ascore calculation provided
    <a href="http://ascore.med.harvard.edu/ascore.html">on the website</a>, but it seems that the
    implementation according to Beausoleil <em>et al.</em> has some calculation errors. It is not
    possible to recalculate the Ascore using the cumulative binomial probability formula with the
    given values (see Fig. 3c). In addition, the site-determining ion calculation seems unreliable,
    because in some test cases more site-determining ions were calculated than would be possible.
@note Currently mzIdentML (mzid) is not directly supported as an input/output format of this tool.
Convert mzid files to/from idXML using @ref TOPP_IDFileConverter if necessary.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_PhosphoScoring.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_PhosphoScoring.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
/// TOPP tool that maps peptide identifications onto MS2 spectra and annotates
/// phosphorylation-site localization scores (AScore) on each peptide hit.
class TOPPPhosphoScoring :
  public TOPPBase
{
public:
  TOPPPhosphoScoring() :
    TOPPBase("PhosphoScoring", "Scores potential phosphorylation sites in order to localize the most probable sites.")
  {
  }

protected:

  /**
    @brief Deisotopes a centroided spectrum in place and (optionally) converts peaks to single charge.

    For each peak, charge hypotheses from @p max_charge down to @p min_charge are tested by
    extending an isotope pattern (C13/C12 mass difference divided by charge) within
    @p fragment_tolerance (ppm if @p fragment_unit_ppm). An extension is accepted only while the
    intensity does not increase from one isotope peak to the next (crude averagine check).
    If @p keep_only_deisotoped is true, only peaks with an assigned charge survive; otherwise
    unassigned peaks are kept as well. If @p make_single_charged is true, mono-isotopic peaks are
    recomputed to their single-charged m/z.

    @note The spectrum must not contain 0-intensity peaks and must be sorted by m/z.
  */
  // spectrum must not contain 0 intensity peaks and must be sorted by m/z
  template <typename SpectrumType>
  static void deisotopeAndSingleChargeMSSpectrum_(SpectrumType& in, Int min_charge, Int max_charge,
                                                  double fragment_tolerance,
                                                  bool fragment_unit_ppm,
                                                  bool keep_only_deisotoped = false,
                                                  Size min_isopeaks = 3,
                                                  Size max_isopeaks = 10,
                                                  bool make_single_charged = true)
  {
    if (in.empty())
    {
      return;
    }

    SpectrumType old_spectrum = in; // work on a copy; 'in' is rebuilt below

    // determine charge seeds and extend them
    vector<Size> mono_isotopic_peak(old_spectrum.size(), 0); // assigned charge per peak (0 = none)
    vector<Int> features(old_spectrum.size(), -1);           // isotope-group id per peak (-1 = unassigned)
    Int feature_number = 0;

    for (Size current_peak = 0; current_peak != old_spectrum.size(); ++current_peak)
    {
      double current_mz = old_spectrum[current_peak].getPosition()[0];

      for (Int q = max_charge; q >= min_charge; --q) // important: test charge hypothesis from high to low
      {
        // try to extend isotopes from mono-isotopic peak
        // if extension larger then min_isopeaks possible:
        //   - save charge q in mono_isotopic_peak[]
        //   - annotate all isotopic peaks with feature number
        if (features[current_peak] == -1) // only process peaks which have no assigned feature number
        {
          bool has_min_isopeaks = true;
          vector<Size> extensions; // indices of peaks belonging to this isotope pattern
          for (Size i = 0; i < max_isopeaks; ++i)
          {
            // expected position of the i-th isotope peak for charge q
            double expected_mz = current_mz + i * Constants::C13C12_MASSDIFF_U / q;
            Size p = old_spectrum.findNearest(expected_mz);
            double tolerance_dalton = fragment_unit_ppm ? fragment_tolerance * old_spectrum[p].getPosition()[0] * 1e-6 : fragment_tolerance;
            if (fabs(old_spectrum[p].getPosition()[0] - expected_mz) > tolerance_dalton) // test for missing peak
            {
              if (i < min_isopeaks)
              {
                has_min_isopeaks = false;
              }
              break;
            }
            else
            {
              // TODO: include proper averagine model filtering. for now start at the second peak to test hypothesis
              Size n_extensions = extensions.size();
              if (n_extensions != 0)
              {
                // reject if intensity rises along the pattern (inconsistent with a decaying isotope envelope)
                if (old_spectrum[p].getIntensity() > old_spectrum[extensions[n_extensions - 1]].getIntensity())
                {
                  if (i < min_isopeaks)
                  {
                    has_min_isopeaks = false;
                  }
                  break;
                }
              }

              // averagine check passed
              extensions.push_back(p);
            }
          }

          if (has_min_isopeaks)
          {
            //cout << "min peaks at " << current_mz << " " << " extensions: " << extensions.size() << endl;
            mono_isotopic_peak[current_peak] = q;
            for (Size i = 0; i != extensions.size(); ++i)
            {
              features[extensions[i]] = feature_number;
            }
            feature_number++;
          }
        }
      }
    }

    // rebuild the spectrum from the annotations collected above
    in.clear(false);
    for (Size i = 0; i != old_spectrum.size(); ++i)
    {
      Int z = mono_isotopic_peak[i];
      if (keep_only_deisotoped)
      {
        if (z == 0)
        {
          continue; // drop peaks without an assigned charge
        }

        // if already single charged or no decharging selected keep peak as it is
        if (!make_single_charged)
        {
          in.push_back(old_spectrum[i]);
        }
        else
        {
          Peak1D p = old_spectrum[i];
          // convert m/z of a z-charged peak to the single-charged equivalent
          p.setMZ(p.getMZ() * z - (z - 1) * Constants::PROTON_MASS_U);
          in.push_back(p);
        }
      }
      else
      {
        // keep all unassigned peaks
        if (features[i] < 0)
        {
          in.push_back(old_spectrum[i]);
          continue;
        }

        // convert mono-isotopic peak with charge assigned by deisotoping
        if (z != 0)
        {
          if (!make_single_charged)
          {
            in.push_back(old_spectrum[i]);
          }
          else
          {
            Peak1D p = old_spectrum[i];
            p.setMZ(p.getMZ() * z - (z - 1) * Constants::PROTON_MASS_U);
            in.push_back(p);
          }
        }
      }
    }

    in.sortByPosition();
  }

  /// Registers command-line parameters: input mzML, input idXML and output idXML, plus all AScore defaults.
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Input file with MS/MS spectra");
    setValidFormats_("in", ListUtils::create<String>("mzML"));
    registerInputFile_("id", "<file>", "", "Identification input file which contains a search against a concatenated sequence database");
    setValidFormats_("id", ListUtils::create<String>("idXML"));
    registerOutputFile_("out", "<file>", "", "Identification output annotated with phosphorylation scores");
    setValidFormats_("out", { "idXML" });

    // Ascore algorithm parameters:
    registerFullParam_(AScore().getDefaults());
  }

  // If the score_type has a different name in the meta_values, it is not possible to find it.
  // E.g. Percolator_qvalue <-> q-value.
  // Improvement for the future would be to have unique names for the score_types
  // LuciphorAdapter uses the same strategy to backup previous scores.
  /// Backs up the current hit score as a meta value (named after @p score_type) before it is overwritten.
  void addScoreToMetaValues_(PeptideHit& hit, const String score_type)
  {
    // skip if a backup already exists under either naming variant
    if (!hit.metaValueExists(score_type) && !hit.metaValueExists(score_type + "_score"))
    {
      if (score_type.hasSubstring("score"))
      {
        hit.setMetaValue(score_type, hit.getScore());
      }
      else
      {
        hit.setMetaValue(score_type + "_score", hit.getScore());
      }
    }
  }

  /// Main routine: load ids + MS2 spectra, match ids to spectra by RT, compute AScore per hit, store idXML.
  ExitCodes main_(int, const char**) override
  {
    //-------------------------------------------------------------
    // parameter handling
    //-------------------------------------------------------------
    String in(getStringOption_("in"));
    String id(getStringOption_("id"));
    String out(getStringOption_("out"));

    AScore ascore;
    // overwrite AScore defaults with values given on the command line / ini file
    Param ascore_params = ascore.getDefaults();
    ascore_params.update(getParam_(), false, false, false, false, getGlobalLogDebug());
    ascore.setParameters(ascore_params);

    //-------------------------------------------------------------
    // loading input
    //-------------------------------------------------------------
    PeptideIdentificationList pep_ids;
    vector<ProteinIdentification> prot_ids;
    PeptideIdentificationList pep_out;
    FileHandler().loadIdentifications(id, prot_ids, pep_ids, {FileTypes::IDXML});

    PeakMap exp;
    FileHandler f;
    PeakFileOptions options;
    options.clearMSLevels();
    options.addMSLevel(2); // only MS2 spectra are needed for site localization
    f.getOptions() = options;
    f.loadExperiment(in, exp, {FileTypes::MZML});
    exp.sortSpectra(true);

    // RT-based lookup to map each peptide identification to its spectrum
    SpectrumLookup lookup;
    lookup.readSpectra(exp.getSpectra());

    for (const PeptideIdentification& pep : pep_ids)
    {
      Size scan_id = lookup.findByRT(pep.getRT());
      PeakSpectrum& temp = exp.getSpectrum(scan_id);

      vector<PeptideHit> scored_peptides;
      for (const PeptideHit& hit : pep.getHits())
      {
        PeptideHit scored_hit = hit;
        addScoreToMetaValues_(scored_hit, pep.getScoreType()); // backup score value

        OPENMS_LOG_DEBUG << "starting to compute AScore RT=" << pep.getRT() << " SEQUENCE: " << scored_hit.getSequence().toString() << std::endl;

        PeptideHit phospho_sites = ascore.compute(scored_hit, temp);
        scored_peptides.push_back(phospho_sites);
      }

      // replace the hits with their AScore-annotated versions
      PeptideIdentification new_pep_id(pep);
      new_pep_id.setScoreType("PhosphoScore");
      new_pep_id.setHigherScoreBetter(true);
      new_pep_id.setHits(scored_peptides);
      pep_out.push_back(new_pep_id);
    }

    //-------------------------------------------------------------
    // writing output
    //-------------------------------------------------------------
    FileHandler().storeIdentifications(out, prot_ids, pep_out, {FileTypes::IDXML});

    return EXECUTION_OK;
  }

};
int main(int argc, const char** argv)
{
  // Instantiate the tool and hand control to the TOPPBase command-line entry point.
  return TOPPPhosphoScoring().main(argc, argv);
}
/// @endcond
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Marc Sturm $
// --------------------------------------------------------------------------
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_DTAExtractor DTAExtractor
@brief Extracts scans of an mzML file to several files in DTA format.
<center>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=2> → DTAExtractor →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> any signal-/preprocessing tool </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> - </td>
</tr>
</table>
</center>
The retention time, the m/z ratio (for MS level > 1) and the file extension are appended to the output file name.
You can limit the exported spectra by m/z range, retention time range or MS level.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_DTAExtractor.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_DTAExtractor.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
/// TOPP tool that writes selected spectra of an mzML file as individual DTA files,
/// filtered by RT range, precursor m/z range (MS level > 1) and MS level.
class TOPPDTAExtractor :
  public TOPPBase
{
public:
  TOPPDTAExtractor() :
    TOPPBase("DTAExtractor", "Extracts spectra of an MS run file to several files in DTA format.")
  {
  }

protected:

  /// Registers the command-line parameters of the tool.
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "input file ");
    setValidFormats_("in", ListUtils::create<String>("mzML"));
    registerStringOption_("out", "<file>", "", "base name of DTA output files (RT, m/z and extension are appended)");
    registerStringOption_("mz", "[min]:[max]", ":", "m/z range of precursor peaks to extract.\n"
                                                    "This option is ignored for MS level 1", false);
    registerStringOption_("rt", "[min]:[max]", ":", "retention time range of spectra to extract [s]", false);
    registerStringOption_("level", "i[,j]...", "1,2,3", "MS levels to extract", false);
  }

  ExitCodes main_(int, const char**) override
  {
    //-------------------------------------------------------------
    // parameter handling
    //-------------------------------------------------------------
    String in = getStringOption_("in");
    String out = getStringOption_("out");

    // range bounds; tmp remembers the option value currently being parsed so the
    // error message below reports the actual offending string (previously it was
    // only set before the 'level' parse, so rt/mz errors reported an empty value)
    String tmp;
    double mz_l, mz_u, rt_l, rt_u;
    vector<UInt> levels;

    // initialize ranges to "unbounded"
    mz_l = rt_l = -numeric_limits<double>::max();
    mz_u = rt_u = numeric_limits<double>::max();

    String rt = getStringOption_("rt");
    String mz = getStringOption_("mz");
    String level = getStringOption_("level");

    // convert bounds to numbers
    try
    {
      // rt
      tmp = rt;
      parseRange_(rt, rt_l, rt_u);
      writeDebug_("rt lower/upper bound: " + String(rt_l) + " / " + String(rt_u), 1);

      // mz
      tmp = mz;
      parseRange_(mz, mz_l, mz_u);
      writeDebug_("mz lower/upper bound: " + String(mz_l) + " / " + String(mz_u), 1);

      // levels
      tmp = level;
      if (level.has(',')) // several levels given
      {
        vector<String> level_strings;
        level.split(',', level_strings);
        for (const String& level_string : level_strings)
        {
          levels.push_back(level_string.toInt());
        }
      }
      else // one level given
      {
        levels.push_back(level.toInt());
      }

      // assemble a debug string listing all selected MS levels
      String level_debug("MS levels: ");
      level_debug = level_debug + *(levels.begin());
      for (vector<UInt>::iterator it = ++levels.begin(); it != levels.end(); ++it)
      {
        level_debug = level_debug + ", " + *it;
      }
      writeDebug_(level_debug, 1);
    }
    catch (Exception::ConversionError& /*e*/)
    {
      writeLogError_(String("Invalid boundary '") + tmp + "' given. Aborting!");
      printUsage_();
      return ILLEGAL_PARAMETERS;
    }

    //-------------------------------------------------------------
    // loading input
    //-------------------------------------------------------------
    PeakMap exp;
    FileHandler f;
    f.getOptions().setRTRange(DRange<1>(rt_l, rt_u)); // RT filtering is done while loading
    f.loadExperiment(in, exp, {FileTypes::MZML}, log_type_);

    FileHandler dta;

    //-------------------------------------------------------------
    // calculations
    //-------------------------------------------------------------
    for (MSSpectrum& spec : exp)
    {
      // skip spectra with unwanted MS level
      if (std::find(levels.begin(), levels.end(), spec.getMSLevel()) == levels.end())
      {
        continue;
      }

      // store spectra (each wrapped in its own experiment; renamed to avoid shadowing 'exp')
      if (spec.getMSLevel() > 1)
      {
        // precursor m/z filter (MS level > 1 only); missing precursor counts as m/z 0
        double mz_value = 0.0;
        if (!spec.getPrecursors().empty())
        {
          mz_value = spec.getPrecursors()[0].getMZ();
        }
        if (mz_value < mz_l || mz_value > mz_u)
        {
          continue;
        }
        MSExperiment single_spec_exp;
        single_spec_exp.addSpectrum(spec);
        dta.storeExperiment(out + "_RT" + String(spec.getRT()) + "_MZ" + String(mz_value) + ".dta", single_spec_exp);
      }
      else
      {
        MSExperiment single_spec_exp;
        single_spec_exp.addSpectrum(spec);
        dta.storeExperiment(out + "_RT" + String(spec.getRT()) + ".dta", single_spec_exp);
      }
    }

    return EXECUTION_OK;
  }

};
int main(int argc, const char** argv)
{
  // Instantiate the tool and hand control to the TOPPBase command-line entry point.
  return TOPPDTAExtractor().main(argc, argv);
}
/// @endcond
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/KERNEL/Feature.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/DATASTRUCTURES/Param.h>
#include <OpenMS/FORMAT/FASTAFile.h>
#include <OpenMS/CHEMISTRY/Element.h>
#include <OpenMS/CHEMISTRY/ElementDB.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/PROCESSING/CENTROIDING/PeakPickerHiRes.h>
#include <OpenMS/ML/NNLS/NonNegativeLeastSquaresSolver.h>
#include <OpenMS/FORMAT/SVOutStream.h>
#include <OpenMS/FORMAT/TextFile.h>
#include <OpenMS/PROCESSING/SCALING/Normalizer.h>
#include <OpenMS/FEATUREFINDER/GaussModel.h>
#include <OpenMS/COMPARISON/SpectrumAlignment.h>
#include <OpenMS/PROCESSING/FILTERING/ThresholdMower.h>
#include <OpenMS/MATH/MISC/CubicSpline2d.h>
#include <OpenMS/CHEMISTRY/MASSDECOMPOSITION/MassDecomposition.h>
#include <OpenMS/CHEMISTRY/MASSDECOMPOSITION/MassDecompositionAlgorithm.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/CoarseIsotopePatternGenerator.h>
#include <OpenMS/SYSTEM/File.h>
#include <boost/math/distributions/normal.hpp>
#include <QtCore/QStringList>
#include <QtCore/QFile>
#include <QtCore/QDir>
#include <QtCore/QFileInfo>
#include <QtCore/QProcess>
#include <algorithm>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <map>
#include <cmath>
//#define DEBUG_METAPROSIP
using namespace OpenMS;
using namespace std;
using boost::math::normal;
typedef map<double, double> MapRateToScoreType; ///< maps a rate value to an associated score (also reused for rate histograms/densities)
typedef pair<double, vector<double> > IsotopePattern; ///< first: pattern position (presumably mass/rate -- TODO confirm), second: isotope intensities
typedef vector<IsotopePattern> IsotopePatterns; ///< collection of isotope patterns
//-------------------------------------------------------------
// Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_MetaProSIP MetaProSIP
@brief Performs proteinSIP on peptide features for elemental flux analysis.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_MetaProSIP.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_MetaProSIP.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class MetaProSIP :
public TOPPBase
{
public:
  /// Constructor: registers tool name/description and initializes reporting constants
  /// (const members declared elsewhere in the class).
  MetaProSIP()
    : TOPPBase("MetaProSIP", "Performs proteinSIP on peptide features for elemental flux analysis."),
    ADDITIONAL_ISOTOPES(5),
    FEATURE_STRING("feature"),
    UNASSIGNED_ID_STRING("id"),
    UNIDENTIFIED_STRING("unidentified")
  {
  }
struct RateScorePair
{
double rate = -1.;
double score = -1.;
};
/// datastructure for reporting an incorporation event
struct SIPIncorporation
{
double rate = -1.; ///< rate
double correlation = -1.; ///< correlation coefficient
double abundance = -1.; ///< abundance of isotopologue
#ifdef DEBUG_METAPROSIP
PeakSpectrum theoretical; ///< peak spectrum as generated from the theoretical isotopic distribution. Large memory consumption.
#endif
};
  /// datastructure for reporting a peptide with one or more incorporation rates
  struct SIPPeptide
  {
    AASequence sequence; ///< sequence of the peptide
    vector<String> accessions; ///< protein accessions of the peptide
    bool unique = true; ///< if the peptide is unique and therefore identifies the protein unambiguously
    double mz_theo = -1.; ///< theoretical mz
    double mass_theo = -1.; ///< uncharged theoretical mass
    double score = -1.; ///< search engine score or q-value if fdr filtering is applied
    double feature_rt = -1.; ///< measurement time of feature apex [s]
    double feature_mz = -1.; ///< mz of feature apex
    //Size feature_scan_number; ///< scan number
    Int charge = 0; ///< charge of the peptide feature
    double mass_diff = 0.; // 13C or 15N mass difference
    double global_LR = -1.; ///< labeling ratio for the whole spectrum used to detect global drifts. 13C/(12C+13C) intensities. (15N analogous)
    vector<RateScorePair> correlation_maxima; ///< local maxima of the rate-to-correlation curve
    MapRateToScoreType decomposition_map; // all rate to decomposition scores for the peptide
    MapRateToScoreType correlation_map; // all rate to correlation scores for the peptide
    double RR = -1.; ///< R squared of NNLS fit
    double explained_TIC_fraction = -1.; ///< fraction of the MS2 TIC that is explained by the maximum correlating decomposition weights
    String feature_type; ///< used to distinguish features from FeatureFinder, or synthesized from ids or averagine ids in reporting
    Size non_zero_decomposition_coefficients = 0; ///< decomposition coefficients significantly larger than 0
    PeakSpectrum reconstruction; ///< signal reconstruction (debugging)
    vector<double> reconstruction_monoistopic; ///< signal reconstruction of natural peptide (at mono-isotopic peak)
    PeakSpectrum accumulated; ///< accumulated spectrum (presumably summed/merged peaks -- TODO confirm with producer code)
    vector<SIPIncorporation> incorporations; ///< detected incorporation events for this peptide
    IsotopePatterns patterns; ///< isotope patterns evaluated for this peptide
#ifdef DEBUG_METAPROSIP
    vector<PeakSpectrum> pattern_spectra;
#endif
  };
///< comparator for vectors of SIPPeptides based on their size. Used to sort by group size.
struct SizeLess
{
inline bool operator()(const vector<SIPPeptide>& a, const vector<SIPPeptide>& b) const
{
return a.size() < b.size();
}
};
struct SequenceLess
{
inline bool operator()(const pair<SIPPeptide, Size>& a, const pair<SIPPeptide, Size>& b) const
{
return a.first.sequence.toString() < b.first.sequence.toString();
}
};
struct RIALess
{
inline bool operator()(const SIPIncorporation& a, const SIPIncorporation& b) const
{
return a.rate < b.rate;
}
};
  /// Helper for locating local maxima in a rate-to-score distribution via spline interpolation.
  class MetaProSIPInterpolation
  {
public:
    ///< Determine score maxima from rate to score distribution using derivatives from spline interpolation
    /// Returns every point where the interpolated curve's derivative changes from positive to
    /// non-positive (a local maximum) and the interpolated score exceeds @p threshold.
    static vector<RateScorePair> getHighPoints(double threshold, const MapRateToScoreType& rate2score, bool debug = false)
    {
      vector<RateScorePair> high_points;

      vector<double> x, y;

      // set proper boundaries (uniform spacing): prepend an artificial zero-score point left of 0
      x.push_back(-100.0 / (double)rate2score.size());
      y.push_back(0);

      // copy data
      for (MapRateToScoreType::const_iterator it = rate2score.begin(); it != rate2score.end(); ++it)
      {
        x.push_back(it->first);
        y.push_back(it->second);
      }

      // append an artificial zero-score point at 100 if the data does not reach that far
      if (rate2score.find(100.0) == rate2score.end() && x[x.size() - 1] < 100.0)
      {
        x.push_back(100.0);
        y.push_back(0);
      }

      const size_t n = x.size();

      //gte::IntpAkimaNonuniform1<double> spline(x.size(), &x.front(), &y.front());
      CubicSpline2d spline(x, y);

      if (debug)
      {
        OPENMS_LOG_DEBUG << x[0] << " " << x[n - 1] << " " << n << endl;
      }

      // scan the spline in 0.01 steps; a sign change of the first derivative marks a maximum
      double last_dxdy = 0;
      for (double xi = x[0]; xi < x[n - 1]; xi += 0.01)
      {
        double dxdy = spline.derivatives(xi, 1);
        double yi = spline.eval(xi);
        if (debug)
        {
          cout << x[0] << " " << x[n - 1] << " " << xi << " " << yi << endl;
        }
        if (last_dxdy > 0.0 && dxdy <= 0 && yi > threshold)
        {
          RateScorePair rsp{};
          rsp.rate = xi;
          rsp.score = yi;
          high_points.push_back(rsp);
        }
        last_dxdy = dxdy;
      }

      if (debug)
      {
        OPENMS_LOG_DEBUG << "Found: " << high_points.size() << " local maxima." << endl;
        for (Size i = 0; i != high_points.size(); ++i)
        {
          OPENMS_LOG_DEBUG << high_points[i].rate << " " << high_points[i].score << endl;
        }
      }

      return high_points;
    }

  };
  /// Helpers for clustering SIP peptides by their relative isotope abundance (RIA).
  class MetaProSIPClustering
  {
public:
    /// Estimates cluster centers of incorporation rates: builds a rate histogram over all
    /// incorporations, smooths it with a Gaussian kernel (sigma = 2.0) on the integer grid
    /// 0..100, and returns the local maxima of that density (score threshold 0.5).
    static vector<double> getRIAClusterCenter(const vector<SIPPeptide>& sip_peptides, bool debug = false)
    {
      vector<double> cluster;
      MapRateToScoreType hist;

      for (vector<SIPPeptide>::const_iterator cit = sip_peptides.begin(); cit != sip_peptides.end(); ++cit)
      {
        // build histogram of rates
        for (vector<SIPIncorporation>::const_iterator iit = cit->incorporations.begin(); iit != cit->incorporations.end(); ++iit)
        {
          if (hist.find(iit->rate) == hist.end())
          {
            hist[iit->rate] = 1.0;
          }
          else
          {
            hist[iit->rate] += 1.0;
          }
        }
      }

      // kernel density estimation, TODO: binary search for 5 sigma boundaries
      vector<double> density(101, 0);
      for (Size i = 0; i != density.size(); ++i)
      {
        double sum = 0;
        for (MapRateToScoreType::const_iterator mit = hist.begin(); mit != hist.end(); ++mit)
        {
          // Gaussian centered at the histogram bin, evaluated at grid point i
          normal s(mit->first, 2.0);
          sum += mit->second * pdf(s, (double)i);
        }
        density[i] = sum;
      }

      // repackage the density as a map so the interpolation helper can consume it
      MapRateToScoreType ria_density;

      for (Size i = 0; i != density.size(); ++i)
      {
        ria_density[i] = density[i];
      }

      vector<RateScorePair> cluster_center = MetaProSIPInterpolation::getHighPoints(0.5, ria_density, debug);

      // return cluster centers
      for (vector<RateScorePair>::const_iterator cit = cluster_center.begin(); cit != cluster_center.end(); ++cit)
      {
        cluster.push_back(cit->rate);
      }
      return cluster;
    }

    //@Note sip peptides get reordered in same order as clusters
    /// Assigns each SIP peptide (by its largest incorporation rate) to the nearest cluster
    /// center and rewrites @p sip_peptides in cluster order. Peptides without incorporations
    /// are dropped. Returns the clusters themselves.
    static vector<vector<SIPPeptide> > clusterSIPPeptides(const vector<double>& centers, vector<SIPPeptide>& sip_peptides)
    {
      // one cluster for each cluster center
      vector<vector<SIPPeptide> > clusters(centers.size(), vector<SIPPeptide>());

      // assign sip peptide to cluster center with largest RIA
      for (vector<SIPPeptide>::const_iterator sit = sip_peptides.begin(); sit != sip_peptides.end(); ++sit)
      {
        const vector<SIPIncorporation>& incs = sit->incorporations;
        if (!incs.empty())
        {
          // last incorporation is taken as the largest RIA (assumes incorporations are sorted by rate -- TODO confirm)
          double largest_ria = incs[incs.size() - 1].rate;
          Size closest_cluster_idx = 0;
          double closest_cluster_dist = std::numeric_limits<double>::max();
          for (Size i = 0; i != centers.size(); ++i)
          {
            double dist = std::fabs(centers[i] - largest_ria);
            if (dist < closest_cluster_dist)
            {
              closest_cluster_dist = dist;
              closest_cluster_idx = i;
            }
          }
          // add SIP peptide to closest cluster
          clusters[closest_cluster_idx].push_back(*sit);
        }
      }

      // rearrange SIP peptides to reflect new order
      sip_peptides.clear();
      for (vector<vector<SIPPeptide> >::const_iterator sit = clusters.begin(); sit != clusters.end(); ++sit)
      {
        sip_peptides.insert(sip_peptides.end(), sit->begin(), sit->end());
      }
      return clusters;
    }

  };
class MetaProSIPReporting
{
public:
static void plotHeatMap(const String& output_dir, const String& tmp_path, const String& file_suffix, const String& file_extension, const vector<vector<double> >& binned_ria, vector<String> class_labels, Size debug_level = 0, const QString& executable = QString("R"))
{
String filename = String("heatmap") + file_suffix + "." + file_extension;
String script_filename = String("heatmap") + file_suffix + String(".R");
TextFile current_script;
StringList ria_list, col_labels;
for (Size i = 0; i != binned_ria[0].size(); ++i)
{
String label = String(i * (100 / binned_ria[0].size())) + "%-" + String((i + 1) * (100 / binned_ria[0].size())) + "%";
col_labels.push_back(label);
}
for (vector<vector<double> >::const_iterator pit = binned_ria.begin(); pit != binned_ria.end(); ++pit)
{
for (vector<double>::const_iterator rit = pit->begin(); rit != pit->end(); ++rit)
{
ria_list.push_back(String(*rit));
}
}
// row labels
StringList row_labels;
if (!class_labels.empty())
{
for (Size i = 0; i != class_labels.size(); ++i)
{
row_labels.push_back(class_labels[i]);
}
}
// plot heatmap
current_script.addLine("library(gplots)");
String ria_list_string;
ria_list_string.concatenate(ria_list.begin(), ria_list.end(), ",");
current_script.addLine("mdat <- matrix(c(" + ria_list_string + "), ncol=" + String(binned_ria[0].size()) + ", byrow=TRUE)");
if (file_extension == "png")
{
current_script.addLine("png('" + tmp_path + "/" + filename + "', width=1000, height=" + String(10 * binned_ria.size()) + ")");
}
else if (file_extension == "svg")
{
current_script.addLine("svg('" + tmp_path + "/" + filename + "', width=8, height=4.5)");
}
else if (file_extension == "pdf")
{
current_script.addLine("pdf('" + tmp_path + "/" + filename + "', width=8, height=4.5)");
}
String labRowString;
if (row_labels.empty())
{
labRowString = "FALSE";
}
else
{
String row_labels_string;
row_labels_string.concatenate(row_labels.begin(), row_labels.end(), "\",\"");
labRowString = String("c(\"") + row_labels_string + "\")";
}
String col_labels_string;
col_labels_string.concatenate(col_labels.begin(), col_labels.end(), "\",\"");
current_script.addLine(R"(heatmap.2(mdat, dendrogram="none", col=colorRampPalette(c("black","red")), Rowv=FALSE, Colv=FALSE, key=FALSE, labRow=)" + labRowString + ",labCol=c(\"" + col_labels_string + R"("),trace="none", density.info="none"))");
current_script.addLine("tmp<-dev.off()");
current_script.store(tmp_path + "/" + script_filename);
QProcess p;
QStringList env = QProcess::systemEnvironment();
env << QString("R_LIBS=") + tmp_path.toQString();
p.setEnvironment(env);
QStringList qparam;
qparam << "--vanilla";
if (debug_level < 1)
{
qparam << "--quiet";
}
qparam << "--slave" << "--file=" + QString(tmp_path.toQString() + "/" + script_filename.toQString());
p.start(executable, qparam);
p.waitForFinished(-1);
int status = p.exitCode();
// cleanup
if (status != 0)
{
std::cerr << "Error: Process returned with non 0 status." << std::endl;
}
else
{
QFile(QString(tmp_path.toQString() + "/" + filename.toQString())).copy(output_dir.toQString() + "/heatmap" + file_suffix.toQString() + "." + file_extension.toQString());
if (debug_level < 1)
{
QFile(QString(tmp_path.toQString() + "/" + script_filename.toQString())).remove();
QFile(QString(tmp_path.toQString() + "/" + filename.toQString())).remove();
}
}
}
    /**
      @brief Plots each peptide's accumulated spectrum as a stick plot via an external R script.

      For every SIP peptide, writes an R script with the peak m/z and intensity vectors to
      @p tmp_path, runs it with @p executable, and on success copies the image to
      @p output_dir (file name includes the feature RT). Temporary files are removed unless
      @p debug_level >= 1.
    */
    static void plotFilteredSpectra(const String& output_dir, const String& tmp_path, const String& file_suffix, const String& file_extension, const vector<SIPPeptide>& sip_peptides, Size debug_level = 0, const QString& executable = QString("R"))
    {
      String filename = String("spectrum_plot") + file_suffix + "." + file_extension;
      String script_filename = String("spectrum_plot") + file_suffix + String(".R");

      for (Size i = 0; i != sip_peptides.size(); ++i)
      {
        TextFile current_script;
        StringList mz_list;
        StringList intensity_list;

        // collect peak coordinates of the accumulated spectrum
        for (Size j = 0; j != sip_peptides[i].accumulated.size(); ++j)
        {
          const Peak1D& peak = sip_peptides[i].accumulated[j];
          mz_list.push_back(String(peak.getMZ()));
          intensity_list.push_back(String(peak.getIntensity()));
        }

        String mz_list_string;
        mz_list_string.concatenate(mz_list.begin(), mz_list.end(), ",");

        String intensity_list_string;
        intensity_list_string.concatenate(intensity_list.begin(), intensity_list.end(), ",");

        // stick plot: vertical segments from the baseline up to each peak intensity
        current_script.addLine("mz<-c(" + mz_list_string + ")");
        current_script.addLine("int<-c(" + intensity_list_string + ")");
        current_script.addLine("x0=mz; x1=mz; y0=rep(0, length(x0)); y1=int");

        // open the requested graphics device
        if (file_extension == "png")
        {
          current_script.addLine("png('" + tmp_path + "/" + filename + "')");
        }
        else if (file_extension == "svg")
        {
          current_script.addLine("svg('" + tmp_path + "/" + filename + "', width=8, height=4.5)");
        }
        else if (file_extension == "pdf")
        {
          current_script.addLine("pdf('" + tmp_path + "/" + filename + "', width=8, height=4.5)");
        }

        current_script.addLine("plot.new()");
        current_script.addLine("plot.window(xlim=c(min(mz),max(mz)), ylim=c(0,max(int)))");
        current_script.addLine("axis(1); axis(2)");
        current_script.addLine("title(xlab=\"m/z\")");
        current_script.addLine("title(ylab=\"intensity\")");
        current_script.addLine("box()");
        current_script.addLine("segments(x0,y0,x1,y1)");
        current_script.addLine("tmp<-dev.off()");
        current_script.store(tmp_path + "/" + script_filename);

        // run the script with R (user library path pointed at tmp_path)
        QProcess p;
        QStringList env = QProcess::systemEnvironment();
        env << QString("R_LIBS=") + tmp_path.toQString();
        p.setEnvironment(env);

        QStringList qparam;
        qparam << "--vanilla" << "--quiet" << "--slave" << "--file=" + QString(tmp_path.toQString() + "/" + script_filename.toQString());
        p.start(executable, qparam);
        p.waitForFinished(-1);
        int status = p.exitCode();

        if (status != 0)
        {
          std::cerr << "Error: Process returned with non 0 status." << std::endl;
        }
        else
        {
          // copy the plot (named by feature RT) to the output directory, then clean up
          QFile(QString(tmp_path.toQString() + "/" + filename.toQString())).copy(output_dir.toQString() + "/spectrum" + file_suffix.toQString() + "_rt_" + String(sip_peptides[i].feature_rt).toQString() + "." + file_extension.toQString());
          if (debug_level < 1)
          {
            QFile(QString(tmp_path.toQString() + "/" + script_filename.toQString())).remove();
            QFile(QString(tmp_path.toQString() + "/" + filename.toQString())).remove();
          }
        }
      }
    }
///> Write the HTML quality report ("index<file_suffix>.html") into qc_output_directory.
///> Emits the peptide heat map image, and for every SIP peptide a property table,
///> an incorporation (RIA / correlation / intensity) table, and <img> links to the
///> spectrum and score plots. Images are referenced by relative filename, so the
///> previously generated plots must reside next to the HTML file.
static void writeHTML(const String& qc_output_directory, const String& file_suffix, const String& file_extension, const vector<SIPPeptide>& sip_peptides)
{
TextFile current_script;
// html header
current_script.addLine("<!DOCTYPE html>\n<html>\n<body>\n");
// peptide heat map plot
current_script.addLine(String("<h1>") + "peptide heat map</h1>");
String peptide_heatmap_plot_filename = String("heatmap_peptide") + file_suffix + String(".") + file_extension;
current_script.addLine("<p> <img src=\"" + peptide_heatmap_plot_filename + R"(" alt="graphic"></p>)");
// one section per SIP peptide
for (Size i = 0; i != sip_peptides.size(); ++i)
{
// heading
current_script.addLine(String("<h1>") + "RT: " + String(sip_peptides[i].feature_rt) + "</h1>");
// peptide property table (sequence, RT, m/z, charge, ...)
current_script.addLine("<table border=\"1\">");
// sequence table row
current_script.addLine("<tr>");
current_script.addLine("<td>sequence</td>");
current_script.addLine(String("<td>") + sip_peptides[i].sequence.toString() + "</td>");
current_script.addLine("</tr>");
current_script.addLine("<tr>");
current_script.addLine("<td>rt (min.)</td>");
current_script.addLine(String("<td>" + String::number(sip_peptides[i].feature_rt / 60.0, 2) + "</td>"));
current_script.addLine("</tr>");
current_script.addLine("<tr>");
current_script.addLine("<td>rt (sec.)</td>");
current_script.addLine(String("<td>" + String::number(sip_peptides[i].feature_rt, 2) + "</td>"));
current_script.addLine("</tr>");
current_script.addLine("<tr>");
current_script.addLine("<td>mz</td>");
current_script.addLine(String("<td>" + String::number(sip_peptides[i].feature_mz, 4) + "</td>"));
current_script.addLine("</tr>");
current_script.addLine("<tr>");
current_script.addLine("<td>theo. mz</td>");
current_script.addLine(String("<td>" + String::number(sip_peptides[i].mz_theo, 4) + "</td>"));
current_script.addLine("</tr>");
current_script.addLine("<tr>");
current_script.addLine("<td>charge</td>");
current_script.addLine(String("<td>" + String(sip_peptides[i].charge) + "</td>"));
current_script.addLine("</tr>");
current_script.addLine("<tr>");
current_script.addLine("<td>feature type</td>");
current_script.addLine(String("<td>" + String(sip_peptides[i].feature_type) + "</td>"));
current_script.addLine("</tr>");
// accession rows only if at least one protein accession is present
if (!sip_peptides[i].accessions.empty())
{
current_script.addLine(String("<tr>"));
current_script.addLine("<td>accessions</td>");
// only the first accession is reported here
current_script.addLine(String("<td>" + *sip_peptides[i].accessions.begin() + "</td>"));
current_script.addLine(String("</tr>"));
current_script.addLine(String("<tr>"));
current_script.addLine("<td>unique</td>");
current_script.addLine(String("<td>" + String(sip_peptides[i].unique) + "</td>"));
current_script.addLine(String("</tr>"));
}
current_script.addLine(String("<tr>"));
current_script.addLine("<td>search score</td>");
current_script.addLine(String("<td>") + String(sip_peptides[i].score) + "</td>");
current_script.addLine("</tr>");
current_script.addLine("<tr>");
current_script.addLine("<td>global labeling ratio</td>");
current_script.addLine(String("<td>") + String::number(sip_peptides[i].global_LR, 2) + "</td>");
current_script.addLine("</tr>");
current_script.addLine("<tr>");
current_script.addLine("<td>R squared</td>");
current_script.addLine(String("<td>") + String::number(sip_peptides[i].RR, 2) + "</td>");
current_script.addLine("</tr>");
current_script.addLine("</table>");
// table header of incorporations
current_script.addLine("<p>");
current_script.addLine("<table border=\"1\">");
current_script.addLine("<tr>");
for (Size k = 0; k != sip_peptides[i].incorporations.size(); ++k)
{
current_script.addLine(String("<td>RIA") + String(k + 1) + "</td>");
current_script.addLine(String("<td>CORR.") + String(k + 1) + "</td>");
current_script.addLine(String("<td>INT") + String(k + 1) + "</td>");
}
current_script.addLine("</tr>");
// table of incorporations
current_script.addLine("<tr>");
for (Size k = 0; k != sip_peptides[i].incorporations.size(); ++k)
{
SIPIncorporation p = sip_peptides[i].incorporations[k];
current_script.addLine(String("<td>") + String::number(p.rate, 2) + "</td>");
current_script.addLine(String("<td>") + String::number(p.correlation, 2) + "</td>");
current_script.addLine(String("<td>") + String::number(p.abundance, 0) + "</td>");
}
current_script.addLine("</tr>");
current_script.addLine("</table>");
// spectrum plot
String spectrum_filename = String("spectrum") + file_suffix + "_rt_" + String(sip_peptides[i].feature_rt) + "." + file_extension;
current_script.addLine("<p> <img src=\"" + spectrum_filename + R"(" alt="graphic"></p>)");
// score plot
String score_filename = String("scores") + file_suffix + "_rt_" + String(sip_peptides[i].feature_rt) + "." + file_extension;
current_script.addLine("<p> <img src=\"" + score_filename + R"(" alt="graphic"></p>)");
}
// html footer and output
current_script.addLine("\n</body>\n</html>");
current_script.store(qc_output_directory.toQString() + "/index" + file_suffix.toQString() + ".html");
}
static void plotScoresAndWeights(const String& output_dir, const String& tmp_path, const String& file_suffix, const String& file_extension, const vector<SIPPeptide>& sip_peptides, double score_plot_yaxis_min, Size debug_level = 0, const QString& executable = QString("R"))
{
String score_filename = String("score_plot") + file_suffix + file_extension;
String script_filename = String("score_plot") + file_suffix + String(".R");
for (Size i = 0; i != sip_peptides.size(); ++i)
{
TextFile current_script;
StringList rate_dec_list;
StringList rate_corr_list;
StringList weights_list;
StringList corr_list;
for (MapRateToScoreType::const_iterator mit = sip_peptides[i].decomposition_map.begin(); mit != sip_peptides[i].decomposition_map.end(); ++mit)
{
rate_dec_list.push_back(String(mit->first));
weights_list.push_back(String(mit->second));
}
for (MapRateToScoreType::const_iterator mit = sip_peptides[i].correlation_map.begin(); mit != sip_peptides[i].correlation_map.end(); ++mit)
{
rate_corr_list.push_back(String(mit->first));
corr_list.push_back(String(mit->second));
}
String rate_dec_list_string;
rate_dec_list_string.concatenate(rate_dec_list.begin(), rate_dec_list.end(), ",");
String weights_list_string;
weights_list_string.concatenate(weights_list.begin(), weights_list.end(), ",");
String rate_corr_list_string;
rate_corr_list_string.concatenate(rate_corr_list.begin(), rate_corr_list.end(), ",");
String corr_list_string;
corr_list_string.concatenate(corr_list.begin(), corr_list.end(), ",");
current_script.addLine("rate_dec<-c(" + rate_dec_list_string + ")");
current_script.addLine("dec<-c(" + weights_list_string + ")");
current_script.addLine("if (max(dec)!=0) {dec<-dec/max(dec)}");
current_script.addLine("rate_corr<-c(" + rate_corr_list_string + ")");
current_script.addLine("corr<-c(" + corr_list_string + ")");
if (score_plot_yaxis_min >= 0)
{
current_script.addLine("corr[corr<0]=0"); // truncate at 0 for better drawing
}
current_script.addLine("x0=rate_dec; x1=rate_dec; y0=rep(0, length(x0)); y1=dec"); // create R segments for decomposition score (vertical bars)
if (file_extension == "png")
{
current_script.addLine("png('" + tmp_path + "/" + score_filename + "')");
}
else if (file_extension == "svg")
{
current_script.addLine("svg('" + tmp_path + "/" + score_filename + "', width=8, height=4.5)");
}
else if (file_extension == "pdf")
{
current_script.addLine("pdf('" + tmp_path + "/" + score_filename + "', width=8, height=4.5)");
}
current_script.addLine("plot.new()");
current_script.addLine("plot.window(xlim=c(0,100), ylim=c(" + String(score_plot_yaxis_min) + ",1))");
current_script.addLine("axis(1); axis(2)");
current_script.addLine("title(xlab=\"RIA\")");
current_script.addLine("title(ylab=\"normalized weight / corr.\")");
current_script.addLine("box()");
current_script.addLine("segments(x0,y0,x1,y1, col='red')");
current_script.addLine("lines(x=rate_corr, y=corr, col='blue')");
current_script.addLine("legend('bottomright', horiz=FALSE, xpd=TRUE, col=c('red', 'blue'), lwd=2, c('weights', 'correlation'))");
current_script.addLine("tmp<-dev.off()");
current_script.store(tmp_path + "/" + script_filename);
QProcess p;
QStringList env = QProcess::systemEnvironment();
env << QString("R_LIBS=") + tmp_path.toQString();
p.setEnvironment(env);
QStringList qparam;
qparam << "--vanilla" << "--quiet" << "--slave" << "--file=" + QString(tmp_path.toQString() + "/" + script_filename.toQString());
p.start(executable, qparam);
p.waitForFinished(-1);
int status = p.exitCode();
if (status != 0)
{
std::cerr << "Error: Process returned with non 0 status." << std::endl;
}
else
{
QFile(QString(tmp_path.toQString() + "/" + score_filename.toQString())).copy(output_dir.toQString() + "/scores" + file_suffix.toQString() + "_rt_" + String(sip_peptides[i].feature_rt).toQString() + "." + file_extension.toQString());
if (debug_level < 1)
{
QFile(QString(tmp_path.toQString() + "/" + script_filename.toQString())).remove();
QFile(QString(tmp_path.toQString() + "/" + score_filename.toQString())).remove();
}
}
}
}
static void createQualityReport(const String& tmp_path,
const String& qc_output_directory,
const String& file_suffix,
const String& file_extension,
const vector<vector<SIPPeptide> >& sip_peptide_cluster,
Size n_heatmap_bins,
double score_plot_y_axis_min,
bool report_natural_peptides,
const QString& executable = QString("R"))
{
vector<SIPPeptide> sip_peptides;
for (vector<vector<SIPPeptide> >::const_iterator cit = sip_peptide_cluster.begin(); cit != sip_peptide_cluster.end(); ++cit)
{
for (vector<SIPPeptide>::const_iterator sit = cit->begin(); sit != cit->end(); ++sit)
{
// skip non natural peptides for reporting if flag is set
if (!report_natural_peptides && sit->incorporations.size() == 1 && sit->incorporations[0].rate < 5.0)
{
continue;
}
sip_peptides.push_back(*sit);
}
}
// heat map based on peptide RIAs
OPENMS_LOG_INFO << "Plotting peptide heat map of " << sip_peptides.size() << endl;
vector<vector<double> > binned_peptide_ria;
vector<String> class_labels;
createBinnedPeptideRIAData_(n_heatmap_bins, sip_peptide_cluster, binned_peptide_ria, class_labels);
plotHeatMap(qc_output_directory, tmp_path, "_peptide" + file_suffix, file_extension, binned_peptide_ria, class_labels, 0, executable);
OPENMS_LOG_INFO << "Plotting filtered spectra for quality report" << endl;
plotFilteredSpectra(qc_output_directory, tmp_path, file_suffix, file_extension, sip_peptides, 0, executable);
OPENMS_LOG_INFO << "Plotting correlation score and weight distribution" << endl;
plotScoresAndWeights(qc_output_directory, tmp_path, file_suffix, file_extension, sip_peptides, score_plot_y_axis_min, 0, executable);
if (file_extension != "pdf") // html doesn't support pdf as image
{
writeHTML(qc_output_directory, file_suffix, file_extension, sip_peptides);
}
}
///> Write the group/protein/peptide-level CSV report (tab separated) to os.
///> Clusters are sorted by decreasing size; for each cluster (group) the report
///> contains: a group summary row (distinct peptides, unambiguous proteins,
///> median global LR, median RIAs), then per unambiguous protein a summary plus
///> its unique peptides, and finally the non-unique (ambiguous) peptides.
///> Note: sorts sippeptide_cluster in place and closes the stream when done.
static void createCSVReport(vector<vector<SIPPeptide> >& sippeptide_cluster, ofstream& os, map<String, String>& proteinid_to_description)
{
SVOutStream out_csv_stream(os, "\t", "_", String::NONE);
// sort clusters by non increasing size
sort(sippeptide_cluster.rbegin(), sippeptide_cluster.rend(), SizeLess());
for (Size pep_clust_i = 0; pep_clust_i != sippeptide_cluster.size(); ++pep_clust_i)
{
const vector<SIPPeptide>& current_cluster = sippeptide_cluster[pep_clust_i];
// Group
map<String, vector<SIPPeptide> > all_peptides; // map sequence to SIPPeptide
map<String, vector<SIPPeptide> > ambigous_peptides; // map sequence to SIPPeptide
map<String, map<String, vector<SIPPeptide> > > unambigous_proteins; // map Accession to unmodified String to SIPPeptides
// partition cluster peptides into unique (per protein) and ambiguous sets
for (Size k = 0; k != current_cluster.size(); ++k)
{
const SIPPeptide& current_SIPpeptide = current_cluster[k];
String seq = current_SIPpeptide.sequence.toUnmodifiedString();
if (current_SIPpeptide.unique)
{
// a unique peptide is assigned to its first (only) accession
String first_accession = *current_SIPpeptide.accessions.begin();
unambigous_proteins[first_accession][seq].push_back(current_SIPpeptide);
}
else
{
ambigous_peptides[current_SIPpeptide.sequence.toUnmodifiedString()].push_back(current_SIPpeptide);
}
all_peptides[seq].push_back(current_SIPpeptide);
}
Size n_all_peptides = all_peptides.size(); // # of different (on sequence level) unique and non-unique peptides
//Size n_ambigous_peptides = ambigous_peptides.size();
Size n_unambigous_proteins = unambigous_proteins.size();
// determine median global LR of whole group
vector<double> group_global_LRs;
vector<double> group_number_RIAs;
for (map<String, vector<SIPPeptide> >::const_iterator all_it = all_peptides.begin(); all_it != all_peptides.end(); ++all_it)
{
for (vector<SIPPeptide>::const_iterator v_it = all_it->second.begin(); v_it != all_it->second.end(); ++v_it)
{
group_global_LRs.push_back(v_it->global_LR);
group_number_RIAs.push_back(v_it->incorporations.size());
}
}
double group_global_LR = Math::median(group_global_LRs.begin(), group_global_LRs.end(), false);
Size group_number_RIA = lround(Math::median(group_number_RIAs.begin(), group_number_RIAs.end(), false)); // median number of RIAs
// Group header
// Distinct peptides := different (on sequence level) unique and non-unique peptides
out_csv_stream << String("Group ") + String(pep_clust_i + 1) << "# Distinct Peptides" << "# Unambiguous Proteins" << "Median Global LR";
for (Size i = 0; i != group_number_RIA; ++i)
{
out_csv_stream << "median RIA " + String(i + 1);
}
out_csv_stream << endl;
out_csv_stream << "" << n_all_peptides << n_unambigous_proteins << group_global_LR;
// collect 1th, 2nd, ... RIA of the group based on the peptide RIAs
vector<vector<double> > group_RIAs(group_number_RIA, vector<double>());
vector<double> group_RIA_medians(group_number_RIA, 0);
for (map<String, vector<SIPPeptide> >::const_iterator all_it = all_peptides.begin(); all_it != all_peptides.end(); ++all_it)
{
for (vector<SIPPeptide>::const_iterator v_it = all_it->second.begin(); v_it != all_it->second.end(); ++v_it)
{
for (Size i = 0; i != group_number_RIA; ++i)
{
// peptides may have fewer incorporations than the group median
if (i == v_it->incorporations.size())
{
break;
}
group_RIAs[i].push_back(v_it->incorporations[i].rate);
}
}
}
for (Size i = 0; i != group_number_RIA; ++i)
{
group_RIA_medians[i] = Math::median(group_RIAs[i].begin(), group_RIAs[i].end(), false);
}
for (Size i = 0; i != group_number_RIA; ++i)
{
out_csv_stream << String(group_RIA_medians[i]);
}
out_csv_stream << endl;
// unambiguous protein level
for (map<String, map<String, vector<SIPPeptide> > >::const_iterator prot_it = unambigous_proteins.begin(); prot_it != unambigous_proteins.end(); ++prot_it)
{
// determine median global LR of protein
vector<double> protein_global_LRs;
vector<double> protein_number_RIAs;
for (map<String, vector<SIPPeptide> >::const_iterator pept_it = prot_it->second.begin(); pept_it != prot_it->second.end(); ++pept_it)
{
for (vector<SIPPeptide>::const_iterator v_it = pept_it->second.begin(); v_it != pept_it->second.end(); ++v_it)
{
protein_global_LRs.push_back(v_it->global_LR);
protein_number_RIAs.push_back(v_it->incorporations.size());
}
}
double protein_global_LR = Math::median(protein_global_LRs.begin(), protein_global_LRs.end(), false);
Size protein_number_RIA = (Size)(Math::median(protein_number_RIAs.begin(), protein_number_RIAs.end(), false) + 0.5); // median number of RIAs
// protein summary header row
out_csv_stream << "" << "Protein Accession" << "Description" << "# Unique Peptides" << "Median Global LR";
for (Size i = 0; i != protein_number_RIA; ++i)
{
out_csv_stream << "median RIA " + String(i + 1);
}
out_csv_stream << endl;
// look up protein description (keys are normalized: trimmed + upper case)
String protein_accession = prot_it->first;
String protein_description = "none";
if (proteinid_to_description.find(protein_accession.trim().toUpper()) != proteinid_to_description.end())
{
protein_description = proteinid_to_description.at(protein_accession.trim().toUpper());
}
out_csv_stream << "" << protein_accession << protein_description << prot_it->second.size() << protein_global_LR;
vector<vector<double> > protein_RIAs(protein_number_RIA, vector<double>());
vector<double> protein_RIA_medians(protein_number_RIA, 0);
// ratio to natural decomposition
vector<vector<double> > protein_ratio(protein_number_RIA, vector<double>());
vector<double> protein_ratio_medians(protein_number_RIA, 0);
// collect 1th, 2nd, ... RIA of the protein based on the peptide RIAs
for (map<String, vector<SIPPeptide> >::const_iterator pept_it = prot_it->second.begin(); pept_it != prot_it->second.end(); ++pept_it)
{
for (vector<SIPPeptide>::const_iterator v_it = pept_it->second.begin(); v_it != pept_it->second.end(); ++v_it)
{
for (Size i = 0; i != protein_number_RIA; ++i)
{
if (i == v_it->incorporations.size())
{
break;
}
protein_RIAs[i].push_back(v_it->incorporations[i].rate);
protein_ratio[i].push_back(v_it->incorporations[i].abundance);
}
}
}
for (Size i = 0; i != protein_number_RIA; ++i)
{
protein_RIA_medians[i] = Math::median(protein_RIAs[i].begin(), protein_RIAs[i].end(), false);
protein_ratio_medians[i] = Math::median(protein_ratio[i].begin(), protein_ratio[i].end(), false);
}
// NOTE(review): protein_ratio_medians is computed but never written out below — confirm intended
for (Size i = 0; i != protein_number_RIA; ++i)
{
out_csv_stream << String(protein_RIA_medians[i]);
}
out_csv_stream << endl;
// print header of unique peptides
out_csv_stream << "" << "" << "Peptide Sequence" << "RT" << "Exp. m/z" << "Theo. m/z" << "Charge" << "Score" << "TIC fraction" << "#non-natural weights" << "";
// widest incorporation count determines the number of RIA/INT/Cor. columns
Size max_incorporations = 0;
for (map<String, vector<SIPPeptide> >::const_iterator pept_it = prot_it->second.begin(); pept_it != prot_it->second.end(); ++pept_it)
{
for (vector<SIPPeptide>::const_iterator v_it = pept_it->second.begin(); v_it != pept_it->second.end(); ++v_it)
{
max_incorporations = std::max(v_it->incorporations.size(), max_incorporations);
}
}
for (Size i = 0; i != max_incorporations; ++i)
{
out_csv_stream << "RIA " + String(i + 1) << "INT " + String(i + 1) << "Cor. " + String(i + 1);
}
out_csv_stream << "Peak intensities" << "Global LR" << endl;
// print data of unique peptides
for (map<String, vector<SIPPeptide> >::const_iterator pept_it = prot_it->second.begin(); pept_it != prot_it->second.end(); ++pept_it)
{
for (vector<SIPPeptide>::const_iterator v_it = pept_it->second.begin(); v_it != pept_it->second.end(); ++v_it)
{
out_csv_stream << "" << "" << v_it->sequence.toString() << String::number(v_it->feature_rt / 60.0, 2) << String::number(v_it->feature_mz, 4) << v_it->mz_theo << v_it->charge << v_it->score << v_it->explained_TIC_fraction << v_it->non_zero_decomposition_coefficients << "";
for (vector<SIPIncorporation>::const_iterator incorps = v_it->incorporations.begin(); incorps != v_it->incorporations.end(); ++incorps)
{
out_csv_stream << String::number(incorps->rate, 1) << String::number(incorps->abundance, 0) << String::number(incorps->correlation, 2);
}
// blank entries for nicer formatting
for (Int q = 0; q < (Int)max_incorporations - (Int)v_it->incorporations.size(); ++q)
{
out_csv_stream << "" << "" << "";
}
// output peak intensities
String peak_intensities;
for (PeakSpectrum::const_iterator p = v_it->accumulated.begin(); p != v_it->accumulated.end(); ++p)
{
peak_intensities += String::number(p->getIntensity(), 0) + " ";
}
out_csv_stream << peak_intensities;
out_csv_stream << v_it->global_LR;
out_csv_stream << endl;
}
}
}
// print header of non-unique peptides below the protein section
Size max_incorporations = 0;
for (map<String, vector<SIPPeptide> >::const_iterator pept_it = ambigous_peptides.begin(); pept_it != ambigous_peptides.end(); ++pept_it)
{
for (vector<SIPPeptide>::const_iterator v_it = pept_it->second.begin(); v_it != pept_it->second.end(); ++v_it)
{
max_incorporations = std::max(v_it->incorporations.size(), max_incorporations);
}
}
out_csv_stream << "Non-Unique Peptides" << "Accessions" << "Peptide Sequence" << "Descriptions" << "Score" << "RT" << "Exp. m/z" << "Theo. m/z" << "Charge" << "#non-natural weights" << "";
for (Size m = 0; m != max_incorporations; ++m)
{
out_csv_stream << "RIA " + String(m + 1) << "INT " + String(m + 1) << "Cor. " + String(m + 1);
}
out_csv_stream << "Peak intensities" << "Global LR" << endl;
// print data of non-unique peptides below the protein section
for (map<String, vector<SIPPeptide> >::const_iterator pept_it = ambigous_peptides.begin(); pept_it != ambigous_peptides.end(); ++pept_it)
{
// build up the protein accession string for non-unique peptides. Only the first 3 accessions are added.
for (vector<SIPPeptide>::const_iterator v_it = pept_it->second.begin(); v_it != pept_it->second.end(); ++v_it)
{
String accessions_string;
String description_string = "none";
for (Size ac = 0; ac != v_it->accessions.size(); ++ac)
{
if (ac >= 3) // only print at most 3 accessions as these can be quite numorous
{
accessions_string += "...";
break;
}
String protein_accession = v_it->accessions[ac];
accessions_string += protein_accession;
if (proteinid_to_description.find(protein_accession.trim().toUpper()) != proteinid_to_description.end())
{
if (description_string == "none")
{
// first found description replaces the "none" placeholder
description_string = "";
}
description_string += proteinid_to_description.at(protein_accession.trim().toUpper());
}
if (ac < v_it->accessions.size() - 1)
{
accessions_string += ", ";
if (description_string != "none")
{
description_string += ", ";
}
}
}
out_csv_stream << "" << accessions_string << v_it->sequence.toString() << description_string << v_it->score << String::number(v_it->feature_rt / 60.0, 2) << String::number(v_it->feature_mz, 4) << v_it->mz_theo << v_it->charge << v_it->non_zero_decomposition_coefficients << "";
// output variable sized RIA part
for (vector<SIPIncorporation>::const_iterator incorps = v_it->incorporations.begin(); incorps != v_it->incorporations.end(); ++incorps)
{
out_csv_stream << String::number(incorps->rate, 1) << String::number(incorps->abundance, 0) << String::number(incorps->correlation, 2);
}
// blank entries for nicer formatting
for (Int q = 0; q < (Int)max_incorporations - (Int)v_it->incorporations.size(); ++q)
{
out_csv_stream << "" << "" << "";
}
// output peak intensities
String peak_intensities;
for (PeakSpectrum::const_iterator p = v_it->accumulated.begin(); p != v_it->accumulated.end(); ++p)
{
peak_intensities += String::number(p->getIntensity(), 0) + " ";
}
out_csv_stream << peak_intensities;
out_csv_stream << v_it->global_LR;
out_csv_stream << endl;
}
}
}
os.close();
}
///> Write the peptide-centric CSV report (one row per SIP peptide, sorted by
///> sequence) to os. Each row links back to the quality report plots (if
///> qc_output_directory and file_suffix are set), lists accessions/descriptions
///> and per-incorporation RIA/LR/INT/correlation values.
///> Note: sorts sippeptide_cluster in place and closes the stream when done.
static void createPeptideCentricCSVReport(const String& in_mzML, const String& file_extension, vector<vector<SIPPeptide> >& sippeptide_cluster, ofstream& os, map<String, String>& proteinid_to_description, String qc_output_directory, String file_suffix, bool report_natural_peptides)
{
SVOutStream out_csv_stream(os, "\t", "_", String::NONE);
// sort clusters by non increasing size
sort(sippeptide_cluster.rbegin(), sippeptide_cluster.rend(), SizeLess());
// store SIP peptide with cluster index for peptide centric view on data
vector<pair<SIPPeptide, Size> > peptide_to_cluster_index;
for (Size i = 0; i != sippeptide_cluster.size(); ++i)
{
const vector<SIPPeptide>& current_cluster = sippeptide_cluster[i];
for (Size k = 0; k != current_cluster.size(); ++k)
{
peptide_to_cluster_index.emplace_back(current_cluster[k], i);
}
}
OPENMS_LOG_INFO << "Writing " << peptide_to_cluster_index.size() << " peptides to peptide centric csv." << endl;
// sort by sequence
sort(peptide_to_cluster_index.begin(), peptide_to_cluster_index.end(), SequenceLess());
// fixed column header; RIA columns are always written for 10 incorporations
out_csv_stream << "Peptide Sequence" << "Feature" << "Quality Report Spectrum" << "Quality report scores" << "Sample Name" << "Protein Accessions" << "Description" << "Unique" << "#Ambiguity members"
<< "Score" << "RT" << "Exp. m/z" << "Theo. m/z" << "Charge" << "TIC fraction" << "#non-natural weights" << "Peak intensities" << "Group" << "Global Peptide LR";
for (Size i = 1; i <= 10; ++i)
{
out_csv_stream << "RIA " + String(i) << "LR of RIA " + String(i) << "INT " + String(i) << "Cor. " + String(i);
}
out_csv_stream << std::endl;
for (Size i = 0; i != peptide_to_cluster_index.size(); ++i)
{
const SIPPeptide& current_SIPpeptide = peptide_to_cluster_index[i].first;
// skip non natural peptides for repoting if flag is set
if (!report_natural_peptides
&& current_SIPpeptide.incorporations.size() == 1
&& current_SIPpeptide.incorporations[0].rate < 5.0)
{
continue;
}
const Size& current_cluster_index = peptide_to_cluster_index[i].second;
// output peptide sequence
out_csv_stream << current_SIPpeptide.sequence.toString() << current_SIPpeptide.feature_type;
// output quality report links if available
if (qc_output_directory.empty() || file_suffix.empty()) // if no qc plots have been generated or no unique file_suffix has been provided we can't generate links to spectra and scores
{
out_csv_stream << "" << "" << in_mzML;
}
else
{
// file:// links to the plots created by plotFilteredSpectra / plotScoresAndWeights
String qr_spectrum_filename = String("file://") + qc_output_directory + "/" + String("spectrum") + file_suffix + "_rt_" + String(current_SIPpeptide.feature_rt) + "." + file_extension;
String qr_scores_filename = String("file://") + qc_output_directory + "/" + String("scores") + file_suffix + "_rt_" + String(current_SIPpeptide.feature_rt) + "." + file_extension;
out_csv_stream << qr_spectrum_filename << qr_scores_filename << in_mzML;
}
// output protein accessions and descriptions
String accession_string;
String protein_descriptions = "none";
for (Size j = 0; j != current_SIPpeptide.accessions.size(); ++j)
{
// accessions are normalized (trimmed, upper case) for the description lookup
String current_accession = current_SIPpeptide.accessions[j];
current_accession.trim().toUpper();
accession_string += current_accession;
if (proteinid_to_description.find(current_accession) != proteinid_to_description.end())
{
if (protein_descriptions == "none")
{
protein_descriptions = proteinid_to_description.at(current_accession);
}
else
{
protein_descriptions += proteinid_to_description.at(current_accession);
}
}
// add "," between accessions
// NOTE(review): when the first accession has no description this appends "," to
// the literal "none" (e.g. "none,desc2") — confirm whether that is intended
if (j != current_SIPpeptide.accessions.size() - 1)
{
accession_string += ",";
protein_descriptions += ",";
}
}
out_csv_stream << accession_string << protein_descriptions << current_SIPpeptide.unique << current_SIPpeptide.accessions.size() << current_SIPpeptide.score << String::number(current_SIPpeptide.feature_rt / 60.0, 2)
<< String::number(current_SIPpeptide.feature_mz, 4) << String::number(current_SIPpeptide.mz_theo, 4) << current_SIPpeptide.charge << current_SIPpeptide.explained_TIC_fraction << current_SIPpeptide.non_zero_decomposition_coefficients;
// output peak intensities
String peak_intensities;
for (PeakSpectrum::const_iterator p = current_SIPpeptide.accumulated.begin(); p != current_SIPpeptide.accumulated.end(); ++p)
{
peak_intensities += String::number(p->getIntensity(), 0) + " ";
}
out_csv_stream << peak_intensities;
out_csv_stream << current_cluster_index << current_SIPpeptide.global_LR;
// per-incorporation columns: RIA, labeling ratio relative to the first
// (natural) incorporation, intensity and correlation
for (Size j = 0; j != current_SIPpeptide.incorporations.size(); ++j)
{
const double ria = current_SIPpeptide.incorporations[j].rate;
const double abundance = current_SIPpeptide.incorporations[j].abundance;
const double corr = current_SIPpeptide.incorporations[j].correlation;
double LR_of_RIA = 0;
if (ria < 1.5) // first RIA hast natural abundance
{
LR_of_RIA = abundance / current_SIPpeptide.incorporations[0].abundance;
}
out_csv_stream << String::number(ria, 1) << String::number(LR_of_RIA, 1) << String::number(abundance, 1) << String::number(corr, 1);
}
out_csv_stream << endl;
}
out_csv_stream << endl;
os.close();
}
protected:
static void createBinnedPeptideRIAData_(const Size n_heatmap_bins, const vector<vector<SIPPeptide> >& sip_clusters, vector<vector<double> >& binned_peptide_ria, vector<String>& cluster_labels)
{
cluster_labels.clear();
binned_peptide_ria.clear();
for (vector<vector<SIPPeptide> >::const_iterator cit = sip_clusters.begin(); cit != sip_clusters.end(); ++cit)
{
const vector<SIPPeptide>& sip_peptides = *cit;
for (vector<SIPPeptide>::const_iterator pit = sip_peptides.begin(); pit != sip_peptides.end(); ++pit)
{
vector<double> binned(n_heatmap_bins, 0.0);
for (vector<SIPIncorporation>::const_iterator iit = pit->incorporations.begin(); iit != pit->incorporations.end(); ++iit)
{
Int bin = static_cast<Int>(iit->rate / 100.0 * n_heatmap_bins);
bin = bin > (Int)binned.size() - 1 ? (Int)binned.size() - 1 : bin;
bin = bin < 0 ? 0 : bin;
binned[bin] = log1p(iit->abundance);
}
binned_peptide_ria.push_back(binned);
cluster_labels.push_back((String)(cit - sip_clusters.begin()));
}
}
}
};
class MetaProSIPDecomposition
{
public:
///> Perform the decomposition
///> Decompose the measured isotopic intensities into a non-negative linear
///> combination of the theoretical patterns (one per incorporation rate bin)
///> using NNLS. Fills map_rate_to_decomposition_weight with weight per rate,
///> stores the reconstructed spectrum and R squared in sip_peptide, and returns
///> the solver's status code.
static Int calculateDecompositionWeightsIsotopicPatterns(Size n_bins, const vector<double>& isotopic_intensities, const IsotopePatterns& patterns, MapRateToScoreType& map_rate_to_decomposition_weight, SIPPeptide& sip_peptide)
{
// beta: solution vector (one weight per rate bin)
Matrix<double> beta(n_bins, 1);
// right-hand side: the measured isotopic intensities
Matrix<double> intensity_vector(isotopic_intensities.size(), 1);
for (Size p = 0; p != isotopic_intensities.size(); ++p)
{
intensity_vector(p, 0) = isotopic_intensities[p];
}
// basis matrix: column col holds the theoretical pattern of rate bin col
Matrix<double> basis_matrix(isotopic_intensities.size(), n_bins);
for (Size row = 0; row != isotopic_intensities.size(); ++row)
{
for (Size col = 0; col != n_bins; ++col)
{
const vector<double>& pattern = patterns[col].second;
// NOTE(review): guard compares row against n_bins, not pattern.size() —
// looks like it assumes pattern length ≈ n_bins; confirm no out-of-range
// read of pattern[row] when row == n_bins
if (row <= n_bins)
{
basis_matrix(row, col) = pattern[row];
}
else
{
basis_matrix(row, col) = 0;
}
}
}
// solve basis_matrix * beta = intensity_vector with beta >= 0
Int result = NonNegativeLeastSquaresSolver::solve(basis_matrix, intensity_vector, beta);
// map bin index back to incorporation rate in percent
for (Size p = 0; p != n_bins; ++p)
{
map_rate_to_decomposition_weight[(double)p / n_bins * 100.0] = beta(p, 0);
}
// calculate R squared
double S_tot = 0;
double mean = accumulate(isotopic_intensities.begin(), isotopic_intensities.end(), 0.0) / isotopic_intensities.size();
for (Size row = 0; row != isotopic_intensities.size(); ++row)
{
S_tot += pow(isotopic_intensities[row] - mean, 2);
}
double S_err = 0;
// rebuild the fitted spectrum peak by peak and accumulate the residual error
PeakSpectrum reconstructed;
for (Size row = 0; row != isotopic_intensities.size(); ++row)
{
double predicted = 0;
for (Size col = 0; col != n_bins; ++col)
{
predicted += basis_matrix(row, col) * beta(col, 0);
}
Peak1D peak;
peak.setIntensity(predicted);
// m/z of the row-th isotopic peak relative to the monoisotopic mass
peak.setMZ(sip_peptide.mz_theo + sip_peptide.mass_diff / sip_peptide.charge * row);
reconstructed.push_back(peak);
S_err += pow(isotopic_intensities[row] - predicted, 2);
}
// reconstruction of the first 5 peaks from the first 3 rate bins only
// NOTE(review): the bounds 5 and 3 are hardcoded — this indexes basis_matrix
// and beta without checking isotopic_intensities.size() >= 5 or n_bins >= 3;
// confirm callers guarantee these minimum sizes
for (Size row = 0; row != 5; ++row)
{
double predicted = 0;
for (Size col = 0; col != 3; ++col)
{
predicted += basis_matrix(row, col) * beta(col, 0);
}
sip_peptide.reconstruction_monoistopic.push_back(predicted);
}
// coefficient of determination of the full fit
sip_peptide.RR = 1.0 - (S_err / S_tot);
sip_peptide.reconstruction = reconstructed;
return result;
}
// Template calculations for base matrix
///> Given a peptide sequence calculate the theoretical isotopic patterns given all incorporations rate (13C Version)
///> extend isotopic patterns by additional_isotopes to collect other element higher isotopes at 100% incorporation
// Template calculations for base matrix
///> Given a peptide sequence calculate the theoretical isotopic patterns given all incorporations rate (13C Version)
///> extend isotopic patterns by additional_isotopes to collect other element higher isotopes at 100% incorporation
///> Returns one (abundance, intensity-vector) pair per sampled 13C incorporation
///> rate from 0% up to (but excluding) 100%, stepped by 100/#labelable carbons.
///> WARNING: temporarily overwrites the carbon isotope distribution in the
///> global ElementDB via const_cast (restored to natural abundance at the end),
///> so this is not safe to call concurrently with other isotope calculations.
static IsotopePatterns calculateIsotopePatternsFor13CRange(const AASequence& peptide, Size additional_isotopes = 5)
{
IsotopePatterns ret;
const Element* e1 = ElementDB::getInstance()->getElement("Carbon");
// non-const handle needed to install custom isotope distributions below
Element* e2 = const_cast<Element*>(e1);
EmpiricalFormula peptide_ef = peptide.getFormula();
Size MAXISOTOPES = static_cast<Size>(peptide_ef.getNumberOf(e1));
// calculate empirical formula of modifications - these can not be labeled via substrate feeding and must be taken care of in pattern calculation
AASequence unmodified_peptide = AASequence::fromString(peptide.toUnmodifiedString());
EmpiricalFormula unmodified_peptide_ef = unmodified_peptide.getFormula();
UInt max_labeling_carbon = (UInt)unmodified_peptide_ef.getNumberOf(e1); // max. number of atoms that can be labeled
EmpiricalFormula modifications_ef = peptide_ef - unmodified_peptide_ef; // difference formula for modifications (note that it can contain positive/negative numbers)
if (modifications_ef.getNumberOf(e1) > 0) // modification adds additional (unlabeled) carbon atoms
{
// modification carbons keep natural abundance: compute their distribution once
IsotopeDistribution modification_dist = modifications_ef.getIsotopeDistribution(CoarseIsotopePatternGenerator(max_labeling_carbon + additional_isotopes));
// sample incorporation rates; the 1e-8 epsilon excludes exactly 100%
for (double abundance = 0.0; abundance < 100.0 - 1e-8; abundance += 100.0 / (double)max_labeling_carbon)
{
double a = abundance / 100.0;
// install a 12C/13C distribution matching the current incorporation rate
IsotopeDistribution isotopes;
isotopes.clear();
isotopes.insert(12, 1.0 - a);
isotopes.insert(13, a);
e2->setIsotopeDistribution(isotopes);
IsotopeDistribution dist = unmodified_peptide_ef.getIsotopeDistribution(CoarseIsotopePatternGenerator(max_labeling_carbon + additional_isotopes));
dist.set(CoarseIsotopePatternGenerator().convolve(dist.getContainer(), modification_dist.getContainer())); // convolve with modification distribution (which follows the natural distribution)
IsotopeDistribution::ContainerType container = dist.getContainer();
// keep only the intensities; peak index encodes the isotope offset
vector<double> intensities;
for (Size i = 0; i != container.size(); ++i)
{
intensities.push_back(container[i].getIntensity());
}
ret.push_back(make_pair(abundance, intensities));
}
}
else
{
// calculate isotope distribution for a given peptide and varying incorporation rates
// modification of isotope distribution in static ElementDB
for (double abundance = 0.0; abundance < 100.0 - 1e-8; abundance += 100.0 / (double)MAXISOTOPES)
{
double a = abundance / 100.0;
IsotopeDistribution isotopes;
isotopes.clear();
isotopes.insert(12, 1.0 - a);
isotopes.insert(13, a);
e2->setIsotopeDistribution(isotopes);
IsotopeDistribution dist = peptide_ef.getIsotopeDistribution(CoarseIsotopePatternGenerator(MAXISOTOPES + additional_isotopes));
IsotopeDistribution::ContainerType container = dist.getContainer();
vector<double> intensities;
for (Size i = 0; i != container.size(); ++i)
{
intensities.push_back(container[i].getIntensity());
}
ret.push_back(make_pair(abundance, intensities));
}
}
// reset to natural occurance
// (natural 12C/13C abundances, undoing the temporary ElementDB mutation)
IsotopeDistribution isotopes;
isotopes.clear();
isotopes.insert(12, 0.9893f);
isotopes.insert(13, 0.0107f);
e2->setIsotopeDistribution(isotopes);
return ret;
}
// Count the atoms of the given labeling element ("C", "N", "H" or "O") in the
// peptide that can actually be labeled via substrate feeding.
// Modifications cannot be labeled: if a modification adds atoms of the element,
// only the unmodified peptide's atoms count; if it removes atoms, the smaller
// (modified) count is the true maximum. Returns 0 for an unsupported element.
static Size getNumberOfLabelingElements(const String& labeling_element, const AASequence& peptide)
{
  // resolve the single-letter code to an ElementDB entry
  const Element* elem = nullptr;
  if (labeling_element == "N")
  {
    elem = ElementDB::getInstance()->getElement("Nitrogen");
  }
  else if (labeling_element == "C")
  {
    elem = ElementDB::getInstance()->getElement("Carbon");
  }
  else if (labeling_element == "H")
  {
    elem = ElementDB::getInstance()->getElement("Hydrogen");
  }
  else if (labeling_element == "O")
  {
    elem = ElementDB::getInstance()->getElement("Oxygen");
  }
  if (elem == nullptr) // unsupported labeling element
  {
    return 0;
  }
  // element counts with and without modifications included
  const int count_without_mods = (int)AASequence::fromString(peptide.toUnmodifiedString()).getFormula().getNumberOf(elem);
  const int count_with_mods = (int)peptide.getFormula().getNumberOf(elem);
  // common case: modification added unlabeled atoms -> use the unmodified count;
  // special case: modification removed labeling atoms -> use the modified count
  return (count_with_mods >= count_without_mods) ? count_without_mods : count_with_mods;
}
///> Given a peptide sequence calculate the theoretical isotopic patterns given all incorporation rates (15N Version)
///> extend isotopic patterns by additional_isotopes to collect other element higher isotopes at 100% incorporation
// Returns one (abundance, intensity-vector) pair per sampled 15N incorporation rate.
// Mutates the nitrogen isotope distribution in the global ElementDB while running
// and restores natural abundances before returning (not thread-safe).
static IsotopePatterns calculateIsotopePatternsFor15NRange(const AASequence& peptide, Size additional_isotopes = 5)
{
  IsotopePatterns ret;
  const Element* e1 = ElementDB::getInstance()->getElement("Nitrogen");
  // non-const handle is needed to overwrite the isotope distribution below
  Element* e2 = const_cast<Element*>(e1);
  EmpiricalFormula peptide_ef = peptide.getFormula();
  UInt MAXISOTOPES = static_cast<UInt>(peptide_ef.getNumberOf(e1));
  // calculate empirical formula of modifications - these can not be labeled via substrate feeding and must be taken care of in pattern calculation
  AASequence unmodified_peptide = AASequence::fromString(peptide.toUnmodifiedString());
  EmpiricalFormula unmodified_peptide_ef = unmodified_peptide.getFormula();
  UInt max_labeling_nitrogens = (UInt)unmodified_peptide_ef.getNumberOf(e1); // max. number of nitrogen atoms that can be labeled
  EmpiricalFormula modifications_ef = peptide_ef - unmodified_peptide_ef; // difference formula for modifications (note that it can contain positive/negative numbers)
  if (modifications_ef.getNumberOf(e1) > 0) // modification adds additional (unlabeled) nitrogen atoms
  {
    // modification nitrogens keep natural abundance; convolved into each pattern below
    IsotopeDistribution modification_dist = modifications_ef.getIsotopeDistribution(CoarseIsotopePatternGenerator(max_labeling_nitrogens + additional_isotopes));
    for (double abundance = 0; abundance < 100.0 - 1e-8; abundance += 100.0 / (double)max_labeling_nitrogens)
    {
      double a = abundance / 100.0; // fractional incorporation rate
      IsotopeDistribution isotopes;
      isotopes.clear();
      isotopes.insert(14, 1.0 - a);
      isotopes.insert(15, a);
      e2->setIsotopeDistribution(isotopes); // temporarily enrich 15N in the global DB
      IsotopeDistribution dist = unmodified_peptide_ef.getIsotopeDistribution(CoarseIsotopePatternGenerator(max_labeling_nitrogens + additional_isotopes));
      dist.set(CoarseIsotopePatternGenerator().convolve(dist.getContainer(), modification_dist.getContainer())); // calculate convolution with isotope distribution of modification(s)
      IsotopeDistribution::ContainerType container = dist.getContainer();
      vector<double> intensities;
      for (Size i = 0; i != container.size(); ++i)
      {
        intensities.push_back(container[i].getIntensity());
      }
      ret.push_back(make_pair(abundance, intensities));
    }
  }
  else
  {
    // calculate isotope distribution for a given peptide and varying incorporation rates
    // modification of isotope distribution in static ElementDB
    for (double abundance = 0; abundance < 100.0 - 1e-8; abundance += 100.0 / (double)MAXISOTOPES)
    {
      double a = abundance / 100.0;
      IsotopeDistribution isotopes;
      isotopes.clear();
      isotopes.insert(14, 1.0 - a);
      isotopes.insert(15, a);
      e2->setIsotopeDistribution(isotopes);
      IsotopeDistribution dist = peptide_ef.getIsotopeDistribution(CoarseIsotopePatternGenerator(MAXISOTOPES + additional_isotopes));
      IsotopeDistribution::ContainerType container = dist.getContainer();
      vector<double> intensities;
      for (Size i = 0; i != container.size(); ++i)
      {
        intensities.push_back(container[i].getIntensity());
      }
      ret.push_back(make_pair(abundance, intensities));
    }
  }
  // reset to natural occurrence (14N ~99.632%, 15N ~0.368%)
  IsotopeDistribution isotopes;
  isotopes.clear();
  isotopes.insert(14, 0.99632f);
  isotopes.insert(15, 0.00368f); // fixed from 0.368f: abundances must sum to ~1 (0.99632 + 0.00368)
  e2->setIsotopeDistribution(isotopes);
  return ret;
}
///> Given a peptide sequence calculate the theoretical isotopic patterns given all incorporation rates (2H version)
///> extend isotopic patterns by additional_isotopes to collect other element higher isotopes at 100% incorporation
// NOTE(review): temporarily overwrites the hydrogen isotope distribution in the
// global ElementDB (via const_cast) and restores natural abundances on exit —
// not safe to call concurrently.
static IsotopePatterns calculateIsotopePatternsFor2HRange(const AASequence& peptide, Size additional_isotopes = 5)
{
  IsotopePatterns ret;
  const Element* e1 = ElementDB::getInstance()->getElement("Hydrogen");
  // non-const handle is needed to overwrite the isotope distribution below
  Element* e2 = const_cast<Element*>(e1);
  EmpiricalFormula peptide_ef = peptide.getFormula();
  Size MAXISOTOPES = static_cast<Size>(peptide_ef.getNumberOf(e1));
  // calculate empirical formula of modifications - these can not be labeled via substrate feeding and must be taken care of in pattern calculation
  AASequence unmodified_peptide = AASequence::fromString(peptide.toUnmodifiedString());
  EmpiricalFormula unmodified_peptide_ef = unmodified_peptide.getFormula();
  UInt max_labeling_element = (UInt)unmodified_peptide_ef.getNumberOf(e1); // max. number of atoms that can be labeled
  EmpiricalFormula modifications_ef = peptide_ef - unmodified_peptide_ef; // difference formula for modifications (note that it can contain positive/negative numbers)
  if (modifications_ef.getNumberOf(e1) > 0) // modification adds additional (unlabeled) atoms
  {
    // modification hydrogens keep natural abundance; convolved into each pattern below
    IsotopeDistribution modification_dist = modifications_ef.getIsotopeDistribution(CoarseIsotopePatternGenerator(max_labeling_element + additional_isotopes));
    // sample incorporation rates 0, step, ... (< 100%); epsilon avoids an extra 100% step
    for (double abundance = 0.0; abundance < 100.0 - 1e-8; abundance += 100.0 / (double)max_labeling_element)
    {
      double a = abundance / 100.0; // fractional incorporation rate
      IsotopeDistribution isotopes;
      isotopes.clear();
      isotopes.insert(1, 1.0 - a);
      isotopes.insert(2, a);
      e2->setIsotopeDistribution(isotopes); // temporarily enrich 2H in the global DB
      IsotopeDistribution dist = unmodified_peptide_ef.getIsotopeDistribution(CoarseIsotopePatternGenerator(max_labeling_element + additional_isotopes));
      dist.set(CoarseIsotopePatternGenerator().convolve(dist.getContainer(), modification_dist.getContainer())); // convolve with modification distribution (which follows the natural distribution)
      IsotopeDistribution::ContainerType container = dist.getContainer();
      vector<double> intensities;
      for (Size i = 0; i != container.size(); ++i)
      {
        intensities.push_back(container[i].getIntensity());
      }
      ret.push_back(make_pair(abundance, intensities));
    }
  }
  else
  {
    // calculate isotope distribution for a given peptide and varying incorporation rates
    // modification of isotope distribution in static ElementDB
    for (double abundance = 0.0; abundance < 100.0 - 1e-8; abundance += 100.0 / (double)MAXISOTOPES)
    {
      double a = abundance / 100.0;
      IsotopeDistribution isotopes;
      isotopes.clear();
      isotopes.insert(1, 1.0 - a);
      isotopes.insert(2, a);
      e2->setIsotopeDistribution(isotopes);
      IsotopeDistribution dist = peptide_ef.getIsotopeDistribution(CoarseIsotopePatternGenerator(MAXISOTOPES + additional_isotopes));
      IsotopeDistribution::ContainerType container = dist.getContainer();
      vector<double> intensities;
      for (Size i = 0; i != container.size(); ++i)
      {
        intensities.push_back(container[i].getIntensity());
      }
      ret.push_back(make_pair(abundance, intensities));
    }
  }
  // reset to natural occurrence (1H ~99.9885%, 2H ~0.0115%)
  IsotopeDistribution isotopes;
  isotopes.clear();
  isotopes.insert(1, 0.999885f);
  isotopes.insert(2, 0.000115f);
  e2->setIsotopeDistribution(isotopes);
  return ret;
}
///> Given a peptide sequence calculate the theoretical isotopic patterns given all incorporation rates (18O version)
///> extend isotopic patterns by additional_isotopes to collect other element higher isotopes at 100% incorporation
// 18O is two mass units above 16O, so twice the number of isotopic traces is
// generated and the incorporation step size is halved accordingly.
// Mutates the oxygen isotope distribution in the global ElementDB while running
// and restores natural abundances before returning (not thread-safe).
static IsotopePatterns calculateIsotopePatternsFor18ORange(const AASequence& peptide, Size additional_isotopes = 5)
{
  IsotopePatterns ret;
  const Element* e1 = ElementDB::getInstance()->getElement("Oxygen");
  // non-const handle is needed to overwrite the isotope distribution below
  Element* e2 = const_cast<Element*>(e1);
  EmpiricalFormula peptide_ef = peptide.getFormula();
  Size MAXISOTOPES = static_cast<Size>(peptide_ef.getNumberOf(e1));
  // calculate empirical formula of modifications - these can not be labeled via substrate feeding and must be taken care of in pattern calculation
  AASequence unmodified_peptide = AASequence::fromString(peptide.toUnmodifiedString());
  EmpiricalFormula unmodified_peptide_ef = unmodified_peptide.getFormula();
  UInt max_labeling_element = (UInt)unmodified_peptide_ef.getNumberOf(e1); // max. number of atoms that can be labeled
  EmpiricalFormula modifications_ef = peptide_ef - unmodified_peptide_ef; // difference formula for modifications (note that it can contain positive/negative numbers)
  if (modifications_ef.getNumberOf(e1) > 0) // modification adds additional (unlabeled) atoms
  {
    // modification oxygens keep natural abundance; convolved into each pattern below
    IsotopeDistribution modification_dist = modifications_ef.getIsotopeDistribution(CoarseIsotopePatternGenerator(max_labeling_element + additional_isotopes));
    for (double abundance = 0.0; abundance < 100.0 - 1e-8; abundance += 100.0 / static_cast<double>(max_labeling_element * 2.0))
    {
      double a = abundance / 100.0; // fractional incorporation rate
      IsotopeDistribution isotopes;
      isotopes.clear(); // added: all sibling branches/functions clear before inserting
      isotopes.insert(1, 1.0 - a);
      isotopes.insert(2, 0.0); // 17O is neglectable (=0.038%)
      isotopes.insert(3, a);
      e2->setIsotopeDistribution(isotopes); // temporarily enrich 18O in the global DB
      IsotopeDistribution dist = unmodified_peptide_ef.getIsotopeDistribution(CoarseIsotopePatternGenerator(max_labeling_element * 2 + additional_isotopes)); // 2 * isotopic traces
      dist.set(CoarseIsotopePatternGenerator().convolve(dist.getContainer(), modification_dist.getContainer())); // convolve with modification distribution (which follows the natural distribution)
      IsotopeDistribution::ContainerType container = dist.getContainer();
      vector<double> intensities;
      for (Size i = 0; i != container.size(); ++i)
      {
        intensities.push_back(container[i].getIntensity());
      }
      ret.push_back(make_pair(abundance, intensities));
    }
  }
  else
  {
    // calculate isotope distribution for a given peptide and varying incorporation rates
    // modification of isotope distribution in static ElementDB
    for (double abundance = 0.0; abundance < 100.0 - 1e-8; abundance += 100.0 / static_cast<double>(MAXISOTOPES * 2.0))
    {
      double a = abundance / 100.0;
      IsotopeDistribution isotopes;
      isotopes.clear();
      isotopes.insert(1, 1.0 - a);
      isotopes.insert(2, 0.0); // 17O is neglectable (=0.038%)
      isotopes.insert(3, a);
      e2->setIsotopeDistribution(isotopes);
      IsotopeDistribution dist = peptide_ef.getIsotopeDistribution(CoarseIsotopePatternGenerator(MAXISOTOPES * 2 + additional_isotopes)); // 2 * isotopic traces
      IsotopeDistribution::ContainerType container = dist.getContainer();
      vector<double> intensities;
      for (Size i = 0; i != container.size(); ++i)
      {
        intensities.push_back(container[i].getIntensity());
      }
      ret.push_back(make_pair(abundance, intensities));
    }
  }
  // reset to natural occurrence (16O ~99.757%, 17O ~0.038%, 18O ~0.205%)
  IsotopeDistribution isotopes;
  isotopes.clear();
  isotopes.insert(1, 0.99757f);
  isotopes.insert(2, 0.00038f);
  isotopes.insert(3, 0.00205f);
  e2->setIsotopeDistribution(isotopes);
  return ret;
}
// Like calculateIsotopePatternsFor15NRange, but for an unidentified peptide of
// the given mass, modeled as an averagine peptide (no sequence/modifications).
// Mutates the nitrogen isotope distribution in the global ElementDB while
// running and restores natural abundances before returning (not thread-safe).
static IsotopePatterns calculateIsotopePatternsFor15NRangeOfAveraginePeptide(double mass)
{
  IsotopePatterns ret;
  const Element* e1 = ElementDB::getInstance()->getElement("Nitrogen");
  // non-const handle is needed to overwrite the isotope distribution below
  Element* e2 = const_cast<Element*>(e1);
  // calculate number of expected labeling elements using averagine model
  Size element_count = static_cast<Size>(mass * 0.0122177302837372);
  // calculate isotope distribution for a given peptide and varying incorporation rates
  // modification of isotope distribution in static ElementDB
  for (double abundance = 0; abundance < 100.0 - 1e-8; abundance += 100.0 / (double)element_count)
  {
    double a = abundance / 100.0; // fractional incorporation rate
    IsotopeDistribution isotopes;
    isotopes.clear();
    isotopes.insert(14, 1.0 - a);
    isotopes.insert(15, a);
    e2->setIsotopeDistribution(isotopes); // temporarily enrich 15N in the global DB
    CoarseIsotopePatternGenerator solver(element_count);
    auto dist = solver.estimateFromPeptideWeight(mass);
    IsotopeDistribution::ContainerType container = dist.getContainer();
    vector<double> intensities;
    for (Size i = 0; i != container.size(); ++i)
    {
      intensities.push_back(container[i].getIntensity());
    }
    ret.push_back(make_pair(abundance, intensities));
  }
  // reset to natural occurrence (14N ~99.632%, 15N ~0.368%)
  IsotopeDistribution isotopes;
  isotopes.clear();
  isotopes.insert(14, 0.99632f);
  isotopes.insert(15, 0.00368f); // fixed from 0.368f: abundances must sum to ~1 (0.99632 + 0.00368)
  e2->setIsotopeDistribution(isotopes);
  return ret;
}
// Like calculateIsotopePatternsFor13CRange, but for an unidentified peptide of
// the given mass, modeled as an averagine peptide (no sequence/modifications).
// Mutates the carbon isotope distribution in the global ElementDB while running
// and restores natural abundances before returning (not thread-safe).
static IsotopePatterns calculateIsotopePatternsFor13CRangeOfAveraginePeptide(double mass)
{
  IsotopePatterns ret;
  const Element* e1 = ElementDB::getInstance()->getElement("Carbon");
  // non-const handle is needed to overwrite the isotope distribution below
  Element* e2 = const_cast<Element*>(e1);
  // expected number of carbon atoms according to the averagine model
  Size element_count = static_cast<Size>(mass * 0.0444398894906044);
  // calculate isotope distribution for a given peptide and varying incorporation rates
  // modification of isotope distribution in static ElementDB
  for (double abundance = 0.0; abundance < 100.0 - 1e-8; abundance += 100.0 / (double)element_count)
  {
    double a = abundance / 100.0; // fractional incorporation rate
    IsotopeDistribution isotopes;
    isotopes.clear();
    isotopes.insert(12, 1.0 - a);
    isotopes.insert(13, a);
    e2->setIsotopeDistribution(isotopes); // temporarily enrich 13C in the global DB
    CoarseIsotopePatternGenerator solver(element_count);
    auto dist = solver.estimateFromPeptideWeight(mass);
    IsotopeDistribution::ContainerType container = dist.getContainer();
    vector<double> intensities;
    for (Size i = 0; i != container.size(); ++i)
    {
      intensities.push_back(container[i].getIntensity());
    }
    ret.push_back(make_pair(abundance, intensities));
  }
  // reset to natural occurrence
  IsotopeDistribution isotopes;
  isotopes.clear(); // added: matches the reset blocks of all sibling functions
  isotopes.insert(12, 0.9893f);
  isotopes.insert(13, 0.0107f); // fixed from 0.010f: natural 13C abundance, consistent with calculateIsotopePatternsFor13CRange (sums to 1)
  e2->setIsotopeDistribution(isotopes);
  return ret;
}
// Like calculateIsotopePatternsFor2HRange, but for an unidentified peptide of
// the given mass, modeled as an averagine peptide (no sequence/modifications).
// NOTE(review): temporarily overwrites the hydrogen isotope distribution in the
// global ElementDB and restores natural abundances on exit — not thread-safe.
static IsotopePatterns calculateIsotopePatternsFor2HRangeOfAveraginePeptide(double mass)
{
  IsotopePatterns ret;
  const Element* e1 = ElementDB::getInstance()->getElement("Hydrogen");
  // non-const handle is needed to overwrite the isotope distribution below
  Element* e2 = const_cast<Element*>(e1);
  // expected number of hydrogen atoms according to the averagine model
  Size element_count = static_cast<Size>(mass * 0.06981572169);
  // calculate isotope distribution for a given peptide and varying incorporation rates
  // modification of isotope distribution in static ElementDB
  for (double abundance = 0.0; abundance < 100.0 - 1e-8; abundance += 100.0 / (double)element_count)
  {
    double a = abundance / 100.0; // fractional incorporation rate
    IsotopeDistribution isotopes;
    isotopes.clear();
    isotopes.insert(1, 1.0 - a);
    isotopes.insert(2, a);
    e2->setIsotopeDistribution(isotopes); // temporarily enrich 2H in the global DB
    CoarseIsotopePatternGenerator solver(element_count);
    auto dist = solver.estimateFromPeptideWeight(mass);
    IsotopeDistribution::ContainerType container = dist.getContainer();
    vector<double> intensities;
    for (Size i = 0; i != container.size(); ++i)
    {
      intensities.push_back(container[i].getIntensity());
    }
    ret.push_back(make_pair(abundance, intensities));
  }
  // reset to natural occurrence (1H ~99.9885%, 2H ~0.0115%)
  IsotopeDistribution isotopes;
  isotopes.clear();
  isotopes.insert(1, 0.999885f);
  isotopes.insert(2, 0.000115f);
  e2->setIsotopeDistribution(isotopes);
  return ret;
}
// Like calculateIsotopePatternsFor18ORange, but for an unidentified peptide of
// the given mass, modeled as an averagine peptide (no sequence/modifications).
// NOTE(review): temporarily overwrites the oxygen isotope distribution in the
// global ElementDB and restores natural abundances on exit — not thread-safe.
static IsotopePatterns calculateIsotopePatternsFor18ORangeOfAveraginePeptide(double mass)
{
  IsotopePatterns ret;
  const Element* e1 = ElementDB::getInstance()->getElement("Oxygen");
  // non-const handle is needed to overwrite the isotope distribution below
  Element* e2 = const_cast<Element*>(e1);
  // expected number of oxygen atoms according to the averagine model
  Size element_count = static_cast<Size>(mass * 0.01329399039);
  // calculate isotope distribution for a given peptide and varying incorporation rates
  // modification of isotope distribution in static ElementDB
  for (double abundance = 0.0; abundance < 100.0 - 1e-8; abundance += 100.0 / (double)element_count)
  {
    double a = abundance / 100.0; // fractional incorporation rate
    IsotopeDistribution isotopes;
    isotopes.clear();
    isotopes.insert(1, 1.0 - a);
    isotopes.insert(2, 0); // 17O neglected
    isotopes.insert(3, a);
    e2->setIsotopeDistribution(isotopes); // temporarily enrich 18O in the global DB
    CoarseIsotopePatternGenerator solver(element_count * 2); // spaces are 2 Da between 18O and 16O but we observe isotopic peaks at every (approx.) nominal mass
    auto dist = solver.estimateFromPeptideWeight(mass);
    IsotopeDistribution::ContainerType container = dist.getContainer();
    vector<double> intensities;
    for (Size i = 0; i != container.size(); ++i)
    {
      intensities.push_back(container[i].getIntensity());
    }
    ret.push_back(make_pair(abundance, intensities));
  }
  // reset to natural occurrence (16O ~99.757%, 17O ~0.038%, 18O ~0.205%)
  IsotopeDistribution isotopes;
  isotopes.clear();
  isotopes.insert(1, 0.99757f);
  isotopes.insert(2, 0.00038f);
  isotopes.insert(3, 0.00205f);
  e2->setIsotopeDistribution(isotopes);
  return ret;
}
};
// Helper class: extraction of ion chromatograms (XICs) for isotopic traces and
// correlation of isotopic traces to the mono-isotopic trace.
class MetaProSIPXICExtraction
{
public:
  // Extract one XIC (summed intensity per spectrum RT) for every m/z in xic_mzs.
  // Only spectra within +-rt_tolerance_s of seed_rt and peaks within
  // +-mz_tolerance_ppm of the respective m/z contribute. All returned vectors
  // share the same RT grid (ascending RTs of all spectra in the window).
  static vector<vector<double> > extractXICs(double seed_rt, vector<double> xic_mzs, double mz_tolerance_ppm, double rt_tolerance_s, const PeakMap& peak_map)
  {
    // point on first spectrum in tolerance window
    PeakMap::ConstIterator rt_begin = peak_map.RTBegin(seed_rt - rt_tolerance_s);
    // point on after last spectrum in tolerance window
    PeakMap::ConstIterator rt_end = peak_map.RTBegin(seed_rt + rt_tolerance_s);
    // create set containing all rts of spectra in tolerance window
    set<double> all_rts;
    for (PeakMap::ConstIterator rt_it = rt_begin; rt_it != rt_end; ++rt_it)
    {
      all_rts.insert(rt_it->getRT());
    }
    vector<vector<double> > xics(xic_mzs.size(), vector<double>());
    for (Size i = 0; i < xic_mzs.size(); ++i)
    {
      // create and initialize xic to contain values for all rts
      map<double, double> xic; // rt to summed intensity
      for (set<double>::const_iterator sit = all_rts.begin(); sit != all_rts.end(); ++sit)
      {
        xic[*sit] = 0;
      }
      double mz_da = mz_tolerance_ppm * xic_mzs[i] * 1e-6; // mz tolerance in Dalton
      // sum all peak intensities inside the RT x m/z window into the XIC
      PeakMap::ConstAreaIterator it = peak_map.areaBeginConst(seed_rt - rt_tolerance_s, seed_rt + rt_tolerance_s, xic_mzs[i] - mz_da, xic_mzs[i] + mz_da);
      for (; it != peak_map.areaEndConst(); ++it)
      {
        double rt = it.getRT();
        if (xic.find(rt) != xic.end())
        {
          xic[rt] += it->getIntensity();
        }
        else
        {
          OPENMS_LOG_WARN << "RT: " << rt << " not contained in rt set." << endl;
        }
      }
      // copy map to vector for easier processing (map iteration is RT-ordered)
      vector<double> v;
      for (map<double, double>::const_iterator xic_it = xic.begin(); xic_it != xic.end(); ++xic_it)
      {
        v.push_back(xic_it->second);
      }
      xics[i] = v;
    }
    return xics;
  }
  // Pearson correlation of every isotopic XIC to the mono-isotopic (first) XIC.
  // Returns one coefficient per XIC; the first entry is 1.0 by definition.
  // Returns an empty vector for empty input (guard added: indexing rrs[0] on an
  // empty vector would be undefined behavior).
  static vector<double> correlateXICsToMono(const vector<vector<double> >& xics)
  {
    if (xics.empty())
    {
      return vector<double>();
    }
    vector<double> rrs(xics.size(), 0); // correlation of isotopic xics to monoisotopic xic
    rrs[0] = 1.0; // perfect correlation of monoisotopic trace to itself
    for (Size i = 1; i < xics.size(); ++i)
    {
      rrs[i] = Math::pearsonCorrelationCoefficient(xics[0].begin(), xics[0].end(), xics[i].begin(), xics[i].end());
    }
    return rrs;
  }
  // Extract element_count XICs starting at seed_mz, spaced mass_diff/charge apart,
  // and reduce each to a single summed intensity. If min_corr_mono > 0, XICs whose
  // correlation to the mono-isotopic XIC is at or below the threshold contribute 0.
  static vector<double> extractXICsOfIsotopeTraces(Size element_count, double mass_diff, double mz_tolerance_ppm, double rt_tolerance_s, double seed_rt, double seed_mz, double seed_charge, const PeakMap& peak_map, const double min_corr_mono = -1.0)
  {
    vector<double> xic_mzs;
    // calculate centers of XICs to be extracted
    for (Size k = 0; k != element_count; ++k)
    {
      double mz = seed_mz + k * mass_diff / seed_charge;
      xic_mzs.push_back(mz);
    }
    cout << "Element count (+ additional isotopes): " << element_count << endl;
    // extract xics
    vector<vector<double> > xics = extractXICs(seed_rt, xic_mzs, mz_tolerance_ppm, rt_tolerance_s, peak_map);
    vector<double> xic_intensities(xics.size(), 0.0);
    if (min_corr_mono > 0)
    {
      // calculate correlation to mono-isotopic peak
      vector<double> RRs = correlateXICsToMono(xics);
      // sum over XICs to yield one intensity value for each XIC. If correlation to mono-isotopic is lower then threshold, delete intensity.
      for (Size i = 0; i != xic_intensities.size(); ++i)
      {
        double v = std::accumulate(xics[i].begin(), xics[i].end(), 0.0);
        xic_intensities[i] = RRs[i] > min_corr_mono ? v : 0.0;
      }
    }
    else // correlation disabled so just take the XIC intensities
    {
      for (Size i = 0; i != xic_intensities.size(); ++i)
      {
        xic_intensities[i] = std::accumulate(xics[i].begin(), xics[i].end(), 0.0);
      }
    }
    cout << "XICs: " << xic_intensities.size() << endl;
    return xic_intensities;
  }
};
// Helper class wrapping the interaction with an external R installation.
class RIntegration
{
public:
  // Perform a simple check if R and all R dependencies are there.
  // Step 1 runs a trivial script to verify the R executable starts at all;
  // step 2 runs a script that loads each package in 'package_names', trying to
  // install missing ones from CRAN into 'tmp_path' (exported as R_LIBS).
  // Returns true only if both steps succeed.
  static bool checkRDependencies(const String& tmp_path, StringList package_names, const QString& executable = QString("R"))
  {
    String random_name = String::random(8);
    String script_filename = tmp_path + String("/") + random_name + String(".R");
    // check if R in path and can be executed
    TextFile checkRInPath;
    checkRInPath.addLine("q()");
    checkRInPath.store(script_filename);
    OPENMS_LOG_INFO << "Checking R...";
    {
      QProcess p;
      p.setProcessChannelMode(QProcess::MergedChannels);
      QStringList env = QProcess::systemEnvironment();
      env << QString("R_LIBS=") + tmp_path.toQString(); // allow package installation into the temp directory
      p.setEnvironment(env);
      QStringList checkRinPathQParam;
      checkRinPathQParam << "--vanilla" << "--quiet" << "--slave" << "--file=" + script_filename.toQString();
      p.start(executable, checkRinPathQParam);
      p.waitForFinished(-1); // block until R exits
      if (p.error() == QProcess::FailedToStart || p.exitStatus() == QProcess::CrashExit || p.exitCode() != 0)
      {
        OPENMS_LOG_INFO << " failed" << std::endl;
        OPENMS_LOG_ERROR << "Can't execute R. Do you have R installed? Check if the path to R is in your system path variable." << std::endl;
        return false;
      }
      OPENMS_LOG_INFO << " success" << std::endl;
    }
    // check dependencies
    OPENMS_LOG_INFO << "Checking R dependencies. If package is not found we will try to install it in your temp directory...";
    // R helper: load a package, installing it from CRAN first if it is missing
    TextFile current_script;
    current_script.addLine("LoadOrInstallPackage <-function(x)");
    current_script.addLine("{");
    current_script.addLine("  x <-as.character(substitute(x))");
    current_script.addLine("  if (isTRUE(x %in%.packages(all.available = TRUE)))");
    current_script.addLine("  {");
    current_script.addLine("    eval(parse(text = paste(\"library(\", x, \")\", sep = \"\")))");
    current_script.addLine("  }");
    current_script.addLine("  else");
    current_script.addLine("  {");
    current_script.addLine("    options(repos = structure(c(CRAN = \"http://cran.rstudio.com/\")))");
    current_script.addLine("    update.packages()");
    current_script.addLine("    eval(parse(text = paste(\"install.packages('\", x, \"')\", sep = \"\")))");
    current_script.addLine("    eval(parse(text = paste(\"library(\", x, \")\", sep = \"\")))");
    current_script.addLine("  }");
    current_script.addLine("}");
    for (StringList::const_iterator it = package_names.begin(); it != package_names.end(); ++it)
    {
      current_script.addLine("LoadOrInstallPackage(" + *it + ")");
    }
    current_script.store(script_filename);
    QProcess p;
    p.setProcessChannelMode(QProcess::MergedChannels);
    QStringList env = QProcess::systemEnvironment();
    env << QString("R_LIBS=") + tmp_path.toQString();
    p.setEnvironment(env);
    QStringList qparam;
    qparam << "--vanilla" << "--quiet" << "--slave" << "--file=" + script_filename.toQString();
    p.start(executable, qparam);
    p.waitForFinished(-1);
    // treat a failed start or crash like a non-zero exit code (the original only
    // checked exitCode(), which may be 0 when the process never even started)
    if (p.error() == QProcess::FailedToStart || p.exitStatus() == QProcess::CrashExit || p.exitCode() != 0)
    {
      OPENMS_LOG_ERROR << "\nProblem finding all R dependencies. Check if R and following libraries are installed:" << std::endl;
      for (TextFile::ConstIterator line_it = current_script.begin(); line_it != current_script.end(); ++line_it)
      {
        OPENMS_LOG_ERROR << *line_it << std::endl;
      }
      QString s = p.readAllStandardOutput();
      OPENMS_LOG_ERROR << s.toStdString() << std::endl;
      return false;
    }
    OPENMS_LOG_INFO << " success" << std::endl;
    return true;
  }
};
protected:
  // Number of extra isotopic peaks considered beyond the labelable atom count
  // (passed as 'additional_isotopes' to the pattern calculations).
  Size ADDITIONAL_ISOTOPES;
  // Marker strings tagging the origin of a result entry (feature / unassigned
  // identification / unidentified); presumably assigned in the tool's
  // constructor, which is outside this chunk — TODO confirm.
  std::string FEATURE_STRING;
  std::string UNASSIGNED_ID_STRING;
  std::string UNIDENTIFIED_STRING;
// Declare all command-line parameters of the tool (TOPP framework callback).
void registerOptionsAndFlags_() override
{
  // input/output files
  registerInputFile_("in_mzML", "<file>", "", "Centroided MS1 data");
  setValidFormats_("in_mzML", ListUtils::create<String>("mzML"));
  registerInputFile_("in_fasta", "<file>", "", "Protein sequence database");
  setValidFormats_("in_fasta", ListUtils::create<String>("fasta"));
  registerOutputFile_("out_csv", "<file>", "", "Column separated file with feature fitting result.");
  setValidFormats_("out_csv", ListUtils::create<String>("csv"));
  registerOutputFile_("out_peptide_centric_csv", "<file>", "", "Column separated file with peptide centric result.");
  setValidFormats_("out_peptide_centric_csv", ListUtils::create<String>("csv"));
  registerInputFile_("in_featureXML", "<file>", "", "Feature data annotated with identifications (IDMapper)");
  setValidFormats_("in_featureXML", ListUtils::create<String>("featureXML"));
  // named booleans to make the positional arguments below self-describing
  static const bool is_required(false);
  static const bool is_advanced_option(true);
  // executable
  registerInputFile_("r_executable", "<executable>",
    // choose the default value according to the platform where it will be executed
#ifdef OPENMS_WINDOWSPLATFORM
    "R.exe",
#else
    "R",
#endif
    "The R executable. Provide a full or relative path, or make sure it can be found in your PATH environment.",
    is_required,
    !is_advanced_option, // i.e. shown as a regular (non-advanced) option
    {"is_executable"}
  );
  // numeric tolerances and thresholds (all advanced)
  registerDoubleOption_("mz_tolerance_ppm", "<tol>", 10.0, "Tolerance in ppm", false, true);
  registerDoubleOption_("rt_tolerance_s", "<tol>", 30.0, "Tolerance window around feature rt for XIC extraction", false, true);
  registerDoubleOption_("intensity_threshold", "<tol>", 10.0, "Intensity threshold to collect peaks in the MS1 spectrum.", false, true);
  registerDoubleOption_("correlation_threshold", "<tol>", 0.7, "Correlation threshold for reporting a RIA", false, true);
  registerDoubleOption_("xic_threshold", "<tol>", 0.7, "Minimum correlation to mono-isotopic peak for retaining a higher isotopic peak. If featureXML from reference file is used it should be disabled (set to -1) as no mono-isotopic peak is expected to be present.", false, true);
  registerDoubleOption_("decomposition_threshold", "<tol>", 0.7, "Minimum R-squared of decomposition that must be achieved for a peptide to be reported.", false, true);
  registerDoubleOption_("weight_merge_window", "<tol>", 5.0, "Decomposition coefficients within +- this rate window will be combined", false, true);
  registerDoubleOption_("min_correlation_distance_to_averagine", "<tol>", -1.0, "Minimum difference in correlation between incorporation pattern and averagine pattern. Positive values filter all RIAs passing the correlation threshold but that also show a better correlation to an averagine peptide. Disabled for values <= -1", false, true);
  // per-element TIC thresholds used when trimming theoretical patterns
  registerDoubleOption_("pattern_15N_TIC_threshold", "<threshold>", 0.95, "The most intense peaks of the theoretical pattern contributing to at least this TIC fraction are taken into account.", false, true);
  registerDoubleOption_("pattern_13C_TIC_threshold", "<threshold>", 0.95, "The most intense peaks of the theoretical pattern contributing to at least this TIC fraction are taken into account.", false, true);
  registerDoubleOption_("pattern_2H_TIC_threshold", "<threshold>", 0.95, "The most intense peaks of the theoretical pattern contributing to at least this TIC fraction are taken into account.", false, true);
  registerDoubleOption_("pattern_18O_TIC_threshold", "<threshold>", 0.95, "The most intense peaks of the theoretical pattern contributing to at least this TIC fraction are taken into account.", false, true);
  // reporting/plotting options
  registerIntOption_("heatmap_bins", "<threshold>", 20, "Number of RIA bins for heat map generation.", false, true);
  registerStringOption_("plot_extension", "<extension>", "png", "Extension used for plots (png|svg|pdf).", false, true);
  StringList valid_extensions;
  valid_extensions.push_back("png");
  valid_extensions.push_back("svg");
  valid_extensions.push_back("pdf");
  setValidStrings_("plot_extension", valid_extensions);
  registerStringOption_("qc_output_directory", "<directory>", "", "Output directory for the quality report", false, true);
  // labeling element (non-advanced): restricted to the supported isotope labels
  registerStringOption_("labeling_element", "<parameter>", "C", "Which element (single letter code) is labeled.", false);
  StringList valid_element;
  valid_element.push_back("C");
  valid_element.push_back("N");
  valid_element.push_back("H");
  valid_element.push_back("O");
  setValidStrings_("labeling_element", valid_element);
  // behavioral flags
  registerFlag_("use_unassigned_ids", "Include identifications not assigned to a feature in pattern detection.", true);
  registerFlag_("use_averagine_ids", "Use averagine peptides as model to perform pattern detection on unidentified peptides.", true);
  registerFlag_("report_natural_peptides", "Whether purely natural peptides are reported in the quality report.", true);
  registerFlag_("filter_monoisotopic", "Try to filter out mono-isotopic patterns to improve detection of low RIA patterns", true);
  registerFlag_("cluster", "Perform grouping", true);
  registerDoubleOption_("observed_peak_fraction", "<threshold>", 0.5, "Fraction of observed/expected peaks.", false, true);
  registerIntOption_("min_consecutive_isotopes", "<threshold>", 2, "Minimum number of consecutive isotopic intensities needed.", false, true);
  registerDoubleOption_("score_plot_yaxis_min", "<threshold>", 0.0, "The minimum value of the score axis. Values smaller than zero usually only make sense if the observed peak fraction is set to 0.", false, true);
  // RIA collection strategy
  registerStringOption_("collect_method", "<method>", "correlation_maximum", "How RIAs are collected.", false, true);
  StringList valid_collect_method;
  valid_collect_method.push_back("correlation_maximum");
  valid_collect_method.push_back("decomposition_maximum");
  setValidStrings_("collect_method", valid_collect_method);
  registerDoubleOption_("lowRIA_correlation_threshold", "<tol>", -1, "Correlation threshold for reporting low RIA patterns. Disable and take correlation_threshold value for negative values.", false, true);
}
///> filter intensity to remove noise or additional incorporation peaks that otherwise might interfere with correlation calculation
// Shrinks the ranges [pattern_begin, pattern_end) and [intensities_begin,
// intensities_end) in place to the smallest contiguous window of pattern peaks
// whose summed intensity exceeds TIC_threshold. Both ranges are narrowed by the
// same offsets so pattern and measured intensities stay aligned.
// NOTE(review): assumes the pattern intensities are normalized to sum to ~1.0,
// otherwise the TIC_threshold comparison is meaningless — confirm at callers.
void filterIsotopicIntensities(vector<double>::const_iterator& pattern_begin, vector<double>::const_iterator& pattern_end,
  vector<double>::const_iterator& intensities_begin, vector<double>::const_iterator& intensities_end, double TIC_threshold = 0.99)
{
  if (std::distance(pattern_begin, pattern_end) != std::distance(intensities_begin, intensities_end))
  {
    cout << "Error: size of pattern and collected intensities don't match!: (pattern " << std::distance(pattern_begin, pattern_end) << ") (intensities " << std::distance(intensities_begin, intensities_end) << ")" << endl;
    // bail out: advancing mismatched ranges by pattern-derived offsets below
    // could move the intensity iterators past their end (undefined behavior)
    return;
  }
  if (pattern_begin == pattern_end)
  {
    return; // nothing to filter
  }
  // determine order of peaks based on intensities
  vector<double>::const_iterator b_it = pattern_begin;
  vector<double>::const_iterator e_it = pattern_end;
  // create intensity to offset map for sorting
  vector<std::pair<double, Int> > intensity_to_offset;
  for (; b_it != e_it; ++b_it)
  {
    std::pair<double, Int> intensity_offset_pair = make_pair(*b_it, std::distance(pattern_begin, b_it));
    intensity_to_offset.push_back(intensity_offset_pair); // pair: intensity, offset to pattern_begin iterator
  }
  // sort by intensity (highest first)
  std::sort(intensity_to_offset.begin(), intensity_to_offset.end(), std::greater<pair<double, Int> >());
  // determine sequence of (neighbouring) peaks needed to achieve threshold * 100 % TIC in the patterns:
  // greedily add the most intense peaks, tracking the span [min_offset, max_offset] they cover
  double TIC = 0.0;
  Int min_offset = std::distance(pattern_begin, pattern_end);
  Int max_offset = 0;
  for (vector<std::pair<double, Int> >::const_iterator it = intensity_to_offset.begin(); it != intensity_to_offset.end(); ++it)
  {
    TIC += it->first;
    if (it->second < min_offset)
    {
      min_offset = it->second;
    }
    if (it->second > max_offset)
    {
      max_offset = it->second;
    }
    if (TIC > TIC_threshold)
    {
      break; // enough of the total intensity is covered
    }
  }
  // narrow both ranges to [min_offset, max_offset] (inclusive)
  vector<double>::const_iterator tmp_pattern_it(pattern_begin);
  vector<double>::const_iterator tmp_intensity_it(intensities_begin);
  std::advance(pattern_begin, min_offset);
  std::advance(intensities_begin, min_offset);
  std::advance(tmp_pattern_it, max_offset + 1);
  std::advance(tmp_intensity_it, max_offset + 1);
  pattern_end = tmp_pattern_it;
  intensities_end = tmp_intensity_it;
  //cout << "after: " << std::distance(pattern_begin, pattern_end) << " " << min_offset << " " << max_offset << endl;
}
///< Calculates the correlation between measured isotopic_intensities and the theoretical isotopic patterns for all incorporation rates
// For each candidate incorporation rate, the theoretical pattern and the measured
// intensities are trimmed in parallel (TIC threshold depends on the labeling
// element) and their Pearson correlation is stored in map_rate_to_correlation_score.
// Correlations that resemble a coeluting natural (averagine) peptide, or that are
// based on too few observed peaks, are zeroed out.
// @param n_element number of labeling-element atoms; determines the rate step width
// @param isotopic_intensities measured intensity per isotopic position
// @param patterns theoretical isotope patterns, one per incorporation rate
//        (NOTE(review): passed by value — a copy; const& would avoid it)
// @param map_rate_to_correlation_score output map: rate -> correlation score
// @param labeling_element "N", "C", "H" or "O"; selects the TIC-threshold option
// @param mass peptide mass used to build the averagine reference distributions
// @param min_correlation_distance_to_averagine required margin above the averagine correlation
void calculateCorrelation(Size n_element, const vector<double>& isotopic_intensities, IsotopePatterns patterns,
MapRateToScoreType& map_rate_to_correlation_score, String labeling_element, double mass, double min_correlation_distance_to_averagine)
{
double min_observed_peak_fraction = getDoubleOption_("observed_peak_fraction");
if (debug_level_ > 0)
{
cout << "Calculating " << patterns.size() << " isotope patterns with " << ADDITIONAL_ISOTOPES << " additional isotopes." << endl;
}
double TIC_threshold(0.0);
// N15 has smaller RIA resolution and multiple RIA peaks tend to overlap more in correlation. This reduces the width of the pattern leading to better distinction
if (labeling_element == "N")
{
TIC_threshold = getDoubleOption_("pattern_15N_TIC_threshold");
}
else if (labeling_element == "C")
{
TIC_threshold = getDoubleOption_("pattern_13C_TIC_threshold");
}
else if (labeling_element == "H")
{
TIC_threshold = getDoubleOption_("pattern_2H_TIC_threshold");
}
else if (labeling_element == "O")
{
TIC_threshold = getDoubleOption_("pattern_18O_TIC_threshold");
}
// rate grid: 0 .. 100 % in steps of 100/n_element
double max_incorporation_rate = 100.0;
double incorporation_step = max_incorporation_rate / (double)n_element;
// calculate correlation with a natural averagine peptide (used to filter out coeluting peptides)
double peptide_weight = mass;
// number of zero bins padded on each side so the averagine pattern can slide
// slightly left of the measured position during correlation
const Size AVERAGINE_CORR_OFFSET = 3;
// calculate correlation for averagine peptides
std::vector<double> averagine_correlation(isotopic_intensities.size(), 0.0);
// extended by zeros on both sides to simplify correlation
vector<double> ext_isotopic_intensities(AVERAGINE_CORR_OFFSET, 0.0);
ext_isotopic_intensities.insert(ext_isotopic_intensities.end(), isotopic_intensities.begin(), isotopic_intensities.end());
for (Size i = 0; i != AVERAGINE_CORR_OFFSET; ++i)
{
ext_isotopic_intensities.push_back(0.0);
}
for (Size ii = 0; ii < isotopic_intensities.size(); ++ii)
{
// calculate isotope distribution of averagine peptide as this will be used to detect spurious correlations with coeluting peptides
// Note: actually it would be more accurate to use 15N-14N or 13C-12C distances. This doesn't affect averagine distribution much so this approximation is sufficient. (see TODO)
double current_weight = peptide_weight + ii * 1.0; // TODO: use 13C-12C or 15N-14N instead of 1.0 as mass distance to be super accurate
CoarseIsotopePatternGenerator solver(10);
IsotopeDistribution averagine = solver.estimateFromPeptideWeight(current_weight);
IsotopeDistribution::ContainerType averagine_intensities_pairs = averagine.getContainer();
// zeros to the left for sliding window correlation
std::vector<double> averagine_intensities(AVERAGINE_CORR_OFFSET, 0.0); // add 0 intensity bins left to actual averagine pattern
for (Size i = 0; i != averagine_intensities_pairs.size(); ++i)
{
averagine_intensities.push_back(averagine_intensities_pairs[i].getIntensity());
}
// zeros to the right
for (Size i = 0; i != AVERAGINE_CORR_OFFSET; ++i)
{
averagine_intensities.push_back(0.0);
}
// number of bins that can be correlated
Int max_correlated_values = std::min((int)ext_isotopic_intensities.size() - ii, averagine_intensities.size());
double corr_with_averagine = Math::pearsonCorrelationCoefficient(averagine_intensities.begin(), averagine_intensities.begin() + max_correlated_values,
ext_isotopic_intensities.begin() + ii, ext_isotopic_intensities.begin() + ii + max_correlated_values);
averagine_correlation[ii] = corr_with_averagine;
}
// calculate correlation of RIA peptide with measured data
for (Size ii = 0; ii != patterns.size(); ++ii)
{
double rate = (double)ii * incorporation_step;
vector<double>::const_iterator pattern_begin = patterns[ii].second.begin();
vector<double>::const_iterator pattern_end = patterns[ii].second.end();
vector<double>::const_iterator intensities_begin = isotopic_intensities.begin();
vector<double>::const_iterator intensities_end = isotopic_intensities.end();
// trim both ranges to the high-TIC core of the theoretical pattern
filterIsotopicIntensities(pattern_begin, pattern_end, intensities_begin, intensities_end, TIC_threshold);
// count empty bins in the trimmed measured range
Size zeros = 0;
for (vector<double>::const_iterator it = intensities_begin; it != intensities_end; ++it)
{
if (*it < 1e-8)
{
zeros++;
}
}
// remove correlations with only very few peaks
// NOTE(review): 'observed_peak_fraction' is compared against the fraction of
// EMPTY bins here (zeros / window size) — verify the option semantics match
if ((double)zeros / (double)std::distance(intensities_begin, intensities_end) > min_observed_peak_fraction)
{
map_rate_to_correlation_score[rate] = 0;
continue;
}
double correlation_score = Math::pearsonCorrelationCoefficient(pattern_begin, pattern_end, intensities_begin, intensities_end);
// remove correlations that show higher similarity to an averagine peptide
// NOTE(review): averagine_correlation is indexed by isotopic position but 'ii'
// here indexes the pattern/rate grid — confirm both grids are meant to align
if (rate > 5.0 && correlation_score < averagine_correlation[ii] + min_correlation_distance_to_averagine)
{
map_rate_to_correlation_score[rate] = 0;
continue;
}
// cout << ii << "\t" << std::distance(intensities_end, intensities_begin) << "\t" << std::distance(intensities_begin, isotopic_intensities.begin()) << "\t" << std::distance(intensities_end, isotopic_intensities.begin()) << endl;
// a NaN correlation (e.g. zero variance in either range) counts as no correlation
if (std::isnan(correlation_score))
{
correlation_score = 0.0;
}
map_rate_to_correlation_score[rate] = correlation_score;
}
}
///< Returns highest scoring rate and score pair in the map
void getBestRateScorePair(const MapRateToScoreType& map_rate_to_score, double& best_rate, double& best_score)
{
best_rate = 0.0;
best_score = -1;
for (MapRateToScoreType::const_iterator mit = map_rate_to_score.begin(); mit != map_rate_to_score.end(); ++mit)
{
if (mit->second > best_score)
{
best_score = mit->second;
best_rate = mit->first;
}
}
}
// Extracts the peaks covering the theoretical isotope pattern from the scan at
// (or just after) the given RT: the m/z window spans from the theoretical m/z up
// to element_count isotope steps; peaks with ~zero intensity are dropped.
PeakSpectrum extractPeakSpectrum(Size element_count, double mass_diff, double rt, double feature_hit_theoretical_mz, Int feature_hit_charge, const PeakMap& peak_map)
{
  PeakSpectrum scan = *peak_map.RTBegin(rt - 1e-8);
  const double window_end_mz = feature_hit_theoretical_mz + element_count * mass_diff / feature_hit_charge + 1e-8;
  PeakSpectrum::ConstIterator window_it = scan.MZBegin(feature_hit_theoretical_mz - 1e-8);
  PeakSpectrum::ConstIterator window_end = scan.MZEnd(window_end_mz);
  PeakSpectrum extracted;
  for (; window_it != window_end; ++window_it)
  {
    if (window_it->getIntensity() > 1e-8)
    {
      extracted.push_back(*window_it);
    }
  }
  return extracted;
}
// collects intensities starting at seed_mz/_rt, if no peak is found at the expected position a 0 is added
vector<double> extractIsotopicIntensities(Size element_count, double mass_diff, double mz_tolerance_ppm,
double seed_rt, double seed_mz, double seed_charge,
const PeakMap& peak_map)
{
  vector<double> isotopic_intensities;
  isotopic_intensities.reserve(element_count);
  // narrow RT window around the seed (feature) retention time
  const double rt_low = seed_rt - 0.01;
  const double rt_high = seed_rt + 0.01;
  for (Size iso = 0; iso != element_count; ++iso)
  {
    const double expected_mz = seed_mz + iso * mass_diff / seed_charge;
    // for the first isotopic positions enforce a window of at least 10 ppm,
    // afterwards use the configured tolerance as-is
    const double effective_ppm = (iso <= 5) ? std::max(10.0, mz_tolerance_ppm) : mz_tolerance_ppm;
    const double mz_low = expected_mz - expected_mz * effective_ppm * 1e-6;
    const double mz_high = expected_mz + expected_mz * effective_ppm * 1e-6;
    // sum all above-noise peak intensities inside the search window around the
    // theoretical 13C/15N position (usually this is a single peak)
    double summed_intensity = 0.0;
    PeakMap::ConstAreaIterator area_it = peak_map.areaBeginConst(rt_low, rt_high, mz_low, mz_high);
    for (; area_it != peak_map.areaEndConst(); ++area_it)
    {
      const double peak_intensity = area_it->getIntensity();
      if (peak_intensity > 1) // a valid 13C/15N peak
      {
        summed_intensity += peak_intensity;
      }
    }
    isotopic_intensities.push_back(summed_intensity);
  }
  return isotopic_intensities;
}
void writePeakIntensities_(SVOutStream& out_stream, vector<double> isotopic_intensities, bool write_13Cpeaks)
{
double intensities_sum_12C = 0.0;
// calculate 12C summed intensity
for (Size k = 0; k != 5; ++k)
{
if (k >= isotopic_intensities.size())
{
break;
}
intensities_sum_12C += isotopic_intensities[k];
}
// determine 13C peaks and summed intensity
double intensities_sum_13C = 0;
for (Size u = 5; u < isotopic_intensities.size(); ++u)
{
intensities_sum_13C += isotopic_intensities[u];
}
String int_string;
// print 12C peaks
for (Size u = 0; u != 5; ++u)
{
if (u == isotopic_intensities.size())
{
break;
}
int_string += String::number(isotopic_intensities[u], 0);
int_string += " ";
}
int_string += ", ";
if (write_13Cpeaks)
{
// print 13C peaks
for (Size u = 5; u < isotopic_intensities.size(); ++u)
{
int_string += String::number(isotopic_intensities[u], 0);
if (u < isotopic_intensities.size() - 1)
{
int_string += " ";
}
}
out_stream << int_string;
double ratio = 0.0;
if (intensities_sum_12C + intensities_sum_13C > 0.0000001)
{
ratio = intensities_sum_13C / (intensities_sum_12C + intensities_sum_13C);
}
out_stream << ratio; // << skewness(intensities_13C.begin(), intensities_13C.end());
}
else // bad correlation, no need to print intensities, ratio etc.
{
out_stream << "\t\t";
}
}
// scores smaller than 0 will be padded to 0
// Returns a copy of the map with all weights divided by the maximum weight;
// if the maximum is not positive, every entry becomes 0.
MapRateToScoreType normalizeToMax(const MapRateToScoreType& map_rate_to_decomposition_weight)
{
  // locate the largest decomposition weight and its rate
  double best_rate;
  double best_score;
  getBestRateScorePair(map_rate_to_decomposition_weight, best_rate, best_score);
  if (debug_level_ >= 10)
  {
    OPENMS_LOG_DEBUG << "best rate + score: " << best_rate << " " << best_score << endl;
  }
  // scale so that max(weights) == 1; non-positive maximum zeroes all entries
  MapRateToScoreType normalized(map_rate_to_decomposition_weight);
  for (auto& rate_weight : normalized)
  {
    rate_weight.second = (best_score > 0) ? rate_weight.second / best_score : 0.0;
  }
  return normalized;
}
// Extract the mono-isotopic trace and reports the rt of the maximum intensity
// Used to compensate for slight RT shifts (e.g. important if features of a different map are used)
// n_scans corresponds to the number of neighboring scan rts that should be extracted
// n_scan = 2 -> vector size = 1 + 2 + 2
// @return the apex RT first, followed by up to n_scans neighbors after/before the
//         apex (each only if within 10 s of the apex RT)
// Fix: the neighbor guard 'max_trace_int_idx < mono_trace.size() - (Int)i'
// underflowed (unsigned arithmetic) whenever i exceeded the trace length —
// e.g. for the single-element fallback trace with n_scans = 2 — which made the
// guard pass and read past the end of mono_trace. Rewritten as an
// overflow-safe addition on the left-hand side.
vector<double> findApexRT(const FeatureMap::iterator feature_it, double hit_rt, const PeakMap& peak_map, Size n_scans)
{
  vector<double> seeds_rt;
  vector<Peak2D> mono_trace;
  if (!feature_it->getConvexHulls().empty())
  {
    // extract elution profile of 12C containing mass trace using a bounding box
    // first convex hull contains the monoisotopic 12C trace
    const DBoundingBox<2>& mono_bb = feature_it->getConvexHulls()[0].getBoundingBox();
    //(min_rt, max_rt, min_mz, max_mz)
    PeakMap::ConstAreaIterator ait = peak_map.areaBeginConst(mono_bb.minPosition()[0], mono_bb.maxPosition()[0], mono_bb.minPosition()[1], mono_bb.maxPosition()[1]);
    for (; ait != peak_map.areaEndConst(); ++ait)
    {
      Peak2D p2d;
      p2d.setRT(ait.getRT()); // get rt of scan
      p2d.setMZ(ait->getMZ()); // get peak 1D mz
      p2d.setIntensity(ait->getIntensity());
      mono_trace.push_back(p2d);
    }
  }
  // if there is no 12C mono trace generate a valid starting point
  if (mono_trace.empty())
  {
    Peak2D p2d;
    double next_valid_scan_rt = peak_map.RTBegin(hit_rt - 0.001)->getRT();
    p2d.setRT(next_valid_scan_rt);
    p2d.setMZ(0); // actually not needed
    p2d.setIntensity(0);
    mono_trace.push_back(p2d);
  }
  // determine trace peak with highest intensity
  double max_trace_int = -1e16;
  Size max_trace_int_idx = 0;
  for (Size j = 0; j != mono_trace.size(); ++j)
  {
    if (mono_trace[j].getIntensity() > max_trace_int)
    {
      max_trace_int = mono_trace[j].getIntensity();
      max_trace_int_idx = j;
    }
  }
  double max_trace_int_rt = mono_trace[max_trace_int_idx].getRT();
  seeds_rt.push_back(max_trace_int_rt);
  // collect up to n_scans neighboring scan RTs on each side of the apex;
  // a neighbor falls back to the apex RT when the trace ends, and is only
  // reported when within 10 s of the apex
  for (Size i = 1; i <= n_scans; ++i)
  {
    double rt_after = max_trace_int_rt;
    if (max_trace_int_idx + i < mono_trace.size()) // overflow-safe bound check
    {
      rt_after = mono_trace[max_trace_int_idx + i].getRT();
    }
    double rt_before = max_trace_int_rt;
    if (max_trace_int_idx >= i)
    {
      rt_before = mono_trace[max_trace_int_idx - i].getRT();
    }
    if (fabs(max_trace_int_rt - rt_after) < 10.0)
    {
      seeds_rt.push_back(rt_after);
    }
    if (fabs(max_trace_int_rt - rt_before) < 10.0)
    {
      seeds_rt.push_back(rt_before);
    }
  }
  //cout << "Seeds size:" << seeds_rt.size() << endl;
  return seeds_rt;
}
// Flattens all spectra of the experiment into a single peak list ordered by position (m/z).
PeakSpectrum mergeSpectra(const PeakMap& to_merge)
{
  PeakSpectrum merged;
  for (Size spectrum_idx = 0; spectrum_idx < to_merge.size(); ++spectrum_idx)
  {
    const PeakSpectrum& spectrum = to_merge[spectrum_idx];
    merged.insert(merged.end(), spectrum.begin(), spectrum.end());
  }
  merged.sortByPosition();
  return merged;
}
///> converts a vector of isotopic intensities to a peak spectrum starting at mz=mz_start with mass_diff/charge step size
// @param mz_start m/z of the first (monoisotopic) peak
// @param mass_diff mass difference between adjacent isotopic positions
// @param charge charge used to convert the mass step into an m/z step
// @param isotopic_intensities one intensity per isotopic position
// @return spectrum with peak i at mz_start + i * mass_diff / charge
// Fix: take the intensity vector by const reference (the by-value parameter
// copied it on every call) and pre-allocate the output spectrum.
PeakSpectrum isotopicIntensitiesToSpectrum(double mz_start, double mass_diff, Int charge, const vector<double>& isotopic_intensities)
{
  PeakSpectrum ps;
  ps.reserve(isotopic_intensities.size());
  for (Size i = 0; i != isotopic_intensities.size(); ++i)
  {
    Peak1D peak;
    peak.setMZ(mz_start + i * mass_diff / (double)charge);
    peak.setIntensity(isotopic_intensities[i]);
    ps.push_back(peak);
  }
  return ps;
}
///> Collect decomposition coefficients in the merge window around the correlation maximum.
///> Final list of RIAs is constructed for the peptide.
// For every correlation maximum above min_corr_threshold, sums the decomposition
// weights inside +-weight_merge_window around its rate into one SIPIncorporation.
// Also derives the explained-TIC fraction and the number of non-zero
// decomposition coefficients (used for non-gaussian shape detection) and stores
// all three results in sip_peptide.
void extractIncorporationsAtCorrelationMaxima(SIPPeptide& sip_peptide,
const IsotopePatterns& patterns,
double weight_merge_window = 5.0,
double min_corr_threshold = 0.5,
double min_decomposition_weight = 10.0)
{
const MapRateToScoreType& map_rate_to_decomposition_weight = sip_peptide.decomposition_map;
const MapRateToScoreType& map_rate_to_correlation_score = sip_peptide.correlation_map;
vector<SIPIncorporation> sip_incorporations;
const vector<RateScorePair>& corr_maxima = sip_peptide.correlation_maxima;
double explained_TIC_fraction = 0;
double TIC = 0;
Size non_zero_decomposition_coefficients = 0;
double max_corr_TIC = 0;
for (Size k = 0; k < corr_maxima.size(); ++k)
{
const double rate = corr_maxima[k].rate;
const double corr = corr_maxima[k].score;
// only correlation maxima above the threshold become incorporations
if (corr > min_corr_threshold)
{
SIPIncorporation sip_incorporation{};
sip_incorporation.rate = rate;
// sum up decomposition intensities for quantification in merge window
double int_sum = 0;
MapRateToScoreType::const_iterator low = map_rate_to_decomposition_weight.lower_bound(rate - weight_merge_window - 1e-4);
MapRateToScoreType::const_iterator high = map_rate_to_decomposition_weight.lower_bound(rate + weight_merge_window + 1e-4);
for (; low != high; ++low)
{
int_sum += low->second;
}
// NOTE(review): this also adds the first entry AT/after the upper window
// bound (inclusive upper edge?) — looks intentional, but verify
if (low != map_rate_to_decomposition_weight.end())
{
int_sum += low->second;
}
sip_incorporation.abundance = int_sum; // calculate abundance as sum of all decompositions
sip_incorporation.correlation = min(corr, 1.0);
max_corr_TIC += int_sum;
// find closest idx (could be more efficient using binary search)
Size closest_idx = 0;
for (Size i = 0; i != patterns.size(); ++i)
{
if (fabs(patterns[i].first - rate) < fabs(patterns[closest_idx].first - rate))
{
closest_idx = i;
}
}
#ifdef DEBUG_METAPROSIP
sip_incorporation.theoretical = isotopicIntensitiesToSpectrum(sip_peptide.mz_theo, sip_peptide.mass_diff, sip_peptide.charge, patterns[closest_idx].second);
#endif
// skip (near) zero-abundance decompositions instead of reporting them
if (int_sum > 1e-4)
{
sip_incorporations.push_back(sip_incorporation);
}
else
{
if (debug_level_ > 1)
{
OPENMS_LOG_WARN << "warning: prevented adding of 0 abundance decomposition at rate " << rate << endl;
OPENMS_LOG_WARN << "decomposition: " << endl;
for (MapRateToScoreType::const_iterator it = map_rate_to_decomposition_weight.begin(); it != map_rate_to_decomposition_weight.end(); ++it)
{
OPENMS_LOG_WARN << it->first << " " << it->second << endl;
}
OPENMS_LOG_WARN << "correlation: " << endl;
for (MapRateToScoreType::const_iterator it = map_rate_to_correlation_score.begin(); it != map_rate_to_correlation_score.end(); ++it)
{
OPENMS_LOG_WARN << it->first << " " << it->second << endl;
}
}
}
}
}
// find highest non-natural incorporation
// (rates <= 5 % are treated as natural isotope abundance throughout)
double highest_non_natural_abundance = 0;
double highest_non_natural_rate = 0;
for (vector<SIPIncorporation>::const_iterator it = sip_incorporations.begin(); it != sip_incorporations.end(); ++it)
{
if (it->rate < 5.0) // skip natural
{
continue;
}
if (it->abundance > highest_non_natural_abundance)
{
highest_non_natural_rate = it->rate;
highest_non_natural_abundance = it->abundance;
}
}
bool non_natural = false;
if (highest_non_natural_rate > 5.0 && highest_non_natural_abundance > min_decomposition_weight)
{
non_natural = true;
}
// used for non-gaussian shape detection
// counts coefficients above 5 % of the strongest non-natural abundance
for (MapRateToScoreType::const_iterator mit = map_rate_to_decomposition_weight.begin(); mit != map_rate_to_decomposition_weight.end(); ++mit)
{
double decomposition_rate = mit->first;
double decomposition_weight = mit->second;
TIC += decomposition_weight;
if (non_natural && decomposition_weight > 0.05 * highest_non_natural_abundance && decomposition_rate > 5.0)
{
++non_zero_decomposition_coefficients;
}
}
// fraction of total decomposition intensity explained by the reported incorporations
if (TIC > 1e-5)
{
explained_TIC_fraction = max_corr_TIC / TIC;
}
else
{
explained_TIC_fraction = 0;
}
// set results
sip_peptide.incorporations = sip_incorporations;
sip_peptide.explained_TIC_fraction = explained_TIC_fraction;
sip_peptide.non_zero_decomposition_coefficients = non_zero_decomposition_coefficients;
}
///> Collect decomposition coefficients. Starting at the largest decomposition weights merge smaller weights in the merge window.
// Seeds are all rates whose correlation and decomposition weight pass the
// thresholds (a separate, usually looser correlation threshold applies below
// 10 % RIA). Seeds are consumed greedily by decreasing weight; each consumed
// seed absorbs all decomposition weights within +-weight_merge_window of its
// rate (and removes the seeds in that window). Results are sorted by rate and
// stored in sip_peptide together with explained-TIC fraction and the non-zero
// coefficient count.
void extractIncorporationsAtHeighestDecompositionWeights(SIPPeptide& sip_peptide,
const IsotopePatterns& patterns,
double weight_merge_window = 5.0,
double min_corr_threshold = 0.5,
double min_low_RIA_threshold = -1,
double min_decomposition_weight = 10.0)
{
// negative sentinel: fall back to the general correlation threshold
if (min_low_RIA_threshold < 0)
{
min_low_RIA_threshold = min_corr_threshold;
}
const MapRateToScoreType& map_rate_to_decomposition_weight = sip_peptide.decomposition_map;
const MapRateToScoreType& map_rate_to_correlation_score = sip_peptide.correlation_map;
double explained_TIC_fraction = 0;
double TIC = 0;
Size non_zero_decomposition_coefficients = 0;
double max_corr_TIC = 0;
vector<SIPIncorporation> sip_incorporations;
// find decomposition weights with correlation larger than threshold (seeds)
// NOTE(review): both maps are iterated in lockstep — assumes identical key sets
MapRateToScoreType::const_iterator md_it = map_rate_to_decomposition_weight.begin();
MapRateToScoreType::const_iterator mc_it = map_rate_to_correlation_score.begin();
set<pair<double, double> > seeds_weight_rate_pair;
for (; md_it != map_rate_to_decomposition_weight.end(); ++md_it, ++mc_it)
{
if (mc_it->first < 10.0) // lowRIA region
{
if (mc_it->second >= min_low_RIA_threshold && md_it->second >= min_decomposition_weight)
{
seeds_weight_rate_pair.insert(make_pair(md_it->second, md_it->first));
}
}
else // non-low RIA region
{
if (mc_it->second >= min_corr_threshold && md_it->second >= min_decomposition_weight)
{
seeds_weight_rate_pair.insert(make_pair(md_it->second, md_it->first));
//cout << "Seeds insert: " << md_it->second << " " << md_it->first << endl;
}
}
}
// cout << "Seeds: " << seeds_weight_rate_pair.size() << endl;
// seeds_weight_rate_pair contains the seeds ordered by their decomposition weight
// greedy consumption: the loop terminates because each pass erases at least the
// current seed (its own rate lies inside its merge window)
while (!seeds_weight_rate_pair.empty())
{
// pop last element from set
set<pair<double, double> >::iterator last_element = --seeds_weight_rate_pair.end();
pair<double, double> current_seed = *last_element;
//cout << current_seed.first << " " << current_seed.second << endl;
// find weights in window to merge, remove from seed map. maybe also remove from original map depending on whether we want to quantify the weight only 1 time
const double rate = current_seed.second;
SIPIncorporation sip_incorporation{};
sip_incorporation.rate = rate;
MapRateToScoreType::const_iterator low = map_rate_to_decomposition_weight.lower_bound(rate - weight_merge_window - 1e-4);
MapRateToScoreType::const_iterator high = map_rate_to_decomposition_weight.lower_bound(rate + weight_merge_window + 1e-4);
// cout << "Distance: " << std::distance(low, high) << endl;;
MapRateToScoreType::const_iterator l1 = low;
MapRateToScoreType::const_iterator h1 = high;
// iterate over peaks in merge window
for (; l1 != h1; ++l1)
{
// remove from seed map
seeds_weight_rate_pair.erase(make_pair(l1->second, l1->first));
}
// Sum up decomposition intensities for quantification in merge window
double int_sum = 0;
for (; low != high; ++low)
{
int_sum += low->second;
}
// NOTE(review): also adds the first entry AT/after the upper window bound
// (inclusive upper edge?) — mirrors extractIncorporationsAtCorrelationMaxima
if (low != map_rate_to_decomposition_weight.end())
{
int_sum += low->second;
}
sip_incorporation.abundance = int_sum;
// NOTE(review): corr_it could be end() if rate exceeds the last map key — verify
MapRateToScoreType::const_iterator corr_it = map_rate_to_correlation_score.lower_bound(rate - 1e-6);
sip_incorporation.correlation = min(corr_it->second, 1.0);
max_corr_TIC += int_sum;
PeakSpectrum theoretical_spectrum;
// find closest idx (could be more efficient using binary search)
Size closest_idx = 0;
for (Size i = 0; i != patterns.size(); ++i)
{
if (fabs(patterns[i].first - rate) < fabs(patterns[closest_idx].first - rate))
{
closest_idx = i;
}
}
#ifdef DEBUG_METAPROSIP
sip_incorporation.theoretical = isotopicIntensitiesToSpectrum(sip_peptide.mz_theo, sip_peptide.mass_diff, sip_peptide.charge, patterns[closest_idx].second);
#endif
sip_incorporations.push_back(sip_incorporation);
}
// find highest non-natural incorporation
// (rates <= 5 % are treated as natural isotope abundance throughout)
double highest_non_natural_abundance = 0;
double highest_non_natural_rate = 0;
for (vector<SIPIncorporation>::const_iterator it = sip_incorporations.begin(); it != sip_incorporations.end(); ++it)
{
if (it->rate < 5.0) // skip natural
{
continue;
}
if (it->abundance > highest_non_natural_abundance)
{
highest_non_natural_rate = it->rate;
highest_non_natural_abundance = it->abundance;
}
}
bool non_natural = false;
if (highest_non_natural_rate > 5.0)
{
non_natural = true;
}
// used for non-gaussian shape detection
// counts coefficients above 5 % of the strongest non-natural abundance
for (MapRateToScoreType::const_iterator mit = map_rate_to_decomposition_weight.begin(); mit != map_rate_to_decomposition_weight.end(); ++mit)
{
double decomposition_rate = mit->first;
double decomposition_weight = mit->second;
TIC += decomposition_weight;
if (non_natural && decomposition_weight > 0.05 * highest_non_natural_abundance && decomposition_rate > 5.0)
{
++non_zero_decomposition_coefficients;
}
}
// fraction of total decomposition intensity explained by the reported incorporations
if (TIC > 1e-5)
{
explained_TIC_fraction = max_corr_TIC / TIC;
}
else
{
explained_TIC_fraction = 0;
}
// set results
std::sort(sip_incorporations.begin(), sip_incorporations.end(), RIALess());
sip_peptide.incorporations = sip_incorporations;
sip_peptide.explained_TIC_fraction = explained_TIC_fraction;
sip_peptide.non_zero_decomposition_coefficients = non_zero_decomposition_coefficients;
}
///> calculate the global labeling ratio based on all but the first 4 peaks
// Returns sum(intensities[4..]) / sum(intensities); 0.0 if fewer than 5 peaks
// are present or the total intensity is (near) zero.
double calculateGlobalLR(const std::vector<double>& isotopic_intensities)
{
  // fewer than 5 isotopic peaks: no incorporation information beyond natural isotopes
  if (isotopic_intensities.size() < 5)
  {
    return 0.0;
  }
  double total = 0.0;
  double incorporated = 0.0;
  for (std::size_t i = 0; i < isotopic_intensities.size(); ++i)
  {
    total += isotopic_intensities[i];
    if (i >= 4)
    {
      incorporated += isotopic_intensities[i];
    }
  }
  // guard against division by a (near) zero total intensity
  if (total < 1e-4)
  {
    return 0.0;
  }
  return incorporated / total;
}
ExitCodes main_(int, const char**) override
{
String file_extension_ = getStringOption_("plot_extension");
Int debug_level = getIntOption_("debug");
String in_mzml = getStringOption_("in_mzML");
String in_features = getStringOption_("in_featureXML");
double mz_tolerance_ppm_ = getDoubleOption_("mz_tolerance_ppm");
double rt_tolerance_s = getDoubleOption_("rt_tolerance_s");
double weight_merge_window_ = getDoubleOption_("weight_merge_window");
double intensity_threshold_ = getDoubleOption_("intensity_threshold");
double decomposition_threshold = getDoubleOption_("decomposition_threshold");
Size min_consecutive_isotopes = (Size)getIntOption_("min_consecutive_isotopes");
String qc_output_directory = getStringOption_("qc_output_directory");
Size n_heatmap_bins = getIntOption_("heatmap_bins");
double score_plot_y_axis_min = getDoubleOption_("score_plot_yaxis_min");
String tmp_path = File::getTempDirectory();
tmp_path.substitute('\\', '/');
// Do we want to create a qc report?
if (!qc_output_directory.empty())
{
QString executable = getStringOption_("r_executable").toQString();
// convert path to absolute path
QDir qc_dir(qc_output_directory.toQString());
qc_output_directory = String(qc_dir.absolutePath());
// trying to create qc_output_directory if not present
if (!qc_dir.exists())
{
qc_dir.mkpath(qc_output_directory.toQString());
}
// check if R and dependencies are installed
StringList package_names;
package_names.push_back("gplots");
bool R_is_working = RIntegration::checkRDependencies(tmp_path, package_names, executable);
if (!R_is_working)
{
OPENMS_LOG_INFO << "There was a problem detecting one of the required R libraries." << endl;
return EXTERNAL_PROGRAM_ERROR;
}
}
String out_csv = getStringOption_("out_csv");
ofstream out_csv_stream(out_csv.c_str());
out_csv_stream << fixed << setprecision(4);
String out_peptide_centric_csv = getStringOption_("out_peptide_centric_csv");
ofstream out_peptide_csv_stream(out_peptide_centric_csv.c_str());
out_peptide_csv_stream << fixed << setprecision(4);
String labeling_element = getStringOption_("labeling_element");
//bool plot_merged = getFlag_("plot_merged");
bool report_natural_peptides = getFlag_("report_natural_peptides");
bool use_unassigned_ids = getFlag_("use_unassigned_ids");
bool use_averagine_ids = getFlag_("use_averagine_ids");
//String debug_patterns_name = getStringOption_("debug_patterns_name");
double correlation_threshold = getDoubleOption_("correlation_threshold");
double xic_threshold = getDoubleOption_("xic_threshold");
double min_correlation_distance_to_averagine = getDoubleOption_("min_correlation_distance_to_averagine");
bool cluster_flag = getFlag_("cluster");
// read descriptions from FASTA and create map for fast annotation
String in_fasta = getStringOption_("in_fasta");
vector<FASTAFile::FASTAEntry> fasta_entries;
FASTAFile fasta_file;
fasta_file.setLogType(log_type_);
fasta_file.load(in_fasta, fasta_entries);
map<String, String> proteinid_to_description;
for (vector<FASTAFile::FASTAEntry>::const_iterator it = fasta_entries.begin(); it != fasta_entries.end(); ++it)
{
if (!it->identifier.empty() && !it->description.empty())
{
String s = it->identifier;
proteinid_to_description[s.trim().toUpper()] = it->description;
}
}
OPENMS_LOG_INFO << "loading feature map..." << endl;
FeatureMap feature_map;
FileHandler().loadFeatures(in_features, feature_map, {FileTypes::FEATUREXML});
// annotate as features found using feature finding (to distinguish them from averagine features oder id based features ... see below)
for (FeatureMap::iterator feature_it = feature_map.begin(); feature_it != feature_map.end(); ++feature_it)
{
feature_it->setMetaValue("feature_type", FEATURE_STRING);
}
// if also unassigned ids are used create a pseudo feature
if (use_unassigned_ids)
{
const PeptideIdentificationList unassigned_ids = feature_map.getUnassignedPeptideIdentifications();
Size unassigned_id_features = 0;
for (PeptideIdentificationList::const_iterator it = unassigned_ids.begin(); it != unassigned_ids.end(); ++it)
{
vector<PeptideHit> hits = it->getHits();
if (!hits.empty())
{
Feature f;
f.setMetaValue("feature_type", UNASSIGNED_ID_STRING);
f.setRT(it->getRT());
// take sequence of first hit to calculate ground truth mz
Int charge = hits[0].getCharge();
if (charge == 0)
{
continue;
}
double mz = hits[0].getSequence().getMZ(charge);
f.setMZ(mz);
// add id to pseudo feature
PeptideIdentificationList id;
id.push_back(*it);
f.setPeptideIdentifications(id);
feature_map.push_back(f);
unassigned_id_features++;
}
}
feature_map.updateRanges();
OPENMS_LOG_INFO << "Evaluating " << unassigned_id_features << " unassigned identifications." << endl;
}
// determine all spectra that have not been identified and assign an averagine peptide to it
if (use_averagine_ids)
{
// load only MS2 spectra with precursor information
PeakMap peak_map;
FileHandler mh;
std::vector<Int> ms_level(1, 2);
mh.getOptions().setMSLevels(ms_level);
mh.loadExperiment(in_mzml, peak_map, {FileTypes::MZML});
peak_map.sortSpectra();
peak_map.updateRanges();
// extract rt and mz of all identified precursors and store them in blacklist
vector<Peak2D> blacklisted_precursors;
// in features
for (FeatureMap::iterator feature_it = feature_map.begin(); feature_it != feature_map.end(); ++feature_it) // for each peptide feature
{
const PeptideIdentificationList& f_ids = feature_it->getPeptideIdentifications();
for (PeptideIdentificationList::const_iterator id_it = f_ids.begin(); id_it != f_ids.end(); ++id_it)
{
if (!id_it->getHits().empty())
{
// Feature with id found so we don't need to generate averagine id. Find MS2 in experiment and blacklist it.
Peak2D p;
p.setRT(id_it->getRT());
p.setMZ(id_it->getMZ());
blacklisted_precursors.push_back(p);
}
}
}
// and in unassigned ids
const PeptideIdentificationList unassigned_ids = feature_map.getUnassignedPeptideIdentifications();
for (PeptideIdentificationList::const_iterator it = unassigned_ids.begin(); it != unassigned_ids.end(); ++it)
{
const vector<PeptideHit> hits = it->getHits();
if (!hits.empty())
{
Peak2D p;
p.setRT(it->getRT());
p.setMZ(it->getMZ());
blacklisted_precursors.push_back(p);
}
}
// find index of all precursors that have been blacklisted
vector<Size> blacklist_idx;
for (vector<Peak2D>::const_iterator it = blacklisted_precursors.begin(); it != blacklisted_precursors.end(); ++it)
{
PeakMap::const_iterator map_rt_begin = peak_map.RTBegin(-std::numeric_limits<double>::max());
PeakMap::const_iterator rt_begin = peak_map.RTBegin(it->getRT() - 1e-5);
Size index = std::distance(map_rt_begin, rt_begin);
//cout << "Blacklist Index: " << index << endl;
blacklist_idx.push_back(index);
}
for (Size i = 0; i != peak_map.size(); ++i)
{
// precursor not blacklisted?
if (find(blacklist_idx.begin(), blacklist_idx.end(), i) == blacklist_idx.end() && !peak_map[i].getPrecursors().empty())
{
// store feature with id generated from averagine peptide (pseudo id)
Feature f;
double precursor_mz = peak_map[i].getPrecursors()[0].getMZ();
int precursor_charge = peak_map[i].getPrecursors()[0].getCharge();
//double precursor_mass = (double)precursor_charge * precursor_mz - (double)precursor_charge * Constants::PROTON_MASS_U;
// add averagine id to pseudo feature
PeptideHit pseudo_hit;
// set peptide with lowest deviation from averagine
pseudo_hit.setSequence(AASequence()); // set empty sequence
pseudo_hit.setCharge(precursor_charge);
PeptideIdentification pseudo_id;
vector<PeptideHit> pseudo_hits;
pseudo_hits.push_back(pseudo_hit);
pseudo_id.setHits(pseudo_hits);
PeptideIdentificationList id;
id.push_back(pseudo_id);
f.setPeptideIdentifications(id);
f.setRT(peak_map[i].getRT());
f.setMZ(precursor_mz);
f.setMetaValue("feature_type", UNIDENTIFIED_STRING);
feature_map.push_back(f);
}
}
feature_map.updateRanges();
}
OPENMS_LOG_INFO << "loading experiment..." << endl;
PeakMap peak_map;
FileHandler mh;
std::vector<Int> ms_level(1, 1);
mh.getOptions().setMSLevels(ms_level);
mh.loadExperiment(in_mzml, peak_map, {FileTypes::MZML});
peak_map.updateRanges();
ThresholdMower tm;
Param tm_parameters;
tm_parameters.setValue("threshold", intensity_threshold_);
tm.setParameters(tm_parameters);
tm.filterPeakMap(peak_map);
peak_map.sortSpectra();
// used to generate plots
vector<String> titles;
vector<MapRateToScoreType> weight_maps;
vector<MapRateToScoreType> normalized_weight_maps;
vector<MapRateToScoreType> correlation_maps;
String file_suffix = "_" + String(QFileInfo(in_mzml.toQString()).baseName()) + "_" + String::random(4);
vector<SIPPeptide> sip_peptides;
Size nPSMs = 0; ///< number of PSMs. If 0 IDMapper has not been called.
Size spectrum_with_no_isotopic_peaks(0);
Size spectrum_with_isotopic_peaks(0);
for (FeatureMap::iterator feature_it = feature_map.begin(); feature_it != feature_map.end(); ++feature_it) // for each peptide feature
{
const double feature_hit_center_rt = feature_it->getRT();
// check if out of experiment bounds
if (feature_hit_center_rt > peak_map.getMaxRT() || feature_hit_center_rt < peak_map.getMinRT())
{
continue;
}
// Extract 1 or more MS/MS with identifications assigned to the feature by IDMapper
PeptideIdentificationList pep_ids = feature_it->getPeptideIdentifications();
nPSMs += pep_ids.size();
// Skip features without peptide identifications
if (pep_ids.empty())
{
continue;
}
// add best scoring PeptideHit of all PeptideIdentifications mapping to the current feature to tmp_pepid
PeptideIdentification tmp_pepid;
tmp_pepid.setHigherScoreBetter(pep_ids[0].isHigherScoreBetter());
for (Size i = 0; i != pep_ids.size(); ++i)
{
pep_ids[i].sort();
const vector<PeptideHit>& hits = pep_ids[i].getHits();
if (!hits.empty())
{
tmp_pepid.insertHit(hits[0]);
}
else
{
OPENMS_LOG_WARN << "Empty peptide hit encountered on feature. Ignoring." << endl;
}
}
tmp_pepid.sort();
SIPPeptide sip_peptide;
sip_peptide.feature_type = feature_it->getMetaValue("feature_type"); // used to annotate feature type in reporting
// retrieve identification information
const PeptideHit& feature_hit = tmp_pepid.getHits()[0];
const double feature_hit_score = feature_hit.getScore();
const double feature_hit_center_mz = feature_it->getMZ();
const Int feature_hit_charge = feature_hit.getCharge();
String feature_hit_seq = "";
double feature_hit_theoretical_mz = 0;
AASequence feature_hit_aaseq;
// set theoretical mz of peptide hit to:
// mz of sequence if we have a sequence identified
// otherwise:
// mz of precursor (stored in feature mz) if no sequence identified
if (sip_peptide.feature_type == FEATURE_STRING || sip_peptide.feature_type == UNASSIGNED_ID_STRING)
{
feature_hit_aaseq = feature_hit.getSequence();
feature_hit_seq = feature_hit_aaseq.toString();
feature_hit_theoretical_mz = feature_hit_aaseq.getMZ(feature_hit.getCharge());
}
else if (sip_peptide.feature_type == UNIDENTIFIED_STRING)
{
feature_hit_aaseq = AASequence();
feature_hit_seq = String("");
feature_hit_theoretical_mz = feature_hit_center_mz;
}
if (debug_level_ > 1)
{
OPENMS_LOG_DEBUG << "Feature type: (" << sip_peptide.feature_type << ") Seq.: " << feature_hit_seq << " m/z: " << feature_hit_theoretical_mz << endl;
}
const set<String> protein_accessions = feature_hit.extractProteinAccessionsSet();
sip_peptide.accessions = vector<String>(protein_accessions.begin(), protein_accessions.end());
sip_peptide.sequence = feature_hit_aaseq;
sip_peptide.mz_theo = feature_hit_theoretical_mz;
sip_peptide.mass_theo = feature_hit_theoretical_mz * feature_hit_charge - feature_hit_charge * Constants::PROTON_MASS_U;
sip_peptide.charge = feature_hit_charge;
sip_peptide.score = feature_hit_score;
sip_peptide.feature_rt = feature_hit_center_rt;
sip_peptide.feature_mz = feature_hit_center_mz;
sip_peptide.unique = sip_peptide.accessions.size() == 1;
// determine retention time of scans next to the central scan
vector<double> seeds_rt = findApexRT(feature_it, feature_hit_center_rt, peak_map, 2); // 1 scan at maximum, 2+2 above and below
double max_trace_int_rt = seeds_rt[0];
// determine maximum number of peaks and mass difference
EmpiricalFormula e = feature_hit_aaseq.getFormula();
// assign mass difference between labeling element isotopes
if (labeling_element == "C")
{
sip_peptide.mass_diff = 1.003354837810;
}
else if (labeling_element == "N")
{
sip_peptide.mass_diff = 0.9970349;
}
else if (labeling_element == "H")
{
sip_peptide.mass_diff = 1.00627675;
}
else if (labeling_element == "O")
{
// 18O-16O distance is approx. 2.0042548 Dalton but natural isotopic pattern is dominated by 13C-12C distance (approx. 1.0033548)
// After the convolution of the O-isotope distribution with the natural one we get multiple copies of the O-distribution (with 2 Da spaces)
// shifted by 13C-12C distances. Choosing (18O-16O) / 2 as expected mass trace distance should therefor collect all of them.
sip_peptide.mass_diff = 2.0042548 / 2.0;
}
Size element_count(0);
Size isotopic_trace_count(0);
if (sip_peptide.feature_type == FEATURE_STRING || sip_peptide.feature_type == UNASSIGNED_ID_STRING)
{
element_count = MetaProSIPDecomposition::getNumberOfLabelingElements(labeling_element, feature_hit_aaseq);
cout << "Numver of labeling elements: " << element_count << "\t" << feature_hit_aaseq.toString() << endl;
}
else // if (sip_peptide.feature_type == UNIDENTIFIED_STRING)
{
cout << "Unidentified" << endl;
// calculate number of expected labeling elements using averagine model C:4.9384 H:7.7583 N:1.3577 O:1.4773 S:0.0417 divided by average weight 111.1254
if (labeling_element == "C")
{
element_count = static_cast<Size>(sip_peptide.mass_theo * 0.0444398894906044);
}
else if (labeling_element == "N")
{
element_count = static_cast<Size>(sip_peptide.mass_theo * 0.0122177302837372);
}
else if (labeling_element == "H")
{
element_count = static_cast<Size>(sip_peptide.mass_theo * 0.06981572169);
}
else if (labeling_element == "O")
{
element_count = static_cast<Size>(sip_peptide.mass_theo * 0.01329399039);
}
}
isotopic_trace_count = labeling_element != "O" ? element_count : element_count * 2;
// collect 13C / 15N peaks
if (debug_level_ >= 10)
{
OPENMS_LOG_DEBUG << "Extract XICs" << endl;
}
vector<double> isotopic_intensities = MetaProSIPXICExtraction::extractXICsOfIsotopeTraces(isotopic_trace_count + ADDITIONAL_ISOTOPES, sip_peptide.mass_diff, mz_tolerance_ppm_, rt_tolerance_s, max_trace_int_rt, feature_hit_theoretical_mz, feature_hit_charge, peak_map, xic_threshold);
// set intensity to zero if not enough neighboring isotopic peaks are present
for (Size i = 0; i != isotopic_intensities.size(); ++i)
{
if (isotopic_intensities[i] < 1e-4) continue;
Size consecutive_isotopes = 0;
Size j = i;
while (j != std::numeric_limits<Size>::max()) // unsigned type wrap-around is well defined
{
if (isotopic_intensities[j] <= 1e-4) break;
++consecutive_isotopes;
--j;
}
j = i + 1;
while (j < isotopic_intensities.size())
{
if (isotopic_intensities[j] <= 1e-4) break;
++consecutive_isotopes;
++j;
}
if (consecutive_isotopes < min_consecutive_isotopes)
{
isotopic_intensities[i] = 0;
}
}
double TIC = accumulate(isotopic_intensities.begin(), isotopic_intensities.end(), 0.0);
// collect 13C / 15N peaks
if (debug_level_ >= 10)
{
OPENMS_LOG_DEBUG << "TIC of XICs: " << TIC << endl;
for (Size i = 0; i != isotopic_intensities.size(); ++i)
{
cout << isotopic_intensities[i] << endl;
}
}
// no Peaks collected
if (TIC < 1e-4)
{
++spectrum_with_no_isotopic_peaks;
if (debug_level > 0)
{
OPENMS_LOG_INFO << "no isotopic peaks in spectrum" << endl;
}
continue;
}
else
{
++spectrum_with_isotopic_peaks;
}
// store accumulated intensities at theoretical positions
sip_peptide.accumulated = isotopicIntensitiesToSpectrum(feature_hit_theoretical_mz, sip_peptide.mass_diff, feature_hit_charge, isotopic_intensities);
sip_peptide.global_LR = calculateGlobalLR(isotopic_intensities);
Size non_zero_isotopic_intensities(0);
for (Size i = 0; i != isotopic_intensities.size(); ++i)
{
if (isotopic_intensities[i] > 0.1)
{
++non_zero_isotopic_intensities;
}
}
if (debug_level > 0)
{
cout << "Isotopic intensities found / total: " << non_zero_isotopic_intensities << "/" << isotopic_intensities.size() << endl;
}
OPENMS_LOG_INFO << feature_hit.getSequence().toString() << "\trt: " << max_trace_int_rt << endl;
// correlation filtering
MapRateToScoreType map_rate_to_correlation_score;
IsotopePatterns patterns;
// calculate isotopic patterns for the given sequence, incoroporation interval/steps
if (sip_peptide.feature_type == FEATURE_STRING || sip_peptide.feature_type == UNASSIGNED_ID_STRING)
{
if (labeling_element == "N")
{
patterns = MetaProSIPDecomposition::calculateIsotopePatternsFor15NRange(AASequence::fromString(feature_hit_seq));
}
else if (labeling_element == "C")
{
patterns = MetaProSIPDecomposition::calculateIsotopePatternsFor13CRange(AASequence::fromString(feature_hit_seq));
}
else if (labeling_element == "H")
{
patterns = MetaProSIPDecomposition::calculateIsotopePatternsFor2HRange(AASequence::fromString(feature_hit_seq));
}
else if (labeling_element == "O")
{
patterns = MetaProSIPDecomposition::calculateIsotopePatternsFor18ORange(AASequence::fromString(feature_hit_seq));
}
}
else if (sip_peptide.feature_type == UNIDENTIFIED_STRING)
{
if (labeling_element == "N")
{
patterns = MetaProSIPDecomposition::calculateIsotopePatternsFor15NRangeOfAveraginePeptide(sip_peptide.mass_theo);
}
else if (labeling_element == "C")
{
patterns = MetaProSIPDecomposition::calculateIsotopePatternsFor13CRangeOfAveraginePeptide(sip_peptide.mass_theo);
}
else if (labeling_element == "H")
{
patterns = MetaProSIPDecomposition::calculateIsotopePatternsFor2HRangeOfAveraginePeptide(sip_peptide.mass_theo);
}
else if (labeling_element == "O")
{
patterns = MetaProSIPDecomposition::calculateIsotopePatternsFor18ORangeOfAveraginePeptide(sip_peptide.mass_theo);
}
}
// store theoretical patterns for visualization
sip_peptide.patterns = patterns;
for (IsotopePatterns::const_iterator pit = sip_peptide.patterns.begin(); pit != sip_peptide.patterns.end(); ++pit)
{
cout << pit->first << "\t" << pit->second.size() << endl;
PeakSpectrum p = isotopicIntensitiesToSpectrum(feature_hit_theoretical_mz, sip_peptide.mass_diff, feature_hit_charge, pit->second);
p.setMetaValue("rate", (double)pit->first);
p.setMSLevel(2);
#ifdef DEBUG_METAPROSIP
sip_peptide.pattern_spectra.push_back(p);
#endif
}
// calculate decomposition into isotopic patterns
MapRateToScoreType map_rate_to_decomposition_weight;
MetaProSIPDecomposition::calculateDecompositionWeightsIsotopicPatterns(isotopic_trace_count, isotopic_intensities, patterns, map_rate_to_decomposition_weight, sip_peptide);
// set first intensity to zero and remove first 2 possible RIAs (0% and e.g. 1.07% for carbon)
MapRateToScoreType tmp_map_rate_to_correlation_score;
if (getFlag_("filter_monoisotopic"))
{
// calculate correlation of natural RIAs (for later reporting) before we subtract the intensities. This is somewhat redundant but no speed bottleneck.
calculateCorrelation(isotopic_trace_count, isotopic_intensities, patterns, tmp_map_rate_to_correlation_score, labeling_element, sip_peptide.mass_theo, -1.0);
for (Size i = 0; i != sip_peptide.reconstruction_monoistopic.size(); ++i)
{
if (i == 0)
{
isotopic_intensities[0] = 0;
}
isotopic_intensities[i] -= sip_peptide.reconstruction_monoistopic[i];
if (isotopic_intensities[i] < 0)
{
isotopic_intensities[i] = 0;
}
}
}
sip_peptide.decomposition_map = map_rate_to_decomposition_weight;
// calculate Pearson correlation coefficients
calculateCorrelation(isotopic_trace_count, isotopic_intensities, patterns, map_rate_to_correlation_score, labeling_element, sip_peptide.mass_theo, min_correlation_distance_to_averagine);
// restore original correlation of natural RIAs (take maximum of observed correlations)
if (getFlag_("filter_monoisotopic"))
{
MapRateToScoreType::iterator dc_it = map_rate_to_correlation_score.begin();
MapRateToScoreType::const_iterator tmp_dc_it = tmp_map_rate_to_correlation_score.begin();
dc_it->second = max(tmp_dc_it->second, dc_it->second);
++dc_it;
++tmp_dc_it;
dc_it->second = max(tmp_dc_it->second, dc_it->second);
}
sip_peptide.correlation_map = map_rate_to_correlation_score;
// determine maximum correlations
sip_peptide.correlation_maxima = MetaProSIPInterpolation::getHighPoints(correlation_threshold, map_rate_to_correlation_score);
// FOR REPORTING: store incorporation information like e.g. theoretical spectrum for best correlations
if (getStringOption_("collect_method") == "correlation_maximum")
{
extractIncorporationsAtCorrelationMaxima(sip_peptide, patterns, weight_merge_window_, correlation_threshold);
}
else if (getStringOption_("collect_method") == "decomposition_maximum")
{
extractIncorporationsAtHeighestDecompositionWeights(sip_peptide, patterns, weight_merge_window_, correlation_threshold, getDoubleOption_("lowRIA_correlation_threshold"));
}
// store sip peptide
if (!sip_peptide.incorporations.empty() && sip_peptide.RR > decomposition_threshold)
{
if (debug_level > 0)
{
OPENMS_LOG_INFO << "SIP peptides: " << sip_peptide.incorporations.size() << endl;
}
sip_peptides.push_back(sip_peptide);
}
MapRateToScoreType map_rate_to_normalized_weight = normalizeToMax(map_rate_to_decomposition_weight);
// store for plotting
titles.push_back(feature_hit_seq + " " + String(feature_hit_center_rt));
weight_maps.push_back(map_rate_to_decomposition_weight);
normalized_weight_maps.push_back(map_rate_to_normalized_weight);
correlation_maps.push_back(map_rate_to_correlation_score);
}
OPENMS_LOG_INFO << "Spectra with / without isotopic peaks " << spectrum_with_isotopic_peaks << "/" << spectrum_with_no_isotopic_peaks << endl;
if (nPSMs == 0)
{
OPENMS_LOG_ERROR << "No assigned identifications found in featureXML. Did you forget to run IDMapper?" << endl;
return INCOMPATIBLE_INPUT_DATA;
}
if (sip_peptides.empty())
{
OPENMS_LOG_ERROR << "No peptides passing the incorporation threshold found." << endl;
return INCOMPATIBLE_INPUT_DATA;
}
// copy meta information
PeakMap debug_exp = peak_map;
debug_exp.clear(false);
vector<vector<SIPPeptide> > sippeptide_clusters; // vector of cluster
if (cluster_flag)
{
if (debug_level > 0)
{
OPENMS_LOG_INFO << "Determine cluster center of RIAs: " << endl;
}
vector<double> cluster_center(MetaProSIPClustering::getRIAClusterCenter(sip_peptides));
if (debug_level > 0)
{
OPENMS_LOG_INFO << "Assigning peptides to cluster: " << endl;
}
sippeptide_clusters = MetaProSIPClustering::clusterSIPPeptides(cluster_center, sip_peptides);
// remove cluster with no assigned SIP peptide (spurious highpoints giving rise to cluster may happen because of small bumps caused by interpolation)
vector<vector<SIPPeptide> >::iterator scit = sippeptide_clusters.begin();
vector<double>::iterator ccit = cluster_center.begin();
while (scit != sippeptide_clusters.end() && ccit != cluster_center.end())
{
if (scit->empty())
{
scit = sippeptide_clusters.erase(scit); // remove cluster of SIP peptides
ccit = cluster_center.erase(ccit); // remove cluster center
}
else
{
++scit;
++ccit;
}
}
if (debug_level > 0)
{
for (Size i = 0; i != sippeptide_clusters.size(); ++i)
{
OPENMS_LOG_INFO << "Cluster: " << (i + 1) << " contains " << sippeptide_clusters[i].size() << " peptides." << endl;
}
}
}
else // data hasn't been clustered so just add all SIP peptides as cluster zero
{
sippeptide_clusters.push_back(sip_peptides);
}
// create group/cluster centric report
if (!out_csv.empty())
{
OPENMS_LOG_INFO << "Create CSV report." << endl;
MetaProSIPReporting::createCSVReport(sippeptide_clusters, out_csv_stream, proteinid_to_description);
}
// create peptide centric report
if (!out_peptide_centric_csv.empty())
{
OPENMS_LOG_INFO << "Creating peptide centric report: " << out_peptide_centric_csv << std::endl;
if (getFlag_("test"))
{
MetaProSIPReporting::createPeptideCentricCSVReport("test_mode_enabled.mzML", file_extension_, sippeptide_clusters, out_peptide_csv_stream, proteinid_to_description, qc_output_directory, file_suffix, report_natural_peptides);
}
else
{
MetaProSIPReporting::createPeptideCentricCSVReport(in_mzml, file_extension_, sippeptide_clusters, out_peptide_csv_stream, proteinid_to_description, qc_output_directory, file_suffix, report_natural_peptides);
}
}
// quality report
if (!qc_output_directory.empty())
{
QString executable = getStringOption_("r_executable").toQString();
// TODO plot merged is now passed as false
MetaProSIPReporting::createQualityReport(tmp_path, qc_output_directory, file_suffix, file_extension_, sippeptide_clusters, n_heatmap_bins, score_plot_y_axis_min, report_natural_peptides, executable);
}
return EXECUTION_OK;
}
};
/// Tool entry point: construct the TOPP tool and delegate to the framework's main().
int main(int argc, const char** argv)
{
  return MetaProSIP().main(argc, argv);
}
///@endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/SimpleSearchEngine.cpp | .cpp | 4,412 | 134 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/SimpleSearchEngineAlgorithm.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/SYSTEM/File.h>
#ifdef _OPENMP
#include <omp.h>
#endif
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_SimpleSearchEngine SimpleSearchEngine
@brief Identifies peptides in MS/MS spectra.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=2> → SimpleSearchEngine →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> any signal-/preprocessing tool @n (in mzML format)</td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_IDFilter or @n any protein/peptide processing tool</td>
</tr>
</table>
</CENTER>
@em This search engine is mainly for educational/benchmarking/prototyping use cases.
It lags behind state-of-the-art search engines in speed and/or quality of results.
@note Currently mzIdentML (mzid) is not directly supported as an input/output format of this tool. Convert mzid files to/from idXML using @ref TOPP_IDFileConverter if necessary.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_SimpleSearchEngine.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_SimpleSearchEngine.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class SimpleSearchEngine :
public TOPPBase
{
public:
SimpleSearchEngine() :
TOPPBase("SimpleSearchEngine",
"Annotates MS/MS spectra using SimpleSearchEngine.",
true)
{
}
protected:
void registerOptionsAndFlags_() override
{
registerInputFile_("in", "<file>", "", "input file ");
setValidFormats_("in", ListUtils::create<String>("mzML"));
registerInputFile_("database", "<file>", "", "input file ");
setValidFormats_("database", ListUtils::create<String>("fasta"));
registerOutputFile_("out", "<file>", "", "output file ");
setValidFormats_("out", ListUtils::create<String>("idXML"));
// put search algorithm parameters at Search: subtree of parameters
Param search_algo_params_with_subsection;
search_algo_params_with_subsection.insert("Search:", SimpleSearchEngineAlgorithm().getDefaults());
registerFullParam_(search_algo_params_with_subsection);
}
ExitCodes main_(int, const char**) override
{
String in = getStringOption_("in");
String database = getStringOption_("database");
String out = getStringOption_("out");
ProgressLogger progresslogger;
progresslogger.setLogType(log_type_);
vector<ProteinIdentification> protein_ids;
PeptideIdentificationList peptide_ids;
SimpleSearchEngineAlgorithm sse;
sse.setParameters(getParam_().copy("Search:", true));
//TODO ??? Why not use the TOPPBase ExitCodes?
// same for OpenPepXL etc. Otherwise please write a proper mapping.
SimpleSearchEngineAlgorithm::ExitCodes e = sse.search(in, database, protein_ids, peptide_ids);
if (e != SimpleSearchEngineAlgorithm::ExitCodes::EXECUTION_OK)
{
return TOPPBase::ExitCodes::INTERNAL_ERROR;
}
// MS path already set in algorithm. Overwrite here so we get something testable
if (getFlag_("test"))
{
// if test mode set, add file without path so we can compare it
protein_ids[0].setPrimaryMSRunPath({"file://" + File::basename(in)});
}
FileHandler().storeIdentifications(out, protein_ids, peptide_ids, {FileTypes::IDXML});
return EXECUTION_OK;
}
};
/// Tool entry point: construct the TOPP tool and delegate to the framework's main().
int main(int argc, const char** argv)
{
  return SimpleSearchEngine().main(argc, argv);
}
///@endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/FeatureFinderMetaboIdent.cpp | .cpp | 15,949 | 370 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/ANALYSIS/OPENSWATH/ChromatogramExtractor.h>
#include <OpenMS/ANALYSIS/OPENSWATH/MRMFeatureFinderScoring.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/SimpleOpenMSSpectraAccessFactory.h>
#include <OpenMS/ANALYSIS/TARGETED/TargetedExperiment.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/CoarseIsotopePatternGenerator.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <OpenMS/FEATUREFINDER/ElutionModelFitter.h>
#include <OpenMS/FEATUREFINDER/FeatureFinderAlgorithmPickedHelperStructs.h>
#include <OpenMS/FEATUREFINDER/FeatureFinderAlgorithmMetaboIdent.h>
#include <OpenMS/SYSTEM/File.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/FORMAT/OMSFileLoad.h>
#include <cmath>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_FeatureFinderMetaboIdent FeatureFinderMetaboIdent
@brief Detects features in MS1 data corresponding to small molecule identifications.
<CENTER>
<table>
<tr>
<td ALIGN="center" BGCOLOR="#EBEBEB"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=2> → FeatureFinderMetaboIdent →</td>
<td ALIGN="center" BGCOLOR="#EBEBEB"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN="center" ROWSPAN=1> @ref TOPP_PeakPickerHiRes (optional) </td>
<td VALIGN="middle" ALIGN="center" ROWSPAN=1> @ref TOPP_TextExporter</td>
</tr>
</table>
</CENTER>
This tool detects quantitative features in MS1 data for a list of targets, typically small molecule/metabolite identifications.
It uses algorithms for targeted data analysis from the OpenSWATH pipeline.
@note This tool is still experimental!
@see @ref TOPP_FeatureFinderIdentification - targeted feature detection based on peptide identifications.
<B>Input format</B>
Spectra are expected in centroided or profile mode. Only MS1 level spectra are considered for feature detection.
The targets to quantify have to be specified in a tab-separated text file that is passed via the @p id parameter.
This file has to start with the following header line, defining its columns:
<pre>
<TT>CompoundName SumFormula Mass Charge RetentionTime RetentionTimeRange IsoDistribution [IonMobility]</TT>
</pre>
Every subsequent line defines a target.
(Except lines starting with "#", which are considered as comments and skipped.)
The following requirements apply:
- @p CompoundName: unique name for the target compound
- @p SumFormula: chemical sum formula (see @ref OpenMS::EmpiricalFormula), optional
- @p Mass: neutral mass; if zero calculated from @p Formula
- @p Charge: charge state, or comma-separated list of multiple charges
- @p RetentionTime: retention time (RT), or comma-separated list of multiple RTs
- @p RetentionTimeRange: RT window around @p RetentionTime for chromatogram extraction, either one value or one per @p RT entry; if zero parameter @p extract:rt_window is used
- @p IsoDistribution: comma-separated list of relative abundances of isotopologues (see @ref OpenMS::IsotopeDistribution); if zero calculated from @p Formula
- @p IonMobility (optional): ion mobility value, or comma-separated list of multiple values (one per RT entry); if not provided or zero, no IM filtering is performed. The extraction window is controlled by parameter @p extract:im_window.
In the simplest case, only @p CompoundName, @p SumFormula, @p Charge and @p RetentionTime need to be given, all other values may be zero.
Every combination of compound (mass), RT and charge defines one target for feature detection.
For ion mobility data, an optional @p IonMobility column can be added to filter extraction by ion mobility.
<B>Ion Mobility Support (experimental)</B>
This tool supports two types of ion mobility data:
@b FAIMS (Field Asymmetric Ion Mobility Spectrometry):
FAIMS data is automatically detected based on compensation voltage (CV) annotations in the mzML file.
The data is split by CV and processed separately for each voltage group.
Features representing the same analyte detected at different CV values are merged by default (controlled by @p faims:merge_features).
No special preparation of the input mzML file is required.
@b Bruker @b TimsTOF (trapped ion mobility):
TimsTOF data requires special preparation of the mzML file. The ion mobility spectra must be concatenated into
single spectra per frame using msconvert with the @p --combineIonMobilitySpectra option:
@code
msconvert input.d --mzML --combineIonMobilitySpectra -o output_dir
@endcode
The resulting mzML file contains one spectrum per frame with ion mobility values stored per peak.
Ion mobility values for targets can be specified in the @p IonMobility column of the input TSV file.
The extraction window is controlled by @p extract:im_window.
<B>Output format</B>
The main output (parameter @p out) is a featureXML file containing the detected features, with annotations in meta data entries.
This file can be visualized in TOPPView - perhaps most usefully as a layer on top of the LC-MS data that gave rise to it.
Compound annotations of features (@p Name entries from the @p id input) can be shown by clicking the "Show feature annotation" button in the tool bar and selecting "Label meta data".
Positions of targets for which no feature was detected can be shown by clicking the "Show unassigned peptide identifications" button and selecting "Show label meta data".
To export the data from the featureXML file to a tabular text file (CSV), use @ref TOPP_TextExporter with the options @p no_ids and <TT>feature:add_metavalues 0</TT> (to include all meta data annotations).
In the result, the information from the @p CompoundName, @p SumFormula, @p Charge and @p RetentionTime columns from the input will be in the @p label, @p sum_formula, @p charge and @p expected_rt columns, respectively.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_FeatureFinderMetaboIdent.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_FeatureFinderMetaboIdent.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPFeatureFinderMetaboIdent :
public TOPPBase
{
public:
  /// Constructor: registers the tool name and one-line description with the TOPP framework.
  TOPPFeatureFinderMetaboIdent() :
    TOPPBase("FeatureFinderMetaboIdent", "Detects features in MS1 data based on metabolite identifications.")
  {
  }
protected:
  /// Register input/output file options and expose all parameters of
  /// FeatureFinderAlgorithmMetaboIdent on the command line / in the INI file.
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Input file: LC-MS raw data");
    setValidFormats_("in", ListUtils::create<String>("mzML"));
    registerInputFile_("id", "<file>", "", "Input file: Metabolite identifications");
    setValidFormats_("id", ListUtils::create<String>("tsv"));
    registerOutputFile_("out", "<file>", "", "Output file: Features");
    setValidFormats_("out", ListUtils::create<String>("featureXML"));
    // optional outputs: assay library, extracted chromatograms, RT transformation
    registerOutputFile_("lib_out", "<file>", "", "Output file: Assay library", false);
    setValidFormats_("lib_out", ListUtils::create<String>("traML"));
    registerOutputFile_("chrom_out", "<file>", "", "Output file: Chromatograms", false);
    setValidFormats_("chrom_out", ListUtils::create<String>("mzML"));
    registerOutputFile_("trafo_out", "<file>", "", "Output file: Retention times (expected vs. observed)", false);
    setValidFormats_("trafo_out", ListUtils::create<String>("trafoXML"));
    // last argument 'true' marks this flag as advanced
    registerFlag_("force", "Force processing of files with no MS1 spectra", true);
    Param ffmetaboident_params;
    ffmetaboident_params.insert("", FeatureFinderAlgorithmMetaboIdent().getParameters());
    registerFullParam_(ffmetaboident_params); // register algorithm parameters as command line parameters
  }
ProgressLogger prog_log_; ///< progress logger
/// Read input file with information about targets
vector<FeatureFinderAlgorithmMetaboIdent::FeatureFinderMetaboIdentCompound> readTargets_(const String& in_path)
{
vector<FeatureFinderAlgorithmMetaboIdent::FeatureFinderMetaboIdentCompound> metaboIdentTable;
// Base required header (optional column IonMobility may follow)
const string header =
"CompoundName\tSumFormula\tMass\tCharge\tRetentionTime\tRetentionTimeRange\tIsoDistribution";
ifstream source(in_path.c_str());
if (!source.is_open())
{
throw Exception::FileNotReadable(__FILE__, __LINE__,
OPENMS_PRETTY_FUNCTION, in_path);
}
string line;
getline(source, line);
if (!String(line).hasPrefix(header))
{
String msg = "expected header line starting with: '" + header + "'";
throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
line, msg);
}
// Check for optional IM columns in header
bool has_im_columns = String(line).hasSubstring("IonMobility");
Size line_count = 1;
set<String> names;
while (getline(source, line))
{
line_count++;
if (line[0] == '#') continue; // skip comments
vector<String> parts = ListUtils::create<String>(line, '\t'); // split
if (parts.size() < 7)
{
OPENMS_LOG_ERROR
<< "Error: Expected at least 7 tab-separated fields, found only "
<< parts.size() << " in line " << line_count
<< " - skipping this line." << endl;
continue;
}
String name = parts[0];
if (name.empty())
{
OPENMS_LOG_ERROR << "Error: Empty name field in input line "
<< line_count << " - skipping this line." << endl;
continue;
}
if (!names.insert(name).second) // @TODO: is this check necessary?
{
OPENMS_LOG_ERROR << "Error: Duplicate name '" << name
<< "' in input line " << line_count
<< " - skipping this line." << endl;
continue;
}
// Parse optional IM column (column 8, 0-indexed 7)
vector<double> ion_mobilities;
if (has_im_columns && parts.size() > 7)
{
ion_mobilities = ListUtils::create<double>(parts[7]);
}
metaboIdentTable.push_back(FeatureFinderAlgorithmMetaboIdent::FeatureFinderMetaboIdentCompound(name,
parts[1],
parts[2].toDouble(),
ListUtils::create<Int>(parts[3]),
ListUtils::create<double>(parts[4]),
ListUtils::create<double>(parts[5]),
ListUtils::create<double>(parts[6]),
ion_mobilities));
}
return metaboIdentTable;
}
/// Main function
/// Runs targeted feature detection for the compounds listed in 'id' on the
/// LC-MS data in 'in'; writes a featureXML plus optional side outputs
/// (assay library, chromatograms, RT transformation).
ExitCodes main_(int, const char**) override
{
  //-------------------------------------------------------------
  // parameter handling
  //-------------------------------------------------------------
  String in = getStringOption_("in");               // input LC-MS data (mzML)
  String id = getStringOption_("id");               // target compound table
  String out = getStringOption_("out");             // output featureXML
  String lib_out = getStringOption_("lib_out");     // optional: assay library (TraML)
  String chrom_out = getStringOption_("chrom_out"); // optional: chromatograms (mzML)
  String trafo_out = getStringOption_("trafo_out"); // optional: RT transformation
  bool force = getFlag_("force");                   // if set, proceed even without MS1 scans
  prog_log_.setLogType(log_type_);
  //-------------------------------------------------------------
  // load input
  //-------------------------------------------------------------
  OPENMS_LOG_INFO << "Loading targets and creating assay library..." << endl;
  auto table = readTargets_(id);
  // Prepare algorithm parameters: restrict the tool's parameters to the
  // subset understood by the algorithm
  auto tool_parameter = getParam_().copySubset(FeatureFinderAlgorithmMetaboIdent().getDefaults());
  tool_parameter.setValue("debug", debug_level_); // pass down debug level
  OPENMS_LOG_INFO << "Loading input LC-MS data..." << endl;
  PeakMap exp;
  FileHandler mzml;
  mzml.getOptions().addMSLevel(1); // only MS1 spectra are loaded
  mzml.loadExperiment(in, exp, {FileTypes::MZML});
  if (exp.empty() && !force)
  {
    OPENMS_LOG_ERROR << "Error: No MS1 scans in '"
                     << in << "' - aborting." << endl;
    return INCOMPATIBLE_INPUT_DATA;
  }
  //-------------------------------------------------------------
  // Run feature detection (FAIMS handling is done internally)
  //-------------------------------------------------------------
  FeatureFinderAlgorithmMetaboIdent ff_mident;
  ff_mident.setParameters(tool_parameter);
  ff_mident.setMSData(std::move(exp)); // 'exp' is moved-from after this line; do not reuse it
  FeatureMap features;
  ff_mident.run(table, features, in);
  // annotate "spectra_data" metavalue and ensure unique IDs
  if (getFlag_("test"))
  {
    // if test mode set, add file without path so we can compare it
    features.setPrimaryMSRunPath({"file://" + File::basename(in)});
  }
  features.ensureUniqueId();
  addDataProcessing_(features, getProcessingInfo_(DataProcessing::QUANTITATION));
  // optional side output: extracted chromatograms
  if (!chrom_out.empty())
  {
    PeakMap chrom_data = ff_mident.getChromatograms();
    addDataProcessing_(chrom_data, getProcessingInfo_(DataProcessing::FILTERING));
    FileHandler().storeExperiment(chrom_out, chrom_data, {FileTypes::MZML});
  }
  //-------------------------------------------------------------
  // write output
  //-------------------------------------------------------------
  OPENMS_LOG_INFO << "Writing final results..." << endl;
  FileHandler().storeFeatures(out, features, {FileTypes::FEATUREXML});
  // write transition library in TraML format (empty for multi-FAIMS data)
  if (!lib_out.empty())
  {
    FileHandler().storeTransitions(lib_out, ff_mident.getLibrary(), {FileTypes::TRAML});
  }
  // write expected vs. observed retention times
  if (!trafo_out.empty())
  {
    FileHandler().storeTransformations(trafo_out, ff_mident.getTransformations(), {FileTypes::TRANSFORMATIONXML});
  }
  //-------------------------------------------------------------
  // statistics
  //-------------------------------------------------------------
  // unassigned peptide IDs correspond to targets for which no feature was found
  Size n_missing = features.getUnassignedPeptideIdentifications().size();
  OPENMS_LOG_INFO << "\nSummary statistics:\n"
                  << table.size() << " targets specified\n"
                  << features.size() << " features found\n"
                  << ff_mident.getNShared() << " features with multiple target annotations\n"
                  << n_missing << " targets without features";
  const Size n_examples = 5; // print at most this many missing targets explicitly
  const TargetedExperiment& library = ff_mident.getLibrary();
  if (n_missing && !library.getCompounds().empty())
  {
    OPENMS_LOG_INFO << ":";
    for (Size i = 0;
         ((i < features.getUnassignedPeptideIdentifications().size()) &&
          (i < n_examples)); ++i)
    {
      const PeptideIdentification& pep_id =
        features.getUnassignedPeptideIdentifications()[i];
      // "PeptideRef" links the unassigned ID back to its library compound
      if (pep_id.metaValueExists("PeptideRef"))
      {
        const TargetedExperiment::Compound& compound =
          library.getCompoundByRef(pep_id.getMetaValue("PeptideRef"));
        OPENMS_LOG_INFO << "\n- " << FeatureFinderAlgorithmMetaboIdent::prettyPrintCompound(compound);
      }
    }
    if (n_missing > n_examples)
    {
      OPENMS_LOG_INFO << "\n- ... (" << n_missing - n_examples << " more)";
    }
  }
  OPENMS_LOG_INFO << "\n" << endl;
  return EXECUTION_OK;
}
};
/// Tool entry point: construct the TOPP tool and delegate to the framework's main().
int main(int argc, const char** argv)
{
  return TOPPFeatureFinderMetaboIdent().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/IDExtractor.cpp | .cpp | 9,339 | 244 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Nico Pfeifer $
// --------------------------------------------------------------------------
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FORMAT/FASTAFile.h>
#include <OpenMS/METADATA/ProteinIdentification.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/CHEMISTRY/EnzymaticDigestion.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <algorithm>
#include <iostream>
#include <map>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_IDExtractor IDExtractor
@brief Extracts 'n' peptides randomly or best 'n' from idXML files.
Input and output format are 'idXML'. The tools allows you to extract subsets of peptides
from idXML files.
@note Currently mzIdentML (mzid) is not directly supported as an input/output format of this tool. Convert mzid files to/from idXML using @ref TOPP_IDFileConverter if necessary.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_IDExtractor.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_IDExtractor.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPIDExtractor :
public TOPPBase
{
public:
TOPPIDExtractor() :
TOPPBase("IDExtractor", "Extracts 'n' peptides randomly or best 'n' from idXML files.")
{
}
static bool compareIDsWithScores(pair<double, PeptideIdentification>& a, pair<double, PeptideIdentification>& b)
{
if (a.second.isHigherScoreBetter())
{
return a.first > b.first;
}
else
{
return a.first < b.first;
}
}
protected:
void registerOptionsAndFlags_() override
{
registerInputFile_("in", "<file>", "", "input file");
setValidFormats_("in", ListUtils::create<String>("idXML"));
registerOutputFile_("out", "<file>", "", "output file");
setValidFormats_("out", ListUtils::create<String>("idXML"));
registerIntOption_("number_of_peptides", "<int>", 10, "Number of randomly chosen peptides", false);
setMinInt_("number_of_peptides", 1);
registerIntOption_("number_of_rand_invokations", "<int>", 0, "Number of rand invocations before random draw (basically a seed)", false);
setMinInt_("number_of_rand_invokations", 0);
registerFlag_("best_hits", "If this flag is set the best n peptides are chosen.");
}
ExitCodes main_(int, const char**) override
{
vector<ProteinIdentification> protein_identifications;
vector<ProteinIdentification> chosen_protein_identifications;
PeptideIdentificationList identifications;
PeptideIdentificationList chosen_identifications;
vector<Size> indices;
vector<PeptideHit> temp_peptide_hits;
vector<ProteinHit> temp_protein_hits;
vector<ProteinHit> chosen_protein_hits;
map<String, PeptideIdentificationList > identifiers;
PeptideIdentification temp_identification;
vector<String> chosen_ids;
vector<pair<double, PeptideIdentification> > identifications_with_scores;
vector<pair<double, PeptideIdentification> >::iterator it = identifications_with_scores.begin();
PeptideIdentificationList temp_identifications;
protein_identifications.push_back(ProteinIdentification());
//-------------------------------------------------------------
// parsing parameters
//-------------------------------------------------------------
String inputfile_name = getStringOption_("in");
String outputfile_name = getStringOption_("out");
Size number_of_peptides = getIntOption_("number_of_peptides");
Size number_of_rand_invokations = getIntOption_("number_of_rand_invokations");
bool best_hits = getFlag_("best_hits");
//-------------------------------------------------------------
// reading input
//-------------------------------------------------------------
FileHandler().loadIdentifications(inputfile_name, protein_identifications, identifications, {FileTypes::IDXML});
if (number_of_peptides > identifications.size())
{
writeLogError_("Number of existing peptides smaller than number of chosen peptides. Aborting!");
return ILLEGAL_PARAMETERS;
}
//-------------------------------------------------------------
// calculations
//-------------------------------------------------------------
if (best_hits)
{
for (Size i = 0; i < identifications.size(); ++i)
{
identifications_with_scores.emplace_back(identifications[i].getHits()[0].getScore(), identifications[i]);
}
sort(identifications_with_scores.begin(), identifications_with_scores.end(), TOPPIDExtractor::compareIDsWithScores);
it = identifications_with_scores.begin();
while (it != identifications_with_scores.end() && chosen_ids.size() < number_of_peptides)
{
if (find(chosen_ids.begin(), chosen_ids.end(), it->second.getHits()[0].getSequence().toString()) == chosen_ids.end())
{
chosen_ids.push_back(it->second.getHits()[0].getSequence().toString());
chosen_identifications.push_back(it->second);
if (identifiers.find(it->second.getIdentifier()) == identifiers.end())
{
temp_identifications.clear();
}
else
{
temp_identifications = identifiers[it->second.getIdentifier()];
identifiers.erase(it->second.getIdentifier());
}
temp_identifications.push_back(it->second);
identifiers.insert(make_pair(it->second.getIdentifier(), temp_identifications));
}
++it;
}
}
else
{
indices.resize(identifications.size(), 0);
for (Size i = 0; i < identifications.size(); ++i)
{
indices[i] = i;
}
Math::RandomShuffler r(number_of_rand_invokations);
r.portable_random_shuffle(indices.begin(), indices.end());
Size index = 0;
while (chosen_ids.size() < number_of_peptides && index < indices.size())
{
if (!identifications[indices[index]].getHits().empty() && find(chosen_ids.begin(), chosen_ids.end(), identifications[indices[index]].getHits()[0].getSequence().toString()) == chosen_ids.end())
{
chosen_ids.push_back(identifications[indices[index]].getHits()[0].getSequence().toString());
chosen_identifications.push_back(identifications[indices[index]]);
if (identifiers.find(identifications[indices[index]].getIdentifier()) == identifiers.end())
{
temp_identifications.clear();
}
else
{
temp_identifications = identifiers[identifications[indices[index]].getIdentifier()];
identifiers.erase(identifications[indices[index]].getIdentifier());
}
temp_identifications.push_back(identifications[indices[index]]);
identifiers.insert(make_pair(identifications[indices[index]].getIdentifier(), temp_identifications));
}
++index;
}
}
if (chosen_ids.size() < number_of_peptides)
{
writeLogError_("Number of existing unique peptides (" + String(chosen_ids.size()) + ") smaller than number of chosen peptides. Aborting!");
return ILLEGAL_PARAMETERS;
}
for (Size i = 0; i < protein_identifications.size(); ++i)
{
temp_protein_hits = protein_identifications[i].getHits();
chosen_protein_hits.clear();
if (identifiers.find(protein_identifications[i].getIdentifier()) != identifiers.end())
{
temp_identifications = identifiers[protein_identifications[i].getIdentifier()];
for (Size j = 0; j < temp_protein_hits.size(); ++j)
{
bool already_chosen = false;
for (Size k = 0; k < temp_identifications.size(); ++k)
{
temp_peptide_hits.clear();
set<String> accession;
accession.insert(temp_protein_hits[j].getAccession());
temp_peptide_hits = PeptideIdentification::getReferencingHits(temp_identifications[k].getHits(), accession);
if (!temp_peptide_hits.empty() && !already_chosen)
{
chosen_protein_hits.push_back(temp_protein_hits[j]);
already_chosen = true;
}
}
}
if (chosen_protein_hits.empty())
{
cout << "No protein hits found for " << protein_identifications[i].getIdentifier()
<< " although having " << temp_identifications.size() << " ids" << endl;
}
protein_identifications[i].setHits(chosen_protein_hits);
chosen_protein_identifications.push_back(protein_identifications[i]);
}
}
FileHandler().storeIdentifications(outputfile_name,
chosen_protein_identifications,
chosen_identifications,
{FileTypes::IDXML});
return EXECUTION_OK;
}
};
/// Tool entry point: construct the TOPP tool and delegate to the framework's main().
int main(int argc, const char** argv)
{
  return TOPPIDExtractor().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/IDScoreSwitcher.cpp | .cpp | 4,403 | 112 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/ANALYSIS/ID/IDScoreSwitcherAlgorithm.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/METADATA/ProteinIdentification.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_IDScoreSwitcher IDScoreSwitcher
@brief Switches between different scores of peptide hits (PSMs) or protein hits in identification data.
In the idXML file format and in OpenMS' internal representation of identification data, every peptide spectrum match (PSM, "peptide hit") and every protein hit can be associated with a single numeric (quality) score of an arbitrary type. However, database search engines that generate PSMs or tools for post-processing of identification data may assign multiple scores of different types to each PSM/protein. These scores can be captured as meta data associated with the PSMs/proteins (in idXML: "UserParam" elements), but they are typically not considered by TOPP tools that utilize the scores. This utility allows to switch between "primary" scores and scores stored as meta values.
By default this tool operates on PSM scores; to consider protein scores instead, set the @p proteins flag. The meta value that is supposed to replace the PSM/protein score - given by parameter @p new_score - has to be numeric (type "float") and exist for every peptide or protein hit, respectively. The old score will be stored as a meta value, the name for which is given by the parameter @p old_score. It is an error if a meta value with this name already exists for any hit, unless that meta value already stores the same score.
@note Currently mzIdentML (mzid) is not directly supported as an input/output format of this tool. Convert mzid files to/from idXML using @ref TOPP_IDFileConverter if necessary.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_IDScoreSwitcher.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_IDScoreSwitcher.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPIDScoreSwitcher :
  public TOPPBase
{
public:
  /// Constructor: registers the tool name and description with the TOPP framework.
  TOPPIDScoreSwitcher() :
    TOPPBase("IDScoreSwitcher", "Switches between different scores of peptide or protein hits in identification data")
  {
  }

protected:
  /// Algorithm instance; provides the parameter defaults and performs the score switching.
  IDScoreSwitcherAlgorithm switcher_{};

  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Input file");
    setValidFormats_("in", ListUtils::create<String>("idXML"));
    registerOutputFile_("out", "<file>", "", "Output file");
    setValidFormats_("out", ListUtils::create<String>("idXML"));
    // expose every algorithm parameter (incl. 'proteins', 'new_score', ...) on the tool level
    registerFullParam_(switcher_.getParameters());
  }

  /// Loads the idXML, switches scores on either the protein or the PSM level,
  /// and stores the result.
  ExitCodes main_(int, const char**) override
  {
    // hand the tool-level parameter values back to the algorithm
    switcher_.setParameters(getParam_().copySubset(switcher_.getParameters()));

    const String input_file = getStringOption_("in");
    const String output_file = getStringOption_("out");
    const bool on_protein_level = getFlag_("proteins"); // from full param of IDScoreSwitcherAlgorithm

    vector<ProteinIdentification> protein_ids;
    PeptideIdentificationList peptide_ids;
    FileHandler().loadIdentifications(input_file, protein_ids, peptide_ids, {FileTypes::IDXML});

    Size n_switched = 0;
    if (on_protein_level)
    {
      for (ProteinIdentification& prot : protein_ids)
      {
        switcher_.switchScores<ProteinIdentification>(prot, n_switched);
      }
    }
    else
    {
      for (PeptideIdentification& pep : peptide_ids)
      {
        switcher_.switchScores<PeptideIdentification>(pep, n_switched);
      }
    }

    FileHandler().storeIdentifications(output_file, protein_ids, peptide_ids, {FileTypes::IDXML});
    OPENMS_LOG_INFO << "Successfully switched " << n_switched << " "
                    << (on_protein_level ? "protein" : "PSM") << " scores." << endl;
    return EXECUTION_OK;
  }
};
/// Tool entry point: construct the TOPP tool and delegate to the framework's main().
int main(int argc, const char** argv)
{
  return TOPPIDScoreSwitcher().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/IDRipper.cpp | .cpp | 6,328 | 162 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg$
// $Authors: Immanuel Luhn, Leon Kuchenbecker$
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/IDRipper.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/SYSTEM/File.h>
#include <QDir>
using std::map;
using std::pair;
using std::vector;
using namespace OpenMS;
//-------------------------------------------------------------
// Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_IDRipper IDRipper
@brief IDRipper splits the protein/peptide identifications of an idXML file into several idXML files according their annotated file origin.
<center>
<table>
<tr>
<th ALIGN = "center"> potential predecessor tools </td>
<td VALIGN="middle" ROWSPAN=3> → IDRipper→</td>
<th ALIGN = "center"> potential successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN ="center" ROWSPAN=1> @ref TOPP_IDFilter</td>
<td VALIGN="middle" ALIGN ="center" ROWSPAN=1> @ref TOPP_IDMapper</td>
</tr>
</table>
</center>
<B>Example</B>
<p>Assuming each peptide identification in a given idXML file is annotated with its file origin (e.g. IDRipper_test.idXML) :</p>
@p <tt><userParam type="string" name="file_origin" value="IDMerger1_test.idXML"/></tt> or <br />
@p <tt><userParam type="string" name="file_origin" value="IDMerger2_test.idXML"/></tt>
<p>Obviously the file contains protein/peptide identifications of IDMerger1_test.idXML and IDMerger2_test.idXML.</p>
<p>Calling IDRipper with an input file (here: @p -in IDRipper_test.idXML) and an output directory (via @p out) will
result in two idXML files stored in the specified directory and named according to their file origin.</p>
<p>In theory, merging files with @p IDMerger and splitting the resulting file with @p IDRipper will result in the original input files.
<B>NOTE: The meta value "file_origin" is removed by the <tt>IDSplitter</tt>!</B>
@note Currently mzIdentML (mzid) is not directly supported as an input/output format of this tool. Convert mzid files to/from idXML using @ref TOPP_IDFileConverter if necessary.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_IDRipper.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_IDRipper.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPIDRipper : public TOPPBase
{
public:
  /// Constructor: registers the tool name and description.
  TOPPIDRipper() : TOPPBase("IDRipper", "Split protein/peptide identification file into several files according to identification run and annotated file origin.")
  {
  }

protected:
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Input file, in which the protein/peptide identifications must be tagged with 'file_origin'");
    setValidFormats_("in", ListUtils::create<String>("idXML"));
    registerOutputPrefix_("out", "<directory>", "", "Path to the output directory to write the ripped files to.", true, false);
    registerFlag_("numeric_filenames", "Do not infer output filenames from spectra_data or file_origin but use the input filename with numeric suffixes.");
    registerFlag_("split_ident_runs", "Split different identification runs into separate files.");
  }

  /// Splits the identifications of the input idXML into one idXML per annotated
  /// file origin (and optionally per identification run) and stores them in the
  /// given output directory.
  ExitCodes main_(int, const char**) override
  {
    //-------------------------------------------------------------
    // parameter handling
    //-------------------------------------------------------------
    String file_name = getStringOption_("in");
    String out_dir = getStringOption_("out");
    bool numeric_filenames = getFlag_("numeric_filenames");
    bool split_ident_runs = getFlag_("split_ident_runs");
    String output_directory = QFileInfo(out_dir.toQString()).absoluteFilePath().toStdString();
    //-------------------------------------------------------------
    // calculations
    //-------------------------------------------------------------
    vector<ProteinIdentification> proteins;
    PeptideIdentificationList peptides;
    FileHandler().loadIdentifications(file_name, proteins, peptides, {FileTypes::IDXML});
    // ensure protein and peptide identifications are present, otherwise we don't have to rip anything anyhow
    if (proteins.empty() || peptides.empty())
    {
      throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "idXML file has to store protein and peptide identifications!");
    }
    IDRipper::RipFileMap ripped;
    // rip the idXML-file into several idXML according to the annotated file origin
    IDRipper ripper;
    ripper.rip(ripped, proteins, peptides, numeric_filenames, split_ident_runs);
    //-------------------------------------------------------------
    // writing output
    //-------------------------------------------------------------
    for (const auto& rip_entry : ripped) // range-for instead of explicit iterator loop
    {
      const IDRipper::RipFileIdentifier& rfi = rip_entry.first;
      const IDRipper::RipFileContent& rfc = rip_entry.second;
      QString output = output_directory.toQString();
      String out_fname;
      if (numeric_filenames)
      {
        // name derived from the input file plus numeric run/origin suffixes
        String s_ident_run_idx = split_ident_runs ? '_' + String(rfi.ident_run_idx) : "";
        String s_file_origin_idx = '_' + String(rfi.file_origin_idx);
        out_fname = QFileInfo(file_name.toQString()).completeBaseName().toStdString() + s_ident_run_idx + s_file_origin_idx + ".idXML";
      }
      else
      {
        // name derived from the annotated file origin / spectra_data
        out_fname = QFileInfo(rfi.out_basename.toQString()).completeBaseName().toStdString() + ".idXML";
      }
      String out = QDir::toNativeSeparators(output.append(QString("/")).append(out_fname.toQString())).toStdString();
      OPENMS_LOG_INFO << "Storing file: '" << out << "'." << std::endl;
      // (removed an unused 'QDir dir(...)' local that was constructed here every iteration)
      FileHandler().storeIdentifications(out, rfc.prot_idents, rfc.pep_idents, {FileTypes::IDXML});
    }
    return EXECUTION_OK;
  }
};
/// Tool entry point: construct the TOPP tool and delegate to the framework's main().
int main(int argc, const char** argv)
{
  return TOPPIDRipper().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/TargetedFileConverter.cpp | .cpp | 8,008 | 226 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: George Rosenberger, Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/TransitionTSVFile.h>
#include <OpenMS/ANALYSIS/OPENSWATH/TransitionPQPFile.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/CONCEPT/ProgressLogger.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FORMAT/FileTypes.h>
using namespace OpenMS;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_TargetedFileConverter TargetedFileConverter
@brief Converts different spectral libraries / transition files for targeted proteomics and metabolomics analysis.
Can convert multiple formats to and from TraML (standardized transition format). The following formats are supported:
<ul>
<li> @ref OpenMS::TraMLFile "TraML" </li>
<li> @ref OpenMS::TransitionTSVFile "OpenSWATH TSV transition lists" </li>
<li> @ref OpenMS::TransitionPQPFile "OpenSWATH PQP SQLite files" </li>
<li> SpectraST MRM transition lists </li>
<li> Skyline transition lists </li>
<li> Spectronaut transition lists </li>
</ul>
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_TargetedFileConverter.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_TargetedFileConverter.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPTargetedFileConverter :
  public TOPPBase
{
public:
  /// Constructor: registers the tool name and description.
  TOPPTargetedFileConverter() :
    TOPPBase("TargetedFileConverter", "Converts different transition files for targeted proteomics / metabolomics analysis.")
  {
  }

protected:
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Input file to convert.\n "
                                           "See http://www.openms.de/current_doxygen/html/TOPP_TargetedFileConverter.html for format of OpenSWATH transition TSV file or SpectraST MRM file.");
    registerStringOption_("in_type", "<type>", "", "input file type -- default: determined from file extension or content\n", false);
    // 'mrm' (SpectraST) is accepted as input only; the output list below drops it
    StringList formats{"tsv", "mrm" ,"pqp", "TraML"};
    setValidFormats_("in", formats);
    setValidStrings_("in_type", formats);
    formats = { "tsv", "pqp", "TraML" }; // valid OUTPUT formats (no 'mrm')
    registerOutputFile_("out", "<file>", "", "Output file");
    setValidFormats_("out", formats);
    registerStringOption_("out_type", "<type>", "", "Output file type -- default: determined from file extension or content\nNote: not all conversion paths work or make sense.", false);
    setValidStrings_("out_type", formats);
    registerSubsection_("algorithm", "Algorithm parameters section");
    registerFlag_("legacy_traml_id", "PQP to TraML: Should legacy TraML IDs be used?", true); // last arg: advanced flag
  }

  /// Defaults for the 'algorithm' subsection are taken from the TSV reader.
  Param getSubsectionDefaults_(const String&) const override
  {
    return TransitionTSVFile().getDefaults();
  }

  /// Determines input/output types, then converts either via the memory-efficient
  /// "Light" representation (TSV/MRM/PQP -> TSV/PQP) or via the full
  /// TargetedExperiment ("heavy") path when TraML is involved.
  ExitCodes main_(int, const char**) override
  {
    FileHandler fh;
    //input file type
    String in = getStringOption_("in");
    FileTypes::Type in_type = FileTypes::nameToType(getStringOption_("in_type"));
    if (in_type == FileTypes::UNKNOWN)
    {
      // not given explicitly: detect from file extension or content
      in_type = fh.getType(in);
      writeDebug_(String("Input file type: ") + FileTypes::typeToName(in_type), 2);
    }
    if (in_type == FileTypes::UNKNOWN)
    {
      writeLogError_("Error: Could not determine input file type!");
      return PARSE_ERROR;
    }
    //output file names and types
    String out = getStringOption_("out");
    FileTypes::Type out_type = FileTypes::nameToType(getStringOption_("out_type"));
    if (out_type == FileTypes::UNKNOWN)
    {
      out_type = fh.getTypeByFileName(out);
    }
    if (out_type == FileTypes::UNKNOWN)
    {
      writeLogError_("Error: Could not determine output file type!");
      return PARSE_ERROR;
    }
    bool legacy_traml_id = getFlag_("legacy_traml_id");
    //---------------------------------------------------------------------------
    // Start Conversion
    //---------------------------------------------------------------------------
    // Use memory-efficient Light path for TSV/PQP → TSV/PQP conversions
    bool use_light_path = (in_type == FileTypes::TSV || in_type == FileTypes::MRM || in_type == FileTypes::PQP)
                          && (out_type == FileTypes::TSV || out_type == FileTypes::PQP);
    if (use_light_path)
    {
      // Memory-efficient Light path for TSV/PQP workflows
      OpenSwath::LightTargetedExperiment light_exp;
      if (in_type == FileTypes::TSV || in_type == FileTypes::MRM)
      {
        Param reader_parameters = getParam_().copy("algorithm:", true);
        TransitionTSVFile tsv_reader;
        tsv_reader.setLogType(log_type_);
        tsv_reader.setParameters(reader_parameters);
        tsv_reader.convertTSVToTargetedExperiment(in.c_str(), in_type, light_exp);
      }
      else if (in_type == FileTypes::PQP)
      {
        TransitionPQPFile pqp_reader;
        Param reader_parameters = getParam_().copy("algorithm:", true);
        pqp_reader.setLogType(log_type_);
        pqp_reader.setParameters(reader_parameters);
        // Light path uses TRAML_ID (legacy_traml_id=true) to preserve original string identifiers
        pqp_reader.convertPQPToTargetedExperiment(in.c_str(), light_exp, true);
      }
      if (out_type == FileTypes::TSV)
      {
        TransitionTSVFile tsv_writer;
        tsv_writer.setLogType(log_type_);
        tsv_writer.convertLightTargetedExperimentToTSV(out.c_str(), light_exp);
      }
      else if (out_type == FileTypes::PQP)
      {
        TransitionPQPFile pqp_writer;
        pqp_writer.setLogType(log_type_);
        pqp_writer.convertLightTargetedExperimentToPQP(out.c_str(), light_exp);
      }
    }
    else
    {
      // Heavy path for TraML conversions (maintains full metadata)
      TargetedExperiment targeted_exp;
      if (in_type == FileTypes::TSV || in_type == FileTypes::MRM)
      {
        Param reader_parameters = getParam_().copy("algorithm:", true);
        TransitionTSVFile tsv_reader;
        tsv_reader.setLogType(log_type_);
        tsv_reader.setParameters(reader_parameters);
        tsv_reader.convertTSVToTargetedExperiment(in.c_str(), in_type, targeted_exp);
        tsv_reader.validateTargetedExperiment(targeted_exp);
      }
      else if (in_type == FileTypes::PQP)
      {
        TransitionPQPFile pqp_reader;
        Param reader_parameters = getParam_().copy("algorithm:", true);
        pqp_reader.setLogType(log_type_);
        pqp_reader.setParameters(reader_parameters);
        pqp_reader.convertPQPToTargetedExperiment(in.c_str(), targeted_exp, legacy_traml_id);
        pqp_reader.validateTargetedExperiment(targeted_exp);
      }
      else if (in_type == FileTypes::TRAML)
      {
        FileHandler().loadTransitions(in, targeted_exp, {FileTypes::TRAML});
      }
      if (out_type == FileTypes::TSV)
      {
        TransitionTSVFile tsv_writer;
        tsv_writer.setLogType(log_type_);
        tsv_writer.convertTargetedExperimentToTSV(out.c_str(), targeted_exp);
      }
      else if (out_type == FileTypes::PQP)
      {
        TransitionPQPFile pqp_writer;
        pqp_writer.setLogType(log_type_);
        pqp_writer.convertTargetedExperimentToPQP(out.c_str(), targeted_exp);
      }
      else if (out_type == FileTypes::TRAML)
      {
        FileHandler().storeTransitions(out, targeted_exp, {FileTypes::TRAML});
      }
    }
    return EXECUTION_OK;
  }
};
/// Tool entry point: construct the TOPP tool and delegate to the framework's main().
int main(int argc, const char** argv)
{
  return TOPPTargetedFileConverter().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/IDRTCalibration.cpp | .cpp | 6,867 | 172 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/CONCEPT/LogStream.h>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_IDRTCalibration IDRTCalibration
@brief Can be used to calibrate the RTs of peptide hits linearly to standards.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=2> → IDRTCalibration →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_CometAdapter (or other ID engines) </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_PeptideIndexer (or other tools operating @n with identifications (in idXML format))</td>
</tr>
</table>
</CENTER>
This tool can be used to linearly align RTs of the idXML-File to a reference. If only calibrant_1_input and
calibrant_2_input are given, the first calibrant will result at RT 0.1 and calibrant_2_input will be at 0.9.
If one wants to align the RTs of this idXML file to the IDs of a reference file one can also give the RTs
of the same calibrant in the reference file (calibrant_1_reference, calibrant_2_reference). If these calibrants
are given, the linear transformation (shift and scale) will be calculated such that calibrant_1_input will
be at the same RT as calibrant_1_reference and calibrant_2_input will
be at the same RT as calibrant_2_reference. This only applies if calibrant_1* has a smaller RT than calibrant_2*.
Otherwise the values are swapped.
@note Currently mzIdentML (mzid) is not directly supported as an input/output format of this tool. Convert mzid files to/from idXML using @ref TOPP_IDFileConverter if necessary.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_IDRTCalibration.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_IDRTCalibration.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPIDRTCalibration :
public TOPPBase
{
public:
/// Constructor: registers the tool name and description with the TOPP framework.
TOPPIDRTCalibration() :
  TOPPBase("IDRTCalibration", "Calibrate RTs of peptide hits linearly to standards.")
{
}
protected:
/// Registers the command-line parameters of the tool.
void registerOptionsAndFlags_() override
{
  registerInputFile_("in", "<file>", "", "input file ");
  setValidFormats_("in", ListUtils::create<String>("idXML"));
  registerOutputFile_("out", "<file>", "", "output file ");
  setValidFormats_("out", ListUtils::create<String>("idXML"));
  // reference RTs default to 0.1 / 0.9; used when no reference file values are given
  registerDoubleOption_("calibrant_1_reference", "<RT>", 0.1, "The RT of the first calibrant in the reference file.", false);
  registerDoubleOption_("calibrant_2_reference", "<RT>", 0.9, "The RT of the second calibrant in the reference file.", false);
  // -1.0 acts as a "not set" sentinel for the mandatory input calibrant RTs
  registerDoubleOption_("calibrant_1_input", "<RT>", -1.0, "The RT of the first calibrant in the input file. Please note that this value needs to be set. The default value -1.0 is not allowed.", false);
  registerDoubleOption_("calibrant_2_input", "<RT>", -1.0, "The RT of the second calibrant in the input file. Please note that this value needs to be set. The default value -1.0 is not allowed.", false);
}
ExitCodes main_(int, const char**) override
{
//-------------------------------------------------------------
// parameter handling
//-------------------------------------------------------------
String in_file = getStringOption_("in");
String out_file = getStringOption_("out");
double rt_calibrant_1_input = getDoubleOption_("calibrant_1_input");
double rt_calibrant_2_input = getDoubleOption_("calibrant_2_input");
double rt_calibrant_1_reference = getDoubleOption_("calibrant_1_reference");
double rt_calibrant_2_reference = getDoubleOption_("calibrant_2_reference");
if (rt_calibrant_1_input == rt_calibrant_2_input)
{
OPENMS_LOG_ERROR << "rt_calibrant_1_input and rt_calibrant_2_input must not have the same value";
return ILLEGAL_PARAMETERS;
}
if (rt_calibrant_1_reference == rt_calibrant_2_reference)
{
OPENMS_LOG_ERROR << "rt_calibrant_1_reference and rt_calibrant_2_reference must not have the same value";
return ILLEGAL_PARAMETERS;
}
if (rt_calibrant_1_reference == -1 || rt_calibrant_2_reference == -1)
{
OPENMS_LOG_ERROR << "rt_calibrant_1_reference and rt_calibrant_2_reference must be set";
return ILLEGAL_PARAMETERS;
}
//-------------------------------------------------------------
// testing whether input and output files are accessible
//-------------------------------------------------------------
if (rt_calibrant_1_input > rt_calibrant_2_input)
{
double temp = rt_calibrant_1_input;
rt_calibrant_1_input = rt_calibrant_2_input;
rt_calibrant_2_input = temp;
}
if (rt_calibrant_1_reference > rt_calibrant_2_reference)
{
double temp = rt_calibrant_1_reference;
rt_calibrant_1_reference = rt_calibrant_2_reference;
rt_calibrant_2_reference = temp;
}
//-------------------------------------------------------------
// calculations
//-------------------------------------------------------------
FileHandler file;
vector<ProteinIdentification> protein_identifications;
PeptideIdentificationList identifications;
file.loadIdentifications(in_file, protein_identifications, identifications, {FileTypes::IDXML});
for (Size i = 0; i < identifications.size(); ++i)
{
if (identifications[i].hasRT())
{
double temp_rt = identifications[i].getRT();
temp_rt = (temp_rt - rt_calibrant_1_input) / (rt_calibrant_2_input - rt_calibrant_1_input)
* (rt_calibrant_2_reference - rt_calibrant_1_reference) + rt_calibrant_1_reference;
identifications[i].setRT(temp_rt);
}
}
//-------------------------------------------------------------
// writing output
//-------------------------------------------------------------
file.storeIdentifications(out_file,
protein_identifications,
identifications, {FileTypes::IDXML});
return EXECUTION_OK;
}
};
int main(int argc, const char** argv)
{
  // construct the tool and hand control to TOPPBase::main()
  return TOPPIDRTCalibration().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/IonMobilityBinning.cpp | .cpp | 6,589 | 178 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/IONMOBILITY/IMDataConverter.h>
#include <OpenMS/IONMOBILITY/FAIMSHelper.h>
#include <iomanip>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_IonMobilityBinning IonMobilityBinning
@brief Splits an mzML file with IonMobility frames into multiple mzML files by binning(merging) spectra by their IM values
This tool supports two modes:
- Regular ion mobility: Splits data into a user-defined number of bins
- FAIMS: Automatically splits data by the FAIMS compensation voltages (CVs) present in the file
For regular ion mobility data:
- Useful to convert IM data to a format that can be processed by tools that do not support IM data (e.g. FeatureFinderCentroided or SearchEngines)
- The results of individual bins can be processed separately and then recombined afterwards
- To decide on the number of bins, try running @ref TOPP_FileInfo on the input file to get an idea of the range of IM values present
For FAIMS data:
- Automatically detects FAIMS compensation voltages in the input file
- Creates one output file per unique CV value
- MS2 spectra without explicit FAIMS CV are assigned to the preceding MS1 FAIMS CV
- No binning parameters required as the splitting is based on the discrete CV values
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_IonMobilityBinning.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_IonMobilityBinning.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPIonMobilityBinning :
  public TOPPBase
{
public:
  TOPPIonMobilityBinning() :
    TOPPBase("IonMobilityBinning", "Splits an mzML file with IonMobility frames into multiple mzML files by binning(merging) spectra by their IM values")
  {
  }

protected:
  /// Registers input file, output prefix, IM binning and spectra-merging options.
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Input file (containing RT, IM, m/z, i.e. IM-frames).");
    setValidFormats_("in", {"mzML"});
    registerOutputPrefix_("out", "<directory>", "", "Path to the output directory to write the binned mzML files to.", true, false);
    registerIntOption_("bins", "<number>", 5, "Number of ion mobility bins to split the input file into", false, false);
    registerDoubleOption_("bin_extension_abs", "<number>", 0.0,
                          "Absolute extension of the bin in IM dimension (causes bins to overlap by 2x this value)", false, false);
    registerTOPPSubsection_("SpectraMerging", "Options for merging spectra within the same IM bin (from the same IM-frame)");
    registerDoubleOption_("SpectraMerging:mz_binning_width", "<number>", 0.01, "Width of the m/z bins", false, false);
    registerStringOption_("SpectraMerging:mz_binning_width_unit", "<unit>", "Da", "Unit of the m/z bin width", false, false);
    setValidStrings_("SpectraMerging:mz_binning_width_unit", {"Da", "ppm"});
  }

  /// Splits FAIMS data into one PeakMap per compensation voltage (CV).
  /// @return the per-CV experiments (ascending CV order) plus a BinContainer
  ///         where bin i encodes the CV as a degenerate range [CV, CV]
  ///         (used only for output file naming).
  std::pair<std::vector<PeakMap>, Math::BinContainer> processFAIMSData_(PeakMap&& experiment)
  {
    // IMDataConverter::splitByFAIMSCV() returns a vector of (FAIMS CV, experiment)
    // pairs (with ascending CV order)
    auto bins_by_cv = IMDataConverter::splitByFAIMSCV(std::move(experiment));
    Size n_bins = bins_by_cv.size();

    std::vector<PeakMap> mzML_bins;
    mzML_bins.reserve(n_bins);
    Math::BinContainer im_ranges;
    // BUG FIX: 'im_ranges' was left empty but indexed via operator[] below,
    // which is out-of-bounds access (undefined behavior); allocate first.
    im_ranges.resize(n_bins);

    for (Size i = 0; i < n_bins; ++i)
    {
      const double faims_cv = bins_by_cv[i].first;
      PeakMap& pm = bins_by_cv[i].second;
      // a FAIMS "bin" is a single discrete CV value, hence min == max
      im_ranges[i].setMin(faims_cv);
      im_ranges[i].setMax(faims_cv);
      mzML_bins.push_back(std::move(pm));
    }
    return {std::move(mzML_bins), std::move(im_ranges)};
  }

  /// Writes one mzML per bin, named '<prefix>_part<i>of<n>_<min>-<max>.mzML'
  /// (part numbers are zero-padded to equal width).
  void writeOutputFiles_(std::vector<PeakMap>& mzML_bins,
                         const Math::BinContainer& im_ranges,
                         const String& out_prefix,
                         Size n_bins)
  {
    const Size width = String(n_bins).size();
    for (Size i = 0; i < n_bins; ++i)
    {
      ostringstream out_name;
      out_name << out_prefix << "_part"
               << setw(width) << setfill('0') << (1+i)
               << "of" << n_bins << "_"
               << im_ranges[i].getMin() << "-"
               << im_ranges[i].getMax() << ".mzML";
      addDataProcessing_(mzML_bins[i],
                         getProcessingInfo_(DataProcessing::ION_MOBILITY_BINNING));
      FileHandler().storeExperiment(out_name.str(), mzML_bins[i], {FileTypes::MZML});
    }
  }

  ExitCodes main_(int, const char **) override
  {
    String input_file = getStringOption_("in");
    String out_prefix = getStringOption_("out");
    int bins = getIntOption_("bins");
    double bin_extension_abs = getDoubleOption_("bin_extension_abs");
    double mz_binning_width = getDoubleOption_("SpectraMerging:mz_binning_width");
    MZ_UNITS mz_binning_width_unit = getStringOption_("SpectraMerging:mz_binning_width_unit") == "Da" ? MZ_UNITS::DA : MZ_UNITS::PPM;

    PeakMap experiment;
    FileHandler().loadExperiment(input_file, experiment, {FileTypes::MZML}, log_type_);

    // Decide FAIMS vs. regular IM processing first (avoid moving 'experiment' before branching)
    const auto cvs = FAIMSHelper::getCompensationVoltages(experiment);

    std::vector<PeakMap> mzML_bins;
    Math::BinContainer im_ranges;
    if (!cvs.empty())
    {
      // FAIMS data: split by discrete compensation voltages
      std::tie(mzML_bins, im_ranges) = processFAIMSData_(std::move(experiment));
    }
    else
    {
      // Regular IM data: bin into user-defined IM bins
      std::tie(mzML_bins, im_ranges) = IMDataConverter::splitExperimentByIonMobility(
        std::move(experiment),
        bins,
        bin_extension_abs,
        mz_binning_width,
        mz_binning_width_unit);
    }
    // ROBUSTNESS: use the number of bins actually produced. Passing the
    // requested 'bins' when the splitter returns fewer would make
    // writeOutputFiles_() index past the end of 'mzML_bins'/'im_ranges'.
    const Size n_bins = mzML_bins.size();

    writeOutputFiles_(mzML_bins, im_ranges, out_prefix, n_bins);

    return EXECUTION_OK;
  }
};
int main(int argc, const char** argv)
{
  // construct the tool and hand control to TOPPBase::main()
  return TOPPIonMobilityBinning().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/OpenSwathFileSplitter.cpp | .cpp | 4,939 | 136 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
// Files
#include <OpenMS/ANALYSIS/OPENSWATH/SwathQC.h>
#include <OpenMS/ANALYSIS/OPENSWATH/SwathWindowLoader.h>
#include <OpenMS/FORMAT/DATAACCESS/MSDataTransformingConsumer.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FORMAT/MzMLFile.h>
#include <OpenMS/FORMAT/SwathFile.h>
#include <OpenMS/METADATA/ExperimentalSettings.h>
#include <OpenMS/SYSTEM/File.h>
#include <QDir>
using namespace OpenMS;
// OpenMS base classes
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/CONCEPT/ProgressLogger.h>
//-------------------------------------------------------------
// Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_OpenSwathFileSplitter OpenSwathFileSplitter
@brief A tool for splitting a single SWATH / DIA file into a set of files, each containing one SWATH window (plus one file for the MS1 survey scans).
Will use the input SWATH / DIA file to generate one output file containing
the MS1 survey scans and \a n individual files for each SWATH / DIA window in
the input file. The number of windows is read from the input file itself.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_OpenSwathFileSplitter.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_OpenSwathFileSplitter.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPOpenSwathFileSplitter : public TOPPBase
{
public:
TOPPOpenSwathFileSplitter() : TOPPBase("OpenSwathFileSplitter", "Splits SWATH files into n files, each containing one window.")
{
}
protected:
void registerOptionsAndFlags_() override
{
registerInputFile_("in", "<files>", "", "Input file (SWATH/DIA file)");
setValidFormats_("in", ListUtils::create<String>("mzML,mzXML"));
registerOutputPrefix_("outputDirectory", "<output>", "./", "Output file prefix", false, true);
// additional QC data
registerOutputFile_("out_qc", "<file>", "", "Optional QC meta data (charge distribution in MS1). Only works with mzML input files.", false, true);
setValidFormats_("out_qc", ListUtils::create<String>("json"));
}
void loadSwathFiles(const String& file_in, const String& tmp, const String& readoptions, std::shared_ptr<ExperimentalSettings>& exp_meta, std::vector<OpenSwath::SwathMap>& swath_maps,
Interfaces::IMSDataConsumer* plugin_consumer = nullptr)
{
SwathFile swath_file;
swath_file.setLogType(log_type_);
FileTypes::Type in_file_type = FileHandler::getTypeByFileName(file_in);
if (in_file_type == FileTypes::MZML)
{
swath_maps = swath_file.loadMzML(file_in, tmp, exp_meta, readoptions, plugin_consumer);
}
else if (in_file_type == FileTypes::MZXML)
{
swath_maps = swath_file.loadMzXML(file_in, tmp, exp_meta, readoptions);
}
else
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Input file needs to have ending .mzML(.gz) or .mzXML(.gz)");
}
}
ExitCodes main_(int, const char**) override
{
///////////////////////////////////
// Prepare Parameters
///////////////////////////////////
String file_in = getStringOption_("in");
// make sure tmp is a directory with proper separator at the end (downstream methods simply do path + filename)
// (do not use QDir::separator(), since its platform specific (/ or \) while absolutePath() will always use '/')
String tmp_dir = String(QDir(getStringOption_("outputDirectory").c_str()).absolutePath()).ensureLastChar('/');
QFileInfo fi(file_in.toQString());
String tmp = tmp_dir + String(fi.baseName());
String out_qc = getStringOption_("out_qc");
///////////////////////////////////
// Load the SWATH files
///////////////////////////////////
std::shared_ptr<ExperimentalSettings> exp_meta(new ExperimentalSettings);
std::vector<OpenSwath::SwathMap> swath_maps;
// collect some QC data
if (out_qc.empty())
{
loadSwathFiles(file_in, tmp, "split", exp_meta, swath_maps);
}
else
{
OpenSwath::SwathQC qc(30, 0.04);
MSDataTransformingConsumer qc_consumer; // apply some transformation
qc_consumer.setSpectraProcessingFunc(qc.getSpectraProcessingFunc());
qc_consumer.setExperimentalSettingsFunc(qc.getExpSettingsFunc());
loadSwathFiles(file_in, tmp, "split", exp_meta, swath_maps, &qc_consumer);
qc.storeJSON(out_qc);
}
return EXECUTION_OK;
}
};
int main(int argc, const char** argv)
{
  // construct the tool and hand control to TOPPBase::main()
  return TOPPOpenSwathFileSplitter().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/QCCalculator.cpp | .cpp | 8,709 | 185 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Mathias Walzer, Axel Walter $
// $Author: Mathias Walzer, Sven Nahnsen $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/DATASTRUCTURES/String.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/ConsensusMap.h>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_QCCalculator QCCalculator
@brief Calculates basic quality parameters from MS experiments and compiles data for subsequent QC into a mzQC or qcML file.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=3> → QCCalculator →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_FeatureFinderCentroided </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_QCMerger </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_CometAdapter </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_QCExporter </td>
</tr>
</table>
</CENTER>
The calculated quality parameters or data compiled as attachments for easy plotting input include file origin, spectra distribution, acquisition details, ion current stability ( & TIC ), id accuracy statistics and feature statistics.
The MS experiments base name is used as name to the qcML element that is comprising all quality parameter values for the given run (including the given downstream analysis data).
- @p id produces quality parameter values for the identification file; this file should contain either only the final psm to each spectrum (1 PeptideHit per identified spectrum) or have the PeptideHits sorted to 'best' first, where 'best' depends on the use case.
- @p feature produces quality parameter values for the feature file; this file can be either mapped or unmapped, the latter resulting in fewer metrics being available.
- @p consensus produces quality parameter values for the consensus file;
some quality parameter calculation are only available if both feature and ids are given.
- @p remove_duplicate_features only needed when you work with a set of merged features. Then considers duplicate features only once.
- @p name only for mzQC: name of the person creating the mzQC file
- @p address only for mzQC: contact address (mail/e-mail or phone) of the person creating the mzQC file
- @p label only for mzQC: RECOMMENDED unique and informative label for the run, so that it can be used as a figure label
- @p description only for mzQC: description and comments about the mzQC file contents
- @p out_type specifies the output file type, default: determined by output file extension
Output is in mzQC with JSON formatting or qcML format (see parameter @p out) which can be viewed directly in a modern browser (chromium, firefox, safari).
The output file specified by the user determines which output file format will be used.
@note Currently mzIdentML (mzid) is not directly supported as an input/output format of this tool. Convert mzid files to/from idXML using @ref TOPP_IDFileConverter if necessary.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_QCCalculator.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_QCCalculator.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wshadow"
#endif
class TOPPQCCalculator :
  public TOPPBase
{
public:
  TOPPQCCalculator() :
    TOPPBase("QCCalculator",
             "Calculates basic quality parameters from MS experiments and subsequent analysis data as identification or feature detection.",
             true,
             {{ "Walzer M, Pernas LE, Nasso S, Bittremieux W, Nahnsen S, Kelchtermans P, Martens, L",
                "qcML: An Exchange Format for Quality Control Metrics from Mass Spectrometry Experiments",
                "Molecular & Cellular Proteomics 2014; 13(8)" , "10.1074/mcp.M113.035907"
             }})
  {
  }

protected:
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "raw data input file (this is relevant if you want to look at MS1, MS2 and precursor peak information)");
    setValidFormats_("in", {"mzML"});
    registerOutputFile_("out", "<file>", "", "Your QC file.");
    setValidFormats_("out", {"mzQC", "qcML"});
    registerStringOption_("out_type", "<type>", "", "Output file type -- default: determined from file extension or content", false);
    setValidStrings_("out_type", {"mzQC", "qcML"});
    registerStringOption_("label", "<label>", "label", "unique name for the run that can be used in a figure label", false);
    registerStringOption_("name", "<contact_name>", "", "name of the person creating this mzQC file", false);
    registerStringOption_("address", "<contact_address>", "", "contact address (mail/e-mail or phone)", false);
    registerStringOption_("description", "<description>", "", "description and comments about the mzQC file contents", false);
    registerInputFile_("id", "<file>", "", "Input idXML file containing the identifications. Your identifications will be exported in an easy-to-read format", false);
    setValidFormats_("id", {"idXML"});
    registerInputFile_("feature", "<file>", "", "feature input file (this is relevant for most QC issues)", false);
    setValidFormats_("feature", {"featureXML"});
    registerInputFile_("consensus", "<file>", "", "consensus input file (this is only used for charge state deconvoluted output. Use the consensusXML output form the DeCharger)", false);
    setValidFormats_("consensus", {"consensusXML"});
    registerFlag_("remove_duplicate_features", "This flag should be set, if you work with a set of merged features.");
  }

  ExitCodes main_(int, const char**) override
  {
    // fetch all command line parameters up front
    const String mzml_file = getStringOption_("in");
    const String out_file = getStringOption_("out");
    const String id_file = getStringOption_("id");
    const String feature_file = getStringOption_("feature");
    const String consensus_file = getStringOption_("consensus");
    const String contact_name = getStringOption_("name");
    const String contact_address = getStringOption_("address");
    const String description = getStringOption_("description");
    const String label = getStringOption_("label");
    const bool remove_duplicate_features = getFlag_("remove_duplicate_features");

    // ensure the output file has a valid extension
    const FileTypes::Type out_type = FileHandler::getConsistentOutputfileType(out_file, getStringOption_("out_type"));

    // load the raw data (mandatory input)
    cout << "Reading mzML file..." << endl;
    MSExperiment exp;
    FileHandler().loadExperiment(mzml_file, exp, {FileTypes::MZML}, log_type_);
    exp.sortSpectra();
    exp.updateRanges();

    // optional: feature data
    FeatureMap feature_map;
    if (!feature_file.empty())
    {
      cout << "Reading featureXML file..." << endl;
      FileHandler().loadFeatures(feature_file, feature_map, {FileTypes::FEATUREXML}, log_type_);
      feature_map.updateRanges();
      feature_map.sortByRT();
    }

    // optional: consensus data
    ConsensusMap consensus_map;
    if (!consensus_file.empty())
    {
      cout << "Reading consensusXML file..." << endl;
      FileHandler().loadConsensusFeatures(consensus_file, consensus_map, {FileTypes::CONSENSUSXML}, log_type_);
    }

    // optional: identification data
    vector<ProteinIdentification> prot_ids;
    PeptideIdentificationList pep_ids;
    if (!id_file.empty())
    {
      cout << "Reading idXML file..." << endl;
      FileHandler().loadIdentifications(id_file, prot_ids, pep_ids, {FileTypes::IDXML}, log_type_);
    }

    // collect QC data and store according to output file extension
    FileHandler().storeQC(mzml_file, out_file, exp, feature_map, prot_ids, pep_ids, consensus_map, contact_name,
                          contact_address, description, label, remove_duplicate_features, {out_type});

    return EXECUTION_OK;
  }
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
int main(int argc, const char** argv)
{
  // construct the tool and hand control to TOPPBase::main()
  return TOPPQCCalculator().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/MRMMapper.cpp | .cpp | 5,540 | 163 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/ANALYSIS/TARGETED/TargetedExperiment.h>
#include <OpenMS/CONCEPT/ProgressLogger.h>
#include <OpenMS/ANALYSIS/TARGETED/MRMMapping.h>
using namespace OpenMS;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_MRMMapper MRMMapper
@brief MRMMapper maps measured chromatograms (mzML) and the transitions used (TraML).
<CENTER>
<table>
<tr>
<th ALIGN = "center"> potential predecessor tools </td>
<td VALIGN="middle" ROWSPAN=3> → MRMMapper →</td>
<th ALIGN = "center"> potential successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=2> @ref TOPP_FileFilter </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_OpenSwathAnalyzer </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_MRMTransitionGroupPicker </td>
</tr>
</table>
</CENTER>
This tool reads an mzML containing chromatograms (presumably measured on an
SRM instrument) and a TraML file that contains the data that was used to
generate the instrument method to measure said data. It then maps the
transitions in the TraML file to the chromatograms found in the mzML file
and stores the chromatograms annotated with meta-data from the TraML file.
Thus, the output chromatograms are an annotated copy of the input
chromatograms with native id, precursor information and peptide sequence (if
available) annotated in the chromatogram files.
The algorithm tries to match a given set of chromatograms and targeted
assays. It iterates through all the chromatograms retrieves one or more
matching targeted assay for the chromatogram. By default, the algorithm
assumes that a 1:1 mapping exists. If a chromatogram cannot be mapped
(does not have a corresponding assay) the algorithm issues a warning, the
user can specify that the program should abort in such a case (see
error_on_unmapped).
If multiple mapping is enabled (see map_multiple_assays parameter)
then each mapped assay will get its own chromatogram that contains the
same raw data but different meta-annotation. This *can* be useful if the
same transition is used to monitor multiple analytes but may also
indicate a problem with too wide mapping tolerances.
The thus mapped mzML file can then be used in a downstream analysis.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_MRMMapper.cli
<B>The algorithm parameters for the Analyzer filter are:</B>
@htmlinclude TOPP_MRMMapper.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPMRMMapper
  : public TOPPBase
{
public:
  TOPPMRMMapper() :
    TOPPBase("MRMMapper", "MRMMapper maps measured chromatograms (mzML) and the transitions used (TraML)", true)
  {
  }

protected:
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Input file containing chromatograms (converted mzXML file)");
    setValidFormats_("in", ListUtils::create<String>("mzML"));
    registerInputFile_("tr", "<file>", "", "transition file");
    setValidFormats_("tr", ListUtils::create<String>("traML"));
    registerOutputFile_("out", "<file>", "", "Output file containing mapped chromatograms");
    setValidFormats_("out", ListUtils::create<String>("mzML"));
    registerSubsection_("algorithm", "Algorithm parameters section");
  }

  Param getSubsectionDefaults_(const String& name) const override
  {
    // "algorithm" is the only subsection registered above
    if (name != "algorithm")
    {
      throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Unknown subsection", name);
    }
    return MRMMapping().getDefaults();
  }

  ExitCodes main_(int, const char **) override
  {
    const String chrom_file = getStringOption_("in");
    const String transition_file = getStringOption_("tr");
    const String out_file = getStringOption_("out");

    // load the targeted assays and the measured chromatograms
    OpenMS::TargetedExperiment targeted_exp;
    OpenMS::PeakMap chromatogram_map;
    OpenMS::PeakMap output;
    FileHandler().loadTransitions(transition_file, targeted_exp, {FileTypes::TRAML}, log_type_);
    FileHandler().loadExperiment(chrom_file, chromatogram_map, {FileTypes::MZML}, log_type_);

    // run the mapping with the user-supplied algorithm parameters
    MRMMapping mrmm;
    mrmm.setParameters(getParam_().copy("algorithm:", true));
    mrmm.mapExperiment(chromatogram_map, targeted_exp, output);

    // add all data processing information to all the chromatograms
    DataProcessingPtr dp = std::make_shared<DataProcessing>(getProcessingInfo_(DataProcessing::FORMAT_CONVERSION));
    std::vector<MSChromatogram> chromatograms = output.getChromatograms();
    for (MSChromatogram& chrom : chromatograms)
    {
      chrom.getDataProcessing().push_back(dp);
    }
    output.setChromatograms(chromatograms);

    FileHandler().storeExperiment(out_file, output, {FileTypes::MZML}, log_type_);
    return EXECUTION_OK;
  }
};
int main(int argc, const char** argv)
{
  // construct the tool and hand control to TOPPBase::main()
  return TOPPMRMMapper().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/RNAMassCalculator.cpp | .cpp | 12,162 | 331 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CHEMISTRY/NASequence.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <OpenMS/FORMAT/SVOutStream.h>
#include <OpenMS/SYSTEM/File.h>
#include <iomanip> // for "setprecision"
#include <ostream>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_RNAMassCalculator RNAMassCalculator
@brief Calculates masses, mass-to-charge ratios and sum formulas of RNA sequences.
Given an RNA (oligonucleotide) sequence and a charge state, the charged mass (including H+ adducts or losses), mass-to-charge ratio and molecular sum formula are computed.
The sequence can include modifications (for information on valid notation see the @ref OpenMS::NASequence "NASequence" class documentation).
Neutral masses can be computed by using "0" as charge state.
Input can be given directly as values of the parameters: @p in_seq for RNA sequences and @p charge for charge states.
Alternatively, it can be read from a file (see parameter @p in) with the following format: An RNA sequence at the beginning of each line, optionally followed by any number of charge states.
Whitespace, commas or semicolons can be used to delimit the different items.
Parts of the input that cannot be understood will be skipped.
If charge states are given in the input file as well as via the @p charge parameter, results are returned for the union of both sets of charge states.
Output can be written to a file or to the screen (see parameter @p out).
Results for different charge states are always ordered from lowest to highest charge.
A number of different output formats are available via the parameter @p format:
- @p list writes a human-readable list of the form "ACGU: z=-2 m=1221.1951 m/z=610.5976 f=C38H46N15O26P3, z=-1 m=1222.2030 m/z=1222.2030 f=C38H47N15O26P3";
- @p table produces a CSV-like table (using parameter @p separator to delimit fields) with the columns "sequence", "charge", "mass", "mass-to-charge" and "formula", and with one row per sequence and charge state;
- @p mass_only writes only mass values (one line per sequence, values for different charge states separated by spaces);
- @p mz_only writes only mass-to-charge ratios (one line per sequence, values for different charge states separated by spaces);
- @p formula_only writes only sum formulas.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_RNAMassCalculator.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_RNAMassCalculator.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPRNAMassCalculator :
public TOPPBase
{
public:
TOPPRNAMassCalculator() :
  TOPPBase("RNAMassCalculator", "Calculates masses, mass-to-charge ratios and sum formulas of RNA sequences", true), use_avg_mass_(false), output_(nullptr), format_(), frag_type_(NASequence::NASFragmentType::Full)
{
  // map the user-facing fragment type names (see 'fragment_type' option)
  // to the corresponding NASequence enum values
  frag_type_names_ = {
    {"full", NASequence::NASFragmentType::Full},
    {"internal", NASequence::NASFragmentType::Internal},
    {"5-prime", NASequence::NASFragmentType::FivePrime},
    {"3-prime", NASequence::NASFragmentType::ThreePrime},
    {"a-B-ion", NASequence::NASFragmentType::AminusB},
    {"a-ion", NASequence::NASFragmentType::AIon},
    {"b-ion", NASequence::NASFragmentType::BIon},
    {"c-ion", NASequence::NASFragmentType::CIon},
    {"d-ion", NASequence::NASFragmentType::DIon},
    {"w-ion", NASequence::NASFragmentType::WIon},
    {"x-ion", NASequence::NASFragmentType::XIon},
    {"y-ion", NASequence::NASFragmentType::YIon},
    {"z-ion", NASequence::NASFragmentType::ZIon}};
}
protected:
bool use_avg_mass_;
ostream* output_; // pointer to output stream (stdout or file)
String format_, separator_;
NASequence::NASFragmentType frag_type_;
map<String, NASequence::NASFragmentType> frag_type_names_;
void registerOptionsAndFlags_() override
{
registerInputFile_("in", "<file>", "", "Input file with RNA sequences and optionally charge numbers (mutually exclusive to 'in_seq')", false);
setValidFormats_("in", {"txt"});
registerStringList_("in_seq", "<sequences>", StringList(), "List of RNA sequences (mutually exclusive to 'in')", false, false);
registerOutputFile_("out", "<file>", "", "Output file; if empty, output is written to the screen", false);
setValidFormats_("out", {"txt"});
registerIntList_("charge", "<numbers>", {0}, "List of charge states; required if 'in_seq' is given", false);
registerStringOption_("format", "<choice>", "list", "Output format ('list': human-readable list, 'table': CSV-like table, 'mass_only': mass values only, 'mz_only': m/z values only, 'formula_only': sum formula only)\n", false);
setValidStrings_("format", {"list", "table", "mass_only", "mz_only", "formula_only"});
registerFlag_("average_mass", "Compute average (instead of monoisotopic) oligonucleotide masses");
registerStringOption_("fragment_type", "<choice>", "full", "For what type of sequence/fragment the mass should be computed\n", false);
setValidStrings_("fragment_type", ListUtils::create<String>("full,internal,5-prime,3-prime,a-B-ion,a-ion,b-ion,c-ion,d-ion,w-ion,x-ion,y-ion,z-ion"));
registerStringOption_("separator", "<sep>", "", "Field separator for 'table' output format; by default, the 'tab' character is used", false);
}
double computeMass_(const NASequence& seq, Int charge) const
{
if (use_avg_mass_) return seq.getAverageWeight(frag_type_, charge);
else return seq.getMonoWeight(frag_type_, charge);
}
void writeTable_(const NASequence& seq, const set<Int>& charges)
{
SVOutStream sv_out(*output_, separator_);
for (set<Int>::const_iterator it = charges.begin(); it != charges.end();
++it)
{
double mass = computeMass_(seq, *it);
EmpiricalFormula formula = seq.getFormula(frag_type_, *it);
sv_out << seq.toString() << *it << mass;
sv_out.writeValueOrNan(abs(mass / *it));
sv_out << formula.toString();
sv_out << endl;
}
}
void writeList_(const NASequence& seq, const set<Int>& charges)
{
*output_ << seq.toString() << ": ";
for (set<Int>::const_iterator it = charges.begin(); it != charges.end();
++it)
{
double mass = computeMass_(seq, *it);
EmpiricalFormula formula = seq.getFormula(frag_type_, *it);
if (it != charges.begin()) *output_ << ", ";
*output_ << "z=" << *it << " m=" << mass << " m/z=";
if (*it != 0) *output_ << abs(mass / *it);
else *output_ << "inf";
*output_ << " f=" << formula.toString();
}
*output_ << endl;
}
void writeMassOnly_(const NASequence& seq, const set<Int>& charges,
bool mz = false)
{
for (set<Int>::const_iterator it = charges.begin(); it != charges.end();
++it)
{
double mass = computeMass_(seq, *it);
if (it != charges.begin()) *output_ << " ";
if (!mz) *output_ << mass;
else if (*it == 0) *output_ << "inf";
else *output_ << abs(mass / *it);
}
*output_ << endl;
}
void writeFormulaOnly_(const NASequence& seq, const set<Int>& charges)
{
bool first = true;
for (Int charge : charges)
{
EmpiricalFormula formula = seq.getFormula(frag_type_, charge);
if (!first) *output_ << " ";
*output_ << formula.toString();
first = false;
}
*output_ << endl;
}
void writeLine_(const NASequence& seq, const set<Int>& charges)
{
if (format_ == "list") writeList_(seq, charges);
else if (format_ == "table") writeTable_(seq, charges);
else if (format_ == "mass_only") writeMassOnly_(seq, charges);
else if (format_ == "mz_only") writeMassOnly_(seq, charges, true);
else writeFormulaOnly_(seq, charges);
}
String getItem_(String& line, const String& skip = " \t,;")
{
Size pos = line.find_first_of(skip);
String prefix = line.substr(0, pos);
pos = line.find_first_not_of(skip, pos);
if (pos == String::npos) line = "";
else line = line.substr(pos);
return prefix.trim();
}
void readFile_(const String& filename, const set<Int>& charges)
{
ifstream input(filename.c_str());
String line;
Size line_count(0);
while (getline(input, line))
{
++line_count;
String item = getItem_(line);
if ((item[0] == '"') && (item[item.size() - 1] == '"'))
{
item.unquote();
}
NASequence seq;
try
{
seq = NASequence::fromString(item);
}
catch (Exception::ParseError& /*e*/)
{
OPENMS_LOG_WARN << "Warning: '" << item << "' is not a valid RNA sequence - skipping\n";
continue;
}
set<Int> local_charges(charges);
Size conversion_failed_count(0);
while (!line.empty())
{
item = getItem_(line);
try
{
local_charges.insert(item.toInt());
}
catch (Exception::ConversionError& /*e*/)
{
++conversion_failed_count;
}
}
if (conversion_failed_count)
{
OPENMS_LOG_WARN << "Warning: Invalid charge state specified in line:" << line_count << ".\n";
}
if (local_charges.empty())
{
OPENMS_LOG_WARN << "Warning: No charge state specified - skipping (line:" << line_count << ")\n";
continue;
}
writeLine_(seq, local_charges);
}
input.close();
}
ExitCodes main_(int, const char**) override
{
String in = getStringOption_("in");
StringList in_seq = getStringList_("in_seq");
String out = getStringOption_("out");
IntList charge_list = getIntList_("charge");
set<Int> charges(charge_list.begin(), charge_list.end());
use_avg_mass_ = getFlag_("average_mass");
frag_type_ = frag_type_names_[getStringOption_("fragment_type")];
ofstream outfile;
if (out.empty())
{
output_ = &cout;
}
else
{
outfile.open(out.c_str());
output_ = &outfile;
}
// use 4 decimal places:
*output_ << std::fixed << std::setprecision(4);
format_ = getStringOption_("format");
if (format_ == "table")
{
separator_ = getStringOption_("separator");
if (separator_.empty()) separator_ = "\t";
// write header:
SVOutStream sv_out(*output_, separator_);
sv_out << "sequence" << "charge" << "mass" << "mass-to-charge" << "formula" << endl;
}
if ((!in.empty()) && (!in_seq.empty()))
{
OPENMS_LOG_ERROR << "Specifying an input file and input sequences at the same time is not allowed!";
return ILLEGAL_PARAMETERS;
}
if (!in.empty())
{
readFile_(in, charges);
}
else
{
if (charges.empty())
{
OPENMS_LOG_ERROR << "Error: No charge state specified";
return ILLEGAL_PARAMETERS;
}
for (StringList::iterator it = in_seq.begin(); it != in_seq.end(); ++it)
{
NASequence seq;
try
{
seq = NASequence::fromString(*it);
}
catch (Exception::ParseError& /*e*/)
{
OPENMS_LOG_WARN << "Warning: '" << *it << "' is not a valid RNA sequence - skipping\n";
continue;
}
writeLine_(seq, charges);
}
}
if (!out.empty()) outfile.close();
return EXECUTION_OK;
}
};
int main(int argc, const char** argv)
{
TOPPRNAMassCalculator tool;
return tool.main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/AssayGeneratorMetaboSirius.cpp | .cpp | 18,762 | 359 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Oliver Alka, Axel Walter $
// $Authors: Oliver Alka, Axel Walter $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/SiriusExportAlgorithm.h>
#include <OpenMS/ANALYSIS/OPENSWATH/MRMAssay.h>
#include <OpenMS/ANALYSIS/OPENSWATH/TransitionPQPFile.h>
#include <OpenMS/ANALYSIS/OPENSWATH/TransitionTSVFile.h>
#include <OpenMS/ANALYSIS/QUANTITATION/KDTreeFeatureMaps.h>
#include <OpenMS/ANALYSIS/TARGETED/MetaboTargetedAssay.h>
#include <OpenMS/ANALYSIS/TARGETED/MetaboTargetedTargetDecoy.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/PROCESSING/CALIBRATION/PrecursorCorrection.h>
#include <OpenMS/PROCESSING/DEISOTOPING/Deisotoper.h>
#include <OpenMS/FORMAT/CsvFile.h>
#include <OpenMS/FORMAT/DATAACCESS/SiriusFragmentAnnotation.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/RangeUtils.h>
#include <OpenMS/SYSTEM/File.h>
#include <QtCore/QDir>
#include <QtCore/QDirIterator>
#include <QtCore/QString>
#include <algorithm>
#include <map>
using namespace OpenMS;
//-------------------------------------------------------------
//Doxygen docu
//----------------------------------------------------------
/**
@page TOPP_AssayGeneratorMetaboSirius AssayGeneratorMetaboSirius
@brief Generates an assay library from SIRIUS fragmentation trees (Metabolomics)
<CENTER>
<table>
<tr>
<th ALIGN = "center"> potential predecessor tools </th>
<td VALIGN="middle" ROWSPAN=2> → AssayGeneratorMetaboSirius →</td>
<th ALIGN = "center"> potential successor tools </th>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_FeatureFinderMetabo </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> OpenSWATH pipeline </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_AccurateMassSearch </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_SiriusExport </td>
</tr>
</table>
</CENTER>
Build an assay library from a SIRIUS project directory using fragmentation trees.
- Use the SiriusExport TOPP tool with each sample's mzML and featureXML files to generate a SIRIUS input ms file.
@code
SiriusExport -in sample.mzML -in_featureinfo sample.featureXML -out_ms sample.ms
@endcode
- Run SIRIUS externally with "--no-compression" flag and the formula, passatutto (optional, for decoy generation) and write-summaries tools.
- This tool was tested with the SIRIUS project directory generated with SIRIUS versions >= 5.5.1 and <= 5.8.6.
@code
sirius --input sample.ms --project sirius-project --maxmz 300 --no-compression formula passatutto write-summaries
@endcode
- Provide the path to SIRIUS project as input parameter for this tool.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_AssayGeneratorMetaboSirius.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_AssayGeneratorMetaboSirius.html
*/
/// @cond TOPPCLASSES
class TOPPAssayGeneratorMetaboSirius :
  public TOPPBase,
  private TransitionTSVFile
{
public:
  TOPPAssayGeneratorMetaboSirius() :
    TOPPBase("AssayGeneratorMetaboSirius", "Assay library generation from a SIRIUS project directory (Metabolomics)")
  {}

protected:
  void registerOptionsAndFlags_() override
  {
    // input/output
    registerInputFile_("in", "<directory>", "", "SIRIUS project directory");
    registerInputFile_("in_compoundinfo", "<file>", "", "Compound info table (.tsv file)");
    setValidFormats_("in_compoundinfo", ListUtils::create<String>("tsv"));
    registerOutputFile_("out", "<file>", "", "Assay library output file");
    setValidFormats_("out", ListUtils::create<String>("tsv,traML,pqp"));
    // ambiguity resolution across multiple input files
    registerDoubleOption_("ambiguity_resolution_mz_tolerance", "<num>", 10.0, "Mz tolerance for the resolution of identification ambiguity over multiple files", false);
    registerStringOption_("ambiguity_resolution_mz_tolerance_unit", "<choice>", "ppm", "Unit of the ambiguity_resolution_mz_tolerance", false, true);
    setValidStrings_("ambiguity_resolution_mz_tolerance_unit", ListUtils::create<String>("ppm,Da"));
    registerDoubleOption_("ambiguity_resolution_rt_tolerance", "<num>", 10.0, "RT tolerance in seconds for the resolution of identification ambiguity over multiple files", false);
    registerDoubleOption_("total_occurrence_filter", "<num>", 0.1, "Filter compound based on total occurrence in analysed samples", false);
    setMinFloat_("total_occurrence_filter", 0.0);
    setMaxFloat_("total_occurrence_filter", 1.0);
    // fragment annotation
    registerDoubleOption_("fragment_annotation_score_threshold", "<num>", 0.80, "Filters annotations based on the explained intensity of the peaks in a spectrum", false);
    setMinFloat_("fragment_annotation_score_threshold", 0.0);
    setMaxFloat_("fragment_annotation_score_threshold", 1.0);
    registerStringOption_("method", "<choice>", "highest_intensity", "Spectrum with the highest precursor intensity or a consensus spectrum is used for assay library construction (if no fragment annotation is used).",false);
    setValidStrings_("method", ListUtils::create<String>("highest_intensity,consensus_spectrum"));
    registerFlag_("use_exact_mass", "Use exact mass for precursor and fragment annotations", false);
    registerFlag_("exclude_ms2_precursor", "Excludes precursor in ms2 from transition list", false);
    registerFlag_("use_known_unknowns", "Use features without identification information", false);
    // transition extraction
    registerIntOption_("min_transitions", "<int>", 3, "Minimal number of transitions", false);
    registerIntOption_("max_transitions", "<int>", 6, "Maximal number of transitions", false);
    registerDoubleOption_("transition_threshold", "<num>", 5, "Further transitions need at least x% of the maximum intensity (default 5%)", false);
    registerDoubleOption_("min_fragment_mz", "<num>", 0.0, "Minimal m/z of a fragment ion choosen as a transition", false, true);
    registerDoubleOption_("max_fragment_mz", "<num>", 2000.0, "Maximal m/z of a fragment ion choosen as a transition" , false, true);
    // decoys
    registerFlag_("decoy_generation", "Decoys will be generated using the fragmentation tree re-rooting approach. This option does only work in combination with the fragment annotation via Sirius.", false);
    registerStringOption_("decoy_generation_method", "<choice>", "original", "Uses different methods for decoy generation. Basis for the method is the fragmentation-tree re-rooting approach ('original'). This approach can be extended by using 'resolve_overlap', which will resolve overlapping target/decoy fragments by adding -CH2 mass to the overlapping decoy fragments. 'generate_missing_decoys' will add a -CH2 mass shift to the target fragments and use them as decoys if fragmentation-tree re-rooting failed. 'Both' combines the extended methods (resolve_overlap, generate_missing_decoys).",false);
    setValidStrings_("decoy_generation_method", ListUtils::create<String>("original,resolve_overlap,generate_missing_decoys,both"));
    registerDoubleOption_("decoy_resolution_mz_tolerance", "<num>", 10.0, "Mz tolerance for the resolution of overlapping m/z values for targets and decoys of one compound.", false);
    registerStringOption_("decoy_resolution_mz_tolerance_unit", "<choice>", "ppm", "Unit of the decoy_resolution_mz_tolerance", false, true);
    setValidStrings_("decoy_resolution_mz_tolerance_unit", ListUtils::create<String>("ppm,Da"));
  }

  ExitCodes main_(int, const char **) override
  {
    //-------------------------------------------------------------
    // Parsing parameters
    //-------------------------------------------------------------
    String sirius_project_directory = getStringOption_("in");
    String compoundinfo_file = getStringOption_("in_compoundinfo");
    String out = getStringOption_("out");
    String method = getStringOption_("method");
    double ar_mz_tol = getDoubleOption_("ambiguity_resolution_mz_tolerance");
    String ar_mz_tol_unit_res = getStringOption_("ambiguity_resolution_mz_tolerance_unit");
    double ar_rt_tol = getDoubleOption_("ambiguity_resolution_rt_tolerance");
    double total_occurrence_filter = getDoubleOption_("total_occurrence_filter");
    double score_threshold = getDoubleOption_("fragment_annotation_score_threshold");
    bool decoy_generation = getFlag_("decoy_generation");
    bool use_exact_mass = getFlag_("use_exact_mass");
    bool exclude_ms2_precursor = getFlag_("exclude_ms2_precursor");
    // translate the decoy generation method into the flags used below
    String decoy_generation_method = getStringOption_("decoy_generation_method");
    bool original = false;
    bool resolve_overlap = false;
    bool generate_missing_decoys = false;
    if (decoy_generation_method == "original" && decoy_generation)
    {
      OPENMS_LOG_INFO << "Decoy method: fragmentation tree re-rooting." << std::endl;
      original = true;
    }
    else if (decoy_generation_method == "resolve_overlap" && decoy_generation)
    {
      OPENMS_LOG_INFO << "Decoy method: fragmentation tree re-rooting and overlap resolution." << std::endl;
      resolve_overlap = true;
    }
    else if (decoy_generation_method == "generate_missing_decoys" && decoy_generation)
    {
      OPENMS_LOG_INFO << "Decoy method: fragmentation tree re-rooting and filling missing decoys by addition of -CH2 mass shift where re-rooting was not possible." << std::endl;
      generate_missing_decoys = true;
    }
    else if (decoy_generation_method == "both" && decoy_generation)
    {
      OPENMS_LOG_INFO << "Decoy method: fragmentation tree re-rooting with overlap resolution and addition of -CH2 mass shift to generate missing decoys where re-rooting was not possible." << std::endl;
      resolve_overlap = true;
      generate_missing_decoys = true;
    }
    double decoy_mz_tol = getDoubleOption_("decoy_resolution_mz_tolerance");
    String decoy_mz_tol_unit_res = getStringOption_("decoy_resolution_mz_tolerance_unit");
    int min_transitions = getIntOption_("min_transitions");
    int max_transitions = getIntOption_("max_transitions");
    double min_fragment_mz = getDoubleOption_("min_fragment_mz");
    double max_fragment_mz = getDoubleOption_("max_fragment_mz");
    double transition_threshold = getDoubleOption_("transition_threshold");
    bool use_known_unknowns = getFlag_("use_known_unknowns");

    //-------------------------------------------------------------
    // Get all subdirectories within the SIRIUS project directory
    //-------------------------------------------------------------
    std::vector<String> subdirs;
    QDirIterator it(sirius_project_directory.toQString(), QDir::Dirs | QDir::NoDotAndDotDot, QDirIterator::NoIteratorFlags);
    while (it.hasNext())
    {
      subdirs.emplace_back(it.next());
    }
    OPENMS_LOG_DEBUG << subdirs.size() << " spectra were annotated using SIRIUS." << std::endl;
    if (subdirs.empty())
    {
      decoy_generation = false;
      throw OpenMS::Exception::Postcondition(__FILE__,__LINE__, OPENMS_PRETTY_FUNCTION, "SIRIUS project directory is empty.");
    }

    //-------------------------------------------------------------
    // Get CompoundInfo objects from tsv file
    //-------------------------------------------------------------
    std::vector<SiriusMSFile::CompoundInfo> v_cmpinfo;
    // get number of files from maximum file_index value
    size_t n_files = 0;
    CsvFile csv(compoundinfo_file, '\t');
    size_t row_count = csv.rowCount();
    for (size_t i = 1; i < row_count; ++i) // row 0 is the header
    {
      StringList row_data;
      csv.getRow(i, row_data);
      // columns up to index 15 (m_ids_id) are accessed below; skip short rows
      // instead of reading out of bounds
      if (row_data.size() < 16)
      {
        OPENMS_LOG_WARN << "Warning: skipping malformed row " << i << " in compound info file.\n";
        continue;
      }
      SiriusMSFile::CompoundInfo cmp_info;
      // Convert and assign each field from row_data to cmp_info's attributes
      cmp_info.cmp = row_data[0];
      cmp_info.file_index = stoi(row_data[1]);
      cmp_info.pmass = stod(row_data[2]);
      cmp_info.rt = stod(row_data[4]);
      cmp_info.fmz = stod(row_data[5]);
      cmp_info.fid = row_data[6];
      cmp_info.formula = row_data[7];
      cmp_info.charge = stoi(row_data[8]);
      cmp_info.ionization = row_data[9];
      cmp_info.des = row_data[10];
      cmp_info.source_file = row_data[12];
      cmp_info.m_ids_id = row_data[15];
      // add if "use_known_unknown" flag is set or compound name is not "UNKNOWN"
      if (use_known_unknowns || cmp_info.des != "UNKNOWN")
      {
        v_cmpinfo.push_back(cmp_info);
      }
      // file_index is 0-based, so the file count is the maximum index + 1;
      // rows are not guaranteed to be sorted by index, hence the max
      n_files = std::max<size_t>(n_files, cmp_info.file_index + 1);
    }

    //--------------------------------------------------------------------------
    // Get list of MetaboTargetedAssay (compound with all possible transitions)
    //--------------------------------------------------------------------------
    //get annotated spectra from SIRIUS project subdirs
    std::vector<SiriusFragmentAnnotation::SiriusTargetDecoySpectra> annotated_spectra =
      SiriusFragmentAnnotation::extractAndResolveSiriusAnnotations(subdirs, score_threshold, use_exact_mass, decoy_generation);
    // combine compound info with annotated spectra
    std::vector<MetaboTargetedAssay::CompoundTargetDecoyPair> v_cmp_spec;
    v_cmp_spec = MetaboTargetedAssay::pairCompoundWithAnnotatedTDSpectraPairs(v_cmpinfo, annotated_spectra);
    // pair compound info with potential transitions (filtered by min/max, exclude precursor)
    std::vector<MetaboTargetedAssay> v_mta;
    v_mta = MetaboTargetedAssay::extractMetaboTargetedAssayFragmentAnnotation(v_cmp_spec,
                                                                              transition_threshold,
                                                                              min_fragment_mz,
                                                                              max_fragment_mz,
                                                                              use_exact_mass,
                                                                              exclude_ms2_precursor);

    //--------------------------------------------------------------------------------------------
    // Combine ambigous identifications (derived from consensus features with similar m/z and RT)
    //--------------------------------------------------------------------------------------------
    // build feature maps (matching original raw data files by file_index) and perfom feature linking
    std::unordered_map< UInt64, std::vector<MetaboTargetedAssay> > ambiguity_groups = MetaboTargetedAssay::buildAmbiguityGroup(v_mta, ar_mz_tol, ar_rt_tol, ar_mz_tol_unit_res, n_files);
    // resolve identification ambiguity based on highest occurrence and highest intensity
    MetaboTargetedAssay::resolveAmbiguityGroup(ambiguity_groups, total_occurrence_filter, n_files);

    //--------------------------------------------------------------------------------------------
    // Merge all transitions in a TargetedExperiment and filter number of transitions
    //--------------------------------------------------------------------------------------------
    std::vector<TargetedExperiment::Compound> v_cmp;
    std::vector<ReactionMonitoringTransition> v_rmt_all;
    for (const auto &it : ambiguity_groups)
    {
      for (const auto &comp_it : it.second)
      {
        v_cmp.push_back(comp_it.potential_cmp);
        v_rmt_all.insert(v_rmt_all.end(), comp_it.potential_rmts.begin(), comp_it.potential_rmts.end());
      }
    }
    TargetedExperiment t_exp;
    t_exp.setCompounds(v_cmp);
    t_exp.setTransitions(v_rmt_all);
    // use MRMAssay methods for filtering
    MRMAssay assay;
    // sort by highest intensity - filter: min/max transitions (targets), filter: max transitions (decoys)
    // e.g. if only one decoy fragment is available it will not be filtered out!
    assay.filterMinMaxTransitionsCompound(t_exp, min_transitions, max_transitions);

    //------------------------------------------------------
    // Decoys
    //------------------------------------------------------
    if (decoy_generation)
    {
      // remove decoys which do not have a respective target after min/max transition filtering
      // based on the TransitionGroupID (similar for targets "0_Acephate_[M+H]+_0" and decoys "0_Acephate_decoy_[M+H]+_0")
      assay.filterUnreferencedDecoysCompound(t_exp);
      // resolve overlapping target and decoy masses
      // after selection of decoy masses based on highest intensity (arbitrary, since passatutto uses
      // the intensities based on the previous fragmentation tree), overlapping masses between targets
      // and decoys of one respective metabolite_adduct combination can be resolved by adding a CH2 mass
      if (!original)
      {
        const double chtwo_mass = EmpiricalFormula("CH2").getMonoWeight();
        std::vector<MetaboTargetedTargetDecoy::MetaboTargetDecoyMassMapping> mappings = MetaboTargetedTargetDecoy::constructTargetDecoyMassMapping(t_exp);
        if (resolve_overlap)
        {
          MetaboTargetedTargetDecoy::resolveOverlappingTargetDecoyMassesByDecoyMassShift(t_exp, mappings, chtwo_mass, decoy_mz_tol, decoy_mz_tol_unit_res);
        }
        if (generate_missing_decoys)
        {
          MetaboTargetedTargetDecoy::generateMissingDecoysByMassShift(t_exp, mappings, chtwo_mass);
        }
      }
    }
    // sort TargetedExperiment by name (TransitionID)
    t_exp.sortTransitionsByName();

    //-------------------------------------------------------------
    // writing output
    //-------------------------------------------------------------
    String extension = out.substr(out.find_last_of(".")+1);
    if (extension == "tsv")
    {
      // validate and write
      OpenMS::TransitionTSVFile::convertTargetedExperimentToTSV(out.c_str(), t_exp);
    }
    else if (extension == "traML")
    {
      // validate
      OpenMS::TransitionTSVFile::validateTargetedExperiment(t_exp);
      // write traML
      FileHandler().storeTransitions(out, t_exp, {FileTypes::TRAML});
    }
    else if (extension == "pqp")
    {
      //validate
      OpenMS::TransitionTSVFile::validateTargetedExperiment(t_exp);
      // write pqp
      TransitionPQPFile pqp_out;
      pqp_out.convertTargetedExperimentToPQP(out.c_str(), t_exp);
    }
    return EXECUTION_OK;
  }
};
int main(int argc, const char ** argv)
{
TOPPAssayGeneratorMetaboSirius tool;
return tool.main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/MassTraceExtractor.cpp | .cpp | 13,500 | 379 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Erhan Kenar, Holger Franken $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/FEATUREFINDER/MassTraceDetection.h>
#include <OpenMS/FEATUREFINDER/ElutionPeakDetection.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/KERNEL/MassTrace.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/SYSTEM/File.h>
#include <OpenMS/CONCEPT/Constants.h>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_MassTraceExtractor MassTraceExtractor
@brief MassTraceExtractor extracts mass traces from a MSExperiment map and stores them into a FeatureXMLFile.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </th>
<td VALIGN="middle" ROWSPAN=3> → MassTraceExtractor →</td>
<th ALIGN = "center"> pot. successor tools </th>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_PeakPickerHiRes </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_FeatureFinderMetabo</td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_PeakPickerHiRes </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_TextExporter </td>
</tr>
</table>
</CENTER>
This TOPP tool detects mass traces in centroided LC-MS maps and stores them as features in
a FeatureMap. These features may be either used directly as input for a metabolite ID approach or further
be assembled to aggregate features according to a theoretical isotope pattern. For metabolomics experiments,
the @ref TOPP_FeatureFinderMetabo tool offers both mass trace extraction and isotope pattern assembly.
For proteomics data, please refer to the @ref TOPP_FeatureFinderCentroided tool.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_MassTraceExtractor.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_MassTraceExtractor.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPMassTraceExtractor :
public TOPPBase
{
public:
/// Constructor: registers the tool's name and description with the TOPP framework
TOPPMassTraceExtractor() :
  TOPPBase("MassTraceExtractor", "Detects mass traces in centroided LC-MS data.")
{}
protected:
/// Register input/output files and the algorithm parameter subsection
void registerOptionsAndFlags_() override
{
  registerInputFile_("in", "<file>", "", "input centroided mzML file");
  setValidFormats_("in", {"mzML"});
  registerOutputFile_("out", "<file>", "", "output featureXML file with mass traces");
  setValidFormats_("out", {"featureXML", "consensusXML"});
  registerStringOption_("out_type", "<type>", "", "output file type -- default: determined from file extension or content", false);
  setValidStrings_("out_type", {"featureXML", "consensusXML"});
  addEmptyLine_();
  // defaults for this subsection are provided by getSubsectionDefaults_()
  registerSubsection_("algorithm", "Algorithm parameters section");
}
/// Assemble the default parameters for the 'algorithm' subsection:
/// a 'common' block shared by both sub-algorithms, plus the defaults of
/// MassTraceDetection ('mtd') and ElutionPeakDetection ('epd').
Param getSubsectionDefaults_(const String& /*section*/) const override
{
  // parameters shared by both sub-algorithms live in 'common'; they are
  // removed from the sub-algorithm defaults below to avoid duplication
  Param shared;
  shared.setValue("noise_threshold_int", 10.0, "Intensity threshold below which peaks are regarded as noise.");
  shared.setValue("chrom_peak_snr", 3.0, "Minimum signal-to-noise a mass trace should have.");
  shared.setValue("chrom_fwhm", 5.0, "Expected chromatographic peak width (in seconds).");

  Param mtd_defaults = MassTraceDetection().getDefaults();
  mtd_defaults.remove("noise_threshold_int");
  mtd_defaults.remove("chrom_peak_snr");

  Param epd_defaults = ElutionPeakDetection().getDefaults();
  epd_defaults.remove("noise_threshold_int");
  epd_defaults.remove("chrom_peak_snr");
  epd_defaults.remove("chrom_fwhm");
  // extra switch to turn elution peak detection on/off
  epd_defaults.setValue("enabled", "true", "Enables/disables the chromatographic peak detection of mass traces");
  epd_defaults.setValidStrings("enabled", {"true","false"});

  Param combined;
  combined.insert("common:", shared);
  combined.insert("mtd:", mtd_defaults);
  combined.insert("epd:", epd_defaults);
  return combined;
}
ExitCodes main_(int, const char**) override
{
//-------------------------------------------------------------
// parameter handling
//-------------------------------------------------------------
String in = getStringOption_("in");
String out = getStringOption_("out");
FileTypes::Type out_type = FileTypes::nameToType(getStringOption_("out_type"));
if (out_type == FileTypes::UNKNOWN)
{
out_type = FileHandler().getTypeByFileName(out);
}
//-------------------------------------------------------------
// loading input
//-------------------------------------------------------------
FileHandler mz_data_file;
PeakMap ms_peakmap;
std::vector<Int> ms_level(1, 1);
(mz_data_file.getOptions()).setMSLevels(ms_level);
mz_data_file.loadExperiment(in, ms_peakmap, {FileTypes::MZML}, log_type_);
if (ms_peakmap.empty())
{
OPENMS_LOG_WARN << "The given file does not contain any conventional peak data, but might"
" contain chromatograms. This tool currently cannot handle them, sorry.";
return INCOMPATIBLE_INPUT_DATA;
}
// make sure that the spectra are sorted by m/z
ms_peakmap.sortSpectra(true);
//-------------------------------------------------------------
// get params for MTD and EPD algorithms
//-------------------------------------------------------------
Param com_param = getParam_().copy("algorithm:common:", true);
writeDebug_("Common parameters passed to both sub-algorithms (mtd and epd)", com_param, 3);
Param mtd_param = getParam_().copy("algorithm:mtd:", true);
writeDebug_("Parameters passed to MassTraceDetection", mtd_param, 3);
Param epd_param = getParam_().copy("algorithm:epd:", true);
writeDebug_("Parameters passed to ElutionPeakDetection", epd_param, 3);
//-------------------------------------------------------------
// configure and run MTD
//-------------------------------------------------------------
MassTraceDetection mt_ext;
mtd_param.insert("", com_param);
mtd_param.remove("chrom_fwhm");
mt_ext.setParameters(mtd_param);
vector<MassTrace> m_traces;
mt_ext.run(ms_peakmap, m_traces);
vector<MassTrace> m_traces_final;
bool use_epd = epd_param.getValue("enabled").toBool();
if (!use_epd)
{
swap(m_traces_final, m_traces);
}
else
{
ElutionPeakDetection ep_det;
epd_param.remove("enabled"); // artificially added above
epd_param.insert("", com_param);
ep_det.setParameters(epd_param);
std::vector<MassTrace> split_mtraces;
// note: this step will destroy any meta data annotation (e.g. FWHM_mz_avg)
ep_det.detectPeaks(m_traces, split_mtraces);
if (ep_det.getParameters().getValue("width_filtering") == "auto")
{
m_traces_final.clear();
ep_det.filterByPeakWidth(split_mtraces, m_traces_final);
OPENMS_LOG_INFO << "Notice: " << split_mtraces.size() - m_traces_final.size()
<< " of total " << split_mtraces.size()
<< " were dropped because of too low peak width." << std::endl;
}
else
{
swap(m_traces_final, split_mtraces);
}
}
//-------------------------------------------------------------
// writing consensus map output
//-------------------------------------------------------------
if (out_type == FileTypes::CONSENSUSXML)
{
ConsensusMap consensus_map;
if (getFlag_("test"))
{
// if test mode set, add file without path so we can compare it
consensus_map.setPrimaryMSRunPath({"file://" + File::basename(in)});
}
else
{
consensus_map.setPrimaryMSRunPath({in}, ms_peakmap);
}
for (Size i = 0; i < m_traces_final.size(); ++i)
{
if (m_traces_final[i].getSize() == 0)
{
continue;
}
ConsensusFeature fcons;
int k = 0;
for (const Peak2D& mss : m_traces_final[i])
{
FeatureHandle fhandle;
fhandle.setRT(mss.getRT());
fhandle.setMZ(mss.getMZ());
fhandle.setIntensity(mss.getIntensity());
fhandle.setUniqueId(++k);
fcons.insert(fhandle);
}
fcons.setMetaValue(3, m_traces_final[i].getLabel());
fcons.setCharge(0);
fcons.setWidth(m_traces_final[i].estimateFWHM(use_epd));
fcons.setQuality(1 - (1.0 / m_traces_final[i].getSize()));
fcons.setRT(m_traces_final[i].getCentroidRT());
fcons.setMZ(m_traces_final[i].getCentroidMZ());
fcons.setIntensity(m_traces_final[i].getIntensity(false));
// attach mz peak FWHM as meta array is available
if (mt_ext.hasFwhmMz())
{
fcons.setMetaValue(Constants::UserParam::FWHM_MZ_AVG, m_traces_final[i].fwhm_mz_avg);
}
// Similarly, check for ion mobility centroid presence. Annotate output with ion mobility
if (mt_ext.hasCentroidIm())
{
//fcons.setMetaValue("Ion Mobility Centroid", m_traces_final[i].getCentroidIM());
fcons.setMetaValue(Constants::UserParam::ION_MOBILITY_CENTROID, m_traces_final[i].getCentroidIM());
}
      // if ion mobility peak FWHM is present, add it to the meta data
if (mt_ext.hasFwhmIm())
{
fcons.setMetaValue(Constants::UserParam::FWHM_IM_AVG, m_traces_final[i].fwhm_im_avg);
}
consensus_map.push_back(fcons);
}
consensus_map.applyMemberFunction(&UniqueIdInterface::setUniqueId);
addDataProcessing_(consensus_map, getProcessingInfo_(DataProcessing::QUANTITATION));
consensus_map.setUniqueId();
FileHandler().storeConsensusFeatures(out, consensus_map, {FileTypes::CONSENSUSXML});
}
else //(out_type == FileTypes::FEATUREXML)
{
//-----------------------------------------------------------
// convert mass traces to features
//-----------------------------------------------------------
std::vector<double> stats_sd;
FeatureMap ms_feat_map;
if (getFlag_("test"))
{
// if test mode set, add file without path so we can compare it
ms_feat_map.setPrimaryMSRunPath({"file://" + File::basename(in)});
}
else
{
ms_feat_map.setPrimaryMSRunPath({in}, ms_peakmap);
}
for (Size i = 0; i < m_traces_final.size(); ++i)
{
if (m_traces_final[i].getSize() == 0)
{
continue;
}
m_traces_final[i].updateMeanMZ();
m_traces_final[i].updateWeightedMZsd();
Feature f;
f.setMetaValue(3, m_traces_final[i].getLabel());
f.setCharge(0);
f.setMZ(m_traces_final[i].getCentroidMZ());
f.setIntensity(m_traces_final[i].getIntensity(false));
f.setRT(m_traces_final[i].getCentroidRT());
f.setWidth(m_traces_final[i].estimateFWHM(use_epd));
f.setOverallQuality(1 - (1.0 / m_traces_final[i].getSize()));
f.getConvexHulls().push_back(m_traces_final[i].getConvexhull());
double sd = m_traces_final[i].getCentroidSD();
f.setMetaValue(Constants::UserParam::SD, sd);
f.setMetaValue(Constants::UserParam::SD_ppm, sd / f.getMZ() * 1e6);
if (mt_ext.hasFwhmMz())
{
f.setMetaValue(Constants::UserParam::FWHM_MZ_AVG, m_traces_final[i].fwhm_mz_avg);
}
// Similarly, check for ion mobility centroid presence. Annotate output with ion mobility
if (mt_ext.hasCentroidIm())
{
f.setMetaValue(Constants::UserParam::ION_MOBILITY_CENTROID, m_traces_final[i].getCentroidIM());
}
if (mt_ext.hasFwhmIm())
{
f.setMetaValue(Constants::UserParam::FWHM_IM_AVG, m_traces_final[i].fwhm_im_avg);
}
stats_sd.push_back(m_traces_final[i].getCentroidSD());
ms_feat_map.push_back(f);
}
// print some stats about standard deviation of mass traces
if (!stats_sd.empty())
{
std::sort(stats_sd.begin(), stats_sd.end());
OPENMS_LOG_INFO << "Mass trace m/z s.d.\n"
<< " low quartile: " << stats_sd[stats_sd.size() * 1 / 4] << "\n"
<< " median: " << stats_sd[stats_sd.size() * 1 / 2] << "\n"
<< " upp quartile: " << stats_sd[stats_sd.size() * 3 / 4] << std::endl;
}
ms_feat_map.applyMemberFunction(&UniqueIdInterface::setUniqueId);
// ensure data is sorted
ms_feat_map.sortByPosition();
//-------------------------------------------------------------
// writing output
//-------------------------------------------------------------
// annotate output with data processing info TODO
addDataProcessing_(ms_feat_map, getProcessingInfo_(DataProcessing::QUANTITATION));
//ms_feat_map.setUniqueId();
FileHandler().storeFeatures(out, ms_feat_map, {FileTypes::FEATUREXML});
}
return EXECUTION_OK;
}
};
/// Program entry point: construct the tool and delegate to the TOPP framework.
int main(int argc, const char** argv)
{
  return TOPPMassTraceExtractor().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/FuzzyDiff.cpp | .cpp | 7,795 | 219 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/config.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/CONCEPT/FuzzyStringComparator.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <OpenMS/SYSTEM/File.h>
#include <algorithm>
#include <fstream>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_FuzzyDiff FuzzyDiff
@brief Compares two files, tolerating numeric differences.
In the diff output, \"position\" refers to the characters in the string, whereas \"column\" is meant for the text editor.
Only one of 'ratio' or 'absdiff' has to be satisfied. Use \"absdiff\" to deal with cases like \"zero vs. epsilon\".
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_FuzzyDiff.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_FuzzyDiff.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPFuzzyDiff :
  public TOPPBase
{
public:
  TOPPFuzzyDiff() :
    TOPPBase("FuzzyDiff", "Compares two files, tolerating numeric differences.")
  {
  }

protected:
  /// Register the command line parameters of the tool.
  void registerOptionsAndFlags_() override
  {
    addEmptyLine_();
    registerInputFile_("in1", "<file>", "", "first input file", true, false);
    registerInputFile_("in2", "<file>", "", "second input file", true, false);
    addEmptyLine_();
    registerDoubleOption_("ratio", "<double>", 1, R"(acceptable relative error. Only one of 'ratio' or 'absdiff' has to be satisfied. Use "absdiff" to deal with cases like "zero vs. epsilon".)", false, false);
    setMinFloat_("ratio", 1);
    registerDoubleOption_("absdiff", "<double>", 0, "acceptable absolute difference. Only one of 'ratio' or 'absdiff' has to be satisfied. ", false, false);
    setMinFloat_("absdiff", 0);
    addEmptyLine_();
    registerStringList_("whitelist", "<string list>", ListUtils::create<String>("<?xml-stylesheet"), "Lines containing one of these strings are skipped", false, true);
    registerStringList_("matched_whitelist", "<string list>", ListUtils::create<String>(""), "Lines where one file contains one string and the other file another string are skipped. Input is given as list of colon separated tuples, e.g. String1:String2 String3:String4", false, true);
    registerIntOption_("verbose", "<int>", 2, "set verbose level:\n"
                                              "0 = very quiet mode (absolutely no output)\n"
                                              "1 = quiet mode (no output unless differences detected)\n"
                                              "2 = default (include summary at end)\n"
                                              "3 = continue after errors\n",
                       false, false
                       );
    setMinInt_("verbose", 0);
    setMaxInt_("verbose", 3);
    registerIntOption_("tab_width", "<int>", 8, "tabulator width, used for calculation of column numbers", false, false);
    setMinInt_("tab_width", 1);
    registerIntOption_("first_column", "<int>", 1, "number of first column, used for calculation of column numbers", false, false);
    setMinInt_("first_column", 0);
    addEmptyLine_();
    registerFlag_("sort", "sort the input files before comparison (useful for tabular files where row order may vary). The first line of each file is assumed to be a header and is not sorted.");
  }

  /**
    @brief Compare the two input files with numeric tolerance.

    Optionally (flag 'sort') each file is first rewritten to a temporary file
    with its data lines sorted (the first line is treated as a header and kept
    in place). Temporary files are removed again, also if the comparison throws.

    @return EXECUTION_OK if the files match within tolerance, PARSE_ERROR otherwise
  */
  ExitCodes main_(int, const char **) override
  {
    //-------------------------------------------------------------
    // parameter handling
    //-------------------------------------------------------------
    String in1 = getStringOption_("in1");
    String in2 = getStringOption_("in2");
    double acceptable_ratio = getDoubleOption_("ratio");
    double acceptable_absdiff = getDoubleOption_("absdiff");
    StringList whitelist = getStringList_("whitelist");
    StringList raw_matched_whitelist = getStringList_("matched_whitelist");
    int verbose_level = getIntOption_("verbose");
    int tab_width = getIntOption_("tab_width");
    int first_column = getIntOption_("first_column");
    bool do_sort = getFlag_("sort");
    // This is for debugging the parsing of whitelist_ from cmdline or ini file. Converting StringList back to String is intentional.
    writeDebug_(String("whitelist: ") + String(whitelist) + " (size: " + whitelist.size() + ")", 1);
    writeDebug_(String("matched_whitelist: ") + String(raw_matched_whitelist) + " (size: " + raw_matched_whitelist.size() + ")", 1);
    OpenMS::FuzzyStringComparator fsc;
    // parse "String1:String2" tuples into pairs for the matched whitelist
    std::vector< std::pair<std::string, std::string> > parsed_matched_whitelist;
    for (Size i = 0; i < raw_matched_whitelist.size(); i++)
    {
      // Split each entry at the colon to produce a pair of strings
      std::vector<String> tmp;
      raw_matched_whitelist[i].split(":", tmp);
      if (tmp.size() != 2)
      {
        throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                         String(raw_matched_whitelist[i]) + " does not have the format String1:String2");
      }
      parsed_matched_whitelist.emplace_back(tmp[0], tmp[1]);
    }
    fsc.setAcceptableRelative(acceptable_ratio);
    fsc.setAcceptableAbsolute(acceptable_absdiff);
    fsc.setWhitelist(whitelist);
    fsc.setMatchedWhitelist(parsed_matched_whitelist);
    fsc.setVerboseLevel(verbose_level);
    fsc.setTabWidth(tab_width);
    fsc.setFirstColumn(first_column);
    // If sorting is requested, sort both files (keeping header) and write to temp files
    String compare_in1 = in1;
    String compare_in2 = in2;
    String temp_file1, temp_file2;
    if (do_sort)
    {
      auto sortFile = [](const String& input_file, String& output_file)
      {
        std::ifstream infile(input_file);
        if (!infile)
        {
          throw Exception::FileNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, input_file);
        }
        std::string header;
        std::vector<std::string> lines;
        // Read header (first line); an empty input file has no header at all
        const bool has_header = static_cast<bool>(std::getline(infile, header));
        if (has_header)
        {
          // Read remaining lines
          std::string line;
          while (std::getline(infile, line))
          {
            lines.push_back(line);
          }
        }
        infile.close();
        // Sort data lines
        std::sort(lines.begin(), lines.end());
        // Write to temp file
        output_file = File::getTempDirectory() + "/" + File::basename(input_file) + ".sorted." + File::getUniqueName() + ".tmp";
        std::ofstream outfile(output_file);
        if (!outfile)
        {
          throw Exception::UnableToCreateFile(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, output_file);
        }
        // BUGFIX: only write the header line if one was actually read; previously
        // an empty input file produced a temp file containing a single blank line,
        // which changed the outcome of the comparison.
        if (has_header)
        {
          outfile << header << "\n";
        }
        for (const auto& l : lines)
        {
          outfile << l << "\n";
        }
        outfile.close();
      };
      sortFile(in1, temp_file1);
      try
      {
        sortFile(in2, temp_file2);
      }
      catch (...)
      {
        // do not leak the first temp file if sorting the second file fails
        File::remove(temp_file1);
        throw;
      }
      compare_in1 = temp_file1;
      compare_in2 = temp_file2;
    }
    bool result = false;
    try
    {
      result = fsc.compareFiles(compare_in1, compare_in2);
    }
    catch (...)
    {
      // clean up temp files even if the comparison itself throws
      if (do_sort)
      {
        File::remove(temp_file1);
        File::remove(temp_file2);
      }
      throw;
    }
    // Clean up temp files
    if (do_sort)
    {
      File::remove(temp_file1);
      File::remove(temp_file2);
    }
    if (result)
    {
      return EXECUTION_OK;
    }
    else
    {
      // TODO think about better exit codes.
      return PARSE_ERROR;
    }
  }

};
/// Program entry point: construct the tool and delegate to the TOPP framework.
int main(int argc, const char ** argv)
{
  return TOPPFuzzyDiff().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/MapRTTransformer.cpp | .cpp | 9,352 | 214 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Marc Sturm, Clemens Groepl, Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/MapAlignerBase.h>
#include <OpenMS/FORMAT/FileHandler.h>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
// Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_MapRTTransformer MapRTTransformer
@brief Applies retention time transformations to maps.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> potential predecessor tools </td>
<td VALIGN="middle" ROWSPAN=2> → MapRTTransformer →</td>
<th ALIGN = "center"> potential successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_MapAlignerIdentification @n (or another alignment algorithm) </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_FeatureLinkerUnlabeled or @n @ref TOPP_FeatureLinkerUnlabeledQT </td>
</tr>
</table>
</CENTER>
This tool can apply retention time transformations to different types of data (mzML, featureXML, consensusXML, and idXML files).
The transformations might have been generated by a previous invocation of one of the MapAligner tools (linked below).
However, the trafoXML file format is not very complicated, so it is relatively easy to write (or generate) your own files.
Each input file will give rise to one output file.
@see @ref TOPP_MapAlignerIdentification @ref TOPP_MapAlignerPoseClustering
With this tool it is also possible to invert transformations, or to fit a different model than originally specified to the retention time data in the transformation files. To fit a new model, choose a value other than "none" for the model type (see below).
Original retention time values can be kept as meta data. With the option @p store_original_rt, meta values with the name "original_RT" and the original retention time will be created for every major data element (spectrum, chromatogram, feature, consensus feature, peptide identification), unless they already exist - "original_RT" values from a previous invocation will not be overwritten.
Since %OpenMS 1.8, the extraction of data for the alignment has been separate from the modeling of RT transformations based on that data. It is now possible to use different models independently of the chosen algorithm. The different available models are:
- @ref OpenMS::TransformationModelLinear "linear": Linear model.
- @ref OpenMS::TransformationModelBSpline "b_spline": Smoothing spline (non-linear).
- @ref OpenMS::TransformationModelInterpolated "interpolated": Different types of interpolation.
The following parameters control the modeling of RT transformations (they can be set in the "model" section of the INI file):
@htmlinclude OpenMS_MapRTTransformerModel.parameters @n
@note As output options, either @p out or @p trafo_out has to be provided. They can be used together.
@note Currently mzIdentML (mzid) is not directly supported as an input/output format of this tool. Convert mzid files to/from idXML using @ref TOPP_IDFileConverter if necessary.
<B>The command line parameters of this tool are:</B> @n
@verbinclude TOPP_MapRTTransformer.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_MapRTTransformer.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPMapRTTransformer :
public TOPPBase
{
public:
TOPPMapRTTransformer() :
TOPPBase("MapRTTransformer", "Applies retention time transformations to maps.")
{
}
protected:
void registerOptionsAndFlags_() override
{
String file_formats = "mzML,featureXML,consensusXML,idXML";
// "in" is not required, in case we only want to invert a transformation:
registerInputFile_("in", "<file>", "", "Input file to transform (separated by blanks)", false);
setValidFormats_("in", ListUtils::create<String>(file_formats));
registerOutputFile_("out", "<file>", "", "Output file (same file type as 'in'). This option or 'trafo_out' has to be provided; they can be used together.", false);
setValidFormats_("out", ListUtils::create<String>(file_formats));
registerInputFile_("trafo_in", "<file>", "", "Transformation to apply");
setValidFormats_("trafo_in", ListUtils::create<String>("trafoXML"));
registerOutputFile_("trafo_out", "<file>", "", "Transformation output file. This option or 'out' has to be provided; they can be used together.", false);
setValidFormats_("trafo_out", ListUtils::create<String>("trafoXML"));
registerFlag_("invert", "Invert transformation (approximatively) before applying it");
registerFlag_("store_original_rt", "Store the original retention times (before transformation) as meta data in the output file");
addEmptyLine_();
registerSubsection_("model", "Options to control the modeling of retention time transformations from data");
}
Param getSubsectionDefaults_(const String& /* section */) const override
{
return MapAlignerBase::getModelDefaults("none");
}
template <class TMap>
void applyTransformation_(const TransformationDescription& trafo, TMap& map)
{
bool store_original_rt = getFlag_("store_original_rt");
MapAlignmentTransformer::transformRetentionTimes(map, trafo,
store_original_rt);
addDataProcessing_(map, getProcessingInfo_(DataProcessing::ALIGNMENT));
}
ExitCodes main_(int, const char**) override
{
//-------------------------------------------------------------
// parameter handling
//-------------------------------------------------------------
String in = getStringOption_("in");
String out = getStringOption_("out");
String trafo_in = getStringOption_("trafo_in");
String trafo_out = getStringOption_("trafo_out");
Param model_params = getParam_().copy("model:", true);
String model_type = model_params.getValue("type").toString();
model_params = model_params.copy(model_type + ":", true);
ProgressLogger progresslogger;
progresslogger.setLogType(log_type_);
//-------------------------------------------------------------
// check for valid input
//-------------------------------------------------------------
if (out.empty() && trafo_out.empty())
{
writeLogError_("Error: A data or a transformation output file has to be provided (parameters 'out'/'trafo_out')");
return ILLEGAL_PARAMETERS;
}
if (in.empty() != out.empty())
{
writeLogError_("Error: Data input and output parameters ('in'/'out') must be used together");
return ILLEGAL_PARAMETERS;
}
//-------------------------------------------------------------
// apply transformation
//-------------------------------------------------------------
TransformationDescription trafo;
FileHandler().loadTransformations(trafo_in, trafo, true, {FileTypes::TRANSFORMATIONXML});
if (model_type != "none")
{
trafo.fitModel(model_type, model_params);
}
if (getFlag_("invert"))
{
trafo.invert();
}
if (!trafo_out.empty())
{
FileHandler().storeTransformations(trafo_out, trafo, {FileTypes::TRANSFORMATIONXML});
}
if (!in.empty()) // load input
{
FileTypes::Type in_type = FileHandler::getType(in);
if (in_type == FileTypes::MZML)
{
PeakMap map;
FileHandler().loadExperiment(in, map, {FileTypes::MZML}, log_type_);
applyTransformation_( trafo, map);
FileHandler().storeExperiment(out, map, {FileTypes::MZML}, log_type_);
}
else if (in_type == FileTypes::FEATUREXML)
{
FeatureMap map;
FileHandler().loadFeatures(in, map, {FileTypes::FEATUREXML}, log_type_);
applyTransformation_( trafo, map);
FileHandler().storeFeatures(out, map, {FileTypes::FEATUREXML}, log_type_);
}
else if (in_type == FileTypes::CONSENSUSXML)
{
ConsensusMap map;
FileHandler().loadConsensusFeatures(in, map, {FileTypes::CONSENSUSXML}, log_type_);
applyTransformation_( trafo, map);
FileHandler().storeConsensusFeatures(out, map, {FileTypes::CONSENSUSXML}, log_type_);
}
else if (in_type == FileTypes::IDXML)
{
vector<ProteinIdentification> proteins;
PeptideIdentificationList peptides;
FileHandler().loadIdentifications(in, proteins, peptides, {FileTypes::IDXML}, log_type_);
bool store_original_rt = getFlag_("store_original_rt");
MapAlignmentTransformer::transformRetentionTimes(peptides, trafo,
store_original_rt);
// no "data processing" section in idXML
FileHandler().storeIdentifications(out, proteins, peptides, {FileTypes::IDXML}, log_type_);
}
}
return EXECUTION_OK;
}
};
/// Program entry point: construct the tool and delegate to the TOPP framework.
int main(int argc, const char** argv)
{
  return TOPPMapRTTransformer().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/ClusterMassTracesByPrecursor.cpp | .cpp | 16,272 | 397 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/MasstraceCorrelator.h>
#include <OpenMS/FORMAT/FileHandler.h>
#ifdef TESTING
#define DEBUG_MASSTRACES
#endif
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_ClusterMassTracesByPrecursor ClusterMassTracesByPrecursor
@brief Identifies precursor mass traces and tries to correlate them with fragment ion mass traces in SWATH maps.
This algorithm will try to correlate the masstraces to find co-eluting traces and cluster them.
This program looks at mass traces in a precursor MS1 map and tries to
correlate them with features found in the corresponding MS2 map based on
their elution profile. It uses
  - the mass traces from the MS1 in consensusXML format [note this is an unintended use of the consensusXML format to also store intensities]
- the mass traces from the MS2 (SWATH map)
It does a separate correlation analysis on the MS1 and the MS2 map,
both produces a set of pseudo spectra.
In a second (optional) step, the MS2 pseudo spectra are correlated with
the MS1 traces and the most likely precursor is assigned to the pseudo
spectrum.
It is based on the following papers:
ETISEQ -- an algorithm for automated elution time ion sequencing of concurrently fragmented peptides for mass spectrometry-based proteomics
BMC Bioinformatics 2009, 10:244 doi:10.1186/1471-2105-10-244 ; http://www.biomedcentral.com/1471-2105/10/244
they use FFT to correlate and then use lag of at least 1 scan and pearson correlation of 0.7 to assign precursors to product ions
If one fragment matches to multiple precursors, it is assigned to all of them. If it doesn't match any, it is assigned to all
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_ClusterMassTracesByPrecursor.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_ClusterMassTracesByPrecursor.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
#include <OpenMS/APPLICATIONS/TOPPBase.h>
using namespace std;
using namespace OpenMS;
class TOPPCorrelateMasstraces
  : public TOPPBase,
    public ProgressLogger
{
 public:
  TOPPCorrelateMasstraces()
    : TOPPBase("ClusterMassTracesByPrecursor", "Correlate precursor masstraces with fragment ion masstraces in SWATH maps based on their elution profile.")
  {
  }
 protected:
  /// Register input/output files and the correlation/clustering parameters.
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in_ms1","<file>","","MS1 mass traces");
    setValidFormats_("in_ms1",ListUtils::create<String>("consensusXML"));
    registerInputFile_("in_swath","<file>","","MS2 / SWATH mass traces");
    setValidFormats_("in_swath",ListUtils::create<String>("consensusXML"));
    registerOutputFile_("out","<file>","","output file");
    setValidFormats_("out",ListUtils::create<String>("mzML"));
    // registerFlag_("ms1_centric","MS1 centric - find MS1 features first and then add MS2s (MSE like)");
    registerFlag_("assign_unassigned_to_all","Assign unassigned MS2 fragments to all precursors (only for ms1_centrif)");
    registerDoubleOption_("min_pearson_correlation", "<double>", 0.7, "Minimal pearson correlation score to match elution profiles to each other.", false); // try 0.3, 0.5 and 0.7
    registerIntOption_("max_lag", "<number>", 1, "Maximal lag (e.g. by how many spectra the peak may be shifted at most). This parameter will depend on your chromatographic setup but a number between 1 and 3 is usually sensible.", false);
    registerIntOption_("min_nr_ions", "<number>", 3, "Minimal number of ions to report a spectrum.", false);
    registerDoubleOption_("max_rt_apex_difference", "<double>", 5.0, "Maximal difference of the apex in retention time (in seconds). This is a hard parameter, all profiles further away will not be considered at all.", false);
    registerDoubleOption_("swath_lower", "<double>", 0.0, "Swath lower isolation window", false);
    registerDoubleOption_("swath_upper", "<double>", 0.0, "Swath upper isolation window", false);
  }
 public:
  /// Load the MS1 and MS2 (SWATH) mass trace maps, run the MS1-centric
  /// clustering, and store the resulting pseudo spectra as mzML.
  ExitCodes main_(int , const char**) override
  {
    setLogType(log_type_);
    String ms1 = getStringOption_("in_ms1");
    String in_swath = getStringOption_("in_swath");
    String out = getStringOption_("out");
    // bool ms1_centric = getFlag_("ms1_centric");
    double swath_lower = getDoubleOption_("swath_lower");
    double swath_upper = getDoubleOption_("swath_upper");
    // Load input:
    //  - MS1 feature map containing the MS1 mass traces
    //  - MS2 feature map containing the MS2 (SWATH) mass traces
    ConsensusMap MS1_feature_map;
    ConsensusMap MS2_feature_map;
    FileHandler().loadConsensusFeatures(ms1, MS1_feature_map, {FileTypes::CONSENSUSXML}, log_type_);
    FileHandler().loadConsensusFeatures(in_swath, MS2_feature_map, {FileTypes::CONSENSUSXML}, log_type_);
    cout << "Loaded consensus maps" << endl;
#ifdef DEBUG_MASSTRACES
    for (Size i=0; i<MS1_feature_map.size(); ++i)
    {
      ConsensusFeature f1 = MS1_feature_map[i];
      cout << "MS1 mass trace " << i << " at " << f1.getMZ() << " and " <<
        f1.getRT() << " +/- " << f1.getWidth() << " with " << f1.getIntensity() << endl;
    }
#endif
    MSExperiment pseudo_spectra_ms1centric;
    MS1CentricClustering(MS1_feature_map, MS2_feature_map,
        swath_lower, swath_upper, pseudo_spectra_ms1centric);
    FileHandler().storeExperiment(out,pseudo_spectra_ms1centric, {FileTypes::MZML}, log_type_);
    return EXECUTION_OK;
  }
  /** @brief Cluster fragment ions with their corresponding precursors
   *
   * This is based on the ETISEQ algorithm and works as follows:
   *
   * - Identify the precursor traces
   * - For each precursor determine which are the most likely fragments and
   *   then assign those to the precursor
   * - Assign unassigned fragments to scans
   * - Create actual precursor spectra
   *
   * TODO: incorporate elements from DIAUmpire
   *  - allow ions to be assigned to multiple precursors
   *  - also generate mass traces from the unfragmented precursors
   *
   * @param MS1_feature_map precursor (MS1) mass traces
   * @param MS2_feature_map fragment (MS2/SWATH) mass traces
   * @param swath_lower lower m/z bound of the isolation window; MS1 traces outside are skipped
   * @param swath_upper upper m/z bound of the isolation window
   * @param pseudo_spectra_precursors1 output experiment receiving one pseudo MS2 spectrum per precursor
   */
  void MS1CentricClustering(ConsensusMap& MS1_feature_map, ConsensusMap& MS2_feature_map,
    double swath_lower, double swath_upper,
    MSExperiment& pseudo_spectra_precursors1)
  {
    // -----------------------------------
    // Parameters
    // -----------------------------------
    double min_pscore = getDoubleOption_("min_pearson_correlation");
    int max_lag = getIntOption_("max_lag");
    double rt_max_distance = getDoubleOption_("max_rt_apex_difference");
    Size min_nr_ions = (Size)getIntOption_("min_nr_ions");
    bool unassigned = getFlag_("assign_unassigned_to_all");
    // to consider all signals within 2 seconds equal makes sense with
    // 3.2 seconds between each recording => each swath will be within
    // +/- 2.0 seconds of a full scan
    double mindiff = 2.0;
    OpenMS::MasstraceCorrelator mtcorr;
    std::map< int, std::vector< std::vector<double> > > feature_attributes; // temporary array storing the attributes for all the features
    std::vector<bool> ms2feature_used;
    std::map< int, std::vector<int> > ms1_assignment_map; // map MS1 feature ids to MS2 feature ids
    ms2feature_used.resize(MS2_feature_map.size());
    // -----------------------------------
    // Cache datastructures
    // -----------------------------------
    // We cache the RT and intensities of each feature
    std::vector< MasstraceCorrelator::MasstracePointsType > feature_points_ms2;
    std::vector< std::pair<double,double> > max_intensities_ms2;
    std::vector< double > rt_cache_ms2;
    mtcorr.createConsensusMapCache(MS2_feature_map, feature_points_ms2, max_intensities_ms2, rt_cache_ms2);
    std::vector< MasstraceCorrelator::MasstracePointsType > feature_points_ms1;
    std::vector< std::pair<double,double> > max_intensities_ms1;
    std::vector< double > rt_cache_ms1;
    mtcorr.createConsensusMapCache(MS1_feature_map, feature_points_ms1, max_intensities_ms1, rt_cache_ms1);
    // cache the m/z of each MS1 feature
    std::vector< double > mz_cache_ms1;
    for (Size i = 0; i < MS1_feature_map.size(); ++i)
    {
      mz_cache_ms1.push_back(MS1_feature_map[i].getMZ());
    }
    // raw pointer used to walk rt_cache_ms2 in the inner loop below
    double* rt_cache_ptr;
    double current_rt;
    // -----------------------------------
    // Step 1 - assign fragment mass traces to precursors
    //
    // Go through all precursors and find suitable MS2 signals which could
    // potentially belong to this precursor.
    //
    startProgress(0, MS1_feature_map.size(), "assigning precursor to fragment ions");
    for (Size i=0; i<MS1_feature_map.size(); ++i)
    {
      setProgress(i);
      // only consider precursors inside the isolation window
      if (mz_cache_ms1[i] < swath_lower || mz_cache_ms1[i] > swath_upper) continue;
      ms1_assignment_map[i].clear();
      // Identify a given precursor and get its RT (current_rt)
      //
      // Obtain a pointer to the beginning of the RT vector of all MS2 features
      // (and decrement by one since in the loop we first increment the ptr)
      current_rt = rt_cache_ms1[i];
      rt_cache_ptr = &rt_cache_ms2[0];
      --rt_cache_ptr;
      for (Size j=0; j<MS2_feature_map.size(); ++j)
      {
        ++rt_cache_ptr;
        // First check whether this feature is within a suitable RT distance
        // and that is not already used.
        // Check whether the feature is already used
        // TODO : this implies we can assign only one feature to one
        //        precursor, we might have to change that! See DIA Umpire!
        if (fabs(current_rt - (*rt_cache_ptr) ) > rt_max_distance ) continue;
        if (ms2feature_used[j]) continue;
        // NOTE(review): the debug block below (and the later ones) references
        // f1_points/f2_points/f1/f2 which are not defined in this scope; it
        // appears to predate the caching refactor and would not compile if
        // DEBUG_MASSTRACES were defined - confirm and update or remove.
#ifdef DEBUG_MASSTRACES
        for (Size kk=0; kk<f1_points.size(); kk++)
        {
          cout << f1_points[kk].first << " f/s " << f1_points[kk].second << endl;
        }
        cout << " above prec, below frag " << endl;
        for (Size kk=0; kk<f2_points.size(); kk++)
        {
          cout << f2_points[kk].first << " f/s " << f2_points[kk].second << endl;
        }
#endif
        // Score the MS1 mass trace against the MS2 mass trace
        int lag; double lag_intensity; double pearson_score;
        mtcorr.scoreHullpoints(feature_points_ms1[i], feature_points_ms2[j],
            lag, lag_intensity, pearson_score, min_pscore, max_lag, mindiff);
        if (pearson_score > min_pscore && lag >= -max_lag && lag <= max_lag)
        {
#ifdef DEBUG_MASSTRACES
          cout << "assign fragment to precursor! " << f1.getMZ() << " -> " << f2.getMZ() <<
            " [scores " << lag << " " << pearson_score << "]" << endl;
#endif
          ms2feature_used[j] = true;
          ms1_assignment_map[i].push_back(j);
          std::vector< double > feature_arr;
          feature_arr.push_back(rt_cache_ms2[j]); // MS2 retention time
          feature_arr.push_back(fabs(rt_cache_ms1[i] - rt_cache_ms2[j] ) ); // difference between MS1 and MS2 RT
          feature_arr.push_back(lag); // lag
          feature_arr.push_back(pearson_score); // pearson score
          feature_arr.push_back(lag_intensity); // lag intensity
          feature_attributes[i].push_back(feature_arr);
        }
      }
      // only keep those assignments which have enough ions
      if (ms1_assignment_map[i].size() <= min_nr_ions)
      {
        ms1_assignment_map[i].clear();
      }
#ifdef DEBUG_MASSTRACES
      if (ms1_assignment_map[i].size() > 1)
      {
        cout << i << " idx " << " " << f1 << " size " << MS1_feature_map[i].size() << endl;
        cout << " to precursor " << i << " i assigned " << ms1_assignment_map[i].size() << " points" << endl;
      }
      cout << "MS1 mass trace " << i << " at " << f1.getMZ() << " and " << f1.getRT( ) << " with " << f1.getIntensity() << endl;
#endif
    }
    endProgress();
    // Stats
    Size cnt_ms2_used = 0;
    Size cnt_ms1_used = 0;
    for (Size i = 0; i < MS1_feature_map.size(); i++)
    {
      if (!ms1_assignment_map[i].empty()) cnt_ms1_used++;
    }
    for (Size i = 0; i < ms2feature_used.size(); i++) {
      if (ms2feature_used[i]) cnt_ms2_used++;
    }
    std::cout <<"I have assigned " << cnt_ms2_used << " (out of " << MS2_feature_map.size() <<
      ") MS2 features to " << cnt_ms1_used << " (out of " << MS1_feature_map.size() << ") MS1 features " << std::endl;
    // -----------------------------------
    // Step 2 - assign the unused fragment ions (if requested)
    //
    // TODO :
    //  i) just assign them to all potentially matching spectra
    // ii) assign a fragment ion only to a single precursor
    //
    // NOTE(review): fragments appended here get no matching entry in
    // feature_attributes, while step 3 indexes feature_attributes[i][j] for
    // every assignment - with 'assign_unassigned_to_all' set this reads out
    // of bounds; confirm and guard.
    int cnt = 0;
    startProgress(0, MS2_feature_map.size(), "assigning the unused fragments ");
    for (Size j=0; j<MS2_feature_map.size() && unassigned; ++j)
    {
      setProgress(j);
      if (ms2feature_used[j]) continue;
      cnt++;
      // find suitable MS1 spectra to assign these
      for (Size i=0; i<MS1_feature_map.size(); ++i)
      {
        if (mz_cache_ms1[i] < swath_lower || mz_cache_ms1[i] > swath_upper ) continue;
        if (ms1_assignment_map[i].empty()) continue;
        if (fabs(rt_cache_ms1[i] - rt_cache_ms2[j]) > rt_max_distance) continue;
        // Assign to all matching MS1 precursors
        ms1_assignment_map[i].push_back(j);
      }
    }
    endProgress();
    cout << "There were " << cnt << " (out of " << MS2_feature_map.size() << " ) unused fragment ions that were assigned to all spectra within RT range." << endl;
    // -----------------------------------
    // Step 3 - create spectra and assign precursor and fragments to spectra
    cnt = 0;
    startProgress(0, MS1_feature_map.size(), "create the spectra and assign the fragments ");
    for (Size i=0; i<MS1_feature_map.size(); ++i)
    {
      setProgress(i);
      if (mz_cache_ms1[i] < swath_lower || mz_cache_ms1[i] > swath_upper) continue;
      // build one pseudo MS2 spectrum per precursor, with the precursor m/z attached
      MSSpectrum spectrum;
      ConsensusFeature f2 = MS1_feature_map[i];
      spectrum.setRT(f2.getRT());
      spectrum.setMSLevel(2);
      Precursor p;
      p.setMZ(f2.getMZ());
      std::vector<Precursor> preclist;
      preclist.push_back(p);
      spectrum.setPrecursors(preclist);
      // fill meta data (one float data array per attribute recorded in step 1)
      spectrum.getFloatDataArrays().clear();
      spectrum.getFloatDataArrays().resize(5);
      spectrum.getFloatDataArrays()[0].setName("RT_apex");
      spectrum.getFloatDataArrays()[1].setName("RT_diff");
      spectrum.getFloatDataArrays()[2].setName("lag");
      spectrum.getFloatDataArrays()[3].setName("pearson_score");
      spectrum.getFloatDataArrays()[4].setName("lag_intensity");
      int j = 0;
      for (std::vector<int>::iterator it = ms1_assignment_map[i].begin(); it != ms1_assignment_map[i].end(); ++it)
      {
        ConsensusFeature f1 = MS2_feature_map[*it];
        Peak1D peak;
        peak.setMZ(f1.getMZ());
        peak.setIntensity(f1.getIntensity());
        spectrum.push_back(peak);
        spectrum.getFloatDataArrays()[0].push_back(feature_attributes[i][j][0]);
        spectrum.getFloatDataArrays()[1].push_back(feature_attributes[i][j][1]);
        spectrum.getFloatDataArrays()[2].push_back(feature_attributes[i][j][2]);
        spectrum.getFloatDataArrays()[3].push_back(feature_attributes[i][j][3]);
        spectrum.getFloatDataArrays()[4].push_back(feature_attributes[i][j][4]);
        j++;
      }
      // only report spectra with enough fragment ions
      if (spectrum.size() > min_nr_ions)
      {
        pseudo_spectra_precursors1.addSpectrum(spectrum);
        cnt++;
#ifdef DEBUG_MASSTRACES
        cout << "MS1 mass trace " << i << " was assigned " << ms1_assignment_map[i].size() << " " << f2.getRT() << " " << f2.getMZ() << " " << f2.getIntensity() << endl;
#endif
      }
    }
    endProgress();
    cout << "There were " << cnt << " precursor ions with more than " << min_nr_ions << " fragment ion assigned." << endl;
  }
};
/// Program entry point: construct the tool and hand command-line processing
/// over to TOPPBase::main(), returning its exit code to the shell.
int main(int argc, const char** argv)
{
  return TOPPCorrelateMasstraces().main(argc, argv);
}
///@endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/ExecutePipeline.cpp | .cpp | 6,354 | 184 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Johannes Veit $
// $Authors: Johannes Junker, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/config.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/VISUAL/TOPPASScene.h>
#include <OpenMS/SYSTEM/File.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <OpenMS/VISUAL/TOPPASResources.h>
#include <QApplication>
#include <QtCore/QDir>
#include <iostream>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_ExecutePipeline ExecutePipeline
@brief Executes workflows created by TOPPAS.
This tool is the non-GUI, i.e. command line version for non-interactive execution of TOPPAS pipelines.
In order to really use this tool in batch-mode, you can provide a TOPPAS resource file (.trf) which specifies the
input files for the input nodes in your pipeline.
<B> *.trf files </B>
A TOPPAS resource file (<TT>*.trf</TT>) specifies the locations of input files for a pipeline.
It is an XML file following the normal TOPP INI file schema, i.e. it can be edited using the INIFileEditor or filled using a script (we do NOT provide one - sorry).
It can be exported from TOPPAS (<TT>File -> Save TOPPAS resource file</TT>). For two input nodes 1 and 2 with files (<TT>dataA.mzML</TT>, <TT>dataB.mzML</TT>) and (<TT>dataC.mzML</TT>) respectively it has the following format.
\code
<?xml version="1.0" encoding="ISO-8859-1"?>
<PARAMETERS version="1.3" xsi:noNamespaceSchemaLocation="http://open-ms.sourceforge.net/schemas/Param_1_3.xsd" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<NODE name="1" description="">
<ITEMLIST name="url_list" type="string" description="">
<LISTITEM value="file:///Users/jeff/dataA.mzML"/>
<LISTITEM value="file:///Users/jeff/dataB.mzML"/>
</ITEMLIST>
</NODE>
<NODE name="2" description="">
<ITEMLIST name="url_list" type="string" description="">
<LISTITEM value="file:///Users/jeff/dataC.mzML"/>
</ITEMLIST>
</NODE>
</PARAMETERS>
\endcode
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_ExecutePipeline.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_ExecutePipeline.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
/// TOPP tool that executes a TOPPAS workflow (.toppas) from the command line,
/// i.e. without the TOPPAS GUI. Input files can be supplied via a TOPPAS
/// resource file (*.trf); outputs are collected in a user-chosen directory.
class TOPPExecutePipeline :
  public TOPPBase
{
public:
  TOPPExecutePipeline() :
    TOPPBase("ExecutePipeline",
             "Executes workflows created by TOPPAS.")
  {
  }

protected:
  /// Registers the command line parameters of this tool.
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "The workflow to be executed.");
    setValidFormats_("in", ListUtils::create<String>("toppas"));
    registerStringOption_("out_dir", "<directory>", "", "Directory for output files (default: user's home directory)", false);
    registerStringOption_("resource_file", "<file>", "", "A TOPPAS resource file (*.trf) specifying the files this workflow is to be applied to", false);
    registerIntOption_("num_jobs", "<integer>", 1, "Maximum number of jobs running in parallel", false, false);
    setMinInt_("num_jobs", 1);
  }

  /// Loads the workflow, wires the TOPPASScene signals into a (headless)
  /// QApplication event loop, resolves the output directory and runs the
  /// pipeline. Returns EXECUTION_OK on success.
  ExitCodes main_(int argc, const char ** argv) override
  {
    QString toppas_file = getStringOption_("in").toQString();
    QString out_dir_name = getStringOption_("out_dir").toQString();
    QString resource_file = getStringOption_("resource_file").toQString();
    int num_jobs = getIntOption_("num_jobs");
    // non-GUI QApplication: needed because TOPPASScene is Qt-based
    QApplication a(argc, const_cast<char **>(argv), false);
    //set & create temporary path -- make sure its a new subdirectory, as it will be deleted later
    QString new_tmp_dir = File::getUniqueName().toQString();
    QDir qd(File::getTempDirectory().toQString());
    qd.mkdir(new_tmp_dir);
    qd.cd(new_tmp_dir);
    QString tmp_path = qd.absolutePath();
    TOPPASScene ts(nullptr, tmp_path, false);
    // quit the event loop once the whole pipeline has finished ...
    if (! a.connect(&ts, &TOPPASScene::entirePipelineFinished, &a, &QApplication::quit))
    {
      return UNKNOWN_ERROR;
    }
    // ... or bail out with an error exit code if any node fails
    if (! a.connect(&ts, &TOPPASScene::pipelineExecutionFailed, &ts, &TOPPASScene::quitWithError))
    {
      return UNKNOWN_ERROR;
    }
    ts.load(toppas_file);
    ts.setAllowedThreads(num_jobs);
    if (resource_file != "")
    {
      // assign input files from the .trf resource file to the input nodes
      TOPPASResources resources;
      resources.load(resource_file);
      ts.loadResources(resources);
    }
    if (out_dir_name != "")
    {
      // explicit output directory: normalize and verify it exists
      if (QDir::isRelativePath(out_dir_name))
      {
        out_dir_name = QDir::currentPath() + QDir::separator() + out_dir_name;
      }
      out_dir_name = QDir::cleanPath(out_dir_name);
      if (File::exists(out_dir_name) && File::isDirectory(out_dir_name))
      {
        ts.setOutDir(out_dir_name);
      }
      else
      {
        cout << "The specified output directory does not exist." << endl;
        return CANNOT_WRITE_OUTPUT_FILE;
      }
    }
    else
    {
      // no output directory given: fall back to a subdirectory (named after the
      // workflow file) of the scene's default output location
      QFileInfo fi(ts.getSaveFileName().toQString());
      out_dir_name = QDir::cleanPath(ts.getOutDir() + QDir::separator() + String(fi.baseName()).toQString() + QDir::separator());
      cout << "No output directory specified. Using the user's home directory (" << out_dir_name.toStdString() << ")" << endl;
      ts.setOutDir(out_dir_name);
      QDir qd;
      // NOTE(review): the writability probe concatenates the file name directly
      // onto out_dir_name without a separator; QDir::cleanPath above may have
      // stripped the trailing separator -- confirm the probe path is intended
      if (!(qd.exists(out_dir_name) || qd.mkdir(out_dir_name)) || !File::writable(out_dir_name + "test_file_in_the_current_directory"))
      {
        cerr << "You do not have permission to write to " << out_dir_name.toStdString() << endl;
        return CANNOT_WRITE_OUTPUT_FILE;
      }
    }
    ts.runPipeline();
    // enter the Qt event loop; returns 0 when entirePipelineFinished fired
    if (a.exec() == 0)
    {
      // delete temporary files
      // safety measure: only delete if subdirectory of Temp path; we do not want to delete / or c:
      if (String(tmp_path).substitute("\\", "/").hasPrefix(File::getTempDirectory().substitute("\\", "/") + "/"))
      {
        File::removeDirRecursively(tmp_path);
      }
      return EXECUTION_OK;
    }
    return UNKNOWN_ERROR;
  }
};
/// Program entry point: delegate argument parsing and execution to TOPPBase.
int main(int argc, const char ** argv)
{
  return TOPPExecutePipeline().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/MapAlignerIdentification.cpp | .cpp | 22,463 | 555 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Marc Sturm, Clemens Groepl, Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentAlgorithmIdentification.h>
#include <OpenMS/APPLICATIONS/MapAlignerBase.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FORMAT/FeatureXMLFile.h>
#include <OpenMS/FORMAT/ConsensusXMLFile.h>
#include <OpenMS/METADATA/ExperimentalDesign.h>
#include <OpenMS/FORMAT/ExperimentalDesignFile.h>
#include <OpenMS/FORMAT/OMSFile.h>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
// Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_MapAlignerIdentification MapAlignerIdentification
@brief Corrects retention time distortions between maps, using information from peptides identified in different maps.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> potential predecessor tools </td>
<td VALIGN="middle" ROWSPAN=4> → MapAlignerIdentification →</td>
<th ALIGN = "center"> potential successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_CometAdapter @n (or another search engine adapter) </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_IDMerger </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_IDFileConverter </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=2> @ref TOPP_FeatureLinkerUnlabeled or @n @ref TOPP_FeatureLinkerUnlabeledQT </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_IDMapper </td>
</tr>
</table>
</CENTER>
Reference:\n
Weisser <em>et al.</em>: <a href="https://doi.org/10.1021/pr300992u">An automated pipeline for high-throughput label-free quantitative proteomics</a> (J. Proteome Res., 2013, PMID: 23391308).
This tool provides an algorithm to align the retention time scales of multiple input files, correcting shifts and distortions between them. Retention time adjustment may be necessary to correct for chromatography differences e.g. before data from multiple LC-MS runs can be combined (feature grouping), or when one run should be annotated with peptide identifications obtained in a different run.
All map alignment tools (MapAligner...) collect retention time data from the input files and - by fitting a model to this data - compute transformations that map all runs to a common retention time scale. They can apply the transformations right away and return output files with aligned time scales (parameter @p out), and/or return descriptions of the transformations in trafoXML format (parameter @p trafo_out). Transformations stored as trafoXML can be applied to arbitrary files with the @ref TOPP_MapRTTransformer tool.
The map alignment tools differ in how they obtain retention time data for the modeling of transformations, and consequently what types of data they can be applied to. The alignment algorithm implemented here is based on peptide identifications, and thus applicable to files containing peptide IDs (idXML, annotated featureXML/consensusXML). It finds peptide sequences that different input files have in common and uses them as points of correspondence between the inputs. For more details and algorithm-specific parameters (set in the INI file) see "Detailed Description" in the @ref OpenMS::MapAlignmentAlgorithmIdentification "algorithm documentation".
@see @ref TOPP_MapAlignerPoseClustering @ref TOPP_MapRTTransformer
Note that alignment is based on the sequence including modifications, thus an exact match is required. I.e., a peptide with oxidised methionine will not be matched to its unmodified version. This behavior is generally desired since (some) modifications can cause retention time shifts.
Since %OpenMS 1.8, the extraction of data for the alignment has been separate from the modeling of RT transformations based on that data. It is now possible to use different models independently of the chosen algorithm. This algorithm has been tested mostly with the "b_spline" model. The different available models are:
- @ref OpenMS::TransformationModelLinear "linear": Linear model.
- @ref OpenMS::TransformationModelBSpline "b_spline": Smoothing spline (non-linear).
- @ref OpenMS::TransformationModelLowess "lowess": Local regression (non-linear).
- @ref OpenMS::TransformationModelInterpolated "interpolated": Different types of interpolation.
The following parameters control the modeling of RT transformations (they can be set in the "model" section of the INI file):
@htmlinclude OpenMS_MapAlignerIdentificationModel.parameters @n
@note Currently mzIdentML (mzid) is not directly supported as an input/output format of this tool. Convert mzid files to/from idXML using @ref TOPP_IDFileConverter if necessary.
<B>The command line parameters of this tool are:</B> @n
@verbinclude TOPP_MapAlignerIdentification.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_MapAlignerIdentification.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
/// TOPP tool that aligns the retention time scales of multiple runs using
/// peptide identifications shared between them. Supports featureXML,
/// consensusXML, idXML and OMS inputs; writes aligned data and/or trafoXML
/// transformation descriptions.
class TOPPMapAlignerIdentification :
  public TOPPMapAlignerBase
{
public:
  TOPPMapAlignerIdentification() :
    TOPPMapAlignerBase("MapAlignerIdentification", "Corrects retention time distortions between maps based on common peptide identifications.")
  {
  }

private:
  /// Loads all input files in @p ins into @p maps (pre-sized by the caller)
  /// using the given file reader, with progress logging.
  template <typename MapType, typename FileType>
  void loadInitialMaps_(vector<MapType>& maps, StringList& ins,
                        FileType& input_file)
  {
    // custom progress logger for this task:
    ProgressLogger progresslogger;
    progresslogger.setLogType(TOPPMapAlignerBase::log_type_);
    progresslogger.startProgress(0, ins.size(), "loading input files");
    for (Size i = 0; i < ins.size(); ++i)
    {
      progresslogger.setProgress(i);
      input_file.load(ins[i], maps[i]);
    }
    progresslogger.endProgress();
  }

  // helper function to avoid code duplication between consensusXML and
  // featureXML storage operations:
  /// Annotates each map with an ALIGNMENT data-processing step and writes it
  /// to the corresponding output file.
  template <typename MapType, typename FileType>
  void storeTransformedMaps_(vector<MapType>& maps, StringList& outs,
                             FileType& output_file)
  {
    // custom progress logger for this task:
    ProgressLogger progresslogger;
    progresslogger.setLogType(log_type_);
    progresslogger.startProgress(0, outs.size(), "writing output files");
    for (Size i = 0; i < outs.size(); ++i)
    {
      progresslogger.setProgress(i);
      // annotate output with data processing info:
      addDataProcessing_(maps[i],
                         getProcessingInfo_(DataProcessing::ALIGNMENT));
      output_file.store(outs[i], maps[i]);
    }
    progresslogger.endProgress();
  }

  /// Runs the ID-based alignment on @p data, then fits the configured RT
  /// transformation model to each resulting transformation. With 'force' set,
  /// a failed alignment degrades to 'identity' transformations instead of
  /// aborting; otherwise the exception is re-thrown.
  template <typename DataType>
  void performAlignment_(MapAlignmentAlgorithmIdentification& algorithm,
                         vector<DataType>& data,
                         vector<TransformationDescription>& transformations,
                         Int reference_index)
  {
    // find model parameters:
    Param model_params = getParam_().copy("model:", true);
    String model_type = model_params.getValue("type").toString();
    try
    {
      algorithm.align(data, transformations, reference_index);
    }
    catch (Exception::MissingInformation& err)
    {
      if (getFlag_("force"))
      {
        OPENMS_LOG_ERROR
          << "Error: alignment failed. Details:\n" << err.what()
          << "\nSince 'force' is set, processing will continue using 'identity' transformations."
          << endl;
        model_type = "identity";
        transformations.resize(data.size());
      }
      else throw;
    }
    if (model_type != "none")
    {
      model_params = model_params.copy(model_type + ":", true);
      for (TransformationDescription& tra : transformations)
      {
        tra.fitModel(model_type, model_params);
      }
    }
  }

  /// Applies transformations[i] to data[i]; optionally stores the original
  /// retention times as meta data (flag 'store_original_rt').
  template <typename DataType>
  void applyTransformations_(vector<DataType>& data,
                             const vector<TransformationDescription>& transformations)
  {
    bool store_original_rt = getFlag_("store_original_rt");
    for (Size i = 0; i < data.size(); ++i)
    {
      MapAlignmentTransformer::transformRetentionTimes(
        data[i], transformations[i], store_original_rt);
    }
  }

  /// Writes each transformation description to its trafoXML output file.
  void storeTransformationDescriptions_(const vector<TransformationDescription>&
                                        transformations, StringList& trafos)
  {
    OPENMS_PRECONDITION(transformations.size() == trafos.size(), "Transformation descriptions and list of transformation files need to be equal.");
    // custom progress logger for this task:
    ProgressLogger progresslogger;
    progresslogger.setLogType(log_type_);
    progresslogger.startProgress(0, trafos.size(),
                                 "writing transformation files");
    OPENMS_LOG_INFO << "Writing " << transformations.size() << " transformations " << " to " << trafos.size() << " files.";
    for (Size i = 0; i < transformations.size(); ++i)
    {
      FileHandler().storeTransformations(trafos[i], transformations[i], {FileTypes::TRANSFORMATIONXML});
    }
    progresslogger.endProgress();
  }

  /// Resolves the alignment reference: if 'reference:file' is given, loads it
  /// (format-dependent) into the algorithm; returns the zero-based
  /// 'reference:index' (-1 when no index reference is used).
  Int getReference_(MapAlignmentAlgorithmIdentification& algorithm)
  {
    // consistency of reference parameters has already been checked via
    // "TOPPMapAlignerBase::checkParameters_"
    Size reference_index = getIntOption_("reference:index");
    String reference_file = getStringOption_("reference:file");
    if (!reference_file.empty())
    {
      FileTypes::Type filetype = FileHandler::getType(reference_file);
      switch (filetype)
      {
      case FileTypes::FEATUREXML:
      {
        FeatureMap features;
        FileHandler().loadFeatures(reference_file, features, {}, log_type_);
        algorithm.setReference(features);
      }
      break;
      case FileTypes::CONSENSUSXML:
      {
        ConsensusMap consensus;
        FileHandler().loadConsensusFeatures(reference_file, consensus, {}, log_type_);
        algorithm.setReference(consensus);
      }
      break;
      case FileTypes::IDXML:
      {
        vector<ProteinIdentification> proteins;
        PeptideIdentificationList peptides;
        FileHandler().loadIdentifications(reference_file, proteins, peptides, {}, log_type_);
        algorithm.setReference(peptides);
      }
      break;
      case FileTypes::OMS:
      {
        IdentificationData id_data;
        OMSFile().load(reference_file, id_data);
        algorithm.setReference(id_data);
      }
      break;
      default: // to avoid compiler warnings
        throw Exception::WrongParameterType(__FILE__, __LINE__,
                                            OPENMS_PRETTY_FUNCTION,
                                            "reference:file");
      }
    }
    return Int(reference_index) - 1; // internally, we count from zero
  }

  /// Registers command line parameters (common aligner options plus the
  /// experimental-design input and the RT-keeping flag).
  void registerOptionsAndFlags_() override
  {
    String formats = "featureXML,consensusXML,idXML,oms";
    TOPPMapAlignerBase::registerOptionsAndFlagsMapAligners_(formats, REF_FLEXIBLE);
    // TODO: potentially move to base class so every aligner has to support design
    registerInputFile_("design", "<file>", "", "Input file containing the experimental design", false);
    setValidFormats_("design", ListUtils::create<String>("tsv"));
    registerFlag_("store_original_rt", "Store the original retention times (before transformation) as meta data in the output?");
    registerSubsection_("algorithm", "Algorithm parameters section");
    registerSubsection_("model", "Options to control the modeling of retention time transformations from data");
  }

  /// Provides default parameters for the 'algorithm' and 'model' subsections.
  Param getSubsectionDefaults_(const String& section) const override
  {
    if (section == "algorithm")
    {
      MapAlignmentAlgorithmIdentification algo;
      return algo.getParameters();
    }
    if (section == "model")
    {
      return MapAlignerBase::getModelDefaults("b_spline");
    }
    return Param(); // this shouldn't happen
  }

  /// Main workflow: dispatches on the input file type, loads the data, aligns,
  /// applies the transformations, and writes outputs/trafoXML files.
  ExitCodes main_(int, const char**) override
  {
    ExitCodes return_code = TOPPMapAlignerBase::checkParameters_();
    if (return_code != EXECUTION_OK) return return_code;
    // set up alignment algorithm:
    MapAlignmentAlgorithmIdentification algorithm;
    Param algo_params = getParam_().copy("algorithm:", true);
    algorithm.setParameters(algo_params);
    algorithm.setLogType(log_type_);
    Int reference_index = getReference_(algorithm);
    // handle in- and output files:
    StringList input_files = getStringList_("in");
    if (input_files.size() == 1)
    {
      OPENMS_LOG_WARN << "Only one file provided as input to MapAlignerIdentification." << std::endl;
    }
    StringList output_files = getStringList_("out");
    StringList trafo_files = getStringList_("trafo_out");
    FileTypes::Type in_type = FileHandler::getType(input_files[0]);
    vector<TransformationDescription> transformations;
    switch (in_type)
    {
    //-------------------------------------------------------------
    // perform feature alignment
    //-------------------------------------------------------------
    case FileTypes::FEATUREXML:
    {
      vector<FeatureMap> feature_maps(input_files.size());
      FeatureXMLFile fxml_file;
      if (output_files.empty())
      {
        // store only transformation descriptions, not transformed data =>
        // we can load only minimum required information:
        fxml_file.getOptions().setLoadConvexHull(false);
        fxml_file.getOptions().setLoadSubordinates(false);
      }
      loadInitialMaps_(feature_maps, input_files, fxml_file);
      //-------------------------------------------------------------
      // extract (optional) fraction identifiers and associate with featureXMLs
      //-------------------------------------------------------------
      String design_file = getStringOption_("design");
      // determine map of fractions to runs
      map<unsigned, vector<String>> frac2files;
      // TODO: check if can be put in common helper function
      if (!design_file.empty())
      {
        // parse design file and determine fractions
        ExperimentalDesign ed = ExperimentalDesignFile::load(design_file,
                                                            false);
        // determine if design defines more than one fraction (note: fraction and run IDs are one-based)
        frac2files = ed.getFractionToMSFilesMapping();
        // check if all fractions have the same number of MS runs associated
        if (!ed.sameNrOfMSFilesPerFraction())
        {
          writeLogError_("Error: Number of runs must match for every fraction!");
          return ILLEGAL_PARAMETERS;
        }
      }
      else // no design file given
      {
        for (Size i = 0; i != input_files.size(); ++i)
        {
          // TODO: read proper MS file name from meta data
          frac2files[1].push_back("file" + String(i)); // associate each file with fraction 1
        }
      }
      // TODO: check and handle if featureXML order differs from run order
      // perform fraction-based alignment
      if (frac2files.size() == 1) // group one fraction
      {
        performAlignment_(algorithm, feature_maps, transformations,
                          reference_index);
        applyTransformations_(feature_maps, transformations);
      }
      else // group multiple fractions
      {
        for (Size i = 1; i <= frac2files.size(); ++i)
        {
          vector<FeatureMap> fraction_maps;
          vector<TransformationDescription> fraction_transformations;
          size_t n_fractions = frac2files.size();
          // TODO FRACTIONS: determine map index based on annotated MS files (getPrimaryMSRuns())
          // NOTE(review): this inner loop is bounded by the *number of
          // fractions* (frac2files.size()), not by the number of files in
          // fraction i -- looks suspicious; see the TODO above. Confirm
          // against the intended fraction-to-file mapping.
          for (size_t feature_map_index = 0; feature_map_index != n_fractions;
               ++feature_map_index)
          {
            fraction_maps.push_back(feature_maps[feature_map_index]);
          }
          performAlignment_(algorithm, fraction_maps, fraction_transformations,
                            reference_index);
          applyTransformations_(fraction_maps, fraction_transformations);
          // copy into transformations and feature maps
          transformations.insert(transformations.end(),
                                 fraction_transformations.begin(),
                                 fraction_transformations.end());
          Size f = 0;
          for (size_t feature_map_index = 0; feature_map_index != n_fractions;
               ++feature_map_index, ++f)
          {
            feature_maps[feature_map_index].swap(fraction_maps[f]);
          }
        }
      }
      if (!output_files.empty())
      {
        storeTransformedMaps_(feature_maps, output_files, fxml_file);
      }
    }
    break;
    //-------------------------------------------------------------
    // perform consensus alignment
    //-------------------------------------------------------------
    case FileTypes::CONSENSUSXML:
    {
      std::vector<ConsensusMap> consensus_maps(input_files.size());
      ConsensusXMLFile cxml_file;
      loadInitialMaps_(consensus_maps, input_files, cxml_file);
      performAlignment_(algorithm, consensus_maps, transformations,
                        reference_index);
      applyTransformations_(consensus_maps, transformations);
      if (!output_files.empty())
      {
        storeTransformedMaps_(consensus_maps, output_files, cxml_file);
      }
    }
    break;
    //-------------------------------------------------------------
    // perform peptide alignment
    //-------------------------------------------------------------
    case FileTypes::IDXML:
    {
      vector<vector<ProteinIdentification>> protein_ids(input_files.size());
      vector<PeptideIdentificationList> peptide_ids(input_files.size());
      FileHandler idxml_file;
      ProgressLogger progresslogger;
      progresslogger.setLogType(log_type_);
      progresslogger.startProgress(0, input_files.size(),
                                   "loading input files");
      for (Size i = 0; i < input_files.size(); ++i)
      {
        progresslogger.setProgress(i);
        idxml_file.loadIdentifications(input_files[i], protein_ids[i], peptide_ids[i], {FileTypes::IDXML}, log_type_);
      }
      progresslogger.endProgress();
      performAlignment_(algorithm, peptide_ids, transformations,
                        reference_index);
      applyTransformations_(peptide_ids, transformations);
      if (!output_files.empty())
      {
        progresslogger.startProgress(0, output_files.size(),
                                     "writing output files");
        for (Size i = 0; i < output_files.size(); ++i)
        {
          progresslogger.setProgress(i);
          idxml_file.storeIdentifications(output_files[i], protein_ids[i], peptide_ids[i], {FileTypes::IDXML}, log_type_);
        }
        progresslogger.endProgress();
      }
    }
    break;
    //-------------------------------------------------------------
    // perform spectrum match alignment
    //-------------------------------------------------------------
    case FileTypes::OMS:
    {
      vector<IdentificationData> id_data(input_files.size());
      OMSFile oms_file;
      ProgressLogger progresslogger;
      progresslogger.setLogType(log_type_);
      progresslogger.startProgress(0, input_files.size(),
                                   "loading input files");
      for (Size i = 0; i < input_files.size(); ++i)
      {
        progresslogger.setProgress(i);
        oms_file.load(input_files[i], id_data[i]);
      }
      progresslogger.endProgress();
      // add data processing information:
      DateTime processing_time = DateTime::now(); // use same for each file
      IdentificationData::ProcessingSoftware sw(toolName_(), version_);
      if (test_mode_) sw.setVersion("test");
      String reference_file = getStringOption_("reference:file");
      for (IdentificationData& id : id_data)
      {
        IdentificationData::ProcessingSoftwareRef sw_ref =
          id.registerProcessingSoftware(sw);
        IdentificationData::ProcessingStep step(sw_ref);
        // record all input files (and the reference file, if any) in the step
        for (const String& input_file : input_files)
        {
          IdentificationData::InputFileRef ref =
            id.registerInputFile(IdentificationData::InputFile(input_file));
          step.input_file_refs.push_back(ref);
        }
        if (!reference_file.empty())
        {
          IdentificationData::InputFileRef ref =
            id.registerInputFile(IdentificationData::InputFile(reference_file));
          step.input_file_refs.push_back(ref);
        }
        step.date_time = processing_time;
        step.actions.insert(DataProcessing::ALIGNMENT);
        id.registerProcessingStep(step);
      }
      performAlignment_(algorithm, id_data, transformations, reference_index);
      applyTransformations_(id_data, transformations);
      if (!output_files.empty())
      {
        progresslogger.startProgress(0, output_files.size(),
                                     "writing output files");
        for (Size i = 0; i < output_files.size(); ++i)
        {
          progresslogger.setProgress(i);
          oms_file.store(output_files[i], id_data[i]);
        }
        progresslogger.endProgress();
      }
    }
    break;
    default: // to avoid compiler warnings
      throw Exception::WrongParameterType(__FILE__, __LINE__,
                                          OPENMS_PRETTY_FUNCTION, "in");
    }
    if (!trafo_files.empty())
    {
      storeTransformationDescriptions_(transformations, trafo_files);
    }
    // Transform optional spectra files
    StringList in_spectra_files = getStringList_("in_spectra_files");
    StringList out_spectra_files = getStringList_("out_spectra_files");
    bool store_original_rt = getFlag_("store_original_rt");
    transformSpectraFiles_(in_spectra_files, out_spectra_files, transformations, store_original_rt);
    return EXECUTION_OK;
  }
};
/// Program entry point: delegate to the TOPP base class implementation.
int main(int argc, const char** argv)
{
  return TOPPMapAlignerIdentification().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/OpenSwathFeatureXMLToTSV.cpp | .cpp | 16,749 | 501 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/ANALYSIS/TARGETED/TargetedExperiment.h>
#include <OpenMS/CONCEPT/Types.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/KERNEL/Feature.h>
#include <fstream>
#include <clocale>
using namespace OpenMS;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_OpenSwathFeatureXMLToTSV OpenSwathFeatureXMLToTSV
@brief Converts a featureXML to a mProphet tsv
<CENTER>
<table>
<tr>
<th ALIGN = "center"> potential predecessor tools </td>
<td VALIGN="middle" ROWSPAN=3> → OpenSwathFeatureXMLToTSV →</td>
<th ALIGN = "center"> potential successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_OpenSwathAnalyzer </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=2> Downstream data analysis </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_OpenSwathConfidenceScoring </td>
</tr>
</table>
</CENTER>
Creates a tsv file that can be used as input to mProphet.
Furthermore, it creates the columns "decoy" and
"transition_group_id", which are required by mProphet.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_OpenSwathFeatureXMLToTSV.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_OpenSwathFeatureXMLToTSV.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
// Maps a peptide reference (the peptide id used in the TargetedExperiment) to
// all transitions monitoring that peptide; read in write_out_body_() to derive
// the decoy status. NOTE(review): presumably filled elsewhere in this file
// before the body rows are written -- confirm against the full source.
std::map<String, std::vector<const ReactionMonitoringTransition *> > peptide_transition_map;
void write_out_header(std::ostream &os, FeatureMap &feature_map, /* String main_var_name, */ std::vector<String> &meta_value_names, bool short_format)
{
std::vector<String> meta_value_names_tmp;
os << "transition_group_id" << "\t"
<< "run_id" << "\t"
<< "filename" << "\t"
<< "RT" << "\t"
<< "id" << "\t"
<< "Sequence" << "\t"
<< "FullPeptideName" << "\t"
<< "Charge" << "\t"
<< "m/z" << "\t"
<< "Intensity" << "\t"
<< "ProteinName" << "\t"
<< "decoy" << "\t";
// get all meta values from the first feature
feature_map[0].getKeys(meta_value_names_tmp);
for (Size i = 0; i < meta_value_names_tmp.size(); i++)
{
if (meta_value_names_tmp[i] != "PeptideRef" && meta_value_names_tmp[i] != "PrecursorMZ")
{
meta_value_names.push_back(meta_value_names_tmp[i]);
}
}
std::sort(meta_value_names.begin(), meta_value_names.end());
for (Size i = 0; i < meta_value_names.size(); i++)
{
os << meta_value_names[i] << "\t";
}
if (!short_format)
{
os << "Peak_Area" << "\t";
os << "Peak_Apex" << "\t";
os << "Fragment_Annotation" << "\t";
os << "ProductMZ";
}
else
{
os << "aggr_Peak_Area" << "\t";
os << "aggr_Peak_Apex" << "\t";
os << "aggr_Fragment_Annotation";
}
os << std::endl;
}
/**
 * @brief Writes one TSV row (long format) or one aggregated row (short format) for a single feature.
 *
 * Resolves the peptide referenced by the feature's "PeptideRef" meta value in the
 * targeted experiment to collect sequence, protein, charge and decoy information,
 * then streams the assembled line(s) to @p os.
 *
 * @param os Output stream the row(s) are written to
 * @param feature_it Feature to write out (must carry "PeptideRef" and "PrecursorMZ" meta values)
 * @param transition_exp Targeted experiment used to resolve the peptide reference
 * @param meta_value_names Meta values appended as extra columns (same order as in the header)
 * @param run_id Identifier of the current run
 * @param short_format If true, aggregate all subordinate transitions into one line
 * @param identifier File identifier appended to the peptide reference to keep it unique across runs
 * @param filename Name of the originating file (written into each row)
 * @throws Exception::IllegalArgument if the peptide is unknown, has no transitions,
 *         or is annotated as both target and decoy
 */
void write_out_body_(std::ostream &os, Feature *feature_it, TargetedExperiment &transition_exp,
    std::vector<String> &meta_value_names, int run_id, bool short_format, String identifier, String filename)
{
  String peptide_ref = feature_it->getMetaValue("PeptideRef");
  String precursor_mz = feature_it->getMetaValue("PrecursorMZ");
  String sequence;
  String full_peptide_name = "NA";
  String protein_name = "NA";
  String decoy = "NA";
  String charge = "NA";
  if (!transition_exp.hasPeptide(peptide_ref))
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
        "Did not find the peptide " + peptide_ref + " in the targeted experiment.");
  }
  const OpenMS::TargetedExperiment::Peptide &pep = transition_exp.getPeptideByRef(peptide_ref);
  sequence = pep.sequence;
  if (!pep.protein_refs.empty())
  {
    // For now just take the first one, assuming the protein name is the id
    protein_name = pep.protein_refs[0];
  }
  // BUGFIX: resolve the full peptide name BEFORE the charge handling below; previously it
  // was looked up only after the "PEPTIDE/2" fallback, so that fallback always saw "NA".
  if (pep.metaValueExists("full_peptide_name"))
  {
    full_peptide_name = pep.getMetaValue("full_peptide_name");
  }
  // handle charge: prefer the charge CV term, then the explicit charge state, then the name suffix
  if (pep.hasCVTerm("MS:1000041"))
  {
    charge = pep.getCVTerms().at("MS:1000041")[0].getValue().toString();
  }
  else if (pep.hasCharge())
  {
    charge = (String)pep.getChargeState();
  }
  if (charge == "NA" && !full_peptide_name.empty())
  {
    // deal with FullPeptideNames like PEPTIDE/2
    std::vector<String> substrings;
    full_peptide_name.split("/", substrings);
    if (substrings.size() == 2)
    {
      charge = substrings[1];
    }
  }
  // handle decoy tag: derived from the CV terms of the peptide's first transition
  if (peptide_transition_map.find(peptide_ref) != peptide_transition_map.end() && !peptide_transition_map[peptide_ref].empty())
  {
    const ReactionMonitoringTransition *transition = peptide_transition_map[peptide_ref][0];
    const auto& terms = transition->getCVTerms();
    // BUGFIX: the target+decoy conflict must be tested first; previously this branch
    // came after the single-term checks and was unreachable.
    if (terms.find("MS:1002007") != terms.end() && terms.find("MS:1002008") != terms.end()) // both == illegal
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "Peptide " + peptide_ref + " cannot be target and decoy at the same time.");
    }
    else if (terms.find("decoy") != terms.end())
    {
      decoy = terms.at("decoy")[0].getValue().toString();
    }
    else if (terms.find("MS:1002007") != terms.end()) // target SRM transition
    {
      decoy = "0";
    }
    else if (terms.find("MS:1002008") != terms.end()) // decoy SRM transition
    {
      decoy = "1";
    }
    else if (transition->getDecoyTransitionType() == ReactionMonitoringTransition::DECOY)
    {
      decoy = "1";
    }
    else
    {
      // UNKNOWN, TARGET or any other transition type: assume target
      decoy = "0";
    }
  }
  else
  {
    // BUGFIX: previously this reused the "not in the targeted experiment" message,
    // although it actually signals that no transitions were indexed for the peptide.
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
        "Did not find any transitions for peptide " + peptide_ref + ".");
  }
  // adjust peptide ref with current file identifier
  peptide_ref += "_" + identifier;
  String line = "";
  // Start writing out
  line += peptide_ref + "\t" + (String)run_id + "\t" + (String)filename + "\t" + feature_it->getRT() + "\tf_" + feature_it->getUniqueId() + "\t";
  line += sequence + "\t" + full_peptide_name + "\t";
  line += (String)charge + "\t";
  line += precursor_mz + "\t";
  line += (String)feature_it->getIntensity() + "\t";
  line += protein_name + "\t";
  line += decoy + "\t";
  String meta_values = "";
  for (Size i = 0; i < meta_value_names.size(); i++)
  {
    meta_values += feature_it->getMetaValue(meta_value_names[i]).toString() + "\t";
  }
  // Write out the individual transitions
  if (short_format)
  {
    // aggregate all subordinate features into ';'-separated columns
    String aggr_Peak_Area = "";
    String aggr_Peak_Apex = "";
    String aggr_Fragment_Annotation = "";
    for (Feature& sub : feature_it->getSubordinates())
    {
      aggr_Peak_Area += String(sub.getIntensity()) + ";";
      if (sub.metaValueExists("peak_apex_int"))
      {
        aggr_Peak_Apex += String((double)sub.getMetaValue("peak_apex_int")) + ";";
      }
      else
      {
        aggr_Peak_Apex += "NA;";
      }
      aggr_Fragment_Annotation += (String)sub.getMetaValue("native_id") + ";";
    }
    // remove the last semicolon
    if (!feature_it->getSubordinates().empty())
    {
      aggr_Peak_Area = aggr_Peak_Area.substr(0, aggr_Peak_Area.size() - 1);
      aggr_Peak_Apex = aggr_Peak_Apex.substr(0, aggr_Peak_Apex.size() - 1);
      aggr_Fragment_Annotation = aggr_Fragment_Annotation.substr(0, aggr_Fragment_Annotation.size() - 1);
    }
    os << line << meta_values << aggr_Peak_Area << "\t" << aggr_Peak_Apex << "\t" << aggr_Fragment_Annotation << std::endl;
  }
  else
  {
    // one line per subordinate feature (i.e. per transition)
    os.precision(writtenDigits(double())); // loop-invariant, hoisted out of the loop
    for (Feature& sub : feature_it->getSubordinates())
    {
      String apex = "NA";
      if (sub.metaValueExists("peak_apex_int"))
      {
        apex = String((double)sub.getMetaValue("peak_apex_int"));
      }
      os << line << meta_values << String(sub.getIntensity()) << "\t" << apex << "\t" << (String)sub.getMetaValue("native_id") << "\t" << String(sub.getMZ()) << std::endl;
    }
  }
}
/**
 * @brief Returns the feature with the highest value of the given score meta value.
 *
 * @param features Candidate features (may be empty)
 * @param score_ Name of the meta value holding the score
 * @return Pointer to the best-scoring feature, or nullptr if @p features is empty
 */
Feature *find_best_feature(const std::vector<Feature *> &features, const String& score_) // pass name by const ref (was by value)
{
  double best_score = -std::numeric_limits<double>::max();
  Feature *best_feature = nullptr;
  for (Feature* feature : features)
  {
    // scores may be stored as strings in the meta values, so convert via String
    const double score = feature->getMetaValue(score_).toString().toDouble();
    if (score > best_score)
    {
      best_feature = feature;
      best_score = score;
    }
  }
  return best_feature;
}
/**
 * @brief Writes one output row (or block) per peptide, using only the best-scoring feature.
 *
 * Groups all features of @p feature_map by their "PeptideRef" meta value and, for each
 * peptide, writes only the feature with the highest value of @p best_score.
 *
 * @param os Output stream
 * @param feature_map Features to report
 * @param transition_exp Targeted experiment used to resolve peptide references
 * @param meta_value_names Meta values appended as extra columns
 * @param run_id Identifier of the current run
 * @param short_format If true, one line per peptide; otherwise one line per transition
 * @param best_score Name of the meta value used to rank the features
 * @param filename Name of the originating file (written into each row)
 * @throws Exception::IllegalArgument if no best feature can be determined for a peptide
 */
void write_out_body_best_score(std::ostream &os, FeatureMap &feature_map,
    TargetedExperiment &transition_exp, std::vector<String> &meta_value_names,
    int run_id, bool short_format, String best_score, String filename)
{
  // group all features by their peptide reference
  typedef std::map<String, std::vector<Feature *> > PeptideFeatureMapType;
  PeptideFeatureMapType peptide_feature_map;
  for (Feature& feature : feature_map)
  {
    String peptide_ref = feature.getMetaValue("PeptideRef");
    peptide_feature_map[peptide_ref].push_back(&feature);
  }
  // NOTE: a dead diagnostics loop (body fully commented out) was removed here
  for (PeptideFeatureMapType::iterator peptide_it = peptide_feature_map.begin(); peptide_it != peptide_feature_map.end(); ++peptide_it)
  {
    Feature *bestfeature = find_best_feature(peptide_it->second, best_score);
    if (bestfeature == nullptr)
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Did not find best feature for peptide " + peptide_it->first);
    }
    write_out_body_(os, bestfeature, transition_exp, meta_value_names, run_id, short_format, feature_map.getIdentifier(), filename);
  }
}
/// TOPP tool that converts OpenSWATH featureXML results into an mProphet-compatible TSV table.
class TOPPOpenSwathFeatureXMLToTSV
  : public TOPPBase, public ProgressLogger
{
public:
  TOPPOpenSwathFeatureXMLToTSV() :
    TOPPBase("OpenSwathFeatureXMLToTSV", "Converts a featureXML to a mProphet tsv.", true)
  {
  }

protected:
  void registerOptionsAndFlags_() override
  {
    registerInputFileList_("in", "<files>", StringList(), "Input files separated by blank");
    setValidFormats_("in", ListUtils::create<String>("featureXML"));
    registerInputFile_("tr", "<file>", "", "TraML transition file");
    setValidFormats_("tr", ListUtils::create<String>("traML"));
    //registerStringOption_("main_var_name","<varname>","xx_lda_prelim_score","Name of the main variable", false);
    registerOutputFile_("out", "<file>", "", "tsv output file (mProphet compatible)");
    setValidFormats_("out", ListUtils::create<String>("csv"));
    registerFlag_("short_format", "Whether to write short (one peptide per line) or long format (one transition per line).");
    registerStringOption_("best_scoring_peptide", "<varname>", "", "If only the best scoring feature per peptide should be printed, give the variable name", false);
  }

  /// Writes one row/block per feature of @p feature_map, reporting progress along the way.
  void write_out_body(std::ostream &os, FeatureMap &feature_map,
      TargetedExperiment &transition_exp, std::vector<String> &meta_value_names,
      int run_id, bool short_format, String filename)
  {
    Size progress = 0;
    startProgress(0, feature_map.size(), "writing out features");
    for (Feature& feature : feature_map)
    {
      setProgress(progress++);
      write_out_body_(os, &feature, transition_exp, meta_value_names, run_id, short_format, feature_map.getIdentifier(), filename);
    }
    endProgress();
  }

  ExitCodes main_(int, const char **) override
  {
    StringList file_list = getStringList_("in");
    String tr_file = getStringOption_("tr");
    String out = getStringOption_("out");
    //String main_var_name = getStringOption_("main_var_name");
    String best_scoring = getStringOption_("best_scoring_peptide");
    bool short_format = getFlag_("short_format");
    setLogType(log_type_);

    // load the transitions and index them by peptide reference
    TargetedExperiment transition_exp;
    FileHandler().loadTransitions(tr_file, transition_exp, {FileTypes::TRAML});
    startProgress(0, transition_exp.getTransitions().size(), "indexing transitions peaks");
    for (Size i = 0; i < transition_exp.getTransitions().size(); i++)
    {
      setProgress(i);
      const ReactionMonitoringTransition *transition = &transition_exp.getTransitions()[i];
      peptide_transition_map[transition->getPeptideRef()].push_back(&transition_exp.getTransitions()[i]);
    }
    endProgress();

    std::ofstream os(out.c_str());
    // BUGFIX: validate the stream right after opening, before first use; previously the
    // precision was set before the stream was checked
    if (!os)
    {
      throw Exception::UnableToCreateFile(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, out);
    }
    //set high precision for writing of floating point numbers
    os.precision(writtenDigits(double()));
    // write the csv header (we need to know which parameters are in the map to do that)
    if (file_list.empty())
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No input files given ");
    }
    FeatureMap feature_map;
    // feature_file.load() resets the locale to the user's (Don't know where, maybe QT or Xerces)
    // Somehow even our variable OpenMS::Internal::OpenMS_locale is overwritten
    // Create copy here and reset it later. TODO this needs to be fixed more thoroughly.
    String locale_before = String(OpenMS::Internal::OpenMS_locale);
    FileHandler().loadFeatures(file_list[0], feature_map, {FileTypes::FEATUREXML}, log_type_);
    setlocale(LC_ALL, locale_before.c_str());
    if (feature_map.getIdentifier().empty())
    {
      feature_map.setIdentifier("run0");
    }
    std::vector<String> meta_value_names;
    if (feature_map.empty() && file_list.size() > 1)
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Feature map " + file_list[0] + " is empty.");
    }
    else if (feature_map.empty())
    {
      std::cout << "Warning: Feature map " + file_list[0] + " is empty." << std::endl;
      return EXECUTION_OK;
    }
    write_out_header(os, feature_map, /* main_var_name, */ meta_value_names, short_format);
    String filename;
    filename = file_list[0];
    if (getFlag_("test"))
    {
      // deterministic name so test output does not depend on the working directory
      filename = "testfile.file";
    }
    // write out the one we just loaded
    if (best_scoring.empty())
    {
      write_out_body(os, feature_map, transition_exp, meta_value_names, 0, short_format, filename);
    }
    else
    {
      write_out_body_best_score(os, feature_map, transition_exp, meta_value_names, 0, short_format, best_scoring, filename);
    }
    // start with the second in the list (we just wrote out the first one)
    for (Size i = 1; i < file_list.size(); ++i)
    {
      FileHandler().loadFeatures(file_list[i], feature_map, {FileTypes::FEATUREXML}, log_type_);
      // BUGFIX: restore the locale after EVERY load (see workaround comment above);
      // previously it was restored only after the first file
      setlocale(LC_ALL, locale_before.c_str());
      if (feature_map.getIdentifier().empty())
      {
        feature_map.setIdentifier("run" + (String)i);
      }
      if (feature_map.empty())
      {
        continue;
      }
      filename = file_list[i];
      if (getFlag_("test"))
      {
        filename = "testfile.file";
      }
      if (best_scoring.empty())
      {
        write_out_body(os, feature_map, transition_exp, meta_value_names, boost::numeric_cast<int>(i), short_format, filename);
      }
      else
      {
        write_out_body_best_score(os, feature_map, transition_exp, meta_value_names, boost::numeric_cast<int>(i), short_format, best_scoring, filename);
      }
    }
    os.close();
    return EXECUTION_OK;
  }
};
int main(int argc, const char **argv)
{
TOPPOpenSwathFeatureXMLToTSV tool;
int code = tool.main(argc, argv);
return code;
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/XFDR.cpp | .cpp | 8,989 | 264 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Eugen Netz $
// $Authors: Lukas Zimmermann, Eugen Netz $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/FORMAT/XMLFile.h>
#include <OpenMS/FORMAT/HANDLERS/XMLHandler.h>
#include <OpenMS/FORMAT/XQuestResultXMLFile.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/ANALYSIS/XLMS/XFDRAlgorithm.h>
#include <cassert>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
// Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_XFDR XFDR
@brief Calculates false discovery rate estimates on crosslink identifications.
  This tool calculates an FDR estimate for crosslink identifications, which are produced by OpenPepXL.
The method employed currently is identical to the target-decoy approach used by xProphet (Walzthoeni et al., 2012).
Consequently, this tool can also consume xquest.xml files (produced either by OpenPepXL or xQuest). The tool supports
output in the idXML and mzIdentML formats.
<center>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=3> → XFDR →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_OpenPepXL </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> - </td>
</tr>
</table>
</center>
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_XFDR.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_XFDR.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
/// TOPP tool wrapper around XFDRAlgorithm: computes FDR estimates for crosslink identifications.
class TOPPXFDR final :
  public TOPPBase
{
public:
  TOPPXFDR() :
    TOPPBase("XFDR", "Calculates false discovery rate estimates on crosslink identifications", true)
  {
  }

protected:
  // this function will be used to register the tool parameters
  // it gets automatically called on tool execution
  void registerOptionsAndFlags_() final
  {
    StringList formats = ListUtils::create<String>("xml,idXML,mzid,xquest.xml");
    // File input
    registerInputFile_(TOPPXFDR::param_in_, "<file>", "", "Crosslink Identifications in either xquest.xml, idXML, or mzIdentML format (as produced by OpenPepXL)", false);
    setValidFormats_(TOPPXFDR::param_in_, formats);
    // File input type (if omitted, guessed from the file extension) @TODO this can be removed in the future
    registerStringOption_(TOPPXFDR::param_in_type_, "<in_type>", "", "Type of input file provided with -in. If omitted, the file type is guessed from the file extension.", false, false);
    setValidStrings_(TOPPXFDR::param_in_type_, formats);
    // idXML output
    registerOutputFile_(TOPPXFDR::param_out_idXML_, "<idXML_file>", "", "Output as idXML file", false, false);
    setValidFormats_(TOPPXFDR::param_out_idXML_, ListUtils::create<String>("idXML"));
    // mzIdentML output
    registerOutputFile_(TOPPXFDR::param_out_mzid_, "<mzIdentML_file>", "", "Output as mzIdentML file", false, false);
    setValidFormats_(TOPPXFDR::param_out_mzid_, ListUtils::create<String>("mzid"));
    // xquest.xml output
    registerOutputFile_(TOPPXFDR::param_out_xquest_, "<xQuestXML_file>", "", "Output as xquest.xml file", false, false);
    setValidFormats_(TOPPXFDR::param_out_xquest_, ListUtils::create<String>("xquest.xml"));
    registerFullParam_(XFDRAlgorithm().getDefaults());
  }

  // the main_ function is called after all parameters are read
  ExitCodes main_(int, const char **) final
  {
    // Tool Arguments
    loadArguments_();
    ExitCodes tool_arg_validation_code = validateToolArguments_();
    if (tool_arg_validation_code != EXECUTION_OK)
    {
      return tool_arg_validation_code;
    }
    // initialize algorithm and parameters
    XFDRAlgorithm fdr_algorithm;
    Param this_param = getParam_().copy("", true);
    Param algo_param = fdr_algorithm.getParameters();
    algo_param.update(this_param, false, getGlobalLogDebug()); // suppress param. update message
    fdr_algorithm.setParameters(algo_param);
    fdr_algorithm.setLogType(this->log_type_);
    // TODO use this code? or just run the function?
    XFDRAlgorithm::ExitCodes class_arg_validation_code = fdr_algorithm.validateClassArguments();
    if (class_arg_validation_code == XFDRAlgorithm::ExitCodes::ILLEGAL_PARAMETERS)
    {
      logFatal("Invalid input parameters!");
      return ILLEGAL_PARAMETERS;
    }
    writeLogInfo_("Reading input file...");
    PeptideIdentificationList peptide_ids;
    ProteinIdentification protein_id;
    // Input File loading, initializes all_pep_ids_ vector
    ExitCodes load_result = loadInputFile_(peptide_ids, protein_id);
    if (load_result != EXECUTION_OK)
    {
      logFatal("Loading of input file has failed");
      return load_result;
    }
    fdr_algorithm.run(peptide_ids, protein_id);
    std::vector<ProteinIdentification> protein_ids;
    protein_ids.push_back(protein_id);
    writeLogInfo_("Writing output...");
    // write idXML
    if (! arg_out_idXML_.empty())
    {
      FileHandler().storeIdentifications(arg_out_idXML_, protein_ids, peptide_ids, {FileTypes::IDXML});
    }
    // write mzid file
    if (! arg_out_mzid_.empty())
    {
      FileHandler().storeIdentifications(arg_out_mzid_, protein_ids, peptide_ids, {FileTypes::MZIDENTML});
    }
    // write xquest.xml file
    if (! arg_out_xquest_.empty())
    {
      FileHandler().storeIdentifications(arg_out_xquest_, protein_ids, peptide_ids, {FileTypes::XQUESTXML});
    }
    return EXECUTION_OK;
  }

private:
  // command line argument values, filled by loadArguments_()
  String arg_out_idXML_;
  String arg_out_mzid_;
  String arg_out_xquest_;
  String arg_in_;
  String arg_in_type_;
  // parameter names (definitions follow the class at the end of the file)
  static const String param_in_;
  static const String param_in_type_;
  static const String param_out_idXML_;
  static const String param_out_mzid_;
  static const String param_out_xquest_;

  /// Reads all command line options into the arg_* members.
  void loadArguments_()
  {
    arg_out_idXML_ = getStringOption_(TOPPXFDR::param_out_idXML_);
    arg_out_mzid_ = getStringOption_(TOPPXFDR::param_out_mzid_);
    arg_out_xquest_ = getStringOption_(TOPPXFDR::param_out_xquest_);
    arg_in_ = getStringOption_(TOPPXFDR::param_in_);
    arg_in_type_ = getStringOption_(TOPPXFDR::param_in_type_);
  }

  /**
   * Loads the input file.
   * @return 0 if the loading of the input was successful, error code otherwise
   */
  ExitCodes loadInputFile_(PeptideIdentificationList& peptide_ids, ProteinIdentification& protein_id)
  {
    std::vector<ProteinIdentification> protein_ids;
    FileHandler().loadIdentifications(arg_in_, protein_ids, peptide_ids, {FileTypes::MZIDENTML, FileTypes::IDXML, FileTypes::XQUESTXML});
    const Size n_pep_ids = peptide_ids.size();
    const Size n_prot_ids = protein_ids.size();
    writeLogInfo_("Number of Peptide IDs in input file: " + String(n_pep_ids));
    writeLogInfo_("Number of Protein IDs in input file: " + String(n_prot_ids));
    // Terminate if no hits could be found
    if (n_pep_ids == 0)
    {
      logFatal("Input file does not contain any identifications.");
      return INPUT_FILE_EMPTY;
    }
    // Terminate if we do not encounter exactly one protein id
    if (n_prot_ids != 1)
    {
      logFatal("There is not exactly one protein identification in the input file. This is unsupported!");
      return INPUT_FILE_CORRUPT;
    }
    protein_id = protein_ids[0];
    return EXECUTION_OK;
  }

  /// Logs @p message as a fatal error (the caller terminates the tool afterwards).
  void logFatal(const String &message) const
  {
    OPENMS_LOG_ERROR << "FATAL: " << message << " Terminating now!" << std::endl;
  }

  /// Ensures that an input file and at least one output file have been specified.
  ExitCodes validateToolArguments_() const
  {
    if (this->arg_out_idXML_.empty() && this->arg_out_mzid_.empty() && this->arg_out_xquest_.empty())
    {
      // BUGFIX: '-out_xquest' was previously listed twice in this message
      logFatal(
        "No output file specified. You must at least specify one output with -"
        + String(TOPPXFDR::param_out_idXML_)
        + " or -" + String(TOPPXFDR::param_out_mzid_)
        + " or -" + String(TOPPXFDR::param_out_xquest_)
      );
      return ILLEGAL_PARAMETERS;
    }
    if (arg_in_.empty())
    {
      logFatal("Input file is empty");
      return ILLEGAL_PARAMETERS;
    }
    return EXECUTION_OK;
  }
};
// Definitions of the command line parameter names declared in TOPPXFDR
// (shared between parameter registration and option lookup).
const String TOPPXFDR::param_in_ = "in";
const String TOPPXFDR::param_in_type_ = "in_type";
const String TOPPXFDR::param_out_idXML_ = "out_idXML";
const String TOPPXFDR::param_out_mzid_ = "out_mzIdentML";
const String TOPPXFDR::param_out_xquest_ = "out_xquest";
// the actual main function needed to create an executable
int main(int argc, const char ** argv)
{
TOPPXFDR tool;
return tool.main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/AccurateMassSearch.cpp | .cpp | 8,283 | 225 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Erhan Kenar, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/ANALYSIS/ID/AccurateMassSearchEngine.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FORMAT/FileTypes.h>
#include <OpenMS/FORMAT/MzTab.h>
#include <OpenMS/FORMAT/MzTabFile.h>
#include <OpenMS/FORMAT/MzTabMFile.h>
#include <OpenMS/FORMAT/OMSFile.h>
#include <OpenMS/KERNEL/FeatureMap.h>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_AccurateMassSearch AccurateMassSearch
@brief An algorithm to search for exact mass matches from a spectrum against a database (e.g. HMDB).
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=3> → AccurateMassSearch →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_FeatureFinderMetabo </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> NA</td>
</tr>
</table>
</CENTER>
Accurate mass search against a database (usually HMDB).
For details see @ref OpenMS::AccurateMassSearchEngine "AccurateMassSearchEngine".
@note Currently mzIdentML (mzid) is not directly supported as an input/output format of this tool. Convert mzid files to/from idXML using @ref TOPP_IDFileConverter if necessary.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_AccurateMassSearch.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_AccurateMassSearch.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
/// TOPP tool: matches MS feature/consensus masses against a compound database (e.g. HMDB).
class TOPPAccurateMassSearch :
  public TOPPBase
{
public:
  TOPPAccurateMassSearch() :
    TOPPBase("AccurateMassSearch", "Match MS signals to molecules from a database by mass.")
  {
  }

protected:
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "featureXML or consensusXML file");
    setValidFormats_("in", {"featureXML", "consensusXML"});
    registerOutputFile_("out", "<file>", "", "mzTab file");
    setValidFormats_("out", ListUtils::create<String>("mzTab"));
    registerOutputFile_("out_annotation", "<file>", "", "A copy of the input file, annotated with matching hits from the database.", false);
    setValidFormats_("out_annotation", {"featureXML", "consensusXML", "oms"});
    // move some params from algorithm section to top level (to support input file functionality)
    Param p = AccurateMassSearchEngine().getDefaults();
    registerTOPPSubsection_("db", "Database files which contain the identifications");
    registerInputFileList_("db:mapping", "<file(s)>", ListUtils::toStringList<std::string>(p.getValue("db:mapping")), p.getDescription("db:mapping"), true, false, {"skipexists"});
    setValidFormats_("db:mapping", {"tsv"});
    registerInputFileList_("db:struct", "<file(s)>", ListUtils::toStringList<std::string>(p.getValue("db:struct")), p.getDescription("db:struct"), true, false, {"skipexists"});
    setValidFormats_("db:struct", {"tsv"});
    registerInputFile_("positive_adducts", "<file>", p.getValue("positive_adducts").toString(), p.getDescription("positive_adducts"), true, false, {"skipexists"});
    setValidFormats_("positive_adducts", {"tsv"});
    registerInputFile_("negative_adducts", "<file>", p.getValue("negative_adducts").toString(), p.getDescription("negative_adducts"), true, false, {"skipexists"});
    setValidFormats_("negative_adducts", {"tsv"});
    // addEmptyLine_();
    // addText_("Parameters for the accurate mass search can be given in the 'algorithm' part of INI file.");
    registerSubsection_("algorithm", "Algorithm parameters section");
  }

  Param getSubsectionDefaults_(const String& /*section*/) const override
  {
    Param p = AccurateMassSearchEngine().getDefaults();
    // remove params which are already registered at top level (see registerOptionsAndFlags_())
    p.remove("db:mapping");
    p.remove("db:struct");
    p.remove("positive_adducts");
    p.remove("negative_adducts");
    return p;
  }

  ExitCodes main_(int, const char**) override
  {
    //-------------------------------------------------------------
    // parameter handling
    //-------------------------------------------------------------
    String in = getStringOption_("in");
    String out = getStringOption_("out");
    String file_ann = getStringOption_("out_annotation");
    Param ams_param = getParam_().copy("algorithm:", true);
    // copy top-level params to algorithm
    ams_param.setValue("db:mapping", ListUtils::create<std::string>(getStringList_("db:mapping")));
    ams_param.setValue("db:struct", ListUtils::create<std::string>(getStringList_("db:struct")));
    ams_param.setValue("positive_adducts", getStringOption_("positive_adducts"));
    ams_param.setValue("negative_adducts", getStringOption_("negative_adducts"));
    if (file_ann.hasSuffix("oms"))
    {
      ams_param.setValue("id_format", "ID"); // use IdentificationData to store id results
    }
    writeDebug_("Parameters passed to AccurateMassSearch", ams_param, 3);
    // mzTAB output data structures (only one of them is filled, depending on 'id_format')
    MzTab mztab_output;
    MzTabM mztabm_output;
    AccurateMassSearchEngine ams;
    ams.setParameters(ams_param);
    ams.init();
    std::string idf = std::string(ams.getParameters().getValue("id_format"));
    const bool id_format = (idf == "ID"); // idiom: direct bool instead of 'cond ? true : false'
    FileTypes::Type filetype = FileHandler::getType(in);
    if (filetype == FileTypes::FEATUREXML)
    {
      FeatureMap ms_feat_map;
      FileHandler().loadFeatures(in, ms_feat_map, {FileTypes::FEATUREXML});
      //-------------------------------------------------------------
      // do the work
      //-------------------------------------------------------------
      if (id_format) // if format ID is used, MzTabM output will be generated
      {
        ams.run(ms_feat_map, mztabm_output);
      }
      else
      {
        ams.run(ms_feat_map, mztab_output);
      }
      //-------------------------------------------------------------
      // writing output
      //-------------------------------------------------------------
      if (file_ann.hasSuffix("featureXML"))
      {
        FileHandler().storeFeatures(file_ann, ms_feat_map, {FileTypes::FEATUREXML});
      }
      else if (file_ann.hasSuffix("oms"))
      {
        OMSFile().store(file_ann, ms_feat_map);
      }
    }
    else if (filetype == FileTypes::CONSENSUSXML && id_format)
    {
      throw Exception::InvalidValue(__FILE__,
                                    __LINE__,
                                    OPENMS_PRETTY_FUNCTION,
                                    "FATAL: CONSENSUSXML is currently not supporting ID and its MzTabM (v2.0.0-M) output, please use legacy_id",
                                    "");
    }
    else if (filetype == FileTypes::CONSENSUSXML)
    {
      ConsensusMap ms_cons_map;
      FileHandler().loadConsensusFeatures(in, ms_cons_map, {FileTypes::CONSENSUSXML});
      //-------------------------------------------------------------
      // do the work
      //-------------------------------------------------------------
      ams.run(ms_cons_map, mztab_output);
      //-------------------------------------------------------------
      // writing output
      //-------------------------------------------------------------
      if (!file_ann.empty())
      {
        FileHandler().storeConsensusFeatures(file_ann, ms_cons_map, {FileTypes::CONSENSUSXML});
      }
    }
    // store the result table in the format matching the chosen id_format
    if (id_format && filetype == FileTypes::FEATUREXML)
    {
      MzTabMFile mztabm_file;
      mztabm_file.store(out, mztabm_output);
    }
    else
    {
      MzTabFile mztab_file;
      mztab_file.store(out, mztab_output);
    }
    return EXECUTION_OK;
  }
};
int main(int argc, const char** argv)
{
TOPPAccurateMassSearch tool;
return tool.main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/IDMerger.cpp | .cpp | 19,400 | 484 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/IDMergerAlgorithm.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FORMAT/OMSFile.h>
#include <OpenMS/SYSTEM/File.h>
#include <algorithm>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_IDMerger IDMerger
@brief Merges several idXML files into one idXML file.
<center>
<table>
<tr>
<th ALIGN = "center"> potential predecessor tools </td>
<td VALIGN="middle" ROWSPAN=3> → IDMerger →</td>
<th ALIGN = "center"> potential successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_CometAdapter (or other ID engines) </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_ConsensusID </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_IDFileConverter </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_IDMapper </td>
</tr>
</table>
</center>
The peptide hits and protein hits of the input files will be written into the single output file. In general, the number of idXML files that can be merged into one file is not limited.
If an additional file is given through the @p add_to parameter, identifications from the main inputs (@p in) are added to that file, but only for those peptide sequences that were not already present. Only the best peptide hit per identification (MS2 spectrum) is taken into account; peptide identifications and their corresponding protein identifications are transferred.
Alternatively, with the @p pepxml_protxml option, results from corresponding PeptideProphet and ProteinProphet runs can be combined. In this case, exactly two idXML files are expected as input: one containing data from a pepXML file, and the other containing data from a protXML file that was created based on the pepXML (meaningful results can only be obtained for matching files!). pepXML or protXML can be converted to idXML with the @ref TOPP_IDFileConverter tool.
@note Currently mzIdentML (mzid) is not directly supported as an input/output format of this tool. Convert mzid files to/from idXML using @ref TOPP_IDFileConverter if necessary.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_IDMerger.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_IDMerger.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPIDMerger :
public TOPPBase
{
public:
  // Registers the tool with its name and one-line description in the TOPP framework.
  TOPPIDMerger() :
    TOPPBase("IDMerger", "Merges several protein/peptide identification files into one file.")
  {
  }
protected:
  /**
   * @brief Combines two idXML files derived from a PeptideProphet (pepXML) and a
   * ProteinProphet (protXML) run into one set of identifications.
   *
   * Peptide information is taken from the pepXML-derived file; protein grouping,
   * protein scores and coverage values are transferred from the protXML-derived file.
   * Which input is which is detected via the presence of protein groups.
   *
   * @param filenames Exactly two idXML files (pepXML- and protXML-derived, in either order)
   * @param proteins Output: merged protein identifications
   * @param peptides Output: merged peptide identifications
   * @throws Exception::InvalidParameter if neither file contains protein groups, or if the
   *         protXML-derived file contains more than one protein/peptide identification run
   */
  void mergePepXMLProtXML_(StringList filenames, vector<ProteinIdentification>&
                           proteins, PeptideIdentificationList& peptides)
  {
    FileHandler idxml;
    idxml.loadIdentifications(filenames[0], proteins, peptides, {FileTypes::IDXML});
    vector<ProteinIdentification> pepxml_proteins, protxml_proteins;
    PeptideIdentificationList pepxml_peptides, protxml_peptides;
    // decide which input is which: only the protXML-derived file carries protein groups
    if (proteins[0].getProteinGroups().empty()) // first idXML contains data from the pepXML
    {
      proteins.swap(pepxml_proteins);
      peptides.swap(pepxml_peptides);
      idxml.loadIdentifications(filenames[1], protxml_proteins, protxml_peptides, {FileTypes::IDXML});
      if (protxml_proteins[0].getProteinGroups().empty())
      {
        throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "None of the input files seems to be derived from a protXML file (information about protein groups is missing).");
      }
    }
    else // first idXML contains data from the protXML
    {
      proteins.swap(protxml_proteins);
      peptides.swap(protxml_peptides);
      idxml.loadIdentifications(filenames[1], pepxml_proteins, pepxml_peptides, {FileTypes::IDXML});
    }
    if ((protxml_peptides.size() > 1) || (protxml_proteins.size() > 1))
    {
      throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "The idXML derived from a protXML file should contain only one 'ProteinIdentification' and one 'PeptideIdentification' instance.");
    }
    // peptide information comes from the pepXML (additional information in
    // the protXML - adapted peptide hit score, "is_unique", "is_contributing"
    // - is not transferred):
    peptides.swap(pepxml_peptides);
    // prepare scores and coverage values of protein hits from the protXML:
    map<String, pair<double, double> > hit_values;
    ProteinIdentification& protein = protxml_proteins[0];
    for (ProteinHit & hit : protein.getHits())
    {
      // accession -> (score, coverage)
      hit_values[hit.getAccession()] = make_pair(hit.getScore(), hit.getCoverage());
    }
    // merge protein information:
    proteins.swap(pepxml_proteins);
    for (ProteinIdentification & prot : proteins)
    {
      prot.getProteinGroups() = protein.getProteinGroups();
      prot.getIndistinguishableProteins() =
        protein.getIndistinguishableProteins();
      // TODO: since a protXML file can integrate data from several protein
      // identification runs, the protein groups/indistinguishable proteins
      // that we write to one identification run could contain references to
      // proteins that are not observed in this run, but in others; also, some
      // protein hits without enough evidence may not occur in the protXML
      // (thus also not in the protein groups) - clean this up?
      prot.setScoreType(protein.getScoreType());
      prot.setHigherScoreBetter(protein.isHigherScoreBetter());
      prot.setSignificanceThreshold(protein.getSignificanceThreshold());
      // transfer score/coverage per hit; hits unknown to the protXML get score -1
      for (ProteinHit & prot_hit : prot.getHits())
      {
        if (const auto pos = hit_values.find(prot_hit.getAccession()); pos == hit_values.end())
        {
          prot_hit.setScore(-1);
        }
        else
        {
          prot_hit.setScore(pos->second.first);
          prot_hit.setCoverage(pos->second.second);
        }
      }
    }
  }
/// Stores the source filename as meta value "file_origin" on every
/// protein and peptide identification (basename only in test mode,
/// so test output stays reproducible).
void annotateFileOrigin_(vector<ProteinIdentification>& proteins,
                         PeptideIdentificationList& peptides,
                         String filename)
{
  if (test_mode_)
  {
    filename = File::basename(filename);
  }
  const DataValue origin(filename); // build the meta value once, reuse below
  for (auto prot_it = proteins.begin(); prot_it != proteins.end(); ++prot_it)
  {
    prot_it->setMetaValue("file_origin", origin);
  }
  for (auto pep_it = peptides.begin(); pep_it != peptides.end(); ++pep_it)
  {
    pep_it->setMetaValue("file_origin", origin);
  }
}
void registerOptionsAndFlags_() override
{
vector<String> formats = {"idXML", "oms"};
registerInputFileList_("in", "<files>", StringList(), "Input files separated by blanks (all must have the same type)");
setValidFormats_("in", formats);
registerOutputFile_("out", "<file>", "", "Output file (must have the same type as the input files)");
setValidFormats_("out", formats);
registerStringOption_("out_type", "<type>", "", "Output file type (default: determined from file extension)", false);
setValidStrings_("out_type", formats);
registerInputFile_("add_to", "<file>", "", "Optional input file. IDs from 'in' are added to this file, but only if the (modified) peptide sequences are not present yet (considering only best hits per spectrum).", false);
setValidFormats_("add_to", {"idXML"}); // .oms input currently not supported
registerStringOption_("annotate_file_origin", "<annotate>", "true", "Store the original filename in each protein/peptide identification (meta value: 'file_origin') - idXML input/output only", false);
setValidStrings_("annotate_file_origin", {"true","false"});
registerFlag_("pepxml_protxml", "Merge idXML files derived from a pepXML and corresponding protXML file.\nExactly two input files are expected in this case. Not compatible with 'add_to'.");
registerFlag_("merge_proteins_add_PSMs", "Merge all identified proteins by accession into one protein identification run but keep all the PSMs with updated links to potential new protein ID#s. Not compatible with 'add_to'.");
}
/**
  @brief Main program flow: parameter validation, file-type detection and
  dispatch to the selected merge strategy.

  Supported modes: plain merging of idXML files, merging into an existing
  file ('add_to'), merging a pepXML/protXML-derived pair ('pepxml_protxml'),
  and merging all protein runs into one ('merge_proteins_add_PSMs').
  .oms input only supports plain merging.
*/
ExitCodes main_(int, const char**) override
{
  //-------------------------------------------------------------
  // parameter handling
  //-------------------------------------------------------------
  StringList file_names = getStringList_("in");
  String out = getStringOption_("out");
  String add_to = getStringOption_("add_to");
  // values are restricted to "true"/"false" via setValidStrings_, so a
  // plain comparison suffices (no need for '? true : false')
  bool annotate_file_origin = (getStringOption_("annotate_file_origin") == "true");

  if (file_names.empty())
  {
    // this also allows exactly 1 file, because it might be useful for
    // a TOPPAS pipeline containing an IDMerger, to run only with one file
    writeLogError_("No input filename given. Aborting!");
    printUsage_();
    return ILLEGAL_PARAMETERS;
  }

  bool pepxml_protxml = getFlag_("pepxml_protxml");
  if (pepxml_protxml && (file_names.size() != 2))
  {
    writeLogError_("Exactly two input filenames expected for option 'pepxml_protxml'. Aborting!");
    printUsage_();
    return ILLEGAL_PARAMETERS;
  }
  if (pepxml_protxml && !add_to.empty())
  {
    // currently not allowed to keep the code simpler and because it doesn't
    // seem useful, but should be possible in principle:
    writeLogError_("The options 'add_to' and 'pepxml_protxml' cannot be used together. Aborting!");
    printUsage_();
    return ILLEGAL_PARAMETERS;
  }
  bool merge_proteins_add_PSMs = getFlag_("merge_proteins_add_PSMs");
  if (merge_proteins_add_PSMs && (pepxml_protxml || !add_to.empty()))
  {
    // currently not allowed to keep the code simpler and because it doesn't
    // seem useful, but should be possible in principle:
    writeLogError_("The options 'merge_proteins_add_PSMs', 'add_to' and 'pepxml_protxml' cannot be used together. Aborting!");
    printUsage_();
    return ILLEGAL_PARAMETERS;
  }

  // check file types: explicit 'out_type' wins, then the output file
  // extension, then the (content-derived) type of the first input file
  FileTypes::Type type;
  String out_type = getStringOption_("out_type");
  if (!out_type.empty())
  {
    type = FileTypes::nameToType(out_type);
  }
  else
  {
    type = FileHandler::getTypeByFileName(out);
  }
  for (const String& file_name : file_names)
  {
    FileTypes::Type current_type = FileHandler::getType(file_name);
    if ((type == FileTypes::UNKNOWN) && (current_type != FileTypes::UNKNOWN))
    {
      type = current_type; // determine output file type from input
      continue;
    }
    if (current_type != type)
    {
      writeLogError_("Mixing different file types is not supported. Aborting!");
      printUsage_();
      return ILLEGAL_PARAMETERS;
    }
  }
  if (type == FileTypes::UNKNOWN)
  {
    writeLogError_("Could not determine input/output file type. Aborting!");
    printUsage_();
    return ILLEGAL_PARAMETERS;
  }

  //-------------------------------------------------------------
  // calculations
  //-------------------------------------------------------------
  if (type == FileTypes::OMS)
  {
    if (!add_to.empty() || pepxml_protxml || merge_proteins_add_PSMs)
    {
      // 'annotate_file_origin' is on by default - just ignore it
      writeLogError_("Options are currently not supported when merging .oms files. Aborting!");
      printUsage_();
      return ILLEGAL_PARAMETERS;
    }
    OMSFile oms_file;
    // load first file (others will be merged in):
    IdentificationData data;
    oms_file.load(file_names[0], data);
    // merge in other files:
    for (Size index = 1; index < file_names.size(); ++index)
    {
      IdentificationData more_data;
      oms_file.load(file_names[index], more_data);
      data.merge(more_data);
    }
    oms_file.store(out, data);
    return EXECUTION_OK;
  }

  // file type: idXML
  vector<ProteinIdentification> proteins;
  PeptideIdentificationList peptides;
  if (pepxml_protxml)
  {
    mergePepXMLProtXML_(file_names, proteins, peptides);
  }
  else if (merge_proteins_add_PSMs)
  {
    proteins.resize(1); // all runs are merged into this single run
    FileHandler idXMLf;
    IDMergerAlgorithm merger{};
    Param p = merger.getParameters();
    p.setValue("annotate_origin", annotate_file_origin ? "true" : "false");
    merger.setParameters(p);
    for (const String& file : file_names) // read-only access is sufficient
    {
      vector<ProteinIdentification> prots;
      PeptideIdentificationList peps;
      idXMLf.loadIdentifications(file, prots, peps, {FileTypes::IDXML});
      merger.insertRuns(prots, peps);
    }
    merger.returnResultsAndClear(proteins[0], peptides);
  }
  else
  {
    mergeIds_(file_names, annotate_file_origin, add_to, proteins, peptides);
  }

  //-------------------------------------------------------------
  // writing output
  //-------------------------------------------------------------
  OPENMS_LOG_DEBUG << "protein IDs: " << proteins.size() << endl
                   << "peptide IDs: " << peptides.size() << endl;
  FileHandler().storeIdentifications(out, proteins, peptides, {FileTypes::IDXML});
  return EXECUTION_OK;
}
/**
  @brief Merges identifications from several idXML files.

  @param file_names input idXML files; passed by value because the list is
         reordered when @p add_to is used
  @param annotate_file_origin if true, each input filename is stored as meta
         value "file_origin" on the loaded IDs
  @param add_to optional "reference" file: only peptides (best hit per
         spectrum) whose sequences are not already present there are added
  @param proteins output: merged protein identification runs
  @param peptides output: merged peptide identifications
*/
void mergeIds_(StringList file_names,
               bool annotate_file_origin,
               const String& add_to,
               vector<ProteinIdentification>& proteins,
               PeptideIdentificationList& peptides)
{
  // protein runs indexed by identifier (the link between peptides/proteins)
  map<String, ProteinIdentification> proteins_by_id;
  // peptide IDs per input file, same order as 'file_names'
  vector<PeptideIdentificationList> peptides_by_file;
  StringList add_to_ids; // IDs from the "add_to" file (if any)
  if (!add_to.empty())
  { // make 'add_to' filename the first in the list
    std::erase(file_names, add_to);
    file_names.insert(file_names.begin(), add_to);
  }
  peptides_by_file.resize(file_names.size());
  // load all inputs and index their protein runs:
  for (Size i = 0; i < file_names.size(); ++i)
  {
    const String& file_name = file_names[i];
    vector<ProteinIdentification> additional_proteins;
    FileHandler().loadIdentifications(file_name, additional_proteins, peptides_by_file[i], {FileTypes::IDXML});
    if (annotate_file_origin) // set MetaValue "file_origin" if flag is set
    {
      annotateFileOrigin_(additional_proteins, peptides_by_file[i],
                          file_name);
    }
    for (const ProteinIdentification& prot : additional_proteins)
    {
      const String& id = prot.getIdentifier();
      // NOTE(review): a later file with the same run identifier silently
      // overwrites an earlier one here - confirm this is intended
      proteins_by_id[id] = prot;
      if (i == 0) { add_to_ids.push_back(id); }
    }
  }
  if (add_to.empty()) // copy proteins from map into vector for writing
  {
    // append peptides in same vector
    for (PeptideIdentificationList & peps : peptides_by_file)
    {
      peptides.insert(peptides.end(), peps.begin(), peps.end());
    }
    // only append the runs (no merging of proteins)
    for (auto map_it = proteins_by_id.begin(); map_it != proteins_by_id.end(); ++map_it)
    {
      proteins.push_back(map_it->second);
    }
  }
  else // add only new IDs to an existing file
  {
    // copy over data from reference file ("add_to"):
    map<String, ProteinIdentification> selected_proteins;
    for (auto ids_it = add_to_ids.begin();
         ids_it != add_to_ids.end(); ++ids_it)
    {
      selected_proteins[*ids_it] = proteins_by_id[*ids_it];
    }
    // keep track of peptides that shouldn't be duplicated:
    set<AASequence> sequences;
    PeptideIdentificationList& base_peptides = peptides_by_file[0];
    for (PeptideIdentification & pep : base_peptides)
    {
      if (pep.getHits().empty()) continue;
      pep.sort(); // best hit first; only its sequence is considered
      sequences.insert(pep.getHits()[0].getSequence());
    }
    peptides.insert(peptides.end(), base_peptides.begin(),
                    base_peptides.end());
    // merge in data from other files (skipping index 0, the reference file):
    for (auto file_it = ++peptides_by_file.begin(); file_it != peptides_by_file.end();
         ++file_it)
    {
      set<String> accessions; // keep track to avoid duplicates
      for (auto pep_it = file_it->begin(); pep_it != file_it->end(); ++pep_it)
      {
        if (pep_it->getHits().empty()) continue;
        pep_it->sort();
        const PeptideHit& hit = pep_it->getHits()[0];
        OPENMS_LOG_DEBUG << "peptide: " << hit.getSequence().toString() << endl;
        // skip ahead if peptide is not new:
        if (sequences.find(hit.getSequence()) != sequences.end()) continue;
        OPENMS_LOG_DEBUG << "new peptide!" << endl;
        pep_it->getHits().resize(1); // restrict to best hit for simplicity
        peptides.push_back(*pep_it);
        set<String> protein_accessions = hit.extractProteinAccessionsSet();
        // copy over proteins:
        for (String const & acc : protein_accessions)
        {
          OPENMS_LOG_DEBUG << "accession: " << acc << endl;
          // skip ahead if accession is not new:
          if (accessions.find(acc) != accessions.end())
          {
            continue;
          }
          OPENMS_LOG_DEBUG << "new accession!" << endl;
          // first find the right protein identification:
          const String& id = pep_it->getIdentifier();
          OPENMS_LOG_DEBUG << "identifier: " << id << endl;
          if (proteins_by_id.find(id) == proteins_by_id.end())
          {
            writeLogError_("Error: identifier '" + id + "' linking peptides and proteins not found. Skipping.");
            continue;
          }
          ProteinIdentification& protein = proteins_by_id[id];
          // now find the protein hit:
          auto hit_it = protein.findHit(acc);
          if (hit_it == protein.getHits().end())
          {
            writeLogError_("Error: accession '" + acc + "' not found in "
                           "protein identification '" + id + "'. Skipping.");
            continue;
          }
          // we may need to copy protein ID meta data, if we haven't yet:
          if (selected_proteins.find(id) == selected_proteins.end())
          {
            OPENMS_LOG_DEBUG << "adding protein identification" << endl;
            selected_proteins[id] = protein;
            selected_proteins[id].getHits().clear();
            // remove potentially invalid information:
            selected_proteins[id].getProteinGroups().clear();
            selected_proteins[id].getIndistinguishableProteins().clear();
          }
          selected_proteins[id].insertHit(*hit_it);
          accessions.insert(acc);
          // NOTE: we're only adding the first protein hit for each
          // accession, not taking into account scores or any meta data
        }
      }
    }
    // NOTE(review): reverse iteration over the map - presumably to keep the
    // historical output order of protein runs; confirm before changing
    for (auto map_it = selected_proteins.rbegin(); map_it != selected_proteins.rend();
         ++map_it)
    {
      proteins.push_back(map_it->second);
    }
  }
}
};
/// Program entry point: delegates to the TOPP tool framework.
int main(int argc, const char** argv)
{
  return TOPPIDMerger().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/SpectraFilterWindowMower.cpp | .cpp | 3,906 | 133 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Mathias Walzer$
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/PROCESSING/FILTERING/WindowMower.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <typeinfo>
using namespace OpenMS;
using namespace std;
/**
@page TOPP_SpectraFilterWindowMower SpectraFilterWindowMower
@brief Retains the highest peaks in a sliding or jumping window
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=2> → SpectraFilter →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_PeakPickerHiRes </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> any tool operating on MS peak data @n (in mzML format)</td>
</tr>
</table>
</CENTER>
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_SpectraFilterWindowMower.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_SpectraFilterWindowMower.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
/// TOPP tool wrapper around the WindowMower spectrum filter.
class TOPPSpectraFilterWindowMower :
  public TOPPBase
{
public:
  TOPPSpectraFilterWindowMower() :
    TOPPBase("SpectraFilterWindowMower", "Applies thresholdfilter to peak spectra.")
  {
  }

protected:
  /// Registers input/output files and the algorithm parameter subsection.
  void registerOptionsAndFlags_() override
  {
    // brace-initialized format lists instead of ListUtils::create<String>,
    // matching the style used elsewhere in the TOPP tools
    registerInputFile_("in", "<file>", "", "input file ");
    setValidFormats_("in", {"mzML"});
    registerOutputFile_("out", "<file>", "", "output file ");
    setValidFormats_("out", {"mzML"});

    // register one section for each algorithm
    registerSubsection_("algorithm", "Algorithm parameter subsection.");
  }

  /// Exposes the WindowMower parameters as defaults of the "algorithm" subsection.
  Param getSubsectionDefaults_(const String& /*section*/) const override
  {
    return WindowMower().getParameters();
  }

  /// Loads the experiment, applies the WindowMower filter and stores the result.
  ExitCodes main_(int, const char**) override
  {
    //-------------------------------------------------------------
    // parameter handling
    //-------------------------------------------------------------

    // input/output files
    String in(getStringOption_("in"));
    String out(getStringOption_("out"));

    //-------------------------------------------------------------
    // loading input
    //-------------------------------------------------------------
    PeakMap exp;
    FileHandler().loadExperiment(in, exp, {FileTypes::MZML}, log_type_);

    //-------------------------------------------------------------
    // if meta data arrays are present, remove them and warn
    //-------------------------------------------------------------
    if (exp.clearMetaDataArrays())
    {
      writeLogWarn_("Warning: Spectrum meta data arrays cannot be sorted. They are deleted.");
    }

    //-------------------------------------------------------------
    // filter
    //-------------------------------------------------------------
    Param filter_param = getParam_().copy("algorithm:", true);
    writeDebug_("Used filter parameters", filter_param, 3);

    WindowMower filter;
    filter.setParameters(filter_param);
    filter.filterPeakMap(exp);

    //-------------------------------------------------------------
    // writing output
    //-------------------------------------------------------------

    // annotate output with data processing info
    addDataProcessing_(exp, getProcessingInfo_(DataProcessing::FILTERING));

    FileHandler().storeExperiment(out, exp, {FileTypes::MZML}, log_type_);

    return EXECUTION_OK;
  }
};
/// Program entry point: delegates to the TOPP tool framework.
int main(int argc, const char** argv)
{
  return TOPPSpectraFilterWindowMower().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/NoiseFilterGaussian.cpp | .cpp | 7,452 | 239 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Eva Lange $
// --------------------------------------------------------------------------
#include <OpenMS/config.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/FORMAT/FileHandler.h>
// TODO remove needed here for transform
#include <OpenMS/FORMAT/MzMLFile.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/PROCESSING/SMOOTHING/GaussFilter.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/DATASTRUCTURES/StringListUtils.h>
#include <OpenMS/FORMAT/DATAACCESS/MSDataWritingConsumer.h>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_NoiseFilterGaussian NoiseFilterGaussian
@brief Executes a Gaussian filter to reduce the noise in an MS experiment.
<center>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=4> → NoiseFilterGaussian →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_FileConverter </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_PeakPickerHiRes</td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=2> @ref TOPP_Resampler </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_PeakPickerHiRes</td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_BaselineFilter</td>
</tr>
</table>
</center>
The Gaussian filter is a peak area preserving low-pass filter and is characterized by narrow bandwidths,
sharp cutoffs, and low passband ripple.
@note The Gaussian filter works for uniform as well as for non-uniform data.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_NoiseFilterGaussian.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_NoiseFilterGaussian.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPNoiseFilterGaussian :
public TOPPBase
{
public:
TOPPNoiseFilterGaussian() :
TOPPBase("NoiseFilterGaussian", "Removes noise from profile spectra by using Gaussian filter (on uniform as well as non-uniform data).")
{
}
/**
@brief Helper class for the Low Memory Noise filtering
*/
class NFGaussMzMLConsumer :
public MSDataWritingConsumer
{
public:
NFGaussMzMLConsumer(const String& filename, const GaussFilter& gf) :
MSDataWritingConsumer(filename)
{
gf_ = gf;
}
void processSpectrum_(MapType::SpectrumType& s) override
{
gf_.filter(s);
}
void processChromatogram_(MapType::ChromatogramType& c) override
{
gf_.filter(c);
}
private:
GaussFilter gf_;
};
void registerOptionsAndFlags_() override
{
registerInputFile_("in", "<file>", "", "input raw data file ");
setValidFormats_("in", ListUtils::create<String>("mzML"));
registerOutputFile_("out", "<file>", "", "output raw data file ");
setValidFormats_("out", ListUtils::create<String>("mzML"));
registerStringOption_("processOption", "<name>", "inmemory", "Whether to load all data and process them in-memory or whether to process the data on the fly (lowmemory) without loading the whole file into memory first", false, true);
setValidStrings_("processOption", ListUtils::create<String>("inmemory,lowmemory"));
registerSubsection_("algorithm", "Algorithm parameters section");
}
Param getSubsectionDefaults_(const String & /*section*/) const override
{
return GaussFilter().getDefaults();
}
ExitCodes doLowMemAlgorithm(const GaussFilter& gauss)
{
///////////////////////////////////
// Create the consumer object, add data processing
///////////////////////////////////
NFGaussMzMLConsumer gaussConsumer(out, gauss);
gaussConsumer.addDataProcessing(getProcessingInfo_(DataProcessing::SMOOTHING));
///////////////////////////////////
// Create new MSDataReader and set our consumer
///////////////////////////////////
MzMLFile mz_data_file;
mz_data_file.setLogType(log_type_);
mz_data_file.transform(in, &gaussConsumer);
return EXECUTION_OK;
}
ExitCodes main_(int, const char **) override
{
//-------------------------------------------------------------
// parameter handling
//-------------------------------------------------------------
in = getStringOption_("in");
out = getStringOption_("out");
String process_option = getStringOption_("processOption");
Param filter_param = getParam_().copy("algorithm:", true);
writeDebug_("Parameters passed to filter", filter_param, 3);
GaussFilter gauss;
gauss.setLogType(log_type_);
gauss.setParameters(filter_param);
if (process_option == "lowmemory")
{
return doLowMemAlgorithm(gauss);
}
//-------------------------------------------------------------
// loading input
//-------------------------------------------------------------
PeakMap exp;
FileHandler().loadExperiment(in, exp, {FileTypes::MZML}, log_type_);
if (exp.empty() && exp.getChromatograms().empty())
{
OPENMS_LOG_WARN << "The given file does not contain any conventional peak data, but might"
" contain chromatograms. This tool currently cannot handle them, sorry.";
return INCOMPATIBLE_INPUT_DATA;
}
//check for peak type (profile data required)
if (!exp.empty() && exp[0].getType(true) == SpectrumSettings::SpectrumType::CENTROID)
{
writeLogWarn_("Warning: OpenMS peak type estimation indicates that this is not profile data!");
}
//check if spectra are sorted
for (Size i = 0; i < exp.size(); ++i)
{
if (!exp[i].isSorted())
{
writeLogError_("Error: Not all spectra are sorted according to peak m/z positions. Use FileFilter to sort the input!");
return INCOMPATIBLE_INPUT_DATA;
}
}
//check if chromatograms are sorted
for (Size i = 0; i < exp.getChromatograms().size(); ++i)
{
if (!exp.getChromatogram(i).isSorted())
{
writeLogError_("Error: Not all chromatograms are sorted according to peak m/z positions. Use FileFilter to sort the input!");
return INCOMPATIBLE_INPUT_DATA;
}
}
//-------------------------------------------------------------
// calculations
//-------------------------------------------------------------
try
{
gauss.filterExperiment(exp);
}
catch (Exception::IllegalArgument & e)
{
writeLogError_(String("Error: ") + e.what());
return INCOMPATIBLE_INPUT_DATA;
}
//-------------------------------------------------------------
// writing output
//-------------------------------------------------------------
//annotate output with data processing info
addDataProcessing_(exp, getProcessingInfo_(DataProcessing::SMOOTHING));
FileHandler().storeExperiment(out, exp, {FileTypes::MZML}, log_type_);
return EXECUTION_OK;
}
String in;
String out;
};
/// Program entry point: delegates to the TOPP tool framework.
int main(int argc, const char** argv)
{
  return TOPPNoiseFilterGaussian().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/IDSplitter.cpp | .cpp | 5,785 | 169 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/METADATA/ProteinIdentification.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <OpenMS/METADATA/AnnotatedMSRun.h>

#include <algorithm>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_IDSplitter IDSplitter
@brief Splits protein/peptide identifications off of annotated data files.
This performs the reverse operation as IDMapper.
@note Currently mzIdentML (mzid) is not directly supported as an input/output format of this tool. Convert mzid files to/from idXML using @ref TOPP_IDFileConverter if necessary.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_IDSplitter.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_IDSplitter.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPIDSplitter :
public TOPPBase
{
public:
TOPPIDSplitter() :
TOPPBase("IDSplitter", "Splits protein/peptide identifications off of annotated data files")
{
}
protected:
void removeDuplicates_(PeptideIdentificationList & peptides)
{
// there is no "PeptideIdentification::operator<", so we can't use a set
// or sort + unique to filter out duplicates...
// just use the naive O(n²) algorithm
PeptideIdentificationList unique;
for (PeptideIdentificationList::iterator in_it = peptides.begin();
in_it != peptides.end(); ++in_it)
{
bool duplicate = false;
for (PeptideIdentificationList::iterator out_it = unique.begin();
out_it != unique.end(); ++out_it)
{
if (*in_it == *out_it)
{
duplicate = true;
break;
}
}
if (!duplicate) unique.push_back(*in_it);
}
peptides.swap(unique);
}
void registerOptionsAndFlags_() override
{
registerInputFile_("in", "<file>", "", "Input file (data annotated with identifications)");
setValidFormats_("in", ListUtils::create<String>("featureXML,consensusXML"));
registerOutputFile_("out", "<file>", "", "Output file (data without identifications). Either 'out' or 'id_out' are required. They can be used together.", false);
setValidFormats_("out", ListUtils::create<String>("featureXML,consensusXML"));
registerOutputFile_("id_out", "<file>", "", "Output file (identifications). Either 'out' or 'id_out' are required. They can be used together.", false);
setValidFormats_("id_out", ListUtils::create<String>("idXML"));
}
ExitCodes main_(int, const char **) override
{
String in = getStringOption_("in"), out = getStringOption_("out"),
id_out = getStringOption_("id_out");
if (out.empty() && id_out.empty())
{
throw Exception::RequiredParameterNotGiven(__FILE__, __LINE__,
OPENMS_PRETTY_FUNCTION,
"out/id_out");
}
vector<ProteinIdentification> proteins;
PeptideIdentificationList peptides;
FileTypes::Type in_type = FileHandler::getType(in);
if (in_type == FileTypes::FEATUREXML)
{
FeatureMap features;
FileHandler().loadFeatures(in, features, {FileTypes::FEATUREXML});
features.getUnassignedPeptideIdentifications().swap(peptides);
for (FeatureMap::Iterator feat_it = features.begin();
feat_it != features.end(); ++feat_it)
{
peptides.insert(peptides.end(),
feat_it->getPeptideIdentifications().begin(),
feat_it->getPeptideIdentifications().end());
feat_it->getPeptideIdentifications().clear();
}
features.getProteinIdentifications().swap(proteins);
if (!out.empty())
{
addDataProcessing_(features,
getProcessingInfo_(DataProcessing::FILTERING));
FileHandler().storeFeatures(out, features, {FileTypes::FEATUREXML});
}
}
else // consensusXML
{
ConsensusMap consensus;
FileHandler().loadConsensusFeatures(in, consensus, {FileTypes::CONSENSUSXML});
consensus.getUnassignedPeptideIdentifications().swap(peptides);
for (ConsensusMap::Iterator cons_it = consensus.begin();
cons_it != consensus.end(); ++cons_it)
{
peptides.insert(peptides.end(),
cons_it->getPeptideIdentifications().begin(),
cons_it->getPeptideIdentifications().end());
cons_it->getPeptideIdentifications().clear();
}
consensus.getProteinIdentifications().swap(proteins);
if (!out.empty())
{
addDataProcessing_(consensus,
getProcessingInfo_(DataProcessing::FILTERING));
FileHandler().storeConsensusFeatures(out, consensus, {FileTypes::CONSENSUSXML});
}
}
if (!id_out.empty())
{
// IDMapper can match a peptide ID to several overlapping features,
// resulting in duplicates
removeDuplicates_(peptides);
FileHandler().storeIdentifications(id_out, proteins, peptides, {FileTypes::IDXML});
}
return EXECUTION_OK;
}
};
/// Program entry point: delegates to the TOPP tool framework.
int main(int argc, const char** argv)
{
  return TOPPIDSplitter().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/PeptideDataBaseSearchFI.cpp | .cpp | 4,719 | 128 | // Copyright (c) 2002-present, The OpenMS Team -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/PeptideSearchEngineFIAlgorithm.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/SYSTEM/File.h>
#include <OpenMS/METADATA/PeptideIdentificationList.h>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
// Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_PeptideDataBaseSearchFI PeptideDataBaseSearchFI
@brief Identifies peptides in MS/MS spectra.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=2> → PeptideDataBaseSearchFI →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> any signal-/preprocessing tool @n (in mzML format)</td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_IDFilter or @n any protein/peptide processing tool</td>
</tr>
</table>
</CENTER>
@em This search engine is mainly for educational/benchmarking/prototyping use cases.
It lags behind in speed and/or quality of results when compared to state-of-the-art search engines.
@note Currently mzIdentML (mzid) is not directly supported as an input/output format of this tool. Convert mzid files to/from idXML using @ref TOPP_IDFileConverter if necessary.
@note Open-search mode is automatically determined by the precursor mass tolerance: enabled when tolerance exceeds 1 Da or 1000 ppm. No explicit open-search parameter is needed. This is logged at runtime and recorded in the output search parameters as UserParam 'open_search'.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_PeptideDataBaseSearchFI.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_PeptideDataBaseSearchFI.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
/// TOPP tool wrapper around the PeptideSearchEngineFIAlgorithm search engine.
class PeptideDataBaseSearchFI :
  public TOPPBase
{
public:
  PeptideDataBaseSearchFI() :
    TOPPBase("PeptideDataBaseSearchFI",
             "Annotates bottom-up MS/MS spectra using PeptideDataBaseSearchFI.",
             false)
  {
  }

protected:
  /// Registers input spectra, the FASTA database, the output file and the
  /// search algorithm parameters (under the "Search:" subtree).
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "input file ");
    setValidFormats_("in", {"mzML"});

    registerInputFile_("database", "<file>", "", "input file ");
    setValidFormats_("database", {"fasta"});

    registerOutputFile_("out", "<file>", "", "output file ");
    setValidFormats_("out", {"idXML"});

    // put search algorithm parameters at Search: subtree of parameters
    Param search_algo_params_with_subsection;
    search_algo_params_with_subsection.insert("Search:", PeptideSearchEngineFIAlgorithm().getDefaults());
    registerFullParam_(search_algo_params_with_subsection);
  }

  /// Runs the database search and stores the results as idXML.
  ExitCodes main_(int, const char**) override
  {
    String in = getStringOption_("in");
    String database = getStringOption_("database");
    String out = getStringOption_("out");

    ProgressLogger progresslogger;
    progresslogger.setLogType(log_type_);

    vector<ProteinIdentification> protein_ids;
    PeptideIdentificationList peptide_ids;

    PeptideSearchEngineFIAlgorithm sse;
    sse.setParameters(getParam_().copy("Search:", true));

    // map algorithm exit code to application exit code
    PeptideSearchEngineFIAlgorithm::ExitCodes e = sse.search(in, database, protein_ids, peptide_ids);
    if (e != PeptideSearchEngineFIAlgorithm::ExitCodes::EXECUTION_OK)
    {
      return TOPPBase::ExitCodes::INTERNAL_ERROR;
    }

    // MS path already set in algorithm. Overwrite here so we get something testable
    // (guard against an empty result vector to avoid out-of-bounds access)
    if (getFlag_("test") && !protein_ids.empty())
    {
      // if test mode set, add file without path so we can compare it
      protein_ids[0].setPrimaryMSRunPath({"file://" + File::basename(in)});
    }

    FileHandler().storeIdentifications(out, protein_ids, peptide_ids, {FileTypes::IDXML});

    return EXECUTION_OK;
  }
};
/// Program entry point: delegates to the TOPP tool framework.
int main(int argc, const char** argv)
{
  return PeptideDataBaseSearchFI().main(argc, argv);
}
///@endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/FeatureLinkerUnlabeledQT.cpp | .cpp | 3,718 | 110 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Marc Sturm, Clemens Groepl, Steffen Sass $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithmQT.h>
#include "FeatureLinkerBase.cpp"
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
// Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_FeatureLinkerUnlabeledQT FeatureLinkerUnlabeledQT
@brief Groups corresponding features from multiple maps using a QT clustering approach.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> potential predecessor tools </td>
<td VALIGN="middle" ROWSPAN=4> → FeatureLinkerUnlabeledQT →</td>
<th ALIGN = "center"> potential successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_FeatureFinderCentroided @n (or another feature detection algorithm) </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_ProteinQuantifier </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=2> @ref TOPP_MapAlignerPoseClustering @n (or another map alignment algorithm) </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_TextExporter </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_SeedListGenerator </td>
</tr>
</table>
</CENTER>
Reference:\n
Weisser <em>et al.</em>: <a href="https://doi.org/10.1021/pr300992u">An automated pipeline for high-throughput label-free quantitative proteomics</a> (J. Proteome Res., 2013, PMID: 23391308).
This tool provides an algorithm for grouping corresponding features in
multiple runs of label-free experiments. For more details and
algorithm-specific parameters (set in the ini file) see "Detailed
Description" in the @ref OpenMS::FeatureGroupingAlgorithmQT "algorithm
documentation".
FeatureLinkerUnlabeledQT takes several feature maps (featureXML files) and
stores the corresponding features in a consensus map (consensusXML file).
Feature maps can be created from MS experiments (peak data) using one of
the FeatureFinder TOPP tools.
@see @ref TOPP_FeatureLinkerUnlabeled @ref TOPP_FeatureLinkerLabeled
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_FeatureLinkerUnlabeledQT.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_FeatureLinkerUnlabeledQT.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPFeatureLinkerUnlabeledQT :
  public TOPPFeatureLinkerBase
{
public:
  /// Registers tool name and one-line description with the TOPP framework.
  TOPPFeatureLinkerUnlabeledQT() :
    TOPPFeatureLinkerBase("FeatureLinkerUnlabeledQT", "Groups corresponding features from multiple maps.")
  {
  }

protected:
  /// Adds the shared linker options plus the 'algorithm' parameter subsection.
  void registerOptionsAndFlags_() override
  {
    TOPPFeatureLinkerBase::registerOptionsAndFlags_();
    registerSubsection_("algorithm", "Algorithm parameters section");
  }

  /// Exposes the QT grouping algorithm's defaults as the 'algorithm' subsection.
  Param getSubsectionDefaults_(const String& /*section*/) const override
  {
    return FeatureGroupingAlgorithmQT().getParameters();
  }

  /// All actual work happens in the shared feature linker driver.
  ExitCodes main_(int, const char**) override
  {
    FeatureGroupingAlgorithmQT qt_algorithm;
    return TOPPFeatureLinkerBase::common_main_(&qt_algorithm);
  }
};
/// Tool entry point: delegate straight to the TOPP framework driver.
int main(int argc, const char** argv)
{
  return TOPPFeatureLinkerUnlabeledQT().main(argc, argv);
}
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/IDFileConverter.cpp | .cpp | 33,684 | 808 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Katharina Albers, Clemens Groepl, Chris Bielow, Mathias Walzer,
// Hendrik Weisser
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/CHEMISTRY/ProteaseDB.h>
#include <OpenMS/CHEMISTRY/ProteaseDigestion.h>
#include <OpenMS/CHEMISTRY/SpectrumAnnotator.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FORMAT/FileTypes.h>
#include <OpenMS/FORMAT/IdXMLFile.h>
#include <OpenMS/FORMAT/MascotXMLFile.h>
#include <OpenMS/FORMAT/MzIdentMLFile.h>
#include <OpenMS/FORMAT/OMSFile.h>
#include <OpenMS/FORMAT/OMSSAXMLFile.h>
#include <OpenMS/FORMAT/PepXMLFile.h>
#include <OpenMS/FORMAT/PercolatorOutfile.h>
#include <OpenMS/FORMAT/ProtXMLFile.h>
#include <OpenMS/FORMAT/SequestOutfile.h>
#include <OpenMS/FORMAT/TextFile.h>
#include <OpenMS/FORMAT/XQuestResultXMLFile.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/METADATA/ID/IdentificationDataConverter.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/FORMAT/XTandemXMLFile.h>
#include <OpenMS/FORMAT/MzMLFile.h>
#include <OpenMS/SYSTEM/File.h>
#ifdef _OPENMP
#include <omp.h>
#endif
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
// Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_IDFileConverter IDFileConverter
@brief Converts peptide/protein identification engine file formats.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> potential predecessor tools </td>
<td VALIGN="middle" ROWSPAN=3> → IDFileConverter →</td>
<th ALIGN = "center"> potential successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> TPP tools: PeptideProphet, ProteinProphet </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=2> TPP tools: ProteinProphet\n(for conversion from idXML to pepXML) </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> Sequest protein identification engine </td>
</tr>
</table>
</CENTER>
IDFileConverter can be used to convert identification results from external tools/pipelines (like TPP, Sequest, Mascot, OMSSA, X! Tandem) into other (OpenMS-specific) formats.
For search engine results, it might be advisable to use the respective TOPP Adapters (e.g. CometAdapter) to avoid the extra conversion step.
The most simple format accepted is '.tsv': A tab separated text file, which contains one or more peptide sequences per line.
Each line represents one spectrum, i.e. is stored as a PeptideIdentification with one or more PeptideHits.
Lines starting with "#" are ignored by the parser.
Conversion from the TPP file formats pepXML and protXML to OpenMS' idXML is quite comprehensive, to the extent that the original data can be
represented in the simpler idXML format.
In contrast, support for converting from idXML to pepXML is limited. The purpose here is simply to create pepXML files containing the relevant
information for the use of ProteinProphet.
We use the following heuristic: if peptideprophet_analyzed is set, we take the scores from the idXML as is and assume
the PeptideHits contain all necessary information. If peptideprophet is not set, we only provide ProteinProphet-compatible
results with probability-based scores (i.e. Percolator with PEP score or scores from IDPosteriorErrorProbability). All
secondary or non-probability main scores will be written as "search_scores" only.
Support for conversion to/from mzIdentML (.mzid) is still experimental and may lose information.
The xquest.xml format is very specific to Protein-Protein Cross-Linking MS (XL-MS) applications and is only considered useful for compatibility
of OpenPepXL with the xQuest / xProphet / xTract pipeline. It will only have useful output when converting from idXML or mzid containing XL-MS data.
Also supports generation of .mzML files with theoretical spectra from a .FASTA input.
<B>Details on additional parameters:</B>
@p mz_file: @n
Some search engine output files (like pepXML, mascotXML, Sequest .out files) may not contain retention times, only scan numbers or spectrum IDs. To be able to look up the actual RT values, the raw file has to be provided using the parameter @p mz_file. (If the identification results should be used later to annotate feature maps or consensus maps, it is critical that they contain RT values. See also @ref TOPP_IDMapper.)
@p mz_name: @n
pepXML files can contain results from multiple experiments. However, the idXML format does not support this. The @p mz_name parameter (or @p mz_file, if given) thus serves to define what parts to extract from the pepXML.
@p scan_regex: @n
This advanced parameter defines a spectrum reference format via a Perl-style regular expression. The reference format connects search hits to the MS2 spectra that were searched, and may be needed to look up e.g. retention times in the raw data (@p mz_file). See the documentation of class @ref OpenMS::SpectrumLookup "SpectrumLookup" for details on how to specify spectrum reference formats. Note that it is not necessary to look up any information in the raw data if that information can be extracted directly from the spectrum reference, in which case @p mz_file is not needed.@n
For Mascot results exported to (Mascot) XML, scan numbers that can be used to look up retention times (via @p mz_file) should be given in the "pep_scan_title" XML elements, but the format can vary. Some default formats are defined in the Mascot XML reader, but if those fail to extract the scan numbers, @p scan_regex can be used to overwrite the defaults.@n
For pepXML, supplying @p scan_regex may be necessary for files exported from Mascot, but only if the default reference formats (same as for Mascot XML) do not match. The spectrum references to which @p scan_regex is applied are read from the "spectrum" attribute of the "spectrum_query" elements.@n
For Percolator tab-delimited output, information is extracted from the "PSMId" column. By default, extraction of scan numbers and charge states is supported for MS-GF+ Percolator results (retention times and precursor m/z values can then be looked up in the raw data via @p mz_file).@n
Some information about the supported input types:
@li @ref OpenMS::MzIdentMLFile "mzIdentML"
@li @ref OpenMS::IdXMLFile "idXML"
@li @ref OpenMS::PepXMLFile "pepXML"
@li @ref OpenMS::ProtXMLFile "protXML"
@li @ref OpenMS::MascotXMLFile "Mascot XML"
@li @ref OpenMS::OMSSAXMLFile "OMSSA XML"
@li @ref OpenMS::XTandemXMLFile "X! Tandem XML"
@li @ref OpenMS::SequestOutfile "Sequest .out directory"
@li @ref OpenMS::PercolatorOutfile "Percolator tab-delimited output"
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_IDFileConverter.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_IDFileConverter.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPIDFileConverter :
public TOPPBase
{
public:
/// Registers tool name and one-line description with the TOPP framework.
TOPPIDFileConverter() :
  TOPPBase("IDFileConverter", "Converts identification engine file formats.")
{
}
private:
bool add_ionmatches_(PeptideIdentificationList& peptide_identifications, String filename, double tolerance)
{
TheoreticalSpectrumGenerator tg;
Param tgp(tg.getDefaults());
tgp.setValue("add_metainfo", "true");
tgp.setValue("add_losses", "true");
tgp.setValue("add_precursor_peaks", "true");
tgp.setValue("add_abundant_immonium_ions", "true");
tgp.setValue("add_first_prefix_ion", "true");
tgp.setValue("add_y_ions", "true");
tgp.setValue("add_b_ions", "true");
tgp.setValue("add_a_ions", "true");
tgp.setValue("add_x_ions", "true");
tg.setParameters(tgp);
SpectrumAlignment sa;
Param sap = sa.getDefaults();
sap.setValue("tolerance", tolerance, "...");
sa.setParameters(sap);
SpectrumAnnotator annot;
bool ret = true;
PeakMap expmap;
SpectrumLookup lookup;
FileHandler().loadExperiment(filename, expmap, {}, log_type_);
lookup.readSpectra(expmap.getSpectra());
#pragma omp parallel for
for (SignedSize i = 0; i < (SignedSize)peptide_identifications.size(); ++i)
{
try
{
String ref = peptide_identifications[i].getSpectrumReference();
Size index = lookup.findByNativeID(ref);
annot.addIonMatchStatistics(peptide_identifications[i], expmap[index], tg, sa);
}
catch (Exception::ElementNotFound&)
{
#pragma omp critical (IDFileConverter_ERROR)
{
OPENMS_LOG_ERROR << "Error: Failed to look up spectrum - none with corresponding native ID found." << endl;
ret = false;
}
}
}
return ret;
}
protected:
/// Defaults for the 'fasta_to_mzml' subsection: TheoreticalSpectrumGenerator
/// defaults plus digestion (enzyme, missed cleavages) and charge settings.
Param getSubsectionDefaults_(const String& /*section*/) const override
{
  Param p(TheoreticalSpectrumGenerator().getDefaults());
  vector<String> all_enzymes;
  ProteaseDB::getInstance()->getAllNames(all_enzymes);
  // fixed typo in user-visible description ("Enzym" -> "Enzyme")
  p.setValue("enzyme", "Trypsin", "Enzyme used to digest the fasta proteins");
  p.setValidStrings("enzyme", ListUtils::create<std::string>(all_enzymes));
  p.setValue("missed_cleavages", 0, "Number of allowed missed cleavages while digesting the fasta proteins");
  p.setValue("min_charge", 1, "Minimum charge");
  p.setValue("max_charge", 1, "Maximum charge");
  p.setValue("precursor_charge", 0, "Manually set precursor charge. (default: 0, meaning max_charge + 1 will be used as precursor charge)");
  return p;
}
/// Registers all command-line options of the converter.
void registerOptionsAndFlags_() override
{
  // main input: single file in one of many ID formats, or a Sequest .out directory
  registerInputFile_("in", "<path/file>", "",
                     "Input file or directory containing the data to convert. This may be:\n"
                     "- a single file in OpenMS database format (.oms),\n"
                     "- a single file in a multi-purpose XML format (.idXML, .mzid, .pepXML, .protXML),\n"
                     "- a single file in a search engine-specific format (Mascot: .mascotXML, OMSSA: .omssaXML, X! Tandem: .xml, Percolator: .psms, xQuest: .xquest.xml),\n"
                     "- a single file in fasta format (can only be used to generate a theoretical mzML),\n"
                     "- a single text file (tab separated) with one line for all peptide sequences matching a spectrum (top N hits),\n"
                     "- for Sequest results, a directory containing .out files.\n");
  setValidFormats_("in", ListUtils::create<String>("oms,idXML,mzid,fasta,pepXML,protXML,mascotXML,omssaXML,xml,psms,tsv,xquest.xml"));

  // main output; type either derived from the extension or forced via 'out_type'
  registerOutputFile_("out", "<file>", "", "Output file", true);
  String formats("oms,idXML,mzid,pepXML,fasta,xquest.xml,mzML");
  setValidFormats_("out", ListUtils::create<String>(formats));
  registerStringOption_("out_type", "<type>", "", "Output file type (default: determined from file extension)", false);
  setValidStrings_("out_type", ListUtils::create<String>(formats));

  addEmptyLine_();
  // optional raw data file used to look up RTs / native spectrum IDs
  registerInputFile_("mz_file", "<file>", "", "[pepXML, Sequest, Mascot, X! Tandem, mzid, Percolator only] Retention times and native spectrum ids (spectrum_references) will be looked up in this file", false);
  setValidFormats_("mz_file", ListUtils::create<String>("mzML,mzXML,mzData"));
  addEmptyLine_();

  // format-specific tweaks (each option documents which input/output it applies to)
  registerStringOption_("mz_name", "<file>", "", "[pepXML only] Experiment filename/path (extension will be removed) to match in the pepXML file ('base_name' attribute). Only necessary if different from 'mz_file'.", false);
  registerFlag_("peptideprophet_analyzed", "[pepXML output only] Write output in the format of a PeptideProphet analysis result. By default a 'raw' pepXML is produced that contains only search engine results.", false);
  registerStringOption_("score_type", "<choice>", PercolatorOutfile::score_type_names[0], "[Percolator only] Which of the Percolator scores to report as 'the' score for a peptide hit", false);
  setValidStrings_("score_type", vector<String>(PercolatorOutfile::score_type_names, PercolatorOutfile::score_type_names + static_cast<int>(PercolatorOutfile::ScoreType::SIZE_OF_SCORETYPE)));

  registerFlag_("ignore_proteins_per_peptide", "[Sequest only] Workaround to deal with .out files that contain e.g. \"+1\" in references column,\n"
                                               "but do not list extra references in subsequent lines (try -debug 3 or 4)", true);
  registerStringOption_("scan_regex", "<expression>", "", "[Mascot, pepXML, Percolator only] Regular expression used to extract the scan number or retention time. See documentation for details.", false, true);
  registerFlag_("no_spectra_data_override", "[+mz_file only] Avoid overriding 'spectra_data' in protein identifications if 'mz_file' is given and 'spectrum_reference's are added/updated. Use only if you are sure it is absolutely the same 'mz_file' as used for identification.", true);
  registerFlag_("no_spectra_references_override", "[+mz_file only] Avoid overriding 'spectrum_reference' in peptide identifications if 'mz_file' is given and a 'spectrum_reference' is already present.", true);
  registerDoubleOption_("add_ionmatch_annotation", "<tolerance>", 0, "[+mz_file only] Annotate the identifications with ion matches from spectra in 'mz_file' using the given tolerance (in Da). This will take quite some time.", false, true);

  // FASTA output options
  registerFlag_("concatenate_peptides", "[FASTA output only] Will concatenate the top peptide hits to one peptide sequence, rather than write a new peptide for each hit.", true);
  registerIntOption_("number_of_hits", "<integer>", 1, "[FASTA output only] Controls how many peptide hits will be exported. A value of 0 or less exports all hits.", false, true);

  // theoretical spectrum simulation (FASTA -> mzML); defaults in getSubsectionDefaults_()
  registerSubsection_("fasta_to_mzml", "[FASTA input + MzML output only] Parameters used to adjust simulation of the theoretical spectra.");
}
/// Central conversion routine: reads identifications from 'in' (file format
/// detected automatically, or a Sequest .out directory), optionally enriches
/// them with RTs / spectrum references / ion matches from 'mz_file', and
/// writes them in the requested output format.
ExitCodes main_(int, const char**) override
{
  //-------------------------------------------------------------
  // general variables and data
  //-------------------------------------------------------------
  FileHandler fh;
  PeptideIdentificationList peptide_identifications;
  vector<ProteinIdentification> protein_identifications;
  SpectrumMetaDataLookup lookup;
  IdentificationData id_data;

  //-------------------------------------------------------------
  // reading input
  //-------------------------------------------------------------
  const String in = getStringOption_("in");
  const String mz_file = getStringOption_("mz_file");
  FileTypes::Type in_type = FileTypes::UNKNOWN; // set below if 'in' isn't a directory

  const String out = getStringOption_("out");
  FileTypes::Type out_type = FileHandler::getConsistentOutputfileType(out, getStringOption_("out_type"));
  if (out_type == FileTypes::UNKNOWN)
  {
    writeLogError_("Error: Could not determine output file type!");
    return PARSE_ERROR;
  }

  ProgressLogger logger;
  logger.setLogType(ProgressLogger::CMD);
  logger.startProgress(0, 1, "Loading...");

  if (File::isDirectory(in))
  {
    // Sequest results: a directory of .out files
    const String in_directory = File::absolutePath(in).ensureLastChar('/');
    const bool ignore_proteins_per_peptide = getFlag_("ignore_proteins_per_peptide");

    UInt i = 0;
    FileTypes::Type type;
    PeakMap msexperiment;
    // Note: we had issues with leading zeroes, so let us represent scan numbers as Int (next line used to be map<String, float> num_and_rt;) However, now String::toInt() might throw.
    map<Int, float> num_and_rt;
    vector<String> NativeID;

    // The mz-File (if given): build a scan number -> RT lookup table
    if (!mz_file.empty())
    {
      type = fh.getTypeByFileName(mz_file);
      fh.loadExperiment(mz_file, msexperiment, {type}, log_type_, false, false);

      for (PeakMap::Iterator spectra_it = msexperiment.begin(); spectra_it != msexperiment.end(); ++spectra_it)
      {
        String(spectra_it->getNativeID()).split('=', NativeID);
        // guard against native IDs without a '...=<scan number>' part
        if (NativeID.size() < 2)
        {
          writeLogWarn_(String("Error: Unexpected native ID format: '") + spectra_it->getNativeID() + "'. Skipping spectrum.");
          continue;
        }
        try
        {
          num_and_rt[NativeID[1].toInt()] = spectra_it->getRT();
        }
        catch (Exception::ConversionError& e)
        {
          writeLogWarn_(String("Error: Cannot read scan number as integer. '") + e.what());
        }
      }
    }

    // Get list of the actual Sequest .out-Files
    StringList in_files;
    if (!File::fileList(in_directory, "*.out", in_files))
    {
      writeLogError_(String("Error: No .out files found in '") + in_directory + "'. Aborting!");
    }

    // Now get to work ...
    for (vector<String>::const_iterator in_files_it = in_files.begin(); in_files_it != in_files.end(); ++in_files_it)
    {
      PeptideIdentificationList peptide_ids_seq;
      ProteinIdentification protein_id_seq;
      vector<double> pvalues_seq;
      vector<String> in_file_vec;

      SequestOutfile sequest_outfile;

      writeDebug_(String("Reading file ") + *in_files_it, 3);

      try
      {
        sequest_outfile.load((String) (in_directory + *in_files_it), peptide_ids_seq, protein_id_seq, 1.0, pvalues_seq, "Sequest", ignore_proteins_per_peptide);
        in_files_it->split('.', in_file_vec);

        for (Size j = 0; j < peptide_ids_seq.size(); ++j)
        {
          // We have to explicitly set the identifiers, because the normal set ones are composed of search engine name and date, which is the same for a bunch of sequest out-files.
          peptide_ids_seq[j].setIdentifier(*in_files_it + "_" + i);

          Int scan_number = 0;
          if (!mz_file.empty())
          {
            try
            {
              scan_number = in_file_vec[2].toInt();
              peptide_ids_seq[j].setRT(num_and_rt[scan_number]);
            }
            catch (Exception::ConversionError& e)
            {
              writeLogError_(String("Error: Cannot read scan number as integer. '") + e.what());
            }
            catch (exception& e)
            {
              writeLogError_(String("Error: Cannot read scan number as integer. '") + e.what());
            }
            //double real_mz = ( peptide_ids_seq[j].getMZ() - hydrogen_mass )/ (double)peptide_ids_seq[j].getHits()[0].getCharge(); // ???? semantics of mz
            const double real_mz = peptide_ids_seq[j].getMZ() / (double) peptide_ids_seq[j].getHits()[0].getCharge();
            peptide_ids_seq[j].setMZ(real_mz);
          }

          writeDebug_(String("scan: ") + String(scan_number) + String(" RT: ") + String(peptide_ids_seq[j].getRT()) + " MZ: " + String(peptide_ids_seq[j].getMZ()) + " Ident: " + peptide_ids_seq[j].getIdentifier(), 4);

          peptide_identifications.push_back(peptide_ids_seq[j]);
        }
        protein_id_seq.setIdentifier(*in_files_it + "_" + i);
        protein_identifications.push_back(protein_id_seq);
        ++i;
      }
      catch (Exception::ParseError& pe)
      {
        writeLogError_(pe.what() + String("(file: ") + *in_files_it + ")");
        throw;
      }
      catch (...)
      {
        writeLogError_(String("Error reading file: ") + *in_files_it);
        throw;
      }
    }
    writeDebug_("All files processed.", 3);
  } // ! directory
  else
  {
    // single input file: dispatch on detected type
    in_type = fh.getType(in);

    switch (in_type)
    {
      case FileTypes::PEPXML:
      {
        String mz_name = getStringOption_("mz_name");
        if (mz_file.empty())
        {
          PepXMLFile().load(in, protein_identifications,
                            peptide_identifications, mz_name);
        }
        else
        {
          PeakMap exp;
          fh.loadExperiment(mz_file, exp, {}, log_type_, false,
                            false);
          if (mz_name.empty()) mz_name = mz_file;
          String scan_regex = getStringOption_("scan_regex");
          // we may have to parse Mascot spectrum references in pepXML, too:
          MascotXMLFile::initializeLookup(lookup, exp, scan_regex);
          PepXMLFile().load(in, protein_identifications,
                            peptide_identifications, mz_name, lookup);
        }
      }
      break;

      case FileTypes::IDXML:
      {
        FileHandler().loadIdentifications(in, protein_identifications, peptide_identifications, {FileTypes::IDXML});
        // get spectrum_references from the mz data, if necessary:
        if (!mz_file.empty())
        {
          SpectrumMetaDataLookup::addMissingSpectrumReferences(
            peptide_identifications,
            mz_file,
            false,
            !getFlag_("no_spectra_data_override"),
            !getFlag_("no_spectra_references_override"),
            protein_identifications);

          double add_ions = getDoubleOption_("add_ionmatch_annotation");
          if (add_ions > 0)
          {
            add_ionmatches_(peptide_identifications, mz_file, add_ions);
          }
        }
      }
      break;

      case FileTypes::MZIDENTML:
      {
        OPENMS_LOG_WARN << "Converting from mzid: you might experience loss of information depending on the capabilities of the target format." << endl;
        FileHandler().loadIdentifications(in, protein_identifications,
                                          peptide_identifications, {FileTypes::MZIDENTML});
        // get retention times from the mz data, if necessary:
        if (!mz_file.empty())
        {
          // Add RTs if missing; metadata-only load is sufficient and much faster
          MSExperiment exp;
          MzMLFile mzml_file{};
          mzml_file.getOptions().setMetadataOnly(true);
          mzml_file.load(mz_file, exp);
          SpectrumMetaDataLookup::addMissingRTsToPeptideIDs(peptide_identifications, exp);

          double add_ions = getDoubleOption_("add_ionmatch_annotation");
          if (add_ions > 0)
          {
            add_ionmatches_(peptide_identifications, mz_file, add_ions);
          }
        }
      }
      break;

      case FileTypes::PROTXML:
      {
        FileHandler().loadIdentifications(in, protein_identifications,
                                          peptide_identifications, {FileTypes::PROTXML});
      }
      break;

      case FileTypes::OMSSAXML:
      {
        FileHandler().loadIdentifications(in, protein_identifications,
                                          peptide_identifications);
      }
      break;

      case FileTypes::MASCOTXML:
      {
        if (!mz_file.empty())
        {
          String scan_regex = getStringOption_("scan_regex");
          PeakMap exp;
          // load only MS2 spectra:
          fh.getOptions().addMSLevel(2);
          fh.loadExperiment(mz_file, exp, {}, log_type_, false,
                            false);
          MascotXMLFile::initializeLookup(lookup, exp, scan_regex);
        }
        protein_identifications.resize(1);
        MascotXMLFile().load(in, protein_identifications[0],
                             peptide_identifications, lookup);
      }
      break;

      case FileTypes::XML: // X! Tandem
      {
        ProteinIdentification protein_id;
        ModificationDefinitionsSet mod_defs;
        XTandemXMLFile().load(in, protein_id, peptide_identifications,
                              mod_defs);
        protein_id.setSearchEngineVersion("");
        protein_id.setSearchEngine("XTandem");
        protein_identifications.push_back(protein_id);
        if (!mz_file.empty())
        {
          PeakMap exp;
          fh.getOptions().addMSLevel(2);
          fh.loadExperiment(mz_file, exp, {}, log_type_, false,
                            false);
          for (PeptideIdentification& pep : peptide_identifications)
          {
            UInt id = (Int)pep.getMetaValue("spectrum_id");
            --id; // native IDs were written 1-based
            if (id < exp.size())
            {
              pep.setRT(exp[id].getRT());
              double pre_mz(0.0);
              if (!exp[id].getPrecursors().empty())
              {
                pre_mz = exp[id].getPrecursors()[0].getMZ();
              }
              pep.setMZ(pre_mz);
              pep.removeMetaValue("spectrum_id");
            }
            else
            {
              OPENMS_LOG_ERROR << "XTandem xml: Error: id '" << id << "' not found in peak map!" << endl;
            }
          }
        }
      }
      break;

      case FileTypes::PSMS: // Percolator
      {
        String score_type = getStringOption_("score_type");
        PercolatorOutfile::ScoreType perc_score =
          PercolatorOutfile::getScoreType(score_type);
        if (!mz_file.empty())
        {
          PeakMap experiment;
          fh.loadExperiment(mz_file, experiment, {}, log_type_, false, false);
          lookup.readSpectra(experiment.getSpectra());
        }
        String scan_regex = getStringOption_("scan_regex");
        if (!scan_regex.empty()) lookup.addReferenceFormat(scan_regex);
        protein_identifications.resize(1);
        PercolatorOutfile().load(in, protein_identifications[0],
                                 peptide_identifications, lookup, perc_score);
      }
      break;

      case FileTypes::TSV:
      {
        // plain text: one line of tab-separated peptide sequences per spectrum
        ProteinIdentification protein_id;
        protein_id.setSearchEngineVersion("");
        protein_id.setSearchEngine("XTandem");
        protein_identifications.push_back(protein_id);

        TextFile tf;
        tf.load(in, true, -1, true);
        for (TextFile::Iterator it = tf.begin(); it != tf.end(); ++it)
        {
          it->trim();
          // skip empty and comment lines
          if (it->empty() || it->hasPrefix("#")) continue;

          PeptideIdentification pepid;
          StringList peps;
          it->split('\t', peps, false);
          std::vector<PeptideHit> hits;
          for (StringList::const_iterator sit=peps.begin(); sit != peps.end(); ++sit)
          {
            PeptideHit hit;
            hit.setSequence(AASequence::fromString(*sit));
            hits.push_back(hit);
          }
          pepid.setHits(hits);
          peptide_identifications.push_back(pepid);
        }
      }
      break;

      case FileTypes::XQUESTXML:
      {
        FileHandler().loadIdentifications(in, protein_identifications, peptide_identifications, {FileTypes::XQUESTXML});
      }
      break;

      case FileTypes::FASTA:
      {
        // handle out type
        if (out_type != FileTypes::MZML)
        {
          writeLogError_("Error: Illegal output file type given. Fasta can only be converted to an MzML. Aborting!");
          printUsage_();
          return ILLEGAL_PARAMETERS;
        }

        MSExperiment exp;
        TheoreticalSpectrumGenerator tsg;

        // extract parameters and remove non tsg params
        Param p = getParam_().copy("fasta_to_mzml:", true);
        String enzyme = p.getValue("enzyme").toString();
        Int mc = p.getValue("missed_cleavages");
        Int min_charge = p.getValue("min_charge");
        Int max_charge = p.getValue("max_charge");
        Int pc_charge = p.getValue("precursor_charge");
        p.remove("enzyme");
        p.remove("missed_cleavages");
        p.remove("min_charge");
        p.remove("max_charge");
        p.remove("precursor_charge");

        if (min_charge > max_charge)
        {
          writeLogError_("Error: 'fasta_to_mzml:min_charge' must be smaller than or equal to 'fasta_to_mzml:max_charge'.");
          printUsage_();
          return ILLEGAL_PARAMETERS;
        }
        OPENMS_PRECONDITION(pc_charge == 0 || pc_charge >= max_charge, "Error: 'fasta_to_mzml:precursor_charge' must be bigger than or equal to 'fasta_to_mzml:max_charge'.\nSet 'precursor_charge' to '0' to automatically use 'max_charge' + 1.");

        tsg.setParameters(p);

        ProteaseDigestion digestor;
        digestor.setEnzyme(enzyme);
        digestor.setMissedCleavages(mc);

        // loop through fasta input
        FASTAFile::FASTAEntry entry;
        FASTAFile f;
        f.readStart(in);
        UInt count_catches{};
        while (f.readNext(entry))
        {
          // digest sequence of fasta entry
          vector<AASequence> digested_peptides;
          AASequence seq = AASequence::fromString(entry.sequence);
          digestor.digest(seq, digested_peptides);

          // for each peptide calculate the theoretical spectrum
          for (const auto& peptide : digested_peptides)
          {
            PeakSpectrum spec;
            try
            {
              tsg.getSpectrum(spec, peptide, min_charge, max_charge, pc_charge);
            }
            // FIX: was 'catch (Exception::InvalidSize())', which declares a handler of
            // function (pointer) type and therefore never matched the thrown exception
            catch (const Exception::InvalidSize&)
            {
              ++count_catches;
            }
            exp.addSpectrum(std::move(spec));
          }
        }
        if (count_catches > 0)
        {
          writeLogWarn_("No spectra were calculated for " + String(count_catches) + " peptides because they were too small for generating a C- or X-ion.");
        }

        logger.endProgress();
        logger.startProgress(0, 1, "Storing...");
        FileHandler().storeExperiment(out, exp, {FileTypes::MZML});
        logger.endProgress();
        return EXECUTION_OK;
      }
      break;

      case FileTypes::OMS:
      {
        OMSFile().load(in, id_data);
        if (out_type != FileTypes::OMS)
        {
          // convert to the legacy ID data structures for all other output formats
          IdentificationDataConverter::exportIDs(id_data, protein_identifications, peptide_identifications);
        }
      }
      break;

      default:
        writeLogError_("Error: Unknown input file type given. Aborting!");
        printUsage_();
        return ILLEGAL_PARAMETERS;
    }
  }
  logger.endProgress();

  //-------------------------------------------------------------
  // writing output
  //-------------------------------------------------------------
  logger.startProgress(0, 1, "Storing...");
  switch (out_type)
  {
    case FileTypes::PEPXML:
    {
      bool peptideprophet_analyzed = getFlag_("peptideprophet_analyzed");
      String mz_name = getStringOption_("mz_name");
      PepXMLFile().store(out, protein_identifications, peptide_identifications,
                         mz_file, mz_name, peptideprophet_analyzed);
    }
    break;

    case FileTypes::IDXML:
      FileHandler().storeIdentifications(out, protein_identifications, peptide_identifications, {FileTypes::IDXML});
      break;

    case FileTypes::MZIDENTML:
      FileHandler().storeIdentifications(out, protein_identifications,
                                         peptide_identifications, {FileTypes::MZIDENTML});
      break;

    case FileTypes::XQUESTXML:
      FileHandler().storeIdentifications(out, protein_identifications,
                                         peptide_identifications, {FileTypes::XQUESTXML});
      break;

    case FileTypes::FASTA:
    {
      Size count = 0;
      Int max_hits = getIntOption_("number_of_hits");
      if (max_hits < 1)
      {
        max_hits = INT_MAX;
      }
      bool concat = getFlag_("concatenate_peptides");
      //Because by concatenation of peptides [KR]|P sites will probably be created, peptides starting with 'P' are
      //saved separately and later moved to the beginning of the concatenated sequence.
      //This is done to avoid losing information about the preceding peptides if a peptides starts with 'P'.
      String all_p; //peptides beginning with 'P'
      String all_but_p; //all the others
      FASTAFile f;
      f.writeStart(out);
      FASTAFile::FASTAEntry entry;
      for (const PeptideIdentification& pep_id : peptide_identifications)
      {
        Int curr_hit = 1;
        for (const PeptideHit& hit : pep_id.getHits())
        {
          if (curr_hit > max_hits)
          {
            break;
          }
          ++curr_hit;

          String seq = hit.getSequence().toUnmodifiedString();
          if (concat)
          {
            if (seq[0] == 'P')
            {
              all_p += seq;
            }
            else
            {
              all_but_p += seq;
            }
          }
          else
          {
            std::set<String> prot = hit.extractProteinAccessionsSet();
            entry.sequence = seq;
            entry.identifier = seq;
            entry.description = String(count) + " " + hit.getSequence().toString() + " " + ListUtils::concatenate(StringList(prot.begin(), prot.end()), ";");
            f.writeNext(entry);
            ++count;
          }
        }
      }
      if (concat)
      {
        // NOTE(review): assumes at least one protein run exists — holds for all
        // input branches above, which always push/resize one
        entry.sequence = all_p + all_but_p;
        entry.identifier = protein_identifications[0].getSearchEngine() + "_" + Constants::UserParam::CONCAT_PEPTIDE;
        entry.description = "";
        f.writeNext(entry);
      }
    }
    break;

    case FileTypes::OMS:
    {
      if (in_type != FileTypes::OMS)
      {
        IdentificationDataConverter::importIDs(id_data, protein_identifications, peptide_identifications);
      }
      OMSFile().store(out, id_data);
    }
    break;

    default:
      writeLogError_("Unsupported output file type given. Aborting!");
      printUsage_();
      return ILLEGAL_PARAMETERS;
  }
  logger.endProgress();

  return EXECUTION_OK;
}
};
/// Tool entry point: delegate straight to the TOPP framework driver.
int main(int argc, const char** argv)
{
  return TOPPIDFileConverter().main(argc, argv);
}
///@endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/MultiplexResolver.cpp | .cpp | 25,683 | 624 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Lars Nilse $
// $Authors: Lars Nilse $
// --------------------------------------------------------------------------
#include <OpenMS/config.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/KERNEL/ConsensusMap.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <OpenMS/METADATA/PeptideHit.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FEATUREFINDER/MultiplexDeltaMasses.h>
#include <OpenMS/FEATUREFINDER/MultiplexDeltaMassesGenerator.h>
#include <OpenMS/FEATUREFINDER/MultiplexIsotopicPeakPattern.h>
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string/replace.hpp>
using namespace std;
using namespace OpenMS;
//#define DEBUG
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_MultiplexResolver MultiplexResolver
@brief Completes peptide multiplets and resolves conflicts within them.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=3> → MultiplexResolver →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_IDMapper </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=2> @ref TOPP_ProteinQuantifier </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_IDConflictResolver </td>
</tr>
</table>
</CENTER>
Tools such as FeatureFinderMultiplex can detect peptide feature multiplets in labeled experimental data. The multiplets can then be annotated with peptide sequences
using the IDMapper tool (*). The MultiplexResolver tool is consolidating these results in two steps.
- Any multiplets with conflicting quantitative and sequence information are filtered out. As example, let us consider a triple SILAC analysis. Let us assume a sequence
"LDNLVAIFDINR(Label:13C(6)15N(4))" with a single Arg10 label is mapped to the light feature in a SILAC triplet. Either peptide feature detection or sequence information
must be incorrect and the triplet is removed.
- In a second step, any incomplete peptide feature groups are completed with dummy features of zero intensity. As example, let us stay with the triple SILAC analysis.
But let us now assume the sequence "LDNLVAIFDINR(Label:13C(6)15N(4))" is mapped to the heavy partner of a peptide feature pair. This is no conflict. Medium and heavy
peptides have been correctly detected. The MultiplexResolver adds a dummy peptide feature of zero intensity at the light position and thereby completes the triplet.
(*) Note that the MultiplexResolver tool takes only a single (the first) peptide sequence annotation into account. By running IDConflictResolver first, it is assured that
each multiplet has only one peptide sequence annotation, the best one. Multiplets without sequence annotation are passed to the optional out_conflicts output.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_MultiplexResolver.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_MultiplexResolver.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPMultiplexResolver :
  public TOPPBase
{
private:
  // input and output files
  String in_;            // peptide multiplets with sequence annotation (consensusXML)
  String in_blacklist_;  // optional blacklisted spectral peaks (mzML)
  String out_;           // completed peptide multiplets (consensusXML)
  String out_conflicts_; // optional conflicting/unannotated multiplets (consensusXML)

  // section "algorithm"
  String labels_;             // label string, e.g. "[][Lys8,Arg10]" for SILAC
  unsigned missed_cleavages_; // maximum number of missed cleavages
  double mass_tolerance_;     // mass tolerance in Da for matching delta masses
  double mz_tolerance_;       // m/z tolerance in ppm for the blacklist look-up
  double rt_tolerance_;       // RT tolerance in seconds for the blacklist look-up

  // section "labels": map from label name to mass shift in Da
  map<String, double> label_mass_shift_;

  // blacklisted spectral peaks (optional input), consulted before reporting zero-intensity dummy features
  MSExperiment exp_blacklist_;

  /// register the command line parameters of the tool
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Peptide multiplets with assigned sequence information");
    setValidFormats_("in", ListUtils::create<String>("consensusXML"));
    registerInputFile_("in_blacklist", "<file>", "", "Optional input containing spectral peaks blacklisted during feature detection. Needed for generation of dummy features.", false);
    setValidFormats_("in_blacklist", ListUtils::create<String>("mzML"));
    registerOutputFile_("out", "<file>", "", "Complete peptide multiplets.");
    setValidFormats_("out", ListUtils::create<String>("consensusXML"));
    registerOutputFile_("out_conflicts", "<file>", "", "Optional output containing peptide multiplets without ID annotation or with conflicting quant/ID information.", false);
    setValidFormats_("out_conflicts", ListUtils::create<String>("consensusXML"));

    registerSubsection_("algorithm", "Parameters for the algorithm.");
    registerSubsection_("labels", "Isotopic labels that can be specified in section \'algorithm:labels\'.");
  }

  /// create parameters for sections (set default values and restrictions)
  Param getSubsectionDefaults_(const String& section) const override
  {
    Param defaults;

    if (section == "algorithm")
    {
      defaults.setValue("labels", "[][Lys8,Arg10]", "Labels used for labelling the samples. [...] specifies the labels for a single sample. For example\n\n[][Lys8,Arg10] ... SILAC\n[][Lys4,Arg6][Lys8,Arg10] ... triple-SILAC\n[Dimethyl0][Dimethyl6] ... Dimethyl\n[Dimethyl0][Dimethyl4][Dimethyl8] ... triple Dimethyl\n[ICPL0][ICPL4][ICPL6][ICPL10] ... ICPL");
      defaults.setValue("missed_cleavages", 0, "Maximum number of missed cleavages due to incomplete digestion. (Only relevant if enzymatic cutting site coincides with labelling site. For example, Arg/Lys in the case of trypsin digestion and SILAC labelling.)");
      defaults.setMinInt("missed_cleavages", 0);
      defaults.setValue("mass_tolerance", 0.1, "Mass tolerance in Da for matching the mass shifts in the detected peptide multiplet to the theoretical mass shift pattern.", {"advanced"});
      // Register the tolerances as floating point parameters (10 / 5 were integer literals before,
      // which made fractional ppm/second tolerances unsettable for these double-valued quantities).
      defaults.setValue("mz_tolerance", 10.0, "m/z tolerance in ppm for checking if dummy feature vicinity was blacklisted.", {"advanced"});
      defaults.setValue("rt_tolerance", 5.0, "Retention time tolerance in seconds for checking if dummy feature vicinity was blacklisted.", {"advanced"});
    }

    if (section == "labels")
    {
      // add all labels known to the generator as advanced, non-negative mass-shift parameters
      MultiplexDeltaMassesGenerator generator;
      Param p = generator.getParameters();
      for (Param::ParamIterator it = p.begin(); it != p.end(); ++it)
      {
        defaults.setValue(it->name, it->value, it->description, {"advanced"});
        defaults.setMinFloat(it->name, 0.0);
      }
    }

    return defaults;
  }

  /**
   * @brief process parameters of 'input/output' section
   */
  void getParameters_in_out_()
  {
    in_ = getStringOption_("in");
    in_blacklist_ = getStringOption_("in_blacklist");
    out_ = getStringOption_("out");
    out_conflicts_ = getStringOption_("out_conflicts");
  }

  /**
   * @brief process parameters of 'algorithm' section
   */
  void getParameters_algorithm_()
  {
    labels_ = getParam_().getValue("algorithm:labels").toString();
    missed_cleavages_ = getParam_().getValue("algorithm:missed_cleavages");
    mass_tolerance_ = getParam_().getValue("algorithm:mass_tolerance");
    mz_tolerance_ = getParam_().getValue("algorithm:mz_tolerance");
    rt_tolerance_ = getParam_().getValue("algorithm:rt_tolerance");
  }

  /**
   * @brief process parameters of 'labels' section
   */
  void getParameters_labels_()
  {
    Param p = getParam_();

    // create map of pairs (label as string, mass shift as double)
    // NOTE(review): this iterates over all registered parameters, not only the 'labels'
    // subsection — presumably only the numeric label entries matter downstream; confirm.
    for (Param::ParamIterator it = p.begin(); it != p.end(); ++it)
    {
      label_mass_shift_.insert(make_pair(it->name, it->value));
    }
  }

  /**
   * @brief returns the relative delta mass between the first feature
   * and the feature with the map index idx
   *
   * @param[in] feature_handles feature handles of a consensus feature
   * @param[in] idx map index of interest
   *
   * @return delta mass in Da, or NaN if no feature with map index idx exists
   */
  double deltaMassFromMapIndex_(const ConsensusFeature::HandleSetType& feature_handles, unsigned idx) const
  {
    // mass (m/z * z) of the first feature serves as reference
    double first_mass = feature_handles.begin()->getMZ() * feature_handles.begin()->getCharge();

    for (ConsensusFeature::HandleSetType::const_iterator it_feat = feature_handles.begin(); it_feat != feature_handles.end(); ++it_feat)
    {
      if (it_feat->getMapIndex() == idx)
      {
        return it_feat->getMZ() * it_feat->getCharge() - first_mass;
      }
    }

    // return NaN if no matching index was found
    return numeric_limits<double>::quiet_NaN();
  }

  /**
   * @brief check whether the theoretical delta mass pattern
   * contains the label set of the detected pattern
   *
   * @param[in] pattern theoretical pattern
   * @param[in] label_set label set of the detected pattern
   * @param[out] index_label_set index within the pattern at which the label sets were matched
   *
   * @return mass shift in the theoretical pattern where both label sets match (NaN if none)
   */
  double matchLabelSet_(const std::vector<MultiplexDeltaMasses::DeltaMass>& pattern, const MultiplexDeltaMasses::LabelSet& label_set, int& index_label_set) const
  {
    for (std::vector<MultiplexDeltaMasses::DeltaMass>::const_iterator it_mass_shift = pattern.begin(); it_mass_shift != pattern.end(); ++it_mass_shift)
    {
      if (it_mass_shift->label_set == label_set)
      {
        index_label_set = it_mass_shift - pattern.begin();
        return it_mass_shift->delta_mass;
      }
    }

    // return NaN if no matching label set was found
    return numeric_limits<double>::quiet_NaN();
  }

  /**
   * @brief check whether all delta masses in the detected pattern
   * match up with a delta mass in the theoretical pattern
   *
   * @param[in] consensus detected pattern
   * @param[in] pattern theoretical pattern
   * @param[in] theoretical_delta_mass_at_label_set delta mass in the theoretical pattern at which the matching label set was found
   * @param[out] delta_mass_matched flags which delta masses of the theoretical pattern were matched
   *
   * @return All delta masses matching?
   */
  bool matchDeltaMasses_(const ConsensusMap::ConstIterator consensus, const std::vector<MultiplexDeltaMasses::DeltaMass>& pattern, double theoretical_delta_mass_at_label_set, std::vector<bool>& delta_mass_matched) const
  {
    // mass (m/z * z) of the first feature serves as reference
    double first_mass = consensus->getFeatures().begin()->getMZ() * consensus->getFeatures().begin()->getCharge();

    if (!consensus->getPeptideIdentifications()[0].metaValueExists("map_index"))
    {
      throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "The meta value 'map_index' is missing in the input data. In the IDMapper tool, please set the advanced parameter consensus:annotate_ids_with_subelements = true.");
    }
    double detected_delta_mass_at_label_set = deltaMassFromMapIndex_(consensus->getFeatures(), consensus->getPeptideIdentifications()[0].getMetaValue("map_index"));
    if (std::isnan(detected_delta_mass_at_label_set))
    {
      throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No delta mass with this map_index could be found.", "");
    }

    // loop over features in consensus
    for (ConsensusFeature::HandleSetType::const_iterator it_feat = consensus->getFeatures().begin(); it_feat != consensus->getFeatures().end(); ++it_feat)
    {
      // delta mass in the detected pattern relative to the feature with the matched label set
      double mass_shift_detected = (it_feat->getMZ() * it_feat->getCharge() - first_mass) - detected_delta_mass_at_label_set;
      bool matched = false;

      // loop over delta masses in theoretical pattern
      for (std::vector<MultiplexDeltaMasses::DeltaMass>::const_iterator it_mass_shift = pattern.begin(); it_mass_shift != pattern.end(); ++it_mass_shift)
      {
        // delta mass in the theoretical pattern relative to the feature with the matched label set
        double mass_shift_theoretical = it_mass_shift->delta_mass - theoretical_delta_mass_at_label_set;

        if (abs(mass_shift_detected - mass_shift_theoretical) < mass_tolerance_)
        {
          delta_mass_matched[it_mass_shift - pattern.begin()] = true;
          matched = true;
          break;
        }
      }

      // one detected delta mass without theoretical counterpart means a quant/ID conflict
      if (!matched)
      {
        return false;
      }
    }

    return true;
  }

  /**
   * @brief find a theoretical delta mass pattern that matches the detected pattern
   *
   * @param[in] consensus detected pattern
   * @param[in] label_set label set extracted from the detected pattern
   * @param[in] theoretical_patterns list of theoretical delta mass patterns
   * @param[out] delta_mass_matched flags which delta masses of the matching theoretical pattern were matched
   * @param[out] index_label_set index within the pattern at which the label sets were matched
   *
   * @return index of matching pattern (-1 if none matches)
   */
  int findMatchingPattern_(const ConsensusMap::ConstIterator consensus, const MultiplexDeltaMasses::LabelSet& label_set, const std::vector<MultiplexDeltaMasses>& theoretical_patterns, std::vector<bool>& delta_mass_matched, int& index_label_set) const
  {
    // loop over theoretical patterns
    for (std::vector<MultiplexDeltaMasses>::const_iterator it_pattern = theoretical_patterns.begin(); it_pattern != theoretical_patterns.end(); ++it_pattern)
    {
      std::vector<MultiplexDeltaMasses::DeltaMass> pattern = it_pattern->getDeltaMasses();
      double shift = matchLabelSet_(pattern, label_set, index_label_set);
      if (!std::isnan(shift))
      {
        // reset boolean vector to false
        delta_mass_matched.assign(delta_mass_matched.size(), false);

        bool match = matchDeltaMasses_(consensus, pattern, shift, delta_mass_matched);
        if (match)
        {
          return (it_pattern - theoretical_patterns.begin());
        }
      }
    }
    return -1;
  }

  /**
   * @brief find the m/z for the complete consensus
   *
   * @param[in] mz m/z of the incomplete consensus
   * @param[in] charge charge of the incomplete consensus
   * @param[in] pattern matching theoretical delta mass pattern
   * @param[in] delta_mass_matched flags which delta masses of the theoretical pattern were matched
   *
   * @return m/z for the complete consensus (i.e. of the lightest matched partner)
   */
  double findNewMZ_(double mz, int charge, const std::vector<MultiplexDeltaMasses::DeltaMass>& pattern, const std::vector<bool>& delta_mass_matched) const
  {
    // loop over delta masses in theoretical pattern
    std::vector<MultiplexDeltaMasses::DeltaMass>::const_iterator it_mass_shift;
    std::vector<bool>::const_iterator it_delta_mass_matched;
    for (it_mass_shift = pattern.begin(), it_delta_mass_matched = delta_mass_matched.begin();
         it_mass_shift != pattern.end() && it_delta_mass_matched != delta_mass_matched.end();
         ++it_mass_shift, ++it_delta_mass_matched)
    {
      // find the first match
      if (*it_delta_mass_matched)
      {
        // shift the consensus m/z back by the matched delta mass
        return (mz * charge - it_mass_shift->delta_mass) / charge;
      }
    }

    // Should never happen.
    return mz;
  }

  /**
   * @brief check if any of the first three isotopic peaks of a (dummy) feature
   * lies close to a blacklisted spectral peak
   *
   * @param[in] rt retention time in seconds
   * @param[in] mz m/z of the mono-isotopic peak
   * @param[in] charge charge state of the feature
   *
   * @return true if a blacklisted peak is within the RT and (ppm) m/z tolerances
   */
  bool isBlacklisted(double rt, double mz, size_t charge) const
  {
    double mz_tolerance = mz_tolerance_ * mz / 1000000; // ppm tolerance converted to Da

    MSExperiment::ConstIterator it_rt_begin = exp_blacklist_.RTBegin(rt - rt_tolerance_);
    MSExperiment::ConstIterator it_rt_end = exp_blacklist_.RTEnd(rt + rt_tolerance_);

    // loop over range of relevant spectra
    for (MSExperiment::ConstIterator it_rt = it_rt_begin; it_rt < it_rt_end; ++it_rt)
    {
      // Loop over first three isotopes in dummy feature (and check if one of them is blacklisted).
      for (size_t isotope = 0; isotope < 3; ++isotope)
      {
        double mz_isotope = mz + isotope * Constants::C13C12_MASSDIFF_U / charge;

        MSSpectrum::ConstIterator it_mz = it_rt->MZBegin(mz_isotope);
        // MZBegin may return the end iterator (e.g. if all peaks lie below mz_isotope);
        // dereferencing it would be undefined behaviour.
        if (it_mz == it_rt->end())
        {
          continue;
        }

        if ((std::abs(it_mz->getMZ() - mz_isotope)) < mz_tolerance)
        {
          // There is a blacklisted peak close-by.
          return true;
        }
      }
    }

    // None of the first three isotopes has a blacklisted peak near-by.
    return false;
  }

  /**
   * @brief complete consensus
   *
   * @param[in] consensus (possibly) incomplete consensus
   * @param[in] pattern matching theoretical delta mass pattern
   * @param[in] delta_mass_matched flags which delta masses of the theoretical pattern were matched
   * @param[in] index_label_set index within the pattern at which the label sets were matched
   *
   * @return completed consensus (dummy features inserted for unmatched delta masses)
   */
  ConsensusFeature completeConsensus_(const ConsensusFeature& consensus, const std::vector<MultiplexDeltaMasses::DeltaMass>& pattern, const std::vector<bool>& delta_mass_matched, int index_label_set) const
  {
    // Nothing to do. Detected consensus is already complete.
    if (consensus.size() == pattern.size())
    {
      return ConsensusFeature(consensus);
    }

    if (pattern.size() != delta_mass_matched.size())
    {
      throw Exception::InvalidSize(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, delta_mass_matched.size(), "pattern size does not match delta_mass_matched size");
    }

    // new complete consensus feature
    ConsensusFeature consensus_complete;

    int charge = consensus.getCharge();
    double RT = consensus.getRT();
    double mz = consensus.getMZ();

    // find m/z of the new complete consensus
    double mz_complete = findNewMZ_(mz, charge, pattern, delta_mass_matched);

    consensus_complete.setMZ(mz_complete);
    consensus_complete.setRT(consensus.getRT());
    consensus_complete.setCharge(consensus.getCharge());
    consensus_complete.setIntensity(consensus.getIntensity()); // Alternatively, reduce intensity due to new zero-intensity dummy features.
    consensus_complete.setQuality(consensus.getQuality());
    consensus_complete.setPeptideIdentifications(consensus.getPeptideIdentifications());
    consensus_complete.getPeptideIdentifications()[0].getHits()[0].setMetaValue("map_index", index_label_set);

    // loop over delta masses in theoretical pattern
    std::vector<MultiplexDeltaMasses::DeltaMass>::const_iterator it_mass_shift;
    std::vector<bool>::const_iterator it_delta_mass_matched;
    ConsensusFeature::HandleSetType::const_iterator it_feature;
    for (it_mass_shift = pattern.begin(), it_delta_mass_matched = delta_mass_matched.begin(), it_feature = consensus.getFeatures().begin();
         it_mass_shift != pattern.end() && it_delta_mass_matched != delta_mass_matched.end();
         ++it_mass_shift, ++it_delta_mass_matched)
    {
      //OPENMS_LOG_DEBUG << "  index = " << (it_mass_shift - pattern.begin()) << "    shift = " << it_mass_shift->delta_mass;

      // The end-check guards the dereference below; for matched delta masses a detected
      // feature must still be available (matchDeltaMasses_ marks at most one flag per feature).
      if ((*it_delta_mass_matched) && (it_feature != consensus.getFeatures().end()))
      {
        // copy feature from incomplete consensus
        FeatureHandle feature_handle(*it_feature);
        feature_handle.setMapIndex(it_mass_shift - pattern.begin());
        consensus_complete.insert(feature_handle);
        ++it_feature;
      }
      else
      {
        // construct dummy feature
        FeatureHandle feature_handle;
        feature_handle.setMZ(mz_complete + it_mass_shift->delta_mass / charge);
        feature_handle.setRT(RT);
        if (isBlacklisted(RT, mz_complete + it_mass_shift->delta_mass / charge, charge))
        {
          // Some peaks close-by were blacklisted during feature detection i.e. another peptide feature overlaps with the dummy feature.
          // Consequently, we better report NaN i.e. not quantifiable.
          feature_handle.setIntensity(std::numeric_limits<double>::quiet_NaN());
        }
        else
        {
          // There is no blacklisted peak near-by i.e. there is no peptide feature in the vicinity.
          // Consequently, we can confidently report zero i.e. the peptide is absent.
          feature_handle.setIntensity(0.0);
        }
        feature_handle.setCharge(charge);
        feature_handle.setMapIndex(it_mass_shift - pattern.begin());
        consensus_complete.insert(feature_handle);

        // debug output
        //std::cout << "dummy feature @ RT = " << RT << "  m/z = " << (mz_complete + it_mass_shift->delta_mass / charge) << "  blacklisted = " << isBlacklisted(RT, mz_complete + it_mass_shift->delta_mass / charge, charge) << "\n";
      }
    }

    return consensus_complete;
  }

  /**
   * @brief construct the new consensus map
   * (1) remove quant/ID conflicts
   * (2) fill in dummy features in order to complete multiplets
   *
   * @param[in] map_in input consensus map
   * @param[out] map_out consensus map without conflicts and complete multiplets
   * @param[out] map_conflicts consensus map with conflicts
   * @param[in] generator generator for the list of theoretical patterns
   */
  void constructNewConsensusMap_(const ConsensusMap& map_in, ConsensusMap& map_out, ConsensusMap& map_conflicts, MultiplexDeltaMassesGenerator generator) const
  {
    // unsigned found_pattern_count = 0;
    std::vector<MultiplexDeltaMasses> theoretical_masses = generator.getDeltaMassesList();
    size_t multiplicity = theoretical_masses[0].getDeltaMasses().size();

    for (ConsensusMap::ConstIterator cit = map_in.begin(); cit != map_in.end(); ++cit)
    {
      //OPENMS_LOG_DEBUG << "consensus = " << (cit - map_in.begin());
      //OPENMS_LOG_DEBUG << "    RT = " << cit->getRT();
      //OPENMS_LOG_DEBUG << "    mz = " << cit->getMZ();

      // Consensus features without sequence annotation (or with an empty hit list)
      // are written unchanged to the conflict output.
      if (cit->getPeptideIdentifications().empty() || cit->getPeptideIdentifications()[0].getHits().empty())
      {
        //OPENMS_LOG_DEBUG << "    (no ID)\n\n";
        ConsensusFeature consensus(*cit);
        map_conflicts.push_back(consensus);
        continue;
      }

      // extract the label set from the attached peptide sequence (There should be only one, since IDConflictResolver was run first.)
      AASequence sequence = cit->getPeptideIdentifications()[0].getHits()[0].getSequence();
      MultiplexDeltaMasses::LabelSet label_set = generator.extractLabelSet(sequence);
      std::vector<bool> delta_mass_matched(multiplicity, false);
      int index_label_set = -1;

      int index = findMatchingPattern_(cit, label_set, theoretical_masses, delta_mass_matched, index_label_set);

      if (index >= 0)
      {
        //OPENMS_LOG_DEBUG << "    (Ok)\n\n";
        // ++found_pattern_count;
        ConsensusFeature consensus = completeConsensus_(*cit, theoretical_masses[index].getDeltaMasses(), delta_mass_matched, index_label_set);
        map_out.push_back(consensus);
      }
      else
      {
        //OPENMS_LOG_DEBUG << "    (Conflict)\n\n";
        ConsensusFeature consensus(*cit);
        map_conflicts.push_back(consensus);
      }
    }

    // update map sizes
    for (unsigned map_index = 0; map_index < multiplicity; ++map_index)
    {
      map_out.getColumnHeaders()[map_index].size = map_out.size();
    }

    map_out.applyMemberFunction(&UniqueIdInterface::setUniqueId);
    map_conflicts.applyMemberFunction(&UniqueIdInterface::setUniqueId);

    /*OPENMS_LOG_DEBUG << "\n";
    OPENMS_LOG_DEBUG << "number of consensuses                   = " << map_in.size() << "\n";
    OPENMS_LOG_DEBUG << "number of consensuses without conflicts = " << found_pattern_count << "\n";
    OPENMS_LOG_DEBUG << "\n";*/
  }

public:
  TOPPMultiplexResolver() :
    TOPPBase("MultiplexResolver", "Completes peptide multiplets and resolves conflicts within them."),
    missed_cleavages_(0), mass_tolerance_(0.1)
  {
  }

  /// main method of the tool: load inputs, resolve multiplets, store outputs
  ExitCodes main_(int, const char**) override
  {
    /**
     * handle parameters
     */
    getParameters_in_out_();
    getParameters_labels_();
    getParameters_algorithm_();

    /**
     * load consensus map
     */
    ConsensusMap map_in;
    FileHandler().loadConsensusFeatures(in_, map_in, {FileTypes::CONSENSUSXML}, log_type_);

    /**
     * load (optional) blacklist
     */
    if (!(in_blacklist_.empty()))
    {
      FileHandler().loadExperiment(in_blacklist_, exp_blacklist_, {FileTypes::MZML}, log_type_);
    }

    /**
     * generate patterns
     */
    MultiplexDeltaMassesGenerator generator = MultiplexDeltaMassesGenerator(labels_, missed_cleavages_, label_mass_shift_);
#ifdef DEBUG
    generator.printSamplesLabelsList(std::cout);
    generator.printDeltaMassesList(std::cout);
#endif

    /**
     * construct the new consensus map
     */
    ConsensusMap map_out = map_in;       // copy to inherit column headers and meta data
    ConsensusMap map_conflicts = map_in;
    map_out.resize(0);
    map_conflicts.resize(0);
    constructNewConsensusMap_(map_in, map_out, map_conflicts, generator);

    /**
     * store consensus maps
     */
    FileHandler().storeConsensusFeatures(out_, map_out, {FileTypes::CONSENSUSXML}, log_type_);
    if (!out_conflicts_.empty())
    {
      FileHandler().storeConsensusFeatures(out_conflicts_, map_conflicts, {FileTypes::CONSENSUSXML}, log_type_);
    }

    return EXECUTION_OK;
  }
};
int main(int argc, const char** argv)
{
TOPPMultiplexResolver tool;
return tool.main(argc, argv);
}
///@endcond
| C++ |
3D | OpenMS/OpenMS | src/topp/OpenSwathDecoyGenerator.cpp | .cpp | 17,903 | 391 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: George Rosenberger $
// $Authors: George Rosenberger, Hannes Roest, Witold Wolski $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/MRMDecoy.h>
#include <OpenMS/ANALYSIS/OPENSWATH/TransitionTSVFile.h>
#include <OpenMS/ANALYSIS/OPENSWATH/TransitionPQPFile.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/CONCEPT/ProgressLogger.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FORMAT/FileTypes.h>
using namespace OpenMS;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_OpenSwathDecoyGenerator OpenSwathDecoyGenerator
@brief Generates decoys according to different models for a specific TraML
<CENTER>
<table>
<tr>
<th ALIGN = "center"> potential predecessor tools </td>
<td VALIGN="middle" ROWSPAN=3> → OpenSwathDecoyGenerator →</td>
<th ALIGN = "center"> potential successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=2> @ref TOPP_OpenSwathAssayGenerator </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_OpenSwathAnalyzer </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_OpenSwathWorkflow </td>
</tr>
</table>
</CENTER>
This module generates "decoy" transitions from a set of real or "target"
transitions. The idea is to use the decoy transitions in a statistical scoring
process to estimate the false hits in an SRM / SWATH experiment. The tool
operates on @ref OpenMS::TraMLFile "TraML" files, which can come from @ref
TOPP_TargetedFileConverter or any other tool.
There are multiple methods to create the decoy transitions, the simplest ones
are reverse and pseudo-reverse which reverse the sequence either completely or
leaving the last (tryptic) AA untouched respectively.
Another decoy generation method is "shuffle" which uses an algorithm similar
to the one described in Lam, Henry, et al. (2010). "Artificial decoy spectral
libraries for false discovery rate estimation in spectral library searching in
proteomics". Journal of Proteome Research 9, 605-610. It shuffles the amino
acid sequence (excluding N-/C-terminal and K/R/P) and shuffles the fragment
ion intensities accordingly. If the new sequence does not reach a threshold of
identity within a set number of trials, a random amino acid (not N-/C-terminal
or modified) is "mutated" to a random other amino acid.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_OpenSwathDecoyGenerator.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_OpenSwathDecoyGenerator.html
*/
// TODO: could theoretically also produce an annotation in the TraML of what it thinks the ion is?
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPOpenSwathDecoyGenerator
: public TOPPBase
{
public:
// Constructor: registers the tool name and one-line description with TOPPBase.
// NOTE(review): the trailing 'true' presumably marks the tool as official — confirm against TOPPBase's constructor.
TOPPOpenSwathDecoyGenerator() :
  TOPPBase("OpenSwathDecoyGenerator", "Generates decoys according to different models for a specific TraML", true)
{
}
protected:
// Declares all command-line parameters of the tool (registration order defines the
// order shown in --help and in the generated INI file).
void registerOptionsAndFlags_() override
{
  // input: transition list, type either given explicitly or inferred from extension/content
  registerInputFile_("in", "<file>", "", "Input file");
  registerStringOption_("in_type", "<type>", "", "Input file type -- default: determined from file extension or content\n", false);
  String formats("tsv,mrm,pqp,TraML");
  setValidFormats_("in", ListUtils::create<String>(formats));
  setValidStrings_("in_type", ListUtils::create<String>(formats));
  // output: note the reduced format list ('mrm' is input-only)
  formats = "tsv,pqp,TraML";
  registerOutputFile_("out", "<file>", "", "Output file");
  setValidFormats_("out", ListUtils::create<String>(formats));
  registerStringOption_("out_type", "<type>", "", "Output file type -- default: determined from file extension or content\n", false);
  setValidStrings_("out_type", ListUtils::create<String>(formats));
  // decoy generation strategy and labelling of generated decoys
  registerStringOption_("method", "<type>", "shuffle", "Decoy generation method", false);
  setValidStrings_("method", ListUtils::create<String>(String("shuffle,pseudo-reverse,reverse,shift")));
  registerStringOption_("decoy_tag", "<type>", "DECOY_", "decoy tag", false);
  // decoy/target ratio control (advanced)
  registerDoubleOption_("min_decoy_fraction", "<double>", 0.8, "Minimum fraction of decoy / target peptides and proteins", false, true);
  registerDoubleOption_("aim_decoy_fraction", "<double>", 1.0, "Number of decoys the algorithm should generate (if unequal to 1, the algorithm will randomly select N peptides for decoy generation)", false, true);
  // parameters specific to the 'shuffle' method (advanced)
  registerIntOption_("shuffle_max_attempts", "<int>", 30, "shuffle: maximum attempts to lower the amino acid sequence identity between target and decoy for the shuffle algorithm", false, true);
  registerDoubleOption_("shuffle_sequence_identity_threshold", "<double>", 0.5, "shuffle: target-decoy amino acid sequence identity threshold for the shuffle algorithm", false, true);
  // parameters specific to the 'shift' method (advanced)
  registerDoubleOption_("shift_precursor_mz_shift", "<double>", 0.0, "shift: precursor ion MZ shift in Thomson for shift decoy method", false, true);
  registerDoubleOption_("shift_product_mz_shift", "<double>", 20, "shift: fragment ion MZ shift in Thomson for shift decoy method", false, true);
  // fragment ion annotation settings (advanced)
  registerDoubleOption_("product_mz_threshold", "<double>", 0.025, "MZ threshold in Thomson for fragment ion annotation", false, true);
  registerStringOption_("allowed_fragment_types", "<type>", "b,y", "allowed fragment types", false, true);
  registerStringOption_("allowed_fragment_charges", "<type>", "1,2,3,4", "allowed fragment charge states", false, true);
  registerFlag_("enable_detection_specific_losses", "set this flag if specific neutral losses for detection fragment ions should be allowed", true);
  registerFlag_("enable_detection_unspecific_losses", "set this flag if unspecific neutral losses (H2O1, H3N1, C1H2N2, C1H2N1O1) for detection fragment ions should be allowed", true);
  // terminal K/R swap changes the precursor mass of the decoy
  registerStringOption_("switchKR", "<true/false>", "true", "Whether to switch terminal K and R (to achieve different precursor mass)", false);
  setValidStrings_("switchKR", ListUtils::create<String>(String("true,false")));
  registerFlag_("separate", "set this flag if decoys should not be appended to targets.", true);
}
ExitCodes main_(int, const char **) override
{
FileHandler fh;
//input file type
String in = getStringOption_("in");
FileTypes::Type in_type = FileTypes::nameToType(getStringOption_("in_type"));
if (in_type == FileTypes::UNKNOWN)
{
in_type = fh.getType(in);
writeDebug_(String("Input file type: ") + FileTypes::typeToName(in_type), 2);
}
if (in_type == FileTypes::UNKNOWN)
{
writeLogError_("Error: Could not determine input file type!");
return PARSE_ERROR;
}
//output file names and types
String out = getStringOption_("out");
FileTypes::Type out_type = FileTypes::nameToType(getStringOption_("out_type"));
if (out_type == FileTypes::UNKNOWN)
{
out_type = fh.getTypeByFileName(out);
}
if (out_type == FileTypes::UNKNOWN)
{
writeLogError_("Error: Could not determine output file type!");
return PARSE_ERROR;
}
String method = getStringOption_("method");
String decoy_tag = getStringOption_("decoy_tag");
double min_decoy_fraction = getDoubleOption_("min_decoy_fraction");
double aim_decoy_fraction = getDoubleOption_("aim_decoy_fraction");
Int max_attempts = getIntOption_("shuffle_max_attempts");
double identity_threshold = getDoubleOption_("shuffle_sequence_identity_threshold");
double precursor_mz_shift = getDoubleOption_("shift_precursor_mz_shift");
double product_mz_shift = getDoubleOption_("shift_product_mz_shift");
double product_mz_threshold = getDoubleOption_("product_mz_threshold");
String allowed_fragment_types_string = getStringOption_("allowed_fragment_types");
String allowed_fragment_charges_string = getStringOption_("allowed_fragment_charges");
bool enable_detection_specific_losses = getFlag_("enable_detection_specific_losses");
bool enable_detection_unspecific_losses = getFlag_("enable_detection_unspecific_losses");
bool switchKR = getStringOption_("switchKR") == "true";
bool separate = getFlag_("separate");
std::vector<String> allowed_fragment_types;
allowed_fragment_types_string.split(",", allowed_fragment_types);
std::vector<String> allowed_fragment_charges_string_vector;
std::vector<size_t> allowed_fragment_charges;
allowed_fragment_charges_string.split(",", allowed_fragment_charges_string_vector);
for (size_t i = 0; i < allowed_fragment_charges_string_vector.size(); i++)
{
size_t charge = std::atoi(allowed_fragment_charges_string_vector.at(i).c_str());
allowed_fragment_charges.push_back(charge);
}
// Use memory-efficient Light path for TSV/PQP → TSV/PQP conversions
bool use_light_path = (in_type == FileTypes::TSV || in_type == FileTypes::MRM || in_type == FileTypes::PQP)
&& (out_type == FileTypes::TSV || out_type == FileTypes::PQP);
if (use_light_path)
{
// Memory-efficient Light path
OpenSwath::LightTargetedExperiment light_exp;
OpenSwath::LightTargetedExperiment light_decoy;
OpenSwath::LightTargetedExperiment light_merged;
OPENMS_LOG_INFO << "Loading targets from file (Light path): " << in << std::endl;
if (in_type == FileTypes::TSV || in_type == FileTypes::MRM)
{
Param reader_parameters = getParam_().copy("algorithm:", true);
TransitionTSVFile tsv_reader;
tsv_reader.setLogType(log_type_);
tsv_reader.setParameters(reader_parameters);
tsv_reader.convertTSVToTargetedExperiment(in.c_str(), in_type, light_exp);
}
else if (in_type == FileTypes::PQP)
{
TransitionPQPFile pqp_reader;
Param reader_parameters = getParam_().copy("algorithm:", true);
pqp_reader.setLogType(log_type_);
pqp_reader.setParameters(reader_parameters);
pqp_reader.convertPQPToTargetedExperiment(in.c_str(), light_exp);
}
MRMDecoy decoys;
decoys.setLogType(ProgressLogger::CMD);
OPENMS_LOG_INFO << "Generate decoys (Light)" << std::endl;
decoys.generateDecoysLight(light_exp, light_decoy, method,
aim_decoy_fraction, switchKR, decoy_tag, max_attempts,
identity_threshold, precursor_mz_shift,
product_mz_shift, product_mz_threshold,
allowed_fragment_types, allowed_fragment_charges,
enable_detection_specific_losses,
enable_detection_unspecific_losses);
// Check if we have enough peptides left
OPENMS_LOG_INFO << "Number of target compounds: " << light_exp.compounds.size() << std::endl;
OPENMS_LOG_INFO << "Number of decoy compounds: " << light_decoy.compounds.size() << std::endl;
OPENMS_LOG_INFO << "Number of target proteins: " << light_exp.proteins.size() << std::endl;
OPENMS_LOG_INFO << "Number of decoy proteins: " << light_decoy.proteins.size() << std::endl;
if (light_exp.compounds.empty() || light_exp.proteins.empty())
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"The input experiment has no compounds or proteins.");
}
if ((float)light_decoy.compounds.size() / (float)light_exp.compounds.size() < min_decoy_fraction ||
(float)light_decoy.proteins.size() / (float)light_exp.proteins.size() < min_decoy_fraction)
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"The number of decoys for peptides or proteins is below the threshold of " + String(min_decoy_fraction * 100) + "% of the number of targets.");
}
if (separate)
{
OPENMS_LOG_INFO << "Writing only decoys to file: " << out << std::endl;
light_merged = std::move(light_decoy);
}
else
{
OPENMS_LOG_INFO << "Writing targets and decoys to file: " << out << std::endl;
light_merged = std::move(light_exp);
// Append decoys
light_merged.transitions.insert(light_merged.transitions.end(),
light_decoy.transitions.begin(), light_decoy.transitions.end());
light_merged.compounds.insert(light_merged.compounds.end(),
light_decoy.compounds.begin(), light_decoy.compounds.end());
light_merged.proteins.insert(light_merged.proteins.end(),
light_decoy.proteins.begin(), light_decoy.proteins.end());
}
if (out_type == FileTypes::TSV)
{
TransitionTSVFile tsv_writer;
tsv_writer.setLogType(log_type_);
tsv_writer.convertLightTargetedExperimentToTSV(out.c_str(), light_merged);
}
else if (out_type == FileTypes::PQP)
{
TransitionPQPFile pqp_writer;
pqp_writer.setLogType(log_type_);
pqp_writer.convertLightTargetedExperimentToPQP(out.c_str(), light_merged);
}
}
else
{
// Heavy path for TraML
TargetedExperiment targeted_merged;
{
TargetedExperiment targeted_exp;
TargetedExperiment targeted_decoy;
OPENMS_LOG_INFO << "Loading targets from file: " << in << std::endl;
if (in_type == FileTypes::TSV || in_type == FileTypes::MRM)
{
const char* tr_file = in.c_str();
Param reader_parameters = getParam_().copy("algorithm:", true);
TransitionTSVFile tsv_reader = TransitionTSVFile();
tsv_reader.setLogType(log_type_);
tsv_reader.setParameters(reader_parameters);
tsv_reader.convertTSVToTargetedExperiment(tr_file, in_type, targeted_exp);
tsv_reader.validateTargetedExperiment(targeted_exp);
}
else if (in_type == FileTypes::PQP)
{
const char* tr_file = in.c_str();
TransitionPQPFile pqp_reader = TransitionPQPFile();
Param reader_parameters = getParam_().copy("algorithm:", true);
pqp_reader.setLogType(log_type_);
pqp_reader.setParameters(reader_parameters);
pqp_reader.convertPQPToTargetedExperiment(tr_file, targeted_exp);
pqp_reader.validateTargetedExperiment(targeted_exp);
}
else if (in_type == FileTypes::TRAML)
{
FileHandler().loadTransitions(in, targeted_exp, {FileTypes::TRAML});
}
MRMDecoy decoys = MRMDecoy();
decoys.setLogType(ProgressLogger::CMD);
OPENMS_LOG_INFO << "Generate decoys" << std::endl;
decoys.generateDecoys(targeted_exp, targeted_decoy, method,
aim_decoy_fraction, switchKR, decoy_tag, max_attempts,
identity_threshold, precursor_mz_shift,
product_mz_shift, product_mz_threshold,
allowed_fragment_types, allowed_fragment_charges,
enable_detection_specific_losses,
enable_detection_unspecific_losses);
// Check if we have enough peptides left
OPENMS_LOG_INFO << "Number of target peptides: " << targeted_exp.getPeptides().size() << std::endl;
OPENMS_LOG_INFO << "Number of decoy peptides: " << targeted_decoy.getPeptides().size() << std::endl;
OPENMS_LOG_INFO << "Number of target proteins: " << targeted_exp.getProteins().size() << std::endl;
OPENMS_LOG_INFO << "Number of decoy proteins: " << targeted_decoy.getProteins().size() << std::endl;
if ((float)targeted_decoy.getPeptides().size() / (float)targeted_exp.getPeptides().size() < min_decoy_fraction ||
(float)targeted_decoy.getProteins().size() / (float)targeted_exp.getProteins().size() < min_decoy_fraction)
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"The number of decoys for peptides or proteins is below the threshold of " + String(min_decoy_fraction * 100) + "% of the number of targets.");
}
if (separate)
{
OPENMS_LOG_INFO << "Writing only decoys to file: " << out << std::endl;
targeted_merged = std::move(targeted_decoy);
}
else
{
OPENMS_LOG_INFO << "Writing targets and decoys to file: " << out << std::endl;
targeted_merged = std::move(targeted_exp);
targeted_merged += std::move(targeted_decoy);
}
}
if (out_type == FileTypes::TSV)
{
const char* tr_file = out.c_str();
TransitionTSVFile tsv_reader = TransitionTSVFile();
tsv_reader.setLogType(log_type_);
tsv_reader.convertTargetedExperimentToTSV(tr_file, targeted_merged);
}
else if (out_type == FileTypes::PQP)
{
const char * tr_file = out.c_str();
TransitionPQPFile pqp_reader = TransitionPQPFile();
pqp_reader.setLogType(log_type_);
pqp_reader.convertTargetedExperimentToPQP(tr_file, targeted_merged);
}
else if (out_type == FileTypes::TRAML)
{
FileHandler().storeTransitions(out, targeted_merged, {FileTypes::TRAML});
}
}
return EXECUTION_OK;
}
};
int main(int argc, const char **argv)
{
  // Instantiate the TOPP tool and delegate directly to its argument-driven entry point.
  return TOPPOpenSwathDecoyGenerator().main(argc, argv);
}
/// @endcond
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Andreas Bertsch, Chris Bielow, Knut Reinert $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/ANALYSIS/ID/PeptideIndexing.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <OpenMS/METADATA/ProteinIdentification.h>
#include <OpenMS/SYSTEM/File.h>
using namespace OpenMS;
/**
@page TOPP_PeptideIndexer PeptideIndexer
@brief Refreshes the protein references for all peptide hits from an idXML file and adds target/decoy information.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=2> → PeptideIndexer →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_IDFilter or @n any protein/peptide processing tool </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_FalseDiscoveryRate </td>
</tr>
</table>
</CENTER>
PeptideIndexer refreshes target/decoy information and mapping of peptides to proteins.
The target/decoy information is crucial for the @ref TOPP_FalseDiscoveryRate tool. (For FDR calculations, peptides hitting both target and decoy proteins are counted as target hits.)
PeptideIndexer allows for ambiguous amino acids (B|J|Z|X) in the protein database and peptide sequence.
Enzyme cutting rules and partial specificity are derived from input idXML automatically by default or can be specified explicitly by the user.
All peptide and protein hits are annotated with target/decoy information, using the meta value 'target_decoy'.
For proteins the possible values are "target" and "decoy", depending on whether the protein accession contains the decoy pattern (parameter @p decoy_string)
as a suffix or prefix, respectively (see parameter @p prefix).
Resulting protein hits appear in the order of the FASTA file, except for orphaned proteins, which will appear first with an empty 'target_decoy' metavalue.
Duplicate protein accessions & sequences will not raise a warning, but create multiple hits (PeptideIndexer reads the FASTA file piecewise for efficiency
reasons, and thus might not see all accessions & sequences at once).
Peptide hits are annotated with metavalue 'protein_references', and if matched to at least one protein also with metavalue 'target_decoy'.
The possible values for 'target_decoy' in peptides are "target", "decoy" and "target+decoy",
depending on whether the peptide sequence is found only in target proteins, only in decoy proteins, or in both. If the peptide is unmatched the metavalue is missing.
Runtime: PeptideIndexer is usually very fast (loading and storing the data takes the most time) and search speed can be further improved (linearly) by using more threads.
Avoid allowing too many (>=4) ambiguous amino acids if your database contains long stretches of 'X' (exponential search space).
PeptideIndexer supports relative database filenames, which (when not found in the current working directory) are looked up in the directories specified
by @p OpenMS.ini:id_db_dir. The database is by default derived from the input idXML's metainformation ('auto' setting), but can be specified explicitly.
@note Currently mzIdentML (mzid) is not directly supported as an input/output format of this tool. Convert mzid files to/from idXML using @ref TOPP_IDFileConverter if necessary.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_PeptideIndexer.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_PeptideIndexer.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPPeptideIndexer :
  public TOPPBase
{
public:
  TOPPPeptideIndexer() :
    TOPPBase("PeptideIndexer",
             "Refreshes the protein references for all peptide hits.")
  {
  }

protected:
  /// Register input idXML, optional FASTA database and output idXML,
  /// plus the complete parameter set of the PeptideIndexing algorithm.
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Input idXML file containing the identifications.");
    setValidFormats_("in", {"idXML"});
    registerInputFile_("fasta", "<file>", "", "Input sequence database in FASTA format. "
                                              "Leave empty for using the same DB as used for the input idXML (this might fail). "
                                              "Non-existing relative filenames are looked up via 'OpenMS.ini:id_db_dir'", false, false, { "skipexists" });
    setValidFormats_("fasta", { "fasta" }, false);
    registerOutputFile_("out", "<file>", "", "Output idXML file.");
    setValidFormats_("out", {"idXML"});

    // expose all PeptideIndexing algorithm parameters on this tool's command line
    registerFullParam_(PeptideIndexing().getParameters());
  }

  /// Load identifications, (re-)index peptides against the FASTA database,
  /// optionally compute protein coverage, and write the annotated idXML.
  ExitCodes main_(int, const char**) override
  {
    //-------------------------------------------------------------
    // parsing parameters
    //-------------------------------------------------------------
    String in = getStringOption_("in");
    String out = getStringOption_("out");
    String db_name = getStringOption_("fasta"); // optional. Might be empty.

    //-------------------------------------------------------------
    // reading input
    //-------------------------------------------------------------
    // we stream the Fasta file
    std::vector<ProteinIdentification> prot_ids;
    PeptideIdentificationList pep_ids;
    FileHandler().loadIdentifications(in, prot_ids, pep_ids, {FileTypes::IDXML});

    if (db_name.empty())
    { // determine from metadata in idXML; all runs must reference the same DB
      OPENMS_LOG_INFO << "Automatically deriving DB from meta data ...";
      for (const auto& pi : prot_ids)
      {
        if (!db_name.empty() && db_name != pi.getSearchParameters().db)
        {
          throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
            "Different conflicting database names in idXML files are from multiple runs. Cannot automatically determine DB from these candidates:\n"
            "1) " + db_name + "\n"
            "2) " + pi.getSearchParameters().db);
        }
        db_name = pi.getSearchParameters().db;
      }
      OPENMS_LOG_INFO << "DB: " << db_name << std::endl;
    }

    if (!File::readable(db_name))
    { // fall back to the configured database directories ('OpenMS.ini:id_db_dir')
      String full_db_name;
      try
      {
        full_db_name = File::findDatabase(db_name);
      }
      catch (...)
      {
        printUsage_();
        return ILLEGAL_PARAMETERS;
      }
      db_name = full_db_name;
    }

    //-------------------------------------------------------------
    // calculations
    //-------------------------------------------------------------
    PeptideIndexing indexer;
    Param param = getParam_();
    Param param_pi = indexer.getParameters();
    param_pi.update(param, false, false, false, false, getGlobalLogDebug()); // suppress param. update message
    indexer.setParameters(param_pi);
    indexer.setLogType(this->log_type_);

    FASTAContainer<TFI_File> proteins(db_name);
    PeptideIndexing::ExitCodes indexer_exit = indexer.run(proteins, prot_ids, pep_ids);

    //-------------------------------------------------------------
    // calculate protein coverage
    //-------------------------------------------------------------
    if (param.getValue("write_protein_sequence").toBool())
    {
      for (auto& prot_id : prot_ids)
      {
        prot_id.computeCoverage(pep_ids);
      }
    }

    //-------------------------------------------------------------
    // writing output
    //-------------------------------------------------------------
    FileHandler().storeIdentifications(out, prot_ids, pep_ids, {FileTypes::IDXML});

    // map the indexer's result onto the TOPP tool exit codes
    if (indexer_exit == PeptideIndexing::ExitCodes::DATABASE_EMPTY)
    {
      return INPUT_FILE_EMPTY;
    }
    else if (indexer_exit == PeptideIndexing::ExitCodes::UNEXPECTED_RESULT)
    {
      return UNEXPECTED_RESULT;
    }
    else if ((indexer_exit != PeptideIndexing::ExitCodes::EXECUTION_OK) &&
             (indexer_exit != PeptideIndexing::ExitCodes::PEPTIDE_IDS_EMPTY))
    {
      return UNKNOWN_ERROR;
    }
    return EXECUTION_OK;
  }

};
int main(int argc, const char** argv)
{
  // Construct the tool object and hand control to the shared TOPPBase driver.
  return TOPPPeptideIndexer().main(argc, argv);
}
/// @endcond
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/APPLICATIONS/ToolHandler.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/UniqueIdGenerator.h>
#include <OpenMS/DATASTRUCTURES/Param.h>
#include <OpenMS/DATASTRUCTURES/String.h>
#include <OpenMS/DATASTRUCTURES/ToolDescription.h>
#include <OpenMS/SYSTEM/File.h>
#include <QtCore/QProcess>
#include <QFileInfo>
#include <QDir>
#include <QRegularExpression>
#include <typeinfo>
using namespace OpenMS;
using namespace std;
/**
@page TOPP_GenericWrapper GenericWrapper
@brief Allows generically the wrapping of external tools.
<CENTER>
<table>
<tr>
<th ALIGN = "center"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=2> → GenericWrapper →</td>
<th ALIGN = "center"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> any file the external tool can read </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> any tool reading the output format </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_IDFileConverter (to produce pepXML) </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> → GenericWrapper (type 'ProteinProphet') →</td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> @ref TOPP_IDFileConverter (protXML to idXML) </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> RAW file </td>
<td VALIGN="middle" ROWSPAN=1> → GenericWrapper (type 'RAWFileConvert') →</td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> any tool accepting mzML </td>
</tr>
</table>
</CENTER>
This tool is a wrapper to call external (non-OpenMS) executables/scripts.
Each supported tool is represented by a certain <tt>type</tt>.
Each type exposes certain parameters which you can set (usually at least a <tt>in</tt> and <tt>out</tt>).
To obtain support for more external programs, visit the OpenMS website or (if you cannot find your tool there) ask on the OpenMS mailing list.
<b>The following section is for experts only, who want to add their own external tool:</b>
Each external tool is configured via a wrapper XML file in 'OpenMS/share/OpenMS/TOOLS/EXTERNAL'. All files have the ending .ttd (TOPP tool description).
You can add one or more wrappers (i.e. types) per file, but we recommend one. The filename does not really matter, but it should be descriptive.
The ttd file has the following structure:
<table>
<tr><th>type</th><td>
The name of the type which is added to list of valid GenericWrapper types. It should be unique, otherwise you get a fatal error.
</td></tr>
<tr><th>category</th><td>
Category for TOPPAS.
</td></tr>
<tr><th>cloptions</th><td>
Command line options (arguments) appended to the executable.
This string might contain placeholders of the form "%<i>"
where each placeholder will be substituted with a value that is determined in the
mappings section (see below).
Example:
@code
<cloptions>-o "%1" --mzML "%2"</cloptions>
@endcode
</td></tr>
<tr><th>path</th><td>
Path (can be relative) to the executable that is executed.
</td></tr>
<tr><th>mappings</th><td>
Used to replace placeholders with input parameters.
The mapping id corresponds to the placeholder in <tt>cloptions</tt>.
The template used as starting string is given in <tt>cl</tt>.
All tokens therein will be replaced and the result will be patched into the <tt>cloptions</tt> string.
Allowed tokens are:
<ul>
<li>\%TMP --> The current temp directory, fetched using File::getTempDirectory()
<li>\%DIR --> directory prefix, e.g.:, c:/tmp/mzfile.mzML gives 'c:/tmp'
<li>\%BASENAME[file] --> the basename of a file, e.g. c:/tmp/myfile.mzML gives 'myfile'
<li>\%RND --> generates a long random number, which can be used to generate unique directory or file names in a <file_pre> tag
<li>\%WORKINGDIR --> expands to the current working directory (default is '.'), settable by <workingdirectory> tag in the .ttd file.
<li>\%\%<param> --> any param registered in the ini_param section, e.g. '\%\%in'
</ul>
Example:
@code
<mapping id="2" cl="-output_file %BASENAME[%%in].mgf -temp_dir %TMP -depth 3" />
@endcode
</td></tr>
<tr><th>ini_param</th><td>
Contains part of a normal INI file with describes the parameters. Valid tags are those that are in the ParamXML scheme below 'NODE', e.g. 'ITEM'.
Example:
@code
<ITEM name="out" value="" type="string" description="output XML file containing regression line and confidence interval" tags="output file" />
<ITEM name="mz_tolerance" value="1" type="float" description="Tolerance in m/z dimension" />
@endcode
</td></tr>
</table>
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_GenericWrapper.cli
<B>INI file documentation of this tool:</B>
*/
// no @htmlinclude TOPP_GenericWrapper.html since it needs a type to create an .INI (which would be only valid for this type)
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPGenericWrapper :
public TOPPBase
{
public:
/// Constructor: registers the tool's TOPP name and one-line description with the framework.
TOPPGenericWrapper() :
  TOPPBase("GenericWrapper", "Allows the generic wrapping of external tools.")
{
}
protected:
/**
  @brief Render a parameter value as a command-line-ready string.

  File parameters are converted to native path separators; string-list values
  are additionally double-quoted element-wise so paths with spaces survive.
*/
String paramToString_(const Param::ParamEntry & p)
{
  const bool is_file = p.tags.count("input file") || p.tags.count("output file");

  if (p.value.valueType() == ParamValue::STRING_LIST)
  { // quote each element
    StringList elements = ListUtils::toStringList<std::string>(p.value);
    if (is_file)
    { // normalize every path to the platform's separator style
      for (String& element : elements)
      {
        element = QDir::toNativeSeparators(element.toQString());
      }
    }
    return "\"" + ListUtils::concatenate(elements, "\" \"") + "\"";
  }

  if (is_file)
  {
    // ensure that file names are formatted according to system spec
    return QDir::toNativeSeparators(String(p.value.toString()).toQString());
  }

  return p.value.toString();
}
/**
  @brief Comparator that orders String objects by ascending length.
*/
struct StringSizeLess
{
  bool operator()(String const & left, String const & right) const
  {
    const auto left_len = left.size();
    const auto right_len = right.size();
    return left_len < right_len;
  }
};
/**
  @brief Expand all placeholders in @p fragment in place.

  Supported tokens: '%%<param>' (values from @p param), numeric mappings '%<n>'
  (from @p optional_mappings), '%TMP', '%RND', '%WORKINGDIR', '%DIR[...]' and
  '%BASENAME[...]'.
  Parameter names are substituted longest-first, so a name that is a prefix of
  another (e.g. '%%out' vs '%%out_fm') cannot corrupt the result.

  @throws Exception::InvalidValue if unresolved '%%' tokens remain, or if stray
          '%' characters are left over (or unexpectedly consumed) after substitution.
*/
void createFragment_(String & fragment, const Param & param, const std::map<int, std::string>& optional_mappings = (std::map<int, std::string>()))
{
  //std::cerr << "FRAGMENT: " << fragment << "\n\n";
  // e.g.: -input %BASENAME[%%in].mzML

  // we have to make this little detour param -> vector<String>
  // to sort the param names by length, otherwise we have a
  // problem with parameter substitution
  // i.e., if A is a prefix of B and gets replaced first, the
  // suffix of B remains and will cause trouble, e.g.: "%%out" vs. "%%out_fm"
  vector<String> param_names;
  param_names.reserve(param.size());
  for (Param::ParamIterator it = param.begin(); it != param.end(); ++it)
  {
    param_names.push_back(it->name);
  }
  // sort by length (longest first, see above)
  std::sort(param_names.begin(), param_names.end(), [](auto &left, auto &right) {StringSizeLess cmp; return cmp(right, left);});

  // iterate through all input params and replace with values:
  SignedSize allowed_percent(0); // filenames might contain '%', which are allowed to remain there (and even must remain)
  for (vector<String>::iterator it = param_names.begin(); it != param_names.end(); ++it)
  {
    if (!fragment.hasSubstring("%%" + *it)) continue;

    String s_new = paramToString_(param.getEntry(*it));
    // '%' characters introduced by the substituted value itself are legitimate
    allowed_percent += s_new.length() - String(s_new).substitute("%", "").length();
    //std::cerr << "IN: " << s_new << "(" << allowed_percent << "\n";
    fragment.substitute("%%" + *it, s_new);
  }
  if (fragment.hasSubstring("%%"))
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Invalid '%%' found in '" + fragment + "' after replacing all parameters!", fragment);
  }

  // mapping replace> e.g.: %2
  // do it reverse, since %10 should precede %1
  for (std::map<int, std::string>::const_reverse_iterator it = optional_mappings.rbegin(); it != optional_mappings.rend(); ++it)
  {
    String m = String("%") + it->first;
    if (fragment.hasSubstring(m)) {
      writeDebug_(String("Replacing '") + m + "' in '" + fragment + "' by '" + it->second + "'\n", 10);
      fragment.substitute(m, it->second);
    }
  }

  // %TMP replace:
  fragment.substitute("%TMP", File::getTempDirectory());
  // %RND replace:
  fragment.substitute("%RND", String(UniqueIdGenerator::getUniqueId()));
  // %WORKINGDIR replace:
  fragment.substitute("%WORKINGDIR", tde_.working_directory);

  // %DIR% replace
  {
    QRegularExpression rx(R"(%DIR\[(.*)\])");
    rx.setPatternOptions(QRegularExpression::InvertedGreedinessOption); // non-greedy: match the innermost bracket pair
    QString t_tmp = fragment.toQString();
    //std::cout << "fragment is:" << fragment << std::endl;
    for (const QRegularExpressionMatch& match : rx.globalMatch(fragment.toQString()))
    {
      String value = match.captured(1); // param name (hopefully)
      // replace in fragment:
      QFileInfo qfi(value.toQString());
      //std::cout << "match @ " << pos << " " << value << " --> " << qfi.canonicalPath() << "\n";
      t_tmp.replace(String("%DIR[" + value + "]").toQString(), qfi.canonicalPath());
    }
    fragment = t_tmp;
    //std::cout << "NEW fragment is:" << fragment << std::endl;
  }

  // %BASENAME% replace
  {
    QRegularExpression rx(R"(%BASENAME\[(.*)\])");
    rx.setPatternOptions(QRegularExpression::InvertedGreedinessOption);
    int count = 0;
    QString t_tmp = fragment.toQString();
    for (const QRegularExpressionMatch& match : rx.globalMatch(fragment.toQString()))
    {
      //std::cout << "match @ " << pos << "\n";
      String value = match.captured(1); // param name (hopefully)
      // replace in fragment:
      QFileInfo qfi(value.toQString());
      //std::cout << "match @ " << pos << " " << value << " --> " << qfi.completeBaseName() << "\n";
      t_tmp.replace(String("%BASENAME[" + value + "]").toQString(), qfi.completeBaseName());
      ++count;
    }
    // update expected count of valid '%'
    allowed_percent -= (fragment.length() - String(fragment).substitute("%", "").length()) // original # of %
                       - (t_tmp.length() - String(t_tmp).substitute("%", "").length()) // new # of %
                       - count; // expected # of % due to %BASENAME
    fragment = String(t_tmp);
  }

  SignedSize diff = (fragment.length() - String(fragment).substitute("%", "").length()) - allowed_percent;
  //std::cerr << "allowed: " << allowed_percent << "\n" << "diff: " << diff << " in: " << fragment << "\n";
  if (diff > 0)
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Mapping still contains '%' after substitution! Did you use % instead of %%?", fragment);
  }
  else if (diff < 0)
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Error: '%' from a filename were accidentally considered command tags! "
                                                                              "This is a bug! Remove '%' from input filenames to fix, but please report this as well!", fragment);
  }
  //std::cout << fragment << "'\n";
}
Internal::ToolExternalDetails tde_;
/// Pass @p return_code through, printing the tool's failure text first when it signals an error.
ExitCodes wrapExit(const ExitCodes return_code) const
{
  if (return_code == EXECUTION_OK)
  {
    return return_code;
  }
  OPENMS_LOG_ERROR << "\n" << tde_.text_fail << "\n";
  return return_code;
}
void registerOptionsAndFlags_() override
{
registerSubsection_("ETool", "tool specific parameters");
registerStringOption_("type", "", "", "Which external tool configuration to load?! See '" + ToolHandler::getExternalToolsPath() + "'.", true, false);
setValidStrings_("type", ToolHandler::getTypes(toolName_()));
}
/// Look up the default parameters of the selected external tool type; throws if the type is unknown.
Param getSubsectionDefaults_(const String & /*section*/) const override
{
  const String type = getStringOption_("type"); // this will throw() if not set in param_

  // scan the registered external tool descriptions for the requested type
  const Internal::ToolDescription gw = ToolHandler::getTOPPToolList(true)[toolName_()];
  for (Size i = 0; i < gw.types.size(); ++i)
  {
    if (gw.types[i] == type)
    {
      return gw.external_details[i].param;
    }
  }

  // requested TDD is not found -- might be a custom TTD
  throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "The value of 'Type' is invalid! Are you missing a TTD?", type);
}
ExitCodes main_(int, const char **) override
{
// find the config for the tool:
String type = getStringOption_("type");
Param tool_param = this->getParam_();
// check required parameters (TOPPBase does not do this as we did not use registerInputFile_(...) etc)
Param p = tool_param.copy("ETool:", true);
for (Param::ParamIterator it = p.begin(); it != p.end(); ++it)
{
if ((it->tags).count("required") > 0)
{
String in = String(it->value.toString()).trim(); // will give '[]' for empty lists (hack, but DataValue class does not offer a convenient query)
if (in.empty() || in == "[]") // any required parameter should have a value
{
OPENMS_LOG_ERROR << "The INI-parameter 'ETool:" << it->name << "' is required, but was not given! Aborting ..." << std::endl;
return wrapExit(CANNOT_WRITE_OUTPUT_FILE);
}
else if ((it->tags).count("input file") > 0) // any required input file should exist
{
StringList ifs;
switch (it->value.valueType())
{
case ParamValue::STRING_VALUE:
ifs.push_back(it->value.toChar());
break;
case ParamValue::STRING_LIST:
ifs = ListUtils::toStringList<std::string>(it->value);
break;
default:
OPENMS_LOG_ERROR << "The INI-parameter 'ETool:" << it->name << "' is tagged as input file and thus must be a string! Aborting ...";
return wrapExit(ILLEGAL_PARAMETERS);
}
for (StringList::const_iterator itf = ifs.begin(); itf != ifs.end(); ++itf)
{
if (!File::exists(*itf))
{
OPENMS_LOG_ERROR << "Input file '" << *itf << "' does not exist! Aborting ...";
return wrapExit(INPUT_FILE_NOT_FOUND);
}
}
}
}
}
Internal::ToolDescription gw = ToolHandler::getTOPPToolList(true)[toolName_()];
for (Size i = 0; i < gw.types.size(); ++i)
{
if (type == gw.types[i])
{
tde_ = gw.external_details[i];
if (tde_.working_directory.trim().empty())
{
tde_.working_directory = ".";
}
break;
}
}
OPENMS_LOG_INFO << tde_.text_startup << "\n";
String command_args = tde_.commandline;
// check for double spaces and warn
if (command_args.hasSubstring(" "))
{
OPENMS_LOG_WARN << "Command line contains double spaces, which is not allowed. Condensing...\n";
while (command_args.hasSubstring(" "))
{
command_args.substitute(" ", " ");
}
OPENMS_LOG_WARN << "result: " << command_args << std::endl;
}
writeDebug_("CommandLine from ttd (unprocessed): " + command_args, 1);
// do "pre" moves (e.g. if the wrapped tool works on its data in-place (overwrites) it - we need to make a copy first
// - we copy the file
// - we set the value of the affected parameter to the copied tmp file, such that subsequent calls target the tmp file
for (Size i = 0; i < tde_.tr_table.pre_moves.size(); ++i)
{
const Internal::FileMapping& fm = tde_.tr_table.pre_moves[i];
// find target param:
Param p = tool_param.copy("ETool:", true);
String target = fm.target;
if (!p.exists(target))
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Cannot find target parameter '" + target + "' being mapped from external tools output!", target);
}
String tmp_location = fm.location;
// fragment's placeholder evaluation:
createFragment_(tmp_location, p);
// check if target already exists:
String target_file = p.getValue(target).toString();
if (File::exists(tmp_location))
{
if (!File::remove(tmp_location))
{
OPENMS_LOG_ERROR << "While writing a tmp file: Cannot remove conflicting file '" + tmp_location + "'. Check permissions! Aborting ...";
return wrapExit(CANNOT_WRITE_OUTPUT_FILE);
}
}
// create the temp file tmp_location target_file
writeDebug_(String("Copying '") + target_file + "' to '" + tmp_location + "'", 1);
bool move_ok = QFile::copy(target_file.toQString(), tmp_location.toQString());
if (!move_ok)
{
OPENMS_LOG_ERROR << "Copying the target file '" + tmp_location + "' from '" + target_file + "' failed! Aborting ...";
return wrapExit(CANNOT_WRITE_OUTPUT_FILE);
}
// set the input file's value to the temp file
tool_param.setValue(String("ETool:") + target, tmp_location);
}
///// construct the command line:
std::map<int, std::string> mappings; // remember the values for each mapping (for file_post substitution later on)
// go through mappings (reverse because replacing %10 must come before %1):
for (std::map<Int, String>::reverse_iterator it = tde_.tr_table.mapping.rbegin(); it != tde_.tr_table.mapping.rend(); ++it)
{
//std::cout << "mapping #" << it->first << "\n";
String fragment = it->second;
// fragment's placeholder evaluation:
createFragment_(fragment, tool_param.copy("ETool:", true));
// replace fragment in cl
//std::cout << "replace : " << "%"+String(it->first) << " with '" << fragment << "\n";
command_args.substitute("%" + String(it->first), fragment);
// cache mapping
mappings[it->first] = fragment;
}
QProcess builder;
builder.setProcessChannelMode(QProcess::MergedChannels);
String call = tde_.path + " " + command_args;
writeDebug_("call command: " + call, 1);
builder.setWorkingDirectory(tde_.working_directory.toQString());
// TODO: start() with single argument is deprecated in Qt 5.15. Can probably be replaced with
// QStringList commandArgs = QString::fromStdString(command_args).split(" ");
// QString program = commandArgs.takeFirst();
// builder.start(program, commandArgs);
builder.start(call.toQString());
if (!builder.waitForFinished(-1) || builder.exitStatus() != 0 || builder.exitCode() != 0)
{
OPENMS_LOG_ERROR << ("External tool returned with exit code (" + String(builder.exitCode()) + "), exit status (" + String(builder.exitStatus()) + ") or timed out. Aborting ...\n");
OPENMS_LOG_ERROR << ("External tool output:\n" + String(QString(builder.readAll())));
return wrapExit(EXTERNAL_PROGRAM_ERROR);
}
OPENMS_LOG_INFO << ("External tool output:\n" + String(QString(builder.readAll())));
// post processing (file moving via 'file_post' command)
for (Size i = 0; i < tde_.tr_table.post_moves.size(); ++i)
{
const Internal::FileMapping & fm = tde_.tr_table.post_moves[i];
// find target param:
Param p = tool_param.copy("ETool:", true);
String source_file = fm.location;
// fragment's placeholder evaluation:
createFragment_(source_file, p, mappings);
// check if target already exists:
String target = fm.target;
if (!p.exists(target))
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Cannot find target parameter '" + target + "' being mapped from external tools output!", target);
}
String target_file = p.getValue(target).toString();
if (target_file.trim().empty()) // if target was not given, we skip the copying step (usually for optional parameters)
{
OPENMS_LOG_INFO << "Parameter '" + target + "' not given. Skipping forwarding of files.\n";
continue;
}
// check if the target exists already (should not; if yes, delete it before overwriting it)
if (File::exists(target_file))
{
if (!File::remove(target_file))
{
OPENMS_LOG_ERROR << "Cannot remove conflicting file '" + target_file + "'. Check permissions! Aborting ..." << std::endl;
return wrapExit(CANNOT_WRITE_OUTPUT_FILE);
}
}
// move to target
writeDebug_(String("<file_post>: moving '") + source_file + "' to '" + target_file + "'", 1);
if (!File::exists(source_file))
{
OPENMS_LOG_ERROR << "Moving the source file '" + source_file + "' during <file_post> failed, since it does not exist!\n"
<< "Make sure the external program created the file and its filename is either\n"
<< "unique or you only run one GenericWrapper at a time to avoid overwriting of files!\n"
<< "Ideally, (if the external program allows to specify output filenames directly) avoid <file_post>\n"
<< "in the TTD and request the output file directly. Aborting ..." << std::endl;
return wrapExit(CANNOT_WRITE_OUTPUT_FILE);
}
bool move_ok = QFile::rename(source_file.toQString(), target_file.toQString());
if (!move_ok)
{
OPENMS_LOG_ERROR << "Moving the target file '" + target_file + "' from '" + source_file + "' failed!\n"
<< "This file exists, but is either currently open for writing or otherwise blocked (concurrent process?). Aborting ..." << std::endl;
return wrapExit(CANNOT_WRITE_OUTPUT_FILE);
}
}
OPENMS_LOG_INFO << tde_.text_finish << "\n";
return wrapExit(EXECUTION_OK);
}
};
/// Tool entry point: hand command-line processing and execution over to the TOPP framework.
int main(int argc, const char ** argv)
{
  return TOPPGenericWrapper().main(argc, argv);
}
/// @endcond
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hendrik Weisser, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/CHEMISTRY/AASequence.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <OpenMS/FORMAT/SVOutStream.h>
#include <OpenMS/SYSTEM/File.h>
#include <ostream>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_MassCalculator MassCalculator
@brief Calculates masses and mass-to-charge ratios of peptide sequences.
Given a peptide sequence and a charge state, the charged mass (including H+ adducts) and the mass-to-charge ratio are computed.
The peptide sequence can include modifications (for information on valid notation see the @ref OpenMS::AASequence "AASequence" class documentation).
Neutral masses can be computed by using "0" as charge state.
Input can be given directly as values of the parameters: @p in_seq for peptide sequences and @p charge for charge states.
Alternatively, it can be read from a file (see parameter @p in) with the following format: A peptide sequence at the beginning of each line, optionally followed by any number of charge states.
Whitespace, commas or semicolons can be used to delimit the different items. Parts of the input that cannot be understood will be skipped.
If charge states are given in the input file as well as via the @p charge parameter, results are returned for the union of both sets of charge states.
Output can be written to a file or to the screen (see parameter @p out). Results for different charge states are always ordered from lowest to highest charge.
A number of different output formats are available via the parameter @p format:
- @p list writes a human-readable list of the form "ABCDEF: z=1 m=566.192 m/z=566.192, z=2 m=567.199 m/z=283.599";
- @p table produces a CSV-like table (using parameter @p separator to delimit fields) with the columns "peptide", "charge", "mass", and "mass-to-charge", and with one row per peptide and charge state;
- @p mass_only writes only mass values (one line per peptide, values for different charge states separated by spaces);
- @p mz_only writes only mass-to-charge ratios (one line per peptide, values for different charge states separated by spaces).
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_MassCalculator.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_MassCalculator.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPMassCalculator :
  public TOPPBase
{
public:
  /// Constructor: registers the tool and builds the residue-type name lookup table.
  TOPPMassCalculator() :
    TOPPBase("MassCalculator", "Calculates masses and mass-to-charge ratios of peptide sequences"), use_avg_mass_(false), output_(nullptr), format_(), res_type_(Residue::Full)
  {
    // map each residue type's display name (e.g. "full", "b-ion") to its enum value,
    // so the 'fragment_type' string parameter can be translated in main_()
    for (Size i = 0; i < Residue::SizeOfResidueType; i++)
    {
      Residue::ResidueType res_type = Residue::ResidueType(i);
      res_type_names_[Residue::getResidueTypeName(res_type)] = res_type;
    }
  }

protected:
  bool use_avg_mass_; ///< compute average (true) or monoisotopic (false) masses
  ostream* output_; ///< pointer to output stream (stdout or file)
  String format_; ///< output format: "list", "table", "mass_only" or "mz_only"
  String separator_; ///< field separator for the "table" format
  Residue::ResidueType res_type_; ///< type of sequence/fragment to compute masses for
  map<String, Residue::ResidueType> res_type_names_; ///< residue type name -> enum value

  /// Registers the command-line parameters of the tool.
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Input file with peptide sequences and optionally charge numbers (mutually exclusive to 'in_seq')", false);
    setValidFormats_("in", ListUtils::create<String>("tsv"));
    registerStringList_("in_seq", "<peptide_sequences>", StringList(), "List of peptide sequences (mutually exclusive to 'in')", false, false);
    registerOutputFile_("out", "<file>", "", "Output file; if empty, output is written to the screen", false);
    setValidFormats_("out", ListUtils::create<String>("csv"));
    registerIntList_("charge", "<numbers>", ListUtils::create<Int>("0"), "List of charge states; required if 'in_seq' is given", false);
    registerStringOption_("format", "<choice>", "list", "Output format ('list': human-readable list, 'table': CSV-like table, 'mass_only': mass values only, 'mz_only': m/z values only)\n", false);
    setValidStrings_("format", ListUtils::create<String>("list,table,mass_only,mz_only"));
    registerFlag_("average_mass", "Compute average (instead of monoisotopic) peptide masses");
    registerStringOption_("fragment_type", "<choice>", "full", "For what type of sequence/fragment the mass should be computed\n", false);
    setValidStrings_("fragment_type", ListUtils::create<String>("full,internal,N-terminal,C-terminal,a-ion,b-ion,c-ion,x-ion,y-ion,z-ion"));
    registerStringOption_("separator", "<sep>", "", "Field separator for 'table' output format; by default, the 'tab' character is used", false);
  }

  /**
    @brief Computes the (average or monoisotopic) mass of a peptide at a given charge.
    @param seq peptide sequence
    @param charge charge state; 0 gives the neutral mass
  */
  double computeMass_(const AASequence& seq, Int charge) const
  {
    if (use_avg_mass_) return seq.getAverageWeight(res_type_, charge);
    else return seq.getMonoWeight(res_type_, charge);
  }

  /// Writes one CSV-like row ("peptide", "charge", "mass", "mass-to-charge") per charge state.
  void writeTable_(const AASequence& seq, const set<Int>& charges)
  {
    SVOutStream sv_out(*output_, separator_);
    for (Int charge : charges)
    {
      double mass = computeMass_(seq, charge);
      sv_out << seq.toString() << charge << mass;
      // charge 0 makes "mass / charge" non-finite; writeValueOrNan handles that case
      sv_out.writeValueOrNan(mass / charge);
      sv_out << endl;
    }
  }

  /// Writes a human-readable list, e.g. "PEPTIDE: z=1 m=... m/z=..., z=2 ...".
  void writeList_(const AASequence& seq, const set<Int>& charges)
  {
    *output_ << seq.toString() << ": ";
    bool first = true;
    for (Int charge : charges)
    {
      double mass = computeMass_(seq, charge);
      if (!first) *output_ << ", ";
      first = false;
      *output_ << "z=" << charge << " m=" << mass << " m/z=";
      if (charge != 0) *output_ << (mass / charge);
      else *output_ << "inf"; // m/z undefined for the neutral peptide
    }
    *output_ << endl;
  }

  /**
    @brief Writes only mass (or m/z) values, space-separated, one line per peptide.
    @param mz if true, write m/z instead of mass ("inf" for charge 0)
  */
  void writeMassOnly_(const AASequence& seq, const set<Int>& charges,
                      bool mz = false)
  {
    bool first = true;
    for (Int charge : charges)
    {
      double mass = computeMass_(seq, charge);
      if (!first) *output_ << " ";
      first = false;
      if (!mz) *output_ << mass;
      else if (charge == 0) *output_ << "inf"; // m/z undefined for the neutral peptide
      else *output_ << mass / charge;
    }
    *output_ << endl;
  }

  /// Dispatches to the writer matching the selected output format.
  void writeLine_(const AASequence& seq, const set<Int>& charges)
  {
    if (format_ == "list") writeList_(seq, charges);
    else if (format_ == "table") writeTable_(seq, charges);
    else if (format_ == "mass_only") writeMassOnly_(seq, charges);
    else writeMassOnly_(seq, charges, true); // "mz_only"
  }

  /**
    @brief Splits off and returns the first token of @p line; @p line is advanced past the token and any following delimiters.
    @param skip set of delimiter characters
  */
  String getItem_(String& line, const String& skip = " \t,;")
  {
    Size pos = line.find_first_of(skip);
    String prefix = line.substr(0, pos);
    pos = line.find_first_not_of(skip, pos);
    if (pos == String::npos) line = "";
    else line = line.substr(pos);
    return prefix;
  }

  /**
    @brief Reads peptides (and optional per-line charge states) from a file and writes results.
    @param filename input file; one peptide per line, optionally followed by charge states
    @param charges global charge states (from the 'charge' parameter), merged with per-line charges
  */
  void readFile_(const String& filename, const set<Int>& charges)
  {
    ifstream input(filename.c_str());
    String line;
    Size line_count(0);
    while (getline(input, line))
    {
      ++line_count;
      // trim so that blank lines are skipped cleanly and leading whitespace does not
      // make getItem_() return an empty first token for otherwise valid lines
      line.trim();
      if (line.empty()) continue;
      String item = getItem_(line);
      if ((item[0] == '"') && (item[item.size() - 1] == '"'))
      {
        item.unquote();
      }
      AASequence seq;
      try
      {
        seq = AASequence::fromString(item);
      }
      catch (Exception::ParseError& /*e*/)
      {
        OPENMS_LOG_WARN << "Warning: '" << item << "' is not a valid peptide sequence - skipping\n";
        continue;
      }
      // per-line charges are added on top of the globally specified ones:
      set<Int> local_charges(charges);
      Size conversion_failed_count(0);
      while (!line.empty())
      {
        item = getItem_(line);
        try
        {
          local_charges.insert(item.toInt());
        }
        catch (Exception::ConversionError& /*e*/)
        {
          ++conversion_failed_count;
        }
      }
      if (conversion_failed_count)
      {
        OPENMS_LOG_WARN << "Warning: Invalid charge state specified in line:" << line_count << ".\n";
      }
      if (local_charges.empty())
      {
        OPENMS_LOG_WARN << "Warning: No charge state specified - skipping (line:" << line_count << ")\n";
        continue;
      }
      writeLine_(seq, local_charges);
    }
    input.close();
  }

  /// Main routine: parses parameters, sets up the output stream and processes all input peptides.
  ExitCodes main_(int, const char**) override
  {
    String in = getStringOption_("in");
    StringList in_seq = getStringList_("in_seq");
    String out = getStringOption_("out");
    IntList charge_list = getIntList_("charge");
    set<Int> charges(charge_list.begin(), charge_list.end());
    use_avg_mass_ = getFlag_("average_mass");
    res_type_ = res_type_names_[getStringOption_("fragment_type")];
    ofstream outfile;
    if (out.empty())
    {
      output_ = &cout;
    }
    else
    {
      outfile.open(out.c_str());
      if (!outfile.is_open()) // fail early instead of silently discarding all output
      {
        OPENMS_LOG_ERROR << "Error: Could not open output file '" << out << "' for writing";
        return CANNOT_WRITE_OUTPUT_FILE;
      }
      output_ = &outfile;
    }
    format_ = getStringOption_("format");
    if (format_ == "table")
    {
      separator_ = getStringOption_("separator");
      if (separator_.empty()) separator_ = "\t";
      // write header:
      SVOutStream sv_out(*output_, separator_);
      sv_out << "peptide" << "charge" << "mass" << "mass-to-charge" << endl;
    }
    if ((!in.empty()) && (!in_seq.empty()))
    {
      OPENMS_LOG_ERROR << "Specifying an input file and input sequences at the same time is not allowed!";
      return ILLEGAL_PARAMETERS;
    }
    if (!in.empty())
    {
      readFile_(in, charges);
    }
    else
    {
      if (charges.empty())
      {
        OPENMS_LOG_ERROR << "Error: No charge state specified";
        return ILLEGAL_PARAMETERS;
      }
      if (in_seq.empty()) // previously the tool silently did nothing in this case
      {
        OPENMS_LOG_WARN << "Warning: No input given ('in' or 'in_seq') - there is nothing to do\n";
      }
      for (const String& seq_str : in_seq)
      {
        AASequence seq;
        try
        {
          seq = AASequence::fromString(seq_str);
        }
        catch (Exception::ParseError& /*e*/)
        {
          OPENMS_LOG_WARN << "Warning: '" << seq_str << "' is not a valid peptide sequence - skipping\n";
          continue;
        }
        writeLine_(seq, charges);
      }
    }
    if (!out.empty()) outfile.close();
    return EXECUTION_OK;
  }
};
/// Tool entry point: hand command-line processing and execution over to the TOPP framework.
int main(int argc, const char** argv)
{
  return TOPPMassCalculator().main(argc, argv);
}
/// @endcond