keyword stringclasses 7 values | repo_name stringlengths 8 98 | file_path stringlengths 4 244 | file_extension stringclasses 29 values | file_size int64 0 84.1M | line_count int64 0 1.6M | content stringlengths 1 84.1M ⌀ | language stringclasses 14 values |
|---|---|---|---|---|---|---|---|
3D | OpenMS/OpenMS | src/openms/source/CONCEPT/ClassTest.cpp | .cpp | 26,567 | 780 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Marc Sturm, Clemens Groepl, Chris Bielow, Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/CONCEPT/UniqueIdGenerator.h>
#include <OpenMS/CONCEPT/FuzzyStringComparator.h>
#include <OpenMS/DATASTRUCTURES/StringListUtils.h>
#include <OpenMS/DATASTRUCTURES/ListUtilsIO.h>
#include <OpenMS/FORMAT/ConsensusXMLFile.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FORMAT/FileTypes.h>
#include <OpenMS/FORMAT/IdXMLFile.h>
#include <OpenMS/FORMAT/MzDataFile.h>
#include <OpenMS/FORMAT/MzMLFile.h>
#include <OpenMS/FORMAT/MzXMLFile.h>
#include <OpenMS/FORMAT/FeatureXMLFile.h>
#include <OpenMS/FORMAT/TransformationXMLFile.h>
#include <OpenMS/FORMAT/ParamXMLFile.h>
#include <OpenMS/SYSTEM/File.h>
#include <boost/math/special_functions/fpclassify.hpp>
#include <iomanip>
#include <fstream>
#include <QtCore/QFileInfo>
namespace OpenMS::Internal::ClassTest
{
// ---------------------------------------------------------------------------
// Global state shared by the TEST_* macros within one test executable.
// ---------------------------------------------------------------------------
bool all_tests = true; // conjunction of the results of all sections
bool equal_files; // result of the most recent TEST_FILE_EQUAL
bool newline = false; // true once initialNewline() has printed its newline
bool test = true; // result of the current section
bool this_test; // result of the most recent single subtest
char line_buffer[65536]; // scratch buffer for line-wise file comparison
const char* version_string = nullptr; // version string set by mainInit()
double absdiff = 0.; // absolute difference of the last numeric comparison
double absdiff_max = 0.; // running maximum of absdiff
double absdiff_max_allowed = 1E-5; // acceptable absolute difference
double ratio = 1.; // relative deviation of the last numeric comparison
double ratio_max = 1.; // running maximum of ratio
double ratio_max_allowed = 1. + 1E-5; // acceptable relative deviation (>= 1)
int exception = 0; // presumably used by the TEST_EXCEPTION macros (see ClassTest.h) — not referenced in this file
int line_num_1_max = -1; // line of maximal relative error in the first input
int line_num_2_max = -1; // line of maximal relative error in the second input
int start_section_line = 0; // line where the current section started (set elsewhere)
int test_count = 0; // number of subtests executed so far
int test_line = 0; // source line of the current subtest
int verbose = 0; // 0: quiet; > 1: report passing subtests as well
std::ifstream infile; // first input file for TEST_FILE_EQUAL
std::ifstream templatefile; // expected ("template") file for TEST_FILE_EQUAL
std::string add_message; // extra message appended to the PASSED/FAILED verdict
std::string exception_message = ""; // message of the last caught exception (set elsewhere)
std::string exception_name = ""; // name of the last caught exception (set elsewhere)
std::string fuzzy_message; // diagnostic of the last fuzzy comparison
std::string test_name = ""; // name of the current section (see endSectionPostProcess)
std::vector<std::string> tmp_file_list; // temporary files to validate and remove at the end
std::vector<UInt> failed_lines_list; // source lines of all failed subtests
StringList whitelist; // fuzzy diff: lines containing any of these strings are skipped
// Initializes the global test state of a test binary: picks up the
// OPENMS_TEST_VERBOSE environment switch, fixes the unique-ID seed for
// reproducibility, stores the version string, and rejects command-line
// arguments (test binaries take none).
void mainInit(const char* version, const char* class_name, int argc, const char* argv0)
{
  // OPENMS_TEST_VERBOSE=True enables output for successful subtests as well.
  const char* env_verbose = std::getenv("OPENMS_TEST_VERBOSE");
  if (env_verbose != nullptr && std::string(env_verbose) == "True")
  {
    TEST::verbose = 2;
  }
  // Fixed seed so generated unique IDs are identical across test runs.
  OpenMS::UniqueIdGenerator::setSeed(2453440375);
  TEST::version_string = version;
  if (argc > 1)
  {
    std::cerr
      << "This is " << argv0 << ", the test program for the\n"
      << class_name << " class.\n"
      "\n"
      "On successful operation it returns PASSED,\n"
      "otherwise FAILED is printed.\n";
    exit(1);
  }
}
// Implements TEST_FILE_EQUAL: compares two files line by line and records the
// verdict in the global TEST state. Each line is trimmed before comparison,
// so differing line endings (CR/LF) and surrounding whitespace do not count
// as a mismatch. A failure is appended to failed_lines_list.
void filesEqual(int line, const char* filename, const char* templatename, const char* filename_stringified, const char* templatename_stringified)
{
++TEST::test_count;
TEST::test_line = line;
TEST::equal_files = true;
TEST::infile.open(filename, std::ios::in);
TEST::templatefile.open(templatename, std::ios::in);
if (TEST::infile.good() && TEST::templatefile.good())
{
String TEST_FILE__template_line;
String TEST_FILE__line;
// Compare pairwise until either file is exhausted; every mismatching pair
// of lines is reported, not just the first one.
while (TEST::infile.good() && TEST::templatefile.good())
{
TEST::templatefile.getline(TEST::line_buffer, 65535);
TEST_FILE__template_line = TEST::line_buffer;
TEST::infile.getline(TEST::line_buffer, 65535);
TEST_FILE__line = TEST::line_buffer;
TEST_FILE__template_line.trim(); // remove leading and trailing whitespaces (ignore CR/LF line endings on Unix)
TEST_FILE__line.trim(); // remove leading and trailing whitespaces (ignore CR/LF line endings on Unix)
if (TEST_FILE__template_line != TEST_FILE__line)
{
TEST::equal_files = false;
TEST::initialNewline();
stdcout << " TEST_FILE_EQUAL: line mismatch:\n got: '"
<< TEST_FILE__line << "'\n expected: '"
<< TEST_FILE__template_line << "'\n";
}
}
}
else
{
// At least one of the two files could not be opened: report which one(s).
TEST::equal_files = false;
{
TEST::initialNewline();
stdcout << " + line "
<< line
<< ": TEST_FILE_EQUAL("
<< filename_stringified
<< ", "
<< templatename_stringified;
stdcout << ") : "
<< " cannot open file: \"";
if (!TEST::infile.good())
{
stdcout << filename << "\" (input file) ";
}
if (!TEST::templatefile.good())
{
stdcout << templatename << "\" (template file) ";
}
stdcout << "'\n";
}
}
// Reset both streams so the globals can be reused by the next comparison.
TEST::infile.close();
TEST::templatefile.close();
TEST::infile.clear();
TEST::templatefile.clear();
TEST::this_test = TEST::equal_files;
TEST::test = TEST::test && TEST::this_test;
{
TEST::initialNewline();
if (TEST::this_test)
{
// Success is reported only in verbose mode.
if (TEST::verbose > 1)
{
stdcout << " + line "
<< line
<< ": TEST_FILE_EQUAL("
<< filename_stringified
<< ", "
<< templatename_stringified
<< "): true";
}
}
else
{
stdcout << " - line "
<< line
<< ": TEST_FILE_EQUAL("
<< filename_stringified
<< ", "
<< templatename_stringified
<< "): false (different files: "
<< filename
<< " "
<< templatename
<< " )\n";
TEST::failed_lines_list.push_back(TEST::test_line);
}
}
}
// Deletes all temporary files registered via createTmpFileName().
// Best effort: a file that cannot be removed only produces a warning;
// removal of the remaining files continues.
void removeTempFiles()
{
  for (const std::string& tmp_file : TEST::tmp_file_list)
  {
    if (!OpenMS::File::remove(tmp_file))
    {
      stdcout << "Warning: unable to remove temporary file '"
              << tmp_file
              << "'"
              << '\n';
    }
  }
}
// Implements the WHITELIST macro: replaces the global whitelist used by the
// fuzzy string/file comparisons with the comma-separated entries of
// 'whitelist_', and echoes the new whitelist when verbosity demands it.
void
setWhitelist(const char* const /* file */, const int line,
             const std::string& whitelist_)
{
  TEST::whitelist = ListUtils::create<String>(whitelist_);
  const bool do_report = (TEST::verbose > 1) || (!TEST::this_test && (TEST::verbose > 0));
  if (do_report)
  {
    TEST::initialNewline();
    stdcout << " + line " << line << ": WHITELIST(\"" << whitelist_
            << "\"): whitelist is: " << TEST::whitelist << '\n';
  }
}
// Prints a single newline before the first diagnostic message of a test, so
// that diagnostics start on a fresh line; all subsequent calls are no-ops
// (guarded by the global 'newline' flag).
void
initialNewline()
{
  if (newline)
  {
    return; // already emitted for this test
  }
  newline = true;
  std::cout << '\n';
}
// Echoes every line of 'text' preceded by a " :|: " gutter; the line whose
// 1-based index equals 'marked' is highlighted with a '#' in its gutter
// (marked <= 0 highlights nothing). Used to display fuzzy-diff context.
void
printWithPrefix(const std::string& text, const int marked)
{
  std::istringstream source(text);
  std::string current;
  for (int index = 1; std::getline(source, current); ++index)
  {
    std::cout << (index == marked ? " # :|: " : " :|: ") << current << '\n';
  }
}
// Validates each existing file in 'file_names' against the format-specific
// schema/semantic checks for its detected file type (mzML additionally gets a
// semantic validation). Files of unknown/unsupported types are skipped and do
// not count as failures. Returns true iff every checked file validated.
bool
validate(const std::vector<std::string>& file_names)
{
std::cout << "checking (created temporary files)...\n";
bool passed_all = true;
for (Size i = 0; i < file_names.size(); ++i)
{
// Missing files are silently ignored here; only existing files are checked.
if (File::exists(file_names[i]))
{
FileTypes::Type type = FileHandler::getType(file_names[i]);
bool passed_single = true;
bool skipped = false;
switch (type)
{
case FileTypes::MZML:
{
// mzML: XML schema validation plus semantic (CV term) validation.
if (!MzMLFile().isValid(file_names[i]))
{
std::cout << " - Error: mzML file does not validate against XML schema '" << file_names[i] << "'\n";
passed_single = false;
}
StringList errors, warnings;
if (!MzMLFile().isSemanticallyValid(file_names[i], errors,
warnings))
{
std::cout << " - Error: mzML file semantically invalid '" << file_names[i] << "'\n";
for (Size j = 0; j < errors.size(); ++j)
{
std::cout << "Error - " << errors[j] << '\n';
}
passed_single = false;
}
}
break;
case FileTypes::MZDATA:
if (!MzDataFile().isValid(file_names[i], std::cerr))
{
std::cout << " - Error: Invalid mzData file '" << file_names[i] << "'\n";
passed_single = false;
}
break;
case FileTypes::MZXML:
if (!MzXMLFile().isValid(file_names[i], std::cerr))
{
std::cout << " - Error: Invalid mzXML file '" << file_names[i] << "'\n";
passed_single = false;
}
break;
case FileTypes::FEATUREXML:
if (!FeatureXMLFile().isValid(file_names[i], std::cerr))
{
std::cout << " - Error: Invalid featureXML file '" << file_names[i] << "'\n";
passed_single = false;
}
break;
case FileTypes::IDXML:
if (!IdXMLFile().isValid(file_names[i], std::cerr))
{
std::cout << " - Error: Invalid idXML file '" << file_names[i] << "'\n";
passed_single = false;
}
break;
case FileTypes::CONSENSUSXML:
if (!ConsensusXMLFile().isValid(file_names[i], std::cerr))
{
std::cout << " - Error: Invalid consensusXML file '" << file_names[i] << "'\n";
passed_single = false;
}
break;
case FileTypes::INI:
if (!ParamXMLFile().isValid(file_names[i], std::cerr))
{
std::cout << " - Error: Invalid Param file '" << file_names[i] << "'\n";
passed_single = false;
}
break;
case FileTypes::TRANSFORMATIONXML:
// Note: unlike the other formats, no extra error line is printed here.
if (!TransformationXMLFile().isValid(file_names[i], std::cerr))
{
passed_single = false;
}
break;
default:
// Unknown/unsupported type: not an error, just not checked.
skipped = true;
break;
}
//output for single file
if (skipped)
{
std::cout << " + skipped file '" << file_names[i] << "' (type: " << FileTypes::typeToName(type) << ")\n";
}
else if (passed_single)
{
std::cout << " + valid file '" << file_names[i] << "' (type: " << FileTypes::typeToName(type) << ")\n";
}
else
{
passed_all = false;
std::cout << " - invalid file '" << file_names[i] << "' (type: " << FileTypes::typeToName(type) << ")\n";
}
}
}
//output for all files
if (passed_all)
{
std::cout << ": passed" << std::endl << '\n';
}
else
{
std::cout << ": failed" << std::endl << '\n';
}
return passed_all;
}
// Builds a unique temporary filename of the form
// "<test-basename>_<line>.tmp<extension>" from the test source file and the
// caller's line number, registers it for later validation/removal, announces
// it on stdout, and returns it.
// @param file       path of the test source file (basename is reused)
// @param line       caller's source line (made part of the filename)
// @param extension  suffix appended after ".tmp" (e.g. ".mzML")
std::string
createTmpFileName(const std::string& file, int line, const std::string& extension)
{
  QFileInfo fi(file.c_str());
  std::string filename = (String(fi.baseName())) + '_' + String(line) + ".tmp" + extension;
  TEST::tmp_file_list.push_back(filename);
  TEST::initialNewline();
  // BUGFIX: report the caller's line number (which is also encoded in the
  // filename), not __LINE__ — the latter always printed this file's own,
  // constant and meaningless, line number.
  stdcout << " creating new temporary filename '"
          << filename
          << "' (line "
          << line
          << ")\n";
  return filename;
}
// Implements TEST_REAL_SIMILAR: first rejects the macro call outright if the
// first argument is not of a floating point type (a usage error in the test
// itself), then delegates the fuzzy numeric comparison to isRealSimilar()
// and reports/records the outcome in the global TEST state.
void testRealSimilar(const char* /*file*/, int line,
long double number_1, const char* number_1_stringified, bool number_1_is_realtype, Int number_1_written_digits,
long double number_2, const char* number_2_stringified, bool /* number_2_is_realtype */, Int number_2_written_digits
)
{
TEST::initialNewline();
++TEST::test_count;
TEST::test_line = line;
TEST::this_test = true;
// Guard against misuse: TEST_REAL_SIMILAR on a non-floating-point argument.
if (!number_1_is_realtype)
{
TEST::this_test = false;
stdcout << " - line " << line << ':'
<< "TEST_REAL_SIMILAR(" << number_1_stringified << ','
<< number_2_stringified << "):"
" argument " << number_1_stringified
<< " does not have a floating point type! Go fix your code!"
<< '\n';
failed_lines_list.push_back(line);
}
TEST::test = TEST::test && TEST::this_test;
if (TEST::this_test)
{
// Actual fuzzy comparison; fills absdiff/ratio/fuzzy_message globals.
TEST::this_test = TEST::isRealSimilar(number_1, number_2);
TEST::test = TEST::test && TEST::this_test;
{
if (TEST::this_test)
{
// Success is reported only in verbose mode.
if (TEST::verbose > 1)
{
stdcout << " + line " << line << ": TEST_REAL_SIMILAR("
<< number_1_stringified << ',' << number_2_stringified
<< "): got " << std::setprecision(number_1_written_digits)
<< number_1 << ", expected "
<< std::setprecision(number_2_written_digits) << number_2 << '\n';
}
}
else
{
// Failure: report values plus the absolute/relative errors and the
// diagnostic message produced by isRealSimilar().
stdcout << " - line " << TEST::test_line
<< ": TEST_REAL_SIMILAR(" << number_1_stringified << ','
<< number_2_stringified << "): got "
<< std::setprecision(number_1_written_digits) << number_1
<< ", expected "
<< std::setprecision(number_2_written_digits) << number_2
<< " (absolute: " << TEST::absdiff << " ["
<< TEST::absdiff_max_allowed << "], relative: "
<< TEST::ratio << " [" << TEST::ratio_max_allowed
<< "], message: \"" << TEST::fuzzy_message << "\"\n";
failed_lines_list.push_back(line);
}
}
}
}
// Fuzzy comparison of two floating point values against the global
// thresholds absdiff_max_allowed (absolute) and ratio_max_allowed (relative).
// Two values are "similar" if either the absolute difference or the relative
// deviation is within its allowed bound; NaN is never similar to anything.
// Side effects: updates the globals absdiff, ratio, absdiff_max, ratio_max
// and leaves a human-readable verdict in fuzzy_message.
// @return true iff the numbers are considered similar
bool isRealSimilar(long double number_1, long double number_2)
{
  // Note: The original version of the stuff below was copied from
  // FuzzyStringComparator and then heavily modified for ClassTest.
  // But still the case distinctions should be similar.
  absdiff = 0.;
  ratio = 0.;
  fuzzy_message.clear();
  if (std::isnan(number_1))
  {
    fuzzy_message = "number_1 is nan";
    return false;
  }
  if (std::isnan(number_2))
  {
    fuzzy_message = "number_2 is nan";
    return false;
  }
  // check if absolute difference is small
  absdiff = number_1 - number_2;
  if (absdiff < 0)
  {
    absdiff = -absdiff;
  }
  if (absdiff > absdiff_max)
  {
    absdiff_max = absdiff; // running maximum for the final report
  }
  // If absolute difference is small, large relative errors will be
  // tolerated in the cases below. But a large absolute difference is
  // not an error, if relative error is small. We do not jump out of
  // the case distinction here because we want to record the relative
  // error even in case of a successful comparison.
  bool is_absdiff_small = (absdiff <= absdiff_max_allowed);
  if (!number_1) // number_1 is zero
  {
    if (!number_2) // both numbers are zero
    {
      fuzzy_message = "both numbers are zero";
      return true;
    }
    else
    {
      if (!is_absdiff_small)
      {
        fuzzy_message = "number_1 is zero, but number_2 is not small";
        return false;
      }
      else
      {
        fuzzy_message = "number_1 is zero, number_2 is small";
        return true;
      }
    }
  }
  else // number_1 is not zero
  {
    if (!number_2)
    {
      if (!is_absdiff_small)
      {
        fuzzy_message = "number_1 is not zero, but number_2 is";
        return false;
      }
      else
      {
        // BUGFIX: this branch is only reached when the absolute difference
        // (== |number_1| here) IS small; the old message claimed the
        // opposite ("number_1 is not small"). Matches the symmetric branch.
        fuzzy_message = "number_2 is zero, number_1 is small";
        return true;
      }
    }
    else // both numbers are not zero
    {
      ratio = number_1 / number_2;
      if (ratio < 0.)
      {
        if (!is_absdiff_small)
        {
          fuzzy_message
            = "numbers have different signs and difference is not small";
          return false;
        }
        else
        {
          fuzzy_message
            = "numbers have different signs, but difference is small";
          return true;
        }
      }
      else // ok, numbers have same sign, but we still need to check their ratio
      {
        if (ratio < 1.) // take reciprocal value
        {
          ratio = 1. / ratio;
        }
        // by now, we are sure that ratio >= 1
        if (ratio > ratio_max) // update running max
        {
          ratio_max = ratio;
        }
        if (ratio > ratio_max_allowed)
        {
          if (!is_absdiff_small)
          {
            fuzzy_message = "ratio of numbers is large";
            return false;
          }
          else
          {
            fuzzy_message
              = "ratio of numbers is large, but numbers are small";
            return true;
          }
        }
        else
        {
          fuzzy_message = "ratio of numbers is small";
          return true;
        }
      }
    }
  }
}
// Implements TEST_STRING_EQUAL: exact comparison of two strings. The result
// is folded into the global section state; failures are always printed and
// recorded, successes only in verbose mode.
void testStringEqual(const char* /*file*/, int line,
                     const std::string& string_1,
                     const char* string_1_stringified,
                     const std::string& string_2,
                     const char* string_2_stringified)
{
  ++test_count;
  test_line = line;
  this_test = (string_1 == string_2);
  test = test && this_test;
  initialNewline();
  // Success and failure messages differ only in their "+/-" marker.
  const bool report = this_test ? (TEST::verbose > 1) : true;
  if (report)
  {
    stdcout << (this_test ? " + line " : " - line ")
            << line << ": TEST_STRING_EQUAL("
            << string_1_stringified << ',' << string_2_stringified
            << "): got \"" << string_1 << "\", expected \"" << string_2
            << "\"\n";
  }
  if (!this_test)
  {
    failed_lines_list.push_back(line);
  }
}
// Implements TEST_STRING_SIMILAR: fuzzy comparison of two strings via
// FuzzyStringComparator using the globally configured absolute/relative
// thresholds and whitelist. The comparator's diagnostics are captured in
// fuzzy_message and its extrema copied into the TEST globals.
void testStringSimilar(const char* /*file*/, int line,
const std::string& string_1,
const char* string_1_stringified,
const std::string& string_2,
const char* string_2_stringified
)
{
++TEST::test_count;
TEST::test_line = line;
TEST::fuzzy_message.clear();
// Configure a fresh comparator from the current global settings.
FuzzyStringComparator fsc;
fsc.setAcceptableAbsolute(absdiff_max_allowed);
fsc.setAcceptableRelative(ratio_max_allowed);
fsc.setVerboseLevel(2);
fsc.setWhitelist(whitelist);
// Capture the comparator's log output instead of writing it to stdout.
std::ostringstream os;
fsc.setLogDestination(os);
fsc.use_prefix_ = true;
TEST::this_test = fsc.compareStrings(string_1, string_2);
// Copy results back into the global TEST state for reporting.
TEST::fuzzy_message = os.str();
TEST::absdiff = fsc.absdiff_max_;
TEST::ratio = fsc.ratio_max_;
TEST::line_num_1_max = fsc.line_num_1_max_;
TEST::line_num_2_max = fsc.line_num_2_max_;
TEST::test = TEST::test && TEST::this_test;
TEST::initialNewline();
if (TEST::this_test)
{
// Success is reported only in verbose mode.
if (TEST::verbose > 1)
{
stdcout << " + line " << line << ": TEST_STRING_SIMILAR("
<< string_1_stringified << ',' << string_2_stringified << "): "
"absolute: " << TEST::absdiff << " (" << TEST::absdiff_max_allowed
<< "), relative: " << TEST::ratio << " ("
<< TEST::ratio_max_allowed << ") +\n";
stdcout << "got:\n";
TEST::printWithPrefix(string_1, TEST::line_num_1_max);
stdcout << "expected:\n";
TEST::printWithPrefix(string_2, TEST::line_num_2_max);
}
}
else
{
// Failure: print both strings (marking the worst line) plus the
// comparator's diagnostic message, and record the failed line.
stdcout << " - line " << TEST::test_line
<< ": TEST_STRING_SIMILAR(" << string_1_stringified << ','
<< string_2_stringified << ") ... -\n"
"got:\n";
TEST::printWithPrefix(string_1, TEST::line_num_1_max);
stdcout << "expected:\n";
TEST::printWithPrefix(string_2, TEST::line_num_2_max);
stdcout << "message: \n";
stdcout << TEST::fuzzy_message;
failed_lines_list.push_back(line);
}
}
// Fuzzy comparison of two files via FuzzyStringComparator using the globally
// configured thresholds and whitelist. The comparator's log output is
// captured in fuzzy_message and its extrema copied into the TEST globals.
// @return true iff the files are considered similar
bool
isFileSimilar(const std::string& filename_1,
const std::string& filename_2)
{
fuzzy_message.clear();
// Configure a fresh comparator from the current global settings.
FuzzyStringComparator fsc;
fsc.setAcceptableAbsolute(absdiff_max_allowed);
fsc.setAcceptableRelative(ratio_max_allowed);
fsc.setVerboseLevel(2);
fsc.setWhitelist(whitelist);
// Capture the comparator's log output instead of writing it to stdout.
std::ostringstream os;
fsc.setLogDestination(os);
fsc.use_prefix_ = true;
bool result = fsc.compareFiles(filename_1, filename_2);
// Copy results back into the global TEST state for reporting.
fuzzy_message = os.str();
absdiff = fsc.absdiff_max_;
ratio = fsc.ratio_max_;
line_num_1_max = fsc.line_num_1_max_;
line_num_2_max = fsc.line_num_2_max_;
return result;
}
void printLastException(std::ostream& out)
{
std::exception_ptr ex = std::current_exception();
try
{
std::rethrow_exception(ex);
}
catch (::OpenMS::Exception::BaseException& e)
{
TEST::this_test = false;
TEST::test = false;
TEST::all_tests = false;
{
TEST::initialNewline();
out << "Error: Caught unexpected OpenMS exception of type '" << e.getName() << "'";
if ((e.getLine() > 0) && std::strcmp(e.getFile(), ""))
{
out << " thrown in line " << e.getLine() << " of file '" << e.getFile() << "' in function '" << e.getFunction() << "'";
}
out << " - Message: " << e.what() << '\n';
}
} /* catch std:: exceptions */
catch (std::exception& e)
{
TEST::this_test = false;
TEST::test = false;
TEST::all_tests = false;
{
TEST::initialNewline();
out << "Error: Caught unexpected std::exception\n";
out << " - Message: " << e.what() << '\n';
}
} /* catch all other exceptions */
catch (...)
{
TEST::this_test = false;
TEST::test = false;
TEST::all_tests = false;
{
TEST::initialNewline();
out << "Error: Caught unidentified and unexpected exception - No message.\n";
}
}
}
// Final bookkeeping of a test binary: validates all registered temporary
// files, prints the PASSED/FAILED verdict (with failed line numbers on
// failure), removes the temporary files on success, and returns the process
// exit code (0 on success, 1 on failure). Note that temporary files are
// deliberately kept when the test failed, to allow post-mortem inspection.
int endTestPostProcess(std::ostream& out)
{
/* check validity of temporary files if known */
if (!TEST::validate(TEST::tmp_file_list))
{
TEST::all_tests = false;
}
if (TEST::verbose == 0)
{
out << "Output of successful tests were suppressed. Set the environment variable 'OPENMS_TEST_VERBOSE=True' to enable them.\n";
} /* check for exit code */
if (!TEST::all_tests)
{
out << "FAILED\n";
if (TEST::add_message != "")
out << "Message: " << TEST::add_message << '\n';
out << "Failed lines: ";
for (OpenMS::Size i = 0; i < TEST::failed_lines_list.size(); ++i)
{
out << TEST::failed_lines_list[i] << " ";
}
out << '\n';
return 1;
}
else
{ /* remove temporary files*/
TEST::removeTempFiles();
out << "PASSED";
if (TEST::add_message != "")
out << " (" << TEST::add_message << ")";
out << '\n';
return 0;
}
}
// Closes a test section: folds the section result into the overall result
// and prints the per-section passed/failed verdict.
void endSectionPostProcess(std::ostream& out, const int line)
{
TEST::all_tests = TEST::all_tests && TEST::test;
if (TEST::test)
{
out << ": passed\n";
}
else
{
out << ": failed\n";
}
// Warn about sections without any subtest — but only when the section name
// contains '~'. NOTE(review): presumably this targets destructor sections
// (e.g. "~ClassName()"); confirm the intended condition.
if (TEST::test_count == 0)
{
if (OpenMS::String(TEST::test_name).has('~'))
out << "Warning: no subtests performed in '" << TEST::test_name << "' (line " << line << ")!\n";
}
stdcout << '\n';
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CONCEPT/PrecisionWrapper.cpp | .cpp | 386 | 12 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Marc Sturm, Clemens Groepl $
// --------------------------------------------------------------------------
namespace OpenMS
{
// Intentionally empty: PrecisionWrapper is implemented entirely in its
// header; this translation unit only gives the build system a .cpp to compile.
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CONCEPT/RAIICleanup.cpp | .cpp | 370 | 14 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
namespace OpenMS
{
// Intentionally empty: RAIICleanup is implemented entirely in its header;
// this translation unit only gives the build system a .cpp to compile.
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CONCEPT/UniqueIdIndexer.cpp | .cpp | 418 | 15 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Clemens Groepl $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/UniqueIdIndexer.h>
namespace OpenMS
{
// Intentionally empty: UniqueIdIndexer is a header-only template; this
// translation unit merely includes the header and anchors the build.
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CONCEPT/FuzzyStringComparator.cpp | .cpp | 25,615 | 762 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Clemens Groepl, Stephan Aiche $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/FuzzyStringComparator.h>
#include <OpenMS/DATASTRUCTURES/StringUtils.h>
#include <OpenMS/DATASTRUCTURES/StringListUtils.h>
#include <OpenMS/FORMAT/TextFile.h>
#include <OpenMS/SYSTEM/File.h>
#include <QtCore/QDir>
#include <fstream>
#include <istream>
#include <iomanip>
#include <iostream>
// #define DEBUG_FUZZY
namespace OpenMS
{
// Default construction: log to std::cout, require (near-)exact matches
// (relative threshold 1.0, absolute threshold 0.0 — relax via the setters),
// no whitelists, tab width 8, first column 1, verbose level 2.
FuzzyStringComparator::FuzzyStringComparator() :
log_dest_(&std::cout),
input_1_name_("input_1"),
input_2_name_("input_2"),
input_line_1_(),
input_line_2_(),
line_num_1_(0),
line_num_2_(0),
line_num_1_max_(-1), // -1 == "no numeric difference recorded yet"
line_num_2_max_(-1),
line_str_1_max_(),
line_str_2_max_(),
ratio_max_allowed_(1.0),
ratio_max_(1.0),
absdiff_max_allowed_(0.0),
absdiff_max_(0.0),
element_1_(),
element_2_(),
is_absdiff_small_(false),
verbose_level_(2),
tab_width_(8),
first_column_(1),
is_status_success_(true),
use_prefix_(false),
whitelist_(),
whitelist_cases_()
{
}
// No owned resources beyond standard members; defaulted destructor suffices.
FuzzyStringComparator::~FuzzyStringComparator() = default;
// Accessor for the acceptable relative deviation (always stored as >= 1).
const double& FuzzyStringComparator::getAcceptableRelative() const
{
  return this->ratio_max_allowed_;
}
// Sets the acceptable relative deviation; a value below 1 is replaced by its
// reciprocal, so the stored threshold is always >= 1.
void FuzzyStringComparator::setAcceptableRelative(const double rhs)
{
  ratio_max_allowed_ = (rhs < 1.0) ? (1.0 / rhs) : rhs;
}
// Accessor for the acceptable absolute difference (always non-negative).
const double& FuzzyStringComparator::getAcceptableAbsolute() const
{
  return this->absdiff_max_allowed_;
}
// Sets the acceptable absolute difference; negative input is negated so the
// stored threshold is always non-negative.
void FuzzyStringComparator::setAcceptableAbsolute(const double rhs)
{
  absdiff_max_allowed_ = (rhs < 0.0) ? -rhs : rhs;
}
const StringList& FuzzyStringComparator::getWhitelist() const
{
return whitelist_;
}
StringList& FuzzyStringComparator::getWhitelist()
{
return whitelist_;
}
void FuzzyStringComparator::setWhitelist(const StringList& rhs)
{
whitelist_ = rhs;
}
void FuzzyStringComparator::setMatchedWhitelist(const std::vector< std::pair<std::string, std::string> >& rhs)
{
matched_whitelist_ = rhs;
}
const std::vector< std::pair<std::string, std::string> >& FuzzyStringComparator::getMatchedWhitelist() const
{
return matched_whitelist_;
}
// Accessors for the verbosity level (0: quiet .. 3: report every difference).
const int& FuzzyStringComparator::getVerboseLevel() const
{
  return verbose_level_;
}
void FuzzyStringComparator::setVerboseLevel(const int rhs)
{
  verbose_level_ = rhs;
}
// Accessors for the tab width used when computing column positions.
const int& FuzzyStringComparator::getTabWidth() const
{
  return tab_width_;
}
void FuzzyStringComparator::setTabWidth(const int rhs)
{
  tab_width_ = rhs;
}
// Accessors for the number of the first column (used for column reporting).
const int& FuzzyStringComparator::getFirstColumn() const
{
  return first_column_;
}
void FuzzyStringComparator::setFirstColumn(const int rhs)
{
  first_column_ = rhs;
}
// Accessors for the stream that receives the comparison log/diagnostics.
std::ostream& FuzzyStringComparator::getLogDestination() const
{
  return *log_dest_;
}
void FuzzyStringComparator::setLogDestination(std::ostream& rhs)
{
  log_dest_ = &rhs;
}
// Marks the comparison as failed and, depending on verbose_level_, writes a
// detailed diagnostic (offending elements, line/column positions, thresholds,
// whitelist hits, and a ready-to-use diff command line) to the log
// destination. Throws AbortComparison when verbose_level_ < 3, so that only
// the first difference is reported.
void FuzzyStringComparator::reportFailure_(char const* const message) const
{
// We neither want this entire method be non-const nor make
// is_status_success_ a mutable. So lets hack around it. (Documented in
// class.)
const_cast<bool&>(is_status_success_) = false;
if (verbose_level_ >= 1)
{
// Pre-compute column/prefix info for both offending lines.
PrefixInfo_ prefix1(input_line_1_, tab_width_, first_column_);
PrefixInfo_ prefix2(input_line_2_, tab_width_, first_column_);
std::string prefix;
if (use_prefix_)
{
prefix = " :|: ";
}
// Tabular dump of the two elements that failed to match.
*log_dest_ << std::boolalpha <<
prefix << "FAILED: '" << message << "'\n" <<
prefix << "\n" <<
prefix << " input:\tin1\tin2\n" <<
prefix << " line:\t" << line_num_1_ << '\t' << line_num_2_ << "\n" <<
prefix << " pos/col:\t" << input_line_1_.line_position_ << '/' << prefix1.line_column << '\t' << input_line_2_.line_position_ << '/' << prefix2.line_column << "\n" <<
prefix << " --------------------------------\n" <<
prefix << " is_number:\t" << element_1_.is_number << '\t' << element_2_.is_number << "\n" <<
prefix << " numbers:\t" << element_1_.number << '\t' << element_2_.number << "\n" <<
prefix << " is_space:\t" << element_1_.is_space << '\t' << element_2_.is_space << "\n" <<
prefix << " is_letter:\t" << (!element_1_.is_number && !element_1_.is_space) << '\t' << (!element_2_.is_number && !element_2_.is_space) << "\n" <<
prefix << " letters:\t\"" << element_1_.letter << "\"\t\"" << element_2_.letter << "\"\n" <<
prefix << " char_codes:\t" << static_cast<UInt>(element_1_.letter) << "\t" << static_cast<UInt>(element_2_.letter) << "\n" <<
prefix << " --------------------------------\n" <<
prefix << " relative_max: " << ratio_max_ << "\n" <<
prefix << " relative_acceptable: " << ratio_max_allowed_ << "\n" <<
prefix << " --------------------------------\n" <<
prefix << " absolute_max: " << absdiff_max_ << "\n" <<
prefix << " absolute_acceptable: " << absdiff_max_allowed_ << '\n';
writeWhitelistCases_(prefix);
// Show both offending lines with a caret-style prefix marking the
// mismatch position, plus "file:line:column:" lines for editor jumping
// and a platform-appropriate diff/merge command line.
*log_dest_
<< prefix << "\n"
<< prefix << "Offending lines:\t\t\t(tab_width = " << tab_width_ << ", first_column = " << first_column_ << ")\n"
<< prefix << "\n"
<< prefix << "in1: " << QDir::toNativeSeparators(File::absolutePath(input_1_name_).toQString()).toStdString() << " (line: " << line_num_1_ << ", position/column: " << input_line_1_.line_position_ << '/' << prefix1.line_column << ")\n"
<< prefix << prefix1.prefix << "!\n"
<< prefix << prefix1.prefix_whitespaces << OpenMS::String(input_line_1_.line_.str()).suffix(input_line_1_.line_.str().size() - prefix1.prefix.size()) << "\n"
<< prefix << "\n"
<< prefix << "in2: " << QDir::toNativeSeparators(File::absolutePath(input_2_name_).toQString()).toStdString() << " (line: " << line_num_2_ << ", position/column: " << input_line_2_.line_position_ << '/' << prefix2.line_column << ")\n"
<< prefix << prefix2.prefix << "!\n"
<< prefix << prefix2.prefix_whitespaces << OpenMS::String(input_line_2_.line_.str()).suffix(input_line_2_.line_.str().size() - prefix2.prefix.size()) << "\n"
<< prefix << "\n\n"
<< "Easy Access:" << "\n"
<< QDir::toNativeSeparators(File::absolutePath(input_1_name_).toQString()).toStdString() << ':' << line_num_1_ << ":" << prefix1.line_column << ":\n"
<< QDir::toNativeSeparators(File::absolutePath(input_2_name_).toQString()).toStdString() << ':' << line_num_2_ << ":" << prefix2.line_column << ":\n"
<< "\n"
#ifdef WIN32
<< "TortoiseGitMerge"
<< " /base:\"" << QDir::toNativeSeparators(File::absolutePath(input_1_name_).toQString()).toStdString() << "\""
<< " /mine:\"" << QDir::toNativeSeparators(File::absolutePath(input_2_name_).toQString()).toStdString() << "\""
#else
<< "diff"
<< " " << QDir::toNativeSeparators(File::absolutePath(input_1_name_).toQString()).toStdString()
<< " " << QDir::toNativeSeparators(File::absolutePath(input_2_name_).toQString()).toStdString()
#endif
<< '\n';
}
// If verbose level is low, report only the first error.
if (verbose_level_ < 3)
{
throw FuzzyStringComparator::AbortComparison();
}
return;
} // reportFailure_()
// Writes a success summary (maximal relative/absolute deviations, whitelist
// hits, and the lines where the maximal relative error occurred) to the log
// destination. Only prints if the comparison actually succeeded and
// verbose_level_ >= 2.
void FuzzyStringComparator::reportSuccess_() const
{
if (is_status_success_ && verbose_level_ >= 2)
{
std::string prefix;
if (use_prefix_)
{
prefix = " :|: ";
}
*log_dest_ <<
prefix << "PASSED.\n" <<
prefix << '\n' <<
prefix << " relative_max: " << ratio_max_ << '\n' <<
prefix << " relative_acceptable: " << ratio_max_allowed_ << '\n' <<
prefix << '\n' <<
prefix << " absolute_max: " << absdiff_max_ << '\n' <<
prefix << " absolute_acceptable: " << absdiff_max_allowed_ << '\n';
writeWhitelistCases_(prefix);
*log_dest_ << prefix << '\n';
// line_num_*_max_ == -1 means no numeric difference was ever recorded.
if (line_num_1_max_ == -1 && line_num_2_max_ == -1)
{
*log_dest_ <<
prefix << "No numeric differences were found.\n" <<
prefix << '\n';
}
else
{
// Show the lines where the maximal (still acceptable) relative error
// was attained, in "file:line:" form for editor jumping.
*log_dest_ <<
prefix << "Maximum relative error was attained at these lines, enclosed in \"\":\n" <<
prefix << '\n' <<
QDir::toNativeSeparators(input_1_name_.c_str()).toStdString() << ':' << line_num_1_max_ << ":\n" <<
"\"" << line_str_1_max_ << "\"\n" <<
'\n' <<
QDir::toNativeSeparators(input_2_name_.c_str()).toStdString() << ':' << line_num_2_max_ << ":\n" <<
"\"" << line_str_2_max_ << "\"\n" <<
std::endl;
}
}
return;
}
bool FuzzyStringComparator::compareLines_(std::string const& line_str_1, std::string const& line_str_2)
{
// in most cases, results will be identical. If not, do the expensive fuzzy compare
if (line_str_1 == line_str_2)
{
return true;
}
for (StringList::const_iterator slit = whitelist_.begin();
slit != whitelist_.end(); ++slit)
{
if (line_str_1.find(*slit) != String::npos &&
line_str_2.find(*slit) != String::npos)
{
++whitelist_cases_[*slit];
// *log_dest_ << "whitelist_ case: " << *slit << '\n';
return is_status_success_;
}
}
// check matched whitelist
// If file 1 contains element 1 and file 2 contains element 2, they are skipped over.
for (std::vector< std::pair<std::string, std::string> >::const_iterator pair_it = matched_whitelist_.begin();
pair_it != matched_whitelist_.end(); ++pair_it)
{
if ((line_str_1.find(pair_it->first) != String::npos &&
line_str_2.find(pair_it->second) != String::npos
) ||
(line_str_1.find(pair_it->second) != String::npos &&
line_str_2.find(pair_it->first) != String::npos
)
)
{
// ++whitelist_cases_[*slit];
// *log_dest_ << "whitelist_ case: " << *slit << '\n';
return is_status_success_;
}
}
input_line_1_.setToString(line_str_1);
input_line_2_.setToString(line_str_2);
try
{
while (input_line_1_.ok() && input_line_2_.ok())
{
element_1_.fillFromInputLine(input_line_1_, line_str_1);
element_2_.fillFromInputLine(input_line_2_, line_str_2);
if (element_1_.is_number)
{
if (element_2_.is_number) // we are comparing numbers
{
#ifdef DEBUG_FUZZY
std::cout << "cmp number: " << String(element_1_.number) << " : " << String(element_2_.number) << '\n';
#endif
if (element_1_.number == element_2_.number)
{
continue;
}
// check if absolute difference is small
double absdiff = element_1_.number - element_2_.number;
if (absdiff < 0)
{
absdiff = -absdiff;
}
if (absdiff > absdiff_max_)
{
absdiff_max_ = absdiff;
}
// If absolute difference is small, large relative errors will be
// tolerated in the cases below. But a large absolute difference is
// not an error, if relative error is small. We do not jump out of
// the case distinction here because we want to record the relative
// error even in case of a successful comparison.
is_absdiff_small_ = (absdiff <= absdiff_max_allowed_);
if (!element_1_.number) // element_1_.number_ is zero
{
if (!element_2_.number) // both numbers are zero
{
continue;
}
else
{
if (!is_absdiff_small_)
{
reportFailure_("element_1_.number_ is zero, but element_2_.number_ is not");
continue;
}
}
}
else // element_1_.number_ is not zero
{
if (!element_2_.number)
{
if (!is_absdiff_small_)
{
reportFailure_("element_1_.number_ is not zero, but element_2_.number_ is");
continue;
}
}
else // both numbers are not zero
{
double ratio = element_1_.number / element_2_.number;
if (ratio < 0)
{
if (!is_absdiff_small_)
{
reportFailure_("numbers have different signs");
continue;
}
}
else // ok, numbers have same sign, but we still need to check their ratio
{
if (ratio < 1) // take reciprocal value
{
ratio = 1.0 / ratio;
}
#ifdef DEBUG_FUZZY
std::cout << " check ratio: " << ratio << " vs " << ratio_max_ << '\n';
#endif
// by now, we are sure that ratio >= 1
if (ratio > ratio_max_) // update running max
{
line_num_1_max_ = line_num_1_;
line_num_2_max_ = line_num_2_;
line_str_1_max_ = line_str_1;
line_str_2_max_ = line_str_2;
if (ratio > ratio_max_allowed_)
{
#ifdef DEBUG_FUZZY
std::cout << "Ratio test failed: is larger than ratio_max \n";
#endif
if (!is_absdiff_small_)
{
ratio_max_ = ratio;
reportFailure_("ratio of numbers is too large");
continue;
}
}
}
}
// okay
continue;
}
}
}
else
{
reportFailure_("input_1 is a number, but input_2 is not");
continue;
}
}
else // input_1 is not a number
{
if (element_2_.is_number)
{
reportFailure_("input_1 is not a number, but input_2 is");
continue;
}
else // ok, both inputs are not numbers, let us compare them as characters or whitespace
{
if (element_1_.is_space)
{
if (element_2_.is_space) // ok, both inputs are whitespace
{
continue;
}
else
{
if (element_1_.letter == ASCII__CARRIAGE_RETURN) // should be 13 == ascii carriage return char
{
// we skip over '\r'
input_line_2_.line_.clear(); // reset status
input_line_2_.line_.seekg(input_line_2_.line_position_); // rewind to saved position
continue;
//reportFailure_("input_1 is carriage return, but input_2_ is not whitespace");
}
else
{
reportFailure_("input_1 is whitespace, but input_2 is not");
}
continue;
}
}
else // input_1 is not whitespace
{
if (element_2_.is_space)
{
if (element_2_.letter == ASCII__CARRIAGE_RETURN) // should be 13 == ascii carriage return char
{
// we skip over '\r'
input_line_1_.line_.clear(); // reset status
input_line_1_.line_.seekg(input_line_1_.line_position_); // rewind to saved position
continue;
//reportFailure_("input_1 is not whitespace, but input_2 is carriage return");
}
else
{
reportFailure_("input_1 is not whitespace, but input_2 is");
}
continue;
}
else // both inputs are neither numbers nor whitespace, let us compare them as characters
{
if (element_1_.letter == element_2_.letter) // ok, same characters
{
continue;
}
else
{
reportFailure_("different letters");
continue;
}
}
}
}
}
if (is_absdiff_small_)
{
is_absdiff_small_ = false;
continue;
}
verbose_level_ = 10000;
reportFailure_
("This cannot happen. You should never get here ... "
"please report this bug along with the data that produced it."
);
} // while ( input_line_1_ || input_line_2_ )
if (input_line_1_.ok() && !input_line_2_.ok())
{
reportFailure_("line from input_2 is shorter than line from input_1");
}
if (!input_line_1_.ok() && input_line_2_.ok())
{
reportFailure_("line from input_1 is shorter than line from input_2");
}
}
catch (FuzzyStringComparator::AbortComparison const&)
{
// *log_dest_ << "compareLines_(): Caught FuzzyStringComparator::AbortComparison\n";
}
return is_status_success_;
} // compareLines_()
/// Compare two in-memory strings with fuzzy number matching.
/// Wraps both strings into input streams and delegates to compareStreams().
bool FuzzyStringComparator::compareStrings(std::string const& lhs, std::string const& rhs)
{
  std::istringstream stream_lhs(lhs);
  std::istringstream stream_rhs(rhs);
  return compareStreams(stream_lhs, stream_rhs);
} // compareStrings()
/// Compare two input streams line by line; returns the overall success status.
/// Both streams are consumed in lock-step: readNextLine_() skips empty and
/// whitespace-only lines, then each pair of lines is checked by compareLines_().
bool FuzzyStringComparator::compareStreams(std::istream& input_1, std::istream& input_2)
{
  // reset 'success' state to true, in case its currently false due to a prior call (reporting depends on it)
  const_cast<bool&>(is_status_success_) = true;
  std::string line_str_1;
  std::string line_str_2;
  while (input_1 || input_2)
  {
    readNextLine_(input_1, line_str_1, line_num_1_);
#ifdef DEBUG_FUZZY
    std::cout << "eof: " << input_1.eof() << " failbit: " << input_1.fail() << " badbit: " << input_1.bad() << " reading " << input_1.tellg () << "chars\n";
#endif
    readNextLine_(input_2, line_str_2, line_num_2_);
#ifdef DEBUG_FUZZY
    std::cout << "eof: " << input_2.eof() << " failbit: " << input_2.fail() << " badbit: " << input_2.bad() << " reading " << input_2.tellg () << "chars\n";
    std::cout << line_str_1 << "\n" << line_str_2 << '\n';
#endif
    // compare the two lines of input;
    // at high verbosity we keep going after a failure so all differences get reported
    if (!compareLines_(line_str_1, line_str_2) && verbose_level_ < 3)
    {
      break;
    }
  } // while ( input_1 || input_2 )
  reportSuccess_();
  return is_status_success_;
} // compareStreams()
/// Read the next non-empty, non-whitespace-only line from 'input_stream' into
/// 'line_string', incrementing 'line_number' once per physical line read
/// (including the final failed read at EOF).
void FuzzyStringComparator::readNextLine_(std::istream& input_stream, std::string& line_string, int& line_number) const
{
  // use TextFile::getLine for reading, since it will remove \r automatically on all platforms
  // without much overhead. This allows to compare otherwise equal lines between files quickly
  // (see compareLines_(...)).
  line_string.clear();
  while (true)
  {
    ++line_number;
    if (!TextFile::getLine(input_stream, line_string))
    {
      return; // stream exhausted
    }
    if (line_string.empty())
    {
      continue; // read next line
    }
    // accept the line as soon as it contains any non-whitespace character
    for (std::string::const_iterator ch = line_string.begin(); ch != line_string.end(); ++ch)
    {
      if (!isspace((unsigned char)*ch))
      {
        return;
      }
    }
    // line was whitespace only -> keep reading
  }
}
/// Open both files in binary mode and compare their contents fuzzily.
/// Refuses to compare a file against itself. Returns the comparison status.
bool FuzzyStringComparator::compareFiles(const std::string& filename_1, const std::string& filename_2)
{
  // remember the file names for use in failure reports
  input_1_name_ = filename_1;
  input_2_name_ = filename_2;
  if (input_1_name_ == input_2_name_)
  {
    *log_dest_ << "Error: first and second input file have the same name. That's cheating!\n";
    return false;
  }
  std::ifstream stream_1;
  std::ifstream stream_2;
  // open the first file, then the second; bail out on the first failure
  if (!openInputFileStream_(input_1_name_, stream_1) || !openInputFileStream_(input_2_name_, stream_2))
  {
    return false;
  }
  //------------------------------------------------------------
  // main loop
  compareStreams(stream_1, stream_2);
  return is_status_success_;
} // compareFiles()
bool FuzzyStringComparator::openInputFileStream_(const std::string& filename, std::ifstream& input_stream) const
{
input_stream.open(filename.c_str(), std::ios::in | std::ios::binary);
if (!input_stream)
{
*log_dest_ << "Error opening first input file '" << filename << "'.\n";
return false;
}
input_stream.unsetf(std::ios::skipws);
return true;
}
/// Write a table of all matched whitelist entries (and how often each matched)
/// to the log, each line prefixed with 'prefix'. No output if nothing matched.
void FuzzyStringComparator::writeWhitelistCases_(const std::string& prefix) const
{
  if (!whitelist_cases_.empty())
  {
    *log_dest_ <<
      prefix << '\n' <<
      prefix << " whitelist cases:\n";
    // first pass: find the longest key so the columns line up
    Size length = 0;
    for (std::map<String, UInt>::const_iterator wlcit = whitelist_cases_.begin();
         wlcit != whitelist_cases_.end(); ++wlcit)
    {
      if (wlcit->first.size() > length)
      {
        length = wlcit->first.size();
      }
    }
    // second pass: print "key" and match count, left/right aligned
    for (std::map<String, UInt>::const_iterator wlcit = whitelist_cases_.begin();
         wlcit != whitelist_cases_.end(); ++wlcit)
    {
      *log_dest_ <<
        prefix << "    " << std::setw(length + 3) << std::left <<
        ("\"" + wlcit->first + "\"") << std::setw(3) << std::right <<
        wlcit->second << "x\n";
    }
  }
}
/// Read the next element (whitespace run, number, or single character) from
/// 'input_line'. 'str_line' must be the string backing the line stream; it is
/// used to parse a double without disturbing the stream state.
void FuzzyStringComparator::StreamElement_::fillFromInputLine(InputLine& input_line, const std::string& str_line)
{
  // first reset all internal variables so we do not mess with
  // old values
  reset();
  input_line.updatePosition();
  input_line.line_ >> letter; // read letter
  if ((is_space = (isspace(letter) != 0))) // is whitespace?
  {
    input_line.line_ >> std::ws; // skip over further whitespace
  }
  else
  {
    // go back to initial position and try to read as double
    input_line.seekGToSavedPosition();
    auto it_start = str_line.begin() + (int)input_line.line_position_;
    auto it_start_fixed = it_start;
    // extracting the double does NOT modify the stream (since we work on the string)
    is_number = StringUtils::extractDouble(it_start, str_line.end(), number);
    if (is_number)
    { // forward the stream by the number of characters the double occupied
      // (extractDouble advanced 'it_start'; 'it_start_fixed' still marks the start)
      input_line.line_.seekg(long(input_line.line_.tellg()) + long(std::distance(it_start_fixed, it_start)));
    }
    else
    { // no double/float/int either ... so read as letter
      input_line.line_ >> letter;
    }
  }
}
/// Reset all fields; 'number' becomes NaN so a stale value can never be
/// mistaken for successfully parsed data.
void FuzzyStringComparator::StreamElement_::reset()
{
  is_number = false;
  is_space = false;
  letter = '\0';
  number = std::numeric_limits<double>::quiet_NaN();
}

/// Default construction: an "empty" element (no number, no space, NUL letter).
FuzzyStringComparator::StreamElement_::StreamElement_() :
  number(0),
  letter(0),
  is_number(false),
  is_space(false)
{
}
/// Default construction: empty line stream.
FuzzyStringComparator::InputLine::InputLine() :
  line_()
{
}

/// Load a new line into the internal stream and reset stream state/position.
void FuzzyStringComparator::InputLine::setToString(const std::string& s)
{
  line_.str(s);
  line_.seekp(0);  // NOTE(review): moves the PUT pointer; str() already reset both — presumably kept for safety
  line_.clear();
  line_.unsetf(std::ios::skipws); // whitespace is significant for the comparison
  line_position_ = line_.tellg();
}

/// Remember the current read position (used to rewind after look-ahead).
void FuzzyStringComparator::InputLine::updatePosition()
{
  // tellg() returns -1 once EOF was hit; in that case record the end of the string instead
  line_position_ = (Int(line_.tellg()) != -1 ? line_.tellg() : std::ios::pos_type(line_.str().length())); // save current reading position
}

/// Rewind the read pointer to the position saved by updatePosition().
void FuzzyStringComparator::InputLine::seekGToSavedPosition()
{
  line_.clear(); // reset status (seekg on a failed stream would be a no-op)
  line_.seekg(line_position_); // rewind to saved position
}

/// True while the stream can still be read from.
bool FuzzyStringComparator::InputLine::ok() const
{
  return !line_.fail(); // failbit AND badbit are both NOT set; using fail() seems the only portable solution for both C++98 and C++11
  // operator bool() (C++11 only) and operator void*() (C++98 only) are both not very sexy since they are not "safe bool idiomatic" and would require
  // a macro here... So we use a real function name (both internally and externally)
}
/// Build the text prefix (everything up to the current line position) and a
/// whitespace-only copy of it, used to place a '^' marker under the offending
/// column in failure reports. Tabs are kept so the marker aligns in terminals;
/// 'line_column' is the resulting visual column (tab stops every this_tab_width_).
FuzzyStringComparator::PrefixInfo_::PrefixInfo_(const InputLine& input_line, const int this_tab_width_, const int this_first_column_) :
  prefix(input_line.line_.str()), line_column(0)
{
  prefix = prefix.prefix(size_t(input_line.line_position_));
  prefix_whitespaces = prefix;
  for (String::iterator iter = prefix_whitespaces.begin(); iter != prefix_whitespaces.end(); ++iter)
  {
    if (*iter != '\t')
    {
      *iter = ' '; // any visible character occupies exactly one column
      ++line_column;
    }
    else
    {
      // a tab jumps to the next multiple of the tab width
      line_column = (line_column / this_tab_width_ + 1) * this_tab_width_;
    }
  }
  line_column += this_first_column_;
}
} //namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CONCEPT/Init.cpp | .cpp | 754 | 35 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/Init.h>
#include <xercesc/util/PlatformUtils.hpp>
namespace OpenMS::Internal
{
// Initialize xerces
// see ticket #352 for more details
// RAII guard: the Xerces-C XML library must be initialized exactly once before
// any parser is used and terminated at program exit. The single global 'xinit'
// instance below ties both calls to static initialization/teardown.
struct xerces_init
{
  xerces_init()
  {
    xercesc::XMLPlatformUtils::Initialize();
  }

  ~xerces_init()
  {
    xercesc::XMLPlatformUtils::Terminate();
  }
};

// the one global instance performing init/terminate
const xerces_init xinit;
} //OpenMS //Internal
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CONCEPT/LogStream.cpp | .cpp | 18,226 | 665 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow, Stephan Aiche $
// $Authors: Chris Bielow, Stephan Aiche, Andreas Bertsch $
// --------------------------------------------------------------------------
/**
Generously provided by the BALL people, taken from version 1.2
with slight modifications
Originally implemented by OK who refused to take any responsibility
for the code ;)
*/
#include <limits>
#include <string>
#include <cstring>
#include <cstdio>
#include <algorithm> // std::min
#include <OpenMS/CONCEPT/Colorizer.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/StreamHandler.h>
#include <sstream>
#include <iostream>
#define BUFFER_LENGTH 32768
using namespace std;
namespace OpenMS
{
namespace Logger
{
// class-level constants (declared in LogStream.h)
const time_t LogStreamBuf::MAX_TIME = numeric_limits<time_t>::max();
const std::string LogStreamBuf::UNKNOWN_LOG_LEVEL = "UNKNOWN_LOG_LEVEL";

/// C'tor: allocate the character buffer and register it with the std::streambuf
/// base. One byte is held back so overflow() always has room for the pending char.
LogStreamBuf::LogStreamBuf(const std::string& log_level, Colorizer* col)
  : std::streambuf(),
    level_(log_level),
    colorizer_(col)
{
  pbuf_ = new char[BUFFER_LENGTH];
  std::streambuf::setp(pbuf_, pbuf_ + BUFFER_LENGTH - 1);
}

/// C'tor from an existing buffer: allocates a fresh character buffer but copies
/// the registered output streams (used by the thread-local log stream accessors
/// below so all threads write to the same destinations).
LogStreamBuf::LogStreamBuf(LogStreamBuf* source_buf, Colorizer* col)
  : std::streambuf(),
    level_(source_buf ? source_buf->level_ : UNKNOWN_LOG_LEVEL),
    colorizer_(col)
{
  pbuf_ = new char[BUFFER_LENGTH];
  std::streambuf::setp(pbuf_, pbuf_ + BUFFER_LENGTH - 1);
  // Copy the stream_list_ from the source buffer
  if (source_buf)
  {
    stream_list_ = source_buf->stream_list_;
  }
}

/// mutable access to the list of attached output streams
std::list<LogStreamBuf::StreamStruct>& LogStreamBuf::getStreamList_()
{
  return stream_list_;
}

/// read-only access to the list of attached output streams
const std::list<LogStreamBuf::StreamStruct>& LogStreamBuf::getStreamList_() const
{
  return stream_list_;
}
/// D'tor: flush all pending output (including any trailing line without '\n'
/// and the "occurred N times" cache summaries) and release the buffer.
LogStreamBuf::~LogStreamBuf()
{
  // Prevent issue on OSX with OpenMP: destructors of global objects seem to be called after tearing down the OpenMP context, we therefore cannot use any locks here.
  syncLF_();
  {
    clearCache(); // report suppressed duplicate messages before we go away
    if (!incomplete_line_.empty())
    {
      distribute_(incomplete_line_); // push out the final, unterminated line
    }
    delete[] pbuf_;
    pbuf_ = nullptr;
  }
}
/// std::streambuf overflow handler: store the pending character (there is
/// always one reserved byte at the buffer end) and flush immediately.
int LogStreamBuf::overflow(int c)
{
  if (c == traits_type::eof())
  {
    return traits_type::eof();
  }
  *pptr() = c;
  pbump(1);
  sync();
  return c;
}
/// Access the attached buffer. std::ios stores it as std::streambuf*; a
/// LogStream is only ever constructed with a LogStreamBuf, so the downcast is safe.
LogStreamBuf* LogStream::rdbuf()
{
  return static_cast<LogStreamBuf*>(std::ios::rdbuf());
}

/// Convenience access to the buffer's members (e.g. log->level_).
LogStreamBuf* LogStream::operator->()
{
  return rdbuf();
}
void LogStream::setLevel(std::string level)
{
if (rdbuf() == nullptr)
{
return;
}
// set the new level
rdbuf()->level_ = std::move(level);
}
std::string LogStream::getLevel()
{
if (rdbuf() != nullptr)
{
return rdbuf()->level_;
}
else
{
return LogStreamBuf::UNKNOWN_LOG_LEVEL;
}
}
// caching methods
/// Return the next value of the monotonically increasing cache timestamp counter.
Size LogStreamBuf::getNextLogCounter_()
{
  return ++log_cache_counter_;
}
bool LogStreamBuf::isInCache_(std::string const & line)
{
//cout << "LogCache (count)" << log_cache_.count(line) << endl;
if (log_cache_.count(line) == 0)
{
return false;
}
else
{
// increment counter
log_cache_[line].counter++;
// remove old entry
log_time_cache_.erase(log_cache_[line].timestamp);
// update timestamp
Size counter_value = getNextLogCounter_();
log_cache_[line].timestamp = counter_value;
log_time_cache_[counter_value] = line;
return true;
}
}
/// Insert 'line' into the duplicate-suppression cache. The cache holds at most
/// two distinct lines: if it is full, the least recently seen line is evicted
/// and, if it was suppressed at least once, a summary message
/// ("<line> occurred N times") is returned for the caller to emit.
std::string LogStreamBuf::addToCache_(std::string const & line)
{
  std::string extra_message = "";
  if (log_cache_.size() > 1) // check if we need to remove one of the entries
  {
    // get smallest key, i.e. the oldest timestamp == least recently seen line
    map<Size, string>::iterator it = log_time_cache_.begin();

    // check if message occurred more then once
    if (log_cache_[it->second].counter != 0)
    {
      std::stringstream stream;
      // +1 so the count includes the original occurrence, not just the repeats
      stream << "<" << it->second << "> occurred " << ++log_cache_[it->second].counter << " times";
      extra_message = stream.str();
    }
    log_cache_.erase(it->second);
    log_time_cache_.erase(it);
  }
  // register the new line with a fresh timestamp and zero repeats
  Size counter_value = getNextLogCounter_();
  log_cache_[line].counter = 0;
  log_cache_[line].timestamp = counter_value;
  log_time_cache_[counter_value] = line;
  return extra_message;
}
/// Flush the duplicate-suppression cache: emit an "occurred N times" summary
/// for every line that was suppressed at least once, then drop all cache state.
void LogStreamBuf::clearCache()
{
  for (auto& entry : log_cache_)
  {
    if (entry.second.counter != 0)
    {
      std::stringstream stream;
      // +1 so the count includes the original occurrence as well
      stream << "<" << entry.first << "> occurred " << ++(entry.second).counter << " times";
      distribute_(stream.str());
    }
  }
  // remove all entries from cache
  log_cache_.clear();
  log_time_cache_.clear();
}
void LogStreamBuf::distribute_(const std::string& outstring)
{
// if there are any streams in our list, we
// copy the line into that streams, too and flush them
std::list<StreamStruct>::iterator list_it = stream_list_.begin();
for (; list_it != stream_list_.end(); ++list_it)
{
if (colorizer_)
{
*(list_it->stream) << (*colorizer_)(); // enable color
}
*(list_it->stream) << expandPrefix_(list_it->prefix, time(nullptr))
<< outstring;
if (colorizer_)
{
*(list_it->stream) << (*colorizer_).undo(); // disable color
}
*(list_it->stream) << std::endl;
if (list_it->target != nullptr)
{
list_it->target->logNotify();
}
}
}
/// Lock-free sync: split the streambuf contents into complete lines, run each
/// through the duplicate-suppression cache and distribute them to the attached
/// streams. A trailing partial line (no '\n' yet) is stashed in incomplete_line_.
/// Called from sync() and from the destructor (where locks must be avoided).
int LogStreamBuf::syncLF_()
{
  // sync our stream buffer...
  if (pptr() != pbase())
  {
    // check if we have attached streams, so we don't waste time to
    // prepare the output
    if (!stream_list_.empty())
    {
      char *line_start = pbase();
      char *line_end = pbase();
      // NOTE(review): 'static' scratch buffer — not thread-safe by itself;
      // presumably relies on each thread having its own LogStreamBuf. TODO confirm.
      static char buf[BUFFER_LENGTH];
      while (line_end < pptr())
      {
        // search for the first end of line
        for (; line_end < pptr() && *line_end != '\n'; line_end++)
        {
        }

        if (line_end >= pptr())
        {
          // Copy the incomplete line to the incomplete_line_ buffer
          size_t length = line_end - line_start;
          length = std::min(length, (size_t) (BUFFER_LENGTH - 1));
          strncpy(&(buf[0]), line_start, length);

          // if length was too large, we copied one byte less than BUFFER_LENGTH to have
          // room for the final \0
          buf[length] = '\0';

          incomplete_line_ += &(buf[0]);

          // mark everything as read
          line_end = pptr() + 1;
        }
        else
        {
          // note: pptr() - pbase() should be bounded by BUFFER_LENGTH, so this should always work
          memcpy(&(buf[0]), line_start, line_end - line_start + 1);
          buf[line_end - line_start] = '\0';

          // assemble the string to be written
          // (consider leftovers of the last buffer from incomplete_line_)
          std::string outstring;
          std::swap(outstring, incomplete_line_); // init outstring, while resetting incomplete_line_
          outstring += &(buf[0]);

          // avoid adding empty lines to the cache
          if (outstring.empty())
          {
            distribute_(outstring);
          }
          // check if we have already seen this log message
          else if (!isInCache_(outstring))
          {
            // add line to the log cache
            std::string extra_message = addToCache_(outstring);

            // send outline (and extra_message) to attached streams
            if (!extra_message.empty())
            {
              distribute_(extra_message);
            }
            distribute_(outstring);
          }

          // update the line pointers (increment both)
          line_start = ++line_end;
        }
      }
    }
    // remove all processed lines from the buffer
    pbump((int) (pbase() - pptr()));
  }
  return 0;
}
/// std::streambuf sync hook — delegates to the lock-free implementation.
int LogStreamBuf::sync()
{
  return syncLF_();
}
/// Expand '%'-escape sequences in a log line prefix:
///   %% literal '%', %y log level, %T HH:MM:SS, %t HH:MM,
///   %D YYYY/MM/DD, %d MM/DD, %S date+time, %s short date+time.
/// Unknown escapes are dropped. 'time' supplies the timestamp to format.
string LogStreamBuf::expandPrefix_
  (const std::string & prefix, time_t time) const
{
  string::size_type index = 0; // position of the next '%'
  Size copied_index = 0;       // everything before this position was already copied to 'result'
  string result;

  while ((index = prefix.find('%', index)) != String::npos)
  {
    // append any constant parts of the string to the result
    if (copied_index < index)
    {
      result.append(prefix.substr(copied_index, index - copied_index));
      copied_index = (SignedSize)index;
    }

    if (index < prefix.size())
    {
      char buffer[64] = "";
      char * buf = &(buffer[0]);

      switch (prefix[index + 1])
      {
      case '%': // append a '%' (escape sequence)
        result.append("%");
        break;

      case 'y': // append the message type (error/warning/information)
        result.append(level_);
        break;

      case 'T': // time: HH:MM:SS
        strftime(buf, 64, "%H:%M:%S", localtime(&time));
        result.append(buf);
        break;

      case 't': // time: HH:MM
        strftime(buf, 64, "%H:%M", localtime(&time));
        result.append(buf);
        break;

      case 'D': // date: DD.MM.YYYY
        strftime(buf, 64, "%Y/%m/%d", localtime(&time));
        result.append(buf);
        break;

      case 'd': // date: DD.MM.
        strftime(buf, 64, "%m/%d", localtime(&time));
        result.append(buf);
        break;

      case 'S': // time+date: DD.MM.YYYY, HH:MM:SS
        strftime(buf, 64, "%Y/%m/%d, %H:%M:%S", localtime(&time));
        result.append(buf);
        break;

      case 's': // time+date: DD.MM., HH:MM
        strftime(buf, 64, "%m/%d, %H:%M", localtime(&time));
        result.append(buf);
        break;

      default: // unknown escape: emit nothing
        break;
      }

      // skip over the two characters of the escape sequence
      index += 2;
      copied_index += 2;
    }
  }

  // append the constant tail after the last escape
  if (copied_index < prefix.size())
  {
    result.append(prefix.substr(copied_index, prefix.size() - copied_index));
  }

  return result;
}
/// C'tor: starts out unregistered.
LogStreamNotifier::LogStreamNotifier() :
  registered_at_(nullptr)
{
}

/// D'tor: detach from the log stream we were registered at (if any).
LogStreamNotifier::~LogStreamNotifier()
{
  unregister();
}

/// Called whenever a line was written to the observed log; default is a no-op.
void LogStreamNotifier::logNotify()
{
}
/// Detach this notifier's stream from the log it was registered at (no-op if
/// it was never registered).
void LogStreamNotifier::unregister()
{
  if (registered_at_ != nullptr)
  {
    registered_at_->remove(stream_);
    registered_at_ = nullptr;
  }
}
/// Register this notifier's stream at 'log'. A notifier is attached to at most
/// one log at a time, so any previous registration is dropped first.
void LogStreamNotifier::registerAt(LogStream & log)
{
  unregister();
  registered_at_ = &log;
  log.insertNotification(stream_, *this);
}
// keep the given buffer
/// C'tor: attach 'buf' as the stream buffer (both std::ios and std::ostream
/// bases must be initialized with it). If 'delete_buf' is true, the destructor
/// will delete the buffer. An optional initial target 'stream' can be registered.
// keep the given buffer
LogStream::LogStream(LogStreamBuf * buf, bool delete_buf, std::ostream * stream) :
  std::ios(buf),
  std::ostream(buf),
  delete_buffer_(delete_buf)
{
  if (stream != nullptr)
  {
    insert(*stream);
  }
}
/// D'tor: delete the stream buffer if we own it, after detaching it from the
/// std::ios base so the base-class destructors never see a dangling pointer.
LogStream::~LogStream()
{
  if (delete_buffer_)
  {
    LogStreamBuf* buffer = rdbuf();
    // Bug fix: the previous code executed 'std::ios(nullptr);', which only
    // constructs and discards a temporary stream object — it never reset the
    // buffer pointer. Properly detach via the base-class rdbuf() setter instead.
    std::ios::rdbuf(nullptr);
    // delete the stream buffer
    delete buffer;
  }
}
void LogStream::insert(std::ostream & stream)
{
if (!bound_() || hasStream_(stream))
{
return;
}
// we didn't find it - create a new entry in the list
LogStreamBuf::StreamStruct s_struct;
s_struct.stream = &stream;
rdbuf()->stream_list_.push_back(s_struct);
}
void LogStream::remove(std::ostream & stream)
{
if (!bound_())
return;
StreamIterator it = findStream_(stream);
if (it != rdbuf()->stream_list_.end())
{
rdbuf()->sync();
// HINT: we do NOT clear the cache (because we cannot access it from here)
// and we do not flush incomplete_line_!!!
rdbuf()->stream_list_.erase(it);
}
}
/// Unregister all output targets: sync, push out any partial line, flush each
/// target, then clear the list. No-op if no buffer is attached.
void LogStream::removeAllStreams()
{
  if (!bound_())
  {
    return;
  }
  rdbuf()->sync();
  // distribute any incomplete line before the streams go away
  std::string& leftover = rdbuf()->incomplete_line_;
  if (!leftover.empty())
  {
    rdbuf()->distribute_(leftover);
    leftover.clear();
  }
  // make sure everything reaches its destination before dropping all targets
  for (auto& entry : rdbuf()->stream_list_)
  {
    if (entry.stream != nullptr)
    {
      entry.stream->flush();
    }
  }
  rdbuf()->stream_list_.clear();
}
void LogStream::insertNotification(std::ostream & s, LogStreamNotifier & target)
{
if (!bound_())
{
return;
}
insert(s);
StreamIterator it = findStream_(s);
(*it).target = ⌖
}
/// Linear search for 's' in the registered stream list; returns end() if it
/// is not registered.
LogStream::StreamIterator LogStream::findStream_(const std::ostream & s)
{
  StreamIterator it = rdbuf()->stream_list_.begin();
  const StreamIterator it_end = rdbuf()->stream_list_.end();
  while (it != it_end && it->stream != &s)
  {
    ++it;
  }
  return it;
}
/// True iff 'stream' is currently registered as an output target
/// (an unbound LogStream has no targets at all).
bool LogStream::hasStream_(std::ostream & stream)
{
  return bound_() && (findStream_(stream) != rdbuf()->stream_list_.end());
}
void LogStream::setPrefix(const std::ostream & s, const string & prefix)
{
if (!bound_())
{
return;
}
StreamIterator it = findStream_(s);
if (it != rdbuf()->stream_list_.end())
{
(*it).prefix = prefix;
}
}
/// Set the same prefix template for every registered target stream.
void LogStream::setPrefix(const string & prefix)
{
  if (!bound_())
  {
    return;
  }
  for (auto& entry : rdbuf()->stream_list_)
  {
    entry.prefix = prefix;
  }
}
/// True iff a stream buffer is attached. rdbuf() is non-const, so a const_cast
/// is needed to call it from this const method (it does not modify state).
bool LogStream::bound_() const
{
  LogStream * non_const_this = const_cast<LogStream *>(this);
  return non_const_this->rdbuf() != nullptr;
}
/// Flush the underlying std::ostream.
void LogStream::flush()
{
  std::ostream::flush();
}

/// Force out a trailing line that was not terminated by '\n' yet: sync the
/// buffer, then distribute and clear incomplete_line_. No-op if unbound.
void LogStream::flushIncomplete()
{
  if (!bound_())
  {
    return;
  }
  rdbuf()->sync();
  std::string& leftover = rdbuf()->incomplete_line_;
  if (!leftover.empty())
  {
    rdbuf()->distribute_(leftover);
    leftover.clear();
  }
}
} // namespace Logger
// global StreamHandler
OPENMS_DLLAPI StreamHandler STREAM_HANDLER;

// Internal (static) global log streams - not directly accessible from outside this file.
// Use getGlobalLog*() accessor functions for configuration purposes.
// Use OPENMS_LOG_* macros (which use thread-local streams) for actual logging.
namespace
{
  // each stream owns its buffer (delete flag 'true') and writes to cerr/cout by default
  Logger::LogStream g_log_fatal(new Logger::LogStreamBuf("FATAL_ERROR", &red), true, &cerr);
  Logger::LogStream g_log_error(new Logger::LogStreamBuf("ERROR", &red), true, &cerr);
  Logger::LogStream g_log_warn(new Logger::LogStreamBuf("WARNING", &yellow), true, &cout);
  Logger::LogStream g_log_info(new Logger::LogStreamBuf("INFO", nullptr), true, &cout);
  // OPENMS_LOG_DEBUG is disabled by default, but will be enabled in TOPPAS.cpp or TOPPBase.cpp if started in debug mode (--debug or -debug X)
  Logger::LogStream g_log_debug(new Logger::LogStreamBuf("DEBUG", &magenta), false); // last param should be 'true', but segfaults...
}

//
// Global log stream accessor functions (for configuration purposes)
// WARNING: Direct logging to these streams is NOT thread-safe.
// Use OPENMS_LOG_* macros for actual logging.
//
Logger::LogStream& getGlobalLogFatal() { return g_log_fatal; }
Logger::LogStream& getGlobalLogError() { return g_log_error; }
Logger::LogStream& getGlobalLogWarn() { return g_log_warn; }
Logger::LogStream& getGlobalLogInfo() { return g_log_info; }
Logger::LogStream& getGlobalLogDebug() { return g_log_debug; }

//
// Thread-local log stream accessors
// Each thread gets its own LogStream instance with a private buffer,
// but shares the stream_list_ (output destinations) with the global instance.
// (the LogStreamBuf copy-constructor-like c'tor copies the stream list)
//
Logger::LogStream& getThreadLocalLogFatal()
{
  thread_local Logger::LogStream tls(new Logger::LogStreamBuf(g_log_fatal.rdbuf(), &red), true);
  return tls;
}
Logger::LogStream& getThreadLocalLogError()
{
  thread_local Logger::LogStream tls(new Logger::LogStreamBuf(g_log_error.rdbuf(), &red), true);
  return tls;
}
Logger::LogStream& getThreadLocalLogWarn()
{
  thread_local Logger::LogStream tls(new Logger::LogStreamBuf(g_log_warn.rdbuf(), &yellow), true);
  return tls;
}
Logger::LogStream& getThreadLocalLogInfo()
{
  thread_local Logger::LogStream tls(new Logger::LogStreamBuf(g_log_info.rdbuf(), nullptr), true);
  return tls;
}
Logger::LogStream& getThreadLocalLogDebug()
{
  thread_local Logger::LogStream tls(new Logger::LogStreamBuf(g_log_debug.rdbuf(), &magenta), true);
  return tls;
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CONCEPT/Types.cpp | .cpp | 1,411 | 41 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Oliver Kohlbacher $
// $Authors: Oliver Kohlbacher $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/Types.h>
#include <clocale>
#include <string>
namespace OpenMS::Internal
{
// Helper function to initialize OpenMS locale while preserving the environment locale
// This is important for Python bindings to not affect Python's locale settings
// Helper function to initialize OpenMS' locale while preserving the environment
// locale. This is important for Python bindings, so loading OpenMS does not
// change Python's locale settings.
const char* initializeOpenMSLocale()
{
  // Query (but do not change) the currently active locale and keep a copy,
  // since subsequent setlocale() calls may overwrite the returned buffer.
  std::string saved_locale;
  const char* current_locale = setlocale(LC_ALL, nullptr);
  if (current_locale != nullptr)
  {
    saved_locale = current_locale;
  }
  // briefly switch to the classic "C" locale for OpenMS operations ...
  setlocale(LC_ALL, "C");
  // ... then restore the original locale so the calling environment is unaffected
  if (!saved_locale.empty())
  {
    setlocale(LC_ALL, saved_locale.c_str());
  }
  // Return "C", which OpenMS uses internally. A string literal is returned on
  // purpose: the pointer handed out by setlocale() may be invalidated later.
  return "C";
}

const char* OpenMS_locale = initializeOpenMSLocale();
} // namespace OpenMS::Internal
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CONCEPT/Colorizer.cpp | .cpp | 5,828 | 212 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow, Moritz Berger $
// $Authors: Chris Bielow, Moritz Berger $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/Colorizer.h>
#include <iostream>
#ifdef OPENMS_WINDOWSPLATFORM
#include <OpenMS/APPLICATIONS/ConsoleUtils.h>
#include <windows.h>
#endif
#ifdef OPENMS_HAS_UNISTD_H
#include <unistd.h> // for isatty(), STDOUT_FILENO, STDERR_FILENO
#endif
namespace OpenMS
{
/// Initialize the Console (Windows OS) and upon closing the program make sure
/// that the TTY colors are restored to default again
struct InitConsole
{
  /// D'tor: reset console colors when exiting the program
  ~InitConsole()
  {
    // use a local Colorizer! The global ones may not exist anymore when this Dtor is called
    Colorizer undo(ConsoleColor::BLUE); // any color will do...
    // reset color to default using both cerr and cout
    // We call both, to ensure the reset is invoked in case either of them is
    // redirected (not a TTY), hence our ANSI code will not reach the stream.
    // See Colorizer::colorStream_()
    std::cout << undo.undoAll();
    std::cerr << undo.undoAll();
    //std::cout << "\nundone coloring\n";
    //std::cerr << "\nundone coloring\n";
  }

#ifdef OPENMS_WINDOWSPLATFORM
  // Windows 10 (since it Anniversary Edition from 2016) understands ANSI codes, but only
  // if we tell it to ...
  InitConsole()
  {
    initStream(STD_OUTPUT_HANDLE);
    initStream(STD_ERROR_HANDLE);
  }

  // Set output mode to handle virtual terminal sequences.
  // Returns the new console mode on success, or the Windows error code on failure.
  DWORD initStream(DWORD handle)
  {
    HANDLE hOut = GetStdHandle(handle);
    if (hOut == INVALID_HANDLE_VALUE)
    {
      //std::cerr << "no " << handle << "\n";
      return GetLastError();
    }
    DWORD dwMode = 0;
    if (!GetConsoleMode(hOut, &dwMode))
    {
      //std::cerr << "no mode get for " << handle << "\n";
      return GetLastError();
    }
    // enable interpretation of ANSI escape sequences by the console
    dwMode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING;
    if (!SetConsoleMode(hOut, dwMode))
    {
      //std::cerr << "no mode set for " << handle << "\n";
      return GetLastError();
    }
    return dwMode;
  }
#endif // end OPENMS_WINDOWSPLATFORM
};
#ifdef OPENMS_WINDOWSPLATFORM
/// implementation of isatty() for Windows
/// Returns 'true' if the stream is shown on the console
/// and 'false' if redirected somewhere else (file or NUL)
bool isattyWin(const std::ostream& stream)
{
  // only cout/cerr can be tied to a console; anything else is "not a TTY"
  DWORD h_stream;
  if (&stream == &std::cout)
    h_stream = STD_OUTPUT_HANDLE;
  else if (&stream == &std::cerr)
    h_stream = STD_ERROR_HANDLE;
  else
    return false;

  HANDLE hOut = GetStdHandle(h_stream);
  if (hOut == INVALID_HANDLE_VALUE)
  {
    //std::cerr << "no handle for " << h_stream << "\n";
    return false;
  }
  // GetConsoleMode() only succeeds for real console handles,
  // i.e. it fails when the stream is redirected to a file or NUL
  DWORD dwMode = 0;
  if (!GetConsoleMode(hOut, &dwMode))
  {
    return false;
  }
  return true;
}
#endif

// our local object, which will be initialized and destroyed at startup/teardown
// (its c'tor/d'tor enable ANSI processing on Windows and restore colors on exit)
static InitConsole windows_console_prep_and_restore;
/// C'tor: fix the color this Colorizer will emit when streamed.
Colorizer::Colorizer(const ConsoleColor color)
  : color_(color)
{
}
/// Prepare this Colorizer to emit only the reset code for its own color
/// (any buffered text is discarded).
Colorizer& Colorizer::undo()
{
  undo_ = true;
  undos_only = true;
  this->input_.str("");
  return *this;
}

/// Prepare this Colorizer to emit only the reset-all-attributes code
/// (any buffered text is discarded).
Colorizer& Colorizer::undoAll()
{
  undo_all_ = true;
  undos_only = true;
  this->input_.str("");
  return *this;
}
/// Write 'ANSI_command' to 'stream', suppressing it when cout/cerr are
/// redirected (not a TTY) so ANSI codes never end up in files.
/// Any other stream (mostly used in tests) is written to unconditionally.
void Colorizer::colorStream_(std::ostream& stream, const char* ANSI_command)
{
  const bool is_std_stream = (&stream == &std::cout) || (&stream == &std::cerr);
  if (is_std_stream && !isTTY(stream))
  {
    return;
  }
  // debug: stream << "(" << ANSI_command + 2 << ") ";
  stream << ANSI_command;
}
/// True iff 'stream' is cout/cerr AND is attached to a terminal
/// (i.e. not redirected to a file/pipe). Platform-specific implementation.
bool Colorizer::isTTY(const std::ostream& stream)
{
#ifdef OPENMS_WINDOWSPLATFORM
  return isattyWin(stream);
#else
  // POSIX: map the stream to its file descriptor and ask isatty()
  if (&stream == &std::cout && isatty(STDOUT_FILENO))
  {
    return true;
  }
  if (&stream == &std::cerr && isatty(STDERR_FILENO))
  {
    return true;
  }
  return false;
#endif
}
/// Emit this Colorizer's effect on 'o_stream': color on + buffered text
/// (unless only an undo was requested), followed by the appropriate reset code.
void Colorizer::outputToStream_(std::ostream& o_stream)
{
  if (!undos_only) // skip coloring/printing when undo() or undoAll() was called — there is no text to show
  {
    // color the stream (or console)
    colorStream_(o_stream, colors_[(int)color_].enable);
    // paste text
    o_stream << input_.str();
  }
  if (undo_all_)
  {
    colorStream_(o_stream, color_undo_all_); // reset ALL attributes
  }
  else if (undo_)
  {
    colorStream_(o_stream, colors_[(int)color_].disable); // reset only our color
  }
}
// extern colorizers with predefined colors/attributes, usable as stream manipulators
OpenMS::Colorizer red(ConsoleColor::RED);
OpenMS::Colorizer green(ConsoleColor::GREEN);
OpenMS::Colorizer yellow(ConsoleColor::YELLOW);
OpenMS::Colorizer blue(ConsoleColor::BLUE);
OpenMS::Colorizer magenta(ConsoleColor::MAGENTA);
OpenMS::Colorizer cyan(ConsoleColor::CYAN);
OpenMS::Colorizer invert(ConsoleColor::INVERT);
OpenMS::Colorizer bright(ConsoleColor::BRIGHT);
OpenMS::Colorizer underline(ConsoleColor::UNDERLINE);
/// Stream a Colorizer: applies its color/reset effect and buffered text to 'o_stream'.
std::ostream& operator<<(std::ostream& o_stream, OpenMS::Colorizer& col)
{
  // colorize stream; dump internal string (if any); and reset the color (if col.resetColor() was used before).
  col.outputToStream_(o_stream);
  return o_stream;
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CONCEPT/UniqueIdGenerator.cpp | .cpp | 3,240 | 101 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/UniqueIdGenerator.h>
#include <boost/date_time/posix_time/posix_time_types.hpp> //no i/o just types
namespace OpenMS
{
UInt64 UniqueIdGenerator::seed_ = 0;
UniqueIdGenerator* UniqueIdGenerator::instance_ = nullptr;
boost::mt19937_64* UniqueIdGenerator::rng_ = nullptr;
boost::uniform_int<UInt64>* UniqueIdGenerator::dist_ = nullptr;
UInt64 UniqueIdGenerator::getUniqueId()
{
UniqueIdGenerator& instance = getInstance_();
#ifdef _OPENMP
UInt64 val;
#pragma omp critical (OPENMS_UniqueIdGenerator_getUniqueId)
{
val = (*instance.dist_)(*instance.rng_);
}
// note: OpenMP can only work on a structured block, return needs to be outside that block
return val;
#else
return (*instance.dist_)(*instance.rng_);
#endif
}
UInt64 UniqueIdGenerator::getSeed()
{
return getInstance_().seed_;
}
  /// Re-seeds the shared RNG (e.g. for reproducible test runs) and resets the
  /// distribution's internal state so subsequent draws depend only on the new seed.
  void UniqueIdGenerator::setSeed(UInt64 seed)
  {
    // modifies static members
#ifdef _OPENMP
#pragma omp critical (OPENMS_UniqueIdGenerator_setSeed)
#endif
    {
      UniqueIdGenerator& instance = getInstance_();
      instance.seed_ = seed;
      instance.rng_->seed( instance.seed_ );
      instance.dist_->reset();
    }
  }
  // Private default constructor; all real setup happens lazily in init_().
  UniqueIdGenerator::UniqueIdGenerator() = default;
  /// Lazily creates and returns the singleton. Creation is guarded by an OpenMP
  /// critical section; the final dereference happens outside it (instance_ is
  /// never reset once created, so the read is safe after construction).
  UniqueIdGenerator & UniqueIdGenerator::getInstance_()
  {
    // modifies static members
#ifdef _OPENMP
#pragma omp critical (OPENMS_UniqueIdGenerator_getInstance_)
#endif
    {
      if (!instance_)
      {
        instance_ = new UniqueIdGenerator();
        instance_->init_();
      }
    }
    return *instance_;
  }
  /// One-time setup: derives a microsecond-resolution seed from the wall clock
  /// and allocates the RNG engine and uniform distribution over the full UInt64 range.
  void UniqueIdGenerator::init_()
  {
    // modifies static members
#ifdef _OPENMP
#pragma omp critical (OPENMS_UniqueIdGenerator_init_)
#endif
    {
      // find a seed:
      // get something with high resolution (around microseconds) -- its hard to do better on Windows --
      // which has absolute system time (there is higher resolution available for the time since program startup, but
      // we do not want this here since this seed usually gets initialized at the same program uptime).
      // Reason for high-res: in pipelines, instances of TOPP tools can get initialized almost simultaneously (i.e., resolution in seconds is not enough),
      // leading to identical random numbers (e.g. feature-IDs) in two or more distinct files.
      // C++11 note: C++ build-in alternative once C++11 can be presumed: 'std::chrono::high_resolution_clock'
      boost::posix_time::ptime t(boost::posix_time::microsec_clock::local_time() );
      seed_ = t.time_of_day().ticks(); // independent of implementation; as opposed to nanoseconds(), which need not be available on every platform
      rng_ = new boost::mt19937_64 (seed_);
      dist_ = new boost::uniform_int<UInt64> (0, std::numeric_limits<UInt64>::max());
    }
  }
  /// Releases the heap-allocated RNG engine and distribution.
  UniqueIdGenerator::~UniqueIdGenerator()
  {
    delete rng_;
    delete dist_;
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CONCEPT/ProgressLogger.cpp | .cpp | 6,965 | 273 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg$
// $Authors: Marc Sturm, Stephan Aiche$
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ProgressLogger.h>
#include <OpenMS/CONCEPT/Macros.h>
#include <OpenMS/SYSTEM/StopWatch.h>
#include <OpenMS/SYSTEM/SysInfo.h>
#include <QtCore/QString>
#include <iostream>
using namespace std;
namespace OpenMS
{
class CMDProgressLoggerImpl :
public ProgressLogger::ProgressLoggerImpl
{
public:
CMDProgressLoggerImpl() = default;
/// create new object (needed by Factory)
static ProgressLogger::ProgressLoggerImpl* create()
{
return new CMDProgressLoggerImpl();
}
void startProgress(const SignedSize begin, const SignedSize end, const String& label, const int current_recursion_depth) const override
{
begin_ = begin;
current_ = begin_;
end_ = end;
cout << string(2 * current_recursion_depth, ' ') << "Progress of '" << label << "':" << endl;
stop_watch_.reset();
stop_watch_.start();
}
void setProgress(const SignedSize value, const int current_recursion_depth) const override
{
if (begin_ == end_)
{
cout << '.' << flush;
}
else if (value < begin_ || value > end_)
{
cout << "ProgressLogger: Invalid progress value '" << value
<< "'. Should be between '" << begin_ << "' and '" << end_ << "'!" << endl;
}
else
{
cout << '\r' << string(2 * current_recursion_depth, ' ') << QString::number(float(value - begin_) / float(end_ - begin_) * 100.0, 'f', 2).toStdString() << " % ";
cout << flush;
}
}
SignedSize nextProgress() const override
{
#pragma omp atomic
++current_;
return current_;
}
void endProgress(const int current_recursion_depth, UInt64 bytes_processed) const override
{
stop_watch_.stop();
String IO_stats;
if (bytes_processed)
{
IO_stats = " @ " + bytesToHumanReadable(bytes_processed / stop_watch_.getClockTime()) + "/s";
}
cout << '\r' << string(2 * current_recursion_depth, ' ') << "-- done [took " << StopWatch::toString(stop_watch_.getCPUTime()) << " (CPU), " << StopWatch::toString(stop_watch_.getClockTime()) << " (Wall)" << IO_stats << "] -- " << endl;
}
private:
mutable StopWatch stop_watch_;
mutable SignedSize begin_{0};
mutable SignedSize end_{0};
mutable SignedSize current_{0};
};
  /// Null-object progress logger: every callback is a no-op (used for LogType NONE).
  class NoProgressLoggerImpl :
    public ProgressLogger::ProgressLoggerImpl
  {
public:
    /// create new object (needed by Factory)
    static ProgressLogger::ProgressLoggerImpl* create()
    {
      return new NoProgressLoggerImpl();
    }
    void startProgress(const SignedSize /* begin */, const SignedSize /* end */, const String& /* label */, const int /* current_recursion_depth */) const override
    {
    }
    void setProgress(const SignedSize /* value */, const int /* current_recursion_depth */) const override
    {
    }
    SignedSize nextProgress() const override
    {
      return 0;
    }
    void endProgress(const int /* current_recursion_depth */, UInt64 /*bytes_processed*/) const override
    {
    }
  };
  // Simple runtime plugin system for the GUI progress logger.
  // An external library (e.g., OpenMS_GUI) can replace this function to provide a GUI logger.
  // By default it falls back to NoProgressLoggerImpl (i.e. no output).
  MakeGUIProgressLoggerFunc make_gui_progress_logger =
    []() -> ProgressLogger::ProgressLoggerImpl* { return new NoProgressLoggerImpl(); };
  // global nesting depth of startProgress()/endProgress() pairs (drives indentation)
  int ProgressLogger::recursion_depth_ = 0;
  /// Default constructor: log type NONE, backed by a no-op logger implementation.
  ProgressLogger::ProgressLogger() :
    type_(NONE),
    last_invoke_()
  {
    current_logger_ = new NoProgressLoggerImpl();
  }
ProgressLogger::ProgressLogger(const ProgressLogger& other) :
type_(other.type_),
last_invoke_(other.last_invoke_)
{
switch (type_)
{
case NONE:
{
current_logger_ = new NoProgressLoggerImpl();
break;
}
case CMD:
{
current_logger_ = new CMDProgressLoggerImpl();
break;
}
case GUI:
{
current_logger_ = make_gui_progress_logger();
}
}
}
ProgressLogger& ProgressLogger::operator=(const ProgressLogger& other)
{
if (&other == this)
{
return *this;
}
this->last_invoke_ = other.last_invoke_;
this->type_ = other.type_;
// we clean our old logger
delete current_logger_;
// .. and get a new one
switch (type_)
{
case NONE:
{
current_logger_ = new NoProgressLoggerImpl();
break;
}
case CMD:
{
current_logger_ = new CMDProgressLoggerImpl();
break;
}
case GUI:
{
current_logger_ = make_gui_progress_logger();
}
}
return *this;
}
  /// Destructor: the logger owns its implementation.
  ProgressLogger::~ProgressLogger()
  {
    delete current_logger_;
  }
void ProgressLogger::setLogType(LogType type) const
{
type_ = type;
// remove the old logger
delete current_logger_;
switch (type)
{
case NONE:
{
current_logger_ = new NoProgressLoggerImpl();
break;
}
case CMD:
{
current_logger_ = new CMDProgressLoggerImpl();
break;
}
case GUI:
{
current_logger_ = make_gui_progress_logger();
}
}
}
  /// Installs a custom implementation; takes ownership of @p logger and
  /// destroys the previous one. NOTE(review): type_ is not updated here.
  void ProgressLogger::setLogger(ProgressLoggerImpl* logger)
  {
    delete current_logger_;
    current_logger_ = logger;
  }
  /// Returns the currently configured log type.
  ProgressLogger::LogType ProgressLogger::getLogType() const
  {
    return type_;
  }
  /// Opens a new (possibly nested) progress range [begin, end] and increases
  /// the global recursion depth used for indenting nested progress output.
  void ProgressLogger::startProgress(SignedSize begin, SignedSize end, const String& label) const
  {
    OPENMS_PRECONDITION(begin <= end, "ProgressLogger::init : invalid range!");
    last_invoke_ = time(nullptr);
    current_logger_->startProgress(begin, end, label, recursion_depth_);
    ++recursion_depth_;
  }
  /// Reports an absolute progress value; output is throttled to at most one
  /// update per wall-clock second.
  void ProgressLogger::setProgress(SignedSize value) const
  {
    // update only if at least 1 second has passed
    if (last_invoke_ == time(nullptr))
    {
      return;
    }
    last_invoke_ = time(nullptr);
    current_logger_->setProgress(value, recursion_depth_);
  }
  /// Advances the internal counter by one. The counter is always incremented;
  /// only the *display* of the new value is throttled to once per second.
  void ProgressLogger::nextProgress() const
  {
    auto p = current_logger_->nextProgress();
    // update only if at least 1 second has passed
    if (last_invoke_ == time(nullptr))
    {
      return;
    }
    last_invoke_ = time(nullptr);
    current_logger_->setProgress(p, recursion_depth_);
  }
  /// Closes the innermost progress range and decreases the recursion depth
  /// (which never drops below zero).
  void ProgressLogger::endProgress(UInt64 bytes_processed) const
  {
    if (recursion_depth_)
    {
      --recursion_depth_;
    }
    current_logger_->endProgress(recursion_depth_, bytes_processed);
  }
} //namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CONCEPT/Exception.cpp | .cpp | 13,408 | 323 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg$
// $Authors: Marc Sturm $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/DATASTRUCTURES/String.h>
#include <OpenMS/CONCEPT/GlobalExceptionHandler.h>
#include <cstdio>
#include <iostream>
#include <sstream>
#include <typeinfo>
/**
  @brief Defines the trivial constructor of exception class @p a, forwarding the
  fixed message @p b to BaseException.

  NOTE: no trailing backslash after the closing brace -- a dangling line
  continuation there would splice the *next* source line into the macro body.
*/
#define DEF_EXCEPTION(a, b) \
  a::a(const char* file, int line, const char* function) noexcept : \
    BaseException(file, line, function, # a, b) \
  { \
  }
using namespace std;
namespace OpenMS
{
namespace Exception
{
    /// Default constructor: unknown origin; registers itself with the global handler.
    BaseException::BaseException() noexcept :
      std::runtime_error("unknown error"),
      file_("?"),
      line_(-1),
      function_("?"),
      name_("Exception")
    {
      GlobalExceptionHandler::getInstance().set(file_, line_, function_, name_, what());
    }
    /// Full constructor: records origin (file/line/function), exception name and message.
    BaseException::BaseException(const char* file, int line, const char* function, const std::string& name, const std::string& message) noexcept :
      std::runtime_error(message),
      file_(file),
      line_(line),
      function_(function),
      name_(name)
    {
      GlobalExceptionHandler::getInstance().set(file_, line_, function_, name_, what());
    }
    /// Origin-only constructor with generic name and message.
    BaseException::BaseException(const char* file, int line, const char* function) noexcept :
      std::runtime_error("unknown error"),
      file_(file),
      line_(line),
      function_(function),
      name_("Exception")
    {
      GlobalExceptionHandler::getInstance().set(file_, line_, function_, name_, what());
    }
    /// Copy constructor; deliberately does NOT re-register with the global handler.
    BaseException::BaseException(const BaseException& exception) noexcept :
      std::runtime_error(exception),
      file_(exception.file_),
      line_(exception.line_),
      function_(exception.function_),
      name_(exception.name_)
    {
    }
    BaseException::~BaseException() noexcept
    {
    }
    /// Name of the exception type (e.g. "FileNotFound").
    const char* BaseException::getName() const noexcept
    {
      return name_.c_str();
    }
    /// Source file the exception was thrown from.
    const char* BaseException::getFile() const noexcept
    {
      return file_;
    }
    /// Function the exception was thrown from.
    const char* BaseException::getFunction() const noexcept
    {
      return function_;
    }
    /// Human-readable error message (same as what()).
    const char* BaseException::getMessage() const noexcept
    {
      return what();
    }
    /// Source line the exception was thrown from.
    int BaseException::getLine() const noexcept
    {
      return line_;
    }
    // --- Concrete exception constructors -------------------------------------
    // Each forwards a formatted message to BaseException. The extra setMessage()
    // calls look redundant (BaseException::set already records the message);
    // NOTE(review): presumably kept for historic reasons -- confirm before removing.
    Precondition::Precondition(const char* file, int line, const char* function, const string& condition) noexcept :
      BaseException(file, line, function, "Precondition failed", std::string(condition))
    {
      GlobalExceptionHandler::getInstance().setMessage(what());
    }
    Postcondition::Postcondition(const char* file, int line, const char* function, const string& condition) noexcept :
      BaseException(file, line, function, "Postcondition failed", std::string(condition))
    {
      GlobalExceptionHandler::getInstance().setMessage(what());
    }
    IndexUnderflow::IndexUnderflow(const char* file, int line, const char* function, SignedSize index, Size size) noexcept :
      BaseException(file, line, function, "IndexUnderflow", "the given index was too small: " + String(index) + " (size = " + String(size) + ")")
    {
      GlobalExceptionHandler::getInstance().setMessage(what());
    }
    IndexOverflow::IndexOverflow(const char* file, int line, const char* function, SignedSize index, Size size) noexcept :
      BaseException(file, line, function, "IndexOverflow", "the given index was too large: " + String(index) + " (size = " + String(size) + ")")
    {
      GlobalExceptionHandler::getInstance().setMessage(what());
    }
    NotSorted::NotSorted(const char* file, int line, const char* function, const std::string& message) noexcept:
      BaseException(file, line, function, "NotSorted", message)
    {
      GlobalExceptionHandler::getInstance().setMessage(what());
    }
    FailedAPICall::FailedAPICall(const char* file, int line, const char* function, const std::string& message) noexcept :
      BaseException(file, line, function, "FailedAPICall", message)
    {
    }
OutOfMemory::OutOfMemory(const char* file, int line, const char* function, Size size) noexcept :
BaseException(file, line, function, "OutOfMemory", "unable to allocate enough memory (size = " + String(size) + " bytes) ")
{
GlobalExceptionHandler::getInstance().setMessage(static_cast<std::runtime_error>(*this).what());
}
    // --- Size / value / position errors --------------------------------------
    SizeUnderflow::SizeUnderflow(const char* file, int line, const char* function, Size size) noexcept :
      BaseException(file, line, function, "SizeUnderflow", "the given size was too small: " + String(size))
    {
      GlobalExceptionHandler::getInstance().setMessage(what());
    }
    InvalidSize::InvalidSize(const char* file, int line, const char* function, Size size, const std::string& message) noexcept :
      BaseException(file, line, function, "InvalidSize", "the given size was not expected: " + String(size) + " (" + message + ")")
    {
      GlobalExceptionHandler::getInstance().setMessage(what());
    }
    IllegalPosition::IllegalPosition(const char* file, int line, const char* function, float x, float y, float z) noexcept :
      BaseException(file, line, function, "IllegalPosition:", "(" + String(x) + "," + String(y) + "," + String(z) + ")")
    {
      GlobalExceptionHandler::getInstance().setMessage(what());
    }
    ParseError::ParseError(const char* file, int line, const char* function, const std::string& expression, const std::string& message) noexcept :
      BaseException(file, line, function, "Parse Error", message + " in: " + expression)
    {
      GlobalExceptionHandler::getInstance().setMessage(what());
    }
    // --- File / I/O errors ---------------------------------------------------
    FileNotFound::FileNotFound(const char* file, int line, const char* function, const std::string& filename) noexcept :
      BaseException(file, line, function, "FileNotFound", "the file '" + filename + "' could not be found")
    {
      GlobalExceptionHandler::getInstance().setMessage(what());
    }
    ExternalExecutableNotFound::ExternalExecutableNotFound(const char* file, int line, const char* function, const std::string& filename) noexcept:
      BaseException(file, line, function, "ExternalExecutableNotFound", "the executable '" + filename + "' could not be found")
    {
      GlobalExceptionHandler::getInstance().setMessage(what());
    }
    FileNotReadable::FileNotReadable(const char* file, int line, const char* function, const std::string& filename) noexcept :
      BaseException(file, line, function, "FileNotReadable", "the file '" + filename + "' is not readable for the current user")
    {
      GlobalExceptionHandler::getInstance().setMessage(what());
    }
    FileNotWritable::FileNotWritable(const char* file, int line, const char* function, const std::string& filename) noexcept :
      BaseException(file, line, function, "FileNotWritable", "the file '" + filename + "' is not writable for the current user")
    {
      GlobalExceptionHandler::getInstance().setMessage(what());
    }
    FileNameTooLong::FileNameTooLong(const char* file, int line, const char* function, const std::string& filename, int max_length) noexcept :
      BaseException(file, line, function, "FileNameTooLong",
                    "the file '" + filename + "' is too long (" + String(filename.size()) + " chars) "
                    + "and exceeds the allowed limit of " + String(max_length) + "; "
                    + "use shorter filenames and/or fewer subdirectories.")
    {
      GlobalExceptionHandler::getInstance().setMessage(what());
    }
    IOException::IOException(const char* file, int line, const char* function, const std::string& filename) noexcept :
      BaseException(file, line, function, "IOException", "IO error for file '" + filename + "'")
    {
      GlobalExceptionHandler::getInstance().setMessage(what());
    }
    SqlOperationFailed::SqlOperationFailed(const char* file, int line, const char* function, const std::string& description) noexcept :
      BaseException(file, line, function, "SqlOperationFailed", "an sql operation failed ('" + description + "')")
    {
      GlobalExceptionHandler::getInstance().setMessage(what());
    }
    FileEmpty::FileEmpty(const char* file, int line, const char* function, const std::string& filename) noexcept :
      BaseException(file, line, function, "FileEmpty", "the file '" + filename + "' is empty")
    {
      GlobalExceptionHandler::getInstance().setMessage(what());
    }
    // --- Conversion / parameter errors ---------------------------------------
    ConversionError::ConversionError(const char* file, int line, const char* function, const std::string& error) noexcept :
      BaseException(file, line, function, "ConversionError", error)
    {
      GlobalExceptionHandler::getInstance().setMessage(what());
    }
    InvalidValue::InvalidValue(const char* file, int line, const char* function, const std::string& message, const std::string& value) noexcept :
      BaseException(file, line, function, "InvalidValue", "the value '" + value + "' was used but is not valid; " + message)
    {
      GlobalExceptionHandler::getInstance().setMessage(what());
    }
    InvalidParameter::InvalidParameter(const char* file, int line, const char* function, const std::string& message) noexcept :
      BaseException(file, line, function, "InvalidParameter", message)
    {
    }
    UnableToCreateFile::UnableToCreateFile(const char* file, int line, const char* function, const std::string& filename, const std::string& message) noexcept :
      BaseException(file, line, function, "UnableToCreateFile", "the file '" + filename + "' could not be created. " + message)
    {
      GlobalExceptionHandler::getInstance().setMessage(what());
    }
    InvalidFileType::InvalidFileType(const char* file, int line, const char* function, const std::string& filename, const std::string& message) noexcept :
      BaseException(file, line, function, "InvalidFileType", "the file '" + filename + "' could not be created because the type specified was not valid. " + message)
    {
      GlobalExceptionHandler::getInstance().setMessage(what());
    }
    IllegalArgument::IllegalArgument(const char* file, int line, const char* function, const string& error_message) noexcept :
      BaseException(file, line, function, "IllegalArgument", error_message)
    {
    }
    InternalToolError::InternalToolError(const char* file, int line, const char* function, const std::string& error_message) noexcept:
      BaseException(file, line, function, "InternalToolError", error_message)
    {
    }
    MissingInformation::MissingInformation(const char* file, int line, const char* function, const string& error_message) noexcept :
      BaseException(file, line, function, "MissingInformation", error_message)
    {
    }
    ElementNotFound::ElementNotFound(const char* file, int line, const char* function, const string& element) noexcept :
      BaseException(file, line, function, "ElementNotFound", "the element '" + element + "' could not be found")
    {
      GlobalExceptionHandler::getInstance().setMessage(what());
    }
    // --- Algorithm-specific errors (name passed through by the caller) -------
    UnableToFit::UnableToFit(const char* file, int line, const char* function, const string& name, const string& message) noexcept :
      BaseException(file, line, function, name, message)
    {
    }
    UnableToCalibrate::UnableToCalibrate(const char* file, int line, const char* function, const string& name, const string& message) noexcept :
      BaseException(file, line, function, name, message)
    {
    }
    DepletedIDPool::DepletedIDPool(const char* file, int line, const char* function, const string& name, const string& message) noexcept :
      BaseException(file, line, function, name, message)
    {
    }
    InvalidRange::InvalidRange(const char* file, int line, const char* function) noexcept :
      BaseException(file, line, function, "InvalidRange", "the range of the operation was invalid")
    {
    }
    InvalidRange::InvalidRange(const char* file, int line, const char* function, const std::string& message) noexcept :
      BaseException(file, line, function, "InvalidRange", message)
    {
    }
    // Trivial exceptions with a fixed message, generated via DEF_EXCEPTION.
    DEF_EXCEPTION(DivisionByZero, "a division by zero was requested")
    DEF_EXCEPTION(OutOfRange, "the argument was not in range")
    DEF_EXCEPTION(NullPointer, "a null pointer was specified")
    DEF_EXCEPTION(InvalidIterator, "the iterator is invalid - probably it is not bound to a container")
    DEF_EXCEPTION(IncompatibleIterators, "the iterator could not be assigned because it is bound to a different container")
    DEF_EXCEPTION(NotImplemented, "this method has not been implemented yet. Feel free to complain about it!")
    DEF_EXCEPTION(IllegalSelfOperation, "cannot perform operation on the same object")
    DEF_EXCEPTION(IllegalTreeOperation, "an illegal tree operation was requested")
    DEF_EXCEPTION(BufferOverflow, "the maximum buffersize has been reached")
    DEF_EXCEPTION(OutOfGrid, "a point was outside a grid")
} // namespace Exception
std::ostream& operator<<(std::ostream& os, const Exception::BaseException& e)
{
os << e.getName() << " @ " << e.getFile() << ":" << e.getFunction() << " (Line " << e.getLine() << "): " << e.what();
return os;
}
} // namespace OPENMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CONCEPT/CommonEnums.cpp | .cpp | 412 | 15 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/CommonEnums.h>
namespace OpenMS
{
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CONCEPT/StreamHandler.cpp | .cpp | 5,008 | 176 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg$
// $Authors: Stephan Aiche$
// --------------------------------------------------------------------------
#include <iostream>
#include <fstream>
#include <sstream>
#include <OpenMS/CONCEPT/StreamHandler.h>
#include <OpenMS/SYSTEM/File.h>
#include <OpenMS/CONCEPT/LogStream.h>
using std::ostream;
using std::map;
using std::ios_base;
using std::ostringstream;
using std::ofstream;
namespace OpenMS
{
  // Default constructor: starts with no registered streams.
  StreamHandler::StreamHandler() = default;
StreamHandler::StreamHandler(const StreamHandler & source)
{
name_to_stream_map_ = source.name_to_stream_map_;
name_to_counter_map_ = source.name_to_counter_map_;
name_to_type_map_ = source.name_to_type_map_;
}
  /// Destructor: closes all file streams and deletes every owned stream object.
  StreamHandler::~StreamHandler()
  {
    // close all associated streams
    for (map<String, ostream *>::iterator iter = name_to_stream_map_.begin(); iter != name_to_stream_map_.end(); ++iter)
    {
      ostream * stream_pointer = iter->second;
      // file streams need to be closed before
      if (name_to_type_map_[iter->first] == FILE)
      {
        (static_cast<ofstream *>(stream_pointer))->close();
      }
      delete stream_pointer; // call destructor
    }
  }
  // Defaulted assignment: shallow-copies the registry maps (like the copy ctor).
  StreamHandler & StreamHandler::operator=(const StreamHandler & source) = default;
ostream & StreamHandler::getStream(StreamType const type, const String & stream_name)
{
if (hasStream(type, stream_name))
{
return *name_to_stream_map_[stream_name];
}
else
{
throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, stream_name);
}
}
  /// Allocates a new stream object: an ostringstream for STRING, otherwise an
  /// ofstream opened in append mode on the absolute path of @p stream_name.
  /// Caller (registerStream) takes ownership of the returned pointer.
  ostream * StreamHandler::createStream_(const StreamType type, const String & stream_name)
  {
    ostream * stream_pointer;
    switch (type)
    {
    case STRING:
      stream_pointer = new ostringstream();
      break;
    case FILE:
    default:
      stream_pointer = new ofstream(File::absolutePath(stream_name).c_str(), ios_base::app);
      break;
    }
    return stream_pointer;
  }
Int StreamHandler::registerStream(StreamType const type, const String & stream_name)
{
Int state = 1;
if (name_to_stream_map_.count(stream_name) == 0) // this is an unknown stream .. register
{
name_to_stream_map_[stream_name] = createStream_(type, stream_name);
name_to_type_map_[stream_name] = type;
name_to_counter_map_[stream_name] = 1;
// check stream
if (name_to_stream_map_[stream_name]->fail())
{
state = 1; // indicate that something went wrong while creating this stream
}
}
else
{
// check type consistency
if (name_to_type_map_[stream_name] != type)
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "This stream was already registered with a different type.");
}
++name_to_counter_map_[stream_name];
}
return state;
}
bool StreamHandler::hasStream(const StreamType type, const String & stream_name)
{
if (name_to_stream_map_.count(stream_name) != 0)
{
return name_to_type_map_[stream_name] == type;
}
else
{
return false;
}
}
  /// Decreases the reference count of the named stream; when it reaches zero
  /// the stream is closed (files), deleted and removed from the registry.
  /// @throws Exception::ElementNotFound for unknown stream names.
  void StreamHandler::unregisterStream(StreamType const type, const String & stream_name)
  {
    if (name_to_stream_map_.count(stream_name) != 0) // check if we know this stream
    {
      if (name_to_counter_map_[stream_name] > 1)
      {
        // if there are still references left to this stream
        // just decrease the number of references
        --name_to_counter_map_[stream_name];
      }
      else
      {
        // delete the stream
        if (type == FILE)
        {
          // file streams need to be closed before
          (static_cast<ofstream *>(name_to_stream_map_[stream_name]))->close();
        }
        delete name_to_stream_map_[stream_name];
        // remove entry from the local registry
        name_to_stream_map_.erase(stream_name);
        name_to_counter_map_.erase(stream_name);
        name_to_type_map_.erase(stream_name);
      }
    }
    else
    {
      throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, stream_name);
    }
  }
std::ostream & operator<<(std::ostream & os, StreamHandler const & stream_handler)
{
for (map<String, ostream *>::const_iterator iter = stream_handler.name_to_stream_map_.begin(); iter != stream_handler.name_to_stream_map_.end(); ++iter)
{
os << "[" << iter->first << "] of type";
if ((stream_handler.name_to_type_map_.find(iter->first))->second == StreamHandler::FILE)
{
os << " FILE";
}
else
{
os << " STRING";
}
os << " #" << (stream_handler.name_to_counter_map_.find(iter->first))->second << " " << iter->second << std::endl;
}
return os;
}
} // end namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CONCEPT/GlobalExceptionHandler.cpp | .cpp | 5,359 | 182 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg$
// $Authors: Stephan Aiche, Marc Sturm $
// --------------------------------------------------------------------------
#include <OpenMS/config.h>
#include <OpenMS/CONCEPT/GlobalExceptionHandler.h>
#include <OpenMS/CONCEPT/PrecisionWrapper.h>
#include <cstdlib> // for getenv in terminate()
//#include <sys/types.h>
#include <csignal> // for SIGSEGV and kill
#include <iostream>
#ifndef OPENMS_WINDOWSPLATFORM
#ifdef OPENMS_HAS_UNISTD_H
#include <unistd.h> // for getpid
#endif
#endif
#define OPENMS_CORE_DUMP_ENVNAME "OPENMS_DUMP_CORE"
namespace OpenMS::Exception
{
  /// Installs the OpenMS terminate and new handlers process-wide.
  GlobalExceptionHandler::GlobalExceptionHandler() throw()
  {
    std::set_terminate(terminate);
    //std::set_unexpected(terminate); // removed in c++17
    std::set_new_handler(newHandler);
  }
  /// new-handler: converts allocation failure into an OpenMS OutOfMemory exception.
  void GlobalExceptionHandler::newHandler()
  {
    throw OutOfMemory(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION);
  }
  /// terminate handler for uncaught exceptions: prints the last exception info
  /// recorded via set()/setMessage(), optionally provokes a core dump, then aborts.
  /// NOTE(review): output goes to stdout although the comment mentions cerr -- confirm intent.
  void GlobalExceptionHandler::terminate() throw()
  {
    // add cerr to the log stream
    // and write all available information on
    // the exception to the log stream (potentially with an assigned file!)
    // and cerr
    std::cout << std::endl;
    std::cout << "---------------------------------------------------" << std::endl;
    std::cout << "FATAL: uncaught exception!" << std::endl;
    std::cout << "---------------------------------------------------" << std::endl;
    if ((line_() != -1) && (name_() != "unknown"))
    {
      std::cout << "last entry in the exception handler: " << std::endl;
      std::cout << "exception of type " << name_().c_str() << " occurred in line "
                << line_() << ", function " << function_() << " of " << file_().c_str() << std::endl;
      std::cout << "error message: " << what_().c_str() << std::endl;
    }
    std::cout << "---------------------------------------------------" << std::endl;
#ifndef OPENMS_WINDOWSPLATFORM
    // if the environment variable declared in OPENMS_CORE_DUMP_ENVNAME
    // is set, provoke a core dump (this is helpful to get a stack traceback)
    if (getenv(OPENMS_CORE_DUMP_ENVNAME) != nullptr)
    {
#ifdef OPENMS_HAS_KILL
      std::cout << "dumping core file.... (to avoid this, unset " << OPENMS_CORE_DUMP_ENVNAME
                << " in your environment)" << std::endl;
      // provoke a core dump
      kill(getpid(), SIGSEGV);
#endif
    }
#endif
    // otherwise exit as default terminate() would:
    abort();
  }
  /// Records the full origin and message of the most recent exception
  /// (displayed by terminate() if the exception is never caught).
  void GlobalExceptionHandler::set(const std::string & file, int line, const std::string & function, const std::string & name, const std::string & message) throw()
  {
    GlobalExceptionHandler::name_() = name;
    GlobalExceptionHandler::line_() = line;
    GlobalExceptionHandler::what_() = message;
    GlobalExceptionHandler::file_() = file;
    GlobalExceptionHandler::function_() = function;
  }
  // Individual setters for the recorded last-exception fields (see set()).
  void GlobalExceptionHandler::setName(const std::string & name) throw()
  {
    GlobalExceptionHandler::name_() = name;
  }
  void GlobalExceptionHandler::setMessage(const std::string & message) throw()
  {
    GlobalExceptionHandler::what_() = message;
  }
  void GlobalExceptionHandler::setFile(const std::string & file) throw()
  {
    GlobalExceptionHandler::file_() = file;
  }
  void GlobalExceptionHandler::setFunction(const std::string & function) throw()
  {
    GlobalExceptionHandler::function_() = function;
  }
  void GlobalExceptionHandler::setLine(int line) throw()
  {
    GlobalExceptionHandler::line_() = line;
  }
  /// Returns the singleton (created on first use, intentionally never destroyed).
  /// NOTE(review): creation is not synchronized -- assumes first use happens
  /// before threads are spawned; confirm if that ever changes.
  GlobalExceptionHandler & GlobalExceptionHandler::getInstance()
  {
    static GlobalExceptionHandler * globalExceptionHandler_;
    if (globalExceptionHandler_ == nullptr)
    {
      globalExceptionHandler_ = new GlobalExceptionHandler;
    }
    return *globalExceptionHandler_;
  }
  // Lazily-created, intentionally leaked state holders ("immortal" statics):
  // heap allocation avoids static-destruction-order problems, since these may
  // be read from the terminate handler during process shutdown.
  std::string & GlobalExceptionHandler::file_()
  {
    static std::string * file_ = nullptr;
    if (file_ == nullptr)
    {
      file_ = new std::string;
      *file_ = "unknown";
    }
    return *file_;
  }
  int & GlobalExceptionHandler::line_()
  {
    static int * line_ = nullptr;
    if (line_ == nullptr)
    {
      line_ = new int;
      *line_ = -1;
    }
    return *line_;
  }
  std::string & GlobalExceptionHandler::function_()
  {
    static std::string * function_ = nullptr;
    if (function_ == nullptr)
    {
      function_ = new std::string;
      *function_ = "unknown";
    }
    return *function_;
  }
  std::string & GlobalExceptionHandler::name_()
  {
    static std::string * name_ = nullptr;
    if (name_ == nullptr)
    {
      name_ = new std::string;
      *name_ = "unknown exception";
    }
    return *name_;
  }
  std::string & GlobalExceptionHandler::what_()
  {
    static std::string * what_ = nullptr;
    if (what_ == nullptr)
    {
      what_ = new std::string;
      *what_ = " - ";
    }
    return *what_;
  }
} // namespace OpenMS::Exception
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CONCEPT/LogConfigHandler.cpp | .cpp | 13,763 | 438 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg$
// $Authors: Stephan Aiche$
// --------------------------------------------------------------------------
#include <iostream>
#include <algorithm>
#include <OpenMS/CONCEPT/LogConfigHandler.h>
using std::cout;
using std::cerr;
using std::endl;
namespace OpenMS
{
  // Name of the Param entry that carries the logging configuration commands.
  String LogConfigHandler::PARAM_NAME = "log";
  namespace
  {
    // Order of log levels from lowest to highest priority
    const std::vector<String> LOG_LEVELS = {"DEBUG", "INFO", "WARNING", "ERROR", "FATAL_ERROR"};
  }
  /// Default configuration: fatal/error messages go to cerr, warn/info to cout
  /// (debug output has no default target).
  LogConfigHandler::LogConfigHandler()
  {
    // add default configuration
    fatal_streams_.insert("cerr");
    error_streams_.insert("cerr");
    warn_streams_.insert("cout");
    info_streams_.insert("cout");
  }
LogConfigHandler::LogConfigHandler(const LogConfigHandler & other)
{
debug_streams_ = other.debug_streams_;
info_streams_ = other.info_streams_;
warn_streams_ = other.warn_streams_;
error_streams_ = other.error_streams_;
fatal_streams_ = other.fatal_streams_;
stream_type_map_ = other.stream_type_map_;
}
  // Destructor and assignment need no special handling (members manage themselves).
  LogConfigHandler::~LogConfigHandler() = default;
  LogConfigHandler & LogConfigHandler::operator=(const LogConfigHandler & source) = default;
Param LogConfigHandler::parse(const StringList & settings)
{
Param p;
String suffix = " FILE";
std::vector<std::string> commands;
for (StringList::const_iterator iter = settings.begin(); iter != settings.end(); ++iter)
{
// split by " " to get all keywords
StringList l;
(*iter).split(' ', l, true);
if (l.size() < 2 || l.size() > 3)
{
throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, (*iter), "Error while parsing logger config. Setting can only have 2 or 3 arguments.");
}
// we parse a command line here, so we append a FILE to each of the arguments
// to indicate, that all of these streams are FILE streams
// for cout/cerr the type parameter is ignored
String new_command = *iter + suffix;
commands.push_back(new_command);
}
p.setValue(LogConfigHandler::PARAM_NAME, commands, "List of all settings that should be applied to the current Logging Configuration");
return p;
}
/**
 * @brief Applies a logging configuration (as produced by parse()) to the global log streams.
 *
 * Each configuration entry has the shape
 * "<LOG_NAME> <add|remove|clear> [<stream_name> <FILE|STRING>]".
 * "add" attaches a stream to a log, "remove" detaches it, and "clear" detaches
 * every stream previously registered for that log.
 *
 * @throws Exception::ElementNotFound if the log name is unknown
 * @throws Exception::IllegalArgument if a stream name is re-registered with a different type
 * @throws Exception::FileNotWritable if a file stream cannot be opened
 */
void LogConfigHandler::configure(const Param & param)
{
  StringList configurations = ListUtils::toStringList<std::string>(param.getValue(LogConfigHandler::PARAM_NAME));
  for (StringList::const_iterator iter = configurations.begin(); iter != configurations.end(); ++iter)
  {
    // split by " " to get the commands
    StringList commands;
    iter->split(' ', commands, true);
    // NOTE(review): commands[0], commands[1] and commands[2] are indexed without a
    // size check; entries are assumed to have the token count enforced by parse() -- confirm for other callers
    Logger::LogStream & log = getLogStreamByName_(commands[0]);
    // convenience variables
    String & command = commands[1];
    // identify action
    if (command == "add")
    {
      // convenience variables
      const String & stream_name = commands[2];
      // add the stream given by the 3rd argument to the defined log
      if (stream_name == "cout")
      {
        log.insert(cout);
      }
      else if (stream_name == "cerr")
      {
        log.insert(cerr);
      }
      else
      {
        if (commands.size() <= 3) // write error to cerr and not a LogStream (because we're just configuring it...)
        {
          std::cerr << "Error during configuring logging: the command '" << (*iter) << "' requires 4 entries but has only " << commands.size() << "\n";
          continue;
        }
        const String & stream_type = commands[3];
        // check if a stream with the same name, but different type was already registered
        auto existing = stream_type_map_.find(stream_name);
        if (existing != stream_type_map_.end())
        {
          if (existing->second != getStreamTypeByName_(stream_type))
          {
            throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "A stream with the same name but different type was already registered.");
          }
        }
        StreamHandler::StreamType type = getStreamTypeByName_(stream_type);
        // registerStream opens/creates the underlying ostream; 0 signals failure
        Int status = STREAM_HANDLER.registerStream(type, stream_name);
        if (!status)
        {
          // operation failed
          throw Exception::FileNotWritable(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, commands[2]);
        }
        log.insert(STREAM_HANDLER.getStream(type, stream_name));
        // "[%S]" prefixes each line with the message's severity
        log.setPrefix(STREAM_HANDLER.getStream(type, stream_name), "[%S] ");
        stream_type_map_[stream_name] = type;
      }
      // register the stream internally, so that the LogConfigHandler knows
      // which streams were added
      getConfigSetByName_(commands[0]).insert(stream_name);
    }
    else if (command == "remove")
    {
      // convenience variables
      const String & stream_name = commands[2];
      // remove the stream given by the 3rd argument from the defined log
      if (stream_name == "cout")
      {
        log.remove(cout);
      }
      else if (stream_name == "cerr")
      {
        log.remove(cerr);
      }
      else
      {
        if (commands.size() <= 3) // write error to cerr and not a LogStream (because we're just configuring it...)
        {
          std::cerr << "Error during configuring logging: the command '" << (*iter) << "' requires 4 entries but has only " << commands.size() << "\n";
          continue;
        }
        const String & stream_type = commands[3];
        StreamHandler::StreamType type = getStreamTypeByName_(stream_type);
        // it is a file, get the ostream from the StreamHandler
        if (STREAM_HANDLER.hasStream(type, stream_name))
        {
          log.remove(STREAM_HANDLER.getStream(type, stream_name));
          STREAM_HANDLER.unregisterStream(type, stream_name);
        }
      }
      // unregister the stream internally, so that the LogConfigHandler knows
      // which streams were added
      getConfigSetByName_(commands[0]).erase(stream_name);
      // remove the type from the stream_type_map if there is no
      // stream referencing it anymore
      if (!STREAM_HANDLER.hasStream(stream_type_map_[stream_name], stream_name))
      {
        stream_type_map_.erase(stream_name);
      }
    }
    else if (command == "clear")
    {
      // remove all streams from the given log
      for (std::set<String>::iterator it = getConfigSetByName_(commands[0]).begin(); it != getConfigSetByName_(commands[0]).end(); ++it)
      {
        if (*it == "cout")
        {
          log.remove(cout);
        }
        else if (*it == "cerr")
        {
          log.remove(cerr);
        }
        else // handle the file streams
        {
          log.remove(STREAM_HANDLER.getStream(stream_type_map_[*it], *it));
          STREAM_HANDLER.unregisterStream(stream_type_map_[*it], *it);
          // remove the type from the stream_type_map if there is no
          // stream referencing it anymore
          if (!STREAM_HANDLER.hasStream(stream_type_map_[*it], *it))
          {
            stream_type_map_.erase(*it);
          }
        }
      }
      // clean the set
      getConfigSetByName_(commands[0]).clear();
    }
  }
}
/**
 * @brief Sets the minimum log level: all levels below @p log_level are muted,
 *        the target level and everything above it are reset to their configured streams.
 *
 * @param log_level one of DEBUG, INFO, WARNING, ERROR, FATAL_ERROR, or NONE (mute everything)
 * @throws Exception::IllegalArgument for any other value
 */
void LogConfigHandler::setLogLevel(const String & log_level)
{
  // Special case: "NONE" means disable all logging
  if (log_level == "NONE")
  {
    for (const auto& lvl : LOG_LEVELS)
    {
      getLogStreamByName_(lvl).removeAllStreams();
    }
    return;
  }
  // Find the index of the target log level
  auto target_it = std::find(LOG_LEVELS.begin(), LOG_LEVELS.end(), log_level);
  if (target_it == LOG_LEVELS.end())
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
      "Invalid log level '" + log_level + "'. Valid levels are: DEBUG, INFO, WARNING, ERROR, FATAL_ERROR, NONE");
  }
  size_t target_index = std::distance(LOG_LEVELS.begin(), target_it);
  // Remove streams from all levels below the target level
  for (size_t i = 0; i < target_index; ++i)
  {
    getLogStreamByName_(LOG_LEVELS[i]).removeAllStreams();
  }
  // Restore configured streams for the target level and all levels above it
  for (size_t i = target_index; i < LOG_LEVELS.size(); ++i)
  {
    const String& lvl = LOG_LEVELS[i];
    Logger::LogStream& log = getLogStreamByName_(lvl);
    const std::set<String>& configured_streams = getConfigSetByName_(lvl);
    // First, remove all current streams
    log.removeAllStreams();
    // Then add back the configured streams
    for (const String& stream_name : configured_streams)
    {
      if (stream_name == "cout")
      {
        log.insert(cout);
      }
      else if (stream_name == "cerr")
      {
        log.insert(cerr);
      }
      else
      {
        // Handle file/string streams from StreamHandler
        auto it = stream_type_map_.find(stream_name);
        if (it != stream_type_map_.end())
        {
          StreamHandler::StreamType type = it->second;
          if (STREAM_HANDLER.hasStream(type, stream_name))
          {
            log.insert(STREAM_HANDLER.getStream(type, stream_name));
            // re-apply the severity prefix used by configure()
            log.setPrefix(STREAM_HANDLER.getStream(type, stream_name), "[%S] ");
          }
        }
      }
    }
  }
}
/**
 * @brief Maps a log level name to the corresponding global LogStream.
 * @throws Exception::ElementNotFound for an unknown name.
 */
Logger::LogStream & LogConfigHandler::getLogStreamByName_(const String & stream_name)
{
  if (stream_name == "DEBUG")
  {
    return getGlobalLogDebug();
  }
  if (stream_name == "INFO")
  {
    return getGlobalLogInfo();
  }
  if (stream_name == "WARNING")
  {
    return getGlobalLogWarn();
  }
  if (stream_name == "ERROR")
  {
    return getGlobalLogError();
  }
  if (stream_name == "FATAL_ERROR")
  {
    return getGlobalLogFatal();
  }
  throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, stream_name);
}
std::set<String> & LogConfigHandler::getConfigSetByName_(const String & stream_type)
{
std::set<String> * s = &debug_streams_;
if (stream_type == "DEBUG")
{
s = &debug_streams_;
}
else if (stream_type == "INFO")
{
s = &info_streams_;
}
else if (stream_type == "WARNING")
{
s = &warn_streams_;
}
else if (stream_type == "ERROR")
{
s = &error_streams_;
}
else if (stream_type == "FATAL_ERROR")
{
s = &fatal_streams_;
}
else
{
throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, stream_type);
}
return *s;
}
/**
 * @brief Returns the registered ostream for @p name via the global StreamHandler.
 * @throws Exception::IllegalArgument if no stream with that name was registered.
 */
std::ostream & LogConfigHandler::getStream(const String & name)
{
  const auto type_entry = stream_type_map_.find(name);
  if (type_entry == stream_type_map_.end())
  {
    // there is no stream with this name
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "There is no stream with the given name.");
  }
  return STREAM_HANDLER.getStream(type_entry->second, name);
}
/**
 * @brief Translates a stream-type keyword ("FILE" or "STRING") into the StreamHandler enum.
 * @throws Exception::IllegalArgument for anything else.
 */
StreamHandler::StreamType LogConfigHandler::getStreamTypeByName_(const String & stream_type)
{
  if (stream_type == "FILE")
  {
    return StreamHandler::FILE;
  }
  if (stream_type == "STRING")
  {
    return StreamHandler::STRING;
  }
  // unsupported log type
  throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "The log type " + stream_type + " is not supported");
}
void printStreamConfig_(std::ostream & os, const String & name, const std::set<String> & stream_names, const std::map<String, StreamHandler::StreamType> & stream_type_map);
void printStreamConfig_(std::ostream & os, const String & name, const std::set<String> & stream_names, const std::map<String, StreamHandler::StreamType> & stream_type_map)
{
os << name << endl;
for (std::set<String>::const_iterator it = stream_names.begin(); it != stream_names.end(); ++it)
{
os << "->" << "\t" << *it;
// append stream type
os << " (";
switch ((stream_type_map.find(*it))->second)
{
case StreamHandler::STRING:
os << "STRINGSTREAM";
break;
case StreamHandler::FILE:
default:
os << "FILE";
break;
}
os << ")";
os << std::endl;
}
}
std::ostream & operator<<(std::ostream & os, LogConfigHandler const & lch)
{
printStreamConfig_(os, "OPENMS_LOG_DEBUG", lch.debug_streams_, lch.stream_type_map_);
printStreamConfig_(os, "OPENMS_LOG_INFO", lch.info_streams_, lch.stream_type_map_);
printStreamConfig_(os, "OPENMS_LOG_WARN", lch.warn_streams_, lch.stream_type_map_);
printStreamConfig_(os, "OPENMS_LOG_ERROR", lch.error_streams_, lch.stream_type_map_);
printStreamConfig_(os, "OPENMS_LOG_FATAL_ERROR", lch.fatal_streams_, lch.stream_type_map_);
return os;
}
// Lazily created process-wide singleton instance.
LogConfigHandler * LogConfigHandler::instance_ = nullptr;
// Returns the singleton, creating it on first use.
// NOTE(review): this lazy initialization is not thread-safe; concurrent first
// calls could each allocate an instance -- confirm first use is single-threaded.
LogConfigHandler * LogConfigHandler::getInstance()
{
  if (LogConfigHandler::instance_ == nullptr)
  {
    LogConfigHandler::instance_ = new LogConfigHandler();
  }
  return LogConfigHandler::instance_;
}
} // end namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CONCEPT/Constants.cpp | .cpp | 422 | 17 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg$
// $Authors: Marc Sturm $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/Constants.h>
namespace OpenMS
{
} //OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CONCEPT/UniqueIdInterface.cpp | .cpp | 1,184 | 54 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/UniqueIdInterface.h>
#include <OpenMS/CONCEPT/UniqueIdGenerator.h>
namespace OpenMS
{
// Unconditionally assigns a freshly generated unique id.
// Returns 1, i.e. the number of ids that were (re)assigned.
Size UniqueIdInterface::setUniqueId()
{
  unique_id_ = UniqueIdGenerator::getUniqueId();
  return 1;
}
// Assigns a fresh unique id only when the current one is invalid.
// Returns the number of ids that were changed (0 or 1).
Size UniqueIdInterface::ensureUniqueId()
{
  if (hasValidUniqueId())
  {
    return 0; // nothing to do, id already valid
  }
  unique_id_ = UniqueIdGenerator::getUniqueId();
  return 1;
}
void UniqueIdInterface::setUniqueId(const String & rhs)
{
clearUniqueId();
String::size_type last_underscore = rhs.rfind('_');
String s = rhs.substr(last_underscore + 1);
for (String::const_iterator s_i = s.begin(); s_i < s.end(); ++s_i)
{
int i = (*s_i - '0');
if (i < 0 || i > 9)
{
clearUniqueId();
return;
}
unique_id_ = 10 * unique_id_ + i;
}
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CONCEPT/VersionInfo.cpp | .cpp | 4,707 | 160 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Clemens Groepl, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/VersionInfo.h>
#include <OpenMS/DATASTRUCTURES/String.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <fstream>
// these headers are generated by the build system
// and therefore intentionally break the naming convention (tagging them as automatically build)
#include <OpenMS/openms_package_version.h>
using namespace std;
namespace OpenMS
{
// Sentinel returned by create() when a version string cannot be parsed
// (all fields keep their default values).
const VersionInfo::VersionDetails VersionInfo::VersionDetails::EMPTY;
// Lexicographic comparison on (major, minor, patch); when the numeric parts
// are equal, a pre-release version compares less than the plain release.
bool VersionInfo::VersionDetails::operator<(const VersionInfo::VersionDetails & rhs) const
{
  if (version_major != rhs.version_major) { return version_major < rhs.version_major; }
  if (version_minor != rhs.version_minor) { return version_minor < rhs.version_minor; }
  if (version_patch != rhs.version_patch) { return version_patch < rhs.version_patch; }
  // note: if one version is pre-release, then it should compare as "less than"
  return !pre_release_identifier.empty() && rhs.pre_release_identifier.empty();
}
// Two versions are equal only when all four components agree.
bool VersionInfo::VersionDetails::operator==(const VersionInfo::VersionDetails & rhs) const
{
  if (version_major != rhs.version_major) { return false; }
  if (version_minor != rhs.version_minor) { return false; }
  if (version_patch != rhs.version_patch) { return false; }
  return pre_release_identifier == rhs.pre_release_identifier;
}
// Negation of operator==.
bool VersionInfo::VersionDetails::operator!=(const VersionInfo::VersionDetails & rhs) const
{
  return !(*this == rhs);
}
// "Greater" means neither equal nor less.
// NOTE(review): for two versions differing only in non-empty pre-release ids,
// operator< is false both ways and operator== is false, so a > b and b > a are
// both true -- the ordering is not antisymmetric in that corner case; kept as-is.
bool VersionInfo::VersionDetails::operator>(const VersionInfo::VersionDetails & rhs) const
{
  return !(*this < rhs) && !(*this == rhs);
}
/**
 * @brief Parses a version string "major.minor[.patch][-prerelease]" into a VersionDetails.
 *
 * Requires at least "major.minor"; returns VersionDetails::EMPTY whenever a
 * numeric component fails to parse. The "-prerelease" suffix is only
 * recognized after a patch component (i.e. after a second dot).
 */
VersionInfo::VersionDetails VersionInfo::VersionDetails::create(const String & version) //static
{
  VersionInfo::VersionDetails result;
  size_t first_dot = version.find('.');
  // we demand at least one "."
  if (first_dot == string::npos)
  {
    return VersionInfo::VersionDetails::EMPTY;
  }
  try
  {
    result.version_major = String(version.substr(0, first_dot)).toInt();
  }
  catch (Exception::ConversionError & /*e*/)
  {
    return VersionInfo::VersionDetails::EMPTY;
  }
  // returns npos if no second "." is found - which does not hurt
  // (substr then takes everything up to the end of the string)
  size_t second_dot = version.find('.', first_dot + 1);
  try
  {
    result.version_minor = String(version.substr(first_dot + 1, second_dot - (first_dot + 1))).toInt();
  }
  catch (Exception::ConversionError & /*e*/)
  {
    return VersionInfo::VersionDetails::EMPTY;
  }
  // if there is no second dot: return
  if (second_dot == string::npos)
  {
    return result;
  }
  // returns npos if no final pre-release dash "-" is found - which does not hurt
  size_t pre_release_dash = version.find('-', second_dot + 1);
  try
  {
    result.version_patch = String(version.substr(second_dot + 1, pre_release_dash - (second_dot + 1))).toInt();
  }
  catch (Exception::ConversionError & /*e*/)
  {
    return VersionInfo::VersionDetails::EMPTY;
  }
  if (pre_release_dash == string::npos)
  {
    return result;
  }
  // everything after the dash is kept verbatim as the pre-release identifier
  result.pre_release_identifier = String(version.substr(pre_release_dash + 1, version.size() - (pre_release_dash + 1)));
  return result;
}
// Returns the build date and time of this translation unit ("Mmm dd yyyy, hh:mm:ss").
String VersionInfo::getTime()
{
  // built exactly once on first call (thread-safe function-local static)
  static const String result = String(__DATE__) + ", " + __TIME__;
  return result;
}
// Returns the OpenMS package version string (whitespace-trimmed), cached after the first call.
String VersionInfo::getVersion()
{
  static const String result = []() {
    String v = OPENMS_PACKAGE_VERSION;
    v.trim();
    return v;
  }();
  return result;
}
// Returns the parsed form of getVersion(), computed once on first use.
VersionInfo::VersionDetails VersionInfo::getVersionStruct()
{
  static const VersionDetails result = VersionDetails::create(getVersion());
  return result;
}
// Returns the git commit SHA1 baked in at build time.
String VersionInfo::getRevision()
{
  return String(OPENMS_GIT_SHA1);
}
// Returns the git branch name baked in at build time.
String VersionInfo::getBranch()
{
  return String(OPENMS_GIT_BRANCH);
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/TOPDOWN/Qvalue.cpp | .cpp | 7,599 | 200 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Kyowon Jeong$
// $Authors: Kyowon Jeong$
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/TOPDOWN/PeakGroup.h>
#include <OpenMS/ANALYSIS/TOPDOWN/Qvalue.h>
#include <OpenMS/DATASTRUCTURES/MatrixEigen.h>
#include <Eigen/QR>
namespace OpenMS
{
/// simple function to generate distribution from input vector values
/// Builds a histogram (num_bin x 1 column matrix) of the values falling into
/// [minv, maxv); values outside that half-open interval are ignored.
Matrix<double> getDistVector(const std::vector<double>& values, Size num_bin, double minv, double maxv)
{
  Matrix<double> hist(num_bin, 1, .0);
  const double range = maxv - minv;
  for (const double v : values)
  {
    // skip out-of-range values; this also guards the division below
    if (v < minv || v >= maxv) { continue; }
    const Size bin = Size((v - minv) / range * num_bin);
    hist.setValue(bin, 0, hist.getValue(bin, 0) + 1);
  }
  return hist;
}
/**
 * @brief Computes q-values for all deconvolved peak groups, per MS level,
 *        using signal- and noise-decoy score distributions; also propagates
 *        q-values to MSn precursor peak groups.
 *
 * The noise-decoy contribution is reweighted by a least-squares fit of the
 * noise-decoy score histogram against the (signal-decoy-subtracted) target
 * histogram before counting decoys above each target score.
 *
 * @return the final noise weight estimated for the last processed MS level
 */
double Qvalue::updatePeakGroupQvalues(std::vector<DeconvolvedSpectrum>& deconvolved_spectra) // per ms level + precursor update as well.
{
  double noise_weight = 1;
  std::map<uint, std::vector<double>> score_map_target; // target PeakGroupScoring vector per ms level
  std::map<uint, std::vector<double>> score_signal_decoy_map; // signal decoy PeakGroupScoring vector per ms level
  std::map<uint, std::vector<double>> score_noise_decoy_map; // noise decoy PeakGroupScoring vector per ms level
  std::map<uint, std::map<double, double>> qscore_qvalue_map; // mapping from qscore to qvalue
  // to calculate qvalues per ms level, store Qscores per ms level
  // (each feature contributes at most one score: index 0 means "no feature")
  std::set<uint> used_feature_indices;
  for (auto& deconvolved_spectrum : deconvolved_spectra)
  {
    if (deconvolved_spectrum.empty())
      continue;
    uint ms_level = deconvolved_spectrum.getOriginalSpectrum().getMSLevel();
    for (auto& pg : deconvolved_spectrum)
    {
      // skip peak groups whose feature was already counted
      if (pg.getFeatureIndex() > 0 && used_feature_indices.find(pg.getFeatureIndex()) != used_feature_indices.end())
        continue;
      used_feature_indices.insert(pg.getFeatureIndex());
      if (pg.getTargetDecoyType() == PeakGroup::TargetDecoyType::target)
      {
        score_map_target[ms_level].push_back(pg.getQscore2D());
      }
      else if (pg.getTargetDecoyType() == PeakGroup::TargetDecoyType::signal_decoy)
      {
        score_signal_decoy_map[ms_level].push_back(pg.getQscore2D());
      }
      else if (pg.getTargetDecoyType() == PeakGroup::TargetDecoyType::noise_decoy)
      {
        score_noise_decoy_map[ms_level].push_back(pg.getQscore2D());
      }
    }
  }
  // per ms score, calculate Qvalues
  for (auto& [ms_level, scores_target] : score_map_target)
  {
    auto& scores_signal_decoy = score_signal_decoy_map[ms_level];
    auto& scores_noise_decoy = score_noise_decoy_map[ms_level];
    std::sort(scores_target.begin(), scores_target.end());
    std::sort(scores_signal_decoy.begin(), scores_signal_decoy.end());
    std::sort(scores_noise_decoy.begin(), scores_noise_decoy.end());
    double sum = 0;
    double max_score_for_weight_calculation = .7;
    double min_score_for_weight_calculation = .3;
    // walk down from the top signal-decoy scores until 80% of their total mass
    // (or a score below .5) is reached; this caps the fit window upper bound
    double iso_sum = std::accumulate(scores_signal_decoy.begin(), scores_signal_decoy.end(), .0);
    for (int i = scores_signal_decoy.size() - 1; i >= 0; i--)
    {
      sum += scores_signal_decoy[i];
      if (sum > iso_sum * .8 || scores_signal_decoy[i] < .5)
      {
        max_score_for_weight_calculation = std::min(max_score_for_weight_calculation, scores_signal_decoy[i]);
        break;
      }
    }
    Size num_bin = 6;
    // get the score distributions
    auto score_dist_target = getDistVector(scores_target, num_bin, min_score_for_weight_calculation, max_score_for_weight_calculation);
    auto score_dist_noise_decoy = getDistVector(scores_noise_decoy, num_bin, min_score_for_weight_calculation, max_score_for_weight_calculation);
    auto score_dist_signal_decoy = getDistVector(scores_signal_decoy, num_bin, min_score_for_weight_calculation, max_score_for_weight_calculation);
    // noise decoy weight calculation using Least Square
    Matrix<double> left(score_dist_target.rows(), 2, 1);
    for (int r = 0; r < score_dist_target.rows(); r++)
    {
      // fit (noise, constant) against target minus signal-decoy counts
      double v = score_dist_target.getValue(r, 0);
      v -= score_dist_signal_decoy.getValue(r, 0);
      score_dist_target.setValue(r, 0, v);
      left.setValue(r, 0, score_dist_noise_decoy.getValue(r, 0));
    }
    auto calculated_vec = eigenView(left).completeOrthogonalDecomposition().pseudoInverse() * eigenView(score_dist_target);
    noise_weight = calculated_vec.row(0)[0];
    if (calculated_vec.row(1)[0] < 0)
    {
      // negative intercept: redo the fit without the constant column
      auto calculated_vec_non_negative = eigenView(score_dist_noise_decoy).completeOrthogonalDecomposition().pseudoInverse() * eigenView(score_dist_target);
      noise_weight = calculated_vec_non_negative.row(0)[0];
    }
    if (std::isnan(noise_weight)) noise_weight = 1.0;
    noise_weight = std::max(noise_weight, 0.01);
    // re-sort descending for the decoy-counting sweep below
    std::sort(scores_target.rbegin(), scores_target.rend());
    std::sort(scores_signal_decoy.rbegin(), scores_signal_decoy.rend());
    std::sort(scores_noise_decoy.rbegin(), scores_noise_decoy.rend());
    // now get the qvalues
    auto& map_qvalue = qscore_qvalue_map[ms_level];
    // NOTE(review): nom_c / dc are never incremented, so the "charge decoy"
    // contribution is effectively zero -- confirm whether that is intentional
    double nom_i = 0, nom_c = 0, nom_n = 0;
    Size j_i = 0, j_n = 0;
    for (Size i = 0; i < scores_target.size(); i++)
    {
      double ts = scores_target[i];
      double di = 0, dc = 0, dn = 0;
      // collapse runs of equal target scores into one q-value entry
      while (i < scores_target.size() - 1 && scores_target[i + 1] == ts)
      {
        i++;
      }
      while (j_n < scores_noise_decoy.size() && scores_noise_decoy[j_n] >= ts)
      {
        dn += noise_weight;
        ++j_n;
      }
      while (j_i < scores_signal_decoy.size() && scores_signal_decoy[j_i] >= ts)
      {
        di++;
        ++j_i;
      }
      nom_n += dn;
      nom_i += di;
      nom_c += dc;
      // decoys above ts over targets above ts (1 + i targets so far)
      double tmp_q = (nom_i + nom_c + nom_n) / double(1 + i);
      map_qvalue[ts] = std::min(1.0, tmp_q);
    }
  }
  // refine qvalues to make them monotonic: ascending over qscore with a running
  // minimum ensures the q-value never increases with a better score
  for (const auto& titem : score_map_target)
  {
    uint ms_level = titem.first;
    auto& map_qvalue = qscore_qvalue_map[ms_level];
    double cummin = 1.0;
    {
      for (auto&& rit = map_qvalue.begin(); rit != map_qvalue.end(); ++rit)
      {
        cummin = std::min(rit->second, cummin);
        rit->second = cummin;
      }
    }
    for (auto& deconvolved_spectrum : deconvolved_spectra)
    {
      if (deconvolved_spectrum.empty() || deconvolved_spectrum.isDecoy())
        continue;
      // set precursor Qvalue here (the precursor of an MSn+1 spectrum was
      // deconvolved at this ms_level)
      if (deconvolved_spectrum.getOriginalSpectrum().getMSLevel() == ms_level + 1 && !deconvolved_spectrum.getPrecursorPeakGroup().empty())
      {
        auto precursor_pg = deconvolved_spectrum.getPrecursorPeakGroup();
        double qs = precursor_pg.getQscore2D();
        precursor_pg.setQvalue(map_qvalue[qs]);
        deconvolved_spectrum.setPrecursorPeakGroup(precursor_pg);
      }
      if (deconvolved_spectrum.getOriginalSpectrum().getMSLevel() != ms_level)
      {
        continue;
      }
      for (auto& pg : deconvolved_spectrum)
      {
        pg.setQvalue(map_qvalue[pg.getQscore2D()]);
      }
    }
  }
  return noise_weight;
}
} // namespace OpenMS | C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/TOPDOWN/FLASHDeconvAlgorithm.cpp | .cpp | 29,845 | 750 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Kyowon Jeong, Jihyung Kim, Jaekwan Kim $
// $Authors: Kyowon Jeong, Jihyung Kim, Jaekwan Kim $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/TOPDOWN/DeconvolvedSpectrum.h>
#include <OpenMS/ANALYSIS/TOPDOWN/FLASHDeconvAlgorithm.h>
#include <OpenMS/ANALYSIS/TOPDOWN/MassFeatureTrace.h>
#include <OpenMS/ANALYSIS/TOPDOWN/PeakGroup.h>
#include <OpenMS/ANALYSIS/TOPDOWN/Qvalue.h>
#include <OpenMS/ANALYSIS/TOPDOWN/TopDownIsobaricQuantification.h>
#include <OpenMS/PROCESSING/SPECTRAMERGING/SpectraMerger.h>
#include <OpenMS/PROCESSING/FILTERING/ThresholdMower.h>
#include <OpenMS/METADATA/SpectrumLookup.h>
#include <OpenMS/MATH/STATISTICS/GaussFitter.h>
#ifdef _OPENMP
#include <omp.h>
#endif
namespace OpenMS
{
// Hard caps on how many peaks per spectrum survive filterLowPeaks_():
// centroided spectra keep at most 3e4 peaks, profile spectra at most 1e5.
inline const Size max_peak_count_for_centroid_ = 3e4;
inline const Size max_peak_count_for_profile_ = 1e5;
/**
 * @brief Registers all user-facing parameters (own settings plus the nested
 *        SpectralDeconvolution "SD:", MassFeatureTrace "ft:", and isobaric
 *        quantification "iq:" sub-sections) and applies the defaults.
 */
FLASHDeconvAlgorithm::FLASHDeconvAlgorithm(): DefaultParamHandler("FLASHDeconvAlgorithm"), ProgressLogger()
{
  // FLASHIda log file read somewhere. ida_log_file_ should be updated.
  defaults_.setValue("ida_log", "", "Provide the log file generated by FLASHIda (e.g., IDA*.log) for coupling with FLASHIda acquisition.");
  defaults_.setValue(
    "report_FDR", "false",
    "Generate q-values (approximate FDR) for deconvolved masses. Decoy masses are also reported which were used for FDR calculation. (Beta version)");
  defaults_.setValidStrings("report_FDR", {"true", "false"});
  defaults_.setValue("allowed_isotope_error", 0,
                     "Tolerance for isotope index errors when calculating FDR. For instance, setting a value of 2 permits the inclusion of up to 2 isotope errors as valid matches. Beta version.");
  defaults_.addTag("allowed_isotope_error", "advanced");
  defaults_.setValue("use_RNA_averagine", "false", "Use the RNA (nucleotide) averagine model for deconvolution.");
  defaults_.setValidStrings("use_RNA_averagine", {"true", "false"});
  defaults_.addTag("use_RNA_averagine", "advanced");
  defaults_.setValue(
    "precursor_MS1_window", 1,
    "Number of MS1 spectra around each MS2 spectrum to search for precursor peaks when determining the MS2 precursors. For MS2 spectrum, the mass of precursor ion should be determined for better deconvolution and reliable identification. "
    "If the mass of precursor ion is not found in the immediately preceding MS1 spectrum, previous or next MS1 spectra may be used instead. "
    "This parameter determines up to how many MS1 spectra around each MS2 spectrum will be searched.");
  defaults_.setMinInt("precursor_MS1_window", 1);
  defaults_.addTag("precursor_MS1_window", "advanced");
  defaults_.setValue(
    "isolation_window", 5.0,
    "Specify the isolation window width for precursor determination. Used when this information is absent in the mzML file.");
  defaults_.addTag("isolation_window", "advanced");
  defaults_.setValue("merging_method", 0,
                     "Method for merging spectra before deconvolution. 0: No merging 1: Gaussian averaging per MS level, effective for Q-TOF datasets. For MSn (n > 1), only the spectra from the same precursor mass "
                     "(subject to tolerance set by SD:tol) are averaged. 2: Block merging, combining all spectra into one per MS level (e.g., for NativeMS datasets).");
  defaults_.setMinInt("merging_method", 0);
  defaults_.setMaxInt("merging_method", 2);
  defaults_.setValue("merging_min_ms_level", 1, "Min MS level for merging");
  defaults_.setValue("merging_max_ms_level", 2, "Max MS level for merging");
  defaults_.setMinInt("merging_min_ms_level", 1);
  defaults_.setMinInt("merging_max_ms_level", 1);
  // nested spectral deconvolution parameters; allowed_isotope_error is owned here
  auto sd_defaults = SpectralDeconvolution().getDefaults();
  sd_defaults.remove("allowed_isotope_error");
  defaults_.insert("SD:", sd_defaults);
  // nested feature tracing parameters; negative defaults mean "inherit from SD:"
  Param mf_defaults = MassFeatureTrace().getDefaults();
  mf_defaults.setValue("min_cos", -1.0,
                       "Cosine similarity threshold between avg. and observed isotope pattern. When negative, MS1 cosine threshold for spectral "
                       "deconvolution (set by -SD:min_cos will be used ");
  mf_defaults.setValue(
    "mass_error_ppm", -1.0,
    "Specifies the mass error tolerance for feature tracing in ppm. When negative, the MS1 tolerance for deconvolution is used (e.g., 16 ppm is used when -SD:tol 16).");
  mf_defaults.addTag("min_cos", "advanced");
  mf_defaults.addTag("mass_error_ppm", "advanced");
  mf_defaults.remove("noise_threshold_int");
  mf_defaults.remove("reestimate_mt_sd");
  mf_defaults.remove("trace_termination_criterion");
  mf_defaults.remove("trace_termination_outliers");
  mf_defaults.remove("chrom_peak_snr");
  defaults_.insert("ft:", mf_defaults);
  defaults_.insert("iq:", TopDownIsobaricQuantification().getDefaults());
  defaultsToParam_();
}
// Pulls the user-facing parameters into member variables; invoked by
// DefaultParamHandler whenever the Param object changes.
void FLASHDeconvAlgorithm::updateMembers_()
{
  tols_ = param_.getValue("SD:tol");
  min_cos_ = param_.getValue("SD:min_cos");
  precursor_MS1_window_ = param_.getValue("precursor_MS1_window");
  use_RNA_averagine_ = param_.getValue("use_RNA_averagine") != "false";
  report_decoy_ = param_.getValue("report_FDR") != "false";
  merge_spec_ = param_.getValue("merging_method");
  isolation_window_size_ = param_.getValue("isolation_window");
  ida_log_file_ = param_.getValue("ida_log").toString();
  // reset the observed MS-level range; updateMSLevels_() narrows it when run
  current_min_ms_level_ = max_ms_level_;
  current_max_ms_level_ = 0;
}
// Scans the input once to record the range of MS levels actually present
// (current_min_ms_level_ / current_max_ms_level_), optionally forcing every
// spectrum to a fixed level first.
void FLASHDeconvAlgorithm::updateMSLevels_(MSExperiment& map)
{
  for (auto& spec : map)
  {
    // if forced_ms_level > 0, overwrite the MS level of every spectrum
    if (forced_ms_level_ > 0) { spec.setMSLevel(forced_ms_level_); }
    if (spec.empty()) { continue; }
    if (spec.getMSLevel() > max_ms_level_) { continue; }
    const uint ms_level = spec.getMSLevel();
    if (ms_level > current_max_ms_level_) { current_max_ms_level_ = ms_level; }
    if (ms_level < current_min_ms_level_) { current_min_ms_level_ = ms_level; }
  }
  // clamp the detected maximum to the supported maximum MS level
  if (current_max_ms_level_ > max_ms_level_) { current_max_ms_level_ = max_ms_level_; }
}
/**
 * @brief Removes low-intensity peaks from every spectrum in @p map.
 *
 * First drops near-zero peaks globally, then per spectrum keeps at most
 * max_peak_count_for_centroid_/profile_ peaks and additionally discards
 * peaks below 1/1000 of the spectrum's base peak intensity.
 */
void FLASHDeconvAlgorithm::filterLowPeaks_(MSExperiment& map)
{
  OPENMS_LOG_INFO << "Filtering low peaks in spectra ... ";
  ThresholdMower threshold_mower_filter; // threshold
  Param t_filter_param = threshold_mower_filter.getParameters(); //"threshold", .00001
  t_filter_param.setValue("threshold", 1e-6);
  threshold_mower_filter.setParameters(t_filter_param);
  threshold_mower_filter.filterPeakMap(map);
  // spectra are independent, so filter them in parallel
  #pragma omp parallel for default(none), shared(map)
  for (int i = 0; i < (int) map.size(); i++)
  {
    auto& it = map[i];
    if (it.empty()) continue;
    Size count = it.getType(false) == SpectrumSettings::SpectrumType::CENTROID ? max_peak_count_for_centroid_ : max_peak_count_for_profile_;
    // sort descending so the cutoff intensity is the peak at position 'count'
    it.sortByIntensity(true);
    double threshold = it.size() < count ? 0 : it[count].getIntensity();
    // never keep peaks below 1/1000 of the most intense peak
    threshold = std::max(threshold, (double)it.begin()->getIntensity() / 1000);
    // pop back the low intensity peaks using threshold
    while (! it.empty() && it.back().getIntensity() <= threshold)
    {
      it.pop_back();
    }
    // restore m/z ordering expected by downstream processing
    it.sortByPosition();
  }
  OPENMS_LOG_INFO << "Done" << std::endl;
}
/**
 * @brief Merges spectra of the given MS level according to merging_method:
 *        1 = Gaussian averaging (MS1) / precursor-mass-based merging (MSn),
 *        2 = block merging of all spectra of the level into one.
 *        Finishes by re-filtering low peaks.
 */
void FLASHDeconvAlgorithm::mergeSpectra_(MSExperiment& map, uint ms_level)
{
  SpectraMerger merger;
  merger.setLogType(CMD);
  Param sm_param = merger.getDefaults();
  // bin width derived from the per-level deconvolution tolerance
  sm_param.setValue("mz_binning_width", tols_[ms_level - 1] / 2.5);
  sm_param.setValue("mz_binning_width_unit", "ppm");
  uint min_ms_level = param_.getValue("merging_min_ms_level");
  uint max_ms_level = param_.getValue("merging_max_ms_level");
  if (merge_spec_ == 1 && ms_level >= min_ms_level && ms_level <= max_ms_level)
  {
    if (ms_level == 1)
    {
      OPENMS_LOG_INFO << "Gaussian averaging MS1 spectra ... " << std::endl;
      merger.setParameters(sm_param);
      map.sortSpectra();
      merger.average(map, "gaussian", (int)ms_level);
    }
    else
    {
      // For ms n, first find precursors for all ms n. then make a tmp map having the precursor masses as precursor
      // (original precursors are stashed per native id and restored afterwards)
      std::map<String, std::vector<Precursor>> original_precursor_map;
      #pragma omp parallel for default(none), shared(map, ms_level, original_precursor_map)
      for (int i = 0; i < (int) map.size(); i++)
      {
        auto spec = map[i];
        if (spec.getMSLevel() != ms_level) continue;
        const auto& native_id = spec.getNativeID();
        original_precursor_map[native_id] = spec.getPrecursors();
        // replace the precursor m/z with the deconvolved monoisotopic mass so
        // that merging groups spectra of the same precursor mass
        if (! spec.getPrecursors().empty() && native_id_precursor_peak_group_map_.find(native_id) != native_id_precursor_peak_group_map_.end())
        {
          auto precursor_pg = native_id_precursor_peak_group_map_[native_id];
          auto precursor = spec.getPrecursors()[0];
          precursor.setCharge(1);
          precursor.setMZ(precursor_pg.getMonoMass());
          precursor.setIntensity(precursor_pg.getIntensity());
          map[i].setPrecursors(std::vector<Precursor> {precursor});
        }
      }
      // merge MS n using precursor method
      OPENMS_LOG_INFO << "Merging MS" << ms_level << " spectra from the same deconvolved precursor masses... " << std::endl;
      sm_param.setValue("precursor_method:mz_tolerance", 0.2);
      sm_param.setValue("precursor_method:rt_tolerance", 30.0); // TODO make this as a user input?
      merger.setParameters(sm_param);
      map.sortSpectra();
      merger.mergeSpectraPrecursors(map);
      // restore the original precursors; merged spectra carry the
      // comma-separated native ids of their source spectra
      for (auto& mspec : map)
      {
        auto native_id_str = mspec.getNativeID();
        std::vector<String> native_ids;
        native_id_str.split(",", native_ids);
        for (auto& native_id : native_ids)
        {
          if (original_precursor_map.find(native_id) == original_precursor_map.end()) continue;
          mspec.setPrecursors(original_precursor_map[native_id]);
        }
      }
    }
  }
  else if (merge_spec_ == 2)
  {
    OPENMS_LOG_INFO << "Merging spectra into a single spectrum for MS" << ms_level << std::endl;
    // a block larger than the map forces everything into one merged spectrum
    sm_param.setValue("block_method:rt_block_size", map.size() + 1);
    map.sortSpectra();
    sm_param.setValue("block_method:ms_levels", IntList {(int)ms_level});
    merger.setParameters(sm_param);
    merger.mergeSpectraBlockWise(map);
  }
  filterLowPeaks_(map);
}
int FLASHDeconvAlgorithm::getScanNumber(const MSExperiment& map, Size index)
{
auto native_id_str = map[index].getNativeID();
std::vector<String> native_ids;
native_id_str.split(",", native_ids);
String type_accession = "MS:1000768";
if (!map.getSourceFiles().empty())
{
type_accession = map.getSourceFiles()[0].getNativeIDTypeAccession();
if (type_accession.empty()) type_accession = "MS:1000768";
}
int scan_number = SpectrumLookup::extractScanNumber(native_ids.back(), type_accession);
if (scan_number < 0) { scan_number = (int)index + 1; }
return scan_number;
}
/// @return a copy of the per-MS-level mass tolerances (in ppm) currently in use.
std::vector<double> FLASHDeconvAlgorithm::getTolerances() const
{
  std::vector<double> tolerances(tols_.begin(), tols_.end());
  return tolerances;
}
/// Find the scan number of the closest preceding spectrum one MS level below.
/// @param map the experiment
/// @param index position of the MSn spectrum whose precursor scan is sought
/// @param ms_level MS level of the spectrum at @p index
/// @return scan number of the nearest preceding (ms_level - 1) spectrum, or -1 if none exists
int FLASHDeconvAlgorithm::findPrecursorScanNumber_(const MSExperiment& map, Size index, uint ms_level) const
{
  // Walk backwards from the spectrum directly before @p index.
  int prev = (int)index;
  while (--prev >= 0)
  {
    if (map[prev].getMSLevel() == ms_level - 1)
    {
      return getScanNumber(map, prev);
    }
  }
  return -1; // no survey scan precedes this spectrum
}
/// Run the noise-decoy and signal-decoy deconvolutions for @p spec and append
/// their peak groups to the target result @p deconvolved_spectrum.
/// @param deconvolved_spectrum target deconvolution result; decoy peak groups are appended and the set is re-sorted
/// @param spec the raw spectrum that was deconvolved
/// @param scan_number scan number of @p spec
/// @param precursor_pg precursor peak group of @p spec (may be empty, e.g. for MS1)
void FLASHDeconvAlgorithm::appendDecoyPeakGroups_(DeconvolvedSpectrum& deconvolved_spectrum,
const MSSpectrum& spec,
int scan_number,
const PeakGroup& precursor_pg)
{
// The two decoy searches are independent of each other; run them concurrently.
#pragma omp parallel sections default(none) shared(spec, scan_number, precursor_pg, deconvolved_spectrum)
{
#pragma omp section
sd_noise_decoy_.performSpectrumDeconvolution(spec, scan_number, precursor_pg);
#pragma omp section
sd_signal_decoy_.performSpectrumDeconvolution(spec, scan_number, precursor_pg);
}
deconvolved_spectrum.sortByQscore();
// Reserve once so the two append loops below do not reallocate.
deconvolved_spectrum.reserve(deconvolved_spectrum.size() + sd_signal_decoy_.getDeconvolvedSpectrum().size()
+ sd_noise_decoy_.getDeconvolvedSpectrum().size());
for (const auto& pg : sd_signal_decoy_.getDeconvolvedSpectrum())
{
deconvolved_spectrum.push_back(pg);
}
for (const auto& pg : sd_noise_decoy_.getDeconvolvedSpectrum())
{
deconvolved_spectrum.push_back(pg);
}
// Restore the container's canonical order after appending the decoys.
deconvolved_spectrum.sort();
}
/// Deconvolve all spectra of @p map level by level and collect the results.
/// For each MS level: (1) assign precursor peak groups to MSn spectra from the
/// already-deconvolved lower level, (2) optionally merge spectra, then
/// (3) deconvolve every spectrum of that level (plus decoys when enabled).
/// @param map input experiment (may be modified by spectra merging)
/// @param deconvolved_spectra output; one entry per processed spectrum, sorted after each level
void FLASHDeconvAlgorithm::runSpectralDeconvolution_(MSExperiment& map, std::vector<DeconvolvedSpectrum>& deconvolved_spectra)
{
startProgress(0, (SignedSize)map.size(), "running FLASHDeconv");
// Remember the original scan numbers keyed by RT: merging may change indices
// and native IDs, but the RT of a (merged) spectrum still maps to a scan.
std::map<double, int> rt_scan_map;
for (Size index = 0; index < map.size(); index++)
{
int scan_number = getScanNumber(map, index);
rt_scan_map[map[index].getRT()] = scan_number;
}
for (uint ms_level = 1; ms_level <= current_max_ms_level_; ms_level++)
{
// Link MSn spectra to precursor peak groups found at the previous level.
if (ms_level > 1) { findPrecursorPeakGroupsForMSnSpectra_(map, deconvolved_spectra, ms_level); }
if (merge_spec_ > 0)
{
mergeSpectra_(map, ms_level);
// Re-link after merging: native IDs / spectra may have changed.
if (ms_level > 1) { findPrecursorPeakGroupsForMSnSpectra_(map, deconvolved_spectra, ms_level); }
}
for (Size index = 0; index < map.size(); index++)
{
// Without merging the scan number can be read directly; otherwise recover
// it from the RT map (falling back to direct extraction when RT is unknown).
int scan_number = merge_spec_ == 0 ? getScanNumber(map, index) :
(rt_scan_map.find(map[index].getRT()) == rt_scan_map.end() ? getScanNumber(map, index) :
rt_scan_map[map[index].getRT()]);
const auto& spec = map[index];
if (ms_level != spec.getMSLevel()) { continue; }
nextProgress();
if (spec.empty()) { continue; }
String native_id = spec.getNativeID();
PeakGroup precursor_pg;
if (native_id_precursor_peak_group_map_.find(native_id) != native_id_precursor_peak_group_map_.end())
{
precursor_pg = native_id_precursor_peak_group_map_[native_id];
}
sd_.performSpectrumDeconvolution(spec, scan_number, precursor_pg);
auto& deconvolved_spectrum = sd_.getDeconvolvedSpectrum();
// If no deconvolved precursor was found, at least record the precursor scan.
if (ms_level > 1 && precursor_pg.empty())
{
int precursor_scan = findPrecursorScanNumber_(map, index, ms_level);
if (precursor_scan >= 0) { deconvolved_spectrum.setPrecursorScanNumber(precursor_scan); }
}
if (report_decoy_ && !deconvolved_spectrum.empty())
{
appendDecoyPeakGroups_(deconvolved_spectrum, spec, scan_number, precursor_pg);
}
deconvolved_spectra.push_back(deconvolved_spectrum);
}
// Keep results sorted so the next level's precursor lookup (by scan number) works.
std::sort(deconvolved_spectra.begin(), deconvolved_spectra.end());
}
endProgress();
}
/// @return the precalculated averagine model of the target deconvolution engine.
const FLASHHelperClasses::PrecalculatedAveragine& FLASHDeconvAlgorithm::getAveragine()
{
  const auto& averagine = sd_.getAveragine();
  return averagine;
}
/// @return the averagine model used for the noise-decoy search; when decoys are
/// disabled no decoy-specific model exists, so the target model is returned.
const FLASHHelperClasses::PrecalculatedAveragine& FLASHDeconvAlgorithm::getDecoyAveragine()
{
  if (report_decoy_)
  {
    return sd_noise_decoy_.getAveragine();
  }
  return getAveragine();
}
/// Build a histogram of @p data over [min_range, max_range] with bins of width @p bin_size.
/// Values outside the range are ignored.
/// @param data values to histogram
/// @param min_range inclusive lower bound of the histogram
/// @param max_range inclusive upper bound of the histogram
/// @param bin_size width of each bin; must be positive
/// @return per-bin counts; empty when the range is inverted or @p bin_size is not positive
std::vector<int> FLASHDeconvAlgorithm::getHistogram_(const std::vector<double>& data, double min_range, double max_range, double bin_size)
{
  // Guard against degenerate inputs that would yield a zero/negative bin count
  // (and out-of-range indexing below).
  if (bin_size <= 0 || max_range < min_range) { return {}; }
  const int num_bins = static_cast<int>((max_range - min_range) / bin_size) + 1;
  std::vector<int> bins(num_bins, 0);
  // Populate the bins
  for (const double value : data)
  {
    if (value < min_range || value > max_range) { continue; }
    int bin_index = static_cast<int>((value - min_range) / bin_size);
    // Floating-point rounding can push value == max_range past the last bin; clamp.
    if (bin_index >= num_bins) { bin_index = num_bins - 1; }
    bins[bin_index]++;
  }
  return bins;
}
/// Estimate the ppm mass tolerance for @p ms_level from the data itself.
/// A throw-away deconvolution with a very wide tolerance (200 ppm) is run on a
/// subsample of spectra; the ppm errors of confident peak groups are histogrammed
/// and a Gaussian is fitted, whose width defines the tolerance. On any failure the
/// tolerance falls back to 10 ppm. The result is stored in tols_[ms_level - 1].
/// @param map input experiment
/// @param sd_param base parameters for the throw-away deconvolution
/// @param avg precalculated averagine model to reuse
/// @param ms_level MS level to determine the tolerance for
void FLASHDeconvAlgorithm::determineTolerance_(const MSExperiment& map, const Param& sd_param, const FLASHHelperClasses::PrecalculatedAveragine& avg, const uint ms_level)
{
OPENMS_LOG_INFO << "Determining tolerance for MS" << ms_level << " ... ";
auto sd = SpectralDeconvolution();
auto sd_param_t = sd_param;
sd.setAveragine(avg);
tols_[ms_level - 1] = 200; // maximum tolerance
sd_param_t.setValue("min_charge", 1); // better to include charge 1 to determine ppm error.
sd_param_t.setValue("min_mass", 50.0); // better to include small masses to determine ppm error.
sd_param_t.setValue("tol", tols_);
sd.setParameters(sd_param_t);
sd.setToleranceEstimation();
// Only every 100th non-empty spectrum of this level is deconvolved for speed.
int sample_rate = 100; //
int count = 0;
std::vector<double> sampled_tols;
for (const auto& spec : map)
{
if (ms_level != (uint)spec.getMSLevel()) { continue; }
if (spec.empty()) { continue; }
if (count++ % sample_rate != 0) continue;
PeakGroup precursor_pg;
sd.performSpectrumDeconvolution(spec, 0, precursor_pg);
const auto& deconvolved_spectrum = sd.getDeconvolvedSpectrum(); // estimate both.
for (const auto& pg : deconvolved_spectrum)
{
if (pg.getQscore() < .9) // TODO automatically find the good mass threshold
continue;
// Collect the per-peak ppm errors of confident peak groups only.
for (auto error : pg.getMassErrors())
{
sampled_tols.push_back(error);
}
}
}
// Too few samples to fit a Gaussian reliably.
if (sampled_tols.size() < 6)
{
OPENMS_LOG_INFO << "failed. Cannot be determined - no MS" << ms_level << " spectrum. Set to 10ppm (default tolerance)." << std::endl;
tols_[ms_level - 1] = 10;
return;
}
try
{
// Histogram the errors over [-100, 100] ppm in 1-ppm bins and fit a Gaussian.
const int l = -100, r = 100, bin_size = 1;
const auto& bins = getHistogram_(sampled_tols, l, r, bin_size);
// Calculate mean using std::accumulate
std::vector<DPosition<2>> points;
for (int b = l; b <= r; b += bin_size)
{
points.emplace_back(b, bins[b - l]);
}
Math::GaussFitter fitter;
const auto fit = fitter.fit(points);
tols_[ms_level - 1] = round(fit.sigma * 2.17 * 2); // 97% area under curve in gaussian
OPENMS_LOG_INFO << "done. Determined tolerance: " << tols_[ms_level - 1] << " ppm. You may test around this tolerance for better results."
<< std::endl;
}
catch (const Exception::UnableToFit& e)
{
// Handle the exception and issue a warning
OPENMS_LOG_INFO << "failed. Cannot be determined - Gaussian fitting failure. Set to 10ppm (default tolerance)." << std::endl;
tols_[ms_level - 1] = 10;
}
catch (const std::exception& e)
{
// Handle other exceptions if necessary
OPENMS_LOG_INFO << "failed. Cannot be determined - Gaussian fitting failure. Set to 10ppm (default tolerance)." << std::endl;
tols_[ms_level - 1] = 10;
}
}
/// Main entry point: deconvolve the whole experiment.
/// Pipeline: peak filtering -> averagine calculation -> (optional) data-driven
/// tolerance estimation -> spectral deconvolution (with optional decoys) ->
/// feature finding -> q-value update -> isobaric quantification.
/// @param map input experiment (modified: filtered, possibly merged)
/// @param deconvolved_spectra output deconvolved spectra
/// @param deconvolved_features output mass features traced across spectra
void FLASHDeconvAlgorithm::run(MSExperiment& map,
std::vector<DeconvolvedSpectrum>& deconvolved_spectra,
std::vector<FLASHHelperClasses::MassFeature>& deconvolved_features)
{
updateMSLevels_(map);
filterLowPeaks_(map);
sd_ = SpectralDeconvolution();
Param sd_param = param_.copy("SD:", true);
sd_param.setValue("allowed_isotope_error", param_.getValue("allowed_isotope_error"));
OPENMS_LOG_INFO<< "Calculating Averagines ... " << std::flush;
sd_.setParameters(sd_param);
sd_.calculateAveragine(use_RNA_averagine_);
OPENMS_LOG_INFO<< "Done" << std::endl;
const auto& avg = sd_.getAveragine();
// determine tolerance in case tolerance input is negative
for (uint ms_level = 1; ms_level <= current_max_ms_level_; ms_level++)
{
if (tols_[ms_level - 1] > 0) continue;
determineTolerance_(map, sd_param, avg, ms_level);
}
sd_param.setValue("tol", tols_);
sd_.setParameters(sd_param);
if (report_decoy_)
{
// The two decoy engines reuse the same parameters but differ in their decoy
// type and averagine: the noise decoy needs its own averagine calculation.
sd_noise_decoy_.setParameters(sd_param);
sd_noise_decoy_.setTargetDecoyType(PeakGroup::TargetDecoyType::noise_decoy, sd_.getDeconvolvedSpectrum()); // noise
sd_noise_decoy_.calculateAveragine(use_RNA_averagine_); // for noise, averagine needs to be calculated differently.
sd_signal_decoy_.setParameters(sd_param);
sd_signal_decoy_.setTargetDecoyType(PeakGroup::TargetDecoyType::signal_decoy, sd_.getDeconvolvedSpectrum()); // isotope
sd_signal_decoy_.setAveragine(avg);
}
setLogType(CMD);
// Rough upper bound: target + two decoy results per spectrum, plus slack.
deconvolved_spectra.reserve(map.size() * 4);
// run spectral deconvolution here and get deconvolved spectra
runSpectralDeconvolution_(map, deconvolved_spectra);
// feature tracing here and update FeatureQScores
runFeatureFinding_(deconvolved_spectra, deconvolved_features);
noise_decoy_weight_ = Qvalue::updatePeakGroupQvalues(deconvolved_spectra);
TopDownIsobaricQuantification quantifier;
Param quant_param = param_.copy("iq:", true);
quantifier.setParameters(quant_param);
// Isobaric quant run
quantifier.quantify(map, deconvolved_spectra, deconvolved_features);
}
/// Determine the scan-number window of survey scans around the MSn spectrum at @p index.
/// For MS2 the window spans precursor_MS1_window_ preceding MS1 scans (and the
/// same number of following ones); for higher levels only the single preceding
/// survey scan is used.
/// NOTE(review): the forward search initializes num_precursor_window to 0 for
/// ms_level > 2 (vs. 1 in the backward search), so the "ending" bound equals the
/// spectrum itself there — presumably intentional, but worth confirming.
/// @return {first scan number, last scan number} of the window (inclusive)
std::pair<int, int> FLASHDeconvAlgorithm::findScanNumberBounds_(const MSExperiment& map, Size index, uint ms_level) const
{
// find beginning scan number
int num_precursor_window = ms_level == 2 ? precursor_MS1_window_ : 1;
auto index_copy = index;
while (index_copy > 0 && num_precursor_window > 0)
{
index_copy--;
if (map[index_copy].getMSLevel() == ms_level - 1) { num_precursor_window--; }
}
int b_scan_number = getScanNumber(map, index_copy);
// find ending scan number
index_copy = index;
num_precursor_window = ms_level == 2 ? precursor_MS1_window_ : 0;
while (index_copy < map.size() - 1 && num_precursor_window > 0)
{
index_copy++;
if (map[index_copy].getMSLevel() == ms_level - 1) { num_precursor_window--; }
}
int a_scan_number = getScanNumber(map, index_copy);
return {b_scan_number, a_scan_number};
}
std::vector<DeconvolvedSpectrum> FLASHDeconvAlgorithm::collectSurveyScans_(const std::vector<DeconvolvedSpectrum>& deconvolved_spectra,
int b_scan_number,
int a_scan_number,
uint ms_level) const
{
std::vector<DeconvolvedSpectrum> survey_scans;
auto diter = std::lower_bound(deconvolved_spectra.begin(), deconvolved_spectra.end(), DeconvolvedSpectrum(b_scan_number));
auto aiter = std::lower_bound(deconvolved_spectra.begin(), deconvolved_spectra.end(), DeconvolvedSpectrum(a_scan_number));
if (diter == deconvolved_spectra.end()) { return survey_scans; }
while (diter < deconvolved_spectra.end() && diter <= aiter)
{
if (diter->getOriginalSpectrum().getMSLevel() == ms_level - 1) { survey_scans.push_back(*diter); }
diter++;
}
return survey_scans;
}
/// Compute the isolation window m/z range [start, end] of @p spec from its
/// precursor annotations. With multiple precursors the last one wins (as each
/// iteration overwrites the previous bounds).
/// @return {start m/z, end m/z}; {0, 0} when @p spec has no precursors
std::pair<double, double> FLASHDeconvAlgorithm::getIsolationWindowMzRange_(const MSSpectrum& spec) const
{
  double window_begin = 0;
  double window_end = 0;
  for (const auto& pre : spec.getPrecursors())
  {
    // Non-positive offsets mean "not annotated": use half of the configured window size.
    double lower = pre.getIsolationWindowLowerOffset();
    double upper = pre.getIsolationWindowUpperOffset();
    if (lower <= 0) { lower = isolation_window_size_ / 2.0; }
    if (upper <= 0) { upper = isolation_window_size_ / 2.0; }
    // Values above 100 are interpreted as absolute m/z bounds rather than offsets.
    window_begin = lower > 100.0 ? lower : pre.getMZ() - lower;
    window_end = upper > 100.0 ? upper : pre.getMZ() + upper;
  }
  return {window_begin, window_end};
}
/// Among the given survey scans, find the peak group that best explains a
/// precursor isolated in [start_mz, end_mz]. Scans are visited from the most
/// recent backwards; the first scan producing any candidate wins, and within a
/// scan the candidate with the highest charge SNR wins.
/// NOTE(review): the candidate charge is estimated as round(monoMass / start_mz);
/// a start_mz of 0 (no precursor annotation) would divide by zero — presumably
/// callers guarantee a non-empty isolation window; verify against callers.
/// @return the best matching peak group, or an empty PeakGroup if none matches
PeakGroup FLASHDeconvAlgorithm::findBestPrecursorPeakGroup_(const std::vector<DeconvolvedSpectrum>& survey_scans,
double start_mz,
double end_mz) const
{
PeakGroup best_pg;
double max_score = -1.0;
// Iterate from the latest survey scan backwards; stop at the first scan with a hit.
for (int i = (int)survey_scans.size() - 1; i >= 0; i--)
{
const auto& precursor_spectrum = survey_scans[i];
if (precursor_spectrum.empty()) { continue; }
for (const auto& pg : precursor_spectrum)
{
// Skip peak groups entirely outside the isolation window.
if (pg[0].mz > end_mz || pg[pg.size() - 1].mz < start_mz) { continue; }
double max_intensity = 0.0;
const FLASHHelperClasses::LogMzPeak* tmp_precursor = nullptr;
// Expected charge of the isolated species at this m/z.
int c = int(round(pg.getMonoMass() / start_mz));
// Pick the most intense peak of that charge inside the window.
for (const auto& tmp_peak : pg)
{
if (tmp_peak.abs_charge != c) { continue; }
if (tmp_peak.mz < start_mz || tmp_peak.mz > end_mz) { continue; }
if (tmp_peak.intensity < max_intensity) { continue; }
max_intensity = tmp_peak.intensity;
tmp_precursor = &tmp_peak;
}
if (tmp_precursor == nullptr) { continue; }
// Score candidates by the SNR of the matched charge state.
auto score = pg.getChargeSNR(tmp_precursor->abs_charge);
if (score < max_score) { continue; }
max_score = score;
best_pg = pg;
}
if (!best_pg.empty()) { break; }
}
return best_pg;
}
void FLASHDeconvAlgorithm::findPrecursorPeakGroupsForMSnSpectra_(const MSExperiment& map,
const std::vector<DeconvolvedSpectrum>& deconvolved_spectra,
uint ms_level)
{
for (Size index = 0; index < map.size(); index++)
{
const auto& spec = map[index];
if (spec.getMSLevel() != ms_level) { continue; }
String native_id = spec.getNativeID();
auto [b_scan_number, a_scan_number] = findScanNumberBounds_(map, index, ms_level);
auto survey_scans = collectSurveyScans_(deconvolved_spectra, b_scan_number, a_scan_number, ms_level);
if (survey_scans.empty()) { continue; }
auto [start_mz, end_mz] = getIsolationWindowMzRange_(spec);
PeakGroup best_pg = findBestPrecursorPeakGroup_(survey_scans, start_mz, end_mz);
if (!best_pg.empty()) { native_id_precursor_peak_group_map_[native_id] = best_pg; }
}
}
void FLASHDeconvAlgorithm::updatePrecursorQScores_(std::vector<DeconvolvedSpectrum>& deconvolved_spectra, int ms_level)
{
// update precursor feature QScores and qvalues
std::map<int, DeconvolvedSpectrum> scan_fullscan;
for (auto& dspec : deconvolved_spectra)
{
if ((int)dspec.getOriginalSpectrum().getMSLevel() != ms_level - 1) continue;
int scan = dspec.getScanNumber();
scan_fullscan[scan] = dspec;
}
for (auto& dspec : deconvolved_spectra)
{
if ((int)dspec.getOriginalSpectrum().getMSLevel() != ms_level) continue;
if (dspec.getPrecursorPeakGroup().empty()) continue;
auto precursor_pg = dspec.getPrecursorPeakGroup();
int pscan = precursor_pg.getScanNumber();
if (scan_fullscan.find(pscan) == scan_fullscan.end()) continue;
auto fullscan = scan_fullscan[pscan];
auto iter = std::lower_bound(fullscan.begin(), fullscan.end(), precursor_pg);
if (iter == fullscan.end()) continue;
if (precursor_pg.getMonoMass() == iter->getMonoMass())
{
precursor_pg.setFeatureIndex(iter->getFeatureIndex());
precursor_pg.setQscore(iter->getQscore());
if (iter->getFeatureIndex() > 0) precursor_pg.setQscore2D(iter->getQscore2D());
}
else { precursor_pg.setFeatureIndex(0); }
dspec.setPrecursorPeakGroup(precursor_pg);
}
}
/// Trace deconvolved masses across spectra into mass features and update 2D Qscores.
/// MS1 (or the lowest level present) is traced over the whole run; for MSn the
/// spectra are grouped by the feature index of their precursor and each group is
/// traced separately. Skipped entirely when all spectra were merged into one
/// (merge_spec_ == 2), since feature tracing over a single spectrum is meaningless.
/// @param deconvolved_spectra deconvolved spectra (updated in place with feature info)
/// @param deconvolved_features output mass features (target and, if enabled, decoy)
void FLASHDeconvAlgorithm::runFeatureFinding_(std::vector<DeconvolvedSpectrum>& deconvolved_spectra,
std::vector<FLASHHelperClasses::MassFeature>& deconvolved_features)
{
if (merge_spec_ == 2) return;
auto mass_tracer = MassFeatureTrace();
auto decoy_mass_tracer = MassFeatureTrace();
Param mf_param = param_.copy("ft:", true);
// Negative values mean "derive from the deconvolution settings".
if (((double)mf_param.getValue("mass_error_ppm")) < 0) { mf_param.setValue("mass_error_ppm", tols_[0]); }
if (((double)mf_param.getValue("min_cos")) < 0) { mf_param.setValue("min_cos", min_cos_[0]); }
mf_param.setValue("noise_threshold_int", .0);
mf_param.setValue("reestimate_mt_sd", "false");
mf_param.setValue("trace_termination_criterion", "outlier");
mf_param.setValue("trace_termination_outliers", 20);
mf_param.setValue("chrom_peak_snr", .0);
mass_tracer.setParameters(mf_param); // maybe go to set param
// Find features for MS1 or the minimum MS level in the dataset.
deconvolved_features = mass_tracer.findFeaturesAndUpdateQscore2D(sd_.getAveragine(), deconvolved_spectra, (int)current_min_ms_level_, false);
if (report_decoy_)
{
const auto& decoy_deconvolved_features = mass_tracer.findFeaturesAndUpdateQscore2D(sd_.getAveragine(), deconvolved_spectra, (int)current_min_ms_level_, true);
deconvolved_features.insert(deconvolved_features.end(), decoy_deconvolved_features.begin(), decoy_deconvolved_features.end());
}
mf_param.setValue("min_trace_length", 1e-5); // allow all traces for MSn
mass_tracer.setParameters(mf_param);
// Find features for MSn
for (int ms_level = (int)current_min_ms_level_ + 1; ms_level <= (int)current_max_ms_level_; ms_level++)
{
updatePrecursorQScores_(deconvolved_spectra, ms_level);
// Group MSn spectra by the feature index of their (featured) precursor.
std::map<uint, std::vector<Size>> feature_index_set;
for (Size i = 0; i < deconvolved_spectra.size(); i++)
{
const auto& dspec = deconvolved_spectra[i];
if ((int)dspec.getOriginalSpectrum().getMSLevel() != ms_level) continue;
if (dspec.getPrecursorPeakGroup().empty()) continue;
uint findex = dspec.getPrecursorPeakGroup().getFeatureIndex();
if (findex == 0) continue;
feature_index_set[findex].push_back(i);
}
for (const auto& element : feature_index_set)
{
// Trace each precursor-feature group separately, then write the (updated)
// spectra back into the main vector.
std::vector<DeconvolvedSpectrum> tmp_dspec;
tmp_dspec.reserve(element.second.size());
for (Size i : element.second)
tmp_dspec.push_back(deconvolved_spectra[i]);
const auto& df = mass_tracer.findFeaturesAndUpdateQscore2D(sd_.getAveragine(), tmp_dspec, ms_level, false);
deconvolved_features.insert(deconvolved_features.end(), df.begin(), df.end());
if (report_decoy_)
{
const auto& df_decoy = mass_tracer.findFeaturesAndUpdateQscore2D(sd_.getAveragine(), tmp_dspec, ms_level, true);
deconvolved_features.insert(deconvolved_features.end(), df_decoy.begin(), df_decoy.end());
}
Size j = 0;
for (Size i : element.second)
deconvolved_spectra[i] = tmp_dspec[j++];
}
}
}
} // namespace OpenMS
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Kyowon Jeong, Jihyung Kim $
// $Authors: Kyowon Jeong, Jihyung Kim $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/TOPDOWN/FLASHHelperClasses.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <utility>
namespace OpenMS
{
/// Precompute averagine isotope patterns on a mass grid of spacing @p delta from
/// @p min_mass to @p max_mass. Each pattern is trimmed to the smallest isotope
/// range that still covers 99.99% of the pattern's power (sum of squared
/// intensities), normalized by the square root of the remaining power, and its
/// apex/left/right extents and mono-mass offsets are cached for fast lookup.
/// @param min_mass first mass of the grid (lower masses are skipped)
/// @param max_mass last mass of the grid
/// @param delta grid spacing (stored as mass_interval_)
/// @param generator isotope pattern generator (peptide or RNA averagine)
/// @param use_RNA_averagine use RNA instead of peptide averagine composition
/// @param decoy_iso_distance when > 0, scale isotope m/z spacing by this factor to build decoy patterns
FLASHHelperClasses::PrecalculatedAveragine::PrecalculatedAveragine(const double min_mass, const double max_mass, const double delta, CoarseIsotopePatternGenerator& generator,
const bool use_RNA_averagine, const double decoy_iso_distance) :
mass_interval_(delta),
min_mass_(min_mass)
{
int i = 0;
// Left/right extents are made monotonically non-decreasing over mass (see max below).
int max_left_count = 0;
int max_right_count = 0;
while (true)
{
double mass = i * mass_interval_;
i++;
if (mass < min_mass)
{
continue;
}
if (mass > max_mass)
{
break;
}
auto iso = use_RNA_averagine ? generator.estimateFromRNAMonoWeight(mass) : generator.estimateFromPeptideMonoWeight(mass);
// For decoys, stretch the isotope spacing by decoy_iso_distance and renormalize.
if (decoy_iso_distance > 0)
{
auto decoy_iso(iso);
for (Size k = 0; k < iso.size(); k++)
{
decoy_iso[k].setMZ(iso[k].getMZ() * decoy_iso_distance);
}
decoy_iso.sortByMass();
decoy_iso.renormalize();
iso = decoy_iso;
}
const double min_pwr = .9999;
const Size min_iso_length = 2;
const int min_left_right_count = 2;
double total_pwr = .0;
size_t most_abundant_index_ = 0;
double most_abundant_int = 0;
/// sum of squared intensities to see the total power of isotope pattern. The range of isotope pattern is
/// determined so those within range cover min_pwr of the total power.
for (Size k = 0; k < iso.size(); k++)
{
total_pwr += iso[k].getIntensity() * iso[k].getIntensity();
if (most_abundant_int >= iso[k].getIntensity())
{
continue;
}
most_abundant_int = iso[k].getIntensity();
most_abundant_index_ = k;
}
// Trim the weaker end (left or right) one isotope at a time until removing the
// next one would drop the kept power below min_pwr of the total.
int left_count = 0;
int right_count = (int)iso.size() - 1;
int trim_count = 0;
double pwr = 0;
while (iso.size() - trim_count > min_iso_length && left_count < right_count)
{
double lint = iso[left_count].getIntensity();
double rint = iso[right_count].getIntensity();
bool trim_left = true;
if (lint < rint)
{
pwr += lint * lint;
}
else
{
pwr += rint * rint;
trim_left = false;
}
if (total_pwr - pwr < total_pwr * min_pwr)
{
break;
}
trim_count++;
if (trim_left)
{
iso[left_count].setIntensity(0);
left_count++;
}
else
{
iso[right_count].setIntensity(0); // for trimming
right_count--;
}
}
total_pwr -= pwr;
// Re-express the kept range as counts relative to the apex isotope.
left_count = (int)most_abundant_index_ - left_count;
right_count = right_count - (int)most_abundant_index_;
// Normalize intensities by sqrt(kept power) so the pattern has unit power.
double intensity_sum = 0;
for (auto& k : iso)
{
float ori_int = k.getIntensity();
k.setIntensity(ori_int / (float)sqrt(total_pwr));
intensity_sum += k.getIntensity();
}
left_count = left_count < min_left_right_count ? min_left_right_count : left_count;
right_count = right_count < min_left_right_count ? min_left_right_count : right_count;
max_left_count = std::max(max_left_count, left_count);
max_right_count = std::max(max_right_count, right_count);
apex_index_.push_back(most_abundant_index_);
right_count_from_apex_.push_back(max_right_count);
left_count_from_apex_.push_back(max_left_count);
average_mono_mass_difference_.push_back(iso.averageMass() - iso[0].getMZ());
abundant_mono_mass_difference_.push_back(iso.getMostAbundant().getMZ() - iso[0].getMZ());
isotopes_.push_back(iso);
snr_mul_factor_.push_back(intensity_sum * intensity_sum);
}
}
/// Map a mass to the index of the nearest precalculated pattern, clamped to the
/// last available entry (masses below min_mass_ map to index 0).
Size FLASHHelperClasses::PrecalculatedAveragine::massToIndex_(const double mass) const
{
  const double offset = std::max(.0, mass - min_mass_);
  Size index = (Size)round(offset / mass_interval_);
  if (index >= isotopes_.size()) { index = isotopes_.size() - 1; }
  return index;
}
/// @return a copy of the precalculated isotope pattern closest to @p mass.
IsotopeDistribution FLASHHelperClasses::PrecalculatedAveragine::get(const double mass) const
{
  const Size index = massToIndex_(mass);
  return isotopes_[index];
}
/// @return the maximum isotope index previously set via setMaxIsotopeIndex().
size_t FLASHHelperClasses::PrecalculatedAveragine::getMaxIsotopeIndex() const
{
  const size_t max_index = max_isotope_index_;
  return max_index;
}
/// @return the number of isotopes kept to the left of the apex for @p mass.
Size FLASHHelperClasses::PrecalculatedAveragine::getLeftCountFromApex(const double mass) const
{
  const Size index = massToIndex_(mass);
  return (Size)left_count_from_apex_[index];
}
double FLASHHelperClasses::PrecalculatedAveragine::getAverageMassDelta(const double mass) const
{
return average_mono_mass_difference_[massToIndex_(mass)];
}
double FLASHHelperClasses::PrecalculatedAveragine::getMostAbundantMassDelta(const double mass) const
{
return abundant_mono_mass_difference_[massToIndex_(mass)];
}
double FLASHHelperClasses::PrecalculatedAveragine::getSNRMultiplicationFactor(const double mass) const
{
return snr_mul_factor_[massToIndex_(mass)];
}
/// @return the number of isotopes kept to the right of the apex for @p mass.
Size FLASHHelperClasses::PrecalculatedAveragine::getRightCountFromApex(const double mass) const
{
  const Size index = massToIndex_(mass);
  return (Size)right_count_from_apex_[index];
}
/// @return the index of the most abundant isotope of the pattern for @p mass.
Size FLASHHelperClasses::PrecalculatedAveragine::getApexIndex(const double mass) const
{
  const Size index = massToIndex_(mass);
  return apex_index_[index];
}
/// @return the last isotope index covered by the pattern for @p mass:
/// the apex position plus the kept right-hand extent.
Size FLASHHelperClasses::PrecalculatedAveragine::getLastIndex(const double mass) const
{
  const Size index = massToIndex_(mass);
  const Size apex = apex_index_[index];
  return apex + right_count_from_apex_[index];
}
/// Set the maximum isotope index (queried via getMaxIsotopeIndex()).
void FLASHHelperClasses::PrecalculatedAveragine::setMaxIsotopeIndex(const int index)
{
  max_isotope_index_ = index;
}
/// Construct a LogMzPeak from a raw peak: caches m/z, intensity and the
/// charge-carrier-corrected log(m/z); charge and isotope index start at 0.
/// @param peak the raw peak (m/z, intensity)
/// @param positive true for positive ionization mode (affects the log-m/z correction)
FLASHHelperClasses::LogMzPeak::LogMzPeak(const Peak1D& peak, const bool positive) :
mz(peak.getMZ()), intensity(peak.getIntensity()), logMz(getLogMz(peak.getMZ(), positive)), abs_charge(0), is_positive(positive), isotopeIndex(0)
{
}
/// @return the uncharged (neutral) mass of this peak: 0 when no charge is
/// assigned, the cached mass when already set, otherwise the mass computed from
/// m/z and charge (charge-carrier corrected).
double FLASHHelperClasses::LogMzPeak::getUnchargedMass() const
{
  if (abs_charge == 0)
  {
    return .0;
  }
  if (mass > 0)
  {
    return mass; // already computed/cached
  }
  return (mz - getChargeMass(is_positive)) * (float)abs_charge;
}
/// Order peaks by log(m/z); ties are broken by intensity (ascending).
bool FLASHHelperClasses::LogMzPeak::operator<(const LogMzPeak& a) const
{
  return (logMz == a.logMz) ? (intensity < a.intensity) : (logMz < a.logMz);
}
/// Reverse ordering of operator<: by log(m/z) descending, ties by intensity descending.
bool FLASHHelperClasses::LogMzPeak::operator>(const LogMzPeak& a) const
{
  return (logMz == a.logMz) ? (intensity > a.intensity) : (logMz > a.logMz);
}
/// Two peaks are equal when both log(m/z) and intensity match exactly.
bool FLASHHelperClasses::LogMzPeak::operator==(const LogMzPeak& a) const
{
  return logMz == a.logMz && intensity == a.intensity;
}
/// @return the mass of the charge carrier: +proton mass for positive ionization
/// mode, -proton mass for negative mode.
float FLASHHelperClasses::getChargeMass(const bool positive_ioniziation_mode)
{
  const double proton = Constants::PROTON_MASS_U;
  return (float)(positive_ioniziation_mode ? proton : -proton);
}
/// @return the natural log of the charge-carrier-corrected m/z.
double FLASHHelperClasses::getLogMz(const double mz, const bool positive)
{
  const double corrected_mz = mz - getChargeMass(positive);
  return std::log(corrected_mz);
}
/// @return true when no reporter-ion quantities have been recorded.
bool FLASHHelperClasses::IsobaricQuantities::empty() const
{
  return quantities.size() == 0;
}
} // namespace OpenMS
// Copyright (c) 2002-2025, The OpenMS Team -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Kyowon Jeong $
// $Authors: Kyowon Jeong$
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/QUANTITATION/IsobaricChannelExtractor.h>
#include <OpenMS/ANALYSIS/QUANTITATION/IsobaricQuantifier.h>
#include <OpenMS/ANALYSIS/QUANTITATION/ItraqEightPlexQuantitationMethod.h>
#include <OpenMS/ANALYSIS/QUANTITATION/ItraqFourPlexQuantitationMethod.h>
#include <OpenMS/ANALYSIS/QUANTITATION/TMTEighteenPlexQuantitationMethod.h>
#include <OpenMS/ANALYSIS/QUANTITATION/TMTElevenPlexQuantitationMethod.h>
#include <OpenMS/ANALYSIS/QUANTITATION/TMTSixPlexQuantitationMethod.h>
#include <OpenMS/ANALYSIS/QUANTITATION/TMTSixteenPlexQuantitationMethod.h>
#include <OpenMS/ANALYSIS/QUANTITATION/TMTTenPlexQuantitationMethod.h>
#include <OpenMS/ANALYSIS/TOPDOWN/TopDownIsobaricQuantification.h>
#include <OpenMS/KERNEL/ConsensusMap.h>
#include <OpenMS/METADATA/SpectrumLookup.h>
namespace OpenMS
{
/// Default constructor: registers this handler's default parameters
/// (quantification type, isotope correction, tolerances, ...).
TopDownIsobaricQuantification::TopDownIsobaricQuantification() : DefaultParamHandler("TopDownIsobaricQuantification")
{
  // Populate defaults_ and sync param_ with it.
  setDefaultParams_();
}
/// Copy constructor. The base-class copy duplicates only the Param state, so the
/// cached flag must be copied explicitly here.
/// (quant_methods_ holds non-copyable unique_ptrs; it is repopulated by
/// updateMembers_(), which registers all methods — presumably triggered when
/// parameters are (re)set on the copy; verify against DefaultParamHandler.)
TopDownIsobaricQuantification::TopDownIsobaricQuantification(const TopDownIsobaricQuantification& other) : DefaultParamHandler(other)
{
  only_fully_quantified_ = other.only_fully_quantified_;
}
/// Assignment operator. Copies the Param state via the base class and keeps the
/// derived cached flag in sync (the base assignment does not touch derived members).
TopDownIsobaricQuantification& TopDownIsobaricQuantification::operator=(const TopDownIsobaricQuantification& rhs)
{
  if (this == &rhs)
    return *this;
  DefaultParamHandler::operator=(rhs);
  only_fully_quantified_ = rhs.only_fully_quantified_;
  return *this;
}
/// Register the default parameters of this handler:
/// - type: isobaric labeling scheme ("none" disables quantification)
/// - isotope_correction: apply channel isotope-impurity correction
/// - reporter_mz_tol: m/z tolerance for reporter ion extraction
/// - only_fully_quantified: keep only spectra where every channel is non-zero
void TopDownIsobaricQuantification::setDefaultParams_()
{
defaults_.setValue("type", "none", "Specifies the isobaric quantification method used in the experiment.");
defaults_.setValidStrings("type", {"none", "itraq4plex", "itraq8plex", "tmt10plex", "tmt11plex", "tmt16plex", "tmt18plex", "tmt6plex"});
defaults_.setValue("isotope_correction", "true",
"Enable isotope correction (highly recommended).");
defaults_.setValidStrings("isotope_correction", {"true", "false"});
defaults_.setValue("reporter_mz_tol", 2e-3, "Specifies the m/z tolerance (in Th) for reporter ion detection.");
defaults_.setValue("only_fully_quantified", "false", "Restricts analysis to spectra that are fully quantified, meaning all channels have non-zero intensity reporter ions.");
defaults_.setValidStrings("only_fully_quantified", {"true", "false"});
// Copy the registered defaults into the active parameter set.
defaultsToParam_();
}
/// Sync derived members with the current parameters: registers all supported
/// quantitation methods and caches the only_fully_quantified flag.
/// NOTE(review): updateMembers_ may run multiple times (each time parameters are
/// set); addMethod_ is then called repeatedly for the same methods — presumably
/// it replaces existing entries keyed by method name; verify against addMethod_.
void TopDownIsobaricQuantification::updateMembers_()
{
addMethod_(std::make_unique<ItraqFourPlexQuantitationMethod>());
addMethod_(std::make_unique<ItraqEightPlexQuantitationMethod>());
addMethod_(std::make_unique<TMTSixPlexQuantitationMethod>());
addMethod_(std::make_unique<TMTTenPlexQuantitationMethod>());
addMethod_(std::make_unique<TMTElevenPlexQuantitationMethod>());
addMethod_(std::make_unique<TMTSixteenPlexQuantitationMethod>());
addMethod_(std::make_unique<TMTEighteenPlexQuantitationMethod>());
// Cache the flag so quantify() does not have to parse the Param repeatedly.
only_fully_quantified_ = param_.getValue("only_fully_quantified").toString() == "true";
}
/// Quantify reporter-ion channels for all deconvolved MSn spectra.
/// Steps: (1) extract and correct reporter channels with the configured isobaric
/// method, (2) map RTs to scan numbers and record precursor relations,
/// (3) cluster precursor peak groups along mass features, (4) assign summed
/// channel intensities per MS2 scan (combining scans that share the same
/// precursor m/z under one survey scan), and (5) attach per-scan and per-cluster
/// merged quantities to the deconvolved spectra.
/// @param exp the raw experiment the reporter ions are read from
/// @param deconvolved_spectra deconvolved spectra; quantities are attached in place
/// @param mass_features mass features used to cluster precursors over RT
void TopDownIsobaricQuantification::quantify(const MSExperiment& exp, std::vector<DeconvolvedSpectrum>& deconvolved_spectra, const std::vector<FLASHHelperClasses::MassFeature>& mass_features)
{
// set the parameters for this method
String type = getParameters().getValue("type").toString();
if (type == "none")
{
return;
}
const auto& quant_method = quant_methods_[type];
IsobaricChannelExtractor channel_extractor(quant_method.get());
Param extract_param = channel_extractor.getDefaults();
extract_param.setValue("reporter_mass_shift", getParameters().getValue("reporter_mz_tol"));
channel_extractor.setParameters(extract_param);
IsobaricQuantifier quantifier(quant_method.get());
Param quant_param = quantifier.getDefaults();
quant_param.setValue("isotope_correction", getParameters().getValue("isotope_correction"));
quant_param.setValue("normalization", "false"); // here use its own normalization for the same precursor masses.
quantifier.setParameters(quant_param);
ConsensusMap consensus_map_raw, consensus_map_quant;
// extract channel information
channel_extractor.extractChannels(exp, consensus_map_raw);
quantifier.quantify(consensus_map_raw, consensus_map_quant);
std::map<double, int> rt_scan_map;
std::map<int, std::set<PeakGroup>> scan_precursors_map; // MS1 scan to precursor peak groups
std::vector<std::vector<PeakGroup>> precursor_clusters; // clusters of the precursor peak groups
std::vector<std::vector<std::vector<double>>> intensity_clusters;
std::vector<std::vector<double>> merged_intensity_clusters;
std::map<PeakGroup, int> precursor_cluster_index; // precursor to cluster index
std::map<int, std::vector<int>> precursor_scan_ms2_scans; // from precursor scan to ms2 scans
std::map<int, int> ms2_scan_precursor_scan; // from ms2 scan to precursor scan
std::map<int, double> ms2_scan_precursor_mz; // from ms2 scan to precursor mz
int pre_scan = 0;
// Pass 1 over the raw experiment: build RT->scan and MS2->precursor-scan maps.
for (auto it = exp.begin(); it != exp.end(); ++it)
{
int scan_number = exp.getSourceFiles().empty() ? -1 : SpectrumLookup::extractScanNumber(it->getNativeID(), exp.getSourceFiles()[0].getNativeIDTypeAccession());
if (scan_number < 0)
{
scan_number = (int)std::distance(exp.begin(), it) + 1;
}
rt_scan_map[it->getRT()] = scan_number;
if (it->getMSLevel() == 1)
{
pre_scan = scan_number;
precursor_scan_ms2_scans[pre_scan] = std::vector<int>();
}
else
{
// NOTE(review): assumes every MSn spectrum carries at least one precursor
// annotation; getPrecursors()[0] would be out of range otherwise.
precursor_scan_ms2_scans[pre_scan].push_back(scan_number);
ms2_scan_precursor_scan[scan_number] = pre_scan;
ms2_scan_precursor_mz[scan_number] = it->getPrecursors()[0].getMZ();
}
}
// Collect the precursor peak groups of all MSn spectra, keyed by their MS1 scan.
for (auto& dspec : deconvolved_spectra)
{
if (dspec.getOriginalSpectrum().getMSLevel() == 1)
{
continue;
}
auto& precursor = dspec.getPrecursorPeakGroup();
if (precursor.empty())
continue;
scan_precursors_map[precursor.getScanNumber()].insert(precursor);
}
// Cluster precursors that lie on the same mass feature (matched via RT and mono mass).
for (auto& mf : mass_features)
{
auto& mass_trace = mf.mt;
std::vector<PeakGroup> cluster;
cluster.reserve(mass_trace.getSize());
// each peak = a precursor peak group
for (auto& p : mass_trace)
{
auto trt = *rt_scan_map.lower_bound(p.getRT());
if (abs(trt.first - p.getRT()) > .01)
continue;
int scan = trt.second;
if (scan_precursors_map.find(scan) == scan_precursors_map.end())
continue;
for (auto& pg : scan_precursors_map[scan])
{
if (abs(pg.getMonoMass() - p.getMZ()) > .01)
continue;
cluster.push_back(pg);
}
}
if (!cluster.empty())
{
precursor_clusters.push_back(cluster);
}
for (auto& pg : cluster)
{
precursor_cluster_index[pg] = (int)precursor_clusters.size() - 1;
}
}
// Precursors not covered by any feature become singleton clusters.
for (auto& dspec : deconvolved_spectra)
{
if (dspec.getOriginalSpectrum().getMSLevel() == 1)
{
continue;
}
auto& precursor = dspec.getPrecursorPeakGroup();
if (precursor.empty() || precursor_cluster_index.find(precursor) != precursor_cluster_index.end())
continue;
precursor_clusters.push_back(std::vector<PeakGroup> {precursor});
precursor_cluster_index[precursor] = (int)precursor_clusters.size() - 1;
}
// Map each quantified consensus feature back to its MS2 scan (matched by RT).
std::map<int, std::vector<double>> ms2_ints; // from ms2 scan to intensities
for (auto& feature : consensus_map_quant)
{
std::vector<double> intensities;
float max_int = 0;
for (auto& i : feature)
{
max_int = std::max(max_int, i.getIntensity());
intensities.push_back(i.getIntensity());
}
if (max_int <= 0)
continue;
auto trt = *rt_scan_map.lower_bound(feature.getRT());
if (abs(trt.first - feature.getRT()) > .01)
continue;
int scan = trt.second;
ms2_ints[scan] = intensities;
}
intensity_clusters.resize(precursor_clusters.size());
merged_intensity_clusters.resize(precursor_clusters.size());
// Per MS2 scan: sum channel intensities over all sibling MS2 scans that share the
// same precursor m/z under the same survey scan, then attach the quantities.
for (auto& dspec : deconvolved_spectra)
{
if (dspec.getOriginalSpectrum().getMSLevel() == 1)
{
continue;
}
int scan = dspec.getScanNumber();
double pre_mz = ms2_scan_precursor_mz[scan];
std::vector<double> intensities (0);
for (int ms2_scan : precursor_scan_ms2_scans[ms2_scan_precursor_scan[scan]])
{
if (ms2_ints.find(ms2_scan) == ms2_ints.end() || ms2_ints[ms2_scan].empty())
continue;
if (ms2_scan_precursor_mz.find(ms2_scan) == ms2_scan_precursor_mz.end() || abs(ms2_scan_precursor_mz[ms2_scan] - pre_mz) > .01)
continue;
if (intensities.empty())
{
intensities = ms2_ints[ms2_scan];
}
else
{
for (Size j = 0; j < intensities.size(); j++)
intensities[j] += ms2_ints[ms2_scan][j];
}
}
if (intensities.empty()) continue;
int cluster_index = dspec.getPrecursorPeakGroup().empty()? -1 : precursor_cluster_index[dspec.getPrecursorPeakGroup()];
// With only_fully_quantified_, the minimum channel must be > 0 (all channels
// quantified); otherwise the maximum must be > 0 (at least one channel).
double min_intensity = only_fully_quantified_? *std::min_element(intensities.begin(), intensities.end()) : *std::max_element(intensities.begin(), intensities.end());
if (min_intensity > 0) // at least one channel quantified
{
if (cluster_index >= 0) intensity_clusters[cluster_index].push_back(intensities);
FLASHHelperClasses::IsobaricQuantities iq;
iq.scan = scan;
iq.quantities = intensities;
iq.merged_quantities = intensities;
dspec.setQuantities(iq);
}
}
// Sum the per-scan intensities within each precursor cluster.
for (Size i = 0; i < intensity_clusters.size(); i++)
{
const auto& intensities = intensity_clusters[i];
if (intensities.empty())
continue;
merged_intensity_clusters[i] = intensities[0];
for (Size j = 1; j < intensities.size(); j++)
{
for (Size k = 0; k < merged_intensity_clusters[i].size(); k++)
{
merged_intensity_clusters[i][k] += intensities[j][k];
}
}
}
// Attach the cluster-merged quantities to every spectrum of each cluster.
for (auto& dspec : deconvolved_spectra)
{
if (dspec.getOriginalSpectrum().getMSLevel() == 1 || dspec.getPrecursorPeakGroup().empty())
continue;
int cluster_index = precursor_cluster_index[dspec.getPrecursorPeakGroup()];
if (merged_intensity_clusters[cluster_index].empty())
continue;
if (dspec.getQuantities().empty())
continue;
auto intensities = merged_intensity_clusters[cluster_index];
if (intensities.empty()) continue;
double min_intensity = only_fully_quantified_? *std::min_element(intensities.begin(), intensities.end()) : *std::max_element(intensities.begin(), intensities.end());
if (min_intensity > 0) // all channel quantified
{
FLASHHelperClasses::IsobaricQuantities iq = dspec.getQuantities();
iq.merged_quantities = intensities;
dspec.setQuantities(iq);
}
}
}
} // namespace OpenMS | C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/TOPDOWN/PeakGroup.cpp | .cpp | 41,932 | 1,267 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Kyowon Jeong, Jihyung Kim $
// $Authors: Kyowon Jeong, Jihyung Kim $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/TOPDOWN/PeakGroup.h>
#include <OpenMS/ANALYSIS/TOPDOWN/PeakGroupScoring.h>
#include <OpenMS/ANALYSIS/TOPDOWN/SpectralDeconvolution.h>
namespace OpenMS
{
  // Constructor: initializes a PeakGroup spanning the inclusive absolute charge
  // range [min_abs_charge, max_abs_charge]. is_positive records the ionization
  // polarity, which later determines the sign of the charge-carrier mass used
  // for m/z <-> mass conversions.
  PeakGroup::PeakGroup(const int min_abs_charge, const int max_abs_charge, const bool is_positive):
      min_abs_charge_(min_abs_charge),
      max_abs_charge_(max_abs_charge),
      is_positive_(is_positive)
  {
  }
bool PeakGroup::operator<(const PeakGroup& a) const
{
if (this->monoisotopic_mass_ == a.monoisotopic_mass_) { return this->intensity_ < a.intensity_; }
return this->monoisotopic_mass_ < a.monoisotopic_mass_;
}
bool PeakGroup::operator>(const PeakGroup& a) const
{
if (this->monoisotopic_mass_ == a.monoisotopic_mass_) { return this->intensity_ > a.intensity_; }
return this->monoisotopic_mass_ > a.monoisotopic_mass_;
}
bool PeakGroup::operator==(const PeakGroup& a) const
{
return this->monoisotopic_mass_ == a.monoisotopic_mass_ && this->intensity_ == a.intensity_;
}
  // Returns one averaged (signed) mass error per run of consecutive peaks that
  // share the same isotope index. ppm == true yields ppm errors, otherwise
  // absolute Da errors.
  // NOTE(review): grouping relies on peaks with equal isotopeIndex being stored
  // consecutively (peaks are sorted elsewhere before scoring) — confirm for all
  // call sites.
  std::vector<float> PeakGroup::getMassErrors(bool ppm) const
  {
    std::vector<float> errors;
    int i = 0;            // isotope index of the current run
    float i_cntr = 0;     // number of peaks in the current run
    float i_error = .0f;  // summed error of the current run
    for (const auto& p : *this)
    {
      if (i != p.isotopeIndex)
      {
        // isotope index changed: flush the mean error of the finished run
        if (i_cntr > 0)
        {
          errors.push_back(i_error / i_cntr);
        }
        i = p.isotopeIndex;
        i_cntr = 0;
        i_error = 0;
      }
      i_cntr ++;
      i_error += ppm ? getPPMError_(p) : getDaError_(p);
    }
    // flush the last run
    if (i_cntr > 0)
    {
      errors.push_back(i_error / i_cntr);
    }
    return errors;
  }
  // Recomputes the average absolute mass error, both in ppm (avg_ppm_error_)
  // and in Da (avg_da_error_), over the per-isotope errors from getMassErrors().
  // NOTE(review): if getMassErrors() returns an empty vector the divisions below
  // produce NaN; this appears unreachable because the method is only called on
  // non-empty groups (see updateQscore) — confirm.
  void PeakGroup::updateAvgMassError_()
  {
    avg_ppm_error_ = 0;
    const auto& ppm_errors = getMassErrors();
    for (const auto e : ppm_errors)
      avg_ppm_error_ += std::abs(e);
    avg_ppm_error_ /= (float)ppm_errors.size();
    avg_da_error_ = .0f;
    const auto& da_errors = getMassErrors(false);
    for (const auto e : da_errors)
      avg_da_error_ += std::abs(e);
    avg_da_error_ /= (float)da_errors.size();
  }
  // Returns the signed ppm deviation between the theoretical m/z of peak p
  // (monoisotopic mass shifted by the peak's isotope index, divided by charge,
  // plus the charge-carrier mass) and the peak's observed m/z.
  float PeakGroup::getPPMError_(const LogMzPeak& p) const
  {
    auto mass = (float)(monoisotopic_mass_ + p.isotopeIndex * iso_da_distance_);
    return (float)((mass / (float)p.abs_charge + FLASHHelperClasses::getChargeMass(p.is_positive) - p.mz) / p.mz * 1e6);
  }
  // Returns the signed Da deviation between the theoretical mass of the peak's
  // isotope position and the peak's observed uncharged mass.
  float PeakGroup::getDaError_(const LogMzPeak& p) const
  {
    auto mass = (float)(monoisotopic_mass_ + p.isotopeIndex * iso_da_distance_);
    return (float)((mass - p.getUnchargedMass()));
  }
  // Returns the smallest (most negative) isotope index considered when peaks
  // below the monoisotopic mass are collected.
  int PeakGroup::getMinNegativeIsotopeIndex() const
  {
    return min_negative_isotope_index_;
  }
  // Recomputes per-charge isotope cosine scores: for each charge in the current
  // range, the isotope intensity profile restricted to that charge is compared
  // (cosine similarity) against the averagine isotope distribution for the
  // current monoisotopic mass. With a single charge, the group-level cosine is
  // simply reused for that charge.
  void PeakGroup::updatePerChargeCos_(const FLASHHelperClasses::PrecalculatedAveragine& avg, double tol)
  {
    auto iso_dist = avg.get(monoisotopic_mass_);
    // scratch buffer; overwritten per charge by getPerIsotopeIntensities_
    auto current_per_isotope_intensities = std::vector<float>(getIsotopeIntensities().size(), .0f);
    if (min_abs_charge_ == max_abs_charge_) setChargeIsotopeCosine(min_abs_charge_, getIsotopeCosine());
    else
    {
      for (int abs_charge = min_abs_charge_; abs_charge <= max_abs_charge_; abs_charge++)
      {
        int min_isotope_index, max_isotope_index;
        getPerIsotopeIntensities_(current_per_isotope_intensities, min_isotope_index, max_isotope_index, abs_charge, 0, tol);
        float cos_score
          = SpectralDeconvolution::getCosine(current_per_isotope_intensities, min_isotope_index, max_isotope_index, iso_dist, 0,
                                             SpectralDeconvolution::min_iso_size);
        setChargeIsotopeCosine(abs_charge, cos_score); //
      }
    }
  }
  // Full (re)scoring pipeline for this peak group.
  // Steps: collect per-charge signal/noise statistics, shrink the charge range
  // to charges with sufficient SNR, compute the charge-fit score, re-estimate
  // the monoisotopic mass and per-isotope intensities, evaluate the isotope
  // cosine against the averagine model, then derive per-charge cosines, average
  // mass errors, SNR, the representative charge and finally the Qscore.
  // Returns the isotope offset suggested by the cosine fit: a non-zero value
  // (only returned when is_last is false) asks the caller to shift the mass by
  // that many isotopes and retry; 0 means either success or early rejection
  // (rejection leaves qscore_ at 0).
  int PeakGroup::updateQscore(const std::vector<LogMzPeak>& noisy_peaks,
                              const FLASHHelperClasses::PrecalculatedAveragine& avg,
                              const double min_cos,
                              const double tol,
                              const bool is_low_charge,
                              const std::vector<double>& excluded_masses,
                              const bool is_last)
  {
    qscore_ = 0;
    if (empty()) { return 0; }
    updatePerChargeInformation_(noisy_peaks, tol, is_last);
    updateChargeRange_();
    updateChargeFitScoreAndChargeIntensities_(is_low_charge);
    // weak charge-intensity distributions are discarded early
    if (charge_score_ < .7f) //
    {
      return 0;
    }
    updateMonoMassAndIsotopeIntensities(tol); //
    if (per_isotope_int_.empty() || max_abs_charge_ < min_abs_charge_) { return 0; }
    int h_offset;
    int window_width = is_last ? 0 : -1;
    isotope_cosine_score_ = SpectralDeconvolution::getIsotopeCosineAndIsoOffset(
      monoisotopic_mass_, per_isotope_int_, h_offset, avg,
      -min_negative_isotope_index_, // change if to select cosine calculation and if to get second best hits
      window_width, excluded_masses);
    if (!is_last && h_offset != 0) return h_offset;
    if (isotope_cosine_score_ < min_cos) { return 0; }
    updatePerChargeCos_(avg, tol);
    updateAvgMassError_();
    updateSNR_((float)avg.getSNRMultiplicationFactor(monoisotopic_mass_));
    //if (target_decoy_type_ == PeakGroup::signal_decoy) isotope_cosine_score_ = std::min(1.0f, isotope_cosine_score_ + .02f); // the smaller
    // pick the representative charge = charge with the maximum SNR
    for (int abs_charge = min_abs_charge_; abs_charge <= max_abs_charge_; abs_charge++)
    {
      if (getChargeSNR(abs_charge) > getChargeSNR(max_snr_abs_charge_)) { max_snr_abs_charge_ = abs_charge; }
    }
    qscore_ = PeakGroupScoring::getQscore(this);
    return h_offset;
  }
  // Estimates the noise power for charge z (z == 0 means: all charges combined).
  // Rationale: noise around a deconvolved mass is dominated by peak series with
  // a regular, harmonic-like spacing. All peaks (noisy + signal, mapped to
  // uncharged mass) are connected by edges whose normalized mass difference
  // falls into one of a set of spacing bins; per bin the highest-intensity
  // chain of same-spaced peaks is found, chains are then greedily consumed from
  // strongest to weakest, and each chain's squared summed intensity is added to
  // the noise power. Leftover unconnected noise peaks contribute their
  // tolerance-grouped squared intensities at the end.
  float PeakGroup::getNoisePeakPower_(const std::vector<FLASHHelperClasses::LogMzPeak>& noisy_peaks, const int z, const double tol) const
  {
    if (noisy_peaks.empty()) return 0;
    const Size max_noisy_peak_number = z == 0? std::min(200, 40 * (max_abs_charge_ - min_abs_charge_ + 1)) :40; // too many noise peaks will slow down the process
    const Size bin_number_margin = 8;
    const Size max_bin_number = bin_number_margin + 12; // 12 bin + 8 extra bin
    float threshold = -1;
    std::vector<std::pair<Peak1D, bool>> all_peaks; // peak + is signal?
    all_peaks.reserve(max_noisy_peak_number + logMzpeaks_.size());
    auto noise_peak_count = std::count_if(noisy_peaks.begin(), noisy_peaks.end(), [&](const auto& noisy_peak) {
      return z == 0 || noisy_peak.abs_charge == z;
    });
    if (noise_peak_count == 0) return 0;
    // get intensity threshold
    if (noise_peak_count > (int)max_noisy_peak_number)
    {
      std::vector<float> intensities;
      intensities.reserve(noise_peak_count);
      for (const auto& noisy_peak : noisy_peaks)
      {
        if (z > 0 && noisy_peak.abs_charge != z) continue;
        //if (noisy_peak.abs_charge < min_abs_charge_ || noisy_peak.abs_charge > max_abs_charge_) continue;
        intensities.push_back(noisy_peak.intensity);
      }
      // keep only the max_noisy_peak_number most intense noise peaks
      std::sort(intensities.rbegin(), intensities.rend());
      threshold = intensities[max_noisy_peak_number];
    }
    // filter peaks and check which mzs are signal and which are noise.
    for (const auto& noisy_peak : noisy_peaks)
    {
      if ((z > 0 && noisy_peak.abs_charge != z) || noisy_peak.intensity < threshold) continue;
      //if (noisy_peak.abs_charge < min_abs_charge_ || noisy_peak.abs_charge > max_abs_charge_) continue;
      all_peaks.emplace_back(Peak1D(noisy_peak.getUnchargedMass(), noisy_peak.intensity), false);
    }
    if (all_peaks.empty()) return 0;
    for (const auto& peak : logMzpeaks_)
    {
      if ((z > 0 && peak.abs_charge != z) || peak.intensity < threshold) continue;
      //if (peak.abs_charge < min_abs_charge_ || peak.abs_charge > max_abs_charge_) continue;
      all_peaks.emplace_back(Peak1D(peak.getUnchargedMass(), peak.intensity), true);
    }
    std::sort(all_peaks.begin(), all_peaks.end(),
              [](std::pair<Peak1D, bool>& left, std::pair<Peak1D, bool>& right) { return left.first.getMZ() < right.first.getMZ(); }); //
    float charge_noise_pwr = 0;
    std::vector<std::vector<Size>> per_bin_edges(max_bin_number);
    std::vector<int> per_bin_start_index(max_bin_number, -2); // -2 means bin is empty. -1 means bin is used. zero or positive = edge index
    std::map<float, Size> max_intensity_sum_to_bin;
    const std::vector<double> div_factors {1.0, 2.0, 3.0}; // allow two skips for each bin
    for (Size k = 0; k < max_bin_number; k++)
    {
      per_bin_edges[k] = std::vector<Size>(all_peaks.size(), 0);
    }
    // first collect all possible edges. An edge means mass difference between two peaks.
    for (Size i = 0; i < all_peaks.size(); i++)
    {
      const auto& [p1, p1_signal] = all_peaks[i];
      const auto p1_mass = p1.getMZ();
      std::vector<double> per_bin_error(max_bin_number, -1.0);
      for (Size j = i + 1; j < all_peaks.size(); j++)
      {
        const auto& [p2, p2_signal] = all_peaks[j];
        const double normalized_dist = (p2.getMZ() - p1_mass) / iso_da_distance_;
        if (p1_signal && p2_signal
            && normalized_dist >= .75) // if both are signals, and they are different from each other by more than .75 isotope distance, do not connect.
                                       // Otherwise, connect as they may a part of consecutive other noisy peaks.
        {
          continue;
        }
        for (double d : div_factors)
        {
          double distance = normalized_dist / d * (max_bin_number - bin_number_margin);
          Size bin = (Size)round(distance);
          if (bin == 0) { continue; }
          if (bin >= max_bin_number) { break; }
          per_bin_start_index[bin] = -1;
          double current_error = d * max_bin_number + std::abs((double)bin - distance); // larger when d gets larger. For the same d, comparable.
          // keep only the best (smallest-error) outgoing edge per (peak, bin)
          if (per_bin_error[bin] >= 0 && per_bin_error[bin] < current_error) { continue; }
          per_bin_edges[bin][i] = j;
          per_bin_error[bin] = current_error;
        }
      }
    }
    // then from each bin find the highest intensity path consisting of the same mass differences.
    for (Size k = 0; k < max_bin_number; k++)
    {
      if (per_bin_start_index[k] == -2) { continue; }
      const auto& edges = per_bin_edges[k];
      float max_sum_intensity = 0;
      for (Size i = 0; i < edges.size(); i++)
      {
        if (edges[i] == 0) { break; }
        float sum_intensity = all_peaks[i].first.getIntensity();
        for (Size j = edges[i]; j < edges.size(); j = edges[j])
        {
          sum_intensity += all_peaks[j].first.getIntensity();
          if (j == 0) break;
        }
        if (max_sum_intensity < sum_intensity) // at least two edges should be there.
        {
          max_sum_intensity = sum_intensity;
          per_bin_start_index[k] = (int)i; //
        }
      }
      max_intensity_sum_to_bin[max_sum_intensity] = k; // how to deal with profile peaks?
    }
    auto unused = boost::dynamic_bitset<>(all_peaks.size());
    unused.flip();
    // Now from the highest intensity path to the lowest, sum up intensities excluding already used peaks or signal peaks.
    for (auto it = max_intensity_sum_to_bin.rbegin(); it != max_intensity_sum_to_bin.rend(); ++it)
    {
      //if (signal_pwr / 100 > it->first * it->first) continue; // if the noise summed intensity is too small, skip
      Size bin = it->second;
      int index = per_bin_start_index[bin];
      if (index < 0) { continue; }
      const auto& edges = per_bin_edges[bin];
      const double ori_mass = all_peaks[index].first.getMZ();
      const int ori_index = index;
      float sum_intensity = .0;
      // walk right from the chain start: consume chains beginning at peaks
      // within tolerance of the start mass
      while (index < (int)all_peaks.size())
      {
        const auto& p = all_peaks[index].first;
        if (p.getMZ() - ori_mass > tol / 2.0 * p.getMZ()) break;
        float intensity = // p_signal ? 0 :
          p.getIntensity();
        Size j = edges[index];
        if (j == 0) { break; }
        if (unused[index])
        {
          sum_intensity += intensity;
          unused[index] = false;
        }
        else { break; }
        for (; j < edges.size(); j = edges[j])
        {
          if (j == 0) { break; }
          if (unused[j])
          {
            sum_intensity += all_peaks[j].first.getIntensity();
            unused[j] = false;
          }
          else
            break;
        }
        index++;
      }
      // same walk to the left of the chain start
      index = ori_index - 1;
      while (index >= 0)
      {
        const auto& p = all_peaks[index].first;
        if (ori_mass - p.getMZ() > tol / 2.0 * ori_mass) break;
        float intensity = // p_signal ? 0 :
          p.getIntensity();
        Size j = edges[index];
        if (j == 0) { break; }
        if (unused[index])
        {
          sum_intensity += intensity;
          unused[index] = false;
        }
        else { break; }
        for (; j < edges.size(); j = edges[j])
        {
          if (j == 0) { break; }
          if (unused[j])
          {
            sum_intensity += all_peaks[j].first.getIntensity();
            unused[j] = false;
          }
          else
            break;
        }
        index--;
      }
      charge_noise_pwr += sum_intensity * sum_intensity;
    }
    // if still peaks are remaining, add their individual power.
    Size index = unused.find_first();
    double prev_mass = .0;
    float tmp_int_sum = 0;
    while (index != unused.npos)
    {
      const auto& [p, p_signal] = all_peaks[index];
      if (! p_signal)
      {
        // group leftover noise peaks within tolerance before squaring
        if (p.getMZ() - prev_mass > p.getMZ() * tol)
        {
          charge_noise_pwr += tmp_int_sum * tmp_int_sum;
          prev_mass = p.getMZ();
          tmp_int_sum = 0;
        }
        tmp_int_sum += p.getIntensity();
      }
      index = unused.find_next(index);
    }
    charge_noise_pwr += tmp_int_sum * tmp_int_sum;
    return charge_noise_pwr;
  }
  // Collects per-charge statistics from the current signal and noisy peaks:
  // - per_charge_int_: summed signal intensity per charge
  // - per_charge_sum_signal_squared_: per charge, the sum over isotopes of the
  //   squared per-isotope intensity (index 0 accumulates all charges)
  // - per_charge_noise_pwr_: noise power per charge (index 0 = all charges);
  //   the expensive chain-based estimate (getNoisePeakPower_) is only run in
  //   the final pass (is_last); earlier passes use the cheap sum of squared
  //   noisy-peak intensities instead.
  void PeakGroup::updatePerChargeInformation_(const std::vector<LogMzPeak>& noisy_peaks, const double tol, const bool is_last)
  {
    per_charge_sum_signal_squared_ = std::vector<float>(1 + max_abs_charge_, .0f);
    per_charge_int_ = std::vector<float>(1 + max_abs_charge_, .0f);
    int max_iso = 0;
    // calculate per charge intensity, and per charge sum of signal intensity squared
    for (const auto& p : logMzpeaks_)
    {
      per_charge_int_[p.abs_charge] += p.intensity;
      max_iso = std::max(max_iso, p.isotopeIndex);
    }
    // accumulate intensity per (charge, isotope) cell
    Matrix<float> per_charge_isotope_int(1 + max_abs_charge_, 1 + max_iso, .0f);
    for (const auto& p : logMzpeaks_)
    {
      float prev_v = per_charge_isotope_int.getValue(p.abs_charge, p.isotopeIndex);
      per_charge_isotope_int.setValue(p.abs_charge, p.isotopeIndex, prev_v + p.intensity);
    }
    for (int z = min_abs_charge_; z <= max_abs_charge_; z++)
    {
      for (Size i = 0; i < (Size)per_charge_isotope_int.cols(); i++)
      {
        float v = per_charge_isotope_int.getValue(z, i);
        per_charge_sum_signal_squared_[0] += v * v;
        per_charge_sum_signal_squared_[z] += v * v;
      }
    }
    // for each charge calculate signal and noise power
    per_charge_noise_pwr_ = std::vector<float>(1 + max_abs_charge_, .0f);
    if (is_last)
    {
      for (int z = min_abs_charge_; z <= max_abs_charge_; z++)
      {
        per_charge_noise_pwr_[z] = getNoisePeakPower_(noisy_peaks, z, tol);
      }
      per_charge_noise_pwr_[0] = getNoisePeakPower_(noisy_peaks, 0, tol);
    }
    else
    {
      for (const auto& p : noisy_peaks)
      {
        float pwr = p.intensity * p.intensity;
        if (p.abs_charge < min_abs_charge_ || p.abs_charge > max_abs_charge_) continue;
        per_charge_noise_pwr_[0] += pwr;
        per_charge_noise_pwr_[p.abs_charge] += pwr;
      }
    }
  }
  // Shrinks [min_abs_charge_, max_abs_charge_] to the contiguous range of
  // charges, around the maximum-SNR charge, whose per-charge SNR proxy stays
  // at or above min(max_snr / 10, 1). Signal peaks falling outside the new
  // range are dropped; if the range collapses entirely the group is cleared,
  // otherwise peaks are re-sorted.
  void PeakGroup::updateChargeRange_()
  {
    int max_sig_charge = 0;
    double max_sig = 0;
    // first, find the maximum snr charge.
    for (int z = min_abs_charge_; z <= max_abs_charge_; z++)
    {
      double tmp_snr = per_charge_int_[z] * per_charge_int_[z] / (1 + per_charge_noise_pwr_[z]);
      if (max_sig < tmp_snr)
      {
        max_sig = tmp_snr;
        max_sig_charge = z;
      }
    }
    // determine the final charge ranges based on per charge power.
    // If more than two consecutive charges do not contain any signal peak, the charge range stops at that charge.
    int new_max_abs_charge;
    int new_min_abs_charge;
    new_max_abs_charge = new_min_abs_charge = max_sig_charge;
    double threshold = std::min(max_sig / 10, 1.0);
    // extend upward from the best charge while SNR stays above threshold
    for (int z = max_sig_charge; z <= max_abs_charge_; z++)
    {
      float per_charge_signal_power = per_charge_int_[z] * per_charge_int_[z];
      if ((per_charge_signal_power / (1 + per_charge_noise_pwr_[z])) < threshold) { break; }
      new_max_abs_charge = z;
    }
    // and downward
    for (int z = max_sig_charge; z >= min_abs_charge_; z--)
    {
      float per_charge_signal_power = per_charge_int_[z] * per_charge_int_[z];
      if ((per_charge_signal_power / (1 + per_charge_noise_pwr_[z])) < threshold) { break; }
      new_min_abs_charge = z;
    }
    // if the updated charge range is different from the original one, signal and noisy peaks are again updated
    if (max_abs_charge_ != new_max_abs_charge || min_abs_charge_ != new_min_abs_charge)
    {
      std::vector<LogMzPeak> new_logMzpeaks;
      new_logMzpeaks.reserve(size());
      // now only take the signal and noise peaks within the new charge range.
      for (const auto& p : logMzpeaks_)
      {
        if (p.abs_charge < new_min_abs_charge || p.abs_charge > new_max_abs_charge) { continue; }
        new_logMzpeaks.push_back(p);
      }
      new_logMzpeaks.swap(logMzpeaks_);
      max_abs_charge_ = new_max_abs_charge;
      min_abs_charge_ = new_min_abs_charge;
    }
    if (min_abs_charge_ > max_abs_charge_) { clear_(); }
    else { sort(); }
  }
  // Collects, for a putative monoisotopic mass, all raw spectrum peaks that
  // match any isotope position at any charge in the current charge range.
  // Peaks within tolerance of an isotope position become signal peaks (or
  // negative-isotope peaks for isotope indices < 0); non-matching peaks with
  // isotope index >= 0 are returned as noisy peaks, truncated to isotope
  // indices at or below the highest signal isotope observed.
  // renew_signal_peaks: if true, the existing signal peaks are cleared and
  // re-recruited; otherwise only noisy peaks are gathered.
  std::vector<FLASHHelperClasses::LogMzPeak> PeakGroup::recruitAllPeaksInSpectrum(const MSSpectrum& spec,
                                                                                  const double tol,
                                                                                  const FLASHHelperClasses::PrecalculatedAveragine& avg,
                                                                                  double mono_mass,
                                                                                  bool renew_signal_peaks)
  {
    const double mul_tol = .8; // not all peaks within tolerance are considered as signal peaks.
    std::vector<LogMzPeak> noisy_peaks;
    if (renew_signal_peaks) clear_(); // clear logMzPeaks
    if (mono_mass < 0) { return noisy_peaks; }
    if (max_abs_charge_ - min_abs_charge_ < max_abs_charge_ / 20) // if charge range is too small ...
    {
      return noisy_peaks;
    }
    monoisotopic_mass_ = mono_mass;
    // int iso_margin = 3; // how many isotopes do we want to scan before the monoisotopic mass?
    int max_isotope = (int)avg.getLastIndex(mono_mass);
    int min_isotope = (int)(avg.getApexIndex(mono_mass) - avg.getLeftCountFromApex(mono_mass) + min_negative_isotope_index_);
    int max_signal_isotope = 0;
    min_isotope = std::max(min_negative_isotope_index_, min_isotope);
    negative_iso_peaks_.clear();
    reserve((max_isotope) * (max_abs_charge_ - min_abs_charge_ + 1) * 2);
    noisy_peaks.reserve(max_isotope * (max_abs_charge_ - min_abs_charge_ + 1) * 2);
    // scan from the largest to the smallest charges and recruit the raw peaks for this monoisotopic_mass
    for (int c = max_abs_charge_; c >= min_abs_charge_; c--)
    {
      if (c <= 0) { break; }
      double cmz = (mono_mass) / c + FLASHHelperClasses::getChargeMass(is_positive_);
      double left_mz = (mono_mass - (1 - min_negative_isotope_index_) * iso_da_distance_) / c + FLASHHelperClasses::getChargeMass(is_positive_);
      Size index = spec.findNearest(left_mz * (1 - tol * mul_tol));
      double iso_delta = iso_da_distance_ / c;
      for (; index < spec.size(); index++)
      {
        float pint = spec[index].getIntensity();
        if (pint <= 0) { continue; }
        double pmz = spec[index].getMZ();
        int iso_index = (int)round((pmz - cmz) / iso_delta);
        if (iso_index > max_isotope) { break; }
        if (iso_index < min_isotope) { continue; }
        if (abs(pmz - cmz - iso_index * iso_delta) <= pmz * tol * mul_tol)
        {
          // within tolerance: a signal (or negative-isotope) peak
          auto p = LogMzPeak(spec[index], is_positive_);
          p.isotopeIndex = iso_index;
          p.abs_charge = c;
          if (iso_index < 0) { negative_iso_peaks_.push_back(p); }
          else
          {
            max_signal_isotope = std::max(max_signal_isotope, iso_index);
            if (renew_signal_peaks) push_back(p);
          }
        }
        else if (iso_index >= 0)
        {
          // off-tolerance peak between isotope positions: record as noise
          auto p = LogMzPeak(spec[index], is_positive_);
          int noise_iso_index = (int)floor((pmz - cmz) / iso_delta);
          p.isotopeIndex = noise_iso_index;
          p.abs_charge = c;
          noisy_peaks.push_back(p);
        }
      }
      if (index >= spec.size()) { break; }
    }
    // keep only noise peaks at or below the highest signal isotope
    std::vector<LogMzPeak> _noisy_peaks;
    _noisy_peaks.reserve(noisy_peaks.size());
    for (const auto& p : noisy_peaks)
    {
      if (p.isotopeIndex > max_signal_isotope) continue;
      _noisy_peaks.push_back(p);
    }
    return _noisy_peaks;
  }
  // Const re-derivation of the noisy peaks for the group's current
  // monoisotopic mass, for use in output functions.
  // NOTE(review): this duplicates the recruiting logic of
  // recruitAllPeaksInSpectrum() (minus signal-peak collection) — keep the two
  // in sync when changing either.
  std::vector<FLASHHelperClasses::LogMzPeak> PeakGroup::getNoisyPeaks(const MSSpectrum& spec,
                                                                      const double tol,
                                                                      const FLASHHelperClasses::PrecalculatedAveragine& avg) const
  {
    // Const version of recruitAllPeaksInSpectrum for use in output functions
    // Uses existing member values instead of modifying them
    const double mul_tol = 0.8;
    std::vector<LogMzPeak> noisy_peaks;
    const double mono_mass = monoisotopic_mass_;
    if (mono_mass < 0) { return noisy_peaks; }
    if (max_abs_charge_ - min_abs_charge_ < max_abs_charge_ / 20)
    {
      return noisy_peaks;
    }
    int max_isotope = static_cast<int>(avg.getLastIndex(mono_mass));
    int min_isotope = static_cast<int>(avg.getApexIndex(mono_mass) - avg.getLeftCountFromApex(mono_mass) + min_negative_isotope_index_);
    min_isotope = std::max(min_negative_isotope_index_, min_isotope);
    // Find max signal isotope from existing peaks (const access)
    int max_signal_isotope = 0;
    for (const auto& p : logMzpeaks_)
    {
      if (p.isotopeIndex >= 0)
      {
        max_signal_isotope = std::max(max_signal_isotope, p.isotopeIndex);
      }
    }
    noisy_peaks.reserve(max_isotope * (max_abs_charge_ - min_abs_charge_ + 1) * 2);
    for (int c = max_abs_charge_; c >= min_abs_charge_; c--)
    {
      if (c <= 0) { break; }
      double cmz = mono_mass / c + FLASHHelperClasses::getChargeMass(is_positive_);
      double left_mz = (mono_mass - (1 - min_negative_isotope_index_) * iso_da_distance_) / c + FLASHHelperClasses::getChargeMass(is_positive_);
      Size index = spec.findNearest(left_mz * (1 - tol * mul_tol));
      double iso_delta = iso_da_distance_ / c;
      for (; index < spec.size(); index++)
      {
        float pint = spec[index].getIntensity();
        if (pint <= 0) { continue; }
        double pmz = spec[index].getMZ();
        int iso_index = static_cast<int>(round((pmz - cmz) / iso_delta));
        if (iso_index > max_isotope) { break; }
        if (iso_index < min_isotope) { continue; }
        // Only collect noisy peaks (those that don't match the isotope pattern tolerance)
        if (!(abs(pmz - cmz - iso_index * iso_delta) <= pmz * tol * mul_tol))
        {
          if (iso_index >= 0)
          {
            auto p = LogMzPeak(spec[index], is_positive_);
            int noise_iso_index = static_cast<int>(floor((pmz - cmz) / iso_delta));
            p.isotopeIndex = noise_iso_index;
            p.abs_charge = c;
            noisy_peaks.push_back(p);
          }
        }
      }
      if (index >= spec.size()) { break; }
    }
    // Filter noisy peaks to only include those within signal isotope range
    std::vector<LogMzPeak> filtered_noisy_peaks;
    filtered_noisy_peaks.reserve(noisy_peaks.size());
    for (const auto& p : noisy_peaks)
    {
      if (p.isotopeIndex <= max_signal_isotope)
      {
        filtered_noisy_peaks.push_back(p);
      }
    }
    return filtered_noisy_peaks;
  }
  // Computes charge_score_ in [0, 1]: a penalty-based measure of how unimodal
  // the per-charge intensity distribution is. Walking outward from the most
  // intense charge, any intensity increase (and, when is_low_charge is false,
  // any decrease steeper than 30% of the current charge's intensity) adds to
  // the penalty p; the score is 1 - p / total_intensity, floored at 0.
  // A single-charge group trivially scores 1.
  void PeakGroup::updateChargeFitScoreAndChargeIntensities_(bool is_low_charge)
  {
    if (max_abs_charge_ == min_abs_charge_)
    {
      charge_score_ = 1;
      return;
    }
    float max_per_charge_intensity = .0;
    float summed_intensity = .0;
    int max_index = -1;
    int first_index = -1;
    int last_index = -1;
    // find the most intense charge and the first/last charges with any signal
    for (int c = min_abs_charge_; c <= max_abs_charge_; c++)
    {
      summed_intensity += per_charge_int_[c];
      if (per_charge_int_[c] > 0)
      {
        if (first_index < 0) { first_index = c; }
        last_index = c;
      }
      if (max_per_charge_intensity > per_charge_int_[c]) { continue; }
      max_per_charge_intensity = per_charge_int_[c];
      max_index = c;
    }
    if (max_index < 0)
    {
      charge_score_ = 0;
      return;
    }
    first_index = first_index < 0 ? 0 : first_index;
    float p = .0f;
    const float factor = .3;
    // penalty accumulation walking up from the apex charge
    for (int c = max_index; c < last_index; c++)
    {
      float diff = per_charge_int_[c + 1] - per_charge_int_[c];
      if (diff > 0) p += diff;
      else if (! is_low_charge && diff < -(per_charge_int_[c]) * factor)
        p -= diff + (per_charge_int_[c]) * factor;
    }
    // and walking down
    for (int c = max_index; c > first_index; c--)
    {
      float diff = per_charge_int_[c - 1] - per_charge_int_[c];
      if (diff > 0) p += diff;
      else if (! is_low_charge && diff < -(per_charge_int_[c]) * factor)
        p -= diff + (per_charge_int_[c]) * factor;
    }
    charge_score_ = std::max(.0f, 1.0f - p / summed_intensity);
  }
  // Fills 'intensities' with Gaussian-smoothed per-isotope intensity sums for
  // the given charge (abs_charge == 0 means all charges), and reports the
  // min/max isotope index observed among the signal peaks.
  void PeakGroup::getPerIsotopeIntensities_(std::vector<float>& intensities,
                                            int& min_isotope_index,
                                            int& max_isotope_index,
                                            int abs_charge,
                                            int min_negative_isotope_index,
                                            double tol)
  {
    // Compute Da tolerance from ppm tolerance using the first peak's uncharged mass.
    // Assumption: all peaks in this group share similar mass, so the first peak is a
    // representative for tolerance calculation. Division by 2.0 converts from full-width
    // tolerance to half-width (radius) for the smoothing window.
    int da_tol = (int)(tol * logMzpeaks_[0].getUnchargedMass() / 2.0);
    min_isotope_index = 1e5;
    max_isotope_index = -1; // this is inclusive!!
    for (const auto& p : logMzpeaks_)
    {
      max_isotope_index = max_isotope_index < p.isotopeIndex ? p.isotopeIndex : max_isotope_index;
      min_isotope_index = min_isotope_index < p.isotopeIndex ? min_isotope_index : p.isotopeIndex;
    }
    // NOTE(review): sizing uses the MEMBER min_negative_isotope_index_ while the
    // indexing below uses the PARAMETER min_negative_isotope_index; for current
    // call sites (which pass 0 or the member) this only over-sizes the vector —
    // confirm this mismatch is intentional.
    intensities = std::vector<float>(max_isotope_index + 1 + da_tol - min_negative_isotope_index_, .0f);
    // (redundant: the constructor above already zero-initializes every element)
    std::fill(intensities.begin(), intensities.end(), .0f);
    // Gaussian smoothing denominator derivation:
    // - Standard Gaussian: exp(-x²/(2σ²)), where FWHM = 2.355σ, so σ = FWHM/2.355
    // - Here da_tol is treated as FWHM, giving σ = da_tol/2.355
    // - The formula: denom = 2σ² / iso_da_distance_ = 2*(da_tol/2.355)² / iso_da_distance_
    // - This scales the Gaussian width by isotope spacing, producing broader smoothing
    //   that allows adjacent isotope peaks to blend together. This is intentional to
    //   handle mass calibration errors and improve isotope pattern matching robustness.
    // NOTE: This nonstandard scaling factor lacks formal validation. Consider adding
    // unit tests demonstrating its impact on isotope deconvolution accuracy.
    double denom = 2.0 * std::pow(da_tol / 2.355, 2.0) / iso_da_distance_;
    for (const auto& peak : logMzpeaks_)
    {
      if (abs_charge > 0 && peak.abs_charge != abs_charge) { continue; }
      if (peak.isotopeIndex - min_negative_isotope_index >= (int)intensities.size()) { continue; }
      if (peak.isotopeIndex < 0) { continue; }
      for (int margin = -da_tol; margin <= da_tol; margin++)
      {
        int index = peak.isotopeIndex + margin - min_negative_isotope_index;
        if (index < 0 || index >= (int)intensities.size()) continue;
        // Fallback when denom <= 0: can occur if da_tol is 0 (very small mass or tight tolerance)
        // or iso_da_distance_ is 0/negative (degenerate isotope spacing). In such edge cases,
        // skip Gaussian weighting and add raw intensity directly - safe because the smoothing
        // window collapses to a single bin anyway.
        intensities[index] += denom > 0 ? float(peak.intensity * exp(-margin * margin / denom)) : peak.intensity;
      }
    }
    // also spread the sub-monoisotopic peaks when a negative index range is requested
    if (min_negative_isotope_index != 0)
    {
      for (const auto& peak : negative_iso_peaks_)
      {
        if (abs_charge > 0 && peak.abs_charge != abs_charge) { continue; }
        if (peak.isotopeIndex - min_negative_isotope_index >= (int)intensities.size()) { continue; }
        if (peak.isotopeIndex - min_negative_isotope_index < 0) { continue; }
        for (int margin = -da_tol; margin <= da_tol; margin++)
        {
          int index = peak.isotopeIndex + margin - min_negative_isotope_index;
          if (index < 0 || index >= (int)intensities.size()) continue;
          // Same denom > 0 fallback as above for negative isotope peaks
          intensities[index] += denom > 0 ? float(peak.intensity * exp(-margin * margin / denom)) : peak.intensity;
        }
      }
    }
  }
  // Re-estimates the monoisotopic mass as the intensity-weighted mean of the
  // per-peak implied monoisotopic masses, and refreshes per_isotope_int_ and
  // the total intensity. Peaks are sorted first (downstream per-isotope
  // grouping relies on that ordering).
  // NOTE(review): if every peak were skipped by the index check below,
  // intensity_ would stay 0 and the final division would yield NaN; this looks
  // unreachable for non-empty groups since signal isotope indices are >= 0 —
  // confirm.
  void PeakGroup::updateMonoMassAndIsotopeIntensities(double tol)
  {
    if (logMzpeaks_.empty()) { return; }
    int min_isotope_index, max_isotope_index;
    std::sort(logMzpeaks_.begin(), logMzpeaks_.end());
    intensity_ = .0;
    double nominator = .0;
    getPerIsotopeIntensities_(per_isotope_int_, min_isotope_index, max_isotope_index, 0, min_negative_isotope_index_, tol);
    for (const auto& p : logMzpeaks_)
    {
      float pi = p.intensity;
      if (p.isotopeIndex - min_negative_isotope_index_ < 0) { continue; }
      // each peak votes for monoisotopic mass = uncharged mass minus its isotope shift
      nominator += pi * (p.getUnchargedMass() - p.isotopeIndex * iso_da_distance_);
      intensity_ += pi;
    }
    monoisotopic_mass_ = nominator / intensity_;
  }
  // Sets the overall signal-to-noise ratio of this peak group.
  void PeakGroup::setSNR(const float snr)
  {
    snr_ = snr;
  }
  // Sets the SNR for a single absolute charge; lazily allocates the per-charge
  // vector. Charges above the current maximum are silently ignored.
  void PeakGroup::setChargeSNR(const int abs_charge, const float c_snr)
  {
    if (max_abs_charge_ < abs_charge) { return; }
    if (per_charge_snr_.empty()) { per_charge_snr_ = std::vector<float>(1 + max_abs_charge_, .0); }
    per_charge_snr_[abs_charge] = c_snr;
  }
  // Marks this peak group as a targeted (user-specified) mass.
  void PeakGroup::setTargeted()
  {
    is_targeted_ = true;
  }
  // Sets the isotope cosine score for a single absolute charge; lazily
  // allocates the per-charge vector. Out-of-range charges are ignored.
  void PeakGroup::setChargeIsotopeCosine(const int abs_charge, const float cos)
  {
    if (max_abs_charge_ < abs_charge) { return; }
    if (per_charge_cos_.empty()) { per_charge_cos_ = std::vector<float>(1 + max_abs_charge_, .0); }
    per_charge_cos_[abs_charge] = cos;
  }
  // Overwrites the inclusive absolute charge range [min, max] of this group.
  void PeakGroup::setAbsChargeRange(const int min_abs_charge, const int max_abs_charge)
  {
    min_abs_charge_ = min_abs_charge;
    max_abs_charge_ = max_abs_charge;
  }
  // Sets the originating spectrum's scan number.
  void PeakGroup::setScanNumber(const int sn)
  {
    scan_number_ = sn;
  }
  // Sets the overall isotope cosine score.
  void PeakGroup::setIsotopeCosine(const float cos)
  {
    isotope_cosine_score_ = cos;
  }
  // Sets the monoisotopic mass (in Da).
  void PeakGroup::setMonoisotopicMass(double mono_mass)
  {
    monoisotopic_mass_ = mono_mass;
  }
  // Sets the representative charge (the charge with the maximum SNR).
  void PeakGroup::setRepAbsCharge(const int max_snr_abs_charge)
  {
    max_snr_abs_charge_ = max_snr_abs_charge;
  }
  // Sets the charge-distribution fit score.
  void PeakGroup::setChargeScore(const float score)
  {
    charge_score_ = score;
  }
  // Sets the average absolute ppm mass error.
  void PeakGroup::setAvgPPMError(const float error)
  {
    avg_ppm_error_ = error;
  }
  // Sets the Qscore, clamped to at most 1.
  void PeakGroup::setQscore(const double qscore)
  {
    qscore_ = std::min(1.0, qscore);
  }
  // Returns the m/z range of the peaks at the representative (max-SNR) charge.
  std::tuple<double, double> PeakGroup::getRepMzRange() const
  {
    return getMzRange(getRepAbsCharge());
  }
  // Returns the {min, max} m/z of the signal peaks carrying the given absolute
  // charge. If the charge is outside the group's range or has no peaks, the
  // sentinel pair {-1, -10} is returned (start > end signals "no range").
  std::tuple<double, double> PeakGroup::getMzRange(int abs_charge) const
  {
    double mz_start = -1;
    double mz_end = -10;
    if (! (abs_charge > max_abs_charge_ || abs_charge < min_abs_charge_))
    {
      for (const auto& tmp_p : logMzpeaks_)
      {
        if (tmp_p.abs_charge != abs_charge) { continue; }
        if (mz_start < 0) { mz_start = tmp_p.mz; }
        else { mz_start = mz_start < tmp_p.mz ? mz_start : tmp_p.mz; }
        mz_end = mz_end > tmp_p.mz ? mz_end : tmp_p.mz;
      }
    }
    return std::tuple<double, double> {mz_start, mz_end};
  }
  // Returns the inclusive absolute charge range {min, max}.
  std::tuple<int, int> PeakGroup::getAbsChargeRange() const
  {
    return std::tuple<int, int> {min_abs_charge_, max_abs_charge_};
  }
  // Returns the per-isotope intensity vector (filled by
  // updateMonoMassAndIsotopeIntensities()).
  const std::vector<float>& PeakGroup::getIsotopeIntensities() const
  {
    return per_isotope_int_;
  }
  // Returns the originating spectrum's scan number.
  int PeakGroup::getScanNumber() const
  {
    return scan_number_;
  }
float PeakGroup::getPeakOccupancy() const
{
int min_i = -1, max_i = 0;
for (const auto& p : *this)
{
int i = p.isotopeIndex;
max_i = std::max(max_i, i);
if (min_i < 0) min_i = i;
min_i = std::min(min_i, i);
}
auto used = std::vector<bool>((max_abs_charge_ - min_abs_charge_ + 1) * (max_i - min_i + 1), false);
for (const auto& p : *this)
{
used[(p.abs_charge - min_abs_charge_ + 1) * (p.isotopeIndex - min_i + 1) - 1] = true;
}
int count = 0;
for (const auto& b : used)
if (b) count++;
return (float)count / (float)used.size();
}
  // Returns the monoisotopic mass (Da).
  double PeakGroup::getMonoMass() const
  {
    return monoisotopic_mass_;
  }
  // Returns the summed intensity of all signal peaks.
  float PeakGroup::getIntensity() const
  {
    return intensity_;
  }
  // Returns the overall isotope cosine score.
  float PeakGroup::getIsotopeCosine() const
  {
    return isotope_cosine_score_;
  }
  // Returns the representative charge (the charge with the maximum SNR).
  int PeakGroup::getRepAbsCharge() const
  {
    return max_snr_abs_charge_;
  }
  // Returns the Qscore of this peak group.
  double PeakGroup::getQscore() const
  {
    return qscore_;
  }
  // Returns the better of the per-spectrum Qscore and the feature-level (2D) Qscore.
  double PeakGroup::getQscore2D() const
  {
    return std::max(qscore_, qscore2D_);
  }
  // Associates this peak group with a mass-feature index.
  void PeakGroup::setFeatureIndex(uint findex)
  {
    findex_ = findex;
  }
  // Returns true if this group corresponds to a targeted mass.
  bool PeakGroup::isTargeted() const
  {
    return is_targeted_;
  }
  // Recomputes per-charge SNR and the total SNR.
  // Per charge c: SNR = mul_factor * cos(c)^2 * signal_power(c) /
  //   (noise_power(c) + (1 - cos(c)^2) * signal_power(c)),
  // i.e. the per-charge isotope cosine decides how much of the raw signal power
  // counts as signal vs. residual noise. mul_factor is an averagine-dependent
  // correction; the tiny 1e-6 terms guard against 0/0.
  // The raw power vectors are cleared at the end to free memory, so this must
  // run after everything that reads them.
  void PeakGroup::updateSNR_(float mul_factor)
  {
    per_charge_snr_ = std::vector<float>(1 + max_abs_charge_, .0);
    float total_nom = 0;
    float total_denom = 0;
    for (size_t c = min_abs_charge_; (int)c < 1 + max_abs_charge_; ++c)
    {
      if (per_charge_cos_.size() > c)
      {
        float per_charge_cos_squared = per_charge_cos_[c] * per_charge_cos_[c];
        float sig_pwr = per_charge_sum_signal_squared_[c] * per_charge_cos_squared;
        float nom = 1e-6f * mul_factor + mul_factor * sig_pwr;
        float denom = 1e-6f * mul_factor + per_charge_noise_pwr_[c] + (1 - per_charge_cos_squared) * per_charge_sum_signal_squared_[c];
        per_charge_snr_[c] = denom <= 0 ? .0f : (nom / denom);
        total_denom += (1 - per_charge_cos_squared) * per_charge_sum_signal_squared_[c];
        total_nom += sqrt(sig_pwr);
      }
    }
    // total SNR: squared sum of per-charge signal amplitudes over total noise
    snr_ = mul_factor * (1e-6f + total_nom * total_nom) / (1e-6f * mul_factor + total_denom + per_charge_noise_pwr_[0]);
    per_charge_sum_signal_squared_.clear();
    per_charge_noise_pwr_.clear();
  }
float PeakGroup::getQvalue() const
{
return qvalue_;
}
float PeakGroup::getSNR() const
{
return snr_;
}
float PeakGroup::getChargeScore() const
{
return charge_score_;
}
float PeakGroup::getAvgPPMError() const
{
return avg_ppm_error_;
}
/// @return the stored average mass error in Da.
float PeakGroup::getAvgDaError() const { return avg_da_error_; }
/// @return the SNR for the given absolute charge, or 0 if the charge is out of range.
float PeakGroup::getChargeSNR(const int abs_charge) const
{
  const bool in_range = abs_charge >= 0 && abs_charge < (int)per_charge_snr_.size();
  return in_range ? per_charge_snr_[abs_charge] : 0.0f;
}
/// @return the isotope cosine for the given absolute charge, or 0 if the charge is out of range.
float PeakGroup::getChargeIsotopeCosine(const int abs_charge) const
{
  const bool in_range = abs_charge >= 0 && abs_charge < (int)per_charge_cos_.size();
  return in_range ? per_charge_cos_[abs_charge] : 0.0f;
}
/// @return the intensity for the given absolute charge, or 0 if the charge is out of range.
float PeakGroup::getChargeIntensity(const int abs_charge) const
{
  // The previous extra per_charge_int_.empty() test was redundant: when the vector is
  // empty, size() <= abs_charge already holds for every abs_charge >= 0. Dropping it
  // makes this getter consistent with getChargeSNR()/getChargeIsotopeCosine().
  if (abs_charge < 0 || (int)per_charge_int_.size() <= abs_charge) { return 0; }
  return per_charge_int_[abs_charge];
}
/// @return true if this peak group was recorded in positive ion mode.
bool PeakGroup::isPositive() const { return is_positive_; }
/// @return the target/decoy classification of this peak group.
PeakGroup::TargetDecoyType PeakGroup::getTargetDecoyType() const { return target_decoy_type_; }
/// Set the target/decoy classification of this peak group.
void PeakGroup::setTargetDecoyType(PeakGroup::TargetDecoyType index) { target_decoy_type_ = index; }
/// Set the mass distance (Da) assumed between adjacent isotopes.
void PeakGroup::setIsotopeDaDistance(const double d) { iso_da_distance_ = d; }
/// @return the mass distance (Da) assumed between adjacent isotopes.
double PeakGroup::getIsotopeDaDistance() const { return iso_da_distance_; }
/// Set the index of this peak group.
void PeakGroup::setIndex(const uint i) { index_ = i; }
/// @return the index of this peak group.
uint PeakGroup::getIndex() const { return index_; }
/// @return the index of the feature this peak group belongs to.
uint PeakGroup::getFeatureIndex() const { return findex_; }
/// Set the 2D Qscore of this peak group.
void PeakGroup::setQscore2D(double fqscore) { qscore2D_ = fqscore; }
/// @return const iterator to the first peak of this group.
std::vector<FLASHHelperClasses::LogMzPeak>::const_iterator PeakGroup::begin() const noexcept { return logMzpeaks_.begin(); }
/// @return const past-the-end iterator of this group's peaks.
std::vector<FLASHHelperClasses::LogMzPeak>::const_iterator PeakGroup::end() const noexcept { return logMzpeaks_.end(); }
/// @return mutable iterator to the first peak of this group.
std::vector<FLASHHelperClasses::LogMzPeak>::iterator PeakGroup::begin() noexcept { return logMzpeaks_.begin(); }
/// @return mutable past-the-end iterator of this group's peaks.
std::vector<FLASHHelperClasses::LogMzPeak>::iterator PeakGroup::end() noexcept { return logMzpeaks_.end(); }
/// @return the i-th peak of this group (unchecked access).
const FLASHHelperClasses::LogMzPeak& PeakGroup::operator[](const Size i) const { return logMzpeaks_[i]; }
void PeakGroup::push_back(const FLASHHelperClasses::LogMzPeak& pg)
{
logMzpeaks_.push_back(pg);
}
/// @return reference to the last peak of this group (undefined if empty).
FLASHHelperClasses::LogMzPeak& PeakGroup::back() { return logMzpeaks_.back(); }
/// @return the number of peaks in this group.
Size PeakGroup::size() const noexcept { return logMzpeaks_.size(); }
/// Remove all peaks; swapping with an empty vector releases the allocated capacity too.
void PeakGroup::clear_()
{
  std::vector<LogMzPeak> empty_peaks;
  logMzpeaks_.swap(empty_peaks);
}
/// Pre-allocate storage for n peaks.
void PeakGroup::reserve(Size n) { logMzpeaks_.reserve(n); }
/// @return true if this group contains no peaks.
bool PeakGroup::empty() const { return logMzpeaks_.empty(); }
/// Exchange the peak storage with the given vector (O(1), no element copies).
void PeakGroup::swap(std::vector<FLASHHelperClasses::LogMzPeak>& x)
{
  x.swap(logMzpeaks_);
}
void PeakGroup::sort()
{
std::sort(logMzpeaks_.begin(), logMzpeaks_.end());
}
/// Set the q-value of this peak group (stored as float).
void PeakGroup::setQvalue(double q)
{
  qvalue_ = static_cast<float>(q);
}
/**
 * @brief Build two charge x isotope intensity matrices (signal and noise), centered on the
 *        apex charge and apex isotope index, each normalized by its own maximum.
 * @param spec the original spectrum from which noise candidate peaks are recruited
 * @param charge_count number of charge rows in the output matrices
 * @param isotope_count number of isotope columns in the output matrices
 * @param avg precalculated averagine (used for the apex isotope index and peak recruiting)
 * @param tol mass tolerance in ppm
 * @return tuple of (signal matrix, noise matrix), both flattened row-major (charge-major)
 */
std::tuple<std::vector<double>, std::vector<double>> PeakGroup::getDLVector(const MSSpectrum& spec,
                                                                            const Size charge_count,
                                                                            const Size isotope_count,
                                                                            const FLASHHelperClasses::PrecalculatedAveragine& avg,
                                                                            double tol)
{
  // assert(isotope_count > 3 && charge_count > 0);
  std::tuple<std::vector<double>, std::vector<double>> sig_noise;
  // Recruit every peak in the spectrum matching this mono mass (noise candidates included).
  auto noisy_peaks = recruitAllPeaksInSpectrum(spec, tol * 1e-6, avg, getMonoMass(), false);
  const int apex_iso_index = avg.getApexIndex(getMonoMass());
  // Determine the apex charge: the charge with the highest per-charge intensity.
  int apex_charge = -1;
  double z_intensity = 0;
  double max_s_intensity = 0, max_n_intensity = 0;
  for (int z = min_abs_charge_; z <= max_abs_charge_; z++)
  {
    double z_i = getChargeIntensity(z);
    if (z_intensity > z_i) continue;
    z_intensity = z_i;
    apex_charge = z;
  }
  // Inclusive window bounds around the apex charge / apex isotope.
  int min_z = int(apex_charge - charge_count / 2);
  int max_z = int(apex_charge + charge_count / 2); // inclusive
  int min_iso_index = int(apex_iso_index - isotope_count / 2);
  int max_iso_index = int(apex_iso_index + isotope_count / 2); // inclusive
  auto& [sig, noise] = sig_noise;
  sig.resize(charge_count * isotope_count, .0);
  noise.resize(charge_count * isotope_count, .0);
  for (const auto& p : logMzpeaks_)
  {
    if (min_z > p.abs_charge || max_z < p.abs_charge) continue;
    if (min_iso_index > p.isotopeIndex || max_iso_index < p.isotopeIndex) continue;
    int z_index = int(p.abs_charge - apex_charge + charge_count / 2);
    int iso_index = int(p.isotopeIndex - apex_iso_index + isotope_count / 2);
    // Fix: for even counts the inclusive window is one slot wider than the matrix, which
    // previously produced an out-of-bounds write; skip indices outside the matrix.
    if (z_index < 0 || z_index >= (int)charge_count || iso_index < 0 || iso_index >= (int)isotope_count) continue;
    int v_index = int(z_index * isotope_count + iso_index);
    sig[v_index] += p.intensity;
    max_s_intensity = std::max(max_s_intensity, sig[v_index]);
  }
  for (const auto& p : noisy_peaks)
  {
    if (min_z > p.abs_charge || max_z < p.abs_charge) continue;
    if (min_iso_index > p.isotopeIndex || max_iso_index < p.isotopeIndex) continue;
    int z_index = int(p.abs_charge - apex_charge + charge_count / 2);
    int iso_index = int(p.isotopeIndex - apex_iso_index + isotope_count / 2);
    // Same bounds guard as for the signal matrix (see fix above).
    if (z_index < 0 || z_index >= (int)charge_count || iso_index < 0 || iso_index >= (int)isotope_count) continue;
    int v_index = int(z_index * isotope_count + iso_index);
    if (sig[v_index] > 0)
    {
      // If this "noisy" peak sits within .2 Da of a theoretical isotope position of this
      // mass, it is counted as signal already - skip it to avoid double counting.
      bool too_close = false;
      for (int off = -1; off < 2; off++)
      {
        if (off + p.isotopeIndex < 0) continue;
        double correct_mass = getMonoMass() + (off + p.isotopeIndex) * iso_da_distance_;
        if (std::abs(p.getUnchargedMass() - correct_mass) < .2)
        {
          too_close = true;
          break;
        }
      }
      if (too_close) continue;
    }
    noise[v_index] += p.intensity;
    // Fix: track the maximum of the *noise* matrix (was sig[v_index], a copy&paste bug),
    // so that the noise matrix below is normalized by its own maximum.
    max_n_intensity = std::max(max_n_intensity, noise[v_index]);
  }
  // Normalize each matrix to [0, 1] by its own maximum.
  if (max_s_intensity > 0)
  {
    for (auto& s : sig)
    {
      s /= max_s_intensity;
    }
  }
  if (max_n_intensity > 0)
  {
    for (auto& n : noise)
    {
      n /= max_n_intensity;
    }
  }
  return sig_noise;
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/TOPDOWN/SpectralDeconvolution.cpp | .cpp | 60,615 | 1,495 | // Copyright (c) 2002-2025, The OpenMS Team -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Kyowon Jeong, Jihyung Kim, Jaekwan Kim $
// $Authors: Kyowon Jeong, Jihyung Kim, Jaekwan Kim $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/TOPDOWN/DeconvolvedSpectrum.h>
#include <OpenMS/ANALYSIS/TOPDOWN/PeakGroup.h>
#include <OpenMS/ANALYSIS/TOPDOWN/SpectralDeconvolution.h>
#ifdef _OPENMP
#include <omp.h>
#endif
namespace OpenMS
{
/// Harmonic charge factors that will be considered for harmonic mass reduction.
inline const std::vector<int> harmonic_charges_ {2, 3, 5, 7, 11};
/// High and low charges are deconvolved differently; this is the inclusive upper bound for a "low" charge.
inline const int low_charge_ = 10; // 10 inclusive
/// Narrowing factor: deconvolution runs with the input tolerance divided by this value; the full input tolerance is applied only at the end to filter out overlapping masses.
inline const double tol_div_factor = 2.5;
/// Default constructor. Registers all user-facing parameters (defaults, restrictions,
/// advanced tags and help texts) with the DefaultParamHandler machinery.
SpectralDeconvolution::SpectralDeconvolution(): DefaultParamHandler("SpectralDeconvolution")
{
  // Per-MS-level ppm tolerances; negative values request automatic estimation.
  defaults_.setValue(
    "tol", DoubleList {10.0, 10.0},
    "PPM Tolerance for MS1, 2, ...: Specify the tolerance values in parts per million (ppm) using the this option. For example, -tol 10.0 5.0 sets "
    "the tolerance to 10.0 ppm for MS1 and 5.0 ppm for MS2. "
    "If a negative value (e.g., -1) is provided, the tolerance will be estimated automatically by a tolerance estimation algorithm.");
  // Deconvolved mass range (Da).
  defaults_.setValue("min_mass", 50.0, "Minimum mass (Da)");
  defaults_.setValue("max_mass", 100000.0, "Maximum mass (Da)");
  // Charge range; the sign encodes the ion mode (see updateMembers_()).
  defaults_.setValue("min_charge", 1,
                     "Minimum charge state for MS1 spectra (can be negative for negative mode). For MSn (n > 1), minimum charge is fixed to 1.");
  defaults_.setValue(
    "max_charge", 100,
    "Maximum charge state for spectra (can be negative for negative mode). Apart from min_charge, this option applies to all MS levels.");
  // Optional user-specified precursor charge/mz (override information from the mzML file).
  defaults_.setValue("precursor_charge", 0,
                     "Charge state of the target precursor. All precursor charge for MSn (n > 1) is fixed to this value. When precursor m/z is "
                     "provided within the input mzML file or is specified using precursor_mz option, "
                     "precursor mass is calculated accordingly.");
  defaults_.setMinInt("precursor_charge", 0);
  defaults_.setValue(
    "precursor_mz", 0.0,
    "Target precursor m/z value. This option must be used in combination with the -precursor_charge option; otherwise, it will be ignored. "
    "If the -precursor_charge option is specified but this option is not, the precursor m/z value from the input mzML file will be used as the "
    "default.");
  defaults_.setMinFloat("precursor_mz", 0.0);
  defaults_.addTag("precursor_mz", "advanced");
  // Quality thresholds (advanced): isotope-pattern cosine and per-charge SNR, per MS level.
  defaults_.setValue("min_cos", DoubleList {.85, .85},
                     "Cosine similarity thresholds between avg. and observed isotope pattern for MS1, 2, ...: e.g., -min_cos 0.3 0.6 to specify 0.3 "
                     "and 0.6 for MS1 and MS2, respectively.");
  defaults_.addTag("min_cos", "advanced");
  defaults_.setMinFloat("min_cos", 0.5);
  defaults_.setMaxFloat("min_cos", 1.0);
  defaults_.setValue("min_snr", DoubleList {.25, .25},
                     "Minimum charge SNR (the SNR of the isotope pattern of a specific charge) thresholds for MS1, 2, ...: e.g., -min_snr 1.0 0.6 to "
                     "specify 1.0 and 0.6 for MS1 and MS2, respectively.");
  defaults_.addTag("min_snr", "advanced");
  defaults_.setMinFloat("min_snr", 0.25);
  // Isotope index error tolerance used during FDR calculation (advanced, beta).
  defaults_.setValue("allowed_isotope_error", 0,
                     "Tolerance for isotope index errors when calculating FDR. For instance, setting a value of 2 permits the inclusion of up to 2 "
                     "isotope errors as valid matches. Beta version.");
  defaults_.addTag("allowed_isotope_error", "advanced");
  // Copy the registered defaults into param_ (DefaultParamHandler convention).
  defaultsToParam_();
}
// Convert an accurate mass into its nominal (integer) mass. Scaling by 0.999497 before
// rounding reduces the error between the original and nominal masses.
int SpectralDeconvolution::getNominalMass(const double mass)
{
  const double scaled = mass * 0.999497;
  return (int)round(scaled);
}
/// Register a user-specified precursor charge: builds a synthetic precursor PeakGroup
/// from the spectrum's recorded precursor m/z and the target charge, and attaches both
/// to the current deconvolved spectrum.
/// NOTE(review): target_precursor_mz_ is only used in the guard below, never as the m/z
/// source - confirm whether it should override precursor.getMZ() here.
void SpectralDeconvolution::setTargetPrecursorCharge_()
{
  const auto& spec = deconvolved_spectrum_.getOriginalSpectrum();
  if (spec.getPrecursors().empty())
  {
    // Fix: previously the early return required target_precursor_mz_ == 0 as well, so a
    // spectrum without precursors but with a user-specified precursor m/z fell through to
    // getPrecursors()[0] below - an out-of-bounds access. Always bail out when there is
    // no precursor to read from. The log message is kept for the original condition.
    if (target_precursor_mz_ == 0)
    {
      OPENMS_LOG_INFO << "Attempted to set target precursor charge but failed - no precursor is found in MS2 spectra. Specify target precursor m/z "
                         "with -target_precursor_mz option"
                      << std::endl;
    }
    return;
  }
  auto precursor = spec.getPrecursors()[0];
  // Uncharge the recorded precursor m/z with the target charge to get the precursor mass.
  double target_precursor_mass
    = (precursor.getMZ() - FLASHHelperClasses::getChargeMass(target_precursor_charge_ > 0)) * std::abs(target_precursor_charge_);
  precursor.setCharge(target_precursor_charge_);
  // Build a minimal, maximally-confident peak group representing the target precursor.
  PeakGroup precursorPeakGroup(1, std::abs(target_precursor_charge_), target_precursor_charge_ > 0);
  precursorPeakGroup.push_back(FLASHHelperClasses::LogMzPeak());
  precursorPeakGroup.setMonoisotopicMass(target_precursor_mass);
  precursorPeakGroup.setSNR(1.0);
  precursorPeakGroup.setRepAbsCharge(std::abs(target_precursor_charge_));
  precursorPeakGroup.setChargeSNR(std::abs(target_precursor_charge_), 1.0);
  precursorPeakGroup.setQscore(1.0);
  deconvolved_spectrum_.setPrecursor(precursor);
  deconvolved_spectrum_.setPrecursorPeakGroup(precursorPeakGroup);
}
/// Collect, for every target peak group, the monoisotopic mass and the averagine-shifted
/// (average) peak mass into the exclusion lists used by signal-decoy runs; both lists are
/// then sorted and deduplicated.
void SpectralDeconvolution::prepareSignalDecoyExclusions_()
{
  for (const auto& pg : *target_dspec_for_decoy_calculation_)
  {
    // Currently only the mass itself (i == 0); the loop form is kept so neighboring
    // isotopes could be excluded as well by widening the bounds.
    for (int i = 0; i <= 0; i++)
    {
      const double mono = pg.getMonoMass();
      excluded_masses_for_decoy_runs_.push_back(mono + i * iso_da_distance_);
      excluded_peak_masses_for_decoy_runs_.push_back(mono + avg_.getAverageMassDelta(mono) + i * iso_da_distance_);
    }
  }
  // Sort and remove duplicates from both exclusion lists.
  auto sort_unique = [](std::vector<double>& v) {
    std::sort(v.begin(), v.end());
    v.erase(std::unique(v.begin(), v.end()), v.end());
  };
  sort_unique(excluded_masses_for_decoy_runs_);
  sort_unique(excluded_peak_masses_for_decoy_runs_);
}
void SpectralDeconvolution::prepareNoiseDecoySpectrum_(const MSSpectrum& spec)
{
std::set<double> signal_mzs;
for (const auto& pg : *target_dspec_for_decoy_calculation_)
{
for (const auto& p : pg)
{
signal_mzs.insert(p.mz);
}
}
auto nspec = spec;
nspec.clear(false);
for (const auto& p : spec)
{
if (signal_mzs.find(p.getMZ()) != signal_mzs.end()) { continue; }
nspec.push_back(p);
}
deconvolved_spectrum_.setOriginalSpectrum(nspec);
}
/// Transfer precursor information into the current deconvolved MSn spectrum: activation
/// method and precursor meta data from the original spectrum, a user-specified precursor
/// charge/mz if given, and otherwise the precursor peak group from the survey scan.
/// @param precursor_peak_group precursor peak group determined from the preceding MS1 scan
void SpectralDeconvolution::registerPrecursorForMSn_(const PeakGroup& precursor_peak_group)
{
  // If multiple precursors are recorded, each iteration overwrites the previous one, so
  // effectively the last precursor wins.
  for (const auto& precursor : deconvolved_spectrum_.getOriginalSpectrum().getPrecursors())
  {
    // Only the first activation method per precursor is used (break below).
    for (const auto& activation_method : precursor.getActivationMethods())
    {
      deconvolved_spectrum_.setActivationMethod(activation_method);
      // HCID is normalized to HCD.
      if (deconvolved_spectrum_.getActivationMethod() == Precursor::ActivationMethod::HCID)
      {
        deconvolved_spectrum_.setActivationMethod(Precursor::ActivationMethod::HCD);
      }
      break;
    }
    deconvolved_spectrum_.setPrecursor(precursor);
  }
  // A user-specified target precursor charge or m/z overrides the file's information.
  if (target_precursor_charge_ != 0 || target_precursor_mz_ > 0) { setTargetPrecursorCharge_(); }
  // Otherwise adopt the precursor peak group found in the survey scan, if any.
  if (deconvolved_spectrum_.getPrecursorPeakGroup().empty() && !precursor_peak_group.empty())
  {
    deconvolved_spectrum_.setPrecursorPeakGroup(precursor_peak_group);
    deconvolved_spectrum_.setPrecursorScanNumber(precursor_peak_group.getScanNumber());
    Precursor precursor(deconvolved_spectrum_.getPrecursor());
    // Derive the charge from the deconvolved precursor mass and the recorded precursor m/z.
    int abs_charge = (int)round(precursor_peak_group.getMonoMass() / precursor.getMZ());
    precursor.setCharge(precursor_peak_group.isPositive() ? abs_charge : -abs_charge);
    deconvolved_spectrum_.setPrecursor(precursor);
  }
}
/// The main entry point: deconvolve a single spectrum into the member
/// deconvolved_spectrum_. Handles decoy-run preparation, MSn precursor registration,
/// charge/mass range adjustment, filter setup and peak transformation before running the
/// core deconvolution.
/// @param spec input (centroided) spectrum
/// @param scan_number scan number stored in the resulting DeconvolvedSpectrum
/// @param precursor_peak_group precursor peak group from the survey scan (for MSn)
void SpectralDeconvolution::performSpectrumDeconvolution(const MSSpectrum& spec, const int scan_number, const PeakGroup& precursor_peak_group)
{
  // First prepare for decoy runs: for a noise decoy, the assumed distance between
  // isotopes is altered by noise_iso_delta_.
  iso_da_distance_ = target_decoy_type_ == PeakGroup::noise_decoy
                       ? Constants::ISOTOPE_MASSDIFF_55K_U * noise_iso_delta_
                       : Constants::ISOTOPE_MASSDIFF_55K_U;
  // Reset the exclusion lists (peak masses, mass bins, monoisotopic masses) used by
  // signal-decoy runs.
  excluded_peak_masses_for_decoy_runs_.clear();
  excluded_mass_bins_for_decoy_runs_.reset();
  excluded_masses_for_decoy_runs_.clear();
  if (target_decoy_type_ == PeakGroup::signal_decoy) { prepareSignalDecoyExclusions_(); }
  ms_level_ = spec.getMSLevel();
  deconvolved_spectrum_ = DeconvolvedSpectrum(scan_number);
  deconvolved_spectrum_.setOriginalSpectrum(spec);
  // For a noise decoy, replace the spectrum with one stripped of all signal peaks.
  if (target_decoy_type_ == PeakGroup::noise_decoy) { prepareNoiseDecoySpectrum_(spec); }
  if (ms_level_ > 1) { registerPrecursorForMSn_(precursor_peak_group); }
  // Based on MS level, adjust charge and mass ranges; the precursor charge and mass
  // constrain them for MSn.
  current_max_charge_ = deconvolved_spectrum_.getCurrentMaxAbsCharge(max_abs_charge_);
  current_max_mass_ = deconvolved_spectrum_.getCurrentMaxMass(max_mass_);
  current_min_mass_ = deconvolved_spectrum_.getCurrentMinMass(min_mass_);
  // Set the universal pattern filter and the harmonic pattern filters.
  setFilters_();
  // Transform raw peaks into uncharged log-m/z peaks.
  updateLogMzPeaks_();
  if (log_mz_peaks_.empty()) { return; }
  // The core FLASHDeconv routine in which the deconvolution is performed.
  generatePeakGroupsFromSpectrum_();
}
/// Pull all parameter values from param_ into the member variables
/// (DefaultParamHandler callback).
void SpectralDeconvolution::updateMembers_()
{
  const int min_c = param_.getValue("min_charge");
  const int max_c = param_.getValue("max_charge");
  // The sign of min_charge encodes the ion mode; internally only absolute charges are kept.
  is_positive_ = min_c > 0;
  min_abs_charge_ = std::min(abs(min_c), abs(max_c));
  max_abs_charge_ = std::max(abs(min_c), abs(max_c));
  current_max_mass_ = max_mass_ = param_.getValue("max_mass");
  current_min_mass_ = min_mass_ = param_.getValue("min_mass");
  bin_mul_factors_.clear();
  tolerance_ = param_.getValue("tol");
  for (double& t : tolerance_)
  {
    t *= 1e-6; // ppm -> fraction
    // Bins are tol_div_factor times finer than the input tolerance.
    bin_mul_factors_.push_back(1.0 / t * tol_div_factor);
  }
  min_isotope_cosine_ = param_.getValue("min_cos");
  min_snr_ = param_.getValue("min_snr");
  target_precursor_mz_ = param_.getValue("precursor_mz");
  target_precursor_charge_ = param_.getValue("precursor_charge");
}
/// @return the precalculated averagine model used by this deconvolution instance.
const FLASHHelperClasses::PrecalculatedAveragine& SpectralDeconvolution::getAveragine() { return avg_; }
/// Precalculate the averagine isotope model up to the current maximum mass.
/// @param use_RNA_averagine estimate isotope patterns from RNA instead of peptide averagine
void SpectralDeconvolution::calculateAveragine(const bool use_RNA_averagine)
{
  CoarseIsotopePatternGenerator generator(300);
  // Estimate the isotope pattern at the maximum considered mass and trim negligible tails.
  auto iso = use_RNA_averagine ? generator.estimateFromRNAWeight(current_max_mass_)
                               : generator.estimateFromPeptideWeight(current_max_mass_);
  iso.trimRight(0.0001 * iso.getMostAbundant().getIntensity());
  // At least 200 isotopes are modeled, more if the trimmed pattern is longer.
  const int max_isotope = std::max(200, (int)iso.size());
  generator.setMaxIsotope(max_isotope);
  // For noise decoys, the averagine is built with the altered isotope distance.
  avg_ = FLASHHelperClasses::PrecalculatedAveragine(50, current_max_mass_, 25, generator, use_RNA_averagine,
                                                    target_decoy_type_ == PeakGroup::noise_decoy ? Constants::ISOTOPE_MASSDIFF_55K_U * noise_iso_delta_ : -1);
  avg_.setMaxIsotopeIndex((int)(max_isotope - 1));
}
/// Generate the log-m/z space filters: the universal pattern (one -log(charge) offset per
/// charge) and, per harmonic charge factor, a matrix of offsets used to detect harmonic
/// artifact masses.
void SpectralDeconvolution::setFilters_()
{
  universal_pattern_.clear();
  int charge_range = current_max_charge_;
  // Offset for charge (i+1) in log-m/z space: log(mass/mz) = log(charge) up to the
  // proton-mass correction, stored negated.
  for (int i = 0; i < charge_range; i++)
  {
    universal_pattern_.push_back(-log(i + 1));
  }
  harmonic_pattern_matrix_ = Matrix<double>(harmonic_charges_.size(), charge_range, .0);
  for (Size k = 0; k < harmonic_charges_.size(); k++)
  {
    int hc = harmonic_charges_[k];
    int n = hc / 2;
    // int m = (1 + hc) / 2;
    for (int i = 0; i < charge_range; i++)
    {
      // Interpolate between the offsets of consecutive charges to place the harmonic
      // position for factor hc (a = exp(offset) of the previous charge, b = current).
      double a = i > 0 ? exp(-universal_pattern_[i - 1]) : 0;
      double b = exp(-universal_pattern_[i]);
      harmonic_pattern_matrix_.setValue(k, i, -log(b - (b - a) * n / hc));
    }
  }
}
/// Rebuild log_mz_peaks_ from the current original spectrum: every peak with positive
/// intensity is transformed into an uncharged log-m/z peak.
void SpectralDeconvolution::updateLogMzPeaks_()
{
  const auto& ospec = deconvolved_spectrum_.getOriginalSpectrum();
  log_mz_peaks_.clear();
  log_mz_peaks_.reserve(ospec.size());
  for (const auto& peak : ospec)
  {
    // Skip zero- and negative-intensity peaks.
    if (peak.getIntensity() <= 0) { continue; }
    log_mz_peaks_.emplace_back(peak, is_positive_);
  }
}
/// Inverse of getBinNumber_: map a bin index back to its (approximate) raw value.
double SpectralDeconvolution::getBinValue_(const Size bin, const double min_value, const double bin_mul_factor)
{
  return min_value + (double)bin / bin_mul_factor;
}
/// Map a raw value to its bin index; values below the minimum map to bin 0.
Size SpectralDeconvolution::getBinNumber_(const double value, const double min_value, const double bin_mul_factor)
{
  return value < min_value ? (Size)0 : (Size)round((value - min_value) * bin_mul_factor);
}
// From log mz to mz bins.
void SpectralDeconvolution::binLogMzPeaks_(const Size bin_number, std::vector<float>& binned_log_mz_peak_intensities)
{
binned_log_mz_peaks_ = boost::dynamic_bitset<>(bin_number);
double bin_mul_factor = bin_mul_factors_[ms_level_ - 1];
for (const auto& p : log_mz_peaks_)
{
Size bi = getBinNumber_(p.logMz, mz_bin_min_value_, bin_mul_factor);
if (bi >= bin_number) { break; }
binned_log_mz_peaks_.set(bi);
binned_log_mz_peak_intensities[bi] += p.intensity;
}
}
// Find candidate mass bins from the current spectrum. The runtime of FLASHDeconv is determined by this function.
// For every (m/z bin, charge) pair the corresponding mass bin is scored by accumulating
// supporting evidence (continuous charges, isotope peaks) and penalizing harmonic
// artifacts. Outputs the per-mass-bin intensity scores and marks candidate bins in
// binned_log_masses_.
void SpectralDeconvolution::updateCandidateMassBins_(std::vector<float>& mass_intensities, const std::vector<float>& mz_intensities)
{ //
  Size mz_bin_index = binned_log_mz_peaks_.find_first();
  auto mz_bin_index_reverse = std::vector<Size>();
  mz_bin_index_reverse.reserve(binned_log_mz_peaks_.count());
  // invert mz bins so charges are counted from small to large given a mass
  while (mz_bin_index != binned_log_mz_peaks_.npos)
  {
    mz_bin_index_reverse.push_back(mz_bin_index);
    mz_bin_index = binned_log_mz_peaks_.find_next(mz_bin_index);
  }
  size_t h_charge_size = harmonic_charges_.size();
  long bin_end = (long)binned_log_masses_.size();
  auto support_peak_count
    = std::vector<unsigned short>(binned_log_masses_.size(), 0); // per mass bin how many peaks are present to support that mass bin
  // to calculate continuous charges, the previous charge value per mass should be stored
  auto prev_charges = std::vector<unsigned short>(binned_log_masses_.size(), current_max_charge_ + 2);
  // not just charges but intensities are stored to see the intensity fold change
  auto prev_intensities = std::vector<float>(binned_log_masses_.size(), .0f);
  mass_intensities = std::vector<float>(binned_log_masses_.size(), .0f);
  double bin_mul_factor = bin_mul_factors_[ms_level_ - 1];
  std::vector<float> sub_max_h_intensity(h_charge_size, .0f);
  // traverse from right to left in the log mz bin space
  for (auto iter = mz_bin_index_reverse.rbegin(); iter < mz_bin_index_reverse.rend(); iter++)
  {
    mz_bin_index = *iter;
    const float intensity = mz_intensities[mz_bin_index];
    const double log_mz = getBinValue_(mz_bin_index, mz_bin_min_value_, bin_mul_factor); // uncharged log mz;
    const double mz = exp(log_mz);                                                       // uncharged mz
    const double iso_div_by_mz = iso_da_distance_ / mz;
    // scan through charges
    for (int j = 0; j < current_max_charge_; j++) // loop over all charges
    {
      // mass is given by shifting by binned_universal_pattern_[j]
      const long mass_bin_index = (long)mz_bin_index + binned_universal_pattern_[j];
      if (mass_bin_index < 0) { continue; }
      if (mass_bin_index >= bin_end) { break; }
      // mass bins excluded for signal-decoy runs are skipped entirely
      if (! excluded_mass_bins_for_decoy_runs_.empty() && excluded_mass_bins_for_decoy_runs_[mass_bin_index]) { continue; }
      auto& spc = support_peak_count[mass_bin_index];
      const int abs_charge = (j + 1);
      float& prev_intensity = prev_intensities[mass_bin_index];
      auto& prev_charge = prev_charges[mass_bin_index];
      // continuous means the previously seen charge for this mass was exactly j-1
      const bool charge_not_continous = prev_charge - j != -1 && (prev_charge <= current_max_charge_);
      bool pass_first_check = false;
      // intensity ratio between consecutive charges should not exceed the factor.
      const float highest_factor = 10.0f;
      // for high charges the allowed fold change shrinks towards highest_factor/2
      const float factor = abs_charge <= low_charge_ ? highest_factor : (highest_factor / 2 + highest_factor / 2 * low_charge_ / (float)abs_charge);
      // intensity ratio between consecutive charges for possible harmonic should be within this factor
      const float hfactor = factor / 2.0f;
      // intensity of previous charge
      // intensity ratio between current and previous charges (symmetrized to >= 1)
      float intensity_ratio = prev_intensity <= 0 ? (factor + 1) : (intensity / prev_intensity);
      intensity_ratio = intensity_ratio < 1 ? 1.0f / intensity_ratio : intensity_ratio;
      float support_peak_intensity = 0;
      // check if peaks of continuous charges are present
      std::fill(sub_max_h_intensity.begin(), sub_max_h_intensity.end(), .0f);
      // if charge not continuous or intensity ratio is too high reset support_peak_count
      if (charge_not_continous || intensity_ratio > factor) { spc = 0; }
      else
      {
        pass_first_check = true;
        if (spc == 0 && abs_charge > low_charge_) { support_peak_intensity = prev_intensity; }
      }
      // for low charges, check isotope peak presence.
      if (! pass_first_check && abs_charge <= low_charge_)
      {
        // find the next isotope peak(s) (d = +1, and -1 if the apex isotope is not the
        // first) and try to avoid harmonic masses
        for (int d = 1; d >= (avg_.getApexIndex(mz * abs_charge) > 0 ? -1 : 1); d -= 2)
        {
          bool iso_exist = false;
          int next_iso_bin = 0;
          // expected bin of the next isotope for this charge, and for charge +/- 1
          const int nib = (int)getBinNumber_(log_mz + d * iso_div_by_mz / abs_charge, mz_bin_min_value_, bin_mul_factor);
          const int nibr
            = abs_charge > 1 ? (int)getBinNumber_(log_mz + (d * iso_div_by_mz / (abs_charge - 1)), mz_bin_min_value_, bin_mul_factor) : 0;
          const int nibl = (int)getBinNumber_(log_mz + (d * iso_div_by_mz / (abs_charge + 1)), mz_bin_min_value_, bin_mul_factor);
          if (abs(nib - nibr) < tol_div_factor
              || abs(nib - nibl)
                   < tol_div_factor) // if different charges are not distinguishable, we ignore. Not informative and the source of the errors.
            break;
          // search the expected isotope bin +/- 1 bin; keep the most intense match
          for (int t = -1; t < 2; t++)
          {
            int nibt = nib + t;
            if (std::abs(nibt - (int)mz_bin_index) >= tol_div_factor && nibt > 0 && nibt < (int)binned_log_mz_peaks_.size()
                && binned_log_mz_peaks_[nibt])
            {
              iso_exist = true;
              pass_first_check = true;
              if (next_iso_bin == 0 || mz_intensities[next_iso_bin] < mz_intensities[nibt]) { next_iso_bin = nibt; }
            }
          }
          // harmonic check
          if (iso_exist)
          {
            const double h_threshold = intensity + mz_intensities[next_iso_bin]; // std::min(intensity, mz_intensities[next_iso_bin]); //
            for (size_t k = 0; k < h_charge_size; k++)
            {
              const int hc = harmonic_charges_[k];
              int harmonic_cntr = 0;
              if (ms_level_ > 1 && hc * abs_charge > current_max_charge_) { break; }
              const int hdiff = (int)round((double)(next_iso_bin - mz_bin_index)) / hc * (hc / 2);
              const int next_harmonic_iso_bin = (int)mz_bin_index + hdiff;
              // check if there are harmonic peaks between the current peak and the next isotope peak.
              // no perfect filtration. Just obvious ones are filtered out by checking if a peak is in the harmonic position and the intensity ratio
              // is within two folds from the current peak (specified by mz_bin_index)
              if (std::abs(next_harmonic_iso_bin - (int)mz_bin_index) >= tol_div_factor && next_harmonic_iso_bin >= 0
                  && next_harmonic_iso_bin < (int)binned_log_mz_peaks_.size() && binned_log_mz_peaks_[next_harmonic_iso_bin]
                  && mz_intensities[next_harmonic_iso_bin] > h_threshold / 2 && mz_intensities[next_harmonic_iso_bin] < h_threshold * 2)
              {
                harmonic_cntr++;
                sub_max_h_intensity[k] += mz_intensities[next_harmonic_iso_bin];
              }
              if (harmonic_cntr > 0) { pass_first_check = false; }
            }
          }
          if (pass_first_check)
          {
            support_peak_intensity += mz_intensities[next_iso_bin];
          }
        }
        // only pass if no harmonic evidence was accumulated at all
        pass_first_check &= *std::max_element(sub_max_h_intensity.begin(), sub_max_h_intensity.end()) <= 0;
      }
      if (pass_first_check)
      {
        if (prev_charge - j == -1) // check harmonic artifacts for high charge ranges
        {
          // establish a plausible intensity corridor from current and previous charge
          float max_intensity = intensity;
          float min_intensity = prev_intensity;
          if (prev_intensity <= .0)
          {
            max_intensity = intensity;
            min_intensity = intensity;
          }
          else if (min_intensity > max_intensity)
          {
            float tmpi = min_intensity;
            min_intensity = max_intensity;
            max_intensity = tmpi;
          }
          const float high_threshold = max_intensity * hfactor;
          const float low_threshold = min_intensity / hfactor;
          bool is_harmonic = false;
          // check if harmonic peaks are present with different harmonic multiple factors (2, 3, 5, 7, 11 defined in harmonic_charges_).
          int min_dis = (int)(tol_div_factor + 1);
          for (size_t k = 0; k < h_charge_size; k++)
          {
            if (ms_level_ > 1 && harmonic_charges_[k] * abs_charge > current_max_charge_) break;
            float harmonic_intensity = 0;
            // scan a small tolerance window around the harmonic position
            for (int t = -(int)tol_div_factor; t <= (int)tol_div_factor; t++)
            {
              long hmz_bin_index = mass_bin_index - binned_harmonic_patterns.getValue(k, j) + t;
              if (hmz_bin_index > 0 && hmz_bin_index != (long)mz_bin_index && hmz_bin_index < (int)binned_log_mz_peaks_.size()
                  && binned_log_mz_peaks_[hmz_bin_index])
              {
                float h_intensity = mz_intensities[hmz_bin_index];
                if (h_intensity > low_threshold && h_intensity < high_threshold)
                {
                  is_harmonic = true;
                  // keep the intensity of the closest harmonic hit
                  if (abs(t) < min_dis)
                  {
                    harmonic_intensity = h_intensity;
                    min_dis = abs(t);
                  }
                }
              }
            }
            sub_max_h_intensity[k] += harmonic_intensity;
          }
          if (! is_harmonic) // if it is not harmonic
          {
            mass_intensities[mass_bin_index] += intensity + support_peak_intensity;
            if (! binned_log_masses_[mass_bin_index])
            {
              spc++;
              // a mass bin becomes a candidate once enough supporting peaks are seen
              if (spc >= min_support_peak_count_ || spc >= abs_charge / 2) { binned_log_masses_[mass_bin_index] = true; }
            }
          }
          else // if harmonic
          {
            // penalize the mass bin by the strongest harmonic evidence found
            mass_intensities[mass_bin_index]
              -= *std::max_element(sub_max_h_intensity.begin(), sub_max_h_intensity.end());
            if (spc > 0) { spc--; }
          }
        }
        else if (abs_charge <= low_charge_) // for low charge, include the mass if isotope is present
        {
          mass_intensities[mass_bin_index] += intensity + support_peak_intensity;
          if (! binned_log_masses_[mass_bin_index])
          {
            spc++;
            binned_log_masses_[mass_bin_index] = true;
          }
        }
      }
      // remember this charge/intensity for the continuity check of the next charge
      prev_intensity = intensity;
      prev_charge = j;
    }
  }
}
// Subfunction of updateMassBins_. If a peak corresponds to multiple masses, only one mass is selected for the peak based on intensities.
// mass level harmonic check is also performed in this function
// it also outputs the charge range of each mass bin
/// @param mass_intensities per-mass-bin scores from updateCandidateMassBins_
/// @return 2 x (number of mass bins) matrix: row 0 = minimal charge index, row 1 = maximal
///         charge index observed for each surviving mass bin
Matrix<int> SpectralDeconvolution::filterMassBins_(const std::vector<float>& mass_intensities)
{
  // Row 0 holds per-bin minimum charge index (init INT_MAX), row 1 the maximum (init INT_MIN).
  Matrix<int> abs_charge_ranges(2, binned_log_masses_.size(), INT_MAX);
  for (Size i = 0; i < binned_log_masses_.size(); i++)
  {
    abs_charge_ranges.setValue(1, (int)i, INT_MIN);
  }
  Size mz_bin_index = binned_log_mz_peaks_.find_first();
  long bin_size = (long)binned_log_masses_.size();
  // Bins that were NOT candidates before are skipped; binned_log_masses_ is rebuilt below
  // with only the per-peak winners.
  auto to_skip = binned_log_masses_.flip();
  binned_log_masses_.reset();
  const int select_top_N = 2; // select top N charges per peak. We allow up to 2 just to consider frequent coelution.
  std::vector<long> max_indices(select_top_N, -1);
  std::vector<int> max_intensity_abs_charge_ranges(select_top_N, -1);
  // For every m/z bin, find the best (top-N) mass bins across all charges.
  while (mz_bin_index != binned_log_mz_peaks_.npos)
  {
    std::fill(max_indices.begin(), max_indices.end(), -1);
    std::fill(max_intensity_abs_charge_ranges.begin(), max_intensity_abs_charge_ranges.end(), -1);
    float max_intensity = 0;
    for (int j = 0; j < current_max_charge_; j++)
    {
      long mass_bin_index = (long)mz_bin_index + binned_universal_pattern_[j];
      if (mass_bin_index < 0) { continue; }
      if (mass_bin_index >= bin_size) { break; }
      // Targeted masses always win: the sentinel 1e38 prevents any normal bin from
      // displacing them in later iterations.
      if (! target_mono_masses_.empty() && target_mass_bins_[mass_bin_index])
      {
        float t = mass_intensities[mass_bin_index];
        if (t == 0)
        { // no signal
          continue;
        }
        max_intensity = 1e38f;
        // store best values after shift by 1.
        for (int i = select_top_N - 1; i > 0; i--)
        {
          max_indices[i] = max_indices[i - 1];
          max_intensity_abs_charge_ranges[i] = max_intensity_abs_charge_ranges[i - 1];
        }
        max_indices[0] = mass_bin_index;
        max_intensity_abs_charge_ranges[0] = j;
      }
      else
      {
        if (to_skip[mass_bin_index]) { continue; }
        float t = mass_intensities[mass_bin_index];
        if (t == 0) // no signal
        {
          continue;
        }
        if (max_intensity == 0 || max_intensity < t)
        {
          // store best values after shift by 1.
          bool big_diff = t > max_intensity * 2.0;
          for (int i = select_top_N - 1; i > 0; i--)
          {
            max_indices[i] = big_diff ? -1 : max_indices[i - 1]; // if big change, only take the best charge
            max_intensity_abs_charge_ranges[i] = max_intensity_abs_charge_ranges[i - 1];
          }
          max_indices[0] = mass_bin_index;
          max_intensity_abs_charge_ranges[0] = j;
          max_intensity = t;
        }
      }
    }
    // Commit the winners: widen their charge range and re-mark them as mass bins.
    for (int i = 0; i < select_top_N; i++)
    {
      long max_index = max_indices[i];
      int max_intensity_abs_charge_range = max_intensity_abs_charge_ranges[i];
      if (max_index >= 0 && max_index < bin_size)
      {
        abs_charge_ranges.setValue(0, max_index, std::min(abs_charge_ranges.getValue(0, max_index), max_intensity_abs_charge_range));
        abs_charge_ranges.setValue(1, max_index, std::max(abs_charge_ranges.getValue(1, max_index), max_intensity_abs_charge_range));
        binned_log_masses_[max_index] = true;
      }
    }
    mz_bin_index = binned_log_mz_peaks_.find_next(mz_bin_index);
  }
  return abs_charge_ranges;
}
/// Update the mass bins used to select peaks from the input spectrum: first collect
/// candidate mass bins via universal-pattern matching, then filter them per peak.
/// @return per-mass-bin charge ranges (see filterMassBins_)
Matrix<int> SpectralDeconvolution::updateMassBins_(const std::vector<float>& mz_intensities)
{
  std::vector<float> mass_intensities;
  updateCandidateMassBins_(mass_intensities, mz_intensities);
  return filterMassBins_(mass_intensities);
}
// With binned_log_masses_ from updateMassBins_ function, select peaks from the same mass in the original input spectrum
void SpectralDeconvolution::getCandidatePeakGroups_(const Matrix<int>& per_mass_abs_charge_ranges)
{
double bin_mul_factor = bin_mul_factors_[ms_level_ - 1];
double tol = tolerance_[ms_level_ - 1];
int charge_range = current_max_charge_;
Size mass_bin_size = binned_log_masses_.size();
int log_mz_peak_size = (int)log_mz_peaks_.size();
// this stores which peak is now being considered per charge. Per charge, peak is considered from left (lowest m/z) to right (highest m/z).
auto current_peak_index = std::vector<int>(charge_range, 0);
deconvolved_spectrum_.reserve(binned_log_masses_.count());
Size mass_bin_index = binned_log_masses_.find_first();
auto peak_bin_numbers = std::vector<Size>(log_mz_peak_size);
// per peak, store the m/z bin number for fast processing
for (int i = 0; i < log_mz_peak_size; i++)
{
peak_bin_numbers[i] = getBinNumber_(log_mz_peaks_[i].logMz, mz_bin_min_value_, bin_mul_factor);
}
// main iteration. per_mass_abs_charge_ranges gives the range of charges for each mass bin
while (mass_bin_index != binned_log_masses_.npos)
{
double log_m = getBinValue_(mass_bin_index, mass_bin_min_value_, bin_mul_factor);
double mass = exp(log_m);
PeakGroup pg(1, per_mass_abs_charge_ranges.getValue(1, mass_bin_index) + 1, // make an empty peakGroup (mass)
is_positive_);
pg.reserve(charge_range * 12);
pg.setIsotopeDaDistance(iso_da_distance_);
// the range of isotope span. For a given peak the peaks within the span are searched.
Size right_index = avg_.getRightCountFromApex(mass);
Size left_index = avg_.getLeftCountFromApex(mass);
// scan through charge - from mass to m/z
for (size_t j = per_mass_abs_charge_ranges.getValue(0, mass_bin_index); j <= (size_t)per_mass_abs_charge_ranges.getValue(1, mass_bin_index); j++)
{
int max_peak_index = -1;
size_t abs_charge = j + 1;
int bin_offset = binned_universal_pattern_[j];
if (mass_bin_index < (size_t)bin_offset) { continue; }
Size b_index = mass_bin_index - bin_offset; // m/z bin
int& cpi = current_peak_index[j]; // in this charge which peak is to be considered?
double max_intensity = -1;
while (cpi < log_mz_peak_size - 1) // scan through peaks from cpi
{
if (peak_bin_numbers[cpi] == b_index) // if the peak of consideration matches to this mass with charge abs_charge
{
double intensity = log_mz_peaks_[cpi].intensity;
if (intensity > max_intensity) // compare with other matching peaks and select the most intense peak (in max_peak_index)
{
max_intensity = intensity;
max_peak_index = cpi;
}
}
else if (peak_bin_numbers[cpi] > b_index) { break; }
cpi++;
}
if (max_peak_index < 0) { continue; }
// Search for local max.
if (max_peak_index > 0 && max_peak_index <= log_mz_peak_size && peak_bin_numbers[max_peak_index - 1] == b_index - 1
&& log_mz_peaks_[max_peak_index - 1].intensity > max_intensity)
{
continue;
}
if (max_peak_index < log_mz_peak_size - 1 && peak_bin_numbers[max_peak_index + 1] == b_index + 1
&& log_mz_peaks_[max_peak_index + 1].intensity > max_intensity)
{
continue;
}
// now we have a matching peak for this mass of charge abs_charge. From here, isotope peaks are collected
const double mz = log_mz_peaks_[max_peak_index].mz; // charged mz
const double iso_delta = iso_da_distance_ / (double)abs_charge;
double mz_delta = std::min(max_mass_dalton_tolerance_ / (double)abs_charge, tol * mz); //
double max_mz = mz;
for (int peak_index = max_peak_index; peak_index < log_mz_peak_size; peak_index++)
{
const double observed_mz = log_mz_peaks_[peak_index].mz;
double mz_diff = observed_mz - mz;
int tmp_i = (int)round(mz_diff / iso_delta);
if (observed_mz - max_mz > (double)right_index * iso_delta + mz_delta) { break; }
if (abs(mz_diff - tmp_i * iso_delta) < mz_delta) // if peak is signal
{
const Size bin = peak_bin_numbers[peak_index] + bin_offset;
if (bin < mass_bin_size && ! (bin < excluded_mass_bins_for_decoy_runs_.size() && excluded_mass_bins_for_decoy_runs_[bin]))
{
LogMzPeak p(log_mz_peaks_[peak_index]);
p.abs_charge = (int)abs_charge;
p.isotopeIndex = tmp_i;
pg.push_back(p);
}
}
}
for (int peak_index = max_peak_index - 1; peak_index >= 0; peak_index--)
{
const double observed_mz = log_mz_peaks_[peak_index].mz;
double mz_diff = observed_mz - mz;
int tmp_i = (int)round(mz_diff / iso_delta);
if (max_mz - observed_mz > (float)left_index * iso_delta + mz_delta) { break; }
if (abs(mz_diff - tmp_i * iso_delta) < mz_delta)
{
const Size bin = peak_bin_numbers[peak_index] + bin_offset;
if (bin < mass_bin_size && ! (bin < excluded_mass_bins_for_decoy_runs_.size() && excluded_mass_bins_for_decoy_runs_[bin]))
{
LogMzPeak p(log_mz_peaks_[peak_index]);
p.abs_charge = (int)abs_charge;
p.isotopeIndex = tmp_i;
pg.push_back(p);
}
}
}
}
if (! pg.empty())
{
double max_intensity = -1.0;
double t_mass = .0;
auto new_peaks = std::vector<LogMzPeak>();
new_peaks.reserve(pg.size());
for (const auto& p : pg)
{
if (max_intensity < p.intensity)
{
max_intensity = p.intensity;
t_mass = p.getUnchargedMass();
}
}
double iso_tolerance = tol * t_mass;
int min_off = 10000;
int max_off = -1;
int max_charge = -1;
int apex_index = (int)avg_.getApexIndex(t_mass);
for (auto& p : pg)
{
p.isotopeIndex = (int)round((p.getUnchargedMass() - t_mass) / iso_da_distance_);
if (abs(t_mass - p.getUnchargedMass() + iso_da_distance_ * p.isotopeIndex) > iso_tolerance) { continue; }
p.isotopeIndex += apex_index;
new_peaks.push_back(p);
min_off = min_off > p.isotopeIndex ? p.isotopeIndex : min_off;
max_off = max_off < p.isotopeIndex ? p.isotopeIndex : max_off;
max_charge = max_charge < p.abs_charge ? p.abs_charge : max_charge;
}
if (min_off != max_off)
{
pg.swap(new_peaks);
pg.updateMonoMassAndIsotopeIntensities(tol);
if (pg.getMonoMass() < current_min_mass_ || pg.getMonoMass() > current_max_mass_) { continue; }
pg.setScanNumber(deconvolved_spectrum_.getScanNumber());
pg.sort();
deconvolved_spectrum_.push_back(pg); //
}
}
mass_bin_index = binned_log_masses_.find_next(mass_bin_index);
}
}
/// Returns a mutable reference to the result of the most recent deconvolution run.
DeconvolvedSpectrum& SpectralDeconvolution::getDeconvolvedSpectrum()
{
  return deconvolved_spectrum_;
}
/// Set the target/decoy mode for the next deconvolution run.
/// @param target_decoy_type the run type to assign to generated peak groups
/// @param target_dspec_for_decoy_calculation the target (non-decoy) deconvolution result the decoy run compares
///        against; only its address is stored, so the referenced object must outlive this run.
/// Fixes: parameter name typo ("calcualtion") and brace-less if.
void SpectralDeconvolution::setTargetDecoyType(PeakGroup::TargetDecoyType target_decoy_type,
                                               const DeconvolvedSpectrum& target_dspec_for_decoy_calculation)
{
  target_decoy_type_ = target_decoy_type;
  if (target_decoy_type == PeakGroup::signal_decoy)
  {
    // signal decoys additionally need the allowed isotope error window from the parameters
    allowed_iso_error_ = param_.getValue("allowed_isotope_error");
  }
  target_dspec_for_decoy_calculation_ = &target_dspec_for_decoy_calculation;
}
// spectral deconvolution main function: sets up the log-mass/log-mz bin ranges, precomputes the binned universal and
// harmonic patterns, applies exclusion/target mass bins, then runs mass-bin generation, peak-group collection and
// scoring/filtering.
void SpectralDeconvolution::generatePeakGroupsFromSpectrum_()
{
  deconvolved_spectrum_.clear();
  int current_charge_range = current_max_charge_;
  int tmp_peak_cntr = current_charge_range - min_support_peak_count_;
  tmp_peak_cntr = tmp_peak_cntr < 0 ? 0 : tmp_peak_cntr;
  // upper bound of the log-mass bin range: limited by the highest observed log m/z and the configured max mass
  double mass_bin_max_value = std::min(log_mz_peaks_.back().logMz - universal_pattern_[tmp_peak_cntr],
                                       log(current_max_mass_ + (double)avg_.getRightCountFromApex(current_max_mass_) + 1.0));
  double bin_mul_factor = bin_mul_factors_[ms_level_ - 1];
  // lower bound of the log-mass range (mono mass of ~50 Da after averagine correction)
  mass_bin_min_value_ = log(std::max(1.0, 50 - avg_.getAverageMassDelta(50)));
  mz_bin_min_value_ = log_mz_peaks_[0].logMz;
  double mz_bin_max_value = log_mz_peaks_.back().logMz;
  Size mass_bin_number = getBinNumber_(mass_bin_max_value, mass_bin_min_value_, bin_mul_factor) + 1;
  // per charge, the constant bin offset between a mass bin and its corresponding m/z bin
  binned_universal_pattern_.clear();
  for (int i = 0; i < current_charge_range; i++)
  {
    binned_universal_pattern_.push_back((int)round((mz_bin_min_value_ - universal_pattern_[i] - mass_bin_min_value_) * bin_mul_factor));
  }
  // same offsets for the harmonic (charge-error) patterns
  // NOTE(review): member lacks the trailing underscore used by the other members — consider renaming for consistency.
  binned_harmonic_patterns = Matrix<int>(harmonic_pattern_matrix_.rows(), harmonic_pattern_matrix_.cols(), 0);
  for (Size k = 0; k < (Size)harmonic_pattern_matrix_.rows(); k++)
  {
    for (Size i = 0; i < (Size)harmonic_pattern_matrix_.cols(); i++)
    {
      binned_harmonic_patterns.setValue(
        k, i, (int)round((mz_bin_min_value_ - harmonic_pattern_matrix_.getValue(k, i) - mass_bin_min_value_) * bin_mul_factor));
    }
  }
  Size mz_bin_number = getBinNumber_(mz_bin_max_value, mz_bin_min_value_, bin_mul_factor) + 1;
  auto binned_log_mz_peak_intensities = std::vector<float>(mz_bin_number, .0f);
  // bin log mzs
  binLogMzPeaks_(mz_bin_number, binned_log_mz_peak_intensities);
  binned_log_masses_ = boost::dynamic_bitset<>(mass_bin_number);
  // for FDR estimation: mark mass bins (plus a tolerance window) that must not produce decoy peak groups
  if (! excluded_peak_masses_for_decoy_runs_.empty())
  {
    excluded_mass_bins_for_decoy_runs_ = boost::dynamic_bitset<>(binned_log_masses_.size());
    // always positive
    int bin_offset = (int)round(tol_div_factor);
    for (double m : excluded_peak_masses_for_decoy_runs_)
    {
      if (m <= 0) { continue; }
      Size j = getBinNumber_(log(m), mass_bin_min_value_, bin_mul_factors_[ms_level_ - 1]);
      if ((int)j >= bin_offset && j < excluded_mass_bins_for_decoy_runs_.size() - bin_offset)
      {
        for (int k = -bin_offset; k <= bin_offset; k++)
          excluded_mass_bins_for_decoy_runs_[j + k] = true;
      }
    }
  }
  // for targeted deconvolution: mark the bins of target masses (+/- one isotope and one neighboring bin)
  if (! target_mono_masses_.empty())
  {
    target_mass_bins_.reset();
    target_mass_bins_ = boost::dynamic_bitset<>(binned_log_masses_.size());
    for (double& tm : target_mono_masses_)
    {
      for (int off = -1; off < 2; off++)
      {
        double m = tm + off * iso_da_distance_;
        double mass_delta = avg_.getMostAbundantMassDelta(m);
        Size j = getBinNumber_(log(m + mass_delta), mass_bin_min_value_, bin_mul_factors_[ms_level_ - 1]);
        if (j < 1) { continue; }
        if (j >= target_mass_bins_.size() - 2) { break; }
        target_mass_bins_[j - 1] = true;
        target_mass_bins_[j] = true;
        target_mass_bins_[j + 1] = true;
      }
    }
  }
  // main algorithm to generate mass bins
  const auto per_mass_abs_charge_ranges = updateMassBins_(binned_log_mz_peak_intensities);
  // main algorithm to generate peak groups
  getCandidatePeakGroups_(per_mass_abs_charge_ranges);
  scoreAndFilterPeakGroups_();
}
/// Returns true if the peak group's (isotope-shifted) mono mass matches any entry of the sorted
/// excluded_masses_for_decoy_runs_ list within a relative tolerance.
/// @param peak_group group whose mono mass is tested
/// @param tol relative mass tolerance (mass * tol gives the Da window)
/// @param offset number of isotope (Da) shifts added to the mono mass before matching
bool SpectralDeconvolution::isPeakGroupInExcludedMassForDecoyRuns_(const PeakGroup& peak_group, double tol, int offset) const
{
  if (excluded_masses_for_decoy_runs_.empty()) return false;
  bool match = false;
  double mass = offset * iso_da_distance_ + peak_group.getMonoMass();
  double mass_da_tol = mass * tol;
  // find the first excluded mass strictly above the upper edge of the window, then scan backwards
  auto upper
    = std::upper_bound(excluded_masses_for_decoy_runs_.begin(), excluded_masses_for_decoy_runs_.end(), mass + mass_da_tol);
  // NOTE(review): the loop starts at upper (which may be end()) — when upper == end() the loop body never runs, so
  // masses whose window ends beyond the largest excluded mass are only matched after the first decrement is skipped;
  // verify candidates just below the last list entry are handled as intended.
  while (upper != excluded_masses_for_decoy_runs_.end())
  {
    double delta = mass - *upper;
    if (std::abs(delta) < mass_da_tol)
    {
      match = true;
      break;
    }
    if (delta > mass_da_tol) break; // walked below the window - no match possible further down
    if (upper == excluded_masses_for_decoy_runs_.begin()) { break; }
    upper--;
  }
  return match;
}
/// Score every candidate peak group (iterative isotope-pattern fitting / Qscore update), then filter by SNR, Qscore,
/// charge consistency, overlaps, harmonics and excluded masses. Operates on deconvolved_spectrum_ in place.
void SpectralDeconvolution::scoreAndFilterPeakGroups_()
{
  double tol = tolerance_[ms_level_ - 1];
  auto selected = boost::dynamic_bitset<>(deconvolved_spectrum_.size());
  double snr_threshold = min_snr_[ms_level_ - 1];
  // Phase 1: per-group scoring (parallel); selected[i] marks groups passing all checks.
#pragma omp parallel for default(none) shared(tol, selected, snr_threshold, std::cout)
  for (int i = 0; i < (int)deconvolved_spectrum_.size(); i++)
  {
    int offset = 0;
    auto& peak_group = deconvolved_spectrum_[i];
    peak_group.setTargetDecoyType(target_decoy_type_);
    if (isPeakGroupInExcludedMassForDecoyRuns_(peak_group, tol)) { continue; }
    int num_iteration = 10;
    bool mass_determined = false;
    double prev_cos = 0;
    // isotope pattern matching and qscore update part. Isotope pattern matching is done multiple times until finding the maximum isotope cosine
    for (int k = 0; k < num_iteration; k++)
    {
      auto noisy_peaks = peak_group.recruitAllPeaksInSpectrum(deconvolved_spectrum_.getOriginalSpectrum(), tol, avg_,
                                                              peak_group.getMonoMass() + offset * iso_da_distance_);
      if (isPeakGroupInExcludedMassForDecoyRuns_(peak_group, tol)) { break; }
      // min cosine is checked in here. mono mass is also updated one last time. SNR, per charge SNR, and avg errors are updated here.
      const auto& [z1, z2] = peak_group.getAbsChargeRange();
      offset = peak_group.updateQscore(noisy_peaks, avg_, min_isotope_cosine_[ms_level_ - 1], tol,
                                       (z1 + z2) < 2 * low_charge_, excluded_masses_for_decoy_runs_, false);
      // stop iterating when the cosine no longer improves, the mono mass is stable (offset == 0), or out of iterations
      if (prev_cos > peak_group.getIsotopeCosine() || offset == 0 || k >= num_iteration - 1)
      {
        if (peak_group.getChargeSNR(peak_group.getRepAbsCharge()) < snr_threshold) // to speed up
          break;
        if (offset != 0) noisy_peaks = peak_group.recruitAllPeaksInSpectrum(deconvolved_spectrum_.getOriginalSpectrum(), tol, avg_,
                                                                            peak_group.getMonoMass() + offset * iso_da_distance_);
        peak_group.updateQscore(noisy_peaks, avg_, min_isotope_cosine_[ms_level_ - 1], tol,
                                (z1 + z2) < 2 * low_charge_, excluded_masses_for_decoy_runs_, true);
        mass_determined = true;
        break;
      }
      prev_cos = peak_group.getIsotopeCosine();
    }
    if (isPeakGroupInExcludedMassForDecoyRuns_(peak_group, tol))
    {
      continue;
    }
    if (! mass_determined || peak_group.empty() || peak_group.getQscore() <= 0 || peak_group.getMonoMass() < current_min_mass_
        || peak_group.getMonoMass() > current_max_mass_)
      continue;
    // high-charge groups need a minimum number of supporting charges
    auto [z1, z2] = peak_group.getAbsChargeRange();
    if (z1 > low_charge_ && (z2 - z1) < min_support_peak_count_) { continue; }
    // mark groups matching a user-specified target mass (backwards scan from upper_bound over the sorted list)
    if (! target_mono_masses_.empty())
    {
      double delta = peak_group.getMonoMass() * tolerance_[ms_level_ - 1] * 2;
      auto upper = std::upper_bound(target_mono_masses_.begin(), target_mono_masses_.end(), peak_group.getMonoMass() + delta);
      while (! peak_group.isTargeted())
      {
        if (upper != target_mono_masses_.end())
        {
          if (std::abs(*upper - peak_group.getMonoMass()) < delta) { peak_group.setTargeted(); }
          if (peak_group.getMonoMass() - *upper > delta) { break; }
        }
        if (upper == target_mono_masses_.begin()) { break; }
        --upper;
      }
    }
#pragma omp critical
    selected[i] = true;
  }
  Size selected_count = selected.count();
  if (selected_count == 0)
  {
    deconvolved_spectrum_.clear();
    return;
  }
  // Phase 2: compact the selected groups, dropping duplicates with non-increasing mono mass.
  std::vector<PeakGroup> filtered_peak_groups;
  filtered_peak_groups.reserve(selected_count);
  Size index = selected.find_first();
  while (index != selected.npos)
  {
    if (filtered_peak_groups.empty() || filtered_peak_groups.back().getMonoMass() < deconvolved_spectrum_[index].getMonoMass())
      filtered_peak_groups.push_back(deconvolved_spectrum_[index]);
    index = selected.find_next(index);
  }
  // noise decoy runs additionally carry over the target peak groups for later FDR calculation
  if (target_decoy_type_ == PeakGroup::noise_decoy)
  {
    for (const auto& pg2 : *target_dspec_for_decoy_calculation_)
    {
      filtered_peak_groups.push_back(pg2);
    }
  }
  deconvolved_spectrum_.setPeakGroups(filtered_peak_groups);
  deconvolved_spectrum_.sort();
  removeChargeErrorPeakGroups_(deconvolved_spectrum_, target_decoy_type_);
  // final harmonics removal: a group is dropped if a harmonic-charge alternative explains the signal with higher SNR
  selected = boost::dynamic_bitset<>(deconvolved_spectrum_.size());
  filtered_peak_groups = std::vector<PeakGroup>();
#pragma omp parallel for default(none) shared(tol, selected, harmonic_charges_)
  for (int i = 0; i < (int)deconvolved_spectrum_.size(); i++)
  {
    const auto& peak_group = deconvolved_spectrum_[i];
    bool pass = true;
    const auto& [z1, z2] = peak_group.getAbsChargeRange();
    for (auto hz : harmonic_charges_)
    {
      PeakGroup pg(std::min(current_max_charge_, z1 * hz), std::min(current_max_charge_, z2 * hz), is_positive_);
      pg.setIsotopeDaDistance(iso_da_distance_);
      const auto& [z1_, z2_] = pg.getAbsChargeRange();
      if (z2 - z1 > z2_ - z1_) break; // if harmonic charges are too high stop
      pg.setMonoisotopicMass(peak_group.getMonoMass() * hz);
      auto nps = pg.recruitAllPeaksInSpectrum(deconvolved_spectrum_.getOriginalSpectrum(), tol, avg_, pg.getMonoMass());
      pg.updateQscore(nps, avg_, min_isotope_cosine_[ms_level_ - 1], tol,
                      (z1 + z2) * hz < 2 * low_charge_, excluded_masses_for_decoy_runs_, true);
      if (pg.getQscore() > 0 && pg.getSNR() > peak_group.getSNR())
      {
        pass = false;
        break;
      }
    }
    if (! pass) continue;
#pragma omp critical
    selected[i] = true;
  }
  selected_count = selected.count();
  if (selected_count == 0)
  {
    deconvolved_spectrum_.clear();
    return;
  }
  filtered_peak_groups.reserve(selected_count);
  index = selected.find_first();
  while (index != selected.npos)
  {
    const auto& peak_group = deconvolved_spectrum_[index];
    if (filtered_peak_groups.empty() || filtered_peak_groups.back().getMonoMass() < peak_group.getMonoMass())
      filtered_peak_groups.push_back(peak_group);
    index = selected.find_next(index);
  }
  deconvolved_spectrum_.setPeakGroups(filtered_peak_groups);
  deconvolved_spectrum_.sort();
  removeOverlappingPeakGroups_(deconvolved_spectrum_, tol * 1.2, target_decoy_type_);
  removeExcludedMasses_(deconvolved_spectrum_, excluded_masses_, tol);
  // Phase 3: signal-decoy filtering against the target run, then final Qscore/SNR threshold.
  filtered_peak_groups.clear();
  filtered_peak_groups.reserve(deconvolved_spectrum_.size());
  std::set<Size> indices;
  if (target_decoy_type_ == PeakGroup::signal_decoy)
  {
    excluded_masses_for_decoy_runs_.clear();
    for (const auto& pg : *target_dspec_for_decoy_calculation_) // pg are the target peak groups from normal spectrum deconvolution
    {
      for (int j = -allowed_iso_error_; j <= allowed_iso_error_; j++)
      {
        excluded_masses_for_decoy_runs_.push_back(pg.getMonoMass() + j * iso_da_distance_);
      }
    }
    for (Size k = 0; k < deconvolved_spectrum_.size(); k++)
    {
      auto& pg = deconvolved_spectrum_[k];
      if (isPeakGroupInExcludedMassForDecoyRuns_(pg, tol)) continue;
      // a decoy close in mass to a target must also differ sufficiently in isotope cosine to be kept
      bool pass = true;
      for (const auto & pg2 : *target_dspec_for_decoy_calculation_)
      {
        if (std::abs(pg.getMonoMass() - pg2.getMonoMass()) < (3 + allowed_iso_error_) * iso_da_distance_ + .1) // if they are close enough
        {
          if (std::abs(pg2.getIsotopeCosine() - pg.getIsotopeCosine()) > .005 * (allowed_iso_error_ + 1))
          {
            pass = false;
            break;
          }
        }
      }
      if (!pass) continue;// || (is_isotope_error && qs == pg.getQscore())
      indices.insert(k);
    }
  }
  for (Size k = 0; k < deconvolved_spectrum_.size(); k++)
  {
    auto& pg = deconvolved_spectrum_[k];
    // when signal-decoy indices were collected, only those groups are eligible
    if (!indices.empty() && (indices.find(k) == indices.end()))
    {
      continue;
    }
    if (pg.getQscore() >= min_qscore_ && pg.getChargeSNR(pg.getRepAbsCharge()) > snr_threshold)
    {
      filtered_peak_groups.push_back(pg);
    }
  }
  deconvolved_spectrum_.setPeakGroups(filtered_peak_groups);
  deconvolved_spectrum_.sort();
}
/// Find the isotope offset that maximizes the cosine similarity between the observed per-isotope intensities and the
/// averagine model for mono_mass.
/// @param mono_mass mass for which the averagine model is fetched
/// @param per_isotope_intensities observed summed intensity per isotope index
/// @param[out] offset best isotope offset (already corrected by iso_int_shift on return)
/// @param avg precalculated averagine model
/// @param iso_int_shift shift applied to the offset search window and subtracted from the result
/// @param window_width if >= 0, restricts |offset - iso_int_shift| to this width
/// @param excluded_masses offsets landing on these masses (within ~10 ppm) are skipped
/// @return the best cosine (clamped to >= 0), or 0/-1000 on degenerate inputs (see below)
float SpectralDeconvolution::getIsotopeCosineAndIsoOffset(double mono_mass,
                                                          const std::vector<float>& per_isotope_intensities,
                                                          int& offset,
                                                          const PrecalculatedAveragine& avg,
                                                          const int iso_int_shift,
                                                          const int window_width,
                                                          const std::vector<double>& excluded_masses)
{
  offset = 0;
  if ((int)per_isotope_intensities.size() < min_iso_size + iso_int_shift) { return .0; }
  auto iso = avg.get(mono_mass);
  // search range of candidate offsets, roughly a quarter of the apex index on each side, shifted by iso_int_shift
  int right = (int)avg.getApexIndex(mono_mass) / 4 + 1;
  int left = right;
  right += iso_int_shift;
  left -= iso_int_shift;
  float max_cos = -1000;
  int max_isotope_index = (int)per_isotope_intensities.size(); // exclusive
  int min_isotope_index = -1;                                  // inclusive
  // locate the first nonzero isotope intensity
  // NOTE(review): only min_isotope_index is derived from the data; max_isotope_index stays at size() even when the
  // trailing entries are zero — confirm this is intended.
  for (int i = 0; i < max_isotope_index; i++)
  {
    if (per_isotope_intensities[i] <= 0) { continue; }
    if (min_isotope_index < 0) { min_isotope_index = i; }
  }
  if (max_isotope_index - min_isotope_index < min_iso_size) { return .0; }
  // compute the cosine for every admissible offset
  std::vector<std::pair<int, float>> offset_cos;
  offset_cos.reserve(right + left + 1);
  for (int tmp_offset = -left; tmp_offset <= right; tmp_offset++)
  {
    if (window_width >= 0 && abs(tmp_offset - iso_int_shift) > window_width)
      continue;
    if (!excluded_masses.empty())
    {
      // skip offsets that would land on an excluded mass (within mono_mass * 1e-5 Da)
      bool exclude = false;
      for (auto em : excluded_masses)
      {
        if ( std::abs(mono_mass + (tmp_offset - iso_int_shift)* Constants::ISOTOPE_MASSDIFF_55K_U - em) < mono_mass * 1e-5) // tmp
        {
          exclude = true;
          break;
        }
      }
      if (exclude) continue;
    }
    float tmp_cos = getCosine(per_isotope_intensities, min_isotope_index, max_isotope_index, iso, tmp_offset, min_iso_size);
    offset_cos.emplace_back(tmp_offset, tmp_cos);
  }
  if (offset_cos.empty()) return max_cos;
  // pick the highest-cosine offset that satisfies the range/window constraints
  std::sort(offset_cos.begin(), offset_cos.end(),
            [](const std::pair<int, float>& p1, const std::pair<int, float>& p2) { return p1.second > p2.second; });
  for (const auto& [o, c] : offset_cos)
  {
    if (o > right || o < -left) continue;
    if (window_width >= 0 && abs(o - iso_int_shift) > window_width)
      continue;
    offset = o;
    max_cos = c;
    break;
  }
  max_cos = std::max(max_cos, .0f);
  offset -= iso_int_shift;
  return max_cos;
}
/// Cosine similarity between the observed intensity vector a (restricted to [a_start, a_end)) and the isotope
/// distribution b, with b shifted by `offset` (a[j] is compared against b[j - offset]).
/// @param a observed per-isotope intensities
/// @param a_start first index of a to consider (clamped to 0)
/// @param a_end one-past-last index of a to consider (clamped to a.size())
/// @param b model isotope distribution; assumed L2-normalized (b_norm is fixed at 1) — TODO(review) confirm
/// @param offset shift applied between a and b indices
/// @param min_iso_len minimum number of indices required; shorter ranges return 0
/// @return cosine in [0, 1] (0 for degenerate input)
/// Fixes: removed the dead local `max_intensity` (computed but never read) and the redundant continue/else.
float SpectralDeconvolution::getCosine(const std::vector<float>& a, int a_start, int a_end, const IsotopeDistribution& b, int offset, int min_iso_len)
{
  float n = .0, a_norm = .0, b_norm = 1.0f;
  a_start = std::max(0, a_start);
  a_end = std::min((int)a.size(), a_end);
  if (a_end - a_start < min_iso_len) { return 0; }
  for (int j = a_start; j < a_end; j++)
  {
    int i = j - offset; // index into the model distribution
    a_norm += a[j] * a[j];
    if (i < 0 || i >= (int)b.size() || b[i].getIntensity() <= 0) { continue; }
    n += a[j] * b[i].getIntensity();
  }
  if (a_norm <= 0) { return 0; }
  return n / sqrt(a_norm * b_norm);
}
/// Remove peak groups whose signal is better explained by another group at a different charge sharing the same raw
/// peaks. For every shared peak, the group with the lower per-charge SNR accumulates that peak's intensity as
/// "overlap"; groups dominated by overlap intensity are dropped.
/// @param dspec spectrum to filter in place
/// @param target_decoy_type only groups of this type are kept
void SpectralDeconvolution::removeChargeErrorPeakGroups_(DeconvolvedSpectrum& dspec, const PeakGroup::TargetDecoyType& target_decoy_type) const
{
  // map every raw peak m/z to the set of peak-group indices containing it, and remember its intensity
  std::map<double, std::set<int>> peak_to_pgs;
  std::map<double, float> mz_to_intensities;
  std::vector<PeakGroup> filtered_pg_vec;
  filtered_pg_vec.reserve(dspec.size());
  std::vector<float> overlap_intensity(dspec.size(), .0f);
  for (Size i = 0; i < dspec.size(); i++)
  {
    const auto& pg = dspec[i];
    for (auto& p : pg)
    {
      peak_to_pgs[p.mz].insert((int)i);
      mz_to_intensities[p.mz] = p.intensity;
    }
  }
  // for each peak shared by several groups, charge the overlap to the lower-SNR group(s)
  for (const auto& e : peak_to_pgs)
  {
    const auto& pg_is = e.second;
    double pmz = e.first;
    float pint = mz_to_intensities[pmz];
    if (pg_is.size() == 1) { continue; }
    for (auto i : pg_is)
    {
      bool is_overlap = false;
      double mass1 = dspec[i].getMonoMass();
      // charge this group would assign to the shared peak, derived from mass / neutral m/z
      int repz1 = (int)round(mass1 / (pmz - FLASHHelperClasses::getChargeMass(is_positive_)));
      const double high_snr = 10;
      if (dspec[i].getSNR() > high_snr && dspec[i].getChargeSNR(repz1) > high_snr) // if signal is almost perfect, skip the filtration
        continue;
      for (auto j : pg_is)
      {
        if (i == j) { continue; }
        double mass2 = dspec[j].getMonoMass();
        int repz2 = (int)round(mass2 / (pmz - FLASHHelperClasses::getChargeMass(is_positive_)));
        if (repz1 == repz2) continue;
        if (dspec[i].getChargeSNR(repz1) > dspec[j].getChargeSNR(repz2)) { continue; }
        is_overlap = true;
        break;
      }
      if (is_overlap) overlap_intensity[i] += pint;
    }
  }
  // keep groups of the requested type, within the charge range, whose overlap share stays below mul_factor
  for (Size i = 0; i < dspec.size(); i++)
  {
    if (dspec[i].getTargetDecoyType() != target_decoy_type) { continue; }
    if ((ms_level_ == 1 && dspec[i].getRepAbsCharge() < min_abs_charge_) || dspec[i].getRepAbsCharge() > max_abs_charge_) { continue; }
    const double mul_factor = .5;
    if (! dspec[i].isTargeted() && // z1 != z2 &&
        overlap_intensity[i] >= dspec[i].getIntensity() * mul_factor) // If the overlapped intensity takes more than mul_factor * total intensity then
                                                                      // it is a peakgroup with a charge error. the smaller, the harsher
    {
      continue;
    }
    filtered_pg_vec.push_back(dspec[i]);
  }
  dspec.setPeakGroups(filtered_pg_vec);
}
/// Collapse clusters of peak groups whose mono masses agree within tol (relative): per cluster only the highest-SNR
/// group survives. Targeted groups are always kept. Assumes dspec is sorted by mono mass.
/// @param dspec spectrum to filter in place (sorted by mass)
/// @param tol relative mass tolerance defining a cluster
/// @param target_decoy_type only groups of this type are kept
void SpectralDeconvolution::removeOverlappingPeakGroups_(DeconvolvedSpectrum& dspec, double tol, PeakGroup::TargetDecoyType target_decoy_type)
{
  if (dspec.empty()) { return; }
  // dspec.sort();
  std::vector<PeakGroup> filtered_pg_vec;
  filtered_pg_vec.reserve(dspec.size());
  double start_mass = dspec[0].getMonoMass();
  float local_max_SNR = -1.0f;
  Size local_max_index = 0;
  // sentinel (== dspec.size()) so the first cluster's winner is never suppressed as a duplicate
  Size last_local_max_index = dspec.size();
  for (Size i = 0; i < dspec.size(); i++)
  {
    double mass = dspec[i].getMonoMass();
    if (mass - start_mass > mass * tol)
    {
      // cluster boundary reached: emit the previous cluster's local SNR maximum
      if (! dspec[local_max_index].isTargeted()) // targeted ones were already push_backed.
      {
        if (dspec[local_max_index].getTargetDecoyType() == target_decoy_type
            && last_local_max_index != local_max_index)
          filtered_pg_vec.push_back(dspec[local_max_index]);
      }
      last_local_max_index = local_max_index;
      start_mass = mass;
      local_max_SNR = -1.0f;
    }
    float snr = dspec[i].getSNR();
    if (local_max_SNR < snr)
    {
      local_max_SNR = snr;
      local_max_index = i;
    }
    if (dspec[i].isTargeted())
    {
      if (dspec[i].getTargetDecoyType() == target_decoy_type)
        filtered_pg_vec.push_back(dspec[i]);
    }
  }
  // flush the final cluster
  if (local_max_SNR >= 0)
  {
    if (! dspec[local_max_index].isTargeted()) // targeted ones were already push_backed.
    {
      if (dspec[local_max_index].getTargetDecoyType() == target_decoy_type
          && last_local_max_index != local_max_index)
        filtered_pg_vec.push_back(dspec[local_max_index]);
    }
  }
  dspec.setPeakGroups(filtered_pg_vec);
  std::vector<PeakGroup>().swap(filtered_pg_vec);
}
/// Drop peak groups whose mono mass matches any entry of the sorted excluded_masses list within a relative tolerance,
/// and groups that do not belong to the current target/decoy type.
/// @param dspec spectrum to filter in place
/// @param excluded_masses sorted list of masses to remove
///        (NOTE(review): passed by value — a const& would avoid a copy, but the signature must match the header)
/// @param tol relative mass tolerance
void SpectralDeconvolution::removeExcludedMasses_(DeconvolvedSpectrum& dspec, std::vector<double> excluded_masses, double tol) const
{
  if (excluded_masses.empty()) return;
  std::vector<PeakGroup> filtered_pg_vec;
  filtered_pg_vec.reserve(dspec.size());
  // spec.sort();
  for (const auto& peak_group : dspec)
  {
    if (peak_group.getTargetDecoyType() != target_decoy_type_) continue;
    double delta = peak_group.getMonoMass() * tol;
    // scan backwards from the first excluded mass above the tolerance window
    auto upper = std::upper_bound(excluded_masses.begin(), excluded_masses.end(), peak_group.getMonoMass() + delta);
    bool exclude = false;
    while (! exclude)
    {
      if (upper != excluded_masses.end())
      {
        if (std::abs(*upper - peak_group.getMonoMass()) < delta) { exclude = true; }
        if (peak_group.getMonoMass() - *upper > delta) { break; }
      }
      if (upper == excluded_masses.begin()) { break; }
      --upper;
    }
    if (exclude) { continue; }
    filtered_pg_vec.push_back(peak_group);
  }
  dspec.setPeakGroups(filtered_pg_vec);
  std::vector<PeakGroup>().swap(filtered_pg_vec);
}
/// Register masses either as targets (kept/boosted during deconvolution) or as excluded masses (removed from results).
/// Each input mass is expanded with isotope shifts: excluded masses span the full isotope envelope (apex + right
/// count), targets get indices 0..1 only (start == end == 0, loop runs to end + 1).
/// @param masses the (mono) masses to register
/// @param excluded true to add to excluded_masses_, false to add to target_mono_masses_
void SpectralDeconvolution::setTargetMasses(const std::vector<double>& masses, bool excluded)
{
  if (excluded)
  {
    excluded_masses_.clear();
    excluded_masses_.reserve(masses.size() * 30); // rough upper bound on isotope expansion per mass
  }
  else
  {
    target_mono_masses_.clear();
    target_mono_masses_.reserve(masses.size() * 3);
  }
  for (const auto& m : masses)
  {
    int start = 0;
    int end = 0;
    // excluded masses cover the whole theoretical isotope envelope of m
    if (excluded) { end = (int)(avg_.getApexIndex(m) + avg_.getRightCountFromApex(m)); }
    for (int j = start; j <= end + 1; j++)
    {
      if (excluded) excluded_masses_.push_back(m + iso_da_distance_ * j);
      else
        target_mono_masses_.push_back(m + iso_da_distance_ * j);
    }
  }
}
/// Set the precalculated averagine model used for isotope-pattern matching (copied into this instance).
void SpectralDeconvolution::setAveragine(const SpectralDeconvolution::PrecalculatedAveragine& avg)
{
  avg_ = avg;
}
/// Convert a mass bin index back to a mass in Da (inverse of getBinNumber_ over log mass).
double SpectralDeconvolution::getMassFromMassBin_(Size mass_bin, double bin_mul_factor) const
{
  return exp(getBinValue_(mass_bin, mass_bin_min_value_, bin_mul_factor));
}
/// Convert an m/z bin index back to an m/z value (inverse of getBinNumber_ over log m/z).
/// NOTE(review): the parameter is named mass_bin but is an m/z bin index — consider renaming.
double SpectralDeconvolution::getMzFromMzBin_(Size mass_bin, double bin_mul_factor) const
{
  return exp(getBinValue_(mass_bin, mz_bin_min_value_, bin_mul_factor));
}
} // namespace OpenMS | C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/TOPDOWN/DeconvolvedSpectrum.cpp | .cpp | 8,986 | 340 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Kyowon Jeong, Jihyung Kim $
// $Authors: Kyowon Jeong, Jihyung Kim $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/TOPDOWN/DeconvolvedSpectrum.h>
namespace OpenMS
{
/// Construct an (otherwise empty) deconvolved spectrum for the given scan number.
DeconvolvedSpectrum::DeconvolvedSpectrum(const int scan_number) : scan_number_(scan_number)
{
}
/// Ordering by scan number (ascending).
bool DeconvolvedSpectrum::operator<(const DeconvolvedSpectrum& a) const
{
  return this->scan_number_ < a.scan_number_;
}
/// Ordering by scan number (descending counterpart of operator<).
bool DeconvolvedSpectrum::operator>(const DeconvolvedSpectrum& a) const
{
  return this->scan_number_ > a.scan_number_;
}
/// Equality is defined on scan number only; peak-group content is not compared.
bool DeconvolvedSpectrum::operator==(const DeconvolvedSpectrum& a) const
{
  return this->scan_number_ == a.scan_number_;
}
/// Render the deconvolved (target) peak groups as an MSSpectrum of mass peaks, each placed at
/// mono mass + |to_charge| proton masses, and encode per-group metadata (charge/isotope ranges, cosines, SNRs,
/// Qscores, Qvalues) into the "DeconvMassInfo" meta value.
/// @param to_charge charge at which the deconvolved masses are reported (sign selects the proton mass polarity)
/// @param tol tolerance recorded in the meta string
/// @param retain_undeconvolved if true, raw peaks not assigned to any peak group are appended (shifted by the same
///        charge-mass offset minus one proton)
/// @return the assembled spectrum, sorted by position, with precursor info replaced when available
/// NOTE(review): peak_groups_[0] is read unconditionally below — assumes at least one peak group exists; confirm
/// callers guarantee this.
MSSpectrum DeconvolvedSpectrum::toSpectrum(const int to_charge, double tol, bool retain_undeconvolved)
{
  auto out_spec = MSSpectrum(spec_); // copy meta data from the original spectrum, then drop its peaks
  out_spec.clear(false);
  double charge_mass_offset = (double)abs(to_charge) * FLASHHelperClasses::getChargeMass(to_charge >= 0);
  std::unordered_set<double> deconvolved_mzs;
  // build the DeconvMassInfo meta string: global settings first, then precursor info
  std::stringstream val {};
  val << "tol=" << std::to_string(tol) << ";massoffset=" << std::to_string(charge_mass_offset) << ";chargemass=" << std::to_string(FLASHHelperClasses::getChargeMass(peak_groups_[0].isPositive()));
  if (!precursor_peak_group_.empty())
  {
    val << ";precursorscan=" << precursor_scan_number_ << ";precursormass=" << std::to_string(precursor_peak_group_.getMonoMass())
        << ";precursorQscore=" << std::to_string(precursor_peak_group_.getQscore2D()) << ";precursorSNR=" << std::to_string(precursor_peak_group_.getChargeSNR(precursor_peak_.getCharge())) ;
  }
  else
  {
    val << ";precursorscan=0;precursormass=0;precursorQscore=0;precursorSNR=0";
  }
  val << ";peaks=";
  out_spec.reserve(size());
  // one output peak per target peak group; record its charge and isotope index ranges
  for (auto& pg : *this)
  {
    if (pg.empty() || pg.getTargetDecoyType() != PeakGroup::TargetDecoyType::target)
    {
      continue;
    }
    out_spec.emplace_back(pg.getMonoMass() + charge_mass_offset, pg.getIntensity());
    auto [z1, z2] = pg.getAbsChargeRange();
    int min_iso = -1, max_iso = 0;
    for (auto& p : pg)
    {
      min_iso = min_iso < 0 ? p.isotopeIndex : std::min(min_iso, p.isotopeIndex);
      max_iso = std::max(max_iso, p.isotopeIndex);
    }
    val << z1 << ":" << z2 << "," << min_iso << ":" << max_iso << ";";
    if (retain_undeconvolved)
    {
      // remember assigned raw m/z values so they are not duplicated below
      for (auto& p : pg)
      {
        deconvolved_mzs.insert(p.mz);
      }
    }
  }
  // per-group score lists, in the same (filtered) group order as the peaks above
  val << "cos=";
  for (auto& pg : *this)
  {
    if (pg.empty() || pg.getTargetDecoyType() != PeakGroup::TargetDecoyType::target)
    {
      continue;
    }
    val << pg.getIsotopeCosine() << ",";
  }
  val << ";snr=";
  for (auto& pg : *this)
  {
    if (pg.empty() || pg.getTargetDecoyType() != PeakGroup::TargetDecoyType::target)
    {
      continue;
    }
    val << pg.getSNR() << ",";
  }
  val << ";qscore=";
  for (auto& pg : *this)
  {
    if (pg.empty() || pg.getTargetDecoyType() != PeakGroup::TargetDecoyType::target)
    {
      continue;
    }
    val << pg.getQscore2D() << ",";
  }
  val << ";qvalue=";
  for (auto& pg : *this)
  {
    if (pg.empty() || pg.getTargetDecoyType() != PeakGroup::TargetDecoyType::target)
    {
      continue;
    }
    val << pg.getQvalue() << ",";
  }
  out_spec.setMetaValue("DeconvMassInfo", val.str());
  if (retain_undeconvolved)
  {
    // append raw peaks that were not claimed by any peak group, shifted consistently with the mass peaks
    for (const auto& p : spec_)
    {
      if (deconvolved_mzs.find(p.getMZ()) != deconvolved_mzs.end()) // if p is deconvolved
      {
        continue;
      }
      out_spec.emplace_back(p.getMZ() + charge_mass_offset - FLASHHelperClasses::getChargeMass(to_charge >= 0), p.getIntensity());
    }
  }
  out_spec.sortByPosition();
  // replace the precursor annotation with the deconvolved precursor mass at the requested charge
  if (!precursor_peak_group_.empty())
  {
    Precursor precursor(precursor_peak_);
    precursor.setCharge(to_charge);
    precursor.setMZ(precursor_peak_group_.getMonoMass() + charge_mass_offset);
    precursor.setIntensity(precursor_peak_group_.getIntensity());
    out_spec.getPrecursors().clear();
    out_spec.getPrecursors().emplace_back(precursor);
  }
  return out_spec;
}
/// The raw (profile/centroided) spectrum this deconvolution was derived from.
const MSSpectrum& DeconvolvedSpectrum::getOriginalSpectrum() const
{
  return spec_;
}
/// The deconvolved peak group of the precursor (empty if none was determined).
const PeakGroup& DeconvolvedSpectrum::getPrecursorPeakGroup() const
{
  return precursor_peak_group_;
}
/// Charge recorded on the raw precursor peak annotation.
int DeconvolvedSpectrum::getPrecursorCharge() const
{
  return precursor_peak_.getCharge();
}
/// Effective maximum mass for this spectrum: for MS1, or when no precursor mass was deconvolved, the global limit
/// applies unchanged; for MSn the precursor mono mass additionally caps the range.
double DeconvolvedSpectrum::getCurrentMaxMass(const double max_mass) const
{
  const bool no_precursor_bound = (spec_.getMSLevel() == 1) || precursor_peak_group_.empty();
  return no_precursor_bound ? max_mass : std::min(max_mass, precursor_peak_group_.getMonoMass());
}
/// Effective minimum mass for this spectrum: the configured minimum for MS1, a fixed 50 Da for all MSn levels.
double DeconvolvedSpectrum::getCurrentMinMass(const double min_mass) const
{
  return (spec_.getMSLevel() == 1) ? min_mass : 50.0;
}
/// Effective maximum absolute charge: the global limit for MS1 or when no precursor was deconvolved; otherwise the
/// precursor's representative absolute charge additionally caps it.
int DeconvolvedSpectrum::getCurrentMaxAbsCharge(const int max_abs_charge) const
{
  const bool no_precursor_bound = (spec_.getMSLevel() == 1) || precursor_peak_group_.empty();
  return no_precursor_bound ? max_abs_charge : std::min(max_abs_charge, abs(precursor_peak_group_.getRepAbsCharge()));
}
/// Raw precursor peak annotation of this spectrum.
const Precursor& DeconvolvedSpectrum::getPrecursor() const
{
  return precursor_peak_;
}
/// Scan number of this spectrum (as passed to the constructor).
int DeconvolvedSpectrum::getScanNumber() const
{
  return scan_number_;
}
/// Scan number of the precursor's survey scan.
int DeconvolvedSpectrum::getPrecursorScanNumber() const
{
  return precursor_scan_number_;
}
/// Fragmentation/activation method recorded for this spectrum.
const Precursor::ActivationMethod& DeconvolvedSpectrum::getActivationMethod() const
{
  return activation_method_;
}
/// Set the raw precursor peak annotation.
void DeconvolvedSpectrum::setPrecursor(const Precursor& precursor)
{
  precursor_peak_ = precursor;
}
/// Set the fragmentation/activation method.
void DeconvolvedSpectrum::setActivationMethod(const Precursor::ActivationMethod& method)
{
  activation_method_ = method;
}
/// Set the deconvolved precursor peak group (copied).
void DeconvolvedSpectrum::setPrecursorPeakGroup(const PeakGroup& pg)
{
  precursor_peak_group_ = pg;
}
/// Set the raw spectrum this deconvolution refers to (copied).
void DeconvolvedSpectrum::setOriginalSpectrum(const MSSpectrum& spec)
{
  spec_ = spec;
}
/// Set the scan number of the precursor's survey scan.
void DeconvolvedSpectrum::setPrecursorScanNumber(const int scan_number)
{
  precursor_scan_number_ = scan_number;
}
// --- container-style interface: DeconvolvedSpectrum delegates to its internal std::vector<PeakGroup> ---
/// Const iterator to the first peak group.
std::vector<PeakGroup>::const_iterator DeconvolvedSpectrum::begin() const noexcept
{
  return peak_groups_.begin();
}
/// Const past-the-end iterator.
std::vector<PeakGroup>::const_iterator DeconvolvedSpectrum::end() const noexcept
{
  return peak_groups_.end();
}
/// Mutable iterator to the first peak group.
std::vector<PeakGroup>::iterator DeconvolvedSpectrum::begin() noexcept
{
  return peak_groups_.begin();
}
/// Mutable past-the-end iterator.
std::vector<PeakGroup>::iterator DeconvolvedSpectrum::end() noexcept
{
  return peak_groups_.end();
}
/// Unchecked const element access.
const PeakGroup& DeconvolvedSpectrum::operator[](const Size i) const
{
  return peak_groups_[i];
}
/// Unchecked mutable element access.
PeakGroup& DeconvolvedSpectrum::operator[](const Size i)
{
  return peak_groups_[i];
}
/// Append a copy of pg.
void DeconvolvedSpectrum::push_back(const PeakGroup& pg)
{
  peak_groups_.push_back(pg);
}
/// Append a copy of pg (const& parameter, so this is equivalent to push_back).
void DeconvolvedSpectrum::emplace_back(const PeakGroup& pg)
{
  peak_groups_.emplace_back(pg);
}
/// Remove the last peak group.
void DeconvolvedSpectrum::pop_back()
{
  peak_groups_.pop_back();
}
/// Mutable reference to the last peak group.
PeakGroup& DeconvolvedSpectrum::back()
{
  return peak_groups_.back();
}
/// Number of peak groups.
Size DeconvolvedSpectrum::size() const noexcept
{
  return peak_groups_.size();
}
/// Remove all peak groups; the swap idiom also releases the vector's capacity (unlike vector::clear()).
void DeconvolvedSpectrum::clear()
{
  std::vector<PeakGroup>().swap(peak_groups_);
}
/// Reserve capacity for n peak groups.
void DeconvolvedSpectrum::reserve(Size n)
{
  peak_groups_.reserve(n);
}
/// True if no peak groups are stored.
bool DeconvolvedSpectrum::empty() const
{
  return peak_groups_.empty();
}
/// A spectrum counts as decoy when it is non-empty and its precursor peak group exists and is not a target.
/// (Only the precursor is consulted; the own peak groups' type is intentionally not checked.)
bool DeconvolvedSpectrum::isDecoy() const
{
  return !empty() && !precursor_peak_group_.empty()
         && precursor_peak_group_.getTargetDecoyType() != PeakGroup::TargetDecoyType::target;
}
/// Isobaric quantification values attached to this spectrum (returned by value).
FLASHHelperClasses::IsobaricQuantities DeconvolvedSpectrum::getQuantities() const
{
  return quantities_;
}
/// Attach isobaric quantification values.
void DeconvolvedSpectrum::setQuantities(const FLASHHelperClasses::IsobaricQuantities& quantities)
{
  quantities_ = quantities;
}
/// Replace the stored peak groups with a copy of x. The swap releases the old capacity first, so the following
/// copy-assignment allocates exactly the needed size.
/// NOTE(review): x is taken by non-const reference but never modified — a const& (or move) may be preferable, but the
/// signature must match the header.
void DeconvolvedSpectrum::setPeakGroups(std::vector<PeakGroup>& x)
{
  std::vector<PeakGroup>().swap(peak_groups_);
  peak_groups_ = x;
}
/// Sort the peak groups ascending by PeakGroup::operator< (presumably mono mass — defined in PeakGroup).
void DeconvolvedSpectrum::sort()
{
  std::sort(peak_groups_.begin(), peak_groups_.end());
}
/// Sort the peak groups descending by 2D Qscore, i.e. best-scoring groups first.
void DeconvolvedSpectrum::sortByQscore()
{
  auto by_qscore_desc = [](const PeakGroup& lhs, const PeakGroup& rhs) { return lhs.getQscore2D() > rhs.getQscore2D(); };
  std::sort(peak_groups_.begin(), peak_groups_.end(), by_qscore_desc);
}
} // namespace OpenMS | C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/TOPDOWN/MassFeatureTrace.cpp | .cpp | 9,077 | 233 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Kyowon Jeong, Jihyung Kim $
// $Authors: Kyowon Jeong, Jihyung Kim $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/TOPDOWN/MassFeatureTrace.h>
#include <OpenMS/ANALYSIS/TOPDOWN/SpectralDeconvolution.h>
namespace OpenMS
{
  /// Sets up the defaults of the embedded MassTraceDetection, overriding the values
  /// that are fixed for FLASH feature tracing (and hiding them as 'advanced'), and
  /// adds the isotope-cosine cutoff consumed by findFeaturesAndUpdateQscore2D().
  MassFeatureTrace::MassFeatureTrace() : DefaultParamHandler("MassFeatureTrace")
  {
    Param mtd_defaults = MassTraceDetection().getDefaults();
    mtd_defaults.setValue("min_sample_rate", .1, "Minimum fraction of scans along the feature trace that must contain a peak. To raise feature detection sensitivity, lower this value close to 0.");
    mtd_defaults.setValue(
      "min_trace_length", 10.0,
      "Minimum expected length of a mass trace (in seconds). Only for MS1 (or minimum MS level in the dataset) feature tracing. For MSn, all traces are kept regardless of this value.");
    // the following MassTraceDetection parameters are fixed here and hidden from the user
    mtd_defaults.setValue("chrom_peak_snr", .0);
    mtd_defaults.addTag("chrom_peak_snr", "advanced");
    mtd_defaults.setValue("reestimate_mt_sd", "false");
    mtd_defaults.addTag("reestimate_mt_sd", "advanced");
    mtd_defaults.setValue("noise_threshold_int", .0);
    mtd_defaults.addTag("noise_threshold_int", "advanced");
    mtd_defaults.setValue("quant_method", "area");
    mtd_defaults.addTag("quant_method", "advanced"); // hide entry
    defaults_.insert("", mtd_defaults);
    defaults_.setValue("min_cos", .75, "Cosine similarity threshold between avg. and observed isotope pattern.");
    defaultsToParam_();
  }
std::vector<FLASHHelperClasses::MassFeature> MassFeatureTrace::findFeaturesAndUpdateQscore2D(const PrecalculatedAveragine& averagine, std::vector<DeconvolvedSpectrum>& deconvolved_spectra,
int ms_level, bool is_decoy)
{
static uint findex = 1;
MSExperiment map;
std::map<int, MSSpectrum> index_spec_map;
int min_abs_charge = INT_MAX;
int max_abs_charge = INT_MIN;
bool is_positive = true;
std::vector<FLASHHelperClasses::MassFeature> mass_features;
std::map<double, Size> rt_index_map;
std::map<int, int> prev_scans;
int prev_scan = 0;
for (Size i = 0; i < deconvolved_spectra.size(); i++)
{
auto deconvolved_spectrum = deconvolved_spectra[i];
if (deconvolved_spectrum.empty())
continue;
if ((int)deconvolved_spectrum.getOriginalSpectrum().getMSLevel() != ms_level)
continue;
int scan = deconvolved_spectrum.getScanNumber();
if (scan > prev_scan)
prev_scans[scan] = prev_scan;
prev_scan = scan;
double rt = deconvolved_spectrum.getOriginalSpectrum().getRT();
rt_index_map[rt] = i;
MSSpectrum deconv_spec;
deconv_spec.setRT(rt);
for (auto& pg : deconvolved_spectrum)
{
if (is_decoy && pg.getTargetDecoyType() == PeakGroup::TargetDecoyType::target) continue;
if (!is_decoy && pg.getTargetDecoyType() != PeakGroup::TargetDecoyType::target) continue;
is_positive = pg.isPositive();
auto [z1, z2] = pg.getAbsChargeRange();
max_abs_charge = max_abs_charge > z2 ? max_abs_charge : z2;
min_abs_charge = min_abs_charge < z1 ? min_abs_charge : z1;
Peak1D tp(pg.getMonoMass(), (float)pg.getIntensity());
deconv_spec.push_back(tp);
}
map.addSpectrum(deconv_spec);
}
map.sortSpectra();
// when map size is less than 3, MassTraceDetection aborts - too few spectra for mass tracing.
if (map.size() < 3)
{
return mass_features;
}
MassTraceDetection mtdet;
Param mtd_param = getParameters().copy("");
double cos_threshold = mtd_param.getValue("min_cos");
mtd_param.remove("min_cos");
mtdet.setParameters(mtd_param);
std::vector<MassTrace> m_traces;
mtdet.setLogType(ProgressLogger::NONE);
mtdet.run(map, m_traces); // m_traces : output of this function
int charge_range = max_abs_charge - min_abs_charge + 1;
for (auto& mt : m_traces)
{
double qscore_2D = 1.0;
double tmp_qscore_2D = 1.0;
int min_feature_abs_charge = INT_MAX; // min feature charge
int max_feature_abs_charge = INT_MIN; // max feature charge
int min_scan_number = INT_MAX; // min feature charge
int max_scan_number = INT_MIN; // max feature charge
auto per_isotope_intensity = std::vector<float>(averagine.getMaxIsotopeIndex(), .0f);
auto per_charge_intensity = std::vector<float>(charge_range + min_abs_charge + 1, .0f);
double mass = mt.getCentroidMZ();
boost::dynamic_bitset<> charges(charge_range + 1);
std::vector<std::vector<PeakGroup>::iterator> pgs;
pgs.reserve(mt.getSize());
std::vector<double> qscores;
prev_scan = 0;
for (auto& p2 : mt)
{
auto& dspec = deconvolved_spectra[rt_index_map[p2.getRT()]];
if (dspec.empty())
continue;
PeakGroup comp;
comp.setMonoisotopicMass(p2.getMZ() - 1e-7);
auto pg = std::lower_bound(dspec.begin(), dspec.end(), comp);
if (pg == dspec.end() || std::abs(pg->getMonoMass() - p2.getMZ()) > 1e-7)
continue;
if (is_decoy && pg->getTargetDecoyType() == PeakGroup::TargetDecoyType::target) continue;
if (!is_decoy && pg->getTargetDecoyType() != PeakGroup::TargetDecoyType::target) continue;
auto [z1, z2] = pg->getAbsChargeRange();
min_feature_abs_charge = min_feature_abs_charge < z1 ? min_feature_abs_charge : z1;
max_feature_abs_charge = max_feature_abs_charge > z2 ? max_feature_abs_charge : z2;
int scan = dspec.getScanNumber();
min_scan_number = std::min(min_scan_number, scan);
max_scan_number = std::max(max_scan_number, scan);
if (prev_scan != 0 && (prev_scans[scan] <= prev_scan)) // only when consecutive scans are connected.
{
tmp_qscore_2D *= (1.0 - pg->getQscore());
}
else
{
tmp_qscore_2D = 1.0 - pg->getQscore();
}
qscore_2D = std::min(qscore_2D, tmp_qscore_2D);
prev_scan = scan;
pgs.push_back(pg);
}
qscore_2D = 1.0 - qscore_2D;
for (auto& pg : pgs)
{
for (size_t z = min_abs_charge; z < per_charge_intensity.size(); z++)
{
float zint = pg->getChargeIntensity((int)z);
if (zint <= 0)
{
continue;
}
charges[z - min_abs_charge] = true;
per_charge_intensity[z] += zint;
}
int iso_off = (int)round((pg->getMonoMass() - mass) / pg->getIsotopeDaDistance());
auto iso_int = pg->getIsotopeIntensities();
for (int i = 0; i + iso_off < (int)per_isotope_intensity.size(); i++)
{
if ((int)i + iso_off < 0 || i >= (int)iso_int.size())
{
continue;
}
per_isotope_intensity[i + iso_off] += iso_int[i];
}
}
int offset = 0;
float isotope_score = SpectralDeconvolution::getIsotopeCosineAndIsoOffset(mass, per_isotope_intensity, offset, averagine, 0, 0, std::vector<double>{});
if (isotope_score < cos_threshold)
{
continue;
}
double max_int = 0;
PeakGroup rep_pg = *pgs[0];
for (auto& pg : pgs)
{
if (max_int <= pg->getIntensity())
{
rep_pg = *pg;
max_int = pg->getIntensity();
}
pg->setFeatureIndex(findex);
if (findex > 0)
pg->setQscore2D(qscore_2D);
}
FLASHHelperClasses::MassFeature mass_feature;
mass_feature.iso_offset = offset;
mass += offset * Constants::ISOTOPE_MASSDIFF_55K_U;
mass_feature.avg_mass = averagine.getAverageMassDelta(mass) + mass;
mass_feature.mt = mt;
mass_feature.charge_count = (int)charges.count();
mass_feature.isotope_score = isotope_score;
mass_feature.min_charge = (is_positive ? min_feature_abs_charge : -max_feature_abs_charge);
mass_feature.max_charge = (is_positive ? max_feature_abs_charge : -min_feature_abs_charge);
mass_feature.qscore = qscore_2D;
mass_feature.per_charge_intensity = per_charge_intensity;
mass_feature.per_isotope_intensity = per_isotope_intensity;
mass_feature.rep_mz = mass_feature.avg_mass / rep_pg.getRepAbsCharge();
mass_feature.scan_number = rep_pg.getScanNumber();
mass_feature.min_scan_number = min_scan_number;
mass_feature.max_scan_number = max_scan_number;
mass_feature.rep_charge = rep_pg.getRepAbsCharge();
mass_feature.index = findex;
mass_feature.is_decoy = is_decoy;
mass_feature.ms_level = ms_level;
mass_features.push_back(mass_feature);
findex++;
}
return mass_features;
}
  /// Intentionally empty: parameters are read on demand via getParameters() in
  /// findFeaturesAndUpdateQscore2D(), so no members need to be refreshed here.
  void MassFeatureTrace::updateMembers_()
  {
  }
} // namespace OpenMS
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Kyowon Jeong$
// $Authors: Kyowon Jeong$
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/TOPDOWN/FLASHHelperClasses.h>
#include <OpenMS/ANALYSIS/TOPDOWN/PeakGroup.h>
#include <OpenMS/ANALYSIS/TOPDOWN/PeakGroupScoring.h>
#include <iomanip>
namespace OpenMS
{
  // Logistic-regression weights for the Qscore. The first five entries multiply the
  // attributes produced by toFeatureVector_(); the last entry is the intercept.
  // All signs are flipped relative to the trained model listed below, because
  // getQscore() evaluates 1/(1+exp(score)) instead of the usual 1/(1+exp(-score)).
  std::vector<double> PeakGroupScoring::weight_ { -21.0476, 1.5045, -0.1303, 0.183, 0.1834, 17.804};
  // Att0 21.0476
  // Att1 -1.5045
  // Att2 0.1303
  // Att3 -0.183
  // Att4 -0.1834
  // Intercept -17.804
/// calculate PeakGroupScoring using PeakGroup attributes
double PeakGroupScoring::getQscore(const PeakGroup* pg)
{
if (pg->empty())
{ // all zero
return .0;
}
double score = weight_.back() + .5;
auto fv = toFeatureVector_(pg);
for (Size i = 0; i < weight_.size() - 1; i++)
{
score += fv[i] * weight_[i];
}
double qscore = 1.0 / (1.0 + exp(score));
return qscore;
}
/// convert PeakGroup into feature (attribute) vector
std::vector<double> PeakGroupScoring::toFeatureVector_(const PeakGroup* pg)
{
std::vector<double> fvector(5, .0); // length of weights vector - 1, excluding the intercept weight.
if (pg->empty())
return fvector;
int index = 0;
fvector[index++] = pg->getIsotopeCosine(); // (log2(a + d));
fvector[index++] = pg->getIsotopeCosine() - pg->getChargeIsotopeCosine(pg->getRepAbsCharge()); // (log2(d + a / (d + a)));
fvector[index++] = log2(1 + pg->getChargeSNR(pg->getRepAbsCharge())); //(log2(d + a / (d + a)));
fvector[index++] = log2(1 + pg->getChargeSNR(pg->getRepAbsCharge())) - log2(1 + pg->getSNR()); //(log2(a + d));
fvector[index++] = pg->getAvgPPMError();
return fvector;
}
/// to write down training csv file header.
void PeakGroupScoring::writeAttCsvForQscoreTrainingHeader(std::fstream& f)
{
PeakGroup pg;
Size att_count = toFeatureVector_(&pg).size();
for (Size i = 0; i < att_count; i++)
f << "Att" << i << ",";
f << "Class\n";
}
/// to write down training csv file rows
void PeakGroupScoring::writeAttCsvForQscoreTraining(const DeconvolvedSpectrum& deconvolved_spectrum, std::fstream& f)
{
DeconvolvedSpectrum dspec;
dspec.reserve(deconvolved_spectrum.size());
for (auto& pg : deconvolved_spectrum)
{
dspec.push_back(pg);
}
if (dspec.empty())
return;
for (auto& pg : dspec)
{
bool target = pg.getTargetDecoyType() == PeakGroup::TargetDecoyType::target;
auto fv = toFeatureVector_(&pg);
for (auto& item : fv)
{
f << item << ",";
}
f << (target ? "T" : "F") << "\n";
}
}
  /// Placeholder for a deep-learning-based score: extracts the signal/noise DL input
  /// vectors from the peak group but always returns 0 — the scoring model itself is
  /// not implemented yet.
  double PeakGroupScoring::getDLscore(PeakGroup* pg, const MSSpectrum& spec, const FLASHHelperClasses::PrecalculatedAveragine& avg, double tol)
  {
    const auto& [sig, noise]
      = pg->getDLVector(spec, charge_count_for_DL_scoring_, iso_count_for_DL_scoring_, avg, tol);
    /// calculate score with sig and noise
    return 0;
  }
} // namespace OpenMS
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Andreas Bertsch $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/TARGETED/IncludeExcludeTarget.h>
#include <utility>
namespace OpenMS
{
  /// Default construction: both m/z values start at the "unset" sentinel
  /// (std::numeric_limits<double>::max()).
  IncludeExcludeTarget::IncludeExcludeTarget() :
    CVTermList(),
    precursor_mz_(std::numeric_limits<double>::max()),
    product_mz_(std::numeric_limits<double>::max())
  {
  }
  /// Copy construction and destruction are compiler-generated (member-wise).
  IncludeExcludeTarget::IncludeExcludeTarget(const IncludeExcludeTarget & rhs) = default;
  IncludeExcludeTarget::~IncludeExcludeTarget() = default;
  /// Member-wise copy assignment (self-assignment safe via the identity check).
  IncludeExcludeTarget & IncludeExcludeTarget::operator=(const IncludeExcludeTarget & rhs)
  {
    if (&rhs != this)
    {
      CVTermList::operator=(rhs); // copies base-class CV terms
      name_ = rhs.name_;
      precursor_mz_ = rhs.precursor_mz_;
      precursor_cv_terms_ = rhs.precursor_cv_terms_;
      product_mz_ = rhs.product_mz_;
      product_cv_terms_ = rhs.product_cv_terms_;
      interpretation_list_ = rhs.interpretation_list_;
      peptide_ref_ = rhs.peptide_ref_;
      compound_ref_ = rhs.compound_ref_;
      configurations_ = rhs.configurations_;
      prediction_ = rhs.prediction_;
      rts_ = rhs.rts_;
    }
    return *this;
  }
  /// Deep equality: base-class CV terms plus every member must match.
  bool IncludeExcludeTarget::operator==(const IncludeExcludeTarget & rhs) const
  {
    return CVTermList::operator==(rhs) &&
           name_ == rhs.name_ &&
           precursor_mz_ == rhs.precursor_mz_ &&
           precursor_cv_terms_ == rhs.precursor_cv_terms_ &&
           product_mz_ == rhs.product_mz_ &&
           product_cv_terms_ == rhs.product_cv_terms_ &&
           interpretation_list_ == rhs.interpretation_list_ &&
           peptide_ref_ == rhs.peptide_ref_ &&
           compound_ref_ == rhs.compound_ref_ &&
           configurations_ == rhs.configurations_ &&
           prediction_ == rhs.prediction_ &&
           rts_ == rhs.rts_;
  }
  /// Inequality in terms of operator==.
  bool IncludeExcludeTarget::operator!=(const IncludeExcludeTarget & rhs) const
  {
    return !(*this == rhs);
  }
  // --- simple accessors: each setter stores the given value, each getter returns
  // --- a (const reference to the) stored member ---

  /// Sets the name of this target.
  void IncludeExcludeTarget::setName(const String & name)
  {
    name_ = name;
  }
  /// Returns the name of this target.
  const String & IncludeExcludeTarget::getName() const
  {
    return name_;
  }
  /// Sets the reference to the peptide this target points to.
  void IncludeExcludeTarget::setPeptideRef(const String & peptide_ref)
  {
    peptide_ref_ = peptide_ref;
  }
  /// Returns the peptide reference.
  const String & IncludeExcludeTarget::getPeptideRef() const
  {
    return peptide_ref_;
  }
  /// Sets the reference to the compound this target points to.
  void IncludeExcludeTarget::setCompoundRef(const String & compound_ref)
  {
    compound_ref_ = compound_ref;
  }
  /// Returns the compound reference.
  const String & IncludeExcludeTarget::getCompoundRef() const
  {
    return compound_ref_;
  }
  /// Sets the precursor m/z (default-constructed objects hold the "unset" sentinel).
  void IncludeExcludeTarget::setPrecursorMZ(double mz)
  {
    precursor_mz_ = mz;
  }
  /// Returns the precursor m/z.
  double IncludeExcludeTarget::getPrecursorMZ() const
  {
    return precursor_mz_;
  }
  /// Replaces the CV terms describing the precursor.
  void IncludeExcludeTarget::setPrecursorCVTermList(const CVTermList & list)
  {
    precursor_cv_terms_ = list;
  }
  /// Adds a single CV term to the precursor description.
  void IncludeExcludeTarget::addPrecursorCVTerm(const CVTerm & cv_term)
  {
    precursor_cv_terms_.addCVTerm(cv_term);
  }
  /// Returns the CV terms describing the precursor.
  const CVTermList & IncludeExcludeTarget::getPrecursorCVTermList() const
  {
    return precursor_cv_terms_;
  }
  /// Sets the product (fragment) m/z.
  void IncludeExcludeTarget::setProductMZ(double mz)
  {
    product_mz_ = mz;
  }
  /// Returns the product (fragment) m/z.
  double IncludeExcludeTarget::getProductMZ() const
  {
    return product_mz_;
  }
  /// Replaces the CV terms describing the product.
  void IncludeExcludeTarget::setProductCVTermList(const CVTermList & list)
  {
    product_cv_terms_ = list;
  }
  /// Adds a single CV term to the product description.
  void IncludeExcludeTarget::addProductCVTerm(const CVTerm & cv_term)
  {
    product_cv_terms_.addCVTerm(cv_term);
  }
  /// Returns the CV terms describing the product.
  const CVTermList & IncludeExcludeTarget::getProductCVTermList() const
  {
    return product_cv_terms_;
  }
  /// Replaces the list of interpretations (each a CV term list).
  void IncludeExcludeTarget::setInterpretations(const std::vector<CVTermList> & interpretations)
  {
    interpretation_list_ = interpretations;
  }
  /// Returns the list of interpretations.
  const std::vector<CVTermList> & IncludeExcludeTarget::getInterpretations() const
  {
    return interpretation_list_;
  }
  /// Appends one interpretation to the list.
  void IncludeExcludeTarget::addInterpretation(const CVTermList & interpretation)
  {
    interpretation_list_.push_back(interpretation);
  }
  /// Replaces the instrument configurations.
  void IncludeExcludeTarget::setConfigurations(const std::vector<Configuration> & configurations)
  {
    configurations_ = configurations;
  }
  /// Returns the instrument configurations.
  const std::vector<IncludeExcludeTarget::Configuration> & IncludeExcludeTarget::getConfigurations() const
  {
    return configurations_;
  }
  /// Appends one instrument configuration.
  void IncludeExcludeTarget::addConfiguration(const Configuration & configuration)
  {
    configurations_.push_back(configuration);
  }
  /// Replaces the prediction CV terms.
  void IncludeExcludeTarget::setPrediction(const CVTermList & prediction)
  {
    prediction_ = prediction;
  }
  /// Returns the prediction CV terms.
  const CVTermList & IncludeExcludeTarget::getPrediction() const
  {
    return prediction_;
  }
  /// Adds one CV term to the prediction.
  void IncludeExcludeTarget::addPredictionTerm(const CVTerm & term)
  {
    prediction_.addCVTerm(term);
  }
  /// Intentionally empty meta-info update hook.
  void IncludeExcludeTarget::updateMembers_()
  {
  }
  /// Sets the retention time (taken by value and moved into place).
  void IncludeExcludeTarget::setRetentionTime(IncludeExcludeTarget::RetentionTime rt)
  {
    rts_ = std::move(rt);
  }
  /// Returns the retention time.
  const IncludeExcludeTarget::RetentionTime & IncludeExcludeTarget::getRetentionTime() const
  {
    return rts_;
  }
}
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Oliver Alka $
// $Authors: Oliver Alka $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/TARGETED/MetaboTargetedTargetDecoy.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <iostream>
#include <regex>
namespace OpenMS
{
std::map<String, std::vector<OpenMS::ReactionMonitoringTransition> > MetaboTargetedTargetDecoy::constructTransitionsMap_(const TargetedExperiment& t_exp)
{
// mapping of the transitions to a specific compound reference
std::map<String, std::vector<OpenMS::ReactionMonitoringTransition> > TransitionsMap;
for (const auto& tr_it : t_exp.getTransitions())
{
auto pair_it_success = TransitionsMap.emplace(tr_it.getCompoundRef(), std::vector<OpenMS::ReactionMonitoringTransition>());
pair_it_success.first->second.push_back(tr_it);
}
return TransitionsMap;
}
std::vector<MetaboTargetedTargetDecoy::MetaboTargetDecoyMassMapping> MetaboTargetedTargetDecoy::constructTargetDecoyMassMapping(const TargetedExperiment& t_exp)
{
std::vector<String> identifier;
for (const auto &it : t_exp.getCompounds())
{
// only need to extract identifier from the targets, since targets and decoys have the same
if (it.getMetaValue("decoy") == DataValue(0))
{
identifier.emplace_back(it.getMetaValue("m_ids_id"));
}
}
std::vector<ReactionMonitoringTransition> rmts = t_exp.getTransitions();
std::vector<MetaboTargetedTargetDecoy::MetaboTargetDecoyMassMapping> mappings;
for (const auto& it : identifier)
{
MetaboTargetedTargetDecoy::MetaboTargetDecoyMassMapping mapping;
mapping.identifier = it;
auto it_target = rmts.begin();
while ((it_target = find_if(it_target,
rmts.end(),
[&it](ReactionMonitoringTransition &rts)
{
return rts.getMetaValue("m_ids_id") == it &&
rts.getDecoyTransitionType() == ReactionMonitoringTransition::TARGET;
})) != rmts.end())
{
mapping.target_product_masses.emplace_back(it_target->getProductMZ());
mapping.target_compound_ref = it_target->getCompoundRef();
++it_target;
}
auto it_decoy = rmts.begin();
while ((it_decoy = find_if(it_decoy,
rmts.end(),
[&it](ReactionMonitoringTransition &rts)
{
return rts.getMetaValue("m_ids_id") == it &&
rts.getDecoyTransitionType() == ReactionMonitoringTransition::DECOY;
})) != rmts.end())
{
mapping.decoy_product_masses.emplace_back(it_decoy->getProductMZ());
mapping.decoy_compound_ref = it_decoy->getCompoundRef();
++it_decoy;
}
mappings.emplace_back(mapping);
}
return mappings;
}
void MetaboTargetedTargetDecoy::resolveOverlappingTargetDecoyMassesByDecoyMassShift(TargetedExperiment& t_exp, std::vector<MetaboTargetedTargetDecoy::MetaboTargetDecoyMassMapping>& mappings, const double& mass_to_add, const double& mz_tol, const String& mz_tol_unit)
{
// Define a map to hold compound references and their corresponding sets of decoy m/z values.
std::map<String, std::set<double>> match_compound_refs_decoy_mz;
// Iterate over each mapping in the provided mappings list.
for (const auto& map : mappings) {
// Create a set to store m/z values that match the criterion.
std::set<double> matched;
// Iterate over each decoy m/z value in the current mapping.
for (double decoy_mz : map.decoy_product_masses) {
// Compare each decoy m/z value with each target m/z value.
for (double target_mz : map.target_product_masses) {
// Calculate the difference between decoy and target m/z values.
// The calculation differs based on whether the tolerance is in ppm or Da.
double difference = (mz_tol_unit == "ppm") ?
std::abs(decoy_mz - target_mz) / target_mz * 1e6 :
std::abs(decoy_mz - target_mz);
// If the difference is small than mz_tol, the masses are too similar.
if (difference <= mz_tol) {
// Add the decoy m/z to the matched set.
matched.insert(decoy_mz);
break; // Move to the next decoy m/z value after finding a match.
}
}
}
// Associate the set of matched decoy m/z values with the compound reference in the map.
match_compound_refs_decoy_mz[map.decoy_compound_ref] = std::move(matched);
}
// Prepare a new vector to store updated ReactionMonitoringTransition objects.
std::vector<ReactionMonitoringTransition> v_rmt_new;
v_rmt_new.reserve(t_exp.getTransitions().size()); // Reserve space to optimize memory allocation.
// Iterate over each transition in the experiment.
for (const auto& tr : t_exp.getTransitions()) {
// Look for the current transition's compound reference in the map.
auto found = match_compound_refs_decoy_mz.find(tr.getCompoundRef());
// Check if the compound reference is found and if the product m/z matches any in the set.
if (found != match_compound_refs_decoy_mz.end() && found->second.count(tr.getProductMZ()) > 0) {
// Create a new transition object based on the current transition.
ReactionMonitoringTransition new_tr = tr;
// Modify the product m/z of the new transition.
new_tr.setProductMZ(tr.getProductMZ() + mass_to_add);
// Add the updated transition to the new vector.
v_rmt_new.push_back(std::move(new_tr));
} else {
// If no match is found, add the original transition to the new vector.
v_rmt_new.push_back(tr);
}
}
// Update the experiment's transitions with the new vector of updated transitions.
t_exp.setTransitions(std::move(v_rmt_new));
}
  /// For every mapping whose decoy fragment set is incomplete (fragmentation-tree
  /// re-rooting could not produce a decoy), substitutes the target fragment masses
  /// shifted by mass_to_add as decoy masses and appends the corresponding decoy
  /// compound and transitions to the TargetedExperiment.
  void MetaboTargetedTargetDecoy::generateMissingDecoysByMassShift(TargetedExperiment& t_exp, std::vector<MetaboTargetedTargetDecoy::MetaboTargetDecoyMassMapping>& mappings, const double& mass_to_add)
  {
    // Add a decoy based on the target masses + mass_to_add (e.g. CH2) if fragmentation tree re-rooting was not possible
    for (auto &it : mappings)
    {
      if (it.decoy_product_masses.size() != it.target_product_masses.size())
      {
        // decoy was not generated by passatutto
        if (it.decoy_compound_ref.empty())
        {
          // add a potential decoy with the new decoy masses to the mapping
          it.decoy_compound_ref = std::regex_replace(it.target_compound_ref, std::regex(R"(_\[)"), "_decoy_[");
          std::transform(it.target_product_masses.begin(),
                         it.target_product_masses.end(),
                         std::back_inserter(it.decoy_product_masses),
                         [mass_to_add](double d) -> double { return d + mass_to_add; });
        }
      }
    }
    std::map<String, std::vector<OpenMS::ReactionMonitoringTransition> > TransitionsMap = MetaboTargetedTargetDecoy::constructTransitionsMap_(t_exp);
    std::vector<TargetedExperiment::Compound> compounds;
    std::vector<ReactionMonitoringTransition> transitions;
    // look if compounds exists as target and decoy
    // add it to the current TargetedExperiment
    for (const auto &it : mappings)
    {
      const auto it_target = std::find_if(t_exp.getCompounds().begin(),
                                          t_exp.getCompounds().end(),
                                          [&it](const TargetedExperiment::Compound &comp)
                                          {
                                            return comp.id == it.target_compound_ref;
                                          });
      const auto it_decoy = std::find_if(t_exp.getCompounds().begin(),
                                         t_exp.getCompounds().end(),
                                         [&it](const TargetedExperiment::Compound &comp)
                                         {
                                           return comp.id == it.decoy_compound_ref;
                                         });
      // if targets and decoy exists add them to the new datastructure
      if (it_target != t_exp.getCompounds().end())
      {
        compounds.emplace_back(*it_target);
        if (TransitionsMap.find(it_target->id) != TransitionsMap.end())
        {
          transitions.insert(transitions.end(),
                             TransitionsMap[it_target->id].begin(),
                             TransitionsMap[it_target->id].end());
        }
      }
      if (it_decoy != t_exp.getCompounds().end())
      {
        compounds.emplace_back(*it_decoy);
        if (TransitionsMap.find(it_decoy->id) != TransitionsMap.end())
        {
          transitions.insert(transitions.end(),
                             TransitionsMap[it_decoy->id].begin(),
                             TransitionsMap[it_decoy->id].end());
        }
      }
      else // decoy does not exists in TargetedExperimentCompound
      {
        // use the corresponding target compound to generate a new decoy
        // and add the decoy transitions.
        // NOTE(review): dereferences it_target without an end() check — this relies on
        // every mapping's target compound being present in t_exp (which holds when the
        // mappings were built from this experiment's targets); verify for other callers.
        TargetedExperiment::Compound potential_decoy_compound = *it_target;
        std::vector<ReactionMonitoringTransition> potential_decoy_transitions;
        String current_compound_name = potential_decoy_compound.getMetaValue("CompoundName");
        potential_decoy_compound.setMetaValue("CompoundName", String(current_compound_name + "_decoy"));
        potential_decoy_compound.id = it.decoy_compound_ref;
        potential_decoy_compound.setMetaValue("decoy", DataValue(1));
        if (TransitionsMap.find(it_target->id) != TransitionsMap.end())
        {
          // clone the target transitions and rewrite id, type, annotation, m/z and ref
          potential_decoy_transitions = TransitionsMap[it_target->id];
          for (size_t i = 0; i < potential_decoy_transitions.size(); ++i)
          {
            potential_decoy_transitions[i]
              .setNativeID(std::regex_replace(potential_decoy_transitions[i].getNativeID(),
                                              std::regex(R"(_\[)"),
                                              "_decoy_["));
            potential_decoy_transitions[i]
              .setDecoyTransitionType(ReactionMonitoringTransition::DecoyTransitionType::DECOY);
            potential_decoy_transitions[i].setMetaValue("annotation", "NA");
            potential_decoy_transitions[i].setProductMZ(it.decoy_product_masses[i]);
            potential_decoy_transitions[i].setCompoundRef(it.decoy_compound_ref);
          }
        }
        else
        {
          OPENMS_LOG_WARN << "Add_shift method failed: " << current_compound_name << "_decoy could not be generated." << std::endl;
        }
        compounds.emplace_back(potential_decoy_compound);
        transitions.insert(transitions.end(), potential_decoy_transitions.begin(), potential_decoy_transitions.end());
      }
    }
    t_exp.setCompounds(compounds);
    t_exp.setTransitions(transitions);
  }
} // namespace OpenMS
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Oliver Alka $
// $Authors: Oliver Alka $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/TARGETED/MetaboTargetedAssay.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithmQT.h>
#include <OpenMS/KERNEL/BinnedSpectrum.h>
#include <OpenMS/COMPARISON/BinnedSpectralContrastAngle.h>
#include <OpenMS/PROCESSING/SPECTRAMERGING/SpectraMerger.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/KERNEL/ConsensusMap.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <regex>
using namespace OpenMS;
using namespace std;
namespace OpenMS
{
// get charge from adduct in standard format [M+H]+ or [M+H]1+
// only for singly charged species
int MetaboTargetedAssay::getChargeFromAdduct_(const String& adduct)
{
int adduct_charge;
String adduct_suffix = adduct.suffix(']').trim();
// charge one
if (adduct_suffix == "+")
{
adduct_suffix = "1" + adduct_suffix;
}
else if (adduct_suffix == "-")
{
adduct_suffix = "1" + adduct_suffix;
}
else if (adduct_suffix != "1-" && adduct_suffix != "1+")
{
OPENMS_LOG_WARN << "The adduct had the suffix '" << adduct_suffix << "', but only singly positive or singly negative charged adducts are supported." << std::endl;
}
String sign = adduct.back();
adduct_suffix.resize(adduct_suffix.size()-1);
if (sign == "+")
{
adduct_charge = String(adduct_suffix).toInt();
}
else
{
adduct_charge = String(sign + adduct_suffix).toInt();
}
return adduct_charge;
}
bool MetaboTargetedAssay::intensityLess_(const Peak1D& a, const Peak1D& b)
{
return (a.getIntensity() < b.getIntensity());
}
void MetaboTargetedAssay::filterBasedOnTotalOccurrence_(std::vector<MetaboTargetedAssay>& mta, double total_occurrence_filter, size_t in_files_size)
{
if (in_files_size > 1 && !mta.empty())
{
double total_occurrence = double(mta.size())/double(in_files_size);
if (!(total_occurrence >= total_occurrence_filter))
{
mta.clear(); // return empty vector
}
}
}
void MetaboTargetedAssay::sortByPrecursorInt(std::vector<MetaboTargetedAssay>& vec_mta)
{
sort(vec_mta.begin(),
vec_mta.end(),
[](const MetaboTargetedAssay &a, const MetaboTargetedAssay &b) -> bool
{
return a.precursor_int > b.precursor_int;
});
}
void MetaboTargetedAssay::filterBasedOnMolFormAdductOccurrence_(std::vector<MetaboTargetedAssay>& mta)
{
std::map<std::pair<String, String>, int> occ_map;
if (!mta.empty())
{
for (const auto &t_it : mta)
{
auto [it, success] = occ_map.emplace(std::make_pair(t_it.molecular_formula, t_it.compound_adduct), 1);
if (!success)
{
it->second++;
}
}
// find max element in map
using pair_type = decltype(occ_map)::value_type;
auto pr = std::max_element(std::begin(occ_map),
std::end(occ_map),
[](const pair_type &p1, const pair_type &p2) { return p1.second < p2.second; });
// filter vector down to the compound with mol. formula and adduct based on the highest occurrence
mta.erase(remove_if(mta.begin(),
mta.end(),
[&pr](const MetaboTargetedAssay& assay)
{
return assay.molecular_formula != pr->first.first ||
assay.compound_adduct != pr->first.second;
}), mta.end());
}
}
// method to extract potential transitions based on the ms/ms of the highest intensity precursor or a consensus spectrum
std::vector <MetaboTargetedAssay> MetaboTargetedAssay::extractMetaboTargetedAssay(const MSExperiment& spectra,
const FeatureMapping::FeatureToMs2Indices& feature_ms2_index,
const double& precursor_rt_tol,
const double& precursor_mz_distance,
const double& cosine_sim_threshold,
const double& transition_threshold,
const double& min_fragment_mz,
const double& max_fragment_mz,
const bool& method_consensus_spectrum,
const bool& exclude_ms2_precursor,
const unsigned int& file_counter)
{
int transition_group_counter = 0;
vector <MetaboTargetedAssay> v_mta;
const std::map<BaseFeature const *, vector < size_t>>& feature_ms2_spectra_map = feature_ms2_index.assignedMS2;
for (const auto& it : feature_ms2_spectra_map)
{
TargetedExperiment::Compound cmp;
cmp.clearMetaInfo();
vector <ReactionMonitoringTransition> v_rmt;
String description("UNKNOWN"), sumformula("UNKNOWN"), adduct("UNKNOWN");
StringList v_description, v_sumformula, v_adduct;
double feature_rt;
int feature_charge;
const BaseFeature *min_distance_feature = it.first;
feature_rt = min_distance_feature->getRT();
feature_charge = min_distance_feature->getCharge();
// extract metadata from featureXML
auto metaboliteIdentifications = min_distance_feature->getPeptideIdentifications();
if (!(metaboliteIdentifications.empty()) && !(metaboliteIdentifications[0].getHits().empty()))
{
// accurate mass search may provide multiple possible Hits
// for heuristics use the identification with the smallest mz error (ppm)
double min_id_mz_error = std::numeric_limits<double>::max();
for (const auto& mhit : min_distance_feature->getPeptideIdentifications()[0].getHits())
{
double current_id_mz_error = mhit.getMetaValue("mz_error_ppm");
// compare the absolute error absolute error
if (abs(current_id_mz_error) < min_id_mz_error)
{
description = mhit.getMetaValue("description");
sumformula = mhit.getMetaValue("chemical_formula");
adduct = mhit.getMetaValue("modifications");
// change format of description [name] to name
description.erase(remove_if(begin(description), end(description), [](char c) { return c == '[' || c == ']'; }), end(description));
// change format of adduct information M+H;1+ -> [M+H]1+
String adduct_prefix = adduct.prefix(';').trim();
String adduct_suffix = adduct.suffix(';').trim();
adduct = "[" + adduct_prefix + "]" + adduct_suffix;
min_id_mz_error = abs(current_id_mz_error);
}
}
// use the identification with the lowest mass deviation
v_description.push_back(description);
v_sumformula.push_back(sumformula);
v_adduct.push_back(adduct);
}
else
{
// count UNKNOWN via transition group counter
v_description.push_back(String(description + "_" + transition_group_counter));
v_sumformula.push_back(sumformula);
v_adduct.push_back(adduct);
}
double highest_precursor_mz = 0.0;
float highest_precursor_int = 0.0;
int highest_precursor_charge = 0;
MSSpectrum highest_precursor_int_spectrum;
MSSpectrum transition_spectrum;
String native_id;
// find precursor/spectrum with highest intensity precursor
vector <size_t> index = it.second;
for (auto index_it = index.begin(); index_it != index.end(); ++index_it)
{
const MSSpectrum &spectrum = spectra[*index_it];
// check if MS2 spectrum is empty
if (spectrum.empty())
{
OPENMS_LOG_WARN << "Empty MS/MS spectrum was provided. Please manually investigate at index: " << *index_it << std::endl;
continue;
}
const vector <Precursor> &precursor = spectrum.getPrecursors();
// get m/z and intensity of precursor
if (precursor.empty())
{
throw Exception::MissingInformation(__FILE__,
__LINE__,
OPENMS_PRETTY_FUNCTION,
"Precursor for MS/MS spectrum was not found.");
}
double precursor_mz = precursor[0].getMZ();
float precursor_int = precursor[0].getIntensity();
int precursor_charge = precursor[0].getCharge();
// if precursor charge is not annotated - use feature charge
if (precursor_charge == 0)
{
precursor_charge = feature_charge;
}
native_id = spectrum.getNativeID();
// spectrum with highest intensity precursor
if (precursor_int > highest_precursor_int)
{
highest_precursor_int = precursor_int;
highest_precursor_mz = precursor_mz;
highest_precursor_int_spectrum = spectrum;
highest_precursor_charge = precursor_charge;
}
transition_spectrum = highest_precursor_int_spectrum;
}
// check if more than one MS/MS spectrum is available to use the consensus method
// the MS/MS spectrum of the highest intensity precursor is used as reference and compared
// to the other MS/MS spectrum of a specific feature.
// if the cosine similarity is over the manually set threshold these are merged via SpectraMerger
if (method_consensus_spectrum &&index.size()>= 2)
{
// transform to binned spectra
vector <BinnedSpectrum> binned;
vector <MSSpectrum> similar_spectra;
MSExperiment exp;
const BinnedSpectrum binned_highest_int(highest_precursor_int_spectrum,
BinnedSpectrum::DEFAULT_BIN_WIDTH_HIRES,
false,
1,
BinnedSpectrum::DEFAULT_BIN_OFFSET_HIRES);
// calculation of contrast angle (cosine similarity)
for (auto index_it = index.begin(); index_it != index.end(); ++index_it)
{
const MSSpectrum &spectrum = spectra[*index_it];
const BinnedSpectrum binned_spectrum(spectrum,
BinnedSpectrum::DEFAULT_BIN_WIDTH_HIRES,
false,
1,
BinnedSpectrum::DEFAULT_BIN_OFFSET_HIRES);
BinnedSpectralContrastAngle bspa;
double cosine_sim = bspa(binned_highest_int, binned_spectrum);
if (cosine_sim > cosine_sim_threshold)
{
similar_spectra.push_back(spectrum);
exp.addSpectrum(spectrum);
}
}
// at least 2 spectra with high cosine similarity necessary
// fallback to highest precursor intensity spectrum (see above)
if (similar_spectra.size()> 1)
{
// calculate consensus spectrum
exp.sortSpectra();
SpectraMerger merger;
Param p;
p.insert("",SpectraMerger().getDefaults());
p.setValue("precursor_method:mz_tolerance", precursor_mz_distance);
p.setValue("precursor_method:rt_tolerance", precursor_rt_tol * 2);
merger.setParameters(p);
// all MS spectra should have the same precursor
merger.mergeSpectraPrecursors(exp);
// check if all precursors have been merged if not use highest intensity precursor
if (exp.getSpectra().size()< 2)
{
transition_spectrum = exp.getSpectra()[0];
}
}
}
// transition calculations
// calculate max intensity peak and threshold
float max_int = 0.0;
float min_int = std::numeric_limits<float>::max();
// sort intensity in MS2 spectrum to extract transitions
transition_spectrum.sortByIntensity(true);
// filter out the precursors if they are in the ms2 spectrum;
if (exclude_ms2_precursor)
{
for (auto spec_it = transition_spectrum.begin(); spec_it != transition_spectrum.end(); ++spec_it)
{
if (abs(transition_spectrum.getPrecursors()[0].getMZ() - spec_it->getMZ()) < 1e-3)
{
transition_spectrum.erase(spec_it);
break;
}
}
}
// find max and min intensity peak
max_int = max_element(transition_spectrum.begin(), transition_spectrum.end(), intensityLess_)->getIntensity();
min_int = min_element(transition_spectrum.begin(), transition_spectrum.end(), intensityLess_)->getIntensity();
// no peaks or all peaks have same intensity (single peak / noise)
if (min_int >= max_int)
{
continue;
}
vector <TargetedExperimentHelper::RetentionTime> v_cmp_rt;
TargetedExperimentHelper::RetentionTime cmp_rt;
cmp_rt.setRT(feature_rt);
v_cmp_rt.push_back(std::move(cmp_rt));
cmp.rts = std::move(v_cmp_rt);
cmp.setChargeState(highest_precursor_charge);
description = ListUtils::concatenate(v_description, ",");
cmp.id = String(transition_group_counter) + "_" + description + "_" + file_counter;
cmp.setMetaValue("CompoundName", description);
cmp.smiles_string = "NA";
sumformula = ListUtils::concatenate(v_sumformula, ",");
cmp.molecular_formula = sumformula;
adduct = ListUtils::concatenate(v_adduct, ",");
cmp.setMetaValue("Adducts", adduct);
// threshold should be at x % of the maximum intensity
// hard minimal threshold of min_int * 1.1
float threshold_transition = max_int * (transition_threshold / 100);
float threshold_noise = min_int * noise_threshold_constant_;
int transition_counter = 0;
// here ms2 spectra information is used
for (auto spec_it = transition_spectrum.begin(); spec_it != transition_spectrum.end(); ++spec_it)
{
ReactionMonitoringTransition rmt;
rmt.clearMetaInfo();
float current_int = spec_it->getIntensity();
double current_mz = spec_it->getMZ();
// write row for each transition
// current int has to be higher than transition threshold and should not be smaller than threshold noise
// current_mz has to be higher than min_fragment_mz and lower than max_fragment_mz
if (current_int > threshold_transition && current_int > threshold_noise && current_mz > min_fragment_mz && current_mz < max_fragment_mz)
{
float rel_int = current_int / max_int;
rmt.setPrecursorMZ(highest_precursor_mz);
rmt.setProductMZ(current_mz);
TargetedExperimentHelper::TraMLProduct product;
product.setMZ(current_mz);
// charge state from adduct
if (!adduct.empty() && adduct != "UNKNOWN")
{
product.setChargeState(getChargeFromAdduct_(adduct));
}
rmt.setProduct(product);
rmt.setLibraryIntensity(rel_int);
description = ListUtils::concatenate(v_description, ",");
rmt.setCompoundRef (String(transition_group_counter) + "_" + description + "_" + file_counter);
rmt.setNativeID (String(transition_group_counter)+ "_" + String(transition_counter)+ "_" + description + "_" + file_counter);
rmt.setDecoyTransitionType(ReactionMonitoringTransition::DecoyTransitionType::TARGET); // no decoys are generated without SIRIUS
v_rmt.push_back(std::move(rmt));
transition_counter += 1;
}
}
transition_group_counter += 1;
MetaboTargetedAssay mta;
mta.precursor_int = highest_precursor_int;
mta.compound_name = description;
mta.compound_adduct = adduct;
mta.precursor_mz = highest_precursor_mz;
mta.molecular_formula = sumformula;
mta.compound_rt = feature_rt;
mta.compound_file = file_counter;
mta.potential_cmp = cmp;
mta.potential_rmts = v_rmt;
// do not report if no valid transitions are found after filtering
if (!mta.potential_rmts.empty())
{
v_mta.push_back(std::move(mta));
}
}
return v_mta;
}
// Extracts potential transitions from the MS/MS spectrum of the highest-intensity
// precursor, using SIRIUS fragment annotation (sum formula / adduct / per-peak
// explanations stored in the spectrum's meta values and data arrays).
// Targets and their decoys from the same SIRIUS workspace share one entry_counter
// value so their identifiers stay in sync.
std::vector <MetaboTargetedAssay> MetaboTargetedAssay::extractMetaboTargetedAssayFragmentAnnotation(const vector < CompoundTargetDecoyPair >& v_cmp_spec,
                                                                                                    const double& transition_threshold,
                                                                                                    const double& min_fragment_mz,
                                                                                                    const double& max_fragment_mz,
                                                                                                    const bool& use_exact_mass,
                                                                                                    const bool& exclude_ms2_precursor)
{
  int entry_counter = 0; // counts each entry - to ensure the same count for targets, decoys from the same sirius workspace
  vector <MetaboTargetedAssay> v_mta;
  for (const auto& it : v_cmp_spec)
  {
    // check if annotated object exists
    const MetaboTargetedAssay::CompoundTargetDecoyPair &csp = it;
    // collect the non-empty annotated spectra (target and/or decoy)
    vector<MSSpectrum> non_empty_spectra;
    if (!csp.target_decoy_spectra.target.empty())
    {
      MSSpectrum target = csp.target_decoy_spectra.target;
      non_empty_spectra.push_back(target);
    }
    if (!csp.target_decoy_spectra.decoy.empty())
    {
      MSSpectrum decoy = csp.target_decoy_spectra.decoy;
      non_empty_spectra.push_back(decoy);
    }
    // iterate over both entries - targets and decoys (SiriusTargetDecoySpectra)
    // count target and decoy as one entry - to ensure same numbering of targets and decoys
    for (auto& transition_spectrum : non_empty_spectra)
    {
      TargetedExperiment::Compound cmp;
      cmp.clearMetaInfo();
      vector <ReactionMonitoringTransition> v_rmt;
      String description("UNKNOWN"), sumformula("UNKNOWN"), adduct("UNKNOWN");
      double feature_rt = csp.compound_info.rt;
      description = csp.compound_info.des;
      int charge = csp.compound_info.charge;
      double precursor_int = csp.compound_info.pint_mono;
      // use annotated metadata (written by the SIRIUS fragment annotation step)
      sumformula = transition_spectrum.getMetaValue("annotated_sumformula");
      adduct = transition_spectrum.getMetaValue("annotated_adduct");
      // decoy flag: 0 = target, 1 = decoy (see usage below)
      int decoy = transition_spectrum.getMetaValue("decoy");
      // transition calculations
      // calculate max intensity peak and threshold
      float max_int = 0.0;
      float min_int = std::numeric_limits<float>::max();
      // sort intensity in MS2 spectrum to extract transitions
      transition_spectrum.sortByIntensity(true);
      // have to remove ms2 precursor peak before min/max
      double exact_mass_precursor = 0.0;
      for (auto spec_it = transition_spectrum.begin();
           spec_it != transition_spectrum.end();
           ++spec_it)
      {
        int spec_index = spec_it - transition_spectrum.begin();
        OpenMS::DataArrays::StringDataArray explanation_array;
        if (!transition_spectrum.getStringDataArrays().empty())
        {
          explanation_array = transition_spectrum.getStringDataArrays()[0];
          if (explanation_array.getName() != "explanation")
          {
            OPENMS_LOG_WARN << "Fragment explanation was not found. Please check if your annotation works properly." << std::endl;
          }
          else
          {
            // precursor in fragment annotation has the same sumformula as MS1 Precursor
            if (explanation_array[spec_index] == sumformula)
            {
              // save exact mass
              if (use_exact_mass)
              {
                exact_mass_precursor = spec_it->getMZ();
              }
              // remove precursor ms2 entry (peak plus the parallel entries in the
              // string/float data arrays, which are index-aligned with the peaks)
              if (exclude_ms2_precursor)
              {
                transition_spectrum.erase(transition_spectrum.begin() + spec_index);
                transition_spectrum.getStringDataArrays()[0]
                  .erase(transition_spectrum.getStringDataArrays()[0].begin() + spec_index);
                if (decoy == 0) // second mass FloatDataArray only available for targets
                {
                  transition_spectrum.getFloatDataArrays()[0]
                    .erase(transition_spectrum.getFloatDataArrays()[0].begin() + spec_index);
                }
                break; // break so the loop iterator is not advanced after erase is called
              }
            }
          }
        }
      }
      // find max and min intensity peak
      max_int = max_element(transition_spectrum.begin(), transition_spectrum.end(), intensityLess_)->getIntensity();
      min_int = min_element(transition_spectrum.begin(), transition_spectrum.end(), intensityLess_)->getIntensity();
      // no peaks or all peaks have same intensity (single peak / noise)
      if (min_int >= max_int)
      {
        OPENMS_LOG_DEBUG << "The annotated spectrum does not have any peaks after the intensity filter step, or all peaks have the same intensity: " << csp.compound_info.cmp << std::endl;
        continue;
      }
      vector <TargetedExperimentHelper::RetentionTime> v_cmp_rt;
      TargetedExperimentHelper::RetentionTime cmp_rt;
      cmp_rt.setRT(feature_rt);
      v_cmp_rt = {cmp_rt};
      cmp.rts = {v_cmp_rt};
      cmp.setChargeState(charge);
      // suffix makes the compound id unique across files and retention times
      String identifier_suffix = adduct + "_" + int(feature_rt) + "_" + csp.compound_info.file_index;
      if (description == "UNKNOWN")
      {
        description = String(description + "_" + entry_counter);
      }
      // compoundID has to be unique over all the files
      // feature_rt if the same ID was detected twice at different retention times in the same file
      if (decoy == 0)
      {
        cmp.id = String(entry_counter) + "_" + description + "_" + identifier_suffix;
        cmp.setMetaValue("CompoundName", description);
      }
      else if (decoy == 1)
      {
        description = String(description + "_decoy");
        cmp.id = String(entry_counter) + "_" + description + "_" + identifier_suffix;
        cmp.setMetaValue("CompoundName", description);
      }
      OPENMS_LOG_DEBUG << "Processed annotated Spectra - mapping of the description and the SIRIUS identifier." << std::endl;
      OPENMS_LOG_DEBUG << "Description: " << description << std::endl;
      OPENMS_LOG_DEBUG << "SIRIUS_workspace_identifier: " << csp.compound_info.cmp << std::endl;
      OPENMS_LOG_DEBUG << "Compound identifier: " << cmp.id << std::endl;
      cmp.smiles_string = "NA";
      cmp.molecular_formula = sumformula;
      cmp.setMetaValue("Adducts", adduct);
      cmp.setMetaValue("decoy", decoy);
      // propagate optional SIRIUS bookkeeping identifiers when present
      if (!csp.compound_info.native_ids_id.empty())
      {
        cmp.setMetaValue("native_ids_id", csp.compound_info.native_ids_id);
      }
      if (!csp.compound_info.m_ids_id.empty())
      {
        cmp.setMetaValue("m_ids_id", csp.compound_info.m_ids_id);
      }
      if (!csp.compound_info.cmp.empty())
      {
        cmp.setMetaValue("sirius_workspace_identifier", csp.compound_info.cmp);
      }
      // threshold should be at x % of the maximum intensity
      // hard minimal threshold of min_int * 1.1
      float threshold_transition = max_int * (transition_threshold / 100);
      float threshold_noise = min_int * noise_threshold_constant_;
      int transition_counter = 0;
      // extract current StringDataArray with annotations/explanations
      // NOTE(review): assumes the string data array exists at this point - confirm
      // the annotation step always writes it for annotated spectra
      OpenMS::DataArrays::StringDataArray explanation_array = transition_spectrum.getStringDataArrays()[0];
      // here ms2 spectra information is used
      for (auto spec_it = transition_spectrum.begin();
           spec_it != transition_spectrum.end();
           ++spec_it)
      {
        ReactionMonitoringTransition rmt;
        rmt.clearMetaInfo();
        int peak_index = spec_it - transition_spectrum.begin();
        float current_int = spec_it->getIntensity();
        double current_mz = spec_it->getMZ();
        String current_explanation = explanation_array[peak_index];
        // write row for each transition
        // current int has to be higher than transition threshold and should not be smaller than threshold noise
        // current_mz has to be higher than min_fragment_mz and lower than max_fragment_mz
        if (current_int > threshold_transition && current_int > threshold_noise && current_mz > min_fragment_mz && current_mz < max_fragment_mz)
        {
          float rel_int = current_int / max_int;
          rmt.setPrecursorMZ((use_exact_mass && exact_mass_precursor != 0.0) ? exact_mass_precursor : csp.compound_info.pmass);
          rmt.setProductMZ(current_mz);
          TargetedExperimentHelper::TraMLProduct product;
          product.setMZ(current_mz);
          // charge state from adduct
          if (!adduct.empty() && adduct != "UNKNOWN")
          {
            product.setChargeState(getChargeFromAdduct_(adduct));
          }
          rmt.setProduct(product);
          rmt.setLibraryIntensity(rel_int);
          rmt.setCompoundRef(cmp.id);
          rmt.setNativeID(String(entry_counter) + "_" + String(transition_counter) + "_" + description + "_" + identifier_suffix);
          rmt.setMetaValue("annotation", DataValue(current_explanation));
          if (!csp.compound_info.native_ids_id.empty())
          {
            rmt.setMetaValue("native_ids_id", csp.compound_info.native_ids_id);
          }
          if (!csp.compound_info.m_ids_id.empty())
          {
            rmt.setMetaValue("m_ids_id", csp.compound_info.m_ids_id);
          }
          if (decoy == 1)
          {
            rmt.setDecoyTransitionType(ReactionMonitoringTransition::DecoyTransitionType::DECOY);
          }
          else
          {
            rmt.setDecoyTransitionType(ReactionMonitoringTransition::DecoyTransitionType::TARGET);
          }
          v_rmt.push_back(std::move(rmt));
          transition_counter += 1;
        }
      }
      // assemble the assay entry for this (target or decoy) spectrum
      MetaboTargetedAssay mta;
      mta.precursor_int = precursor_int;
      mta.compound_name = description;
      mta.compound_adduct = adduct;
      if (use_exact_mass)
      {
        mta.precursor_mz = exact_mass_precursor;
      }
      else
      {
        mta.precursor_mz = csp.compound_info.pmass;
      }
      mta.molecular_formula = sumformula;
      mta.compound_rt = feature_rt;
      mta.compound_file = csp.compound_info.file_index;
      mta.potential_cmp = cmp;
      mta.potential_rmts = v_rmt;
      // do not report if no valid transitions are found after filtering
      if (!mta.potential_rmts.empty())
      {
        v_mta.push_back(std::move(mta));
      }
    }
    entry_counter += 1;
  }
  return v_mta;
}
// Pairs compound information (SiriusMSFile) with the annotated target/decoy
// spectra from SIRIUS; matching is done via the unique m_id, which the
// annotation step stores as the name of the target MSSpectrum.
std::vector< MetaboTargetedAssay::CompoundTargetDecoyPair > MetaboTargetedAssay::pairCompoundWithAnnotatedTDSpectraPairs(const std::vector<SiriusMSFile::CompoundInfo>& v_cmpinfo,
                                                                                                                        const std::vector<SiriusFragmentAnnotation::SiriusTargetDecoySpectra>& annotated_spectra)
{
  vector< MetaboTargetedAssay::CompoundTargetDecoyPair > paired;
  for (const auto& compound : v_cmpinfo)
  {
    for (const auto& td_spectra : annotated_spectra)
    {
      // the m_id is saved at MSSpectrum level as its name
      if (td_spectra.target.getName() == compound.m_ids_id)
      {
        paired.emplace_back(compound, td_spectra);
      }
    }
  }
  return paired;
}
// Builds ambiguity groups (potentially identical compounds across input files)
// by turning each target/decoy pair into a minimal feature and grouping the
// per-file FeatureMaps with FeatureGroupingAlgorithmQT. Returns a map from the
// consensus-feature unique id to the MetaboTargetedAssays in that group.
// Fix: the original code had byte-identical if/else branches after each
// map::emplace - the group fields are written unconditionally, so a plain
// operator[] access (default-constructing on first use) is equivalent and
// removes the duplicated code.
std::unordered_map< UInt64 , vector<MetaboTargetedAssay> > MetaboTargetedAssay::buildAmbiguityGroup(const vector<MetaboTargetedAssay>& v_mta,const double& ar_mz_tol, const double& ar_rt_tol, const String& ar_mz_tol_unit_res, size_t in_files_size)
{
  String decoy_suffix = "_decoy";
  // group target and decoy position in vector based on the unique CompoundID/TransitionGroupID
  std::map<String, MetaboTargetedAssay::TargetDecoyGroup> target_decoy_groups;
  for (Size i = 0; i < v_mta.size(); ++i)
  {
    const MetaboTargetedAssay& current_entry = v_mta[i]; // const ref: avoid a deep copy per entry
    if (!current_entry.potential_rmts.empty()) // should never be empty
    {
      if (current_entry.potential_rmts[0].getDecoyTransitionType() ==
          ReactionMonitoringTransition::DecoyTransitionType::DECOY)
      {
        // remove "decoy" tag from compound id for correct mapping
        // NOTE(review): assumes decoy ids always contain "_decoy" (find() != npos) -
        // guaranteed by the id construction in the extraction methods above
        String compoundId = current_entry.potential_cmp.id;
        compoundId.erase(compoundId.find(decoy_suffix), decoy_suffix.size());
        MetaboTargetedAssay::TargetDecoyGroup& group = target_decoy_groups[compoundId];
        group.decoy_index = i;
        group.decoy_mz = current_entry.precursor_mz;
        group.decoy_rt = current_entry.compound_rt;
        group.decoy_file_number = current_entry.compound_file;
      }
      if (current_entry.potential_rmts[0].getDecoyTransitionType() ==
          ReactionMonitoringTransition::DecoyTransitionType::TARGET)
      {
        MetaboTargetedAssay::TargetDecoyGroup& group = target_decoy_groups[current_entry.potential_cmp.id];
        group.target_index = i;
        group.target_mz = current_entry.precursor_mz;
        group.target_rt = current_entry.compound_rt;
        group.target_file_number = current_entry.compound_file;
      }
    }
  }
  std::unordered_map< UInt64 , vector<MetaboTargetedAssay> > ambiguity_groups;
  vector <FeatureMap> feature_maps;
  // the grouping algorithm needs at least two maps - pad with an empty one if necessary
  size_t loop_size;
  if ( in_files_size > 1)
  {
    loop_size = in_files_size;
  }
  else
  {
    loop_size = 2; // needs at least two FeatureMaps to compare - if not available add an empty one.
  }
  for (size_t i = 0; i < loop_size; i++)
  {
    FeatureMap fmap;
    fmap.setUniqueId();
    fmap.ensureUniqueId();
    // synthetic file path so the grouping algorithm can distinguish the maps
    String internal_file_path = "File" + std::to_string(i) + ".mzML";
    fmap.setPrimaryMSRunPath({internal_file_path});
    feature_maps.emplace_back(fmap);
  }
  for (const auto& it : target_decoy_groups)
  {
    // create minimal feature (mz,rt) with reference back to vector
    Feature f;
    f.setUniqueId();
    f.ensureUniqueId();
    PeptideIdentification pep;
    PeptideIdentificationList v_pep;
    // check - no target and decoy available
    if (it.second.target_mz == 0.0 && it.second.decoy_mz == 0.0)
    {
      continue;
    }
    // target and decoy available - check correspondence
    else if (it.second.target_mz != 0.0 && it.second.decoy_mz != 0.0)
    {
      if (!(it.second.target_mz == it.second.decoy_mz &&
            it.second.target_rt == it.second.decoy_rt &&
            it.second.target_file_number == it.second.decoy_file_number))
      {
        OPENMS_LOG_DEBUG << "The decoy and target do not correspond: " <<
          " target_mz: " << it.second.target_mz <<
          " decoy_mz: " << it.second.decoy_mz <<
          " target_rt: " << it.second.target_rt <<
          " decoy_rt: " << it.second.decoy_rt <<
          " target_file_number: " << it.second.target_file_number <<
          " decoy_file_number: " << it.second.decoy_file_number << std::endl;
        continue;
      }
    }
    // feature is based on target
    // check if target is valid before generating feature
    if (it.second.target_mz == 0.0 && it.second.target_rt == 0.0)
    {
      continue;
    }
    DPosition<2> pt(it.second.target_rt, it.second.target_mz);
    f.setPosition(pt);
    // stash the indices into v_mta as meta values so they survive the grouping
    if (it.second.target_index != -1)
    {
      pep.setMetaValue("v_mta_target_index", DataValue(it.second.target_index));
    }
    if (it.second.decoy_index != -1)
    {
      pep.setMetaValue("v_mta_decoy_index", DataValue(it.second.decoy_index));
    }
    v_pep.push_back(pep);
    f.setPeptideIdentifications(v_pep);
    size_t cfile = it.second.target_file_number;
    feature_maps[cfile].push_back(f);
  }
  ConsensusMap c_map;
  FeatureGroupingAlgorithmQT fgaqt;
  Param param = fgaqt.getDefaults();
  param.setValue("ignore_charge", "true");
  param.setValue("distance_RT:max_difference", ar_rt_tol);
  param.setValue("distance_MZ:max_difference", ar_mz_tol);
  param.setValue("distance_MZ:unit", ar_mz_tol_unit_res);
  fgaqt.setParameters(param);
  // build ambiguity groups based on FeatureGroupingAlgorithmQt
  fgaqt.group(feature_maps, c_map);
  // assign unique ids
  c_map.applyMemberFunction(&UniqueIdInterface::setUniqueId);
  // build ambiguity groups based on consensus entries
  for (const auto& c_it : c_map)
  {
    vector <PeptideIdentification> v_pep;
    v_pep = c_it.getPeptideIdentifications().getData();
    vector <MetaboTargetedAssay> ambi_group;
    for (const auto& p_it : v_pep)
    {
      if (p_it.metaValueExists("v_mta_target_index"))
      {
        int index = p_it.getMetaValue("v_mta_target_index");
        ambi_group.push_back(v_mta[index]);
      }
      if (p_it.metaValueExists("v_mta_decoy_index"))
      {
        int index = p_it.getMetaValue("v_mta_decoy_index");
        ambi_group.push_back(v_mta[index]);
      }
    }
    UInt64 entry = c_it.getUniqueId();
    auto mit = ambiguity_groups.find(entry);
    // allow to add targets and decoys, since they are grouped independently
    // targets and decoys have the exact mz and rt
    if (!(mit == ambiguity_groups.end()))
    {
      mit->second.insert(mit->second.end(), ambi_group.begin(), ambi_group.end());
    }
    else
    {
      ambiguity_groups.emplace(entry, ambi_group);
    }
  }
  return ambiguity_groups;
}
// Resolves each ambiguity group based on the consensus grouping: after
// occurrence filtering, only the target and decoy assay with the highest
// precursor intensity are kept; groups that end up empty are removed.
void MetaboTargetedAssay::resolveAmbiguityGroup(std::unordered_map< UInt64, vector<MetaboTargetedAssay> >& map_mta_filter, const double& total_occurrence_filter, size_t in_files_size)
{
  vector<UInt64> keys_to_remove;
  for (auto& [group_key, group_assays] : map_mta_filter)
  {
    // partition the group into target and decoy assays
    vector<MetaboTargetedAssay> target_assays;
    vector<MetaboTargetedAssay> decoy_assays;
    for (const auto& assay : group_assays)
    {
      const bool is_target =
        (assay.potential_rmts[0].getDecoyTransitionType() == OpenMS::ReactionMonitoringTransition::TARGET);
      (is_target ? target_assays : decoy_assays).push_back(assay);
    }
    // filter based on occurrence in samples (e.g. at least in 20% of the samples)
    MetaboTargetedAssay::filterBasedOnTotalOccurrence_(target_assays, total_occurrence_filter, in_files_size);
    MetaboTargetedAssay::filterBasedOnTotalOccurrence_(decoy_assays, total_occurrence_filter, in_files_size);
    // if multiple possible identifications are reported within one ambiguity group
    // use the one with the highest occurrence
    MetaboTargetedAssay::filterBasedOnMolFormAdductOccurrence_(target_assays);
    MetaboTargetedAssay::filterBasedOnMolFormAdductOccurrence_(decoy_assays);
    // nothing survived the filters - mark the group for removal
    if (target_assays.empty() && decoy_assays.empty())
    {
      keys_to_remove.push_back(group_key);
    }
    // from the resolved group, keep the target and decoy with the highest precursor intensity
    vector<MetaboTargetedAssay> resolved;
    if (!target_assays.empty())
    {
      sortByPrecursorInt(target_assays);
      resolved.push_back(target_assays[0]);
    }
    if (!decoy_assays.empty())
    {
      sortByPrecursorInt(decoy_assays);
      resolved.push_back(decoy_assays[0]);
    }
    group_assays = resolved;
  }
  for (const auto& key : keys_to_remove)
  {
    map_mta_filter.erase(key);
  }
}
} // namespace OpenMS
/// @endcond
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/TARGETED/MRMMapping.cpp | .cpp | 7,287 | 169 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
//
#include <OpenMS/ANALYSIS/TARGETED/MRMMapping.h>
#include <OpenMS/CONCEPT/LogStream.h>
using namespace std;
namespace OpenMS
{
MRMMapping::MRMMapping() :
  DefaultParamHandler("MRMMapping")
{
  // mapping tolerances between chromatogram precursor/product m/z and the
  // transitions of the targeted experiment (both in Thomson)
  defaults_.setValue("precursor_tolerance", 0.1, "Precursor tolerance when mapping (in Th)");
  defaults_.setValue("product_tolerance", 0.1, "Product tolerance when mapping (in Th)");
  defaults_.setValue("map_multiple_assays", "false", "Allow to map multiple assays to chromatograms and duplicate these chromatograms in the output.");
  defaults_.setValidStrings("map_multiple_assays", {"true","false"});
  defaults_.setValue("error_on_unmapped", "false", "Treat remaining, unmapped chromatograms as an error");
  defaults_.setValidStrings("error_on_unmapped", {"true","false"});
  // write defaults into Param object param_
  defaultsToParam_();
  updateMembers_();
}
/// Synchronizes the cached member variables with the current Param object.
/// Fix: dropped the redundant C-style (bool) casts around toBool() (which
/// already returns bool) and replaced the C-style (double) casts with
/// static_cast per the named-cast guideline.
void MRMMapping::updateMembers_()
{
  precursor_tol_ = static_cast<double>(param_.getValue("precursor_tolerance"));
  product_tol_ = static_cast<double>(param_.getValue("product_tolerance"));
  map_multiple_assays_ = param_.getValue("map_multiple_assays").toBool();
  error_on_unmapped_ = param_.getValue("error_on_unmapped").toBool();
}
/// Maps each chromatogram of @p chromatogram_map to the matching transition(s)
/// of @p targeted_exp (precursor and product m/z within the configured
/// tolerances) and writes the annotated chromatograms to @p output.
/// @throws Exception::IllegalArgument if a chromatogram stays unmapped while
///         error_on_unmapped is set, or maps to multiple assays while
///         map_multiple_assays is off.
/// Improvements: the transition list and the per-iteration transition are
/// hoisted into const references (the original indexed
/// targeted_exp.getTransitions()[j] repeatedly inside the inner loop), and the
/// peptide lookup uses a range-for; behavior and all messages are unchanged.
void MRMMapping::mapExperiment(const OpenMS::PeakMap& chromatogram_map,
                               const OpenMS::TargetedExperiment& targeted_exp,
                               OpenMS::PeakMap& output) const
{
  // copy all meta data from old MSExperiment
  output = (ExperimentalSettings)chromatogram_map;
  output.clear(false);
  std::vector<MSChromatogram > empty_chromats;
  output.setChromatograms(empty_chromats);

  const auto& transitions = targeted_exp.getTransitions(); // hoisted out of the loops

  int notmapped = 0;
  for (Size i = 0; i < chromatogram_map.getChromatograms().size(); i++)
  {
    // try to find the best matching transition for this chromatogram
    const MSChromatogram& chromatogram = chromatogram_map.getChromatograms()[i];
    bool prec_product_set = !( std::fabs(chromatogram.getPrecursor().getMZ()) < 1e-5 &&
                               std::fabs(chromatogram.getProduct().getMZ()) < 1e-5);
    if (!prec_product_set)
    {
      if (map_multiple_assays_)
      {
        OPENMS_LOG_WARN << "Warning: Chromatogram " +
          String(chromatogram.getNativeID()) + " has no precursor or product m/z recorded, mapping may not work." << std::endl;
      }
      else
      {
        OPENMS_LOG_WARN << "Skip mapping for chromatogram " +
          String(chromatogram.getNativeID()) + " since no precursor or product m/z was recorded." << std::endl;
        continue;
      }
    }

    std::vector<MSChromatogram > mapped_chroms;
    for (Size j = 0; j < transitions.size(); j++)
    {
      const auto& transition = transitions[j];
      if (fabs(chromatogram.getPrecursor().getMZ() - transition.getPrecursorMZ()) < precursor_tol_ &&
          fabs(chromatogram.getProduct().getMZ() - transition.getProductMZ()) < product_tol_)
      {
        OPENMS_LOG_DEBUG << "Mapping chromatogram " << i << " to transition " << j << " (" << transition.getNativeID() << ")"
          " with precursor mz " << chromatogram.getPrecursor().getMZ() << " / " << transition.getPrecursorMZ() <<
          " and product mz " << chromatogram.getProduct().getMZ() << " / " << transition.getProductMZ() << std::endl;

        // Create precursor and set the peptide sequence
        MSChromatogram c = chromatogram_map.getChromatograms()[i];
        Precursor precursor = c.getPrecursor();
        String pepref = transition.getPeptideRef();
        precursor.setMetaValue("peptide_sequence", pepref);
        precursor.setMetaValue("description", transition.getNativeID());
        // prefer the actual peptide sequence over the peptide reference if available
        for (const auto& pep : targeted_exp.getPeptides())
        {
          if (pep.id == pepref)
          {
            if (!pep.sequence.empty())
            {
              precursor.setMetaValue("peptide_sequence", pep.sequence);
            }
            break;
          }
        }
        // add precursor to chromatogram
        c.setPrecursor(precursor);

        // Set the id of the chromatogram, using the id of the transition (this gives directly the mapping of the two
        c.setNativeID(transition.getNativeID());
        mapped_chroms.push_back(c);
      }
    }

    // Check whether we have mapped this chromatogram to at least one transition:
    //  - warn if no mapping occurred
    //  - else append all mapped chromatograms (if we allow multiple mappings)
    //  - else append the first mapped chromatograms (if we don't allow multiple mappings)
    if (mapped_chroms.empty())
    {
      OPENMS_LOG_WARN << "Did not find a mapping for chromatogram " + String(i) + " with transition " + String(chromatogram.getPrecursor().getMZ()) + \
        " -> " + String(chromatogram.getProduct().getMZ()) +  "! Maybe try to increase your mapping tolerance." << std::endl;
      notmapped++;
      if (error_on_unmapped_)
      {
        throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
            "Did not find a mapping for chromatogram " + String(i) + "! Maybe try to increase your mapping tolerance.");
      }
    }
    else if (map_multiple_assays_)
    {
      for (auto & c : mapped_chroms) output.addChromatogram(c);
      if (mapped_chroms.size() > 1)
      {
        OPENMS_LOG_WARN << "Chromatogram " + String(chromatogram.getNativeID()) <<
          " with " + String(chromatogram.getPrecursor().getMZ()) <<
          " -> " + String(chromatogram.getProduct().getMZ()) <<
          " maps to multiple assays!" << std::endl;
      }
    }
    else
    {
      if (mapped_chroms.size() == 1) output.addChromatogram(mapped_chroms[0]);
      else
      {
        throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Chromatogram " + String(chromatogram.getNativeID()) + \
            " with " + String(chromatogram.getPrecursor().getMZ()) + \
            " -> " + String(chromatogram.getProduct().getMZ()) + \
            " maps to multiple assays! Either decrease your mapping tolerance or set map_multiple_assays to true.");
      }
    }
  }
  if (notmapped > 0)
  {
    OPENMS_LOG_WARN << "Could not find mapping for " << notmapped << " chromatogram(s)." << std::endl;
    if (error_on_unmapped_)
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Found " + String(notmapped) + \
          " unmapped chromatograms, disable error_on_unmapped to continue.");
    }
  }
}
} //namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/TARGETED/TargetedExperimentHelper.cpp | .cpp | 3,305 | 85 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Andreas Bertsch $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/TARGETED/TargetedExperimentHelper.h>
#include <OpenMS/ANALYSIS/TARGETED/TargetedExperiment.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/CONCEPT/LogStream.h>
namespace OpenMS::TargetedExperimentHelper
{
// Applies a modification to the given AASequence at the given position.
// Position encoding: -1 selects the N-terminus, max_size (the sequence length)
// the C-terminus, and anything in between an internal residue (0-based).
void setModification(int location, int max_size, const String& modification, OpenMS::AASequence& aas)
{
  OPENMS_PRECONDITION(location >= -1 && location <= max_size,
                      (String("Location has invalid value") + (String)location).c_str() )

  // guard-clause style: handle the terminal cases first, then the internal residue
  if (location == -1)
  {
    aas.setNTerminalModification(modification);
    return;
  }
  if (location == max_size)
  {
    aas.setCTerminalModification(modification);
    return;
  }
  aas.setModification(location, modification);
}
/// Builds an AASequence for a TraML peptide. peptide.sequence is the "naked"
/// (unmodified) sequence, so every modification from peptide.mods is applied
/// on top: first via an explicit UniMod id if present, otherwise by looking up
/// the best matching modification for the mono-isotopic mass difference.
/// Improvement: the explicit const_iterator loop was replaced by a range-for
/// and the repeated size cast hoisted out of the loop; behavior is unchanged.
OpenMS::AASequence getAASequence(const OpenMS::TargetedExperiment::Peptide& peptide)
{
  // Note that the peptide.sequence is the "naked sequence" without any
  // modifications on it, therefore we have to populate the AASequence with
  // the correct modifications afterwards.
  OpenMS::ModificationsDB* mod_db = OpenMS::ModificationsDB::getInstance();
  OpenMS::AASequence aas = AASequence::fromString(peptide.sequence);

  const int seq_size = boost::numeric_cast<int>(peptide.sequence.size());

  // Populate the AASequence with the correct modifications derived from
  // the Peptide::Modification objects.
  for (const Peptide::Modification& pep_mod : peptide.mods)
  {
    // Step 1: First look whether the UniMod ID is set (we don't use a CVTerm any more but a member)
    if (pep_mod.unimod_id != -1)
    {
      setModification(pep_mod.location, seq_size, "UniMod:" + String(pep_mod.unimod_id), aas);
      continue;
    }

    OPENMS_LOG_WARN << "Warning: No UniMod id set for modification on peptide " << peptide.sequence <<
      ". Will try to infer modification id by mass next." << std::endl;

    // compare with code in source/ANALYSIS/OPENSWATH/DATAACCESS/DataAccessHelper.cpp
    // Step 2: If the above step fails, try to find the correct
    // modification by using the mass difference
    // NOTE(review): peptide.sequence[pep_mod.location] is only valid for internal
    // residues; a terminal modification (location == -1 or == size) would index
    // out of range here - confirm callers only reach this path for internal mods.
    const ResidueModification* mod = mod_db->getBestModificationByDiffMonoMass(
      pep_mod.mono_mass_delta, 1.0, peptide.sequence[pep_mod.location]);
    if (mod != nullptr)
    {
      setModification(pep_mod.location, seq_size, mod->getId(), aas);
    }
    else
    {
      // could not find any modification ...
      std::cerr << "Warning: Could not determine modification with delta mass " <<
        pep_mod.mono_mass_delta << " for peptide " << peptide.sequence <<
        " at position " << pep_mod.location << std::endl;
      std::cerr << "Skipping this modification" << std::endl;
    }
  }
  return aas;
}
}
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Andreas Bertsch $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/TARGETED/TargetedExperiment.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <ostream> // for ostream& operator<<(ostream& os, const TargetedExperiment::SummaryStatistics& s);
#include <map>
// from https://stackoverflow.com/questions/17010005/how-to-use-c11-move-semantics-to-append-vector-contents-to-another-vector
// from https://stackoverflow.com/questions/17010005/how-to-use-c11-move-semantics-to-append-vector-contents-to-another-vector
/// Move-appends the contents of @p src to @p dest and empties @p src.
/// @param src Source vector; left empty (and shrunk) on return.
/// @param dest Destination vector; receives all elements of @p src.
/// @return Iterator to the first appended element in @p dest (begin() if
///         @p dest was empty, since then the whole of @p src was appended).
/// Bugfix: the previous version left the returned iterator uninitialized
/// when @p dest was non-empty, which is undefined behavior for callers
/// using the return value.
template <typename T>
typename std::vector<T>::iterator appendRVector(std::vector<T>&& src, std::vector<T>& dest)
{
  typename std::vector<T>::iterator result;
  if (dest.empty())
  {
    // cheap path: steal the whole buffer
    dest = std::move(src);
    result = std::begin(dest);
  }
  else
  {
    // remember the insertion point before appending (reserve + back_inserter
    // may reallocate, so compute the iterator afterwards from the offset)
    const typename std::vector<T>::size_type offset = dest.size();
    dest.reserve(dest.size() + src.size());
    std::move(std::begin(src), std::end(src), std::back_inserter(dest));
    result = std::begin(dest) + offset;
  }
  src.clear();
  src.shrink_to_fit();
  return result;
}
namespace OpenMS
{
/// Default constructor: all containers start empty; the lazily-built
/// id -> element reference maps are flagged dirty so they are created
/// on first lookup.
TargetedExperiment::TargetedExperiment() :
  protein_reference_map_dirty_(true),
  peptide_reference_map_dirty_(true),
  compound_reference_map_dirty_(true)
{
}
/// Copy constructor. The cached reference maps are *not* copied (they hold
/// pointers into rhs's containers); instead they are flagged dirty and
/// rebuilt lazily on first lookup.
TargetedExperiment::TargetedExperiment(const TargetedExperiment & rhs) :
  cvs_(rhs.cvs_),
  contacts_(rhs.contacts_),
  publications_(rhs.publications_),
  instruments_(rhs.instruments_),
  targets_(rhs.targets_),
  software_(rhs.software_),
  proteins_(rhs.proteins_),
  compounds_(rhs.compounds_),
  peptides_(rhs.peptides_),
  transitions_(rhs.transitions_),
  include_targets_(rhs.include_targets_),
  exclude_targets_(rhs.exclude_targets_),
  source_files_(rhs.source_files_),
  protein_reference_map_dirty_(true),
  peptide_reference_map_dirty_(true),
  compound_reference_map_dirty_(true)
{
}
/// Move constructor. As in the copy constructor, the cached reference maps
/// are not transferred (their pointers would still target rhs's storage
/// conceptually); they are simply marked dirty for lazy rebuild.
TargetedExperiment::TargetedExperiment(TargetedExperiment && rhs) noexcept :
  cvs_(std::move(rhs.cvs_)),
  contacts_(std::move(rhs.contacts_)),
  publications_(std::move(rhs.publications_)),
  instruments_(std::move(rhs.instruments_)),
  targets_(std::move(rhs.targets_)),
  software_(std::move(rhs.software_)),
  proteins_(std::move(rhs.proteins_)),
  compounds_(std::move(rhs.compounds_)),
  peptides_(std::move(rhs.peptides_)),
  transitions_(std::move(rhs.transitions_)),
  include_targets_(std::move(rhs.include_targets_)),
  exclude_targets_(std::move(rhs.exclude_targets_)),
  source_files_(std::move(rhs.source_files_)),
  protein_reference_map_dirty_(true),
  peptide_reference_map_dirty_(true),
  compound_reference_map_dirty_(true)
{
}
TargetedExperiment::~TargetedExperiment() = default;
/// Copy assignment. Copies all data members and invalidates the cached
/// reference maps (rebuilt lazily on the next *ByRef()/has*() call).
TargetedExperiment& TargetedExperiment::operator=(const TargetedExperiment & rhs)
{
  if (&rhs != this) // guard against self-assignment
  {
    cvs_ = rhs.cvs_;
    contacts_ = rhs.contacts_;
    publications_ = rhs.publications_;
    instruments_ = rhs.instruments_;
    targets_ = rhs.targets_;
    software_ = rhs.software_;
    proteins_ = rhs.proteins_;
    compounds_ = rhs.compounds_;
    peptides_ = rhs.peptides_;
    transitions_ = rhs.transitions_;
    include_targets_ = rhs.include_targets_;
    exclude_targets_ = rhs.exclude_targets_;
    source_files_ = rhs.source_files_;
    protein_reference_map_dirty_ = true;
    peptide_reference_map_dirty_ = true;
    compound_reference_map_dirty_ = true;
  }
  return *this;
}
/// Move assignment. Moves all data members and invalidates the cached
/// reference maps (rebuilt lazily on the next lookup).
TargetedExperiment& TargetedExperiment::operator=(TargetedExperiment && rhs) noexcept
{
  if (&rhs != this) // guard against self-assignment
  {
    cvs_ = std::move(rhs.cvs_);
    contacts_ = std::move(rhs.contacts_);
    publications_ = std::move(rhs.publications_);
    instruments_ = std::move(rhs.instruments_);
    targets_ = std::move(rhs.targets_);
    software_ = std::move(rhs.software_);
    proteins_ = std::move(rhs.proteins_);
    compounds_ = std::move(rhs.compounds_);
    peptides_ = std::move(rhs.peptides_);
    transitions_ = std::move(rhs.transitions_);
    include_targets_ = std::move(rhs.include_targets_);
    exclude_targets_ = std::move(rhs.exclude_targets_);
    source_files_ = std::move(rhs.source_files_);
    protein_reference_map_dirty_ = true;
    peptide_reference_map_dirty_ = true;
    compound_reference_map_dirty_ = true;
  }
  return *this;
}
/// Concatenation: returns a copy of *this with all entries of rhs appended
/// (implemented in terms of operator+=).
TargetedExperiment TargetedExperiment::operator+(const TargetedExperiment & rhs) const
{
  TargetedExperiment tmp(*this);
  tmp += rhs;
  return tmp;
}
/// Appends all entries of @p rhs to this experiment (copying them).
/// Duplicate entries are currently not detected (see trailing todo).
TargetedExperiment & TargetedExperiment::operator+=(const TargetedExperiment & rhs)
{
  // Newly added proteins/peptides/compounds invalidate the cached
  // id -> element lookup maps; flag them for lazy rebuild.
  protein_reference_map_dirty_ = true;
  peptide_reference_map_dirty_ = true;
  compound_reference_map_dirty_ = true;
  // Append the list-type members of rhs to our own containers:
  cvs_.insert(cvs_.end(), rhs.cvs_.begin(), rhs.cvs_.end());
  contacts_.insert(contacts_.end(), rhs.contacts_.begin(), rhs.contacts_.end());
  publications_.insert(publications_.end(), rhs.publications_.begin(), rhs.publications_.end());
  instruments_.insert(instruments_.end(), rhs.instruments_.begin(), rhs.instruments_.end());
  software_.insert(software_.end(), rhs.software_.begin(), rhs.software_.end());
  proteins_.insert(proteins_.end(), rhs.proteins_.begin(), rhs.proteins_.end());
  compounds_.insert(compounds_.end(), rhs.compounds_.begin(), rhs.compounds_.end());
  peptides_.insert(peptides_.end(), rhs.peptides_.begin(), rhs.peptides_.end());
  transitions_.insert(transitions_.end(), rhs.transitions_.begin(), rhs.transitions_.end());
  include_targets_.insert(include_targets_.end(), rhs.include_targets_.begin(), rhs.include_targets_.end());
  exclude_targets_.insert(exclude_targets_.end(), rhs.exclude_targets_.begin(), rhs.exclude_targets_.end());
  source_files_.insert(source_files_.end(), rhs.source_files_.begin(), rhs.source_files_.end());
  // Merge the CV terms attached to the target list (grouped by accession).
  for (const auto& accession_terms : rhs.targets_.getCVTerms())
  {
    for (const CVTerm& term : accession_terms.second)
    {
      targets_.addCVTerm(term);
    }
  }
  // todo: check for double entries
  // transitions, peptides, proteins
  return *this;
}
/// Appends all entries of @p rhs to this experiment, moving the element
/// containers instead of copying them. rhs's vectors are left empty.
TargetedExperiment & TargetedExperiment::operator+=(TargetedExperiment && rhs)
{
  // Newly added proteins/peptides/compounds invalidate the cached
  // id -> element lookup maps; flag them for lazy rebuild.
  protein_reference_map_dirty_ = true;
  peptide_reference_map_dirty_ = true;
  compound_reference_map_dirty_ = true;
  // Move-append every list-type member of rhs into our own containers:
  appendRVector(std::move(rhs.cvs_), cvs_);
  appendRVector(std::move(rhs.contacts_), contacts_);
  appendRVector(std::move(rhs.publications_), publications_);
  appendRVector(std::move(rhs.instruments_), instruments_);
  appendRVector(std::move(rhs.software_), software_);
  appendRVector(std::move(rhs.proteins_), proteins_);
  appendRVector(std::move(rhs.compounds_), compounds_);
  appendRVector(std::move(rhs.peptides_), peptides_);
  appendRVector(std::move(rhs.transitions_), transitions_);
  appendRVector(std::move(rhs.include_targets_), include_targets_);
  appendRVector(std::move(rhs.exclude_targets_), exclude_targets_);
  appendRVector(std::move(rhs.source_files_), source_files_);
  // Merge the CV terms attached to the target list (targets_ itself is not
  // moved above, so reading rhs.targets_ here is safe).
  for (const auto& accession_terms : rhs.targets_.getCVTerms())
  {
    for (const CVTerm& term : accession_terms.second)
    {
      targets_.addCVTerm(term);
    }
  }
  // todo: check for double entries
  // transitions, peptides, proteins
  return *this;
}
/// Element-wise equality of all stored data. The lazily-built reference
/// maps and their dirty flags are deliberately excluded (caches only).
bool TargetedExperiment::operator==(const TargetedExperiment & rhs) const
{
  return cvs_ == rhs.cvs_ &&
         contacts_ == rhs.contacts_ &&
         publications_ == rhs.publications_ &&
         instruments_ == rhs.instruments_ &&
         targets_ == rhs.targets_ &&
         software_ == rhs.software_ &&
         proteins_ == rhs.proteins_ &&
         compounds_ == rhs.compounds_ &&
         peptides_ == rhs.peptides_ &&
         transitions_ == rhs.transitions_ &&
         include_targets_ == rhs.include_targets_ &&
         exclude_targets_ == rhs.exclude_targets_ &&
         source_files_ == rhs.source_files_;
}
/// Inequality, defined as the logical negation of operator==.
bool TargetedExperiment::operator!=(const TargetedExperiment & rhs) const
{
  return !(*this == rhs);
}
/// Collects summary statistics: element counts, per-decoy-type transition
/// counts, and whether any internal reference is invalid.
TargetedExperiment::SummaryStatistics TargetedExperiment::getSummary() const
{
  SummaryStatistics s;
  s.protein_count = proteins_.size();
  s.peptide_count = peptides_.size();
  s.compound_count = compounds_.size();
  s.transition_count = transitions_.size();
  // tally transitions by decoy type (TARGET/DECOY/UNKNOWN)
  for (const auto& tr : transitions_)
  {
    ++s.decoy_counts[tr.getDecoyTransitionType()];
  }
  // full consistency check (unique ids, resolvable references)
  s.contains_invalid_references = containsInvalidReferences();
  return s;
}
/// Removes all transitions. If @p clear_meta_data is true, all remaining
/// content (CVs, contacts, proteins, peptides, targets, ...) and the cached
/// lookup maps are cleared as well.
void TargetedExperiment::clear(bool clear_meta_data)
{
  transitions_.clear();
  if (clear_meta_data)
  {
    cvs_.clear();
    contacts_.clear();
    publications_.clear();
    instruments_.clear();
    targets_ = CVTermList(); // CVTermList has no clear(); reset by assignment
    software_.clear();
    proteins_.clear();
    compounds_.clear();
    peptides_.clear();
    include_targets_.clear();
    exclude_targets_.clear();
    source_files_.clear();
    // drop the caches and force a rebuild on next lookup
    protein_reference_map_.clear();
    peptide_reference_map_.clear();
    compound_reference_map_.clear();
    protein_reference_map_dirty_ = true;
    peptide_reference_map_dirty_ = true;
    compound_reference_map_dirty_ = true;
  }
}
// ---- Controlled vocabularies --------------------------------------------
/// Replaces the list of controlled vocabularies (CVs).
void TargetedExperiment::setCVs(const std::vector<CV> & cvs)
{
  cvs_ = cvs;
}

/// Returns the list of controlled vocabularies.
const std::vector<TargetedExperiment::CV> & TargetedExperiment::getCVs() const
{
  return cvs_;
}

/// Appends a single controlled vocabulary entry.
void TargetedExperiment::addCV(const CV & cv)
{
  cvs_.push_back(cv);
}

// ---- Contacts -----------------------------------------------------------
/// Replaces the contact list.
void TargetedExperiment::setContacts(const std::vector<Contact> & contacts)
{
  contacts_ = contacts;
}

/// Returns the contact list.
const std::vector<TargetedExperiment::Contact> & TargetedExperiment::getContacts() const
{
  return contacts_;
}

/// Appends a single contact.
void TargetedExperiment::addContact(const Contact & contact)
{
  contacts_.push_back(contact);
}

// ---- Publications -------------------------------------------------------
/// Replaces the publication list.
void TargetedExperiment::setPublications(const std::vector<Publication> & publications)
{
  publications_ = publications;
}

/// Returns the publication list.
const std::vector<TargetedExperiment::Publication> & TargetedExperiment::getPublications() const
{
  return publications_;
}

/// Appends a single publication.
void TargetedExperiment::addPublication(const Publication & publication)
{
  publications_.push_back(publication);
}

// ---- Target-list CV terms and meta values -------------------------------
/// Replaces the CV terms attached to the target list.
void TargetedExperiment::setTargetCVTerms(const CVTermList & cv_terms)
{
  targets_ = cv_terms;
}

/// Returns the CV terms attached to the target list.
const CVTermList & TargetedExperiment::getTargetCVTerms() const
{
  return targets_;
}

/// Adds a single CV term to the target list.
void TargetedExperiment::addTargetCVTerm(const CVTerm & cv_term)
{
  targets_.addCVTerm(cv_term);
}

/// Sets a meta value on the target list.
void TargetedExperiment::setTargetMetaValue(const String & name, const DataValue & value)
{
  targets_.setMetaValue(name, value);
}

// ---- Instruments --------------------------------------------------------
/// Replaces the instrument list.
void TargetedExperiment::setInstruments(const std::vector<Instrument> & instruments)
{
  instruments_ = instruments;
}

/// Returns the instrument list.
const std::vector<TargetedExperiment::Instrument> & TargetedExperiment::getInstruments() const
{
  return instruments_;
}

/// Appends a single instrument.
void TargetedExperiment::addInstrument(const Instrument & instrument)
{
  instruments_.push_back(instrument);
}

// ---- Software -----------------------------------------------------------
/// Replaces the software list.
void TargetedExperiment::setSoftware(const std::vector<Software> & software)
{
  software_ = software;
}

/// Returns the software list.
const std::vector<Software> & TargetedExperiment::getSoftware() const
{
  return software_;
}

/// Appends a single software entry.
void TargetedExperiment::addSoftware(const Software & software)
{
  software_.push_back(software);
}
// ---- Proteins -----------------------------------------------------------
/// Replaces the protein list; invalidates the cached id->protein map.
void TargetedExperiment::setProteins(const std::vector<Protein> & proteins)
{
  protein_reference_map_dirty_ = true;
  proteins_ = proteins;
}

/// Move overload of setProteins().
void TargetedExperiment::setProteins(std::vector<Protein> && proteins)
{
  protein_reference_map_dirty_ = true;
  proteins_ = std::move(proteins);
}

/// Returns the protein list.
const std::vector<TargetedExperiment::Protein> & TargetedExperiment::getProteins() const
{
  return proteins_;
}

/// Looks up a protein by id. Existence is checked only as a debug-build
/// precondition; call hasProtein() first in release code.
const TargetedExperiment::Protein & TargetedExperiment::getProteinByRef(const String & ref) const
{
  if (protein_reference_map_dirty_) // rebuild the cache lazily
  {
    createProteinReferenceMap_();
  }
  OPENMS_PRECONDITION(protein_reference_map_.find(ref) != protein_reference_map_.end(), "Could not find protein in map")
  return *(protein_reference_map_[ref]);
}

/// Returns true if a protein with the given id exists.
bool TargetedExperiment::hasProtein(const String & ref) const
{
  if (protein_reference_map_dirty_) // rebuild the cache lazily
  {
    createProteinReferenceMap_();
  }
  return protein_reference_map_.find(ref) != protein_reference_map_.end();
}

/// Appends a protein; invalidates the cached id->protein map.
void TargetedExperiment::addProtein(const Protein & protein)
{
  protein_reference_map_dirty_ = true;
  proteins_.push_back(protein);
}
void TargetedExperiment::setCompounds(const std::vector<Compound> & compounds)
{
compounds_ = compounds;
}
/// Returns the compound list.
const std::vector<TargetedExperiment::Compound> & TargetedExperiment::getCompounds() const
{
  return compounds_;
}
void TargetedExperiment::addCompound(const Compound & rhs)
{
compounds_.push_back(rhs);
}
// ---- Peptides and compound/peptide lookup -------------------------------
/// Replaces the peptide list; invalidates the cached id->peptide map.
void TargetedExperiment::setPeptides(const std::vector<Peptide> & peptides)
{
  peptide_reference_map_dirty_ = true;
  peptides_ = peptides;
}

/// Move overload of setPeptides().
void TargetedExperiment::setPeptides(std::vector<Peptide> && peptides)
{
  peptide_reference_map_dirty_ = true;
  peptides_ = std::move(peptides);
}

/// Returns the peptide list.
const std::vector<TargetedExperiment::Peptide> & TargetedExperiment::getPeptides() const
{
  return peptides_;
}

/// Looks up a peptide by id. Existence is checked only as a debug-build
/// precondition; call hasPeptide() first in release code.
const TargetedExperiment::Peptide & TargetedExperiment::getPeptideByRef(const String & ref) const
{
  if (peptide_reference_map_dirty_) // rebuild the cache lazily
  {
    createPeptideReferenceMap_();
  }
  OPENMS_PRECONDITION(hasPeptide(ref), "Cannot return peptide that does not exist, check with hasPeptide() first")
  return *(peptide_reference_map_[ref]);
}

/// Looks up a compound by id. Existence is checked only as a debug-build
/// precondition; call hasCompound() first in release code.
const TargetedExperiment::Compound & TargetedExperiment::getCompoundByRef(const String & ref) const
{
  if (compound_reference_map_dirty_) // rebuild the cache lazily
  {
    createCompoundReferenceMap_();
  }
  OPENMS_PRECONDITION(hasCompound(ref), "Cannot return compound that does not exist, check with hasCompound() first")
  return *(compound_reference_map_[ref]);
}

/// Returns true if a peptide with the given id exists.
bool TargetedExperiment::hasPeptide(const String & ref) const
{
  if (peptide_reference_map_dirty_)
  {
    createPeptideReferenceMap_();
  }
  return peptide_reference_map_.find(ref) != peptide_reference_map_.end();
}

/// Returns true if a compound with the given id exists.
bool TargetedExperiment::hasCompound(const String & ref) const
{
  if (compound_reference_map_dirty_)
  {
    createCompoundReferenceMap_();
  }
  return compound_reference_map_.find(ref) != compound_reference_map_.end();
}

/// Appends a peptide; invalidates the cached id->peptide map.
void TargetedExperiment::addPeptide(const Peptide & rhs)
{
  peptide_reference_map_dirty_ = true;
  peptides_.push_back(rhs);
}
// ---- Transitions, include/exclude targets, source files -----------------
/// Replaces the transition list.
void TargetedExperiment::setTransitions(const std::vector<ReactionMonitoringTransition> & transitions)
{
  transitions_ = transitions;
}

/// Move overload of setTransitions().
void TargetedExperiment::setTransitions(std::vector<ReactionMonitoringTransition> && transitions)
{
  transitions_ = std::move(transitions);
}

/// Returns the transition list.
const std::vector<ReactionMonitoringTransition> & TargetedExperiment::getTransitions() const
{
  return transitions_;
}

/// Appends a single transition.
void TargetedExperiment::addTransition(const ReactionMonitoringTransition & transition)
{
  transitions_.push_back(transition);
}

/// Replaces the include-target list.
void TargetedExperiment::setIncludeTargets(const std::vector<IncludeExcludeTarget> & targets)
{
  include_targets_ = targets;
}

/// Returns the include-target list.
const std::vector<IncludeExcludeTarget> & TargetedExperiment::getIncludeTargets() const
{
  return include_targets_;
}

/// Appends a single include target.
void TargetedExperiment::addIncludeTarget(const IncludeExcludeTarget & target)
{
  include_targets_.push_back(target);
}

/// Replaces the exclude-target list.
void TargetedExperiment::setExcludeTargets(const std::vector<IncludeExcludeTarget> & targets)
{
  exclude_targets_ = targets;
}

/// Returns the exclude-target list.
const std::vector<IncludeExcludeTarget> & TargetedExperiment::getExcludeTargets() const
{
  return exclude_targets_;
}

/// Appends a single exclude target.
void TargetedExperiment::addExcludeTarget(const IncludeExcludeTarget & target)
{
  exclude_targets_.push_back(target);
}

/// Replaces the source-file list.
void TargetedExperiment::setSourceFiles(const std::vector<SourceFile> & source_files)
{
  source_files_ = source_files;
}

/// Returns the source-file list.
const std::vector<SourceFile> & TargetedExperiment::getSourceFiles() const
{
  return source_files_;
}

/// Appends a single source file.
void TargetedExperiment::addSourceFile(const SourceFile & source_file)
{
  source_files_.push_back(source_file);
}
/// Sorts the transitions in-place by ascending product m/z.
void TargetedExperiment::sortTransitionsByProductMZ()
{
  std::sort(transitions_.begin(), transitions_.end(), ReactionMonitoringTransition::ProductMZLess());
}

/// Sorts the transitions in-place by name.
void TargetedExperiment::sortTransitionsByName()
{
  std::sort(transitions_.begin(), transitions_.end(), ReactionMonitoringTransition::NameLess());
}
bool TargetedExperiment::containsInvalidReferences() const
{
typedef std::vector<OpenMS::TargetedExperiment::Protein> ProteinVectorType;
typedef std::vector<OpenMS::TargetedExperiment::Peptide> PeptideVectorType;
typedef std::vector<OpenMS::TargetedExperiment::Compound> CompoundVectorType;
typedef std::vector<OpenMS::ReactionMonitoringTransition> TransitionVectorType;
// check that all proteins ids are unique
std::map<String, int> unique_protein_map;
for (ProteinVectorType::const_iterator prot_it = getProteins().begin(); prot_it != getProteins().end(); ++prot_it)
{
// Create new transition group if it does not yet exist
if (unique_protein_map.find(prot_it->id) != unique_protein_map.end())
{
OPENMS_LOG_ERROR << "Found duplicate protein id (must be unique): " + String(prot_it->id) << std::endl;
return true;
}
unique_protein_map[prot_it->id] = 0;
}
// check that all peptide ids are unique
std::map<String, int> unique_peptide_map;
for (PeptideVectorType::const_iterator pep_it = getPeptides().begin(); pep_it != getPeptides().end(); ++pep_it)
{
// Create new transition group if it does not yet exist
if (unique_peptide_map.find(pep_it->id) != unique_peptide_map.end())
{
OPENMS_LOG_ERROR << "Found duplicate peptide id (must be unique): " + String(pep_it->id) << std::endl;
return true;
}
unique_peptide_map[pep_it->id] = 0;
}
// check that all compound ids are unique
std::map<String, int> unique_compounds_map;
for (CompoundVectorType::const_iterator comp_it = getCompounds().begin(); comp_it != getCompounds().end(); ++comp_it)
{
// Create new transition group if it does not yet exist
if (unique_compounds_map.find(comp_it->id) != unique_compounds_map.end())
{
OPENMS_LOG_ERROR << "Found duplicate compound id (must be unique): " + String(comp_it->id) << std::endl;
return true;
}
unique_compounds_map[comp_it->id] = 0;
}
// check that all transition ids are unique
std::map<String, int> unique_transition_map;
for (TransitionVectorType::const_iterator tr_it = getTransitions().begin(); tr_it != getTransitions().end(); ++tr_it)
{
// Create new transition group if it does not yet exist
if (unique_transition_map.find(tr_it->getNativeID()) != unique_transition_map.end())
{
OPENMS_LOG_ERROR << "Found duplicate transition id (must be unique): " + String(tr_it->getNativeID()) << std::endl;
return true;
}
unique_transition_map[tr_it->getNativeID()] = 0;
}
// Check that each peptide has only valid proteins
for (Size i = 0; i < getPeptides().size(); i++)
{
for (std::vector<String>::const_iterator prot_it = getPeptides()[i].protein_refs.begin(); prot_it != getPeptides()[i].protein_refs.end(); ++prot_it)
{
if (unique_protein_map.find(*prot_it) == unique_protein_map.end())
{
OPENMS_LOG_ERROR << "Protein " << *prot_it << " is not present in the provided data structure." << std::endl;
return true;
}
}
}
// Check that each peptide has only valid references to peptides and compounds
for (Size i = 0; i < getTransitions().size(); i++)
{
const ReactionMonitoringTransition& tr = getTransitions()[i];
if (!tr.getPeptideRef().empty())
{
if (unique_peptide_map.find(tr.getPeptideRef()) == unique_peptide_map.end())
{
OPENMS_LOG_ERROR << "Peptide " << tr.getPeptideRef() << " is not present in the provided data structure." << std::endl;
return true;
}
}
else if (!tr.getCompoundRef().empty())
{
if (unique_compounds_map.find(tr.getCompoundRef()) == unique_compounds_map.end())
{
OPENMS_LOG_ERROR << "Compound " << tr.getPeptideRef() << " is not present in the provided data structure." << std::endl;
return true;
}
}
else
{
// It seems that having no associated compound or peptide is valid as both attributes are optional.
OPENMS_LOG_WARN << "Transition " << tr.getNativeID() << " does not have a compound or peptide associated with it." << std::endl;
// return true;
}
}
return false;
}
/// Rebuilds the id -> Protein* lookup cache. Declared const because the map
/// and its dirty flag are cache members (mutable in the class declaration,
/// presumably -- the pointers target elements of proteins_).
void TargetedExperiment::createProteinReferenceMap_() const
{
  for (Size i = 0; i < getProteins().size(); i++)
  {
    protein_reference_map_[getProteins()[i].id] = &getProteins()[i];
  }
  protein_reference_map_dirty_ = false;
}

/// Rebuilds the id -> Peptide* lookup cache (see createProteinReferenceMap_).
void TargetedExperiment::createPeptideReferenceMap_() const
{
  for (Size i = 0; i < getPeptides().size(); i++)
  {
    peptide_reference_map_[getPeptides()[i].id] = &getPeptides()[i];
  }
  peptide_reference_map_dirty_ = false;
}

/// Rebuilds the id -> Compound* lookup cache (see createProteinReferenceMap_).
void TargetedExperiment::createCompoundReferenceMap_() const
{
  for (Size i = 0; i < getCompounds().size(); i++)
  {
    compound_reference_map_[getCompounds()[i].id] = &getCompounds()[i];
  }
  compound_reference_map_dirty_ = false;
}
/// Appends "<percent>% (<name>)" to @p sink, where percent = count/all*100.
/// @return false (and appends nothing) when @p count is zero.
bool formatCount(const size_t count, const size_t all, const String& name, StringList& sink)
{
  if (count == 0) return false; // nothing to report... 0%....
  sink.push_back(String(count * 100.0 / all, false) + "% (" + name + ")");
  return true;
}
/// Streams a human-readable summary (counts plus the percentage breakdown
/// of target/decoy/unknown transitions).
std::ostream& operator<<(std::ostream& os, const TargetedExperiment::SummaryStatistics& s)
{
  using TYPE = ReactionMonitoringTransition::DecoyTransitionType;
  auto count_copy = s.decoy_counts; // allow to default construct missing values with 0 counts
  size_t all = count_copy[TYPE::DECOY] +
               count_copy[TYPE::TARGET] +
               count_copy[TYPE::UNKNOWN];
  if (all == 0) all = 1; // avoid division by zero below
  StringList counts;
  // formatCount skips zero-count types, so only non-empty ones are printed
  formatCount(count_copy[TYPE::TARGET], all, "target", counts);
  formatCount(count_copy[TYPE::DECOY], all, "decoy", counts);
  formatCount(count_copy[TYPE::UNKNOWN], all, "unknown", counts);
  os << "# Proteins: " << s.protein_count << '\n'
     << "# Peptides: " << s.peptide_count << '\n'
     << "# Compounds: " << s.compound_count << '\n'
     << "# Transitions: " << s.transition_count << '\n'
     << "Transition Type: " + ListUtils::concatenate(counts, ", ") + "\n"
     << "All internal references valid: " << (!s.contains_invalid_references ? "yes" : "no") << '\n';
  return os;
}
} // namespace OpenMS
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Stephan Aiche, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/QUANTITATION/IsobaricIsotopeCorrector.h>
#include <OpenMS/ANALYSIS/QUANTITATION/IsobaricQuantitationMethod.h>
#include <OpenMS/ANALYSIS/QUANTITATION/IsobaricQuantifierStatistics.h>
#include <OpenMS/KERNEL/ConsensusMap.h>
#include <OpenMS/CONCEPT/LogStream.h>
// NNLS isotope correction
#include <OpenMS/ML/NNLS/NonNegativeLeastSquaresSolver.h>
// Internal Eigen utilities
#include <OpenMS/DATASTRUCTURES/MatrixEigen.h>
#include <Eigen/LU>
// #define ISOBARIC_QUANT_DEBUG
namespace OpenMS
{
/// Corrects isotopic impurities of a single channel-intensity vector
/// in-place by solving a non-negative least-squares (NNLS) problem with the
/// method's isotope correction matrix.
void
IsobaricIsotopeCorrector::correctIsotopicImpurities(std::vector<double>& intensities,
                                                    const IsobaricQuantitationMethod* quant_method)
{
  std::vector<double> res(quant_method->getNumberOfChannels());
  // we need to copy because NNLS will modify the input Matrix
  Matrix<double> m = quant_method->getIsotopeCorrectionMatrix();
  solveNNLS_(m, intensities, res);
  intensities = res;
}
/// Corrects isotopic impurities for every consensus feature of a map.
/// Each feature's channel intensities are solved twice: once via LU
/// decomposition (fast, may yield negative intensities) and once via NNLS
/// (non-negative). The NNLS solution is written to @p consensus_map_out;
/// the discrepancy between both solutions is recorded in the returned
/// statistics object.
/// @throws Exception::InvalidParameter if the correction matrix is the
///         identity (correction would be a no-op) or not invertible.
IsobaricQuantifierStatistics
IsobaricIsotopeCorrector::correctIsotopicImpurities(
  const ConsensusMap& consensus_map_in, ConsensusMap& consensus_map_out,
  const IsobaricQuantitationMethod* quant_method)
{
  OPENMS_PRECONDITION(consensus_map_in.size() == consensus_map_out.size(),
                      "The in- and output map need to have the same size.")
  // the stats object to fill while correcting
  IsobaricQuantifierStatistics stats;
  stats.number_ms2_total = consensus_map_out.size();
  stats.channel_count = quant_method->getNumberOfChannels();
  Matrix<double> correction_matrix = quant_method->getIsotopeCorrectionMatrix();
  // Check if correction matrix is identity using eigenView
  auto correction_view = eigenView(correction_matrix);
  if (correction_view.isIdentity(0.0))
  {
    OPENMS_LOG_DEBUG << "Correction matrix is the identity matrix." << std::endl;
    OPENMS_LOG_DEBUG << correction_matrix << std::endl;
    throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                      "IsobaricIsotopeCorrector: The given isotope correction matrix is an identity matrix leading to no correction. "
                                      "Please provide a valid isotope_correction matrix as it was provided with the sample kit!");
  }
  // LU decomposition is computed once here and reused for every feature
  Eigen::FullPivLU<Eigen::MatrixXd> ludecomp(correction_view);
  std::vector<double> b(quant_method->getNumberOfChannels(), 0.0);
  // NOTE(review): 'x' is never read below -- looks like a leftover
  std::vector<double> x(quant_method->getNumberOfChannels(), 0);
  if (!ludecomp.isInvertible())
  {
    throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                      "IsobaricIsotopeCorrector: The given isotope correction matrix is not invertible!");
  }
  // data structures for NNLS (column vectors of channel intensities)
  Matrix<double> m_b(quant_method->getNumberOfChannels(), 1);
  Matrix<double> m_x(quant_method->getNumberOfChannels(), 1);
  // correct all consensus elements
  for (ConsensusMap::size_type i = 0; i < consensus_map_out.size(); ++i)
  {
#ifdef ISOBARIC_QUANT_DEBUG
    std::cout << "\nMAP element #### " << i << " #### \n" << std::endl;
#endif
    // delete only the consensus handles from the output map
    consensus_map_out[i].clear();
    // fill b vector (both the std::vector and the Matrix variant)
    fillInputVector_(b, m_b, consensus_map_in[i], consensus_map_in);
    // solve using LU decomposition (naive solution, can be negative)
    auto b_eigen = eigenVectorView(b);
    Eigen::VectorXd e_mx = ludecomp.solve(b_eigen);
    // Check solution validity (A*x should reproduce b within Eigen's
    // default isApprox precision)
    if (!(correction_view * e_mx).isApprox(b_eigen))
    {
      throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                        "IsobaricIsotopeCorrector: Cannot multiply!");
    }
    // Solve using NNLS (ensures non-negative solution)
    solveNNLS_(correction_matrix, m_b, m_x);
    // Convert naive solution to std::vector for stats
    std::vector<double> x_naive(e_mx.data(), e_mx.data() + e_mx.size());
    // update the output consensus map with the corrected (NNLS) intensities
    float cf_intensity = updateOutputMap_(consensus_map_in, consensus_map_out, i, m_x);
    // check consistency between LU and NNLS solutions
    computeStats_(m_x, x_naive, cf_intensity, quant_method, stats);
  }
  return stats;
}
/// Copies the channel intensities of one consensus feature into both the
/// std::vector @p b and the column matrix @p m_b, indexed by the channel id
/// stored in the map's column headers.
/// NOTE(review): the result of getColumnHeaders().find() is dereferenced
/// without an end() check -- assumes every feature's map index has a column
/// header with a "channel_id" meta value; confirm this invariant upstream.
void
IsobaricIsotopeCorrector::fillInputVector_(std::vector<double>& b,
                                           Matrix<double>& m_b,
                                           const ConsensusFeature& cf,
                                           const ConsensusMap& cm)
{
  for (ConsensusFeature::HandleSetType::const_iterator it_elements = cf.getFeatures().begin();
       it_elements != cf.getFeatures().end();
       ++it_elements)
  {
    //find channel_id of current element
    Int index = Int(cm.getColumnHeaders().find(it_elements->getMapIndex())->second.getMetaValue("channel_id"));
#ifdef ISOBARIC_QUANT_DEBUG
    std::cout << " map_index " << it_elements->getMapIndex() << "-> id " << index << " with intensity " << it_elements->getIntensity() << "\n" << std::endl;
#endif
    b[index] = it_elements->getIntensity();
    m_b(index, 0) = it_elements->getIntensity();
  }
}
/// Collects the channel intensities of a consensus feature into a vector of
/// size getNumberOfChannels(), indexed by channel id.
/// NOTE(review): the channel id is derived as (map index - constant offset),
/// where the offset is computed from the feature's *first* handle -- this
/// assumes map indices map to channel ids by a single constant shift for
/// all handles; confirm against how the consensus map is built.
std::vector<double>
IsobaricIsotopeCorrector::getIntensities_(const IsobaricQuantitationMethod* quant_method,
                                          const ConsensusFeature& cf,
                                          const ConsensusMap& cm)
{
  int first_map_index = cf.getFeatures().begin()->getMapIndex();
  Int map_index_offset = Int(cm.getColumnHeaders().find(first_map_index)->second.getMetaValue("channel_id")) - first_map_index;
  std::vector<double> res;
  res.resize(quant_method->getNumberOfChannels());
  Int index = 0;
  for (ConsensusFeature::HandleSetType::const_iterator it_elements = cf.getFeatures().begin();
       it_elements != cf.getFeatures().end();
       ++it_elements)
  {
    //find channel_id of current element
    index = Int(it_elements->getMapIndex() - map_index_offset);
    res[index] = it_elements->getIntensity();
  }
  return res;
}
/// Solves correction_matrix * m_x = m_b under the constraint m_x >= 0
/// (matrix/column-vector variant).
/// @throws Exception::FailedAPICall if the NNLS solver does not converge.
void
IsobaricIsotopeCorrector::solveNNLS_(const Matrix<double>& correction_matrix,
                                     const Matrix<double>& m_b, Matrix<double>& m_x)
{
  Int status = NonNegativeLeastSquaresSolver::solve(correction_matrix, m_b, m_x);
  if (status != NonNegativeLeastSquaresSolver::SOLVED)
  {
    throw Exception::FailedAPICall(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                   "IsobaricIsotopeCorrector: Failed to find least-squares fit!");
  }
}

/// Overload working on std::vector in/out instead of column matrices.
/// @throws Exception::FailedAPICall if the NNLS solver does not converge.
void
IsobaricIsotopeCorrector::solveNNLS_(Matrix<double>& correction_matrix,
                                     std::vector<double>& b,
                                     std::vector<double>& x)
{
  Int status = NonNegativeLeastSquaresSolver::solve(correction_matrix, b, x);
  if (status != NonNegativeLeastSquaresSolver::SOLVED)
  {
    throw Exception::FailedAPICall(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                   "IsobaricIsotopeCorrector: Failed to find least-squares fit!");
  }
}
/// Compares the NNLS solution @p m_x with the naive LU solution @p x_naive
/// and accumulates the discrepancies into @p stats (vector variant).
/// A channel counts as "different" if the relative deviation exceeds 1%.
/// NOTE(review): the relative deviation divides by m_x[index]; if the NNLS
/// intensity is exactly 0 this yields inf/NaN -- inf compares > 1 and is
/// counted as different, which may or may not be intended.
void
IsobaricIsotopeCorrector::computeStats_(const std::vector<double>& m_x,
                                        const std::vector<double>& x_naive,
                                        const float cf_intensity,
                                        const IsobaricQuantitationMethod* quant_method,
                                        IsobaricQuantifierStatistics& stats)
{
  Size s_negative(0);
  Size s_different_count(0); // happens when naive solution is negative in other channels
  double s_different_intensity(0);
  // ISOTOPE CORRECTION: compare solutions of Matrix inversion vs. NNLS
  for (Size index = 0; index < quant_method->getNumberOfChannels(); ++index)
  {
    if (x_naive[index] < 0.0)
    {
      ++s_negative;
    }
    else if ((((std::fabs(m_x[index] - x_naive[index]))/m_x[index])*100) > 1)
    {
      ++s_different_count;
      s_different_intensity += std::fabs(m_x[index] - x_naive[index]);
    }
  }
  if (s_negative == 0 && s_different_count > 0) //some solutions are inconsistent, despite being positive
  {
    OPENMS_LOG_WARN << "IsobaricIsotopeCorrector: Isotope correction values of alternative method differ!" << std::endl;
  }
  // update global stats
  stats.iso_number_reporter_negative += s_negative;
  stats.iso_number_reporter_different += s_different_count;
  stats.iso_solution_different_intensity += s_different_intensity;
  if (s_negative > 0)
  {
    ++stats.iso_number_ms2_negative;
    stats.iso_total_intensity_negative += cf_intensity;
  }
}
/// Matrix-column variant of computeStats_ (duplicated logic; m_x(index,0)
/// replaces m_x[index] -- keep both overloads in sync when editing).
void
IsobaricIsotopeCorrector::computeStats_(const Matrix<double>& m_x,
                                        const std::vector<double>& x_naive,
                                        const float cf_intensity,
                                        const IsobaricQuantitationMethod* quant_method,
                                        IsobaricQuantifierStatistics& stats)
{
  Size s_negative(0);
  Size s_different_count(0); // happens when naive solution is negative in other channels
  double s_different_intensity(0);
  // ISOTOPE CORRECTION: compare solutions of Matrix inversion vs. NNLS
  for (Size index = 0; index < quant_method->getNumberOfChannels(); ++index)
  {
    if (x_naive[index] < 0.0)
    {
      ++s_negative;
    }
    else if ((((std::fabs(m_x(index,0) - x_naive[index]))/m_x(index,0))*100) > 1)
    {
      ++s_different_count;
      s_different_intensity += std::fabs(m_x(index,0) - x_naive[index]);
    }
  }
  if (s_negative == 0 && s_different_count > 0) //some solutions are inconsistent, despite being positive
  {
    OPENMS_LOG_WARN << "IsobaricIsotopeCorrector: Isotope correction values of alternative method differ!" << std::endl;
  }
  // update global stats
  stats.iso_number_reporter_negative += s_negative;
  stats.iso_number_reporter_different += s_different_count;
  stats.iso_solution_different_intensity += s_different_intensity;
  if (s_negative > 0)
  {
    ++stats.iso_number_ms2_negative;
    stats.iso_total_intensity_negative += cf_intensity;
  }
}
/// Writes the corrected channel intensities @p m_x into the output feature
/// at index @p current_cf and sets the feature's overall intensity to the
/// sum over all channels.
/// @return The summed (overall) intensity of the consensus feature.
float
IsobaricIsotopeCorrector::updateOutputMap_(
  const ConsensusMap& consensus_map_in, ConsensusMap& consensus_map_out,
  ConsensusMap::size_type current_cf, const std::vector<double>& m_x)
{
  float cf_intensity(0);
  for (ConsensusFeature::HandleSetType::const_iterator it_elements = consensus_map_in[current_cf].begin();
       it_elements != consensus_map_in[current_cf].end();
       ++it_elements)
  {
    FeatureHandle handle = *it_elements;
    //find channel_id of current element
    Int index = Int(consensus_map_out.getColumnHeaders()[it_elements->getMapIndex()].getMetaValue("channel_id"));
    handle.setIntensity(float(m_x[index]));
    consensus_map_out[current_cf].insert(handle);
    cf_intensity += handle.getIntensity(); // sum up all channels for CF
#ifdef ISOBARIC_QUANT_DEBUG
    std::cout << it_elements->getIntensity() << " -> " << handle.getIntensity() << std::endl;
#endif
  }
  consensus_map_out[current_cf].setIntensity(cf_intensity); // set overall intensity of CF (sum of all channels)
  return cf_intensity;
}
/// Matrix-column variant of updateOutputMap_ (duplicated logic; m_x(index,0)
/// replaces m_x[index] -- keep both overloads in sync when editing).
/// @return The summed (overall) intensity of the consensus feature.
float
IsobaricIsotopeCorrector::updateOutputMap_(
  const ConsensusMap& consensus_map_in, ConsensusMap& consensus_map_out,
  ConsensusMap::size_type current_cf, const Matrix<double>& m_x)
{
  float cf_intensity(0);
  for (ConsensusFeature::HandleSetType::const_iterator it_elements = consensus_map_in[current_cf].begin();
       it_elements != consensus_map_in[current_cf].end();
       ++it_elements)
  {
    FeatureHandle handle = *it_elements;
    //find channel_id of current element
    Int index = Int(consensus_map_out.getColumnHeaders()[it_elements->getMapIndex()].getMetaValue("channel_id"));
    handle.setIntensity(float(m_x(index,0)));
    consensus_map_out[current_cf].insert(handle);
    cf_intensity += handle.getIntensity(); // sum up all channels for CF
#ifdef ISOBARIC_QUANT_DEBUG
    std::cout << it_elements->getIntensity() << " -> " << handle.getIntensity() << std::endl;
#endif
  }
  consensus_map_out[current_cf].setIntensity(cf_intensity); // set overall intensity of CF (sum of all channels)
  return cf_intensity;
}
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/QUANTITATION/TMTTenPlexQuantitationMethod.cpp | .cpp | 7,611 | 144 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Stephan Aiche $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/QUANTITATION/TMTTenPlexQuantitationMethod.h>
#include <OpenMS/DATASTRUCTURES/Matrix.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
namespace OpenMS
{
// Unique method identifier returned by getMethodName().
const String TMTTenPlexQuantitationMethod::name_ = "tmt10plex";
// Reporter-channel names in channel-index order (126 ... 131); also used to
// validate the "reference_channel" parameter.
const std::vector<std::string> TMTTenPlexQuantitationMethod::channel_names_ = {"126","127N","127C","128N","128C","129N","129C","130N","130C","131"};
// Constructor: registers the ten TMT channels with their name, index,
// (initially empty) description, reporter-ion mass, and the channel indices
// affected by their -2/-1/+1/+2 Da isotopic impurities (-1 = no channel in
// the plex at that offset). Finally sets up the default parameters.
TMTTenPlexQuantitationMethod::TMTTenPlexQuantitationMethod()
{
  setName("TMTTenPlexQuantitationMethod");

  // // mass map outline - for further details please see #2427
  // "126", 126.127726, x, x, 127C, 128C
  // "127N", 127.124761, x, x, 128N, 129N
  // "127C", 127.131081, x, 126, 128C, 129C
  // "128N", 128.128116, x, 127N, 129N, 130N
  // "128C", 128.134436, 126, 127C, 129C, 130C
  // "129N", 129.131471, 127N, 128N, 130N, 131
  // "129C", 129.137790, 127C, 128C, 130C, x
  // "130N", 130.134825, 128N, 129N, 131, x
  // "130C", 130.141145, 128C, 129C, x, x
  // "131", 131.138180, 129N, 130N, x, x

  // create the channel map                                             //-2  -1  +1  +2
  channels_.push_back(IsobaricChannelInformation("126", 0, "", 126.127726, {-1, -1, 2, 4}));
  channels_.push_back(IsobaricChannelInformation("127N", 1, "", 127.124761, {-1, -1, 3, 5}));
  channels_.push_back(IsobaricChannelInformation("127C", 2, "", 127.131081, {-1, 0, 4, 6}));
  channels_.push_back(IsobaricChannelInformation("128N", 3, "", 128.128116, {-1, 1, 5, 7}));
  channels_.push_back(IsobaricChannelInformation("128C", 4, "", 128.134436, {0, 2, 6, 8}));
  channels_.push_back(IsobaricChannelInformation("129N", 5, "", 129.131471, {1, 3, 7, 9}));
  channels_.push_back(IsobaricChannelInformation("129C", 6, "", 129.137790, {2, 4, 8, -1}));
  channels_.push_back(IsobaricChannelInformation("130N", 7, "", 130.134825, {3, 5, 9, -1}));
  channels_.push_back(IsobaricChannelInformation("130C", 8, "", 130.141145, {4, 6, -1, -1}));
  channels_.push_back(IsobaricChannelInformation("131", 9, "", 131.138180, {5, 7, -1, -1}));

  // we assume 126 to be the reference
  reference_channel_ = 0;

  setDefaultParams_();
}
// Declares the default parameters of this method: one free-text description
// per channel, the reference channel (validated against channel_names_), and
// the per-channel isotope-impurity correction matrix.
void TMTTenPlexQuantitationMethod::setDefaultParams_()
{
  defaults_.setValue("channel_126_description", "", "Description for the content of the 126 channel.");
  defaults_.setValue("channel_127N_description", "", "Description for the content of the 127N channel.");
  defaults_.setValue("channel_127C_description", "", "Description for the content of the 127C channel.");
  defaults_.setValue("channel_128N_description", "", "Description for the content of the 128N channel.");
  defaults_.setValue("channel_128C_description", "", "Description for the content of the 128C channel.");
  defaults_.setValue("channel_129N_description", "", "Description for the content of the 129N channel.");
  defaults_.setValue("channel_129C_description", "", "Description for the content of the 129C channel.");
  defaults_.setValue("channel_130N_description", "", "Description for the content of the 130N channel.");
  defaults_.setValue("channel_130C_description", "", "Description for the content of the 130C channel.");
  defaults_.setValue("channel_131_description", "", "Description for the content of the 131 channel.");

  defaults_.setValue("reference_channel", "126", "The reference channel (126, 127N, 127C, 128N, 128C, 129N, 129C, 130N, 130C, 131).");
  defaults_.setValidStrings("reference_channel", TMTTenPlexQuantitationMethod::channel_names_);

  // one row per channel, each giving the -2/-1/+1/+2 Da impurity percentages
  defaults_.setValue("correction_matrix", std::vector<std::string>{"0.0/0.0/5.09/0.0",
                                                                   "0.0/0.25/5.27/0.0",
                                                                   "0.0/0.37/5.36/0.15",
                                                                   "0.0/0.65/4.17/0.1",
                                                                   "0.08/0.49/3.06/0.0",
                                                                   "0.01/0.71/3.07/0.0",
                                                                   "0.0/1.32/2.62/0.0",
                                                                   "0.02/1.28/2.75/2.53",
                                                                   "0.03/2.08/2.23/0.0",
                                                                   "0.08/1.99/1.65/0.0"},
                     "Correction matrix for isotope distributions (see documentation); use the following format: <-2Da>/<-1Da>/<+1Da>/<+2Da>; e.g. '0/0.3/4/0', '0.1/0.3/3/0.2'");

  defaultsToParam_();
}
void TMTTenPlexQuantitationMethod::updateMembers_()
{
channels_[0].description = param_.getValue("channel_126_description").toString();
channels_[1].description = param_.getValue("channel_127N_description").toString();
channels_[2].description = param_.getValue("channel_127C_description").toString();
channels_[3].description = param_.getValue("channel_128N_description").toString();
channels_[4].description = param_.getValue("channel_128C_description").toString();
channels_[5].description = param_.getValue("channel_129N_description").toString();
channels_[6].description = param_.getValue("channel_129C_description").toString();
channels_[7].description = param_.getValue("channel_130N_description").toString();
channels_[8].description = param_.getValue("channel_130C_description").toString();
channels_[9].description = param_.getValue("channel_131_description").toString();
// compute the index of the reference channel
std::vector<std::string>::const_iterator t_it = std::find(TMTTenPlexQuantitationMethod::channel_names_.begin(),
TMTTenPlexQuantitationMethod::channel_names_.end(),
param_.getValue("reference_channel"));
reference_channel_ = t_it - TMTTenPlexQuantitationMethod::channel_names_.begin();
}
// Copy constructor: copies the base part, the channel table and the
// reference-channel index of the source object.
TMTTenPlexQuantitationMethod::TMTTenPlexQuantitationMethod(const TMTTenPlexQuantitationMethod& other):
  IsobaricQuantitationMethod(other)
{
  channels_ = other.channels_;
  reference_channel_ = other.reference_channel_;
}
// Defaulted copy assignment: member-wise copy is sufficient.
TMTTenPlexQuantitationMethod& TMTTenPlexQuantitationMethod::operator=(const TMTTenPlexQuantitationMethod& rhs)
= default;
// Returns the static method identifier ("tmt10plex").
const String& TMTTenPlexQuantitationMethod::getMethodName() const
{
  return TMTTenPlexQuantitationMethod::name_;
}
// Returns the list of the ten channels registered by the constructor.
const IsobaricQuantitationMethod::IsobaricChannelList& TMTTenPlexQuantitationMethod::getChannelInformation() const
{
  return channels_;
}
// Returns the number of channels of this method.
Size TMTTenPlexQuantitationMethod::getNumberOfChannels() const
{
  // derive the count from the channel table instead of hard-coding 10,
  // so this value cannot drift if the table ever changes
  return channels_.size();
}
// Parses the "correction_matrix" parameter rows into a numeric
// isotope-correction matrix.
Matrix<double> TMTTenPlexQuantitationMethod::getIsotopeCorrectionMatrix() const
{
  return stringListToIsotopeCorrectionMatrix_(
    ListUtils::toStringList<std::string>(getParameters().getValue("correction_matrix")));
}
// Returns the index of the reference channel (set from the
// "reference_channel" parameter in updateMembers_).
Size TMTTenPlexQuantitationMethod::getReferenceChannel() const
{
  return reference_channel_;
}
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/QUANTITATION/TMTEighteenPlexQuantitationMethod.cpp | .cpp | 11,120 | 172 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Stephan Aiche, Radu Suciu $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/QUANTITATION/TMTEighteenPlexQuantitationMethod.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <OpenMS/DATASTRUCTURES/Matrix.h>
#include <algorithm>
namespace OpenMS
{
// Unique method identifier returned by getMethodName().
const String TMTEighteenPlexQuantitationMethod::name_ = "tmt18plex";
// Reporter-channel names in channel-index order (126 ... 135N); also used to
// validate the "reference_channel" parameter.
const std::vector<std::string> TMTEighteenPlexQuantitationMethod::channel_names_ = {"126","127N","127C","128N","128C","129N","129C","130N","130C","131N","131C","132N","132C","133N","133C","134N","134C","135N"};
// Constructor: registers the eighteen TMT channels with their name, index,
// (initially empty) description, reporter-ion mass and, per channel, the
// indices of the eight channels affected by its isotopic impurities
// (-2C13 / -N15-C13 / -C13 / -N15 / +N15 / +C13 / +N15+C13 / +2C13;
// -1 = no channel in the plex at that offset). Finally sets up the defaults.
TMTEighteenPlexQuantitationMethod::TMTEighteenPlexQuantitationMethod()
{
  setName("TMTEighteenPlexQuantitationMethod");

  // create the channel map
  channels_.push_back(IsobaricChannelInformation("126", 0, "", 126.127726, {-1, -1, -1, -1, 1, 2, 3, 4}));
  channels_.push_back(IsobaricChannelInformation("127N", 1, "", 127.124761, {-1, -1, -1, 0, -1, 3, -1, 5}));
  channels_.push_back(IsobaricChannelInformation("127C", 2, "", 127.131081, {-1, -1, 0, -1, 3, 4, 5, 6}));
  channels_.push_back(IsobaricChannelInformation("128N", 3, "", 128.128116, {-1, 0, 1, 2, -1, 5, -1, 7}));
  channels_.push_back(IsobaricChannelInformation("128C", 4, "", 128.134436, { 0, -1, 2, -1, 5, 6, 7, 8}));
  channels_.push_back(IsobaricChannelInformation("129N", 5, "", 129.131471, { 1, 2, 3, 4, -1, 7, -1, 9}));
  channels_.push_back(IsobaricChannelInformation("129C", 6, "", 129.137790, { 2, -1, 4, -1, 7, 8, 9, 10}));
  channels_.push_back(IsobaricChannelInformation("130N", 7, "", 130.134825, { 3, 4, 5, 6, -1, 9, -1, 11}));
  channels_.push_back(IsobaricChannelInformation("130C", 8, "", 130.141145, { 4, -1, 6, -1, 9, 10, 11, 12}));
  channels_.push_back(IsobaricChannelInformation("131N", 9, "", 131.138180, { 5, 6, 7, 8, -1, 11, -1, 13}));
  channels_.push_back(IsobaricChannelInformation("131C", 10, "", 131.144500, { 6, -1, 8, -1, 11, 12, 13, 14}));
  channels_.push_back(IsobaricChannelInformation("132N", 11, "", 132.141535, { 7, 8, 9, 10, -1, 13, -1, 15}));
  channels_.push_back(IsobaricChannelInformation("132C", 12, "", 132.147855, { 8, -1, 10, -1, 13, 14, 15, 16}));
  channels_.push_back(IsobaricChannelInformation("133N", 13, "", 133.144890, { 9, 10, 11, 12, -1, 15, -1, 17}));
  channels_.push_back(IsobaricChannelInformation("133C", 14, "", 133.151210, {10, -1, 12, -1, 15, 16, 17, -1}));
  channels_.push_back(IsobaricChannelInformation("134N", 15, "", 134.148245, {11, 12, 13, 14, -1, 17, -1, -1}));
  channels_.push_back(IsobaricChannelInformation("134C", 16, "", 134.154565, {12, -1, 14, -1, 17, -1, -1, -1}));
  channels_.push_back(IsobaricChannelInformation("135N", 17, "", 135.151600, {13, 14, 15, 16, -1, -1, -1, -1}));

  // we assume 126 to be the reference
  reference_channel_ = 0;

  setDefaultParams_();
}
// Declares the default parameters of this method: one free-text description
// per channel, the reference channel (validated against channel_names_), and
// the per-channel isotope-impurity correction matrix (one row per channel).
void TMTEighteenPlexQuantitationMethod::setDefaultParams_()
{
  defaults_.setValue("channel_126_description", "", "Description for the content of the 126 channel.");
  defaults_.setValue("channel_127N_description", "", "Description for the content of the 127N channel.");
  defaults_.setValue("channel_127C_description", "", "Description for the content of the 127C channel.");
  defaults_.setValue("channel_128N_description", "", "Description for the content of the 128N channel.");
  defaults_.setValue("channel_128C_description", "", "Description for the content of the 128C channel.");
  defaults_.setValue("channel_129N_description", "", "Description for the content of the 129N channel.");
  defaults_.setValue("channel_129C_description", "", "Description for the content of the 129C channel.");
  defaults_.setValue("channel_130N_description", "", "Description for the content of the 130N channel.");
  defaults_.setValue("channel_130C_description", "", "Description for the content of the 130C channel.");
  defaults_.setValue("channel_131N_description", "", "Description for the content of the 131N channel.");
  defaults_.setValue("channel_131C_description", "", "Description for the content of the 131C channel.");
  defaults_.setValue("channel_132N_description", "", "Description for the content of the 132N channel.");
  defaults_.setValue("channel_132C_description", "", "Description for the content of the 132C channel.");
  defaults_.setValue("channel_133N_description", "", "Description for the content of the 133N channel.");
  defaults_.setValue("channel_133C_description", "", "Description for the content of the 133C channel.");
  defaults_.setValue("channel_134N_description", "", "Description for the content of the 134N channel.");
  defaults_.setValue("channel_134C_description", "", "Description for the content of the 134C channel.");
  defaults_.setValue("channel_135N_description", "", "Description for the content of the 135N channel.");

  defaults_.setValue("reference_channel", "126", "The reference channel (126, 127N, 127C, 128N, 128C, 129N, 129C, 130N, 130C, 131N, 131C, 132N, 132C, 133N, 133C, 134N, 134C, 135N).");
  defaults_.setValidStrings("reference_channel", TMTEighteenPlexQuantitationMethod::channel_names_);

  // TODO: verify these
  defaults_.setValue("correction_matrix", std::vector<std::string>{
                                                                   "NA/NA /NA/NA /0.31/9.09 /0.02/0.32",
                                                                   "NA/NA /NA/0.78 /NA/9.41 /NA/0.33",
                                                                   "NA/NA /0.93/NA /0.35/8.63 /0.01/0.27",
                                                                   "NA/0.00 /0.82/0.65 /NA/8.13 /NA/0.26",
                                                                   "0.00/NA /1.47/NA /0.34/6.91 /0.00/0.15",
                                                                   "0.00/0.00 /1.46/1.28 /NA/6.86 /NA/0.15",
                                                                   "0.13/NA /2.59/NA /0.32/6.07 /0.1/0.09",
                                                                   "0.13/0.00 /2.41/0.27 /NA/5.58 /NA/0.10",
                                                                   "0.04/NA /3.10/NA /0.42/4.82 /0.02/0.06",
                                                                   "0.03/0.00 /2.78/0.63 /NA/4.57 /NA/0.12",
                                                                   "0.08/NA /3.90/NA /0.47/3.57 /0.00/0.04",
                                                                   "0.15/0.01 /3.58/0.72 /NA/1.80 /NA/0.00",
                                                                   "0.11/NA /4.55/NA /0.43/1.86 /0.00/0.00",
                                                                   "0.07/0.01 /3.14/0.73 /NA/3.40 /NA/0.03",
                                                                   "0.22/NA /4.96/NA /0.34/1.03 /0.00/NA",
                                                                   "0.30/0.03 /5.49/0.62 /NA/1.14 /NA/NA",
                                                                   "0.14/NA /5.81/NA /0.31/NA /NA/NA",
                                                                   "0.19/0.02 /5.42/0.36 /NA/NA /NA/NA"
                                                                  },
                     "Correction matrix for isotope distributions in percent from the Thermo data sheet (see documentation);"
                     " Please provide 18 entries (rows), separated by comma, where each entry contains 8 values in the following format: <-2C13>/<-N15-C13>/<-C13>/<-N15>/<+N15>/<+C13>/<+N15+C13>/<+2C13> e.g. one row may look like this: 'NA/0.00 / 0.82/0.65 / NA/8.13 / NA/0.26'. You may use whitespaces at your leisure to ease reading.");

  defaultsToParam_();
}
void TMTEighteenPlexQuantitationMethod::updateMembers_()
{
channels_[0].description = param_.getValue("channel_126_description").toString();
channels_[1].description = param_.getValue("channel_127N_description").toString();
channels_[2].description = param_.getValue("channel_127C_description").toString();
channels_[3].description = param_.getValue("channel_128N_description").toString();
channels_[4].description = param_.getValue("channel_128C_description").toString();
channels_[5].description = param_.getValue("channel_129N_description").toString();
channels_[6].description = param_.getValue("channel_129C_description").toString();
channels_[7].description = param_.getValue("channel_130N_description").toString();
channels_[8].description = param_.getValue("channel_130C_description").toString();
channels_[9].description = param_.getValue("channel_131N_description").toString();
channels_[10].description = param_.getValue("channel_131C_description").toString();
channels_[11].description = param_.getValue("channel_132N_description").toString();
channels_[12].description = param_.getValue("channel_132C_description").toString();
channels_[13].description = param_.getValue("channel_133N_description").toString();
channels_[14].description = param_.getValue("channel_133C_description").toString();
channels_[15].description = param_.getValue("channel_134N_description").toString();
channels_[16].description = param_.getValue("channel_134C_description").toString();
channels_[17].description = param_.getValue("channel_135N_description").toString();
// compute the index of the reference channel
std::vector<std::string>::const_iterator t_it = std::find(TMTEighteenPlexQuantitationMethod::channel_names_.begin(),
TMTEighteenPlexQuantitationMethod::channel_names_.end(),
param_.getValue("reference_channel"));
reference_channel_ = t_it - TMTEighteenPlexQuantitationMethod::channel_names_.begin();
}
// Copy constructor: copies the base part, the channel table and the
// reference-channel index of the source object.
TMTEighteenPlexQuantitationMethod::TMTEighteenPlexQuantitationMethod(const TMTEighteenPlexQuantitationMethod& other):
  IsobaricQuantitationMethod(other)
{
  channels_ = other.channels_;
  reference_channel_ = other.reference_channel_;
}
// Defaulted copy assignment: member-wise copy is sufficient.
TMTEighteenPlexQuantitationMethod& TMTEighteenPlexQuantitationMethod::operator=(const TMTEighteenPlexQuantitationMethod& rhs)
= default;
// Returns the static method identifier ("tmt18plex").
const String& TMTEighteenPlexQuantitationMethod::getMethodName() const
{
  return TMTEighteenPlexQuantitationMethod::name_;
}
// Returns the list of the eighteen channels registered by the constructor.
const IsobaricQuantitationMethod::IsobaricChannelList& TMTEighteenPlexQuantitationMethod::getChannelInformation() const
{
  return channels_;
}
// Returns the number of channels of this method.
Size TMTEighteenPlexQuantitationMethod::getNumberOfChannels() const
{
  // derive the count from the channel table instead of hard-coding 18,
  // so this value cannot drift if the table ever changes
  return channels_.size();
}
// Parses the "correction_matrix" parameter rows into a numeric
// isotope-correction matrix.
Matrix<double> TMTEighteenPlexQuantitationMethod::getIsotopeCorrectionMatrix() const
{
  return stringListToIsotopeCorrectionMatrix_(
    ListUtils::toStringList<std::string>(getParameters().getValue("correction_matrix")));
}
// Returns the index of the reference channel (set from the
// "reference_channel" parameter in updateMembers_).
Size TMTEighteenPlexQuantitationMethod::getReferenceChannel() const
{
  return reference_channel_;
}
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/QUANTITATION/AbsoluteQuantitation.cpp | .cpp | 28,052 | 682 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Douglas McCloskey $
// $Authors: Douglas McCloskey, Pasquale Domenico Colaianni $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/QUANTITATION/AbsoluteQuantitation.h>
//Kernal classes
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/KERNEL/MSChromatogram.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/KERNEL/MRMTransitionGroup.h>
#include <OpenMS/KERNEL/MRMFeature.h>
#include <OpenMS/CONCEPT/LogStream.h>
//OpenSWATH classes
#include <OpenMS/ANALYSIS/OPENSWATH/MRMRTNormalizer.h>
//Analysis classes
#include <OpenMS/ANALYSIS/MAPMATCHING/TransformationModel.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/TransformationDescription.h>
#include <OpenMS/ANALYSIS/TARGETED/TargetedExperiment.h>
//Quantitation classes
#include <OpenMS/METADATA/AbsoluteQuantitationStandards.h>
#include <OpenMS/ANALYSIS/QUANTITATION/AbsoluteQuantitationMethod.h>
//Math classes
#include <OpenMS/MATH/StatisticFunctions.h>
//Standard library
#include <cstddef> // for size_t & ptrdiff_t
#include <vector>
#include <cmath>
#include <numeric>
#include <boost/math/special_functions/erf.hpp>
#include <algorithm>
namespace OpenMS
{
// Constructor: declares the default parameters controlling calibration-curve
// optimization (minimum points, bias/correlation thresholds, iteration limit,
// outlier detection and optimization strategy), then syncs the members.
AbsoluteQuantitation::AbsoluteQuantitation() :
  DefaultParamHandler("AbsoluteQuantitation")
{
  defaults_.setValue("min_points", 4, "The minimum number of calibrator points.");
  defaults_.setValue("max_bias", 30.0, "The maximum percent bias of any point in the calibration curve.");
  defaults_.setValue("min_correlation_coefficient", 0.9, "The minimum correlation coefficient value of the calibration curve.");
  defaults_.setValue("max_iters", 100, "The maximum number of iterations to find an optimal set of calibration curve points and parameters.");
  defaults_.setValue("outlier_detection_method", "iter_jackknife", "Outlier detection method to find and remove bad calibration points.");
  defaults_.setValidStrings("outlier_detection_method", {"iter_jackknife","iter_residual"});
  defaults_.setValue("use_chauvenet", "true", "Whether to only remove outliers that fulfill Chauvenet's criterion for outliers (otherwise it will remove any outlier candidate regardless of the criterion).");
  defaults_.setValidStrings("use_chauvenet", {"true","false"});
  defaults_.setValue("optimization_method", "iterative", "Calibrator optimization method to find the best set of calibration points for each method.");
  defaults_.setValidStrings("optimization_method", {"iterative"});

  // write defaults into Param object param_
  defaultsToParam_();
  updateMembers_();
}
// Synchronizes the cached members with the current parameter values
// (called whenever the Param object changes).
void AbsoluteQuantitation::updateMembers_()
{
  // named casts instead of C-style casts for greppable, intent-revealing conversions
  min_points_ = static_cast<size_t>(param_.getValue("min_points"));
  max_bias_ = static_cast<double>(param_.getValue("max_bias"));
  min_correlation_coefficient_ = static_cast<double>(param_.getValue("min_correlation_coefficient"));
  max_iters_ = static_cast<size_t>(param_.getValue("max_iters"));
  outlier_detection_method_ = param_.getValue("outlier_detection_method").toString();
  use_chauvenet_ = param_.getValue("use_chauvenet").toBool(); // toBool() already yields bool; extra cast was redundant
  optimization_method_ = param_.getValue("optimization_method").toString();
}
// Defaulted destructor: all members release their resources via RAII.
AbsoluteQuantitation::~AbsoluteQuantitation() = default;
void AbsoluteQuantitation::setQuantMethods(std::vector<AbsoluteQuantitationMethod>& quant_methods)
{
quant_methods_.clear();
for (size_t i = 0; i < quant_methods.size(); i++)
{
String component_name = quant_methods[i].getComponentName();
quant_methods_[component_name] = quant_methods[i];
}
}
std::vector<AbsoluteQuantitationMethod> AbsoluteQuantitation::getQuantMethods()
{
std::vector<AbsoluteQuantitationMethod> quant_methods;
for (auto const& quant_method : quant_methods_)
{
quant_methods.push_back(quant_method.second);
}
return quant_methods;
}
// Returns a copy of the component-name -> quantitation-method map.
std::map<String, AbsoluteQuantitationMethod> AbsoluteQuantitation::getQuantMethodsAsMap()
{
  return quant_methods_;
}
// Computes the ratio of a feature value between a component and its internal
// standard (IS). For feature_name == "intensity" the built-in intensities are
// used; otherwise the value is read from the features' meta values. If the IS
// is missing, the component's own value is returned as a fallback; if neither
// value is available, 0.0 is returned.
double AbsoluteQuantitation::calculateRatio(const Feature & component_1, const Feature & component_2, const String & feature_name)
{
  double ratio = 0.0;
  // member feature_name access
  if (feature_name == "intensity")
  {
    // NOTE(review): "native_id" is used only as a presence check here — the
    // ratio itself is built from the intensities, not from this meta value.
    if (component_1.metaValueExists("native_id") && component_2.metaValueExists("native_id"))
    {
      const double feature_1 = component_1.getIntensity();
      const double feature_2 = component_2.getIntensity();
      ratio = feature_1 / feature_2;
    }
    else if (component_1.metaValueExists("native_id"))
    {
      // no internal standard: fall back to the bare component intensity
      OPENMS_LOG_DEBUG << "Warning: no IS found for component " << component_1.getMetaValue("native_id") << ".";
      const double feature_1 = component_1.getIntensity();
      ratio = feature_1;
    }
  }
  // metaValue feature_name access
  else
  {
    if (component_1.metaValueExists(feature_name) && component_2.metaValueExists(feature_name))
    {
      const double feature_1 = component_1.getMetaValue(feature_name);
      const double feature_2 = component_2.getMetaValue(feature_name);
      ratio = feature_1/feature_2;
    }
    else if (component_1.metaValueExists(feature_name))
    {
      // no internal standard: fall back to the bare component value
      OPENMS_LOG_DEBUG << "Warning: no IS found for component " << component_1.getMetaValue("native_id") << ".";
      const double feature_1 = component_1.getMetaValue(feature_name);
      ratio = feature_1;
    }
    else
    {
      // neither feature carries the requested value; ratio stays 0.0
      OPENMS_LOG_DEBUG << "Feature metaValue " << feature_name << " not found for components " << component_1.getMetaValue("native_id") << " and " << component_2.getMetaValue("native_id") << ".";
    }
  }
  return ratio;
}
// Percent bias: the absolute error between actual and calculated
// concentration, relative to the actual concentration.
double AbsoluteQuantitation::calculateBias(const double & actual_concentration, const double & calculated_concentration)
{
  const double absolute_error = fabs(actual_concentration - calculated_concentration);
  return absolute_error / actual_concentration * 100;
}
// Builds (concentration ratio, feature ratio) calibration points from the
// standards and fits the requested transformation model to them; returns the
// fitted model parameters.
Param AbsoluteQuantitation::fitCalibration(
  const std::vector<AbsoluteQuantitationStandards::featureConcentration> & component_concentrations,
  const String & feature_name,
  const String & transformation_model,
  const Param & transformation_model_params)
{
  // assemble the calibration points
  TransformationModel::DataPoints calibration_points;
  for (const AbsoluteQuantitationStandards::featureConcentration& standard : component_concentrations)
  {
    TransformationModel::DataPoint point;
    // dilution-corrected concentration ratio of analyte to internal standard
    point.first = standard.actual_concentration / standard.IS_actual_concentration / standard.dilution_factor;
    // measured feature-value ratio of analyte to internal standard
    point.second = calculateRatio(standard.feature, standard.IS_feature, feature_name);
    calibration_points.push_back(point);
  }

  // fit the data to the model and hand back the resulting parameters
  TransformationDescription tmd(calibration_points);
  tmd.fitModel(transformation_model, transformation_model_params);
  return tmd.getModelParameters();
}
// Computes, for a set of calibration standards, the per-point percent bias
// (out-param `biases`) and the correlation coefficient R^2 between the
// (weighted) actual concentration ratios and feature-amount ratios
// (out-param `correlation_coefficient`).
void AbsoluteQuantitation::calculateBiasAndR(
  const std::vector<AbsoluteQuantitationStandards::featureConcentration> & component_concentrations,
  const String & feature_name,
  const String & transformation_model,
  const Param & transformation_model_params,
  std::vector<double> & biases,
  double & correlation_coefficient)
{
  // reset biases
  biases.clear();

  // collect the (actual concentration ratio, feature amount ratio) points
  TransformationModel::DataPoints data;
  TransformationModel::DataPoint point;
  for (size_t i = 0; i < component_concentrations.size(); ++i)
  {
    // calculate the actual and calculated concentration ratios
    double calculated_concentration_ratio = applyCalibration(component_concentrations[i].feature,
      component_concentrations[i].IS_feature,
      feature_name,
      transformation_model,
      transformation_model_params);

    double actual_concentration_ratio = component_concentrations[i].actual_concentration /
      component_concentrations[i].IS_actual_concentration / component_concentrations[i].dilution_factor;

    // extract out the feature amount ratios
    double feature_amount_ratio = calculateRatio(component_concentrations[i].feature,
      component_concentrations[i].IS_feature,
      feature_name);

    // calculate the bias
    biases.push_back(calculateBias(actual_concentration_ratio, calculated_concentration_ratio));

    point.first = actual_concentration_ratio;
    point.second = feature_amount_ratio;
    data.push_back(point);
    // NOTE: the previous revision also filled two local vectors
    // (concentration_ratios / feature_amounts_ratios) that were never read;
    // they have been removed as dead code.
  }

  // apply weighting to the feature amounts and actual concentration ratios
  TransformationModel tm(data, transformation_model_params);
  tm.weightData(data);
  std::vector<double> concentration_ratios_weighted, feature_amounts_ratios_weighted;
  for (size_t i = 0; i < data.size(); ++i)
  {
    concentration_ratios_weighted.push_back(data[i].first);
    feature_amounts_ratios_weighted.push_back(data[i].second);
  }

  // calculate the R2 (R2 = Pearson_R^2); use end() instead of begin()+size()
  correlation_coefficient = Math::pearsonCorrelationCoefficient(
    concentration_ratios_weighted.begin(), concentration_ratios_weighted.end(),
    feature_amounts_ratios_weighted.begin(), feature_amounts_ratios_weighted.end()
  );
}
double AbsoluteQuantitation::applyCalibration(const Feature & component,
const Feature & IS_component,
const String & feature_name,
const String & transformation_model,
const Param & transformation_model_params)
{
// calculate the ratio
double ratio = calculateRatio(component, IS_component, feature_name);
// calculate the absolute concentration
TransformationModel::DataPoints data;
TransformationDescription tmd(data);
// tmd.setDataPoints(data);
tmd.fitModel(transformation_model, transformation_model_params);
tmd.invert();
double calculated_concentration = tmd.apply(ratio);
// AbsoluteQuantitationMethod aqm;
// double calculated_concentration = aqm.evaluateTransformationModel(
// transformation_model, ratio, transformation_model_params);
// check for less than zero
if (calculated_concentration < 0.0)
{
calculated_concentration = 0.0;
}
return calculated_concentration;
}
// Quantifies every component (sub-feature) of the given FeatureMap: for each
// component with a registered quantitation method, looks up its internal
// standard (first within the same consensus feature, then across the whole
// map), applies the calibration and stores "calculated_concentration" and
// "concentration_units" meta values on the sub-feature. Components without a
// method get empty meta values.
void AbsoluteQuantitation::quantifyComponents(FeatureMap& unknowns)
{
  //Potential Optimizations: create a map for each unknown FeatureMap
  // to reduce multiple loops

  // initialize all other variables
  Feature empty_feature; // placeholder IS when the method defines no internal standard
  size_t IS_component_it(0), IS_component_group_it(0); // indices of the located IS (feature, sub-feature)

  // // iterate through the unknowns
  // for (size_t i = 0; i < unknowns.size(); i++)
  // {
  // iterate through each component_group/feature
  for (size_t feature_it = 0; feature_it < unknowns.size(); ++feature_it)
  {
    // NOTE(review): component_group_name and unknowns_quant_feature are
    // currently unused within this loop — TODO confirm they can be removed.
    String component_group_name = (String)unknowns[feature_it].getMetaValue("PeptideRef");
    Feature unknowns_quant_feature;
    // iterate through each component/sub-feature
    for (size_t sub_it = 0; sub_it < unknowns[feature_it].getSubordinates().size(); ++sub_it)
    {
      String component_name = (String)unknowns[feature_it].getSubordinates()[sub_it].getMetaValue("native_id");

      // apply the calibration curve to components that are in the quant_method
      if (quant_methods_.count(component_name)>0)
      {
        double calculated_concentration = 0.0;
        std::map<String,AbsoluteQuantitationMethod>::iterator quant_methods_it = quant_methods_.find(component_name);
        String quant_component_name = quant_methods_it->second.getComponentName();
        String quant_IS_component_name = quant_methods_it->second.getISName();
        String quant_feature_name = quant_methods_it->second.getFeatureName();
        if (!quant_IS_component_name.empty())
        {
          // look up the internal standard for the component
          bool IS_found = false;
          // Optimization: 90% of the IS will be in the same component_group/feature
          for (size_t is_sub_it = 0; is_sub_it < unknowns[feature_it].getSubordinates().size(); ++is_sub_it)
          {
            String IS_component_name = (String)unknowns[feature_it].getSubordinates()[is_sub_it].getMetaValue("native_id");

            if (quant_IS_component_name == IS_component_name)
            {
              IS_found = true;
              IS_component_group_it = feature_it;
              IS_component_it = is_sub_it;
              break;
            }
          }
          if (!IS_found)
          {// expand IS search to all components
            // iterate through each component_group/feature
            for (size_t is_feature_it = 0; is_feature_it < unknowns.size(); ++is_feature_it)
            {
              //iterate through each component/sub-feature
              for (size_t is_sub_it = 0; is_sub_it < unknowns[is_feature_it].getSubordinates().size(); ++is_sub_it)
              {
                String IS_component_name = (String)unknowns[is_feature_it].getSubordinates()[is_sub_it].getMetaValue("native_id");

                if (quant_IS_component_name == IS_component_name)
                {
                  IS_found = true;
                  IS_component_group_it = is_feature_it;
                  IS_component_it = is_sub_it;
                  break;
                }
              }
              if (IS_found)
              {
                break;
              }
            }
          }
          if (IS_found)
          {
            // IS located: quantify against it using the method's model
            String transformation_model = quant_methods_it->second.getTransformationModel();
            Param transformation_model_params = quant_methods_it->second.getTransformationModelParams();
            calculated_concentration = applyCalibration(
              unknowns[feature_it].getSubordinates()[sub_it],
              unknowns[IS_component_group_it].getSubordinates()[IS_component_it],
              quant_feature_name,transformation_model,transformation_model_params);
          }
          else
          {
            OPENMS_LOG_INFO << "Component " << component_name << " IS " << quant_IS_component_name << " was not found.";
            OPENMS_LOG_INFO << "No concentration will be calculated.\n";
          }
        }
        else
        {
          // no IS defined for this method: quantify against an empty feature
          String transformation_model = quant_methods_it->second.getTransformationModel();
          Param transformation_model_params = quant_methods_it->second.getTransformationModelParams();
          calculated_concentration = applyCalibration(
            unknowns[feature_it].getSubordinates()[sub_it],
            empty_feature,
            quant_feature_name,transformation_model,transformation_model_params);
        }

        // add new metadata (calculated_concentration, concentration_units) to the component
        unknowns[feature_it].getSubordinates()[sub_it].setMetaValue("calculated_concentration",calculated_concentration);
        String concentration_units = quant_methods_it->second.getConcentrationUnits();
        unknowns[feature_it].getSubordinates()[sub_it].setMetaValue("concentration_units",concentration_units);
        // calculate the bias?
      }
      else
      {
        OPENMS_LOG_INFO << "Component " << component_name << " does not have a quantitation method.";
        OPENMS_LOG_INFO << "No concentration will be calculated.\n";
        unknowns[feature_it].getSubordinates()[sub_it].setMetaValue("calculated_concentration","");
        unknowns[feature_it].getSubordinates()[sub_it].setMetaValue("concentration_units","");
      }
    }
  }
  // }
}
bool AbsoluteQuantitation::optimizeCalibrationCurveIterative(
  std::vector<AbsoluteQuantitationStandards::featureConcentration> & component_concentrations,
  const String & feature_name,
  const String & transformation_model,
  const Param & transformation_model_params,
  Param & optimized_params)
{
  // Iteratively fits a calibration curve, removing at most one outlier
  // candidate per iteration until the bias / correlation criteria are met
  // or the minimum number of calibration points would be violated.
  // On success, component_concentrations is replaced by the retained points
  // and optimized_params holds the fitted model parameters.
  // Returns true if a valid calibration was found, false otherwise.

  // sort standards from min to max concentration
  std::vector<AbsoluteQuantitationStandards::featureConcentration> component_concentrations_sorted = component_concentrations;
  std::sort(component_concentrations_sorted.begin(), component_concentrations_sorted.end(),
    [](const AbsoluteQuantitationStandards::featureConcentration& lhs, const AbsoluteQuantitationStandards::featureConcentration& rhs)
    {
      return lhs.actual_concentration < rhs.actual_concentration; //ascending order
    }
  );

  // indices of the points that are still part of the calibration
  std::vector<size_t> component_concentrations_sorted_indices;
  for (size_t index = 0; index < component_concentrations_sorted.size(); ++index)
  {
    component_concentrations_sorted_indices.push_back(index);
  }

  // starting parameters
  optimized_params = transformation_model_params;

  // one point can be removed per iteration at most, so size() iterations suffice
  for (size_t n_iters = 0; n_iters < component_concentrations_sorted.size(); ++n_iters)
  {
    // check if the min number of calibration points has been broken.
    // NOTE: checked before extracting the subset so we never index into an
    // empty subset below (the previous code read component_concentrations_sub[0]
    // even when the subset could be empty).
    if (component_concentrations_sorted_indices.size() < min_points_)
    {
      // component_concentrations_sorted is non-empty inside this loop and all
      // points belong to the same component, so its first entry carries the
      // same native_id as any retained subset entry would.
      OPENMS_LOG_INFO << "No optimal calibration found for " << component_concentrations_sorted[0].feature.getMetaValue("native_id") << " .";
      return false; //no optimal calibration found
    }

    // extract out the still-retained components
    const std::vector<AbsoluteQuantitationStandards::featureConcentration> component_concentrations_sub = extractComponents_(
      component_concentrations_sorted, component_concentrations_sorted_indices);

    // fit the model
    optimized_params = fitCalibration(component_concentrations_sub,
      feature_name,
      transformation_model,
      optimized_params);

    // calculate the R2 and bias
    std::vector<double> biases; // not needed (method parameters)
    double correlation_coefficient = 0.0; // not needed (method parameters)
    calculateBiasAndR(
      component_concentrations_sub,
      feature_name,
      transformation_model,
      optimized_params,
      biases,
      correlation_coefficient);

    // check that every per-point bias is within tolerance
    bool bias_check = true;
    for (size_t bias_it = 0; bias_it < biases.size(); ++bias_it)
    {
      if (biases[bias_it] > max_bias_)
      {
        bias_check = false;
      }
    }
    if (bias_check && correlation_coefficient > min_correlation_coefficient_)
    {
      OPENMS_LOG_INFO << "Valid calibration found for " << component_concentrations_sub[0].feature.getMetaValue("native_id") << " .";
      // copy over the final optimized points before exiting
      component_concentrations = component_concentrations_sub;
      return true; //optimal calibration found
    }

    // R2 and biases check failed, determine potential outlier
    int pos;
    if (outlier_detection_method_ == "iter_jackknife")
    {
      // get candidate outlier: removal of which datapoint results in best rsq?
      pos = jackknifeOutlierCandidate_(
        component_concentrations_sub,
        feature_name,
        transformation_model,
        optimized_params);
    }
    else if (outlier_detection_method_ == "iter_residual")
    {
      // get candidate outlier: removal of datapoint with largest residual?
      pos = residualOutlierCandidate_(
        component_concentrations_sub,
        feature_name,
        transformation_model,
        optimized_params);
    }
    else
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
        String("Method ") + outlier_detection_method_ + " is not a valid method for optimizeCalibrationCurveIterative");
    }

    // remove if residual is an outlier according to Chauvenet's criterion
    // or if testing is turned off
    if (!use_chauvenet_ || MRMRTNormalizer::chauvenet(biases, pos))
    {
      component_concentrations_sorted_indices.erase(component_concentrations_sorted_indices.begin() + pos);
    }
    else
    {
      return false; //no optimal calibration found
    }
  }
  return false; //no optimal calibration found
}
std::vector<AbsoluteQuantitationStandards::featureConcentration> AbsoluteQuantitation::extractComponents_(
  const std::vector<AbsoluteQuantitationStandards::featureConcentration> & component_concentrations,
  const std::vector<size_t>& component_concentrations_indices)
{
  // Collect the subset of component concentrations addressed by the given
  // indices, preserving the order of the index list.
  std::vector<AbsoluteQuantitationStandards::featureConcentration> subset;
  subset.reserve(component_concentrations_indices.size());
  for (const size_t idx : component_concentrations_indices)
  {
    subset.push_back(component_concentrations[idx]);
  }
  return subset;
}
// Jackknife outlier detection: for each data point, refit the calibration with
// that point removed and record the resulting correlation coefficient. The
// point whose removal yields the highest R is the outlier candidate; its
// position in the input vector is returned.
int AbsoluteQuantitation::jackknifeOutlierCandidate_(
const std::vector<AbsoluteQuantitationStandards::featureConcentration>& component_concentrations,
const String & feature_name,
const String & transformation_model,
const Param & transformation_model_params)
{
// Returns candidate outlier: A linear regression and rsq is calculated for
// the data points with one removed pair. The combination resulting in
// highest rsq is considered corresponding to the outlier candidate. The
// corresponding iterator position is then returned.
std::vector<double> rsq_tmp;
// NOTE: the fitted parameters carry over between leave-one-out iterations;
// each fit starts from the previous one rather than from the input params.
Param optimized_params = transformation_model_params;
for (Size i = 0; i < component_concentrations.size(); i++)
{
// leave-one-out copy of the calibration points
std::vector<AbsoluteQuantitationStandards::featureConcentration> component_concentrations_tmp = component_concentrations;
component_concentrations_tmp.erase(component_concentrations_tmp.begin() + i);
// fit the model
optimized_params = fitCalibration(component_concentrations_tmp,
feature_name,
transformation_model,
optimized_params);
// calculate the R2 and bias
std::vector<double> biases;
double correlation_coefficient = 0.0;
calculateBiasAndR(
component_concentrations_tmp,
feature_name,
transformation_model,
optimized_params,
biases,
correlation_coefficient);
rsq_tmp.push_back(correlation_coefficient);
}
// index of the best (highest) correlation coefficient == outlier candidate
return max_element(rsq_tmp.begin(), rsq_tmp.end()) - rsq_tmp.begin();
}
int AbsoluteQuantitation::residualOutlierCandidate_(
  const std::vector<AbsoluteQuantitationStandards::featureConcentration>& component_concentrations,
  const String & feature_name,
  const String & transformation_model,
  const Param & transformation_model_params)
{
  // Residual outlier detection: fit a single calibration over all data points
  // and return the position of the point with the largest bias (residual
  // error), which is the outlier candidate.
  const Param fitted_params = fitCalibration(component_concentrations,
    feature_name,
    transformation_model,
    transformation_model_params);

  // compute per-point biases; the correlation coefficient is not used here
  std::vector<double> residuals;
  double correlation_unused = 0.0;
  calculateBiasAndR(
    component_concentrations,
    feature_name,
    transformation_model,
    fitted_params,
    residuals,
    correlation_unused);

  // index of the largest residual
  return std::max_element(residuals.begin(), residuals.end()) - residuals.begin();
}
// Optimizes the calibration curve of every component that has both a
// quantitation method and standards in `components_concentrations`.
// For each such component the iterative optimization is run; the component's
// AbsoluteQuantitationMethod is updated with LLOQ/ULOQ (always, from the
// min/max standard concentrations) and, on success, with the fitted model
// parameters, correlation coefficient and number of points. On failure these
// are reset to zero. Throws IllegalArgument for any optimization method other
// than "iterative".
void AbsoluteQuantitation::optimizeCalibrationCurves(
std::map<String, std::vector<AbsoluteQuantitationStandards::featureConcentration>> & components_concentrations)
{
// short alias for the (mutated) standards map
std::map<String, std::vector<AbsoluteQuantitationStandards::featureConcentration>>& cc = components_concentrations;
for (std::pair<const String, AbsoluteQuantitationMethod>& quant_method : quant_methods_)
{
const String& component_name = quant_method.first;
AbsoluteQuantitationMethod& component_aqm = quant_method.second;
if (cc.count(component_name) && optimization_method_ == "iterative")
{
// optimize the calibration curve for the component
Param optimized_params;
bool optimal_calibration_found = optimizeCalibrationCurveIterative(
cc[component_name],
component_aqm.getFeatureName(),
component_aqm.getTransformationModel(),
component_aqm.getTransformationModelParams(),
optimized_params);
// order component concentrations and update the lloq and uloq
// NOTE: LLOQ/ULOQ are taken from the (possibly reduced) point set even when
// no optimal calibration was found; they are overwritten with 0.0 below in
// that case.
std::vector<AbsoluteQuantitationStandards::featureConcentration>::const_iterator it;
it = std::min_element(cc[component_name].begin(), cc[component_name].end(), [](
const AbsoluteQuantitationStandards::featureConcentration& lhs,
const AbsoluteQuantitationStandards::featureConcentration& rhs
)
{
return lhs.actual_concentration < rhs.actual_concentration;
}
);
component_aqm.setLLOQ(it->actual_concentration);
it = std::max_element(cc[component_name].begin(), cc[component_name].end(), [](
const AbsoluteQuantitationStandards::featureConcentration& lhs,
const AbsoluteQuantitationStandards::featureConcentration& rhs
)
{
return lhs.actual_concentration < rhs.actual_concentration;
}
);
component_aqm.setULOQ(it->actual_concentration);
if (optimal_calibration_found)
{
// calculate the R2 and bias
std::vector<double> biases;
double correlation_coefficient = 0.0;
calculateBiasAndR(
cc[component_name],
component_aqm.getFeatureName(),
component_aqm.getTransformationModel(),
optimized_params,
biases,
correlation_coefficient);
// record the updated information
component_aqm.setCorrelationCoefficient(correlation_coefficient);
component_aqm.setTransformationModelParams(optimized_params);
component_aqm.setNPoints(cc[component_name].size());
}
else
{
// no valid calibration: reset the method's fit statistics
component_aqm.setCorrelationCoefficient(0.0);
component_aqm.setNPoints(0);
component_aqm.setLLOQ(0.0);
component_aqm.setULOQ(0.0);
}
}
else if (optimization_method_ != "iterative")
{
// NOTE: an unsupported method throws even for components without standards
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Unsupported calibration curve optimization method '" + optimization_method_ + "'.");
}
else
{
OPENMS_LOG_DEBUG << "Warning: Standards not found for component " << component_name << ".";
}
}
}
void AbsoluteQuantitation::optimizeSingleCalibrationCurve(
const String& component_name,
std::vector<AbsoluteQuantitationStandards::featureConcentration>& component_concentrations
)
{
std::map<String, std::vector<AbsoluteQuantitationStandards::featureConcentration>> cc_map;
cc_map.insert({component_name, component_concentrations});
optimizeCalibrationCurves(cc_map);
component_concentrations = cc_map.at(component_name);
}
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/QUANTITATION/TMTElevenPlexQuantitationMethod.cpp | .cpp | 7,951 | 169 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Stephan Aiche $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/QUANTITATION/TMTElevenPlexQuantitationMethod.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <OpenMS/DATASTRUCTURES/Matrix.h>
#include <algorithm>
namespace OpenMS
{
// Unique identifier of this quantitation method (returned by getMethodName()).
const String TMTElevenPlexQuantitationMethod::name_ = "tmt11plex";
// Reporter ion channel names in ascending mass order; the order matches the
// channels_ list built in the constructor.
const std::vector<std::string> TMTElevenPlexQuantitationMethod::channel_names_ = {"126","127N","127C","128N","128C","129N","129C","130N","130C","131N","131C"};
// Sets up the 11 TMT reporter channels (name, index, empty description,
// reporter ion mass, and the indices of the -2/-1/+1/+2 Da isotope-impurity
// neighbor channels; -1 marks "no neighbor"), picks 126 as the default
// reference channel and registers the default parameters.
TMTElevenPlexQuantitationMethod::TMTElevenPlexQuantitationMethod()
{
setName("TMTElevenPlexQuantitationMethod");
// // mass map outline - for further details please see #2427 (was adapted for tmt11plex)
// "126", 126.127726, x, x, 127C, 128C
// "127N", 127.124761, x, x, 128N, 129N
// "127C", 127.131081, x, 126, 128C, 129C
// "128N", 128.128116, x, 127N, 129N, 130N
// "128C", 128.134436, 126, 127C, 129C, 130C
// "129N", 129.131471, 127N, 128N, 130N, 131N
// "129C", 129.137790, 127C, 128C, 130C, 131C
// "130N", 130.134825, 128N, 129N, 131N, x
// "130C", 130.141145, 128C, 129C, 131C, x
// "131N", 131.138180, 129N, 130N, x, x
// "131C", 131.144500, 129C, 130C, x, x
// create the channel map //-2 -1 +1 +2
channels_.push_back(IsobaricChannelInformation("126", 0, "", 126.127726, {-1, -1, 2, 4}));
channels_.push_back(IsobaricChannelInformation("127N", 1, "", 127.124761, {-1, -1, 3, 5}));
channels_.push_back(IsobaricChannelInformation("127C", 2, "", 127.131081, {-1, 0, 4, 6}));
channels_.push_back(IsobaricChannelInformation("128N", 3, "", 128.128116, {-1, 1, 5, 7}));
channels_.push_back(IsobaricChannelInformation("128C", 4, "", 128.134436, {0, 2, 6, 8}));
channels_.push_back(IsobaricChannelInformation("129N", 5, "", 129.131471, {1, 3, 7, 9}));
channels_.push_back(IsobaricChannelInformation("129C", 6, "", 129.137790, {2, 4, 8, 10}));
channels_.push_back(IsobaricChannelInformation("130N", 7, "", 130.134825, {3, 5, 9, -1}));
channels_.push_back(IsobaricChannelInformation("130C", 8, "", 130.141145, {4, 6, 10, -1}));
channels_.push_back(IsobaricChannelInformation("131N", 9, "", 131.138180, {5, 7, -1, -1}));
channels_.push_back(IsobaricChannelInformation("131C", 10, "", 131.144500, {6, 8, -1, -1}));
// Original 10plex channel
// channels_.push_back(IsobaricChannelInformation("131", 9, "", 131.138180, 5, 7, -1, -1));
// we assume 126 to be the reference
reference_channel_ = 0;
setDefaultParams_();
}
// Registers the default parameters: one free-text description per channel,
// the reference channel (restricted to the valid channel names) and the
// default isotope-impurity correction matrix.
void TMTElevenPlexQuantitationMethod::setDefaultParams_()
{
defaults_.setValue("channel_126_description", "", "Description for the content of the 126 channel.");
defaults_.setValue("channel_127N_description", "", "Description for the content of the 127N channel.");
defaults_.setValue("channel_127C_description", "", "Description for the content of the 127C channel.");
defaults_.setValue("channel_128N_description", "", "Description for the content of the 128N channel.");
defaults_.setValue("channel_128C_description", "", "Description for the content of the 128C channel.");
defaults_.setValue("channel_129N_description", "", "Description for the content of the 129N channel.");
defaults_.setValue("channel_129C_description", "", "Description for the content of the 129C channel.");
defaults_.setValue("channel_130N_description", "", "Description for the content of the 130N channel.");
defaults_.setValue("channel_130C_description", "", "Description for the content of the 130C channel.");
defaults_.setValue("channel_131N_description", "", "Description for the content of the 131N channel.");
defaults_.setValue("channel_131C_description", "", "Description for the content of the 131C channel.");
defaults_.setValue("reference_channel", "126", "The reference channel (126, 127N, 127C, 128N, 128C, 129N, 129C, 130N, 130C, 131N, 131C).");
defaults_.setValidStrings("reference_channel", TMTElevenPlexQuantitationMethod::channel_names_);
// default: Product Number: A37725 Lot Number: ZF395505
// One row per channel (126 ... 131C), percentages at -2/-1/+1/+2 Da.
defaults_.setValue("correction_matrix", std::vector<std::string>{
"0.0/0.0/8.6/0.3",
"0.0/0.1/7.8/0.1",
"0.0/0.8/6.9/0.1",
"0.0/7.4/7.4/0.0",
"0.0/1.5/6.2/0.2",
"0.0/1.5/5.7/0.1",
"0.0/2.6/4.8/0.0",
"0.0/2.2/4.6/0.0",
"0.0/2.8/4.5/0.1",
"0.1/2.9/3.8/0.0",
"0.0/3.9/2.8/0.0"
},
"Correction matrix for isotope distributions (see documentation); use the following format: <-2Da>/<-1Da>/<+1Da>/<+2Da>; e.g. '0/0.3/4/0', '0.1/0.3/3/0.2'");
defaultsToParam_();
}
void TMTElevenPlexQuantitationMethod::updateMembers_()
{
channels_[0].description = param_.getValue("channel_126_description").toString();
channels_[1].description = param_.getValue("channel_127N_description").toString();
channels_[2].description = param_.getValue("channel_127C_description").toString();
channels_[3].description = param_.getValue("channel_128N_description").toString();
channels_[4].description = param_.getValue("channel_128C_description").toString();
channels_[5].description = param_.getValue("channel_129N_description").toString();
channels_[6].description = param_.getValue("channel_129C_description").toString();
channels_[7].description = param_.getValue("channel_130N_description").toString();
channels_[8].description = param_.getValue("channel_130C_description").toString();
channels_[9].description = param_.getValue("channel_131N_description").toString();
channels_[10].description = param_.getValue("channel_131C_description").toString();
// compute the index of the reference channel
std::vector<std::string>::const_iterator t_it = std::find(TMTElevenPlexQuantitationMethod::channel_names_.begin(),
TMTElevenPlexQuantitationMethod::channel_names_.end(),
param_.getValue("reference_channel"));
reference_channel_ = t_it - TMTElevenPlexQuantitationMethod::channel_names_.begin();
}
TMTElevenPlexQuantitationMethod::TMTElevenPlexQuantitationMethod(const TMTElevenPlexQuantitationMethod& other):
  IsobaricQuantitationMethod(other)
{
  // The base class copies the parameter state; duplicate the channel list and
  // reference channel here.
  channels_ = other.channels_;
  reference_channel_ = other.reference_channel_;
}
TMTElevenPlexQuantitationMethod& TMTElevenPlexQuantitationMethod::operator=(const TMTElevenPlexQuantitationMethod& rhs)
{
  // Self-assignment guard.
  if (this == &rhs)
  {
    return *this;
  }
  // Also assign the base class state (parameter values etc.). The copy
  // constructor copies the base, so assignment must do the same to stay
  // consistent; previously the base state was silently left untouched.
  IsobaricQuantitationMethod::operator=(rhs);
  channels_ = rhs.channels_;
  reference_channel_ = rhs.reference_channel_;
  return *this;
}
const String& TMTElevenPlexQuantitationMethod::getMethodName() const
{
  // Static method identifier ("tmt11plex").
  return name_;
}
const IsobaricQuantitationMethod::IsobaricChannelList& TMTElevenPlexQuantitationMethod::getChannelInformation() const
{
  // Read-only access to the eleven reporter channels set up in the constructor.
  return channels_;
}
Size TMTElevenPlexQuantitationMethod::getNumberOfChannels() const
{
  // TMT 11-plex always provides eleven reporter channels.
  return 11;
}
Matrix<double> TMTElevenPlexQuantitationMethod::getIsotopeCorrectionMatrix() const
{
  // Convert the "correction_matrix" parameter (one "<-2Da>/<-1Da>/<+1Da>/<+2Da>"
  // string per channel) into a numeric isotope correction matrix.
  const StringList matrix_rows = ListUtils::toStringList<std::string>(getParameters().getValue("correction_matrix"));
  return stringListToIsotopeCorrectionMatrix_(matrix_rows);
}
Size TMTElevenPlexQuantitationMethod::getReferenceChannel() const
{
  // 0-based index of the reference channel within the channel list.
  return reference_channel_;
}
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/QUANTITATION/ProteinInference.cpp | .cpp | 7,351 | 209 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: $
// --------------------------------------------------------------------------
//
#include <OpenMS/ANALYSIS/QUANTITATION/ProteinInference.h>
#include <OpenMS/METADATA/ProteinIdentification.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <OpenMS/KERNEL/ConsensusMap.h>
#include <map>
namespace OpenMS
{
// The class holds no state requiring custom handling, so construction, copy
// and assignment are all compiler-generated.
ProteinInference::ProteinInference() = default;
ProteinInference::ProteinInference(const ProteinInference& /*cp*/) = default;
ProteinInference& ProteinInference::operator=(const ProteinInference& /*rhs*/) = default;
void ProteinInference::infer(ConsensusMap& consensus_map, const UInt reference_map)
{
  // Proteins are inferred for every IdentificationRun separately; combine the
  // runs beforehand if a joint inference is desired. Each ProteinIdentification
  // will be augmented with the quantification (where possible).
  const size_t run_count = consensus_map.getProteinIdentifications().size();
  for (size_t run = 0; run < run_count; ++run)
  {
    infer_(consensus_map, run, reference_map);
  }
}
// For one identification run: for every protein hit, collect the best unique
// peptide hit per consensus element that references the protein, compute
// per-map intensity ratios relative to `reference_map`, and annotate the
// protein hit with "ratio_<map>" (median ratio), "coverage" (% of the protein
// sequence covered by unique peptides) and "hits" (number of unique peptides).
void ProteinInference::infer_(ConsensusMap& consensus_map,
const size_t protein_idenfication_index,
const UInt reference_map)
{
ProteinIdentification& protein_ident = consensus_map.getProteinIdentifications()[protein_idenfication_index];
for (size_t i = 0; i < protein_ident.getHits().size(); ++i)
{
// Protein Accession
String accession = protein_ident.getHits()[i].getAccession();
// consensus feature -> peptide hit
std::map<size_t, PeptideHit> consensus_to_peptide;
// search for it in consensus elements:
for (size_t i_cm = 0; i_cm < consensus_map.size(); ++i_cm)
{
// best unique hit of each PeptideIdentification of this consensus element
std::vector<PeptideHit> peptide_hits;
for (PeptideIdentificationList::iterator it_pepid = consensus_map[i_cm].getPeptideIdentifications().begin();
it_pepid != consensus_map[i_cm].getPeptideIdentifications().end();
++it_pepid)
{
// are Protein- and PeptideIdentification from the same search engine run?
if (it_pepid->getIdentifier() != protein_ident.getIdentifier())
continue;
// restrict the peptide hits to those referencing the current protein
std::set<String> accessions;
accessions.insert(accession);
std::vector<PeptideHit> peptide_hits_local = PeptideIdentification::getReferencingHits(it_pepid->getHits(), accessions);
if (peptide_hits_local.empty())
{
continue;
}
if (sortByUnique_(peptide_hits_local, it_pepid->isHigherScoreBetter())) // we found a unique peptide
{
peptide_hits.push_back(peptide_hits_local[0]);
}
}
// if several PeptideIdentifications (==Spectra) were assigned to current ConsensusElement
// --> take the best (as above), e.g. in SILAC this could happen
// TODO: better idea?
if (!peptide_hits.empty())
{
if (sortByUnique_(peptide_hits, consensus_map[i_cm].getPeptideIdentifications()[0].isHigherScoreBetter())) //found a unique peptide for current ConsensusElement
{
consensus_to_peptide[i_cm] = peptide_hits[0];
#ifdef DEBUG_INFERENCE
std::cout << "assign peptide " << peptide_hits[0].getSequence() << " to Protein " << accession << std::endl;
#endif
}
}
} // ! ConsensusMap loop
// no peptides found that match current Protein
if (consensus_to_peptide.empty())
continue;
// Use all matching ConsensusElements to derive a quantitation for current protein
// build up ratios for every map vs reference
double coverage = 0;
std::map<Size, std::vector<IntensityType> > ratios;
// number of unique peptides pointing to current protein
UInt coverage_count = (UInt)consensus_to_peptide.size();
for (std::map<size_t, PeptideHit>::iterator it_pephits = consensus_to_peptide.begin();
it_pephits != consensus_to_peptide.end();
++it_pephits)
{
// accumulate covered residues (converted to % of protein length below)
coverage += it_pephits->second.getSequence().size();
const ConsensusFeature::HandleSetType& handles = consensus_map[it_pephits->first].getFeatures();
//search if reference is present
ConsensusFeature::HandleSetType::const_iterator it_ref = handles.end();
for (ConsensusFeature::HandleSetType::const_iterator it = handles.begin();
it != handles.end();
++it)
{
if (it->getMapIndex() == reference_map)
{
it_ref = it;
break;
}
}
// did not find a reference
// TODO assume intensity==0 instead??
if (it_ref == handles.end())
continue;
// ratio of every map's intensity vs the reference map's intensity
for (ConsensusFeature::HandleSetType::const_iterator it = handles.begin();
it != handles.end();
++it)
{
ratios[it->getMapIndex()].push_back(it->getIntensity() / it_ref->getIntensity());
}
}
// sort ratios map-wise and take median
for (ConsensusMap::ColumnHeaders::const_iterator it_file = consensus_map.getColumnHeaders().begin();
it_file != consensus_map.getColumnHeaders().end();
++it_file)
{
if (ratios.find(it_file->first) != ratios.end())
{
//sort intensity ratios for map #it_file->first
std::sort(ratios[it_file->first].begin(), ratios[it_file->first].end());
//take median
IntensityType protein_ratio = ratios[it_file->first][ratios[it_file->first].size() / 2];
//TODO if ratios have high variance emit a warning!
protein_ident.getHits()[i].setMetaValue(String("ratio_") + String(it_file->first), protein_ratio);
}
} // ! map loop
// % coverage of protein by peptides
coverage /= double(protein_ident.getHits()[i].getSequence().size()) / 100;
protein_ident.getHits()[i].setMetaValue("coverage", coverage);
protein_ident.getHits()[i].setMetaValue("hits", coverage_count);
} // ! Protein loop
// protein_to_peptides now contains the Protein -> Peptides mapping
// lets estimate the
}
bool ProteinInference::sortByUnique_(std::vector<PeptideHit>& peptide_hits_local, const bool is_higher_score_better)
{
if (peptide_hits_local.empty())
return false;
// several peptideHits from (the same) spectrum point to current Protein
// -> take the best
if (peptide_hits_local.size() > 1)
{
std::sort(peptide_hits_local.begin(), peptide_hits_local.end(), PeptideHit::ScoreLess());
if (is_higher_score_better)
{
peptide_hits_local[0] = peptide_hits_local[peptide_hits_local.size() - 1];
}
}
//-> lets see if its unique:
std::set<String> protein_accessions = peptide_hits_local[0].extractProteinAccessionsSet();
if (protein_accessions.size() != 1)
{
// this is a shared peptide --> do not use it
return false;
}
else
{
return true;
}
// the first element now contains the best peptideHit
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/QUANTITATION/IsotopeLabelingMDVs.cpp | .cpp | 15,520 | 361 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Ahmed Khalil $
// $Authors: Ahmed Khalil $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/QUANTITATION/IsotopeLabelingMDVs.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/CoarseIsotopePatternGenerator.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/IsotopeDistribution.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/CONCEPT/LogStream.h>
//Standard library
#include <cstddef> // for size_t & ptrdiff_t
#include <vector>
#include <string>
#include <cmath>
#include <numeric>
//#include <unordered_map>
#include <algorithm>
#include <OpenMS/DATASTRUCTURES/MatrixEigen.h>
#include <Eigen/LU>
namespace OpenMS
{
// Human-readable names for the DerivatizationAgent enum, indexed by enum value.
const std::string IsotopeLabelingMDVs::NamesOfDerivatizationAgent[] = {"NOT_SELECTED", "TBDMS"};
// Human-readable names for the MassIntensityType enum, indexed by enum value.
const std::string IsotopeLabelingMDVs::NamesOfMassIntensityType[] = {"norm_max", "norm_sum"};
// Construct with the DefaultParamHandler name; no parameters are registered.
IsotopeLabelingMDVs::IsotopeLabelingMDVs() :
DefaultParamHandler("IsotopeLabelingMDVs")
{
}
IsotopeLabelingMDVs::~IsotopeLabelingMDVs() = default;
// No parameter-dependent members to update (parameter list is empty).
void IsotopeLabelingMDVs::updateMembers_()
{
}
// Applies isotope-impurity correction to a normalized feature:
// MDV_corrected = inverse(correction_matrix) * MDV_observed, where
// MDV_observed is read from each subordinate's "peak_apex_int" meta value and
// the corrected values are written back to the subordinates of
// `corrected_feature`. The matrix either comes from the caller
// (`correction_matrix`) or from a built-in table selected via
// `correction_matrix_agent`. Throws InvalidParameter if the supplied matrix
// is a (non-empty) identity matrix, since that would be a no-op correction.
void IsotopeLabelingMDVs::isotopicCorrection(
const Feature& normalized_feature,
Feature& corrected_feature,
const Matrix<double>& correction_matrix,
const DerivatizationAgent& correction_matrix_agent)
{
// MDV_corrected = correction_matrix_inversed * MDV_observed (normalized_features)
auto em = eigenView(correction_matrix);
if (em.isIdentity() && !(correction_matrix.empty()))
{
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"IsotopeLabelingMDVs: The given isotope correction matrix is an identity matrix leading to no correction."
"Please provide a valid correction_matrix.");
}
/// Correction Matrices for various derivatization agents
const std::map<DerivatizationAgent, std::vector<std::vector<double>> > correction_matrices =
{
{ DerivatizationAgent::TBDMS, {{0.8213, 0.1053, 0.0734, 0.0000},
{0.8420, 0.0963, 0.0617, 0.0000},
{0.8466, 0.0957, 0.0343, 0.0233},
{0.8484, 0.0954, 0.0337, 0.0225}}
}
};
Eigen::MatrixXd correction_matrix_eigen;
auto correction_matrix_search = correction_matrices.find(correction_matrix_agent);
// use the internally saved correction matrix if the derivatization agent name is supplied and found
if (correction_matrix_agent != DerivatizationAgent::NOT_SELECTED && correction_matrix_search != correction_matrices.end())
{
correction_matrix_eigen.resize(correction_matrix_search->second.size(), correction_matrix_search->second[0].size());
for (size_t i = 0; i < correction_matrix_search->second.size(); ++i)
{
for (size_t j = 0; j < correction_matrix_search->second[0].size(); ++j)
{
correction_matrix_eigen(i,j) = correction_matrix_search->second[i][j];
}
}
}
// copy correction_matrix to an eigen matrix, when correction_matrix is supplied
else
{
correction_matrix_eigen.resize(correction_matrix.rows(), correction_matrix.cols());
for (long int i = 0; i < correction_matrix.rows(); ++i)
{
for (long int j = 0; j < correction_matrix.cols(); ++j)
{
correction_matrix_eigen(i,j) = correction_matrix(i,j);
}
}
}
// 1- inversion of correction matrix
Eigen::MatrixXd correction_matrix_eigen_inversed = correction_matrix_eigen.inverse();
// 2- element-wise expansion with MDV_observed
std::vector<double> MDV_observed;
for (const auto& feature : normalized_feature.getSubordinates())
{
MDV_observed.push_back(feature.getMetaValue("peak_apex_int"));
}
corrected_feature = normalized_feature;
// Update MDV_observed to match the size of inversed correction_matrix
// (pad missing observations with zeros)
if (static_cast<unsigned long>(correction_matrix_eigen_inversed.cols()) > MDV_observed.size())
{
size_t resize_diff = correction_matrix_eigen_inversed.cols() - MDV_observed.size();
for (size_t i = 0; i < resize_diff; ++i)
{
MDV_observed.push_back(0.0);
}
}
// Expand the inversed correction_matrix to be of an equivalent size to MDV_observed
// (new rows/columns are zero-filled, so extra observations contribute nothing)
else if (MDV_observed.size() > static_cast<unsigned long>(correction_matrix_eigen_inversed.cols()))
{
size_t resize_diff = MDV_observed.size() - correction_matrix_eigen_inversed.cols();
correction_matrix_eigen_inversed.conservativeResize(correction_matrix_eigen_inversed.rows() + resize_diff,
correction_matrix_eigen_inversed.cols() + resize_diff);
for (int i = correction_matrix_eigen_inversed.rows() - resize_diff; i < correction_matrix_eigen_inversed.rows(); ++i)
{
for (int j = correction_matrix_eigen_inversed.cols() - resize_diff; j < correction_matrix_eigen_inversed.cols(); ++j)
{
correction_matrix_eigen_inversed(i,j) = 0.0;
}
}
}
// matrix-vector product; NaN results (e.g. from a singular matrix) become 0.0
for (int i = 0; i < correction_matrix_eigen_inversed.rows(); ++i)
{
double corrected_value = 0.0;
for (int j = 0; j < correction_matrix_eigen_inversed.cols(); ++j)
{
corrected_value += correction_matrix_eigen_inversed(i,j) * MDV_observed[j];
}
corrected_feature.getSubordinates().at(i).setMetaValue("peak_apex_int", std::isnan(corrected_value) ? 0.0 : corrected_value);
}
}
void IsotopeLabelingMDVs::isotopicCorrections(
  const FeatureMap& normalized_featureMap,
  FeatureMap& corrected_featureMap,
  const Matrix<double>& correction_matrix,
  const DerivatizationAgent& correction_matrix_agent)
{
  // Apply the per-feature isotopic correction to every feature of the map,
  // appending each corrected feature to the output map.
  for (const Feature& normalized : normalized_featureMap)
  {
    Feature corrected;
    isotopicCorrection(normalized, corrected, correction_matrix, correction_matrix_agent);
    corrected_featureMap.push_back(corrected);
  }
}
void IsotopeLabelingMDVs::calculateIsotopicPurity(
  Feature& normalized_featuremap,
  const std::vector<double>& experiment_data,
  const std::string& isotopic_purity_name)
{
  // Estimates the isotopic purity from the most intense peak of the measured
  // isotopomer distribution and stores it as meta value
  // `isotopic_purity_name` on the feature. No value is set when the data is
  // empty, the most intense peak is the first one, or its intensity is zero.
  if (experiment_data.empty())
  {
    return;
  }
  // locate the most intense peak directly on the input
  // (the previous implementation copied the whole vector just to do this)
  const std::vector<double>::const_iterator max_it = std::max_element(experiment_data.begin(), experiment_data.end());
  const size_t experiment_data_peak_idx = std::distance(experiment_data.begin(), max_it);
  const double experiment_data_peak = *max_it;
  if (experiment_data_peak_idx >= 1 && experiment_data_peak != 0.0)
  {
    const double previous_experiment_data_peak = experiment_data[experiment_data_peak_idx - 1];
    // purity = idx / (idx + I(idx-1) / I(idx))
    const double isotopic_purity = experiment_data_peak_idx / (experiment_data_peak_idx + (previous_experiment_data_peak / experiment_data_peak));
    normalized_featuremap.setMetaValue(isotopic_purity_name, isotopic_purity);
  }
}
void IsotopeLabelingMDVs::calculateIsotopicPurities(
  FeatureMap& normalized_featureMap,
  const std::vector<std::vector<double>>& experiment_data,
  const std::vector<std::string>& isotopic_purity_names)
{
  // Per-feature wrapper: feature i is paired with experiment_data[i] and
  // isotopic_purity_names[i] (all three containers run in parallel; .at()
  // throws on a length mismatch, as before).
  for (size_t i = 0; i < normalized_featureMap.size(); ++i)
  {
    calculateIsotopicPurity(normalized_featureMap.at(i), experiment_data.at(i), isotopic_purity_names.at(i));
  }
}
void IsotopeLabelingMDVs::calculateMDVAccuracy(
  Feature& normalized_feature,
  const std::string& feature_name,
  const std::string& fragment_isotopomer_theoretical_formula)
{
  // Compares the measured isotopomer distribution (read from the feature's
  // subordinates via `feature_name`, or the subordinate intensity for
  // "intensity") against the theoretical distribution of
  // `fragment_isotopomer_theoretical_formula`, then stores
  // - "average_accuracy" (mean absolute deviation of the differences) on the feature,
  // - "absolute_difference" on every subordinate.
  std::vector<double> fragment_isotopomer_theoretical, fragment_isotopomer_measured;
  for (auto it = normalized_feature.getSubordinates().begin(); it != normalized_feature.getSubordinates().end(); it++)
  {
    if (feature_name == "intensity")
    {
      fragment_isotopomer_measured.push_back((double)(it->getIntensity()));
    }
    else if (feature_name != "intensity" && it->metaValueExists(feature_name))
    {
      fragment_isotopomer_measured.push_back(it->getMetaValue(feature_name));
    }
  }
  if (normalized_feature.getSubordinates().size() != fragment_isotopomer_measured.size() || fragment_isotopomer_measured.empty())
  {
    OPENMS_LOG_FATAL_ERROR << "Missing values for the Measured Isotopomer Fragment, Please make sure the Subordinates are accordingly updated." << std::endl;
    // Bail out: continuing with fewer measured values than subordinates would
    // throw std::out_of_range from the .at() annotation loop below (the
    // previous code logged the error but fell through anyway).
    return;
  }
  // Generate theoretical values with the exact same length as fragment_isotopomer_measured
  IsotopeDistribution theoretical_iso(EmpiricalFormula(fragment_isotopomer_theoretical_formula).getIsotopeDistribution(CoarseIsotopePatternGenerator(fragment_isotopomer_measured.size())));
  for (IsotopeDistribution::ConstIterator it = theoretical_iso.begin(); it != theoretical_iso.end(); ++it)
  {
    fragment_isotopomer_theoretical.push_back( it->getIntensity() );
  }
  // element-wise absolute difference between theoretical and measured distribution
  std::vector<double> fragment_isotopomer_abs_diff;
  for (size_t i = 0; i < fragment_isotopomer_theoretical.size(); ++i)
  {
    fragment_isotopomer_abs_diff.push_back(std::fabs(fragment_isotopomer_theoretical[i] - fragment_isotopomer_measured[i]));
  }
  if (!fragment_isotopomer_abs_diff.empty())
  {
    double diff_mean = OpenMS::Math::mean(fragment_isotopomer_abs_diff.begin(), fragment_isotopomer_abs_diff.end());
    diff_mean = OpenMS::Math::MeanAbsoluteDeviation(fragment_isotopomer_abs_diff.begin(), fragment_isotopomer_abs_diff.end(), diff_mean);
    normalized_feature.setMetaValue("average_accuracy", diff_mean);
    for (size_t feature_subordinate = 0; feature_subordinate < normalized_feature.getSubordinates().size(); ++feature_subordinate)
    {
      normalized_feature.getSubordinates().at(feature_subordinate).setMetaValue("absolute_difference", fragment_isotopomer_abs_diff.at(feature_subordinate));
    }
  }
}
/// Applies calculateMDVAccuracy() to every feature of the map, looking up each
/// feature's theoretical formula by its "PeptideRef" meta value.
/// @param normalized_featureMap  features to annotate (modified in place)
/// @param feature_name           passed through to calculateMDVAccuracy()
/// @param fragment_isotopomer_theoretical_formulas  map from PeptideRef to empirical formula
void IsotopeLabelingMDVs::calculateMDVAccuracies(
  FeatureMap& normalized_featureMap,
  const std::string& feature_name,
  const std::map<std::string, std::string>& fragment_isotopomer_theoretical_formulas)
{
  for (size_t feature_idx = 0; feature_idx < normalized_featureMap.size(); ++feature_idx)
  {
    Feature& feature = normalized_featureMap.at(feature_idx);
    if (!feature.metaValueExists("PeptideRef"))
    {
      OPENMS_LOG_ERROR << "No PeptideRef in FeatureMap (MetaValue doesn't exist)!" << std::endl;
      continue;
    }
    const std::string peptide_ref = (std::string)feature.getMetaValue("PeptideRef");
    const auto formula_it = fragment_isotopomer_theoretical_formulas.find(peptide_ref);
    // BUGFIX: the previous code dereferenced find()'s result unconditionally,
    // which is undefined behavior when the PeptideRef has no formula entry.
    if (formula_it == fragment_isotopomer_theoretical_formulas.end())
    {
      OPENMS_LOG_ERROR << "No theoretical formula found for PeptideRef '" << peptide_ref << "'!" << std::endl;
      continue;
    }
    calculateMDVAccuracy(feature, feature_name, formula_it->second);
  }
}
void IsotopeLabelingMDVs::calculateMDV(
const Feature& measured_feature,
Feature& normalized_feature,
const MassIntensityType& mass_intensity_type,
const std::string& feature_name)
{
std::vector<Feature> measured_feature_subordinates = measured_feature.getSubordinates();
normalized_feature = measured_feature;
if (mass_intensity_type == MassIntensityType::NORM_MAX)
{
if (feature_name == "intensity")
{
std::vector<OpenMS::Peak2D::IntensityType> intensities_vec;
for (auto it = measured_feature_subordinates.begin(); it != measured_feature_subordinates.end(); it++)
{
intensities_vec.push_back(it->getIntensity());
}
std::vector<OpenMS::Peak2D::IntensityType>::iterator max_it = std::max_element(intensities_vec.begin(), intensities_vec.end());
double measured_feature_max = intensities_vec[std::distance(intensities_vec.begin(), max_it)];
if (measured_feature_max != 0.0)
{
for (size_t i = 0; i < normalized_feature.getSubordinates().size(); ++i)
{
normalized_feature.getSubordinates().at(i).setIntensity(normalized_feature.getSubordinates().at(i).getIntensity() / measured_feature_max);
}
}
}
// for every other case where feature_name isn't 'intensity', i.e. 'peak_apex_int'
else
{
std::vector<OpenMS::Peak2D::IntensityType> intensities_vec;
for (auto it = measured_feature_subordinates.begin(); it != measured_feature_subordinates.end(); it++)
{
intensities_vec.push_back(it->getMetaValue(feature_name));
}
std::vector<OpenMS::Peak2D::IntensityType>::iterator max_it = std::max_element(intensities_vec.begin(), intensities_vec.end());
double measured_feature_max = intensities_vec[std::distance(intensities_vec.begin(), max_it)];
if (measured_feature_max != 0.0)
{
for (size_t i = 0; i < normalized_feature.getSubordinates().size(); ++i)
{
normalized_feature.getSubordinates().at(i).setMetaValue(feature_name, (OpenMS::Peak2D::IntensityType)measured_feature_subordinates.at(i).getMetaValue(feature_name) / measured_feature_max);
}
}
}
}
else if (mass_intensity_type == MassIntensityType::NORM_SUM)
{
if (feature_name == "intensity")
{
OpenMS::Peak2D::IntensityType feature_peak_apex_intensity_sum = 0.0;
for (auto it = measured_feature_subordinates.begin(); it != measured_feature_subordinates.end(); it++)
{
feature_peak_apex_intensity_sum += it->getIntensity();
}
for (auto it = measured_feature_subordinates.begin(); it != measured_feature_subordinates.end(); it++)
{
normalized_feature.getSubordinates().at(it - measured_feature_subordinates.begin()).setIntensity((it->getIntensity() / feature_peak_apex_intensity_sum));
}
}
// for every other case where feature_name isn't 'intensity', i.e. 'peak_apex_int'
else
{
OpenMS::Peak2D::IntensityType feature_peak_apex_intensity_sum = 0.0;
for (auto it = measured_feature_subordinates.begin(); it != measured_feature_subordinates.end(); it++)
{
feature_peak_apex_intensity_sum += (Peak2D::IntensityType)it->getMetaValue(feature_name);
}
if (feature_peak_apex_intensity_sum != 0.0)
{
for (size_t i = 0; i < normalized_feature.getSubordinates().size(); ++i)
{
normalized_feature.getSubordinates().at(i).setMetaValue(feature_name, (OpenMS::Peak2D::IntensityType)measured_feature_subordinates.at(i).getMetaValue(feature_name) / feature_peak_apex_intensity_sum);
}
}
}
}
}
void IsotopeLabelingMDVs::calculateMDVs(
const FeatureMap& measured_featureMap, FeatureMap& normalized_featureMap,
const MassIntensityType& mass_intensity_type, const std::string& feature_name)
{
normalized_featureMap.clear();
for (const Feature& feature : measured_featureMap)
{
Feature normalized_feature;
calculateMDV(feature, normalized_feature, mass_intensity_type, feature_name);
normalized_featureMap.push_back(normalized_feature);
}
}
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/QUANTITATION/TMTSixPlexQuantitationMethod.cpp | .cpp | 4,682 | 113 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Stephan Aiche $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/QUANTITATION/TMTSixPlexQuantitationMethod.h>
#include <OpenMS/DATASTRUCTURES/Matrix.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
namespace OpenMS
{
const String TMTSixPlexQuantitationMethod::name_ = "tmt6plex";
/// Default constructor: registers the six TMT reporter channels with their exact
/// monoisotopic reporter masses and the channel indices affected by their
/// -2/-1/+1/+2 Da isotope impurities (-1 meaning "no such channel").
TMTSixPlexQuantitationMethod::TMTSixPlexQuantitationMethod()
{
  setName("TMTSixPlexQuantitationMethod");
  // create the channel map: name, index, description, reporter m/z,
  // affected-channel indices for the -2/-1/+1/+2 Da impurities
  channels_.push_back(IsobaricChannelInformation("126", 0, "", 126.127725, {-1, -1, 1, 2}));
  channels_.push_back(IsobaricChannelInformation("127", 1, "", 127.124760, {-1, 0, 2, 3}));
  channels_.push_back(IsobaricChannelInformation("128", 2, "", 128.134433, {0, 1, 3, 4}));
  channels_.push_back(IsobaricChannelInformation("129", 3, "", 129.131468, {1, 2, 4, 5}));
  channels_.push_back(IsobaricChannelInformation("130", 4, "", 130.141141, {2, 3, 5, -1}));
  channels_.push_back(IsobaricChannelInformation("131", 5, "", 131.138176, {3, 4, -1, -1}));
  // we assume 126 to be the reference
  reference_channel_ = 0;
  setDefaultParams_();
}
/// Declares the user-facing parameters: one description per channel, the
/// reference channel number, and the isotope-impurity correction matrix.
void TMTSixPlexQuantitationMethod::setDefaultParams_()
{
  // one free-text description parameter per reporter channel
  for (const std::string ch : {"126", "127", "128", "129", "130", "131"})
  {
    defaults_.setValue("channel_" + ch + "_description", "", "Description for the content of the " + ch + " channel.");
  }
  defaults_.setValue("reference_channel", 126, "Number of the reference channel (126-131).");
  defaults_.setMinInt("reference_channel", 126);
  defaults_.setMaxInt("reference_channel", 131);
  // default: Product Number: 90061 Lot Number: ZE386964
  defaults_.setValue("correction_matrix", std::vector<std::string>{
    "0.0/0.0/8.6/0.3",
    "0.0/0.1/7.8/0.1",
    "0.0/1.5/6.2/0.2",
    "0.0/1.5/5.7/0.1",
    "0.0/3.1/3.6/0.0",
    "0.1/2.9/3.8/0.0"
  },
  "Correction matrix for isotope distributions (see documentation); use the following format: <-2Da>/<-1Da>/<+1Da>/<+2Da>; e.g. '0/0.3/4/0', '0.1/0.3/3/0.2'");
  defaultsToParam_();
}
/// Synchronizes the channel descriptions and the reference-channel index with
/// the current parameter values.
void TMTSixPlexQuantitationMethod::updateMembers_()
{
  // pull the user-supplied channel descriptions back into the channel table
  for (int ch = 126; ch <= 131; ++ch)
  {
    channels_[ch - 126].description = param_.getValue("channel_" + std::to_string(ch) + "_description").toString();
  }
  // compute the zero-based index of the reference channel (126 -> 0)
  reference_channel_ = ((Int) param_.getValue("reference_channel")) - 126;
}
/// Copy constructor: copies the base class, the channel table, and the
/// reference-channel index.
TMTSixPlexQuantitationMethod::TMTSixPlexQuantitationMethod(const TMTSixPlexQuantitationMethod& other):
  IsobaricQuantitationMethod(other)
{
  // plain container assignment is equivalent to the former clear-and-insert
  channels_ = other.channels_;
  reference_channel_ = other.reference_channel_;
}
/// Assignment operator: the compiler-generated member-wise copy is sufficient.
TMTSixPlexQuantitationMethod& TMTSixPlexQuantitationMethod::operator=(const TMTSixPlexQuantitationMethod& rhs) = default;
/// Returns the static method identifier "tmt6plex" shared by all instances.
const String& TMTSixPlexQuantitationMethod::getMethodName() const
{
  return name_;
}
/// Exposes the channel table built in the constructor (descriptions may have
/// been updated via parameters).
const IsobaricQuantitationMethod::IsobaricChannelList& TMTSixPlexQuantitationMethod::getChannelInformation() const
{
  return this->channels_;
}
/// TMT 6-plex always provides exactly six reporter channels.
Size TMTSixPlexQuantitationMethod::getNumberOfChannels() const
{
  constexpr Size tmt6plex_channel_count = 6;
  return tmt6plex_channel_count;
}
/// Parses the user-configurable "correction_matrix" parameter strings into the
/// numeric isotope-correction matrix.
Matrix<double> TMTSixPlexQuantitationMethod::getIsotopeCorrectionMatrix() const
{
  const StringList corrections = ListUtils::toStringList<std::string>(getParameters().getValue("correction_matrix"));
  return stringListToIsotopeCorrectionMatrix_(corrections);
}
/// Returns the zero-based reference-channel index (126 -> 0, ..., 131 -> 5),
/// as computed in updateMembers_().
Size TMTSixPlexQuantitationMethod::getReferenceChannel() const
{
  return this->reference_channel_;
}
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/QUANTITATION/AbsoluteQuantitationMethod.cpp | .cpp | 4,350 | 182 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Douglas McCloskey, Pasquale Domenico Colaianni $
// $Authors: Douglas McCloskey, Pasquale Domenico Colaianni $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/QUANTITATION/AbsoluteQuantitationMethod.h>
#include <tuple>
namespace OpenMS
{
/// Two quantitation methods are equal iff every calibration attribute matches.
/// (Equivalent to the former std::tie-based comparison: a conjunction of
/// member-wise equalities in the same order.)
bool AbsoluteQuantitationMethod::operator==(const AbsoluteQuantitationMethod& other) const
{
  return component_name_ == other.component_name_
      && feature_name_ == other.feature_name_
      && IS_name_ == other.IS_name_
      && llod_ == other.llod_
      && ulod_ == other.ulod_
      && lloq_ == other.lloq_
      && uloq_ == other.uloq_
      && n_points_ == other.n_points_
      && correlation_coefficient_ == other.correlation_coefficient_
      && concentration_units_ == other.concentration_units_
      && transformation_model_ == other.transformation_model_
      && transformation_model_params_ == other.transformation_model_params_;
}
/// Inequality is defined via operator== so the two stay consistent.
bool AbsoluteQuantitationMethod::operator!=(const AbsoluteQuantitationMethod& other) const
{
  const bool equal = (*this == other);
  return !equal;
}
/// Sets the lower limit of detection (LLOD).
void AbsoluteQuantitationMethod::setLLOD(const double llod)
{
  llod_ = llod;
}
/// Sets the upper limit of detection (ULOD).
void AbsoluteQuantitationMethod::setULOD(const double ulod)
{
  ulod_ = ulod;
}
/// Returns the lower limit of detection (LLOD).
double AbsoluteQuantitationMethod::getLLOD() const
{
  return llod_;
}
/// Returns the upper limit of detection (ULOD).
double AbsoluteQuantitationMethod::getULOD() const
{
  return ulod_;
}
/// Sets the lower limit of quantitation (LLOQ).
void AbsoluteQuantitationMethod::setLLOQ(const double lloq)
{
  lloq_ = lloq;
}
/// Sets the upper limit of quantitation (ULOQ).
void AbsoluteQuantitationMethod::setULOQ(const double uloq)
{
  uloq_ = uloq;
}
/// Returns the lower limit of quantitation (LLOQ).
double AbsoluteQuantitationMethod::getLLOQ() const
{
  return lloq_;
}
/// Returns the upper limit of quantitation (ULOQ).
double AbsoluteQuantitationMethod::getULOQ() const
{
  return uloq_;
}
/// Sets the name of the feature attribute used for quantitation.
void AbsoluteQuantitationMethod::setFeatureName(const String& feature_name)
{
  feature_name_ = feature_name;
}
/// Returns the feature attribute name (by value).
String AbsoluteQuantitationMethod::getFeatureName() const
{
  return feature_name_;
}
/// Sets the internal standard (IS) name.
void AbsoluteQuantitationMethod::setISName(const String& IS_name)
{
  IS_name_ = IS_name;
}
/// Returns the internal standard (IS) name (by value).
String AbsoluteQuantitationMethod::getISName() const
{
  return IS_name_;
}
/// Sets the component (analyte) name.
void AbsoluteQuantitationMethod::setComponentName(const String& component_name)
{
  component_name_ = component_name;
}
/// Returns the component (analyte) name (by value).
String AbsoluteQuantitationMethod::getComponentName() const
{
  return component_name_;
}
/// Sets the concentration units (e.g. "uM").
void AbsoluteQuantitationMethod::setConcentrationUnits(const String& concentration_units)
{
  concentration_units_ = concentration_units;
}
/// Returns the concentration units (by value).
String AbsoluteQuantitationMethod::getConcentrationUnits() const
{
  return concentration_units_;
}
/// Sets the name of the transformation model (calibration curve fit type).
void AbsoluteQuantitationMethod::setTransformationModel(const String& transformation_model)
{
  transformation_model_ = transformation_model;
}
/// Sets the parameters of the transformation model.
void AbsoluteQuantitationMethod::setTransformationModelParams(const Param& transformation_model_params)
{
  transformation_model_params_ = transformation_model_params;
}
/// Returns the transformation model name (by value).
String AbsoluteQuantitationMethod::getTransformationModel() const
{
  return transformation_model_;
}
/// Returns a copy of the transformation model parameters.
Param AbsoluteQuantitationMethod::getTransformationModelParams() const
{
  return transformation_model_params_;
}
/// Sets the number of calibration points used for the fit.
void AbsoluteQuantitationMethod::setNPoints(const Int n_points)
{
  n_points_ = n_points;
}
/// Sets the correlation coefficient of the calibration fit.
void AbsoluteQuantitationMethod::setCorrelationCoefficient(const double correlation_coefficient)
{
  correlation_coefficient_ = correlation_coefficient;
}
/// Returns the number of calibration points.
Int AbsoluteQuantitationMethod::getNPoints() const
{
  return n_points_;
}
/// Returns the correlation coefficient of the calibration fit.
double AbsoluteQuantitationMethod::getCorrelationCoefficient() const
{
  return correlation_coefficient_;
}
/// True iff the value lies inside the closed detection interval [llod_, ulod_].
bool AbsoluteQuantitationMethod::checkLOD(const double value) const
{
  const bool above_lower = (llod_ <= value);
  const bool below_upper = (value <= ulod_);
  return above_lower && below_upper;
}
/// True iff the value lies inside the closed quantitation interval [lloq_, uloq_].
bool AbsoluteQuantitationMethod::checkLOQ(const double value) const
{
  const bool above_lower = (lloq_ <= value);
  const bool below_upper = (value <= uloq_);
  return above_lower && below_upper;
}
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/QUANTITATION/IsobaricNormalizer.cpp | .cpp | 8,695 | 228 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Stephan Aiche, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/QUANTITATION/IsobaricNormalizer.h>
#include <OpenMS/ANALYSIS/QUANTITATION/IsobaricQuantitationMethod.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/KERNEL/ConsensusMap.h>
#include <map>
namespace OpenMS
{
/// Constructs a normalizer bound to the given quantitation method and caches
/// the name of the method's reference channel for later lookups.
IsobaricNormalizer::IsobaricNormalizer(const IsobaricQuantitationMethod* const quant_method) :
  quant_meth_(quant_method)
{
  // NOTE(review): assumes quant_method is non-null — it is dereferenced here
  // without a check; confirm callers guarantee this.
  reference_channel_name_ = quant_meth_->getChannelInformation()[quant_meth_->getReferenceChannel()].name;
}
/// Copy constructor: shares the (non-owning) quantitation-method pointer and
/// copies the cached reference-channel name.
IsobaricNormalizer::IsobaricNormalizer(const IsobaricNormalizer& other) :
  quant_meth_(other.quant_meth_),
  reference_channel_name_(other.reference_channel_name_)
{
}
/// Assignment: copies the (non-owning) method pointer and the cached
/// reference-channel name; safe under self-assignment.
IsobaricNormalizer& IsobaricNormalizer::operator=(const IsobaricNormalizer& rhs)
{
  if (this != &rhs)
  {
    quant_meth_ = rhs.quant_meth_;
    reference_channel_name_ = rhs.reference_channel_name_;
  }
  return *this;
}
/// Scans the feature's handles for the one whose source map is annotated with
/// the reference channel's name; returns cf.end() when no handle matches.
ConsensusFeature::HandleSetType::iterator IsobaricNormalizer::findReferenceChannel_(ConsensusFeature& cf, const ConsensusMap& consensus_map) const
{
  const auto& headers = consensus_map.getColumnHeaders();
  for (auto handle_it = cf.begin(); handle_it != cf.end(); ++handle_it)
  {
    // NOTE: assumes every handle's map index has a column header entry
    if (headers.find(handle_it->getMapIndex())->second.getMetaValue("channel_name") == reference_channel_name_)
    {
      return handle_it;
    }
  }
  return cf.end();
}
void IsobaricNormalizer::buildVectorIndex_(const ConsensusMap& consensus_map)
{
// clear old values
ref_map_id_ = 0;
map_to_vec_index_.clear();
Size index = 0;
for (ConsensusMap::ColumnHeaders::const_iterator file_it = consensus_map.getColumnHeaders().begin();
file_it != consensus_map.getColumnHeaders().end();
++file_it)
{
// TODO I think this does not work if you are normalizing a consensus map including multiple files
// which all have that reference channel name.
if (file_it->second.getMetaValue("channel_name") == reference_channel_name_)
{
ref_map_id_ = file_it->first;
}
map_to_vec_index_[file_it->first] = index;
++index;
}
}
/// Records, for each handle of the consensus feature, its intensity ratio
/// relative to the reference intensity (and the raw intensity as a control).
void IsobaricNormalizer::collectRatios_(const ConsensusFeature& cf, const Peak2D::IntensityType& ref_intensity)
{
  for (const auto& handle : cf)
  {
    const Size vec_idx = map_to_vec_index_[handle.getMapIndex()];
    if (ref_intensity == 0) // avoid nan's and inf's
    {
      // 0/0 carries no information -> skip entirely; x/0 would be 'inf',
      // which std::sort() cannot handle, so store the largest finite value.
      if (handle.getIntensity() != 0)
      {
        peptide_ratios_[vec_idx].push_back(std::numeric_limits<Peak2D::IntensityType>::max());
      }
    }
    else // everything seems fine
    {
      peptide_ratios_[vec_idx].push_back(handle.getIntensity() / ref_intensity);
    }
    // raw intensity, kept as a control for the alternative normalization
    peptide_intensities_[vec_idx].push_back(handle.getIntensity());
  }
}
/// Computes one normalization factor per channel as the median of that
/// channel's peptide ratios; also derives a control factor from raw-intensity
/// medians and reports the largest deviation between the two approaches.
/// @param normalization_factors  output, one factor per channel (pre-sized by caller)
void IsobaricNormalizer::computeNormalizationFactors_(std::vector<Peak2D::IntensityType>& normalization_factors)
{
  // ensure that the ref_(ratios|intensities) are sorted
  // NOTE(review): ref_map_id_ is a column-header *map id*, while these vectors
  // are indexed by the vector index from map_to_vec_index_; this is only
  // correct when the map ids are 0..n-1 — verify.
  std::sort(peptide_ratios_[ref_map_id_].begin(), peptide_ratios_[ref_map_id_].end());
  std::sort(peptide_intensities_[ref_map_id_].begin(), peptide_intensities_[ref_map_id_].end());
  // reporting
  Peak2D::IntensityType max_deviation_from_control = 0;
  // find MEDIAN of ratios for each channel (store as 0th element in sorted vector)
  for (std::map<Size, Size>::const_iterator it_map = map_to_vec_index_.begin(); it_map != map_to_vec_index_.end(); ++it_map)
  {
    // this is solely for readability reasons, the compiler should optimize this anyway
    const Size vec_idx = it_map->second;
    // sort vector (partial_sort might improve performance here)
    std::sort(peptide_ratios_[vec_idx].begin(), peptide_ratios_[vec_idx].end());
    // save median as first element
    // NOTE(review): indexes element size()/2 — undefined for an empty ratio
    // vector (e.g. a channel with no usable ratios) — confirm callers prevent this.
    normalization_factors[vec_idx] = peptide_ratios_[vec_idx][peptide_ratios_[vec_idx].size() / 2];
    // sort control (intensities)
    std::sort(peptide_intensities_[vec_idx].begin(), peptide_intensities_[vec_idx].end());
    // find MEDIAN of control-method (intensities) for each channel; the
    // channel-median / reference-median ratio is stashed in slot 0
    peptide_intensities_[vec_idx][0] = peptide_intensities_[vec_idx][peptide_intensities_[vec_idx].size() / 2] /
                                       peptide_intensities_[ref_map_id_][peptide_intensities_[ref_map_id_].size() / 2];
    OPENMS_LOG_INFO << "IsobaricNormalizer: map-id " << (it_map->first) << " has factor " << (normalization_factors[vec_idx]) << " (control: " << (peptide_intensities_[vec_idx][0]) << ")" << std::endl;
    // relative difference between the ratio-based and the intensity-based factor
    Peak2D::IntensityType dev = (peptide_ratios_[vec_idx][0] - peptide_intensities_[vec_idx][0]) / normalization_factors[vec_idx];
    if (fabs(max_deviation_from_control) < fabs(dev))
    {
      max_deviation_from_control = dev;
    }
  }
  OPENMS_LOG_INFO << "IsobaricNormalizer: max ratio deviation of alternative method is " << (max_deviation_from_control * 100) << "%\n";
}
/// Normalizes all channel intensities of the consensus map to the reference
/// channel: collects per-channel intensity ratios, derives median-based
/// normalization factors, then rescales every feature handle (the reference
/// handle is set to 1).
/// @param consensus_map  map to normalize in place
void IsobaricNormalizer::normalize(ConsensusMap& consensus_map)
{
  // determine reference channel as vector index
  buildVectorIndex_(consensus_map);
  // build mapping of map_index to ratio_array_index
  peptide_ratios_.resize(quant_meth_->getNumberOfChannels());
  peptide_intensities_.resize(quant_meth_->getNumberOfChannels());
  //build up ratios for each peptide of non-reference channels
  ConsensusFeature::HandleSetType::iterator ref_it;
  for (ConsensusMap::Iterator cm_it = consensus_map.begin(); cm_it != consensus_map.end(); ++cm_it)
  {
    // find reference index (this is inefficient to do every time,
    // but the most robust against anyone who tries to change the internals of ConsensusFeature):
    ref_it = findReferenceChannel_(*cm_it, consensus_map);
    // reference channel not found in this ConsensusFeature
    if (ref_it == cm_it->end())
    {
      OPENMS_LOG_WARN << "IsobaricNormalizer::normalize() WARNING: ConsensusFeature "
                      << (cm_it - consensus_map.begin())
                      << " does not have a reference channel! Skipping"
                      << std::endl;
      continue;
    }
    collectRatios_(*cm_it, ref_it->getIntensity());
  } // ! collect ratios
  // vector to store the channel wise normalization factors
  std::vector<Peak2D::IntensityType> normalization_factors;
  normalization_factors.resize(quant_meth_->getNumberOfChannels());
  // compute the normalization factors based on the medians of the compute ratios
  computeNormalizationFactors_(normalization_factors);
  // free memory
  peptide_intensities_.clear();
  peptide_ratios_.clear();
  // adjust intensity ratios
  for (size_t i = 0; i < consensus_map.size(); ++i)
  {
    // find reference index (this is inefficient to do every time,
    // but the most robust against anyone who tries to change the
    // internals of ConsensusFeature):
    ref_it = findReferenceChannel_(consensus_map[i], consensus_map);
    // reference channel not found in this ConsensusFeature: leave it untouched
    if (ref_it == consensus_map[i].end())
    {
      continue;
    }
    // now adjust the ratios: rebuild the feature's handle set with scaled intensities
    ConsensusFeature cf = consensus_map[i];
    cf.clear(); // delete its handles
    for (ConsensusFeature::HandleSetType::iterator it_elements = consensus_map[i].begin();
         it_elements != consensus_map[i].end();
         ++it_elements)
    {
      FeatureHandle hd = *it_elements;
      if (it_elements == ref_it)
      {
        hd.setIntensity(1);
      }
      else // divide current intensity by normalization factor (which was stored at position 0)
      {
        hd.setIntensity(hd.getIntensity() / normalization_factors[map_to_vec_index_[it_elements->getMapIndex()]]);
      }
      cf.insert(hd);
    }
    // replace consensusFeature with updated intensity
    consensus_map[i] = cf;
  } // ! adjust ratios
}
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/QUANTITATION/ItraqConstants.cpp | .cpp | 11,816 | 292 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
//
#include <OpenMS/ANALYSIS/QUANTITATION/ItraqConstants.h>
#include <map>
namespace OpenMS
{
// number of channels for iTRAQ types. (make sure it corresponds to enum ITRAQ_TYPES)
const Int ItraqConstants::CHANNEL_COUNT[3] = {4, 8, 6};
// nominal reporter-ion channel names per labeling type (note: 8plex skips 120)
const Int ItraqConstants::CHANNELS_FOURPLEX[4][1] = {{114}, {115}, {116}, {117}};
const Int ItraqConstants::CHANNELS_EIGHTPLEX[8][1] = {{113}, {114}, {115}, {116}, {117}, {118}, {119}, {121}};
const Int ItraqConstants::CHANNELS_TMT_SIXPLEX[6][1] = {{126}, {127}, {128}, {129}, {130}, {131}};
// Default isotope-impurity percentages; columns are -2Da / -1Da / +1Da / +2Da.
// currently from http://www.matrixscience.com/help/quant_config_help.html
// (@ 117; +2 the value is 0.1 not 0.0 as confirmed by ABSciex)
const double ItraqConstants::ISOTOPECORRECTIONS_FOURPLEX[4][4] =
{
  {0.0, 1.0, 5.9, 0.2}, //114
  {0.0, 2.0, 5.6, 0.1},
  {0.0, 3.0, 4.5, 0.1},
  {0.1, 4.0, 3.5, 0.1} //117
};
//taken from Applied Biosystems Website
// http://www.absciex.com/Documents/Support/AB_SCIEX_Question_and_Answer.xls
const double ItraqConstants::ISOTOPECORRECTIONS_EIGHTPLEX[8][4] =
{
  {0.00, 0.00, 6.89, 0.22}, //113
  {0.00, 0.94, 5.90, 0.16},
  {0.00, 1.88, 4.90, 0.10},
  {0.00, 2.82, 3.90, 0.07},
  {0.06, 3.77, 2.99, 0.00},
  {0.09, 4.71, 1.88, 0.00},
  {0.14, 5.66, 0.87, 0.00},
  {0.27, 7.44, 0.18, 0.00} //121
};
//taken from ThermoFisher Scientific
// http://www.piercenet.com/coapdfs/CofA-90064-SPECS.pdf
// (all zeros: the shipped TMT default assumes no isotope impurities)
const double ItraqConstants::ISOTOPECORRECTIONS_TMT_SIXPLEX[6][4] =
{
  {0.00, 0.00, 0.00, 0.00}, //126
  {0.00, 0.00, 0.00, 0.00}, //127
  {0.00, 0.00, 0.00, 0.00}, //128
  {0.00, 0.00, 0.00, 0.00}, //129
  {0.00, 0.00, 0.00, 0.00}, //130
  {0.00, 0.00, 0.00, 0.00}, //131
};
/// Serializes the isotope-correction matrix of the given labeling type to a
/// list of strings of the form "<channel>:<-2Da>/<-1Da>/<+1Da>/<+2Da>".
StringList ItraqConstants::getIsotopeMatrixAsStringList(const int itraq_type, const IsotopeMatrices & isotope_corrections)
{
  OPENMS_PRECONDITION(itraq_type < SIZE_OF_ITRAQ_TYPES && itraq_type >= 0, "Error while trying to access invalid isotope correction matrix.");
  // channel-name lookup tables for 4plex / 8plex / TMT-6plex
  std::vector<Matrix<Int>> channel_names(3);
  channel_names[0].setMatrix<Int, 4, 1>(CHANNELS_FOURPLEX);
  channel_names[1].setMatrix<Int, 8, 1>(CHANNELS_EIGHTPLEX);
  channel_names[2].setMatrix<Int, 6, 1>(CHANNELS_TMT_SIXPLEX);
  StringList isotopes;
  for (Int row = 0; row < CHANNEL_COUNT[itraq_type]; ++row)
  {
    String entry = String(channel_names[itraq_type](row, 0)) + ":";
    for (Int col = 0; col < 4; ++col)
    {
      entry += String(isotope_corrections[itraq_type](row, col));
      if (col < 3)
      {
        entry += "/"; // separator between the four correction values
      }
    }
    isotopes.push_back(entry);
  }
  return isotopes;
}
void ItraqConstants::updateIsotopeMatrixFromStringList(const int itraq_type, const StringList & channels, IsotopeMatrices & isotope_corrections)
{
// TODO: make generic .. why do we need to initialize all matrices, we are only interested in itraq_type
isotope_corrections.resize(SIZE_OF_ITRAQ_TYPES);
isotope_corrections[0].setMatrix<double, 4, 4>(ItraqConstants::ISOTOPECORRECTIONS_FOURPLEX);
isotope_corrections[1].setMatrix<double, 8, 4>(ItraqConstants::ISOTOPECORRECTIONS_EIGHTPLEX);
isotope_corrections[2].setMatrix<double, 6, 4>(ItraqConstants::ISOTOPECORRECTIONS_TMT_SIXPLEX);
// split the channels key:name pairs apart
for (StringList::const_iterator it = channels.begin(); it != channels.end(); ++it)
{
StringList result;
it->split(':', result);
if (result.size() != 2)
{
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "ItraqQuantifier: Invalid entry in Param 'isotope_correction_values'; expected one ':', got this: '" + (*it) + "'");
}
result[0] = result[0].trim(); // hold channel name
result[1] = result[1].trim(); // holds 4 values
Int channel = result[0].toInt();
Int line = 0;
if (itraq_type == FOURPLEX)
line = channel - 114;
else if (itraq_type == EIGHTPLEX)
line = channel - 113;
else // TODO: what do we need as offset here
line = channel - 126;
if ((itraq_type == FOURPLEX && (line < 0 || line > 3))
||
((itraq_type == EIGHTPLEX && (line < 0 || line > 8)) || channel == 120)
||
(itraq_type == TMT_SIXPLEX && (line < 0 || line > 5)))
{
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("ItraqQuantifier: Invalid entry in Param 'isotope_correction_values'; channel-name is not valid for ")
+ String(
itraq_type == FOURPLEX ? "4plex" : (itraq_type == EIGHTPLEX ? "8plex" : "TMT-6plex")
)
+ String(": '")
+ result[0]
+ String("'"));
}
// if set to 121 we still want to change line 7 of the matrix
if (line == 8 && itraq_type == EIGHTPLEX)
line = 7;
StringList corrections;
result[1].split('/', corrections);
if (corrections.size() != 4)
{
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "ItraqQuantifier: Invalid entry in Param 'isotope_correction_values'; expected four correction values separated by '&', got this: '" + result[1] + "'");
}
// overwrite line in Matrix with custom values
isotope_corrections[itraq_type](line, 0) = corrections[0].toDouble();
isotope_corrections[itraq_type](line, 1) = corrections[1].toDouble();
isotope_corrections[itraq_type](line, 2) = corrections[2].toDouble();
isotope_corrections[itraq_type](line, 3) = corrections[3].toDouble();
#ifdef ITRAQ_DEBUG
std::cout << "Channel " << channel << " has values " << corrections << std::endl;
#endif
}
}
/// Initializes the channel map for the given labeling type: one inactive
/// ChannelInfo (name, id, exact reporter mass) per channel.
/// @param itraq_type  FOURPLEX, EIGHTPLEX or TMT_SIXPLEX
/// @param map         output; cleared and refilled
/// @throws Exception::InvalidValue if a channel name has no known reporter mass
void ItraqConstants::initChannelMap(const int itraq_type, ChannelMapType & map)
{
  // Exact monoisotopic reporter-ion masses for the requested labeling type.
  // BUGFIX: this table used to be a function-local 'static' guarded by
  // empty(); after a first call with a TMT type, a later call with an iTRAQ
  // type found the map non-empty, never inserted the 113-121 masses, and
  // threw InvalidValue below. Rebuilding the table on every call fixes that
  // while keeping the same masses for every valid call sequence.
  std::map<Int, double> reporter_mass_exact;
  if (itraq_type == EIGHTPLEX || itraq_type == FOURPLEX) // exact monoisotopic reporter ion masses (taken from AB Sciex)
  {
    reporter_mass_exact[113] = 113.1078;
    reporter_mass_exact[114] = 114.1112;
    reporter_mass_exact[115] = 115.1082;
    reporter_mass_exact[116] = 116.1116;
    reporter_mass_exact[117] = 117.1149;
    reporter_mass_exact[118] = 118.1120;
    reporter_mass_exact[119] = 119.1153;
    reporter_mass_exact[121] = 121.1220;
  }
  else // TMT(CID): exact masses taken from Thermo Scientific
  {
    reporter_mass_exact[126] = 126.127725;
    reporter_mass_exact[127] = 127.124760;
    reporter_mass_exact[128] = 128.134433;
    reporter_mass_exact[129] = 129.131468;
    reporter_mass_exact[130] = 130.141141;
    reporter_mass_exact[131] = 131.138176;
  }
  /// valid names for 4 and 8plex, i.e. 114,115,116,117 for 4plex
  std::vector<Matrix<Int>> channel_names;
  channel_names.resize(3);
  channel_names[0].setMatrix<Int, 4, 1>(CHANNELS_FOURPLEX);
  channel_names[1].setMatrix<Int, 8, 1>(CHANNELS_EIGHTPLEX);
  channel_names[2].setMatrix<Int, 6, 1>(CHANNELS_TMT_SIXPLEX);
  map.clear();
  for (long int i = 0; i < channel_names[itraq_type].rows(); ++i)
  {
    ChannelInfo info;
    info.description = "";
    info.name = channel_names[itraq_type](i, 0);
    info.id = (Int)i;
    if (const auto it = reporter_mass_exact.find(info.name); it == reporter_mass_exact.end())
    {
      throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Unexpected reporter name during initialization.", String(info.name));
    }
    else
    {
      info.center = it->second;
    }
    info.active = false;
    map[info.name] = info;
  }
#ifdef ITRAQ_DEBUG
  std::cout << "INIT: map has " << map.size() << " entries!" << std::endl;
#endif
}
void ItraqConstants::updateChannelMap(const StringList & active_channels, ChannelMapType & map)
{
// split the channels key:name pairs apart
for (StringList::const_iterator it = active_channels.begin(); it != active_channels.end(); ++it)
{
StringList result;
it->split(':', result);
if (result.size() != 2)
{
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "ItraqConstants: Invalid entry in Param 'channel_active'; expected one semicolon ('" + (*it) + "')");
}
result[0] = result[0].trim();
result[1] = result[1].trim();
if (result[0] == String::EMPTY || result[1] == String::EMPTY)
{
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "ItraqConstants: Invalid entry in Param 'channel_active'; key or value is empty ('" + (*it) + "')");
}
Int channel = result[0].toInt();
if (map.find(channel) == map.end())
{
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "ItraqConstants: Invalid entry in Param 'channel_active'; channel is not valid ('" + String(channel) + "')");
}
// update name (description) of channel
map[channel].description = result[1];
map[channel].active = true;
#ifdef ITRAQ_DEBUG
std::cout << "Channel " << channel << " has description " << channel_map_[channel].description << " and center " << channel_map_[channel].center << std::endl;
#endif
}
}
/// Converts the per-channel isotope-impurity table (-2/-1/+1/+2 Da columns, in
/// percent) into a square channel-frequency matrix whose entry (j, i) gives the
/// fraction of channel i's signal observed in channel j.
/// @param itraq_type          FOURPLEX, EIGHTPLEX or TMT_SIXPLEX
/// @param isotope_corrections per-type impurity matrices (percent values)
/// @return channel_frequency matrix of size CHANNEL_COUNT x CHANNEL_COUNT
Matrix<double> ItraqConstants::translateIsotopeMatrix(const int & itraq_type, const IsotopeMatrices & isotope_corrections)
{
  // translate isotope_corrections to a channel_frequency matrix
  /*
    take special care of 8plex case, as ch-121 has a gap just before, thus matrix values need to be adapted
  */
  Matrix<double> channel_frequency(CHANNEL_COUNT[itraq_type], CHANNEL_COUNT[itraq_type]);
  for (Int i = 0; i < CHANNEL_COUNT[itraq_type]; ++i)
  {
    for (Int j = 0; j < CHANNEL_COUNT[itraq_type]; ++j)
    {
      // diagonal (should be close to 1 = 100%)
      if (i == j)
      {
        // fraction remaining in the channel = 1 minus all four impurity losses
        double val = 1.0;
        // subtract all isotope deviations of row i
        for (Int col_idx = 0; col_idx < 4; ++col_idx)
        {
          val += -isotope_corrections[itraq_type](i, col_idx) / 100;
        }
        channel_frequency(i, j) = val;
      }
      else // from mass i to mass j (directly copy the deviation)
      {
        // row/column 7 only exists for 8plex (channel 121); for the other
        // types the special case below is never reached
        if (i != 7 && j != 7)
        {
          if (j < i && i <= j + 2) // -2, -1 cases of row 'i'
          {
            channel_frequency(j, i) = isotope_corrections[itraq_type](i, j - i + 2) / 100;
          }
          else if (i < j && j <= i + 2) // +1, +2 cases of row 'i'
          {
            channel_frequency(j, i) = isotope_corrections[itraq_type](i, j - i + 1) / 100;
          }
        }
        else // special case of ch-121 for 8plex
        { // make everything more 'extreme' by 1 index, since ch-120 is missing
          if (i == 7 && j == 6) // -2 case of ch-121
          {
            channel_frequency(j, i) = isotope_corrections[itraq_type](i, 0) / 100;
          }
          else if (i == 6 && j == 7) // +2 case of ch-121
          {
            channel_frequency(j, i) = isotope_corrections[itraq_type](i, 3) / 100.0;
          }
        }
      }
    }
  }
  return channel_frequency;
}
} // !namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/QUANTITATION/IsobaricQuantifierStatistics.cpp | .cpp | 2,691 | 74 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Stephan Aiche, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/QUANTITATION/IsobaricQuantifierStatistics.h>
#include <OpenMS/DATASTRUCTURES/String.h>
namespace OpenMS
{
/// Default constructor: all counters start at zero, no empty channels recorded.
IsobaricQuantifierStatistics::IsobaricQuantifierStatistics() :
  channel_count(0),
  iso_number_ms2_negative(0),
  iso_number_reporter_negative(0),
  iso_number_reporter_different(0),
  iso_solution_different_intensity(0),
  iso_total_intensity_negative(0),
  number_ms2_total(0),
  number_ms2_empty(0),
  empty_channels()
{
}
/// Restore the pristine post-construction state: clear the set of empty
/// channels and zero every counter gathered during quantification.
void IsobaricQuantifierStatistics::reset()
{
  empty_channels.clear();
  number_ms2_empty = 0;
  number_ms2_total = 0;
  iso_total_intensity_negative = 0;
  iso_solution_different_intensity = 0;
  iso_number_reporter_different = 0;
  iso_number_reporter_negative = 0;
  iso_number_ms2_negative = 0;
  channel_count = 0;
}
/// Copy constructor: member-wise copy of all counters and the set of empty channels.
IsobaricQuantifierStatistics::IsobaricQuantifierStatistics(const IsobaricQuantifierStatistics& other) :
  channel_count(other.channel_count),
  iso_number_ms2_negative(other.iso_number_ms2_negative),
  iso_number_reporter_negative(other.iso_number_reporter_negative),
  iso_number_reporter_different(other.iso_number_reporter_different),
  iso_solution_different_intensity(other.iso_solution_different_intensity),
  iso_total_intensity_negative(other.iso_total_intensity_negative),
  number_ms2_total(other.number_ms2_total),
  number_ms2_empty(other.number_ms2_empty),
  empty_channels(other.empty_channels) // container copy == clear + insert of all elements
{
}
/// Assignment operator: member-wise copy, safe against self-assignment.
IsobaricQuantifierStatistics& IsobaricQuantifierStatistics::operator=(const IsobaricQuantifierStatistics& rhs)
{
  if (this != &rhs)
  {
    channel_count = rhs.channel_count;
    iso_number_ms2_negative = rhs.iso_number_ms2_negative;
    iso_number_reporter_negative = rhs.iso_number_reporter_negative;
    iso_number_reporter_different = rhs.iso_number_reporter_different;
    iso_solution_different_intensity = rhs.iso_solution_different_intensity;
    iso_total_intensity_negative = rhs.iso_total_intensity_negative;
    number_ms2_total = rhs.number_ms2_total;
    number_ms2_empty = rhs.number_ms2_empty;
    // container assignment replaces the old content entirely (== clear + insert)
    empty_channels = rhs.empty_channels;
  }
  return *this;
}
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/QUANTITATION/IsobaricChannelExtractor.cpp | .cpp | 39,042 | 882 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Stephan Aiche, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/QUANTITATION/IsobaricChannelExtractor.h>
#include <OpenMS/ANALYSIS/QUANTITATION/IsobaricQuantitationMethod.h>
#include <OpenMS/ANALYSIS/QUANTITATION/TMTTenPlexQuantitationMethod.h>
#include <OpenMS/ANALYSIS/QUANTITATION/TMTElevenPlexQuantitationMethod.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/KERNEL/RangeUtils.h>
#include <OpenMS/KERNEL/ConsensusFeature.h>
#include <OpenMS/KERNEL/ConsensusMap.h>
#include <OpenMS/MATH/StatisticFunctions.h>
// #define ISOBARIC_CHANNEL_EXTRACTOR_DEBUG
// #undef ISOBARIC_CHANNEL_EXTRACTOR_DEBUG
namespace OpenMS
{
// Maximum allowed search window for TMT-10 reporter ions. The channels are only 0.006 Th apart.
// Allowing anything larger will result in wrong quantifications for empty channels.
// Also used for TMT_11PLEX
double TMT_10AND11PLEX_CHANNEL_TOLERANCE = 0.003;
/// Construct the purity bookkeeping state for one experiment: the precursor
/// scan starts out unset (end iterator, updated later by the caller) and the
/// follow-up scan points to the first MS1 spectrum, if any exists.
IsobaricChannelExtractor::PurityState_::PurityState_(const PeakMap& targetExp) :
  baseExperiment(targetExp)
{
  // unknown at construction time; the calling method assigns it later
  precursorScan = baseExperiment.end();
  // locate the first MS1 spectrum of the experiment
  for (followUpScan = baseExperiment.begin(); followUpScan != baseExperiment.end(); ++followUpScan)
  {
    if (followUpScan->getMSLevel() == 1)
    {
      break;
    }
  }
  // record whether such a scan exists at all
  hasFollowUpScan = (followUpScan != baseExperiment.end());
}
/// Move the follow-up iterator to the next MS1 spectrum with an RT strictly
/// greater than @p rt (or to end() if no such spectrum exists).
void IsobaricChannelExtractor::PurityState_::advanceFollowUp(const double rt)
{
  if (followUpScan != baseExperiment.end())
  {
    // step past the current scan, then search forward
    for (++followUpScan; followUpScan != baseExperiment.end(); ++followUpScan)
    {
      if (followUpScan->getMSLevel() == 1 && followUpScan->getRT() > rt)
      {
        break;
      }
    }
  }
  // record whether a suitable scan was found
  hasFollowUpScan = (followUpScan != baseExperiment.end());
}
/// Check whether the cached follow-up scan is still valid for a spectrum at
/// retention time @p rt (i.e. the follow-up scan lies after it).
bool IsobaricChannelExtractor::PurityState_::followUpValid(const double rt) const
{
  // without a follow-up scan there is nothing that could be invalidated
  if (!hasFollowUpScan)
  {
    return true;
  }
  return rt < followUpScan->getRT();
}
/// Construct an extractor for the given quantitation method (not owned; the
/// caller must keep it alive). Members are initialized to safe defaults and
/// then overwritten from the registered parameters via setDefaultParams_().
IsobaricChannelExtractor::IsobaricChannelExtractor(const IsobaricQuantitationMethod* const quant_method) :
  DefaultParamHandler("IsobaricChannelExtractor"),
  quant_method_(quant_method),
  selected_activation_("any"),
  reporter_mass_shift_(0.1),
  min_precursor_intensity_(1.0),
  keep_unannotated_precursor_(true),
  min_reporter_intensity_(0.0),
  remove_low_intensity_quantifications_(false),
  min_precursor_purity_(0.0),
  max_precursor_isotope_deviation_(10),
  interpolate_precursor_purity_(false),
  channel_mz_delta()
{
  // registers defaults and triggers updateMembers_()
  setDefaultParams_();
}
/// Copy construction uses the compiler-generated member-wise copy.
IsobaricChannelExtractor::IsobaricChannelExtractor(const IsobaricChannelExtractor& other) = default;
/// Assignment operator: copies the parameter handler state and all extraction
/// settings; quant_method_ is a non-owning pointer and is copied shallowly.
IsobaricChannelExtractor& IsobaricChannelExtractor::operator=(const IsobaricChannelExtractor& rhs)
{
  // guard against self-assignment
  if (this == &rhs)
    return *this;
  DefaultParamHandler::operator=(rhs);
  quant_method_ = rhs.quant_method_;
  selected_activation_ = rhs.selected_activation_;
  reporter_mass_shift_ = rhs.reporter_mass_shift_;
  min_precursor_intensity_ = rhs.min_precursor_intensity_;
  keep_unannotated_precursor_ = rhs.keep_unannotated_precursor_;
  min_reporter_intensity_ = rhs.min_reporter_intensity_;
  remove_low_intensity_quantifications_ = rhs.remove_low_intensity_quantifications_;
  min_precursor_purity_ = rhs.min_precursor_purity_;
  max_precursor_isotope_deviation_ = rhs.max_precursor_isotope_deviation_;
  interpolate_precursor_purity_ = rhs.interpolate_precursor_purity_;
  channel_mz_delta = rhs.channel_mz_delta;
  return *this;
}
/// Register all user-facing parameters (with defaults, restrictions and
/// documentation) and push them into the member variables.
/// NOTE: the registration order is the order shown to the user; keep it stable.
void IsobaricChannelExtractor::setDefaultParams_()
{
  defaults_.setValue("select_activation", "auto", "Operate only on MSn scans where any of its precursors features a certain activation method. Setting to \"auto\" uses HCD and HCID spectra. Set to empty string if you want to disable filtering.");
  std::vector<std::string> activation_list;
  activation_list.emplace_back("auto");
  // all known activation methods except the SIZE_OF_... sentinel
  activation_list.insert(activation_list.end(), Precursor::NamesOfActivationMethod, Precursor::NamesOfActivationMethod + static_cast<size_t>(Precursor::ActivationMethod::SIZE_OF_ACTIVATIONMETHOD) - 1);
  activation_list.emplace_back("any"); // allow disabling this
  defaults_.setValidStrings("select_activation", activation_list);
  defaults_.setValue("reporter_mass_shift", 0.002, "Allowed shift (left to right) in Th from the expected position.");
  defaults_.setMinFloat("reporter_mass_shift", 0.0001); // ~0.7ppm -- no need to allow any lower value; this is more than enough for TMT-10plex (0.006 distance between channels, i.e. 60 times wider)
  defaults_.setMaxFloat("reporter_mass_shift", 0.5);
  defaults_.setValue("min_precursor_intensity", 1.0, "Minimum intensity of the precursor to be extracted. MS/MS scans having a precursor with a lower intensity will not be considered for quantitation.");
  defaults_.setMinFloat("min_precursor_intensity", 0.0);
  defaults_.setValue("keep_unannotated_precursor", "true", "Flag if precursor with missing intensity value or missing precursor spectrum should be included or not.");
  defaults_.setValidStrings("keep_unannotated_precursor", {"true","false"});
  defaults_.setValue("min_reporter_intensity", 0.0, "Minimum intensity of the individual reporter ions to be extracted.");
  defaults_.setMinFloat("min_reporter_intensity", 0.0);
  defaults_.setValue("discard_low_intensity_quantifications", "false", "Remove all reporter intensities if a single reporter is below the threshold given in 'min_reporter_intensity'.");
  defaults_.setValidStrings("discard_low_intensity_quantifications", {"true","false"});
  defaults_.setValue("min_precursor_purity", 0.0, "Minimum fraction of the total intensity in the isolation window of the precursor spectrum attributable to the selected precursor.");
  defaults_.setMinFloat("min_precursor_purity", 0.0);
  defaults_.setMaxFloat("min_precursor_purity", 1.0);
  defaults_.setValue("precursor_isotope_deviation", 10.0, "Maximum allowed deviation (in ppm) between theoretical and observed isotopic peaks of the precursor peak in the isolation window to be counted as part of the precursor.");
  defaults_.setMinFloat("precursor_isotope_deviation", 0.0);
  defaults_.addTag("precursor_isotope_deviation", "advanced");
  defaults_.setValue("purity_interpolation", "true", "If set to true the algorithm will try to compute the purity as a time weighted linear combination of the precursor scan and the following scan. If set to false, only the precursor scan will be used.");
  defaults_.setValidStrings("purity_interpolation", {"true","false"});
  defaults_.addTag("purity_interpolation", "advanced");
  // copy the registered defaults into the active parameter set (calls updateMembers_)
  defaultsToParam_();
}
void IsobaricChannelExtractor::updateMembers_()
{
selected_activation_ = getParameters().getValue("select_activation").toString();
reporter_mass_shift_ = getParameters().getValue("reporter_mass_shift");
min_precursor_intensity_ = getParameters().getValue("min_precursor_intensity");
keep_unannotated_precursor_ = getParameters().getValue("keep_unannotated_precursor") == "true";
min_reporter_intensity_ = getParameters().getValue("min_reporter_intensity");
remove_low_intensity_quantifications_ = getParameters().getValue("discard_low_intensity_quantifications") == "true";
min_precursor_purity_ = getParameters().getValue("min_precursor_purity");
max_precursor_isotope_deviation_ = getParameters().getValue("precursor_isotope_deviation");
interpolate_precursor_purity_ = getParameters().getValue("purity_interpolation") == "true";
Size number_of_channels = quant_method_->getNumberOfChannels();
/* check for sensible parameters */
if ((( number_of_channels == 10) || (number_of_channels == 11))
&& reporter_mass_shift_ > TMT_10AND11PLEX_CHANNEL_TOLERANCE)
{
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Error: Both TMT-10plex and TMT-11plex require reporter mass shifts <= 0.003 to avoid channel ambiguity!");
}
}
/// Decide whether a precursor qualifies for quantification: precursors without
/// a positive intensity ("unannotated") pass if configured to be kept; all
/// others must reach the minimum precursor intensity.
bool IsobaricChannelExtractor::isValidPrecursor_(const Precursor& precursor) const
{
  const double prec_intensity = precursor.getIntensity();
  // unannotated precursor (no positive intensity recorded)
  if (!(prec_intensity > 0.0) && keep_unannotated_precursor_)
  {
    return true;
  }
  // negated comparison so that NaN intensities do not reject the precursor
  return !(prec_intensity < min_precursor_intensity_);
}
bool IsobaricChannelExtractor::hasLowIntensityReporter_(const ConsensusFeature& cf) const
{
for (ConsensusFeature::const_iterator cf_it = cf.begin();
cf_it != cf.end();
++cf_it)
{
if (cf_it->getIntensity() == 0.0)
{
return true;
}
}
return false;
}
/// Compute the purity (signal-to-interference) of the first precursor of
/// @p ms2_spec against one MS1 spectrum: the intensity of the precursor peak
/// plus its matched isotopic satellites, divided by the total intensity found
/// inside the isolation window. The window is extended by the allowed ppm
/// deviation ("fuzzy" borders); peaks in that margin contribute only 50%,
/// since isolation window borders are not sharp.
/// @param ms2_spec fragment spectrum; its first precursor is analyzed
///        (assumes a non-zero precursor charge — checked by the caller)
/// @param precursor_spec MS1 spectrum in which the isolation window is evaluated
/// @return precursor_intensity / total_intensity (fraction)
double IsobaricChannelExtractor::computeSingleScanPrecursorPurity_(const PeakMap::ConstIterator& ms2_spec, const PeakMap::SpectrumType& precursor_spec) const
{
  typedef PeakMap::SpectrumType::ConstIterator const_spec_iterator;
  // compute distance between isotopic peaks based on the precursor charge.
  const double charge_dist = Constants::NEUTRON_MASS_U / static_cast<double>(ms2_spec->getPrecursors()[0].getCharge());
  // the actual boundary values
  const double strict_lower_mz = ms2_spec->getPrecursors()[0].getMZ() - ms2_spec->getPrecursors()[0].getIsolationWindowLowerOffset();
  const double strict_upper_mz = ms2_spec->getPrecursors()[0].getMZ() + ms2_spec->getPrecursors()[0].getIsolationWindowUpperOffset();
  // borders widened by the allowed ppm deviation
  const double fuzzy_lower_mz = strict_lower_mz - (strict_lower_mz * max_precursor_isotope_deviation_ / 1000000);
  const double fuzzy_upper_mz = strict_upper_mz + (strict_upper_mz * max_precursor_isotope_deviation_ / 1000000);
  // first find the actual precursor peak
  Size precursor_peak_idx = precursor_spec.findNearest(ms2_spec->getPrecursors()[0].getMZ());
  const Peak1D& precursor_peak = precursor_spec[precursor_peak_idx];
  // now we get ourselves some border iterators
  const_spec_iterator lower_bound = precursor_spec.MZBegin(fuzzy_lower_mz);
  const_spec_iterator upper_bound = precursor_spec.MZEnd(ms2_spec->getPrecursors()[0].getMZ());
  // both accumulators start with the precursor peak itself
  Peak1D::IntensityType precursor_intensity = precursor_peak.getIntensity();
  Peak1D::IntensityType total_intensity = precursor_peak.getIntensity();
  // ------------------------------------------------------------------------------
  // try to find a match for our isotopic peak on the left side
  double expected_next_mz = precursor_peak.getMZ() - charge_dist;
  while (expected_next_mz > fuzzy_lower_mz)
  {
    // find nearest peak in precursor window
    const_spec_iterator np_it = precursor_spec.MZBegin(lower_bound, expected_next_mz, upper_bound);
    // handle border cases
    // check if next peak has smaller dist
    const_spec_iterator np_it2 = np_it;
    ++np_it;
    // NOTE(review): np_it may equal the spectrum end() here and is dereferenced
    // below — assumes peaks exist beyond the search position; confirm.
    if (std::fabs(np_it2->getMZ() - expected_next_mz) < std::fabs(np_it->getMZ() - expected_next_mz))
    {
      np_it = np_it2;
    }
    // compute difference between found peak and expected (in ppm)
    double min_diff = std::fabs(np_it->getMZ() - expected_next_mz) * 1000000 / expected_next_mz;
    // check if we found an isotopic peak
    if (min_diff < max_precursor_isotope_deviation_)
    {
#ifdef ISOBARIC_CHANNEL_EXTRACTOR_DEBUG
      // NOTE(review): 'min_idx' is undefined; this debug line will not compile
      // if ISOBARIC_CHANNEL_EXTRACTOR_DEBUG is enabled.
      std::cerr << "Mark peak as isotopic peak POS: " << precursor_spec[min_idx] << " (diff: " << min_diff << " vs " << max_precursor_isotope_deviation_ << ")\n";
#endif
      if (np_it->getMZ() > strict_lower_mz)
      {
        precursor_intensity += np_it->getIntensity();
      }
      else
      {
        // we're in the fuzzy area, so we will take only 50% of the given intensity
        // since we assume that the isolation window borders are not sharp
        precursor_intensity += 0.5 * np_it->getIntensity();
      }
      // update expected_next_mz
      expected_next_mz = np_it->getMZ() - charge_dist;
    }
    else
    {
#ifdef ISOBARIC_CHANNEL_EXTRACTOR_DEBUG
      std::cerr << "No matching isotopic peak for expected pos: " << expected_next_mz << " (min reached diff: " << min_diff << " vs " << max_precursor_isotope_deviation_ << ")\n";
#endif
      // update expected_next_mz with theoretical position
      expected_next_mz -= charge_dist;
    }
  }
  // ------------------------------------------------------------------------------
  // try to find a match for our isotopic peak on the right
  // redefine bounds
  lower_bound = precursor_spec.MZBegin(ms2_spec->getPrecursors()[0].getMZ());
  upper_bound = precursor_spec.MZEnd(fuzzy_upper_mz);
  expected_next_mz = precursor_peak.getMZ() + charge_dist;
  while (expected_next_mz < fuzzy_upper_mz)
  {
    // find nearest peak in precursor window
    const_spec_iterator np_it = precursor_spec.MZBegin(lower_bound, expected_next_mz, upper_bound);
    // handle border cases
    // check if next peak has smaller dist
    const_spec_iterator np_it2 = np_it;
    ++np_it;
    // NOTE(review): same potential end() dereference as on the left side above.
    if (std::fabs(np_it2->getMZ() - expected_next_mz) < std::fabs(np_it->getMZ() - expected_next_mz))
    {
      np_it = np_it2;
    }
    // compute difference between found peak and expected (in ppm)
    double min_diff = std::fabs(np_it->getMZ() - expected_next_mz) * 1000000 / expected_next_mz;
    // check if we found an isotopic peak
    if (min_diff < max_precursor_isotope_deviation_)
    {
#ifdef ISOBARIC_CHANNEL_EXTRACTOR_DEBUG
      std::cerr << "Mark peak as isotopic peak POS: " << precursor_spec[min_idx] << " (diff: " << min_diff << " vs " << max_precursor_isotope_deviation_ << ")\n";
#endif
      if (np_it->getMZ() < strict_upper_mz)
      {
        precursor_intensity += np_it->getIntensity();
      }
      else
      {
        // we're in the fuzzy area, so we will take only 50% of the given intensity
        // since we assume that the isolation window borders are not sharp
        precursor_intensity += 0.5 * np_it->getIntensity();
      }
      // update expected_next_mz
      expected_next_mz = np_it->getMZ() + charge_dist;
    }
    else
    {
#ifdef ISOBARIC_CHANNEL_EXTRACTOR_DEBUG
      std::cerr << "No matching isotopic peak for expected pos: " << expected_next_mz << " (min reached diff: " << min_diff << " vs " << max_precursor_isotope_deviation_ << ")\n";
#endif
      // update expected_next_mz with theoretical position
      expected_next_mz += charge_dist;
    }
  }
  // ------------------------------------------------------------------------------
  // compute total intensity: sum every peak inside the window, walking outwards
  // from the precursor peak to the left ...
  int idx = static_cast<int>(precursor_peak_idx) - 1;
  while (idx >= 0 && precursor_spec[idx].getMZ() > fuzzy_lower_mz)
  {
    if (precursor_spec[idx].getMZ() > strict_lower_mz)
    {
      total_intensity += precursor_spec[idx].getIntensity();
    }
    else
    {
      // we're in the fuzzy area, so we will take only 50% of the given intensity
      // since we assume that the isolation window borders are not sharp
      total_intensity += 0.5 * precursor_spec[idx].getIntensity();
    }
    --idx;
  }
  // ... and to the right
  idx = static_cast<int>(precursor_peak_idx) + 1;
  while (idx < static_cast<int>(precursor_spec.size()) && precursor_spec[idx].getMZ() < fuzzy_upper_mz)
  {
    if (precursor_spec[idx].getMZ() < strict_upper_mz)
    {
      total_intensity += precursor_spec[idx].getIntensity();
    }
    else
    {
      // we're in the fuzzy area, so we will take only 50% of the given intensity
      // since we assume that the isolation window borders are not sharp
      total_intensity += 0.5 * precursor_spec[idx].getIntensity();
    }
    ++idx;
  }
  return precursor_intensity / total_intensity;
}
/// Compute the precursor purity of an MS2 scan. If enabled and possible, the
/// purity is interpolated (time-weighted) between the preceding and the
/// following MS1 scan; otherwise only the preceding scan is used.
/// @param ms2_spec the MS2 spectrum to analyze
/// @param pState bookkeeping state holding the surrounding MS1 scan iterators
/// @return purity fraction; 1.0 if the precursor charge is unknown (0)
double IsobaricChannelExtractor::computePrecursorPurity_(const PeakMap::ConstIterator& ms2_spec, const PurityState_& pState) const
{
  // without a charge the isotope spacing is unknown -> assume perfect purity
  if (ms2_spec->getPrecursors()[0].getCharge() == 0)
  {
    return 1.0;
  }
#ifdef ISOBARIC_CHANNEL_EXTRACTOR_DEBUG
  std::cerr << "------------------ analyzing " << ms2_spec->getNativeID() << '\n';
#endif
  // purity with respect to the preceding MS1 scan
  const double early_scan_purity = computeSingleScanPrecursorPurity_(ms2_spec, *(pState.precursorScan));
  if (!(pState.hasFollowUpScan && interpolate_precursor_purity_))
  {
    return early_scan_purity;
  }
  const double late_scan_purity = computeSingleScanPrecursorPurity_(ms2_spec, *(pState.followUpScan));
  // time-weighted linear combination of the two MS1 scans (extrapolated S2I),
  // see: Savitski MM, Sweetman G, Askenazi M, Marto JA, Lang M, Zinn N, et al. (2011).
  // Analytical chemistry 83: 8959-67. http://www.ncbi.nlm.nih.gov/pubmed/22017476
  // std::fabs is applied to compensate for potentially negative RTs
  return early_scan_purity
         + std::fabs(ms2_spec->getRT() - pState.precursorScan->getRT()) *
           ((late_scan_purity - early_scan_purity) / std::fabs(pState.followUpScan->getRT() - pState.precursorScan->getRT()));
}
/// Extract reporter-channel intensities from all quantifiable scans of
/// @p ms_exp_data into @p consensus_map: one ConsensusFeature per quantified
/// scan, one FeatureHandle per channel. Scans are filtered by activation
/// method, precursor validity and precursor purity; the highest MS level with
/// valid scans (e.g. MS3 if present) is used for quantification. Per-channel
/// m/z calibration statistics are accumulated in channel_mz_delta.
/// @throws Exception::MissingInformation if the experiment has no scans or
///         precursor information is missing
/// @throws Exception::InvalidParameter if the spectra are not sorted by RT
void IsobaricChannelExtractor::extractChannels(const PeakMap& ms_exp_data, ConsensusMap& consensus_map)
{
  if (ms_exp_data.empty())
  {
    OPENMS_LOG_WARN << "The given file does not contain any conventional peak data, but might"
                       " contain chromatograms. This tool currently cannot handle them, sorry.\n";
    throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Experiment has no scans!");
  }
  // check if RT is sorted (we rely on it)
  if (!ms_exp_data.isSorted(false))
  {
    throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Spectra are not sorted in RT! Please sort them first!");
  }
  // clear the output map
  consensus_map.clear(false);
  consensus_map.setExperimentType("labeled_MS2");
  // create predicate for spectrum checking
  OPENMS_LOG_INFO << "Selecting scans with activation mode: " << selected_activation_ << '\n';
  // Select the two possible HCD activation modes according to PSI-MS ontology: HCID and HCD
  if (selected_activation_ == "auto")
  {
    selected_activation_ = Precursor::NamesOfActivationMethod[static_cast<size_t>(Precursor::ActivationMethod::HCID)] + "," + Precursor::NamesOfActivationMethod[static_cast<size_t>(Precursor::ActivationMethod::HCD)];
  }
  HasActivationMethod<PeakMap::SpectrumType> isValidActivation(ListUtils::create<String>(selected_activation_));
  // walk through spectra and count the number of scans with valid activation method per MS-level
  // only the highest level will be used for quantification (e.g. MS3, if present)
  std::map<UInt, UInt> ms_level;
  std::map<String, int> activation_modes;
  for (PeakMap::ConstIterator it = ms_exp_data.begin(); it != ms_exp_data.end(); ++it)
  {
    if (it->getMSLevel() == 1) continue; // never report MS1
    ++activation_modes[getActivationMethod_(*it)]; // count HCD, CID, ...
    if (selected_activation_ == "any" || isValidActivation(*it))
    {
      ++ms_level[it->getMSLevel()];
    }
  }
  if (ms_level.empty())
  {
    // nothing passed the filter: report what was found and bail out gracefully
    OPENMS_LOG_WARN << "Filtering by MS/MS(/MS) and activation mode: no spectra pass activation mode filter!\n"
                    << "Activation modes found:\n";
    for (std::map<String, int>::const_iterator it = activation_modes.begin(); it != activation_modes.end(); ++it)
    {
      OPENMS_LOG_WARN << "  mode " << (it->first.empty() ? "<none>" : it->first) << ": " << it->second << " scans\n";
    }
    OPENMS_LOG_WARN << "Result will be empty!\n";
    return;
  }
  OPENMS_LOG_INFO << "Filtering by MS/MS(/MS) and activation mode:\n";
  for (std::map<UInt, UInt>::const_iterator it = ms_level.begin(); it != ms_level.end(); ++it)
  {
    OPENMS_LOG_INFO << "  level " << it->first << ": " << it->second << " scans\n";
  }
  // quantify on the highest MS level that passed the filter
  UInt quant_ms_level = ms_level.rbegin()->first;
  OPENMS_LOG_INFO << "Using MS-level " << quant_ms_level << " for quantification.\n";
  // now we have picked data
  // --> assign peaks to channels
  UInt64 element_index(0);
  // remember the current precursor spectrum
  PurityState_ pState(ms_exp_data);
  PeakMap::ConstIterator it_last_MS2 = ms_exp_data.end(); // remember last MS2 spec, to get precursor in MS1 (also if quant is in MS3)
  bool ms3 = false;
  for (PeakMap::ConstIterator it = ms_exp_data.begin(); it != ms_exp_data.end(); ++it)
  {
    // remember the last MS1 spectra as we assume it to be the precursor spectrum
    if (it->getMSLevel() == 1)
    {
      // remember potential precursor and continue
      pState.precursorScan = it;
      // reset last MS2 -- we expect to see a new one soon and the old one should not be used for the following MS3 (if any)
      it_last_MS2 = ms_exp_data.end();
      continue;
    }
    if (it->getMSLevel() != quant_ms_level) continue;
    if ((*it).empty()) continue; // skip empty spectra
    if (!(selected_activation_ == "any" || isValidActivation(*it))) continue;
    // find following ms1 scan (needed for purity computation)
    if (!pState.followUpValid(it->getRT()))
    {
      // advance iterator
      pState.advanceFollowUp(it->getRT());
    }
    // check precursor constraints
    if (!isValidPrecursor_(it->getPrecursors()[0]))
    {
      OPENMS_LOG_DEBUG << "Skip spectrum " << it->getNativeID() << ": Precursor doesn't fulfill all constraints.\n";
      continue;
    }
    // check precursor purity if we have a valid precursor ..
    double precursor_purity = -1.0; // -1 == "not computed"
    if (pState.precursorScan != ms_exp_data.end())
    {
      precursor_purity = computePrecursorPurity_(it, pState);
      // check if purity is high enough
      if (precursor_purity < min_precursor_purity_)
      {
        OPENMS_LOG_DEBUG << "Skip spectrum " << it->getNativeID() << ": Precursor purity is below the threshold. [purity = " << precursor_purity << "]\n";
        continue;
      }
    }
    else
    {
      OPENMS_LOG_INFO << "No precursor available for spectrum: " << it->getNativeID() << '\n';
    }
    if (it->getMSLevel() == 3)
    {
      ms3 = true;
      // we cannot save just the last MS2 but need to compare to the precursor info stored in the (potential MS3 spectrum)
      it_last_MS2 = ms_exp_data.getPrecursorSpectrum(it);
      if (it_last_MS2 == ms_exp_data.end())
      { // this only happens if an MS3 spec does not have a preceding MS2
        throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("No MS2 precursor information given for MS3 scan native ID ") + it->getNativeID() + " with RT " + String(it->getRT()));
      }
    }
    else
    {
      it_last_MS2 = it;
    }
    // check if MS1 precursor info is available
    if (it_last_MS2->getPrecursors().empty())
    {
      throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("No precursor information given for scan native ID ") + it->getNativeID() + " with RT " + String(it->getRT()));
    }
    // store RT of MS2 scan and MZ of MS1 precursor ion as centroid of ConsensusFeature
    ConsensusFeature cf;
    cf.setUniqueId();
    cf.setRT(it_last_MS2->getRT());
    cf.setMZ(it_last_MS2->getPrecursors()[0].getMZ());
    Peak2D channel_value;
    channel_value.setRT(it->getRT());
    // for each each channel
    UInt64 map_index = 0;
    Peak2D::IntensityType overall_intensity = 0;
    for (IsobaricQuantitationMethod::IsobaricChannelList::const_iterator cl_it = quant_method_->getChannelInformation().begin();
         cl_it != quant_method_->getChannelInformation().end();
         ++cl_it)
    {
      // set mz-position of channel
      channel_value.setMZ(cl_it->center);
      // reset intensity
      channel_value.setIntensity(0);
      // as every evaluation requires time, we cache the MZEnd iterator
      const PeakMap::SpectrumType::ConstIterator mz_end = it->MZEnd(cl_it->center + qc_dist_mz);
      // search for the non-zero signal closest to theoretical position
      // & check for closest signal within reasonable distance (0.5 Da) -- might find neighbouring TMT channel, but that should not confuse anyone
      int peak_count(0); // count peaks in user window -- should be only one, otherwise Window is too large
      PeakMap::SpectrumType::ConstIterator idx_nearest(mz_end);
      for (PeakMap::SpectrumType::ConstIterator mz_it = it->MZBegin(cl_it->center - qc_dist_mz);
           mz_it != mz_end;
           ++mz_it)
      {
        if (mz_it->getIntensity() == 0) continue; // ignore 0-intensity shoulder peaks -- could be detrimental when de-calibrated
        double dist_mz = fabs(mz_it->getMZ() - cl_it->center);
        if (dist_mz < reporter_mass_shift_) ++peak_count;
        if (idx_nearest == mz_end // first peak
           || ((dist_mz < fabs(idx_nearest->getMZ() - cl_it->center)))) // closer to best candidate
        {
          idx_nearest = mz_it;
        }
      }
      if (idx_nearest != mz_end)
      {
        double mz_delta = cl_it->center - idx_nearest->getMZ();
        // stats: we don't care what shift the user specified
        channel_mz_delta[cl_it->name].mz_deltas.push_back(mz_delta);
        if (peak_count > 1) ++channel_mz_delta[cl_it->name].signal_not_unique;
        // pass user threshold
        if (std::fabs(mz_delta) < reporter_mass_shift_)
        {
          channel_value.setIntensity(idx_nearest->getIntensity());
        }
      }
      // discard contribution of this channel as it is below the required intensity threshold
      if (channel_value.getIntensity() < min_reporter_intensity_)
      {
        channel_value.setIntensity(0);
      }
      overall_intensity += channel_value.getIntensity();
      // add channel to ConsensusFeature
      cf.insert(map_index, channel_value, element_index);
      ++map_index;
    } // ! channel_iterator
    // check if we keep this feature or if it contains low-intensity quantifications
    if (remove_low_intensity_quantifications_ && hasLowIntensityReporter_(cf))
    {
      continue;
    }
    // check featureHandles are not empty
    if (overall_intensity <= 0)
    {
      cf.setMetaValue("all_empty", String("true"));
    }
    // add purity information if we could compute it
    if (precursor_purity > 0.0)
    {
      cf.setMetaValue("precursor_purity", precursor_purity);
    }
    // embed the id of the scan from which the quantitative information was extracted
    cf.setMetaValue("scan_id", it->getNativeID());
    // embed the id of the scan from which the ID information should be extracted
    // helpful for mapping later
    if (ms3)
    {
      cf.setMetaValue("id_scan_id", it_last_MS2->getNativeID());
    }
    // ...as well as additional meta information
    // NOTE(review): for MS3 quantification this stores the MS3 scan's precursor
    // intensity, not the MS1 precursor of it_last_MS2 — confirm this is intended.
    cf.setMetaValue("precursor_intensity", it->getPrecursors()[0].getIntensity());
    cf.setCharge(it_last_MS2->getPrecursors()[0].getCharge());
    cf.setIntensity(overall_intensity);
    consensus_map.push_back(cf);
    // the tandem-scan in the order they appear in the experiment
    ++element_index;
  } // ! Experiment iterator
  printStats();
  /// add meta information to the map
  registerChannelsInOutputMap(consensus_map);
}
/// Register one column header per isobaric channel in the consensus map,
/// carrying the channel's name, id, description and center m/z as meta values.
/// @param consensus_map output map to annotate (headers are appended after any existing ones)
/// @param filename optional source file name stored in each header
void IsobaricChannelExtractor::registerChannelsInOutputMap(ConsensusMap& consensus_map, const String& filename)
{
  // continue numbering after any column headers already present
  Int index = 0;
  const auto& existing_headers = consensus_map.getColumnHeaders();
  if (!existing_headers.empty())
  {
    index = existing_headers.rbegin()->first + 1;
  }
  for (const auto& channel : quant_method_->getChannelInformation())
  {
    ConsensusMap::ColumnHeader channel_as_map;
    if (!filename.empty())
    {
      channel_as_map.filename = filename;
    }
    // label combines the quantitation method name with the channel name
    channel_as_map.label = quant_method_->getMethodName() + "_" + channel.name;
    // TODO(aiche): number of features need to be set later
    channel_as_map.size = consensus_map.size();
    // attach channel meta data
    channel_as_map.setMetaValue("channel_name", channel.name);
    channel_as_map.setMetaValue("channel_id", channel.id);
    channel_as_map.setMetaValue("channel_description", channel.description);
    channel_as_map.setMetaValue("channel_center", channel.center);
    consensus_map.getColumnHeaders()[index] = channel_as_map;
    ++index;
  }
}
/// Extract the reporter intensities of one spectrum.
/// @param spec_idx index of the quantification spectrum within @p exp
/// @param exp the experiment holding the spectrum
/// @param channel_qc per channel: (m/z delta of the nearest signal, number of
///        peaks found inside the user-defined search window — should be 1)
/// @return one intensity per channel (0.0 where no signal passed the threshold)
std::vector<double> IsobaricChannelExtractor::extractSingleSpec(Size spec_idx, const MSExperiment& exp, std::vector<std::pair<double, unsigned>>& channel_qc)
{
  const auto& quant_spec = exp[spec_idx];
  std::vector<double> result(quant_method_->getNumberOfChannels(), 0.0);
  if (quant_spec.empty())
  {
    OPENMS_LOG_WARN << "Quant. spectrum " << quant_spec.getNativeID() << " is empty. Skipping extraction.\n";
    return result;
  }
  // TODO: restricting the search to the overall reporter region up front
  //       (and/or a plain linear scan over it) might be faster — revisit.
  unsigned channel_idx = 0;
  for (const auto& channel : quant_method_->getChannelInformation())
  {
    // theoretical m/z position of this channel's reporter ion
    const double reporter_mz = channel.center;
    // cache the end of the search window; every MZEnd evaluation costs time
    const PeakMap::SpectrumType::ConstIterator window_end = quant_spec.MZEnd(reporter_mz + qc_dist_mz);
    // find the non-zero signal closest to the theoretical position and count
    // the peaks inside the user window (more than one means the window is too wide;
    // within 0.5 Da we might even catch a neighbouring TMT channel)
    int peaks_in_window = 0;
    PeakMap::SpectrumType::ConstIterator nearest = window_end;
    for (auto peak_it = quant_spec.MZBegin(reporter_mz - qc_dist_mz); peak_it != window_end; ++peak_it)
    {
      if (peak_it->getIntensity() == 0) continue; // skip 0-intensity shoulder peaks -- could be detrimental when de-calibrated
      const double dist_mz = fabs(peak_it->getMZ() - reporter_mz);
      if (dist_mz < reporter_mass_shift_) ++peaks_in_window;
      // first candidate, or closer than the best so far
      if (nearest == window_end || dist_mz < fabs(nearest->getMZ() - reporter_mz))
      {
        nearest = peak_it;
      }
    }
    channel_qc[channel_idx].second = peaks_in_window;
    if (nearest != window_end)
    {
      const double mz_delta = reporter_mz - nearest->getMZ();
      // record the delta for calibration stats regardless of the user threshold
      channel_qc[channel_idx].first = mz_delta;
      // only accept the intensity if it passes the user threshold
      if (fabs(mz_delta) < reporter_mass_shift_)
      {
        result[channel_idx] = nearest->getIntensity();
      }
    }
    ++channel_idx;
  } // ! channel loop
  return result;
}
/// Print calibration statistics for the deltas accumulated in the member
/// channel_mz_delta (convenience overload).
void IsobaricChannelExtractor::printStats()
{
  printStats(channel_mz_delta);
}
/// Log per-channel m/z calibration statistics: the median distance of observed
/// reporter signals to their theoretical position, plus a warning when more
/// than one peak fell into the user search window (possible MSn impurity).
/// For TMT-10/11plex, medians beyond the channel tolerance are reported as
/// invalid since they most likely stem from a neighbouring channel.
/// @param stats per-channel QC data (non-const: Math::median may partially sort the deltas)
void IsobaricChannelExtractor::printStats(ChannelQCSet& stats) const
{
  const auto number_of_channels = quant_method_->getNumberOfChannels();
  // print stats about m/z calibration / presence of signal
  OPENMS_LOG_INFO << "Calibration stats: Median distance of observed reporter ions m/z to expected position (up to " << qc_dist_mz << " Th):\n";
  bool impurities_found(false);
  for (IsobaricQuantitationMethod::IsobaricChannelList::const_iterator cl_it = quant_method_->getChannelInformation().begin();
       cl_it != quant_method_->getChannelInformation().end();
       ++cl_it)
  {
    OPENMS_LOG_INFO << "  ch " << String(cl_it->name).fillRight(' ', 4) << " (~" << String(cl_it->center).substr(0, 7).fillRight(' ', 7) << "): ";
    if (stats.find(cl_it->name) != stats.end())
    {
      // sort
      double median = Math::median(stats[cl_it->name].mz_deltas.begin(), stats[cl_it->name].mz_deltas.end(), false);
      if (((number_of_channels == 10) || (number_of_channels == 11)) &&
          (fabs(median) > TMT_10AND11PLEX_CHANNEL_TOLERANCE) &&
          (int(cl_it->center) != 126 && int(cl_it->center) != 131)) // these two channels have ~1 Th spacing.. so they do not suffer from the tolerance problem
      { // the channel was most likely empty, and we picked up the neighbouring channel's data (~0.006 Th apart). So reporting median here is misleading.
        OPENMS_LOG_INFO << "<invalid data (>" << TMT_10AND11PLEX_CHANNEL_TOLERANCE << " Th channel tolerance)>\n";
      }
      else
      {
        OPENMS_LOG_INFO << median << " Th";
        if (stats[cl_it->name].signal_not_unique > 0)
        {
          OPENMS_LOG_INFO << " [MSn impurity (within " << reporter_mass_shift_ << " Th): " << stats[cl_it->name].signal_not_unique << " windows|spectra]";
          impurities_found = true;
        }
        OPENMS_LOG_INFO << "\n";
      }
    }
    else
    {
      OPENMS_LOG_INFO << "<no data>\n";
    }
  }
  if (impurities_found) OPENMS_LOG_INFO << "\nImpurities within the allowed reporter mass shift " << reporter_mass_shift_ << " Th have been found."
                                        << "They can be ignored if the spectra are m/z calibrated (see above), since only the peak closest to the theoretical position is used for quantification!";
  OPENMS_LOG_INFO << '\n';
}
/**
 * @brief Print per-channel calibration statistics, including the number of spectra without signal.
 *
 * NaN entries in @p stats mark spectra where no reporter signal was found; they are
 * removed (counted as 'missing') before the medians are computed.
 *
 * @param stats One ChannelQC per channel, in channel order. Modified in place:
 *              NaNs are removed and the remaining deltas are replaced by their absolute values.
 */
void IsobaricChannelExtractor::printStatsWithMissing(std::vector<ChannelQC>& stats) const
{
  Size number_of_channels = quant_method_->getNumberOfChannels();
  // print stats about m/z calibration / presence of signal
  // (the second "(up to" previously lacked its opening parenthesis, producing unbalanced output)
  OPENMS_LOG_INFO << "Calibration stats (up to " << qc_dist_mz << " m/z), Impurities (up to " << reporter_mass_shift_ << " m/z):\n";
  bool impurities_found(false);
  Size channel_nr = 0;
  for (const auto& channel : quant_method_->getChannelInformation())
  {
    OPENMS_LOG_INFO << " ch " << String(channel.name).fillRight(' ', 4) << " (~" << String(channel.center).substr(0, 7).fillRight(' ', 7) << "): ";
    auto& cur_deltas = stats[channel_nr].mz_deltas;
    Size old_size = cur_deltas.size();
    // filter out NaN in mz_deltas (each NaN corresponds to a spectrum without signal)
    cur_deltas.erase(std::remove_if(cur_deltas.begin(), cur_deltas.end(), [](double value) {
      return std::isnan(value);
    }), cur_deltas.end());
    Size missing = old_size - cur_deltas.size();
    // median of the signed deltas (sorts the data as a side effect)
    double median = Math::median(cur_deltas.begin(), cur_deltas.end(), false);
    // transform to absolute value (modifies the caller's data in place)
    std::transform(cur_deltas.begin(), cur_deltas.end(), cur_deltas.begin(), [](const auto& v){return std::abs(v);});
    // NOTE(review): printed as "MAD" below, but this is the median of |delta|,
    // not the median absolute deviation around the median -- confirm intent.
    double abs_median = Math::median(cur_deltas.begin(), cur_deltas.end(), false);
    if (((number_of_channels == 10) || (number_of_channels == 11)) &&
        (abs_median > TMT_10AND11PLEX_CHANNEL_TOLERANCE) &&
        (int(channel.center) != 126 && int(channel.center) != 131)) // these two channels have ~1 Th spacing.. so they do not suffer from the tolerance problem
    { // the channel was most likely empty, and we picked up the neighbouring channel's data (~0.006 Th apart). So reporting median here is misleading.
      OPENMS_LOG_INFO << "<invalid data (>" << TMT_10AND11PLEX_CHANNEL_TOLERANCE << " Th channel tolerance)>\n";
    }
    else
    {
      OPENMS_LOG_INFO << "Median error: " << median << " m/z |";
      OPENMS_LOG_INFO << " MAD: " << abs_median << " m/z |";
      OPENMS_LOG_INFO << " #impurity peaks: " << stats[channel_nr].signal_not_unique << "|";
      if (stats[channel_nr].signal_not_unique > 0)
      {
        impurities_found = true;
      }
      OPENMS_LOG_INFO << " #missing: " << missing << "\n";
    }
    ++channel_nr;
  }
  if (impurities_found)
  {
    // leading space added: the previous sentence ends with '.' and the two literals were glued together
    OPENMS_LOG_INFO << "\nImpurities within the allowed reporter mass shift " << reporter_mass_shift_ << " m/z have been found."
                    << " They can be ignored if the spectra are m/z calibrated (see above), since only the peak closest to the theoretical position is used for quantification!";
  }
  OPENMS_LOG_INFO << '\n';
}
/**
 * @brief Clears the accumulated channel statistics, e.g., after a new experiment has been loaded.
 */
void IsobaricChannelExtractor::clearStats()
{
  channel_mz_delta.clear();
}
/**
 * @brief Returns the channel statistics (m/z deltas and impurity counts) accumulated during extraction.
 *
 * NOTE(review): the previous comment ("Clears channel statistics...") was a
 * copy-paste of clearStats()'s documentation and did not describe this getter.
 */
ChannelQCSet& IsobaricChannelExtractor::getStats()
{
  return channel_mz_delta;
}
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/QUANTITATION/DDAWorkflowCommons.cpp | .cpp | 9,725 | 224 | // Copyright (c) 2002-present, The OpenMS Team -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/QUANTITATION/DDAWorkflowCommons.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/DATASTRUCTURES/String.h>
#include <OpenMS/DATASTRUCTURES/StringListUtils.h>
#include <OpenMS/SYSTEM/File.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/FEATUREFINDER/MassTraceDetection.h>
#include <OpenMS/PROCESSING/FILTERING/ThresholdMower.h>
#include <OpenMS/PROCESSING/CALIBRATION/InternalCalibration.h>
#include <OpenMS/PROCESSING/CALIBRATION/MZTrafoModel.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/FEATUREFINDER/FeatureFinderMultiplexAlgorithm.h>
#include <map>
#include <vector>
using namespace std;
namespace OpenMS
{
/// Inverts an mzML-file -> ID-file mapping into an ID-file -> mzML-file mapping.
/// If the same ID file appears for several mzML files, the last one wins
/// (matching std::map operator[] assignment semantics).
std::map<String, String> DDAWorkflowCommons::mapId2MzMLs(const std::map<String, String>& m2i)
{
  std::map<String, String> inverted;
  for (const auto& [mz_file, id_file] : m2i)
  {
    inverted[id_file] = mz_file;
  }
  return inverted;
}
/**
 * @brief Pairs spectra (mzML) files with identification files by list position.
 *
 * Validates the two lists first (basenames only, extensions ignored):
 * - different sizes -> fatal error (certainly wrong input)
 * - same names but different order -> fatal error (certainly wrong input)
 * - same size but different names -> accepted for now; the spectra filenames
 *   will be inferred from the ID files later.
 *
 * @param in     Spectra file paths.
 * @param in_ids Identification file paths (same order as @p in).
 * @return Map from absolute mzML path to absolute ID file path.
 * @throws Exception::IllegalArgument on size or order mismatch.
 */
std::map<String, String> DDAWorkflowCommons::mapMzML2Ids(StringList & in, StringList & in_ids)
{
  // helper: dump both file lists to the debug log (shared by the mismatch branches;
  // note: one header line was previously logged at WARN level -- DEBUG is used consistently now)
  auto debugLogFileLists = [&]()
  {
    OPENMS_LOG_DEBUG << "File in spectra file list: " << std::endl;
    for (const auto& f : in)
    {
      OPENMS_LOG_DEBUG << f << std::endl;
    }
    OPENMS_LOG_DEBUG << "File in ID file list: " << std::endl;
    for (const auto& f : in_ids)
    {
      OPENMS_LOG_DEBUG << f << std::endl;
    }
  };
  // validate file lists (use only basename and ignore extension)
  auto validation_result = File::validateMatchingFileNames(in, in_ids, true, true);
  // we try to fail early (without parsing files) if the input is obviously wrong
  // check for two major mistakes:
  // 1. different number of files (-> certainly wrong)
  // 2. same number of files but different order (-> certainly wrong)
  // If some files differ in names, we can't be sure at this point and skip this test for now.
  // We need to look into the ID files to infer the spectra filenames later to be sure a mistake was made.
  switch (validation_result)
  {
    case File::MatchingFileListsStatus::SET_MISMATCH:
      if (in.size() != in_ids.size())
      {
        OPENMS_LOG_FATAL_ERROR << "ID and spectra file lists differ in size. Please provide the same number of files for spectra and ID." << std::endl;
        throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "ID and spectra file lists differ in size. Please provide the same number of files for spectra and ID.");
      }
      else
      { // same number of files but filenames differ (we will try to read the spectra filenames from the id files later)
        OPENMS_LOG_DEBUG << "ID and spectra file lists differ. Please provide the same files in the same order." << std::endl;
        debugLogFileLists();
        OPENMS_LOG_DEBUG << "Will try to infer spectra filenames from id files later." << std::endl;
      }
      break;
    case File::MatchingFileListsStatus::ORDER_MISMATCH:
      OPENMS_LOG_DEBUG << "ID and spectra file match but order of file names seem to differ. Please provide the same files in the same order." << std::endl;
      debugLogFileLists();
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
        "ID and spectra file match but order of file names seem to differ. They need to be provided in the same order.");
    case File::MatchingFileListsStatus::MATCH:
      OPENMS_LOG_INFO << "Info: ID files have the same names as spectra files." << std::endl;
      break;
  }
  // build the pairing by position, using absolute paths as stable keys
  map<String, String> mzfile2idfile;
  for (Size i = 0; i != in.size(); ++i)
  {
    const String& in_abs_path = File::absolutePath(in[i]);
    const String& id_abs_path = File::absolutePath(in_ids[i]);
    mzfile2idfile[in_abs_path] = id_abs_path;
    OPENMS_LOG_DEBUG << "Spectra: " << in[i] << "\t Ids: " << in_ids[i] << std::endl;
  }
  return mzfile2idfile;
}
/// Estimates the median chromatographic peak width (FWHM, in seconds) by
/// detecting up to the 1000 most intense mass traces and taking the median
/// of their individual FWHM estimates.
double DDAWorkflowCommons::estimateMedianChromatographicFWHM(MSExperiment & ms_centroided)
{
  MassTraceDetection trace_detector;
  OPENMS_LOG_DEBUG << "Parameters passed to MassTraceDetection" << trace_detector.getParameters() << std::endl;
  std::vector<MassTrace> traces;
  trace_detector.run(ms_centroided, traces, 1000); // cap at 1000 traces
  std::vector<double> fwhm_values;
  fwhm_values.reserve(traces.size());
  for (auto& trace : traces)
  {
    if (trace.getSize() == 0) { continue; } // skip empty traces
    // update statistics required by estimateFWHM()
    trace.updateMeanMZ();
    trace.updateWeightedMZsd();
    fwhm_values.push_back(trace.estimateFWHM(false));
  }
  return Math::median(fwhm_values.begin(), fwhm_values.end());
}
/**
 * @brief Recalibrates MS1 m/z values using identified peptides as calibrants.
 *
 * Fills calibration points from @p peptide_ids (outlier threshold 25 ppm),
 * fits a linear model per 5-minute RT chunk using RANSAC, and applies the
 * correction to @p ms_centroided in place. Silently returns if fewer than
 * two calibration points are available.
 *
 * @param ms_centroided    Centroided spectra; modified in place on success.
 * @param peptide_ids      Identifications used as calibrants.
 * @param id_file_abs_path If non-empty, its basename is used to derive QC
 *                         output filenames (residual TSV/PNG).
 */
void DDAWorkflowCommons::recalibrateMS1(MSExperiment & ms_centroided,
  PeptideIdentificationList& peptide_ids,
  const String & id_file_abs_path )
{
  InternalCalibration ic;
  // ic.setLogType(log_type_);
  ic.fillCalibrants(peptide_ids, 25.0); // >25 ppm maximum deviation defines an outlier TODO: check if we need to adapt this
  // a model needs at least two points; otherwise leave the data untouched
  if (ic.getCalibrationPoints().size() <= 1) return;
  // choose calibration model based on number of calibration points
  // there seem to be some problems with the QUADRATIC model that we first need to investigate
  //MZTrafoModel::MODELTYPE md = (ic.getCalibrationPoints().size() == 2) ? MZTrafoModel::MODELTYPE::LINEAR : MZTrafoModel::MODELTYPE::QUADRATIC;
  //bool use_RANSAC = (md == MZTrafoModel::MODELTYPE::LINEAR || md == MZTrafoModel::MODELTYPE::QUADRATIC);
  MZTrafoModel::MODELTYPE md = MZTrafoModel::MODELTYPE::LINEAR;
  bool use_RANSAC = true;
  // a linear model is determined by 2 points, a quadratic one by 3
  Size RANSAC_initial_points = (md == MZTrafoModel::MODELTYPE::LINEAR) ? 2 : 3;
  Math::RANSACParam p(RANSAC_initial_points, 70, 10, 30, true); // TODO: check defaults (taken from tool)
  MZTrafoModel::setRANSACParams(p);
  // these limits are a little loose, but should prevent grossly wrong models without burdening the user with yet another parameter.
  MZTrafoModel::setCoefficientLimits(25.0, 25.0, 0.5);
  IntList ms_level = {1}; // calibrate MS1 level only
  double rt_chunk = 300.0; // 5 minutes
  // derive QC output filenames from the ID file's basename (if one was given)
  String qc_residual_path, qc_residual_png_path;
  if (!id_file_abs_path.empty())
  {
    const String & id_basename = File::basename(id_file_abs_path);
    qc_residual_path = id_basename + "qc_residuals.tsv";
    qc_residual_png_path = id_basename + "qc_residuals.png";
  }
  // NOTE(review): the two unnamed numeric arguments (10.0, 5.0) and the two
  // empty strings are positional parameters of InternalCalibration::calibrate
  // -- presumably tolerance/outlier settings and optional output paths;
  // confirm against the InternalCalibration header before changing them.
  if (!ic.calibrate(ms_centroided,
    ms_level, md, rt_chunk, use_RANSAC,
    10.0,
    5.0,
    "",
    "",
    qc_residual_path,
    qc_residual_png_path,
    "Rscript"))
  {
    OPENMS_LOG_WARN << "\nCalibration failed. See error message above!" << std::endl;
  }
}
void DDAWorkflowCommons::calculateSeeds(
const MSExperiment & ms_centroided,
const double intensity_threshold,
FeatureMap & seeds,
double median_fwhm,
Size charge_min,
Size charge_max
)
{
//TODO: Actually FFM provides a parameter for minimum intensity. Also it copies the full experiment again once or twice.
MSExperiment e;
for (const auto& s : ms_centroided)
{
if (s.getMSLevel() == 1)
{
e.addSpectrum(s);
}
}
ThresholdMower threshold_mower_filter;
Param tm = threshold_mower_filter.getParameters();
tm.setValue("threshold", intensity_threshold); // TODO: derive from data
threshold_mower_filter.setParameters(tm);
threshold_mower_filter.filterPeakMap(e);
FeatureFinderMultiplexAlgorithm algorithm;
Param p = algorithm.getParameters();
p.setValue("algorithm:labels", ""); // unlabeled only
p.setValue("algorithm:charge", String(charge_min) + ":" + String(charge_max));
p.setValue("algorithm:rt_typical", median_fwhm * 3.0);
p.setValue("algorithm:rt_band", 3.0); // max 3 seconds shifts between isotopic traces (not sure if needed)
p.setValue("algorithm:rt_min", median_fwhm * 0.5);
p.setValue("algorithm:spectrum_type", "centroid");
algorithm.setParameters(p);
//FIXME progress of FFM is not printed at all
const bool progress(true);
algorithm.run(e, progress);
seeds = algorithm.getFeatureMap();
OPENMS_LOG_INFO << "Using " << String(seeds.size()) << " seeds from untargeted feature extraction." << std::endl;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/QUANTITATION/ItraqEightPlexQuantitationMethod.cpp | .cpp | 6,722 | 152 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Stephan Aiche $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/QUANTITATION/ItraqEightPlexQuantitationMethod.h>
#include <OpenMS/DATASTRUCTURES/Matrix.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <OpenMS/CONCEPT/LogStream.h>
namespace OpenMS
{
// unique method identifier, returned by getMethodName()
const String ItraqEightPlexQuantitationMethod::name_ = "itraq8plex";
/// Default constructor: registers the eight iTRAQ reporter channels
/// (113-119, 121; there is no channel 120) with their centers and the
/// indices of the channels affected by -2/-1/+1/+2 Da isotope impurities.
ItraqEightPlexQuantitationMethod::ItraqEightPlexQuantitationMethod()
{
  // fixed copy-paste from the 4plex class: this handler must carry its own name
  setName("ItraqEightPlexQuantitationMethod");

  // create the channel map
  channels_.push_back(IsobaricChannelInformation("113", 0, "", 113.1078, {-1, -1, 1, 2}));
  channels_.push_back(IsobaricChannelInformation("114", 1, "", 114.1112, {-1, 0, 2, 3}));
  channels_.push_back(IsobaricChannelInformation("115", 2, "", 115.1082, {0, 1, 3, 4}));
  channels_.push_back(IsobaricChannelInformation("116", 3, "", 116.1116, {1, 2, 4, 5}));
  channels_.push_back(IsobaricChannelInformation("117", 4, "", 117.1149, {2, 3, 5, 6}));
  channels_.push_back(IsobaricChannelInformation("118", 5, "", 118.1120, {3, 4, 6, 7}));
  channels_.push_back(IsobaricChannelInformation("119", 6, "", 119.1153, {4, 5, -1, 7}));
  channels_.push_back(IsobaricChannelInformation("121", 7, "", 121.1220, {6, -1, -1, -1}));

  // we assume 113 to be the reference (index 0; the old comment claimed 114, contradicting the code)
  reference_channel_ = 0;

  setDefaultParams_();
}
// defaulted destructor: no resources owned beyond the value-type members
ItraqEightPlexQuantitationMethod::~ItraqEightPlexQuantitationMethod() = default;
/// Copy constructor: copies base-class state, the channel list and the reference channel.
ItraqEightPlexQuantitationMethod::ItraqEightPlexQuantitationMethod(const ItraqEightPlexQuantitationMethod& other):
  IsobaricQuantitationMethod(other)
{
  // plain assignment replaces the old clear() + insert() sequence
  channels_ = other.channels_;
  reference_channel_ = other.reference_channel_;
}
/// Copy assignment: mirrors the copy constructor (which copies the base class),
/// so the base-class state is assigned here as well -- previously it was skipped,
/// leaving e.g. the parameter state of '*this' unchanged.
ItraqEightPlexQuantitationMethod& ItraqEightPlexQuantitationMethod::operator=(const ItraqEightPlexQuantitationMethod& rhs)
{
  if (this == &rhs)
    return *this;

  IsobaricQuantitationMethod::operator=(rhs); // keep base state in sync with the copy ctor
  channels_ = rhs.channels_;
  reference_channel_ = rhs.reference_channel_;

  return *this;
}
/// Registers the default parameters: one description per channel, the
/// reference channel (113-121, 120 excluded) and the isotope correction matrix.
void ItraqEightPlexQuantitationMethod::setDefaultParams_()
{
  // one free-text description parameter per reporter channel (note: there is no channel 120)
  for (const std::string ch : {"113", "114", "115", "116", "117", "118", "119", "121"})
  {
    defaults_.setValue("channel_" + ch + "_description", "", "Description for the content of the " + ch + " channel.");
  }

  defaults_.setValue("reference_channel", 113, "Number of the reference channel (113-121). Please note that 120 is not valid.");
  defaults_.setMinInt("reference_channel", 113);
  defaults_.setMaxInt("reference_channel", 121);

  // default isotope impurity values per channel (113..119, 121):
  // {0.00, 0.00, 6.89, 0.22}, //113
  // {0.00, 0.94, 5.90, 0.16},
  // {0.00, 1.88, 4.90, 0.10},
  // {0.00, 2.82, 3.90, 0.07},
  // {0.06, 3.77, 2.99, 0.00},
  // {0.09, 4.71, 1.88, 0.00},
  // {0.14, 5.66, 0.87, 0.00},
  // {0.27, 7.44, 0.18, 0.00} //121
  defaults_.setValue("correction_matrix", std::vector<std::string>{"0.00/0.00/6.89/0.22", //113
                                                                   "0.00/0.94/5.90/0.16",
                                                                   "0.00/1.88/4.90/0.10",
                                                                   "0.00/2.82/3.90/0.07",
                                                                   "0.06/3.77/2.99/0.00",
                                                                   "0.09/4.71/1.88/0.00",
                                                                   "0.14/5.66/0.87/0.00",
                                                                   "0.27/7.44/0.18/0.00"}, //121
                     "Correction matrix for isotope distributions (see documentation); use the following format: <-2Da>/<-1Da>/<+1Da>/<+2Da>; e.g. '0/0.3/4/0', '0.1/0.3/3/0.2'");

  defaultsToParam_();
}
void ItraqEightPlexQuantitationMethod::updateMembers_()
{
channels_[0].description = param_.getValue("channel_113_description").toString();
channels_[1].description = param_.getValue("channel_114_description").toString();
channels_[2].description = param_.getValue("channel_115_description").toString();
channels_[3].description = param_.getValue("channel_116_description").toString();
channels_[4].description = param_.getValue("channel_117_description").toString();
channels_[5].description = param_.getValue("channel_118_description").toString();
channels_[6].description = param_.getValue("channel_119_description").toString();
channels_[7].description = param_.getValue("channel_121_description").toString();
// compute the index of the reference channel
Int ref_ch = param_.getValue("reference_channel");
if (ref_ch == 121)
{
reference_channel_ = 7;
}
else if (ref_ch == 120)
{
OPENMS_LOG_WARN << "Invalid channel selection." << std::endl;
}
else
{
reference_channel_ = ref_ch - 113;
}
}
/// Returns the unique method identifier ("itraq8plex").
const String& ItraqEightPlexQuantitationMethod::getMethodName() const
{
  return ItraqEightPlexQuantitationMethod::name_;
}
/// Returns the list of the eight reporter channels (set up in the constructor).
const IsobaricQuantitationMethod::IsobaricChannelList& ItraqEightPlexQuantitationMethod::getChannelInformation() const
{
  return channels_;
}
/// Returns the number of reporter channels (fixed at 8 for iTRAQ 8plex).
Size ItraqEightPlexQuantitationMethod::getNumberOfChannels() const
{
  return 8;
}
/// Builds the isotope correction matrix from the user-configurable
/// 'correction_matrix' parameter (one "<-2Da>/<-1Da>/<+1Da>/<+2Da>" string per channel).
Matrix<double> ItraqEightPlexQuantitationMethod::getIsotopeCorrectionMatrix() const
{
  const StringList corrections = ListUtils::toStringList<std::string>(getParameters().getValue("correction_matrix"));
  return stringListToIsotopeCorrectionMatrix_(corrections);
}
/// Returns the 0-based index of the reference channel (derived from the
/// 'reference_channel' parameter in updateMembers_()).
Size ItraqEightPlexQuantitationMethod::getReferenceChannel() const
{
  return reference_channel_;
}
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/QUANTITATION/PeptideAndProteinQuant.cpp | .cpp | 50,252 | 1,306 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hendrik Weisser $
// --------------------------------------------------------------------------
//
#include <OpenMS/ANALYSIS/QUANTITATION/PeptideAndProteinQuant.h>
#include <OpenMS/CHEMISTRY/AASequence.h>
#include <OpenMS/CHEMISTRY/EnzymaticDigestion.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/DATASTRUCTURES/DataValue.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <OpenMS/DATASTRUCTURES/StringView.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/METADATA/ExperimentalDesign.h>
#include <OpenMS/METADATA/PeptideHit.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <OpenMS/METADATA/ProteinIdentification.h>
#include <OpenMS/SYSTEM/File.h>
#include <algorithm>
using namespace std;
namespace OpenMS
{
/// Default constructor: registers all algorithm parameters (quantification
/// method, top-N settings, charge/fraction handling, consensus options).
/// Fixes two user-facing description strings: a missing space after
/// "With 'N 0'," and the typo "co-ocurring".
PeptideAndProteinQuant::PeptideAndProteinQuant() :
  DefaultParamHandler("PeptideAndProteinQuant"), stats_(), pep_quant_(),
  prot_quant_()
{
  std::vector<std::string> true_false = {"true","false"};

  defaults_.setValue("method", "top", "- top - quantify based on three most abundant peptides (number can be changed in 'top').\n- iBAQ (intensity based absolute quantification), calculate the sum of all peptide peak intensities divided by the number of theoretically observable tryptic peptides (https://rdcu.be/cND1J). Warning: only consensusXML or featureXML input is allowed!");
  defaults_.setValidStrings("method", {"top","iBAQ"});

  defaults_.setValue("top:N", 3, "Calculate protein abundance from this number of proteotypic peptides (most abundant first; '0' for all)");
  defaults_.setMinInt("top:N", 0);

  defaults_.setValue("top:aggregate", "median", "Aggregation method used to compute protein abundances from peptide abundances");
  defaults_.setValidStrings("top:aggregate", {"median","mean","weighted_mean","sum"});

  defaults_.setValue("top:include_all", "false", "Include results for proteins with fewer proteotypic peptides than indicated by 'N' (no effect if 'N' is 0 or 1)");
  defaults_.setValidStrings("top:include_all", true_false);

  defaults_.setSectionDescription("top", "Additional options for custom quantification using top N peptides.");

  defaults_.setValue("best_charge_and_fraction", "false", "Distinguish between fraction and charge states of a peptide. For peptides, abundances will be reported separately for each fraction and charge;\nfor proteins, abundances will be computed based only on the most prevalent charge observed of each peptide (over all fractions).\nBy default, abundances are summed over all charge states.");
  defaults_.setValidStrings("best_charge_and_fraction", true_false);

  defaults_.setValue("consensus:normalize", "false", "Scale peptide abundances so that medians of all samples are equal.");
  defaults_.setValidStrings("consensus:normalize", true_false);

  defaults_.setValue("consensus:fix_peptides", "false", "Use the same peptides for protein quantification across all samples.\nWith 'N 0', "
                     "all peptides that occur in every sample are considered.\nOtherwise ('N'), the N peptides that occur in the most samples (independently of each other) are selected,\nbreaking ties by total abundance (there is no guarantee that the best co-occurring peptides are chosen!).");
  defaults_.setValidStrings("consensus:fix_peptides", true_false);

  defaults_.setSectionDescription("consensus", "Additional options for consensus maps (and identification results comprising multiple runs)");

  defaultsToParam_();
}
// doesn't only count but also some initialization TODO: rename
// For every identification with hits: sorts the hits (best first), counts one
// PSM for the best hit's sequence and records the proteins it maps to.
void PeptideAndProteinQuant::countPeptides_(
  PeptideIdentificationList& peptides)
{
  for (auto& pep_id : peptides)
  {
    if (pep_id.getHits().empty()) { continue; }
    pep_id.sort(); // TODO: move this out of count peptides
    const PeptideHit& best_hit = pep_id.getHits()[0]; // best hit after sorting
    PeptideData& entry = pep_quant_[best_hit.getSequence()];
    entry.psm_count++;
    // remember every protein accession this peptide is associated with
    const set<String> protein_accessions = best_hit.extractProteinAccessionsSet();
    entry.accessions.insert(protein_accessions.begin(), protein_accessions.end());
  }
}
/**
 * @brief Returns the consensus annotation (best hit) for a set of identifications.
 *
 * Returns an empty PeptideHit if there are no identifications/hits, or if the
 * identifications disagree on the best-scoring sequence (ambiguous annotation).
 * Requires hits to be sorted by score already (done in countPeptides_()).
 */
PeptideHit PeptideAndProteinQuant::getAnnotation_(
  PeptideIdentificationList& peptides)
{
  // hits in IDs must already be sorted by score! (done in "countPeptides_")
  if (peptides.empty() || peptides[0].getHits().empty()) return {};
  // get best hit
  const PeptideHit& hit = peptides[0].getHits()[0];
  // check for ambiguities
  for (auto pep_it = ++peptides.begin();
       pep_it != peptides.end(); ++pep_it)
  {
    // previously 'getHits()[0]' was read unconditionally -- undefined behavior
    // when a later identification carries no hits; skip such entries instead
    if (pep_it->getHits().empty()) continue;
    const PeptideHit& current = pep_it->getHits()[0];
    if (current.getSequence() != hit.getSequence())
    {
      // TODO?: warn/error that ambiguous sequences are annotated. check if this can happen
      return {};
    }
  }
  return hit;
}
/**
 * @brief Adds a feature's intensity to the abundance of its annotated peptide.
 *
 * Accumulates into the nested map
 * pep_quant_[sequence].abundances[fraction][filename][charge][channel].
 *
 * @param feature          Quantified feature (intensity source).
 * @param fraction         Fraction the feature was measured in.
 * @param filename         Basename of the originating raw/mzML file.
 * @param hit              Annotation; a default-constructed hit means
 *                         "ambiguous or missing" and is skipped.
 * @param channel_or_label Isobaric channel or label index.
 */
void PeptideAndProteinQuant::quantifyFeature_(const FeatureHandle& feature,
                                              const size_t fraction,
                                              const String& filename,
                                              const PeptideHit& hit,
                                              UInt channel_or_label)
{
  // return if annotation for the feature is ambiguous or missing
  if (hit == PeptideHit()) { return; }
  stats_.quant_features++;
  const AASequence& seq = hit.getSequence();
  //TODO The practice of inserting elements with the [] should be forbidden.
  // It is a debugging nightmare because if you try to access it and it is
  // not there, you are adding another element. In a next iteration this whole
  // class should be rewritten to use insert/emplace and find or better yet,
  // since we have "normal" 0-based values for samples now, vectors.
  pep_quant_[seq].abundances[fraction][filename][hit.getCharge()][channel_or_label] +=
    feature.getIntensity(); // new map element is initialized with 0
}
/// Finds the (fraction, filename, charge, channel) combination with the highest
/// positive abundance; on exact ties, the first combination encountered wins.
/// Returns false (with 'best' left at its default) if no positive abundance exists.
bool PeptideAndProteinQuant::getBest_(const std::map<Int, std::map<String, std::map<Int, std::map<UInt, double>>>>& peptide_abundances, std::tuple<size_t, String, size_t, UInt>& best)
{
  best = std::make_tuple(0, "", 0, 0);
  bool found = false;
  double best_abundance = 0;
  for (const auto& [fraction, by_filename] : peptide_abundances)
  {
    for (const auto& [filename, by_charge] : by_filename)
    {
      for (const auto& [charge, by_channel] : by_charge)
      {
        for (const auto& [channel, abundance] : by_channel)
        {
          if (abundance <= 0) { continue; } // identified but not quantified
          // strict '>' keeps the first combination on ties, matching the old logic
          if (!found || abundance > best_abundance)
          {
            best_abundance = abundance;
            best = std::make_tuple(fraction, filename, charge, channel);
            found = true;
          }
        }
      }
    }
  }
  return found; // true if at least one positive abundance was seen
}
size_t PeptideAndProteinQuant::getSampleIDFromFilenameAndChannel_(const String& filename,
UInt channel_or_label,
const ExperimentalDesign& ed) const
{
// Map filename and label to sample using experimental design
const auto& ms_section = ed.getMSFileSection();
for (const auto& entry : ms_section)
{
String ed_filename = FileHandler::stripExtension(File::basename(entry.path));
if (ed_filename == filename && entry.label == channel_or_label)
{
return entry.sample;
}
}
// If not found, throw an exception with detailed information
throw Exception::MissingInformation(
__FILE__,
__LINE__,
OPENMS_PRETTY_FUNCTION,
"Could not find sample mapping for filename '" + filename + "' and channel '" + String(channel_or_label) + "' in experimental design.");
}
/**
 * @brief Computes per-sample peptide abundances from previously collected feature data.
 *
 * Steps:
 * 1. If protein inference results are given in @p peptides, keep only quantified
 *    peptides supported by the inference and adopt the inferred accessions.
 * 2. Aggregate feature intensities into per-sample abundances -- either from the
 *    single best (fraction, file, charge, channel) combination
 *    ('best_charge_and_fraction' = true) or summed over all combinations.
 * 3. Accumulate PSM counts per sample (for spectral counting).
 * 4. Optionally scale abundances so that all sample medians agree
 *    ('consensus:normalize' = true).
 *
 * @param peptides Peptide-level results from protein inference (may be empty).
 */
void PeptideAndProteinQuant::quantifyPeptides(
  const PeptideIdentificationList& peptides)
{
  OPENMS_LOG_INFO << "Quantifying peptides..." << std::endl;

  //////////////////////////////////////////////////////
  // first, use peptide-level results from protein inference:
  // - remove peptides not supported by inference results
  // - update protein accessions according to inference results

  // mapping: peptide seq. (unmodified) -> protein accessions
  // (in protXML, only unmodified peptides are listed)
  map<String, set<String> > pep_info;
  for (auto const & pep : peptides)
  {
    for (auto const & hit : pep.getHits())
    {
      String seq = hit.getSequence().toUnmodifiedString();
      set<String> accessions = hit.extractProteinAccessionsSet();
      // cout << "Sequence: " << seq << " size: " << accessions.size() << " " << *(accessions.begin()) << endl;
      // If a peptide is seen multiple times, the protein accessions should
      // always be the same, so only the first time it should be necessary to
      // insert them. However, just in case there are differences in the
      // accessions, we accumulate them all (probably unnecessary work):
      pep_info[seq].insert(accessions.begin(), accessions.end());
    }
  }

  // if inference results are given, filter quant. data accordingly:
  if (!pep_info.empty())
  {
    if (pep_quant_.empty())
    {
      OPENMS_LOG_ERROR << "No peptides quantified (pep_quant_ is empty)!" << endl;
    }
    PeptideQuant filtered;
    for (auto & pep_q : pep_quant_) // for all quantified peptides
    {
      // comparison is on the unmodified sequence, like the inference data
      String seq = pep_q.first.toUnmodifiedString();
      OPENMS_LOG_DEBUG << "Sequence: " << seq << endl;
      map<String, set<String> >::iterator pos = pep_info.find(seq);
      if (pos != pep_info.end()) // sequence found in protein inference data
      {
        OPENMS_LOG_DEBUG << "Accessions: ";
        for (auto & a : pos->second) { OPENMS_LOG_DEBUG << a << "\t"; }
        OPENMS_LOG_DEBUG << "\n";
        pep_q.second.accessions = pos->second; // replace accessions
        filtered.insert(pep_q);
      }
      else
      {
        // peptide not supported by inference -> dropped from quantification
        OPENMS_LOG_DEBUG << "not found in inference data." << endl;
      }
    }
    pep_quant_ = std::move(filtered);
  }

  //////////////////////////////////////////////////////
  // second, perform the actual peptide quantification:
  for (auto & pep_q : pep_quant_)
  {
    if (param_.getValue("best_charge_and_fraction") == "true")
    { // quantify according to the best charge state only:
      // determine which fraction, filename, charge state, and channel yields the maximum abundance
      // (break ties by total abundance)
      std::tuple<size_t, String, size_t, UInt> best_combination;

      // return false: only identified, not quantified
      if (!getBest_(pep_q.second.abundances, best_combination))
      {
        continue;
      }

      // quantify according to the best combination only:
      size_t best_fraction = std::get<0>(best_combination);
      String best_filename = std::get<1>(best_combination);
      size_t best_charge = std::get<2>(best_combination);
      UInt best_channel = std::get<3>(best_combination);
      double abundance = pep_q.second.abundances[best_fraction][best_filename][best_charge][best_channel];
      size_t sample_id = getSampleIDFromFilenameAndChannel_(best_filename, best_channel, experimental_design_);
      pep_q.second.total_abundances[sample_id] = abundance;
    }
    else
    { // sum up sample abundances over all fractions, filenames, charge states, and channels:
      for (auto & fa : pep_q.second.abundances) // for all fractions
      {
        for (auto & fna : fa.second) // for all filenames
        {
          for (auto & ca : fna.second) // for all charge states
          {
            for (auto & cha : ca.second) // for all channels
            {
              const String & filename = fna.first;
              const UInt & channel = cha.first;
              const double & abundance = cha.second;
              // Map (filename, channel) to sample using ExperimentalDesign
              size_t sample_id = getSampleIDFromFilenameAndChannel_(filename, channel, experimental_design_);
              pep_q.second.total_abundances[sample_id] += abundance;
            }
          }
        }
      }
    }

    // for PSM counts we cover all fractions, filenames, charge states.
    for (auto & fa : pep_q.second.psm_counts) // for all fractions
    {
      for (auto & fna : fa.second) // for all filenames
      {
        for (auto & ca : fna.second) // for all charge states
        {
          const String & filename = fna.first;
          const double & psm_counts = ca.second;
          // In multiplexed design, e.g. TMT, a single PSM is associated with all samples measured in the different channels/labels
          // NOTE(review): 'channel' starts at 1 here -- labels appear to be 1-based in the experimental design; confirm.
          for (Size channel = 1; channel <= experimental_design_.getNumberOfLabels(); ++channel)
          {
            size_t sample_id = getSampleIDFromFilenameAndChannel_(filename, channel, experimental_design_);
            pep_q.second.total_psm_counts[sample_id] += psm_counts; // accumulate PSM counts for spectral counting
          }
        }
      }
    }

    // count quantified peptide
    if (!pep_q.second.total_abundances.empty()) { stats_.quant_peptides++; }
  }

  //////////////////////////////////////////////////////
  // normalize (optional):
  if ((stats_.n_samples > 1) &&
      (param_.getValue("consensus:normalize") == "true"))
  {
    normalizePeptides_();
  }
}
void PeptideAndProteinQuant::normalizePeptides_()
{
/////////////////////////////////////////////////////
// calculate total peptide abundances
// depending on earlier options, these include:
// - all charges or only the best charge state
// - all fractions (if multiple fractions are analyzed)
map<UInt64, DoubleList> abundances; // all peptide abundances by sample
for (auto & pq : pep_quant_)
{
// maybe TODO: treat missing abundance values as zero
for (auto & sa : pq.second.total_abundances)
{
abundances[sa.first].push_back(sa.second);
}
}
if (abundances.size() <= 1) { return; }
/////////////////////////////////////////////////////
// compute scale factors on the sample level:
SampleAbundances medians; // median abundance by sample
for (auto & ab : abundances)
{
medians[ab.first] = Math::median(ab.second.begin(), ab.second.end());
}
DoubleList all_medians;
for (auto & sa : medians)
{
all_medians.push_back(sa.second);
}
double overall_median = Math::median(all_medians.begin(),
all_medians.end());
SampleAbundances scale_factors;
for (auto & med : medians)
{
scale_factors[med.first] = overall_median / med.second;
}
/////////////////////////////////////////////////////
// scale all abundance values:
for (auto & pep_q : pep_quant_)
{
// scale total abundances
for (auto & sta : pep_q.second.total_abundances)
{
sta.second *= scale_factors[sta.first];
}
// scale individual abundances
for (auto & fa : pep_q.second.abundances) // for all fractions
{
for (auto & fna : fa.second) // for all filenames
{
for (auto & ca : fna.second) // for all charge states
{
for (auto & cha : ca.second) // for all channels
{
const String & filename = fna.first;
const UInt & channel = cha.first;
size_t sample_id = getSampleIDFromFilenameAndChannel_(filename, channel, experimental_design_);
cha.second *= scale_factors[sample_id];
}
}
}
}
}
}
String PeptideAndProteinQuant::getAccession_(
  const set<String>& pep_accessions,
  const map<String, String>& accession_to_leader) const
{
  if (accession_to_leader.empty())
  {
    // No information on indistinguishable protein groups: only quantify
    // peptides that map to exactly one protein accession.
    if (pep_accessions.size() == 1) { return *pep_accessions.begin(); }
  }
  else
  {
    // Translate each accession to the leader of its group of
    // indistinguishable proteins; if all peptide accessions point to the
    // same group, report that group's leader.
    StringList leaders;
    for (auto const& accession : pep_accessions)
    {
      auto leader_pos = accession_to_leader.find(accession);
      // A missing accession is not an error: proteins without enough
      // evidence don't occur in the protXML, so they are not quantified.
      if (leader_pos != accession_to_leader.end())
      {
        leaders.push_back(leader_pos->second);
      }
    }
    if (leaders.empty()) { return ""; }
    const bool all_same = all_of(leaders.begin(), leaders.end(),
                                 [&leaders](const String& leader)
                                 {
                                   return leader == leaders.front();
                                 });
    if (all_same) { return leaders.front(); }
  }
  // ambiguous or unmatched peptide - log the accessions for debugging
  OPENMS_LOG_DEBUG << "LEADERS EMPTY: " << endl;
  for (auto const& accession : pep_accessions)
  {
    OPENMS_LOG_DEBUG << accession << endl;
  }
  return "";
}
/// Computes protein-level quantities from the previously filled peptide
/// quantities. Phases: transfer peptide data, read parameters, per-protein
/// peptide selection + aggregation, optional iBAQ normalization.
void PeptideAndProteinQuant::quantifyProteins(const ProteinIdentification& proteins)
{
  if (pep_quant_.empty())
  {
    OPENMS_LOG_WARN << "Warning: No peptides quantified." << endl;
    return;
  }
  // Phase 1: Transfer peptide data to protein structures
  transferPeptideDataToProteins_(proteins);
  // Phase 2: Extract and validate parameters
  std::string method = param_.getValue("method");
  Size top_n = param_.getValue("top:N");
  std::string aggregate = param_.getValue("top:aggregate");
  bool include_all = param_.getValue("top:include_all") == "true";
  bool fix_peptides = param_.getValue("consensus:fix_peptides") == "true";
  // Handle iBAQ parameter overrides (iBAQ always sums over all peptides)
  if (method == "iBAQ")
  {
    top_n = 0;
    aggregate = "sum";
  }
  // If information about (indistinguishable) protein groups is available, map
  // each accession to the accession of the leader of its group of proteins.
  // This mapping does not depend on the protein currently processed, so it is
  // computed once here instead of once per loop iteration.
  const auto accession_to_leader = mapAccessionToLeader(proteins);
  // Phase 3: Process each protein
  for (auto& prot_q : prot_quant_)
  {
    const String& accession = prot_q.first;
    const ProteinData& pd = prot_q.second;
    // Calculate PSM counts based on all peptides of a protein (group)
    for (auto const& pep2sa : pd.peptide_psm_counts)
    {
      const SampleAbundances& sas = pep2sa.second;
      for (auto const& sa : sas)
      {
        const Size& sample_id = sa.first;
        const Size& psms = sa.second;
        // a peptide with at least one PSM counts as "distinct" in the sample
        if (psms > 0)
          prot_q.second.total_distinct_peptides[sample_id]++;
        prot_q.second.total_psm_counts[sample_id] += psms;
      }
    }
    // Check if protein has enough peptides (for statistics)
    if ((top_n > 0) && (prot_q.second.peptide_abundances.size() < top_n))
    {
      stats_.too_few_peptides++;
      if (!include_all)
      {
        continue;
      }
    }
    // Select peptides for quantification
    std::vector<String> selected_peptides = selectPeptidesForQuantification_(
      accession, top_n, fix_peptides);
    // Calculate protein abundances
    calculateProteinAbundances_(accession, selected_peptides, aggregate, top_n, include_all);
    calculateFileAndChannelLevelProteinAbundances_(accession, selected_peptides, aggregate,
                                                   top_n, include_all, accession_to_leader);
    // Update statistics
    if (prot_q.second.total_abundances.empty())
    {
      stats_.too_few_peptides++;
    }
    else
    {
      stats_.quant_proteins++;
    }
  }
  // Phase 4: Post-processing
  if (method == "iBAQ")
  {
    performIbaqNormalization_(proteins);
  }
}
/// Maps every accession of a group of indistinguishable proteins to the
/// group's first accession (the "leader"). Returns an empty map if no group
/// information is available in @p proteins.
std::map<OpenMS::String, OpenMS::String> PeptideAndProteinQuant::mapAccessionToLeader(const OpenMS::ProteinIdentification& proteins) const
{
  std::map<OpenMS::String, OpenMS::String> leader_of;
  // iterating an empty group list is simply a no-op
  for (auto const& group : proteins.getIndistinguishableProteins())
  {
    for (auto const& accession : group.accessions)
    {
      // each accession should only occur once, but we don't check...
      leader_of[accession] = group.accessions[0];
    }
  }
  return leader_of;
}
// Reads quantitative input from a FeatureMap (label-free data, single
// fraction). Clears previous results, stores the experimental design, and
// feeds every ID-annotated feature into quantifyFeature_(); updates stats_.
void PeptideAndProteinQuant::readQuantData(FeatureMap& features, const ExperimentalDesign& ed)
{
updateMembers_(); // clear data
experimental_design_ = ed; // store experimental design for aggregation
stats_.n_samples = ed.getNumberOfSamples();
stats_.n_fractions = 1; // a single FeatureMap always represents one fraction
stats_.n_ms_files = ed.getNumberOfMSFiles();
stats_.total_features = features.size();
// For FeatureMap, extract filename from metadata or use default
String filename = "default";
if (features.metaValueExists("filename"))
{
filename = FileHandler::stripExtension(File::basename(features.getMetaValue("filename")));
}
else if (!ed.getMSFileSection().empty())
{
// Use first MS file from experimental design as fallback
filename = FileHandler::stripExtension(File::basename(ed.getMSFileSection()[0].path));
}
for (auto & f : features)
{
// features without any peptide identification cannot be quantified
if (f.getPeptideIdentifications().empty())
{
stats_.blank_features++;
continue;
}
countPeptides_(f.getPeptideIdentifications());
PeptideHit hit = getAnnotation_(f.getPeptideIdentifications());
FeatureHandle handle(0, f);
const size_t fraction(1);
const Int label(1); // Default label for LFQ data
quantifyFeature_(handle, fraction, filename, hit, label); // updates "stats_.quant_features"
}
// unassigned IDs still contribute to the peptide statistics
countPeptides_(features.getUnassignedPeptideIdentifications());
stats_.total_peptides = pep_quant_.size();
// features with IDs that were nevertheless not quantified count as ambiguous
stats_.ambig_features = stats_.total_features - stats_.blank_features -
stats_.quant_features;
}
// Reads quantitative input from a ConsensusMap. Each consensus feature's
// sub-features are matched (by stripped filename + label) to entries of the
// experimental design to recover their fraction before quantification.
void PeptideAndProteinQuant::readQuantData(
ConsensusMap& consensus,
const ExperimentalDesign& ed)
{
// TODO check that the file section of the experimental design is compatible with what can be parsed from the consensus map.
updateMembers_(); // clear data
experimental_design_ = ed; // store experimental design for aggregation
if (consensus.empty())
{
OPENMS_LOG_ERROR << "Empty consensus map passed to readQuantData." << endl;
return;
}
// copy experiment dimensions from the design into the statistics
stats_.n_fractions = ed.getNumberOfFractions();
stats_.n_ms_files = ed.getNumberOfMSFiles();
stats_.n_samples = ed.getNumberOfSamples();
OPENMS_LOG_DEBUG << "Reading quant data: " << endl;
OPENMS_LOG_DEBUG << " MS files : " << stats_.n_ms_files << endl;
OPENMS_LOG_DEBUG << " Fractions : " << stats_.n_fractions << endl;
OPENMS_LOG_DEBUG << " Samples (Assays): " << stats_.n_samples << endl;
// map filename and label of experimental design to the full experimental design entry for faster lookup
const auto& ms_section = ed.getMSFileSection();
std::unordered_map<String, ExperimentalDesign::MSFileSectionEntry> fileAndLabel2MSFileSectionEntry;
for (const auto& e : ms_section)
{
// key: basename without extension, concatenated with the label
String ed_filename = FileHandler::stripExtension(File::basename(e.path));
String ed_label = e.label;
fileAndLabel2MSFileSectionEntry[ed_filename + ed_label] = e;
}
for (auto & c : consensus)
{
stats_.total_features += c.getFeatures().size();
// count features without id
if (c.getPeptideIdentifications().empty())
{
stats_.blank_features += c.getFeatures().size();
continue;
}
countPeptides_(c.getPeptideIdentifications());
PeptideHit hit = getAnnotation_(c.getPeptideIdentifications());
for (auto const & f : c.getFeatures())
{
//TODO MULTIPLEXED: needs to be adapted for multiplexed experiments
// the map index identifies the column header this sub-feature came from
size_t row = f.getMapIndex();
const auto& h = consensus.getColumnHeaders().at(row);
const String c_fn = FileHandler::stripExtension(File::basename(h.filename)); // filename according to experimental design in consensus map
const size_t c_lab = h.getLabelAsUInt(consensus.getExperimentType());
// find entry in experimental design (ignore extension and folder) that corresponds to current column header entry
if (auto it = fileAndLabel2MSFileSectionEntry.find(c_fn + String(c_lab)); it != fileAndLabel2MSFileSectionEntry.end())
{
const size_t fraction = it->second.fraction;
quantifyFeature_(f, fraction, c_fn, hit, c_lab); // updates "stats_.quant_features"
}
else
{
OPENMS_LOG_FATAL_ERROR << "File+Label referenced in consensus header not found in experimental design.\n"
<< "File+Label:" << c_fn << "\t" << c_lab << std::endl;
}
}
}
// unassigned IDs still contribute to the peptide statistics
countPeptides_(consensus.getUnassignedPeptideIdentifications());
stats_.total_peptides = pep_quant_.size();
// features with IDs that were nevertheless not quantified count as ambiguous
stats_.ambig_features = stats_.total_features - stats_.blank_features -
stats_.quant_features;
}
// Reads quantitative input from identification data alone (spectral
// counting): every (non-decoy) identified PSM contributes a count of 1.
// Fraction and label are looked up in the experimental design via the MS
// file path annotated in the corresponding protein identification run.
void PeptideAndProteinQuant::readQuantData(
std::vector<ProteinIdentification>& proteins,
PeptideIdentificationList& peptides,
const ExperimentalDesign& ed)
{
updateMembers_(); // clear data
experimental_design_ = ed; // store experimental design for aggregation
stats_.n_samples = ed.getNumberOfSamples();
stats_.n_fractions = ed.getNumberOfFractions();
stats_.n_ms_files = ed.getNumberOfMSFiles();
OPENMS_LOG_DEBUG << "Reading quant data: " << endl;
OPENMS_LOG_DEBUG << " MS files : " << stats_.n_ms_files << endl;
OPENMS_LOG_DEBUG << " Fractions : " << stats_.n_fractions << endl;
OPENMS_LOG_DEBUG << " Samples (Assays): " << stats_.n_samples << endl;
stats_.total_features = peptides.size();
countPeptides_(peptides);
// (run identifier, merge index) -> primary MS file path of that run
map<pair<String,Size>, String> identifier_idmergeidx_to_ms_file;
for (Size i = 0; i < proteins.size(); ++i)
{
StringList ms_files;
proteins[i].getPrimaryMSRunPath(ms_files);
if (ms_files.empty())
{
throw Exception::MissingInformation(
__FILE__,
__LINE__,
OPENMS_PRETTY_FUNCTION,
"No MS file annotated in protein identification.");
}
for (Size s = 0; s < ms_files.size(); ++s)
{
identifier_idmergeidx_to_ms_file[{proteins[i].getIdentifier(), s}] = ms_files[s];
}
OPENMS_LOG_DEBUG << " run index : MS file " << i << " : " << ListUtils::concatenate(ms_files, ", ") << endl;
}
for (auto & p : peptides)
{
if (p.getHits().empty()) { continue; }
// "id_merge_idx" selects the MS file if several runs were merged
Size id_merge_idx = p.getMetaValue("id_merge_idx",0);
const PeptideHit& hit = p.getHits()[0];
// don't quantify decoys
if (hit.isDecoy()) continue;
stats_.quant_features++;
const AASequence& seq = hit.getSequence();
const String& ms_file_path = identifier_idmergeidx_to_ms_file[{p.getIdentifier(),id_merge_idx}];
// determine sample and fraction by MS file name (stored in protein identification)
const ExperimentalDesign::MSFileSection& run_section = ed.getMSFileSection();
auto row = find_if(begin(run_section), end(run_section),
[&ms_file_path](const ExperimentalDesign::MSFileSectionEntry& r)
{
return File::basename(r.path) == File::basename(ms_file_path);
});
if (row == end(run_section))
{
// report all known paths before failing, to ease debugging of designs
OPENMS_LOG_ERROR << "MS file: " << ms_file_path << " not found in experimental design." << endl;
for (const auto& r : run_section)
{
OPENMS_LOG_ERROR << r.path << endl;
}
throw Exception::MissingInformation(
__FILE__,
__LINE__,
OPENMS_PRETTY_FUNCTION,
"MS file annotated in protein identification doesn't match any in the experimental design.");
}
size_t fraction = row->fraction;
String filename = FileHandler::stripExtension(File::basename(ms_file_path));
Int label = row->label; // Use label from experimental design
// count peptides in the different fractions, filenames, charge states, and channels
pep_quant_[seq].abundances[fraction][filename][hit.getCharge()][label] += 1;
}
stats_.total_peptides = pep_quant_.size();
}
void PeptideAndProteinQuant::updateMembers_()
{
  // Discard all previously accumulated results so the instance can be
  // reused for a fresh quantification run.
  pep_quant_.clear();
  prot_quant_.clear();
  stats_ = Statistics();
}
// Accessor for the statistics gathered during the last read/quantify run.
const PeptideAndProteinQuant::Statistics&
PeptideAndProteinQuant::getStatistics()
{
return stats_;
}
// Accessor for the peptide-level quantification results.
const PeptideAndProteinQuant::PeptideQuant&
PeptideAndProteinQuant::getPeptideResults()
{
return pep_quant_;
}
// Accessor for the protein-level quantification results.
const PeptideAndProteinQuant::ProteinQuant&
PeptideAndProteinQuant::getProteinResults()
{
return prot_quant_;
}
/// Attaches the quantification results to the indistinguishable protein
/// groups of @p proteins as float/string/integer data arrays (sample-level
/// abundances, PSM counts, distinct-peptide counts, plus file/channel-level
/// values ordered by the experimental design). Throws MissingInformation if
/// a quantified group is missing from the inference data. Optionally removes
/// groups without quantification results.
/// Fix vs. previous version: removed the local counter 'n_files', which was
/// incremented but never read.
void PeptideAndProteinQuant::annotateQuantificationsToProteins(
  const ProteinQuant& protein_quants,
  ProteinIdentification& proteins,
  bool remove_unquantified)
{
  // read experimental design as it is needed to annotate quantities in the correct order
  ExperimentalDesign::MSFileSection msfile_section = experimental_design_.getMSFileSection();
  // Extract the Spectra Filepath column from the design
  map<UInt64, map<UInt64, String>> design_group_fraction_filename;
  for (ExperimentalDesign::MSFileSectionEntry const& f : msfile_section)
  {
    const String fn = FileHandler::stripExtension(File::basename(f.path));
    design_group_fraction_filename[f.fraction_group][f.fraction] = fn;
  }
  auto & id_groups = proteins.getIndistinguishableProteins();
  for (const auto& q : protein_quants)
  {
    // accession of quantified protein(group)
    const String & acc = q.first;
    if (q.second.total_abundances.empty())
    {
      //TODO maybe just count the number of unquantifiable proteins and report that?
      OPENMS_LOG_DEBUG << "Protein " << acc << " not quantified." << endl;
      continue;
    } // not quantified
    // lambda to check if a ProteinGroup has accession "acc"
    auto hasProteinInGroup = [&acc] (const ProteinIdentification::ProteinGroup& g)->bool
    {
      return find(g.accessions.begin(), g.accessions.end(), acc) != g.accessions.end();
    };
    // retrieve protein group with accession "acc"
    auto id_group = std::find_if(id_groups.begin(), id_groups.end(), hasProteinInGroup);
    if (id_group != id_groups.end())
    {
      // copy abundances to float data array
      const SampleAbundances& total_abundances = q.second.total_abundances;
      const SampleAbundances& total_psm_counts = q.second.total_psm_counts;
      const SampleAbundances& total_distinct_peptides = q.second.total_distinct_peptides;
      const auto& file_level_psm_counts = q.second.file_level_psm_counts;
      // TODO: OPENMS_ASSERT(id_group->float_data_arrays.empty(), "Protein group float data array not empty!.");
      id_group->getFloatDataArrays().resize(4);
      id_group->getStringDataArrays().resize(2);
      id_group->getIntegerDataArrays().resize(2);
      // Sample-level arrays (indices 0-2)
      ProteinIdentification::ProteinGroup::FloatDataArray & abundances = id_group->getFloatDataArrays()[0];
      Size n_samples = getStatistics().n_samples;
      abundances.setName("abundances");
      abundances.resize(n_samples);
      auto & psm_counts = id_group->getFloatDataArrays()[1];
      psm_counts.setName("psm_count");
      psm_counts.resize(n_samples);
      auto & peptide_counts = id_group->getFloatDataArrays()[2];
      peptide_counts.setName("distinct_peptides");
      peptide_counts.resize(n_samples);
      // sample ids index directly into the resized arrays
      for (auto const & s : total_abundances)
      {
        abundances[s.first] = (float) s.second;
      }
      for (auto const & s : total_psm_counts)
      {
        psm_counts[s.first] = (float) s.second;
      }
      for (auto const & s : total_distinct_peptides)
      {
        peptide_counts[s.first] = (float) s.second;
      }
      // Add file/channel level abundances
      auto& file_channel_level_abundance = id_group->getFloatDataArrays()[3];
      file_channel_level_abundance.setName("file_channel_level_abundance");
      auto& file_channel_level_filename = id_group->getStringDataArrays()[0];
      file_channel_level_filename.setName("file_channel_level_filename");
      auto& file_channel_level_channel = id_group->getIntegerDataArrays()[0];
      file_channel_level_channel.setName("file_channel_level_channel");
      // We loop over the filenames in the design file, as this is the order we expect in the output.
      for (const auto& [group_id, fraction_to_filename_map] : design_group_fraction_filename)
      {
        for (auto [fraction, design_filename] : fraction_to_filename_map)
        {
          // Process each filename within the fraction group
          // important: strip file extension and path to find the entry
          design_filename = FileHandler::stripExtension(File::basename(design_filename));
#ifdef DEBUG_PROTEINQUANTIFIER
          std::cout
            << "Experimental design: fraction group: " << group_id
            << ", filename: '" << design_filename
            << "', fraction: " << fraction
            << " of the experimental design." << std::endl;
#endif
          // for each file in the design, fill the channels quantity
          for (Size c = 1; c <= experimental_design_.getNumberOfLabels(); ++c) // label/channel numbers are 1-based
          {
            double channel_abundance{}; // stays 0 if file/channel not quantified
            const auto& filename_to_channel_map = q.second.channel_level_abundances;
            if (auto file_level_it = filename_to_channel_map.find(design_filename);
                file_level_it != filename_to_channel_map.end())
            {
              // channel numbers are 1-based; a channel 0 indicates corrupt data
              if (file_level_it->second.find(0) != file_level_it->second.end()) throw Exception::MissingInformation(
                __FILE__,
                __LINE__,
                OPENMS_PRETTY_FUNCTION,
                "Channel found that should not exist.");
              // Found the file, now search for the channel
              if (auto channel_it = file_level_it->second.find(c);
                  channel_it != file_level_it->second.end())
              {
                channel_abundance = channel_it->second;
              }
            }
            file_channel_level_abundance.push_back(channel_abundance);
            file_channel_level_filename.push_back(design_filename);
            file_channel_level_channel.push_back(c);
#if DEBUG_PEPTIDEANDPROTEINQUANT
            std::cout << "DEBUG: Adding abundance for protein to meta value " << acc
                      << " filename " << design_filename
                      << " channel " << c
                      << ": " << channel_abundance << endl;
#endif
          }
        }
      }
      // Add file level PSM counts
      auto& file_level_psm_count = id_group->getIntegerDataArrays()[1];
      file_level_psm_count.setName("file_level_psm_count");
      auto& file_level_filename = id_group->getStringDataArrays()[1];
      file_level_filename.setName("file_level_filename");
      for (const auto& filename : file_level_psm_counts)
      {
        file_level_psm_count.push_back((int)filename.second);
        file_level_filename.push_back(filename.first);
      }
    }
    else
    {
      throw Exception::MissingInformation(
        __FILE__,
        __LINE__,
        OPENMS_PRETTY_FUNCTION,
        "Protein group quantified that is not present in inference data.");
    }
  }
  if (remove_unquantified)
  {
    // remove all protein groups that have not been quantified
    auto notQuantified = [] (const ProteinIdentification::ProteinGroup& g)->bool { return g.getFloatDataArrays().empty(); };
    id_groups.erase(
      remove_if(id_groups.begin(), id_groups.end(), notQuantified),
      id_groups.end());
  }
}
/// Transfers peptide-level quantities (abundances, PSM counts, detailed
/// file/channel values) onto their protein group leaders in prot_quant_.
/// Peptidoforms of the same unmodified sequence are summed. Throws
/// MissingInformation if no peptide could be mapped to any protein.
/// Fix vs. previous version: the DEBUG_PROTEINQUANTIFIER block referenced an
/// undeclared variable 'accession' (compile error with the macro enabled);
/// it now prints 'leader_accession'.
void PeptideAndProteinQuant::transferPeptideDataToProteins_(const ProteinIdentification& proteins)
{
  // if information about (indistinguishable) protein groups is available, map
  // each accession to the accession of the leader of its group of proteins:
  map<String, String> accession_to_leader = mapAccessionToLeader(proteins);
  bool contains_accessions{ false}; // flag to check if any accessions were found
  for (auto const& pep_q : pep_quant_)
  {
    String leader_accession = getAccession_(pep_q.second.accessions, accession_to_leader);
    OPENMS_LOG_DEBUG << "Peptide id mapped to leader: " << leader_accession << endl;
    // not enough evidence or mapping to multiple groups
    if (leader_accession.empty())
      continue;
    contains_accessions = true;
    // proteotypic peptide
    const String peptide = pep_q.first.toUnmodifiedString();
    prot_quant_[leader_accession].psm_count += pep_q.second.psm_count; // total PSM count for this group of proteins (represented by the leader accession)
    // transfer abundances and counts from peptides->protein
    // summarize abundances and counts between different peptidoforms
    for (auto const& sta : pep_q.second.total_abundances)
    {
      prot_quant_[leader_accession].peptide_abundances[peptide][sta.first] += sta.second;
    }
    for (auto const& sta : pep_q.second.total_psm_counts)
    {
      prot_quant_[leader_accession].peptide_psm_counts[peptide][sta.first] += sta.second;
    }
    // transfer detailed abundances from peptide to protein
    for (auto const& fraction : pep_q.second.abundances)
    {
      for (auto const& filename : fraction.second)
      {
        for (auto const& charge : filename.second)
        {
          for (auto const& channel : charge.second)
          {
            prot_quant_[leader_accession].channel_level_abundances[filename.first][channel.first] += channel.second;
#ifdef DEBUG_PROTEINQUANTIFIER
            std::cout << "DEBUG: Adding abundance for protein " << leader_accession
                      << " fraction " << fraction.first
                      << " filename " << filename.first
                      << " charge " << charge.first
                      << " channel " << channel.first
                      << ": " << channel.second << endl;
#endif
          }
        }
      }
    }
    // transfer detailed PSM counts from peptide to protein
    for (auto const& fraction : pep_q.second.psm_counts)
    {
      for (auto const& filename : fraction.second)
      {
        for (auto const& charge : filename.second)
        {
          prot_quant_[leader_accession].file_level_psm_counts[filename.first] += (UInt)charge.second;
        }
      }
    }
  }
  if (!contains_accessions)
  {
    OPENMS_LOG_FATAL_ERROR << "No protein matches found, cannot quantify proteins." << endl;
    throw Exception::MissingInformation(
      __FILE__,
      __LINE__,
      OPENMS_PRETTY_FUNCTION,
      "No protein matches found, cannot quantify proteins.");
  }
}
std::vector<String> PeptideAndProteinQuant::selectPeptidesForQuantification_(const String& protein_accession,
Size top_n,
bool fix_peptides)
{
std::vector<String> peptides;
auto prot_it = prot_quant_.find(protein_accession);
if (prot_it == prot_quant_.end())
{
return peptides; // empty vector
}
const ProteinData& pd = prot_it->second;
if (fix_peptides && (top_n == 0))
{
// consider all peptides that occur in every sample:
for (auto const& ab : pd.peptide_abundances)
{
if (ab.second.size() == stats_.n_samples)
{
peptides.push_back(ab.first);
}
}
}
else if (fix_peptides && (top_n > 0) && (pd.peptide_abundances.size() > top_n))
{
orderBest_(pd.peptide_abundances, peptides);
peptides.resize(top_n);
}
else
{
// consider all peptides of the protein:
for (auto const& ab : pd.peptide_abundances)
{
peptides.push_back(ab.first);
}
}
return peptides;
}
/// Aggregates a list of abundance values into a single value.
/// @param abundances values to aggregate (empty input yields 0.0)
/// @param method one of "median", "mean", "weighted_mean"; anything else
///        (i.e. "sum") sums the values
/// @return the aggregated abundance
/// Fix vs. previous version: "weighted_mean" divided by the intensity sum
/// without a guard, producing NaN when all abundances are zero; it now
/// returns 0.0 in that case.
double PeptideAndProteinQuant::aggregateAbundances_(const std::vector<double>& abundances,
                                                    const String& method) const
{
  if (abundances.empty())
  {
    return 0.0;
  }
  if (method == "median")
  {
    std::vector<double> sorted_abundances = abundances; // make a copy for sorting
    return Math::median(sorted_abundances.begin(), sorted_abundances.end());
  }
  else if (method == "mean")
  {
    return Math::mean(abundances.begin(), abundances.end());
  }
  else if (method == "weighted_mean")
  {
    // intensity-weighted mean: sum(I^2) / sum(I) gives higher-intensity
    // values proportionally more weight
    double sum_intensities = 0;
    double sum_intensities_squared = 0;
    for (auto const& intensity : abundances)
    {
      sum_intensities += intensity;
      sum_intensities_squared += intensity * intensity;
    }
    if (sum_intensities == 0.0)
    {
      return 0.0; // avoid division by zero (all abundances zero)
    }
    return sum_intensities_squared / sum_intensities;
  }
  else // "sum"
  {
    return Math::sum(abundances.begin(), abundances.end());
  }
}
void PeptideAndProteinQuant::calculateProteinAbundances_(const String& protein_accession,
const std::vector<String>& selected_peptides,
const String& aggregate_method,
Size top_n,
bool include_all)
{
auto prot_it = prot_quant_.find(protein_accession);
if (prot_it == prot_quant_.end())
{
return;
}
ProteinData& pd = prot_it->second;
// consider only the selected peptides for quantification:
map<UInt64, DoubleList> abundances; // all peptide abundances by sample
for (const auto& pep : selected_peptides) // for all selected peptides
{
auto pep_it = pd.peptide_abundances.find(pep);
if (pep_it != pd.peptide_abundances.end())
{
for (auto& sa : pep_it->second) // copy over all abundances
{
abundances[sa.first].push_back(sa.second);
}
}
}
for (auto& ab : abundances)
{
// check if the protein has enough peptides in this sample
if (!include_all && (top_n > 0) && (ab.second.size() < top_n))
{
continue;
}
// if we have more than "top", reduce to the top ones
if ((top_n > 0) && (ab.second.size() > top_n))
{
// sort descending:
sort(ab.second.begin(), ab.second.end(), greater<double>());
ab.second.resize(top_n); // remove all but best N values
}
double abundance_result = aggregateAbundances_(ab.second, aggregate_method);
pd.total_abundances[ab.first] = abundance_result;
}
}
// Aggregates the detailed (fraction, filename, channel) abundances of the
// selected peptides into per-file/per-channel protein abundances
// (ProteinData::channel_level_abundances), using the same aggregation
// method, top-N truncation, and include_all rules as the sample level.
void PeptideAndProteinQuant::calculateFileAndChannelLevelProteinAbundances_(const String& protein_accession,
const std::vector<String>& selected_peptides,
const String& aggregate_method,
Size top_n,
bool include_all,
const std::map<String, String>& accession_to_leader)
{
auto prot_it = prot_quant_.find(protein_accession);
if ( prot_it == prot_quant_.end())
{
return;
}
ProteinData& pd = prot_it->second;
// organize detailed abundances by (fraction, filename, channel) combinations
map<tuple<Int, String, UInt>, DoubleList> channel_level_abundances_for_selected_peptides;
// collect detailed abundances from selected peptides
// NOTE(review): this is a full scan of pep_quant_ for EVERY selected
// peptide (quadratic); a single pass over pep_quant_ with a lookup set of
// selected peptides would be linear - confirm that accumulation order
// doesn't matter before restructuring.
for (const auto& pep : selected_peptides) // for all selected peptides
{
// find the original peptide data to get detailed abundances
for (auto const& pep_q_check : pep_quant_)
{
if (pep_q_check.first.toUnmodifiedString() == pep)
{
String check_accession = getAccession_(pep_q_check.second.accessions, accession_to_leader);
if (check_accession == protein_accession) // this peptide belongs to current protein
{
// collect detailed abundances from this peptide
for (auto const& fraction : pep_q_check.second.abundances)
{
for (auto const& filename : fraction.second)
{
for (auto const& charge : filename.second)
{
for (auto const& channel : charge.second)
{
auto peptide = make_tuple(fraction.first, filename.first, channel.first);
channel_level_abundances_for_selected_peptides[peptide].push_back(channel.second);
#ifdef DEBUG_PEPTIDEANDPROTEINQUANT
std::cout << "DEBUG: Adding abundance for leader " <<
getAccession_(pep_q_check.second.accessions, const_cast<std::map<String, String>&>(accession_to_leader))
<< pep
<< " fraction " << fraction.first
<< " filename " << filename.first
<< " charge " << charge.first
<< " channel " << channel.first
<< ": " << channel.second << endl;
#endif
}
}
}
}
// NOTE(review): breaking here means only the FIRST peptidoform (in
// pep_quant_ map order) of this unmodified sequence contributes,
// while the sample-level transfer sums all peptidoforms - confirm
// this asymmetry is intended.
break; // found the peptide, no need to continue searching
}
}
}
}
// now aggregate using the same aggregation method
for (auto& detailed_ab : channel_level_abundances_for_selected_peptides)
{
const auto& selected_peptide = detailed_ab.first;
String filename = get<1>(selected_peptide);
UInt channel = get<2>(selected_peptide);
DoubleList& all_abundances = detailed_ab.second;
if (all_abundances.empty()) continue;
// check if we have enough peptides for this detailed key
if (!include_all && (top_n > 0) && (all_abundances.size() < top_n))
{
continue;
}
// if we have more than "top", reduce to the top ones
if ((top_n > 0) && (all_abundances.size() > top_n))
{
// sort descending:
sort(all_abundances.begin(), all_abundances.end(), greater<double>());
all_abundances.resize(top_n); // remove all but best N values
}
double abundance_result = aggregateAbundances_(all_abundances, aggregate_method);
// store the aggregated result in channel_level_abundances
pd.channel_level_abundances[filename][channel] = abundance_result;
#ifdef DEBUG_PEPTIDEANDPROTEINQUANT
// NOTE(review): this debug call passes a String where getAccession_
// expects a set<String> - it would not compile with the macro defined.
Int fraction = get<0>(selected_peptide);
std::cout << "DEBUG: Protein " << protein_accession
<< " leader " << getAccession_(protein_accession, const_cast<std::map<String, String>&>(accession_to_leader))
<< " fraction " << fraction
<< " filename " << filename
<< " channel " << channel
<< ": " << abundance_result << endl;
#endif
}
}
/// iBAQ normalization: divides each protein's per-sample abundance by the
/// number of peptides produced by an in-silico enzymatic digest of its
/// sequence. Proteins without a sequence are removed (with a warning).
/// Fix vs. previous version: the map was searched up to three times per hit
/// (find + erase-by-key / operator[]); the iterator from find() is now
/// reused for both the erase and the update.
void PeptideAndProteinQuant::performIbaqNormalization_(const ProteinIdentification& proteins)
{
  EnzymaticDigestion digest{};
  for (auto & hit : proteins.getHits())
  {
    const OpenMS::String & hit_accession = hit.getAccession();
    auto quant_it = prot_quant_.find(hit_accession);
    if (quant_it == prot_quant_.end())
    {
      continue; // protein was not quantified
    }
    const OpenMS::String & hit_sequence = hit.getSequence();
    if (hit_sequence.empty())
    {
      // without a sequence the theoretical peptide count is undefined
      prot_quant_.erase(quant_it);
      OPENMS_LOG_WARN << "Removed " << hit_accession << ", no protein sequence found!" << endl;
    }
    else
    {
      std::vector<StringView> peptides {};
      digest.digestUnmodified(StringView(hit_sequence), peptides);
      for (auto& total_abundance : quant_it->second.total_abundances)
      {
        total_abundance.second /= double(peptides.size());
      }
    }
  }
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/QUANTITATION/IsobaricQuantitationMethod.cpp | .cpp | 3,762 | 91 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Stephan Aiche $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/QUANTITATION/IsobaricQuantitationMethod.h>
#include <OpenMS/DATASTRUCTURES/Matrix.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
namespace OpenMS
{
// defaulted out-of-line destructor
IsobaricQuantitationMethod::~IsobaricQuantitationMethod() = default;
// base-class constructor: registers the DefaultParamHandler name shared by
// all isobaric quantitation methods
IsobaricQuantitationMethod::IsobaricQuantitationMethod() :
DefaultParamHandler("IsobaricQuantitationMethod")
{
}
Matrix<double> IsobaricQuantitationMethod::stringListToIsotopeCorrectionMatrix_(const StringList& stringlist) const
{
// check the string list
if (stringlist.size() != getNumberOfChannels())
{
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("IsobaricQuantitationMethod: Invalid string representation of the isotope correction matrix. Expected ") + getNumberOfChannels() + " entries but got " + stringlist.size() + ".");
}
// compute frequency matrix based on the deviation matrix
Matrix<double> channel_frequency(getNumberOfChannels(), getNumberOfChannels(), 0.0);
// channel index
Size contributing_channel = 0;
// fill row-wise
for (const auto& l : stringlist)
{
StringList corrections;
l.split('/', corrections);
auto number_of_columns = getChannelInformation()[contributing_channel].affected_channels.size();
if (corrections.size() != number_of_columns )
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("Corrections for channel ID #") + contributing_channel + " must contain " + number_of_columns + " values, but has " + corrections.size() + "!", String(corrections.size()));
}
// overwrite line in Matrix with custom values
Size affected_channel_idx = 0;
double self_contribution = 100.0;
double correction;
Int target_channel;
for (auto& c : corrections)
{
c = c.trim().toUpper();
if (c != "NA" && c != "-1" && c != "0.0")
{
target_channel = getChannelInformation()[contributing_channel].affected_channels[affected_channel_idx];
try
{
correction = c.toDouble();
}
catch (Exception::ConversionError& e)
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("Correction entry #") + affected_channel_idx + " in channel ID " + contributing_channel + " must be one of na/NA/-1 or a floating point number representation!", c);
}
if (correction < 0.0 || correction > 100.0)
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("Correction entry #") + affected_channel_idx + " in channel ID " + contributing_channel + " must be a percentage between 0 and 100!", c);
}
if (target_channel >= 0 && Size(target_channel) < getNumberOfChannels())
{
channel_frequency(target_channel, contributing_channel) = correction / 100.0;
}
self_contribution -= correction; // count reduced self-contribution even if it does not affect another channel
}
affected_channel_idx++;
}
// set reduced self contribution
channel_frequency(contributing_channel, contributing_channel) = self_contribution / 100.0;
// increment channel index
++contributing_channel;
}
return channel_frequency;
}
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/QUANTITATION/KDTreeFeatureNode.cpp | .cpp | 1,260 | 54 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Johannes Veit $
// $Authors: Johannes Veit $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/QUANTITATION/KDTreeFeatureNode.h>
#include <OpenMS/ANALYSIS/QUANTITATION/KDTreeFeatureMaps.h>
namespace OpenMS
{
// Construct a node that refers to feature #idx stored in the given map collection.
// The node itself stores no coordinates; it looks them up via data_ on demand.
KDTreeFeatureNode::KDTreeFeatureNode(KDTreeFeatureMaps* data, Size idx) :
data_(data),
idx_(idx)
{
}
// Copy operations and destructor are trivial: the node only holds a non-owning
// pointer and an index.
KDTreeFeatureNode::KDTreeFeatureNode(const KDTreeFeatureNode& rhs)
= default;
KDTreeFeatureNode& KDTreeFeatureNode::operator=(KDTreeFeatureNode const& rhs)
= default;
KDTreeFeatureNode::~KDTreeFeatureNode()
= default;
// Returns the index of the referenced feature within the KDTreeFeatureMaps.
Size KDTreeFeatureNode::getIndex() const
{
return idx_;
}
// Coordinate accessor used by the 2D tree: dimension 0 is RT, dimension 1 is m/z.
// Throws Exception::ElementNotFound for any other dimension index.
KDTreeFeatureNode::value_type KDTreeFeatureNode::operator[](Size i) const
{
  switch (i)
  {
    case 0:
      return data_->rt(idx_);
    case 1:
      return data_->mz(idx_);
    default:
      throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                       "Indices other than 0 (RT) and 1 (m/z) are not allowed!");
  }
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/QUANTITATION/ItraqFourPlexQuantitationMethod.cpp | .cpp | 4,703 | 119 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Stephan Aiche $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/QUANTITATION/ItraqFourPlexQuantitationMethod.h>
#include <OpenMS/DATASTRUCTURES/Matrix.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
namespace OpenMS
{
const String ItraqFourPlexQuantitationMethod::name_ = "itraq4plex";
// Sets up the four iTRAQ reporter channels (114-117) with their reporter m/z
// values and, for each channel, the indices of channels affected by its
// -2/-1/+1/+2 Da isotope impurities (-1 = impurity falls outside the plex).
ItraqFourPlexQuantitationMethod::ItraqFourPlexQuantitationMethod()
{
setName("ItraqFourPlexQuantitationMethod");
// create the channel map
channels_.push_back(IsobaricChannelInformation("114", 0, "", 114.1112, {-1, -1, 1, 2}));
channels_.push_back(IsobaricChannelInformation("115", 1, "", 115.1082, {-1, 0, 2, 3}));
channels_.push_back(IsobaricChannelInformation("116", 2, "", 116.1116, {0, 1, 3, -1}));
channels_.push_back(IsobaricChannelInformation("117", 3, "", 117.1149, {1, 2, -1, -1}));
// we assume 114 to be the reference
reference_channel_ = 0;
setDefaultParams_();
}
ItraqFourPlexQuantitationMethod::~ItraqFourPlexQuantitationMethod() = default;
// Registers the default parameters: per-channel descriptions, the reference
// channel (given as the reporter mass 114-117, not as an index), and the
// default isotope-impurity correction matrix (rows = channels 114..117,
// columns = contribution to -2/-1/+1/+2 Da neighbors, in percent).
void ItraqFourPlexQuantitationMethod::setDefaultParams_()
{
defaults_.setValue("channel_114_description", "", "Description for the content of the 114 channel.");
defaults_.setValue("channel_115_description", "", "Description for the content of the 115 channel.");
defaults_.setValue("channel_116_description", "", "Description for the content of the 116 channel.");
defaults_.setValue("channel_117_description", "", "Description for the content of the 117 channel.");
defaults_.setValue("reference_channel", 114, "Number of the reference channel (114-117).");
defaults_.setMinInt("reference_channel", 114);
defaults_.setMaxInt("reference_channel", 117);
// {0.0, 1.0, 5.9, 0.2}, //114
// {0.0, 2.0, 5.6, 0.1},
// {0.0, 3.0, 4.5, 0.1},
// {0.1, 4.0, 3.5, 0.1} //117
defaults_.setValue("correction_matrix", std::vector<std::string>{"0.0/1.0/5.9/0.2",
"0.0/2.0/5.6/0.1",
"0.0/3.0/4.5/0.1",
"0.1/4.0/3.5/0.1"},
"Correction matrix for isotope distributions (see documentation); use the following format: <-2Da>/<-1Da>/<+1Da>/<+2Da>; e.g. '0/0.3/4/0', '0.1/0.3/3/0.2'");
defaultsToParam_();
}
void ItraqFourPlexQuantitationMethod::updateMembers_()
{
channels_[0].description = param_.getValue("channel_114_description").toString();
channels_[1].description = param_.getValue("channel_115_description").toString();
channels_[2].description = param_.getValue("channel_116_description").toString();
channels_[3].description = param_.getValue("channel_117_description").toString();
// compute the index of the reference channel
reference_channel_ = ((Int) param_.getValue("reference_channel")) - 114;
}
// Copy constructor: copies the base-class state (parameters etc.) and then
// duplicates the channel list and reference channel index.
ItraqFourPlexQuantitationMethod::ItraqFourPlexQuantitationMethod(const ItraqFourPlexQuantitationMethod& other):
  IsobaricQuantitationMethod(other)
{
  channels_ = other.channels_;
  reference_channel_ = other.reference_channel_;
}
// Assignment operator.
// Bug fix: the base class (IsobaricQuantitationMethod / DefaultParamHandler)
// was previously not assigned, so parameters were silently dropped on
// assignment while the copy constructor did copy them. Now the base is
// assigned first, mirroring the copy constructor.
ItraqFourPlexQuantitationMethod& ItraqFourPlexQuantitationMethod::operator=(const ItraqFourPlexQuantitationMethod& rhs)
{
  if (this == &rhs)
    return *this;
  IsobaricQuantitationMethod::operator=(rhs);
  channels_.clear();
  channels_.insert(channels_.begin(), rhs.channels_.begin(), rhs.channels_.end());
  reference_channel_ = rhs.reference_channel_;
  return *this;
}
const String& ItraqFourPlexQuantitationMethod::getMethodName() const
{
return ItraqFourPlexQuantitationMethod::name_;
}
const IsobaricQuantitationMethod::IsobaricChannelList& ItraqFourPlexQuantitationMethod::getChannelInformation() const
{
return channels_;
}
Size ItraqFourPlexQuantitationMethod::getNumberOfChannels() const
{
return 4;
}
// Builds the numeric isotope-correction matrix from the user-supplied
// "correction_matrix" string rows (format: <-2Da>/<-1Da>/<+1Da>/<+2Da>).
Matrix<double> ItraqFourPlexQuantitationMethod::getIsotopeCorrectionMatrix() const
{
  const StringList correction_rows = ListUtils::toStringList<std::string>(getParameters().getValue("correction_matrix"));
  return stringListToIsotopeCorrectionMatrix_(correction_rows);
}
// Returns the 0-based index of the reference channel (0 = channel 114).
Size ItraqFourPlexQuantitationMethod::getReferenceChannel() const
{
return reference_channel_;
}
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/QUANTITATION/TMTSixteenPlexQuantitationMethod.cpp | .cpp | 10,199 | 164 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Stephan Aiche $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/QUANTITATION/TMTSixteenPlexQuantitationMethod.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <OpenMS/DATASTRUCTURES/Matrix.h>
#include <algorithm>
namespace OpenMS
{
// Canonical method name and the fixed order of the 16 TMT channel names;
// the order defines the 0-based channel indices used throughout this class.
const String TMTSixteenPlexQuantitationMethod::name_ = "tmt16plex";
const std::vector<std::string> TMTSixteenPlexQuantitationMethod::channel_names_ = {"126","127N","127C","128N","128C","129N","129C","130N","130C","131N","131C","132N","132C","133N","133C","134N"};
// Sets up the 16 TMTpro reporter channels with their reporter m/z values and,
// per channel, the indices of channels affected by its isotope impurities in
// the order <-2C13>/<-N15-C13>/<-C13>/<-N15>/<+N15>/<+C13>/<+N15+C13>/<+2C13>
// (-1 = that impurity falls outside the plex).
TMTSixteenPlexQuantitationMethod::TMTSixteenPlexQuantitationMethod()
{
setName("TMTSixteenPlexQuantitationMethod");
// create the channel map
channels_.push_back(IsobaricChannelInformation("126", 0, "", 126.127726, {-1, -1, -1, -1, 1, 2, 3, 4}));
channels_.push_back(IsobaricChannelInformation("127N", 1, "", 127.124761, {-1, -1, -1, 0, -1, 3, -1, 5}));
channels_.push_back(IsobaricChannelInformation("127C", 2, "", 127.131081, {-1, -1, 0, -1, 3, 4, 5, 6}));
channels_.push_back(IsobaricChannelInformation("128N", 3, "", 128.128116, {-1, 0, 1, 2, -1, 5, -1, 7}));
channels_.push_back(IsobaricChannelInformation("128C", 4, "", 128.134436, { 0, -1, 2, -1, 5, 6, 7, 8}));
channels_.push_back(IsobaricChannelInformation("129N", 5, "", 129.131471, { 1, 2, 3, 4, -1, 7, -1, 9}));
channels_.push_back(IsobaricChannelInformation("129C", 6, "", 129.137790, { 2, -1, 4, -1, 7, 8, 9, 10}));
channels_.push_back(IsobaricChannelInformation("130N", 7, "", 130.134825, { 3, 4, 5, 6, -1, 9, -1, 11}));
channels_.push_back(IsobaricChannelInformation("130C", 8, "", 130.141145, { 4, -1, 6, -1, 9, 10, 11, 12}));
channels_.push_back(IsobaricChannelInformation("131N", 9, "", 131.138180, { 5, 6, 7, 8, -1, 11, -1, 13}));
channels_.push_back(IsobaricChannelInformation("131C", 10, "", 131.144500, { 6, -1, 8, -1, 11, 12, 13, 14}));
channels_.push_back(IsobaricChannelInformation("132N", 11, "", 132.141535, { 7, 8, 9, 10, -1, 13, -1, 15}));
channels_.push_back(IsobaricChannelInformation("132C", 12, "", 132.147855, { 8, -1, 10, -1, 13, 14, 15, -1}));
channels_.push_back(IsobaricChannelInformation("133N", 13, "", 133.144890, { 9, 10, 11, 12, -1, 15, -1, -1}));
channels_.push_back(IsobaricChannelInformation("133C", 14, "", 133.151210, {10, -1, 12, -1, 15, -1, -1, -1}));
channels_.push_back(IsobaricChannelInformation("134N", 15, "", 134.148245, {11, 12, 13, 14, -1, -1, -1, -1}));
// we assume 126 to be the reference
reference_channel_ = 0;
setDefaultParams_();
}
// Registers the default parameters: a description per channel, the reference
// channel (by name, validated against channel_names_), and the default
// isotope-impurity correction matrix from the Thermo data sheet
// (one row per channel, 8 values each; NA = not applicable).
void TMTSixteenPlexQuantitationMethod::setDefaultParams_()
{
defaults_.setValue("channel_126_description", "", "Description for the content of the 126 channel.");
defaults_.setValue("channel_127N_description", "", "Description for the content of the 127N channel.");
defaults_.setValue("channel_127C_description", "", "Description for the content of the 127C channel.");
defaults_.setValue("channel_128N_description", "", "Description for the content of the 128N channel.");
defaults_.setValue("channel_128C_description", "", "Description for the content of the 128C channel.");
defaults_.setValue("channel_129N_description", "", "Description for the content of the 129N channel.");
defaults_.setValue("channel_129C_description", "", "Description for the content of the 129C channel.");
defaults_.setValue("channel_130N_description", "", "Description for the content of the 130N channel.");
defaults_.setValue("channel_130C_description", "", "Description for the content of the 130C channel.");
defaults_.setValue("channel_131N_description", "", "Description for the content of the 131N channel.");
defaults_.setValue("channel_131C_description", "", "Description for the content of the 131C channel.");
defaults_.setValue("channel_132N_description", "", "Description for the content of the 132N channel.");
defaults_.setValue("channel_132C_description", "", "Description for the content of the 132C channel.");
defaults_.setValue("channel_133N_description", "", "Description for the content of the 133N channel.");
defaults_.setValue("channel_133C_description", "", "Description for the content of the 133C channel.");
defaults_.setValue("channel_134N_description", "", "Description for the content of the 134N channel.");
defaults_.setValue("reference_channel", "126", "The reference channel (126, 127N, 127C, 128N, 128C, 129N, 129C, 130N, 130C, 131N, 131C, 132N, 132C, 133N, 133C, 134N).");
defaults_.setValidStrings("reference_channel", TMTSixteenPlexQuantitationMethod::channel_names_);
defaults_.setValue("correction_matrix", std::vector<std::string>{
"NA/NA / NA/NA / 0.31/9.09 / 0.02/0.32",
"NA/NA / NA/0.78 / NA/9.41 / NA/0.33",
"NA/NA / 0.93/NA / 0.35/8.63 / 0.01/0.27",
"NA/0.00 / 0.82/0.65 / NA/8.13 / NA/0.26",
"0.00/NA / 1.47/NA / 0.34/6.91 / 0.00/0.15",
"0.00/0.00 / 1.46/1.28 / NA/6.86 / NA/0.15",
"0.13/NA / 2.59/NA / 0.32/6.07 / 0.1/0.09",
"0.13/0.00 / 2.41/0.27 / NA/5.58 / NA/0.10",
"0.04/NA / 3.10/NA / 0.42/4.82 / 0.02/0.06",
"0.03/0.00 / 2.78/0.63 / NA/4.57 / NA/0.12",
"0.08/NA / 3.90/NA / 0.47/3.57 / 0.00/0.04",
"0.15/0.01 / 3.58/0.72 / NA/1.80 / NA/0.00",
"0.11/NA / 4.55/NA / 0.43/1.86 / 0.00/0.00",
"0.07/0.01 / 3.14/0.73 / NA/3.40 / NA/0.03",
"0.22/NA / 4.96/NA / 0.34/1.03 / 0.00/NA",
"0.30/0.03 / 5.49/0.62 / NA/1.14 / NA/NA"
},
"Correction matrix for isotope distributions in percent from the Thermo data sheet (see documentation);"
" Please provide 16 entries (rows), separated by comma, where each entry contains 8 values in the following format: <-2C13>/<-N15-C13>/<-C13>/<-N15>/<+N15>/<+C13>/<+N15+C13>/<+2C13> e.g. one row may look like this: 'NA/0.00 / 0.82/0.65 / NA/8.13 / NA/0.26'. You may use whitespaces at your leisure to ease reading.");
defaultsToParam_();
}
void TMTSixteenPlexQuantitationMethod::updateMembers_()
{
channels_[0].description = param_.getValue("channel_126_description").toString();
channels_[1].description = param_.getValue("channel_127N_description").toString();
channels_[2].description = param_.getValue("channel_127C_description").toString();
channels_[3].description = param_.getValue("channel_128N_description").toString();
channels_[4].description = param_.getValue("channel_128C_description").toString();
channels_[5].description = param_.getValue("channel_129N_description").toString();
channels_[6].description = param_.getValue("channel_129C_description").toString();
channels_[7].description = param_.getValue("channel_130N_description").toString();
channels_[8].description = param_.getValue("channel_130C_description").toString();
channels_[9].description = param_.getValue("channel_131N_description").toString();
channels_[10].description = param_.getValue("channel_131C_description").toString();
channels_[11].description = param_.getValue("channel_132N_description").toString();
channels_[12].description = param_.getValue("channel_132C_description").toString();
channels_[13].description = param_.getValue("channel_133N_description").toString();
channels_[14].description = param_.getValue("channel_133C_description").toString();
channels_[15].description = param_.getValue("channel_134N_description").toString();
// compute the index of the reference channel
std::vector<std::string>::const_iterator t_it = std::find(TMTSixteenPlexQuantitationMethod::channel_names_.begin(),
TMTSixteenPlexQuantitationMethod::channel_names_.end(),
param_.getValue("reference_channel"));
reference_channel_ = t_it - TMTSixteenPlexQuantitationMethod::channel_names_.begin();
}
// Copy constructor: copies the base-class state (parameters etc.) and then
// duplicates the channel list and reference channel index.
TMTSixteenPlexQuantitationMethod::TMTSixteenPlexQuantitationMethod(const TMTSixteenPlexQuantitationMethod& other):
  IsobaricQuantitationMethod(other)
{
  channels_ = other.channels_;
  reference_channel_ = other.reference_channel_;
}
// Default assignment copies the base class, channel list and reference index.
TMTSixteenPlexQuantitationMethod& TMTSixteenPlexQuantitationMethod::operator=(const TMTSixteenPlexQuantitationMethod& rhs)
= default;
const String& TMTSixteenPlexQuantitationMethod::getMethodName() const
{
return TMTSixteenPlexQuantitationMethod::name_;
}
const IsobaricQuantitationMethod::IsobaricChannelList& TMTSixteenPlexQuantitationMethod::getChannelInformation() const
{
return channels_;
}
Size TMTSixteenPlexQuantitationMethod::getNumberOfChannels() const
{
return 16;
}
// Builds the numeric isotope-correction matrix from the user-supplied
// "correction_matrix" string rows.
Matrix<double> TMTSixteenPlexQuantitationMethod::getIsotopeCorrectionMatrix() const
{
  const StringList correction_rows = ListUtils::toStringList<std::string>(getParameters().getValue("correction_matrix"));
  return stringListToIsotopeCorrectionMatrix_(correction_rows);
}
// Returns the 0-based index of the reference channel (0 = channel 126).
Size TMTSixteenPlexQuantitationMethod::getReferenceChannel() const
{
return reference_channel_;
}
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/QUANTITATION/IsobaricQuantifier.cpp | .cpp | 6,097 | 141 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Stephan Aiche, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/QUANTITATION/IsobaricIsotopeCorrector.h>
#include <OpenMS/ANALYSIS/QUANTITATION/IsobaricNormalizer.h>
#include <OpenMS/ANALYSIS/QUANTITATION/IsobaricQuantifier.h>
#include <OpenMS/ANALYSIS/QUANTITATION/IsobaricQuantitationMethod.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/KERNEL/ConsensusMap.h>
namespace OpenMS
{
// Constructs a quantifier for the given quantitation method (not owned;
// the caller must keep the method alive for the lifetime of this object).
IsobaricQuantifier::IsobaricQuantifier(const IsobaricQuantitationMethod* const quant_method) : DefaultParamHandler("IsobaricQuantifier"), quant_method_(quant_method)
{
setDefaultParams_();
}
// Copy constructor: shares the (non-owned) quantitation method pointer.
IsobaricQuantifier::IsobaricQuantifier(const IsobaricQuantifier& other) : DefaultParamHandler(other), quant_method_(other.quant_method_)
{
}
// Assignment: copies the parameter-handler state and shares the (non-owned)
// quantitation method pointer.
IsobaricQuantifier& IsobaricQuantifier::operator=(const IsobaricQuantifier& rhs)
{
  if (this != &rhs)
  {
    DefaultParamHandler::operator=(rhs);
    quant_method_ = rhs.quant_method_;
  }
  return *this;
}
// Registers the two boolean switches of this tool: isotope-impurity
// correction (on by default) and reference-channel normalization (off).
void IsobaricQuantifier::setDefaultParams_()
{
defaults_.setValue("isotope_correction", "true",
"Enable isotope correction (highly recommended). "
"Note that you need to provide a correct isotope correction matrix "
"otherwise the tool will fail or produce invalid results.");
defaults_.setValidStrings("isotope_correction", {"true", "false"});
defaults_.setValue("normalization", "false",
"Enable normalization of channel intensities with respect to the reference channel. "
"The normalization is done by using the Median of Ratios (every channel / Reference). "
"Also the ratio of medians (from any channel and reference) is provided as control measure!");
defaults_.setValidStrings("normalization", {"true", "false"});
defaultsToParam_();
}
void IsobaricQuantifier::updateMembers_()
{
isotope_correction_enabled_ = getParameters().getValue("isotope_correction") == "true";
normalization_enabled_ = getParameters().getValue("normalization") == "true";
}
void IsobaricQuantifier::quantify(const ConsensusMap& consensus_map_in, ConsensusMap& consensus_map_out)
{
// precheck incoming map
if (consensus_map_in.empty())
{
OPENMS_LOG_WARN << "Warning: Empty iTRAQ/TMT container. No quantitative information available!" << std::endl;
return;
}
// create output map based on input, we will cleanup the channels while iterating over it
consensus_map_out = consensus_map_in;
// init stats
stats_.reset();
stats_.channel_count = quant_method_->getNumberOfChannels();
// apply isotope correction if requested by user
if (isotope_correction_enabled_)
{
stats_ = IsobaricIsotopeCorrector::correctIsotopicImpurities(consensus_map_in, consensus_map_out, quant_method_);
}
else
{
OPENMS_LOG_WARN << "Warning: Due to deactivated isotope-correction labeling statistics will be based on raw intensities, which might give too optimistic results." << std::endl;
}
// compute statistics and embed into output map
computeLabelingStatistics_(consensus_map_out);
// apply normalization if requested
if (normalization_enabled_)
{
IsobaricNormalizer normalizer(quant_method_);
normalizer.normalize(consensus_map_out);
}
}
// Computes labeling-efficiency statistics (scans without any reporter signal,
// and per-channel counts of empty reporters), logs them and embeds them as
// meta values in the output map. Must only be called on a non-empty map
// (quantify() guarantees this); the percentage computation divides by size().
void IsobaricQuantifier::computeLabelingStatistics_(ConsensusMap& consensus_map_out)
{
  const Size n_scans = consensus_map_out.size();
  // number of total quantified spectra
  stats_.number_ms2_total = n_scans;
  // Labeling efficiency statistics
  for (Size scan = 0; scan < n_scans; ++scan)
  {
    // is whole scan empty?!
    if (consensus_map_out[scan].getIntensity() == 0)
      ++stats_.number_ms2_empty;
    // look at single reporters
    for (const auto& element : consensus_map_out[scan])
    {
      if (element.getIntensity() == 0)
      {
        String ch_index = consensus_map_out.getColumnHeaders()[element.getMapIndex()].getMetaValue("channel_name");
        ++stats_.empty_channels[ch_index];
      }
    }
  }
  OPENMS_LOG_INFO << "IsobaricQuantifier: skipped " << stats_.number_ms2_empty << " of " << n_scans << " selected scans due to lack of reporter information:\n";
  consensus_map_out.setMetaValue("isoquant:scans_noquant", stats_.number_ms2_empty);
  consensus_map_out.setMetaValue("isoquant:scans_total", n_scans);
  OPENMS_LOG_INFO << "IsobaricQuantifier: channels with signal\n";
  // iterate channels in the same order as IsobaricChannelExtractor prints its
  // stats, so the user can make a 1:1 comparison
  for (const auto& channel : quant_method_->getChannelInformation())
  {
    const auto stat_it = stats_.empty_channels.find(channel.name);
    if (stat_it == stats_.empty_channels.end())
    { // should not happen
      OPENMS_LOG_WARN << "Warning: no stats for channel '" << channel.name << "'" << std::endl;
      continue;
    }
    const Size with_signal = n_scans - stat_it->second;
    OPENMS_LOG_INFO << "  ch " << String(channel.name).fillRight(' ', 4) << ": " << with_signal << " / " << n_scans << " ("
                    << (with_signal * 100 / n_scans) << "%)\n";
    consensus_map_out.setMetaValue(String("isoquant:quantifyable_ch") + stat_it->first, with_signal);
  }
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/QUANTITATION/KDTreeFeatureMaps.cpp | .cpp | 4,160 | 156 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Johannes Veit $
// $Authors: Johannes Veit $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/QUANTITATION/KDTreeFeatureMaps.h>
#include <OpenMS/MATH/MathFunctions.h>
using namespace std;
namespace OpenMS
{
// Registers a feature (not owned) from map mt_map_index and inserts a node
// for it into the 2D tree. The RT is cached separately so it can later be
// replaced by applyTransformations(); the parallel vectors must be filled
// before the tree insert, since the node looks coordinates up through *this.
void KDTreeFeatureMaps::addFeature(Size mt_map_index, const BaseFeature* feature)
{
  map_index_.push_back(mt_map_index);
  features_.push_back(feature);
  rt_.push_back(feature->getRT());
  kd_tree_.insert(KDTreeFeatureNode(this, size() - 1));
}
// Returns the i-th stored feature (non-owning pointer).
const BaseFeature* KDTreeFeatureMaps::feature(Size i) const
{
return features_[i];
}
// Returns the (possibly transformed) RT of feature i from the cache.
double KDTreeFeatureMaps::rt(Size i) const
{
return rt_[i];
}
// Returns the m/z of feature i (read directly from the feature).
double KDTreeFeatureMaps::mz(Size i) const
{
return features_[i]->getMZ();
}
// Returns the intensity of feature i.
float KDTreeFeatureMaps::intensity(Size i) const
{
return features_[i]->getIntensity();
}
// Returns the charge of feature i.
Int KDTreeFeatureMaps::charge(Size i) const
{
return features_[i]->getCharge();
}
// Returns the index of the map feature i originates from.
Size KDTreeFeatureMaps::mapIndex(Size i) const
{
return map_index_[i];
}
// Number of stored features.
Size KDTreeFeatureMaps::size() const
{
return features_.size();
}
// Number of nodes in the 2D tree (should equal size()).
Size KDTreeFeatureMaps::treeSize() const
{
return kd_tree_.size();
}
// Number of input maps.
Size KDTreeFeatureMaps::numMaps() const
{
return num_maps_;
}
void KDTreeFeatureMaps::clear()
{
features_.clear();
map_index_.clear();
kd_tree_.clear();
}
// Rebalances the 2D tree for faster range queries; call after bulk insertion.
void KDTreeFeatureMaps::optimizeTree()
{
kd_tree_.optimize();
}
// Appends to result_indices all features within the RT/m-z tolerance window
// around feature 'index'. Features from the query's own map are skipped unless
// include_features_from_same_map is set. If max_pairwise_log_fc >= 0,
// candidates whose absolute log10 intensity ratio to the query exceeds it are
// filtered out.
void KDTreeFeatureMaps::getNeighborhood(Size index, vector<Size>& result_indices, double rt_tol, double mz_tol, bool mz_ppm, bool include_features_from_same_map, double max_pairwise_log_fc) const
{
  const pair<double, double> rt_win = Math::getTolWindow(rt(index), rt_tol, false);
  const pair<double, double> mz_win = Math::getTolWindow(mz(index), mz_tol, mz_ppm);
  const Size ignored_map_index = include_features_from_same_map ? numeric_limits<Size>::max() : map_index_[index];
  vector<Size> candidates;
  queryRegion(rt_win.first, rt_win.second, mz_win.first, mz_win.second, candidates, ignored_map_index);
  if (max_pairwise_log_fc < 0.0)
  {
    // fold-change check disabled: keep every candidate
    result_indices.insert(result_indices.end(), candidates.begin(), candidates.end());
    return;
  }
  // max log fold change check enabled
  const double query_intensity = features_[index]->getIntensity();
  for (const Size cand : candidates)
  {
    const double cand_intensity = features_[cand]->getIntensity();
    // NOTE: the ratio becomes NaN/inf for zero or negative intensities; such
    // candidates simply fail the comparison below and are skipped, which is
    // the intended behavior.
    const double abs_log_fc = fabs(log10(cand_intensity / query_intensity));
    if (abs_log_fc <= max_pairwise_log_fc)
    {
      result_indices.push_back(cand);
    }
  }
}
void KDTreeFeatureMaps::queryRegion(double rt_low, double rt_high, double mz_low, double mz_high, vector<Size>& result_indices, Size ignored_map_index) const
{
// set up tolerance window as region for the 2D tree
FeatureKDTree::_Region_ region;
region._M_low_bounds[0] = rt_low;
region._M_high_bounds[0] = rt_high;
region._M_low_bounds[1] = mz_low;
region._M_high_bounds[1] = mz_high;
// range-query tolerance window
vector<KDTreeFeatureNode> tmp_result;
kd_tree_.find_within_range(region, back_insert_iterator<vector<KDTreeFeatureNode> >(tmp_result));
// add indices to result
result_indices.clear();
for (vector<KDTreeFeatureNode>::const_iterator it = tmp_result.begin(); it != tmp_result.end(); ++it)
{
Size found_index = it->getIndex();
if (ignored_map_index == numeric_limits<Size>::max() || map_index_[found_index] != ignored_map_index)
{
result_indices.push_back(found_index);
}
}
}
void KDTreeFeatureMaps::applyTransformations(const vector<TransformationModelLowess*>& trafos)
{
for (Size i = 0; i < size(); ++i)
{
rt_[i] = trafos[map_index_[i]]->evaluate(features_[i]->getRT());
}
}
// No parameter-dependent members to refresh; intentionally empty.
void KDTreeFeatureMaps::updateMembers_()
{
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MRM/ReactionMonitoringTransition.cpp | .cpp | 12,016 | 400 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Andreas Bertsch $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MRM/ReactionMonitoringTransition.h>
#include <OpenMS/CONCEPT/Helpers.h>
#include <utility>
namespace OpenMS
{
// Bit positions inside transition_flags_ for the three transition roles.
static const unsigned char DETECTING_TRANSITION_LOC = 0;
static const unsigned char IDENTIFYING_TRANSITION_LOC = 1;
static const unsigned char QUANTIFYING_TRANSITION_LOC = 2;
// Default constructor. library_intensity_ = -101 marks "not set";
// precursor_cv_terms_ and prediction_ are lazily allocated on first use.
ReactionMonitoringTransition::ReactionMonitoringTransition() :
CVTermList(),
library_intensity_(-101),
decoy_type_(UNKNOWN),
precursor_mz_(0.0),
precursor_cv_terms_(nullptr),
prediction_(nullptr)
{
// Default is: true, false, true
// NOTE: do not change that, the same default is implicitly assumed in TraMLHandler
transition_flags_[DETECTING_TRANSITION_LOC] = true;
transition_flags_[IDENTIFYING_TRANSITION_LOC] = false;
transition_flags_[QUANTIFYING_TRANSITION_LOC] = true;
}
// Copy constructor: performs a deep copy of the two optional heap-allocated
// members (precursor_cv_terms_, prediction_) rather than sharing pointers.
ReactionMonitoringTransition::ReactionMonitoringTransition(const ReactionMonitoringTransition & rhs) :
CVTermList(rhs),
name_(rhs.name_),
peptide_ref_(rhs.peptide_ref_),
compound_ref_(rhs.compound_ref_),
library_intensity_(rhs.library_intensity_),
decoy_type_(rhs.decoy_type_),
precursor_mz_(rhs.precursor_mz_),
precursor_cv_terms_(nullptr),
product_(rhs.product_),
intermediate_products_(rhs.intermediate_products_),
rts(rhs.rts),
prediction_(nullptr),
transition_flags_(rhs.transition_flags_)
{
// We copy the internal object (not just the ptr)
if (rhs.precursor_cv_terms_ != nullptr)
{
precursor_cv_terms_ = new CVTermList(*rhs.precursor_cv_terms_);
}
if (rhs.prediction_ != nullptr)
{
prediction_ = new Prediction(*rhs.prediction_);
}
}
// Move constructor: steals the heap-allocated members and nulls them in the
// source so its destructor does not double-delete.
ReactionMonitoringTransition::ReactionMonitoringTransition(ReactionMonitoringTransition && rhs) noexcept :
CVTermList(std::move(rhs)),
name_(std::move(rhs.name_)),
peptide_ref_(std::move(rhs.peptide_ref_)),
compound_ref_(std::move(rhs.compound_ref_)),
library_intensity_(std::move(rhs.library_intensity_)),
decoy_type_(std::move(rhs.decoy_type_)),
precursor_mz_(std::move(rhs.precursor_mz_)),
precursor_cv_terms_(std::move(rhs.precursor_cv_terms_)),
product_(std::move(rhs.product_)),
intermediate_products_(std::move(rhs.intermediate_products_)),
rts(std::move(rhs.rts)),
prediction_(std::move(rhs.prediction_)),
transition_flags_(std::move(rhs.transition_flags_))
{
rhs.precursor_cv_terms_ = nullptr;
rhs.prediction_ = nullptr;
}
// Destructor: releases the optionally allocated CV-term list and prediction
// (deleting nullptr is a safe no-op).
ReactionMonitoringTransition::~ReactionMonitoringTransition()
{
delete precursor_cv_terms_;
delete prediction_;
}
// Copy assignment: deep-copies the heap-allocated members, freeing any
// previously held objects first. Self-assignment safe.
ReactionMonitoringTransition & ReactionMonitoringTransition::operator=(const ReactionMonitoringTransition & rhs)
{
if (&rhs != this)
{
CVTermList::operator=(rhs);
name_ = rhs.name_;
peptide_ref_ = rhs.peptide_ref_;
compound_ref_ = rhs.compound_ref_;
precursor_mz_ = rhs.precursor_mz_;
intermediate_products_ = rhs.intermediate_products_;
product_ = rhs.product_;
rts = rhs.rts;
library_intensity_ = rhs.library_intensity_;
decoy_type_ = rhs.decoy_type_;
transition_flags_ = rhs.transition_flags_;
// We copy the internal object (not just the ptr)
delete precursor_cv_terms_;
precursor_cv_terms_ = nullptr;
if (rhs.precursor_cv_terms_ != nullptr)
{
precursor_cv_terms_ = new CVTermList(*rhs.precursor_cv_terms_);
}
// We copy the internal object (not just the ptr)
delete prediction_;
prediction_ = nullptr;
if (rhs.prediction_ != nullptr)
{
prediction_ = new Prediction(*rhs.prediction_);
}
}
return *this;
}
// Move assignment: frees the currently held heap objects, steals rhs's
// pointers and nulls them in rhs. Self-assignment safe.
ReactionMonitoringTransition & ReactionMonitoringTransition::operator=(ReactionMonitoringTransition && rhs) noexcept
{
if (&rhs != this)
{
CVTermList::operator=(std::move(rhs));
name_ = std::move(rhs.name_);
peptide_ref_ = std::move(rhs.peptide_ref_);
compound_ref_ = std::move(rhs.compound_ref_);
precursor_mz_ = std::move(rhs.precursor_mz_);
intermediate_products_ = std::move(rhs.intermediate_products_);
product_ = std::move(rhs.product_);
rts = std::move(rhs.rts);
library_intensity_ = std::move(rhs.library_intensity_);
decoy_type_ = std::move(rhs.decoy_type_);
transition_flags_ = std::move(rhs.transition_flags_);
// Move the ptr-based objects to the current objects and delete them in the rhs
delete precursor_cv_terms_;
precursor_cv_terms_ = rhs.precursor_cv_terms_;
rhs.precursor_cv_terms_ = nullptr;
delete prediction_;
prediction_ = rhs.prediction_;
rhs.prediction_ = nullptr;
}
return *this;
}
// Equality compares all members; the two optional heap members are compared
// by value via cmpPtrSafe (nullptr == nullptr, otherwise dereferenced).
bool ReactionMonitoringTransition::operator==(const ReactionMonitoringTransition & rhs) const
{
return CVTermList::operator==(rhs) &&
name_ == rhs.name_ &&
peptide_ref_ == rhs.peptide_ref_ &&
compound_ref_ == rhs.compound_ref_ &&
precursor_mz_ == rhs.precursor_mz_ &&
OpenMS::Helpers::cmpPtrSafe< CVTermList* >(precursor_cv_terms_, rhs.precursor_cv_terms_) &&
product_ == rhs.product_ &&
intermediate_products_ == rhs.intermediate_products_ &&
rts == rhs.rts &&
OpenMS::Helpers::cmpPtrSafe< Prediction* >(prediction_, rhs.prediction_) &&
library_intensity_ == rhs.library_intensity_ &&
decoy_type_ == rhs.decoy_type_ &&
transition_flags_ == rhs.transition_flags_;
}
// Inequality is defined as the negation of operator==.
bool ReactionMonitoringTransition::operator!=(const ReactionMonitoringTransition & rhs) const
{
return !(*this == rhs);
}
// Name and native ID accessors. NOTE: both map onto the same member (name_),
// so setting one overwrites the other.
void ReactionMonitoringTransition::setName(const String & name)
{
name_ = name;
}
const String & ReactionMonitoringTransition::getName() const
{
return name_;
}
void ReactionMonitoringTransition::setNativeID(const String & name)
{
name_ = name;
}
const String & ReactionMonitoringTransition::getNativeID() const
{
return name_;
}
// Accessors for the peptide / compound references this transition points to.
void ReactionMonitoringTransition::setPeptideRef(const String & peptide_ref)
{
peptide_ref_ = peptide_ref;
}
const String & ReactionMonitoringTransition::getPeptideRef() const
{
return peptide_ref_;
}
void ReactionMonitoringTransition::setCompoundRef(const String & compound_ref)
{
compound_ref_ = compound_ref;
}
const String & ReactionMonitoringTransition::getCompoundRef() const
{
return compound_ref_;
}
// Precursor m/z accessors, and a presence check for the lazily allocated
// precursor CV-term list.
void ReactionMonitoringTransition::setPrecursorMZ(double mz)
{
precursor_mz_ = mz;
}
double ReactionMonitoringTransition::getPrecursorMZ() const
{
return precursor_mz_;
}
bool ReactionMonitoringTransition::hasPrecursorCVTerms() const
{
return (precursor_cv_terms_ != nullptr);
}
// Replaces the precursor CV-term list (freeing any previous one).
void ReactionMonitoringTransition::setPrecursorCVTermList(const CVTermList & list)
{
delete precursor_cv_terms_;
precursor_cv_terms_ = new CVTermList(list);
}
// Appends a CV term, lazily allocating the list on first use.
void ReactionMonitoringTransition::addPrecursorCVTerm(const CVTerm & cv_term)
{
if (!precursor_cv_terms_)
{
precursor_cv_terms_ = new CVTermList();
}
precursor_cv_terms_->addCVTerm(cv_term);
}
// Precondition: hasPrecursorCVTerms() must be true (dereferences the pointer).
const CVTermList & ReactionMonitoringTransition::getPrecursorCVTermList() const
{
OPENMS_PRECONDITION(hasPrecursorCVTerms(), "ReactionMonitoringTransition has no PrecursorCVTerms, check first with hasPrecursorCVTerms()")
return *precursor_cv_terms_;
}
// Thin forwarding accessors for the product ion (m/z, charge, CV terms).
void ReactionMonitoringTransition::setProductMZ(double mz)
{
product_.setMZ(mz);
}
double ReactionMonitoringTransition::getProductMZ() const
{
return product_.getMZ();
}
int ReactionMonitoringTransition::getProductChargeState() const
{
return product_.getChargeState();
}
bool ReactionMonitoringTransition::isProductChargeStateSet() const
{
return product_.hasCharge();
}
void ReactionMonitoringTransition::addProductCVTerm(const CVTerm & cv_term)
{
product_.addCVTerm(cv_term);
}
// Accessors for the intermediate product list and the (final) product.
const std::vector<ReactionMonitoringTransition::Product> & ReactionMonitoringTransition::getIntermediateProducts() const
{
return intermediate_products_;
}
void ReactionMonitoringTransition::addIntermediateProduct(const ReactionMonitoringTransition::Product& product)
{
intermediate_products_.push_back(product);
}
void ReactionMonitoringTransition::setIntermediateProducts(const std::vector<ReactionMonitoringTransition::Product> & intermediate_products)
{
intermediate_products_ = intermediate_products;
}
// Takes the product by value and moves it into place (sink parameter).
void ReactionMonitoringTransition::setProduct(ReactionMonitoringTransition::Product product)
{
product_ = std::move(product);
}
const ReactionMonitoringTransition::Product & ReactionMonitoringTransition::getProduct() const
{
return product_;
}
  void ReactionMonitoringTransition::setRetentionTime(ReactionMonitoringTransition::RetentionTime rt)
  {
    // Pass-by-value + move: callers can hand over a temporary without a copy.
    rts = std::move(rt);
  }
  const ReactionMonitoringTransition::RetentionTime & ReactionMonitoringTransition::getRetentionTime() const
  {
    // Read-only access to the stored retention time information.
    return rts;
  }
  bool ReactionMonitoringTransition::hasPrediction() const
  {
    // The Prediction object is heap-allocated lazily; non-null means present.
    return (prediction_ != nullptr);
  }
void ReactionMonitoringTransition::setPrediction(const Prediction & prediction)
{
delete prediction_;
prediction_ = new Prediction(prediction);
}
const ReactionMonitoringTransition::Prediction & ReactionMonitoringTransition::getPrediction() const
{
OPENMS_PRECONDITION(hasPrecursorCVTerms(), "ReactionMonitoringTransition has no Prediction object, check first with hasPrediction()")
return *prediction_;
}
void ReactionMonitoringTransition::addPredictionTerm(const CVTerm & term)
{
if (!prediction_)
{
prediction_ = new Prediction();
}
prediction_->addCVTerm(term);
}
  void ReactionMonitoringTransition::updateMembers_()
  {
    // Intentionally empty: no derived members need recomputation when
    // parameters change (hook required by the base class interface).
  }
  ReactionMonitoringTransition::DecoyTransitionType ReactionMonitoringTransition::getDecoyTransitionType() const
  {
    // Whether this transition is a target or decoy (see DecoyTransitionType).
    return decoy_type_;
  }
  void ReactionMonitoringTransition::setDecoyTransitionType(const DecoyTransitionType & d)
  {
    decoy_type_ = d;
  }
  double ReactionMonitoringTransition::getLibraryIntensity() const
  {
    // Expected (library) intensity of this transition.
    return library_intensity_;
  }
  void ReactionMonitoringTransition::setLibraryIntensity(const double intensity)
  {
    library_intensity_ = intensity;
  }
  bool ReactionMonitoringTransition::isDetectingTransition() const
  {
    // The detecting/identifying/quantifying properties are packed into the
    // transition_flags_ bitset; *_LOC constants are the bit positions.
    return transition_flags_[DETECTING_TRANSITION_LOC];
  }
  void ReactionMonitoringTransition::setDetectingTransition(bool val)
  {
    transition_flags_[DETECTING_TRANSITION_LOC] = val;
  }
  bool ReactionMonitoringTransition::isIdentifyingTransition() const
  {
    // Accessor for the 'identifying transition' flag bit.
    return transition_flags_[IDENTIFYING_TRANSITION_LOC];
  }
  void ReactionMonitoringTransition::setIdentifyingTransition(bool val)
  {
    transition_flags_[IDENTIFYING_TRANSITION_LOC] = val;
  }
  bool ReactionMonitoringTransition::isQuantifyingTransition() const
  {
    // Accessor for the 'quantifying transition' flag bit.
    return transition_flags_[QUANTIFYING_TRANSITION_LOC];
  }
  void ReactionMonitoringTransition::setQuantifyingTransition(bool val)
  {
    transition_flags_[QUANTIFYING_TRANSITION_LOC] = val;
  }
  bool ReactionMonitoringTransition::ProductMZLess::operator()(ReactionMonitoringTransition const & left, ReactionMonitoringTransition const & right) const
  {
    // Strict weak ordering by product (Q3) m/z, for sorting transitions.
    return left.getProductMZ() < right.getProductMZ();
  }
  bool ReactionMonitoringTransition::NameLess::operator()(ReactionMonitoringTransition const & left, ReactionMonitoringTransition const & right) const
  {
    // Strict weak ordering by transition name, for sorting transitions.
    return left.getName() < right.getName();
  }
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/SEQUENCE/NeedlemanWunsch.cpp | .cpp | 11,193 | 161 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Nora Wild $
// $Authors: Nora Wild $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/SEQUENCE/NeedlemanWunsch.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <utility>
using namespace std;
namespace OpenMS
{
  // Substitution score tables, indexed as [matrix][residue1 - 'A'][residue2 - 'A'].
  // One 26x26 table per ScoringMatrix enum value (identity, PAM30MS).
  // Rows/columns for J, O and U hold INT16_MAX — presumably sentinels for
  // characters that must never occur in the input sequences (TODO confirm;
  // align() does not range-check its input characters).
  static int matrices[static_cast<int>(NeedlemanWunsch::ScoringMatrix::SIZE_OF_SCORINGMATRIX)][26][26]
  {
    //identity
    {
      //        A  B  C  D  E  F  G  H  I  J          K  L  M  N  O          P  Q  R  S  T  U          V  W  X  Y  Z
      /* A */ {1, 0, 0, 0, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0},
      /* B */ {0, 1, 0, 0, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0},
      /* C */ {0, 0, 1, 0, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0},
      /* D */ {0, 0, 0, 1, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0},
      /* E */ {0, 0, 0, 0, 1, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0},
      /* F */ {0, 0, 0, 0, 0, 1, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0},
      /* G */ {0, 0, 0, 0, 0, 0, 1, 0, 0, INT16_MAX, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0},
      /* H */ {0, 0, 0, 0, 0, 0, 0, 1, 0, INT16_MAX, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0},
      // note: I/L and K/Q score 1 against each other (isobaric / near-isobaric pairs)
      /* I */ {0, 0, 0, 0, 0, 0, 0, 0, 1, INT16_MAX, 0, 1, 0, 0, INT16_MAX, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0},
      /* J */ {INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX,},
      /* K */ {0, 0, 0, 0, 0, 0, 0, 0, 0, INT16_MAX, 1, 0, 0, 0, INT16_MAX, 0, 1, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0},
      /* L */ {0, 0, 0, 0, 0, 0, 0, 0, 1, INT16_MAX, 0, 1, 0, 0, INT16_MAX, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0},
      /* M */ {0, 0, 0, 0, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 1, 0, INT16_MAX, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0},
      /* N */ {0, 0, 0, 0, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 1, INT16_MAX, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0},
      /* O */ {INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX,},
      /* P */ {0, 0, 0, 0, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, INT16_MAX, 1, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0},
      /* Q */ {0, 0, 0, 0, 0, 0, 0, 0, 0, INT16_MAX, 1, 0, 0, 0, INT16_MAX, 0, 1, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0},
      /* R */ {0, 0, 0, 0, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, INT16_MAX, 0, 0, 1, 0, 0, INT16_MAX, 0, 0, 0, 0, 0},
      /* S */ {0, 0, 0, 0, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 1, 0, INT16_MAX, 0, 0, 0, 0, 0},
      /* T */ {0, 0, 0, 0, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 1, INT16_MAX, 0, 0, 0, 0, 0},
      /* U */ {INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX,},
      /* V */ {0, 0, 0, 0, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0, INT16_MAX, 1, 0, 0, 0, 0},
      /* W */ {0, 0, 0, 0, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0, INT16_MAX, 0, 1, 0, 0, 0},
      /* X */ {0, 0, 0, 0, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0},
      /* Y */ {0, 0, 0, 0, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 1, 0},
      /* Z */ {0, 0, 0, 0, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 0, INT16_MAX, 0, 0, 0, 0, 1}
    },
    //PAM30MS
    {
      //        A   B   C   D   E   F   G   H   I   J           K   L   M   N   O           P   Q   R   S   T   U           V   W   X   Y   Z
      /* A */ {6, -7, -6, -3, -2, -8, -2, -7, -5, INT16_MAX, -7, -6, -5, -4, INT16_MAX, -2, -4, -7,  0, -1, INT16_MAX, -2,-13,  0, -8, -6},
      /* B */ {-7, 5,-11, -7, -7,-12, -8, -4, -6, INT16_MAX,  5, -7, -3, -4, INT16_MAX, -5, -3,  5, -4, -5, INT16_MAX, -9, -7,  0,-10,  1},
      /* C */ {-6,-11, 10,-14,-14,-13, -9, -7, -6, INT16_MAX,-14,-11,-13,-11, INT16_MAX, -8,-14, -8, -3, -8, INT16_MAX, -6,-15,  0, -4,-14},
      /* D */ {-3, -7,-14,  8,  2,-15, -3, -4, -7, INT16_MAX, -4,-10,-11,  2, INT16_MAX, -8, -2,-10, -4, -5, INT16_MAX, -8,-15,  0,-11, -3},
      /* E */ {-2, -7,-14,  2,  8,-14, -4, -5, -5, INT16_MAX, -4, -7, -7, -2, INT16_MAX, -5,  1, -9, -4, -6, INT16_MAX, -6,-17,  0, -8, -2},
      /* F */ {-8,-12,-13,-15,-14,  9, -9, -6, -2, INT16_MAX,-14, -3, -4, -9, INT16_MAX,-10,-13, -9, -6, -9, INT16_MAX, -8, -4,  0,  2,-14},
      /* G */ {-2, -8, -9, -3, -4, -9,  6, -9,-11, INT16_MAX, -7,-11, -8, -3, INT16_MAX, -6, -7, -9, -2, -6, INT16_MAX, -5,-15,  0,-14, -7},
      /* H */ {-7, -4, -7, -4, -5, -6, -9,  9, -9, INT16_MAX, -6, -8,-10,  0, INT16_MAX, -4,  1, -2, -6, -7, INT16_MAX, -6, -7,  0, -3, -3},
      /* I */ {-5, -6, -6, -7, -5, -2,-11, -9,  8, INT16_MAX, -6,  5, -1, -5, INT16_MAX, -8, -8, -5, -7, -2, INT16_MAX,  2,-14,  0, -6, -7},
      /* J */ {INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX,},
      /* K */ {-7,  5,-14, -4, -4,-14, -7, -6, -6, INT16_MAX,  7, -7, -2, -1, INT16_MAX, -6, -3,  0, -4, -3, INT16_MAX, -9,-12,  0, -9,  4},
      /* L */ {-6, -7,-11,-10, -7, -3,-11, -8,  5, INT16_MAX, -7,  5,  0, -6, INT16_MAX, -8, -7, -7, -8, -5, INT16_MAX,  0,-10,  0, -7, -7},
      /* M */ {-5, -3,-13,-11, -7, -4, -8,-10, -1, INT16_MAX, -2,  0, 11, -9, INT16_MAX, -8, -4, -4, -5, -4, INT16_MAX, -1,-13,  0,-11, -3},
      /* N */ {-4, -4,-11,  2, -2, -9, -3,  0, -5, INT16_MAX, -1, -6, -9,  8, INT16_MAX, -6, -3, -6,  0, -2, INT16_MAX, -8, -8,  0, -4, -2},
      /* O */ {INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX,},
      /* P */ {-2, -5, -8, -8, -5,-10, -6, -4, -8, INT16_MAX, -6, -8, -8, -6, INT16_MAX,  8, -3, -4, -2, -4, INT16_MAX, -6,-14,  0,-13, -5},
      /* Q */ {-4, -3,-14, -2,  1,-13, -7,  1, -8, INT16_MAX, -3, -7, -4, -3, INT16_MAX, -3,  8, -2, -5, -5, INT16_MAX, -7,-13,  0,-12,  4},
      /* R */ {-7,  5, -8,-10, -9, -9, -9, -2, -5, INT16_MAX,  0, -7, -4, -6, INT16_MAX, -4, -2,  8, -3, -6, INT16_MAX, -8, -2,  0, 10, -1},
      /* S */ { 0, -4, -3, -4, -4, -6, -2, -6, -7, INT16_MAX, -4, -8, -5,  0, INT16_MAX, -2, -5, -3,  6,  0, INT16_MAX, -6, -5,  0, -7, -5},
      /* T */ {-1, -5, -8, -5, -6, -9, -6, -7, -2, INT16_MAX, -3, -5, -4, -2, INT16_MAX, -4, -5, -6,  0,  7, INT16_MAX, -3,-13,  0, -6, -4},
      /* U */ {INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX,},
      /* V */ {-2, -9, -6, -8, -6, -8, -5, -6,  2, INT16_MAX, -9,  0, -1, -8, INT16_MAX, -6, -7, -8, -6, -3, INT16_MAX,  7,-15,  0, -7, -8},
      /* W */ {-13,-7,-15,-15,-17, -4,-15, -7,-14, INT16_MAX,-12,-10,-13, -8, INT16_MAX,-14,-13, -2, -5,-13, INT16_MAX,-15, 13,  0, -5,-13},
      /* X */ { 0,  0,  0,  0,  0,  0,  0,  0,  0, INT16_MAX,  0,  0,  0,  0, INT16_MAX,  0,  0,  0,  0,  0, INT16_MAX,  0,  0,  0,  0,  0},
      /* Y */ {-8,-10, -4,-11, -8,  2,-14, -3, -6, INT16_MAX, -9, -7,-11, -4, INT16_MAX,-13,-12,-10, -7, -6, INT16_MAX, -7, -5,  0, 10,-11},
      /* Z */ {-6,  1,-14, -3, -2,-14, -7, -3, -7, INT16_MAX,  4, -7, -3, -2, INT16_MAX, -5,  4, -1, -5, -4, INT16_MAX, -8,-13,  0,-11,  4}
    }
  };
NeedlemanWunsch::NeedlemanWunsch(NeedlemanWunsch::ScoringMatrix matrix, int penalty)
{
setMatrix(matrix);
setPenalty(penalty);
}
  // Keep in sync with the ScoringMatrix enum: the position of a name in this
  // list is cast to the corresponding enum value in setMatrix(const string&).
  const vector<string> NeedlemanWunsch::NamesOfScoringMatrices = {"identity", "PAM30MS"};
  void NeedlemanWunsch::setMatrix(const NeedlemanWunsch::ScoringMatrix& matrix)
  {
    // Select one of the compiled-in substitution matrices.
    my_matrix_ = matrix;
  }
void NeedlemanWunsch::setMatrix(const std::string& matrix)
{
auto first = NamesOfScoringMatrices.begin();
auto last = NamesOfScoringMatrices.end();
const auto it = std::find(first, last, matrix);
if (it == last)
{
String msg = "Matrix is not known! Valid choices are: "+
ListUtils::concatenate(NamesOfScoringMatrices, ", ");
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
msg);
}
setMatrix(static_cast<NeedlemanWunsch::ScoringMatrix>(it - first));
}
  void NeedlemanWunsch::setPenalty(const int penalty)
  {
    // Linear gap penalty, subtracted once per gap position during alignment.
    gap_penalty_ = penalty;
  }
  NeedlemanWunsch::ScoringMatrix NeedlemanWunsch::getMatrix() const
  {
    // Currently selected substitution matrix.
    return my_matrix_;
  }
  int NeedlemanWunsch::getPenalty() const
  {
    // Currently configured gap penalty.
    return gap_penalty_;
  }
int NeedlemanWunsch::align(const String& seq1, const String& seq2)
{
unsigned seq1_len = (unsigned)seq1.length();
unsigned seq2_len = (unsigned)seq2.length();
first_row_.resize(seq2_len+1); // both rows have the same length
second_row_.resize(seq2_len+1);
int* p_firstrow = &(first_row_[0]);
int* p_secondrow = &(second_row_[0]);
int (*matrix_ptr)[26][26] = &matrices[static_cast<int>(my_matrix_)];
for (unsigned i = 0; i <= seq2_len; ++i) // initialize using gap-penalty
{
first_row_[i] = i * (-gap_penalty_);
}
for (unsigned i = 1;i <= seq1_len; ++i)
{
(*p_secondrow) = i * (-gap_penalty_); // the first value in a row
for (unsigned j = 1; j <= seq2_len; ++j)
{
(*(p_secondrow+j)) = max(max(((*(p_secondrow+j-1)) - gap_penalty_), ((*(p_firstrow+j)) - gap_penalty_)),
((*(p_firstrow+j-1)) + (*matrix_ptr)[seq1[i-1] - 'A'] [seq2[j-1] - 'A']));
}
swap(p_firstrow, p_secondrow);
}
return (*(p_firstrow + seq2_len));
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/XLMS/XQuestScores.cpp | .cpp | 14,289 | 399 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Eugen Netz $
// $Authors: Eugen Netz $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/XLMS/XQuestScores.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <boost/math/distributions/binomial.hpp>
#include <numeric>
using namespace std;
namespace OpenMS
{
float XQuestScores::preScore(Size matched_alpha, Size ions_alpha, Size matched_beta, Size ions_beta)
{
if ( (matched_alpha <= 0 && matched_beta <= 0) || ions_alpha <= 0 || ions_beta <= 0)
{
return 0.0;
}
// avoid 0 values in multiplication, adds a "dynamic range" among candidates with no matching linear peaks to one of the peptides
float matched_alpha_float = matched_alpha;
if (matched_alpha <= 0)
{
matched_alpha_float = 0.1f;
}
float matched_beta_float = matched_beta;
if (matched_beta <= 0)
{
matched_beta_float = 0.1f;
}
float result = sqrt((static_cast<float>(matched_alpha_float) / static_cast<float>(ions_alpha)) * (static_cast<float>(matched_beta_float) / static_cast<float>(ions_beta)));
return result;
}
float XQuestScores::preScore(Size matched_alpha, Size ions_alpha)
{
if (ions_alpha <= 0)
{
return 0.0;
}
float result = static_cast<float>(matched_alpha) / static_cast<float>(ions_alpha);
return result;
}
  // Match-odds score: negative log of the probability of observing at least
  // 'matched_size' random matches against 'theoretical_spec', modeled as a
  // binomial distribution. Higher = less likely by chance = better.
  // Returns 0 for empty input or a (meaningless) negative score.
  double XQuestScores::matchOddsScore(const PeakSpectrum& theoretical_spec, const Size matched_size, double fragment_mass_tolerance, bool fragment_mass_tolerance_unit_ppm, bool is_xlink_spectrum, Size n_charges)
  {
    using boost::math::binomial;
    Size theo_size = theoretical_spec.size();
    if (matched_size < 1 || theo_size < 1)
    {
      return 0;
    }
    // m/z span of the theoretical spectrum (peaks assumed sorted by m/z)
    // NOTE(review): a single-peak spectrum gives range == 0 and a division by
    // zero below — presumably never happens with real spectra; verify callers.
    double range = theoretical_spec[theo_size-1].getMZ() - theoretical_spec[0].getMZ();
    // Compute fragment tolerance in Da for the mean of MZ values, if tolerance in ppm (rough approximation)
    double mean = 0.0;
    for (Size i = 0; i < theo_size; ++i)
    {
      mean += theoretical_spec[i].getMZ();
    }
    mean = mean / theo_size;
    double tolerance_Th = fragment_mass_tolerance_unit_ppm ? mean * 1e-6 * fragment_mass_tolerance : fragment_mass_tolerance;
    // A priori probability of a random match given info about the theoretical spectrum
    double a_priori_p = 0;
    if (is_xlink_spectrum)
    {
      // for xlink spectra the effective number of independent positions is
      // reduced by the number of considered charge states
      a_priori_p = (1 - ( pow( (1 - 2 * tolerance_Th / (0.5 * range)), (static_cast<double>(theo_size) / static_cast<double>(n_charges)))));
    }
    else
    {
      a_priori_p = (1 - ( pow( (1 - 2 * tolerance_Th / (0.5 * range)), static_cast<int>(theo_size))));
    }
    double match_odds = 0;
    binomial flip(theo_size, a_priori_p);
    // min double number to avoid 0 values, causing scores with the value "inf"
    match_odds = -log(cdf(complement(flip, matched_size)) + std::numeric_limits<double>::min());
    // score lower than 0 does not make sense, but can happen if cfd = 0, -log( 1 + min() ) < 0
    if (match_odds >= 0.0)
    {
      return match_odds;
    }
    else
    {
      return 0;
    }
  }
  // Same computation as matchOddsScore() above, but operating on the
  // lightweight SimpleTSGXLMS::SimplePeak representation (raw .mz member)
  // instead of full Peak1D objects. Keep the two implementations in sync.
  double XQuestScores::matchOddsScoreSimpleSpec(const std::vector< SimpleTSGXLMS::SimplePeak >& theoretical_spec, const Size matched_size, double fragment_mass_tolerance, bool fragment_mass_tolerance_unit_ppm, bool is_xlink_spectrum, Size n_charges)
  {
    using boost::math::binomial;
    Size theo_size = theoretical_spec.size();
    if (matched_size < 1 || theo_size < 1)
    {
      return 0;
    }
    // m/z span of the theoretical spectrum (peaks assumed sorted by m/z)
    double range = theoretical_spec[theo_size-1].mz - theoretical_spec[0].mz;
    // Compute fragment tolerance in Da for the mean of MZ values, if tolerance in ppm (rough approximation)
    double mean = 0.0;
    for (Size i = 0; i < theo_size; ++i)
    {
      mean += theoretical_spec[i].mz;
    }
    mean = mean / theo_size;
    double tolerance_Th = fragment_mass_tolerance_unit_ppm ? mean * 1e-6 * fragment_mass_tolerance : fragment_mass_tolerance;
    // A priori probability of a random match given info about the theoretical spectrum
    double a_priori_p = 0;
    if (is_xlink_spectrum)
    {
      // for xlink spectra the effective number of independent positions is
      // reduced by the number of considered charge states
      a_priori_p = (1 - ( pow( (1 - 2 * tolerance_Th / (0.5 * range)), (static_cast<double>(theo_size) / static_cast<double>(n_charges)))));
    }
    else
    {
      a_priori_p = (1 - ( pow( (1 - 2 * tolerance_Th / (0.5 * range)), static_cast<int>(theo_size))));
    }
    double match_odds = 0;
    binomial flip(theo_size, a_priori_p);
    // min double number to avoid 0 values, causing scores with the value "inf"
    match_odds = -log(cdf(complement(flip, matched_size)) + std::numeric_limits<double>::min());
    // score lower than 0 does not make sense, but can happen if cfd = 0, -log( 1 + min() ) < 0
    if (match_odds >= 0.0)
    {
      return match_odds;
    }
    else
    {
      return 0;
    }
  }
double XQuestScores::logOccupancyProb(const PeakSpectrum& theoretical_spec, const Size matched_size, double fragment_mass_tolerance, bool fragment_mass_tolerance_unit_ppm)
{
using boost::math::binomial;
Size theo_size = theoretical_spec.size();
if (matched_size < 1 || theo_size < 1)
{
return 0;
}
double range;
double used_tolerance;
if (fragment_mass_tolerance_unit_ppm)
{
range = std::log(theoretical_spec.back().getMZ()) - std::log(theoretical_spec[0].getMZ());
used_tolerance = fragment_mass_tolerance / 1e6;
}
else
{
range = theoretical_spec.back().getMZ() - theoretical_spec[0].getMZ();
used_tolerance = fragment_mass_tolerance;
}
// A priori probability of a random match given info about the theoretical spectrum
double a_priori_p = 0;
a_priori_p = 1 - pow(1 - 2 * used_tolerance / range, static_cast<double>(theo_size));
double log_occu_prob = 0;
binomial flip(theo_size, a_priori_p);
// min double number to avoid 0 values, causing scores with the value "inf"
log_occu_prob = -log(cdf(complement(flip, matched_size)) + std::numeric_limits<double>::min());
// score lower than 0 does not make sense, but can happen, if cfd = 0, then -log( 1 + <double>::min() ) < 0
if (log_occu_prob >= 0.0)
{
return log_occu_prob;
}
else // underflow warning?
{
return 0;
}
}
double XQuestScores::weightedTICScoreXQuest(Size alpha_size, Size beta_size, double intsum_alpha, double intsum_beta, double total_current, bool type_is_cross_link)
{
// maxdigestlength and mindigestlength from standard settings of xQuest
double maxdigestlength = 50;
double mindigestlength = 5;
if (!type_is_cross_link)
{
beta_size = ( maxdigestlength + mindigestlength ) - alpha_size;
}
double aatotal = alpha_size + beta_size;
double invMax = 1 / (mindigestlength / (mindigestlength + maxdigestlength));
double invFrac_alpha = 1 / (alpha_size / aatotal);
double invFrac_beta = 1 / (beta_size / aatotal);
double TIC_weight_alpha = invFrac_alpha / invMax;
double TIC_weight_beta = invFrac_beta / invMax;
double wTIC = TIC_weight_alpha * (intsum_alpha / total_current ) + TIC_weight_beta * (intsum_beta / total_current);
return wTIC;
}
double XQuestScores::weightedTICScore(Size alpha_size, Size beta_size, double intsum_alpha, double intsum_beta, double total_current, bool type_is_cross_link)
{
if (!type_is_cross_link)
{
beta_size = alpha_size;
}
double aatotal = alpha_size + beta_size;
// deviation from xQuest algorithm: invMax is not a constant anymore
// and scales by the actual length difference between alpha and beta, rather than the maximal possible difference between any two peptides
// this results in a local scaling, rather than a global one
double invMax = 1 / (min(alpha_size, beta_size) / aatotal);
double invFrac_alpha = 1 / (alpha_size / aatotal);
double invFrac_beta = 1 / (beta_size / aatotal);
double TIC_weight_alpha = invFrac_alpha / invMax;
double TIC_weight_beta = invFrac_beta / invMax;
double wTIC = TIC_weight_alpha * (intsum_alpha / total_current ) + TIC_weight_beta * (intsum_beta / total_current);
return wTIC;
}
double XQuestScores::matchedCurrentChain(const std::vector< std::pair< Size, Size > >& matched_spec_linear, const std::vector< std::pair< Size, Size > >& matched_spec_xlinks, const PeakSpectrum& spectrum_linear_peaks, const PeakSpectrum& spectrum_xlink_peaks)
{
double intsum = 0;
for (SignedSize j = 0; j < static_cast<SignedSize>(matched_spec_linear.size()); ++j)
{
intsum += spectrum_linear_peaks[matched_spec_linear[j].second].getIntensity();
}
for (SignedSize j = 0; j < static_cast<SignedSize>(matched_spec_xlinks.size()); ++j)
{
intsum += spectrum_xlink_peaks[matched_spec_xlinks[j].second].getIntensity();
}
return intsum;
}
double XQuestScores::totalMatchedCurrent(const std::vector< std::pair< Size, Size > >& matched_spec_linear_alpha, const std::vector< std::pair< Size, Size > >& matched_spec_linear_beta, const std::vector< std::pair< Size, Size > >& matched_spec_xlinks_alpha, const std::vector< std::pair< Size, Size > >& matched_spec_xlinks_beta, const PeakSpectrum& spectrum_linear_peaks, const PeakSpectrum& spectrum_xlink_peaks)
{
// make vectors of matched peak indices
double intsum(0);
std::vector< Size > indices_linear;
std::vector< Size > indices_xlinks;
for (Size j = 0; j < matched_spec_linear_alpha.size(); ++j)
{
indices_linear.push_back(matched_spec_linear_alpha[j].second);
}
for (Size j = 0; j < matched_spec_linear_beta.size(); ++j)
{
indices_linear.push_back(matched_spec_linear_beta[j].second);
}
for (Size j = 0; j < matched_spec_xlinks_alpha.size(); ++j)
{
indices_xlinks.push_back(matched_spec_xlinks_alpha[j].second);
}
for (Size j = 0; j < matched_spec_xlinks_beta.size(); ++j)
{
indices_xlinks.push_back(matched_spec_xlinks_beta[j].second);
}
// make the indices in the vectors unique, to not sum up peak intensities multiple times
sort(indices_linear.begin(), indices_linear.end());
sort(indices_xlinks.begin(), indices_xlinks.end());
std::vector< Size >::iterator last_unique_linear = unique(indices_linear.begin(), indices_linear.end());
std::vector< Size >::iterator last_unique_xlinks = unique(indices_xlinks.begin(), indices_xlinks.end());
indices_linear.erase(last_unique_linear, indices_linear.end());
indices_xlinks.erase(last_unique_xlinks, indices_xlinks.end());
// sum over intensities under the unique indices
for (Size j = 0; j < indices_linear.size(); ++j)
{
intsum += spectrum_linear_peaks[indices_linear[j]].getIntensity();
}
for (Size j = 0; j < indices_xlinks.size(); ++j)
{
intsum += spectrum_xlink_peaks[indices_xlinks[j]].getIntensity();
}
return intsum;
}
std::vector< double > XQuestScores::xCorrelation(const PeakSpectrum & spec1, const PeakSpectrum & spec2, Int maxshift, double tolerance)
{
// generate vector of results, filled with zeroes
std::vector< double > results(maxshift * 2 + 1, 0);
// return 0 = no correlation, when one of the spectra is empty
if (spec1.empty() || spec2.empty()) {
return results;
}
double maxionsize = std::max(spec1[spec1.size()-1].getMZ(), spec2[spec2.size()-1].getMZ());
Int table_size = ceil(maxionsize / tolerance)+1;
std::vector< double > ion_table1(table_size, 0);
std::vector< double > ion_table2(table_size, 0);
// Build tables of the same size, each bin has the size of the tolerance
for (Size i = 0; i < spec1.size(); ++i)
{
Size pos = static_cast<Size>(ceil(spec1[i].getMZ() / tolerance));
ion_table1[pos] = 10.0;
}
for (Size i = 0; i < spec2.size(); ++i)
{
Size pos =static_cast<Size>(ceil(spec2[i].getMZ() / tolerance));
ion_table2[pos] = 10.0;
}
// Compute means
double mean1 = (std::accumulate(ion_table1.begin(), ion_table1.end(), 0.0)) / table_size;
double mean2 = (std::accumulate(ion_table2.begin(), ion_table2.end(), 0.0)) / table_size;
// Compute denominator
double s1 = 0;
double s2 = 0;
for (Int i = 0; i < table_size; ++i)
{
s1 += pow((ion_table1[i] - mean1), 2);
s2 += pow((ion_table2[i] - mean2), 2);
}
double denom = sqrt(s1 * s2);
// Calculate correlation for each shift
for (Int shift = -maxshift; shift <= maxshift; ++shift)
{
double s = 0;
for (Int i = 0; i < table_size; ++i)
{
Int j = i + shift;
if ( (j >= 0) && (j < table_size))
{
s += (ion_table1[i] - mean1) * (ion_table2[j] - mean2);
}
}
if (denom > 0)
{
results[shift + maxshift] = s / denom;
}
}
return results;
}
double XQuestScores::xCorrelationPrescore(const PeakSpectrum & spec1, const PeakSpectrum & spec2, double tolerance)
{
// return 0 = no correlation, when one of the spectra is empty
if (spec1.empty() || spec2.empty()) {
return 0.0;
}
double maxionsize = std::max(spec1[spec1.size()-1].getMZ(), spec2[spec2.size()-1].getMZ());
Int table_size = ceil(maxionsize / tolerance)+1;
std::vector< double > ion_table1(table_size, 0);
std::vector< double > ion_table2(table_size, 0);
// Build tables of the same size, each bin has the size of the tolerance
for (Size i = 0; i < spec1.size(); ++i)
{
Size pos = static_cast<Size>(ceil(spec1[i].getMZ() / tolerance));
ion_table1[pos] = 1;
}
for (Size i = 0; i < spec2.size(); ++i)
{
Size pos =static_cast<Size>(ceil(spec2[i].getMZ() / tolerance));
ion_table2[pos] = 1;
}
double dot_product = 0.0;
for (Size i = 0; i < ion_table1.size(); ++i)
{
dot_product += ion_table1[i] * ion_table2[i];
}
// determine the smaller spectrum and normalize by the number of peaks in it
double peaks = std::min(spec1.size(), spec2.size());
return dot_product / peaks;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/XLMS/OPXLHelper.cpp | .cpp | 65,821 | 1,470 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Eugen Netz $
// $Authors: Eugen Netz $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/XLMS/OPXLHelper.h>
#include <OpenMS/CHEMISTRY/ModifiedPeptideGenerator.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/DATASTRUCTURES/ListUtilsIO.h>
#include <OpenMS/DATASTRUCTURES/StringView.h>
#include <utility>
#ifdef _OPENMP
#include <omp.h>
#endif
// turn on additional debug output
// #define DEBUG_OPXLHELPER
using namespace std;
namespace OpenMS
{
vector<OPXLDataStructs::XLPrecursor> OPXLHelper::enumerateCrossLinksAndMasses(const vector<OPXLDataStructs::AASeqWithMass>& peptides, double cross_link_mass, const DoubleList& cross_link_mass_mono_link, const StringList& cross_link_residue1, const StringList& cross_link_residue2, const vector< double >& spectrum_precursors, vector< int >& precursor_correction_positions, double precursor_mass_tolerance, bool precursor_mass_tolerance_unit_ppm)
{
// initialize empty vector for the results
vector<OPXLDataStructs::XLPrecursor> mass_to_candidates;
double max_precursor = spectrum_precursors[spectrum_precursors.size()-1];
Size peptides_size = peptides.size();
// compute a very conservative total upper bound, based on the heaviest possible linear peptide
// can be used instead of peptides.end() in all cases for this precursor mass
vector<OPXLDataStructs::AASeqWithMass>::const_iterator conservative_upper_bound = upper_bound(peptides.cbegin(), peptides.cend(), max_precursor, OPXLDataStructs::AASeqWithMassComparator());
// initialize additional iterators
// the upper bounds for a precursor mass can be used as the lower bounds
// for the next heavier precursor mass, narrowing down the search space for new bounds
vector<OPXLDataStructs::AASeqWithMass>::const_iterator first_loop = peptides.cbegin();
vector<OPXLDataStructs::AASeqWithMass>::const_iterator last_loop = peptides.cbegin();
vector<OPXLDataStructs::AASeqWithMass>::const_iterator first_mono = peptides.cbegin();
vector<OPXLDataStructs::AASeqWithMass>::const_iterator last_mono = peptides.cbegin();
vector<OPXLDataStructs::AASeqWithMass>::const_iterator last_alpha = peptides.cbegin();
for (Size pm = 0; pm < spectrum_precursors.size(); ++pm)
{
double precursor_mass = spectrum_precursors[pm];
// compute absolute tolerance from relative, if necessary
double allowed_error = 0;
if (precursor_mass_tolerance_unit_ppm) // ppm
{
allowed_error = precursor_mass * precursor_mass_tolerance * 1e-6;
}
else // Dalton
{
allowed_error = precursor_mass_tolerance;
}
// ################################ Enumerate Loop-Links #################
// The largest peptides given a fixed precursor mass are possible with loop links
double min_peptide_mass = precursor_mass - cross_link_mass - allowed_error;
double max_peptide_mass = precursor_mass - cross_link_mass + allowed_error;
first_loop = lower_bound(first_loop, conservative_upper_bound, min_peptide_mass, OPXLDataStructs::AASeqWithMassComparator());
last_loop = upper_bound(last_loop, conservative_upper_bound, max_peptide_mass, OPXLDataStructs::AASeqWithMassComparator());
int first_index = first_loop - peptides.cbegin();
int last_index = last_loop - peptides.cbegin();
#pragma omp parallel for
for (int p1 = first_index; p1 < last_index; ++p1)
{
const String& seq_first = peptides[p1].unmodified_seq;
// test if this peptide could have loop-links: one cross-link with both sides attached to the same peptide
bool first_res = false; // is there a residue the first side of the linker can attach to?
bool second_res = false; // is there a residue the second side of the linker can attach to?
for (Size k = 0; k < seq_first.size()-1; ++k)
{
for (Size i = 0; i < cross_link_residue1.size(); ++i)
{
if (cross_link_residue1[i].size() == 1 && string(1, seq_first[k]) == cross_link_residue1[i])
{
first_res = true;
}
}
for (Size i = 0; i < cross_link_residue2.size(); ++i)
{
if (cross_link_residue2[i].size() == 1 && string(1, seq_first[k]) == cross_link_residue2[i])
{
second_res = true;
}
}
}
// If both sides of a cross-linker can link to this peptide, generate the loop-link
if (first_res && second_res)
{
// Monoisotopic weight of the peptide + cross-linker
double cross_linked_peptide_mass = peptides[p1].peptide_mass + cross_link_mass;
// also only one peptide
OPXLDataStructs::XLPrecursor precursor;
precursor.precursor_mass = cross_linked_peptide_mass;
precursor.alpha_index = p1;
precursor.beta_index = peptides_size + 1; // an out-of-range index to represent an empty index
precursor.alpha_seq = seq_first;
precursor.beta_seq = "";
#pragma omp critical (mass_to_candidates_access)
{
mass_to_candidates.push_back(precursor);
precursor_correction_positions.push_back(pm);
}
}
} // end of parallel loop over loop-link candidates
// ################################ Enumerate Mono-Links #################
for (Size i = 0; i < cross_link_mass_mono_link.size(); i++)
{
double mono_link_mass = cross_link_mass_mono_link[i];
min_peptide_mass = precursor_mass - mono_link_mass - allowed_error;
max_peptide_mass = precursor_mass - mono_link_mass + allowed_error;
// mono-link masses are sorted in descending order
// so we can use the results from the last search as a new lower bounds for both searches again
first_mono = lower_bound(first_mono, conservative_upper_bound, min_peptide_mass, OPXLDataStructs::AASeqWithMassComparator());
last_mono = upper_bound(last_mono, conservative_upper_bound, max_peptide_mass, OPXLDataStructs::AASeqWithMassComparator());
first_index = first_mono - peptides.cbegin();
last_index = last_mono - peptides.cbegin();
#pragma omp parallel for
for (int p1 = first_index; p1 < last_index; ++p1)
{
// Monoisotopic weight of the peptide + cross-linker
double cross_linked_peptide_mass = peptides[p1].peptide_mass + mono_link_mass;
// Make sure it is clear only one peptide is considered here. Use an out-of-range value for the second peptide.
OPXLDataStructs::XLPrecursor precursor;
precursor.precursor_mass = cross_linked_peptide_mass;
precursor.alpha_index = p1;
precursor.beta_index = peptides_size + 1; // an out-of-range index to represent an empty index
precursor.alpha_seq = peptides[p1].unmodified_seq;
precursor.beta_seq = "";
#pragma omp critical (mass_to_candidates_access)
{
mass_to_candidates.push_back(precursor);
precursor_correction_positions.push_back(pm);
}
} // end of loop over candidates for a specific mono-link mass
} // end of loop over mono-link masses
// ################################ Enumerate Cross-Links #################
// constrain the conservative upper bound even more,
// because we have to fit in two peptides this time
// maximal mass: difference between precursor mass and the smallest peptide + cross-linker
max_peptide_mass = precursor_mass - cross_link_mass - peptides[0].peptide_mass + allowed_error;
last_alpha = upper_bound(last_alpha, conservative_upper_bound, max_peptide_mass, OPXLDataStructs::AASeqWithMassComparator());
int last_alpha_index = last_alpha - peptides.cbegin();
#pragma omp parallel for
for (int p1 = 0; p1 < last_alpha_index; ++p1)
{
// Constrain search for beta
double min_peptide_mass_beta = precursor_mass - cross_link_mass - peptides[p1].peptide_mass - allowed_error;
double max_peptide_mass_beta = precursor_mass - cross_link_mass - peptides[p1].peptide_mass + allowed_error;
// the last_alpha upper bound is also a conservative upper bound here
vector<OPXLDataStructs::AASeqWithMass>::const_iterator first_beta = lower_bound(peptides.cbegin()+p1, last_alpha, min_peptide_mass_beta, OPXLDataStructs::AASeqWithMassComparator());
vector<OPXLDataStructs::AASeqWithMass>::const_iterator last_beta = upper_bound(peptides.cbegin()+p1, last_alpha, max_peptide_mass_beta, OPXLDataStructs::AASeqWithMassComparator());
if (first_beta == last_beta)
{
continue;
}
Size first_beta_index = first_beta - peptides.begin();
Size last_beta_index = last_beta - peptides.begin();
for (Size p2 = first_beta_index; p2 < last_beta_index; ++p2)
{
// Monoisotopic weight of the first peptide + the second peptide + cross-linker
double cross_linked_pair_mass = peptides[p1].peptide_mass + peptides[p2].peptide_mass + cross_link_mass;
// this time both peptides have valid indices
OPXLDataStructs::XLPrecursor precursor;
precursor.precursor_mass = cross_linked_pair_mass;
precursor.alpha_index = p1;
precursor.beta_index = p2;
precursor.alpha_seq = peptides[p1].unmodified_seq;
precursor.beta_seq = peptides[p2].unmodified_seq;
#pragma omp critical (mass_to_candidates_access)
{
mass_to_candidates.push_back(precursor);
precursor_correction_positions.push_back(pm);
}
} // end of loop over betas
} // end of parallel loop over alphas
} // end of loop over precursor masses
return mass_to_candidates;
}
// Digests all protein sequences of the FASTA database and collects every modified
// variant of each peptide that offers at least one potentially cross-linkable site
// (a linkable side-chain residue or a linkable protein terminus).
// Peptides containing ambiguous amino acids and duplicate peptides are skipped.
// The result is sorted by monoisotopic peptide mass in ascending order, so that
// later candidate enumeration can use binary searches over mass windows.
std::vector<OPXLDataStructs::AASeqWithMass> OPXLHelper::digestDatabase(
  vector<FASTAFile::FASTAEntry> fasta_db,
  const EnzymaticDigestion& digestor,
  Size min_peptide_length,
  const StringList& cross_link_residue1,
  const StringList& cross_link_residue2,
  const ModifiedPeptideGenerator::MapToResidueType& fixed_modifications,
  const ModifiedPeptideGenerator::MapToResidueType& variable_modifications,
  Size max_variable_mods_per_peptide)
{
  multimap<StringView, AASequence> seen_peptides; // peptides (with all modified variants) handled so far
  vector<OPXLDataStructs::AASeqWithMass> peptide_masses;

  // determine whether the linker can react with protein termini
  bool n_term_linker = false;
  bool c_term_linker = false;
  auto scan_term_specs = [&n_term_linker, &c_term_linker](const StringList& residues)
  {
    for (const String& res : residues)
    {
      if (res == "N-term")
      {
        n_term_linker = true;
      }
      else if (res == "C-term")
      {
        c_term_linker = true;
      }
    }
  };
  scan_term_specs(cross_link_residue1);
  scan_term_specs(cross_link_residue2);

  // true if the peptide contains a linkable residue before its last position
  auto has_linkable_residue = [](const String& pep_seq, const StringList& residues)
  {
    for (const String& res : residues)
    {
      if (res.size() == 1 && (pep_seq.find(res) < pep_seq.size() - 1))
      {
        return true;
      }
    }
    return false;
  };

  // digest and filter the database
  for (const FASTAFile::FASTAEntry& entry : fasta_db)
  {
    // substrings pointing into the protein sequence (bounded by pairs of begin/end iterators)
    vector<StringView> digest_products;
    digestor.digestUnmodified(entry.sequence, digest_products, min_peptide_length);

    for (const StringView& pep : digest_products)
    {
      const String pep_str = pep.getString();

      // skip peptides with invalid AAs // TODO is this necessary?
      if (pep_str.has('B') || pep_str.has('O') || pep_str.has('U') || pep_str.has('X') || pep_str.has('Z'))
      {
        continue;
      }

      // classify the peptide position within its protein
      OPXLDataStructs::PeptidePosition position = OPXLDataStructs::INTERNAL;
      if (entry.sequence.hasPrefix(pep_str))
      {
        position = OPXLDataStructs::N_TERM;
      }
      else if (entry.sequence.hasSuffix(pep_str))
      {
        position = OPXLDataStructs::C_TERM;
      }

      // keep only peptides that offer at least one cross-linkable site
      const bool term_linkable = (n_term_linker && position == OPXLDataStructs::N_TERM) ||
                                 (c_term_linker && position == OPXLDataStructs::C_TERM);
      if (!term_linkable &&
          !has_linkable_residue(pep_str, cross_link_residue1) &&
          !has_linkable_residue(pep_str, cross_link_residue2))
      {
        continue;
      }

      // peptide (and all modified variants) already processed? then skip it
      if (seen_peptides.find(pep) != seen_peptides.end())
      {
        continue;
      }

      // generate all modified variants of the peptide
      AASequence aas = AASequence::fromString(pep_str);
      ModifiedPeptideGenerator::applyFixedModifications(fixed_modifications, aas);
      vector<AASequence> modified_variants;
      ModifiedPeptideGenerator::applyVariableModifications(variable_modifications, aas, max_variable_mods_per_peptide, modified_variants);

      for (const AASequence& candidate : modified_variants)
      {
        OPXLDataStructs::AASeqWithMass pep_mass;
        pep_mass.peptide_mass = candidate.getMonoWeight();
        pep_mass.peptide_seq = candidate;
        pep_mass.position = position;
        pep_mass.unmodified_seq = pep_str;
        seen_peptides.insert(pair<StringView, AASequence>(pep, candidate));
        peptide_masses.push_back(pep_mass);
      }
    }
  }
  // ascending mass order enables binary searches during candidate enumeration
  sort(peptide_masses.begin(), peptide_masses.end(), OPXLDataStructs::AASeqWithMassComparator());
  return peptide_masses;
}
/**
 * @brief Turns enumerated XLPrecursor candidates into concrete ProteinProteinCrossLink candidates.
 *
 * For each precursor candidate, all possible link positions on the alpha peptide
 * (and, for real cross-links, the beta peptide) are collected from the linkable
 * residue lists, and one ProteinProteinCrossLink is generated per valid combination.
 * Mono-links (beta index out of range, second position == -1) and loop-links
 * (both positions on the alpha peptide) are handled as special cases, as are
 * cross-linkable protein termini.
 *
 * @param candidates enumerated precursor candidates (peptide indices into peptide_masses)
 * @param precursor_corrections isotope corrections, parallel to candidates
 * @param precursor_correction_positions per candidate: index into spectrum_precursor_vector / allowed_error_vector
 * @param peptide_masses digested peptides, addressed via alpha_index / beta_index
 * @param cross_link_residue1 residues linkable by the first linker side (may contain "N-term" / "C-term")
 * @param cross_link_residue2 residues linkable by the second linker side (may contain "N-term" / "C-term")
 * @param cross_link_mass mass of the intact cross-linker
 * @param cross_link_mass_mono_link possible mono-link masses
 * @param spectrum_precursor_vector observed precursor masses
 * @param allowed_error_vector allowed absolute mass errors, parallel to spectrum_precursor_vector
 * @param cross_link_name name of the cross-linking reagent
 * @return all generated cross-link candidates (ordering depends on OpenMP scheduling)
 */
vector <OPXLDataStructs::ProteinProteinCrossLink> OPXLHelper::buildCandidates(const std::vector< OPXLDataStructs::XLPrecursor > & candidates,
const std::vector< int > & precursor_corrections,
const std::vector< int >& precursor_correction_positions,
const std::vector<OPXLDataStructs::AASeqWithMass> & peptide_masses,
const StringList & cross_link_residue1,
const StringList & cross_link_residue2,
double cross_link_mass,
const DoubleList & cross_link_mass_mono_link,
const std::vector< double >& spectrum_precursor_vector,
const std::vector< double >& allowed_error_vector,
const String& cross_link_name)
{
// determine whether the linker can also react with protein termini
bool n_term_linker = false;
bool c_term_linker = false;
for (const String& res : cross_link_residue1)
{
if (res == "N-term")
{
n_term_linker = true;
}
else if (res == "C-term")
{
c_term_linker = true;
}
}
for (const String& res : cross_link_residue2)
{
if (res == "N-term")
{
n_term_linker = true;
}
else if (res == "C-term")
{
c_term_linker = true;
}
}
vector <OPXLDataStructs::ProteinProteinCrossLink> cross_link_candidates;
// one iteration per precursor candidate; writes to the shared result vector are serialized below
#pragma omp parallel for schedule(guided)
for (int i = 0; i < static_cast<int>(candidates.size()); ++i)
{
OPXLDataStructs::XLPrecursor candidate = candidates[i];
vector <SignedSize> link_pos_first;
vector <SignedSize> link_pos_second;
const AASequence* peptide_first = &(peptide_masses[candidate.alpha_index].peptide_seq);
OPXLDataStructs::PeptidePosition peptide_pos_first = peptide_masses[candidate.alpha_index].position;
const AASequence* peptide_second = nullptr;
OPXLDataStructs::PeptidePosition peptide_pos_second = OPXLDataStructs::INTERNAL;
// a valid (in-range) beta index means this candidate is a real cross-link between two peptides
if (candidate.beta_index < peptide_masses.size())
{
peptide_second = &(peptide_masses[candidate.beta_index].peptide_seq);
peptide_pos_second = peptide_masses[candidate.beta_index].position;
}
String seq_first = candidate.alpha_seq;
String seq_second;
if (peptide_second) { seq_second = candidate.beta_seq; }
// mono-links and loop-links with different masses can be generated for the same precursor mass, but only one of them can be valid each time.
// Find out which is the case. But it should not happen often enough to slow down the tool significantly.
// NOTE(review): this scans ALL precursors instead of only the one at
// precursor_correction_positions[i]; confirm this is intended.
bool is_loop = false;
for (Size f = 0; f < allowed_error_vector.size(); ++f)
{
if (abs(spectrum_precursor_vector[f] - (peptide_first->getMonoWeight() + cross_link_mass)) <= allowed_error_vector[f])
{
is_loop = true;
}
}
// collect linkable positions on the alpha peptide; the last residue is excluded
// (k < size()-1) -- presumably the C-terminal residue is not treated as a
// side-chain link site here; confirm against the enumeration code.
for (Size k = 0; k < seq_first.size()-1; ++k)
{
for (Size x = 0; x < cross_link_residue1.size(); ++x)
{
// if (seq_first.substr(k, 1) == cross_link_residue1[x]) link_pos_first.push_back(k);
if (string(1, seq_first[k]) == cross_link_residue1[x]) link_pos_first.push_back(k);
}
}
if (candidate.beta_index < peptide_masses.size())
{
// cross-link: collect linkable positions on the beta peptide
for (Size k = 0; k < seq_second.size()-1; ++k)
{
for (Size x = 0; x < cross_link_residue2.size(); ++x)
{
if (string(1, seq_second[k]) == cross_link_residue2[x]) link_pos_second.push_back(k);
}
}
}
else
{
// Second position defining a mono-link and the second positions on the same peptide for loop links (only one of these two is valid for any specific precursor)
if (!is_loop)
{
// -1 is the sentinel for "no second link position" (mono-link)
link_pos_second.push_back(-1);
}
else
{
// loop-link: second link position is also on the alpha peptide
for (Size k = 0; k < seq_first.size()-1; ++k)
{
for (Size x = 0; x < cross_link_residue2.size(); ++x)
{
if (string(1, seq_first[k]) == cross_link_residue2[x]) link_pos_second.push_back(k);
}
}
}
}
// Determine larger peptide (alpha) by sequence length, use mass as tie breaker
bool alpha_first = true;
if (seq_second.size() > seq_first.size())
{
alpha_first = false;
}
else if ( (seq_second.size() == seq_first.size()) && peptide_second && (peptide_second->getMonoWeight() > peptide_first->getMonoWeight()) )
{
alpha_first = false;
}
// TODO remodel this, there should be a simpler way, e.g. the peptides were sorted so "second" is always heavier?
// generate cross_links for all valid combinations
for (Size x = 0; x < link_pos_first.size(); ++x)
{
for (Size y = 0; y < link_pos_second.size(); ++y)
{
OPXLDataStructs::ProteinProteinCrossLink cross_link_candidate;
cross_link_candidate.precursor_correction = precursor_corrections[i];
cross_link_candidate.cross_linker_name = cross_link_name;
// filter out unnecessary loop-link candidates that we would not trust in a manual validation anyway
if ((seq_second.empty()) && (link_pos_second[y] != -1)) // if it is a loop-link
{
// if the positions are the same, then it is linking the same residue with itself
// also pos1 > pos2 would be the same link as pos1 < pos2 with switched positions
if ( (link_pos_first[x] >= link_pos_second[y]) ) continue;
// don't consider loop-links linking very close residues (y > x is already established, so no need for abs())
if ( (link_pos_second[y] - link_pos_first[x]) < 3 ) continue;
// don't consider loop-links, that link to residues on the fringe of the peptide sequence
// because for those there won't be sufficient fragmentation for sequencing on at least one end of the peptide
// we want at least 3 residues on each side
if ( (link_pos_first[x] < 3) || (link_pos_second[y] > static_cast<int>(seq_first.size()) - 4) ) continue;
}
// if one of the linked residues is already modified with something else, skip this combination of linked positions
if ((*peptide_first)[link_pos_first[x]].isModified())
{
continue;
}
if (peptide_second != nullptr && (*peptide_second)[link_pos_second[y]].isModified())
{
continue;
}
// check for modified residue for loop linked cases
if ((seq_second.empty() && link_pos_second[y] != -1) && (*peptide_first)[link_pos_second[y]].isModified())
{
continue;
}
// alpha is the larger / heavier peptide; assign positions accordingly
if (alpha_first)
{
cross_link_candidate.alpha = peptide_first;
cross_link_candidate.beta = peptide_second;
cross_link_candidate.cross_link_position.first = link_pos_first[x];
cross_link_candidate.cross_link_position.second = link_pos_second[y];
cross_link_candidate.term_spec_alpha = ResidueModification::ANYWHERE;
cross_link_candidate.term_spec_beta = ResidueModification::ANYWHERE;
}
else
{
cross_link_candidate.alpha = peptide_second;
cross_link_candidate.beta = peptide_first;
cross_link_candidate.cross_link_position.first = link_pos_second[y];
cross_link_candidate.cross_link_position.second = link_pos_first[x];
cross_link_candidate.term_spec_alpha = ResidueModification::ANYWHERE;
cross_link_candidate.term_spec_beta = ResidueModification::ANYWHERE;
}
// Cross-linker mass is only one of the mono-link masses, if there is no second position (second == -1), otherwise the normal linker mass
if (link_pos_second[y] != -1)
{
cross_link_candidate.cross_linker_mass = cross_link_mass;
#pragma omp critical (cross_link_candidates_access)
cross_link_candidates.push_back(cross_link_candidate);
}
else
{
for (Size k = 0; k < cross_link_mass_mono_link.size(); ++k)
{
// only use the correct mono-links (at this point we know it is a mono-link, but not which one)
bool is_correct_monolink = false;
if (abs(spectrum_precursor_vector[precursor_correction_positions[i]] - (peptide_first->getMonoWeight() + cross_link_mass_mono_link[k])) <= allowed_error_vector[precursor_correction_positions[i]])
{
is_correct_monolink = true;
}
if (is_correct_monolink)
{
cross_link_candidate.cross_linker_mass = cross_link_mass_mono_link[k];
#pragma omp critical (cross_link_candidates_access)
cross_link_candidates.push_back(cross_link_candidate);
}
}
}
}
}
// handle a cross-linkable protein terminus on the beta peptide (terminal cross-linker)
if (peptide_pos_second != OPXLDataStructs::INTERNAL)
{
ResidueModification::TermSpecificity second_spec = ResidueModification::N_TERM;
Size mod_pos = 0;
bool compatible = false;
// If the peptide is at the N-terminus of the protein and the first residue is otherwise not linkable, add a terminal cross-linker
if (n_term_linker && (peptide_pos_second == OPXLDataStructs::N_TERM) && find(link_pos_second.begin(), link_pos_second.end(), 0) == link_pos_second.end())
{
compatible = true;
}
if (c_term_linker && (peptide_pos_second == OPXLDataStructs::C_TERM) && peptide_second)
{
second_spec = ResidueModification::C_TERM;
mod_pos = peptide_second->size() - 1;
compatible = true;
}
if (compatible)
{
for (Size x = 0; x < link_pos_first.size(); ++x)
{
OPXLDataStructs::ProteinProteinCrossLink cross_link_candidate;
if (alpha_first)
{
cross_link_candidate.alpha = peptide_first;
cross_link_candidate.beta = peptide_second;
cross_link_candidate.cross_link_position.first = link_pos_first[x];
cross_link_candidate.cross_link_position.second = mod_pos;
cross_link_candidate.term_spec_alpha = ResidueModification::ANYWHERE;
cross_link_candidate.term_spec_beta = second_spec;
}
else
{
cross_link_candidate.alpha = peptide_second;
cross_link_candidate.beta = peptide_first;
cross_link_candidate.cross_link_position.first = mod_pos;
cross_link_candidate.cross_link_position.second = link_pos_first[x];
cross_link_candidate.term_spec_alpha = second_spec;
cross_link_candidate.term_spec_beta = ResidueModification::ANYWHERE;
}
// If second peptide has a term specificity, there must be a second peptide, so we don't have to consider mono or loop-links
cross_link_candidate.cross_linker_mass = cross_link_mass;
cross_link_candidate.cross_linker_name = cross_link_name;
cross_link_candidate.precursor_correction = precursor_corrections[i];
#pragma omp critical (cross_link_candidates_access)
cross_link_candidates.push_back(cross_link_candidate);
}
}
}
// handle a cross-linkable protein terminus on the alpha peptide (terminal cross-linker)
if (peptide_pos_first != OPXLDataStructs::INTERNAL)
{
ResidueModification::TermSpecificity first_spec = ResidueModification::N_TERM;
Size mod_pos = 0;
bool compatible = false;
// If the peptide is at the N-terminus of the protein and the first residue is otherwise not linkable, add a terminal cross-linker
if (n_term_linker && (peptide_pos_first == OPXLDataStructs::N_TERM) && find(link_pos_first.begin(), link_pos_first.end(), 0) == link_pos_first.end())
{
compatible = true;
}
if (c_term_linker && (peptide_pos_first == OPXLDataStructs::C_TERM))
{
first_spec = ResidueModification::C_TERM;
mod_pos = peptide_first->size() - 1;
compatible = true;
}
if (compatible)
{
for (Size x = 0; x < link_pos_second.size(); ++x)
{
OPXLDataStructs::ProteinProteinCrossLink cross_link_candidate;
cross_link_candidate.cross_linker_name = cross_link_name;
cross_link_candidate.precursor_correction = precursor_corrections[i];
if (alpha_first)
{
cross_link_candidate.alpha = peptide_first;
cross_link_candidate.beta = peptide_second;
cross_link_candidate.cross_link_position.first = mod_pos;
cross_link_candidate.cross_link_position.second = link_pos_second[x];
cross_link_candidate.term_spec_alpha = first_spec;
cross_link_candidate.term_spec_beta = ResidueModification::ANYWHERE;;
}
else
{
cross_link_candidate.alpha = peptide_second;
cross_link_candidate.beta = peptide_first;
cross_link_candidate.cross_link_position.first = link_pos_second[x];
cross_link_candidate.cross_link_position.second = mod_pos;
cross_link_candidate.term_spec_alpha = ResidueModification::ANYWHERE;;
cross_link_candidate.term_spec_beta = first_spec;
}
// Cross-linker mass is only one of the mono-link masses, if there is no second position (second == -1), otherwise the normal linker mass
if (link_pos_second[x] != -1)
{
cross_link_candidate.cross_linker_mass = cross_link_mass;
#pragma omp critical (cross_link_candidates_access)
cross_link_candidates.push_back(cross_link_candidate);
}
else
{
for (Size k = 0; k < cross_link_mass_mono_link.size(); ++k)
{
// only use the correct mono-links (at this point we know it is a mono-link, but not which one)
bool is_correct_monolink = false;
if (abs(spectrum_precursor_vector[precursor_correction_positions[i]] - (peptide_first->getMonoWeight() + cross_link_mass_mono_link[k])) <= allowed_error_vector[precursor_correction_positions[i]])
{
is_correct_monolink = true;
}
if (is_correct_monolink)
{
cross_link_candidate.cross_linker_mass = cross_link_mass_mono_link[k];
#pragma omp critical (cross_link_candidates_access)
cross_link_candidates.push_back(cross_link_candidate);
}
}
}
}
}
}
} // end of parallelized for-loop
return cross_link_candidates;
}
// Translates matched theoretical/experimental peak pairs into PeakAnnotation entries.
// For each matched pair, m/z and intensity are taken from the experimental peak,
// while charge and ion name are taken from the theoretical spectrum's first
// integer and string data arrays.
// @param frag_annotations output; annotations are appended (existing content is kept)
// @param matching pairs of (theoretical peak index, experimental peak index)
// @param theoretical_spectrum must carry at least one IntegerDataArray (charges)
//        and one StringDataArray (ion names)
// @param experiment_spectrum the measured spectrum the matches refer to
void OPXLHelper::buildFragmentAnnotations(std::vector<PeptideHit::PeakAnnotation> & frag_annotations, const std::vector< std::pair< Size, Size > > & matching, const PeakSpectrum & theoretical_spectrum, const PeakSpectrum & experiment_spectrum)
{
  if (theoretical_spectrum.empty() || experiment_spectrum.empty())
  {
    return;
  }
  // guard against theoretical spectra without meta data arrays
  // (the original code indexed [0] unconditionally, which is out-of-range then)
  if (theoretical_spectrum.getIntegerDataArrays().empty() || theoretical_spectrum.getStringDataArrays().empty())
  {
    return;
  }
  // bind by const reference instead of copying the full arrays
  const PeakSpectrum::IntegerDataArray& charges = theoretical_spectrum.getIntegerDataArrays()[0];
  const PeakSpectrum::StringDataArray& names = theoretical_spectrum.getStringDataArrays()[0];
  frag_annotations.reserve(frag_annotations.size() + matching.size());
  for (const std::pair<Size, Size>& match : matching)
  {
    // match.first indexes the theoretical spectrum, match.second the experimental one
    PeptideHit::PeakAnnotation frag_anno;
    frag_anno.mz = experiment_spectrum[match.second].getMZ();
    frag_anno.intensity = experiment_spectrum[match.second].getIntensity();
    frag_anno.charge = charges[match.first];
    frag_anno.annotation = names[match.first];
    frag_annotations.push_back(frag_anno);
  }
}
/**
 * @brief Converts the top CrossLinkSpectrumMatches of one spectrum (pair) into PeptideIdentifications.
 *
 * For every CSM, a PeptideIdentification with one PeptideHit (alpha peptide; also for
 * mono- and loop-links) or two PeptideHits (alpha + beta for cross-links) is appended
 * to peptide_ids. All scores and cross-link meta data are stored as meta values on the
 * alpha hit. For mono-links, an attempt is made to annotate the linker as a residue- or
 * terminus-specific modification from the modification databases; if none matches, a
 * placeholder name is stored instead. The peptide_id_index of the corresponding CSM in
 * all_top_csms is updated to point at the new PeptideIdentification.
 *
 * @param peptide_ids output list; one PeptideIdentification appended per CSM
 * @param top_csms_spectrum the top-scoring CSMs for this spectrum (pair)
 * @param all_top_csms global CSM collection; peptide_id_index is written here
 * @param all_top_csms_current_index index of this spectrum's CSM list in all_top_csms
 * @param spectra the full (light) spectrum map
 * @param scan_index index of the (light) spectrum
 * @param scan_index_heavy index of the heavy spectrum for labeled linkers; equal to scan_index for label-free data
 */
void OPXLHelper::buildPeptideIDs(PeptideIdentificationList & peptide_ids, const std::vector< OPXLDataStructs::CrossLinkSpectrumMatch > & top_csms_spectrum, std::vector< std::vector< OPXLDataStructs::CrossLinkSpectrumMatch > > & all_top_csms, Size all_top_csms_current_index, const PeakMap & spectra, Size scan_index, Size scan_index_heavy)
{
for (Size i = 0; i < top_csms_spectrum.size(); ++i)
{
PeptideIdentification peptide_id;
const PeakSpectrum& spectrum_light = spectra[scan_index];
// NOTE(review): charge is stored in a double here although Precursor::getCharge()
// yields an integral value; kept as-is since ph_alpha.setCharge() accepts it.
double precursor_charge = spectrum_light.getPrecursors()[0].getCharge();
double precursor_mz = spectrum_light.getPrecursors()[0].getMZ();
// classify the match type for the meta value annotation below
String xltype = "cross-link";
SignedSize alpha_pos = top_csms_spectrum[i].cross_link.cross_link_position.first;
SignedSize beta_pos = top_csms_spectrum[i].cross_link.cross_link_position.second;
if (top_csms_spectrum[i].cross_link.getType() == OPXLDataStructs::MONO)
{
xltype = "mono-link";
}
else if (top_csms_spectrum[i].cross_link.getType() == OPXLDataStructs::LOOP)
{
xltype = "loop-link";
}
PeptideHit ph_alpha, ph_beta;
// Set monolink as a modification or add MetaValue for cross-link identity and mass
AASequence seq_alpha = *top_csms_spectrum[i].cross_link.alpha;
ResidueModification::TermSpecificity alpha_term_spec = top_csms_spectrum[i].cross_link.term_spec_alpha;
if (top_csms_spectrum[i].cross_link.getType() == OPXLDataStructs::MONO)
{
// try to find a known modification with the mono-link's mass difference
vector< String > mods;
const String residue = seq_alpha[alpha_pos].getOneLetterCode();
#ifdef DEBUG_OPXLHELPER
#pragma omp critical (LOG_DEBUG_access)
OPENMS_LOG_DEBUG << "Searching mono-link for " << residue << " | " << alpha_pos << endl;
#endif
ModificationsDB::getInstance()->searchModificationsByDiffMonoMass(mods, top_csms_spectrum[i].cross_link.cross_linker_mass, 0.001, residue, ResidueModification::ANYWHERE);
#ifdef DEBUG_OPXLHELPER
#pragma omp critical (LOG_DEBUG_access)
OPENMS_LOG_DEBUG << "number of modifications fitting the diff mass: " << mods.size() << endl;
#endif
bool mod_set = false;
if (!mods.empty()) // If several mods have the same diff mass, try to resolve ambiguity by cross-linker name (e.g. DSS and BS3 are different reagents, but have the same result after the reaction)
{
for (Size s = 0; s < mods.size(); ++s)
{
if (mods[s].hasSubstring(top_csms_spectrum[i].cross_link.cross_linker_name))
{
#ifdef DEBUG_OPXLHELPER
#pragma omp critical (LOG_DEBUG_access)
OPENMS_LOG_DEBUG << "applied modification: " << mods[s] << endl;
#endif
seq_alpha.setModification(alpha_pos, mods[s]);
mod_set = true;
break;
}
}
}
// no residue-specific mod found and the link site is a peptide terminus:
// fall back to searching terminus-specific modifications
else if (mods.empty() && (alpha_pos == 0 || alpha_pos == static_cast<int>(seq_alpha.size())-1))
{
#ifdef DEBUG_OPXLHELPER
#pragma omp critical (LOG_DEBUG_access)
OPENMS_LOG_DEBUG << "No residue specific mono-link found, searching for terminal mods..." << endl;
#endif
ModificationsDB::getInstance()->searchModificationsByDiffMonoMass(mods, top_csms_spectrum[i].cross_link.cross_linker_mass, 0.001, "", alpha_term_spec);
if (!mods.empty())
{
Size mod_index = 0;
for (Size s = 0; s < mods.size(); ++s)
{
if (mods[s].hasSubstring(top_csms_spectrum[i].cross_link.cross_linker_name))
{
mod_index = s;
}
}
if (alpha_term_spec == ResidueModification::N_TERM)
{
#ifdef DEBUG_OPXLHELPER
#pragma omp critical (LOG_DEBUG_access)
OPENMS_LOG_DEBUG << "Setting N-term mono-link: " << mods[mod_index] << endl;
#endif
seq_alpha.setNTerminalModification(mods[mod_index]);
}
else
{
#ifdef DEBUG_OPXLHELPER
#pragma omp critical (LOG_DEBUG_access)
OPENMS_LOG_DEBUG << "Setting C-term mono-link: " << mods[mod_index] << endl;
#endif
seq_alpha.setCTerminalModification(mods[mod_index]);
}
mod_set = true;
}
}
if ( (!mods.empty()) && (!mod_set) ) // If resolving by name did not work, use any with matching diff mass
{
seq_alpha.setModification(alpha_pos, mods[0]);
mod_set = true;
ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_XL_MOD, mods[0]);
}
if (!mod_set) // If no equivalent mono-link exists in the UNIMOD or XLMOD databases, use the given name to construct a placeholder
{
String mod_name = String("unknown mono-link " + top_csms_spectrum[i].cross_link.cross_linker_name + " mass " + String(top_csms_spectrum[i].cross_link.cross_linker_mass));
ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_XL_MOD, mod_name);
}
}
else
{
// cross-link or loop-link: store the linker name as a meta value only
ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_XL_MOD, top_csms_spectrum[i].cross_link.cross_linker_name);
}
ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_XL_MASS, DataValue(top_csms_spectrum[i].cross_link.cross_linker_mass));
// convert the terminal specificities into their string representations
String alpha_term = "ANYWHERE";
if (alpha_term_spec == ResidueModification::N_TERM)
{
alpha_term = "N_TERM";
}
else if (alpha_term_spec == ResidueModification::C_TERM)
{
alpha_term = "C_TERM";
}
ResidueModification::TermSpecificity beta_term_spec = top_csms_spectrum[i].cross_link.term_spec_beta;
String beta_term = "ANYWHERE";
if (beta_term_spec == ResidueModification::N_TERM)
{
beta_term = "N_TERM";
}
else if (beta_term_spec == ResidueModification::C_TERM)
{
beta_term = "C_TERM";
}
vector<PeptideHit> phs;
// second link position: index for cross- and loop-links, "-" for mono-links
if (beta_pos >= 0)
{
ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_XL_POS2, DataValue(beta_pos));
}
else
{
ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_XL_POS2, DataValue("-"));
}
// fill the alpha hit with all scores and cross-link meta data
ph_alpha.setSequence(seq_alpha);
ph_alpha.setCharge(precursor_charge);
ph_alpha.setScore(top_csms_spectrum[i].score);
ph_alpha.setRank(DataValue(i+1));
ph_alpha.setMetaValue("xl_chain", "MS:1002509"); // donor (longer, heavier, alphabetically earlier)
ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_XL_POS1, DataValue(alpha_pos));
ph_alpha.setMetaValue(Constants::UserParam::SPECTRUM_REFERENCE, spectra[scan_index].getNativeID());
ph_alpha.setMetaValue("spectrum_index", scan_index);
ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_XL_TYPE, xltype);
ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_XL_RANK, DataValue(i + 1));
ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_XL_TERM_SPEC_ALPHA, alpha_term);
ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_XL_TERM_SPEC_BETA, beta_term);
ph_alpha.setMetaValue(Constants::UserParam::ISOTOPE_ERROR, top_csms_spectrum[i].precursor_correction);
// labeled (light/heavy pair) data: also record the heavy spectrum's coordinates
if (scan_index_heavy != scan_index)
{
ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_HEAVY_SPEC_RT, spectra[scan_index_heavy].getRT());
ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_HEAVY_SPEC_MZ, spectra[scan_index_heavy].getPrecursors()[0].getMZ());
ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_HEAVY_SPEC_REF, spectra[scan_index_heavy].getNativeID());
ph_alpha.setMetaValue("spectrum_index_heavy", scan_index_heavy);
}
ph_alpha.setMetaValue(Constants::UserParam::PRECURSOR_ERROR_PPM_USERPARAM, top_csms_spectrum[i].precursor_error_ppm);
ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_SCORE, top_csms_spectrum[i].score); // important for Percolator feature set because the PeptideHit score might be overwritten by a q-value
ph_alpha.setMetaValue("OpenPepXL:xquest_score", top_csms_spectrum[i].xquest_score);
ph_alpha.setMetaValue("OpenPepXL:xcorr xlink", top_csms_spectrum[i].xcorrx_max);
ph_alpha.setMetaValue("OpenPepXL:xcorr common", top_csms_spectrum[i].xcorrc_max);
ph_alpha.setMetaValue("OpenPepXL:match-odds", top_csms_spectrum[i].match_odds);
ph_alpha.setMetaValue("OpenPepXL:intsum", top_csms_spectrum[i].int_sum);
ph_alpha.setMetaValue("OpenPepXL:intsum_alpha", top_csms_spectrum[i].intsum_alpha);
ph_alpha.setMetaValue("OpenPepXL:intsum_beta", top_csms_spectrum[i].intsum_beta);
ph_alpha.setMetaValue("OpenPepXL:total_current", top_csms_spectrum[i].total_current);
ph_alpha.setMetaValue("OpenPepXL:wTIC", top_csms_spectrum[i].wTIC);
ph_alpha.setMetaValue("OpenPepXL:TIC", top_csms_spectrum[i].percTIC);
ph_alpha.setMetaValue("OpenPepXL:prescore", top_csms_spectrum[i].pre_score);
ph_alpha.setMetaValue("OpenPepXL:log_occupancy", top_csms_spectrum[i].log_occupancy);
ph_alpha.setMetaValue("OpenPepXL:log_occupancy_alpha", top_csms_spectrum[i].log_occupancy_alpha);
ph_alpha.setMetaValue("OpenPepXL:log_occupancy_beta", top_csms_spectrum[i].log_occupancy_beta);
ph_alpha.setMetaValue("matched_xlink_alpha",top_csms_spectrum[i].matched_xlink_alpha);
ph_alpha.setMetaValue("matched_xlink_beta",top_csms_spectrum[i].matched_xlink_beta);
ph_alpha.setMetaValue("matched_linear_alpha",top_csms_spectrum[i].matched_linear_alpha);
ph_alpha.setMetaValue("matched_linear_beta",top_csms_spectrum[i].matched_linear_beta);
ph_alpha.setMetaValue("ppm_error_abs_sum_linear_alpha", top_csms_spectrum[i].ppm_error_abs_sum_linear_alpha);
ph_alpha.setMetaValue("ppm_error_abs_sum_linear_beta", top_csms_spectrum[i].ppm_error_abs_sum_linear_beta);
ph_alpha.setMetaValue("ppm_error_abs_sum_xlinks_alpha", top_csms_spectrum[i].ppm_error_abs_sum_xlinks_alpha);
ph_alpha.setMetaValue("ppm_error_abs_sum_xlinks_beta", top_csms_spectrum[i].ppm_error_abs_sum_xlinks_beta);
ph_alpha.setMetaValue("ppm_error_abs_sum_linear", top_csms_spectrum[i].ppm_error_abs_sum_linear);
ph_alpha.setMetaValue("ppm_error_abs_sum_xlinks", top_csms_spectrum[i].ppm_error_abs_sum_xlinks);
ph_alpha.setMetaValue("ppm_error_abs_sum_alpha", top_csms_spectrum[i].ppm_error_abs_sum_alpha);
ph_alpha.setMetaValue("ppm_error_abs_sum_beta", top_csms_spectrum[i].ppm_error_abs_sum_beta);
ph_alpha.setMetaValue("ppm_error_abs_sum", top_csms_spectrum[i].ppm_error_abs_sum);
ph_alpha.setMetaValue("precursor_total_intensity", top_csms_spectrum[i].precursor_total_intensity);
ph_alpha.setMetaValue("precursor_target_intensity", top_csms_spectrum[i].precursor_target_intensity);
ph_alpha.setMetaValue("precursor_signal_proportion", top_csms_spectrum[i].precursor_signal_proportion);
ph_alpha.setMetaValue("precursor_target_peak_count", top_csms_spectrum[i].precursor_target_peak_count);
ph_alpha.setMetaValue("precursor_residual_peak_count", top_csms_spectrum[i].precursor_residual_peak_count);
ph_alpha.setMetaValue("selected", "false");
ph_alpha.setPeakAnnotations(top_csms_spectrum[i].frag_annotations);
#ifdef DEBUG_OPXLHELPER
#pragma omp critical (LOG_DEBUG_access)
OPENMS_LOG_DEBUG << "Annotations of size " << ph_alpha.getPeakAnnotations().size() << endl;
#endif
// cross-links get a second PeptideHit for the beta peptide
if (top_csms_spectrum[i].cross_link.getType() == OPXLDataStructs::CROSS)
{
ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_BETA_SEQUENCE, (*top_csms_spectrum[i].cross_link.beta).toString());
ph_beta.setSequence(*top_csms_spectrum[i].cross_link.beta);
ph_beta.setCharge(precursor_charge);
ph_beta.setScore(top_csms_spectrum[i].score);
ph_beta.setMetaValue("xl_chain", "MS:1002510"); // receiver
ph_beta.setMetaValue(Constants::UserParam::SPECTRUM_REFERENCE, spectra[scan_index].getNativeID());
ph_beta.setMetaValue("spectrum_index", scan_index);
phs.push_back(ph_alpha);
phs.push_back(ph_beta);
}
else
{
ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_BETA_SEQUENCE, "-");
phs.push_back(ph_alpha);
}
peptide_id.setRT(spectrum_light.getRT());
peptide_id.setMZ(precursor_mz);
// reference both spectra (light + heavy) for labeled linkers
String specIDs;
if (scan_index_heavy != scan_index)
{
specIDs = spectra[scan_index].getNativeID() + "," + spectra[scan_index_heavy].getNativeID();
}
else
{
specIDs = spectra[scan_index].getNativeID();
}
peptide_id.setMetaValue(Constants::UserParam::SPECTRUM_REFERENCE, specIDs);
peptide_id.setHits(phs);
peptide_id.setScoreType(Constants::UserParam::OPENPEPXL_SCORE);
// This critical section is called this way, because access to all_top_csms also happens in OpenPepXLAlgorithm.
// Access to peptide_ids is also critical, but it is only accessed here during parallel processing.
#pragma omp critical (all_top_csms_access)
{
peptide_ids.push_back(peptide_id);
all_top_csms[all_top_csms_current_index][i].peptide_id_index = peptide_ids.size()-1;
}
}
}
// Annotates each identification with the cross-link positions mapped onto the
// protein(s), as comma-separated 1-based residue numbers (one entry per
// PeptideEvidence), stored in the XL_POS1_PROT / XL_POS2_PROT meta values.
// For cross-links (two hits) the beta positions are computed from the beta hit's
// evidences; for loop-links the second position maps onto the alpha peptide's
// proteins; mono-links get "-" as second position.
// Fixes vs. previous version: peptide evidences are no longer copied, the dead
// prot2_accessions accumulator is removed, and the meta-value parse is hoisted
// out of the per-evidence loop. Output meta values are unchanged.
void OPXLHelper::addProteinPositionMetaValues(std::vector< PeptideIdentification > & peptide_ids)
{
  // joins the protein-level link positions of all evidences into "p1,p2,...";
  // pev.getStart() and xl_pos are both 0-based, + 1 makes the N-terminal residue number 1
  auto protein_positions = [](const std::vector<PeptideEvidence>& pevs, Int xl_pos) -> String
  {
    String positions;
    for (const PeptideEvidence& pev : pevs)
    {
      Int prot_link_pos = pev.getStart() + xl_pos + 1;
      if (!positions.empty()) { positions += ","; }
      positions += String(prot_link_pos);
    }
    return positions;
  };

  for (PeptideIdentification& id : peptide_ids)
  {
    if (id.getHits().empty()) continue;
    PeptideHit& ph_alpha = id.getHits()[0];

    // cross-link position in Protein (alpha)
    const std::vector<PeptideEvidence>& pevs = ph_alpha.getPeptideEvidences();
    const Int alpha_pos = String(ph_alpha.getMetaValue(Constants::UserParam::OPENPEPXL_XL_POS1)).toInt();
    const String prot1_pos = protein_positions(pevs, alpha_pos);
    ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_XL_POS1_PROT, prot1_pos);

    if (id.getHits().size() == 2)
    {
      // cross-link position in Protein (beta)
      // note: the position within the beta peptide is stored on the alpha hit (XL_POS2)
      PeptideHit& ph_beta = id.getHits()[1];
      const Int beta_pos = String(ph_alpha.getMetaValue(Constants::UserParam::OPENPEPXL_XL_POS2)).toInt();
      const String prot2_pos = protein_positions(ph_beta.getPeptideEvidences(), beta_pos);
      ph_beta.setMetaValue(Constants::UserParam::OPENPEPXL_XL_POS1_PROT, prot1_pos);
      ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_XL_POS2_PROT, prot2_pos);
      ph_beta.setMetaValue(Constants::UserParam::OPENPEPXL_XL_POS2_PROT, prot2_pos);
    }
    else if (ph_alpha.getMetaValue(Constants::UserParam::OPENPEPXL_XL_POS2) != "-")
    {
      // second cross-link position in Protein (loop-links): maps onto the alpha evidences
      const Int second_pos = String(ph_alpha.getMetaValue(Constants::UserParam::OPENPEPXL_XL_POS2)).toInt();
      ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_XL_POS2_PROT, protein_positions(pevs, second_pos));
    }
    else
    {
      // mono-link: there is no second link position
      ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_XL_POS2_PROT, "-");
    }
  }
}
void OPXLHelper::addBetaAccessions(std::vector< PeptideIdentification > & peptide_ids)
{
for (PeptideIdentification& id : peptide_ids)
{
if (id.getHits().empty()) continue;
PeptideHit& ph_alpha = id.getHits()[0];
if (id.getHits().size() == 2)
{
PeptideHit& ph_beta = id.getHits()[1];
String prot2_accessions;
const std::vector<PeptideEvidence> pevs_beta = ph_beta.getPeptideEvidences();
for (std::vector<PeptideEvidence>::const_iterator pev = pevs_beta.begin(); pev != pevs_beta.end(); ++pev)
{
prot2_accessions = prot2_accessions + "," + pev->getProteinAccession();
}
if (!prot2_accessions.empty())
{
prot2_accessions = prot2_accessions.suffix(prot2_accessions.size()-1);
}
ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_BETA_ACCESSIONS, prot2_accessions);
ph_beta.setMetaValue(Constants::UserParam::OPENPEPXL_BETA_ACCESSIONS, prot2_accessions);
}
else
{
ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_BETA_ACCESSIONS, "-");
}
}
}
void OPXLHelper::addXLTargetDecoyMV(std::vector< PeptideIdentification > & peptide_ids)
{
for (PeptideIdentification& id : peptide_ids)
{
if (id.getHits().empty()) continue;
PeptideHit& ph_alpha = id.getHits()[0];
ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_TARGET_DECOY_ALPHA, ph_alpha.getMetaValue(Constants::UserParam::TARGET_DECOY));
// cross-link position in Protein (beta)
if (id.getHits().size() == 2)
{
PeptideHit& ph_beta = id.getHits()[1];
ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_TARGET_DECOY_BETA, ph_beta.getMetaValue(Constants::UserParam::TARGET_DECOY));
// if at least one of the two accession lists only contains decoys, the cross-link will be treated as a decoy
if ( (!String(ph_alpha.getMetaValue(Constants::UserParam::TARGET_DECOY)).hasSubstring("target")) ||
(!String(ph_beta.getMetaValue(Constants::UserParam::TARGET_DECOY)).hasSubstring("target")) )
{
ph_alpha.setMetaValue(Constants::UserParam::TARGET_DECOY, "decoy");
}
}
else
{
ph_alpha.setMetaValue(Constants::UserParam::OPENPEPXL_TARGET_DECOY_BETA, "-");
}
}
}
void OPXLHelper::removeBetaPeptideHits(std::vector< PeptideIdentification > & peptide_ids)
{
// add PeptideEvidence data from Beta peptides as MetaValues to the Alpha peptide and remove the Beta PeptideHit
for (PeptideIdentification& id : peptide_ids)
{
if (id.getHits().size() == 2 && id.getHits()[1].getMetaValue("xl_chain") == "MS:1002510")
{
const std::vector<PeptideEvidence>& peptide_evidences = id.getHits()[1].getPeptideEvidences();
String pre, post, start, end;
for (const PeptideEvidence& pe : peptide_evidences)
{
if (!pre.empty())
{
pre += ",";
post += ",";
start += ",";
end += ",";
}
pre += pe.getAABefore();
post += pe.getAAAfter();
start += pe.getStart();
end += pe.getEnd();
}
std::vector<PeptideHit> hits;
PeptideHit hit = id.getHits()[0];
hit.removeMetaValue("xl_chain");
hit.setMetaValue(Constants::UserParam::OPENPEPXL_BETA_PEPEV_PRE, pre);
hit.setMetaValue(Constants::UserParam::OPENPEPXL_BETA_PEPEV_POST, post);
hit.setMetaValue(Constants::UserParam::OPENPEPXL_BETA_PEPEV_START, start);
hit.setMetaValue(Constants::UserParam::OPENPEPXL_BETA_PEPEV_END, end);
hits.push_back(hit);
id.setHits(hits);
}
}
// Collect PeptideHits to the same spectrum under one PeptideIdentification
map<String, PeptideIdentification> new_peptide_ids;
for (PeptideIdentification& id : peptide_ids)
{
if (!id.getHits().empty())
{
PeptideHit& hit = id.getHits()[0];
PeptideIdentification new_id;
String current_spectrum = id.getMetaValue(Constants::UserParam::SPECTRUM_REFERENCE);
if (new_peptide_ids.find(current_spectrum) != new_peptide_ids.end())
{
new_id = (*new_peptide_ids.find(current_spectrum)).second;
}
else
{
new_id.setRT(id.getRT());
new_id.setMZ(id.getMZ());
new_id.setScoreType(Constants::UserParam::OPENPEPXL_SCORE);
new_id.setMetaValue(Constants::UserParam::SPECTRUM_REFERENCE, current_spectrum);
}
hit.removeMetaValue("xl_chain");
new_id.insertHit(hit);
new_peptide_ids[current_spectrum] = new_id;
}
}
std::vector<PeptideIdentification> new_peptide_ids_vector;
new_peptide_ids_vector.reserve(new_peptide_ids.size());
for (pair<String, PeptideIdentification> mit : new_peptide_ids)
{
new_peptide_ids_vector.push_back(mit.second);
}
peptide_ids = new_peptide_ids_vector;
}
  // --- Convenience overloads ---
  // Each of the following unwraps a PeptideIdentificationList via getData() and
  // forwards to the std::vector<PeptideIdentification>-based implementation.
  void OPXLHelper::addProteinPositionMetaValues(PeptideIdentificationList& peptide_ids)
  {
    addProteinPositionMetaValues(peptide_ids.getData());
  }
  void OPXLHelper::addXLTargetDecoyMV(PeptideIdentificationList& peptide_ids)
  {
    addXLTargetDecoyMV(peptide_ids.getData());
  }
  void OPXLHelper::addBetaAccessions(PeptideIdentificationList& peptide_ids)
  {
    addBetaAccessions(peptide_ids.getData());
  }
  void OPXLHelper::removeBetaPeptideHits(PeptideIdentificationList& peptide_ids)
  {
    removeBetaPeptideHits(peptide_ids.getData());
  }
  void OPXLHelper::computeDeltaScores(PeptideIdentificationList& peptide_ids)
  {
    computeDeltaScores(peptide_ids.getData());
  }
  std::vector<PeptideIdentification> OPXLHelper::combineTopRanksFromPairs(PeptideIdentificationList& peptide_ids, Size number_top_hits)
  {
    return combineTopRanksFromPairs(peptide_ids.getData(), number_top_hits);
  }
void OPXLHelper::addPercolatorFeatureList(ProteinIdentification& prot_id)
{
// add features for percolator
/* default features added in PercolatorAdapter:
* SpecId, ScanNr, ExpMass, CalcMass, mass,
* peplen, charge#min..#max, enzN, enzC, enzInt, dm, absdm
*/
StringList feature_set;
feature_set
<< Constants::UserParam::PRECURSOR_ERROR_PPM_USERPARAM
<< Constants::UserParam::OPENPEPXL_SCORE
<< Constants::UserParam::ISOTOPE_ERROR
<< "OpenPepXL:xquest_score"
<< "OpenPepXL:xcorr xlink"
<< "OpenPepXL:xcorr common"
<< "OpenPepXL:match-odds"
<< "OpenPepXL:intsum"
<< "OpenPepXL:wTIC"
<< "OpenPepXL:TIC"
<< "OpenPepXL:prescore"
<< "OpenPepXL:log_occupancy"
<< "OpenPepXL:log_occupancy_alpha"
<< "OpenPepXL:log_occupancy_beta"
<< "matched_xlink_alpha"
<< "matched_xlink_beta"
<< "matched_linear_alpha"
<< "matched_linear_beta"
<< "ppm_error_abs_sum_linear_alpha"
<< "ppm_error_abs_sum_linear_beta"
<< "ppm_error_abs_sum_xlinks_alpha"
<< "ppm_error_abs_sum_xlinks_beta"
<< "ppm_error_abs_sum_linear"
<< "ppm_error_abs_sum_xlinks"
<< "ppm_error_abs_sum_alpha"
<< "ppm_error_abs_sum_beta"
<< "ppm_error_abs_sum"
<< "precursor_total_intensity"
<< "precursor_target_intensity"
<< "precursor_signal_proportion"
<< "precursor_target_peak_count"
<< "precursor_residual_peak_count";
ProteinIdentification::SearchParameters search_params = prot_id.getSearchParameters();
search_params.setMetaValue("feature_extractor", "TOPP_PSMFeatureExtractor");
search_params.setMetaValue("extra_features", ListUtils::concatenate(feature_set, ","));
prot_id.setSearchParameters(search_params);
}
void OPXLHelper::computeDeltaScores(std::vector< PeptideIdentification > & peptide_ids)
{
for (PeptideIdentification& pep_id : peptide_ids)
{
pep_id.sort();
vector<PeptideHit>& phs = pep_id.getHits();
// at least two PeptideHits needed for an actual delta score
if (phs.size() > 1)
{
for (Size rank = 0; rank < phs.size()-1; ++rank)
{
double delta_score = phs[rank+1].getScore() / phs[rank].getScore();
phs[rank].setMetaValue(Constants::UserParam::DELTA_SCORE, delta_score);
}
}
// set delta score to 0 for the last ranked PeptideHit, or if only one Peptide hit is available
if (!phs.empty())
{
phs[phs.size()-1].setMetaValue(Constants::UserParam::DELTA_SCORE, 0.0);
}
}
}
std::vector< PeptideIdentification > OPXLHelper::combineTopRanksFromPairs(std::vector< PeptideIdentification > & peptide_ids, Size number_top_hits)
{
std::vector< PeptideIdentification > new_peptide_ids;
std::vector< PeptideIdentification > current_spectrum_peptide_ids;
std::set< String > spectrum_indices;
for (PeptideIdentification& id : peptide_ids)
{
if (!id.getHits().empty())
{
spectrum_indices.insert(id.getHits()[0].getMetaValue("spectrum_index"));
}
}
for (String index : spectrum_indices)
{
for (PeptideIdentification& id : peptide_ids)
{
if (!id.getHits().empty())
{
if (String(id.getHits()[0].getMetaValue("spectrum_index")) == index)
{
current_spectrum_peptide_ids.push_back(id);
}
}
}
std::sort(current_spectrum_peptide_ids.rbegin(), current_spectrum_peptide_ids.rend(), OPXLHelper::PeptideIDScoreComparator());
current_spectrum_peptide_ids.erase( std::unique( current_spectrum_peptide_ids.begin(), current_spectrum_peptide_ids.end() ), current_spectrum_peptide_ids.end() );
if (current_spectrum_peptide_ids.size() > number_top_hits)
{
current_spectrum_peptide_ids.resize(number_top_hits);
}
Size rank_count(1);
for (PeptideIdentification& current_id : current_spectrum_peptide_ids)
{
if (!current_id.getHits().empty())
{
current_id.getHits()[0].setMetaValue("xl_rank", rank_count);
if (current_id.getHits().size() == 2)
{
current_id.getHits()[1].setMetaValue("xl_rank", rank_count);
}
}
rank_count++;
}
new_peptide_ids.insert(new_peptide_ids.end(), current_spectrum_peptide_ids.begin(), current_spectrum_peptide_ids.end());
current_spectrum_peptide_ids.clear();
}
return new_peptide_ids;
}
std::vector <OPXLDataStructs::ProteinProteinCrossLink> OPXLHelper::collectPrecursorCandidates(const IntList& precursor_correction_steps,
double precursor_mass,
double precursor_mass_tolerance,
bool precursor_mass_tolerance_unit_ppm,
const vector<OPXLDataStructs::AASeqWithMass>& filtered_peptide_masses,
double cross_link_mass,
const DoubleList& cross_link_mass_mono_link,
const StringList& cross_link_residue1,
const StringList& cross_link_residue2,
String cross_link_name,
bool use_sequence_tags,
const std::vector<std::string>& tags)
{
// determine candidates
std::vector< OPXLDataStructs::XLPrecursor > candidates;
std::vector< double > spectrum_precursor_vector;
std::vector< double > allowed_error_vector;
for (int correction_mass : precursor_correction_steps)
{
double allowed_error = 0;
double corrected_precursor_mass = precursor_mass - (static_cast<double>(correction_mass) * Constants::C13C12_MASSDIFF_U);
if (precursor_mass_tolerance_unit_ppm) // ppm
{
allowed_error = corrected_precursor_mass * precursor_mass_tolerance * 1e-6;
}
else // Dalton
{
allowed_error = precursor_mass_tolerance;
}
spectrum_precursor_vector.push_back(corrected_precursor_mass);
allowed_error_vector.push_back(allowed_error);
} // end correction mass loop
std::vector< int > precursor_correction_positions;
// if sequence tags are used and no tags were found, don't bother combining peptide pairs
if ( (use_sequence_tags && !tags.empty()) ||
!use_sequence_tags)
{
candidates = OPXLHelper::enumerateCrossLinksAndMasses(filtered_peptide_masses, cross_link_mass, cross_link_mass_mono_link, cross_link_residue1, cross_link_residue2, spectrum_precursor_vector, precursor_correction_positions, precursor_mass_tolerance, precursor_mass_tolerance_unit_ppm);
}
// an empty vector of sequence tags implies no filtering should be done in this case
if (use_sequence_tags)
{
Size candidates_size = candidates.size();
OPXLHelper::filterPrecursorsByTags(candidates, precursor_correction_positions, tags);
OPENMS_LOG_DEBUG << "Number of sequence tags: " << tags.size() << std::endl;
OPENMS_LOG_DEBUG << "Candidate Peptide Pairs before sequence tag filtering: " << candidates_size << std::endl;
OPENMS_LOG_DEBUG << "Candidate Peptide Pairs after sequence tag filtering: " << candidates.size() << std::endl;
}
vector< int > precursor_corrections;
for (Size pc = 0; pc < precursor_correction_positions.size(); ++pc)
{
precursor_corrections.push_back(precursor_correction_steps[precursor_correction_positions[pc]]);
}
vector <OPXLDataStructs::ProteinProteinCrossLink> cross_link_candidates = OPXLHelper::buildCandidates(candidates, precursor_corrections, precursor_correction_positions, filtered_peptide_masses, cross_link_residue1, cross_link_residue2, cross_link_mass, cross_link_mass_mono_link, spectrum_precursor_vector, allowed_error_vector, std::move(cross_link_name));
return cross_link_candidates;
}
double OPXLHelper::computePrecursorError(const OPXLDataStructs::CrossLinkSpectrumMatch& csm, double precursor_mz, int precursor_charge)
{
// Error calculation
double weight = csm.cross_link.alpha->getMonoWeight();
if (csm.cross_link.getType() == OPXLDataStructs::CROSS)
{
weight += csm.cross_link.beta->getMonoWeight() + csm.cross_link.cross_linker_mass;
}
else
{
weight += csm.cross_link.cross_linker_mass;
}
double precursor_mass = (precursor_mz * static_cast<double>(precursor_charge)) - (static_cast<double>(precursor_charge) * Constants::PROTON_MASS_U)
- (static_cast<double>(csm.precursor_correction) * Constants::C13C12_MASSDIFF_U);
double error = precursor_mass - weight;
double rel_error = (error / precursor_mass) / 1e-6;
return rel_error;
}
void OPXLHelper::isoPeakMeans(OPXLDataStructs::CrossLinkSpectrumMatch& csm,
const DataArrays::IntegerDataArray& num_iso_peaks_array,
const std::vector< std::pair< Size, Size > >& matched_spec_linear_alpha,
const std::vector< std::pair< Size, Size > >& matched_spec_linear_beta,
const std::vector< std::pair< Size, Size > >& matched_spec_xlinks_alpha,
const std::vector< std::pair< Size, Size > >& matched_spec_xlinks_beta)
{
csm.num_iso_peaks_mean = Math::mean(num_iso_peaks_array.begin(), num_iso_peaks_array.end());
auto addUp = [&](const auto& data) -> double
{
double sum{};
if (data.empty()) return sum;
for (const auto& p : data) sum += num_iso_peaks_array[p.second];
return sum / data.size();
};
csm.num_iso_peaks_mean_linear_alpha = addUp(matched_spec_linear_alpha);
csm.num_iso_peaks_mean_linear_beta = addUp(matched_spec_linear_beta);
csm.num_iso_peaks_mean_xlinks_alpha = addUp(matched_spec_xlinks_alpha);
csm.num_iso_peaks_mean_xlinks_beta = addUp(matched_spec_xlinks_beta);
}
void OPXLHelper::filterPrecursorsByTags(std::vector <OPXLDataStructs::XLPrecursor>& candidates, std::vector< int >& precursor_correction_positions, const std::vector<std::string>& tags)
{
std::vector <OPXLDataStructs::XLPrecursor> filtered_candidates;
std::vector< int > filtered_precursor_correction_positions;
// brute force string comparisons for now, faster than Aho-Corasick for small tag sets
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(candidates.size()); ++i)
{
// iterate over copies, so that we can reverse them
for (std::string tag : tags)
{
if (candidates[i].alpha_seq.hasSubstring(tag) || candidates[i].beta_seq.hasSubstring(tag))
{
#pragma omp critical (filtered_candidates_access)
{
filtered_candidates.push_back(candidates[i]);
filtered_precursor_correction_positions.push_back(precursor_correction_positions[i]);
}
break;
}
std::reverse(tag.begin(), tag.end());
if (candidates[i].alpha_seq.hasSubstring(tag) || candidates[i].beta_seq.hasSubstring(tag))
{
#pragma omp critical (filtered_candidates_access)
{
filtered_candidates.push_back(candidates[i]);
filtered_precursor_correction_positions.push_back(precursor_correction_positions[i]);
}
break;
}
}
} // end of parallel loop over candidates
candidates = filtered_candidates;
precursor_correction_positions = filtered_precursor_correction_positions;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/XLMS/OPXLDataStructs.cpp | .cpp | 365 | 13 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Eugen Netz $
// $Authors: Eugen Netz $
// --------------------------------------------------------------------------
namespace OpenMS
{
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/XLMS/OpenPepXLAlgorithm.cpp | .cpp | 69,146 | 1,254 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Eugen Netz $
// $Authors: Timo Sachsenberg, Eugen Netz $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/XLMS/OpenPepXLAlgorithm.h>
#include <OpenMS/ANALYSIS/ID/IDMapper.h>
#include <OpenMS/ANALYSIS/ID/PeptideIndexing.h>
#include <OpenMS/ANALYSIS/ID/PrecursorPurity.h>
#include <OpenMS/ANALYSIS/XLMS/OPXLHelper.h>
#include <OpenMS/ANALYSIS/XLMS/OPXLSpectrumProcessingAlgorithms.h>
#include <OpenMS/ANALYSIS/XLMS/XQuestScores.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/CHEMISTRY/ModifiedPeptideGenerator.h>
#include <OpenMS/CHEMISTRY/ProteaseDB.h>
#include <OpenMS/CHEMISTRY/ProteaseDigestion.h>
#include <OpenMS/CHEMISTRY/SimpleTSGXLMS.h>
#include <OpenMS/CHEMISTRY/TheoreticalSpectrumGeneratorXLMS.h>
#include <OpenMS/PROCESSING/FILTERING/NLargest.h>
#include <OpenMS/KERNEL/SpectrumHelper.h>
#include <OpenMS/PROCESSING/CENTROIDING/PeakPickerHiRes.h>
#include <iostream>
using namespace std;
using namespace OpenMS;
// turn on additional debug output
// #define DEBUG_OPENPEPXLALGO
#ifdef _OPENMP
#include <omp.h>
#endif
// Constructor: registers all tool parameters (defaults, valid values and
// section descriptions) with the DefaultParamHandler base and then syncs the
// member variables from them via defaultsToParam_() -> updateMembers_().
OpenPepXLAlgorithm::OpenPepXLAlgorithm()
  : DefaultParamHandler("OpenPepXLAlgorithm")
{
  // decoy database handling
  defaults_.setValue("decoy_string", "DECOY_", "String that was appended (or prefixed - see 'prefix' flag below) to the accessions in the protein database to indicate decoy proteins.");
  std::vector<std::string> bool_strings = {"true","false"};
  defaults_.setValue("decoy_prefix", "true", "Set to true, if the decoy_string is a prefix of accessions in the protein database. Otherwise it is a suffix.");
  defaults_.setValidStrings("decoy_prefix", bool_strings);
  // precursor filtering
  defaults_.setValue("precursor:mass_tolerance", 10.0, "Width of precursor mass tolerance window");
  std::vector<std::string> mass_tolerance_unit_valid_strings = {"ppm","Da"};
  defaults_.setValue("precursor:mass_tolerance_unit", "ppm", "Unit of precursor mass tolerance.");
  defaults_.setValidStrings("precursor:mass_tolerance_unit", mass_tolerance_unit_valid_strings);
  defaults_.setValue("precursor:min_charge", 2, "Minimum precursor charge to be considered.");
  defaults_.setValue("precursor:max_charge", 8, "Maximum precursor charge to be considered.");
  defaults_.setValue("precursor:corrections", ListUtils::create<int>("4, 3, 2, 1, 0"), "Monoisotopic peak correction. Matches candidates for possible monoisotopic precursor peaks for experimental mass m and given numbers n at masses (m - n * (C13-C12)). These should be ordered from more extreme to less extreme corrections. Numbers later in the list will be preferred in case of ambiguities.");
  defaults_.setSectionDescription("precursor", "Precursor filtering settings");
  // fragment peak matching
  defaults_.setValue("fragment:mass_tolerance", 20.0, "Fragment mass tolerance");
  defaults_.setValue("fragment:mass_tolerance_xlinks", 20.0, "Fragment mass tolerance for cross-link ions");
  defaults_.setValue("fragment:mass_tolerance_unit", "ppm", "Unit of fragment m");
  defaults_.setValidStrings("fragment:mass_tolerance_unit", mass_tolerance_unit_valid_strings);
  defaults_.setSectionDescription("fragment", "Fragment peak matching settings");
  // peptide modifications (valid strings come from the ModificationsDB)
  vector<String> all_mods;
  ModificationsDB::getInstance()->getAllSearchModifications(all_mods);
  defaults_.setValue("modifications:fixed", std::vector<std::string>{"Carbamidomethyl (C)"}, "Fixed modifications, specified using UniMod (www.unimod.org) terms, e.g. 'Carbamidomethyl (C)'");
  defaults_.setValidStrings("modifications:fixed", ListUtils::create<std::string>(all_mods));
  defaults_.setValue("modifications:variable", std::vector<std::string>{"Oxidation (M)"}, "Variable modifications, specified using UniMod (www.unimod.org) terms, e.g. 'Oxidation (M)'");
  defaults_.setValidStrings("modifications:variable", ListUtils::create<std::string>(all_mods));
  defaults_.setValue("modifications:variable_max_per_peptide", 3, "Maximum number of residues carrying a variable modification per candidate peptide");
  defaults_.setSectionDescription("modifications", "Peptide modification settings");
  // digestion (valid enzymes come from the ProteaseDB)
  defaults_.setValue("peptide:min_size", 5, "Minimum size a peptide must have after digestion to be considered in the search.");
  defaults_.setValue("peptide:missed_cleavages", 3, "Number of missed cleavages.");
  vector<String> all_enzymes;
  ProteaseDB::getInstance()->getAllNames(all_enzymes);
  defaults_.setValue("peptide:enzyme", "Trypsin", "The enzyme used for peptide digestion.");
  defaults_.setValidStrings("peptide:enzyme", ListUtils::create<std::string>(all_enzymes));
  defaults_.setSectionDescription("peptide", "Settings for digesting proteins into peptides");
  // cross-linker reagent description
  defaults_.setValue("cross_linker:residue1", std::vector<std::string>{"K","N-term"}, "Comma separated residues, that the first side of a bifunctional cross-linker can attach to");
  defaults_.setValue("cross_linker:residue2", std::vector<std::string>{"K","N-term"}, "Comma separated residues, that the second side of a bifunctional cross-linker can attach to");
  defaults_.setValue("cross_linker:mass_light", 138.0680796, "Mass of the light cross-linker, linking two residues on one or two peptides");
  defaults_.setValue("cross_linker:mass_iso_shift", 12.075321, "Mass of the isotopic shift between the light and heavy linkers");
  defaults_.setValue("cross_linker:mass_mono_link", ListUtils::create<double>("156.07864431, 155.094628715"), "Possible masses of the linker, when attached to only one peptide");
  defaults_.setValue("cross_linker:name", "DSS", "Name of the searched cross-link, used to resolve ambiguity of equal masses (e.g. DSS or BS3)");
  defaults_.setSectionDescription("cross_linker", "Description of the cross-linker reagent");
  // general algorithm settings
  defaults_.setValue("algorithm:number_top_hits", 1, "Number of top hits reported for each spectrum pair");
  std::vector<std::string> deisotope_strings = {"true","false","auto"};
  defaults_.setValue("algorithm:deisotope", "auto", "Set to true, if the input spectra should be deisotoped before any other processing steps. If set to auto the spectra will be deisotoped, if the fragment mass tolerance is < 0.1 Da or < 100 ppm (0.1 Da at a mass of 1000)", {"advanced"});
  defaults_.setValidStrings("algorithm:deisotope", deisotope_strings);
  defaults_.setSectionDescription("algorithm", "Additional algorithm settings");
  // ion types considered during spectrum matching (advanced)
  defaults_.setValue("ions:b_ions", "true", "Search for peaks of b-ions.", {"advanced"});
  defaults_.setValue("ions:y_ions", "true", "Search for peaks of y-ions.", {"advanced"});
  defaults_.setValue("ions:a_ions", "false", "Search for peaks of a-ions.", {"advanced"});
  defaults_.setValue("ions:x_ions", "false", "Search for peaks of x-ions.", {"advanced"});
  defaults_.setValue("ions:c_ions", "false", "Search for peaks of c-ions.", {"advanced"});
  defaults_.setValue("ions:z_ions", "false", "Search for peaks of z-ions.", {"advanced"});
  defaults_.setValue("ions:neutral_losses", "true", "Search for neutral losses of H2O and H3N.", {"advanced"});
  defaults_.setValidStrings("ions:b_ions", bool_strings);
  defaults_.setValidStrings("ions:y_ions", bool_strings);
  defaults_.setValidStrings("ions:a_ions", bool_strings);
  defaults_.setValidStrings("ions:x_ions", bool_strings);
  defaults_.setValidStrings("ions:c_ions", bool_strings);
  defaults_.setValidStrings("ions:z_ions", bool_strings);
  defaults_.setValidStrings("ions:neutral_losses", bool_strings);
  defaults_.setSectionDescription("ions", "Ion types to search for in MS/MS spectra");
  // copy the registered defaults into param_ and trigger updateMembers_()
  defaultsToParam_();
}
// Defaulted out-of-line so the destructor is emitted in this translation unit.
OpenPepXLAlgorithm::~OpenPepXLAlgorithm() = default;
// Pulls the current values from param_ into the member variables; invoked by
// DefaultParamHandler whenever the parameters change (e.g. after setParameters).
void OpenPepXLAlgorithm::updateMembers_()
{
  decoy_string_ = param_.getValue("decoy_string").toString();
  decoy_prefix_ = param_.getValue("decoy_prefix").toBool();
  min_precursor_charge_ = param_.getValue("precursor:min_charge");
  max_precursor_charge_ = param_.getValue("precursor:max_charge");
  precursor_mass_tolerance_ = param_.getValue("precursor:mass_tolerance");
  // unit flags are stored as booleans: true = ppm, false = Da
  precursor_mass_tolerance_unit_ppm_ = (param_.getValue("precursor:mass_tolerance_unit") == "ppm");
  precursor_correction_steps_ = param_.getValue("precursor:corrections");
  fragment_mass_tolerance_ = param_.getValue("fragment:mass_tolerance");
  fragment_mass_tolerance_xlinks_ = param_.getValue("fragment:mass_tolerance_xlinks");
  fragment_mass_tolerance_unit_ppm_ = (param_.getValue("fragment:mass_tolerance_unit") == "ppm");
  cross_link_residue1_ = ListUtils::toStringList<std::string>(param_.getValue("cross_linker:residue1"));
  cross_link_residue2_ = ListUtils::toStringList<std::string>(param_.getValue("cross_linker:residue2"));
  cross_link_mass_light_ = param_.getValue("cross_linker:mass_light");
  cross_link_mass_iso_shift_ = param_.getValue("cross_linker:mass_iso_shift");
  cross_link_mass_mono_link_ = param_.getValue("cross_linker:mass_mono_link");
  cross_link_name_ = param_.getValue("cross_linker:name").toString();
  fixedModNames_ = ListUtils::toStringList<std::string>(param_.getValue("modifications:fixed"));
  varModNames_ = ListUtils::toStringList<std::string>(param_.getValue("modifications:variable"));
  max_variable_mods_per_peptide_ = static_cast<Size>(param_.getValue("modifications:variable_max_per_peptide"));
  peptide_min_size_ = static_cast<Size>(param_.getValue("peptide:min_size"));
  missed_cleavages_ = static_cast<Size>(param_.getValue("peptide:missed_cleavages"));
  enzyme_name_ = param_.getValue("peptide:enzyme").toString();
  number_top_hits_ = param_.getValue("algorithm:number_top_hits");
  deisotope_mode_ = param_.getValue("algorithm:deisotope").toString();
  // ion type switches are kept as their string representation ("true"/"false")
  add_y_ions_ = param_.getValue("ions:y_ions").toString();
  add_b_ions_ = param_.getValue("ions:b_ions").toString();
  add_x_ions_ = param_.getValue("ions:x_ions").toString();
  add_a_ions_ = param_.getValue("ions:a_ions").toString();
  add_c_ions_ = param_.getValue("ions:c_ions").toString();
  add_z_ions_ = param_.getValue("ions:z_ions").toString();
  add_losses_ = param_.getValue("ions:neutral_losses").toString();
}
OpenPepXLAlgorithm::ExitCodes OpenPepXLAlgorithm::run(PeakMap& unprocessed_spectra, ConsensusMap& cfeatures, std::vector<FASTAFile::FASTAEntry>& fasta_db, std::vector<ProteinIdentification>& protein_ids, PeptideIdentificationList& peptide_ids, OPXLDataStructs::PreprocessedPairSpectra& preprocessed_pair_spectra, std::vector< std::pair<Size, Size> >& spectrum_pairs, std::vector< std::vector< OPXLDataStructs::CrossLinkSpectrumMatch > >& all_top_csms, PeakMap& spectra)
{
ProgressLogger progresslogger;
progresslogger.setLogType(this->getLogType());
// preprocess parameters for convenience
if (fragment_mass_tolerance_xlinks_ < fragment_mass_tolerance_)
{
fragment_mass_tolerance_xlinks_ = fragment_mass_tolerance_;
}
#ifdef DEBUG_OPENPEPXLALGO
OPENMS_LOG_DEBUG << "XLinks Tolerance: " << fragment_mass_tolerance_xlinks_ << endl;
#endif
std::sort(cross_link_mass_mono_link_.begin(), cross_link_mass_mono_link_.end(), std::greater< double >());
// deisotope if "true" or if "auto" and the tolerance is below the threshold (0.1 Da or 100 ppm)
bool deisotope = (deisotope_mode_ == "true") ||
(deisotope_mode_ == "auto" &&
((!fragment_mass_tolerance_unit_ppm_ && fragment_mass_tolerance_ < 0.1) ||
(fragment_mass_tolerance_unit_ppm_ && fragment_mass_tolerance_ < 100)));
set<String> fixed_unique(fixedModNames_.begin(), fixedModNames_.end());
if (fixed_unique.size() != fixedModNames_.size())
{
OPENMS_LOG_WARN << "duplicate fixed modification provided." << endl;
return ExitCodes::ILLEGAL_PARAMETERS;
}
set<String> var_unique(varModNames_.begin(), varModNames_.end());
if (var_unique.size() != varModNames_.size())
{
OPENMS_LOG_WARN << "duplicate variable modification provided." << endl;
return ExitCodes::ILLEGAL_PARAMETERS;
}
ModifiedPeptideGenerator::MapToResidueType fixed_modifications = ModifiedPeptideGenerator::getModifications(fixedModNames_);
ModifiedPeptideGenerator::MapToResidueType variable_modifications = ModifiedPeptideGenerator::getModifications(varModNames_);
protein_ids[0].setPrimaryMSRunPath({}, unprocessed_spectra);
if (unprocessed_spectra.empty() && unprocessed_spectra.getChromatograms().empty())
{
OPENMS_LOG_WARN << "The given file does not contain any conventional peak data, but might"
" contain chromatograms. This tool currently cannot handle them, sorry." << endl;
return ExitCodes::INCOMPATIBLE_INPUT_DATA;
}
//check if spectra are sorted
for (Size i = 0; i < unprocessed_spectra.size(); ++i)
{
if (!unprocessed_spectra[i].isSorted())
{
OPENMS_LOG_WARN << "Error: Not all spectra are sorted according to peak m/z positions. Use FileFilter to sort the input!" << endl;
return ExitCodes::INCOMPATIBLE_INPUT_DATA;
}
}
// Peak Picking, check if all levels are picked and pick uncentroided MS levels
PeakPickerHiRes pp;
PeakMap picked_spectra;
progresslogger.startProgress(0, 1, "Centroiding data (if necessary)...");
pp.pickExperiment(unprocessed_spectra, picked_spectra, true);
progresslogger.endProgress();
unprocessed_spectra.clear(true);
// Precursor Purity precalculation
map<String, PrecursorPurity::PurityScores> precursor_purities = PrecursorPurity::computePrecursorPurities(picked_spectra, precursor_mass_tolerance_, precursor_mass_tolerance_unit_ppm_);
// preprocess spectra (filter out 0 values, sort by position)
progresslogger.startProgress(0, 1, "Filtering spectra...");
spectra = OPXLSpectrumProcessingAlgorithms::preprocessSpectra(picked_spectra, fragment_mass_tolerance_xlinks_, fragment_mass_tolerance_unit_ppm_, peptide_min_size_, min_precursor_charge_, max_precursor_charge_, deisotope, true);
progresslogger.endProgress();
picked_spectra.clear(true);
// sort the spectra by RT, the order might have been changed by parallel preprocessing
spectra.sortSpectra(false);
ProteaseDigestion digestor;
digestor.setEnzyme(enzyme_name_);
digestor.setMissedCleavages(missed_cleavages_);
IDMapper idmapper;
Param p = idmapper.getParameters();
p.setValue("rt_tolerance", 30.0);
p.setValue("mz_tolerance", precursor_mass_tolerance_);
String mz_measure = precursor_mass_tolerance_unit_ppm_ ? "ppm" : "Da";
p.setValue("mz_measure", mz_measure);
p.setValue("mz_reference", "precursor");
p.setValue("ignore_charge", "false");
idmapper.setParameters(p);
progresslogger.startProgress(0, 1, "Map spectrum precursors to linked features...");
idmapper.annotate(cfeatures, PeptideIdentificationList(), vector<ProteinIdentification>(), true, true, spectra);
progresslogger.endProgress();
vector< double > spectrum_precursors;
// find pairs of MS2 spectra, that correspond to MS1 features linked by the consensus map / FeatureFinderMultiplex
for (ConsensusMap::const_iterator cit = cfeatures.begin(); cit != cfeatures.end(); ++cit)
{
if (cit->getFeatures().size() == 2 && cit->getPeptideIdentifications().size() >= 2)
{
for (Size x = 0; x < cit->getPeptideIdentifications().size(); ++x)
{
if (static_cast<Size>(cit->getPeptideIdentifications()[x].getMetaValue("map_index")) == 0)
{
for (Size y = 0; y < cit->getPeptideIdentifications().size(); ++y)
{
if (static_cast<Size>(cit->getPeptideIdentifications()[y].getMetaValue("map_index")) == 1)
{
const PeptideIdentification& pi_0 = cit->getPeptideIdentifications()[x];
const PeptideIdentification& pi_1 = cit->getPeptideIdentifications()[y];
spectrum_pairs.emplace_back(pi_0.getMetaValue("spectrum_index"), pi_1.getMetaValue("spectrum_index"));
double current_precursor_mz0 = spectra[pi_0.getMetaValue("spectrum_index")].getPrecursors()[0].getMZ();
double current_precursor_mz1 = spectra[pi_1.getMetaValue("spectrum_index")].getPrecursors()[0].getMZ();
double current_precursor_charge0 = spectra[pi_0.getMetaValue("spectrum_index")].getPrecursors()[0].getCharge();
double current_precursor_charge1 = spectra[pi_1.getMetaValue("spectrum_index")].getPrecursors()[0].getCharge();
double current_precursor_mass0 = (current_precursor_mz0 * current_precursor_charge0) - (current_precursor_charge0 * Constants::PROTON_MASS_U);
double current_precursor_mass1 = (current_precursor_mz1 * current_precursor_charge1) - (current_precursor_charge1 * Constants::PROTON_MASS_U);
spectrum_precursors.push_back(current_precursor_mass0);
spectrum_precursors.push_back(current_precursor_mass1);
}
}
}
}
}
}
sort(spectrum_precursors.begin(), spectrum_precursors.end());
// create linear peak / shifted peak spectra for all pairs
progresslogger.startProgress(0, 1, "Preprocessing Spectra Pairs...");
preprocessed_pair_spectra = OpenPepXLAlgorithm::preprocessPairs_(spectra, spectrum_pairs, cross_link_mass_iso_shift_, fragment_mass_tolerance_, fragment_mass_tolerance_xlinks_, fragment_mass_tolerance_unit_ppm_, deisotope);
progresslogger.endProgress();
ProteinIdentification::SearchParameters search_params = protein_ids[0].getSearchParameters();
String searched_charges((String(min_precursor_charge_)));
for (int ch = min_precursor_charge_+1; ch <= max_precursor_charge_; ++ch)
{
searched_charges += "," + String(ch);
}
search_params.charges = searched_charges;
search_params.digestion_enzyme = *(ProteaseDB::getInstance()->getEnzyme(enzyme_name_));
search_params.fixed_modifications = fixedModNames_;
search_params.variable_modifications = varModNames_;
search_params.mass_type = ProteinIdentification::PeakMassType::MONOISOTOPIC;
search_params.missed_cleavages = missed_cleavages_;
search_params.fragment_mass_tolerance = fragment_mass_tolerance_;
search_params.fragment_mass_tolerance_ppm = fragment_mass_tolerance_unit_ppm_;
search_params.precursor_mass_tolerance = precursor_mass_tolerance_;
search_params.precursor_mass_tolerance_ppm = precursor_mass_tolerance_unit_ppm_;
// As MetaValues
search_params.setMetaValue("decoy_prefix", decoy_prefix_);
search_params.setMetaValue("decoy_string", decoy_string_);
search_params.setMetaValue("precursor:min_charge", min_precursor_charge_);
search_params.setMetaValue("precursor:max_charge", max_precursor_charge_);
search_params.setMetaValue("fragment:mass_tolerance_xlinks", fragment_mass_tolerance_xlinks_);
search_params.setMetaValue("peptide:min_size", peptide_min_size_);
search_params.setMetaValue("cross_link:residue1", cross_link_residue1_);
search_params.setMetaValue("cross_link:residue2", cross_link_residue2_);
search_params.setMetaValue("cross_link:mass", cross_link_mass_light_);
search_params.setMetaValue("cross_link:mass_isoshift", cross_link_mass_iso_shift_);
search_params.setMetaValue("cross_link:mass_monolink", cross_link_mass_mono_link_);
search_params.setMetaValue("cross_link:name", cross_link_name_);
search_params.setMetaValue("precursor:corrections", precursor_correction_steps_);
search_params.setMetaValue("modifications:variable_max_per_peptide", max_variable_mods_per_peptide_);
protein_ids[0].setSearchParameters(search_params);
protein_ids[0].setScoreType("OpenPepXL_Protein_Score");
// lookup for processed peptides. must be defined outside of omp section and synchronized
vector<OPXLDataStructs::AASeqWithMass> peptide_masses;
peptide_masses = OPXLHelper::digestDatabase(fasta_db, digestor, peptide_min_size_, cross_link_residue1_, cross_link_residue2_, fixed_modifications, variable_modifications, max_variable_mods_per_peptide_);
// create spectrum generator
TheoreticalSpectrumGeneratorXLMS specGen;
SimpleTSGXLMS specGen_mainscore;
// Set parameters for cross-link fragmentation
Param specGenParams = specGen.getParameters();
specGenParams.setValue("add_y_ions", add_y_ions_, "Add peaks of b-ions to the spectrum");
specGenParams.setValue("add_b_ions", add_b_ions_, "Add peaks of y-ions to the spectrum");
specGenParams.setValue("add_a_ions", add_a_ions_, "Add peaks of a-ions to the spectrum");
specGenParams.setValue("add_x_ions", add_x_ions_, "Add peaks of c-ions to the spectrum");
specGenParams.setValue("add_c_ions", add_c_ions_, "Add peaks of x-ions to the spectrum");
specGenParams.setValue("add_z_ions", add_z_ions_, "Add peaks of z-ions to the spectrum");
specGenParams.setValue("add_losses", add_losses_, "Adds common losses to those ion expect to have them, only water and ammonia loss is considered");
specGenParams.setValue("add_metainfo", "true");
specGenParams.setValue("add_isotopes", "true", "If set to 1 isotope peaks of the product ion peaks are added");
specGenParams.setValue("max_isotope", 2, "Defines the maximal isotopic peak which is added, add_isotopes must be set to 1");
specGenParams.setValue("add_precursor_peaks", "true", "Adds peaks of the precursor to the spectrum, which happen to occur sometimes");
specGenParams.setValue("add_abundant_immonium_ions", "false", "Add most abundant immonium ions");
specGenParams.setValue("add_first_prefix_ion", "true", "If set to true e.g. b1 ions are added");
specGenParams.setValue("add_k_linked_ions", "false");
specGen.setParameters(specGenParams);
Param specGenParams_mainscore = specGen_mainscore.getParameters();
specGenParams_mainscore.setValue("add_b_ions", add_b_ions_, "Add peaks of y-ions to the spectrum");
specGenParams_mainscore.setValue("add_y_ions", add_y_ions_, "Add peaks of b-ions to the spectrum");
specGenParams_mainscore.setValue("add_a_ions", add_a_ions_, "Add peaks of a-ions to the spectrum");
specGenParams_mainscore.setValue("add_x_ions", add_x_ions_, "Add peaks of c-ions to the spectrum");
specGenParams_mainscore.setValue("add_c_ions", add_c_ions_, "Add peaks of x-ions to the spectrum");
specGenParams_mainscore.setValue("add_z_ions", add_z_ions_, "Add peaks of z-ions to the spectrum");
specGenParams_mainscore.setValue("add_losses", add_losses_, "Adds common losses to those ion expect to have them, only water and ammonia loss is considered");
specGenParams_mainscore.setValue("add_first_prefix_ion", "true", "If set to true e.g. b1 ions are added");
specGenParams_mainscore.setValue("add_isotopes", "true", "If set to 1 isotope peaks of the product ion peaks are added");
specGenParams_mainscore.setValue("max_isotope", 2, "Defines the maximal isotopic peak which is added, add_isotopes must be set to 1");
specGenParams_mainscore.setValue("add_precursor_peaks", "true");
specGenParams_mainscore.setValue("add_k_linked_ions", "false");
specGen_mainscore.setParameters(specGenParams_mainscore);
#ifdef DEBUG_OPENPEPXLALGO
OPENMS_LOG_DEBUG << "Peptide candidates: " << peptide_masses.size() << endl;
#endif
search_params = protein_ids[0].getSearchParameters();
search_params.setMetaValue("MS:1001029", peptide_masses.size()); // number of sequences searched = MS:1001029
protein_ids[0].setSearchParameters(search_params);
sort(peptide_masses.begin(), peptide_masses.end(), OPXLDataStructs::AASeqWithMassComparator());
// The largest peptides given a fixed maximal precursor mass are possible with loop links
// Filter peptides using maximal loop link mass first
double max_precursor_mass = spectrum_precursors[spectrum_precursors.size()-1];
// compute absolute tolerance from relative, if necessary
double max_peptide_allowed_error = 0;
if (precursor_mass_tolerance_unit_ppm_) // ppm
{
max_peptide_allowed_error = max_precursor_mass * precursor_mass_tolerance_ * 1e-6;
}
else // Dalton
{
max_peptide_allowed_error = precursor_mass_tolerance_;
}
// maximal possible peptide mass given the largest precursor
double max_peptide_mass = max_precursor_mass - cross_link_mass_light_ + max_peptide_allowed_error;
// search for the first mass greater than the maximum, use everything before that peptide
vector<OPXLDataStructs::AASeqWithMass>::iterator last = upper_bound(peptide_masses.begin(), peptide_masses.end(), max_peptide_mass, OPXLDataStructs::AASeqWithMassComparator());
vector<OPXLDataStructs::AASeqWithMass> filtered_peptide_masses;
filtered_peptide_masses.assign(peptide_masses.begin(), last);
peptide_masses.clear();
// iterate over all spectra
progresslogger.startProgress(0, 1, "Matching to theoretical spectra and scoring...");
Size spectrum_counter = 0;
for (SignedSize pair_index = 0; pair_index < static_cast<SignedSize>(spectrum_pairs.size()); ++pair_index)
{
Size scan_index = spectrum_pairs[pair_index].first;
Size scan_index_heavy = spectrum_pairs[pair_index].second;
#ifdef DEBUG_OPENPEPXLALGO
OPENMS_LOG_DEBUG << "New scan indices: " << scan_index << "\t" << scan_index_heavy << endl;
#endif
const PeakSpectrum& spectrum_light = spectra[scan_index];
const double precursor_charge = spectrum_light.getPrecursors()[0].getCharge();
const double precursor_mz = spectrum_light.getPrecursors()[0].getMZ();
const double precursor_mass = precursor_mz * static_cast<double>(precursor_charge) - static_cast<double>(precursor_charge) * Constants::PROTON_MASS_U;
const PeakSpectrum& linear_peaks = preprocessed_pair_spectra.spectra_linear_peaks[pair_index];
const PeakSpectrum& xlink_peaks = preprocessed_pair_spectra.spectra_xlink_peaks[pair_index];
const PeakSpectrum& all_peaks = preprocessed_pair_spectra.spectra_all_peaks[pair_index];
vector< OPXLDataStructs::CrossLinkSpectrumMatch > top_csms_spectrum;
// ignore this spectrum pair, if they have less paired peaks than the minimal peptide size
if (all_peaks.size() < peptide_min_size_)
{
continue;
}
vector <OPXLDataStructs::ProteinProteinCrossLink> cross_link_candidates = OPXLHelper::collectPrecursorCandidates(precursor_correction_steps_, precursor_mass, precursor_mass_tolerance_, precursor_mass_tolerance_unit_ppm_, filtered_peptide_masses, cross_link_mass_light_, cross_link_mass_mono_link_, cross_link_residue1_, cross_link_residue2_, cross_link_name_);
spectrum_counter++;
cout << "Processing spectrum pair " << spectrum_counter << " / " << spectrum_pairs.size() << endl;
cout << "Light Spectrum ID: " << spectrum_light.getNativeID() << " |\tHeavy Spectrum ID: " << spectra[scan_index_heavy].getNativeID() << "\t| at: " << DateTime::now().getTime() << endl;
cout << "Number of peaks in light spectrum: " << spectrum_light.size() << " |\tNumber of candidates: " << cross_link_candidates.size() << endl;
// lists for one spectrum, to determine best match to the spectrum
vector< OPXLDataStructs::CrossLinkSpectrumMatch > all_csms_spectrum;
vector< OPXLDataStructs::CrossLinkSpectrumMatch > mainscore_csms_spectrum;
#pragma omp parallel for schedule(guided)
for (SignedSize i = 0; i < static_cast<SignedSize>(cross_link_candidates.size()); ++i)
{
OPXLDataStructs::ProteinProteinCrossLink cross_link_candidate = cross_link_candidates[i];
std::vector< SimpleTSGXLMS::SimplePeak > theoretical_spec_linear_alpha;
theoretical_spec_linear_alpha.reserve(1500);
std::vector< SimpleTSGXLMS::SimplePeak > theoretical_spec_linear_beta;
std::vector< SimpleTSGXLMS::SimplePeak > theoretical_spec_xlinks_alpha;
std::vector< SimpleTSGXLMS::SimplePeak > theoretical_spec_xlinks_beta;
bool type_is_cross_link = cross_link_candidate.getType() == OPXLDataStructs::CROSS;
bool type_is_loop = cross_link_candidate.getType() == OPXLDataStructs::LOOP;
Size link_pos_B = 0;
if (type_is_loop)
{
link_pos_B = cross_link_candidate.cross_link_position.second;
}
AASequence alpha;
AASequence beta;
if (cross_link_candidate.alpha) { alpha = *cross_link_candidate.alpha; }
if (cross_link_candidate.beta) { beta = *cross_link_candidate.beta; }
specGen_mainscore.getLinearIonSpectrum(theoretical_spec_linear_alpha, alpha, cross_link_candidate.cross_link_position.first, 2, link_pos_B);
if (type_is_cross_link)
{
theoretical_spec_linear_beta.reserve(1500);
specGen_mainscore.getLinearIonSpectrum(theoretical_spec_linear_beta, beta, cross_link_candidate.cross_link_position.second, 2);
}
// Something like this can happen, e.g. with a loop link connecting the first and last residue of a peptide
if (theoretical_spec_linear_alpha.empty())
{
continue;
}
vector< pair< Size, Size > > matched_spec_linear_alpha;
vector< pair< Size, Size > > matched_spec_linear_beta;
vector< pair< Size, Size > > matched_spec_xlinks_alpha;
vector< pair< Size, Size > > matched_spec_xlinks_beta;
if (!linear_peaks.empty())
{
DataArrays::IntegerDataArray exp_charges;
if (!linear_peaks.getIntegerDataArrays().empty())
{
exp_charges = linear_peaks.getIntegerDataArrays()[0];
}
OPXLSpectrumProcessingAlgorithms::getSpectrumAlignmentSimple(matched_spec_linear_alpha, fragment_mass_tolerance_, fragment_mass_tolerance_unit_ppm_, theoretical_spec_linear_alpha, linear_peaks, exp_charges);
OPXLSpectrumProcessingAlgorithms::getSpectrumAlignmentSimple(matched_spec_linear_beta, fragment_mass_tolerance_, fragment_mass_tolerance_unit_ppm_, theoretical_spec_linear_beta, linear_peaks, exp_charges);
}
// drop candidates with almost no linear fragment peak matches before making the more complex theoretical spectra and aligning them
// this removes hits that no one would trust after manual validation anyway and reduces time wasted on really bad spectra or candidates without any matching peaks
if (matched_spec_linear_alpha.size() < 2 || (type_is_cross_link && matched_spec_linear_beta.size() < 2) )
{
continue;
}
theoretical_spec_xlinks_alpha.reserve(1500);
if (type_is_cross_link)
{
theoretical_spec_xlinks_beta.reserve(1500);
specGen_mainscore.getXLinkIonSpectrum(theoretical_spec_xlinks_alpha, cross_link_candidate, true, 2, precursor_charge);
specGen_mainscore.getXLinkIonSpectrum(theoretical_spec_xlinks_beta, cross_link_candidate, false, 2, precursor_charge);
}
else
{
// Function for mono-links or loop-links
specGen_mainscore.getXLinkIonSpectrum(theoretical_spec_xlinks_alpha, alpha, cross_link_candidate.cross_link_position.first, precursor_mass, 1, precursor_charge, link_pos_B);
}
if (theoretical_spec_xlinks_alpha.empty())
{
continue;
}
if (!xlink_peaks.empty())
{
DataArrays::IntegerDataArray exp_charges;
if (!xlink_peaks.getIntegerDataArrays().empty())
{
exp_charges = xlink_peaks.getIntegerDataArrays()[0];
}
OPXLSpectrumProcessingAlgorithms::getSpectrumAlignmentSimple(matched_spec_xlinks_alpha, fragment_mass_tolerance_xlinks_, fragment_mass_tolerance_unit_ppm_, theoretical_spec_xlinks_alpha, xlink_peaks, exp_charges);
OPXLSpectrumProcessingAlgorithms::getSpectrumAlignmentSimple(matched_spec_xlinks_beta, fragment_mass_tolerance_xlinks_, fragment_mass_tolerance_unit_ppm_, theoretical_spec_xlinks_beta, xlink_peaks, exp_charges);
}
// the maximal xlink ion charge is (precursor charge - 1) and the minimal xlink ion charge is 2.
// we need the difference between min and max here, which is (precursor_charge - 3) in most cases
// but we also need a number > 0, we set 1 as the minimum, in case the precursor charge is only 3 or smaller
Size n_xlink_charges = 1;
if (precursor_charge > 3)
{
n_xlink_charges = precursor_charge - 3;
}
      // compute match odds (unweighted) for the linear and cross-linked fragments of the alpha peptide
      // (theoretical spectra were generated with a maximal fragment charge of 2; xlink ions use n_xlink_charges states)
double match_odds_c_alpha = XQuestScores::matchOddsScoreSimpleSpec(theoretical_spec_linear_alpha, matched_spec_linear_alpha.size(), fragment_mass_tolerance_, fragment_mass_tolerance_unit_ppm_);
double match_odds_x_alpha = XQuestScores::matchOddsScoreSimpleSpec(theoretical_spec_xlinks_alpha, matched_spec_xlinks_alpha.size(), fragment_mass_tolerance_xlinks_, fragment_mass_tolerance_unit_ppm_, true, n_xlink_charges);
double match_odds = 0;
double match_odds_alpha = 0;
double match_odds_beta = 0;
if (type_is_cross_link)
{
double match_odds_c_beta = XQuestScores::matchOddsScoreSimpleSpec(theoretical_spec_linear_beta, matched_spec_linear_beta.size(), fragment_mass_tolerance_, fragment_mass_tolerance_unit_ppm_);
double match_odds_x_beta = XQuestScores::matchOddsScoreSimpleSpec(theoretical_spec_xlinks_beta, matched_spec_xlinks_beta.size(), fragment_mass_tolerance_xlinks_, fragment_mass_tolerance_unit_ppm_, true, n_xlink_charges);
match_odds = (match_odds_c_alpha + match_odds_x_alpha + match_odds_c_beta + match_odds_x_beta) / 4;
match_odds_alpha = (match_odds_c_alpha + match_odds_x_alpha) / 2;
match_odds_beta = (match_odds_c_beta + match_odds_x_beta) / 2;
}
else
{
match_odds = (match_odds_c_alpha + match_odds_x_alpha) / 2;
match_odds_alpha = match_odds;
}
OPXLDataStructs::CrossLinkSpectrumMatch csm;
csm.cross_link = cross_link_candidate;
csm.precursor_correction = cross_link_candidate.precursor_correction;
double rel_error = OPXLHelper::computePrecursorError(csm, precursor_mz, precursor_charge);
double new_match_odds_weight = 0.2;
double new_rel_error_weight = -0.03;
double new_score = new_match_odds_weight * std::log(1e-7 + match_odds) + new_rel_error_weight * abs(rel_error);
csm.score = new_score;
csm.match_odds = match_odds;
csm.match_odds_alpha = match_odds_alpha;
csm.match_odds_beta = match_odds_beta;
csm.precursor_error_ppm = rel_error;
#pragma omp critical (mainscore_csms_spectrum_access)
mainscore_csms_spectrum.push_back(csm);
}
// progresslogger.endProgress();
std::sort(mainscore_csms_spectrum.rbegin(), mainscore_csms_spectrum.rend(), OPXLDataStructs::CLSMScoreComparator());
int last_candidate_index = static_cast<int>(mainscore_csms_spectrum.size());
last_candidate_index = std::min(last_candidate_index, number_top_hits_);
#pragma omp parallel for schedule(guided)
for (int i = 0; i < last_candidate_index ; ++i)
{
OPXLDataStructs::ProteinProteinCrossLink cross_link_candidate = mainscore_csms_spectrum[i].cross_link;
AASequence alpha;
AASequence beta;
if (cross_link_candidate.alpha) { alpha = *cross_link_candidate.alpha; }
if (cross_link_candidate.beta) { beta = *cross_link_candidate.beta; }
#ifdef DEBUG_OPENPEPXLALGO
double candidate_mz = (alpha.getMonoWeight() + beta.getMonoWeight() + cross_link_candidate.cross_linker_mass+ (static_cast<double>(precursor_charge) * Constants::PROTON_MASS_U)) / precursor_charge;
#pragma omp critical (LOG_DEBUG_access)
{
OPENMS_LOG_DEBUG << "Pair: " << alpha.toString() << "-" << beta.toString() << " matched to light spectrum " << scan_index << "\t and heavy spectrum " << scan_index_heavy
<< " with m/z: " << precursor_mz << "\t" << "and candidate m/z: " << candidate_mz << "\tK Positions: " << cross_link_candidate.cross_link_position.first << "\t" << cross_link_candidate.cross_link_position.second << endl;
}
#endif
OPXLDataStructs::CrossLinkSpectrumMatch csm = mainscore_csms_spectrum[i];
csm.cross_link = cross_link_candidate;
PeakSpectrum theoretical_spec_linear_alpha;
theoretical_spec_linear_alpha.reserve(1500);
PeakSpectrum theoretical_spec_linear_beta;
PeakSpectrum theoretical_spec_xlinks_alpha;
theoretical_spec_xlinks_alpha.reserve(1500);
PeakSpectrum theoretical_spec_xlinks_beta;
bool type_is_cross_link = cross_link_candidate.getType() == OPXLDataStructs::CROSS;
bool type_is_loop = cross_link_candidate.getType() == OPXLDataStructs::LOOP;
Size link_pos_B = 0;
if (type_is_loop)
{
link_pos_B = cross_link_candidate.cross_link_position.second;
}
specGen.getLinearIonSpectrum(theoretical_spec_linear_alpha, alpha, cross_link_candidate.cross_link_position.first, true, 2, link_pos_B);
if (type_is_cross_link)
{
theoretical_spec_linear_beta.reserve(1500);
theoretical_spec_xlinks_beta.reserve(1500);
specGen.getLinearIonSpectrum(theoretical_spec_linear_beta, beta, cross_link_candidate.cross_link_position.second, false, 2);
specGen.getXLinkIonSpectrum(theoretical_spec_xlinks_alpha, cross_link_candidate, true, 1, precursor_charge);
specGen.getXLinkIonSpectrum(theoretical_spec_xlinks_beta, cross_link_candidate, false, 1, precursor_charge);
}
else
{
// Function for mono-links or loop-links
specGen.getXLinkIonSpectrum(theoretical_spec_xlinks_alpha, alpha, cross_link_candidate.cross_link_position.first, precursor_mass, true, 2, precursor_charge, link_pos_B);
}
vector< pair< Size, Size > > matched_spec_linear_alpha;
vector< pair< Size, Size > > matched_spec_linear_beta;
vector< pair< Size, Size > > matched_spec_xlinks_alpha;
vector< pair< Size, Size > > matched_spec_xlinks_beta;
DataArrays::FloatDataArray ppm_error_array_linear_alpha;
DataArrays::FloatDataArray ppm_error_array_xlinks_alpha;
DataArrays::FloatDataArray ppm_error_array_linear_beta;
DataArrays::FloatDataArray ppm_error_array_xlinks_beta;
if (!linear_peaks.empty())
{
DataArrays::IntegerDataArray theo_charges_alpha;
DataArrays::IntegerDataArray theo_charges_beta;
DataArrays::IntegerDataArray exp_charges;
auto theo_alpha_it = getDataArrayByName(theoretical_spec_linear_alpha.getIntegerDataArrays(), "charge");
theo_charges_alpha = *theo_alpha_it;
if (!theoretical_spec_linear_beta.empty())
{
auto theo_beta_it = getDataArrayByName(theoretical_spec_linear_beta.getIntegerDataArrays(), "charge");
theo_charges_beta = *theo_beta_it;
}
auto exp_it = getDataArrayByName(linear_peaks.getIntegerDataArrays(), "charge");
if (exp_it != linear_peaks.getIntegerDataArrays().end())
{
if (!exp_it->empty())
{
exp_charges = *exp_it;
}
}
OPXLSpectrumProcessingAlgorithms::getSpectrumAlignmentFastCharge(matched_spec_linear_alpha, fragment_mass_tolerance_, fragment_mass_tolerance_unit_ppm_, theoretical_spec_linear_alpha, linear_peaks, theo_charges_alpha, exp_charges, ppm_error_array_linear_alpha);
OPXLSpectrumProcessingAlgorithms::getSpectrumAlignmentFastCharge(matched_spec_linear_beta, fragment_mass_tolerance_, fragment_mass_tolerance_unit_ppm_, theoretical_spec_linear_beta, linear_peaks, theo_charges_beta, exp_charges, ppm_error_array_linear_beta);
}
if (!xlink_peaks.empty())
{
DataArrays::IntegerDataArray theo_charges_alpha;
DataArrays::IntegerDataArray theo_charges_beta;
DataArrays::IntegerDataArray exp_charges;
auto theo_alpha_it = getDataArrayByName(theoretical_spec_xlinks_alpha.getIntegerDataArrays(), "charge");
theo_charges_alpha = *theo_alpha_it;
if (!theoretical_spec_xlinks_beta.empty())
{
auto theo_beta_it = getDataArrayByName(theoretical_spec_xlinks_beta.getIntegerDataArrays(), "charge");
theo_charges_beta = *theo_beta_it;
}
auto exp_it = getDataArrayByName(xlink_peaks.getIntegerDataArrays(), "charge");
exp_charges = *exp_it;
OPXLSpectrumProcessingAlgorithms::getSpectrumAlignmentFastCharge(matched_spec_xlinks_alpha, fragment_mass_tolerance_xlinks_, fragment_mass_tolerance_unit_ppm_, theoretical_spec_xlinks_alpha, xlink_peaks, theo_charges_alpha, exp_charges, ppm_error_array_xlinks_alpha);
OPXLSpectrumProcessingAlgorithms::getSpectrumAlignmentFastCharge(matched_spec_xlinks_beta, fragment_mass_tolerance_xlinks_, fragment_mass_tolerance_unit_ppm_, theoretical_spec_xlinks_beta, xlink_peaks, theo_charges_beta, exp_charges, ppm_error_array_xlinks_beta);
}
// Pre-Score calculations
Size matched_alpha_count = matched_spec_linear_alpha.size() + matched_spec_xlinks_alpha.size();
Size theor_alpha_count = theoretical_spec_linear_alpha.size() + theoretical_spec_xlinks_alpha.size();
Size matched_beta_count = matched_spec_linear_beta.size() + matched_spec_xlinks_beta.size();
Size theor_beta_count = theoretical_spec_linear_beta.size() + theoretical_spec_xlinks_beta.size();
#ifdef DEBUG_OPENPEPXLALGO
#pragma omp critical (LOG_DEBUG_access)
{
OPENMS_LOG_DEBUG << "matched peaks: " << matched_alpha_count + matched_beta_count << endl;
OPENMS_LOG_DEBUG << "theoretical peaks: " << theor_alpha_count + theor_beta_count << endl;
OPENMS_LOG_DEBUG << "exp peaks: " << all_peaks.size() << endl;
}
#endif
if (matched_alpha_count + matched_beta_count > 0)
{
// Simplified pre-Score
double pre_score = 0;
if (type_is_cross_link)
{
pre_score = XQuestScores::preScore(matched_alpha_count, theor_alpha_count, matched_beta_count, theor_beta_count);
}
else
{
pre_score = XQuestScores::preScore(matched_alpha_count, theor_alpha_count);
}
// compute intsum score
double intsum = XQuestScores::totalMatchedCurrent(matched_spec_linear_alpha, matched_spec_linear_beta, matched_spec_xlinks_alpha, matched_spec_xlinks_beta, linear_peaks, xlink_peaks);
// Total ion intensity of light spectrum
// sum over linear and xlink ion spectra instead of unfiltered
double total_current = 0;
for (SignedSize j = 0; j < static_cast<SignedSize>(linear_peaks.size()); ++j)
{
total_current += linear_peaks[j].getIntensity();
}
for (SignedSize j = 0; j < static_cast<SignedSize>(xlink_peaks.size()); ++j)
{
total_current += xlink_peaks[j].getIntensity();
}
double TIC = intsum / total_current;
// TIC_alpha and _beta
double intsum_alpha = XQuestScores::matchedCurrentChain(matched_spec_linear_alpha, matched_spec_xlinks_alpha, linear_peaks, xlink_peaks);
double intsum_beta = 0;
if (type_is_cross_link)
{
intsum_beta = XQuestScores::matchedCurrentChain(matched_spec_linear_beta, matched_spec_xlinks_beta, linear_peaks, xlink_peaks);
}
// normalize TIC_alpha and _beta
if ((intsum_alpha + intsum_beta) > 0.0)
{
intsum_alpha = intsum_alpha * intsum / (intsum_alpha + intsum_beta);
intsum_beta = intsum_beta * intsum / (intsum_alpha + intsum_beta);
}
// compute wTIC
double wTIC = XQuestScores::weightedTICScore(alpha.size(), beta.size(), intsum_alpha, intsum_beta, total_current, type_is_cross_link);
double wTICold = XQuestScores::weightedTICScoreXQuest(alpha.size(), beta.size(), intsum_alpha, intsum_beta, total_current, type_is_cross_link);
        // compute log occupancy probability scores (unweighted) for linear and cross-linked fragments
double log_occu_c_alpha = XQuestScores::logOccupancyProb(theoretical_spec_linear_alpha, matched_spec_linear_alpha.size(), fragment_mass_tolerance_, fragment_mass_tolerance_unit_ppm_);
double log_occu_x_alpha = XQuestScores::logOccupancyProb(theoretical_spec_xlinks_alpha, matched_spec_xlinks_alpha.size(), fragment_mass_tolerance_xlinks_ , fragment_mass_tolerance_unit_ppm_);
double log_occu = 0;
double log_occu_alpha = 0;
double log_occu_beta = 0;
if (type_is_cross_link)
{
double log_occu_c_beta = XQuestScores::logOccupancyProb(theoretical_spec_linear_beta, matched_spec_linear_beta.size(), fragment_mass_tolerance_, fragment_mass_tolerance_unit_ppm_);
double log_occu_x_beta = XQuestScores::logOccupancyProb(theoretical_spec_xlinks_beta, matched_spec_xlinks_beta.size(), fragment_mass_tolerance_xlinks_, fragment_mass_tolerance_unit_ppm_);
log_occu = (log_occu_c_alpha + log_occu_x_alpha + log_occu_c_beta + log_occu_x_beta) / 4;
log_occu_alpha = (log_occu_c_alpha + log_occu_x_alpha) / 2;
log_occu_beta = (log_occu_c_beta + log_occu_x_beta) / 2;
}
else
{
log_occu = (log_occu_c_alpha + log_occu_x_alpha) / 2;
log_occu_alpha = log_occu;
}
//Cross-correlation
PeakSpectrum theoretical_spec_linear;
PeakSpectrum theoretical_spec_xlinks;
if (type_is_cross_link)
{
theoretical_spec_linear = OPXLSpectrumProcessingAlgorithms::mergeAnnotatedSpectra(theoretical_spec_linear_alpha, theoretical_spec_linear_beta);
theoretical_spec_xlinks = OPXLSpectrumProcessingAlgorithms::mergeAnnotatedSpectra(theoretical_spec_xlinks_alpha, theoretical_spec_xlinks_beta);
}
else
{
theoretical_spec_linear = theoretical_spec_linear_alpha;
theoretical_spec_xlinks = theoretical_spec_xlinks_alpha;
}
PeakSpectrum theoretical_spec = OPXLSpectrumProcessingAlgorithms::mergeAnnotatedSpectra(theoretical_spec_linear, theoretical_spec_xlinks);
PeakSpectrum theoretical_spec_alpha = OPXLSpectrumProcessingAlgorithms::mergeAnnotatedSpectra(theoretical_spec_linear_alpha, theoretical_spec_xlinks_alpha);
PeakSpectrum theoretical_spec_beta;
if (type_is_cross_link)
{
theoretical_spec_beta = OPXLSpectrumProcessingAlgorithms::mergeAnnotatedSpectra(theoretical_spec_linear_beta, theoretical_spec_xlinks_beta);
}
double xcorrx_max = XQuestScores::xCorrelationPrescore(xlink_peaks, theoretical_spec_xlinks, 0.1);
double xcorrc_max = XQuestScores::xCorrelationPrescore(linear_peaks, theoretical_spec_linear, 0.1);
// Compute score from the 4 scores and 4 weights
// The weights are adapted from the xQuest algorithm (O. Rinner et al., 2008, "Identification of cross-linked peptides from large sequence databases"),
// they were determined by an Linear Discriminant Analysis on CID fragmentation data.
double xcorrx_weight = 2.488;
double xcorrc_weight = 21.279;
double match_odds_weight = 1.973;
double wTIC_weight = 12.829;
double intsum_weight = 1.8;
double xquest_score = xcorrx_weight * xcorrx_max + xcorrc_weight * xcorrc_max + match_odds_weight * csm.match_odds + wTIC_weight * wTICold + intsum_weight * intsum;
csm.xquest_score = xquest_score;
csm.pre_score = pre_score;
csm.percTIC = TIC;
csm.wTIC = wTIC;
csm.wTICold = wTICold;
csm.int_sum = intsum;
csm.intsum_alpha = intsum_alpha;
csm.intsum_beta = intsum_beta;
csm.total_current = total_current;
csm.log_occupancy = log_occu;
csm.log_occupancy_alpha = log_occu_alpha;
csm.log_occupancy_beta = log_occu_beta;
csm.xcorrx_max = xcorrx_max;
csm.xcorrc_max = xcorrc_max;
csm.matched_linear_alpha = matched_spec_linear_alpha.size();
csm.matched_linear_beta = matched_spec_linear_beta.size();
csm.matched_xlink_alpha = matched_spec_xlinks_alpha.size();
csm.matched_xlink_beta = matched_spec_xlinks_beta.size();
csm.scan_index_light = scan_index;
csm.scan_index_heavy = scan_index_heavy;
if (precursor_purities.size() > scan_index)
{
csm.precursor_total_intensity = precursor_purities[spectrum_light.getNativeID()].total_intensity;
csm.precursor_target_intensity = precursor_purities[spectrum_light.getNativeID()].target_intensity;
csm.precursor_signal_proportion = precursor_purities[spectrum_light.getNativeID()].signal_proportion;
csm.precursor_target_peak_count = precursor_purities[spectrum_light.getNativeID()].target_peak_count;
csm.precursor_residual_peak_count = precursor_purities[spectrum_light.getNativeID()].interfering_peak_count;
}
// num_iso_peaks array from deisotoping
if (deisotope)
{
auto num_iso_peaks_array_it = getDataArrayByName(all_peaks.getIntegerDataArrays(), "iso_peak_count");
DataArrays::IntegerDataArray num_iso_peaks_array = *num_iso_peaks_array_it;
auto num_iso_peaks_array_linear_it = getDataArrayByName(linear_peaks.getIntegerDataArrays(), "iso_peak_count");
DataArrays::IntegerDataArray num_iso_peaks_array_linear = *num_iso_peaks_array_linear_it;
auto num_iso_peaks_array_xlinks_it = getDataArrayByName(xlink_peaks.getIntegerDataArrays(), "iso_peak_count");
DataArrays::IntegerDataArray num_iso_peaks_array_xlinks = *num_iso_peaks_array_xlinks_it;
csm.num_iso_peaks_mean = Math::mean(num_iso_peaks_array.begin(), num_iso_peaks_array.end());
vector< double > iso_peaks_linear_alpha;
vector< double > iso_peaks_linear_beta;
vector< double > iso_peaks_xlinks_alpha;
vector< double > iso_peaks_xlinks_beta;
if (!matched_spec_linear_alpha.empty())
{
for (const auto& match : matched_spec_linear_alpha)
{
iso_peaks_linear_alpha.push_back(num_iso_peaks_array_linear[match.second]);
}
csm.num_iso_peaks_mean_linear_alpha = Math::mean(iso_peaks_linear_alpha.begin(), iso_peaks_linear_alpha.end());
}
if (!matched_spec_linear_beta.empty())
{
for (const auto& match : matched_spec_linear_beta)
{
iso_peaks_linear_beta.push_back(num_iso_peaks_array_linear[match.second]);
}
csm.num_iso_peaks_mean_linear_beta = Math::mean(iso_peaks_linear_beta.begin(), iso_peaks_linear_beta.end());
}
if (!matched_spec_xlinks_alpha.empty())
{
for (const auto& match : matched_spec_xlinks_alpha)
{
iso_peaks_xlinks_alpha.push_back(num_iso_peaks_array_xlinks[match.second]);
}
if (!iso_peaks_xlinks_alpha.empty())
{
csm.num_iso_peaks_mean_xlinks_alpha = Math::mean(iso_peaks_xlinks_alpha.begin(), iso_peaks_xlinks_alpha.end());
}
}
if (!matched_spec_xlinks_beta.empty())
{
for (const auto& match : matched_spec_xlinks_beta)
{
iso_peaks_xlinks_beta.push_back(num_iso_peaks_array_xlinks[match.second]);
}
if (!iso_peaks_xlinks_beta.empty())
{
csm.num_iso_peaks_mean_xlinks_beta = Math::mean(iso_peaks_xlinks_beta.begin(), iso_peaks_xlinks_beta.end());
}
}
}
if (!ppm_error_array_linear_alpha.empty())
{
for (Size k = 0; k < ppm_error_array_linear_alpha.size(); ++k)
{
csm.ppm_error_abs_sum_linear_alpha += abs(ppm_error_array_linear_alpha[k]);
}
csm.ppm_error_abs_sum_linear_alpha = csm.ppm_error_abs_sum_linear_alpha / ppm_error_array_linear_alpha.size();
}
if (!ppm_error_array_linear_beta.empty())
{
for (Size k = 0; k < ppm_error_array_linear_beta.size(); ++k)
{
csm.ppm_error_abs_sum_linear_beta += abs(ppm_error_array_linear_beta[k]);
}
csm.ppm_error_abs_sum_linear_beta = csm.ppm_error_abs_sum_linear_beta / ppm_error_array_linear_beta.size();
}
if (!ppm_error_array_xlinks_alpha.empty())
{
for (Size k = 0; k < ppm_error_array_xlinks_alpha.size(); ++k)
{
csm.ppm_error_abs_sum_xlinks_alpha += abs(ppm_error_array_xlinks_alpha[k]);
}
csm.ppm_error_abs_sum_xlinks_alpha = csm.ppm_error_abs_sum_xlinks_alpha / ppm_error_array_xlinks_alpha.size();
}
if (!ppm_error_array_xlinks_beta.empty())
{
for (Size k = 0; k < ppm_error_array_xlinks_beta.size(); ++k)
{
csm.ppm_error_abs_sum_xlinks_beta += abs(ppm_error_array_xlinks_beta[k]);
}
csm.ppm_error_abs_sum_xlinks_beta = csm.ppm_error_abs_sum_xlinks_beta / ppm_error_array_xlinks_beta.size();
}
DataArrays::FloatDataArray ppm_error_array_linear;
DataArrays::FloatDataArray ppm_error_array_xlinks;
DataArrays::FloatDataArray ppm_error_array_alpha;
DataArrays::FloatDataArray ppm_error_array_beta;
DataArrays::FloatDataArray ppm_error_array;
ppm_error_array_linear.insert(ppm_error_array_linear.end(), ppm_error_array_linear_alpha.begin(), ppm_error_array_linear_alpha.end());
ppm_error_array_linear.insert(ppm_error_array_linear.end(), ppm_error_array_linear_beta.begin(), ppm_error_array_linear_beta.end());
ppm_error_array_xlinks.insert(ppm_error_array_xlinks.end(), ppm_error_array_xlinks_alpha.begin(), ppm_error_array_xlinks_alpha.end());
ppm_error_array_xlinks.insert(ppm_error_array_xlinks.end(), ppm_error_array_xlinks_beta.begin(), ppm_error_array_xlinks_beta.end());
ppm_error_array_alpha.insert(ppm_error_array_alpha.end(), ppm_error_array_linear_alpha.begin(), ppm_error_array_linear_alpha.end());
ppm_error_array_alpha.insert(ppm_error_array_alpha.end(), ppm_error_array_xlinks_alpha.begin(), ppm_error_array_xlinks_alpha.end());
ppm_error_array_beta.insert(ppm_error_array_beta.end(), ppm_error_array_linear_beta.begin(), ppm_error_array_linear_beta.end());
ppm_error_array_beta.insert(ppm_error_array_beta.end(), ppm_error_array_xlinks_beta.begin(), ppm_error_array_xlinks_beta.end());
ppm_error_array.insert(ppm_error_array.end(), ppm_error_array_linear.begin(), ppm_error_array_linear.end());
ppm_error_array.insert(ppm_error_array.end(), ppm_error_array_xlinks.begin(), ppm_error_array_xlinks.end());
if (!ppm_error_array_linear.empty())
{
for (double ppm_error : ppm_error_array_linear)
{
csm.ppm_error_abs_sum_linear += abs(ppm_error);
}
csm.ppm_error_abs_sum_linear = csm.ppm_error_abs_sum_linear / ppm_error_array_linear.size();
}
if (!ppm_error_array_xlinks.empty())
{
for (double ppm_error : ppm_error_array_xlinks)
{
csm.ppm_error_abs_sum_xlinks += abs(ppm_error);
}
csm.ppm_error_abs_sum_xlinks = csm.ppm_error_abs_sum_xlinks / ppm_error_array_xlinks.size();
}
if (!ppm_error_array_alpha.empty())
{
for (double ppm_error : ppm_error_array_alpha)
{
csm.ppm_error_abs_sum_alpha += abs(ppm_error);
}
csm.ppm_error_abs_sum_alpha = csm.ppm_error_abs_sum_alpha / ppm_error_array_alpha.size();
}
if (!ppm_error_array_beta.empty())
{
for (double ppm_error : ppm_error_array_beta)
{
csm.ppm_error_abs_sum_beta += abs(ppm_error);
}
csm.ppm_error_abs_sum_beta = csm.ppm_error_abs_sum_beta / ppm_error_array_beta.size();
}
if (!ppm_error_array.empty())
{
for (double ppm_error : ppm_error_array)
{
csm.ppm_error_abs_sum += abs(ppm_error);
}
csm.ppm_error_abs_sum = csm.ppm_error_abs_sum / ppm_error_array.size();
}
// write fragment annotations
vector<PeptideHit::PeakAnnotation> frag_annotations;
OPXLHelper::buildFragmentAnnotations(frag_annotations, matched_spec_linear_alpha, theoretical_spec_linear_alpha, linear_peaks);
OPXLHelper::buildFragmentAnnotations(frag_annotations, matched_spec_linear_beta, theoretical_spec_linear_beta, linear_peaks);
OPXLHelper::buildFragmentAnnotations(frag_annotations, matched_spec_xlinks_alpha, theoretical_spec_xlinks_alpha, xlink_peaks);
OPXLHelper::buildFragmentAnnotations(frag_annotations, matched_spec_xlinks_beta, theoretical_spec_xlinks_beta, xlink_peaks);
// make annotations unique
sort(frag_annotations.begin(), frag_annotations.end());
vector<PeptideHit::PeakAnnotation>::iterator last_unique_anno = unique(frag_annotations.begin(), frag_annotations.end());
if (last_unique_anno != frag_annotations.end())
{
frag_annotations.erase(last_unique_anno, frag_annotations.end());
}
csm.frag_annotations = frag_annotations;
#pragma omp critical (all_csms_spectrum_access)
{
all_csms_spectrum.push_back(csm);
}
}
} // end of parallel loop over top X candidates
// collect top n matches to spectrum
sort(all_csms_spectrum.rbegin(), all_csms_spectrum.rend(), OPXLDataStructs::CLSMScoreComparator());
Size max_hit = min(all_csms_spectrum.size(), static_cast<Size>(number_top_hits_));
for (Size top = 0; top < max_hit; top++)
{
all_csms_spectrum[top].rank = top+1;
top_csms_spectrum.push_back(all_csms_spectrum[top]);
}
Size all_top_csms_current_index = 0;
#pragma omp critical (all_top_csms_access)
{
if (!top_csms_spectrum.empty())
{
all_top_csms.push_back(top_csms_spectrum);
all_top_csms_current_index = all_top_csms.size()-1;
}
}
// Write PeptideIdentifications and PeptideHits for n top hits of this spectrum
if (!top_csms_spectrum.empty())
{
OPXLHelper::buildPeptideIDs(peptide_ids, top_csms_spectrum, all_top_csms, all_top_csms_current_index, spectra, scan_index, scan_index_heavy);
}
#ifdef DEBUG_OPENPEPXLALGO
#pragma omp critical (LOG_DEBUG_access)
OPENMS_LOG_DEBUG << "Next Spectrum #############################################" << endl;
#endif
} // end of matching / scoring, end of parallel for-loop
progresslogger.endProgress();
peptide_ids = OPXLHelper::combineTopRanksFromPairs(peptide_ids, number_top_hits_);
// Add protein identifications
PeptideIndexing pep_indexing;
Param indexing_param = pep_indexing.getParameters();
String d_prefix = decoy_prefix_ ? "prefix" : "suffix";
indexing_param.setValue("decoy_string_position", d_prefix, "If set, protein accessions in the database contain 'decoy_string' as prefix.");
indexing_param.setValue("decoy_string", decoy_string_, "String that was appended (or prefixed - see 'prefix' flag below) to the accessions in the protein database to indicate decoy proteins.");
indexing_param.setValue("missing_decoy_action", "warn");
indexing_param.setValue("enzyme:name", enzyme_name_);
pep_indexing.setParameters(indexing_param);
pep_indexing.run(fasta_db, protein_ids, peptide_ids);
OPXLHelper::addProteinPositionMetaValues(peptide_ids);
OPXLHelper::addBetaAccessions(peptide_ids);
OPXLHelper::addXLTargetDecoyMV(peptide_ids);
OPXLHelper::removeBetaPeptideHits(peptide_ids);
OPXLHelper::computeDeltaScores(peptide_ids);
OPXLHelper::addPercolatorFeatureList(protein_ids[0]);
return OpenPepXLAlgorithm::ExitCodes::EXECUTION_OK;
}
// create linear / shifted peak spectra for all pairs
  /**
    @brief Builds per-pair "linear" and "xlink" peak spectra for labeled (light/heavy) spectrum pairs.

    For each (light, heavy) pair:
      - light peaks that also occur in the heavy spectrum WITHOUT an m/z shift are collected as
        linear (non-cross-linked) fragment candidates
      - light peaks that match heavy peaks AFTER shifting the heavy spectrum by
        (cross_link_mass_iso_shift / charge) are collected as cross-linked fragment candidates,
        since only label-carrying fragments show the isotopic mass shift

    @param spectra preprocessed experiment; indexed by the entries of @p spectrum_pairs
    @param spectrum_pairs indices of (light, heavy) spectrum pairs
    @param cross_link_mass_iso_shift mass difference between heavy and light cross-linker
    @param fragment_mass_tolerance tolerance used for the unshifted (linear) alignment
    @param fragment_mass_tolerance_xlinks tolerance used for the shifted (xlink) alignment
    @param fragment_mass_tolerance_unit_ppm true: tolerances are in ppm, false: in Da
    @param deisotope true if spectra carry "charge" / "iso_peak_count" integer data arrays from deisotoping
    @return container with linear, xlink and merged peak spectra, one entry per input pair
  */
  OPXLDataStructs::PreprocessedPairSpectra OpenPepXLAlgorithm::preprocessPairs_(const PeakMap& spectra, const vector< pair<Size, Size> >& spectrum_pairs, const double cross_link_mass_iso_shift, double fragment_mass_tolerance, double fragment_mass_tolerance_xlinks, bool fragment_mass_tolerance_unit_ppm, bool deisotope)
  {
    OPXLDataStructs::PreprocessedPairSpectra preprocessed_pair_spectra(spectrum_pairs.size());
    // pairs are independent of each other, so they can be processed in parallel;
    // results are written to per-pair slots, so only the final swap needs synchronization
#pragma omp parallel for
    for (SignedSize pair_index = 0; pair_index < static_cast<SignedSize>(spectrum_pairs.size()); ++pair_index)
    {
      Size scan_index = spectrum_pairs[pair_index].first;
      const PeakSpectrum& spectrum_light = spectra[scan_index];
      const Size scan_index_heavy = spectrum_pairs[pair_index].second;
      // NOTE(review): assumes the light spectrum has at least one precursor -- TODO confirm upstream filtering guarantees this
      Size max_charge_xlink = spectrum_light.getPrecursors()[0].getCharge();
      const PeakSpectrum& spectrum_heavy = spectra[scan_index_heavy];
      vector< pair< Size, Size > > matched_fragments_without_shift;
      DataArrays::FloatDataArray dummy_array;
      DataArrays::IntegerDataArray dummy_charges;
      // align light against heavy without any shift: matches are candidates for linear fragments
      OPXLSpectrumProcessingAlgorithms::getSpectrumAlignmentFastCharge(matched_fragments_without_shift, fragment_mass_tolerance, fragment_mass_tolerance_unit_ppm, spectrum_light, spectrum_heavy, dummy_charges, dummy_charges, dummy_array, 0.3);
      // transform by m/z difference between unlabeled and labeled cross-link to make heavy and light comparable.
      PeakSpectrum xlink_peaks;
      // charge annotations of the heavy spectrum and iso peak counts of the light spectrum (from deisotoping), if present
      PeakSpectrum::IntegerDataArray spectrum_heavy_charges;
      PeakSpectrum::IntegerDataArray spectrum_light_iso_peaks;
      auto spectrum_heavy_charges_it = getDataArrayByName(spectrum_heavy.getIntegerDataArrays(), "charge");
      if (spectrum_heavy_charges_it != spectrum_heavy.getIntegerDataArrays().end())
      {
        if (!spectrum_heavy_charges_it->empty())
        {
          spectrum_heavy_charges = *spectrum_heavy_charges_it;
        }
      }
      auto spectrum_light_iso_peaks_it = getDataArrayByName(spectrum_light.getIntegerDataArrays(), "iso_peak_count");
      if (spectrum_light_iso_peaks_it != spectrum_light.getIntegerDataArrays().end())
      {
        if (!spectrum_light_iso_peaks_it->empty())
        {
          spectrum_light_iso_peaks = *spectrum_light_iso_peaks_it;
        }
      }
      // set up the data arrays of the xlink spectrum up front; indices [0] = charge, [1] = iso_peak_count
      if (deisotope)
      {
        xlink_peaks.getIntegerDataArrays().resize(2);
        xlink_peaks.getIntegerDataArrays()[0].setName("charge");
        xlink_peaks.getIntegerDataArrays()[1].setName("iso_peak_count");
      }
      else
      {
        xlink_peaks.getIntegerDataArrays().resize(1);
        xlink_peaks.getIntegerDataArrays()[0].setName("charge");
      }
      // keep track of matched peaks
      vector<Size> used_peaks;
      // transform all peaks in the heavy spectrum by shifting them, considering all expected charge states
      for (Size charge = 1; charge <= max_charge_xlink; ++charge)
      {
        PeakSpectrum spectrum_heavy_to_light;
        PeakSpectrum::IntegerDataArray spectrum_heavy_to_light_charges;
        spectrum_heavy_to_light_charges.setName("charge");
        // the m/z shift of the label scales inversely with the fragment charge
        double mass_shift = cross_link_mass_iso_shift / charge;
        // transform heavy spectrum
        for (Size i = 0; i != spectrum_heavy.size(); ++i)
        {
          bool charge_fits = true;
          // check if the charge for the heavy peak determined by deisotoping matches the currently considered charge
          // NOTE(review): indexes spectrum_heavy_charges when deisotope is true -- presumably deisotoping
          // always annotated a "charge" array in that case; TODO confirm it cannot be empty here
          if (deisotope && spectrum_heavy_charges[i] != 0 && static_cast<unsigned int>(spectrum_heavy_charges[i]) != charge)
          {
            charge_fits = false;
          }
          if (charge_fits)
          {
            Peak1D p = spectrum_heavy[i];
            p.setMZ(p.getMZ() - mass_shift);
            spectrum_heavy_to_light.push_back(p);
            spectrum_heavy_to_light_charges.push_back(charge);
          }
        }
        spectrum_heavy_to_light.getIntegerDataArrays().push_back(spectrum_heavy_to_light_charges);
        // align peaks from light spectrum with shifted peaks from heavy spectrum
        // matching fragments are potentially carrying the cross-linker
        vector< pair< Size, Size > > matched_fragments_with_shift;
        // shifting may have changed the peak order, the alignment requires sorted spectra
        spectrum_heavy_to_light.sortByPosition();
        if (!spectrum_heavy_to_light.empty())
        {
          dummy_array.clear();
          OPXLSpectrumProcessingAlgorithms::getSpectrumAlignmentFastCharge(matched_fragments_with_shift, fragment_mass_tolerance_xlinks, fragment_mass_tolerance_unit_ppm, spectrum_light, spectrum_heavy_to_light, dummy_charges, dummy_charges, dummy_array, 0.3);
          // fill xlink_peaks spectrum with matched peaks from the light spectrum and add the currently considered charge
          for (Size i = 0; i < matched_fragments_with_shift.size(); ++i)
          {
            // test whether this peak was matched with a lower charge before (biased towards lower charge matches, if one light peak matches to multiple heavy peaks with different charges)
            vector<Size>::iterator it = find(used_peaks.begin(), used_peaks.end(), matched_fragments_with_shift[i].first);
            if (it == used_peaks.end())
            {
              xlink_peaks.push_back(spectrum_light[matched_fragments_with_shift[i].first]);
              xlink_peaks.getIntegerDataArrays()[0].push_back(charge);
              used_peaks.push_back(matched_fragments_with_shift[i].first);
              if (deisotope)
              {
                // NOTE(review): indexes spectrum_light_iso_peaks under deisotope -- TODO confirm the
                // "iso_peak_count" array is always present on deisotoped light spectra
                xlink_peaks.getIntegerDataArrays()[1].push_back(spectrum_light_iso_peaks[matched_fragments_with_shift[i].first]);
              }
            }
          }
        }
      }
      // generate linear peaks spectrum, include charges determined through deisotoping in preprocessing
      PeakSpectrum linear_peaks;
      PeakSpectrum::IntegerDataArray spectrum_light_charges;
      auto spectrum_light_charges_it = getDataArrayByName(spectrum_light.getIntegerDataArrays(), "charge");
      if (spectrum_light_charges_it != spectrum_light.getIntegerDataArrays().end())
      {
        if (!spectrum_light_charges_it->empty())
        {
          spectrum_light_charges = *spectrum_light_charges_it;
          linear_peaks.getIntegerDataArrays().resize(2);
          linear_peaks.getIntegerDataArrays()[0].setName("charge");
          linear_peaks.getIntegerDataArrays()[1].setName("iso_peak_count");
        }
      }
      // copy the unshifted matches (linear fragment candidates) from the light spectrum
      for (Size i = 0; i != matched_fragments_without_shift.size(); ++i)
      {
        linear_peaks.push_back(spectrum_light[matched_fragments_without_shift[i].first]);
        if (!spectrum_light_charges.empty())
        {
          linear_peaks.getIntegerDataArrays()[0].push_back(spectrum_light_charges[matched_fragments_without_shift[i].first]);
          linear_peaks.getIntegerDataArrays()[1].push_back(spectrum_light_iso_peaks[matched_fragments_without_shift[i].first]);
        }
      }
      // TODO replace with window mower
      // cap both sub-spectra at the most intense peaks to bound downstream scoring cost
      Size max_peak_number = 250;
      NLargest nfilter(max_peak_number);
      nfilter.filterSpectrum(linear_peaks);
      nfilter.filterSpectrum(xlink_peaks);
      // merged view over linear + xlink peaks (keeps the annotation arrays aligned)
      PeakSpectrum all_peaks = OPXLSpectrumProcessingAlgorithms::mergeAnnotatedSpectra(linear_peaks, xlink_peaks);
      linear_peaks.setPrecursors(spectrum_light.getPrecursors());
      xlink_peaks.setPrecursors(spectrum_light.getPrecursors());
      all_peaks.setPrecursors(spectrum_light.getPrecursors());
      linear_peaks.sortByPosition();
      xlink_peaks.sortByPosition();
      all_peaks.sortByPosition();
      // store results at the pair's slot; swap avoids copying the spectra
#pragma omp critical (preprocessed_pair_spectra_access)
      {
        swap(preprocessed_pair_spectra.spectra_linear_peaks[pair_index], linear_peaks);
        swap(preprocessed_pair_spectra.spectra_xlink_peaks[pair_index], xlink_peaks);
        swap(preprocessed_pair_spectra.spectra_all_peaks[pair_index], all_peaks);
      }
    }
    return preprocessed_pair_spectra;
  }
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Eugen Netz $
// $Authors: Eugen Netz $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/XLMS/OPXLSpectrumProcessingAlgorithms.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/KERNEL/SpectrumHelper.h>
// preprocessing and filtering
#include <OpenMS/PROCESSING/FILTERING/ThresholdMower.h>
#include <OpenMS/PROCESSING/SCALING/Normalizer.h>
#include <OpenMS/PROCESSING/FILTERING/NLargest.h>
#include <OpenMS/PROCESSING/FILTERING/WindowMower.h>
#include <OpenMS/PROCESSING/DEISOTOPING/Deisotoper.h>
#ifdef _OPENMP
#include <omp.h>
#endif
using namespace std;
namespace OpenMS
{
PeakSpectrum OPXLSpectrumProcessingAlgorithms::mergeAnnotatedSpectra(PeakSpectrum & first_spectrum, PeakSpectrum & second_spectrum)
{
// merge peaks: create new spectrum, insert peaks from first and then from second spectrum
PeakSpectrum resulting_spectrum;
resulting_spectrum.insert(resulting_spectrum.end(), first_spectrum.begin(), first_spectrum.end());
resulting_spectrum.insert(resulting_spectrum.end(), second_spectrum.begin(), second_spectrum.end());
// merge DataArrays in a similar way
for (Size i = 0; i < first_spectrum.getFloatDataArrays().size(); i++)
{
// TODO instead of this "if", get second array by name if available. would not be dependent on order.
if (second_spectrum.getFloatDataArrays().size() > i)
{
PeakSpectrum::FloatDataArray float_array;
float_array.insert(float_array.end(), first_spectrum.getFloatDataArrays()[i].begin(), first_spectrum.getFloatDataArrays()[i].end());
float_array.insert(float_array.end(), second_spectrum.getFloatDataArrays()[i].begin(), second_spectrum.getFloatDataArrays()[i].end());
resulting_spectrum.getFloatDataArrays().push_back(float_array);
resulting_spectrum.getFloatDataArrays()[i].setName(first_spectrum.getFloatDataArrays()[i].getName());
}
}
for (Size i = 0; i < first_spectrum.getStringDataArrays().size(); i++)
{
if (second_spectrum.getStringDataArrays().size() > i)
{
PeakSpectrum::StringDataArray string_array;
string_array.insert(string_array.end(), first_spectrum.getStringDataArrays()[i].begin(), first_spectrum.getStringDataArrays()[i].end());
string_array.insert(string_array.end(), second_spectrum.getStringDataArrays()[i].begin(), second_spectrum.getStringDataArrays()[i].end());
resulting_spectrum.getStringDataArrays().push_back(string_array);
resulting_spectrum.getStringDataArrays()[i].setName(first_spectrum.getStringDataArrays()[i].getName());
}
}
for (Size i = 0; i < first_spectrum.getIntegerDataArrays().size(); i++)
{
if (second_spectrum.getIntegerDataArrays().size() > i)
{
PeakSpectrum::IntegerDataArray integer_array;
integer_array.insert(integer_array.end(), first_spectrum.getIntegerDataArrays()[i].begin(), first_spectrum.getIntegerDataArrays()[i].end());
integer_array.insert(integer_array.end(), second_spectrum.getIntegerDataArrays()[i].begin(), second_spectrum.getIntegerDataArrays()[i].end());
resulting_spectrum.getIntegerDataArrays().push_back(integer_array);
resulting_spectrum.getIntegerDataArrays()[i].setName(first_spectrum.getIntegerDataArrays()[i].getName());
}
}
// Spectra were simply concatenated, so they are not sorted by position anymore
resulting_spectrum.sortByPosition();
return resulting_spectrum;
}
  /**
    @brief Normalizes, filters and optionally deisotopes MS2 spectra before cross-link searching.

    Pipeline: remove zero-intensity peaks, normalize intensities, sort by RT, then per spectrum
    (in parallel): select MS2 spectra with a single precursor in the allowed charge range and
    enough peaks, optionally deisotope (annotating "charge" and "iso_peak_count" arrays), apply a
    window mower filter, and collect the surviving spectra.

    @param exp input experiment; modified in place by the initial filters
    @param fragment_mass_tolerance tolerance used by the deisotoper
    @param fragment_mass_tolerance_unit_ppm true: tolerance in ppm, false: in Da
    @param peptide_min_size spectra need more than 2x this many peaks to be kept
    @param min_precursor_charge minimal accepted precursor charge
    @param max_precursor_charge maximal accepted precursor charge
    @param deisotope whether to deisotope and annotate charges / iso peak counts
    @param labeled true for labeled experiments: keep all MS2 spectra regardless of precursor filters
    @return the filtered (and possibly deisotoped) spectra

    NOTE(review): spectra are appended under an OpenMP critical section from parallel threads, so
    the output order is not deterministic across runs; also, non-MS2 spectra are dropped even when
    @p labeled is set although the comment below implies a 1:1 spectrum count for labeled data --
    TODO confirm downstream index-based pairing tolerates both.
  */
  PeakMap OPXLSpectrumProcessingAlgorithms::preprocessSpectra(PeakMap& exp, double fragment_mass_tolerance, bool fragment_mass_tolerance_unit_ppm, Size peptide_min_size, Int min_precursor_charge, Int max_precursor_charge, bool deisotope, bool labeled)
  {
    // filter MS2 map
    // remove 0 intensities
    ThresholdMower threshold_mower_filter;
    threshold_mower_filter.filterPeakMap(exp);
    // scale intensities so spectra are comparable
    Normalizer normalizer;
    normalizer.filterPeakMap(exp);
    // sort by rt
    exp.sortSpectra(false);
    OPENMS_LOG_DEBUG << "Deisotoping and filtering spectra." << endl;
    // filter settings
    WindowMower window_mower_filter;
    Param filter_param = window_mower_filter.getParameters();
    filter_param.setValue("windowsize", 100.0, "The size of the sliding window along the m/z axis.");
    filter_param.setValue("peakcount", 20, "The number of peaks that should be kept.");
    filter_param.setValue("movetype", "jump", "Whether sliding window (one peak steps) or jumping window (window size steps) should be used.");
    window_mower_filter.setParameters(filter_param);
    PeakMap filtered_spectra;
    // spectra are independent, process them in parallel
#pragma omp parallel for
    for (SignedSize exp_index = 0; exp_index < static_cast<SignedSize>(exp.size()); ++exp_index)
    {
      // for labeled experiments, the pairs of heavy and light spectra are linked by spectra indices from the consensusXML, so the returned number of spectra has to be equal to the input
      bool process_this_spectrum(labeled);
      if (exp[exp_index].getMSLevel() != 2)
      {
        continue;
      }
      const vector<Precursor>& precursor = exp[exp_index].getPrecursors();
      // unlabeled data: require exactly one precursor in the allowed charge range and a minimal peak count
      if (!process_this_spectrum && precursor.size() == 1 && exp[exp_index].size() >= peptide_min_size * 2)
      {
        int precursor_charge = precursor[0].getCharge();
        if (precursor_charge >= min_precursor_charge && precursor_charge <= max_precursor_charge)
        {
          process_this_spectrum = true;
        }
      }
      if (!process_this_spectrum)
      {
        continue;
      }
      if (deisotope)
      {
        // work on a copy; deisotoping modifies the spectrum
        PeakSpectrum deisotoped = exp[exp_index];
        Deisotoper::deisotopeAndSingleCharge(deisotoped,
          fragment_mass_tolerance, fragment_mass_tolerance_unit_ppm,
          1, 7, // min / max charge
          false, // keep only deisotoped
          3, 10, // min / max isopeaks
          false, // make single charged
          true, // annotate charge
          true, // annotate isotopic peak counts
          true, // use simple averagine model
          3, // peak to start averagine model
          true // add up intensity into monoisotopic peak
          );
        // only consider spectra, that have at least as many peaks as two times the minimal peptide size after deisotoping
        if (deisotoped.size() > peptide_min_size * 2 || labeled)
        {
          window_mower_filter.filterPeakSpectrum(deisotoped);
          deisotoped.sortByPosition();
#pragma omp critical (filtered_spectra_access)
          filtered_spectra.addSpectrum(deisotoped);
        }
      }
      else
      {
        PeakSpectrum filtered = exp[exp_index];
        if (!labeled) // this kind of filtering is not necessary for labeled cross-links, since they area filtered by comparing heavy and light spectra later
        {
          window_mower_filter.filterPeakSpectrum(filtered);
        }
        // only consider spectra, that have at least as many peaks as two times the minimal peptide size after filtering
        if (filtered.size() > peptide_min_size * 2 || labeled)
        {
          filtered.sortByPosition();
#pragma omp critical (filtered_spectra_access)
          filtered_spectra.addSpectrum(filtered);
        }
      }
    } // end of parallelized loop over spectra
    return filtered_spectra;
  }
  /**
    @brief Aligns a theoretical against an experimental spectrum (both sorted by m/z) in a single linear sweep.

    A theoretical / experimental peak pair matches if
      - the m/z difference is within the tolerance window,
      - the charges agree (a charge of 0 on either side acts as a wildcard), and
      - the intensity ratio min/max exceeds @p intensity_cutoff.
    For each theoretical peak the closest matching experimental peak within the window is chosen,
    and each experimental peak is used for at most one match.

    @param alignment output: (theoretical index, experimental index) pairs; must be empty on entry
    @param fragment_mass_tolerance tolerance for matching peaks
    @param fragment_mass_tolerance_unit_ppm true: tolerance in ppm, false: in Da
    @param theo_spectrum theoretical spectrum, sorted by position
    @param exp_spectrum experimental spectrum, sorted by position
    @param theo_charges charge annotations of the theoretical peaks (may be empty)
    @param exp_charges charge annotations of the experimental peaks (may be empty)
    @param ppm_error_array output: ppm error per match, parallel to @p alignment; must be empty on entry
    @param intensity_cutoff minimal required min/max intensity ratio between matched peaks
  */
  void OPXLSpectrumProcessingAlgorithms::getSpectrumAlignmentFastCharge(
    std::vector<std::pair<Size, Size> > & alignment, double fragment_mass_tolerance,
    bool fragment_mass_tolerance_unit_ppm,
    const PeakSpectrum& theo_spectrum,
    const PeakSpectrum& exp_spectrum,
    const DataArrays::IntegerDataArray& theo_charges,
    const DataArrays::IntegerDataArray& exp_charges,
    DataArrays::FloatDataArray& ppm_error_array,
    double intensity_cutoff)
  {
    OPENMS_PRECONDITION(exp_spectrum.isSorted(), "Spectrum needs to be sorted.");
    OPENMS_PRECONDITION(theo_spectrum.isSorted(), "Spectrum needs to be sorted.");
    OPENMS_PRECONDITION((alignment.empty() == true), "Alignment result vector needs to be empty.");
    OPENMS_PRECONDITION((ppm_error_array.empty() == true), "ppm error result vector needs to be empty.");
    const Size n_t(theo_spectrum.size());
    const Size n_e(exp_spectrum.size());
    // charge matching is only possible if both spectra carry charge annotations
    const bool has_charge = !(exp_charges.empty() || theo_charges.empty());
    if (n_t == 0 || n_e == 0) { return; }
    // two-pointer sweep: t over theoretical peaks, e over experimental peaks
    Size t(0), e(0);
    alignment.reserve(theo_spectrum.size());
    ppm_error_array.reserve(theo_spectrum.size());
    while (t < n_t && e < n_e)
    {
      const double theo_mz = theo_spectrum[t].getMZ();
      const double exp_mz = exp_spectrum[e].getMZ();
      int tz(0), ez(0);
      if (has_charge)
      {
        tz = theo_charges[t];
        ez = exp_charges[e];
      }
      // charge 0 (= unknown) matches any charge
      const bool tz_matches_ez = (ez == tz || !ez || !tz);
      double ti = theo_spectrum[t].getIntensity();
      double ei = exp_spectrum[e].getIntensity();
      const bool initial_intensity_matches = ( std::min(ti, ei) / std::max(ti, ei) ) > intensity_cutoff;
      // signed m/z distance; negative means the exp. peak lies left of the theo. peak
      double d = exp_mz - theo_mz;
      const double max_dist_dalton = fragment_mass_tolerance_unit_ppm ? theo_mz * fragment_mass_tolerance * 1e-6 : fragment_mass_tolerance;
      if (fabs(d) <= max_dist_dalton) // match in tolerance window?
      {
        // get first peak with matching charge in tolerance window
        if (!tz_matches_ez || !initial_intensity_matches)
        {
          // current exp. peak is in the window but fails charge/intensity: scan forward for a usable one
          Size e_candidate(e);
          while (e_candidate < n_e-1)
          {
            ++e_candidate;
            double new_ez = has_charge ? exp_charges[e_candidate] : 0;
            double new_ei = exp_spectrum[e_candidate].getIntensity();
            const bool charge_matches = (new_ez == tz || !new_ez || !tz);
            const bool intensity_matches = ( std::min(ti, new_ei) / std::max(ti, new_ei) ) > intensity_cutoff;
            double new_d = exp_spectrum[e_candidate].getMZ() - theo_mz;
            if (charge_matches && new_d <= max_dist_dalton && intensity_matches)
            { // found a match
              break;
            }
            else if (new_d > max_dist_dalton)
            { // no match found
              e_candidate = e;
              break;
            }
          }
          if (e == e_candidate)
          { // no match found continue with next theo. peak
            ++t;
            continue;
          }
          else
          { // match found
            e = e_candidate;
          }
        }
        // Invariant: e now points to the first peak in tolerance window, that matches in charge and intensity
        // last peak? there can't be a better one in this tolerance window
        if (e >= n_e - 1)
        {
          // add match
          alignment.emplace_back(std::make_pair(t, e));
          // add ppm error
          double ppm_error = (exp_spectrum[e].getMZ() - theo_mz) / theo_mz * 1e6;
          ppm_error_array.emplace_back(ppm_error);
          // all experimental peaks are consumed, no further matches possible
          return;
        }
        Size closest_exp_peak(e);
        // Invariant: closest_exp_peak always point to best match
        double new_ez(0);
        double best_d = exp_spectrum[closest_exp_peak].getMZ() - theo_mz;
        do // check for better match in tolerance window
        {
          // advance to next exp. peak
          ++e;
          // determine distance of next peak
          double new_d = exp_spectrum[e].getMZ() - theo_mz;
          const bool in_tolerance_window = (fabs(new_d) < max_dist_dalton);
          if (!in_tolerance_window) { break; }
          // Invariant: e is in tolerance window
          // check if charge and intensity of next peak matches
          if (has_charge) { new_ez = exp_charges[e]; }
          const bool charge_matches = (new_ez == tz || !new_ez || !tz);
          double new_ei = exp_spectrum[e].getIntensity();
          const bool intensity_matches = ( std::min(ti, new_ei) / std::max(ti, new_ei) ) > intensity_cutoff;
          if (!charge_matches || !intensity_matches) { continue; }
          // Invariant: charge and intensity matches
          const bool better_distance = (fabs(new_d) <= fabs(best_d));
          // better distance (and matching charge)? better match found
          if (better_distance)
          { // found a better match
            closest_exp_peak = e;
            best_d = new_d;
          }
          else
          { // distance got worse -> no additional matches!
            break;
          }
        }
        while (e < n_e - 1);
        // search in tolerance window for an experimental peak closer to theoretical one
        alignment.emplace_back(std::make_pair(t, closest_exp_peak));
        // add ppm error for this match
        double ppm_error = (exp_spectrum[closest_exp_peak].getMZ() - theo_mz) / theo_mz * 1e6;
        ppm_error_array.emplace_back(ppm_error);
        e = closest_exp_peak + 1; // advance experimental peak to 1-after the best match
        ++t; // advance theoretical peak
      }
      else if (d < 0) // exp. peak is left of theo. peak (outside of tolerance window)
      {
        ++e;
      }
      else if (d > 0) // theo. peak is left of exp. peak (outside of tolerance window)
      {
        ++t;
      }
    }
  }
  /**
    @brief Aligns theoretical SimplePeaks against an experimental spectrum (both sorted by m/z) in a single linear sweep.

    Same matching strategy as getSpectrumAlignmentFastCharge, but without the intensity filter and
    without ppm error reporting: peaks match if their m/z difference is within the tolerance window
    and their charges agree (charge 0 on either side acts as a wildcard). For each theoretical peak
    the closest matching experimental peak is chosen; each experimental peak is used at most once.

    @param alignment output: (theoretical index, experimental index) pairs; cleared on entry
    @param fragment_mass_tolerance tolerance for matching peaks
    @param fragment_mass_tolerance_unit_ppm true: tolerance in ppm, false: in Da
    @param theo_spectrum theoretical peaks (mz + charge), sorted by m/z
    @param exp_spectrum experimental spectrum, sorted by position
    @param exp_charges charge annotations of the experimental peaks (may be empty)
  */
  void OPXLSpectrumProcessingAlgorithms::getSpectrumAlignmentSimple(
    std::vector<std::pair<Size, Size> > & alignment,
    double fragment_mass_tolerance,
    bool fragment_mass_tolerance_unit_ppm,
    const std::vector< SimpleTSGXLMS::SimplePeak >& theo_spectrum,
    const PeakSpectrum& exp_spectrum,
    const DataArrays::IntegerDataArray& exp_charges)
  {
    alignment.clear();
    const Size n_t(theo_spectrum.size());
    const Size n_e(exp_spectrum.size());
    // theoretical charges come from the SimplePeak structs themselves; only the experimental
    // annotations can be missing
    const bool has_charge = !(exp_charges.empty());
    if (n_t == 0 || n_e == 0) { return; }
    // two-pointer sweep: t over theoretical peaks, e over experimental peaks
    Size t(0), e(0);
    alignment.reserve(theo_spectrum.size());
    while (t < n_t && e < n_e)
    {
      const double theo_mz = theo_spectrum[t].mz;
      const double exp_mz = exp_spectrum[e].getMZ();
      int tz(0), ez(0);
      if (has_charge)
      {
        tz = theo_spectrum[t].charge;
        ez = exp_charges[e];
      }
      // charge 0 (= unknown) matches any charge
      const bool tz_matches_ez = (ez == tz || !ez || !tz);
      // signed m/z distance; negative means the exp. peak lies left of the theo. peak
      double d = exp_mz - theo_mz;
      const double max_dist_dalton = fragment_mass_tolerance_unit_ppm ? theo_mz * fragment_mass_tolerance * 1e-6 : fragment_mass_tolerance;
      if (fabs(d) <= max_dist_dalton) // match in tolerance window?
      {
        // get first peak with matching charge in tolerance window
        if (!tz_matches_ez)
        {
          // current exp. peak is in the window but has the wrong charge: scan forward for a usable one
          Size e_candidate(e);
          while (e_candidate < n_e-1)
          {
            ++e_candidate;
            double new_ez = has_charge ? exp_charges[e_candidate] : 0;
            const bool charge_matches = (new_ez == tz || !new_ez || !tz);
            double new_d = exp_spectrum[e_candidate].getMZ() - theo_mz;
            if (charge_matches && new_d <= max_dist_dalton)
            { // found a match
              break;
            }
            else if (new_d > max_dist_dalton)
            { // no match found
              e_candidate = e;
              break;
            }
          }
          if (e == e_candidate)
          { // no match found continue with next theo. peak
            ++t;
            continue;
          }
          else
          { // match found
            e = e_candidate;
          }
        }
        // Invariant: e now points to the first peak in tolerance window, that matches in charge and intensity
        // last peak? there can't be a better one in this tolerance window
        if (e >= n_e - 1)
        {
          // add match
          alignment.emplace_back(std::make_pair(t, e));
          // all experimental peaks are consumed, no further matches possible
          return;
        }
        Size closest_exp_peak(e);
        // Invariant: closest_exp_peak always point to best match
        double new_ez(0);
        double best_d = exp_spectrum[closest_exp_peak].getMZ() - theo_mz;
        do // check for better match in tolerance window
        {
          // advance to next exp. peak
          ++e;
          // determine distance of next peak
          double new_d = exp_spectrum[e].getMZ() - theo_mz;
          const bool in_tolerance_window = (fabs(new_d) < max_dist_dalton);
          if (!in_tolerance_window) { break; }
          // Invariant: e is in tolerance window
          // check if charge and intensity of next peak matches
          if (has_charge) { new_ez = exp_charges[e]; }
          const bool charge_matches = (new_ez == tz || !new_ez || !tz);
          if (!charge_matches) { continue; }
          // Invariant: charge and intensity matches
          const bool better_distance = (fabs(new_d) <= fabs(best_d));
          // better distance (and matching charge)? better match found
          if (better_distance)
          { // found a better match
            closest_exp_peak = e;
            best_d = new_d;
          }
          else
          { // distance got worse -> no additional matches!
            break;
          }
        }
        while (e < n_e - 1);
        // search in tolerance window for an experimental peak closer to theoretical one
        alignment.emplace_back(std::make_pair(t, closest_exp_peak));
        e = closest_exp_peak + 1; // advance experimental peak to 1-after the best match
        ++t; // advance theoretical peak
      }
      else if (d < 0) // exp. peak is left of theo. peak (outside of tolerance window)
      {
        ++e;
      }
      else if (d > 0) // theo. peak is left of exp. peak (outside of tolerance window)
      {
        ++t;
      }
    }
  }
}
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Eugen Netz $
// $Authors: Lukas Zimmermann, Eugen Netz $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/XLMS/XFDRAlgorithm.h>
#include <OpenMS/CONCEPT/Constants.h>
// using namespace std;
using namespace OpenMS;
// Constructor: registers all algorithm parameters (defaults, user-facing
// descriptions and value constraints) and initializes members from them.
XFDRAlgorithm::XFDRAlgorithm()
  : DefaultParamHandler("XFDRAlgorithm")
{
  // fixed typo in user-visible description: "correspondig" -> "corresponding"
  defaults_.setValue(param_decoy_string_, "DECOY_", "Prefix of decoy protein ids. The corresponding target protein id should be retrievable by deleting this prefix.");
  defaults_.setValue(param_minborder_, -50.0, "Filter for minimum precursor mass error (ppm) before FDR estimation. Values outside of the tolerance window of the original search will effectively disable this filter.");
  defaults_.setValue(param_maxborder_, 50.0, "Filter for maximum precursor mass error (ppm) before FDR estimation. Values outside of the tolerance window of the original search will effectively disable this filter.");
  defaults_.setValue(param_mindeltas_, 0.0, "Filter for delta score, 0 disables the filter. Minimum delta score required, hits are rejected if larger or equal. The delta score is a ratio of the score of a hit and the score of the next best hit to the same spectrum, so the value range is between 0 and 1 with 1.0 meaning the scores are equal and 0.5 meaning the next best score is half as high as the current one.");
  defaults_.setMinFloat(param_mindeltas_, 0.0);
  defaults_.setMaxFloat(param_mindeltas_, 1.0);
  defaults_.setValue(param_minionsmatched_, 0, "Filter for minimum matched ions per peptide.");
  defaults_.setMinInt(param_minionsmatched_, 0);
  std::vector<std::string> bool_strings = {"true","false"};
  defaults_.setValue(param_uniquexl_, "false", "Calculate statistics based only on unique IDs. For a set of IDs from equal candidates (same pair of peptides, modifications and cross-linked positions), only the highest scoring hit will be considered. By default the score distribution will be estimated using all 1st ranked candidates.");
  defaults_.setValidStrings(param_uniquexl_, bool_strings);
  defaults_.setValue(param_no_qvalues_, "false", "Do not transform simple FDR to q-values");
  defaults_.setValidStrings(param_no_qvalues_, bool_strings);
  defaults_.setValue(param_minscore_, -10.0, "Minimum score to be considered for FDR calculation. A number lower than the lowest score will effectively disable this filter.");
  defaults_.setValue(param_binsize_, 0.0001, "Bin size for the cumulative histograms for score distributions. Should be about the same size as the smallest expected difference between scores. Smaller numbers will make XFDR more robust, but much slower. Negative numbers are not allowed. Should only be changed if the range of the main score changes or another score than the OpenPepXL score is used.");
  defaults_.setMinFloat(param_binsize_, 1e-15);
  // copy defaults into the active parameters / member variables
  defaultsToParam_();
}
// Defaulted out-of-line destructor (definition anchored in this translation unit).
XFDRAlgorithm::~XFDRAlgorithm() = default;
void XFDRAlgorithm::updateMembers_()
{
decoy_string_ = static_cast<String>(param_.getValue(param_decoy_string_).toString());
arg_mindeltas_ = static_cast<double>(param_.getValue(param_mindeltas_));
arg_minborder_ = static_cast<double>(param_.getValue(param_minborder_));
arg_maxborder_ = static_cast<double>(param_.getValue(param_maxborder_));
arg_minionsmatched_ = static_cast<Int>(param_.getValue(param_minionsmatched_));
arg_minscore_ = static_cast<double>(param_.getValue(param_minscore_));
arg_uniquex_ = (param_.getValue(param_uniquexl_) == "true" ? true : false);
arg_no_qvalues_ = (param_.getValue(param_no_qvalues_) == "true" ? true : false);
arg_binsize_ = static_cast<double>(param_.getValue(param_binsize_));
min_score_ = 0;
max_score_ = arg_minscore_;
}
// Main entry point: collects scores per cross-link class, estimates class-wise
// FDRs from cumulative score histograms, optionally transforms them to
// q-values, and annotates every PeptideHit with "XFDR:FDR" / "XFDR:fdr_type".
XFDRAlgorithm::ExitCodes XFDRAlgorithm::run(PeptideIdentificationList& peptide_ids, ProteinIdentification& protein_id)
{
  writeArgumentsLog_();
  std::cout << "Initializing data structures..." << std::endl;
  // Initialize and validate data structures that are derived from the main peptide identification vector 'peptide_ids'
  initDataStructures_(peptide_ids, protein_id);
  // Maps the cross link class to the encountered scores
  std::map<String, std::vector<double>> scores;
  UInt num_flagged(0);
  std::cout << "Collecting scores for each class..." << std::endl;
  // Loop through the peptides, apply filter, and assign cross-link types
  for (PeptideIdentification& pep_id : peptide_ids)
  {
    if (pep_id.getHits().empty())
    {
      continue;
    }
    // usually the 1st ranked hit should be the first in the list
    // but we make sure here and use the first hit with rank 1 that we find
    int rank_one_hit_index(0);
    for (Size i = 0; i < pep_id.getHits().size(); ++i)
    {
      PeptideHit& ph = pep_id.getHits()[i];
      if (int(ph.getMetaValue(Constants::UserParam::OPENPEPXL_XL_RANK)) == 1)
      {
        rank_one_hit_index = i;
        break;
      }
    }
    PeptideHit& ph = pep_id.getHits()[rank_one_hit_index];
    // if after the search above we don't have a rank 1 hit, skip this pep_id
    if (int(ph.getMetaValue(Constants::UserParam::OPENPEPXL_XL_RANK)) != 1)
    {
      continue;
    }
    // Attributes of peptide identification that can be used for filtering
    const double delta_score = ph.getMetaValue(Constants::UserParam::DELTA_SCORE);
    const double score = ph.getScore();
    // precursor mass error defaults to 0 if the user param is absent
    double error_rel(0);
    if (ph.metaValueExists(Constants::UserParam::PRECURSOR_ERROR_PPM_USERPARAM))
    {
      error_rel = ph.getMetaValue(Constants::UserParam::PRECURSOR_ERROR_PPM_USERPARAM);
    }
    const Size min_ions_matched = getMinIonsMatched_(ph);
    const String id = ph.getMetaValue("OpenPepXL:id");
    // Only consider IDs which fulfill all filter criteria specified by the user
    if ( (arg_minborder_ <= error_rel) // minborder fulfilled
      && (arg_maxborder_ >= error_rel) // maxborder fulfilled
      && (arg_mindeltas_ == 0 || delta_score < arg_mindeltas_)
      && (min_ions_matched >= (Size)arg_minionsmatched_)
      && score >= arg_minscore_
      )
    {
      // check for the unique ID criterion
      if (arg_uniquex_)
      {
        auto uid_it = std::find(this->unique_ids_.begin(), this->unique_ids_.end(), id);
        if (uid_it != this->unique_ids_.end())
        {
          int index = std::distance(this->unique_ids_.begin(), uid_it);
          if (this->unique_id_scores_[index] != ph.getScore())
          {
            // this is not the highest scoring ID for this candidate
            continue;
          }
        }
      }
      num_flagged++;
      ph.setMetaValue("XFDR:used_for_FDR", 1);
      // one hit can contribute to several classes (e.g. "targets" + "intralinks")
      for (const String &cross_link_class : this->cross_link_classes_[id])
      {
        scores[cross_link_class].push_back(score);
      }
    }
  }
  std::cout << "XFDR has used " + String(num_flagged) + " hits to calculate the FDR" << std::endl;
  // Log number of scores within each class
  std::cout << "Number of Scores for each class:" << std::endl;
  for (const auto &score : scores)
  {
    std::cout << score.first + ": " + score.second.size() << std::endl;
  }
  // Generate Histograms of the scores for each class
  // Use cumulative histograms to count the number of scores above consecutive thresholds
  std::map< String, Math::Histogram<> > cum_histograms;
  for (const auto &class_scores: scores)
  {
    std::vector< double > current_scores = class_scores.second;
    Math::Histogram<> histogram(this->min_score_, this->max_score_, arg_binsize_);
    Math::Histogram<>::getCumulativeHistogram(current_scores.begin(), current_scores.end(), true, true, histogram);
    cum_histograms[class_scores.first] = histogram;
  }
  std::cout << "Calculating Score Distributions..." << std::endl;
  // Calculate FDR for interlinks
  std::vector< double > fdr_interlinks;
  this->fdr_xprophet_(cum_histograms, crosslink_class_interlinks_, crosslink_class_interdecoys_, crosslink_class_fulldecoysinterlinks_, fdr_interlinks, false);
  // Calculate FDR for intralinks
  std::vector< double > fdr_intralinks;
  this->fdr_xprophet_(cum_histograms, crosslink_class_intralinks_, crosslink_class_intradecoys_, crosslink_class_fulldecoysintralinks_, fdr_intralinks, false);
  // Calculate FDR for monolinks and looplinks
  std::vector< double > fdr_monolinks;
  this->fdr_xprophet_(cum_histograms, crosslink_class_monolinks_, crosslink_class_monodecoys_, "", fdr_monolinks, true);
  // Determine whether qTransform should be performed (and consequently the score type)
  String score_type = arg_no_qvalues_ ? "FDR" : "q-value";
  if ( ! arg_no_qvalues_)
  {
    std::cout << "Performing qFDR transformation..." << std::endl;
    std::vector< double > qfdr_interlinks;
    this->calc_qfdr_(fdr_interlinks, qfdr_interlinks);
    std::vector< double > qfdr_intralinks;
    this->calc_qfdr_(fdr_intralinks, qfdr_intralinks);
    std::vector< double > qfdr_monolinks;
    this->calc_qfdr_(fdr_monolinks, qfdr_monolinks);
    fdr_interlinks = qfdr_interlinks;
    fdr_intralinks = qfdr_intralinks;
    fdr_monolinks = qfdr_monolinks;
  }
  std::cout << "Assigning FDRs..." << std::endl;
  // Assign FDR values to all identifications
  for (PeptideIdentification &pep_id : peptide_ids)
  {
    for (PeptideHit& ph : pep_id.getHits())
    {
      if ( ! ph.metaValueExists("XFDR:used_for_FDR"))
      {
        ph.setMetaValue("XFDR:used_for_FDR", 0);
      }
      double score = ph.getScore();
      StringList crosslink_types;
      assignTypes_(ph, crosslink_types);
      ph.setMetaValue("XFDR:fdr_type", score_type);
      // Assign FDR value as meta value and also set as score
      bool assigned = false;
      double fdr = 1;
      for (StringList::const_iterator crosslink_types_it = crosslink_types.begin();
           crosslink_types_it != crosslink_types.end(); ++crosslink_types_it)
      {
        String current_crosslink_type = *crosslink_types_it;
        // map the score to its histogram bin index
        // NOTE(review): if score == max_score_, 'idx' would be one past the last
        // FDR bin - presumably scores are strictly below max_score_; confirm.
        Size idx = std::floor((score - this->min_score_) / arg_binsize_);
        if ( current_crosslink_type == crosslink_class_fulldecoysinterlinks_
          || current_crosslink_type == crosslink_class_hybriddecoysinterlinks_
          || current_crosslink_type == crosslink_class_interdecoys_
          || current_crosslink_type == crosslink_class_interlinks_)
        {
          fdr = fdr_interlinks[idx];
          assigned = true;
          break;
        }
        else if ( current_crosslink_type == crosslink_class_fulldecoysintralinks_
          || current_crosslink_type == crosslink_class_hybriddecoysintralinks_
          || current_crosslink_type == crosslink_class_intradecoys_
          || current_crosslink_type == crosslink_class_intralinks_)
        {
          fdr = fdr_intralinks[idx];
          assigned = true;
          break;
        }
        else if ( current_crosslink_type == crosslink_class_monodecoys_
          || current_crosslink_type == crosslink_class_monolinks_)
        {
          fdr = fdr_monolinks[idx];
          assigned = true;
          break;
        }
      }
      if ( assigned)
      {
        ph.setMetaValue("XFDR:FDR", fdr);
      }
      else
      {
        std::cout << "WARNING: A Crosslink could not be identified as either interlink, intralink, or monolink, so no FDR will be available for it." << std::endl;
      }
    }
  }
  return ExitCodes::EXECUTION_OK;
}
// Preprocesses all peptide identifications: determines the global score range,
// flags each hit as intra-/inter-protein, assigns a candidate id and the set of
// cross-link classes per id, and (if requested) collects the top unique hits.
void XFDRAlgorithm::initDataStructures_(PeptideIdentificationList& peptide_ids, ProteinIdentification& protein_id)
{
  const String prot_identifier = protein_id.getIdentifier();
  // if the metaValue exists in search_params and the default value for XFDR was not changed, use the one in search_params
  ProteinIdentification::SearchParameters search_params = protein_id.getSearchParameters();
  if (search_params.metaValueExists("decoy_string") && decoy_string_ == "DECOY_")
  {
    decoy_string_ = search_params.getMetaValue("decoy_string");
  }
  // Preprocess all peptide identifications and construct derived data structures necessary for XFDR
  for (PeptideIdentification& pep_id : peptide_ids)
  {
    if (pep_id.getHits().empty())
    {
      continue;
    }
    pep_id.setIdentifier(prot_identifier);
    for (PeptideHit& ph : pep_id.getHits())
    {
      // Set the minScore and maxScore attribute depending on the input data
      const double score = ph.getScore();
      // Set score boundaries
      if (score < this->min_score_)
      {
        this->min_score_ = std::floor(score);
      }
      if (score > this->max_score_)
      {
        this->max_score_ = std::ceil(score);
      }
      assert(this->min_score_ <= this->max_score_);
      // figure out if crosslink is inter- or intra protein
      // for cases with multiple proteins, count as true, if any one possible combination of proteins fits the criteria
      // so both can be true at the same time (or false for mono-links)
      setIntraProtein_(ph, false);
      setInterProtein_(ph, false);
      if (ph.metaValueExists(Constants::UserParam::OPENPEPXL_XL_TYPE) && ph.getMetaValue(Constants::UserParam::OPENPEPXL_XL_TYPE) == "cross-link")
      {
        StringList alpha_prots;
        // bind by const reference and iterate by const reference: avoids
        // copying the evidence vector and each PeptideEvidence element
        const std::vector<PeptideEvidence>& pevs_alpha = ph.getPeptideEvidences();
        for (const PeptideEvidence& pev : pevs_alpha)
        {
          alpha_prots.push_back(pev.getProteinAccession());
        }
        StringList beta_prots = ListUtils::create<String>(ph.getMetaValue(Constants::UserParam::OPENPEPXL_BETA_ACCESSIONS).toString());
        for (String& alpha_prot : alpha_prots)
        {
          for (String& beta_prot : beta_prots)
          {
            if (isSameProtein_(alpha_prot, beta_prot, decoy_string_))
            {
              setIntraProtein_(ph, true);
            }
            else
            {
              setInterProtein_(ph, true);
            }
          }
        }
      }
      String id = getId_(ph);
      ph.setMetaValue("OpenPepXL:id", id);
      // candidates with the same ID will also have the same types
      if (this->cross_link_classes_.find(id) == this->cross_link_classes_.end())
      {
        assignTypes_(ph, this->cross_link_classes_[id]);
      }
    }
  }
  if (arg_uniquex_)
  {
    findTopUniqueHits_(peptide_ids);
  }
}
// Fills 'types' with every cross-link class the given hit belongs to
// (target/decoy, intra/inter, mono/loop, full-/hybrid-decoy variants).
// A hit can belong to several classes at once.
void XFDRAlgorithm::assignTypes_(PeptideHit &ph, StringList &types)
{
  types.clear();
  const bool xl_is_decoy = ph.getMetaValue(Constants::UserParam::TARGET_DECOY) == "decoy";
  // hoist the repeated meta value lookups: each flag was queried up to four times
  const bool is_intraprotein = ph.getMetaValue("XFDR:is_intraprotein").toBool();
  const bool is_interprotein = ph.getMetaValue("XFDR:is_interprotein").toBool();
  // target or decoy
  if (xl_is_decoy)
  {
    types.push_back(crosslink_class_decoys_);
  }
  else
  {
    types.push_back(crosslink_class_targets_);
  }
  // intralinks
  if (is_intraprotein && (!xl_is_decoy))
  {
    types.push_back(crosslink_class_intralinks_);
  }
  // intradecoys
  if (is_intraprotein && xl_is_decoy)
  {
    types.push_back(crosslink_class_intradecoys_);
  }
  // interlinks
  if (is_interprotein && (!xl_is_decoy))
  {
    types.push_back(crosslink_class_interlinks_);
  }
  // interdecoys
  if (is_interprotein && xl_is_decoy)
  {
    types.push_back(crosslink_class_interdecoys_);
  }
  assert(ph.metaValueExists(Constants::UserParam::OPENPEPXL_XL_TYPE));
  String current_crosslink_type = ph.getMetaValue(Constants::UserParam::OPENPEPXL_XL_TYPE);
  const bool is_mono_or_loop = (current_crosslink_type == "mono-link"
                             || current_crosslink_type == "loop-link");
  // monolinks
  if ((!xl_is_decoy) && is_mono_or_loop)
  {
    types.push_back(crosslink_class_monolinks_);
  }
  // monodecoys
  if (xl_is_decoy && is_mono_or_loop)
  {
    types.push_back(crosslink_class_monodecoys_);
  }
  if (current_crosslink_type == "cross-link")
  {
    const bool alpha_is_decoy = ph.getMetaValue(Constants::UserParam::OPENPEPXL_TARGET_DECOY_ALPHA).toString() == "decoy";
    const bool beta_is_decoy = ph.getMetaValue(Constants::UserParam::OPENPEPXL_TARGET_DECOY_BETA).toString() == "decoy";
    // exactly one of the two peptides is a decoy
    const bool hybrid_decoy = (alpha_is_decoy != beta_is_decoy);
    // fulldecoysintralinks
    if (is_intraprotein && alpha_is_decoy && beta_is_decoy)
    {
      types.push_back(crosslink_class_fulldecoysintralinks_);
    }
    // fulldecoysinterlinks
    if (is_interprotein && alpha_is_decoy && beta_is_decoy)
    {
      types.push_back(crosslink_class_fulldecoysinterlinks_);
    }
    // hybriddecoysintralinks
    if (is_intraprotein && hybrid_decoy)
    {
      types.push_back(crosslink_class_hybriddecoysintralinks_);
    }
    // hybriddecoysinterlinks
    if (is_interprotein && hybrid_decoy)
    {
      types.push_back(crosslink_class_hybriddecoysinterlinks_);
    }
  }
}
// Estimates the FDR for one cross-link class, one value per score bin:
// FDR(s) = (#decoys(>=s) - 2 * #full_decoys(>=s)) / #targets(>=s), using the
// cumulative histograms; for mono-links ('mono' == true) no full-decoy term is
// subtracted. 'cum_histograms' is taken by non-const reference because the
// histograms are accessed via operator[].
void XFDRAlgorithm::fdr_xprophet_(std::map< String, Math::Histogram<> > & cum_histograms,
                    const String & targetclass, const String & decoyclass, const String & fulldecoyclass,
                    std::vector< double > & fdr, bool mono) const
{
  // Determine whether targetclass, decoyclass, and fulldecoyclass are present in the histogram map
  bool targetclass_present = cum_histograms.find(targetclass) != cum_histograms.end();
  bool decoyclass_present = cum_histograms.find(decoyclass) != cum_histograms.end();
  bool fulldecoyclass_present = cum_histograms.find(fulldecoyclass) != cum_histograms.end();
  // step through the score range bin by bin, evaluating at each bin center
  for (double current_score = this->min_score_ + (arg_binsize_/2);
     current_score <= this->max_score_ - (arg_binsize_/2);
     current_score += arg_binsize_)
  {
    double estimated_n_decoys = decoyclass_present ? cum_histograms[decoyclass].binValue(current_score) : 0;
    if ( ! mono)
    {
      // NOTE(review): presumably the xProphet D - 2*DD correction (full decoys
      // counted once in the decoy class) - confirm against the publication
      estimated_n_decoys -= 2 * ( fulldecoyclass_present ? cum_histograms[fulldecoyclass].binValue(current_score) : 0);
    }
    double n_targets = targetclass_present ? cum_histograms[targetclass].binValue(current_score) : 0;
    // no targets above this threshold -> report FDR 0 (avoids division by zero)
    fdr.push_back(n_targets > 0 ? estimated_n_decoys / (n_targets) : 0);
  }
}
void XFDRAlgorithm::calc_qfdr_(const std::vector< double > &fdr, std::vector< double > &qfdr)
{
qfdr.resize(fdr.size());
for (Int i = fdr.size() - 1; i >= 0; --i)
{
double current_fdr = fdr[i];
double smallest_fdr = current_fdr;
for (Int j = i; j >= 0; j--)
{
double fdr_to_check = fdr[j];
if (fdr_to_check < smallest_fdr)
{
smallest_fdr = fdr_to_check;
}
}
qfdr[i] = smallest_fdr < current_fdr ? smallest_fdr : current_fdr;
}
}
// Records, for every candidate id ("OpenPepXL:id"), the best (highest) score
// observed across all hits; results go into the parallel member vectors
// unique_ids_ / unique_id_scores_.
void XFDRAlgorithm::findTopUniqueHits_(PeptideIdentificationList& peptide_ids)
{
  for (PeptideIdentification& pep_id : peptide_ids)
  {
    for (PeptideHit& hit : pep_id.getHits())
    {
      const String candidate_id = hit.getMetaValue("OpenPepXL:id");
      const double current_score = hit.getScore();
      auto pos = std::find(this->unique_ids_.begin(), this->unique_ids_.end(), candidate_id);
      if (pos == this->unique_ids_.end())
      {
        // first occurrence of this candidate: remember id and score
        this->unique_ids_.push_back(candidate_id);
        this->unique_id_scores_.push_back(current_score);
      }
      else
      {
        // candidate seen before: keep the higher score
        auto idx = std::distance(this->unique_ids_.begin(), pos);
        if (this->unique_id_scores_[idx] < current_score)
        {
          this->unique_id_scores_[idx] = current_score;
        }
      }
    }
  }
}
void XFDRAlgorithm::writeArgumentsLog_() const
{
//-------------------------------------------------------------
// Printing parameters to log
//-------------------------------------------------------------
std::cout << std::endl;
std::cout << ((arg_minborder_ != -1) ? "Lower bound for precursor mass error for FDR calculation is " + String(arg_minborder_) + " ppm"
: "No lower bound for precursor mass error for FDR calculation") << std::endl;
std::cout << ((arg_maxborder_ != -1) ? "Upper bound for precursor mass error for FDR calculation is " + String(arg_maxborder_) + " ppm"
: "No upper bound for precursor mass error for FDR calculation") << std::endl;
std::cout << ((arg_mindeltas_ != 0) ? "Filtering of hits by a deltascore of " + String(arg_mindeltas_) + " is used."
: "No filtering of hits by deltascore") << std::endl;
std::cout << ((arg_minionsmatched_ > 0) ? "Filtering of hits by minimum ions matched: " + String(arg_minionsmatched_) + " is used"
: "No filtering of hits by minimum ions matched.") << std::endl;
std::cout << ((arg_minscore_ > 0) ? "Filtering of hits by minimum score of " + String(arg_minscore_) + " is used."
: "No filtering of hits by minimum score.") << std::endl;
std::cout << ((arg_uniquex_) ? "Error model is generated based on unique cross-links."
: "Error model is generated based on redundant cross-links.") << std::endl;
std::cout << "Bin size for cumulative histograms is " + String(arg_binsize_) << std::endl;
}
// Validates the parameter combination: 'minborder' must be strictly smaller
// than 'maxborder', otherwise the precursor mass error window is empty.
XFDRAlgorithm::ExitCodes XFDRAlgorithm::validateClassArguments() const
{
  if (arg_minborder_ >= arg_maxborder_)
  {
    // fixed message: grammar + typo ("larger or equal than Maxboder")
    std::cout << "Minborder cannot be larger than or equal to Maxborder!" << std::endl;
    return ExitCodes::ILLEGAL_PARAMETERS;
  }
  return ExitCodes::EXECUTION_OK;
}
// Builds a unique candidate id for a hit from its (unmodified) sequence(s),
// link positions and - for mono-links - the cross-linker mass. If an id was
// already stored on the hit, that one is returned unchanged.
String XFDRAlgorithm::getId_(const PeptideHit& ph) const
{
  if (ph.metaValueExists("OpenPepXL:id"))
  {
    return ph.getMetaValue("OpenPepXL:id").toString();
  }
  // every id variant starts with the alpha peptide sequence and uses pos1
  const String alpha_seq = ph.getSequence().toUnmodifiedString();
  const String pos1 = String(ph.getMetaValue(Constants::UserParam::OPENPEPXL_XL_POS1));
  if (ph.getMetaValue(Constants::UserParam::OPENPEPXL_XL_TYPE) == "cross-link")
  {
    const String beta_seq = AASequence::fromString(ph.getMetaValue(Constants::UserParam::OPENPEPXL_BETA_SEQUENCE)).toUnmodifiedString();
    const String pos2 = String(ph.getMetaValue(Constants::UserParam::OPENPEPXL_XL_POS2));
    return alpha_seq + "-" + beta_seq + "-a" + pos1 + "-b" + pos2;
  }
  if (ph.getMetaValue(Constants::UserParam::OPENPEPXL_XL_TYPE) == "loop-link")
  {
    const String pos2 = String(ph.getMetaValue(Constants::UserParam::OPENPEPXL_XL_POS2));
    return alpha_seq + "-a" + pos1 + "-b" + pos2;
  }
  if (ph.metaValueExists(Constants::UserParam::OPENPEPXL_XL_MASS))
  {
    // mono-link with known cross-linker mass
    return alpha_seq + "-" + pos1 + "-" + String(ph.getMetaValue(Constants::UserParam::OPENPEPXL_XL_MASS));
  }
  return alpha_seq + "-" + pos1;
}
// Names of the class parameters (user-facing parameter keys)
const String XFDRAlgorithm::param_decoy_string_ = "decoy_string";
const String XFDRAlgorithm::param_minborder_ = "minborder";
const String XFDRAlgorithm::param_maxborder_ = "maxborder";
const String XFDRAlgorithm::param_mindeltas_ = "mindeltas";
const String XFDRAlgorithm::param_minionsmatched_ = "minionsmatched";
const String XFDRAlgorithm::param_uniquexl_ = "uniquexl";
const String XFDRAlgorithm::param_no_qvalues_ = "no_qvalues";
const String XFDRAlgorithm::param_minscore_ = "minscore";
const String XFDRAlgorithm::param_binsize_ = "binsize";
// Names of cross-link classes (labels used as histogram/score-map keys)
const String XFDRAlgorithm::crosslink_class_intradecoys_ = "intradecoys";
const String XFDRAlgorithm::crosslink_class_fulldecoysintralinks_ = "fulldecoysintralinks";
const String XFDRAlgorithm::crosslink_class_interdecoys_ = "interdecoys";
const String XFDRAlgorithm::crosslink_class_fulldecoysinterlinks_ = "fulldecoysinterlinks";
const String XFDRAlgorithm::crosslink_class_monodecoys_ = "monodecoys";
const String XFDRAlgorithm::crosslink_class_intralinks_ = "intralinks";
const String XFDRAlgorithm::crosslink_class_interlinks_ = "interlinks";
const String XFDRAlgorithm::crosslink_class_monolinks_ = "monolinks";
const String XFDRAlgorithm::crosslink_class_decoys_ = "decoys";
const String XFDRAlgorithm::crosslink_class_targets_ = "targets";
const String XFDRAlgorithm::crosslink_class_hybriddecoysintralinks_ = "hybriddecoysintralinks";
const String XFDRAlgorithm::crosslink_class_hybriddecoysinterlinks_ = "hybriddecoysinterlinks";
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/MapAlignmentAlgorithmIdentification.cpp | .cpp | 14,680 | 361 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentAlgorithmIdentification.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/METADATA/AnnotatedMSRun.h>
using namespace std;
namespace OpenMS
{
  // Constructor: registers all algorithm parameters (defaults, descriptions,
  // constraints) and copies them into the member variables.
  MapAlignmentAlgorithmIdentification::MapAlignmentAlgorithmIdentification() :
    DefaultParamHandler("MapAlignmentAlgorithmIdentification"),
    ProgressLogger(), reference_index_(-1), reference_(), min_run_occur_(0), min_score_(0.)
  {
    defaults_.setValue("score_type", "", "Name of the score type to use for ranking and filtering (.oms input only). If left empty, a score type is picked automatically.");
    defaults_.setValue("score_cutoff", "false", "Use only IDs above a score cut-off (parameter 'min_score') for alignment?");
    defaults_.setValidStrings("score_cutoff", {"true", "false"});
    defaults_.setValue("min_score", 0.05, "If 'score_cutoff' is 'true': Minimum score for an ID to be considered.\nUnless you have very few runs or identifications, increase this value to focus on more informative peptides.");
    defaults_.setValue("min_run_occur", 2, "Minimum number of runs (incl. reference, if any) in which a peptide must occur to be used for the alignment.\nUnless you have very few runs or identifications, increase this value to focus on more informative peptides.");
    defaults_.setMinInt("min_run_occur", 2);
    defaults_.setValue("max_rt_shift", 0.5, "Maximum realistic RT difference for a peptide (median per run vs. reference). Peptides with higher shifts (outliers) are not used to compute the alignment.\nIf 0, no limit (disable filter); if > 1, the final value in seconds; if <= 1, taken as a fraction of the range of the reference RT scale.");
    defaults_.setMinFloat("max_rt_shift", 0.0);
    defaults_.setValue("use_unassigned_peptides", "true", "Should unassigned peptide identifications be used when computing an alignment of feature or consensus maps? If 'false', only peptide IDs assigned to features will be used.");
    defaults_.setValidStrings("use_unassigned_peptides", {"true", "false"});
    defaults_.setValue("use_feature_rt", "false", "When aligning feature or consensus maps, don't use the retention time of a peptide identification directly; instead, use the retention time of the centroid of the feature (apex of the elution profile) that the peptide was matched to. If different identifications are matched to one feature, only the peptide closest to the centroid in RT is used.\nPrecludes 'use_unassigned_peptides'.");
    defaults_.setValidStrings("use_feature_rt", {"true", "false"});
    defaults_.setValue("use_adducts", "true", "If IDs contain adducts, treat differently adducted variants of the same molecule as different.");
    defaults_.setValidStrings("use_adducts", {"true", "false"});
    // copy defaults into the active parameters / member variables
    defaultsToParam_();
  }
  // Defaulted out-of-line destructor (definition anchored in this translation unit).
  MapAlignmentAlgorithmIdentification::~MapAlignmentAlgorithmIdentification() = default;
void MapAlignmentAlgorithmIdentification::checkParameters_(Size runs)
{
min_run_occur_ = (int)param_.getValue("min_run_occur");
// reference is not counted as a regular run:
if (!reference_.empty()) runs++;
use_feature_rt_ = param_.getValue("use_feature_rt").toBool();
if (min_run_occur_ > runs)
{
String msg = "Warning: Value of parameter 'min_run_occur' (here: " +
String(min_run_occur_) + ") is higher than the number of runs incl. "
"reference (here: " + String(runs) + "). Using " + String(runs) +
" instead.";
OPENMS_LOG_WARN << msg << endl;
min_run_occur_ = runs;
}
score_cutoff_ = param_.getValue("score_cutoff").toBool();
// score type may have been set by reference already - don't overwrite it:
if (score_cutoff_ && score_type_.empty())
{
score_type_ = (std::string)param_.getValue("score_type");
}
min_score_ = param_.getValue("min_score");
use_adducts_ = param_.getValue("use_adducts").toBool();
}
// RT lists in "rt_data" will be sorted (unless "sorted" is true)
void MapAlignmentAlgorithmIdentification::computeMedians_(SeqToList& rt_data,
SeqToValue& medians,
bool sorted)
{
medians.clear();
for (SeqToList::iterator rt_it = rt_data.begin();
rt_it != rt_data.end(); ++rt_it)
{
double median = Math::median(rt_it->second.begin(),
rt_it->second.end(), sorted);
medians.insert(medians.end(), make_pair(rt_it->first, median));
}
}
// lists of peptide hits in "peptides" will be sorted
bool MapAlignmentAlgorithmIdentification::getRetentionTimes_(
const PeptideIdentificationList& peptides, SeqToList& rt_data)
{
for (auto pep_it = peptides.cbegin(); pep_it != peptides.cend(); ++pep_it)
{
if (!pep_it->getHits().empty())
{
const PeptideHit* best_hit = getBestScoringHit(pep_it->getHits(), pep_it->isHigherScoreBetter());
if (better_(best_hit->getScore(), min_score_))
{
const String& seq = best_hit->getSequence().toString();
rt_data[seq].push_back(pep_it->getRT());
}
}
}
return false;
}
IdentificationData::ScoreTypeRef
MapAlignmentAlgorithmIdentification::handleIdDataScoreType_(const IdentificationData& id_data)
{
IdentificationData::ScoreTypeRef score_ref;
if (score_type_.empty()) // choose a score type
{
score_ref = id_data.pickScoreType(id_data.getObservationMatches());
if (score_ref == id_data.getScoreTypes().end())
{
String msg = "no scores found";
throw Exception::MissingInformation(__FILE__, __LINE__,
OPENMS_PRETTY_FUNCTION, msg);
}
score_type_ = score_ref->cv_term.getName();
OPENMS_LOG_INFO << "Using score type: " << score_type_ << endl;
}
else
{
score_ref = id_data.findScoreType(score_type_);
if (score_ref == id_data.getScoreTypes().end())
{
String msg = "score type '" + score_type_ + "' not found";
throw Exception::MissingInformation(__FILE__, __LINE__,
OPENMS_PRETTY_FUNCTION, msg);
}
}
return score_ref;
}
bool MapAlignmentAlgorithmIdentification::getRetentionTimes_(
const IdentificationData& id_data, SeqToList& rt_data)
{
// @TODO: should this get handled as an error?
if (id_data.getObservationMatches().empty()) return true;
IdentificationData::ScoreTypeRef score_ref =
handleIdDataScoreType_(id_data);
vector<IdentificationData::ObservationMatchRef> top_hits =
id_data.getBestMatchPerObservation(score_ref);
for (const auto& hit : top_hits)
{
bool include = true;
if (score_cutoff_)
{
pair<double, bool> result = hit->getScore(score_ref);
if (!result.second ||
score_ref->isBetterScore(min_score_, result.first))
{
include = false;
}
}
if (include)
{
String molecule = hit->identified_molecule_var.toString();
if (use_adducts_ && hit->adduct_opt)
{
molecule += "+[" + (*hit->adduct_opt)->getName() + "]";
}
rt_data[molecule].push_back(hit->observation_ref->rt);
}
}
return false;
}
  // Computes one RT transformation per run: per-run RT medians per peptide are
  // matched against a reference RT scale (an external reference map, or the
  // consensus median-of-medians across runs), filtered by 'min_run_occur' and
  // 'max_rt_shift', and turned into TransformationDescription data points.
  void MapAlignmentAlgorithmIdentification::computeTransformations_(
    vector<SeqToList>& rt_data, vector<TransformationDescription>& transforms,
    bool sorted)
  {
    Int size = rt_data.size(); // not Size because we compare to Ints later
    transforms.clear();
    // filter RT data (remove peptides that elute in several fractions):
    // TODO
    // compute RT medians:
    OPENMS_LOG_DEBUG << "Computing RT medians..." << endl;
    vector<SeqToValue> medians_per_run(size);
    for (Int i = 0; i < size; ++i)
    {
      computeMedians_(rt_data[i], medians_per_run[i], sorted);
    }
    // invert the mapping: sequence -> list of per-run medians
    SeqToList medians_per_seq;
    for (vector<SeqToValue>::iterator run_it = medians_per_run.begin();
         run_it != medians_per_run.end(); ++run_it)
    {
      for (SeqToValue::iterator med_it = run_it->begin();
           med_it != run_it->end(); ++med_it)
      {
        medians_per_seq[med_it->first].push_back(med_it->second);
      }
    }
    // get reference retention time scale: either directly from reference file,
    // or compute consensus time scale
    bool reference_given = !reference_.empty(); // reference file given
    if (reference_given)
    {
      // remove peptides that don't occur in enough runs:
      OPENMS_LOG_DEBUG << "Removing peptides that occur in too few runs..." << endl;
      SeqToValue temp;
      for (SeqToValue::iterator ref_it = reference_.begin();
           ref_it != reference_.end(); ++ref_it)
      {
        SeqToList::iterator med_it = medians_per_seq.find(ref_it->first);
        // "+ 1": the reference itself counts towards 'min_run_occur'
        if ((med_it != medians_per_seq.end()) &&
            (med_it->second.size() + 1 >= min_run_occur_))
        {
          temp.insert(temp.end(), *ref_it); // new items should go at the end
        }
      }
      OPENMS_LOG_DEBUG << "Removed " << reference_.size() - temp.size() << " of "
                       << reference_.size() << " peptides." << endl;
      temp.swap(reference_);
    }
    else // compute overall RT median per sequence (median of medians per run)
    {
      OPENMS_LOG_DEBUG << "Computing overall RT medians per sequence..." << endl;
      // remove peptides that don't occur in enough runs (at least two):
      OPENMS_LOG_DEBUG << "Removing peptides that occur in too few runs..." << endl;
      SeqToList temp;
      for (SeqToList::iterator med_it = medians_per_seq.begin();
           med_it != medians_per_seq.end(); ++med_it)
      {
        if (med_it->second.size() >= min_run_occur_)
        {
          temp.insert(temp.end(), *med_it);
        }
      }
      OPENMS_LOG_DEBUG << "Removed " << medians_per_seq.size() - temp.size() << " of "
                       << medians_per_seq.size() << " peptides." << endl;
      temp.swap(medians_per_seq);
      computeMedians_(medians_per_seq, reference_);
    }
    if (reference_.empty())
    {
      OPENMS_LOG_WARN << "No reference RT information left after filtering!" << endl;
    }
    double max_rt_shift = (double)param_.getValue("max_rt_shift");
    if (max_rt_shift <= 1)
    {
      // compute max. allowed shift from overall retention time range:
      double rt_min = numeric_limits<double>::infinity(), rt_max = -rt_min;
      for (SeqToValue::iterator it = reference_.begin(); it != reference_.end();
           ++it)
      {
        rt_min = min(rt_min, it->second);
        rt_max = max(rt_max, it->second);
      }
      double rt_range = rt_max - rt_min;
      max_rt_shift *= rt_range;
      // in the degenerate case of only one reference point, "max_rt_shift"
      // should be zero (because "rt_range" is zero) - this is covered below
    }
    if (max_rt_shift == 0)
    {
      // 0 means "no limit": disable the outlier filter
      max_rt_shift = numeric_limits<double>::max();
    }
    OPENMS_LOG_DEBUG << "Max. allowed RT shift (in seconds): " << max_rt_shift << endl;
    // generate RT transformations:
    OPENMS_LOG_DEBUG << "Generating RT transformations..." << endl;
    OPENMS_LOG_INFO << "\nAlignment based on:" << endl; // diagnostic output
    Size offset = 0; // offset in case of internal reference
    // "size + 1" iterations: the extra pass lets an internal reference with
    // reference_index_ == size emit its identity transformation before 'break'
    for (Int i = 0; i < size + 1; ++i)
    {
      if (i == reference_index_)
      {
        // if one of the input maps was used as reference, it has been skipped
        // so far - now we have to consider it again:
        TransformationDescription trafo;
        trafo.fitModel("identity");
        transforms.push_back(trafo);
        OPENMS_LOG_INFO << "- " << reference_.size() << " data points for sample "
                        << i + 1 << " (reference)\n";
        offset = 1;
      }
      if (i >= size) break;
      if (reference_.empty())
      {
        // no usable reference: fall back to identity for this run
        TransformationDescription trafo;
        trafo.fitModel("identity");
        transforms.push_back(trafo);
        continue;
      }
      // to be useful for the alignment, a peptide sequence has to occur in the
      // current run ("medians_per_run[i]"), but also in at least one other run
      // ("medians_overall"):
      TransformationDescription::DataPoints data;
      Size n_outliers = 0;
      for (SeqToValue::iterator med_it = medians_per_run[i].begin();
           med_it != medians_per_run[i].end(); ++med_it)
      {
        SeqToValue::const_iterator pos = reference_.find(med_it->first);
        if (pos != reference_.end())
        {
          if (abs(med_it->second - pos->second) <= max_rt_shift)
          { // found, and satisfies "max_rt_shift" condition!
            TransformationDescription::DataPoint point(med_it->second,
                                                       pos->second, pos->first);
            data.push_back(point);
          }
          else
          {
            n_outliers++;
          }
        }
      }
      transforms.emplace_back(data);
      OPENMS_LOG_INFO << "- " << data.size() << " data points for sample "
                      << i + offset + 1;
      if (n_outliers) OPENMS_LOG_INFO << " (" << n_outliers << " outliers removed)";
      OPENMS_LOG_INFO << "\n";
    }
    OPENMS_LOG_INFO << endl;
    // delete temporary reference
    if (!reference_given) reference_.clear();
  }
// Explicit template instantiations so the templated helper is exported from
// the Windows DLL for both supported input container types:
template bool OPENMS_DLLAPI MapAlignmentAlgorithmIdentification::getRetentionTimes_<>(const ConsensusMap& features, SeqToList& rt_data);
template bool OPENMS_DLLAPI MapAlignmentAlgorithmIdentification::getRetentionTimes_<>(const FeatureMap& features, SeqToList& rt_data);
// Returns a pointer to the best-scoring hit in "hits" (nullptr if empty).
// The comparator returns true if its first argument scores better than the
// second; on ties the earlier hit in the vector wins.
const PeptideHit* MapAlignmentAlgorithmIdentification::getBestScoringHit(const std::vector<PeptideHit>& hits, const bool is_higher_score_better)
{
  auto is_better = PeptideIdentification::getScoreComparator(is_higher_score_better);
  const PeptideHit* best = nullptr;
  for (std::vector<PeptideHit>::const_iterator it = hits.begin(); it != hits.end(); ++it)
  {
    if ((best == nullptr) || is_better(*it, *best))
    {
      best = &(*it);
    }
  }
  return best;
}
} //namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithmQT.cpp | .cpp | 2,007 | 64 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Steffen Sass, Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithmQT.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/QTClusterFinder.h>
#include <OpenMS/ANALYSIS/ID/IonIdentityMolecularNetworking.h>
#include <OpenMS/METADATA/ProteinIdentification.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithm.h>
using namespace std;
namespace OpenMS
{
// Default constructor: registers the algorithm name (used in
// DefaultParamHandler error messages) and adopts all parameters of the
// underlying QTClusterFinder as this algorithm's defaults.
FeatureGroupingAlgorithmQT::FeatureGroupingAlgorithmQT() :
  FeatureGroupingAlgorithm()
{
  setName("FeatureGroupingAlgorithmQT");
  // expose the cluster finder's parameters directly (empty prefix):
  defaults_.insert("", QTClusterFinder().getParameters());
  defaultsToParam_();
}
// Defaulted out-of-line destructor.
FeatureGroupingAlgorithmQT::~FeatureGroupingAlgorithmQT() = default;
template <typename MapType>
void FeatureGroupingAlgorithmQT::group_(const vector<MapType>& maps,
ConsensusMap& out)
{
// check that the number of maps is ok:
if (maps.size() < 2)
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"At least two maps must be given!");
}
QTClusterFinder cluster_finder;
cluster_finder.setParameters(param_.copy("", true));
cluster_finder.run(maps, out);
postprocess_(maps, out);
}
// Groups corresponding features across feature maps; delegates to the
// templated implementation.
void FeatureGroupingAlgorithmQT::group(const std::vector<FeatureMap>& maps,
                                       ConsensusMap& out)
{
  group_(maps, out);
}
// Groups corresponding features across consensus maps; delegates to the
// templated implementation.
void FeatureGroupingAlgorithmQT::group(const std::vector<ConsensusMap>& maps,
                                       ConsensusMap& out)
{
  group_(maps, out);
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/TransformationModelLinear.cpp | .cpp | 6,290 | 159 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Hendrik Weisser, Eugen Netz $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/TransformationModelLinear.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <Mathematics/Vector2.h>
#include <Mathematics/ApprHeightLine2.h>
namespace OpenMS
{
// Constructs a linear transformation model. If no data points are given but
// "slope"/"intercept" parameters exist, those values are used directly;
// otherwise slope and intercept are estimated from the (optionally weighted)
// data points. Throws IllegalArgument if there is no data to fit, and
// UnableToFit if a finite line cannot be determined.
TransformationModelLinear::TransformationModelLinear(const TransformationModel::DataPoints& data, const Param& params) :
  TransformationModel(data, params) // initializes model
{
  data_given_ = !data.empty();
  if (!data_given_ && params.exists("slope") && params.exists("intercept"))
  {
    // don't estimate parameters, use given values
    slope_ = params.getValue("slope");
    intercept_ = params.getValue("intercept");
  }
  else // estimate parameters from data
  {
    Param defaults;
    getDefaultParameters(defaults);
    params_.setDefaults(defaults);
    symmetric_ = params_.getValue("symmetric_regression") == "true";
    // weight the data (if weighting is specified)
    TransformationModel::DataPoints data_weighted = data;
    // TrafoXML's prior to OpenMS 3.0 have x/y_weight = "" if unweighted
    if ((params.exists("x_weight") && params.getValue("x_weight") != "x" && params.getValue("x_weight") != "") ||
        (params.exists("y_weight") && params.getValue("y_weight") != "y" && params.getValue("y_weight") != ""))
    {
      weightData(data_weighted);
    }
    size_t size = data_weighted.size();
    std::vector<gte::Vector2<double>> points;
    if (size == 0) // no data
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                       "no data points for 'linear' model");
    }
    else if (size == 1) // degenerate case, but we can still do something
    {
      // assume slope 1 and shift by the single point's offset:
      slope_ = 1.0;
      intercept_ = data_weighted[0].second - data_weighted[0].first;
    }
    else if (size == 2)
    {
      // if the two points are too close, gte::HeightLineFit2 can't fit a line
      // but in the special case of two points, there is an exact solution and we don't need a least-squares fit
      slope_ = (data_weighted[1].second - data_weighted[0].second) / (data_weighted[1].first - data_weighted[0].first);
      intercept_ = data_weighted[0].second - (slope_ * data_weighted[0].first);
      if (std::isinf(slope_) || std::isnan(slope_) || std::isinf(intercept_) || std::isnan(intercept_))
      {
        throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "TransformationModelLinear", "Unable to fit linear transformation to the two data points.");
      }
    }
    else // compute least-squares fit
    {
      for (size_t i = 0; i < size; ++i)
      {
        points.emplace_back(std::initializer_list<double>{data_weighted[i].first, data_weighted[i].second});
      }
      auto line = gte::ApprHeightLine2<double>();
      if (!line.Fit(static_cast<int>(size), &points.front()))
      {
        throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "TransformationModelLinear", "Unable to fit linear transformation to data points.");
      }
      // convert the fitted (point, direction) representation to slope/intercept:
      slope_ = line.GetParameters().second[0];
      intercept_ = -slope_ * line.GetParameters().first[0] + line.GetParameters().first[1];
    }
    // update params
    params_.setValue("slope", slope_);
    params_.setValue("intercept", intercept_);
  }
}
// Applies y = slope * x + intercept; if weighting is configured, the input is
// transformed into weighted space first and the result un-weighted afterwards.
double TransformationModelLinear::evaluate(double value) const
{
  if (weighting_)
  {
    const double weighted = weightDatum(value, x_weight_);
    return unWeightDatum(slope_ * weighted + intercept_, y_weight_);
  }
  return slope_ * value + intercept_;
}
// Inverts the model in place (x -> y becomes y -> x), including the weighting
// configuration. Throws DivisionByZero for a horizontal line (slope 0), which
// has no inverse.
void TransformationModelLinear::invert()
{
  if (slope_ == 0)
  {
    throw Exception::DivisionByZero(__FILE__, __LINE__,
                                    OPENMS_PRETTY_FUNCTION);
  }
  // note: the new intercept must be computed from the *old* slope
  intercept_ = -intercept_ / slope_;
  slope_ = 1.0 / slope_;
  // swap the roles of x and y in the weighting configuration:
  std::swap(x_weight_, y_weight_);
  std::swap(x_datum_min_, y_datum_min_);
  std::swap(x_datum_max_, y_datum_max_);
  // store the inverted state back into the parameter object:
  params_.setValue("slope", slope_);
  params_.setValue("intercept", intercept_);
  params_.setValue("x_weight", x_weight_);
  params_.setValue("y_weight", y_weight_);
  params_.setValue("x_datum_min", x_datum_min_);
  params_.setValue("x_datum_max", x_datum_max_);
  params_.setValue("y_datum_min", y_datum_min_);
  params_.setValue("y_datum_max", y_datum_max_);
}
// Returns slope/intercept and the full weighting configuration via output
// parameters.
void TransformationModelLinear::getParameters(double& slope, double& intercept, String& x_weight, String& y_weight, double& x_datum_min, double& x_datum_max, double& y_datum_min, double& y_datum_max) const
{
  slope = slope_;
  intercept = intercept_;
  x_weight = x_weight_;
  y_weight = y_weight_;
  x_datum_min = x_datum_min_;
  x_datum_max = x_datum_max_;
  y_datum_min = y_datum_min_;
  y_datum_max = y_datum_max_;
}
// Fills "params" with the defaults of the linear model: regression mode,
// x/y weighting functions ("x"/"y" = no weighting), and clamping ranges for
// values entering the weighting functions.
void TransformationModelLinear::getDefaultParameters(Param& params)
{
  params.clear();
  params.setValue("symmetric_regression", "false", "Perform linear regression"
                  " on 'y - x' vs. 'y + x', instead of on 'y' vs. 'x'.");
  params.setValidStrings("symmetric_regression",
                         {"true","false"});
  params.setValue("x_weight", "x", "Weight x values");
  params.setValidStrings("x_weight",
                         {"1/x","1/x2","ln(x)","x"});
  params.setValue("y_weight", "y", "Weight y values");
  params.setValidStrings("y_weight",
                         {"1/y","1/y2","ln(y)","y"});
  params.setValue("x_datum_min", 1e-15, "Minimum x value");
  params.setValue("x_datum_max", 1e15, "Maximum x value");
  params.setValue("y_datum_min", 1e-15, "Minimum y value");
  params.setValue("y_datum_max", 1e15, "Maximum y value");
}
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/TransformationDescription.cpp | .cpp | 10,574 | 337 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Clemens Groepl, Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/TransformationDescription.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/TransformationModelBSpline.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/TransformationModelInterpolated.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/TransformationModelLowess.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <cmath>
#include <iomanip>
#include <iostream>
using namespace std;
namespace OpenMS
{
// Default constructor: no data points, trivial ("none") model.
TransformationDescription::TransformationDescription() :
  data_(TransformationDescription::DataPoints()),
  model_type_("none"),
  model_(new TransformationModel())
{
}
// Constructor from data points; no model is fitted yet (type "none").
TransformationDescription::TransformationDescription(
  const TransformationDescription::DataPoints& data) :
  data_(data), model_type_("none"),
  model_(new TransformationModel())
{
}
// Destructor: the description owns its model.
TransformationDescription::~TransformationDescription()
{
  delete model_;
}
// Copy constructor: copies the data points, then re-creates the model by
// refitting the same model type with the same parameters.
TransformationDescription::TransformationDescription(
  const TransformationDescription& rhs)
{
  data_ = rhs.data_;
  model_type_ = "none";
  model_ = nullptr; // initialize this before the "delete" call in "fitModel"!
  Param params = rhs.getModelParameters();
  fitModel(rhs.model_type_, params);
}
// Assignment: copies the data points and refits the model of "rhs" (with its
// parameters). Self-assignment is a no-op.
TransformationDescription& TransformationDescription::operator=(
  const TransformationDescription& rhs)
{
  if (this != &rhs)
  {
    data_ = rhs.data_;
    // reset the type first - fitModel() is a no-op while it is "identity":
    model_type_ = "none";
    Param model_params = rhs.getModelParameters();
    fitModel(rhs.model_type_, model_params);
  }
  return *this;
}
// Fits a model of the given type to the stored data points; "none" and
// "identity" install a trivial model. NOTE: if the *current* model type is
// "identity", the call is a no-op and the identity transformation is kept.
// Throws IllegalArgument for unknown model types.
void TransformationDescription::fitModel(const String& model_type,
                                         const Param& params)
{
  // if (previous) transformation is the identity, don't fit another model:
  if (model_type_ == "identity") return;
  delete model_;
  model_ = nullptr; // avoid segmentation fault in case of exception
  if ((model_type == "none") || (model_type == "identity"))
  {
    model_ = new TransformationModel();
  }
  else if (model_type == "linear")
  {
    model_ = new TransformationModelLinear(data_, params);
    // // debug output:
    // double slope, intercept;
    // TransformationModelLinear* lm = dynamic_cast<TransformationModelLinear*>(model_);
    // lm->getParameters(slope, intercept);
    // cout << "slope: " << slope << ", intercept: " << intercept << endl;
  }
  else if (model_type == "b_spline")
  {
    model_ = new TransformationModelBSpline(data_, params);
  }
  else if (model_type == "lowess")
  {
    model_ = new TransformationModelLowess(data_, params);
  }
  else if (model_type == "interpolated")
  {
    model_ = new TransformationModelInterpolated(data_, params);
  }
  else
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "unknown model type '" + model_type + "'");
  }
  model_type_ = model_type;
}
// Applies the fitted model to a single value.
double TransformationDescription::apply(double value) const
{
  return model_->evaluate(value);
}
// Returns the type of the currently fitted model (one of: "none", "identity",
// "linear", "b_spline", "lowess", "interpolated").
const String& TransformationDescription::getModelType() const
{
  return model_type_;
}
// Lists all fittable model types; "none" and "identity" are deliberately
// excluded.
void TransformationDescription::getModelTypes(StringList& result)
{
  result = {"linear", "b_spline", "interpolated", "lowess"};
}
// Replaces the data points and discards any fitted model.
void TransformationDescription::setDataPoints(const DataPoints& data)
{
  data_ = data;
  model_type_ = "none"; // reset the model even if it was "identity"
  delete model_;
  model_ = new TransformationModel();
}
// Replaces the data points from plain (x, y) pairs and discards any fitted
// model.
void TransformationDescription::setDataPoints(const vector<pair<double, double> >& data)
{
  data_.clear();
  data_.reserve(data.size());
  for (const auto& xy : data)
  {
    data_.push_back(xy); // converts pair -> DataPoint
  }
  model_type_ = "none"; // reset the model even if it was "identity"
  delete model_;
  model_ = new TransformationModel();
}
// Read-only access to the stored data points.
const TransformationDescription::DataPoints&
TransformationDescription::getDataPoints() const
{
  return data_;
}
// Returns the parameters of the currently fitted model.
const Param& TransformationDescription::getModelParameters() const
{
  return model_->getParameters();
}
// Inverts the transformation: swaps x and y of every data point (each point's
// note is kept) and refits the model on the swapped data.
void TransformationDescription::invert()
{
  for (auto& point : data_)
  {
    point = TransformationDescription::DataPoint(point.second, point.first,
                                                 point.note);
  }
  // ugly hack for linear model with explicit slope/intercept parameters:
  if ((model_type_ == "linear") && data_.empty())
  {
    TransformationModelLinear* lm =
      dynamic_cast<TransformationModelLinear*>(model_);
    lm->invert();
  }
  else
  {
    Param params = getModelParameters();
    fitModel(model_type_, params);
  }
}
// Fills "diffs" with the absolute x/y deviations of the data points. With
// "do_apply" the model is applied to x first (i.e. |f(x) - y| instead of
// |x - y|); with "do_sort" the result is sorted ascending.
void TransformationDescription::getDeviations(vector<double>& diffs,
                                              bool do_apply,
                                              bool do_sort) const
{
  diffs.clear();
  diffs.reserve(data_.size());
  for (const auto& point : data_)
  {
    const double x = do_apply ? apply(point.first) : point.first;
    diffs.push_back(abs(x - point.second));
  }
  if (do_sort)
  {
    sort(diffs.begin(), diffs.end());
  }
}
// Computes summary statistics: x/y data ranges and percentiles of the
// absolute x/y deviations before and after applying the model. Returns
// default-constructed statistics if there are no data points.
TransformationDescription::TransformationStatistics TransformationDescription::getStatistics() const
{
  TransformationDescription::TransformationStatistics s;
  if (data_.empty()) return s;
  // x/y data ranges:
  double xmin, xmax, ymin, ymax;
  xmin = xmax = data_[0].first;
  ymin = ymax = data_[0].second;
  for (DataPoints::const_iterator it = ++data_.begin(); it != data_.end();
       ++it)
  {
    if (xmin > it->first) xmin = it->first;
    if (xmax < it->first) xmax = it->first;
    if (ymin > it->second) ymin = it->second;
    if (ymax < it->second) ymax = it->second;
  }
  s.xmin = xmin;
  s.xmax = xmax;
  s.ymin = ymin;
  s.ymax = ymax;
  // deviations (sorted ascending by getDeviations):
  vector<double> diffs;
  getDeviations(diffs);
  // Maps a percentage to a valid index into "diffs". The naive expression
  // "p / 100.0 * size - 1" can become negative for small percentages/sample
  // sizes, which would underflow the unsigned index type (UB); clamp to the
  // valid range instead.
  auto percentile_index = [&diffs](Size p) -> Size
  {
    double pos = p / 100.0 * diffs.size() - 1.0;
    if (pos <= 0.0) return 0;
    Size index = static_cast<Size>(pos);
    return (index < diffs.size()) ? index : diffs.size() - 1;
  };
  bool no_model = (model_type_ == "none") || (model_type_ == "identity");
  for (const Size p : s.percents)
  {
    s.percentiles_before[p] = diffs[percentile_index(p)];
  }
  // if we have a model, calculate deviations after applying the model;
  // else the "after" percentiles equal the "before" ones
  if (!no_model) { getDeviations(diffs, true); }
  for (const Size p : s.percents)
  {
    s.percentiles_after[p] = diffs[percentile_index(p)];
  }
  return s;
}
// Prints a human-readable summary (data ranges and deviation percentiles) to
// "os". All numbers come from getStatistics(); the original implementation
// additionally called getDeviations() twice and discarded the results, which
// was pure dead work and has been removed.
void TransformationDescription::printSummary(ostream& os) const
{
  const TransformationStatistics s = getStatistics();
  os << "Number of data points (x/y pairs): " << data_.size() << "\n";
  if (data_.empty()) return;
  os << "Data range (x): " << s.xmin << " to " << s.xmax
     << "\nData range (y): " << s.ymin << " to " << s.ymax << "\n";
  bool no_model = (model_type_ == "none") || (model_type_ == "identity");
  os << String("Summary of x/y deviations") +
    (no_model ? "" : " before transformation") + ":\n";
  for (Size p : s.percents)
  {
    os << "- " << setw(3) << p << "% of data points within (+/-)"
       << s.percentiles_before.at(p) << "\n";
  }
  if (no_model)
  {
    os << endl;
    return;
  }
  // else: also report deviations after applying the model:
  os << "Summary of x/y deviations after applying '" << model_type_
     << "' transformation:\n";
  for (Size p : s.percents)
  {
    os << "- " << setw(3) << p << "% of data points within (+/-)"
       << s.percentiles_after.at(p) << "\n";
  }
  os << endl;
}
// Estimates an extraction window size from the transformation's residuals:
// takes an adaptive quantile of |y - f(x)|, optionally doubles it to a full
// (two-sided) window, and applies a padding factor. With "invert", residuals
// are measured after inverting the transformation (i.e. in the target space
// of the inverse mapping). Returns 0 if no finite residuals are available.
double TransformationDescription::estimateWindow(double quantile,
                                                 bool invert,
                                                 bool full_window,
                                                 double padding_factor) const
{
  // Work on a copy so we don't mutate the original
  TransformationDescription tmp(*this);
  if (invert)
  {
    // Map iRT→RT and refit the inverse model so residuals are measured in RT space
    tmp.invert();
  }
  // Compute absolute residuals | y - f(x) | in the (possibly inverted) space
  std::vector<double> diffs;
  tmp.getDeviations(diffs, /*do_apply=*/true, /*do_sort=*/false);
  // Drop non-finite values defensively
  diffs.erase(std::remove_if(diffs.begin(), diffs.end(),
                             [](double v){ return !std::isfinite(v); }),
              diffs.end());
  if (diffs.empty())
  {
    OPENMS_LOG_DEBUG << "[estimateWindow] no residuals; returning 0" << std::endl;
    return 0.0;
  }
  // Compute adaptive quantile (Tukey k=1.5, r_sparse=1%, r_dense=10%)
  // k=1.5 uses the standard Tukey upper fence (Q3 + 1.5·IQR) to cap sparse extremes proposed in Exploratory Data Analysis by John W. Tukey (1977)
  // r_sparse=0.01 means if ≤1% of |residuals| exceed the fence, treat them as outliers (favor robust quantile);
  // r_dense=0.10 means if ≥10% exceed the fence, tails are genuinely broad (favor raw quantile).
  // These values are conservative, widely used in stats.
  OpenMS::Math::AdaptiveQuantileResult adaptive_quantile_res = OpenMS::Math::adaptiveQuantile(
    diffs.begin(), diffs.end(), quantile,
    /*k=*/1.5, /*r_sparse=*/0.01, /*r_dense=*/0.10);
  const double full = (full_window ? (2.0 * adaptive_quantile_res.blended) : adaptive_quantile_res.blended) * padding_factor;
  OPENMS_LOG_DEBUG
    << "[estimateWindow] n=" << diffs.size()
    << " q=" << quantile
    << " half_raw=" << adaptive_quantile_res.half_raw
    << " half_rob=" << adaptive_quantile_res.half_rob
    << " UF=" << adaptive_quantile_res.upper_fence
    << " tail_frac=" << adaptive_quantile_res.tail_fraction
    << " => half_adapt=" << adaptive_quantile_res.blended
    << " full=" << full
    << " invert=" << (invert ? "true" : "false")
    << std::endl;
  return full;
}
} // end of namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/TransformationModel.cpp | .cpp | 7,378 | 250 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: $
// $Authors: Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/TransformationModel.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <iostream> // std::cout
#include <algorithm> // std::find
#include <cmath> // std::log
namespace OpenMS
{
// Base-class constructor: reads the weighting configuration ("x_weight",
// "y_weight" and the datum clamping ranges) from "p" and validates it.
// The data points are ignored here (derived models use them for fitting).
// Throws InvalidParameter for unknown weighting functions.
TransformationModel::TransformationModel(const TransformationModel::DataPoints&, const Param& p) :
  params_(p),
  x_weight_("x"),
  x_datum_min_(0),
  x_datum_max_(0),
  y_weight_("y"),
  y_datum_min_(0),
  y_datum_max_(0),
  weighting_(false)
{
  // get x datum ranges
  x_datum_min_ = params_.exists("x_datum_min") ? (double)params_.getValue("x_datum_min") : 1e-15;
  x_datum_max_ = params_.exists("x_datum_max") ? (double)params_.getValue("x_datum_max") : 1e15;
  // get y datum ranges
  y_datum_min_ = params_.exists("y_datum_min") ? (double)params_.getValue("y_datum_min") : 1e-15;
  y_datum_max_ = params_.exists("y_datum_max") ? (double)params_.getValue("y_datum_max") : 1e15;
  // TrafoXML's prior to OpenMS 3.0 have x/y_weight = "" if unweighted
  x_weight_ = params_.exists("x_weight") && (params_.getValue("x_weight") != "") ? String(params_.getValue("x_weight").toString()) : "x";
  y_weight_ = params_.exists("y_weight") && (params_.getValue("y_weight") != "") ? String(params_.getValue("y_weight").toString()) : "y";
  std::vector<String> valid_x_weights = getValidXWeights();
  std::vector<String> valid_y_weights = getValidYWeights();
  // reject unknown weighting functions ("x"/"y" mean "no weighting"):
  if (x_weight_ != "x" && !checkValidWeight(x_weight_, valid_x_weights))
  {
    throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Value '" + x_weight_ + "' is not a valid weight parameter for x values.");
  }
  if (y_weight_ != "y" && !checkValidWeight(y_weight_, valid_y_weights))
  {
    throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Value '" + y_weight_ + "' is not a valid weight parameter for y values.");
  }
  // easily remember whether we do weighting or not
  weighting_ = !(x_weight_ == "x" && y_weight_ == "y");
}
// Defaulted out-of-line destructor.
TransformationModel::~TransformationModel() = default;
// Identity transformation; derived model classes override this with the
// actual mapping function.
double TransformationModel::evaluate(double value) const
{
  return value;
}
// Returns the parameters this model was constructed with.
const Param& TransformationModel::getParameters() const
{
  return params_;
}
// The base model has no parameters of its own; just clears "params".
void TransformationModel::getDefaultParameters(Param& params)
{
  params.clear();
}
// Applies the configured weighting functions to the data points in place:
// each coordinate is clamped to its allowed range first, then transformed.
// No-op if weighting is disabled.
void TransformationModel::weightData(TransformationModel::DataPoints& data)
{
  if (!weighting_) return;
  // x values ("x" means: leave unweighted):
  if (x_weight_ != "x")
  {
    for (auto& point : data)
    {
      point.first = checkDatumRange(point.first, x_datum_min_, x_datum_max_);
      point.first = weightDatum(point.first, x_weight_);
    }
  }
  // y values ("y" means: leave unweighted):
  if (y_weight_ != "y")
  {
    for (auto& point : data)
    {
      point.second = checkDatumRange(point.second, y_datum_min_, y_datum_max_);
      point.second = weightDatum(point.second, y_weight_);
    }
  }
}
// Reverses the configured weighting functions on the data points in place.
// No-op if weighting is disabled.
void TransformationModel::unWeightData(TransformationModel::DataPoints& data)
{
  if (!weighting_) return;
  // x values:
  if (x_weight_ != "x")
  {
    for (auto& point : data)
    {
      point.first = unWeightDatum(point.first, x_weight_);
    }
  }
  // y values:
  if (y_weight_ != "y")
  {
    for (auto& point : data)
    {
      point.second = unWeightDatum(point.second, y_weight_);
    }
  }
}
// Returns true if "weight" is contained in "valid_weights"; otherwise logs an
// informational message and returns false.
bool TransformationModel::checkValidWeight(const String& weight, const std::vector<String>& valid_weights) const
{
  const bool valid =
    (std::find(valid_weights.begin(), valid_weights.end(), weight) != valid_weights.end());
  if (!valid)
  {
    OPENMS_LOG_INFO << "weight " + weight + " is not supported.";
  }
  return valid;
}
// Clamps "datum" into (datum_min, datum_max); values at or beyond either
// bound are truncated to that bound (with an informational log message).
double TransformationModel::checkDatumRange(const double& datum, const double& datum_min, const double& datum_max)
{
  if (datum >= datum_max)
  {
    OPENMS_LOG_INFO << "datum " << datum << " is out of range.";
    OPENMS_LOG_INFO << "datum will be truncated to " << datum_max << ".";
    return datum_max;
  }
  if (datum <= datum_min)
  {
    OPENMS_LOG_INFO << "datum " << datum << " is out of range.";
    OPENMS_LOG_INFO << "datum will be truncated to " << datum_min << ".";
    return datum_min;
  }
  return datum;
}
std::vector<String> TransformationModel::getValidXWeights() const
{
std::vector<String> valid_weights{"1/x","1/x2","ln(x)","x"}; // == 1 disables weights
return valid_weights;
}
std::vector<String> TransformationModel::getValidYWeights() const
{
std::vector<String> valid_weights{"1/y","1/y2","ln(y)","y"}; // == 1 disables weights
return valid_weights;
}
// Applies the named weighting function to a single value. The x and y
// variants of each function are identical, so they share one branch.
// Unknown weights log a message and return the value unchanged.
double TransformationModel::weightDatum(const double& datum, const String& weight) const
{
  if (weight == "ln(x)" || weight == "ln(y)")
  {
    return std::log(datum);
  }
  if (weight == "1/x" || weight == "1/y")
  {
    return 1 / std::abs(datum);
  }
  if (weight == "1/x2" || weight == "1/y2")
  {
    return 1 / std::pow(datum, 2);
  }
  if (weight == "x" || weight == "y")
  {
    return datum;
  }
  OPENMS_LOG_INFO << "weight " + weight + " not supported.";
  OPENMS_LOG_INFO << "no weighting will be applied.";
  return datum;
}
// Reverses the named weighting function for a single value (inverse of
// weightDatum). The x and y variants are identical, so they share one branch.
// Unknown weights log a message and return the value unchanged.
double TransformationModel::unWeightDatum(const double& datum, const String& weight) const
{
  if (weight == "ln(x)" || weight == "ln(y)")
  {
    return std::exp(datum);
  }
  if (weight == "1/x" || weight == "1/y")
  {
    return 1 / std::abs(datum);
  }
  if (weight == "1/x2" || weight == "1/y2")
  {
    return std::sqrt(1 / std::abs(datum));
  }
  if (weight == "x" || weight == "y")
  {
    return datum;
  }
  OPENMS_LOG_INFO << "weight " + weight + " not supported.";
  OPENMS_LOG_INFO << "no weighting will be applied.";
  return datum;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/StablePairFinder.cpp | .cpp | 10,683 | 282 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/StablePairFinder.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureDistance.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#ifdef Debug_StablePairFinder
#define V_(bla) std::cout << __FILE__ ":" << __LINE__ << ": " << bla << std::endl;
#else
#define V_(bla) {};
#endif
// #define VV_(bla) V_("" # bla ": " << bla);
using namespace std;
namespace OpenMS
{
// Default constructor: registers the parameters controlling the pairing
// ("second_nearest_gap", "use_identifications") plus all parameters of the
// FeatureDistance functor.
StablePairFinder::StablePairFinder() :
  Base()
{
  //set the name for DefaultParamHandler error messages
  Base::setName("StablePairFinder");
  defaults_.setValue("second_nearest_gap", 2.0, "Only link features whose distance to the second nearest neighbors (for both sides) is larger by 'second_nearest_gap' than the distance between the matched pair itself.");
  defaults_.setMinFloat("second_nearest_gap", 1.0);
  defaults_.setValue("use_identifications", "false", "Never link features that are annotated with different peptides (features without ID's always match; only the best hit per peptide identification is considered).");
  defaults_.setValidStrings("use_identifications", {"true","false"});
  defaults_.insert("", FeatureDistance().getDefaults());
  Base::defaultsToParam_();
}
// Called whenever parameters change: caches parameter values in data members.
void StablePairFinder::updateMembers_()
{
  V_("@@@ StablePairFinder::updateMembers_()"); // debug trace; no-op unless Debug_StablePairFinder is defined
  second_nearest_gap_ = param_.getValue("second_nearest_gap");
  use_IDs_ = param_.getValue("use_identifications").toBool();
}
// Pairs features between exactly two input maps: computes the nearest and
// second-nearest neighbor of every feature in the respective other map, then
// links mutual nearest neighbors whose second-nearest distance is larger by
// the "second_nearest_gap" factor. Unmatched features are passed through as
// singletons with quality 0. (Fix: removed a stray double semicolon after the
// "max_intensity" initialization.)
void StablePairFinder::run(const std::vector<ConsensusMap>& input_maps,
                           ConsensusMap& result_map)
{
  // empty output destination:
  result_map.clear(false);
  // sanity checks:
  if (input_maps.size() != 2)
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                     "exactly two input maps required");
  }
  checkIds_(input_maps);
  // set up the distance functor, normalizing by the largest intensity seen in
  // either input map:
  double max_intensity = std::numeric_limits<double>::lowest();
  if (!input_maps[0].RangeIntensity::isEmpty())
  {
    max_intensity = input_maps[0].getMaxIntensity();
  }
  if (!input_maps[1].RangeIntensity::isEmpty())
  {
    max_intensity = max(max_intensity, input_maps[1].getMaxIntensity());
  }
  Param distance_params = param_.copy("");
  distance_params.remove("use_identifications");
  distance_params.remove("second_nearest_gap");
  FeatureDistance feature_distance(max_intensity, false);
  feature_distance.setParameters(distance_params);
  // keep track of pairing:
  std::vector<bool> is_singleton[2];
  is_singleton[0].resize(input_maps[0].size(), true);
  is_singleton[1].resize(input_maps[1].size(), true);
  typedef pair<double, double> DoublePair;
  DoublePair init = make_pair(FeatureDistance::infinity,
                              FeatureDistance::infinity);
  // for every element in map 0:
  // - index of nearest neighbor in map 1:
  vector<UInt> nn_index_0(input_maps[0].size(), UInt(-1));
  // - distances to nearest and second-nearest neighbors in map 1:
  vector<DoublePair> nn_distance_0(input_maps[0].size(), init);
  // for every element in map 1:
  // - index of nearest neighbor in map 0:
  vector<UInt> nn_index_1(input_maps[1].size(), UInt(-1));
  // - distances to nearest and second-nearest neighbors in map 0:
  vector<DoublePair> nn_distance_1(input_maps[1].size(), init);
  // iterate over all feature pairs, find nearest neighbors:
  // TODO: iterate over SENSIBLE RT (and m/z) window -- sort the maps beforehand
  // to save a lot of processing time...
  // Once done, remove the warning in the description of the 'use_identifications' parameter
  for (UInt fi0 = 0; fi0 < input_maps[0].size(); ++fi0)
  {
    const ConsensusFeature& feat0 = input_maps[0][fi0];
    for (UInt fi1 = 0; fi1 < input_maps[1].size(); ++fi1)
    {
      const ConsensusFeature& feat1 = input_maps[1][fi1];
      if (use_IDs_ && !compatibleIDs_(feat0, feat1)) // check peptide IDs
      {
        continue; // mismatch
      }
      pair<bool, double> result = feature_distance(feat0, feat1);
      double distance = result.second;
      // we only care if distance constraints are satisfied for "best
      // matches", not for second-best; this means that second-best distances
      // can become smaller than best distances
      // (e.g. the RT is larger than allowed (->invalid pair), but m/z is perfect and has the most weight --> better score!)
      bool valid = result.first;
      // update entries for map 0:
      if (distance < nn_distance_0[fi0].second)
      {
        if (valid && (distance < nn_distance_0[fi0].first))
        {
          nn_distance_0[fi0].second = nn_distance_0[fi0].first;
          nn_distance_0[fi0].first = distance;
          nn_index_0[fi0] = fi1;
        }
        else
        {
          nn_distance_0[fi0].second = distance;
        }
      }
      // update entries for map 1:
      if (distance < nn_distance_1[fi1].second)
      {
        if (valid && (distance < nn_distance_1[fi1].first))
        {
          nn_distance_1[fi1].second = nn_distance_1[fi1].first;
          nn_distance_1[fi1].first = distance;
          nn_index_1[fi1] = fi0;
        }
        else
        {
          nn_distance_1[fi1].second = distance;
        }
      }
    }
  }
  // if features from the two maps are mutual nearest neighbors, they can
  // become a pair:
  for (UInt fi0 = 0; fi0 < input_maps[0].size(); ++fi0)
  {
    UInt fi1 = nn_index_0[fi0]; // nearest neighbor of "fi0" in map 1
    // criteria set by the parameters must be fulfilled:
    if ((nn_distance_0[fi0].first < FeatureDistance::infinity) &&
        (nn_distance_0[fi0].first * second_nearest_gap_ <= nn_distance_0[fi0].second))
    {
      // "fi0" satisfies constraints...
      if ((nn_index_1[fi1] == fi0) &&
          (nn_distance_1[fi1].first * second_nearest_gap_ <= nn_distance_1[fi1].second))
      {
        // ...nearest neighbor of "fi0" also satisfies constraints (yay!)
        result_map.push_back(ConsensusFeature());
        ConsensusFeature& f = result_map.back();
        f.insert(input_maps[0][fi0]);
        f.insert(input_maps[1][fi1]);
        f.computeConsensus();
        double quality = 1.0 - nn_distance_0[fi0].first;
        double quality0 = 1.0 - nn_distance_0[fi0].first * second_nearest_gap_ / nn_distance_0[fi0].second;
        double quality1 = 1.0 - nn_distance_1[fi1].first * second_nearest_gap_ / nn_distance_1[fi1].second;
        quality = quality * quality0 * quality1; // TODO other formula?
        // incorporate existing quality values:
        Size size0 = max(input_maps[0][fi0].size(), size_t(1));
        Size size1 = max(input_maps[1][fi1].size(), size_t(1));
        // quality contribution from first map:
        quality0 = input_maps[0][fi0].getQuality() * (size0 - 1);
        // quality contribution from second map:
        quality1 = input_maps[1][fi1].getQuality() * (size1 - 1);
        f.setQuality((quality + quality0 + quality1) / (size0 + size1 - 1));
        is_singleton[0][fi0] = false;
        is_singleton[1][fi1] = false;
      }
    }
  }
  // write out unmatched consensus features
  for (UInt input = 0; input <= 1; ++input)
  {
    for (UInt index = 0; index < input_maps[input].size(); ++index)
    {
      if (is_singleton[input][index])
      {
        result_map.push_back(input_maps[input][index]);
        if (result_map.back().size() < 2) // singleton consensus feature
        {
          result_map.back().setQuality(0.0);
        }
      }
    }
  }
  // canonical ordering for checking the results, and the ids have no real meaning anyway
  result_map.sortByMZ();
  // protein IDs and unassigned peptide IDs are added to the result by the
  // FeatureGroupingAlgorithm!
}
bool StablePairFinder::compatibleIDs_(const ConsensusFeature& feat1, const ConsensusFeature& feat2) const
{
// a feature without identifications always matches:
if (feat1.getPeptideIdentifications().empty() || feat2.getPeptideIdentifications().empty())
return true;
const PeptideIdentificationList& pep1 = feat1.getPeptideIdentifications();
const PeptideIdentificationList& pep2 = feat2.getPeptideIdentifications();
set<String> best1, best2;
for (PeptideIdentificationList::const_iterator pep_it = pep1.begin(); pep_it != pep1.end(); ++pep_it)
{
if (pep_it->getHits().empty())
continue; // shouldn't be the case
best1.insert(getBestHitSequence_(*pep_it).toString());
}
for (PeptideIdentificationList::const_iterator pep_it = pep2.begin(); pep_it != pep2.end(); ++pep_it)
{
if (pep_it->getHits().empty())
continue; // shouldn't be the case
best2.insert(getBestHitSequence_(*pep_it).toString());
}
return best1 == best2;
}
const AASequence& StablePairFinder::getBestHitSequence_(const PeptideIdentification& peptideIdentification) const
{
  // Select the best-scoring hit. With higher-is-better scores, passing the
  // "ScoreMore" (greater-than) comparator to min_element yields the hit with
  // the maximal score; otherwise "ScoreLess" yields the minimal-score hit.
  // Precondition (ensured by callers): getHits() is not empty.
  const auto& hits = peptideIdentification.getHits();
  if (peptideIdentification.isHigherScoreBetter())
  {
    return std::min_element(hits.begin(), hits.end(), PeptideHit::ScoreMore())->getSequence();
  }
  return std::min_element(hits.begin(), hits.end(), PeptideHit::ScoreLess())->getSequence();
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/FeatureMapping.cpp | .cpp | 3,116 | 76 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Oliver Alka $
// $Authors: Oliver Alka $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureMapping.h>
#include <OpenMS/MATH/MathFunctions.h>
using namespace std;
namespace OpenMS
{
// return map of ms2 to feature and a vector of unassigned ms2
// Maps every MS2 spectrum to the feature whose m/z is closest to its first
// precursor, searching within the given RT / m/z tolerance window.
// Returns the feature->spectrum-indices map plus the indices of MS2 spectra
// that matched no feature. MS2 spectra without precursor information are
// silently skipped (neither assigned nor reported as unassigned).
FeatureMapping::FeatureToMs2Indices FeatureMapping::assignMS2IndexToFeature(const OpenMS::MSExperiment& spectra,
                                                                            const FeatureMappingInfo& fm_info,
                                                                            const double& precursor_mz_tolerance,
                                                                            const double& precursor_rt_tolerance,
                                                                            bool ppm)
{
  std::map<const BaseFeature*, std::vector<size_t>> feature_to_scans;
  vector<size_t> orphan_scans;

  for (size_t scan_idx = 0; scan_idx != spectra.size(); ++scan_idx)
  {
    if (spectra[scan_idx].getMSLevel() != 2) { continue; }

    const vector<Precursor>& precursors = spectra[scan_idx].getPrecursors();
    if (precursors.empty()) { continue; } // no precursor meta data available

    const double prec_mz = precursors[0].getMZ();
    const double scan_rt = spectra[scan_idx].getRT();

    // query all features inside the RT window and the (absolute or ppm) m/z window
    const std::pair<double, double> mz_win = Math::getTolWindow(prec_mz, precursor_mz_tolerance, ppm);
    vector<Size> candidates;
    fm_info.kd_tree.queryRegion(scan_rt - precursor_rt_tolerance, scan_rt + precursor_rt_tolerance, mz_win.first, mz_win.second, candidates, true);

    if (candidates.empty()) // no feature explains this precursor
    {
      orphan_scans.push_back(scan_idx);
      continue;
    }

    // among all candidates, pick the one whose m/z is closest to the precursor
    Size best_idx = candidates.front();
    double best_dist = fabs(fm_info.kd_tree.mz(best_idx) - prec_mz);
    for (Size cand : candidates)
    {
      const double dist = fabs(fm_info.kd_tree.mz(cand) - prec_mz);
      if (dist < best_dist)
      {
        best_dist = dist;
        best_idx = cand;
      }
    }
    feature_to_scans[fm_info.kd_tree.feature(best_idx)].push_back(scan_idx);
  }

  FeatureMapping::FeatureToMs2Indices mapping;
  mapping.assignedMS2 = feature_to_scans;
  mapping.unassignedMS2 = orphan_scans;
  return mapping;
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/MapAlignmentEvaluationAlgorithmPrecision.cpp | .cpp | 3,601 | 99 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Katharina Albers $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentEvaluationAlgorithmPrecision.h>
namespace OpenMS
{
// Nothing to initialize beyond the base class; defaulted like the destructor.
MapAlignmentEvaluationAlgorithmPrecision::MapAlignmentEvaluationAlgorithmPrecision() = default;
// Defaulted: this class owns no resources of its own.
MapAlignmentEvaluationAlgorithmPrecision::~MapAlignmentEvaluationAlgorithmPrecision() = default;
// Computes the precision of an alignment result against a ground truth (GT):
// for each GT consensus feature, the fraction of elements in overlapping tool
// consensus features that actually belong to the GT feature, averaged over
// all GT features with >= 2 elements. Result is written to 'out'.
// NOTE(review): if the GT contains no consensus feature with >= 2 elements,
// cons_map_gt.size() is 0 and the final division yields NaN — presumably
// callers guarantee a non-trivial ground truth; verify against callers.
void MapAlignmentEvaluationAlgorithmPrecision::evaluate(const ConsensusMap & consensus_map_in, const ConsensusMap & consensus_map_gt, const double & rt_dev, const double & mz_dev, const Peak2D::IntensityType & int_dev, const bool use_charge, double & out)
{
//Precision = 1/N * sum ( gt_subtend_tilde_tool_i / tilde_tool_i )
// keep only GT consensus features that actually group something (size >= 2)
ConsensusMap cons_map_gt; /* = consensus_map_gt; */
for (Size i = 0; i < consensus_map_gt.size(); ++i)
{
if (consensus_map_gt[i].size() >= 2)
{
cons_map_gt.push_back(consensus_map_gt[i]);
}
}
ConsensusMap cons_map_tool = consensus_map_in;
std::vector<Size> gt_subtend_tilde_tool; //holds the numerators of the sum
std::vector<Size> tilde_tool; //holds the denominators of the sum
//loop over all consensus features of the ground truth
for (Size i = 0; i < cons_map_gt.size(); ++i) //N = cons_map_gt.size()
{
ConsensusFeature & gt_elem = cons_map_gt[i];
//for every i = 1, ..., N:
Size gt_subtend_tilde_tool_i = 0; //filling material for the vectors
Size tilde_tool_i = 0;
//loop over all consensus features of the tool's consensus map
for (Size j = 0; j < cons_map_tool.size(); ++j)
{
ConsensusFeature & tool_elem = cons_map_tool[j];
Size cons_tool_size = cons_map_tool[j].size(); //size of the actual consensus feature of the tool
Size gt_i_subtend_tool_j = 0; //size of the intersection of the actual cons. feat. of the tool with the c.f. of GT
//loop over all features in the ith consensus feature of the gt
for (HandleIterator gt_it = gt_elem.begin(); gt_it != gt_elem.end(); ++gt_it)
{
//loop over all features in the jth consensus feature of the tool's map
for (HandleIterator tool_it = tool_elem.begin(); tool_it != tool_elem.end(); ++tool_it)
{
//++cons_tool_size;
// handles match if they agree within the RT/m/z/intensity tolerances
// (and charge, if use_charge is set)
if (isSameHandle(*tool_it, *gt_it, rt_dev, mz_dev, int_dev, use_charge))
{
++gt_i_subtend_tool_j;
break;
}
}
}
// only tool consensus features that group something (>= 2 elements) and
// overlap the GT feature contribute to this GT feature's fraction
if ((cons_tool_size >= 2) && (gt_i_subtend_tool_j > 0))
{
gt_subtend_tilde_tool_i += gt_i_subtend_tool_j;
tilde_tool_i += cons_tool_size;
}
}
gt_subtend_tilde_tool.push_back(gt_subtend_tilde_tool_i);
tilde_tool.push_back(tilde_tool_i);
}
double sum = 0; // intermediate step: the sum
for (Size k = 0; k < gt_subtend_tilde_tool.size(); ++k)
{
double fraction = 0; //intermediate step: the fraction
// numerator 0 implies the denominator may also be 0 -> keep fraction at 0
if (gt_subtend_tilde_tool[k] != 0)
{
fraction = double(gt_subtend_tilde_tool[k]) / double(tilde_tool[k]);
}
sum += fraction;
}
out = (1.0 / double(cons_map_gt.size())) * sum;
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/MapAlignmentEvaluationAlgorithmRecall.cpp | .cpp | 3,693 | 105 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Katharina Albers $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentEvaluationAlgorithmRecall.h>
namespace OpenMS
{
// Nothing to initialize beyond the base class; defaulted like the destructor.
MapAlignmentEvaluationAlgorithmRecall::MapAlignmentEvaluationAlgorithmRecall() = default;
// Defaulted: this class owns no resources of its own.
MapAlignmentEvaluationAlgorithmRecall::~MapAlignmentEvaluationAlgorithmRecall() = default;
// Computes the recall of an alignment result against a ground truth (GT):
// for each GT consensus feature, the number of its elements recovered by
// overlapping tool consensus features, normalized by the number of
// contributing tool features (m_i) times the GT feature size; averaged over
// all GT features with >= 2 elements. Result is written to 'out'.
// NOTE(review): gt_i is incremented once per GT handle *per tool consensus
// feature*, so the division by cons_map_tool.size() below recovers the GT
// feature's element count (exact integer division); this assumes
// cons_map_tool is non-empty — verify against callers. Likewise, an empty
// filtered GT makes the final division produce NaN.
void MapAlignmentEvaluationAlgorithmRecall::evaluate(const ConsensusMap & consensus_map_in, const ConsensusMap & consensus_map_gt, const double & rt_dev, const double & mz_dev, const Peak2D::IntensityType & int_dev, const bool use_charge, double & out)
{
//Recall = 1/N * sum( gt_subtend_tilde_tool_i / ( m_i * gt_i ) )
// keep only GT consensus features that actually group something (size >= 2)
ConsensusMap cons_map_gt; /* = consensus_map_gt; */
for (Size i = 0; i < consensus_map_gt.size(); ++i)
{
if (consensus_map_gt[i].size() >= 2)
{
cons_map_gt.push_back(consensus_map_gt[i]);
}
}
ConsensusMap cons_map_tool = consensus_map_in;
std::vector<Size> gt_subtend_tilde_tool; //holds the numerators of the sum
std::vector<Size> m; //holds the denominators of the sum
std::vector<Size> gt; //holds the denominators of the sum
//loop over all consensus features of the ground truth
for (Size i = 0; i < cons_map_gt.size(); ++i) //N = cons_map_gt.size()
{
ConsensusFeature & gt_elem = cons_map_gt[i];
//for every i = 1, ..., N:
Size gt_subtend_tilde_tool_i = 0; // holds the numerators of the sum
Size m_i = 0;
Size gt_i = 0; // size of the actual consensus feature of the GT
//loop over all consensus features of the tool's consensus map
for (Size j = 0; j < cons_map_tool.size(); ++j)
{
ConsensusFeature & tool_elem = cons_map_tool[j];
Size gt_i_subtend_tool_j = 0; //size of the intersection of the actual cons. feat. of the tool with the c.f. of GT
Size cons_tool_size = cons_map_tool[j].size(); //size of the actual consensus feature of the tool_subtend_tilde_tool
//loop over all features in the ith consensus feature of the gt
for (HandleIterator gt_it = gt_elem.begin(); gt_it != gt_elem.end(); ++gt_it)
{
// counted once per tool feature j; compensated by the division below
++gt_i;
//loop over all features in the jth consensus feature of the tool's map
for (HandleIterator tool_it = tool_elem.begin(); tool_it != tool_elem.end(); ++tool_it)
{
if (isSameHandle(*tool_it, *gt_it, rt_dev, mz_dev, int_dev, use_charge))
{
++gt_i_subtend_tool_j;
break;
}
}
}
// only tool consensus features that group something (>= 2 elements) and
// overlap the GT feature contribute; m_i counts such tool features
if ((cons_tool_size >= 2) && (gt_i_subtend_tool_j > 0))
{
gt_subtend_tilde_tool_i += gt_i_subtend_tool_j;
++m_i;
}
}
gt_subtend_tilde_tool.push_back(gt_subtend_tilde_tool_i);
m.push_back(m_i);
// undo the per-j over-counting of gt_i (see NOTE above)
gt.push_back(gt_i / cons_map_tool.size());
}
double sum = 0; // intermediate step: the sum
for (Size k = 0; k < gt_subtend_tilde_tool.size(); ++k)
{
double fraction = 0;
// numerator 0 implies m[k] (and thus the denominator) may be 0 -> keep 0
if (gt_subtend_tilde_tool[k] != 0)
{
fraction = double(gt_subtend_tilde_tool[k]) / (m[k] * gt[k]);
}
sum += fraction;
}
double recall = (1.0 / double(cons_map_gt.size())) * sum;
out = recall;
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/MapAlignmentAlgorithmKD.cpp | .cpp | 6,944 | 256 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Johannes Veit $
// $Authors: Johannes Veit $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentAlgorithmKD.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <queue>
using namespace std;
namespace OpenMS
{
// Sets up one fit-data container and one (initially null) transformation slot
// per input map, stores the parameters, and caches them via updateMembers_().
MapAlignmentAlgorithmKD::MapAlignmentAlgorithmKD(Size num_maps, const Param& param) :
fit_data_(num_maps),
transformations_(num_maps),
param_(param),
max_pairwise_log_fc_(-1) // -1: no intensity fold-change filtering by default
{
updateMembers_();
}
MapAlignmentAlgorithmKD::~MapAlignmentAlgorithmKD()
{
  // The LOWESS models are owned via raw pointers; release them here.
  // (deleting a null pointer for never-fitted slots is a no-op)
  for (TransformationModelLowess* model : transformations_)
  {
    delete model;
  }
}
void MapAlignmentAlgorithmKD::addRTFitData(const KDTreeFeatureMaps& kd_data)
{
// compute connected components
map<Size, vector<Size> > ccs;
getCCs_(kd_data, ccs);
// keep only conflict-free CCs of sufficient size
map<Size, vector<Size> > filtered_ccs;
filterCCs_(kd_data, ccs, filtered_ccs);
// save some memory
ccs.clear();
// compute average RTs for all CCs
map<Size, double> avg_rts;
for (map<Size, vector<Size> >::const_iterator it = filtered_ccs.begin(); it != filtered_ccs.end(); ++it)
{
double avg_rt = 0;
Size cc_index = it->first;
const vector<Size>& cc = it->second;
for (vector<Size>::const_iterator cc_it = cc.begin(); cc_it != cc.end(); ++cc_it)
{
Size i = *cc_it;
avg_rt += kd_data.rt(i);
}
avg_rt /= cc.size();
avg_rts[cc_index] = avg_rt;
}
// generate fit data for each map, add to fit_data_
for (map<Size, vector<Size> >::const_iterator it = filtered_ccs.begin(); it != filtered_ccs.end(); ++it)
{
Size cc_index = it->first;
const vector<Size>& cc = it->second;
for (vector<Size>::const_iterator cc_it = cc.begin(); cc_it != cc.end(); ++cc_it)
{
Size i = *cc_it;
double rt = kd_data.rt(i);
double avg_rt = avg_rts[cc_index];
fit_data_[kd_data.mapIndex(i)].push_back(make_pair(rt, avg_rt));
}
}
}
// Fits one LOWESS transformation model per map from the collected RT fit
// data. Maps with too few data points (< 50) get an identity transformation
// instead, with a warning, since a LOWESS fit would not be reliable.
void MapAlignmentAlgorithmKD::fitLOWESS()
{
  // The LOWESS sub-parameters are independent of the map being fitted:
  // extract them once instead of re-copying them in every loop iteration
  // (the old code also re-bound a const& to a fresh temporary each time).
  const Param lowess_param = param_.copy("LOWESS:", true);
  Size num_maps = fit_data_.size();
  for (Size i = 0; i < num_maps; ++i)
  {
    Size n = fit_data_[i].size();
    if (n < 50)
    {
      OPENMS_LOG_WARN << "Warning: Only " << n << " data points for LOWESS fit of map " << i << ". Consider adjusting RT or m/z tolerance or max_pairwise_log_fc, decreasing min_rel_cc_size, or increasing max_nr_conflicts." << endl;
      // fall back to an (approximate) identity transformation
      TransformationModel::DataPoints identity = {{0,0}, {1,1}, {1e6,1e6}};
      transformations_[i] = new TransformationModelLowess(identity, lowess_param);
    }
    else
    {
      transformations_[i] = new TransformationModelLowess(fit_data_[i], lowess_param);
    }
  }
}
// Applies the fitted per-map RT transformations to all features in 'kd_data',
// then rebuilds the kd-tree (transformed RTs invalidate the spatial index).
void MapAlignmentAlgorithmKD::transform(KDTreeFeatureMaps& kd_data) const
{
// apply transformations to kd_data
kd_data.applyTransformations(transformations_);
// re-optimize kd-tree
kd_data.optimizeTree();
}
// Labels every feature with the index of its connected component (CC), where
// two features are connected if they lie within the RT/m/z (and optionally
// intensity fold-change) tolerances of each other. The graph's edges are
// never materialized; neighbors are queried on demand from the kd-tree.
// 'result[i]' receives the CC index of feature i; returns the number of CCs.
Size MapAlignmentAlgorithmKD::computeCCs_(const KDTreeFeatureMaps& kd_data, vector<Size>& result) const
{
//compute CCs by means of repeated BFS (without actually storing the graph (edges) in memory)
Size num_nodes = kd_data.size();
//clear CC indices
result.clear();
result.resize(num_nodes, numeric_limits<Size>::max());
//set up data structures
queue<Size> bfs_queue;
vector<Int> bfs_visited(num_nodes, false);
Size search_pos = 0; // next index to scan for an unvisited BFS start node
Size cc_index = 0;
//BFS until every node has been visited
while (true)
{
// find the next unvisited node to seed a new BFS;
// search_pos avoids rescanning the already-exhausted prefix
bool finished = true;
for (Size i = search_pos; i < num_nodes; ++i)
{
if (!bfs_visited[i])
{
bfs_queue.push(i);
bfs_visited[i] = true;
finished = false;
search_pos = i + 1;
break;
}
}
if (finished) break;
// standard BFS: flood-fill the current component
while (!bfs_queue.empty())
{
Size i = bfs_queue.front();
bfs_queue.pop();
result[i] = cc_index;
vector<Size> compatible_features;
kd_data.getNeighborhood(i, compatible_features, rt_tol_secs_, mz_tol_, mz_ppm_, false, max_pairwise_log_fc_);
for (vector<Size>::const_iterator it = compatible_features.begin();
it != compatible_features.end();
++it)
{
Size j = *it;
if (!bfs_visited[j])
{
bfs_queue.push(j);
bfs_visited[j] = true;
}
}
}
++cc_index;
}
return cc_index;
}
void MapAlignmentAlgorithmKD::getCCs_(const KDTreeFeatureMaps& kd_data, map<Size, vector<Size> >& result) const
{
vector<Size> cc_index;
computeCCs_(kd_data, cc_index);
result.clear();
for (Size i = 0; i < kd_data.size(); ++i)
{
result[cc_index[i]].push_back(i);
}
}
// Filters connected components (CCs): a CC is kept only if it (1) is large
// enough relative to the number of maps, (2) has compatible charge states
// (all known, i.e. non-zero, charges agree), and (3) has at most
// 'max_nr_conflicts' features sharing an input map with another CC member.
void MapAlignmentAlgorithmKD::filterCCs_(const KDTreeFeatureMaps& kd_data, const map<Size, vector<Size> >& ccs, map<Size, vector<Size> >& filtered_ccs) const
{
  Size num_maps = fit_data_.size();
  // required size: at least 2, or the configured fraction of the map count
  Size min_size = max(2.0, (double)(param_.getValue("warp:min_rel_cc_size")) * (double)num_maps);
  int max_nr_conflicts = (int)param_.getValue("warp:max_nr_conflicts");
  filtered_ccs.clear();
  for (map<Size, vector<Size> >::const_iterator it = ccs.begin(); it != ccs.end(); ++it)
  {
    const vector<Size>& cc = it->second;
    // size OK?
    if (cc.size() < min_size)
    {
      // nope
      continue;
    }
    // charges compatible? (charge 0 = unknown, poses no constraint)
    set<int> charges;
    for (vector<Size>::const_iterator idx_it = cc.begin(); idx_it != cc.end(); ++idx_it)
    {
      int z = kd_data.charge(*idx_it);
      if (z != 0)
      {
        charges.insert(z);
      }
      if (charges.size() > 1)
      {
        break; // conflict found, no need to look further
      }
    }
    // BUG FIX: this check used to 'continue' from inside the loop above,
    // which only skipped to the next CC *member* instead of discarding the
    // CC — charge-incompatible CCs were therefore never filtered out.
    if (charges.size() > 1)
    {
      // nope
      continue;
    }
    // check for conflicts
    bool passes = true;
    if (max_nr_conflicts != -1)
    {
      set<Size> map_indices;
      int nr_conflicts = 0;
      for (vector<Size>::const_iterator idx_it = cc.begin(); idx_it != cc.end(); ++idx_it)
      {
        // filter out if too many features from same map
        Size map_idx = kd_data.mapIndex(*idx_it);
        if (map_indices.find(map_idx) != map_indices.end())
        {
          if (++nr_conflicts > max_nr_conflicts)
          {
            passes = false;
            break;
          }
        }
        else
        {
          map_indices.insert(map_idx);
        }
      }
    }
    if (passes)
    {
      filtered_ccs[it->first] = cc;
    }
  }
}
// Caches frequently used parameter values in member variables; does nothing
// while the Param object is still empty (before parameters have been set).
void MapAlignmentAlgorithmKD::updateMembers_()
{
if (param_.empty()) return;
rt_tol_secs_ = (double)(param_.getValue("warp:rt_tol"));
mz_tol_ = (double)(param_.getValue("warp:mz_tol"));
// true -> m/z tolerance is relative (ppm), false -> absolute (Th)
mz_ppm_ = (param_.getValue("mz_unit").toString() == "ppm");
max_pairwise_log_fc_ = param_.getValue("warp:max_pairwise_log_fc");
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/LabeledPairFinder.cpp | .cpp | 14,048 | 309 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: $
// $Authors: Marc Sturm $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/LabeledPairFinder.h>
#include <OpenMS/DATASTRUCTURES/ConstRefVector.h>
#include <OpenMS/MATH/STATISTICS/Histogram.h>
#include <OpenMS/MATH/STATISTICS/GaussFitter.h>
#include <OpenMS/METADATA/ProteinIdentification.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
using namespace std;
namespace OpenMS
{
using namespace Math;
// Registers the algorithm name and declares all parameters with their
// defaults, valid ranges/strings, and documentation; finally synchronizes
// the defaults into the active parameter set via defaultsToParam_().
LabeledPairFinder::LabeledPairFinder() :
BaseGroupFinder()
{
setName("LabeledPairFinder");
defaults_.setValue("rt_estimate", "true", "If 'true' the optimal RT pair distance and deviation are estimated by "
"fitting a gaussian distribution to the histogram of pair distance. "
"Note that this works only datasets with a significant amount of pairs! "
"If 'false' the parameters 'rt_pair_dist', 'rt_dev_low' "
"and 'rt_dev_high' define the optimal distance.");
defaults_.setValidStrings("rt_estimate", {"true","false"});
defaults_.setValue("rt_pair_dist", -20.0, "optimal pair distance in RT [sec] from light to heavy feature");
defaults_.setValue("rt_dev_low", 15.0, "maximum allowed deviation below optimal retention time distance");
defaults_.setMinFloat("rt_dev_low", 0.0);
defaults_.setValue("rt_dev_high", 15.0, "maximum allowed deviation above optimal retention time distance");
defaults_.setMinFloat("rt_dev_high", 0.0);
defaults_.setValue("mz_pair_dists", ListUtils::create<double>("4.0"), "optimal pair distances in m/z [Th] for features with charge +1 (adapted to +2, +3, .. by division through charge)");
defaults_.setValue("mz_dev", 0.05, "maximum allowed deviation from optimal m/z distance\n");
defaults_.setMinFloat("mz_dev", 0.0);
defaults_.setValue("mrm", "false", "this option should be used if the features correspond mrm chromatograms (additionally the precursor is taken into account)", {"advanced"});
defaults_.setValidStrings("mrm", {"true","false"});
defaultsToParam_();
}
// Pairs light/heavy labeled features within a single input consensus map.
// Preconditions (checked, IllegalArgument otherwise): exactly one input map;
// result_map has exactly two column headers with the same file name, labeled
// 'light' and 'heavy'. Optionally ('rt_estimate') the optimal RT pair
// distance and allowed deviation are estimated from the data by fitting a
// Gaussian to the histogram of candidate pair RT distances. Candidate pairs
// are then scored and greedily selected (best quality first, each feature
// used at most once) into result_map.
void LabeledPairFinder::run(const vector<ConsensusMap>& input_maps, ConsensusMap& result_map)
{
if (input_maps.size() != 1)
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "exactly one input map required");
if (result_map.getColumnHeaders().size() != 2)
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "two file descriptions required");
if (result_map.getColumnHeaders().begin()->second.filename != result_map.getColumnHeaders().rbegin()->second.filename)
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "the two file descriptions have to contain the same file name");
checkIds_(input_maps);
//look up the light and heavy index
Size light_index = numeric_limits<Size>::max();
Size heavy_index = numeric_limits<Size>::max();
for (ConsensusMap::ColumnHeaders::const_iterator it = result_map.getColumnHeaders().begin();
it != result_map.getColumnHeaders().end();
++it)
{
if (it->second.label == "heavy")
{
heavy_index = it->first;
}
else if (it->second.label == "light")
{
light_index = it->first;
}
}
if (light_index == numeric_limits<Size>::max() || heavy_index == numeric_limits<Size>::max())
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "the input maps have to be labeled 'light' and 'heavy'");
}
result_map.clear(false);
// sort consensus features by RT (and MZ) to speed up searching afterwards
typedef ConstRefVector<ConsensusMap> RefMap;
RefMap model_ref(input_maps[0].begin(), input_maps[0].end());
model_ref.sortByPosition();
//calculate matches
ConsensusMap matches;
//settings
double rt_pair_dist = param_.getValue("rt_pair_dist");
double rt_dev_low = param_.getValue("rt_dev_low");
double rt_dev_high = param_.getValue("rt_dev_high");
double mz_dev = param_.getValue("mz_dev");
DoubleList mz_pair_dists = param_.getValue("mz_pair_dists");
bool mrm = param_.getValue("mrm").toBool();
//estimate RT parameters
if (param_.getValue("rt_estimate") == "true")
{
//find all possible RT distances of features with the same charge and a good m/z distance
// NOTE: quadratic scan over all ordered feature pairs
vector<double> dists;
dists.reserve(model_ref.size());
for (RefMap::const_iterator it = model_ref.begin(); it != model_ref.end(); ++it)
{
for (RefMap::const_iterator it2 = model_ref.begin(); it2 != model_ref.end(); ++it2)
{
for (DoubleList::const_iterator dist_it = mz_pair_dists.begin(); dist_it != mz_pair_dists.end(); ++dist_it)
{
double mz_pair_dist = *dist_it;
if (it2->getCharge() == it->getCharge()
&& it2->getMZ() >= it->getMZ() + mz_pair_dist / it->getCharge() - mz_dev
&& it2->getMZ() <= it->getMZ() + mz_pair_dist / it->getCharge() + mz_dev)
{
dists.push_back(it2->getRT() - it->getRT());
}
}
}
}
if (dists.empty())
{
cout << "Warning: Could not find pairs for RT distance estimation. The manual settings are used!" << endl;
}
else
{
if (dists.size() < 50)
{
cout << "Warning: Found only " << dists.size() << " pairs. The estimated shift and std deviation are probably not reliable!" << endl;
}
//--------------------------- estimate initial parameters of fit ---------------------------
GaussFitter::GaussFitResult result(-1, -1, -1);
//first estimate of the optimal shift: median of the distances
sort(dists.begin(), dists.end());
Size median_index = dists.size() / 2;
result.x0 = dists[median_index];
//create histogram of distances
//consider only the maximum of pairs, centered around the optimal shift
Size max_pairs = model_ref.size() / 2;
Size start_index = (Size) max((SignedSize)0, (SignedSize)(median_index - max_pairs / 2));
Size end_index = (Size) min((SignedSize)(dists.size() - 1), (SignedSize)(median_index + max_pairs / 2));
double start_value = dists[start_index];
double end_value = dists[end_index];
double bin_step = fabs(end_value - start_value) / 99.999; //ensure that we have 100 bins
Math::Histogram<> hist(start_value, end_value, bin_step);
//std::cout << "HIST from " << start_value << " to " << end_value << " (bin size " << bin_step << ")" << endl;
for (Size i = start_index; i <= end_index; ++i)
{
hist.inc(dists[i]);
}
//cout << hist << endl;
dists.clear();
//determine median of bins (uniform background distribution)
vector<Size> bins(hist.begin(), hist.end());
sort(bins.begin(), bins.end());
Size bin_median = bins[bins.size() / 2];
bins.clear();
//estimate scale A: maximum of the histogram
Size max_value = hist.maxValue();
result.A = max_value - bin_median;
//overwrite estimate of x0 with the position of the highest bin
for (Size i = 0; i < hist.size(); ++i)
{
if (hist[i] == max_value)
{
result.x0 = hist.centerOfBin(i);
break;
}
}
//estimate sigma: first time the count is less or equal the median count in the histogram
// walk left from the peak until the count drops to the background level...
double pos = result.x0;
while (pos > start_value && hist.binValue(pos) > bin_median)
{
pos -= bin_step;
}
double sigma_low = result.x0 - pos;
// ...then walk right from the peak
pos = result.x0;
while (pos<end_value&& hist.binValue(pos)> bin_median)
{
pos += bin_step;
}
double sigma_high = pos - result.x0;
// the full background-to-background width spans roughly +/- 3 sigma
result.sigma = (sigma_high + sigma_low) / 6.0;
//cout << "estimated optimal RT distance (before fit): " << result.x0 << endl;
//cout << "estimated allowed deviation (before fit): " << result.sigma*3.0 << endl;
//--------------------------- do gauss fit ---------------------------
vector<DPosition<2> > points(hist.size());
for (Size i = 0; i < hist.size(); ++i)
{
points[i][0] = hist.centerOfBin(i);
points[i][1] = hist[i];
}
GaussFitter fitter;
fitter.setInitialParameters(result);
result = fitter.fit(points);
cout << "estimated optimal RT distance: " << result.x0 << endl;
cout << "estimated allowed deviation: " << fabs(result.sigma) * 3.0 << endl;
rt_pair_dist = result.x0;
rt_dev_low = fabs(result.sigma) * 3.0;
rt_dev_high = fabs(result.sigma) * 3.0;
}
}
// check each feature
for (RefMap::const_iterator it = model_ref.begin(); it != model_ref.end(); ++it)
{
for (DoubleList::const_iterator dist_it = mz_pair_dists.begin(); dist_it != mz_pair_dists.end(); ++dist_it)
{
double mz_pair_dist = *dist_it;
// binary search for the first partner candidate within the RT window
RefMap::const_iterator it2 = lower_bound(model_ref.begin(), model_ref.end(), it->getRT() + rt_pair_dist - rt_dev_low, ConsensusFeature::RTLess());
while (it2 != model_ref.end() && it2->getRT() <= it->getRT() + rt_pair_dist + rt_dev_high)
{
// if in mrm mode, we need to compare precursor mass difference and fragment mass difference, charge remains the same
double prec_mz_diff(0);
if (mrm)
{
prec_mz_diff = fabs((double)it2->getMetaValue("MZ") - (double)it->getMetaValue("MZ"));
if (it->getCharge() != 0)
{
prec_mz_diff = fabs(prec_mz_diff - mz_pair_dist / it->getCharge());
}
else
{
prec_mz_diff = fabs(prec_mz_diff - mz_pair_dist);
}
}
bool mrm_correct_dist(false);
double frag_mz_diff = fabs(it->getMZ() - it2->getMZ());
//cerr << it->getRT() << " charge1=" << it->getCharge() << ", charge2=" << it2->getCharge() << ", prec_diff=" << prec_mz_diff << ", frag_diff=" << frag_mz_diff << endl;
if (mrm &&
it2->getCharge() == it->getCharge() &&
prec_mz_diff < mz_dev &&
(frag_mz_diff < mz_dev || fabs(frag_mz_diff - mz_pair_dist) < mz_dev))
{
mrm_correct_dist = true;
//cerr << "mrm_correct_dist" << endl;
}
if ((mrm && mrm_correct_dist) || (!mrm &&
it2->getCharge() == it->getCharge() &&
it2->getMZ() >= it->getMZ() + mz_pair_dist / it->getCharge() - mz_dev &&
it2->getMZ() <= it->getMZ() + mz_pair_dist / it->getCharge() + mz_dev
))
{
//cerr << "dist correct" << endl;
// quality score: geometric mean of the m/z and RT distance p-values
double score = sqrt(
PValue_(it2->getMZ() - it->getMZ(), mz_pair_dist / it->getCharge(), mz_dev, mz_dev) *
PValue_(it2->getRT() - it->getRT(), rt_pair_dist, rt_dev_low, rt_dev_high)
);
// Note: we used to copy the id from the light feature here, but that strategy does not generalize to more than two labels.
// We might want to report consensus features where the light one is missing but more than one heavier variant was found.
// Also, the old strategy is inconsistent with what was done in the unlabeled case. Thus now we assign a new unique id here.
matches.push_back(ConsensusFeature());
matches.back().setUniqueId();
matches.back().insert(light_index, *it);
matches.back().clearMetaInfo();
matches.back().insert(heavy_index, *it2);
matches.back().setQuality(score);
matches.back().setCharge(it->getCharge());
matches.back().computeMonoisotopicConsensus();
}
++it2;
}
}
}
//compute best pairs
// - sort matches by quality
// - take highest-quality matches first (greedy) and mark them as used
set<Size> used_features;
matches.sortByQuality(true);
for (ConsensusMap::const_iterator match = matches.begin(); match != matches.end(); ++match)
{
//check if features are not used yet
if (used_features.find(match->begin()->getUniqueId()) == used_features.end() &&
used_features.find(match->rbegin()->getUniqueId()) == used_features.end()
)
{
//if unused, add it to the final set of elements
result_map.push_back(*match);
used_features.insert(match->begin()->getUniqueId());
used_features.insert(match->rbegin()->getUniqueId());
}
}
//Add protein identifications to result map
for (Size i = 0; i < input_maps.size(); ++i)
{
result_map.getProteinIdentifications().insert(result_map.getProteinIdentifications().end(), input_maps[i].getProteinIdentifications().begin(), input_maps[i].getProteinIdentifications().end());
}
//Add unassigned peptide identifications to result map
for (Size i = 0; i < input_maps.size(); ++i)
{
result_map.getUnassignedPeptideIdentifications().insert(result_map.getUnassignedPeptideIdentifications().end(), input_maps[i].getUnassignedPeptideIdentifications().begin(), input_maps[i].getUnassignedPeptideIdentifications().end());
}
// Very useful for checking the results, and the ids have no real meaning anyway
result_map.sortByMZ();
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/TransformationModelBSpline.cpp | .cpp | 5,900 | 148 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/TransformationModelBSpline.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/TransformationModelLinear.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
using namespace std;
namespace OpenMS
{
// Fits a 2D B-spline through the given (x, y) calibration points and prepares
// the configured extrapolation mode for values outside [xmin_, xmax_].
// @throws Exception::IllegalArgument for fewer than 2 data points
// @throws Exception::InvalidValue if 'wavelength' exceeds the data range
// @throws Exception::UnableToFit if the underlying spline fit fails
TransformationModelBSpline::TransformationModelBSpline(
const TransformationModel::DataPoints& data, const Param& params) :
spline_(nullptr)
{
// parameter handling/checking:
params_ = params;
Param defaults;
getDefaultParameters(defaults);
params_.setDefaults(defaults);
if (data.size() < 2)
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"'b_spline' model requires more data");
}
Size boundary_condition = params_.getValue("boundary_condition");
BSpline2d::BoundaryCondition bound_cond =
static_cast<BSpline2d::BoundaryCondition>(boundary_condition);
// split the data into x/y vectors and track the x range for extrapolation
vector<double> x(data.size()), y(data.size());
xmin_ = data[0].first;
xmax_ = xmin_;
for (Size i = 0; i < data.size(); ++i)
{
x[i] = data[i].first;
y[i] = data[i].second;
if (x[i] < xmin_) xmin_ = x[i];
else if (x[i] > xmax_) xmax_ = x[i];
}
double wavelength = params_.getValue("wavelength");
if (wavelength > (xmax_ - xmin_))
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "B-spline 'wavelength' can't be larger than the data range (here: " + String(xmax_ - xmin_) + ").", String(wavelength));
}
// since we can't initialize a BSpline2d object in the init list (no c'tor
// that doesn't require preparation of data), we have to use a pointer:
spline_ = new BSpline2d(x, y, wavelength, bound_cond,
params_.getValue("num_nodes"));
if (!spline_->ok())
{
throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"TransformationModelBSpline",
"Unable to fit B-spline to data points.");
}
// extrapolation:
std::string extrapolate = params_.getValue("extrapolate");
if (extrapolate == "b_spline")
{
extrapolate_ = EX_BSPLINE;
}
else if (extrapolate == "global_linear")
{
// use the slope/offset of a linear model fitted to ALL data points
extrapolate_ = EX_GLOBAL_LINEAR;
TransformationModelLinear lm(data, Param());
String x_weight, y_weight;
double x_datum_min, x_datum_max, y_datum_min, y_datum_max;
lm.getParameters(slope_min_, offset_min_, x_weight, y_weight, x_datum_min, x_datum_max, y_datum_min, y_datum_max);
slope_max_ = slope_min_;
// extrapolation (left/right) considers xmin_/xmax_ as the origin (x = 0):
offset_min_ = lm.evaluate(xmin_);
offset_max_ = lm.evaluate(xmax_);
}
else // "linear" or "constant"
{
// both modes anchor at the spline values at the data range boundaries
offset_min_ = spline_->eval(xmin_);
offset_max_ = spline_->eval(xmax_);
if (extrapolate == "constant")
{
extrapolate_ = EX_CONSTANT;
}
else // "linear"
{
// continue with the spline's slope at the respective boundary
extrapolate_ = EX_LINEAR;
slope_min_ = spline_->derivative(xmin_);
slope_max_ = spline_->derivative(xmax_);
}
}
}
// Releases the heap-allocated spline (owned by this model).
TransformationModelBSpline::~TransformationModelBSpline()
{
// 'delete' on a null pointer is a no-op, so no explicit check is needed.
delete spline_;
}
// Evaluates the fitted model at 'value'. Inside [xmin_, xmax_] (or whenever
// B-spline extrapolation was configured) the spline itself is evaluated;
// outside the range the configured extrapolation scheme is applied.
//
// @param value Input x-value (e.g. an observed retention time).
// @return The transformed value.
double TransformationModelBSpline::evaluate(double value) const
{
const bool within_range = (value >= xmin_) && (value <= xmax_);
if (within_range || (extrapolate_ == EX_BSPLINE))
{
return spline_->eval(value);
}
if (value < xmin_) // extrapolate to the left of the data range
{
// EX_CONSTANT keeps the endpoint value; EX_LINEAR/EX_GLOBAL_LINEAR
// continue with the precomputed slope (origin at xmin_):
return (extrapolate_ == EX_CONSTANT) ?
offset_min_ :
offset_min_ - slope_min_ * (xmin_ - value);
}
// extrapolate to the right of the data range (origin at xmax_):
return (extrapolate_ == EX_CONSTANT) ?
offset_max_ :
offset_max_ + slope_max_ * (value - xmax_);
}
// Fills 'params' with the default parameter set of the B-spline model
// (wavelength, num_nodes, extrapolate, boundary_condition), including
// descriptions and validity restrictions. Any previous content is cleared.
void TransformationModelBSpline::getDefaultParameters(Param& params)
{
params.clear();
// smoothing strength (0 -> number of nodes is derived from the data size):
params.setValue("wavelength", 0.0, "Determines the amount of smoothing by setting the number of nodes for the B-spline. The number is chosen so that the spline approximates a low-pass filter with this cutoff wavelength. The wavelength is given in the same units as the data; a higher value means more smoothing. '0' sets the number of nodes to twice the number of input points.");
params.setMinFloat("wavelength", 0.0);
// explicit node count overrides 'wavelength' when >= 2:
params.setValue("num_nodes", 5, "Number of nodes for B-spline fitting. Overrides 'wavelength' if set (to two or greater). A lower value means more smoothing.");
params.setMinInt("num_nodes", 0);
// behavior outside the fitted data range (see evaluate()):
params.setValue("extrapolate", "linear", "Method to use for extrapolation beyond the original data range. 'linear': Linear extrapolation using the slope of the B-spline at the corresponding endpoint. 'b_spline': Use the B-spline (as for interpolation). 'constant': Use the constant value of the B-spline at the corresponding endpoint. 'global_linear': Use a linear fit through the data (which will most probably introduce discontinuities at the ends of the data range).");
params.setValidStrings("extrapolate", {"linear","b_spline","constant","global_linear"});
// boundary condition passed to the BSpline2d implementation:
params.setValue("boundary_condition", 2, "Boundary condition at B-spline endpoints: 0 (value zero), 1 (first derivative zero) or 2 (second derivative zero)");
params.setMinInt("boundary_condition", 0);
params.setMaxInt("boundary_condition", 2);
}
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/MapAlignmentTransformer.cpp | .cpp | 6,321 | 204 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Stephan Aiche, Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentTransformer.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/TransformationDescription.h>
#include <OpenMS/KERNEL/ConsensusFeature.h>
#include <OpenMS/KERNEL/ConsensusMap.h>
using std::vector;
namespace OpenMS
{
// Records 'original_rt' in the meta value "original_RT" unless that meta
// value is already present (the very first original RT is never overwritten).
//
// @param meta_info    Object to annotate.
// @param original_rt  RT value before transformation.
// @return true if the value was stored, false if one already existed.
bool MapAlignmentTransformer::storeOriginalRT_(MetaInfoInterface& meta_info,
double original_rt)
{
if (!meta_info.metaValueExists("original_RT"))
{
meta_info.setMetaValue("original_RT", original_rt);
return true;
}
return false;
}
// Applies the RT transformation to every spectrum and every chromatogram
// data point of the experiment; RT ranges are recomputed afterwards.
//
// @param msexp             Experiment to transform (in place).
// @param trafo             RT transformation to apply.
// @param store_original_rt If true, keep the untransformed RTs as meta values
//                          ("original_RT" on spectra, "original_rt" on
//                          chromatograms — note the differing key casing).
void MapAlignmentTransformer::transformRetentionTimes(
PeakMap& msexp, const TransformationDescription& trafo,
bool store_original_rt)
{
msexp.clearRanges();
// spectra carry a single RT each:
for (auto& spectrum : msexp)
{
const double old_rt = spectrum.getRT();
if (store_original_rt) storeOriginalRT_(spectrum, old_rt);
spectrum.setRT(trafo.apply(old_rt));
}
// chromatograms carry one RT per data point:
for (Size chrom_idx = 0; chrom_idx < msexp.getNrChromatograms(); ++chrom_idx)
{
MSChromatogram& chrom = msexp.getChromatogram(chrom_idx);
vector<double> original_rts;
if (store_original_rt) original_rts.reserve(chrom.size());
for (Size point_idx = 0; point_idx < chrom.size(); ++point_idx)
{
const double old_rt = chrom[point_idx].getRT();
if (store_original_rt) original_rts.push_back(old_rt);
chrom[point_idx].setRT(trafo.apply(old_rt));
}
// store the full list of original RTs once per chromatogram:
if (store_original_rt && !chrom.metaValueExists("original_rt"))
{
chrom.setMetaValue("original_rt", original_rts);
}
}
msexp.updateRanges();
}
// Applies the RT transformation to all features of a feature map (including
// convex hulls and subordinates, handled by applyToFeature_()) as well as to
// the map's unassigned peptide identifications.
//
// @param fmap              Feature map to transform (in place).
// @param trafo             RT transformation to apply.
// @param store_original_rt If true, keep untransformed RTs as "original_RT".
void MapAlignmentTransformer::transformRetentionTimes(
FeatureMap& fmap, const TransformationDescription& trafo,
bool store_original_rt)
{
for (Feature& feature : fmap)
{
applyToFeature_(feature, trafo, store_original_rt);
}
// unassigned peptide IDs carry RTs of their own:
if (!fmap.getUnassignedPeptideIdentifications().empty())
{
transformRetentionTimes(fmap.getUnassignedPeptideIdentifications(), trafo,
store_original_rt);
}
}
// Transforms the RT of a single base feature and of any peptide
// identifications annotated on it.
//
// @param feature           Feature to transform (in place).
// @param trafo             RT transformation to apply.
// @param store_original_rt If true, keep untransformed RTs as "original_RT".
void MapAlignmentTransformer::applyToBaseFeature_(
BaseFeature& feature, const TransformationDescription& trafo,
bool store_original_rt)
{
// shift the feature centroid:
const double old_rt = feature.getRT();
if (store_original_rt) storeOriginalRT_(feature, old_rt);
feature.setRT(trafo.apply(old_rt));
// peptide IDs attached to the feature have their own RT values:
if (!feature.getPeptideIdentifications().empty())
{
transformRetentionTimes(feature.getPeptideIdentifications(), trafo,
store_original_rt);
}
}
// Transforms a full feature: centroid and peptide IDs (via
// applyToBaseFeature_()), the RT coordinate of all convex hull points, and —
// recursively — all subordinate features.
//
// @param feature           Feature to transform (in place).
// @param trafo             RT transformation to apply.
// @param store_original_rt If true, keep untransformed RTs as "original_RT".
void MapAlignmentTransformer::applyToFeature_(
Feature& feature, const TransformationDescription& trafo,
bool store_original_rt)
{
applyToBaseFeature_(feature, trafo, store_original_rt);
// transform the RT coordinate of every convex hull point; the hull is
// cleared and re-set so its internal state stays consistent:
for (ConvexHull2D& hull : feature.getConvexHulls())
{
ConvexHull2D::PointArrayType points = hull.getHullPoints();
hull.clear();
for (auto& point : points)
{
point[Feature::RT] = trafo.apply(point[Feature::RT]);
}
hull.setHullPoints(points);
}
// subordinates are full features themselves -> recurse:
for (Feature& subordinate : feature.getSubordinates())
{
applyToFeature_(subordinate, trafo, store_original_rt);
}
}
// Applies the RT transformation to all consensus features of a consensus map
// (including their grouped feature handles) and to the map's unassigned
// peptide identifications.
//
// @param cmap              Consensus map to transform (in place).
// @param trafo             RT transformation to apply.
// @param store_original_rt If true, keep untransformed RTs as "original_RT".
void MapAlignmentTransformer::transformRetentionTimes(
ConsensusMap& cmap, const TransformationDescription& trafo,
bool store_original_rt)
{
for (ConsensusFeature& cons_feature : cmap)
{
applyToConsensusFeature_(cons_feature, trafo, store_original_rt);
}
// unassigned peptide IDs carry RTs of their own:
if (!cmap.getUnassignedPeptideIdentifications().empty())
{
transformRetentionTimes(cmap.getUnassignedPeptideIdentifications(), trafo,
store_original_rt);
}
}
// Transforms a consensus feature: its own centroid and peptide IDs (via
// applyToBaseFeature_()) plus the RTs of all grouped feature handles.
//
// @param feature           Consensus feature to transform (in place).
// @param trafo             RT transformation to apply.
// @param store_original_rt If true, keep untransformed RTs as "original_RT".
void MapAlignmentTransformer::applyToConsensusFeature_(
ConsensusFeature& feature, const TransformationDescription& trafo,
bool store_original_rt)
{
applyToBaseFeature_(feature, trafo, store_original_rt);
// handles live in a set, so they are only reachable via const iteration;
// asMutable() is used to update the RT of each stored handle in place:
for (const auto& handle : feature.getFeatures())
{
handle.asMutable().setRT(trafo.apply(handle.getRT()));
}
}
// Applies the RT transformation to every peptide identification that carries
// an RT annotation; IDs without an RT are left untouched.
//
// @param pep_ids           Peptide identifications to transform (in place).
// @param trafo             RT transformation to apply.
// @param store_original_rt If true, keep untransformed RTs as "original_RT".
void MapAlignmentTransformer::transformRetentionTimes(
PeptideIdentificationList& pep_ids,
const TransformationDescription& trafo, bool store_original_rt)
{
for (auto& pep_id : pep_ids)
{
if (!pep_id.hasRT()) continue; // nothing to transform
const double old_rt = pep_id.getRT();
if (store_original_rt) storeOriginalRT_(pep_id, old_rt);
pep_id.setRT(trafo.apply(old_rt));
}
}
// Applies the RT transformation to all observations stored in an
// IdentificationData object, using its in-place visitor interface.
//
// @param id_data           Identification data to transform (in place).
// @param trafo             RT transformation to apply.
// @param store_original_rt If true, keep untransformed RTs as "original_RT".
void MapAlignmentTransformer::transformRetentionTimes(
IdentificationData& id_data, const TransformationDescription& trafo,
bool store_original_rt)
{
// the callback is invoked once per observation and updates its RT in place:
id_data.applyToObservations([&](IdentificationData::Observation& obs)
{
if (store_original_rt) storeOriginalRT_(obs, obs.rt);
obs.rt = trafo.apply(obs.rt);
});
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithmLabeled.cpp | .cpp | 1,629 | 50 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Marc Sturm $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithmLabeled.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/LabeledPairFinder.h>
#include <OpenMS/KERNEL/ConversionHelper.h>
namespace OpenMS
{
// Sets the algorithm name and adopts all defaults of LabeledPairFinder
// (inserted without a prefix) as this algorithm's own parameters.
FeatureGroupingAlgorithmLabeled::FeatureGroupingAlgorithmLabeled() :
FeatureGroupingAlgorithm()
{
setName("FeatureGroupingAlgorithmLabeled");
defaults_.insert("", LabeledPairFinder().getParameters());
defaultsToParam_();
}
FeatureGroupingAlgorithmLabeled::~FeatureGroupingAlgorithmLabeled() = default;
void FeatureGroupingAlgorithmLabeled::group(const std::vector<FeatureMap> & maps, ConsensusMap & out)
{
//check that the number of maps is ok
if (maps.size() != 1)
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Exactly one map must be given!");
if (out.getColumnHeaders().size() != 2)
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Two file descriptions must be set in 'out'!");
//initialize LabeledPairFinder
LabeledPairFinder pm;
pm.setParameters(param_.copy("", true));
//convert to consensus map
std::vector<ConsensusMap> input(1);
MapConversion::convert(0, maps[0], input[0]);
//run
pm.run(input, out);
}
} //namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithmKD.cpp | .cpp | 21,550 | 511 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Johannes Veit $
// $Authors: Johannes Veit $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithmKD.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentAlgorithmKD.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithm.h>
#include <OpenMS/ANALYSIS/ID/IonIdentityMolecularNetworking.h>
#include <OpenMS/DATASTRUCTURES/Adduct.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/METADATA/ProteinIdentification.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
using namespace std;
namespace OpenMS
{
// Sets up the algorithm's default parameters: RT warping ("warp:*"), feature
// linking ("link:*"), m/z partitioning, the FeatureDistance sub-parameters,
// and the LOWESS settings used for internal RT transformations.
FeatureGroupingAlgorithmKD::FeatureGroupingAlgorithmKD() :
ProgressLogger(),
feature_distance_(FeatureDistance())
{
setName("FeatureGroupingAlgorithmKD");
// --- RT warping parameters (internal alignment before linking) ---
defaults_.setValue("warp:enabled", "true", "Whether or not to internally warp feature RTs using LOWESS transformation before linking (reported RTs in results will always be the original RTs)");
defaults_.setValidStrings("warp:enabled", {"true","false"});
defaults_.setValue("warp:rt_tol", 100.0, "Width of RT tolerance window (sec)");
defaults_.setMinFloat("warp:rt_tol", 0.0);
defaults_.setValue("warp:mz_tol", 5.0, "m/z tolerance (in ppm or Da)");
defaults_.setMinFloat("warp:mz_tol", 0.0);
defaults_.setValue("warp:max_pairwise_log_fc", 0.5, "Maximum absolute log10 fold change between two compatible signals during compatibility graph construction. Two signals from different maps will not be connected by an edge in the compatibility graph if absolute log fold change exceeds this limit (they might still end up in the same connected component, however). Note: this does not limit fold changes in the linking stage, only during RT alignment, where we try to find high-quality alignment anchor points. Setting this to a value < 0 disables the FC check.", {"advanced"});
defaults_.setValue("warp:min_rel_cc_size", 0.5, "Only connected components containing compatible features from at least max(2, (warp_min_occur * number_of_input_maps)) input maps are considered for computing the warping function", {"advanced"});
defaults_.setMinFloat("warp:min_rel_cc_size", 0.0);
defaults_.setMaxFloat("warp:min_rel_cc_size", 1.0);
defaults_.setValue("warp:max_nr_conflicts", 0, "Allow up to this many conflicts (features from the same map) per connected component to be used for alignment (-1 means allow any number of conflicts)", {"advanced"});
defaults_.setMinInt("warp:max_nr_conflicts", -1);
// --- feature linking parameters ---
defaults_.setValue("link:rt_tol", 30.0, "Width of RT tolerance window (sec)");
defaults_.setMinFloat("link:rt_tol", 0.0);
defaults_.setValue("link:mz_tol", 10.0, "m/z tolerance (in ppm or Da)");
defaults_.setMinFloat("link:mz_tol", 0.0);
defaults_.setValue("link:charge_merging","With_charge_zero","whether to disallow charge mismatches (Identical), allow to link charge zero (i.e., unknown charge state) with every charge state, or disregard charges (Any).");
defaults_.setValidStrings("link:charge_merging", {"Identical", "With_charge_zero", "Any"});
defaults_.setValue("link:adduct_merging","Any","whether to only allow the same adduct for linking (Identical), also allow linking features with adduct-free ones, or disregard adducts (Any).");
defaults_.setValidStrings("link:adduct_merging", {"Identical", "With_unknown_adducts", "Any"});
// --- m/z tolerance unit and partitioning ---
defaults_.setValue("mz_unit", "ppm", "Unit of m/z tolerance");
defaults_.setValidStrings("mz_unit", {"ppm","Da"});
defaults_.setValue("nr_partitions", 100, "Number of partitions in m/z space");
defaults_.setMinInt("nr_partitions", 1);
// FeatureDistance defaults
defaults_.insert("", feature_distance_.getDefaults());
// override some of them
defaults_.setValue("distance_intensity:weight", 1.0);
defaults_.setValue("distance_intensity:log_transform", "enabled");
defaults_.addTag("distance_intensity:weight", "advanced");
defaults_.addTag("distance_intensity:log_transform", "advanced");
// these are set programmatically in group_() and must not be user-visible:
defaults_.remove("distance_RT:max_difference");
defaults_.remove("distance_MZ:max_difference");
defaults_.remove("distance_MZ:unit");
defaults_.remove("ignore_charge");
defaults_.remove("ignore_adduct");
// LOWESS defaults (all marked 'advanced'):
Param lowess_defaults;
TransformationModelLowess::getDefaultParameters(lowess_defaults);
for (Param::ParamIterator it = lowess_defaults.begin(); it != lowess_defaults.end(); ++it)
{
const_cast<Param::ParamEntry&>(*it).tags.insert("advanced");
}
defaults_.insert("LOWESS:", lowess_defaults);
defaults_.setSectionDescription("LOWESS", "LOWESS parameters for internal RT transformations (only relevant if 'warp:enabled' is set to 'true')");
defaultsToParam_();
setLogType(CMD);
}
FeatureGroupingAlgorithmKD::~FeatureGroupingAlgorithmKD() = default;
template <typename MapType>
void FeatureGroupingAlgorithmKD::group_(const vector<MapType>& input_maps,
ConsensusMap& out)
{
// set parameters
String mz_unit(param_.getValue("mz_unit").toString());
mz_ppm_ = mz_unit == "ppm";
mz_tol_ = (double)(param_.getValue("link:mz_tol"));
rt_tol_secs_ = (double)(param_.getValue("link:rt_tol"));
// check that the number of maps is ok:
if (input_maps.size() < 2)
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"At least two maps must be given!");
}
out.clear(false);
// collect all m/z values for partitioning, find intensity maximum
vector<double> massrange;
double max_intensity(0.0);
for (typename vector<MapType>::const_iterator map_it = input_maps.begin();
map_it != input_maps.end(); ++map_it)
{
for (typename MapType::const_iterator feat_it = map_it->begin();
feat_it != map_it->end(); feat_it++)
{
massrange.push_back(feat_it->getMZ());
double inty = feat_it->getIntensity();
if (inty > max_intensity)
{
max_intensity = inty;
}
}
}
// set up distance functor
Param distance_params;
distance_params.insert("", param_.copy("distance_RT:"));
distance_params.insert("", param_.copy("distance_MZ:"));
distance_params.insert("", param_.copy("distance_intensity:"));
distance_params.setValue("distance_RT:max_difference", rt_tol_secs_);
distance_params.setValue("distance_MZ:max_difference", mz_tol_);
distance_params.setValue("distance_MZ:unit", (mz_ppm_ ? "ppm" : "Da"));
feature_distance_ = FeatureDistance(max_intensity, false);
feature_distance_.setParameters(distance_params);
// partition at boundaries -> this should be safe because there cannot be
// any cluster reaching across boundaries
sort(massrange.begin(), massrange.end());
int pts_per_partition = massrange.size() / (int)(param_.getValue("nr_partitions"));
double warp_mz_tol = (double)(param_.getValue("warp:mz_tol"));
double max_mz_tol = max(mz_tol_, warp_mz_tol);
// compute partition boundaries
vector<double> partition_boundaries;
partition_boundaries.push_back(massrange.front());
for (size_t j = 0; j < massrange.size()-1; j++)
{
// minimal differences between two m/z values
double massrange_diff = mz_ppm_ ? max_mz_tol * 1e-6 * massrange[j+1] : max_mz_tol;
if (fabs(massrange[j] - massrange[j+1]) > massrange_diff)
{
if (j >= (partition_boundaries.size() ) * pts_per_partition )
{
partition_boundaries.push_back((massrange[j] + massrange[j+1])/2.0);
}
}
}
// add last partition (a bit more since we use "smaller than" below)
partition_boundaries.push_back(massrange.back() + 1.0);
// ------------ compute RT transformation models ------------
MapAlignmentAlgorithmKD aligner(input_maps.size(), param_);
bool align = param_.getValue("warp:enabled").toString() == "true";
if (align)
{
Size progress = 0;
startProgress(0, partition_boundaries.size(), "computing RT transformations");
for (size_t j = 0; j < partition_boundaries.size()-1; j++)
{
double partition_start = partition_boundaries[j];
double partition_end = partition_boundaries[j+1];
std::vector<MapType> tmp_input_maps(input_maps.size());
for (size_t k = 0; k < input_maps.size(); k++)
{
// iterate over all features in the current input map and append
// matching features (within the current partition) to the temporary
// map
for (size_t m = 0; m < input_maps[k].size(); m++)
{
if (input_maps[k][m].getMZ() >= partition_start &&
input_maps[k][m].getMZ() < partition_end)
{
tmp_input_maps[k].push_back(input_maps[k][m]);
}
}
tmp_input_maps[k].updateRanges();
}
// set up kd-tree
KDTreeFeatureMaps kd_data(tmp_input_maps, param_);
aligner.addRTFitData(kd_data);
setProgress(progress++);
}
// fit LOWESS on RT fit data collected across all partitions
try
{
aligner.fitLOWESS();
}
catch (Exception::BaseException& e)
{
OPENMS_LOG_ERROR << "Error: " << e.what() << endl;
return;
}
endProgress();
}
// ------------ run alignment + feature linking on individual partitions ------------
Size progress = 0;
startProgress(0, partition_boundaries.size(), "linking features");
for (size_t j = 0; j < partition_boundaries.size()-1; j++)
{
double partition_start = partition_boundaries[j];
double partition_end = partition_boundaries[j+1];
std::vector<MapType> tmp_input_maps(input_maps.size());
for (size_t k = 0; k < input_maps.size(); k++)
{
// iterate over all features in the current input map and append
// matching features (within the current partition) to the temporary
// map
for (size_t m = 0; m < input_maps[k].size(); m++)
{
if (input_maps[k][m].getMZ() >= partition_start &&
input_maps[k][m].getMZ() < partition_end)
{
tmp_input_maps[k].push_back(input_maps[k][m]);
}
}
tmp_input_maps[k].updateRanges();
}
// set up kd-tree
KDTreeFeatureMaps kd_data(tmp_input_maps, param_);
// alignment
if (align)
{
aligner.transform(kd_data);
}
// link features
runClustering_(kd_data, out);
setProgress(progress++);
}
endProgress();
postprocess_(input_maps, out);
}
// Groups corresponding features across feature maps; delegates to the
// generic implementation group_().
void FeatureGroupingAlgorithmKD::group(const std::vector<FeatureMap>& maps,
ConsensusMap& out)
{
group_(maps, out);
}
// Groups corresponding consensus features across consensus maps; delegates
// to the generic implementation group_().
void FeatureGroupingAlgorithmKD::group(const std::vector<ConsensusMap>& maps,
ConsensusMap& out)
{
group_(maps, out);
}
// Greedy clustering on one m/z partition: repeatedly pick the best potential
// cluster (as ordered by ClusterProxyKD::operator<), turn it into a
// consensus feature, mark its members as assigned, and update the affected
// neighborhoods until every feature is assigned.
//
// @param kd_data kd-tree over all features of the current partition.
// @param out     Consensus map receiving the resulting consensus features.
void FeatureGroupingAlgorithmKD::runClustering_(const KDTreeFeatureMaps& kd_data, ConsensusMap& out)
{
Size n = kd_data.size();
// pass 1: initialize best potential clusters for all possible cluster centers
set<Size> update_these;
for (Size i = 0; i < kd_data.size(); ++i)
{
update_these.insert(i);
}
set<ClusterProxyKD> potential_clusters;
vector<ClusterProxyKD> cluster_for_idx(n);
vector<Int> assigned(n, false);
updateClusterProxies_(potential_clusters, cluster_for_idx, update_these, assigned, kd_data);
// pass 2: construct consensus features until all points assigned.
while (!potential_clusters.empty())
{
// get index of current best cluster center (as defined by ClusterProxyKD::operator<)
Size i = potential_clusters.begin()->getCenterIndex();
// compile the actual list of sub feature indices for cluster with center i
vector<Size> cf_indices;
computeBestClusterForCenter_(i, cf_indices, assigned, kd_data);
// add consensus feature
addConsensusFeature_(cf_indices, kd_data, out);
// mark selected sub features assigned and delete them from potential_clusters
for (vector<Size>::const_iterator f_it = cf_indices.begin(); f_it != cf_indices.end(); ++f_it)
{
assigned[*f_it] = true;
potential_clusters.erase(cluster_for_idx[*f_it]);
}
// compile set of all points whose neighborhoods will need updating
update_these = set<Size>();
for (vector<Size>::const_iterator f_it = cf_indices.begin(); f_it != cf_indices.end(); ++f_it)
{
vector<Size> f_neighbors;
kd_data.getNeighborhood(*f_it, f_neighbors, rt_tol_secs_, mz_tol_, mz_ppm_, true);
for (vector<Size>::const_iterator it = f_neighbors.begin(); it != f_neighbors.end(); ++it)
{
if (!assigned[*it])
{
update_these.insert(*it);
}
}
}
// now that the points are marked assigned, update the neighborhoods of their neighbors
updateClusterProxies_(potential_clusters, cluster_for_idx, update_these, assigned, kd_data);
}
}
// Recomputes the best potential cluster for every center index listed in
// 'update_these' and keeps the two bookkeeping structures (the ordered set
// of potential clusters and the per-index proxy lookup) consistent.
//
// @param potential_clusters Ordered set of current cluster candidates.
// @param cluster_for_idx    Proxy currently stored for each feature index.
// @param update_these       Center indices whose neighborhoods changed.
// @param assigned           Per-feature "already assigned" flags.
// @param kd_data            kd-tree over the current partition's features.
void FeatureGroupingAlgorithmKD::updateClusterProxies_(set<ClusterProxyKD>& potential_clusters,
vector<ClusterProxyKD>& cluster_for_idx,
const set<Size>& update_these,
const vector<Int>& assigned,
const KDTreeFeatureMaps& kd_data)
{
for (Size center_idx : update_these)
{
vector<Size> dummy; // sub-feature indices are not needed here
const ClusterProxyKD new_proxy =
computeBestClusterForCenter_(center_idx, dummy, assigned, kd_data);
ClusterProxyKD& stored_proxy = cluster_for_idx[center_idx];
// re-insert only if size and/or average distance actually changed:
if (new_proxy != stored_proxy)
{
potential_clusters.erase(stored_proxy);
stored_proxy = new_proxy;
potential_clusters.insert(new_proxy);
}
}
}
// Computes the best potential cluster centered at feature 'i': from i's
// RT/m-z neighborhood, features incompatible by charge or adduct (according
// to the 'link:charge_merging' / 'link:adduct_merging' parameters) are
// filtered out, then at most one feature per input map is selected — the one
// with the smallest FeatureDistance to the center.
//
// @param i          Index of the candidate cluster center in kd_data.
// @param cf_indices Output: indices of the selected sub-features (always
//                   includes i itself).
// @param assigned   Per-feature flags; already-assigned features are skipped.
// @param kd_data    kd-tree over all features of the current partition.
// @return Proxy holding the cluster size, average distance to the center,
//         and the center index.
ClusterProxyKD FeatureGroupingAlgorithmKD::computeBestClusterForCenter_(Size i, vector<Size>& cf_indices, const vector<Int>& assigned, const KDTreeFeatureMaps& kd_data) const
{
//Parameters how to use charge/adduct information
String merge_charge(param_.getValue("link:charge_merging").toString());
String merge_adduct(param_.getValue("link:adduct_merging").toString());
// compute i's neighborhood, together with a look-up table
// map index -> corresponding points
map<Size, vector<Size> > points_for_map_index;
vector<Size> neighbors;
kd_data.getNeighborhood(i, neighbors, rt_tol_secs_, mz_tol_, mz_ppm_, true);
Int charge_i = kd_data.charge(i);
const BaseFeature* f_i = kd_data.feature(i);
for (vector<Size>::const_iterator it = neighbors.begin(); it != neighbors.end(); ++it)
{
// If the feature was already assigned, don't consider it at all!
if (assigned[*it])
{
continue;
}
// charge compatibility check:
if (merge_charge == "Identical")
{
if (kd_data.charge(*it) != charge_i)
{
continue;
}
}
// what to consider for linking with existing features _that have charge_. This ensures that we won't collect different non-zero charges.
else if (merge_charge == "With_charge_zero")
{
if ((kd_data.charge(*it) != charge_i) && (kd_data.charge(*it) != 0))
{
continue;
}
}
// else if (merge_charge == "Any")
//{
// //we allow to merge all
//}
// analogous adduct block
if (merge_adduct == "Identical")
{
// subcase 1: one has adduct, other not
if (kd_data.feature(*it)->metaValueExists(Constants::UserParam::DC_CHARGE_ADDUCTS) != f_i->metaValueExists(Constants::UserParam::DC_CHARGE_ADDUCTS))
{
continue;
}
// subcase 2: both have adduct, but is it the same?
if (kd_data.feature(*it)->metaValueExists(Constants::UserParam::DC_CHARGE_ADDUCTS))
{
if (EmpiricalFormula(kd_data.feature(*it)->getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS)) != EmpiricalFormula(f_i->getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS)))
{
continue;
}
}
}
// what to consider for linking with existing features _that have adduct_. If one has no adduct, it's fine
// anyway. If one has an adduct we have to compare.
else if (merge_adduct == "With_unknown_adducts")
{
// subcase1: *it has adduct, but i not. don't want to collect potentially different adducts to previous without adduct
if ((kd_data.feature(*it)->metaValueExists(Constants::UserParam::DC_CHARGE_ADDUCTS)) && (!f_i->metaValueExists(Constants::UserParam::DC_CHARGE_ADDUCTS)))
{
continue;
}
// subcase2: both have adduct
if ((kd_data.feature(*it)->metaValueExists(Constants::UserParam::DC_CHARGE_ADDUCTS)) && (f_i->metaValueExists(Constants::UserParam::DC_CHARGE_ADDUCTS)))
{
// cheaper string check first, only check EF extensively if strings differ (might be just different element orders)
if ((kd_data.feature(*it)->getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS) != f_i->getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS)) &&
(EmpiricalFormula(kd_data.feature(*it)->getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS)) != EmpiricalFormula(f_i->getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS))))
{
continue;
}
}
}
// else if (merge_adduct == "Any")
//{
// //we allow to merge all
//}
// if everything is OK, add feature
points_for_map_index[kd_data.mapIndex(*it)].push_back(*it);
}
// center i is always part of CF, no other points from i's map can be contained
points_for_map_index[kd_data.mapIndex(i)] = vector<Size>(1, i);
// compile list of sub feature indices and corresponding average distance
double avg_distance = 0.0;
for (map<Size, vector<Size> >::const_iterator it = points_for_map_index.begin();
it != points_for_map_index.end();
++it)
{
const vector<Size>& candidates = it->second;
// choose a point j with minimal distance to center i
double min_dist = numeric_limits<double>::max();
Size best_index = numeric_limits<Size>::max();
for (vector<Size>::const_iterator c_it = candidates.begin(); c_it != candidates.end(); ++c_it)
{
// FeatureDistance::operator() is non-const, but this method is const ->
// cast away the constness of the member functor for the call:
double dist = const_cast<FeatureDistance&>(feature_distance_)(*(kd_data.feature(*c_it)),
*(kd_data.feature(i))).second;
if (dist < min_dist)
{
min_dist = dist;
best_index = *c_it;
}
}
cf_indices.push_back(best_index);
avg_distance += min_dist;
}
// cf_indices is never empty (it always contains the center i):
avg_distance /= cf_indices.size();
return ClusterProxyKD(cf_indices.size(), avg_distance, i);
}
void FeatureGroupingAlgorithmKD::addConsensusFeature_(const vector<Size>& indices, const KDTreeFeatureMaps& kd_data, ConsensusMap& out) const
{
ConsensusFeature cf;
Adduct adduct;
float avg_quality = 0;
// determine best quality feature for adduct ion annotation (Constanst::UserParam::IIMN_BEST_ION)
float best_quality = 0;
size_t best_quality_index = 0;
// collect the "Group" MetaValues of Features in a ConsensusFeature MetaValue (Constant::UserParam::IIMN_LINKED_GROUPS)
vector<String> linked_groups;
for (vector<Size>::const_iterator it = indices.begin(); it != indices.end(); ++it)
{
Size i = *it;
cf.insert(kd_data.mapIndex(i), *(kd_data.feature(i)));
avg_quality += kd_data.feature(i)->getQuality();
if (kd_data.feature(i)->metaValueExists(Constants::UserParam::DC_CHARGE_ADDUCTS) &&
(kd_data.feature(i)->getQuality() > best_quality) &&
(kd_data.feature(i)->getCharge()))
{
best_quality = kd_data.feature(i)->getQuality();
best_quality_index = i;
}
if (kd_data.feature(i)->metaValueExists(Constants::UserParam::ADDUCT_GROUP))
{
linked_groups.emplace_back(kd_data.feature(i)->getMetaValue(Constants::UserParam::ADDUCT_GROUP));
}
}
if (kd_data.feature(best_quality_index)->metaValueExists(Constants::UserParam::DC_CHARGE_ADDUCTS))
{
cf.setMetaValue(Constants::UserParam::IIMN_BEST_ION,
adduct.toAdductString(kd_data.feature(best_quality_index)->getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS),
kd_data.feature(best_quality_index)->getCharge()));
}
if (!linked_groups.empty())
{
cf.setMetaValue(Constants::UserParam::IIMN_LINKED_GROUPS, linked_groups);
}
avg_quality /= indices.size();
cf.setQuality(avg_quality);
cf.computeConsensus();
out.push_back(cf);
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/PoseClusteringShiftSuperimposer.cpp | .cpp | 21,367 | 494 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Eva Lange, Clemens Groepl $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/PoseClusteringShiftSuperimposer.h>
#include <OpenMS/PROCESSING/BASELINE/MorphologicalFilter.h>
#include <OpenMS/MATH/STATISTICS/BasicStatistics.h>
#include <OpenMS/DATASTRUCTURES/ConstRefVector.h>
#include <OpenMS/ML/INTERPOLATION/LinearInterpolation.h>
// #define Debug_PoseClusteringShiftSuperimposer
#ifdef Debug_PoseClusteringShiftSuperimposer
#define V_(bla) std::cout << __FILE__ ":" << __LINE__ << ": " << bla << std::endl;
#else
#define V_(bla)
#endif
#define VV_(bla) V_("" # bla ": " << bla)
namespace OpenMS
{
// Sets the algorithm name and registers the default parameters for the
// shift-only pose clustering superimposer (m/z pairing tolerance, point
// subsampling, shift histogram bucket size/range, and debug dump options).
PoseClusteringShiftSuperimposer::PoseClusteringShiftSuperimposer() :
BaseSuperimposer()
{
setName("PoseClusteringShiftSuperimposer");
// maximum m/z distance of two features to be considered a pair for hashing:
defaults_.setValue("mz_pair_max_distance", 0.5, "Maximum of m/z deviation of corresponding elements in different maps. "
"This condition applies to the pairs considered in hashing.");
defaults_.setMinFloat("mz_pair_max_distance", 0.);
// subsample the most intense points to limit the running time:
defaults_.setValue("num_used_points", 2000, "Maximum number of elements considered in each map "
"(selected by intensity). Use this to reduce the running time "
"and to disregard weak signals during alignment. For using all points, set this to -1.");
defaults_.setMinInt("num_used_points", -1);
// RT-shift histogram resolution and range:
defaults_.setValue("shift_bucket_size", 3.0, "The shift of the retention time "
"interval is being hashed into buckets of this size during pose "
"clustering. A good choice for this would be about "
"the time between consecutive MS scans.");
defaults_.setMinFloat("shift_bucket_size", 0.);
defaults_.setValue("max_shift", 1000.0, "Maximal shift which is considered during histogramming. "
"This applies for both directions.", {"advanced"});
defaults_.setMinFloat("max_shift", 0.);
// debugging output (disabled when the filenames are empty):
defaults_.setValue("dump_buckets", "", "[DEBUG] If non-empty, base filename where hash table buckets will be dumped to. "
"A serial number for each invocation will be appended automatically.", {"advanced"});
defaults_.setValue("dump_pairs", "", "[DEBUG] If non-empty, base filename where the individual hashed pairs will be dumped to (large!). "
"A serial number for each invocation will be appended automatically.", {"advanced"});
defaultsToParam_();
return;
}
/**
  @brief Estimates an RT shift between @p map_model and @p map_scene by pose clustering.

  The most intense elements of both maps are paired within an m/z tolerance
  window; each pair votes for an RT shift in a 1-D hash table weighted by
  intensity similarity.  The histogram is baseline-corrected (tophat filter),
  noise-filtered, and iteratively trimmed by mean/stdev; its centroid becomes
  the intercept of a linear transformation with slope 1, returned in
  @p transformation.

  Fix vs. previous version: the hashing loop bound used 'i < model_map_size - 1',
  which underflows (Size is unsigned) for an empty model map; it is now
  'i + 1 < model_map_size', which is equivalent for non-empty maps and safe
  for empty ones.
*/
void PoseClusteringShiftSuperimposer::run(const ConsensusMap & map_model, const ConsensusMap & map_scene, TransformationDescription & transformation)
{
  typedef ConstRefVector<ConsensusMap> PeakPointerArray_;
  typedef Math::LinearInterpolation<double, double> LinearInterpolationType_;

  // 1-D histogram (hash table) collecting weighted votes for RT shifts
  LinearInterpolationType_ shift_hash_;

  /// Maximum deviation in mz of two partner points
  const double mz_pair_max_distance = param_.getValue("mz_pair_max_distance");
  /// Size of each shift bucket
  const double shift_bucket_size = param_.getValue("shift_bucket_size");

  const UInt struc_elem_length_datapoints = 21; // MAGIC ALERT: number of data points in structuring element for tophat filter, which removes baseline from histogram
  const double scaling_histogram_crossing_slope = 3.0; // MAGIC ALERT: used when distinguishing noise level and enriched histogram bins
  const double scaling_cutoff_stdev_multiplier = 1.5; // MAGIC ALERT: multiplier for stdev in cutoff for outliers
  const UInt loops_mean_stdev_cutoff = 3; // MAGIC ALERT: number of loops in stdev cutoff for outliers

  startProgress(0, 100, "shift pose clustering");
  UInt actual_progress = 0;
  setProgress(++actual_progress);

  // Optionally, we will write dumps of the hash table buckets.
  bool do_dump_buckets = false;
  String dump_buckets_basename;
  if (param_.getValue("dump_buckets") != "")
  {
    do_dump_buckets = true;
    dump_buckets_basename = param_.getValue("dump_buckets").toString();
  }
  setProgress(++actual_progress);

  // Even more optionally, we will write dumps of the hashed pairs.
  bool do_dump_pairs = false;
  String dump_pairs_basename;
  if (param_.getValue("dump_pairs") != "")
  {
    do_dump_pairs = true;
    dump_pairs_basename = param_.getValue("dump_pairs").toString();
  }
  setProgress(++actual_progress);

  //**************************************************************************
  // Select the most abundant data points only. After that, disallow modifications
  // (we tend to have annoying issues with const_iterator versus iterator).
  PeakPointerArray_ model_map_ini(map_model.begin(), map_model.end());
  const PeakPointerArray_ & model_map(model_map_ini);
  PeakPointerArray_ scene_map_ini(map_scene.begin(), map_scene.end());
  const PeakPointerArray_ & scene_map(scene_map_ini);
  {
    // truncate the data as necessary
    // casting to SignedSize is done on PURPOSE here! (num_used_points will be maximal if -1 is used)
    const Size num_used_points = (SignedSize) param_.getValue("num_used_points");
    if (model_map_ini.size() > num_used_points)
    {
      model_map_ini.sortByIntensity(true);
      model_map_ini.resize(num_used_points);
    }
    model_map_ini.sortByComparator(Peak2D::MZLess());
    setProgress(++actual_progress);
    if (scene_map_ini.size() > num_used_points)
    {
      scene_map_ini.sortByIntensity(true);
      scene_map_ini.resize(num_used_points);
    }
    scene_map_ini.sortByComparator(Peak2D::MZLess());
    setProgress(++actual_progress);
    // Note: model_map_ini and scene_map_ini will not be used further below
  }
  setProgress((actual_progress = 10));

  //**************************************************************************
  // Preprocessing

  // get RT ranges (NOTE: we trust that min and max have been updated in the
  // ConsensusMap::convert() method !)
  const double model_low = map_model.getMinRT();
  const double scene_low = map_scene.getMinRT();
  const double model_high = map_model.getMaxRT();
  const double scene_high = map_scene.getMaxRT();

  // Initialize the hash table: shift_hash_
  {
    // (over)estimate the required number of buckets for shifting
    double max_shift = param_.getValue("max_shift");
    // actually the largest possible shift can be much smaller, depending on the data
    do
    {
      if (max_shift < 0)
        max_shift = -max_shift;
      // ...ml@@@mh........ , ........ml@@@mh...
      // ........sl@@@sh... , ...sl@@@sh........
      double diff;
      diff = model_high - scene_low;
      if (diff < 0)
        diff = -diff;
      if (max_shift > diff)
        max_shift = diff;
      diff = model_low - scene_high;
      if (diff < 0)
        diff = -diff;
      if (max_shift > diff)
        max_shift = diff;
    } while (false);
    const Int shift_buckets_num_half = 4 + (Int) ceil((max_shift) / shift_bucket_size);
    const Int shift_buckets_num = 1 + 2 * shift_buckets_num_half;
    shift_hash_.getData().clear();
    shift_hash_.getData().resize(shift_buckets_num);
    shift_hash_.setMapping(shift_bucket_size, shift_buckets_num_half, 0);
  }
  setProgress(++actual_progress);

  //**************************************************************************
  // compute the ratio of the total intensities of both maps, for normalization
  double total_intensity_ratio;
  do
  {
    double total_int_model_map = 0;
    for (Size i = 0; i < model_map.size(); ++i)
    {
      total_int_model_map += model_map[i].getIntensity();
    }
    setProgress(++actual_progress);
    double total_int_scene_map = 0;
    for (Size i = 0; i < scene_map.size(); ++i)
    {
      total_int_scene_map += scene_map[i].getIntensity();
    }
    setProgress(++actual_progress);
    // ... and finally ...
    total_intensity_ratio = total_int_model_map / total_int_scene_map;
  } while (false); // (the extra syntax helps with code folding in eclipse!)
  setProgress((actual_progress = 20));

  /// The serial number is incremented for each invocation of this, to avoid overwriting of hash table dumps.
  static Int dump_buckets_serial = 0;
  ++dump_buckets_serial;

  //**************************************************************************
  // Hashing
  // Compute the transformations between each point pair in the model map
  // and each point pair in the scene map and hash the shift transformation.
  // To speed up the calculation of the final transformation, we confine the number of
  // considered point pairs. We match a point p in the model map only onto those points p'
  // in the scene map that lie in a certain mz interval.
  Size const model_map_size = model_map.size(); // i
  Size const scene_map_size = scene_map.size(); // k
  const double winlength_factor_baseline = 0.1; // MAGIC ALERT: Each window is given unit weight. If there are too many pairs for a window, the individual contributions will be very small, but running time will be high, so we provide a cutoff for this. Typically this will exclude compounds which elute over the whole retention time range from consideration.

  ///////////////////////////////////////////////////////////////////
  // Hashing: Estimate the shift
  do // begin of hashing (the extra syntax helps with code folding in eclipse!)
  {
    String dump_pairs_filename;
    std::ofstream dump_pairs_file;
    if (do_dump_pairs)
    {
      dump_pairs_filename = dump_pairs_basename + String(dump_buckets_serial);
      dump_pairs_file.open(dump_pairs_filename.c_str());
      dump_pairs_file << "#" << ' ' << "i" << ' ' << "k" << std::endl;
    }
    setProgress(++actual_progress);

    // first point in model map
    // NOTE: 'i + 1 < model_map_size' avoids the unsigned underflow of
    // 'model_map_size - 1' when the model map is empty.
    for (Size i = 0, i_low = 0, i_high = 0, k_low = 0, k_high = 0; i + 1 < model_map_size; ++i)
    {
      setProgress(actual_progress + float(i) / model_map_size * 10.f);
      // Adjust window around i in model map
      while (i_low < model_map_size && model_map[i_low].getMZ() < model_map[i].getMZ() - mz_pair_max_distance)
        ++i_low;
      while (i_high < model_map_size && model_map[i_high].getMZ() <= model_map[i].getMZ() + mz_pair_max_distance)
        ++i_high;
      double i_winlength_factor = 1. / (i_high - i_low);
      i_winlength_factor -= winlength_factor_baseline;
      if (i_winlength_factor <= 0)
        continue;
      // Adjust window around k in scene map
      while (k_low < scene_map_size && scene_map[k_low].getMZ() < model_map[i].getMZ() - mz_pair_max_distance)
        ++k_low;
      while (k_high < scene_map_size && scene_map[k_high].getMZ() <= model_map[i].getMZ() + mz_pair_max_distance)
        ++k_high;
      // first point in scene map
      for (Size k = k_low; k < k_high; ++k)
      {
        double k_winlength_factor = 1. / (k_high - k_low);
        k_winlength_factor -= winlength_factor_baseline;
        if (k_winlength_factor <= 0)
          continue;
        // compute similarity of intensities i k
        double similarity_ik;
        {
          const double int_i = model_map[i].getIntensity();
          const double int_k = scene_map[k].getIntensity() * total_intensity_ratio;
          similarity_ik = (int_i < int_k) ? int_i / int_k : int_k / int_i;
          // weight is inverse proportional to number of elements with similar mz
          similarity_ik *= i_winlength_factor;
          similarity_ik *= k_winlength_factor;
        }
        // compute the transformation (i) -> (k)
        double shift = model_map[i].getRT() - scene_map[k].getRT();
        // hash the shift with similarity as weight
        shift_hash_.addValue(shift, similarity_ik);
        if (do_dump_pairs)
        {
          dump_pairs_file << i << ' ' << model_map[i].getRT() << ' ' << model_map[i].getMZ() << ' ' << k << ' ' << scene_map[k].getRT() << ' '
                          << scene_map[k].getMZ() << ' ' << similarity_ik << ' ' << std::endl;
        }
      } // k
    } // i
  } while (false); // end of hashing (the extra syntax helps with code folding in eclipse!)
  setProgress((actual_progress = 30));

  ///////////////////////////////////////////////////////////////////
  // work on shift_hash_: filter the histogram and find its centroid
  double shift_centroid(0.0);
  do
  {
    UInt filtering_stage = 0;

    // optionally, dump before filtering
    String dump_buckets_filename;
    std::ofstream dump_buckets_file;
    if (do_dump_buckets)
    {
      dump_buckets_filename = dump_buckets_basename + "_" + String(dump_buckets_serial);
      dump_buckets_file.open(dump_buckets_filename.c_str());
      VV_(dump_buckets_filename);
      dump_buckets_file << "# shift hash table buckets dump ( scale, height ) : " << dump_buckets_filename << std::endl;
      dump_buckets_file << "# unfiltered hash data\n";
      for (Size index = 0; index < shift_hash_.getData().size(); ++index)
      {
        const double image = shift_hash_.index2key(index);
        const double height = shift_hash_.getData()[index];
        dump_buckets_file << filtering_stage << '\t' << index << '\t' << image << '\t' << height << '\n';
      }
      dump_buckets_file << '\n';
    }
    ++filtering_stage;
    setProgress(++actual_progress);

    // apply tophat filter to histogram (removes the baseline)
    MorphologicalFilter morph_filter;
    Param morph_filter_param;
    morph_filter_param.setValue("struc_elem_unit", "DataPoints");
    morph_filter_param.setValue("struc_elem_length", double(struc_elem_length_datapoints));
    morph_filter_param.setValue("method", "tophat");
    morph_filter.setParameters(morph_filter_param);
    LinearInterpolationType_::container_type buffer(shift_hash_.getData().size());
    morph_filter.filterRange(shift_hash_.getData().begin(), shift_hash_.getData().end(), buffer.begin());
    shift_hash_.getData().swap(buffer);

    // optionally, dump after filtering
    if (do_dump_buckets)
    {
      dump_buckets_file << "# tophat filtered hash data\n";
      for (Size index = 0; index < shift_hash_.getData().size(); ++index)
      {
        const double image = shift_hash_.index2key(index);
        const double height = shift_hash_.getData()[index];
        dump_buckets_file << filtering_stage << '\t' << index << '\t' << image << '\t' << height << '\n';
      }
      dump_buckets_file << '\n';
    }
    setProgress(++actual_progress);
    ++filtering_stage;

    // compute freq_cutoff using a fancy criterion to distinguish between the noise level of the histogram and enriched histogram bins
    double freq_cutoff_low;
    do
    {
      {
        std::copy(shift_hash_.getData().begin(), shift_hash_.getData().end(), buffer.begin());
        std::sort(buffer.begin(), buffer.end(), std::greater<double>());
        double freq_intercept = shift_hash_.getData().front();
        double freq_slope = (shift_hash_.getData().back() - shift_hash_.getData().front()) / double(buffer.size())
                            / scaling_histogram_crossing_slope;
        if (!freq_slope || buffer.empty())
        {
          // in fact these conditions are actually impossible, but let's be really sure ;-)
          freq_cutoff_low = 0;
        }
        else
        {
          // walk down the sorted frequencies until they cross the reference line
          Size index = 1; // not 0 (!)
          while (buffer[index] >= freq_intercept + freq_slope * double(index))
          {
            ++index;
          }
          freq_cutoff_low = buffer[--index]; // note that we have index >= 1
        }
      }
    } while (false);
    setProgress(++actual_progress);

    // apply freq_cutoff, setting smaller values to zero
    for (Size index = 0; index < shift_hash_.getData().size(); ++index)
    {
      if (shift_hash_.getData()[index] < freq_cutoff_low)
      {
        shift_hash_.getData()[index] = 0;
      }
    }
    setProgress(++actual_progress);

    // optionally, dump after noise filtering using freq_cutoff
    if (do_dump_buckets)
    {
      dump_buckets_file << "# after freq_cutoff, which is: " << freq_cutoff_low << '\n';
      for (Size index = 0; index < shift_hash_.getData().size(); ++index)
      {
        const double image = shift_hash_.index2key(index);
        const double height = shift_hash_.getData()[index];
        dump_buckets_file << filtering_stage << '\t' << index << '\t' << image << '\t' << height << '\n';
      }
      dump_buckets_file << '\n';
    }
    setProgress(++actual_progress);

    // iterative cut-off based on mean and stdev - relies upon scaling_cutoff_stdev_multiplier which is a bit hard to set right.
    {
      Math::BasicStatistics<double> statistics;
      std::vector<double>::const_iterator data_begin = shift_hash_.getData().begin();
      const Size data_size = shift_hash_.getData().size();
      Size data_range_begin = 0;
      Size data_range_end = data_size;
      for (UInt loop = 0; loop < loops_mean_stdev_cutoff; ++loop) // MAGIC ALERT: number of loops
      {
        statistics.update(data_begin + data_range_begin, data_begin + data_range_end);
        double mean = statistics.mean() + data_range_begin;
        double stdev = sqrt(statistics.variance());
        data_range_begin = floor(std::max<double>(mean - scaling_cutoff_stdev_multiplier * stdev, 0));
        data_range_end = ceil(std::min<double>(mean + scaling_cutoff_stdev_multiplier * stdev + 1, data_size));
        const double outside_mean = shift_hash_.index2key(mean);
        const double outside_stdev = stdev * shift_hash_.getScale();
        shift_centroid = (outside_mean);
        if (do_dump_buckets)
        {
          dump_buckets_file << "# loop: " << loop << "  mean: " << outside_mean << "  stdev: " << outside_stdev << "  (mean-stdev): "
                            << outside_mean - outside_stdev << "  (mean+stdev): " << outside_mean + outside_stdev
                            << "  data_range_begin: " << data_range_begin << "  data_range_end: "
                            << data_range_end << std::endl;
        }
      }
      setProgress(++actual_progress);
    }
    if (do_dump_buckets)
    {
      dump_buckets_file << "# EOF" << std::endl;
      dump_buckets_file.close();
    }
    setProgress(80);
  } while (false);

  //************************************************************************************
  // Estimate transform

  // Use the centroid of the filtered histogram as the shift estimate.
  // (A historical, disabled alternative used the fullest bin instead; that
  // dead '#if' branch disregarded most of the filtering above and was removed.)
  double intercept = shift_centroid;
  VV_(intercept);
  setProgress(++actual_progress);

  // set trafo: identity slope with the estimated RT shift as intercept
  {
    Param params;
    params.setValue("slope", 1.0);
    params.setValue("intercept", intercept);
    TransformationDescription trafo;
    trafo.fitModel("linear", params);
    transformation = trafo;
  }
  setProgress(++actual_progress);
  endProgress();
  return;
} // run()
} // namespace OpenMS
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Julia Thueringer $
// $Authors: Julia Thueringer $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentAlgorithmTreeGuided.h>
// calculate pearson distance
#include <OpenMS/MATH/StatisticFunctions.h>
// create binary tree
#include <OpenMS/DATASTRUCTURES/DistanceMatrix.h>
#include <OpenMS/DATASTRUCTURES/BinaryTreeNode.h>
#include <OpenMS/ML/CLUSTERING/ClusterHierarchical.h>
#include <OpenMS/ML/CLUSTERING/AverageLinkage.h>
// align maps and generate output
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentTransformer.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentAlgorithmIdentification.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <include/OpenMS/APPLICATIONS/MapAlignerBase.h>
using namespace std;
namespace OpenMS
{
// Default constructor: registers the transformation-model defaults, the model
// type choices, and the nested parameters of the wrapped identification-based
// aligner, then copies the defaults into the active parameter set.
MapAlignmentAlgorithmTreeGuided::MapAlignmentAlgorithmTreeGuided() :
DefaultParamHandler("MapAlignmentAlgorithmTreeGuided"),
ProgressLogger()
{
// nested parameter section for the RT transformation model (default: b_spline)
defaults_.insert("model:", MapAlignerBase::getModelDefaults("b_spline"));
defaults_.setValue("model_type", "b_spline", "Options to control the modeling of retention time transformations from data");
defaults_.setValidStrings("model_type", {"linear","b_spline","lowess","interpolated"});
// nested parameter section forwarded to MapAlignmentAlgorithmIdentification
defaults_.insert("align_algorithm:", MapAlignmentAlgorithmIdentification().getDefaults());
defaults_.setValue("align_algorithm:use_feature_rt", "true", "When aligning feature or consensus maps, don't use the retention time of a peptide identification directly; instead, use the retention time of the centroid of the feature (apex of the elution profile) that the peptide was matched to. If different identifications are matched to one feature, only the peptide closest to the centroid in RT is used.\nPrecludes 'use_unassigned_peptides'.");
defaults_.setValidStrings("align_algorithm:use_feature_rt", {"true","false"});
defaultsToParam_();
}
// Defaulted out-of-line destructor; no resources beyond the members to release.
MapAlignmentAlgorithmTreeGuided::~MapAlignmentAlgorithmTreeGuided() = default;
void MapAlignmentAlgorithmTreeGuided::updateMembers_()
{
align_algorithm_.setParameters(param_.copy("align_algorithm:", true));
model_param_ = param_.copy("model:",true);
model_type_ = param_.getValue("model_type").toString();
model_param_ = model_param_.copy(model_type_+":", true);
}
// Similarity functor that provides similarity calculations with the ()-operator for protected type SeqAndRTList
// that stores retention times given for individual peptide sequences of a feature map
class MapAlignmentAlgorithmTreeGuided::PeptideIdentificationsPearsonDistance_
{
public:
// Returns a similarity in [0, 1]-ish: Pearson correlation of the median RTs
// of peptide sequences shared by both maps, scaled by |intersection|/|union|.
float operator()(SeqAndRTList& map_first, SeqAndRTList& map_second) const
{
// if both input maps have no peptide identifications with hits (sequence) they are not similar
if (map_first.size()+map_second.size() == 0)
{
return 0.0;
}
// create vectors for both maps containing RTs of identical peptide sequences and
// get union and intercept amount of peptides
// Merge-walk over the two sequence-sorted maps (SeqAndRTList keys are ordered).
auto pep1_it = map_first.begin();
auto pep2_it = map_second.begin();
vector<double> intercept_rts1;
vector<double> intercept_rts2;
float union_size = 0.0;
while (pep1_it != map_first.end() && pep2_it != map_second.end())
{
if (pep1_it->first < pep2_it->first)
{
++pep1_it;
}
else if (pep2_it->first < pep1_it->first)
{
++pep2_it;
}
else
{
// shared sequence: record the median RT of its occurrences in each map
double med1 = Math::median(pep1_it->second.begin(), pep1_it->second.end(), true);
intercept_rts1.push_back(med1);
double med2 = Math::median(pep2_it->second.begin(), pep2_it->second.end(), true);
intercept_rts2.push_back(med2);
++pep1_it;
++pep2_it;
}
// NOTE(review): union_size counts merge steps (one per loop iteration) and
// the loop stops once either map is exhausted, so the remaining unmatched
// sequences of the longer map are not counted - confirm this undercount of
// the true union size is intended.
++union_size;
}
Size intercept_size = intercept_rts1.size();
// pearsonCorrelationCoefficient(rt_map_i, rt_map_j)
// NOTE(review): with fewer than two shared sequences (or if the maps share
// nothing, leaving union_size == 0) the result may be NaN - verify callers
// tolerate this.
float pearson_val;
pearson_val = static_cast<float>(Math::pearsonCorrelationCoefficient(intercept_rts1.begin(), intercept_rts1.end(),
intercept_rts2.begin(), intercept_rts2.end()));
// Small intersections are penalized by multiplication with the quotient of intersection to union.
return pearson_val * intercept_size / union_size;
}
}; // end of PeptideIdentificationsPearsonDifference
// Collects, for each identified peptide sequence of one feature, the feature's
// RT: appended to the per-sequence RT list and to the map's flat RT list.
void MapAlignmentAlgorithmTreeGuided::addPeptideSequences_(const PeptideIdentificationList& peptides,
                                                           SeqAndRTList& peptide_rts, std::vector<double>& map_range, double feature_rt)
{
  for (const auto& pep_id : peptides)
  {
    const auto& hits = pep_id.getHits();
    if (hits.empty())
    {
      continue; // identification carries no sequence information
    }
    // only the top hit's sequence is considered
    const String sequence = hits[0].getSequence().toString();
    peptide_rts[sequence].push_back(feature_rt);
    map_range.push_back(feature_rt);
  }
}
// For each input map, extract peptide identifications (sequences) of existing features with associated feature RT.
void MapAlignmentAlgorithmTreeGuided::extractSeqAndRt_(const vector<FeatureMap>& feature_maps,
vector<SeqAndRTList>& maps_seq_and_rt, vector<vector<double>>& maps_ranges)
{
for (Size i = 0; i < feature_maps.size(); ++i)
{
for (const BaseFeature& bf : feature_maps[i])
{
if (!bf.getPeptideIdentifications().empty())
{
addPeptideSequences_(bf.getPeptideIdentifications(), maps_seq_and_rt[i], maps_ranges[i], bf.getRT());
}
}
sort(maps_ranges[i].begin(), maps_ranges[i].end());
}
}
// Extract RTs given for individual features of each map, calculate distances for each pair of maps and cluster hierarchical using average linkage.
void MapAlignmentAlgorithmTreeGuided::buildTree(std::vector<FeatureMap>& feature_maps, std::vector<BinaryTreeNode>& tree,
std::vector<std::vector<double>>& maps_ranges)
{
vector<SeqAndRTList> maps_seq_and_rt(feature_maps.size());
extractSeqAndRt_(feature_maps, maps_seq_and_rt, maps_ranges);
PeptideIdentificationsPearsonDistance_ pep_dist;
AverageLinkage al;
DistanceMatrix<float> dist_matrix; // will be filled
ClusterHierarchical ch;
ch.cluster<SeqAndRTList, PeptideIdentificationsPearsonDistance_>(maps_seq_and_rt, pep_dist, al, tree, dist_matrix);
}
// Align feature maps tree guided using align() of MapAlignmentAlgorithmIdentification.
// For each tree node the child with the larger 10/90 percentile RT range becomes
// the reference; the other child is transformed onto it and both are merged.
// On return, map_transformed holds the fully merged map and trafo_order the
// order in which the original maps entered it.
void MapAlignmentAlgorithmTreeGuided::treeGuidedAlignment(const std::vector<BinaryTreeNode>& tree,
                                                          std::vector<FeatureMap>& feature_maps_transformed,
                                                          std::vector<std::vector<double>>& maps_ranges,
                                                          FeatureMap& map_transformed,
                                                          std::vector<Size>& trafo_order)
{
  Size last_trafo = 0; // to get final transformation order from map_sets
  vector<TransformationDescription> transformations_align; // temporary for aligner output
  vector<FeatureMap> to_align;

  // helper to memorize rt transformation order
  vector<vector<Size>> map_sets(feature_maps_transformed.size());
  for (Size i = 0; i < feature_maps_transformed.size(); ++i)
  {
    map_sets[i].push_back(i);
  }

  Size ref;
  Size to_transform;

  // check RT ranges of IDs: every map must contribute identifications, otherwise alignment is impossible
  for (size_t i = 0; i < maps_ranges.size(); ++i)
  {
    StringList p;
    feature_maps_transformed[i].getPrimaryMSRunPath(p);
    if (maps_ranges[i].empty()) throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "FeatureMap originating from '" + ListUtils::concatenate(p, "', '") + "' contains no Peptide Identifications. Cannot align!");
  }

  for (const auto& node : tree)
  {
    // ----------------
    // prepare alignment
    // ----------------
    // determine the map with larger RT range for 10/90 percentile (->reference)
    double left_range = maps_ranges[node.left_child][maps_ranges[node.left_child].size()*0.9] - maps_ranges[node.left_child][maps_ranges[node.left_child].size()*0.1];
    double right_range = maps_ranges[node.right_child][maps_ranges[node.right_child].size()*0.9] - maps_ranges[node.right_child][maps_ranges[node.right_child].size()*0.1];
    if (left_range > right_range)
    {
      ref = node.left_child;
      to_transform = node.right_child;
    }
    else
    {
      ref = node.right_child;
      to_transform = node.left_child;
    }
    // NOTE(review): a merged, sorted RT list of both children used to be built
    // here (std::merge into a temporary) but was never read; that dead
    // computation was removed. If merged maps' percentile ranges were meant to
    // be refreshed per iteration, maps_ranges[ref] would have to receive the
    // merged list instead.
    to_align.push_back(feature_maps_transformed[to_transform]);
    to_align.push_back(feature_maps_transformed[ref]);

    // ----------------
    // perform alignment (reference index: 1, i.e. the second entry of to_align)
    // ----------------
    align_algorithm_.align(to_align, transformations_align, 1);

    // transform retention times of non-identity for next iteration
    transformations_align[0].fitModel(model_type_, model_param_);
    MapAlignmentTransformer::transformRetentionTimes(feature_maps_transformed[to_transform],
                                                     transformations_align[0], true);

    // combine aligned maps, store at smaller index, because tree always calls smaller number
    // clear feature map at larger index to save memory
    feature_maps_transformed[ref] += feature_maps_transformed[to_transform];
    feature_maps_transformed[ref].updateRanges();
    if (ref < to_transform)
    {
      feature_maps_transformed[to_transform].clear(true);
      last_trafo = ref;
    }
    else
    {
      feature_maps_transformed[to_transform] = feature_maps_transformed[ref];
      feature_maps_transformed[ref].clear(true);
      last_trafo = to_transform;
    }
    // update order of alignment for both aligned maps
    map_sets[ref].insert(map_sets[ref].end(), map_sets[to_transform].begin(), map_sets[to_transform].end());
    map_sets[to_transform] = map_sets[ref];
    transformations_align.clear();
    to_align.clear();
  }
  // copy last transformed FeatureMap for reference return
  map_transformed = feature_maps_transformed[last_trafo];
  trafo_order = map_sets[last_trafo];
}
void MapAlignmentAlgorithmTreeGuided::align(std::vector<FeatureMap>& feature_maps,
std::vector<TransformationDescription>& transformations)
{
// constructing tree
vector<vector<double>> maps_ranges(feature_maps.size()); // to save ranges for alignment (larger rt_range -> reference)
std::vector<BinaryTreeNode> tree; // to construct tree with pearson coefficient
buildTree(feature_maps, tree, maps_ranges);
// print tree
ClusterAnalyzer ca;
OPENMS_LOG_INFO << " Alignment follows Newick tree: " << ca.newickTree(tree, true) << endl;
// alignment
vector<Size> trafo_order;
FeatureMap map_transformed;
{
vector<FeatureMap> copied_maps = feature_maps;
treeGuidedAlignment(tree, copied_maps, maps_ranges, map_transformed, trafo_order);
} // free copied maps
//-------------------------------------------------------------
// generating output
//-------------------------------------------------------------
transformations.clear();
transformations.resize(feature_maps.size()); // for trafo_out
computeTrafosByOriginalRT(feature_maps, map_transformed, transformations, trafo_order);
OpenMS::MapAlignmentAlgorithmTreeGuided::computeTransformedFeatureMaps(feature_maps, transformations);
}
// Extract original RT ("original_RT" meta value, if present) and aligned RT of
// every feature in the merged map to fit one transformation per input map.
// Features appear in map_transformed grouped by map of origin, in trafo_order.
void MapAlignmentAlgorithmTreeGuided::computeTrafosByOriginalRT(std::vector<FeatureMap>& feature_maps,
                                                                FeatureMap& map_transformed,
                                                                std::vector<TransformationDescription>& transformations,
                                                                const std::vector<Size>& trafo_order)
{
  auto feature_it = map_transformed.begin();
  TransformationDescription::DataPoints points;
  for (const Size map_idx : trafo_order)
  {
    points.clear();
    // consume as many merged-map features as the originating map contributed
    for (Size i = 0; i < feature_maps[map_idx].size(); ++i, ++feature_it)
    {
      TransformationDescription::DataPoint point;
      // pair (RT before alignment, RT after alignment)
      point.first = feature_it->metaValueExists("original_RT")
                      ? static_cast<double>(feature_it->getMetaValue("original_RT"))
                      : feature_it->getRT();
      point.second = feature_it->getRT();
      point.note = feature_it->getUniqueId();
      points.push_back(point);
    }
    transformations[map_idx] = TransformationDescription(points);
    transformations[map_idx].fitModel(model_type_, model_param_);
  }
}
// Apply each map's transformation to that map in place (original RTs are
// stored as "original_RT" meta values, as requested by the 'true' flag).
void MapAlignmentAlgorithmTreeGuided::computeTransformedFeatureMaps(vector<FeatureMap>& feature_maps, const vector<TransformationDescription>& transformations)
{
  Size idx = 0;
  for (auto& fmap : feature_maps)
  {
    MapAlignmentTransformer::transformRetentionTimes(fmap, transformations[idx], true);
    ++idx;
  }
}
}
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/TransformationModelLowess.h>
#include <OpenMS/PROCESSING/SMOOTHING/FastLowessSmoothing.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/ML/CROSSVALIDATION/CrossValidation.h>
#include <OpenMS/CONCEPT/EnumHelpers.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <algorithm>
#include <array>
#include <cmath>
#include <iostream>
#include <iterator>
#include <limits>
#include <sstream>
using namespace std;
namespace OpenMS
{
// Strict weak ordering of calibration points by their first coordinate;
// used to sort the input data before LOWESS smoothing.
bool cmpFirstDimension(const TransformationModel::DataPoint& x, const TransformationModel::DataPoint& y)
{
  return x.first < y.first;
}
// Constructor: fits a LOWESS curve to the (sorted) calibration points and
// wraps the smoothed points in an interpolation model (model_).
// Optionally selects the LOWESS span by cross-validated grid search first.
// Throws Exception::IllegalArgument if fewer than two data points are given.
TransformationModelLowess::TransformationModelLowess(
const TransformationModel::DataPoints& data_,
const Param& params) : model_(nullptr)
{
// parameter handling/checking:
params_ = params;
Param defaults;
getDefaultParameters(defaults);
params_.setDefaults(defaults);
if (data_.size() < 2)
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"'lowess' model requires more data");
}
// TODO copy ...
TransformationModel::DataPoints data(data_);
// sort data
std::sort(data.begin(), data.end(), cmpFirstDimension);
vector<double> x(data.size()), y(data.size()), result(data.size());
// NOTE(review): after the sort, xmin_/xmax_ are simply the first/last x
// values; the running min/max scan below is redundant but harmless.
double xmin_ = data[0].first;
double xmax_ = xmin_;
for (Size i = 0; i < data.size(); ++i)
{
x[i] = data[i].first;
y[i] = data[i].second;
if (x[i] < xmin_)
{
xmin_ = x[i];
}
else if (x[i] > xmax_)
{
xmax_ = x[i];
}
}
double span = params_.getValue("span");
int nsteps = params_.getValue("num_iterations");
double delta = params_.getValue("delta");
if (delta < 0.0)
{
delta = (xmax_ - xmin_) * 0.01; // automatically determine delta
}
// Auto-span options
const bool auto_span_flag = params_.getValue("auto_span").toBool();
const double span_min_param = (double)params_.getValue("auto_span_min");
const double span_max_param = (double)params_.getValue("auto_span_max");
const int min_neighbors = (int)params_.getValue("auto_min_neighbors");
const int k_folds_param = (int)params_.getValue("auto_k_folds");
const auto metric = (TransformationModelLowess::CVMetric)Helpers::indexOf(TransformationModelLowess::names_of_cvmetric, params_.getValue("auto_metric").toString());
// Determine optimal span using CV
const Size n = data.size();
if (auto_span_flag)
{
OPENMS_LOG_INFO << "Will perform CV to determine optimal span for lowess fit..." << std::endl;
// Build folds: LOO (n<=50) else K-fold
const bool use_loo = (n <= 50);
const Size K = use_loo ? n : static_cast<Size>(std::max(2, k_folds_param));
const auto folds = OpenMS::CrossValidation::makeKFolds(n, K);
// Build candidate grid spans
const String grid_str = params_.getValue("auto_span_grid").toString();
std::vector<double> user_grid;
if (!grid_str.empty())
{
user_grid = ListUtils::create<double>(grid_str);
}
// buildSpanGrid is declared elsewhere in this file/class - presumably it
// derives candidate spans from n and the min/max bounds; TODO confirm.
const std::vector<double> spans = buildSpanGrid(n, user_grid,
span_min_param, span_max_param,
min_neighbors);
// Train evaluation callback: fits a LOWESS model (with auto_span disabled to
// prevent recursive CV) on each training fold and collects absolute
// held-out prediction errors.
auto train_eval = [&](double s,
const std::vector<std::vector<Size>>& folds_in,
std::vector<double>& abs_errs)
{
// skip spans that would leave fewer than min_neighbors local points
if (s * static_cast<double>(n) < static_cast<double>(min_neighbors)) return;
Param p_cv = params_;
p_cv.setValue("span", s);
p_cv.setValue("auto_span", "false");
for (Size f = 0; f < folds_in.size(); ++f)
{
TransformationModel::DataPoints train;
train.reserve(n - folds_in[f].size());
std::vector<char> held(n, 0);
for (Size j : folds_in[f]) held[j] = 1;
for (Size j = 0; j < n; ++j) if (!held[j]) train.push_back(data[j]);
if (train.size() < static_cast<Size>(std::max(3, min_neighbors))) continue;
TransformationModelLowess cv_model(train, p_cv);
for (Size j : folds_in[f])
{
const double yhat = cv_model.evaluate(x[j]);
const double e = std::fabs(yhat - y[j]);
if (std::isfinite(e)) abs_errs.push_back(e);
}
}
};
// Later, when scoring folds:
// scoreResiduals is declared elsewhere - presumably it reduces the error
// vector to a single score according to 'metric'; TODO confirm.
auto score = [&](const std::vector<double>& errs)
{
return scoreResiduals(errs, metric);
};
// Run 1-D grid search
// kTieTol is a constant declared elsewhere (tie tolerance for candidate scores).
const auto [best_span, best_score] =
OpenMS::CrossValidation::gridSearch1D(spans.begin(), spans.end(),
folds, train_eval, score,
kTieTol, OpenMS::CrossValidation::CrossValidation::CandidateTieBreak::PreferLarger);
span = best_span;
OPENMS_LOG_INFO << "Optimal selected span=" << span
<< " (" << params_.getValue("auto_metric").toString() << " = " << best_score << ")" << std::endl;
// persist for downstream and prevent re-entry
params_.setValue("span", span);
params_.setValue("auto_span", "false");
}
// Smooth the data with the (possibly CV-selected) span.
FastLowessSmoothing::lowess(x, y, span, nsteps, delta, result);
TransformationModel::DataPoints data_out;
for (Size i = 0; i < result.size(); ++i)
{
data_out.push_back( std::make_pair(x[i], result[i]) );
}
// TODO thin out data here ? we may not need that many points here to interpolate ... it is enough if we store a few datapoints
Param p;
TransformationModelInterpolated::getDefaultParameters(p);
/// p.setValue("interpolation_type", "cspline"); // linear interpolation between lowess pts
/// p.setValue("extrapolation_type", "four-point-linear");
p.setValue("interpolation_type", params_.getValue("interpolation_type"));
p.setValue("extrapolation_type", params_.getValue("extrapolation_type"));
// create new interpolation model based on the lowess data
model_ = new TransformationModelInterpolated(data_out, p);
}
TransformationModelLowess::~TransformationModelLowess()
{
if (model_) delete model_;
}
void TransformationModelLowess::getDefaultParameters(Param& params)
{
params.clear();
params.setValue("span", 2/3.0, "Fraction of datapoints (f) to use for each local regression (determines the amount of smoothing). Choosing this parameter in the range .2 to .8 usually results in a good fit.");
params.setMinFloat("span", 0.0);
params.setMaxFloat("span", 1.0);
// --- Auto-span tuning (optional) ---
params.setValue("auto_span", "false",
"If true, or if 'span' is 0, automatically select LOWESS span by cross-validation.");
params.setValidStrings("auto_span", {"true","false"});
params.setValue("auto_span_min", 0.15,
"Lower bound for auto-selected span.");
params.setMinFloat("auto_span_min", 0.001);
params.setValue("auto_span_max", 0.80,
"Upper bound for auto-selected span.");
params.setMaxFloat("auto_span_max", 0.99);
params.setValue("auto_min_neighbors", 5,
"Minimum number of neighbors (span*n) enforced in auto mode.");
params.setMinInt("auto_min_neighbors", 3);
params.setValue("auto_k_folds", 5,
"K-folds for CV when n>50 (else LOO is used).");
params.setMinInt("auto_k_folds", 2);
params.setValue("auto_metric", "mae",
"Metric for CV selection: one of {'p90','p95','p99','rmse','mae'}.");
params.setValidStrings("auto_metric", {"p90","p95","p99","rmse","mae"});
params.setValue("auto_span_grid", "",
"Optional explicit grid of span candidates in (0,1]. Comma-separated list, e.g. '0.2,0.3,0.5'. If empty, a default grid is used.");
params.setValue("num_iterations", 3, "Number of robustifying iterations for lowess fitting.");
params.setMinInt("num_iterations", 0);
params.setValue("delta", -1.0, "Nonnegative parameter which may be used to save computations (recommended value is 0.01 of the range of the input, e.g. for data ranging from 1000 seconds to 2000 seconds, it could be set to 10). Setting a negative value will automatically do this.");
params.setValue("interpolation_type", "cspline", "Method to use for interpolation between datapoints computed by lowess. 'linear': Linear interpolation. 'cspline': Use the cubic spline for interpolation. 'akima': Use an akima spline for interpolation");
params.setValidStrings("interpolation_type", {"linear","cspline","akima"});
params.setValue("extrapolation_type", "four-point-linear", "Method to use for extrapolation outside the data range. 'two-point-linear': Uses a line through the first and last point to extrapolate. 'four-point-linear': Uses a line through the first and second point to extrapolate in front and and a line through the last and second-to-last point in the end. 'global-linear': Uses a linear regression to fit a line through all data points and use it for interpolation.");
params.setValidStrings("extrapolation_type", {"two-point-linear","four-point-linear","global-linear"});
}
std::vector<double> TransformationModelLowess::buildSpanGrid(Size n_pts,
const std::vector<double>& candidate_spans,
double span_min_param,
double span_max_param,
int min_neighbors)
{
// Parse user grid if supplied, needs to be comma-separated doubles
std::vector<double> grid;
if (!candidate_spans.empty())
{
grid = candidate_spans;
}
else
{
static const double cand[] = {0.01, 0.05, 0.15, 0.20, 0.25, 0.30, 0.35, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90};
grid.assign(std::begin(cand), std::end(cand));
}
double span_min = std::max(0.01, span_min_param);
double span_max = std::min(0.99, span_max_param);
if (span_min > span_max) std::swap(span_min, span_max);
const double min_span_neighbors = (n_pts > 0) ? static_cast<double>(min_neighbors) / static_cast<double>(n_pts) : 1.0;
const double lower = std::max(span_min, std::min(0.9, min_span_neighbors)); // avoid 1–2 neighbor fits
for (double& v : grid)
{
if (v < lower) v = lower;
if (v > span_max) v = span_max;
}
std::sort(grid.begin(), grid.end());
grid.erase(std::unique(grid.begin(), grid.end(),
[](double a, double b){ return std::fabs(a - b) < 1e-9; }),
grid.end());
if (grid.empty())
{
grid.push_back(std::min(0.95, std::max(0.01, lower)));
}
return grid;
}
  // String names of the CV metrics; indexed via Helpers::indexOf to map the
  // 'auto_metric' parameter string to a CVMetric value, so the order here must
  // match the CVMetric enum declaration.
  const std::array<std::string, (Size)TransformationModelLowess::CVMetric::SIZE_OF_CVMETRIC> TransformationModelLowess::names_of_cvmetric = { "rmse", "mae", "p90", "p95", "p99" };
double TransformationModelLowess::scoreResiduals(const std::vector<double>& errs,
CVMetric metric)
{
if (errs.empty()) return std::numeric_limits<double>::infinity();
switch (metric)
{
case CVMetric::RMSE:
{
std::vector<double> zeros(errs.size(), 0.0);
return OpenMS::Math::rootMeanSquareError(errs.begin(), errs.end(), zeros.begin(), zeros.end());
}
case CVMetric::MAE:
return OpenMS::Math::MeanAbsoluteDeviation(errs.begin(), errs.end(), 0.0);
case CVMetric::P90:
{
std::vector<double> tmp = errs;
std::sort(tmp.begin(), tmp.end());
return OpenMS::Math::quantile(tmp.begin(), tmp.end(), 0.90);
}
case CVMetric::P99:
{
std::vector<double> tmp = errs;
std::sort(tmp.begin(), tmp.end());
return OpenMS::Math::quantile(tmp.begin(), tmp.end(), 0.99);
}
case CVMetric::P95:
default:
{
std::vector<double> tmp = errs;
std::sort(tmp.begin(), tmp.end());
return OpenMS::Math::quantile(tmp.begin(), tmp.end(), 0.95);
}
}
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/BaseSuperimposer.cpp | .cpp | 614 | 23 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Clemens Groepl, Marc Sturm $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/BaseSuperimposer.h>
namespace OpenMS
{
  // Registers this param handler under the name "BaseSuperimposer";
  // the class itself holds no state beyond its two base classes.
  BaseSuperimposer::BaseSuperimposer() :
    DefaultParamHandler("BaseSuperimposer"),
    ProgressLogger()
  {
  }
  BaseSuperimposer::~BaseSuperimposer() = default;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/ConsensusMapNormalizerAlgorithmQuantile.cpp | .cpp | 6,838 | 175 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Johannes Veit $
// $Authors: Johannes Junker $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/ConsensusMapNormalizerAlgorithmQuantile.h>
#include <OpenMS/CONCEPT/ProgressLogger.h>
using namespace std;
namespace OpenMS
{
  // Both special members are trivial: the algorithm keeps no state of its own.
  ConsensusMapNormalizerAlgorithmQuantile::ConsensusMapNormalizerAlgorithmQuantile() = default;
  ConsensusMapNormalizerAlgorithmQuantile::~ConsensusMapNormalizerAlgorithmQuantile() = default;
  // Quantile-normalizes the feature intensities of all sub-maps in 'map':
  // builds a common reference intensity distribution (the mean of all
  // per-map sorted, resampled distributions) and replaces each feature's
  // intensity by the reference value of the same rank within its map.
  void ConsensusMapNormalizerAlgorithmQuantile::normalizeMaps(ConsensusMap& map)
  {
    //extract feature intensities
    vector<vector<double> > feature_ints;
    extractIntensityVectors(map, feature_ints);
    Size number_of_maps = feature_ints.size();
    //determine largest number of features in any map
    Size largest_number_of_features = 0;
    for (Size i = 0; i < number_of_maps; ++i)
    {
      if (feature_ints[i].size() > largest_number_of_features)
      {
        largest_number_of_features = feature_ints[i].size();
      }
    }
    //resample n data points from each sorted intensity distribution (from the different maps), n = maximum number of features in any map
    vector<vector<double> > resampled_sorted_data;
    for (Size i = 0; i < number_of_maps; ++i)
    {
      vector<double> sorted = feature_ints[i];
      std::sort(sorted.begin(), sorted.end());
      vector<double> resampled(largest_number_of_features);
      resample(sorted, resampled, static_cast<UInt>(largest_number_of_features));
      resampled_sorted_data.push_back(resampled);
    }
    //compute reference distribution from all resampled distributions
    // (element-wise mean over all maps)
    vector<double> reference_distribution(largest_number_of_features);
    for (Size i = 0; i < number_of_maps; ++i)
    {
      for (Size j = 0; j < largest_number_of_features; ++j)
      {
        reference_distribution[j] += (resampled_sorted_data[i][j] / (double)number_of_maps);
      }
    }
    //for each map: resample from the reference distribution down to the respective original size again
    vector<vector<double> > normalized_sorted_ints(number_of_maps);
    for (Size i = 0; i < number_of_maps; ++i)
    {
      vector<double> ints;
      resample(reference_distribution, ints, static_cast<UInt>(feature_ints[i].size()));
      normalized_sorted_ints[i] = ints;
    }
    //set the intensities of feature_ints to the normalized intensities
    for (Size i = 0; i < number_of_maps; ++i)
    {
      // We do not want to change the order in feature_ints[i] but normalized_sorted_ints
      // comes sorted, so we transfer the values in feature_ints[i] into pairs that store
      // the value and the index in feature_ints[i]. Then we sort the vector of pair and as
      // a result store the indexes of feature_ints[i] in a sorted order in sort_indices.
      std::vector<std::pair<double, UInt> > sort_pairs;
      sort_pairs.reserve(feature_ints[i].size());
      for (Size j = 0; j < feature_ints[i].size(); ++j)
      {
        sort_pairs.emplace_back(feature_ints[i][j], j);
      }
      std::sort(sort_pairs.begin(), sort_pairs.end());
      vector<Size> sort_indices;
      sort_indices.reserve(sort_pairs.size());
      for (Size j = 0; j < sort_pairs.size(); ++j)
      {
        sort_indices.push_back(sort_pairs.at(j).second);
      }
      // walk the ranks in ascending order and write the k-th smallest
      // normalized value back to the position holding the k-th smallest
      // original value, preserving the original (unsorted) feature order
      Size k = 0;
      for (Size j = 0; j < sort_indices.size(); ++j)
      {
        Size idx = sort_indices[j];
        feature_ints[i][idx] = normalized_sorted_ints[i][k++];
      }
    }
    //write new feature intensities to the consensus map
    setNormalizedIntensityValues(feature_ints, map);
  }
void ConsensusMapNormalizerAlgorithmQuantile::resample(const vector<double>& data_in, vector<double>& data_out, UInt n_resampling_points)
{
data_out.clear();
data_out.resize(n_resampling_points);
if (n_resampling_points == 0)
{
return;
}
data_out[0] = data_in.front();
data_out[n_resampling_points - 1] = data_in.back();
double delta = (double)(data_in.size() - 1) / (double)(n_resampling_points - 1);
for (UInt i = 1; i < n_resampling_points - 1; ++i)
{
double pseudo_index = (double)i * delta;
double left_index = (UInt)floor(pseudo_index);
double right_index = (UInt)ceil(pseudo_index);
if (left_index == right_index)
{
data_out[i] = data_in[left_index];
}
else
{
double weight_left = 1.0 - (pseudo_index - (double)left_index);
double weight_right = 1.0 - ((double)right_index - pseudo_index);
data_out[i] = weight_left * data_in[left_index] + weight_right * data_in[right_index];
}
}
}
void ConsensusMapNormalizerAlgorithmQuantile::extractIntensityVectors(const ConsensusMap& map, vector<vector<double> >& out_intensities)
{
//reserve space for out_intensities (unequal vector lengths, 0-features omitted)
Size number_of_maps = map.getColumnHeaders().size();
out_intensities.clear();
out_intensities.resize(number_of_maps);
for (UInt i = 0; i < number_of_maps; i++)
{
ConsensusMap::ColumnHeaders::const_iterator it = map.getColumnHeaders().find(i);
if (it == map.getColumnHeaders().end()) throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String(i));
out_intensities[i].reserve(it->second.size);
}
//fill out_intensities
ConsensusMap::ConstIterator cf_it;
for (cf_it = map.begin(); cf_it != map.end(); ++cf_it)
{
ConsensusFeature::HandleSetType::const_iterator f_it;
for (f_it = cf_it->getFeatures().begin(); f_it != cf_it->getFeatures().end(); ++f_it)
{
out_intensities[f_it->getMapIndex()].push_back(f_it->getIntensity());
}
}
}
void ConsensusMapNormalizerAlgorithmQuantile::setNormalizedIntensityValues(const vector<vector<double> >& feature_ints, ConsensusMap& map)
{
//assumes the input map and feature_ints are in the same order as in the beginning,
//although feature_ints has normalized values now (but the same ranks as before)
Size number_of_maps = map.getColumnHeaders().size();
ConsensusMap::ConstIterator cf_it;
vector<Size> progress_indices(number_of_maps);
for (cf_it = map.begin(); cf_it != map.end(); ++cf_it)
{
ConsensusFeature::HandleSetType::const_iterator f_it;
for (f_it = cf_it->getFeatures().begin(); f_it != cf_it->getFeatures().end(); ++f_it)
{
Size map_idx = f_it->getMapIndex();
double intensity = feature_ints[map_idx][progress_indices[map_idx]++];
f_it->asMutable().setIntensity(intensity);
}
}
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/BaseGroupFinder.cpp | .cpp | 1,214 | 42 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Clemens Groepl, Marc Sturm $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/BaseGroupFinder.h>
namespace OpenMS
{
  // Registers this param handler under the name "BaseGroupFinder".
  BaseGroupFinder::BaseGroupFinder() :
    DefaultParamHandler("BaseGroupFinder")
  {
  }
  BaseGroupFinder::~BaseGroupFinder() = default;
void BaseGroupFinder::checkIds_(const std::vector<ConsensusMap>& maps) const
{
std::set<Size> used_ids;
for (Size i = 0; i < maps.size(); ++i)
{
const ConsensusMap& map = maps[i];
for (ConsensusMap::ColumnHeaders::const_iterator it = map.getColumnHeaders().begin(); it != map.getColumnHeaders().end(); ++it)
{
if (used_ids.find(it->first) != used_ids.end())
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "file ids have to be unique");
}
else
{
used_ids.insert(it->first);
}
}
}
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/TransformationModelInterpolated.cpp | .cpp | 12,800 | 406 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Stephan Aiche $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/DATASTRUCTURES/String.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/TransformationModelInterpolated.h>
// Spline2dInterpolator
#include <OpenMS/MATH/MISC/CubicSpline2d.h>
#include <numeric>
// AkimaInterpolator
#include <Mathematics/IntpAkimaNonuniform1.h>
namespace OpenMS
{
/**
* @brief Spline2dInterpolator
*/
class Spline2dInterpolator :
public TransformationModelInterpolated::Interpolator
{
public:
Spline2dInterpolator()
= default;
void init(std::vector<double>& x, std::vector<double>& y) override
{
// cleanup before we use a new one
if (spline_ != (CubicSpline2d*) nullptr) delete spline_;
// initialize spline
spline_ = new CubicSpline2d(x, y);
}
double eval(const double& x) const override
{
return spline_->eval(x);
}
~Spline2dInterpolator() override
{
delete spline_;
}
private:
CubicSpline2d* spline_{nullptr};
// Spline2d<double>* spline_;
};
/**
* @brief AkimaInterpolator
*/
class AkimaInterpolator :
public TransformationModelInterpolated::Interpolator
{
public:
AkimaInterpolator()
= default;
void init(std::vector<double>& x, std::vector<double>& y) override
{
if (interpolator_ != (gte::IntpAkimaNonuniform1<double>*) nullptr) delete interpolator_;
// re-construct a new interpolator
interpolator_ = new gte::IntpAkimaNonuniform1<double>(static_cast<int>(x.size()), &x.front(), &y.front());
}
double eval(const double& x) const override
{
return (* interpolator_)(x);
}
~AkimaInterpolator() override
{
delete interpolator_;
}
private:
gte::IntpAkimaNonuniform1<double>* interpolator_{nullptr};
};
/**
* @brief LinearInterpolator.
*/
class LinearInterpolator :
public TransformationModelInterpolated::Interpolator
{
public:
LinearInterpolator()
= default;
void init(std::vector<double>& x, std::vector<double>& y) override
{
// clear data
x_.clear();
y_.clear();
// copy data
// TODO: should we solve this using pointers to the original data?
x_.insert(x_.begin(), x.begin(), x.end());
y_.insert(y_.begin(), y.begin(), y.end());
}
double eval(const double& x) const override
{
// find nearest pair of points
std::vector<double>::const_iterator it = std::upper_bound(x_.begin(), x_.end(), x);
// interpolator is guaranteed to be only evaluated on points x, x_.front() =< x =< x x.back()
// see TransformationModelInterpolated::evaluate
// compute interpolation
// the only point that is > then an element in our series is y_.back()
// see call guarantee above
if (it == x_.end())
{
return y_.back();
}
else
{
// interpolate .. invariant: idx > 0
const SignedSize idx = it - x_.begin();
const double x_0 = x_[idx - 1];
const double x_1 = x_[idx];
const double y_0 = y_[idx - 1];
const double y_1 = y_[idx];
return y_0 + (y_1 - y_0) * (x - x_0) / (x_1 - x_0);
}
}
~LinearInterpolator() override
= default;
private:
/// x values
std::vector<double> x_;
/// y values
std::vector<double> y_;
};
void TransformationModelInterpolated::preprocessDataPoints_(const DataPoints& data)
{
// need monotonically increasing x values (can't have the same value twice):
std::map<double, std::vector<double> > mapping;
for (TransformationModel::DataPoints::const_iterator it = data.begin();
it != data.end();
++it)
{
mapping[it->first].push_back(it->second);
}
x_.resize(mapping.size());
y_.resize(mapping.size());
size_t i = 0;
for (std::map<double, std::vector<double> >::const_iterator it = mapping.begin();
it != mapping.end();
++it, ++i)
{
x_[i] = it->first;
// use average y value:
y_[i] = std::accumulate(it->second.begin(), it->second.end(), 0.0) / it->second.size();
}
// ensure that we have enough points for an interpolation
if (x_.size() < 3)
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Cubic spline model needs at least 3 data points (with unique x values)");
}
}
void TransformationModelInterpolated::preprocessDataPoints_(const std::vector<std::pair<double,double>>& data)
{
// need monotonically increasing x values (can't have the same value twice):
std::map<double, std::vector<double> > mapping;
for (std::vector<std::pair<double,double>>::const_iterator it = data.begin();
it != data.end();
++it)
{
mapping[it->first].push_back(it->second);
}
x_.resize(mapping.size());
y_.resize(mapping.size());
size_t i = 0;
for (std::map<double, std::vector<double> >::const_iterator it = mapping.begin();
it != mapping.end();
++it, ++i)
{
x_[i] = it->first;
// use average y value:
y_[i] = std::accumulate(it->second.begin(), it->second.end(), 0.0) / it->second.size();
}
// ensure that we have enough points for an interpolation
if (x_.size() < 3)
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Cubic spline model needs at least 3 data points (with unique x values)");
}
}
TransformationModelInterpolated::TransformationModelInterpolated(const std::vector<std::pair<double,double>>& data, const Param& params, bool preprocess = true)
{
params_ = params;
Param defaults;
getDefaultParameters(defaults);
params_.setDefaults(defaults);
// convert incoming data to x_ and y_
if (preprocess)
{
preprocessDataPoints_(data);
}
else
{
x_.resize(data.size());
y_.resize(data.size());
for (const std::pair<double,double>& pair : data)
{
x_.push_back(pair.first);
y_.push_back(pair.second);
}
}
// choose the actual interpolation type
const String interpolation_type = params_.getValue("interpolation_type").toString();
if (interpolation_type == "linear")
{
interp_ = new LinearInterpolator();
}
else if (interpolation_type == "cspline")
{
interp_ = new Spline2dInterpolator();
}
else if (interpolation_type == "akima")
{
interp_ = new AkimaInterpolator();
}
else
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"unknown/unsupported interpolation type '" + interpolation_type + "'");
}
// assign data
interp_->init(x_, y_);
// linear model for extrapolation:
const String extrapolation_type = params_.getValue("extrapolation_type").toString();
if (extrapolation_type == "global-linear")
{
std::vector<TransformationModel::DataPoint> bloated_data{};
bloated_data.resize(x_.size());
//uff... well here we go.. adding an empty string
for (Size s = 0; s < x_.size(); ++s)
{
bloated_data.emplace_back(TransformationModel::DataPoint(x_[s],y_[s]));
}
lm_front_ = new TransformationModelLinear(bloated_data, Param());
lm_back_ = new TransformationModelLinear(bloated_data, Param());
}
else if (extrapolation_type == "two-point-linear")
{
TransformationModel::DataPoints lm_data(2);
lm_data[0] = std::make_pair(x_.front(), y_.front());
lm_data[1] = std::make_pair(x_.back(), y_.back()); // last point
lm_front_ = new TransformationModelLinear(lm_data, Param());
lm_back_ = new TransformationModelLinear(lm_data, Param());
}
else if (extrapolation_type == "four-point-linear")
{
TransformationModel::DataPoints lm_data(2);
lm_data[0] = std::make_pair(x_[0], y_[0]);
lm_data[1] = std::make_pair(x_[1], y_[1]);
lm_front_ = new TransformationModelLinear(lm_data, Param());
lm_data[0] = std::make_pair(x_[ x_.size()-2 ], y_[ y_.size()-2] ); // second to last point
lm_data[1] = std::make_pair(x_.back(), y_.back()); // last point
lm_back_ = new TransformationModelLinear(lm_data, Param());
}
else
{
if (interp_)
{
delete interp_;
}
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"unknown/unsupported extrapolation type '" + extrapolation_type + "'");
}
}
TransformationModelInterpolated::TransformationModelInterpolated(const TransformationModel::DataPoints& data, const Param& params)
{
params_ = params;
Param defaults;
getDefaultParameters(defaults);
params_.setDefaults(defaults);
// convert incoming data to x_ and y_
preprocessDataPoints_(data);
// choose the actual interpolation type
const String interpolation_type = params_.getValue("interpolation_type").toString();
if (interpolation_type == "linear")
{
interp_ = new LinearInterpolator();
}
else if (interpolation_type == "cspline")
{
interp_ = new Spline2dInterpolator();
}
else if (interpolation_type == "akima")
{
interp_ = new AkimaInterpolator();
}
else
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"unknown/unsupported interpolation type '" + interpolation_type + "'");
}
// assign data
interp_->init(x_, y_);
// linear model for extrapolation:
const String extrapolation_type = params_.getValue("extrapolation_type").toString();
if (extrapolation_type == "global-linear")
{
lm_front_ = new TransformationModelLinear(data, Param());
lm_back_ = new TransformationModelLinear(data, Param());
}
else if (extrapolation_type == "two-point-linear")
{
TransformationModel::DataPoints lm_data(2);
lm_data[0] = std::make_pair(x_.front(), y_.front());
lm_data[1] = std::make_pair(x_.back(), y_.back()); // last point
lm_front_ = new TransformationModelLinear(lm_data, Param());
lm_back_ = new TransformationModelLinear(lm_data, Param());
}
else if (extrapolation_type == "four-point-linear")
{
TransformationModel::DataPoints lm_data(2);
lm_data[0] = std::make_pair(x_[0], y_[0]);
lm_data[1] = std::make_pair(x_[1], y_[1]);
lm_front_ = new TransformationModelLinear(lm_data, Param());
lm_data[0] = std::make_pair(x_[ x_.size()-2 ], y_[ y_.size()-2] ); // second to last point
lm_data[1] = std::make_pair(x_.back(), y_.back()); // last point
lm_back_ = new TransformationModelLinear(lm_data, Param());
}
else
{
if (interp_)
{
delete interp_;
}
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"unknown/unsupported extrapolation type '" + extrapolation_type + "'");
}
}
TransformationModelInterpolated::~TransformationModelInterpolated()
{
if (interp_) delete interp_;
if (lm_front_) delete lm_front_;
if (lm_back_) delete lm_back_;
}
double TransformationModelInterpolated::evaluate(double value) const
{
if (value < x_.front()) // extrapolate front
{
return lm_front_->evaluate(value);
}
else if (value > x_.back()) // extrapolate back
{
return lm_back_->evaluate(value);
}
// interpolate:
return interp_->eval(value);
}
  // Resets 'params' and registers the defaults for the interpolated model.
  void TransformationModelInterpolated::getDefaultParameters(Param& params)
  {
    params.clear();
    params.setValue("interpolation_type", "cspline", "Type of interpolation to apply.");
    params.setValidStrings("interpolation_type", {"linear","cspline","akima"});
    // note: extrapolation is always linear; the options only differ in which
    // support points the fitted line(s) are based on
    params.setValue("extrapolation_type", "two-point-linear", "Type of extrapolation to apply: two-point-linear: use the first and last data point to build a single linear model, four-point-linear: build two linear models on both ends using the first two / last two points, global-linear: use all points to build a single linear model. Note that global-linear may not be continuous at the border.");
    params.setValidStrings("extrapolation_type", {"two-point-linear","four-point-linear","global-linear"});
  }
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/ConsensusMapNormalizerAlgorithmThreshold.cpp | .cpp | 4,230 | 110 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Lars Nilse $
// $Authors: Hendrik Brauer, Oliver Kohlbacher, Johannes Junker $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/ConsensusMapNormalizerAlgorithmThreshold.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/ConsensusMapNormalizerAlgorithmMedian.h>
#include <OpenMS/CONCEPT/ProgressLogger.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/MATH/StatisticFunctions.h>
using namespace std;
namespace OpenMS
{
  // Both special members are trivial: the algorithm keeps no state of its own.
  ConsensusMapNormalizerAlgorithmThreshold::ConsensusMapNormalizerAlgorithmThreshold() = default;
  ConsensusMapNormalizerAlgorithmThreshold::~ConsensusMapNormalizerAlgorithmThreshold() = default;
vector<double> ConsensusMapNormalizerAlgorithmThreshold::computeCorrelation(const ConsensusMap& map, const double& ratio_threshold, const String& acc_filter, const String& desc_filter)
{
Size number_of_features = map.size();
Size number_of_maps = map.getColumnHeaders().size();
vector<vector<double> > feature_int(number_of_maps);
//get map with most features, resize feature_int
UInt map_with_most_features_idx = 0;
ConsensusMap::ColumnHeaders::const_iterator map_with_most_features = map.getColumnHeaders().find(0);
for (UInt i = 0; i < number_of_maps; i++)
{
feature_int[i].resize(number_of_features);
ConsensusMap::ColumnHeaders::const_iterator it = map.getColumnHeaders().find(i);
if (it->second.size > map_with_most_features->second.size)
{
map_with_most_features = it;
map_with_most_features_idx = i;
}
}
//fill feature_int with intensities
Size pass_counter = 0;
ConsensusMap::ConstIterator cf_it;
UInt idx = 0;
for (cf_it = map.begin(); cf_it != map.end(); ++cf_it, ++idx)
{
if (!ConsensusMapNormalizerAlgorithmMedian::passesFilters_(cf_it, map, acc_filter, desc_filter))
{
continue;
}
++pass_counter;
ConsensusFeature::HandleSetType::const_iterator f_it;
for (f_it = cf_it->getFeatures().begin(); f_it != cf_it->getFeatures().end(); ++f_it)
{
feature_int[f_it->getMapIndex()][idx] = f_it->getIntensity();
}
}
OPENMS_LOG_INFO << endl << "Using " << pass_counter << "/" << map.size() << " consensus features for computing normalization coefficients" << endl << endl;
//determine ratio
vector<double> ratio_vector(number_of_maps);
for (UInt j = 0; j < number_of_maps; j++)
{
vector<double> ratios;
for (UInt k = 0; k < number_of_features; ++k)
{
if (feature_int[map_with_most_features_idx][k] != 0.0 && feature_int[j][k] != 0.0)
{
double ratio = feature_int[map_with_most_features_idx][k] / feature_int[j][k];
if (ratio > ratio_threshold && ratio < 1 / ratio_threshold)
{
ratios.push_back(ratio);
}
}
}
if (ratios.empty())
{
OPENMS_LOG_WARN << endl << "Not enough features passing filters. Cannot compute normalization coefficients for all maps. Result will be unnormalized." << endl << endl;
return vector<double>(number_of_maps, 1.0);
}
ratio_vector[j] = Math::mean(ratios.begin(), ratios.end());
}
return ratio_vector;
}
void ConsensusMapNormalizerAlgorithmThreshold::normalizeMaps(ConsensusMap& map, const vector<double>& ratios)
{
ConsensusMap::Iterator cf_it;
ProgressLogger progresslogger;
progresslogger.setLogType(ProgressLogger::CMD);
progresslogger.startProgress(0, map.size(), "normalizing maps");
for (cf_it = map.begin(); cf_it != map.end(); ++cf_it)
{
progresslogger.setProgress(cf_it - map.begin());
ConsensusFeature::HandleSetType::const_iterator f_it;
for (f_it = cf_it->getFeatures().begin(); f_it != cf_it->getFeatures().end(); ++f_it)
{
f_it->asMutable().setIntensity(f_it->getIntensity() * ratios[f_it->getMapIndex()]);
}
}
progresslogger.endProgress();
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithm.cpp | .cpp | 5,588 | 140 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Marc Sturm, Clemens Groepl, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithm.h>
// Derived classes are included here
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithmLabeled.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithmUnlabeled.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithmQT.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithmKD.h>
#include <OpenMS/CONCEPT/LogStream.h>
using namespace std;
namespace OpenMS
{
  // Registers this param handler under the name "FeatureGroupingAlgorithm".
  FeatureGroupingAlgorithm::FeatureGroupingAlgorithm() :
    DefaultParamHandler("FeatureGroupingAlgorithm")
  {
  }
void FeatureGroupingAlgorithm::group(const vector<ConsensusMap>& maps, ConsensusMap& out)
{
OPENMS_LOG_WARN << "FeatureGroupingAlgorithm::group() does not support ConsensusMaps directly. Converting to FeatureMaps." << endl;
vector<FeatureMap> maps_f;
for (Size i = 0; i < maps.size(); ++i)
{
FeatureMap fm;
MapConversion::convert(maps[i], true, fm);
maps_f.push_back(fm);
}
// call FeatureMap version of group()
group(maps_f, out);
}
void FeatureGroupingAlgorithm::transferSubelements(const vector<ConsensusMap>& maps, ConsensusMap& out) const
{
// accumulate file descriptions from the input maps:
// cout << "Updating file descriptions..." << endl;
out.getColumnHeaders().clear();
// mapping: (input file index / map index assigned by the linkers, old map index) -> new map index
map<pair<Size, UInt64>, Size> mapid_table;
for (Size i = 0; i < maps.size(); ++i)
{
const ConsensusMap& consensus = maps[i];
for (ConsensusMap::ColumnHeaders::const_iterator desc_it = consensus.getColumnHeaders().begin(); desc_it != consensus.getColumnHeaders().end(); ++desc_it)
{
Size counter = mapid_table.size();
mapid_table[make_pair(i, desc_it->first)] = counter;
out.getColumnHeaders()[counter] = desc_it->second;
}
}
// look-up table: input map -> unique ID -> consensus feature
// cout << "Creating look-up table..." << endl;
vector<map<UInt64, ConsensusMap::ConstIterator> > feat_lookup(maps.size());
for (Size i = 0; i < maps.size(); ++i)
{
const ConsensusMap& consensus = maps[i];
for (ConsensusMap::ConstIterator feat_it = consensus.begin();
feat_it != consensus.end(); ++feat_it)
{
// do NOT use "id_lookup[i][feat_it->getUniqueId()] = feat_it;" here as
// you will get "attempt to copy-construct an iterator from a singular
// iterator" in STL debug mode:
feat_lookup[i].insert(make_pair(feat_it->getUniqueId(), feat_it));
}
}
// adjust the consensus features:
// cout << "Adjusting consensus features..." << endl;
for (ConsensusMap::iterator cons_it = out.begin(); cons_it != out.end(); ++cons_it)
{
ConsensusFeature adjusted = ConsensusFeature(
static_cast<BaseFeature>(*cons_it)); // remove sub-features
for (ConsensusFeature::HandleSetType::const_iterator sub_it = cons_it->getFeatures().begin(); sub_it != cons_it->getFeatures().end(); ++sub_it)
{
UInt64 id = sub_it->getUniqueId();
Size map_index = sub_it->getMapIndex();
ConsensusMap::ConstIterator origin = feat_lookup[map_index][id];
for (ConsensusFeature::HandleSetType::const_iterator handle_it = origin->getFeatures().begin(); handle_it != origin->getFeatures().end(); ++handle_it)
{
FeatureHandle handle = *handle_it;
Size new_id = mapid_table[make_pair(map_index, handle.getMapIndex())];
handle.setMapIndex(new_id);
adjusted.insert(handle);
}
}
*cons_it = adjusted;
for (auto& id : cons_it->getPeptideIdentifications())
{
// if old_map_index is not present, there was no map_index in the beginning,
// therefore the newly assigned map_index cannot be "corrected"
// -> remove the MetaValue to be consistent.
if (id.metaValueExists("old_map_index"))
{
Size old_map_index = id.getMetaValue("old_map_index");
Size file_index = id.getMetaValue("map_index");
Size new_idx = mapid_table[make_pair(file_index, old_map_index)];
id.setMetaValue("map_index", new_idx);
id.removeMetaValue("old_map_index");
}
else
{
id.removeMetaValue("map_index");
}
}
}
for (auto& id : out.getUnassignedPeptideIdentifications())
{
// if old_map_index is not present, there was no map_index in the beginning,
// therefore the newly assigned map_index cannot be "corrected"
// -> remove the MetaValue to be consistent.
if (id.metaValueExists("old_map_index"))
{
Size old_map_index = id.getMetaValue("old_map_index");
Size file_index = id.getMetaValue("map_index");
Size new_idx = mapid_table[make_pair(file_index, old_map_index)];
id.setMetaValue("map_index", new_idx);
id.removeMetaValue("old_map_index");
}
else
{
id.removeMetaValue("map_index");
}
}
}
  // defaulted out-of-line so the destructor is emitted in this translation unit
  FeatureGroupingAlgorithm::~FeatureGroupingAlgorithm() = default;
} //namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/MapAlignmentAlgorithmPoseClustering.cpp | .cpp | 4,087 | 116 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Eva Lange, Clemens Groepl $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentAlgorithmPoseClustering.h>
#include <OpenMS/FORMAT/FileHandler.h>
using namespace std;
namespace OpenMS
{
  // Registers the default parameters: the superimposer and pairfinder
  // sub-algorithms expose their parameters under the "superimposer:" /
  // "pairfinder:" prefixes, plus a cap on peaks/features considered per map.
  MapAlignmentAlgorithmPoseClustering::MapAlignmentAlgorithmPoseClustering() :
    DefaultParamHandler("MapAlignmentAlgorithmPoseClustering"),
    ProgressLogger(), max_num_peaks_considered_(0)
  {
    defaults_.insert("superimposer:", PoseClusteringAffineSuperimposer().getParameters());
    defaults_.insert("pairfinder:", StablePairFinder().getParameters());
    defaults_.setValue("max_num_peaks_considered", 1000, "The maximal number of peaks/features to be considered per map. To use all, set to '-1'.");
    defaults_.setMinInt("max_num_peaks_considered", -1);
    // copy defaults into the live parameter set
    defaultsToParam_();
  }
void MapAlignmentAlgorithmPoseClustering::updateMembers_()
{
superimposer_.setParameters(param_.copy("superimposer:", true));
superimposer_.setLogType(getLogType());
pairfinder_.setParameters(param_.copy("pairfinder:", true));
pairfinder_.setLogType(getLogType());
max_num_peaks_considered_ = param_.getValue("max_num_peaks_considered");
}
  // defaulted out-of-line so the destructor is emitted in this translation unit
  MapAlignmentAlgorithmPoseClustering::~MapAlignmentAlgorithmPoseClustering() = default;
void MapAlignmentAlgorithmPoseClustering::align(const FeatureMap& map, TransformationDescription& trafo)
{
ConsensusMap map_scene;
MapConversion::convert(1, map, map_scene, max_num_peaks_considered_);
align(map_scene, trafo);
}
void MapAlignmentAlgorithmPoseClustering::align(const PeakMap& map, TransformationDescription& trafo)
{
ConsensusMap map_scene;
PeakMap map2(map);
MapConversion::convert(1, map2, map_scene, max_num_peaks_considered_); // copy MSExperiment here, since it is sorted internally by intensity
align(map_scene, trafo);
}
  // Core alignment: estimates a global RT shift via the superimposer, pairs
  // features between the (shifted) scene and the reference model, and fits a
  // linear RT transformation through the matched pairs.
  void MapAlignmentAlgorithmPoseClustering::align(const ConsensusMap& map, TransformationDescription& trafo)
  {
    // TODO: move this to updateMembers_? (if ConsensusMap prevails)
    // TODO: why does superimposer work on consensus map???
    const ConsensusMap & map_model = reference_;
    ConsensusMap map_scene = map;
    // run superimposer to find the global transformation
    TransformationDescription si_trafo;
    superimposer_.run(map_model, map_scene, si_trafo);
    // apply transformation to consensus features and contained feature
    // handles
    for (Size j = 0; j < map_scene.size(); ++j)
    {
      // Calculate new RT
      double rt = map_scene[j].getRT();
      rt = si_trafo.apply(rt);
      // Set RT of consensus feature centroid
      map_scene[j].setRT(rt);
      // Set RT of consensus feature handles
      // NOTE(review): only the first handle is updated — presumably each
      // consensus feature here holds exactly one handle (built by
      // MapConversion::convert); confirm before relying on this.
      map_scene[j].begin()->asMutable().setRT(rt);
    }
    // run pairfinder to find pairs
    ConsensusMap result;
    // TODO: add another 2-map interface to pairfinder?
    std::vector<ConsensusMap> input(2);
    input[0] = map_model;
    input[1] = map_scene;
    pairfinder_.run(input, result);
    // calculate the local transformation
    si_trafo.invert(); // to undo the transformation applied above
    TransformationDescription::DataPoints data;
    for (ConsensusFeature& cfeature : result)
    {
      if (cfeature.size() == 2) // two matching features
      {
        ConsensusFeature::iterator feat_it = cfeature.begin();
        double y = feat_it->getRT();
        double x = si_trafo.apply((++feat_it)->getRT());
        // one feature should be from the reference map:
        // (feat_it now points to the second handle; orient the data point so
        // that x is always the scene RT and y the reference RT)
        if (feat_it->getMapIndex() != 0)
        {
          data.push_back(make_pair(x, y));
        }
        else
        {
          data.push_back(make_pair(y, x));
        }
      }
    }
    trafo = TransformationDescription(data);
    trafo.fitModel("linear");
  }
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/MapAlignmentEvaluationAlgorithm.cpp | .cpp | 1,705 | 49 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Katharina Albers $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentEvaluationAlgorithm.h>
// Derived classes are included here
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentEvaluationAlgorithmPrecision.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentEvaluationAlgorithmRecall.h>
namespace OpenMS
{
// TODO consider using (RT,MZ,IT) as a unique identifier ?
bool MapAlignmentEvaluationAlgorithm::isSameHandle(const FeatureHandle& lhs, const FeatureHandle& rhs, const double& rt_dev, const double& mz_dev, const Peak2D::IntensityType& int_dev, const bool use_charge)
{
#if 1
// use (RT,MZ,IT) as "unique" identifier
if (fabs(lhs.getRT() - rhs.getRT()) > rt_dev)
return false; // TODO MAGIC_ALERT
if (fabs(lhs.getMZ() - rhs.getMZ()) > mz_dev)
return false; // TODO MAGIC_ALERT
if (fabs(lhs.getIntensity() - rhs.getIntensity()) > int_dev)
return false; // TODO MAGIC_ALERT
if (use_charge && (lhs.getCharge() != rhs.getCharge()))
return false;
return true;
#else
// use (map index, element index) as unique identifier
return lhs.getMapIndex() == rhs.getMapIndex() && lhs.getElementIndex() == rhs.getElementIndex();
#endif
}
  // defaulted special members — this base class holds no resources of its own
  MapAlignmentEvaluationAlgorithm::MapAlignmentEvaluationAlgorithm() = default;
  MapAlignmentEvaluationAlgorithm::~MapAlignmentEvaluationAlgorithm() = default;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithmUnlabeled.cpp | .cpp | 3,136 | 94 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithmUnlabeled.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/StablePairFinder.h>
namespace OpenMS
{
  // Sets up the algorithm: adopts all StablePairFinder parameters (no prefix)
  // and prepares the two-slot input buffer used by addToGroup().
  FeatureGroupingAlgorithmUnlabeled::FeatureGroupingAlgorithmUnlabeled() :
    FeatureGroupingAlgorithm()
  {
    setName("FeatureGroupingAlgorithmUnlabeled");
    defaults_.insert("", StablePairFinder().getParameters());
    defaultsToParam_();
    // The input for the pairfinder is a vector of FeatureMaps of size 2
    pairfinder_input_.resize(2);
  }
  // defaulted out-of-line so the destructor is emitted in this translation unit
  FeatureGroupingAlgorithmUnlabeled::~FeatureGroupingAlgorithmUnlabeled() = default;
  // Links features across >= 2 maps: the largest map serves as reference; all
  // other maps are folded into the growing consensus one at a time via the
  // StablePairFinder.
  void FeatureGroupingAlgorithmUnlabeled::group(const std::vector<FeatureMap> & maps, ConsensusMap & out)
  {
    // check that the number of maps is ok
    if (maps.size() < 2)
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "At least two maps must be given!");
    }
    // define reference map (the one with most peaks)
    Size reference_map_index = 0;
    Size max_count = 0;
    for (Size m = 0; m < maps.size(); ++m)
    {
      if (maps[m].size() > max_count)
      {
        max_count = maps[m].size();
        reference_map_index = m;
      }
    }
    std::vector<ConsensusMap> input(2);
    // build a consensus map of the elements of the reference map (contains only singleton consensus elements)
    MapConversion::convert(reference_map_index, maps[reference_map_index],
                           input[0]);
    // loop over all other maps, extend the groups
    StablePairFinder pair_finder;
    pair_finder.setParameters(param_.copy("", true));
    for (Size i = 0; i < maps.size(); ++i)
    {
      if (i != reference_map_index)
      {
        MapConversion::convert(i, maps[i], input[1]);
        // compute the consensus of the reference map and map i
        ConsensusMap result;
        pair_finder.run(input, result);
        input[0].swap(result);
      }
    }
    // replace result with temporary map
    out.swap(input[0]);
    // copy back the input maps (they have been deleted while swapping)
    // NOTE(review): after the swap, input[0] holds the caller's ORIGINAL
    // 'out' content, so this restores the caller-provided column headers —
    // presumably intentional; confirm against the callers before changing.
    out.getColumnHeaders() = input[0].getColumnHeaders();
    postprocess_(maps, out);
  }
void FeatureGroupingAlgorithmUnlabeled::addToGroup(int map_id, const FeatureMap& feature_map)
{
// create new PairFinder
StablePairFinder pair_finder;
pair_finder.setParameters(param_.copy("", true));
// Convert the input map to a consensus map (using the given map_id) and
// replace the second element in the pairfinder_input_ vector.
MapConversion::convert(map_id, feature_map, pairfinder_input_[1]);
// compute the consensus of the reference map and map map_id
ConsensusMap result;
pair_finder.run(pairfinder_input_, result);
pairfinder_input_[0].swap(result);
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/QTClusterFinder.cpp | .cpp | 34,550 | 829 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Steffen Sass, Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/QTClusterFinder.h>
#include <OpenMS/DATASTRUCTURES/Adduct.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <OpenMS/KERNEL/FeatureHandle.h>
#include <OpenMS/MATH/MathFunctions.h>
//#define DEBUG_QTCLUSTERFINDER_IDS
using std::list;
using std::vector;
using std::max;
using std::make_pair;
using std::unordered_set;
namespace OpenMS
{
  // Registers all QT-clustering parameters (ID usage, m/z partitioning,
  // ID-based RT tolerance estimation) plus the FeatureDistance parameters
  // (inserted without prefix).
  QTClusterFinder::QTClusterFinder() :
    BaseGroupFinder(), feature_distance_(FeatureDistance())
  {
    setName("QTClusterFinder");
    defaults_.setValue("use_identifications", "false", "Never link features that are annotated with different peptides (only the best hit per peptide identification is taken into account).");
    defaults_.setValidStrings("use_identifications", {"true","false"});
    defaults_.setValue("nr_partitions", 100, "How many partitions in m/z space should be used for the algorithm (more partitions means faster runtime and more memory efficient execution).");
    defaults_.setMinInt("nr_partitions", 1);
    defaults_.setValue("min_nr_diffs_per_bin", 50, "If IDs are used: How many differences from matching IDs should be used to calculate a linking tolerance for unIDed features in an RT region. RT regions will be extended until that number is reached.");
    defaults_.setMinInt("min_nr_diffs_per_bin", 5);
    defaults_.setValue("min_IDscore_forTolCalc", 1., "If IDs are used: What is the minimum score of an ID to assume a reliable match for tolerance calculation. Check your current score type!");
    defaults_.setValue("noID_penalty", 0.0, "If IDs are used: For the normalized distances, how high should the penalty for missing IDs be? 0 = no bias, 1 = IDs inside the max tolerances always preferred (even if much further away).");
    defaults_.setMinFloat("noID_penalty", 0.0);
    defaults_.setMaxFloat("noID_penalty", 1.0);
    defaults_.insert("", feature_distance_.getDefaults());
    defaultsToParam_();
  }
void QTClusterFinder::setParameters_(double max_intensity,
double max_mz)
{
// don't check for low max. intensity, because intensities may be ignored:
if ((max_mz < 1e-16) || (max_mz > 1e16) || (max_intensity > 1e16))
{
String msg = "Maximum m/z or intensity out of range (m/z: " +
String(max_mz) + ", intensity: " + String(max_intensity) + "). "
"Has 'updateRanges' been called on the input maps?";
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
msg);
}
use_IDs_ = param_.getValue("use_identifications").toBool();
nr_partitions_ = param_.getValue("nr_partitions");
min_nr_diffs_per_bin_ = param_.getValue("min_nr_diffs_per_bin");
min_score_ = param_.getValue("min_IDscore_forTolCalc");
noID_penalty_ = param_.getValue("noID_penalty");
max_diff_rt_ = param_.getValue("distance_RT:max_difference");
max_diff_mz_ = param_.getValue("distance_MZ:max_difference");
// compute m/z tolerance in Da (if given in ppm; for the hash grid):
if (param_.getValue("distance_MZ:unit") == "ppm")
{
max_diff_mz_ *= max_mz * 1e-6;
}
Param distance_params = param_.copy("");
distance_params.remove("use_identifications");
distance_params.remove("nr_partitions");
distance_params.remove("min_nr_diffs_per_bin");
distance_params.remove("min_IDscore_forTolCalc");
distance_params.remove("noID_penalty");
feature_distance_ = FeatureDistance(max_intensity, true);
feature_distance_.setParameters(distance_params);
}
  // Top-level driver: optionally estimates RT linking tolerances per RT bin
  // from features carrying the same peptide ID, then partitions the data
  // along m/z (clusters cannot span partition boundaries) and runs the
  // QT clustering on each partition via run_internal_().
  template <typename MapType>
  void QTClusterFinder::run_(const vector<MapType>& input_maps,
                             ConsensusMap& result_map)
  {
    // update parameters (dummy)
    setParameters_(1, 1);

    if (use_IDs_)
    {
      // map string "modified sequence/charge" to all RTs the feature has been observed in the different maps
      std::unordered_map<String, std::vector<double>> ided_feat_rts;
      //std::unordered_map<String, std::vector<const typename MapType::FeatureType*>> ided_feats;
      double minRT = std::numeric_limits<double>::max();
      for (auto& map : input_maps)
      {
        for (auto feat : map) //OMS_CODING_TEST_EXCLUDE Note: needs copy to sort
        {
          if (feat.getRT() < minRT) minRT = feat.getRT();
          auto& pepIDs = feat.getPeptideIdentifications();
          if (!pepIDs.empty())
          {
            //TODO I think we sort in run_internal again. Could be avoided.
            feat.sortPeptideIdentifications();
            auto& hits = pepIDs[0].getHits();
            if (!hits.empty())
            {
              // keep only IDs whose best hit passes the score threshold
              // (direction depends on the score orientation)
              if ((hits[0].getScore() > min_score_ && pepIDs[0].isHigherScoreBetter()) ||
                  (hits[0].getScore() < min_score_ && !pepIDs[0].isHigherScoreBetter()))
              {
                //TODO we could loosen the score filtering by requiring only ONE IDed feature of a peptide to pass the threshold.
                // Would require a second pass though
                const String key = pepIDs[0].getHits()[0].getSequence().toString() + "/" + feat.getCharge();
                const auto [it, inserted] = ided_feat_rts.emplace(key, std::vector<double>{feat.getRT()});
                if (!inserted) // already present
                {
                  it->second.push_back(feat.getRT());
                }
                //TODO we could score the whole feature instead of just the RT to calculate tolerances based on
                // a combined score (RT/mz; using the scoring function of this class) instead of just RT
                /*const auto it_inserted_feat = ided_feats.emplace(key, std::vector<const typename MapType::FeatureType*>{&feat});
                if (!it_inserted_feat.second)
                {
                  it_inserted_feat.first->second.push_back(&feat);
                }*/
              }
            }
          }
        }
      }

      //Note: this does not differentiate between the variety of differences between distinct map pairs. E.g.
      // differences between map 1 and map 2 might be usually very small (e.g. they are replicates), while
      // differences between map 1 and map 3 are large, since they are different conditions. But we might lose
      // robust estimates and use more memory if we split them.
      std::vector<std::pair<double,std::vector<double>>> medians_diffs;
      medians_diffs.resize(ided_feat_rts.size());
      Size c = 0;
      // for every ID, calculate median RT and differences
      for (auto& id_rts : ided_feat_rts)
      {
#ifdef DEBUG_QTCLUSTERFINDER_IDS
        std::cout << "Stats for " << id_rts.first << ": ";
        for (const auto& rt : id_rts.second)
        {
          std::cout << rt << ", ";
        }
        std::cout << '\n';
#endif
        auto& rts = id_rts.second;
        std::sort(rts.begin(),rts.end());
        medians_diffs[c].first = rts[rts.size()/2];
        medians_diffs[c].second.reserve(rts.size()-1);
        Size i = 0;
        for (const auto& rt : rts)
        {
          // skip the median element itself
          if (i++ == rts.size()/2) continue;
          //TODO would relative diffs solve the RT dependency issue sufficiently? Probably not with non-linear shifts
          //Note: One the one hand using abs. val. destroys the Gaussian distribution, on the other hand, if you only have
          // two RTs for an ID, you will always have a negative difference from the median (and therefore the distribution
          // would be biased anyway). We could use a stable median for evenly sized vectors but that would not reflect
          // real distances then. Or always add positive AND negative differences.
          medians_diffs[c].second.push_back(std::fabs(rt - medians_diffs[c].first));
        }
        c++;
      }

      //TODO check if we can assume sorted
      std::sort(medians_diffs.begin(), medians_diffs.end());

      Size cnt = 0;
      vector<double> tmp_diffs, last_tmp_diffs;
      // we calculate minRT (instead of starting first bin at 0) since RTs may start in the "negative" region after alignment.
      double start_rt = minRT;
      double min_tolerance = 20;
      double tol, q2, q3;
      OPENMS_LOG_INFO << "Calculating RT linking tolerance bins...\n";
      OPENMS_LOG_INFO << "RT_bin_start, Tolerance\n";
      // For every pair of median RT and differences, collect
      // differences until min_nr_diffs_per_bin_ is reached, then add the
      // start of the current bin and a tolerance based on Double MAD https://aakinshin.net/posts/harrell-davis-double-mad-outlier-detector/
      for (const auto& med_diffs : medians_diffs)
      {
        if (tmp_diffs.size() > min_nr_diffs_per_bin_ && !med_diffs.second.empty())
        {
          std::sort(tmp_diffs.begin(), tmp_diffs.end());
          // calculate allowed tolerance
          //q1 = quantile_(tmp_diffs, 0.25);
          q2 = Math::quantile(tmp_diffs, 0.5);
          q3 = Math::quantile(tmp_diffs, 0.75);
          //q95 = quantile_(tmp_diffs, 0.95);
          //iqr = q3 - q1;
          //tol = max(min_tolerance, max(fabs(q3 + iqr_mult * iqr), fabs(q1 - iqr_mult * iqr)));
          //tol = q95;
          //tol = 2. * 1.4826 * q2;
          tol = max(min_tolerance, q2 + 2. * 1.4826 * (q3-q2));
          bin_tolerances_.insert(make_pair(start_rt, tol));
          OPENMS_LOG_INFO << start_rt << ", " << tol << '\n';
#ifdef DEBUG_QTCLUSTERFINDER_IDS
          std::cout << "Differences used: ";
          for (const auto& diff : tmp_diffs)
          {
            std::cout << diff << ", ";
          }
          std::cout << '\n';
#endif
          // remember this bin's diffs so the final bin can merge with them
          std::swap(tmp_diffs, last_tmp_diffs);
          tmp_diffs.clear();
          if (cnt > 0) start_rt = (med_diffs.first + medians_diffs[cnt-1].first)/2;
        }
        else
        {
          tmp_diffs.insert(tmp_diffs.end(), med_diffs.second.begin(), med_diffs.second.end());
        }
        cnt++;
      }
      // calculate allowed tolerance for the last (possibly undersized) bin:
      // merge the remaining diffs with those of the previous bin
      std::sort(tmp_diffs.begin(), tmp_diffs.end());
      std::vector<double> last_and_before_diffs;
      last_and_before_diffs.reserve(tmp_diffs.size() + last_tmp_diffs.size());
      std::merge(tmp_diffs.begin(), tmp_diffs.end(), last_tmp_diffs.begin(), last_tmp_diffs.end(), std::back_inserter(last_and_before_diffs));
      if (!last_and_before_diffs.empty())
      {
        q2 = Math::quantile(last_and_before_diffs, 0.5);
        q3 = Math::quantile(last_and_before_diffs, 0.75);
        tol = max(min_tolerance, q2 + 2. * 1.4826 * (q3-q2));
        bin_tolerances_.insert(make_pair(start_rt, tol));
        OPENMS_LOG_INFO << start_rt << ", " << tol << '\n';
#ifdef DEBUG_QTCLUSTERFINDER_IDS
        std::cout << "Differences used: ";
        for (const auto& diff : last_and_before_diffs)
        {
          std::cout << diff << ", ";
        }
        std::cout << '\n';
#endif
#ifdef DEBUG_QTCLUSTERFINDER_IDS
        std::cout << "size of last bin: " << last_and_before_diffs.size() << '\n';
#endif
      }
      last_and_before_diffs.clear();
      last_tmp_diffs.clear();
      tmp_diffs.clear();
    }

    result_map.clear(false);

    // collect all m/z values to compute partition boundaries
    std::vector< double > massrange;
    for (typename vector<MapType>::const_iterator map_it = input_maps.begin();
         map_it != input_maps.end(); ++map_it)
    {
      for (typename MapType::const_iterator feat_it = map_it->begin();
           feat_it != map_it->end(); ++feat_it)
      {
        massrange.push_back(feat_it->getMZ());
      }
    }
    std::sort(massrange.begin(), massrange.end());

    if (nr_partitions_ == 1)
    {
      // Only one partition
      run_internal_(input_maps, result_map, true);
    }
    else
    {
      // partition at boundaries -> this should be safe because there cannot be
      // any cluster reaching across boundaries

      // minimal differences between two m/z values
      double massrange_diff = max_diff_mz_;
      int pts_per_partition = int(massrange.size()) / nr_partitions_;

      // if m/z tolerance is specified in ppm, we adapt massrange_diff
      // in each iteration below
      bool mz_ppm = param_.getValue("distance_MZ:unit") == "ppm";
      double mz_tol = param_.getValue("distance_MZ:max_difference");

      // compute partition boundaries
      std::vector< double > partition_boundaries;
      partition_boundaries.push_back(massrange.front());
      for (size_t j = 0; j < massrange.size()-1; j++)
      {
        if (mz_ppm)
        {
          massrange_diff = mz_tol * 1e-6 * massrange[j+1];
        }
        // only cut where a gap larger than the m/z tolerance guarantees that
        // no cluster can span the boundary
        if (fabs(massrange[j] - massrange[j+1]) > massrange_diff)
        {
          if (j >= (partition_boundaries.size() ) * pts_per_partition )
          {
            partition_boundaries.push_back((massrange[j] + massrange[j+1])/2.0);
          }
        }
      }
      // add last partition (a bit more since we use "smaller than" below)
      partition_boundaries.push_back(massrange.back() + 1.0);

      ProgressLogger logger;
      Size progress = 0;
      logger.setLogType(ProgressLogger::CMD);
      logger.startProgress(0, partition_boundaries.size(), "Linking features");
      for (size_t j = 0; j < partition_boundaries.size()-1; j++)
      {
        double partition_start = partition_boundaries[j];
        double partition_end = partition_boundaries[j+1];

        std::vector<MapType> tmp_input_maps(input_maps.size());
        for (size_t k = 0; k < input_maps.size(); k++)
        {
          // iterate over all features in the current input map and append
          // matching features (within the current partition) to the temporary
          // map
          for (size_t m = 0; m < input_maps[k].size(); m++)
          {
            if (input_maps[k][m].getMZ() >= partition_start &&
                input_maps[k][m].getMZ() < partition_end)
            {
              tmp_input_maps[k].push_back(input_maps[k][m]);
            }
          }
          tmp_input_maps[k].updateRanges();
        }

        // run algo on current partition
        run_internal_(tmp_input_maps, result_map, false);
        logger.setProgress(progress++);
      }
      logger.endProgress();
    }
  }
  // Clusters one (partition of the) input: builds a hash grid over (RT, m/z),
  // computes the initial QT clustering, then repeatedly extracts the best
  // cluster as a consensus feature until none remain.
  template <typename MapType>
  void QTClusterFinder::run_internal_(const vector<MapType>& input_maps,
                                      ConsensusMap& result_map, bool do_progress)
  {
    // clear temporary data structures
    already_used_.clear();

    num_maps_ = input_maps.size();
    if (num_maps_ < 2)
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                       "At least two input maps required");
    }

    // set up the distance functor (and set other parameters) for the current partition
    double max_intensity = std::numeric_limits<double>::lowest();
    double max_mz = std::numeric_limits<double>::lowest();
    for (auto it = input_maps.begin(); it != input_maps.end(); ++it)
    {
      if (!it->RangeIntensity::isEmpty())
      {
        max_intensity = max(max_intensity, it->getMaxIntensity());
      }
      if (!it->RangeMZ::isEmpty())
      {
        max_mz = max(max_mz, it->getMaxMZ());
      }
    }
    setParameters_(max_intensity, max_mz);

    // create the hash grid and fill it with features:
    // std::cout << "Hashing...\n";
    list<OpenMS::GridFeature> grid_features;
    Grid grid(Grid::ClusterCenter(max_diff_rt_, max_diff_mz_));
    for (Size map_index = 0; map_index < num_maps_; ++map_index)
    {
      for (Size feature_index = 0; feature_index < input_maps[map_index].size();
           ++feature_index)
      {
        grid_features.push_back(
          GridFeature(input_maps[map_index][feature_index], map_index,
                      feature_index));
        GridFeature& gfeat = grid_features.back();
        // sort peptide hits once now, instead of multiple times later:
        auto& bfeat = const_cast<BaseFeature&>(gfeat.getFeature());
        for (auto& pep : bfeat.getPeptideIdentifications())
        {
          pep.sort();
        }
        grid.insert(make_pair(Grid::ClusterCenter(gfeat.getRT(), gfeat.getMZ()),
                              &gfeat));
      }
    }

    // compute QT clustering:
    // std::cout << "Clustering...\n";

    // "hot" cluster heads, we can extract the best efficiently
    Heap cluster_heads;

    // handles to cluster heads to reach them (index == cluster.id_) in cluster_heads for updating
    vector<Heap::handle_type> handles;

    // "cold" cluster bodies, where most of their data lies
    vector<QTCluster::BulkData> cluster_data;

    // map to get ids from clusters, who contain a certain grid feature
    ElementMapping element_mapping;

    computeClustering_(grid, cluster_heads, cluster_data, handles, element_mapping);

    // number of clusters == number of data points:
    Size size = cluster_heads.size();

    ProgressLogger logger;
    Size progress = 0;
    if (do_progress)
    {
      logger.setLogType(ProgressLogger::CMD);
      logger.startProgress(0, size, "Linking features");
    }

    while (!cluster_heads.empty())
    {
      // std::cout << "Clusters: " << clustering.size() << '\n';

      ConsensusFeature consensus_feature;

      // pops heap until a valid best cluster or empty, makes a consensusFeature and updates
      // other clusters affected by the inclusion of this cluster
      bool made_feature = makeConsensusFeature_(cluster_heads, consensus_feature,
                                                element_mapping, grid, handles);

      if (made_feature)
      {
        result_map.push_back(consensus_feature);
      }
      if (do_progress) logger.setProgress(progress++);
    }
    if (do_progress) logger.endProgress();
  }
  // Pops invalidated clusters from the heap until a valid best cluster is
  // found (returns false if the heap runs empty), turns it into a consensus
  // feature and updates all clusters affected by consuming its elements.
  bool QTClusterFinder::makeConsensusFeature_(Heap& cluster_heads,
                                              ConsensusFeature& feature,
                                              ElementMapping& element_mapping,
                                              const Grid& grid,
                                              const vector<Heap::handle_type>& handles)
  {
    // pop until the top is valid
    while (cluster_heads.top().isInvalid())
    {
      removeFromElementMapping_(cluster_heads.top(), element_mapping);
      cluster_heads.pop();

      // if the last remaining cluster was invalid, no consensus feature is created
      if (cluster_heads.empty()) return false;
    }

    const QTCluster& best = cluster_heads.top();

    QTCluster::Elements const elements = best.getElements();

#ifdef DEBUG_QTCLUSTERFINDER
    // NOTE(review): this debug block uses 'best->' although 'best' is a
    // reference — it looks stale and would not compile if the macro is enabled
    std::cout << "Elements: " << elements.size() << " with best "
              << best->getQuality() << " invalid " << best->isInvalid() << '\n';
#endif

    createConsensusFeature_(feature, best.getCurrentQuality(), elements);

#ifdef DEBUG_QTCLUSTERFINDER
    std::cout << " create new consensus feature " << feature.getRT() << " " << feature.getMZ() << " from " << best->getCenterPoint()->getFeature().getUniqueId() << '\n';
    for (OpenMSBoost::unordered_map<Size, OpenMS::GridFeature*>::const_iterator
         it = elements.begin(); it != elements.end(); ++it)
    {
      std::cout << " = element id : " << it->second->getFeature().getUniqueId() << '\n';
    }
#endif

    updateClustering_(element_mapping, grid, elements, cluster_heads, handles, best.getId());

    // made a consensus feature
    return true;
  }
void QTClusterFinder::removeFromElementMapping_(const QTCluster& cluster,
ElementMapping& element_mapping)
{
/* We have to erase all references to this cluster from the element mapping
* before it is popped from the heap and deleted.
* This function is called in makeConsensusFeature() for clusters who were
* invalidated because their center feature was used by a better cluster.
* The neighbor features of this cluster have not necessarily been used
* and might still be "active", i.e. their element mapping may still be accessed.
* Therefore it should not contain references to a deleted cluster.
*/
Size id = cluster.getId();
for (const auto& element : cluster.getElements())
{
element_mapping[element.feature].erase(id);
}
}
void QTClusterFinder::createConsensusFeature_(ConsensusFeature& feature,
const double quality,
const QTCluster::Elements& elements)
{
feature.setQuality(quality);
Adduct adduct;
// determine best quality feature for adduct ion annotation (Constanst::UserParam::IIMN_BEST_ION)
float best_quality = 0;
size_t best_quality_index = 0;
// collect the "Group" MetaValues of Features in a ConsensusFeature MetaValue (Constanst::UserParam::IIMN_LINKED_GROUPS)
vector<String> linked_groups;
// the features of the current best cluster are inserted into the new consensus feature
for (const auto& element : elements)
{
// Store the id of already used features (important: needs to be done
// before updateClustering()) (not to be confused with the cluster id)
already_used_.insert(element.feature);
BaseFeature& elem_feat = const_cast<BaseFeature&>(element.feature->getFeature());
feature.insert(element.map_index, elem_feat);
if (elem_feat.metaValueExists(Constants::UserParam::DC_CHARGE_ADDUCTS))
{
feature.setMetaValue(String(elem_feat.getUniqueId()), elem_feat.getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS));
}
if (elem_feat.metaValueExists(Constants::UserParam::DC_CHARGE_ADDUCTS) && (elem_feat.getQuality() > best_quality))
{
feature.setMetaValue(Constants::UserParam::IIMN_BEST_ION, elem_feat.getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS));
best_quality = elem_feat.getQuality();
}
if (elem_feat.metaValueExists(Constants::UserParam::ADDUCT_GROUP))
{
linked_groups.emplace_back(elem_feat.getMetaValue(Constants::UserParam::ADDUCT_GROUP));
}
}
if (elements[best_quality_index].feature->getFeature().metaValueExists(Constants::UserParam::DC_CHARGE_ADDUCTS))
{
feature.setMetaValue(Constants::UserParam::IIMN_BEST_ION,
adduct.toAdductString(elements[best_quality_index].feature->getFeature().getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS),
elements[best_quality_index].feature->getFeature().getCharge()));
}
if (!linked_groups.empty())
{
feature.setMetaValue(Constants::UserParam::IIMN_LINKED_GROUPS, linked_groups);
}
feature.computeConsensus();
}
/**
  @brief Repairs the clustering after the current best cluster has been consumed.

  Called after the elements of the best cluster (id @p best_id) have been moved
  into a consensus feature. Every other cluster that shared one of those
  elements must drop them, be refilled from the grid and get its heap position
  updated.

  @param element_mapping maps each grid feature to the ids of all clusters containing it
  @param grid            spatial hash of all grid features (source for refilling clusters)
  @param elements        the elements of the just-consumed best cluster
  @param cluster_heads   max-heap of clusters, ordered by quality
  @param handles         heap handles, indexed by cluster id
  @param best_id         id of the cluster that was just consumed
*/
void QTClusterFinder::updateClustering_(ElementMapping& element_mapping,
                                        const Grid& grid,
                                        const QTCluster::Elements& elements,
                                        Heap& cluster_heads,
                                        const vector<Heap::handle_type>& handles,
                                        Size best_id)
{
  // remove the current best from the heap and consolidate the heap from previous lazy updates
  // we cannot pop at the end since update_lazy may theoretically change top_element immediately.
  cluster_heads.pop();

  for (const auto& element : elements)
  {
    const GridFeature* const curr_feature = element.feature;

    // ids of clusters the current feature belonged to
    unordered_set<Size>& cluster_ids = element_mapping[curr_feature];

    // delete the id of the current best cluster
    // we do not want to unnecessarily update it in the loop below
    cluster_ids.erase(best_id);

    // Identify all features that could potentially have been touched by this
    // Get all clusters that may potentially need updating
    ElementMapping tmp_element_mapping; // modify copy, then update

    for (const Size curr_id : cluster_ids)
    {
      QTCluster& cluster = *handles[curr_id];

      // we do not want to update invalid features
      // (saves time and does not recompute the quality)
      if (!cluster.isInvalid())
      {
        // remove the elements of the new feature from the cluster
        if (cluster.update(elements))
        {
          // If update returns true, it means that at least one element was
          // removed from the cluster and we need to update that cluster

          /*
          ////////////////////////////////////////
          Step 1: Iterate through all neighboring grid features and try to
          add elements to the current cluster to replace the ones we just
          removed

          Before that we must delete this clusters id from the element mapping. (important!)

          It is possible that addClusterElements_() removes features from the cluster
          we are updating. (Through finalizeCluster_ -> computeQuality_ -> optimizeAnnotations).
          These are not to be confused with the features we removed
          because they are part of the current best cluster. Those are removed in
          QTCluster::update (above).

          If this happens, the element mapping for the additionally removed features
          (which are valid and unused!) still contains the id of the cluster which
          we are currently updating. But the cluster does not contain the feature anymore.
          When the cluster is deleted, the element mapping for the removed feature doesn't
          get updated. The element mapping for the feature then contains an id of a
          deleted cluster, which will surely lead to a segfault when the feature is actually
          used in another cluster later.

          TODO Check guarantee that addClusterElements does not add a feature that was removed
          earlier in the loop. Should not happen because they are in the already_used set by now.
          */

          removeFromElementMapping_(cluster, element_mapping);

          // re-add closest cluster elements that were not used yet.
          addClusterElements_(grid, cluster);

          // update the heap, because the quality has changed
          // compares with top_element to see if a different node needs to be popped now.
          // for comparison getQuality() is called for the clusters here
          // TODO check if we can guarantee cluster_heads.increase/decrease since they may have
          // better theoretical runtimes although a lazy update until the next pop is probably not bad
          cluster_heads.update_lazy(handles[curr_id]);

          ////////////////////////////////////////
          // Step 2: reinsert the updated cluster's features into a temporary element mapping.
          // This can be merged later since the methods called in the loop here seem not to access the mapping.
          for (const auto& neighbor : cluster.getElements())
          {
            tmp_element_mapping[neighbor.feature].insert(curr_id);
          }
        }
      }
    }

    // we merge the tmp_element_mapping into the element_mapping after all clusters
    // that contained one feature of the current best cluster have been updated,
    // i.e. after every iteration of the outer loop
    for (const auto& feat_clusterids : tmp_element_mapping)
    {
      for (const Size id : feat_clusterids.second)
      {
        element_mapping[feat_clusterids.first].insert(id);
      }
    }
  }
}
/**
  @brief Fills a cluster with suitable neighboring features from the grid.

  Scans the 3x3 neighborhood of the cluster's grid cell and adds every
  not-yet-used feature whose distance to the cluster center is finite
  (plus an optional penalty for unidentified features), then finalizes
  the cluster so its quality is up to date.

  @param grid    spatial hash of all grid features
  @param cluster the cluster to (re-)fill; modified in place
*/
void QTClusterFinder::addClusterElements_(const Grid& grid, QTCluster& cluster)
{
  cluster.initializeCluster();

  // NOTE(fix): these declarations must precede the debug block below, which
  // references x, y and center_feature; previously the debug block came first
  // and the debug build (DEBUG_QTCLUSTERFINDER) failed to compile.
  const int x = cluster.getXCoord();
  const int y = cluster.getYCoord();
  const GridFeature* center_feature = cluster.getCenterPoint();

#ifdef DEBUG_QTCLUSTERFINDER
  std::cout << " Compute Clustering: "<< x << " " << y << " with id " << center_feature->getFeature().getUniqueId() << '\n';
  std::set<AASequence> a = cluster.getAnnotations();
  std::cout << " with annotations: ";
  for (std::set<AASequence>::iterator it = a.begin(); it != a.end(); ++it) std::cout << " " << *it;
  std::cout << '\n';
#endif

  // iterate over neighboring grid cells (1st dimension):
  for (int i = x - 1; i <= x + 1; ++i)
  {
    // iterate over neighboring grid cells (2nd dimension):
    for (int j = y - 1; j <= y + 1; ++j)
    {
      auto act_pos = grid.grid_find(Grid::CellIndex(i, j));
      if (act_pos != grid.grid_end())
      {
        for (Grid::const_cell_iterator it_cell = act_pos->second.begin();
             it_cell != act_pos->second.end(); ++it_cell)
        {
          OpenMS::GridFeature* neighbor_feature = it_cell->second;

#ifdef DEBUG_QTCLUSTERFINDER
          std::cout << " considering to add feature " << neighbor_feature->getFeature().getUniqueId() << " to cluster " << center_feature->getFeature().getUniqueId()<< '\n';
#endif

          // Skip features that we have already used -> we cannot add them to
          // be neighbors any more
          if (already_used_.find(neighbor_feature) != already_used_.end())
          {
            continue;
          }

          // consider only "real" neighbors, not the element itself:
          if (center_feature != neighbor_feature)
          {
            // NOTE: this actually caches the distance -> memory problem
            double dist = getDistance_(center_feature, neighbor_feature);

            if (dist == FeatureDistance::infinity)
            {
              continue; // conditions not satisfied
            }

            // if IDs are used during linking, check if unidentified features are
            // too far off from "usual" RT shifts in that region
            if (use_IDs_ && neighbor_feature->getAnnotations().empty())
            {
              double rt_dist = std::fabs(neighbor_feature->getRT() - center_feature->getRT());
              if (distIsOutlier_(rt_dist, center_feature->getRT())) continue;
              dist += noID_penalty_;
            }

            // if neighbor point is a possible cluster point, add it:
            cluster.add(neighbor_feature, dist);
          }
        }
      }
    }
  }

  cluster.finalizeCluster();

#ifdef DEBUG_QTCLUSTERFINDER
  // NOTE(fix): the previous debug code iterated getElements() with an
  // unordered_map iterator and accessed 'it->second', which does not match
  // QTCluster::Elements (entries expose '.feature', see updateClustering_).
  QTCluster::Elements elements = cluster.getElements();
  std::cout << " Done with cluster -> get quality " << cluster.getQuality() << " and nr elements " << elements.size() << '\n';
  for (const auto& element : elements)
  {
    std::cout << " = element id : " << element.feature->getFeature().getUniqueId() << '\n';
  }
  {
    std::set<AASequence> ax = cluster.getAnnotations();
    std::cout << " FINAL with annotations: ";
    for (std::set<AASequence>::iterator it = ax.begin(); it != ax.end(); ++it) std::cout << " " << *it;
    std::cout << '\n';
  }
#endif
}
/// Public entry point for linking consensus maps; delegates to the shared run_() implementation.
void QTClusterFinder::run(const vector<ConsensusMap>& input_maps,
                          ConsensusMap& result_map)
{
  run_(input_maps, result_map);
}
/// Public entry point for linking feature maps; delegates to the shared run_() implementation.
void QTClusterFinder::run(const std::vector<FeatureMap>& input_maps,
                          ConsensusMap& result_map)
{
  run_(input_maps, result_map);
}
/**
  @brief Builds the initial clustering: one cluster per grid feature.

  For every feature in the grid a cluster is created with that feature as
  center, filled from the neighborhood via addClusterElements_(), pushed onto
  the quality heap, and registered in the element mapping.

  @param element_mapping (out) maps each grid feature to the ids of the clusters containing it
*/
void QTClusterFinder::computeClustering_(const Grid& grid,
                                         Heap& cluster_heads,
                                         vector<QTCluster::BulkData>& cluster_data,
                                         vector<Heap::handle_type>& handles,
                                         ElementMapping& element_mapping)
{
  cluster_heads.clear();
  already_used_.clear();
  cluster_data.clear();
  handles.clear();

  // do not remove this (will lead to segfault)
  // we need the pointers to cluster_data to stay valid,
  // therefore no reallocation is allowed to happen
  // we also reserve handles, because we don't know if we are allowed to move the handles
  // (the documentation of boost::heap does not tell us a lot about the handles)
  cluster_data.reserve(grid.size());
  handles.reserve(grid.size());

  Size id = 0;

  // FeatureDistance produces normalized distances (between 0 and 1 plus a possible noID penalty):
  const double max_distance = 1.0 + noID_penalty_;

  // iterate over all grid cells:
  for (Grid::const_iterator it = grid.begin(); it != grid.end(); ++it)
  {
    const Grid::CellIndex& act_coords = it.index();
    const Int x = act_coords[0], y = act_coords[1];
    const OpenMS::GridFeature* const center_feature = it->second;

    // construct empty data body for the new cluster and create the head afterwards
    cluster_data.emplace_back(center_feature, num_maps_,
                              max_distance, x, y, id);

    QTCluster cluster(&cluster_data.back(), use_IDs_);

    addClusterElements_(grid, cluster);

    // push the cluster head of the new cluster into the heap
    // and the returned handle into our handle vector
    handles.push_back(cluster_heads.push(cluster));

    // register the new cluster for all its elements in the element mapping
    for (const auto& element : (*handles.back()).getElements())
    {
      element_mapping[element.feature].insert(id);
    }

    // next cluster gets the next id
    ++id;
  }
}
/// Computes the linking distance between two grid features.
/// FeatureDistance::operator() yields a (valid, distance) pair; only the
/// numeric distance is of interest here.
double QTClusterFinder::getDistance_(const OpenMS::GridFeature* left,
                                     const OpenMS::GridFeature* right)
{
  const auto result = feature_distance_(left->getFeature(), right->getFeature());
  return result.second;
}
/// Checks whether an RT distance exceeds the tolerance configured for the
/// RT bin containing @p rt. Returns false if no bins are configured.
bool QTClusterFinder::distIsOutlier_(double dist, double rt)
{
  // no RT bins configured -> nothing is ever flagged as an outlier
  if (bin_tolerances_.empty())
  {
    return false;
  }
  // locate the first bin boundary strictly above 'rt'; the applicable
  // tolerance is that of the preceding bin (or of the very first bin if
  // 'rt' lies before all boundaries)
  auto bin = bin_tolerances_.upper_bound(rt);
  if (bin != bin_tolerances_.begin())
  {
    --bin;
  }
  return dist >= bin->second;
}
// defaulted out-of-line so the destructor is emitted in this translation unit
QTClusterFinder::~QTClusterFinder() = default;
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/FeatureDistance.cpp | .cpp | 8,634 | 208 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser
// $Authors: Clemens Groepl, Hendrik Weisser, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureDistance.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <OpenMS/CHEMISTRY/EmpiricalFormula.h>
#include <OpenMS/CONCEPT/Constants.h>
using namespace std;
namespace OpenMS
{
// sentinel distance returned by operator() for feature pairs that violate a
// hard constraint (charge/adduct mismatch, or limits with force_constraints)
const double FeatureDistance::infinity =
  std::numeric_limits<double>::infinity();
/**
  @brief Constructs the distance functor and registers its parameters.

  @param max_intensity     maximum intensity in the data set, used to normalize intensity differences
  @param force_constraints if true, pairs exceeding the RT/m-z limits get distance 'infinity' instead of a penalized value
*/
FeatureDistance::FeatureDistance(double max_intensity,
                                 bool force_constraints) :
  DefaultParamHandler("FeatureDistance"),
  params_rt_(),
  params_mz_(),
  params_intensity_(),
  max_intensity_(max_intensity),
  force_constraints_(force_constraints),
  log_transform_(false)
{
  // RT distance component
  defaults_.setValue("distance_RT:max_difference", 100.0, "Never pair features with a larger RT distance (in seconds).");
  defaults_.setMinFloat("distance_RT:max_difference", 0.0);
  defaults_.setValue("distance_RT:exponent", 1.0, "Normalized RT differences ([0-1], relative to 'max_difference') are raised to this power (using 1 or 2 will be fast, everything else is REALLY slow)", {"advanced"});
  defaults_.setMinFloat("distance_RT:exponent", 0.0);
  defaults_.setValue("distance_RT:weight", 1.0, "Final RT distances are weighted by this factor", {"advanced"});
  defaults_.setMinFloat("distance_RT:weight", 0.0);
  defaults_.setSectionDescription("distance_RT", "Distance component based on RT differences");

  // m/z distance component
  defaults_.setValue("distance_MZ:max_difference", 0.3, "Never pair features with larger m/z distance (unit defined by 'unit')");
  defaults_.setMinFloat("distance_MZ:max_difference", 0.0);
  defaults_.setValue("distance_MZ:unit", "Da", "Unit of the 'max_difference' parameter");
  defaults_.setValidStrings("distance_MZ:unit", {"Da","ppm"});
  defaults_.setValue("distance_MZ:exponent", 2.0, "Normalized ([0-1], relative to 'max_difference') m/z differences are raised to this power (using 1 or 2 will be fast, everything else is REALLY slow)", {"advanced"});
  defaults_.setMinFloat("distance_MZ:exponent", 0.0);
  defaults_.setValue("distance_MZ:weight", 1.0, "Final m/z distances are weighted by this factor", {"advanced"});
  defaults_.setMinFloat("distance_MZ:weight", 0.0);
  defaults_.setSectionDescription("distance_MZ", "Distance component based on m/z differences");

  // intensity distance component (weight 0 by default, i.e. disabled)
  defaults_.setValue("distance_intensity:exponent", 1.0, "Differences in relative intensity ([0-1]) are raised to this power (using 1 or 2 will be fast, everything else is REALLY slow)", {"advanced"});
  defaults_.setMinFloat("distance_intensity:exponent", 0.0);
  defaults_.setValue("distance_intensity:weight", 0.0, "Final intensity distances are weighted by this factor", {"advanced"});
  defaults_.setMinFloat("distance_intensity:weight", 0.0);
  defaults_.setValue("distance_intensity:log_transform", "disabled", "Log-transform intensities? If disabled, d = |int_f2 - int_f1| / int_max. If enabled, d = |log(int_f2 + 1) - log(int_f1 + 1)| / log(int_max + 1))", {"advanced"});
  defaults_.setValidStrings("distance_intensity:log_transform", {"enabled","disabled"});
  defaults_.setSectionDescription("distance_intensity", "Distance component based on differences in relative intensity (usually relative to highest peak in the whole data set)");

  defaults_.setValue("ignore_charge", "false", "false [default]: pairing requires equal charge state (or at least one unknown charge '0'); true: Pairing irrespective of charge state");
  defaults_.setValidStrings("ignore_charge", {"true","false"});

  // NOTE(fix): the description previously labelled both alternatives "true"
  // ("true [default]: pairing requires equal adducts ...; true: Pairing
  // irrespective of adducts"). Per the adduct check in operator() (guarded by
  // '!ignore_adduct_'), 'true' means adducts are ignored.
  defaults_.setValue("ignore_adduct", "true", "true [default]: Pairing irrespective of adducts; false: pairing requires equal adducts (or at least one without adduct annotation)");
  defaults_.setValidStrings("ignore_adduct", {"true","false"});

  defaultsToParam_();
}
// defaulted out-of-line so the destructor is emitted in this translation unit
FeatureDistance::~FeatureDistance() = default;
/// Copy assignment: copies the parameter handler state plus the two members
/// not derived from parameters, then rebuilds the derived members.
FeatureDistance & FeatureDistance::operator=(const FeatureDistance & other)
{
  DefaultParamHandler::operator=(other);
  max_intensity_ = other.max_intensity_;
  force_constraints_ = other.force_constraints_;
  updateMembers_(); // this sets all other member variables
  return *this;
}
/// Re-derives all cached members (per-dimension distance parameters, weights,
/// flags) from the current parameter set. Called by DefaultParamHandler after
/// parameters change and from operator=.
void FeatureDistance::updateMembers_()
{
  params_rt_ = DistanceParams_("RT", param_);
  params_mz_ = DistanceParams_("MZ", param_);

  log_transform_ = (param_.getValue("distance_intensity:log_transform") == "enabled");

  // 'distance_intensity:max_difference' is not a user parameter but derived
  // from the data (maximum intensity), optionally on a log scale:
  const double max_intensity_difference =
    log_transform_ ? Math::linear2log(max_intensity_) : max_intensity_;
  param_.setValue("distance_intensity:max_difference", max_intensity_difference);

  params_intensity_ = DistanceParams_("intensity", param_);

  total_weight_reciprocal_ = 1 / (params_rt_.weight + params_mz_.weight +
                                  params_intensity_.weight);

  ignore_charge_ = param_.getValue("ignore_charge").toBool();
  ignore_adduct_ = param_.getValue("ignore_adduct").toBool();
}
/// Turns a raw difference into a weighted, normalized distance contribution:
/// (diff * norm_factor) ^ exponent * weight.
/// pow() with a double exponent is very expensive, so the common cases
/// (exponent 1 and 2, the defaults) are handled with plain multiplications.
double FeatureDistance::distance_(double diff, const DistanceParams_ & params) const
{
  const double normalized = diff * params.norm_factor;
  if (params.exponent == 1)
  {
    return normalized * params.weight;
  }
  if (params.exponent == 2)
  {
    return normalized * normalized * params.weight;
  }
  // generic (slow) case; a 'double' exponent keeps fractional exponents
  // possible at the cost of a full pow() evaluation
  return pow(normalized, params.exponent) * params.weight;
}
/**
  @brief Computes the normalized distance between two features.

  @return a pair (valid, distance): 'valid' is false if a constraint was
          violated; 'distance' is 'infinity' when the pair must not be linked
          (charge/adduct mismatch, or constraint violation with
          force_constraints_), otherwise the weighted sum of the RT, m/z and
          intensity components.

  NOTE(review): for ppm tolerances this writes to params_mz_.norm_factor, so
  the call operator mutates the object (not const, not safe for concurrent
  calls on the same instance — confirm with callers).
*/
pair<bool, double> FeatureDistance::operator()(const BaseFeature & left,
                                               const BaseFeature & right)
{
  // hard constraint: equal charge states required (unless one is unknown '0')
  if (!ignore_charge_)
  {
    Int charge_left = left.getCharge(), charge_right = right.getCharge();
    if (charge_left != charge_right)
    {
      if ((charge_left != 0) && (charge_right != 0))
      {
        return make_pair(false, infinity);
      }
    }
  }
  // hard constraint: equal adduct formulas required (only checked when both
  // features carry an adduct annotation)
  if (!ignore_adduct_)
  {
    if (left.metaValueExists(Constants::UserParam::DC_CHARGE_ADDUCTS) && right.metaValueExists(Constants::UserParam::DC_CHARGE_ADDUCTS))
    {
      if (EmpiricalFormula(left.getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS)) != EmpiricalFormula(right.getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS)))
      {
        return make_pair(false, infinity);
      }
    }
  }

  bool valid = true;

  // check m/z difference constraint:
  double left_mz = left.getMZ(), right_mz = right.getMZ();
  double dist_mz = fabs(left_mz - right_mz);
  double max_diff_mz = params_mz_.max_difference;
  if (params_mz_.max_diff_ppm) // compute absolute difference (in Da/Th)
  {
    max_diff_mz *= left_mz * 1e-6;
    // overwrite this parameter - it will be recomputed each time anyway:
    params_mz_.norm_factor = 1 / max_diff_mz;
  }
  if (dist_mz > max_diff_mz)
  {
    if (force_constraints_)
    {
      return make_pair(false, infinity);
    }
    valid = false;
  }

  // check RT difference constraint:
  double dist_rt = fabs(left.getRT() - right.getRT());
  if (dist_rt > params_rt_.max_difference)
  {
    if (force_constraints_)
    {
      return make_pair(false, infinity);
    }
    valid = false;
  }

  // convert raw differences into weighted, normalized components:
  dist_rt = distance_(dist_rt, params_rt_);
  dist_mz = distance_(dist_mz, params_mz_);

  double dist_intensity = 0.0;
  if (params_intensity_.relevant) // not by default, so worth checking
  {
    if (log_transform_)
    {
      dist_intensity = fabs(Math::linear2log(left.getIntensity()) - Math::linear2log(right.getIntensity()));
    }
    else
    {
      dist_intensity = fabs(left.getIntensity() - right.getIntensity());
    }
    dist_intensity = distance_(dist_intensity, params_intensity_);
  }

  // combine components and normalize by the total weight:
  double dist = dist_rt + dist_mz + dist_intensity;
  dist *= total_weight_reciprocal_;

  return make_pair(valid, dist);
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/PoseClusteringAffineSuperimposer.cpp | .cpp | 49,316 | 1,053 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Eva Lange, Clemens Groepl $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/PoseClusteringAffineSuperimposer.h>
#include <OpenMS/PROCESSING/BASELINE/MorphologicalFilter.h>
#include <OpenMS/MATH/STATISTICS/BasicStatistics.h>
#include <OpenMS/ML/INTERPOLATION/LinearInterpolation.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <boost/math/special_functions/fpclassify.hpp> // isnan
// #define Debug_PoseClusteringAffineSuperimposer
namespace OpenMS
{
/// Constructor: registers all parameters of the affine (shift + scale)
/// pose-clustering superimposer, including the hashing bucket sizes and the
/// optional debug-dump file names.
PoseClusteringAffineSuperimposer::PoseClusteringAffineSuperimposer() :
  BaseSuperimposer()
{
  setName("PoseClusteringAffineSuperimposer");

  defaults_.setValue("mz_pair_max_distance", 0.5, "Maximum of m/z deviation of corresponding elements in different maps. "
                                                  "This condition applies to the pairs considered in hashing.");
  defaults_.setMinFloat("mz_pair_max_distance", 0.);

  defaults_.setValue("rt_pair_distance_fraction", 0.1, "Within each of the two maps, the pairs considered for pose clustering "
                                                       "must be separated by at least this fraction of the total elution time "
                                                       "interval (i.e., max - min). ", {"advanced"});
  defaults_.setMinFloat("rt_pair_distance_fraction", 0.);
  defaults_.setMaxFloat("rt_pair_distance_fraction", 1.);

  defaults_.setValue("num_used_points", 2000, "Maximum number of elements considered in each map "
                                              "(selected by intensity). Use this to reduce the running time "
                                              "and to disregard weak signals during alignment. For using all points, set this to -1.");
  defaults_.setMinInt("num_used_points", -1);

  // bucket sizes control the resolution of the pose-clustering histograms:
  defaults_.setValue("scaling_bucket_size", 0.005, "The scaling of the retention time "
                                                   "interval is being hashed into buckets of this size during pose "
                                                   "clustering. A good choice for this would be a bit smaller than the "
                                                   "error you would expect from repeated runs.");
  defaults_.setMinFloat("scaling_bucket_size", 0.);

  defaults_.setValue("shift_bucket_size", 3.0, "The shift at the lower (respectively, higher) end of the retention time "
                                               "interval is being hashed into buckets of this size during pose "
                                               "clustering. A good choice for this would be about "
                                               "the time between consecutive MS scans.");
  defaults_.setMinFloat("shift_bucket_size", 0.);

  defaults_.setValue("max_shift", 1000.0, "Maximal shift which is considered during histogramming (in seconds). "
                                          "This applies for both directions.", {"advanced"});
  defaults_.setMinFloat("max_shift", 0.);

  defaults_.setValue("max_scaling", 2.0, "Maximal scaling which is considered during histogramming. "
                                         "The minimal scaling is the reciprocal of this.", {"advanced"});
  defaults_.setMinFloat("max_scaling", 1.);

  // debug output (disabled when the base filenames are empty):
  defaults_.setValue("dump_buckets", "", "[DEBUG] If non-empty, base filename where hash table buckets will be dumped to. "
                                         "A serial number for each invocation will be appended automatically.", {"advanced"});
  defaults_.setValue("dump_pairs", "", "[DEBUG] If non-empty, base filename where the individual hashed pairs will be dumped to (large!). "
                                       "A serial number for each invocation will be appended automatically.", {"advanced"});

  defaultsToParam_();
}
/**
  @brief Initializes the four histograms (hash tables) used by the algorithm.

  Each histogram is cleared, resized and given a linear mapping from bucket
  index to key: the scaling histograms are centered on log-scale 0 (scale 1),
  the shift histograms on rt_low / rt_high respectively.
*/
void initializeHashTables(
  Math::LinearInterpolation<double, double>& scaling_hash_1,
  Math::LinearInterpolation<double, double>& scaling_hash_2,
  Math::LinearInterpolation<double, double>& rt_low_hash_,
  Math::LinearInterpolation<double, double>& rt_high_hash_,
  const double max_scaling, const double max_shift,
  const double scaling_bucket_size, const double shift_bucket_size,
  const double rt_low, const double rt_high)
{
  // local helper: reset a histogram to 'num_buckets' empty bins and map the
  // bin 'center_bucket' onto the key 'origin' (bin width = 'bucket_size')
  auto reset_hash = [](Math::LinearInterpolation<double, double>& hash,
                       const Size num_buckets, const double bucket_size,
                       const Int center_bucket, const double origin)
  {
    hash.getData().clear();
    hash.getData().resize(num_buckets);
    hash.setMapping(bucket_size, center_bucket, origin);
  };

  // scaling is hashed on a log scale, symmetric around scale 1 (log = 0)
  const Int scaling_buckets_num_half = (Int) ceil(log(max_scaling) / scaling_bucket_size) + 1;
  reset_hash(scaling_hash_1, 2 * scaling_buckets_num_half + 1, scaling_bucket_size, scaling_buckets_num_half, 0.);
  reset_hash(scaling_hash_2, 2 * scaling_buckets_num_half + 1, scaling_bucket_size, scaling_buckets_num_half, 0.);

  // (over)estimate the required number of buckets for shifting
  const Int rt_buckets_num_half = 4 + 2 * (Int) ceil((max_shift * max_scaling) / shift_bucket_size);
  const Int rt_buckets_num = 1 + 2 * rt_buckets_num_half;
  reset_hash(rt_low_hash_, rt_buckets_num, shift_bucket_size, rt_buckets_num_half, rt_low);
  reset_hash(rt_high_hash_, rt_buckets_num, shift_bucket_size, rt_buckets_num_half, rt_high);
}
/**
  @brief Estimates scaling by trying different (weighted) affine transformations.

  Basically try all combinations of two pairs from map model (i,j) and two
  pairs from map scene (k,l) and compute shift and scale based on these
  four points. The computed value is weighted by the intensity of all
  points, thus this is a density-based approach.

  In the first round (hashing_round == 1), compute and store every
  combination into scaling_hash_1. In the second round, only consider
  quadruplets where the scaling factor lies within the previously estimated
  bounds (scale_low_1, scale_high_1) and additionally hash the images of
  rt_low/rt_high under the transformation; all other data is discarded.

  The i/j (model) and k/l (scene) m/z windows are maintained as monotonic
  sliding indices over the (m/z-sorted) input maps — the loops below depend
  on that ordering.
*/
void affineTransformationHashing(const bool do_dump_pairs,
                                 const std::vector<Peak2D> & model_map,
                                 const std::vector<Peak2D> & scene_map,
                                 Math::LinearInterpolation<double, double>& scaling_hash_1,
                                 Math::LinearInterpolation<double, double>& scaling_hash_2,
                                 Math::LinearInterpolation<double, double>& rt_low_hash_,
                                 Math::LinearInterpolation<double, double>& rt_high_hash_,
                                 const int hashing_round,
                                 const double rt_pair_min_distance,
                                 const String& dump_pairs_basename,
                                 const Int dump_buckets_serial,
                                 const double mz_pair_max_distance,
                                 const double winlength_factor_baseline,
                                 const double total_intensity_ratio,
                                 const double scale_low_1,
                                 const double scale_high_1,
                                 const double rt_low, const double rt_high)
{
  Size const model_map_size = model_map.size(); // i j
  Size const scene_map_size = scene_map.size(); // k l

  // optional debug dump of all hashed pairs (round 2 only, see below)
  String dump_pairs_filename;
  std::ofstream dump_pairs_file;
  if (do_dump_pairs)
  {
    dump_pairs_filename = dump_pairs_basename + "_phase_two_" + String(dump_buckets_serial);
    dump_pairs_file.open(dump_pairs_filename.c_str());
    dump_pairs_file << "#" << ' ' << "i" << ' ' << "j" << ' ' << "k" << ' ' << "l" << ' ' << '\n';
  }

  // first point in model map (i)
  for (Size i = 0, i_low = 0, i_high = 0, k_low = 0, k_high = 0; i < model_map_size - 1; ++i)
  {
    // Adjust window around i in model map (get all features in a m/z range of item i in the model map)
    while (i_low < model_map_size && model_map[i_low].getMZ() < model_map[i].getMZ() - mz_pair_max_distance)
      ++i_low;
    while (i_high < model_map_size && model_map[i_high].getMZ() <= model_map[i].getMZ() + mz_pair_max_distance)
      ++i_high;

    // stop if there are too many features are in our window
    double i_winlength_factor = 1. / (i_high - i_low);
    i_winlength_factor -= winlength_factor_baseline;
    if (i_winlength_factor <= 0)
      continue;

    // Adjust window around k in scene map (get all features in a m/z range of item i in the scene map)
    while (k_low < scene_map_size && scene_map[k_low].getMZ() < model_map[i].getMZ() - mz_pair_max_distance)
      ++k_low;
    while (k_high < scene_map_size && scene_map[k_high].getMZ() <= model_map[i].getMZ() + mz_pair_max_distance)
      ++k_high;

    // Iterate through all matching features in the scene map that are
    // within the m/z distance of item i from the model map.
    // first point in scene map (k)
    for (Size k = k_low; k < k_high; ++k)
    {
      // stop if there are too many features are in our window
      double k_winlength_factor = 1. / (k_high - k_low);
      k_winlength_factor -= winlength_factor_baseline;
      if (k_winlength_factor <= 0)
        continue;

      // compute similarity of intensities i k by taking the ratio of the two intensities
      double similarity_ik;
      {
        const double int_i = model_map[i].getIntensity();
        const double int_k = scene_map[k].getIntensity() * total_intensity_ratio;
        similarity_ik = (int_i < int_k) ? int_i / int_k : int_k / int_i;
        // weight is inverse proportional to number of elements with similar mz
        similarity_ik *= i_winlength_factor;
        similarity_ik *= k_winlength_factor;
      }

      // second point in model map (j)
      for (Size j = i + 1, j_low = i_low, j_high = i_low, l_low = k_low, l_high = k_high; j < model_map_size; ++j)
      {
        // diff in model map -> skip features that are too far away in RT
        double diff_model = model_map[j].getRT() - model_map[i].getRT();
        if (fabs(diff_model) < rt_pair_min_distance)
          continue;

        // Adjust window around j in model map
        while (j_low < model_map_size && model_map[j_low].getMZ() < model_map[i].getMZ() - mz_pair_max_distance)
          ++j_low;
        while (j_high < model_map_size && model_map[j_high].getMZ() <= model_map[i].getMZ() + mz_pair_max_distance)
          ++j_high;

        double j_winlength_factor = 1. / (j_high - j_low);
        j_winlength_factor -= winlength_factor_baseline;
        if (j_winlength_factor <= 0)
          continue;

        // Adjust window around l in scene map
        while (l_low < scene_map_size && scene_map[l_low].getMZ() < model_map[j].getMZ() - mz_pair_max_distance)
          ++l_low;
        while (l_high < scene_map_size && scene_map[l_high].getMZ() <= model_map[j].getMZ() + mz_pair_max_distance)
          ++l_high;

        // second point in scene map (l)
        for (Size l = l_low; l < l_high; ++l)
        {
          double l_winlength_factor = 1. / (l_high - l_low);
          l_winlength_factor -= winlength_factor_baseline;
          if (l_winlength_factor <= 0)
            continue;

          // diff in scene map -> skip features that are too far away in RT
          double diff_scene = scene_map[l].getRT() - scene_map[k].getRT();
          // avoid cross mappings (i,j) -> (k,l) (e.g. i_rt < j_rt and k_rt > l_rt)
          // and point pairs with equal retention times (e.g. i_rt == j_rt)
          if (fabs(diff_scene) < rt_pair_min_distance || ((diff_model > 0) != (diff_scene > 0)))
            continue;

          // compute the transformation (i,j) -> (k,l)
          double scaling = diff_model / diff_scene;
          double shift = model_map[i].getRT() - scene_map[k].getRT() * scaling;

          // compute similarity of intensities i k j l
          double similarity_ik_jl;
          {
            // compute similarity of intensities j l
            const double int_j = model_map[j].getIntensity();
            const double int_l = scene_map[l].getIntensity() * total_intensity_ratio;
            double similarity_jl = (int_j < int_l) ? int_j / int_l : int_l / int_j;
            // weight is inverse proportional to number of elements with similar mz
            similarity_jl *= j_winlength_factor;
            similarity_jl *= l_winlength_factor;
            similarity_ik_jl = similarity_ik * similarity_jl;
          }

          // hash the images of scaling, rt_low and rt_high into their respective hash tables
          // store the scaling parameter and the (estimated) transformation of start/end of the maps in hashes
          // -> in round 2, discard values outside of scale_low_1 and
          // scale_high_1 (estimated before in scalingEstimate)
          if (hashing_round == 1)
          {
            // hashing round 1 (estimate the scaling only)
            scaling_hash_1.addValue(log(scaling), similarity_ik_jl);
          }
          else if (scaling >= scale_low_1 && scaling <= scale_high_1)
          {
            // hashing round 2 (estimate scaling and shift)
            scaling_hash_2.addValue(log(scaling), similarity_ik_jl);
            const double rt_low_image = shift + rt_low * scaling;
            rt_low_hash_.addValue(rt_low_image, similarity_ik_jl);
            const double rt_high_image = shift + rt_high * scaling;
            rt_high_hash_.addValue(rt_high_image, similarity_ik_jl);

            if (do_dump_pairs)
            {
              dump_pairs_file << i << ' ' << model_map[i].getRT() << ' ' << model_map[i].getMZ() << ' ' << j << ' ' << model_map[j].getRT() << ' '
                              << model_map[j].getMZ() << ' ' << k << ' ' << scene_map[k].getRT() << ' ' << scene_map[k].getMZ() << ' ' << l << ' '
                              << scene_map[l].getRT() << ' ' << scene_map[l].getMZ() << ' ' << similarity_ik_jl << ' ' << '\n';
            }
          }
        } // l
      } // j
    } // k
  } // i
}
/**
@brief Estimates likely position of the scale factor based on scaling_hash_1.
Uses the histogram given in scaling_hash_1 to perform some filtering and
correction of the data and then estimate the mean of the scaling factor
and standard deviation, thus returning a likely range for the scaling
factor. Outliers are removed in an iterative process.
Returns scale_centroid_1 (mean), scale_low_1 (lower bound) and
scale_high_1 (upper bound) of the scaling factor.
*/
void scalingEstimate(
Math::LinearInterpolation<double, double>& scaling_hash_1,
const bool do_dump_buckets,
const UInt struc_elem_length_datapoints,
const String& dump_buckets_basename,
const Int dump_buckets_serial,
const double scaling_histogram_crossing_slope,
const double scaling_cutoff_stdev_multiplier,
const UInt loops_mean_stdev_cutoff,
double& scale_low_1,
double& scale_high_1,
double& scale_centroid_1)
{
typedef Math::LinearInterpolation<double, double> LinearInterpolationType_;
UInt filtering_stage = 0;
// optionally, dump before filtering
String dump_buckets_filename;
std::ofstream dump_buckets_file;
if (do_dump_buckets)
{
dump_buckets_filename = dump_buckets_basename + "_scale_" + String(dump_buckets_serial);
dump_buckets_file.open(dump_buckets_filename.c_str());
dump_buckets_file << "# rt scale hash table buckets dump ( scale, height ) : " << dump_buckets_filename << '\n';
dump_buckets_file << "# unfiltered hash data\n";
for (Size index = 0; index < scaling_hash_1.getData().size(); ++index)
{
const double log_of_scale = scaling_hash_1.index2key(index);
const double height = scaling_hash_1.getData()[index];
dump_buckets_file << log_of_scale << '\t' << height << '\t' << filtering_stage << '\n';
}
dump_buckets_file << '\n';
}
++filtering_stage;
// ***************************************************************************
// Data filtering: apply tophat filter to histogram of different scales
// ***************************************************************************
MorphologicalFilter morph_filter;
Param morph_filter_param;
morph_filter_param.setValue("struc_elem_unit", "DataPoints");
morph_filter_param.setValue("struc_elem_length", double(struc_elem_length_datapoints));
morph_filter_param.setValue("method", "tophat");
morph_filter.setParameters(morph_filter_param);
LinearInterpolationType_::container_type buffer(scaling_hash_1.getData().size());
morph_filter.filterRange(scaling_hash_1.getData().begin(), scaling_hash_1.getData().end(), buffer.begin());
scaling_hash_1.getData().swap(buffer);
// optionally, dump after filtering
if (do_dump_buckets)
{
dump_buckets_file << "# tophat filtered hash data\n";
for (Size index = 0; index < scaling_hash_1.getData().size(); ++index)
{
const double log_of_scale = scaling_hash_1.index2key(index);
const double height = scaling_hash_1.getData()[index];
dump_buckets_file << log_of_scale << '\t' << height << '\t' << filtering_stage << '\n';
}
dump_buckets_file << '\n';
}
++filtering_stage;
// ***************************************************************************
// Data cutoff: estimate cutoff for filtered histogram
// compute freq_cutoff using a fancy criterion to distinguish between the
// noise level of the histogram and enriched histogram bins
// ***************************************************************************
double freq_cutoff;
do
{
std::copy(scaling_hash_1.getData().begin(), scaling_hash_1.getData().end(), buffer.begin());
std::sort(buffer.begin(), buffer.end(), std::greater<double>());
double freq_intercept = scaling_hash_1.getData().front();
double freq_slope = (scaling_hash_1.getData().back() - scaling_hash_1.getData().front()) / double(buffer.size())
/ scaling_histogram_crossing_slope;
if (!freq_slope || buffer.empty())
{
// in fact these conditions are actually impossible, but let's be really sure ;-)
freq_cutoff = 0;
}
else
{
// -> basically trying to find the intersection where sorted values fall
// below fitted line with slop "freq_slope"
Size index = 1; // not 0 (!)
while (buffer[index] >= freq_intercept + freq_slope * double(index))
{
++index;
}
freq_cutoff = buffer[--index]; // note that we have index >= 1
}
} while (false);
// ***************************************************************************
// apply freq_cutoff, setting smaller values to zero
for (Size index = 0; index < scaling_hash_1.getData().size(); ++index)
{
if (scaling_hash_1.getData()[index] < freq_cutoff)
{
scaling_hash_1.getData()[index] = 0;
}
}
// optionally, dump after noise filtering using freq_cutoff
if (do_dump_buckets)
{
dump_buckets_file << "# after freq_cutoff, which is: " << freq_cutoff << '\n';
for (Size index = 0; index < scaling_hash_1.getData().size(); ++index)
{
const double log_of_scale = scaling_hash_1.index2key(index);
const double height = scaling_hash_1.getData()[index];
dump_buckets_file << log_of_scale << '\t' << height << '\t' << filtering_stage << '\n';
}
dump_buckets_file << '\n';
}
// ***************************************************************************
// iterative cut-off based on mean and stdev - relies upon scaling_cutoff_stdev_multiplier which is a bit hard to set right.
// ***************************************************************************
Math::BasicStatistics<double> statistics;
std::vector<double>::const_iterator data_begin = scaling_hash_1.getData().begin();
const Size data_size = scaling_hash_1.getData().size();
Size data_range_begin = 0;
Size data_range_end = data_size;
for (UInt loop = 0; loop < loops_mean_stdev_cutoff; ++loop) // MAGIC ALERT: number of loops
{
statistics.update(data_begin + data_range_begin, data_begin + data_range_end);
double mean = statistics.mean() + data_range_begin;
double stdev = sqrt(statistics.variance());
data_range_begin = floor(std::max<double>(mean - scaling_cutoff_stdev_multiplier * stdev, 0));
data_range_end = ceil(std::min<double>(mean + scaling_cutoff_stdev_multiplier * stdev + 1, data_size));
const double log_outside_mean = scaling_hash_1.index2key(mean);
const double log_outside_stdev = stdev * scaling_hash_1.getScale();
scale_low_1 = exp(log_outside_mean - log_outside_stdev);
scale_centroid_1 = exp(log_outside_mean);
scale_high_1 = exp(log_outside_mean + log_outside_stdev);
if (do_dump_buckets)
{
dump_buckets_file << "# loop: " << loop << " mean: " << log_outside_mean << " [" << exp(log_outside_mean) << "] stdev: " << log_outside_stdev
<< " [" << scale_centroid_1 << "] (mean-stdev): " << log_outside_mean - log_outside_stdev << " [" << scale_low_1 << "] (mean+stdev): "
<< log_outside_mean + log_outside_stdev << " [" << scale_high_1 << "] data_range_begin: " << data_range_begin << " data_range_end: "
<< data_range_end << '\n';
}
}
if (do_dump_buckets)
{
dump_buckets_file << "# EOF\n";
dump_buckets_file.close();
}
}
// Estimates the RT shift of the scene map relative to the model map at BOTH
// ends of the RT range, based on the two shift histograms (rt_low_hash_,
// rt_high_hash_) filled during the second round of hashing.
// Both histograms are denoised in three stages:
//   1. tophat (morphological) filtering to remove the baseline,
//   2. a frequency cutoff separating noise bins from enriched bins,
//   3. an iterative mean/stdev outlier cutoff (loops_mean_stdev_cutoff loops).
// The centroids of the resulting distributions are returned via
// rt_low_centroid and rt_high_centroid.
// NOTE: both hash tables are modified in place by the filtering stages.
// If do_dump_buckets is set, every stage is appended to two debug files
// ("<basename>_low_<serial>" / "<basename>_high_<serial>").
void shiftEstimate(
const bool do_dump_buckets,
Math::LinearInterpolation<double, double>& rt_low_hash_,
Math::LinearInterpolation<double, double>& rt_high_hash_,
const Int dump_buckets_serial,
const UInt struc_elem_length_datapoints,
const double scaling_histogram_crossing_slope,
const double scaling_cutoff_stdev_multiplier,
const UInt loops_mean_stdev_cutoff,
const String& dump_buckets_basename,
double& rt_low_centroid,
double& rt_high_centroid)
{
// stage counter written into the dump files so the stages can be told apart
UInt filtering_stage = 0;
// optionally, dump before filtering
String dump_buckets_low_filename;
std::ofstream dump_buckets_low_file;
String dump_buckets_high_filename;
std::ofstream dump_buckets_high_file;
if (do_dump_buckets)
{
dump_buckets_low_filename = dump_buckets_basename + "_low_" + String(dump_buckets_serial);
dump_buckets_low_file.open(dump_buckets_low_filename.c_str());
dump_buckets_low_file << "# rt low hash table buckets dump ( scale, height ) : " << dump_buckets_low_filename << '\n';
dump_buckets_low_file << "# unfiltered hash data\n";
for (Size index = 0; index < rt_low_hash_.getData().size(); ++index)
{
const double rt_image = rt_low_hash_.index2key(index);
const double height = rt_low_hash_.getData()[index];
dump_buckets_low_file << rt_image << '\t' << height << '\t' << filtering_stage << '\n';
}
dump_buckets_low_file << '\n';
dump_buckets_high_filename = dump_buckets_basename + "_high_" + String(dump_buckets_serial);
dump_buckets_high_file.open(dump_buckets_high_filename.c_str());
dump_buckets_high_file << "# rt high hash table buckets dump ( scale, height ) : " << dump_buckets_high_filename << '\n';
dump_buckets_high_file << "# unfiltered hash data\n";
for (Size index = 0; index < rt_high_hash_.getData().size(); ++index)
{
const double rt_image = rt_high_hash_.index2key(index);
const double height = rt_high_hash_.getData()[index];
dump_buckets_high_file << rt_image << '\t' << height << '\t' << filtering_stage << '\n';
}
dump_buckets_high_file << '\n';
}
++filtering_stage;
// apply tophat filter to histogram
MorphologicalFilter morph_filter;
Param morph_filter_param;
morph_filter_param.setValue("struc_elem_unit", "DataPoints");
morph_filter_param.setValue("struc_elem_length", double(struc_elem_length_datapoints));
morph_filter_param.setValue("method", "tophat");
morph_filter.setParameters(morph_filter_param);
typedef Math::LinearInterpolation<double, double> LinearInterpolationType_;
// scratch buffer reused for both histograms (same bucket count assumed)
LinearInterpolationType_::container_type buffer(rt_low_hash_.getData().size());
morph_filter.filterRange(rt_low_hash_.getData().begin(), rt_low_hash_.getData().end(), buffer.begin());
rt_low_hash_.getData().swap(buffer);
morph_filter.filterRange(rt_high_hash_.getData().begin(), rt_high_hash_.getData().end(), buffer.begin());
rt_high_hash_.getData().swap(buffer);
// optionally, dump after filtering
if (do_dump_buckets)
{
dump_buckets_low_file << "# tophat filtered hash data\n";
for (Size index = 0; index < rt_low_hash_.getData().size(); ++index)
{
const double rt_image = rt_low_hash_.index2key(index);
const double height = rt_low_hash_.getData()[index];
dump_buckets_low_file << rt_image << '\t' << height << '\t' << filtering_stage << '\n';
}
dump_buckets_low_file << '\n';
dump_buckets_high_file << "# tophat filtered hash data\n";
for (Size index = 0; index < rt_high_hash_.getData().size(); ++index)
{
const double rt_image = rt_high_hash_.index2key(index);
const double height = rt_high_hash_.getData()[index];
dump_buckets_high_file << rt_image << '\t' << height << '\t' << filtering_stage << '\n';
}
dump_buckets_high_file << '\n';
}
++filtering_stage;
// compute freq_cutoff using a fancy criterion to distinguish between the noise level of the histogram and enriched histogram bins
double freq_cutoff_low;
double freq_cutoff_high;
// the do{...}while(false) only provides a scope; same estimation applied
// independently to the low-end and the high-end histogram
do
{
{
std::copy(rt_low_hash_.getData().begin(), rt_low_hash_.getData().end(), buffer.begin());
std::sort(buffer.begin(), buffer.end(), std::greater<double>());
double freq_intercept = rt_low_hash_.getData().front();
double freq_slope = (rt_low_hash_.getData().back() - rt_low_hash_.getData().front()) / double(buffer.size())
/ scaling_histogram_crossing_slope;
if (!freq_slope || buffer.empty())
{
// in fact these conditions are actually impossible, but let's be really sure ;-)
freq_cutoff_low = 0;
}
else
{
// find where the descending sorted heights fall below the fitted line
Size index = 1; // not 0 (!)
while (buffer[index] >= freq_intercept + freq_slope * double(index))
{
++index;
}
freq_cutoff_low = buffer[--index]; // note that we have index >= 1
}
}
{
std::copy(rt_high_hash_.getData().begin(), rt_high_hash_.getData().end(), buffer.begin());
std::sort(buffer.begin(), buffer.end(), std::greater<double>());
double freq_intercept = rt_high_hash_.getData().front();
double freq_slope = (rt_high_hash_.getData().back() - rt_high_hash_.getData().front()) / double(buffer.size())
/ scaling_histogram_crossing_slope;
if (!freq_slope || buffer.empty())
{
// in fact these conditions are actually impossible, but let's be really sure ;-)
freq_cutoff_high = 0;
}
else
{
Size index = 1; // not 0 (!)
while (buffer[index] >= freq_intercept + freq_slope * double(index))
{
++index;
}
freq_cutoff_high = buffer[--index]; // note that we have index >= 1
}
}
} while (false);
// apply freq_cutoff, setting smaller values to zero
for (Size index = 0; index < rt_low_hash_.getData().size(); ++index)
{
if (rt_low_hash_.getData()[index] < freq_cutoff_low)
{
rt_low_hash_.getData()[index] = 0;
}
}
for (Size index = 0; index < rt_high_hash_.getData().size(); ++index)
{
if (rt_high_hash_.getData()[index] < freq_cutoff_high)
{
rt_high_hash_.getData()[index] = 0;
}
}
// optionally, dump after noise filtering using freq_cutoff
if (do_dump_buckets)
{
dump_buckets_low_file << "# after freq_cutoff, which is: " << freq_cutoff_low << '\n';
for (Size index = 0; index < rt_low_hash_.getData().size(); ++index)
{
const double rt_image = rt_low_hash_.index2key(index);
const double height = rt_low_hash_.getData()[index];
dump_buckets_low_file << rt_image << '\t' << height << '\t' << filtering_stage << '\n';
}
dump_buckets_low_file << '\n';
dump_buckets_high_file << "# after freq_cutoff, which is: " << freq_cutoff_high << '\n';
for (Size index = 0; index < rt_high_hash_.getData().size(); ++index)
{
const double rt_image = rt_high_hash_.index2key(index);
const double height = rt_high_hash_.getData()[index];
dump_buckets_high_file << rt_image << '\t' << height << '\t' << filtering_stage << '\n';
}
dump_buckets_high_file << '\n';
}
// iterative cut-off based on mean and stdev - relies upon scaling_cutoff_stdev_multiplier which is a bit hard to set right.
{
Math::BasicStatistics<double> statistics;
std::vector<double>::const_iterator data_begin = rt_low_hash_.getData().begin();
const Size data_size = rt_low_hash_.getData().size();
Size data_range_begin = 0;
Size data_range_end = data_size;
for (UInt loop = 0; loop < loops_mean_stdev_cutoff; ++loop) // MAGIC ALERT: number of loops
{
statistics.update(data_begin + data_range_begin, data_begin + data_range_end);
// statistics.mean() is relative to the sub-range; shift back to absolute index space
double mean = statistics.mean() + data_range_begin;
double stdev = sqrt(statistics.variance());
data_range_begin = floor(std::max<double>(mean - scaling_cutoff_stdev_multiplier * stdev, 0));
data_range_end = ceil(std::min<double>(mean + scaling_cutoff_stdev_multiplier * stdev + 1, data_size));
const double outside_mean = rt_low_hash_.index2key(mean);
const double outside_stdev = stdev * rt_low_hash_.getScale();
// rt_low_low = (outside_mean - outside_stdev);
rt_low_centroid = (outside_mean);
// rt_low_high = (outside_mean + outside_stdev);
if (do_dump_buckets)
{
dump_buckets_low_file << "# loop: " << loop << " mean: " << outside_mean << " stdev: " << outside_stdev << " (mean-stdev): " << outside_mean
- outside_stdev << " (mean+stdev): " << outside_mean + outside_stdev << " data_range_begin: " << data_range_begin << " data_range_end: "
<< data_range_end << '\n';
}
}
}
// iterative cut-off based on mean and stdev - relies upon scaling_cutoff_stdev_multiplier which is a bit hard to set right.
{
Math::BasicStatistics<double> statistics;
std::vector<double>::const_iterator data_begin = rt_high_hash_.getData().begin();
const Size data_size = rt_high_hash_.getData().size();
Size data_range_begin = 0;
Size data_range_end = data_size;
for (UInt loop = 0; loop < loops_mean_stdev_cutoff; ++loop) // MAGIC ALERT: number of loops
{
statistics.update(data_begin + data_range_begin, data_begin + data_range_end);
double mean = statistics.mean() + data_range_begin;
double stdev = sqrt(statistics.variance());
// NOTE(review): the high-end range is widened by (-1 / +2) compared to the
// low-end variant above (0 / +1) — presumably an intentional rounding
// margin, but worth confirming against the low-end code path.
data_range_begin = floor(std::max<double>(mean - scaling_cutoff_stdev_multiplier * stdev - 1, 0));
data_range_end = ceil(std::min<double>(mean + scaling_cutoff_stdev_multiplier * stdev + 2, data_size));
const double outside_mean = rt_high_hash_.index2key(mean);
const double outside_stdev = stdev * rt_high_hash_.getScale();
// rt_high_low = (outside_mean - outside_stdev);
rt_high_centroid = (outside_mean);
// rt_high_high = (outside_mean + outside_stdev);
if (do_dump_buckets)
{
dump_buckets_high_file << "# loop: " << loop << " mean: " << outside_mean << " stdev: " << outside_stdev << " (mean-stdev): " << outside_mean
- outside_stdev << " (mean+stdev): " << outside_mean + outside_stdev << " data_range_begin: " << data_range_begin << " data_range_end: "
<< data_range_end << '\n';
}
}
}
if (do_dump_buckets)
{
dump_buckets_low_file << "# EOF\n";
dump_buckets_low_file.close();
dump_buckets_high_file << "# EOF\n";
dump_buckets_high_file.close();
}
}
double computeIntensityRatio(const std::vector<Peak2D> & model_map, const std::vector<Peak2D> & scene_map)
{
double total_int_model_map = 0;
for (Size i = 0; i < model_map.size(); ++i)
{
total_int_model_map += model_map[i].getIntensity();
}
double total_int_scene_map = 0;
for (Size i = 0; i < scene_map.size(); ++i)
{
total_int_scene_map += scene_map[i].getIntensity();
}
return total_int_model_map / total_int_scene_map;
}
// Estimates an affine RT transformation (slope + intercept) mapping the scene
// map onto the model map via pose clustering. Outline:
//   1. keep only the 'num_used_points' most intense peaks of each map
//   2. initialize hash tables over scaling / shift values
//   3. round 1 of hashing -> coarse estimate of the RT scaling
//   4. round 2 of hashing (restricted by the round-1 scaling bounds)
//      -> shift estimates at the low and the high RT end
//   5. convert the two shift centroids into slope + intercept of a "linear"
//      TransformationDescription model
// @throws Exception::IllegalArgument if either input map is empty
// @throws Exception::InvalidValue if the resulting slope/intercept is not finite
void PoseClusteringAffineSuperimposer::run(const std::vector<Peak2D> & map_model,
const std::vector<Peak2D> & map_scene,
TransformationDescription & transformation)
{
if (map_model.empty() || map_scene.empty())
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"One of the input maps is empty! This is not allowed!");
}
//**************************************************************************
// Parameters
//**************************************************************************
// number of data points in structuring element for tophat filter, which removes baseline from histogram
const UInt struc_elem_length_datapoints = 21;
// used when distinguishing noise level and enriched histogram bins
const double scaling_histogram_crossing_slope = 3.0;
// multiplier for stdev in cutoff for outliers
const double scaling_cutoff_stdev_multiplier = 1.5;
// number of loops in stdev cutoff for outliers
const UInt loops_mean_stdev_cutoff = 3;
// Each m/z window is given unit weight. If there are too many pairs for a
// window, the individual contributions will be very small, but running
// time will be high, so we provide a cutoff for this. Typically this will
// exclude compounds which elute over the whole retention time range from
// consideration.
// This may lead to the exclusion of certain m/z windows that are very
// crowded.
const double winlength_factor_baseline = 0.1;
/// Maximum deviation in mz of two partner points
const double mz_pair_max_distance = param_.getValue("mz_pair_max_distance");
//**************************************************************************
// Working variables
//**************************************************************************
typedef Math::LinearInterpolation<double, double> LinearInterpolationType_;
// these are a set of hashes that transform bins to actual RT values ...
LinearInterpolationType_ scaling_hash_1; //scaling estimate from round 1 hashing
LinearInterpolationType_ scaling_hash_2; //scaling estimate from round 2 hashing
LinearInterpolationType_ rt_low_hash_; // rt shift estimate of map start
LinearInterpolationType_ rt_high_hash_; // rt shift estimate of map end
UInt actual_progress = 0;
startProgress(0, 100, "affine pose clustering");
setProgress(++actual_progress);
// Optionally, we will write dumps of the hash table buckets.
bool do_dump_buckets = false;
String dump_buckets_basename;
if (param_.getValue("dump_buckets") != "")
{
do_dump_buckets = true;
dump_buckets_basename = param_.getValue("dump_buckets").toString();
}
setProgress(++actual_progress);
// Even more optionally, we will write dumps of the hashed pairs.
bool do_dump_pairs = false;
String dump_pairs_basename;
if (param_.getValue("dump_pairs") != "")
{
do_dump_pairs = true;
dump_pairs_basename = param_.getValue("dump_pairs").toString();
}
setProgress(++actual_progress);
//**************************************************************************
// Step 1: Select the most abundant data points only.
//**************************************************************************
// use copy to truncate
std::vector<Peak2D> model_map(map_model);
std::vector<Peak2D> scene_map(map_scene);
{
// truncate the data as necessary
const Size num_used_points = (Int) param_.getValue("num_used_points");
// sort the last data points by ascending intensity (from the right, using reverse iterators)
// -> linear in complexity, should be faster than sorting and then taking cutoff
if (model_map.size() > num_used_points)
{
// nth_element on reverse iterators: after this, the num_used_points most
// intense peaks occupy the front of the vector (in unspecified order)
std::nth_element(model_map.rbegin(), model_map.rbegin() + (model_map.size() - num_used_points),
model_map.rend(), Peak2D::IntensityLess());
model_map.resize(num_used_points);
}
setProgress(++actual_progress);
if (scene_map.size() > num_used_points)
{
std::nth_element(scene_map.rbegin(), scene_map.rbegin() + (scene_map.size() - num_used_points),
scene_map.rend(), Peak2D::IntensityLess());
scene_map.resize(num_used_points);
}
setProgress(++actual_progress);
}
// sort by ascending m/z
std::sort(model_map.begin(), model_map.end(), Peak2D::MZLess());
std::sort(scene_map.begin(), scene_map.end(), Peak2D::MZLess());
setProgress((actual_progress = 10));
//**************************************************************************
// Preprocessing
//**************************************************************************
// take estimates of the minimal / maximal element from both maps
// possible improvement: use the truncated map from above which should be
// more reliable (one outlier of low intensity could derail the estimate
// below)
const double model_minrt = std::min_element(map_model.begin(), map_model.end(), Peak2D::RTLess())->getRT();
const double scene_minrt = std::min_element(map_scene.begin(), map_scene.end(), Peak2D::RTLess())->getRT();
const double model_maxrt = std::max_element(map_model.begin(), map_model.end(), Peak2D::RTLess())->getRT();
const double scene_maxrt = std::max_element(map_scene.begin(), map_scene.end(), Peak2D::RTLess())->getRT();
// averaged extremes of both maps define the reference RT interval
const double rt_low = (model_minrt + scene_minrt) / 2.;
const double rt_high = (model_maxrt + scene_maxrt) / 2.;
//**************************************************************************
// Sanity check
//**************************************************************************
{
// crude estimate of the shift and slope
double shift = std::fabs(model_minrt - scene_minrt);
double slope = (model_maxrt - model_minrt) / (scene_maxrt - scene_minrt);
// warn if the crude slope estimate lies outside [1/max_scaling, max_scaling]
// with a 20% safety margin
if ( (double)param_.getValue("max_scaling") < slope * 1.2 ||
1.0 / (double)param_.getValue("max_scaling") > slope / 1.2)
{
OPENMS_LOG_WARN << "WARNING: your map likely has a scaling around " << slope
<< " but your parameters only allow for a maximal scaling of " <<
param_.getValue("max_scaling") << '\n';
OPENMS_LOG_WARN << "It is strongly advised to adjust your max_scaling factor\n";
}
if ( (double)param_.getValue("max_shift") < shift * 1.2)
{
OPENMS_LOG_WARN << "WARNING: your map likely has a shift around " << shift
<< " but your parameters only allow for a maximal shift of " <<
param_.getValue("max_shift") << '\n';
OPENMS_LOG_WARN << "It is strongly advised to adjust your max_shift factor\n";
}
}
// Distance in RT two points need to have at most to be considered for clustering
const double rt_pair_min_distance = (double) param_.getValue("rt_pair_distance_fraction") * (rt_high - rt_low);
//**************************************************************************
// Step 2: Initialize the hash tables: rt_scaling_hash_, rt_low_hash_, and
// rt_high_hash_. (over)estimate the required number of buckets
// for scaling
// Note: the user-specified bucket size only applies to scales
// around 1. The hashing uses a log transformation because we do
// not like skewed distributions.
//**************************************************************************
initializeHashTables(scaling_hash_1, scaling_hash_2, rt_low_hash_, rt_high_hash_,
param_.getValue("max_scaling"), param_.getValue("max_shift"),
param_.getValue("scaling_bucket_size"), param_.getValue("shift_bucket_size"),
rt_low, rt_high);
setProgress(++actual_progress);
//**************************************************************************
// Step 3: compute the ratio of the total intensities of both maps, for
// normalization
//**************************************************************************
double total_intensity_ratio = computeIntensityRatio(model_map, scene_map);
setProgress((actual_progress = 20));
// The serial number is incremented for each invocation of this, to avoid
// overwriting of hash table dumps.
static Int dump_buckets_serial = 0;
++dump_buckets_serial;
//**************************************************************************
// Step 4: Hashing
// Compute the transformations between each point pair in the model
// map and each point pair in the scene map and hash the affine
// transformation.
//
// To speed up the calculation of the final transformation, we
// confine the number of considered point pairs. We match a point
// p in the model map only onto those points p' in the scene map
// that lie in a certain mz interval.
//**************************************************************************
///////////////////////////////////////////////////////////////////
// Step 4.1 First round of hashing: Estimate the scaling
affineTransformationHashing(
do_dump_pairs,
model_map, scene_map,
scaling_hash_1, scaling_hash_2, rt_low_hash_, rt_high_hash_,
1,
rt_pair_min_distance,
dump_pairs_basename,
dump_buckets_serial,
mz_pair_max_distance,
winlength_factor_baseline,
total_intensity_ratio,
-1, // only used in 2nd round of hashing
-1, // only used in 2nd round of hashing
rt_low, rt_high);
setProgress((actual_progress = 30));
///////////////////////////////////////////////////////////////////
// Step 4.2 Estimate the scaling factor (and potential bounds) based on the
// histogram work on rt_scaling_hash_
double scale_low_1;
double scale_centroid_1;
double scale_high_1;
scalingEstimate(
scaling_hash_1,
do_dump_buckets,
struc_elem_length_datapoints,
dump_buckets_basename,
dump_buckets_serial,
scaling_histogram_crossing_slope,
scaling_cutoff_stdev_multiplier,
loops_mean_stdev_cutoff,
scale_low_1,
scale_high_1,
scale_centroid_1);
setProgress((actual_progress = 40));
///////////////////////////////////////////////////////////////////
// Step 4.3 Second round of hashing: Estimate the shift at both ends and
// thereby re-estimate the scaling. This uses the first guess of the
// scaling to reduce noise in the histograms.
affineTransformationHashing(
do_dump_pairs,
model_map, scene_map,
scaling_hash_1, scaling_hash_2, rt_low_hash_, rt_high_hash_,
2,
rt_pair_min_distance,
dump_pairs_basename,
dump_buckets_serial,
mz_pair_max_distance,
winlength_factor_baseline,
total_intensity_ratio,
scale_low_1,
scale_high_1,
rt_low, rt_high);
setProgress((actual_progress = 50));
///////////////////////////////////////////////////////////////////
// Step 4.4 Estimate the shift factor at start/end of the map based on the
// histogram work on rt_low_hash_ and rt_high_hash_
double rt_low_centroid;
double rt_high_centroid;
shiftEstimate(
do_dump_buckets,
rt_low_hash_,
rt_high_hash_,
dump_buckets_serial,
struc_elem_length_datapoints,
scaling_histogram_crossing_slope,
scaling_cutoff_stdev_multiplier,
loops_mean_stdev_cutoff,
dump_buckets_basename,
rt_low_centroid,
rt_high_centroid);
setProgress(80);
//**************************************************************************
// Step 5: Estimate transform
// Compute the shifts at the low and high ends by either using the
// estimated centroids from the distribution (new method) or by
// looking at (around) the fullest bins (old method).
//**************************************************************************
double rt_low_image;
double rt_high_image;
// 5.1 use centroids for images of rt_low and rt_high
#if 1
rt_low_image = rt_low_centroid;
rt_high_image = rt_high_centroid;
#else
// Alternative: use maximum bins instead (i.e. most likely shift)
// (Note: this is a fossil which would disregard most of the above
// computations! The code is left here for developers/debugging only.)
// This does not fully take into account the shape of the histogram and may
// be potentially not be as robust as working on histogram data
const Size rt_low_max_index = std::distance(rt_low_hash_.getData().begin(),
std::max_element(rt_low_hash_.getData().begin(), rt_low_hash_.getData().end()));
rt_low_image = rt_low_hash_.index2key(rt_low_max_index);
const Size rt_high_max_index = std::distance(rt_high_hash_.getData().begin(), std::max_element(rt_high_hash_.getData().begin(),
rt_high_hash_.getData().end()));
rt_high_image = rt_high_hash_.index2key(rt_high_max_index);
#endif
setProgress(++actual_progress);
// 5.2 compute slope and intercept from matching high/low retention times
{
Param params;
// two-point fit: (rt_low -> rt_low_image), (rt_high -> rt_high_image)
const double slope = ((rt_high_image - rt_low_image) / (rt_high - rt_low));
params.setValue("slope", slope);
const double intercept = rt_low_image - rt_low * slope;
params.setValue("intercept", intercept);
// reject non-finite results (e.g. degenerate histograms)
if (std::isinf(slope) || std::isnan(slope) || std::isinf(intercept) || std::isnan(intercept))
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
String("Superimposer could not compute an initial transformation!") +
"You can try to increase 'max_num_peaks_considered' to solve this.", String(intercept * slope));
}
transformation.fitModel("linear", params); // no data, but explicit parameters
}
setProgress(++actual_progress);
endProgress();
}
void PoseClusteringAffineSuperimposer::run(const ConsensusMap& map_model,
const ConsensusMap& map_scene,
TransformationDescription& transformation)
{
std::vector<Peak2D> c_map_model, c_map_scene;
for (ConsensusMap::const_iterator it = map_model.begin(); it != map_model.end(); ++it)
{
Peak2D c;
c.setIntensity( it->getIntensity() );
c.setRT( it->getRT() );
c.setMZ( it->getMZ() );
c_map_model.push_back(c);
}
for (ConsensusMap::const_iterator it = map_scene.begin(); it != map_scene.end(); ++it)
{
Peak2D c;
c.setIntensity( it->getIntensity() );
c.setRT( it->getRT() );
c.setMZ( it->getMZ() );
c_map_scene.push_back(c);
}
run(c_map_model, c_map_scene, transformation);
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/MAPMATCHING/ConsensusMapNormalizerAlgorithmMedian.cpp | .cpp | 7,906 | 214 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Johannes Veit $
// $Authors: Johannes Junker $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/MAPMATCHING/ConsensusMapNormalizerAlgorithmMedian.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/ProgressLogger.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <OpenMS/METADATA/ProteinIdentification.h>
#include <boost/regex.hpp>
using namespace std;
namespace OpenMS
{
// Defaulted construction/destruction: the class holds no state of its own;
// all work happens in the member functions below.
ConsensusMapNormalizerAlgorithmMedian::ConsensusMapNormalizerAlgorithmMedian() = default;
ConsensusMapNormalizerAlgorithmMedian::~ConsensusMapNormalizerAlgorithmMedian() = default;
// Computes, for every sub-map (column) of the consensus map, the median
// intensity over all features passing the accession/description filters.
// @param map         consensus map whose column headers define the sub-maps
// @param medians     output: one median per sub-map (resized here)
// @param acc_filter  protein accession regex filter (see passesFilters_)
// @param desc_filter protein description regex filter (see passesFilters_)
// @return index of the sub-map with the most features (used as the scaling
//         reference by normalizeMaps); returns 0 if any sub-map has no
//         features left after filtering — in that case all medians are set
//         to 1.0, which makes the subsequent normalization a no-op.
// @throws Exception::ElementNotFound if the column headers are not indexed 0..n-1
Size ConsensusMapNormalizerAlgorithmMedian::computeMedians(const ConsensusMap & map, vector<double>& medians, const String& acc_filter, const String& desc_filter)
{
Size number_of_maps = map.getColumnHeaders().size();
vector<vector<double> > feature_int(number_of_maps);
medians.resize(number_of_maps);
// get map with most features, reserve space for feature_int (unequal vector lengths, 0-features omitted)
// NOTE: if key 0 is absent this iterator is end(); the loop below then throws
// at i == 0 before it is ever dereferenced.
ConsensusMap::ColumnHeaders::const_iterator map_with_most_features = map.getColumnHeaders().find(0);
UInt map_with_most_features_idx = 0;
for (UInt i = 0; i < number_of_maps; i++)
{
ConsensusMap::ColumnHeaders::const_iterator it = map.getColumnHeaders().find(i);
if (it == map.getColumnHeaders().end())
{
throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String(i));
}
else if (i >= feature_int.size())
{
// defensive: cannot happen, since feature_int was sized number_of_maps above
throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
String(i) + " exceeds map number");
}
feature_int[i].reserve(it->second.size);
if (it->second.size > map_with_most_features->second.size)
{
map_with_most_features = it;
map_with_most_features_idx = i;
}
}
// fill feature_int with intensities
Size pass_counter = 0;
ConsensusMap::ConstIterator cf_it;
for (cf_it = map.begin(); cf_it != map.end(); ++cf_it)
{
if (!passesFilters_(cf_it, map, acc_filter, desc_filter))
{
continue;
}
++pass_counter;
// distribute the handle intensities of this consensus feature to their sub-maps
ConsensusFeature::HandleSetType::const_iterator f_it;
for (f_it = cf_it->getFeatures().begin(); f_it != cf_it->getFeatures().end(); ++f_it)
{
feature_int[f_it->getMapIndex()].push_back(f_it->getIntensity());
}
}
OPENMS_LOG_INFO << endl << "Using " << pass_counter << "/" << map.size() << " consensus features for computing normalization coefficients" << endl << endl;
// do we have enough features passing the filters to compute the median for every map?
bool enough_features_left = true;
for (UInt j = 0; j < number_of_maps; j++)
{
//set all medians to 1.0 for now, so normalization will have no effect if we return
medians[j] = 1.0;
vector<double>& ints_j = feature_int[j];
if (ints_j.empty())
{
enough_features_left = false;
}
}
if (!enough_features_left)
{
OPENMS_LOG_WARN << endl << "Not enough features passing filters. Cannot compute normalization coefficients for all maps. Result will be unnormalized." << endl << endl;
return 0;
}
else
{
//compute medians
for (UInt j = 0; j < number_of_maps; j++)
{
vector<double>& ints_j = feature_int[j];
// Math::median may reorder ints_j; the vector is not used afterwards
medians[j] = Math::median(ints_j.begin(), ints_j.end());
}
}
return map_with_most_features_idx;
}
// Normalizes the feature intensities of all sub-maps of the consensus map
// using the per-sub-map medians computed by computeMedians().
// @param map         consensus map to normalize (modified in place)
// @param method      NM_SCALE: multiply so every sub-map's median matches the
//                    median of the sub-map with the most features;
//                    NM_SHIFT: add so every sub-map's median matches the
//                    largest median (avoids negative intensities)
// @param acc_filter  protein accession regex filter (see passesFilters_)
// @param desc_filter protein description regex filter (see passesFilters_)
// If computeMedians could not compute all medians, they are all 1.0 and this
// function effectively leaves the intensities unchanged.
void ConsensusMapNormalizerAlgorithmMedian::normalizeMaps(ConsensusMap & map, NormalizationMethod method, const String& acc_filter, const String& desc_filter)
{
if (method == NM_SHIFT)
{
OPENMS_LOG_WARN << endl << "WARNING: normalization using median shifting is not recommended for regular log-normal MS data. Use this only if you know exactly what you're doing!" << endl << endl;
}
ProgressLogger progresslogger;
progresslogger.setLogType(ProgressLogger::CMD);
progresslogger.startProgress(0, map.size(), "normalizing maps");
vector<double> medians;
Size index_of_largest_map = computeMedians(map, medians, acc_filter, desc_filter);
// The NM_SHIFT target (index of the largest median) is loop-invariant, so
// determine it once up front instead of recomputing it for every single
// feature handle (previously O(#handles * #maps)).
Size max_median_index = 0;
for (Size i = 1; i < medians.size(); ++i)
{
if (medians[i] > medians[max_median_index])
{
max_median_index = i;
}
}
for (ConsensusMap::Iterator cf_it = map.begin(); cf_it != map.end(); ++cf_it)
{
progresslogger.setProgress(cf_it - map.begin());
ConsensusFeature::HandleSetType::const_iterator f_it;
for (f_it = cf_it->getFeatures().begin(); f_it != cf_it->getFeatures().end(); ++f_it)
{
Size map_index = f_it->getMapIndex();
if (method == NM_SCALE)
{
// scale to median of map with largest number of features
f_it->asMutable().setIntensity(f_it->getIntensity() * medians[index_of_largest_map] / medians[map_index]);
}
else // method == NM_SHIFT
{
// shift to median of map with largest median in order to avoid negative intensities
f_it->asMutable().setIntensity(f_it->getIntensity() + medians[max_median_index] - medians[map_index]);
}
}
}
progresslogger.endProgress();
}
// Returns true if the consensus feature at cf_it passes both regex filters.
// Filter semantics: an empty filter — or a filter whose regex matches the
// empty string (i.e. accepts anything) — lets every feature pass, even
// unidentified ones. Otherwise at least one peptide hit must reference a
// protein accession matching acc_filter, and (if desc_filter is non-trivial)
// a protein hit whose description matches desc_filter must exist.
// NOTE: the regexes are recompiled on every call; callers invoke this once
// per consensus feature.
bool ConsensusMapNormalizerAlgorithmMedian::passesFilters_(ConsensusMap::ConstIterator cf_it, const ConsensusMap& map, const String& acc_filter, const String& desc_filter)
{
boost::regex acc_regexp(acc_filter);
boost::regex desc_regexp(desc_filter);
boost::cmatch m;
// regex_search("") tests whether the filter matches the empty string,
// i.e. whether it is a match-everything filter
if ((acc_filter.empty() || boost::regex_search("", m, acc_regexp)) &&
(desc_filter.empty() || boost::regex_search("", m, desc_regexp)))
{
// feature passes (even if it has no identification!)
return true;
}
const vector<ProteinIdentification>& prot_ids = map.getProteinIdentifications();
const PeptideIdentificationList& pep_ids = cf_it->getPeptideIdentifications();
for (PeptideIdentificationList::const_iterator p_it = pep_ids.begin(); p_it != pep_ids.end(); ++p_it)
{
const vector<PeptideHit>& hits = p_it->getHits();
for (vector<PeptideHit>::const_iterator h_it = hits.begin(); h_it != hits.end(); ++h_it)
{
const set<String>& accs = h_it->extractProteinAccessionsSet();
for (set<String>::const_iterator acc_it = accs.begin(); acc_it != accs.end(); ++acc_it)
{
// does accession match?
if (!(acc_filter.empty() ||
boost::regex_search("", m, acc_regexp) ||
boost::regex_search(acc_it->c_str(), m, acc_regexp)))
{
//no
continue;
}
// yes. does description match, too?
if (desc_filter.empty() || boost::regex_search("", m, desc_regexp))
{
return true;
}
// look up the protein hit for this accession to test its description
for (vector<ProteinIdentification>::const_iterator pr_it = prot_ids.begin(); pr_it != prot_ids.end(); ++pr_it)
{
// NOTE(review): const_cast needed because findHit is a non-const member
// in the ProteinIdentification API; it does not modify the object here
std::vector<ProteinHit>::const_iterator pr_hit = const_cast<ProteinIdentification&>(*pr_it).findHit(*acc_it);
if (pr_hit != pr_it->getHits().end())
{
const char* desc = pr_hit->getDescription().c_str();
if (boost::regex_search(desc, m, desc_regexp))
{
return true;
}
}
}
}
}
}
return false;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/DECHARGING/ILPDCWrapper.cpp | .cpp | 20,072 | 519 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/DECHARGING/ILPDCWrapper.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/DATASTRUCTURES/ChargePair.h>
#include <OpenMS/DATASTRUCTURES/LPWrapper.h>
#include <OpenMS/DATASTRUCTURES/MassExplainer.h>
#include <OpenMS/SYSTEM/StopWatch.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <unordered_map>
#include <fstream>
#include <map>
namespace OpenMS
{
// Defaulted construction/destruction: ILPDCWrapper carries no state of its own.
ILPDCWrapper::ILPDCWrapper() = default;
ILPDCWrapper::~ILPDCWrapper() = default;
/**
  @brief Solves the decharging edge-selection problem by partitioning the putative edge
         graph into connected components ("cliques"), binning them, and solving one ILP per bin.

  @param fm            Feature map the edges (pairs) refer to.
  @param pairs         Putative edges; reordered in place so that edges of one component are
                       contiguous, and marked 'active' by the ILP solutions.
  @param verbose_level Amount of progress/debug logging.
  @return Sum of the ILP objective values over all bins, or -1 for an empty feature map.
*/
double ILPDCWrapper::compute(const FeatureMap& fm, PairsType& pairs, Size verbose_level) const
{
  if (fm.empty())
  {
    OPENMS_LOG_INFO << "ILPDC wrapper received empty feature list. Nothing to compute! Exiting..." << std::endl;
    return -1;
  }

  PairsType pairs_clique_ordered;
  pairs_clique_ordered.reserve(pairs.size());
  typedef std::vector<std::pair<Size, Size> > BinType;
  BinType bins; // half-open [start, end) index ranges into 'pairs_clique_ordered'; one ILP per bin

  // check number of components for complete putative edge graph (usually not all will be set to 'active' during ILP):
  {
    //
    // find groups of edges (connected components, via incremental merging)
    //
    Size group_count(0);
    std::map<Size, Size> f2g; // feature id to connected group
    std::map<Size, std::set<Size> > g2pairs; // group id to all pairs involved
    std::map<Size, std::set<Size> > g2f; // group id to all features involved
    for (Size i = 0; i < pairs.size(); ++i)
    {
      Size f1 = pairs[i].getElementIndex(0);
      Size f2 = pairs[i].getElementIndex(1);
      if ((f2g.find(f1) != f2g.end()) && (f2g.find(f2) != f2g.end())) // edge connects two distinct groups
      {
        Size group1 = f2g[f1];
        Size group2 = f2g[f2];
        if (group2 != group1)
        {
          // point group2 to group1 (merge the smaller bookkeeping into group1)
          g2pairs[group1].insert(g2pairs[group2].begin(), g2pairs[group2].end());
          g2pairs.erase(group2);
          for (std::set<Size>::const_iterator its = g2f[group2].begin(); its != g2f[group2].end(); ++its)
          {
            g2f[group1].insert(*its);
            f2g[*its] = group1; // reassign features of group2 to group1
          }
          g2f.erase(group2);
        }
      }
      else if ((f2g.find(f1) != f2g.end()) && !(f2g.find(f2) != f2g.end())) // only f1 is part of a group
      {
        Size group1 = f2g[f1];
        f2g[f2] = group1;
        g2f[group1].insert(f2);
      }
      else if (!(f2g.find(f1) != f2g.end()) && f2g.find(f2) != f2g.end()) // only f2 is part of a group
      {
        Size group2 = f2g[f2];
        f2g[f1] = group2;
        g2f[group2].insert(f1);
      }
      else // neither feature has a group: make a new one
      {
        Size group = ++group_count;
        f2g[f1] = group;
        f2g[f2] = group;
        g2f[group].insert(f1);
        g2f[group].insert(f2);
      }
      // append current edge to common group
      g2pairs[f2g[f1]].insert(i);
    }

    if (g2pairs.size() != g2f.size())
    {
      throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Clique construction failed! Unequal number of groups produced!", String(g2pairs.size()) + "!=" + String(g2f.size()));
    }

    std::map<Size, Size> hist_component_sum;
    // now walk though groups and see the size:
    for (std::map<Size, std::set<Size> >::const_iterator it = g2f.begin(); it != g2f.end(); ++it)
    {
      ++hist_component_sum[it->second.size()]; // e.g. component 2 has size 4; thus increase count for size 4
    }
    if (verbose_level > 1)
    {
      OPENMS_LOG_INFO << "Components:\n";
      // size-1 components (features without any edge) never enter g2f, hence '?'
      OPENMS_LOG_INFO << "  Size 1 occurs ?x\n";
      for (std::map<Size, Size>::const_iterator it = hist_component_sum.begin(); it != hist_component_sum.end(); ++it)
      {
        OPENMS_LOG_INFO << "  Size " << it->first << " occurs " << it->second << "x\n";
      }
    }

    /* partition the cliques into bins, one given to the ILP at a time */
    UInt pairs_per_bin = 1000;           // soft cap of edges per regular bin
    UInt big_clique_bin_threshold = 200; // cliques above this size get a bin of their own
    Size start(0);
    Size count(0);
    for (std::map<Size, std::set<Size> >::const_iterator it = g2pairs.begin(); it != g2pairs.end(); ++it)
    {
      Size clique_size = it->second.size();
      if (count > pairs_per_bin || clique_size > big_clique_bin_threshold)
      {
        if (count > 0) // either bin is full or we have to close it due to big clique
        {
          if (verbose_level > 2)
            OPENMS_LOG_INFO << "Overstepping border of " << pairs_per_bin << " by " << SignedSize(count - pairs_per_bin) << " elements!\n";
          bins.push_back(std::make_pair(start, pairs_clique_ordered.size()));
          start = pairs_clique_ordered.size();
          count = 0;
        }
        if (clique_size > big_clique_bin_threshold) // extra bin for this big clique
        {
          for (std::set<Size>::const_iterator i_p = it->second.begin(); i_p != it->second.end(); ++i_p)
          {
            pairs_clique_ordered.push_back(pairs[*i_p]);
          }
          if (verbose_level > 2)
            OPENMS_LOG_INFO << "Extra bin for big clique (" << clique_size << ") prepended to schedule\n";
          bins.insert(bins.begin(), std::make_pair(start, pairs_clique_ordered.size()));
          start = pairs_clique_ordered.size();
          continue; // next clique (this one is already processed)
        }
      }
      count += clique_size;
      for (std::set<Size>::const_iterator i_p = it->second.begin(); i_p != it->second.end(); ++i_p)
      {
        pairs_clique_ordered.push_back(pairs[*i_p]);
      }
    }
    if (count > 0) // close the last (partial) bin
      bins.push_back(std::make_pair(start, pairs_clique_ordered.size()));
  }

  if (pairs_clique_ordered.size() != pairs.size())
  {
    throw Exception::InvalidSize(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, pairs_clique_ordered.size() - pairs.size(), "pairs_clique_ordered size does not match pairs size");
  }
  /* swap pairs, such that edges are order by cliques (so we can make clean cuts) */
  pairs.swap(pairs_clique_ordered);

  //PairsType pt2 = pairs;
  StopWatch time1;
  time1.start();
  // split problem into slices and have each one solved by the ILPS
  double score = 0;
  // OMP currently causes spurious segfaults in Release mode; OMP fix applied, however: disable if problem persists
  //#ifdef _OPENMP
  //#pragma omp parallel for schedule(dynamic, 1), reduction(+: score)
  //#endif
  for (SignedSize i = 0; i < static_cast<SignedSize>(bins.size()); ++i)
  {
    // BUGFIX: accumulate the slice objectives (matches the 'reduction(+: score)'
    // of the disabled OMP pragma); the serial code previously overwrote 'score'
    // each iteration, so only the last bin's objective was reported/returned.
    score += computeSlice_(fm, pairs, bins[i].first, bins[i].second, verbose_level);
  }
  time1.stop();
  OPENMS_LOG_INFO << " Branch and cut took " << time1.getClockTime() << " seconds, "
                  << " with objective value: " << score << "."
                  << std::endl;

  return score;
}
// Registers ILP column index 'v' as one of the edge variables adjacent to the
// feature variant identified by the key 'rota_l' (feature id + adduct + charge).
void ILPDCWrapper::updateFeatureVariant_(FeatureType_& f_set, const String& rota_l, const Size& v) const
{
  auto& adjacent_edges = f_set[rota_l]; // creates an empty set for a new variant
  adjacent_edges.insert(v);
}
// Builds and solves the ILP for one bin of edges (pairs[margin_left, margin_right)).
// Layout invariant: edge variables are added FIRST, so ILP column i corresponds to
// pairs[margin_left + i] -- the readback loop at the bottom relies on this.
// Returns the ILP objective value; marks selected edges 'active' in 'pairs'.
double ILPDCWrapper::computeSlice_(const FeatureMap& fm,
PairsType& pairs,
const PairsIndex margin_left,
const PairsIndex margin_right,
const Size /* verbose_level */) const
{
// feature --> variants set (with scores)
typedef std::map<Size, FeatureType_> r_type;
r_type features;
LPWrapper build;
build.setObjectiveSense(LPWrapper::MAX); // maximize
// add ALL edges first. Their result is what is interesting to us later
for (PairsIndex i = margin_left; i < margin_right; ++i)
{
// log scores are good for addition in ILP - but they are < 0, thus not suitable for maximizing
// ... so we just add normal probabilities...
double score = exp(getLogScore_(pairs[i], fm));
pairs[i].setEdgeScore(score * pairs[i].getEdgeScore()); // multiply with preset score
// create the column representing the edge (binary: edge active or not)
Int index = build.addColumn();
build.setColumnBounds(index, 0, 1, LPWrapper::DOUBLE_BOUNDED);
build.setColumnType(index, LPWrapper::INTEGER); // integer variable
build.setObjective(index, pairs[i].getEdgeScore());
// create feature variants set: register this edge under both of its endpoint
// variants (feature id + adduct composition + charge forms the variant key)
String rota_l = String(pairs[i].getElementIndex(0)) + pairs[i].getCompomer().getAdductsAsString(0) + "_" + pairs[i].getCharge(0);
updateFeatureVariant_(features[pairs[i].getElementIndex(0)], rota_l, index);
String rota_r = String(pairs[i].getElementIndex(1)) + pairs[i].getCompomer().getAdductsAsString(1) + "_" + pairs[i].getCharge(1);
updateFeatureVariant_(features[pairs[i].getElementIndex(1)], rota_r, index);
}
// ADD Features (multiple variants of one feature are constrained to size=1)
Size count(0); // each entry is a feature idx ---> Map["AdductCgf"]->adjacentEdges
for (r_type::iterator it = features.begin(); it != features.end(); ++it)
{
++count;
std::vector<Int> columns;
std::vector<double> elements;
for (FeatureType_::const_iterator iti = it->second.begin(); iti != it->second.end(); ++iti)
{
// binary variable: this particular variant of the feature is chosen
Int index = build.addColumn();
build.setColumnBounds(index, 0, 1, LPWrapper::DOUBLE_BOUNDED);
build.setColumnType(index, LPWrapper::INTEGER); // integer variable
build.setObjective(index, 0); // obj value of feature must be a constant, as it must be neutral
columns.push_back(index);
elements.push_back(1.0);
/* allow connected edges only if this variant of the feature is chosen */
/* get adjacent edges */
// constraint: -sum(adjacent edges) + degree * variant >= 0,
// i.e. if the variant is off, none of its edges may be active
std::vector<Int> columns_e;
std::vector<double> elements_e;
for (std::set<Size>::const_iterator it_e = iti->second.begin(); it_e != iti->second.end(); ++it_e)
{
columns_e.push_back((Int) * it_e);
elements_e.push_back(-1.0);
}
columns_e.push_back((Int) index);
elements_e.push_back(iti->second.size()); // factor of variant is number of adjacent edges
String se = String("cv") + index;
build.addRow(columns_e, elements_e, se, 0, 10000, LPWrapper::LOWER_BOUND_ONLY);
}
String s = String("c") + count;
// only allow exactly one charge variant
build.addRow(columns, elements, s, 1, 1, LPWrapper::FIXED);
}
// solver configuration (cuts + heuristics for the MIP)
LPWrapper::SolverParam param;
param.enable_mir_cuts = true;
param.enable_cov_cuts = true;
param.enable_feas_pump_heuristic = true;
param.enable_binarization = false;
param.enable_clq_cuts = true;
param.enable_gmi_cuts = true;
param.enable_presolve = true;
build.solve(param);
// read back edge variables only (they occupy columns [0, #edges) -- see layout
// invariant above); a value near 1 means the edge was selected
for (UInt iColumn = 0; iColumn < margin_right - margin_left; ++iColumn)
{
double value = build.getColumnValue(iColumn);
if (fabs(value) > 0.5)
{
pairs[margin_left + iColumn].setActive(true);
}
else
{
// DEBUG
//std::cerr << " edge " << iColumn << " with " << value << "\n";
}
}
return build.getObjectiveValue();
}
// old version, slower, as ILP has different layout (i.e, the same as described in paper)
// One binary variable per edge; pairwise conflict constraints forbid selecting two
// edges that imply contradictory charge/adduct assignments for a shared feature.
double ILPDCWrapper::computeSliceOld_(const FeatureMap& fm,
PairsType& pairs,
const PairsIndex margin_left,
const PairsIndex margin_right,
const Size verbose_level) const
{
LPWrapper build;
build.setObjectiveSense(LPWrapper::MAX); // maximize
//------------------------------------objective function-----------------------------------------------
// find maximal objective value
double score_min = 10e10f, score_max = -10e10f;
// fill in objective values
std::ostringstream namebuf;
for (PairsIndex i = margin_left; i < margin_right; ++i)
{
// log scores are good for addition in ILP - but they are < 0, thus not suitable for maximizing
// ... so we just add normal probabilities...
double score = exp(getLogScore_(pairs[i], fm));
pairs[i].setEdgeScore(score * pairs[i].getEdgeScore()); // multiply with preset score
namebuf.str("");
namebuf << "x#" << i;
// create the new variable object (binary: edge active or not)
Int index = build.addColumn();
build.setColumnBounds(index, 0, 1, LPWrapper::DOUBLE_BOUNDED);
build.setColumnType(index, LPWrapper::INTEGER); // integer variable
build.setObjective(index, pairs[i].getEdgeScore());
// track score range (reported only; not used in the model)
if (score_min > score)
score_min = score;
if (score_max < score)
score_max = score;
// DEBUG:
//std::cerr << "MIP: edge#"<< i << " score: " << pairs[i].getEdgeScore() << " adduct:" << pairs[i].getCompomer().getAdductsAsString() << "\n";
}
if (verbose_level > 2)
OPENMS_LOG_INFO << "score_min: " << score_min << " score_max: " << score_max << "\n";
//------------------------------------adding constraints--------------------------------------------------
// conflict_idx counts conflicts per sharing pattern (left-left, right-right,
// right-left, left-right) for the statistics printed below
bool is_conflicting;
std::vector<int> conflict_idx(4);
for (PairsIndex i = margin_left; i < margin_right; ++i)
{
const Compomer& ci = pairs[i].getCompomer();
// TODO: only go until next clique...
for (PairsIndex j = i + 1; j < margin_right; ++j)
{
const Compomer& cj = pairs[j].getCompomer();
is_conflicting = false;
// add pairwise constraints (one for each two conflicting ChargePairs)
// if features are identical they must have identical charges (because any single
// feature can only have one unique charge)
//outgoing edges (from one feature)
if (pairs[i].getElementIndex(0) == pairs[j].getElementIndex(0))
{
if ((pairs[i].getCharge(0) != pairs[j].getCharge(0)) ||
ci.isConflicting(cj, Compomer::LEFT, Compomer::LEFT))
{
is_conflicting = true;
++conflict_idx[0];
}
}
//incoming edges (into one feature)
if (pairs[i].getElementIndex(1) == pairs[j].getElementIndex(1))
{
if ((pairs[i].getCharge(1) != pairs[j].getCharge(1)) ||
ci.isConflicting(cj, Compomer::RIGHT, Compomer::RIGHT))
{
is_conflicting = true;
++conflict_idx[1];
}
}
//incoming/outgoing edge (from one feature)
if (pairs[i].getElementIndex(1) == pairs[j].getElementIndex(0))
{
if ((pairs[i].getCharge(1) != pairs[j].getCharge(0)) ||
ci.isConflicting(cj, Compomer::RIGHT, Compomer::LEFT))
{
is_conflicting = true;
++conflict_idx[2];
}
}
//incoming/outgoing edge (from one feature) -- this should only happen to additionally inferred edges
if (pairs[i].getElementIndex(0) == pairs[j].getElementIndex(1))
{
if ((pairs[i].getCharge(0) != pairs[j].getCharge(1)) ||
ci.isConflicting(cj, Compomer::LEFT, Compomer::RIGHT))
{
is_conflicting = true;
++conflict_idx[3];
}
}
if (is_conflicting)
{
// at most one of the two conflicting edges may be active: x_i + x_j <= 1
String s = String("C") + i + "." + j;
// Now build rows: two variables, with indices 'columns', factors '1', and 0-1 bounds.
std::vector<double> element(2, 1.0);
std::vector<int> columns;
columns.push_back(int(i - margin_left));
columns.push_back(int(j - margin_left));
build.addRow(columns, element, s, 0., 1., LPWrapper::DOUBLE_BOUNDED);
}
}
}
if (verbose_level > 2)
OPENMS_LOG_INFO << "node count: " << fm.size() << "\n";
if (verbose_level > 2)
OPENMS_LOG_INFO << "edge count: " << pairs.size() << "\n";
if (verbose_level > 2)
OPENMS_LOG_INFO << "constraint count: " << (conflict_idx[0] + conflict_idx[1] + conflict_idx[2] + conflict_idx[3]) << " = " << conflict_idx[0] << " + " << conflict_idx[1] << " + " << conflict_idx[2] << " + " << conflict_idx[3] << "(0 or inferred)" << std::endl;
//---------------------------------------------------------------------------------------------------------
//----------------------------------------Solving and querying result--------------------------------------
//---------------------------------------------------------------------------------------------------------
if (verbose_level > 0)
OPENMS_LOG_INFO << "Starting to solve..." << std::endl;
LPWrapper::SolverParam param;
param.enable_mir_cuts = true;
param.enable_cov_cuts = true;
param.enable_feas_pump_heuristic = true;
param.enable_binarization = false;
param.enable_clq_cuts = true;
param.enable_gmi_cuts = true;
param.enable_presolve = true;
StopWatch time1;
time1.start();
build.solve(param);
time1.stop();
if (verbose_level > 0)
OPENMS_LOG_INFO << " Branch and cut took " << time1.getClockTime() << " seconds, "
<< " with objective value: " << build.getObjectiveValue() << "."
<< " Status: " << (!build.getStatus() ? " Finished" : " Not finished")
<< std::endl;
// variable values: column k corresponds to pairs[margin_left + k]
UInt active_edges = 0;
std::unordered_map<String, Size> count_cmp;
for (Int iColumn = 0; iColumn < build.getNumberOfColumns(); ++iColumn)
{
double value = build.getColumnValue(iColumn);
if (fabs(value) > 0.5)
{
++active_edges;
pairs[margin_left + iColumn].setActive(true);
// for statistical purposes: collect compomer distribution
String cmp = pairs[margin_left + iColumn].getCompomer().getAdductsAsString();
++count_cmp[cmp];
}
else
{
// DEBUG
//std::cerr << " edge " << iColumn << " with " << value << "\n";
}
}
if (verbose_level > 2)
OPENMS_LOG_INFO << "Active edges: " << active_edges << " of overall " << pairs.size() << std::endl;
// NOTE(review): this loop is a no-op (its output statement is commented out);
// it is kept only as a hook for debugging the compomer distribution
for (std::unordered_map<String, Size>::const_iterator it = count_cmp.begin(); it != count_cmp.end(); ++it)
{
//std::cout << "Cmp " << it->first << " x " << it->second << "\n";
}
double opt_value = build.getObjectiveValue();
//objective function value of optimal(?) solution
return opt_value;
} // !compute_slice
/**
  @brief Log-score of a putative edge (charge pair).

  Default: the compomer's log-probability. Debug hook: if the environment variable
  'M' is set to a non-empty value, an alternative heuristic combining mass difference,
  RT difference and a charge-agreement bonus is used instead.

  @param pair Putative edge between two features.
  @param fm   Feature map the edge's element indices refer to.
  @return The (log-)score used to weight the edge in the ILP.
*/
double ILPDCWrapper::getLogScore_(const PairsType::value_type& pair, const FeatureMap& fm) const
{
  // fetch the debug switch once (the previous code called getenv("M") twice)
  const char* env_m = getenv("M");
  if (env_m == nullptr || *env_m == '\0')
  {
    // regular scoring: log-probability of the adduct combination
    return pair.getCompomer().getLogP();
  }

  // alternative (experimental) scoring, enabled via the 'M' environment variable
  double rt_diff = fabs(fm[pair.getElementIndex(0)].getRT() - fm[pair.getElementIndex(1)].getRT());
  // enhance correct charge: bonus if both endpoints keep their observed feature charge
  double charge_enhance = ((pair.getCharge(0) == fm[pair.getElementIndex(0)].getCharge())
                           &&
                           (pair.getCharge(1) == fm[pair.getElementIndex(1)].getCharge()))
                          ? 100 : 1;
  return charge_enhance * (1 / (pair.getMassDiff() + 1) + 1 / (rt_diff + 1));
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/DECHARGING/MetaboliteFeatureDeconvolution.cpp | .cpp | 59,019 | 1,324 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Fabian Aicheler $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/DECHARGING/MetaboliteFeatureDeconvolution.h>
#include <OpenMS/CHEMISTRY/EmpiricalFormula.h>
#include <OpenMS/CHEMISTRY/Element.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/DATASTRUCTURES/Adduct.h>
#include <OpenMS/DATASTRUCTURES/ChargePair.h>
#include <OpenMS/FORMAT/TextFile.h>
#include <OpenMS/CONCEPT/LogStream.h>
//DEBUG:
#include <fstream>
#include <map>
#undef DC_DEVEL
//#define DC_DEVEL 1
#ifdef DC_DEVEL
#include <OpenMS/ANALYSIS/DECHARGING/ChargeLadder.h>
#endif
using namespace std;
namespace OpenMS
{
/**
  @brief 1-sided Compomer for a feature

  Holds information on an explicit (with H+) 1-sided Compomer of a feature.
  Ordering and equality consider the formula string only.
*/
struct MetaboliteFeatureDeconvolution::CmpInfo_
{
  String s_comp;  ///< formula as String
  Size idx_cp{};  ///< index into compomer vector
  UInt side_cp{}; ///< side of parent compomer (LEFT or RIGHT)

  /// Default C'tor (in-class initializers zero the indices)
  CmpInfo_() = default;

  /// C'tor
  CmpInfo_(String& s, Size idx, UInt side) :
    s_comp(s), idx_cp(idx), side_cp(side) {}

  // Rule of Zero: the memberwise defaults are exactly what the previous
  // hand-written copy constructor and self-checking assignment operator did.
  CmpInfo_(const CmpInfo_& rhs) = default;
  CmpInfo_& operator=(const CmpInfo_& rhs) = default;

  /// Comparator (orders by formula string)
  bool operator<(const CmpInfo_& other) const
  {
    return s_comp < other.s_comp;
  }

  /// Equality (on formula string only)
  bool operator==(const CmpInfo_& other) const
  {
    return s_comp == other.s_comp;
  }
};
// Default constructor: registers all tool parameters (defaults, bounds, valid
// string sets) with the DefaultParamHandler base, then applies them via
// defaultsToParam_() (which triggers updateMembers_()).
MetaboliteFeatureDeconvolution::MetaboliteFeatureDeconvolution() :
DefaultParamHandler("MetaboliteFeatureDeconvolution"),
potential_adducts_(),
map_label_(),
map_label_inverse_(),
enable_intensity_filter_(false),
negative_mode_(false)
{
// charge range and per-analyte charge span
defaults_.setValue("charge_min", 1, "Minimal possible charge");
defaults_.setValue("charge_max", 3, "Maximal possible charge");
defaults_.setValue("charge_span_max", 3, "Maximal range of charges for a single analyte, i.e. observing q1=[5,6,7] implies span=3. Setting this to 1 will only find adduct variants of the same charge");
defaults_.setMinInt("charge_span_max", 1); // will only find adduct variants of the same charge
defaults_.setValue("q_try", "feature", "Try different values of charge for each feature according to the above settings ('heuristic' [does not test all charges, just the likely ones] or 'all' ), or leave feature charge untouched ('feature').");
defaults_.setValidStrings("q_try", {"feature","heuristic","all"});
// RT and mass tolerances for pairing features
defaults_.setValue("retention_max_diff", 1.0, "Maximum allowed RT difference between any two features if their relation shall be determined");
defaults_.setValue("retention_max_diff_local", 1.0, "Maximum allowed RT difference between between two co-features, after adduct shifts have been accounted for (if you do not have any adduct shifts, this value should be equal to 'retention_max_diff', otherwise it should be smaller!)");
defaults_.setValue("mass_max_diff", 0.05, "Maximum allowed mass tolerance per feature. Defines a symmetric tolerance window around the feature. When looking at possible feature pairs, the allowed feature-wise errors are combined for consideration of possible adduct shifts. For ppm tolerances, each window is based on the respective observed feature mz (instead of putative experimental mzs causing the observed one)!");
defaults_.setMinFloat("mass_max_diff", 0.0);
defaults_.setValue("unit", "Da", "Unit of the 'max_difference' parameter");
defaults_.setValidStrings("unit", {"Da","ppm"});
// adduct definitions; format is parsed in updateMembers_()
// Na+:0.1 , (2)H4H-4:0.1:-2:heavy
defaults_.setValue("potential_adducts", std::vector<std::string>{"H:+:0.4","Na:+:0.25","NH4:+:0.25","K:+:0.1","H-2O-1:0:0.05"}, "Adducts used to explain mass differences in format: 'Elements:Charge(+/-/0):Probability[:RTShift[:Label]]', i.e. the number of '+' or '-' indicate the charge ('0' if neutral adduct), e.g. 'Ca:++:0.5' indicates +2. Probabilites have to be in (0,1]. The optional RTShift param indicates the expected RT shift caused by this adduct, e.g. '(2)H4H-4:0:1:-3' indicates a 4 deuterium label, which causes early elution by 3 seconds. As fifth parameter you can add a label for every feature with this adduct. This also determines the map number in the consensus file. Adduct element losses are written in the form 'H-2'. All provided adducts need to have the same charge sign or be neutral! Mixing of adducts with different charge directions is only allowed as neutral complexes. For example, 'H-1Na:0:0.05' can be used to model Sodium gains (with balancing deprotonation) in negative mode.");
defaults_.setValue("max_neutrals", 1, "Maximal number of neutral adducts(q=0) allowed. Add them in the 'potential_adducts' section!");
// probability-based pruning of the adduct transition graph
defaults_.setValue("use_minority_bound", "true", "Prune the considered adduct transitions by transition probabilities.");
defaults_.setValidStrings("use_minority_bound", {"true","false"});
defaults_.setValue("max_minority_bound", 3, "Limits allowed adduct compositions and changes between compositions in the underlying graph optimization problem by introducing a probability-based threshold: the minority bound sets the maximum count of the least probable adduct (according to 'potential_adducts' param) within a charge variant with maximum charge only containing the most likely adduct otherwise. E.g., for 'charge_max' 4 and 'max_minority_bound' 2 with most probable adduct being H+ and least probable adduct being Na+, this will allow adduct compositions of '2(H+),2(Na+)' but not of '1(H+),3(Na+)'. Further, adduct compositions/changes less likely than '2(H+),2(Na+)' will be discarded as well.");
defaults_.setMinInt("max_minority_bound", 0);
defaults_.setValue("min_rt_overlap", 0.66, "Minimum overlap of the convex hull' RT intersection measured against the union from two features (if CHs are given)");
defaults_.setMinFloat("min_rt_overlap", 0);
defaults_.setMaxFloat("min_rt_overlap", 1);
// filters and mode switches
defaults_.setValue("intensity_filter", "false", "Enable the intensity filter, which will only allow edges between two equally charged features if the intensity of the feature with less likely adducts is smaller than that of the other feature. It is not used for features of different charge.");
defaults_.setValidStrings("intensity_filter", {"true","false"});
defaults_.setValue("negative_mode", "false", "Enable negative ionization mode.");
defaults_.setValidStrings("negative_mode", {"true","false"});
defaults_.setValue("default_map_label", "decharged features", "Label of map in output consensus file where all features are put by default", {"advanced"});
defaults_.setValue("verbose_level", 0, "Amount of debug information given during processing.", {"advanced"});
defaults_.setMinInt("verbose_level", 0);
defaults_.setMaxInt("verbose_level", 3);
defaultsToParam_();
}
void MetaboliteFeatureDeconvolution::updateMembers_()
{
map_label_.clear();
map_label_inverse_.clear();
map_label_inverse_[param_.getValue("default_map_label").toString()] = 0; // default virtual map (for unlabeled experiments)
map_label_[0] = param_.getValue("default_map_label").toString();
if (param_.getValue("q_try") == "feature")
q_try_ = CHARGEMODE::QFROMFEATURE;
else if (param_.getValue("q_try") == "heuristic")
q_try_ = CHARGEMODE::QHEURISTIC;
else
q_try_ = CHARGEMODE::QALL;
StringList potential_adducts_s = ListUtils::toStringList<std::string>(param_.getValue("potential_adducts"));
potential_adducts_.clear();
bool had_nonzero_RT = false; // adducts with RT-shift > 0 ?
// adducts might look like this:
// Element:Probability[:RTShift[:Label]]
double summed_probs = 0.0;
for (StringList::iterator it = potential_adducts_s.begin(); it != potential_adducts_s.end(); ++it)
{
// skip disabled adducts
if (it->trim().hasPrefix("#"))
continue;
StringList adduct;
it->split(':', adduct);
if (adduct.size() != 3 && adduct.size() != 4 && adduct.size() != 5)
{
String error = "MetaboliteFeatureDeconvolution::potential_adducts (" + (*it) + ") does not have three, four or five entries ('Elements:Charge:Probability' or 'Elements:Charge:Probability:RTShift' or 'Elements:Charge:Probability:RTShift:Label'), but " + String(adduct.size()) + " entries!";
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, error);
}
// determine probability
float prob = adduct[2].toFloat();
//OPENMS_LOG_WARN << "Adduct " << *it << " prob " << String(prob) << std::endl;
if (prob > 1.0 || prob <= 0.0)
{
String error = "MetaboliteFeatureDeconvolution::potential_adducts (" + (*it) + ") does not have a proper probability (" + String(prob) + ") in [0,1]!";
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, error);
}
if (adduct[1] != "0")//if neutral adduct, assume separate process to ionization -> better not count it for total probs, makes everything easier
{
summed_probs += prob;
}
//OPENMS_LOG_WARN << "Total prob" << String(summed_probs) << std::endl;
// RT Shift:
double rt_shift(0);
if (adduct.size() >= 4)
{
rt_shift = adduct[3].toDouble();
if (rt_shift != 0)
had_nonzero_RT = true;
}
// Label:
String label = "";
if (adduct.size() >= 5)
{
label = adduct[4].trim();
map_label_inverse_[label] = map_label_.size(); // add extra virtual map
map_label_[map_label_inverse_[label]] = label;
}
// determine charge of adduct (by # of '+' or '-')
Size charge_s_len = adduct[1].size();
Int pos_charge = charge_s_len - adduct[1].remove('+').size();
charge_s_len = adduct[1].size();
Int neg_charge = charge_s_len - adduct[1].remove('-').size();
if (pos_charge > 0 && neg_charge > 0)
{
String error = "MetaboliteFeatureDeconvolution::potential_adduct mixes charges for an adduct!";
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, error);
}
else if (pos_charge > 0)
{
EmpiricalFormula ef(adduct[0]);
ef.setCharge(pos_charge);
//getMonoWeight internally adds charge*proton_masses, we need to remove charge*H to get overall charge*electron loss.
//E.g., for H: M-(p+e)+p <-> M-e == H+
//E.g., for Na: Na -(p+e)+p <-> Na-e == Na+
ef -= EmpiricalFormula("H" + String(pos_charge));
potential_adducts_.push_back(Adduct(pos_charge, 1, ef.getMonoWeight(), adduct[0], log(prob), rt_shift, label));
}
else if (neg_charge > 0)
{
if (adduct[0] == "H-1")
{
potential_adducts_.push_back(Adduct(-neg_charge, 1, -Constants::PROTON_MASS_U, adduct[0], log(prob), rt_shift,label));
}
else
{
EmpiricalFormula ef(adduct[0]);
ef.setCharge(0);//ensures we get without additional protons, now just add electron masses // effectively subtract electron masses
potential_adducts_.push_back(Adduct((Int)-neg_charge, 1, ef.getMonoWeight() + Constants::ELECTRON_MASS_U * neg_charge, adduct[0], log(prob), rt_shift, label));
}
}
else if (adduct[1] == "0")//pos,neg == 0
{//getMonoWeight simple for Charge 0: sums individual atom monoisotopic weights
if ((Int)param_.getValue("max_neutrals") > 0)
{
EmpiricalFormula ef(adduct[0]);
ef.setCharge(0);
potential_adducts_.push_back(Adduct(ef.getCharge(), 1, ef.getMonoWeight(), adduct[0], log(prob), rt_shift, label));
}
else
{
continue;//not to be used anyway, don't add
}
}
else//adduct charge not +,- or 0
{
String error = "MetaboliteFeatureDeconvolution::potential_adduct charge must only contain '+','-' or '0'!";
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, error);
}
verbose_level_ = param_.getValue("verbose_level");
}
if (abs(1.0 - summed_probs) > 0.001)
{
String error = "MetaboliteFeatureDeconvolution::potential_adducts charged adduct probabilities do not sum up to 1.0!: " + String(summed_probs);
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, error);
}
// RT sanity check:
double rt_diff_max = param_.getValue("retention_max_diff");
double rt_diff_max_local = param_.getValue("retention_max_diff_local");
if (!had_nonzero_RT) // only 0 RT shift:
{
if (rt_diff_max != rt_diff_max_local)
{
OPENMS_LOG_WARN << "Parameters 'retention_max_diff' and 'retention_max_diff_local' are unequal, but no RT shift of adducts has been defined. Setting parameters to minimum of the two." << std::endl;
param_.setValue("retention_max_diff", std::min(rt_diff_max, rt_diff_max_local));
param_.setValue("retention_max_diff_local", std::min(rt_diff_max, rt_diff_max_local));
}
}
else // has RT shift:
{
if (rt_diff_max < rt_diff_max_local)
{
OPENMS_LOG_WARN << "Parameters 'retention_max_diff' is smaller than 'retention_max_diff_local'. This does not make sense! Setting 'retention_max_diff_local' to 'retention_max_diff'." << std::endl;
param_.setValue("retention_max_diff_local", rt_diff_max);
}
}
// intensity filter
enable_intensity_filter_ = (param_.getValue("intensity_filter") == "true" ? true : false);
}
/// Copy constructor: plain memberwise copy (base class state plus adduct list,
/// label maps and mode flags; no owned resources).
MetaboliteFeatureDeconvolution::MetaboliteFeatureDeconvolution(const MetaboliteFeatureDeconvolution& source) :
DefaultParamHandler(source),
potential_adducts_(source.potential_adducts_),
map_label_(source.map_label_),
map_label_inverse_(source.map_label_inverse_),
enable_intensity_filter_(source.enable_intensity_filter_),
negative_mode_(source.negative_mode_)
{
}
/// Assignment operator: memberwise copy after delegating to the base class.
// NOTE: the previous 'inline' specifier was removed. An inline member function
// whose only definition lives in this .cpp is not visible to other translation
// units, so any external caller would hit an undefined reference at link time.
MetaboliteFeatureDeconvolution& MetaboliteFeatureDeconvolution::operator=(const MetaboliteFeatureDeconvolution& source)
{
  if (&source == this) // self-assignment guard
  {
    return *this;
  }
  DefaultParamHandler::operator=(source);
  potential_adducts_ = source.potential_adducts_;
  map_label_ = source.map_label_;
  map_label_inverse_ = source.map_label_inverse_;
  enable_intensity_filter_ = source.enable_intensity_filter_;
  negative_mode_ = source.negative_mode_;
  return *this;
}
/// Destructor (defaulted: all members release their resources via their own destructors)
MetaboliteFeatureDeconvolution::~MetaboliteFeatureDeconvolution() = default;
/**
  @brief Annotate one feature (one end of an active edge) with its adduct composition and new charge.

  Writes the meta values 'map_idx', DC_CHARGE_ADDUCTS, 'adducts', 'dc_charge_adduct_mass',
  'is_backbone' and (if the charge changed) 'old_charge' onto @p fm_out[f_idx], and sets the
  feature's charge to @p new_q. If the feature was already annotated by another edge, the adduct
  formula must agree; otherwise an exception is thrown. At most one label per compomer side is
  allowed; a present label selects the map index via map_label_inverse_.

  @throws Exception::InvalidValue on conflicting adduct annotation or multiple labels
*/
void MetaboliteFeatureDeconvolution::annotate_feature_(FeatureMap& fm_out, Adduct& default_adduct, Compomer& c, const Size f_idx, const UInt comp_side, const Int new_q, const Int old_q)
{
  Feature& feature = fm_out[f_idx];
  // default map index; possibly overwritten below if this compomer side carries a label
  feature.setMetaValue("map_idx", 0);
  // formula of all adducts on the requested side of the compomer
  EmpiricalFormula adduct_formula(c.getAdductsAsString(comp_side));
  if (feature.metaValueExists(Constants::UserParam::DC_CHARGE_ADDUCTS))
  { // already annotated (via another active edge): annotations must be consistent
    if (adduct_formula.toString() != feature.getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS))
    {
      throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("Decharging produced inconsistent adduct annotation! [expected: ") + String(feature.getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS)) + "]", adduct_formula.toString());
    }
  }
  else
  { // first annotation: store the formula plus a human-readable adduct string
    feature.setMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS, adduct_formula.toString());
    Adduct adduct;
    StringList dc_new_adducts = ListUtils::create<String>(adduct.toAdductString(adduct_formula.toString(), new_q));
    feature.setMetaValue("adducts", dc_new_adducts);
  }
  feature.setMetaValue("dc_charge_adduct_mass", adduct_formula.getMonoWeight());
  // backbone := compomer side consists only of the default adduct (e.g. pure protonation)
  feature.setMetaValue("is_backbone", Size(c.isSingleAdduct(default_adduct, comp_side) ? 1 : 0));
  if (new_q != old_q)
  {
    feature.setMetaValue("old_charge", old_q);
  }
  feature.setCharge(new_q);
  StringList labels = c.getLabels(comp_side);
  if (labels.size() > 1)
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("Decharging produced inconsistent label annotation! [expected: a single label]"), ListUtils::concatenate(labels, ","));
  }
  if (!labels.empty())
  {
    feature.setMetaValue("map_idx", map_label_inverse_[labels[0]]);
  }
}
/**
  @brief Enumerate candidate edges between feature pairs that can be explained by adduct/charge differences.

  Sweeps over the RT-sorted map @p fm_out with an RT window ('retention_max_diff'), tests all charge
  combinations (q1, q2) within the 'charge_*' bounds, and queries a MassExplainer for compomers that
  explain the charge-scaled mass difference within the configured tolerance ('mass_max_diff'/'unit').
  Each fitting compomer is completed with the implicit @p default_adduct, post-filtered by local RT
  shift and by the intensity filter, and appended to @p feature_relation as a ChargePair. Non-default
  adducts on either edge side are recorded per feature in @p feature_adducts (input to inferMoreEdges_()).

  @param fm_out RT-sorted feature map (features are only read here)
  @param default_adduct implicit charge carrier (H+ in positive, H-1 in negative mode)
  @param feature_relation output: all putative edges
  @param feature_adducts output: per feature index, the set of non-default adduct annotations
  @throws Exception::InvalidValue if 'charge_min'/'charge_max' have mixed signs
  @throws Exception::InvalidParameter if 'unit' is neither 'Da' nor 'ppm'
*/
void MetaboliteFeatureDeconvolution::candidateEdges_(FeatureMap& fm_out, const Adduct& default_adduct, PairsType& feature_relation, std::map<Size, std::set<CmpInfo_> >& feature_adducts)
{
// fetch all tolerance / charge parameters once up front
bool is_neg = (param_.getValue("negative_mode") == "true" ? true : false);
Int q_min = param_.getValue("charge_min");
Int q_max = param_.getValue("charge_max");
Int q_span = param_.getValue("charge_span_max");
Size max_neutrals = param_.getValue("max_neutrals");
double rt_diff_max = param_.getValue("retention_max_diff");
double rt_diff_max_local = param_.getValue("retention_max_diff_local");
double mz_diff_max = param_.getValue("mass_max_diff");
double rt_min_overlap = param_.getValue("min_rt_overlap");
// search for most & least probable adduct to fix p threshold
double adduct_lowest_log_p = log(1.0);
double adduct_highest_log_p = log(0.0000000001);
for (Size i = 0; i < potential_adducts_.size(); ++i)
{
adduct_lowest_log_p = std::min(adduct_lowest_log_p, potential_adducts_[i].getLogProb());
adduct_highest_log_p = std::max(adduct_highest_log_p, potential_adducts_[i].getLogProb());
}
bool use_minority_bound = (param_.getValue("use_minority_bound") == "true" ? true : false);
Int max_minority_bound = param_.getValue("max_minority_bound");
double thresh_logp = log(1e-10); //We set a default threshold simply as a minimally small number
if (use_minority_bound)
{
// bound on compomer probability: at most 'max_minority_bound' low-probability adducts,
// the remaining charges filled with the most probable adduct
thresh_logp = adduct_lowest_log_p * max_minority_bound +
adduct_highest_log_p * std::max(q_max - max_minority_bound, 0);
}
// create mass difference list
OPENMS_LOG_INFO << "Generating Masses with threshold: " << thresh_logp << " ...\n";
//make it proof for charge 1..3 and charge -3..-1
if ((q_min * q_max) < 0)
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("Min and max charge switch charge signs! Please use same charge sign."), String(q_min)+" "+String(q_max));
}
// MassExplainer works on absolute charges; map the (possibly negative) range accordingly
int small, large;
small = q_min;
large = q_max;
//if both negative, we assume that it goes min->max: -3 -> -1, i.e. q_max would be -1
if ((q_min < 0) && (q_max < 0))
{
small = abs(q_max);
large = abs(q_min);
}
MassExplainer me(potential_adducts_, small, large, q_span, thresh_logp, max_neutrals);
me.compute();
OPENMS_LOG_INFO << "done\n";
// holds query results for a mass difference
MassExplainer::CompomerIterator md_s, md_e;
// sentinel with lowest possible probability; any real hit beats it
Compomer null_compomer(0, 0, -std::numeric_limits<double>::max());
SignedSize hits(0);
CoordinateType mz1, mz2, m1;
Size possibleEdges(0), overallHits(0);
// # compomer results that either passed or failed the feature charge constraints
Size no_cmp_hit(0), cmp_hit(0);
for (Size i_RT = 0; i_RT < fm_out.size(); ++i_RT) // ** RT-sweep line
{
mz1 = fm_out[i_RT].getMZ();
// pair the current feature with all later features within the RT window
for (Size i_RT_window = i_RT + 1
; (i_RT_window < fm_out.size())
&& ((fm_out[i_RT_window].getRT() - fm_out[i_RT].getRT()) <= rt_diff_max)
; ++i_RT_window)
{ // ** RT-window
// knock-out criterion first: RT overlap
// use sorted structure and use 2nd start--1st end / 1st start--2nd end
const Feature& f1 = fm_out[i_RT];
const Feature& f2 = fm_out[i_RT_window];
if (!(f1.getConvexHull().getBoundingBox().isEmpty() || f2.getConvexHull().getBoundingBox().isEmpty()))
{
// relative RT overlap = |intersection| / |union| of the two hulls' RT spans
double f_start1 = std::min(f1.getConvexHull().getBoundingBox().minX(), f2.getConvexHull().getBoundingBox().minX());
double f_start2 = std::max(f1.getConvexHull().getBoundingBox().minX(), f2.getConvexHull().getBoundingBox().minX());
double f_end1 = std::min(f1.getConvexHull().getBoundingBox().maxX(), f2.getConvexHull().getBoundingBox().maxX());
double f_end2 = std::max(f1.getConvexHull().getBoundingBox().maxX(), f2.getConvexHull().getBoundingBox().maxX());
double union_length = f_end2 - f_start1;
double intersect_length = std::max(0., f_end1 - f_start2);
if (intersect_length / union_length < rt_min_overlap)
continue;
}
// start guessing charges ...
mz2 = fm_out[i_RT_window].getMZ();
for (Int q1 = q_min; q1 <= q_max; ++q1) // ** q1
{
//We assume that ionization modes won't get mixed in pipeline ->
//detected features should have same charge sign as provided to decharger settings for positive mode.
//For negative mode, this requirement is relaxed.
if (!chargeTestworthy_(f1.getCharge(), q1, true))
continue;
m1 = mz1 * abs(q1);
// additionally: forbid q1 and q2 with distance greater than q_span
for (Int q2 = std::max(q_min, q1 - q_span + 1)
; (q2 <= q_max) && (q2 <= q1 + q_span - 1)
; ++q2)
{ // ** q2
//again, for negative mode relaxed, thus we consider the absolute of charge
if (!chargeTestworthy_(f2.getCharge(), q2, abs(f1.getCharge()) == abs(q1)))
continue;
++possibleEdges; // internal count, not vital
// Find possible adduct combinations.
// Masses and tolerances are multiplied with their charges to nullify charge influence on mass shift.
// Allows to remove compound mass M from both sides of compomer equation -> queried shift only due to different adducts.
// Tolerance must increase when looking at M instead of m/z, as error margins increase as well by multiplication.
CoordinateType naive_mass_diff = mz2 * abs(q2) - m1;
double abs_mass_diff;
if (param_.getValue("unit") == "Da")
{
abs_mass_diff = mz_diff_max * abs(q1) + mz_diff_max * abs(q2);
}
else if (param_.getValue("unit") == "ppm")
{
// For the ppm case, we multiply the respective experimental feature mz by its allowed ppm error before multiplication by charge.
// We look at the tolerance window with a simplified way: Just use the feature mz, and assume a symmetric window around it.
// Instead of answering the more complex/asymmetrical question: "which experimental mz can for given tolerance cause observed mz".
// (In the complex case we might have to consider different queries for different tolerance windows.)
// The expected error of this simplification is negligible:
// Assuming Y > X (X > Y is analog), given causative experimental mz Y and observed mz X with
// X = Y*(1 - d)
// for allowed tolerance d, the expected Error E between experimental mz and maximal mz in the tolerance window based on experimental mz is:
// E = (mz_exp - (mz_obs + max tolerance))/mz_exp = (Y - X*(1 + d))/Y = 1 - X*(1 + d)/Y = 1 - Y*(1 - d)*(1 + d)/Y = 1 - 1 - d*d = - d*d
// As d should be ppm sized, the error is something around 10 to the power of minus 12.
abs_mass_diff = mz1 * mz_diff_max * 1e-6 * abs(q1) + mz2 * mz_diff_max * 1e-6 * abs(q2);
}
else
{
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "WARNING! Invalid tolerance unit! " + param_.getValue("unit").toString() + "\n");
}
//abs charge "3" to abs charge "1" -> simply invert charge delta for negative case?
hits = me.query(q2 - q1, naive_mass_diff, abs_mass_diff, thresh_logp, md_s, md_e);
OPENMS_PRECONDITION(hits >= 0, "MetaboliteFeatureDeconvolution querying #hits got negative result!");
overallHits += hits;
// choose most probable hit (TODO think of something clever here)
// for now, we take the one that has highest p in terms of the compomer structure
if (hits > 0)
{
Compomer best_hit = null_compomer;
for (; md_s != md_e; ++md_s)
{
// post-filter hits by local RT
if (fabs(f1.getRT() - f2.getRT() + md_s->getRTShift()) > rt_diff_max_local)
continue;
//std::cout << md_s->getAdductsAsString() << " neg: " << md_s->getNegativeCharges() << " pos: " << md_s->getPositiveCharges() << " p: " << md_s->getLogP() << " \n";
int left_charges, right_charges;
if (is_neg)
{
left_charges = -md_s->getPositiveCharges();
right_charges = -md_s->getNegativeCharges();//for negative, a pos charge means either losing an H-1 from the left (decreasing charge) or the Na case. (We do H-1Na as neutral, because of the pos, neg charges)
}
else
{
left_charges = md_s->getNegativeCharges();//for positive mode neutral switches still have to fulfill requirement that they have at most charge as each side
right_charges = md_s->getPositiveCharges();
}
if ( // compomer fits charge assignment of left & right feature. doesn't consider charge sign switch over span!
(abs(q1) >= abs(left_charges)) && (abs(q2) >= abs(right_charges)))
{
// compomer has better probability
if (best_hit.getLogP() < md_s->getLogP())
best_hit = *md_s;
/** testing: we just add every explaining edge
- a first estimate shows that 90% of hits are of |1|
- the remaining 10% have |2|, so the additional overhead is minimal
**/
Compomer cmp = me.getCompomerById(md_s->getID());
if (is_neg)
{
left_charges = -cmp.getPositiveCharges();
right_charges = -cmp.getNegativeCharges();
}
else
{
left_charges = cmp.getNegativeCharges();
right_charges = cmp.getPositiveCharges();
}
//this block should only be of interest if we have something multiply charges instead of protonation or deprotonation
if (((q1 - left_charges) % default_adduct.getCharge() != 0) ||
((q2 - right_charges) % default_adduct.getCharge() != 0))
{
OPENMS_LOG_WARN << "Cannot add enough default adduct (" << default_adduct.getFormula() << ") to exactly fit feature charge! Next...)\n";
continue;
}
// number of implicit default adducts needed on each side to reach the assumed charges
int hc_left = (q1 - left_charges) / default_adduct.getCharge();//this should always be positive! check!!
int hc_right = (q2 - right_charges) / default_adduct.getCharge();//this should always be positive! check!!
if (hc_left < 0 || hc_right < 0)
{
throw Exception::Postcondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "WARNING!!! implicit number of default adduct is negative!!! left:" + String(hc_left) + " right: " + String(hc_right) + "\n");
}
// intensity constraint:
// no edge is drawn if low-prob feature has higher intensity
if (!intensityFilterPassed_(q1, q2, cmp, f1, f2))
continue;
// get non-default adducts of this edge
Compomer cmp_stripped(cmp.removeAdduct(default_adduct));
// save new adduct candidate
if (!cmp_stripped.getComponent()[Compomer::LEFT].empty())
{
String tmp = cmp_stripped.getAdductsAsString(Compomer::LEFT);
CmpInfo_ cmp_left(tmp, feature_relation.size(), Compomer::LEFT);
feature_adducts[i_RT].insert(cmp_left);
}
if (!cmp_stripped.getComponent()[Compomer::RIGHT].empty())
{
String tmp = cmp_stripped.getAdductsAsString(Compomer::RIGHT);
CmpInfo_ cmp_right(tmp, feature_relation.size(), Compomer::RIGHT);
feature_adducts[i_RT_window].insert(cmp_right);
}
// add implicit default adduct (H+ or H-) (if != 0)
if (hc_left > 0)
{
cmp.add(default_adduct * hc_left, Compomer::LEFT);
}
if (hc_right > 0)
{
cmp.add(default_adduct * hc_right, Compomer::RIGHT);
}
// record the edge; residual mass error is stored alongside
ChargePair cp(i_RT, i_RT_window, q1, q2, cmp, naive_mass_diff - md_s->getMass(), false);
feature_relation.push_back(cp);
}
} // ! hits loop
if (best_hit == null_compomer)
{
//std::cout << "MetaboliteFeatureDeconvolution.h:: could find no compomer complying with assumed q1 and q2 values!\n with q1: " << q1 << " q2: " << q2 << "\n";
++no_cmp_hit;
}
else
{
++cmp_hit;
}
}
} // q2
} // q1
} // RT-window
} // RT sweep line
OPENMS_LOG_INFO << no_cmp_hit << " of " << (no_cmp_hit + cmp_hit) << " valid net charge compomer results did not pass the feature charge constraints\n";
// augment edge set with adducts supported by neighboring edges
inferMoreEdges_(feature_relation, feature_adducts);
OPENMS_LOG_INFO << "Found " << feature_relation.size() << " putative edges (of " << possibleEdges << ")"
<< " and avg hit-size of " << (1.0 * overallHits / feature_relation.size())
<< std::endl;
}
//@}
/**
  @brief Main decharging routine: assigns charges/adducts to features and groups them into cliques.

  Pipeline: sort @p fm_in by position into @p fm_out, build candidate edges (candidateEdges_()),
  select a consistent subset via ILP (ILPDCWrapper), annotate the features of every active edge
  (annotate_feature_()), merge connected features into ConsensusFeatures (cliques), drop cliques
  without a backbone feature, and finally add every unpaired feature as a singleton consensus.

  @param fm_in input feature map (not modified)
  @param fm_out output: copy of fm_in, sorted, with charge/adduct annotations
  @param cons_map output: charge cliques (one ConsensusFeature per group, plus singletons)
  @param cons_map_p output: one ConsensusFeature per active edge (feature pairs)
*/
void MetaboliteFeatureDeconvolution::compute(const FeatureMap& fm_in, FeatureMap& fm_out, ConsensusMap& cons_map, ConsensusMap& cons_map_p)
{
bool is_neg = (param_.getValue("negative_mode") == "true" ? true : false);
ConsensusMap cons_map_p_neg; // tmp
cons_map = ConsensusMap();
cons_map_p = ConsensusMap();
// sort by RT and then m/z
fm_out = fm_in;
fm_out.sortByPosition();
fm_out.applyMemberFunction(&UniqueIdInterface::ensureUniqueId);
// keep a pristine copy: singletons are later restored from it
FeatureMap fm_out_untouched = fm_out;
Adduct default_adduct;
if (is_neg)
{
//for negative mode, the default adduct should be deprotonation (added by the user)
default_adduct = Adduct(-1, 1, -Constants::PROTON_MASS_U, "H-1", log(1.0),0);
// log-probability of the default adduct is log(1.0), i.e. it is taken as certain
}
else
{
default_adduct = Adduct(1, 1, Constants::PROTON_MASS_U, "H1", log(1.0),0);
}
// edges
PairsType feature_relation;
// for each feature, hold the explicit adduct type induced by edges
std::map<Size, std::set<CmpInfo_> > feature_adducts;
candidateEdges_(fm_out, default_adduct, feature_relation, feature_adducts);
if (!feature_relation.empty())
{
// -------------------------- //
// ** compute ILP solution ** //
// -------------------------- //
// forward set of putative edges to ILP
ILPDCWrapper lp_wrapper;
// compute best solution (this will REORDER elements on feature_relation[] !) - do not rely on order afterwards!
double ilp_score = lp_wrapper.compute(fm_out, feature_relation, this->verbose_level_);
OPENMS_LOG_INFO << "ILP score is: " << ilp_score << std::endl;
}
// prepare output consensusMaps
cons_map.setProteinIdentifications(fm_out.getProteinIdentifications());
cons_map_p.setProteinIdentifications(fm_out.getProteinIdentifications());
// -------------------------- //
// ** DEBUG ** //
// -------------------------- //
// statistics only: charge agreement and score distributions of active/dead edges
std::map<Size, Size> features_aes, features_des; // count of adjacent active and dead edges
UInt agreeing_fcharge = 0;
std::vector<Size> f_idx_v(2);
Size aedges = 0;
StringList scores_clean_edge, scores_dirty_edge;
StringList scores_clean_edge_idx, scores_dirty_edge_idx;
EmpiricalFormula ef_clean_edge, ef_dirty_edge;
// find # edges (active and dead) for each feature
for (Size i = 0; i < feature_relation.size(); ++i)
{
Size f0_idx = feature_relation[i].getElementIndex(0);
Size f1_idx = feature_relation[i].getElementIndex(1);
if (feature_relation[i].isActive())
{
++features_aes[f0_idx];
++features_aes[f1_idx];
}
else
{
++features_des[f0_idx];
++features_des[f1_idx];
}
}
for (Size i = 0; i < feature_relation.size(); ++i)
{
f_idx_v[0] = feature_relation[i].getElementIndex(0);
f_idx_v[1] = feature_relation[i].getElementIndex(1);
Compomer c = feature_relation[i].getCompomer();
if (!feature_relation[i].isActive())
{
continue;
}
++aedges;
bool dirty = false;
for (Size f_idx = 0; f_idx < 2; ++f_idx)
{
// check if the local feature charges agree
if (fm_out[f_idx_v[f_idx]].getCharge() == feature_relation[i].getCharge((UInt)f_idx))
{
++agreeing_fcharge;
}
else
{
double rt_diff = fabs(fm_out[feature_relation[i].getElementIndex(0)].getRT() - fm_out[feature_relation[i].getElementIndex(1)].getRT());
if (verbose_level_ > 2)
{
OPENMS_LOG_WARN << "Conflict in f_Q! f_RT:" << fm_out[f_idx_v[f_idx]].getRT() << " f_MZ:" << fm_out[f_idx_v[f_idx]].getMZ() << " f_int:" << fm_out[f_idx_v[f_idx]].getIntensity()
<< " Q:" << fm_out[f_idx_v[f_idx]].getCharge() << " PredictedQ:" << feature_relation[i].getCharge((UInt)f_idx)
<< "[[ dRT: " << rt_diff << " dMZ: " << feature_relation[i].getMassDiff() << " score[" << i << "]:"
<< feature_relation[i].getEdgeScore() << " f#:" << fm_out[f_idx_v[f_idx]].getUniqueId() << " " << feature_relation[i].getCompomer().getAdductsAsString((UInt)f_idx)
<< "(a" << features_aes[f_idx_v[f_idx]] << ":d" << features_des[f_idx_v[f_idx]] << ") ]]\n";
}
dirty = true;
}
}
EmpiricalFormula ef(c.getAdductsAsString(Compomer::LEFT) + (c.getAdductsAsString(Compomer::RIGHT)));
// store score distribution:
if (!dirty)
{
scores_clean_edge.push_back(String(feature_relation[i].getEdgeScore()));
scores_clean_edge_idx.push_back(String(i));
ef_clean_edge += ef;
}
else
{
scores_dirty_edge.push_back(String(feature_relation[i].getEdgeScore()));
scores_dirty_edge_idx.push_back(String(i));
ef_dirty_edge += ef;
}
}
{
OPENMS_LOG_INFO << "Agreeing charges: " << agreeing_fcharge << "/" << (aedges * 2) << std::endl;
}
// END DEBUG
// ------------------------------ //
// ** collect related features ** //
// ------------------------------ //
//Can we toggle here to keep metaValues from previous iteration, and modify the consistency check further down to only accept the pair if it is in agreement to old one? Probably too late then already. But we might check the metaValue and reuse the consistency code further below for evaluation further above? Should be able to compare existing annotation to the stuff we put into putative feature_relation pairs before ILP computation itself.
// fresh start for meta annotation
for (Size i = 0; i < fm_out.size(); ++i)
{
if (fm_out[i].metaValueExists(Constants::UserParam::DC_CHARGE_ADDUCTS))
fm_out[i].removeMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS);
}
// write groups to consensusXML (with type="charge_groups")
// **find cliques from pairs
// find which featureIdxmaps to which consensusFeatureId
// if no mapping is found, make a new CF.
// if new pair spans two existing CFs -> merge CFs
typedef std::map<Size, Size> CliqueMap;
CliqueMap clique_register; // feature index -> index of its ConsensusFeature in cons_map
StringList scores;
StringList scores_e_active_idx;
for (Size i = 0; i < feature_relation.size(); ++i)
{
Size f0_idx = feature_relation[i].getElementIndex(0);
Size f1_idx = feature_relation[i].getElementIndex(1);
Int old_q0 = fm_out[f0_idx].getCharge();
Int old_q1 = fm_out[f1_idx].getCharge();
Int new_q0 = feature_relation[i].getCharge(0);
Int new_q1 = feature_relation[i].getCharge(1);
scores.push_back(String(feature_relation[i].getEdgeScore()));
if (feature_relation[i].isActive())
{
//
// annotate the affected features
// ... and check consistency
//
Compomer c = feature_relation[i].getCompomer();
// - left
annotate_feature_(fm_out, default_adduct, c, f0_idx, Compomer::LEFT, new_q0, old_q0);
// - right
annotate_feature_(fm_out, default_adduct, c, f1_idx, Compomer::RIGHT, new_q1, old_q1);
// pair-wise consensus (one CF per active edge) for cons_map_p
ConsensusFeature cf(fm_out[f0_idx]);
cf.setPeptideIdentifications(PeptideIdentificationList()); // delete ID's as they are added later again
cf.setQuality(0.0);
cf.setUniqueId();
cf.insert((UInt64) fm_out[f0_idx].getMetaValue("map_idx"), fm_out[f0_idx]);
cf.insert((UInt64) fm_out[f1_idx].getMetaValue("map_idx"), fm_out[f1_idx]);
//remove info not wanted in pair
std::vector<String> keys;
cf.getKeys(keys);
for (std::vector<String>::const_iterator it = keys.begin(); it != keys.end(); ++it)
{
cf.removeMetaValue(*it);
}
cf.setMetaValue("Old_charges", String(old_q0) + ":" + String(old_q1));
cf.setMetaValue("CP", String(fm_out[f0_idx].getCharge()) + "(" + String(fm_out[f0_idx].getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS)) + "):"
+ String(fm_out[f1_idx].getCharge()) + "(" + String(fm_out[f1_idx].getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS)) + ") "
+ String("Delta M: ") + feature_relation[i].getMassDiff()
+ String(" Score: ") + feature_relation[i].getEdgeScore());
//cf.computeDechargeConsensus(fm_out);
cons_map_p.push_back(cf);
//remove info not wanted in decharged consensus
cf.getKeys(keys);
for (std::vector<String>::const_iterator it = keys.begin(); it != keys.end(); ++it)
{
cf.removeMetaValue(*it);
}
//
// create cliques for decharge consensus features
//
SignedSize target_cf0 = -1, target_cf1 = -1;
// find the index of the ConsensusFeatures for the current pair
if (clique_register.count(f0_idx) > 0)
{
target_cf0 = clique_register[f0_idx];
}
if (clique_register.count(f1_idx) > 0)
{
target_cf1 = clique_register[f1_idx];
}
// seen both features for the first time
if ((target_cf0 == -1) &&
(target_cf1 == -1))
{ //** new ConsensusFeature required
cons_map.push_back(cf);
clique_register[f0_idx] = cons_map.size() - 1;
clique_register[f1_idx] = cons_map.size() - 1;
}
else if (target_cf0 != target_cf1)
{
if (target_cf0 == -1) //** add f0 to the already existing cf of f1
{
cons_map[target_cf1].insert((UInt64) fm_out[f0_idx].getMetaValue("map_idx"), fm_out[f0_idx]);
clique_register[f0_idx] = target_cf1;
//std::cout << "add: F" << f0_idx << " to " <<target_cf1 << " due to F" << f1_idx << "\n";
}
else if (target_cf1 == -1) //** add f1 to the already existing cf of f0
{
cons_map[target_cf0].insert((UInt64) fm_out[f1_idx].getMetaValue("map_idx"), fm_out[f1_idx]);
clique_register[f1_idx] = target_cf0;
//std::cout << "add: F" << f1_idx << " to " <<target_cf0 << " due to F" << f0_idx << "\n";
}
else //** conflict: the two elements of the pair already have separate CFs --> merge
{ // take every feature from second CF and: #1 put into first CF, #2 change registration with map
ConsensusFeature::HandleSetType hst = cons_map[target_cf1].getFeatures();
for (ConsensusFeature::HandleSetType::const_iterator it = hst.begin(); it != hst.end(); ++it) //** update cf_index
{
clique_register[fm_out.uniqueIdToIndex(it->getUniqueId())] = target_cf0;
}
// insert features from cf1 to cf0
cons_map[target_cf0].insert(hst);
// clear cf1; do NOT delete cf1 (will invalidate higher indices) - do that afterwards
cons_map[target_cf1].clear();
}
}
scores_e_active_idx.push_back(String(i));
}
} // !for feature_relation (i.e. edges)
// remove empty ConsensusFeatures from map
ConsensusMap cons_map_tmp(cons_map);
cons_map_tmp.clear(false); // keep other meta information (like ProteinIDs & Map)
for (ConsensusMap::Iterator it = cons_map.begin(); it != cons_map.end(); ++it)
{
// skip if empty
if (it->getFeatures().empty())
continue;
// skip if no backbone
Size backbone_count = 0;
ConsensusFeature::HandleSetType hst = it->getFeatures();
for (ConsensusFeature::HandleSetType::const_iterator it_h = hst.begin(); it_h != hst.end(); ++it_h) //** check if feature in CF has backbone
{
backbone_count += (Size)fm_out[fm_out.uniqueIdToIndex(it_h->getUniqueId())].getMetaValue("is_backbone");
}
if (backbone_count == 0)
{
for (ConsensusFeature::HandleSetType::const_iterator it_h = hst.begin(); it_h != hst.end(); ++it_h) //** remove cluster members from registry (they will become single features)
{
clique_register.erase(fm_out.uniqueIdToIndex(it_h->getUniqueId()));
}
continue;
}
// store element adducts
for (ConsensusFeature::HandleSetType::const_iterator it_h = hst.begin(); it_h != hst.end(); ++it_h)
{
if (fm_out[fm_out.uniqueIdToIndex(it_h->getUniqueId())].metaValueExists(Constants::UserParam::DC_CHARGE_ADDUCTS))
{
it->setMetaValue(String(it_h->getUniqueId()), fm_out[fm_out.uniqueIdToIndex(it_h->getUniqueId())].getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS));
}
// also add consensusID of group to all feature_relation
fm_out[fm_out.uniqueIdToIndex(it_h->getUniqueId())].setMetaValue(Constants::UserParam::ADDUCT_GROUP, String(it->getUniqueId()));
}
// store number of distinct charges
std::set<Int> charges;
for (ConsensusFeature::HandleSetType::const_iterator it_h = hst.begin(); it_h != hst.end(); ++it_h)
{
charges.insert(it_h->getCharge());
}
IntList i_charges;
for (std::set<Int>::const_iterator it_q = charges.begin(); it_q != charges.end(); ++it_q)
{
i_charges.push_back(*it_q);
}
it->setMetaValue("distinct_charges", i_charges);
it->setMetaValue("distinct_charges_size", i_charges.size());
cons_map_tmp.push_back(*it);
// set a centroid
cons_map_tmp.back().computeDechargeConsensus(fm_out);
cons_map_tmp.back().setMetaValue("pure_proton_features", backbone_count);
}
cons_map_tmp.swap(cons_map);
// Warning: from here on cons_map indices have changes --> clique_register[]'s values are not reliable any longer (keys are still good)
// include single features without a buddy!
Size singletons_count = 0;
for (Size i = 0; i < fm_out.size(); ++i)
{
// find the index of the ConsensusFeature for the current feature
if (clique_register.count(i) > 0)
continue;
// restore the pristine (pre-annotation) state for unpaired features
Feature f_single = fm_out_untouched[i];
if (f_single.getCharge() == 0)
{
f_single.setMetaValue("is_ungrouped_monoisotopic", 1);
}
else
{
f_single.setMetaValue("is_ungrouped_with_charge", 1);
}
if (is_neg)
{
//if negative mode, we report only negative charges. abs() for chains of negative mode dechargers.
f_single.setCharge(- abs(f_single.getCharge()));
}
//if negative mode, replace former positive charges with their negative sign version?
//If singleton, set dc_charge_adduct to default, and charge negative in neg mode?,
//first try without modifying charge, maybe already there.
// that should help get the correct mass for charged features at least.
//adduct mass can already be negative, will be multiplied in consensusfeature method with absolute charge
if (f_single.getCharge() != 0)
{
EmpiricalFormula default_ef(default_adduct.getFormula());
f_single.setMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS, (default_ef * abs(f_single.getCharge())).toString());
f_single.setMetaValue("dc_charge_adduct_mass", (default_adduct.getSingleMass() * abs(f_single.getCharge())));
}
fm_out[i] = f_single; // overwrite whatever DC has done to this feature!
ConsensusFeature cf(f_single);
cf.setQuality(0.0);
cf.setUniqueId();
cf.insert(0, f_single);
//remove info not wanted in decharged consensus
std::vector<String> keys;
cf.getKeys(keys);
for (std::vector<String>::const_iterator it = keys.begin(); it != keys.end(); ++it)
{
if (*it == "is_ungrouped_monoisotopic" || *it == "is_ungrouped_with_charge")
continue;
cf.removeMetaValue(*it);
}
// Need to set userParam Group output feature map features for singletons here
fm_out[i].setMetaValue(Constants::UserParam::ADDUCT_GROUP, String(cf.getUniqueId()));
cons_map.push_back(cf);
cons_map.back().computeDechargeConsensus(fm_out);//previously used fm_out_untouched. does fm_out also work?
//If computing decharge mz is 0 (meaning monoisotopic singleton), we instead use the feature mz
if (cons_map.back().getMZ() == 0)
{
cons_map.back().setMZ(f_single.getMZ());
}
++singletons_count;
}
if (verbose_level_ > 2)
{
OPENMS_LOG_INFO << "Single features without charge ladder: " << singletons_count << " of " << fm_out.size() << "\n";
}
// fill the header
//cons_map.getColumnHeaders()[0].filename = "TODO - take from FeatureMAP.getLoadedFilePath () ";
for (Size i = 0; i < map_label_.size(); ++i)
{
cons_map.getColumnHeaders()[i].size = fm_out.size();
cons_map.getColumnHeaders()[i].label = map_label_[i];
cons_map_p.getColumnHeaders()[i].size = fm_out.size();
cons_map_p.getColumnHeaders()[i].label = map_label_[i];
}
//see Proteomics Decharger for use of ChargeLadder for candidate missing features. Could e.g., be used to predict undetected features and look for them in mzML like FeatureFinderIdentification?!
cons_map_p.applyMemberFunction(&UniqueIdInterface::ensureUniqueId);
cons_map.applyMemberFunction(&UniqueIdInterface::ensureUniqueId);
/* post processing for eventual parameter optimization */
checkSolution_(cons_map);
return;
}
//NOTE(review): not fully verified/adapted for negative mode — confirm before enabling there
/**
  @brief Augment "simple" edges with alternative, more complex adduct explanations
  supported by neighboring edges, e.g. (.) -> (H+) might be augmented to (Na+) -> (H+Na+).

  For every existing edge, the non-default adducts seen on BOTH adjacent features
  (set intersection of @p feature_adducts entries) are added to a copy of the edge's
  compomer; the copy is refilled with the default adduct to match the original charges
  and appended to @p edges. New edges get a fixed score of 0.99.

  @param edges in/out: edge list; inferred edges are appended (existing entries untouched)
  @param feature_adducts per feature index, the adduct annotations collected by candidateEdges_()
  @throws Exception::InvalidValue if an inferred compomer violates the original charges
*/
void MetaboliteFeatureDeconvolution::inferMoreEdges_(PairsType& edges, std::map<Size, std::set<CmpInfo_> >& feature_adducts)
{
// reconstruct the implicit charge carrier for the configured ionization mode
Adduct default_adduct;
bool is_neg = (param_.getValue("negative_mode") == "true" ? true : false);
if (is_neg)
{
default_adduct = Adduct(-1, 1, -Constants::PROTON_MASS_U, "H-1", log(1.0),0);
}
else
{
default_adduct = Adduct(1, 1, Constants::PROTON_MASS_U, "H1", log(1.0), 0);
}
int left_charges, right_charges;
// iterate only over the pre-existing edges; new ones are appended past edges_size
Size edges_size = edges.size();
for (Size i = 0; i < edges_size; ++i)
{
Size idx0 = edges[i].getElementIndex(0);
Size idx1 = edges[i].getElementIndex(1);
std::set<CmpInfo_> result;
// look at the intersection of the two adjacent features
// (we thus require the non-H adduct to be present on both sides of the edge,
// if one is deemed enough just change to union)
std::set_intersection(feature_adducts[idx0].begin(), feature_adducts[idx0].end(),
feature_adducts[idx1].begin(), feature_adducts[idx1].end(),
std::inserter(result, result.begin()));
std::set<CmpInfo_>::iterator result_it = result.begin();
// add new edge with each adduct of the intersection
while (result_it != result.end())
{
// the candidate adduct set, taken from the referenced edge, without default adducts
Compomer::CompomerSide to_add = edges[result_it->idx_cp].getCompomer().removeAdduct(default_adduct).getComponent()[result_it->side_cp];
// we currently do not punish additional two-side adducts
for (Compomer::CompomerSide::iterator it = to_add.begin(); it != to_add.end(); ++it)
{
it->second.setLogProb(0);
}
ChargePair cp(edges[i]); // make a copy
Compomer new_cmp = cp.getCompomer().removeAdduct(default_adduct);
// add the candidate adducts symmetrically to both sides
new_cmp.add(to_add, Compomer::LEFT);
new_cmp.add(to_add, Compomer::RIGHT);
//We again need to consider inverted behavior (but cp.getCharge(x) gets negative charges as assigned before!
if (is_neg)
{
left_charges = -new_cmp.getPositiveCharges();
right_charges = -new_cmp.getNegativeCharges();
}
else
{
left_charges = new_cmp.getNegativeCharges();
right_charges = new_cmp.getPositiveCharges();
}
// refill with default adducts (usually H+):
if (((cp.getCharge(0) - left_charges) % default_adduct.getCharge() == 0) &&
((cp.getCharge(1) - right_charges) % default_adduct.getCharge() == 0)) // for singly charged default_adducts this should always be true
{
int hc_left, hc_right;
hc_left = (cp.getCharge(0) - left_charges) / default_adduct.getCharge();
hc_right = (cp.getCharge(1) - right_charges) / default_adduct.getCharge();
// we have not stepped over the charge capacity of the features
if (hc_left >= 0 && hc_right >= 0)
{
// fill up with defaults:
if (hc_left > 0)
new_cmp.add(default_adduct * hc_left, Compomer::LEFT);
if (hc_right > 0)
new_cmp.add(default_adduct * hc_right, Compomer::RIGHT);
// charge constraints of feature still fulfilled? (taking ionization mode into account)
int left_charge, right_charge;
if (is_neg)
{
left_charge = -new_cmp.getPositiveCharges();
right_charge = -new_cmp.getNegativeCharges();
}
else
{
left_charge = new_cmp.getNegativeCharges();
right_charge = new_cmp.getPositiveCharges();
}
if ((left_charge == cp.getCharge(0)) &&
(right_charge == cp.getCharge(1)))
{
cp.setCompomer(new_cmp);
cp.setEdgeScore(0.99); //TODO how to score this new edge?
edges.push_back(cp); // add edge
}
else
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "MetaboliteFeatureDeconvolution::inferMoreEdges_(): Inferred edges with wrong(switched?) charges! Left neg_charge, left feature charge, right pos_charge, right feature charge", String(new_cmp.getNegativeCharges())+","+String(cp.getCharge(0))+","+String(new_cmp.getPositiveCharges())+","+String(cp.getCharge(1)));
}
}
}
else // have nonzero modulo.SHOULD NOT HAPPEN FOR DEFAULT CHARGE 1/-1 !!
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "MetaboliteFeatureDeconvolution::inferMoreEdges_(): Modulo returns leftover charge!", String(new_cmp.getNegativeCharges()));
}
++result_it;
}
} // edge for
OPENMS_LOG_INFO << "Inferring edges raised edge count from " << edges_size << " to " << edges.size() << "\n";
}
/// Debug helper: dump every edge (compomer, index, score) that connects the two
/// given feature indices, in either orientation.
void MetaboliteFeatureDeconvolution::printEdgesOfConnectedFeatures_(Size idx_1, Size idx_2, const PairsType& feature_relation)
{
  std::cout << " +++++ printEdgesOfConnectedFeatures_ +++++\n";
  for (Size edge = 0; edge < feature_relation.size(); ++edge)
  {
    const auto& cp = feature_relation[edge];
    const Size e0 = cp.getElementIndex(0);
    const Size e1 = cp.getElementIndex(1);
    // edge connects the two features regardless of direction
    const bool connects = ((e0 == idx_1) && (e1 == idx_2)) ||
                          ((e0 == idx_2) && (e1 == idx_1));
    if (connects)
    {
      std::cout << cp.getCompomer() << " Edge: " << edge << " score: " << cp.getEdgeScore() << "\n";
    }
  }
  std::cout << " ----- printEdgesOfConnectedFeatures_ -----\n";
  return;
}
inline bool MetaboliteFeatureDeconvolution::intensityFilterPassed_(const Int q1, const Int q2, const Compomer& cmp, const Feature& f1, const Feature& f2) const
{
if (!enable_intensity_filter_)
return true;
if (q1 == q2)
{
Compomer cl; cl.add(cmp.getComponent()[0], Compomer::LEFT);
Compomer cr; cr.add(cmp.getComponent()[1], Compomer::LEFT);
if (((cl.getLogP() <= cr.getLogP()) && (f1.getIntensity() <= f2.getIntensity()))
||
((cl.getLogP() >= cr.getLogP()) && (f1.getIntensity() >= f2.getIntensity()))
)
{
return true;
}
else
{
// forbid this edge?!
std::cout << "intensity constraint: edge with intensity " << f1.getIntensity() << "(" << cmp.getAdductsAsString(Compomer::LEFT) << ") and " << f2.getIntensity() << "(" << cmp.getAdductsAsString(Compomer::RIGHT) << ") deleted\n";
return false;
}
}
return true;
}
bool MetaboliteFeatureDeconvolution::chargeTestworthy_(const Int feature_charge, const Int putative_charge, const bool other_unchanged) const
{
//Switches of charge signs in one ionization mode should logically not occur.
//The assumed decharger charge settings should fit to feature charges.
//However, FFM (and other tools?) doesn't know about negative charges, thus for negative charges,
//we have to verify that positive Feature charges match negative adduct charges.
//Further, we have two scenarios: 1. The features come from FFM, then all charges are absolute.
// 2. We iteratively decharge negative mode, leading to decharger featureXML outputs with new negative charges.
//Thus, we restrict this check for test worthiness to positive mode, as for negative mode both charge signs are valid.
bool is_neg = (param_.getValue("negative_mode") == "true" ? true : false);
if (!is_neg && (feature_charge * putative_charge < 0))
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("feature charge and putative positive mode charge switch charge direction!"), String(feature_charge)+" "+String(putative_charge));
}
//From here, we checked whether we are fine with charge signs, so for now simply look only at absolute charges.
const Int abs_feature_charge = abs(feature_charge);
const Int abs_putative_charge = abs(putative_charge);
// if no charge given or all-charges is selected. Assume no charge detected -> charge 0
if ((abs_feature_charge == 0) || (q_try_ == CHARGEMODE::QALL))
{
return true;
}
else if (q_try_ == CHARGEMODE::QHEURISTIC)
{
// do not allow two charges to change at the same time
if (!other_unchanged && abs_feature_charge != abs_putative_charge)
return false;
// test two adjacent charges:
if (abs(abs_feature_charge - abs_putative_charge) <= 2)
return true;
// test two multiples
if (abs_feature_charge * 2 == abs_putative_charge || abs_feature_charge * 3 == abs_putative_charge
|| abs_feature_charge == abs_putative_charge * 2 || abs_feature_charge == abs_putative_charge * 3)
return true;
return false;
}
else if (q_try_ == CHARGEMODE::QFROMFEATURE)
{
return abs_feature_charge == abs_putative_charge;
}
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "q_try_ has unhandled enum value!", String(static_cast<Int>(q_try_)));
}
void MetaboliteFeatureDeconvolution::checkSolution_(const ConsensusMap& cons_map) const
{
Size ladders_total(0);
Size ladders_with_odd(0);
// checking number of charge ladders which have all gapped shapes, hinting at wrong lower-bound bound (should be lower)
for (ConsensusMap::const_iterator it = cons_map.begin(); it != cons_map.end(); ++it)
{
if (it->size() == 1)
continue;
++ladders_total;
IntList charges = it->getMetaValue("distinct_charges");
for (Size i = 0; i < charges.size(); ++i)
{
if (charges[i] % 2 == 1)
{
++ladders_with_odd;
break;
}
}
}
// if more than 5% of charge ladder have only gapped, report
if (ladders_with_odd < ladders_total * 0.95)
{
OPENMS_LOG_WARN << ".\n..\nWarning: a significant portion of your decharged molecules have gapped, even-numbered charge ladders (" << ladders_total - ladders_with_odd << " of " << ladders_total << ")";
OPENMS_LOG_WARN <<"This might indicate a too low charge interval being tested.\n..\n.\n";
}
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/DECHARGING/FeatureDeconvolution.cpp | .cpp | 53,834 | 1,266 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Fabian Aicheler $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/DECHARGING/FeatureDeconvolution.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CHEMISTRY/EmpiricalFormula.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/DATASTRUCTURES/ChargePair.h>
#include <OpenMS/FORMAT/TextFile.h>
#include <map>
#undef DC_DEVEL
//#define DC_DEVEL 1
#ifdef DC_DEVEL
#include <fstream>
#include <OpenMS/ANALYSIS/DECHARGING/ChargeLadder.h>
#include <OpenMS/FORMAT/FileHandler.h>
#endif
using namespace std;
namespace OpenMS
{
/**
  @brief 1-sided Compomer for a feature

  Holds information on an explicit (with H+) 1-sided Compomer of a feature.
*/
struct FeatureDeconvolution::CmpInfo_
{
  String s_comp;  ///< formula as String
  Size idx_cp{};  ///< index into compomer vector
  UInt side_cp{}; ///< side of parent compomer (LEFT or RIGHT)

  /// Default C'tor (members rely on their in-class initializers)
  CmpInfo_() = default;
  /// Detailed C'tor
  CmpInfo_(String& s, Size idx, UInt side) :
    s_comp(s), idx_cp(idx), side_cp(side) {}
  /// Copy C'tor
  CmpInfo_(const CmpInfo_& rhs) = default;
  /// Assignment (member-wise copy; self-assignment safe)
  CmpInfo_& operator=(const CmpInfo_& rhs) = default;
  /// Ordering: lexicographic on the formula string only
  bool operator<(const CmpInfo_& other) const
  {
    return s_comp < other.s_comp;
  }
  /// Equality: compares the formula string only
  bool operator==(const CmpInfo_& other) const
  {
    return s_comp == other.s_comp;
  }
};
/// Default constructor: registers all algorithm parameters (with defaults,
/// restrictions and descriptions) and syncs members via defaultsToParam_().
FeatureDeconvolution::FeatureDeconvolution() :
  DefaultParamHandler("FeatureDeconvolution"),
  potential_adducts_(),
  map_label_(),
  map_label_inverse_(),
  enable_intensity_filter_(false)
{
  // --- charge search space ---
  defaults_.setValue("charge_min", 1, "Minimal possible charge");
  defaults_.setValue("charge_max", 10, "Maximal possible charge");
  defaults_.setValue("charge_span_max", 4, "Maximal range of charges for a single analyte, i.e. observing q1=[5,6,7] implies span=3. Setting this to 1 will only find adduct variants of the same charge");
  defaults_.setMinInt("charge_span_max", 1); // will only find adduct variants of the same charge
  defaults_.setValue("q_try", "feature", "Try different values of charge for each feature according to the above settings ('heuristic' [does not test all charges, just the likely ones] or 'all' ), or leave feature charge untouched ('feature').");
  defaults_.setValidStrings("q_try", {"feature","heuristic","all"});
  // --- RT / m/z tolerances for pairing features ---
  defaults_.setValue("retention_max_diff", 1.0, "Maximum allowed RT difference between any two features if their relation shall be determined");
  defaults_.setValue("retention_max_diff_local", 1.0, "Maximum allowed RT difference between between two co-features, after adduct shifts have been accounted for (if you do not have any adduct shifts, this value should be equal to 'retention_max_diff', otherwise it should be smaller!)");
  defaults_.setValue("mass_max_diff", 0.5, "Maximum allowed mass difference [in Th] for a single feature.");
  // --- adduct model ---
  // Example entries: Na+:0.1 , (2)H4H-4:0.1:-2:heavy
  defaults_.setValue("potential_adducts", std::vector<std::string>{"K:+:0.1"}, "Adducts used to explain mass differences in format: 'Element:Charge(+/-):Probability[:RTShift[:Label]]', i.e. the number of '+' or '-' indicate the charge, e.g. 'Ca:++:0.5' indicates +2. Probabilites have to be in (0,1]. RTShift param is optional and indicates the expected RT shift caused by this adduct, e.g. '(2)H4H-4:0:1:-3' indicates a 4 deuterium label, which causes early elution by 3 seconds. As a fifth parameter you can add a label which is tagged on every feature which has this adduct. This also determines the map number in the consensus file.");
  defaults_.setValue("max_neutrals", 0, "Maximal number of neutral adducts(q=0) allowed. Add them in the 'potential_adducts' section!");
  defaults_.setValue("max_minority_bound", 2, "Maximum count of the least probable adduct (according to 'potential_adducts' param) within a charge variant. E.g. setting this to 2 will not allow an adduct composition of '1(H+),3(Na+)' if Na+ is the least probable adduct");
  defaults_.setMinInt("max_minority_bound", 0);
  // --- edge filters ---
  defaults_.setValue("min_rt_overlap", 0.66, "Minimum overlap of the convex hull' RT intersection measured against the union from two features (if CHs are given)");
  defaults_.setMinFloat("min_rt_overlap", 0);
  defaults_.setMaxFloat("min_rt_overlap", 1);
  defaults_.setValue("intensity_filter", "false", "Enable the intensity filter, which will only allow edges between two equally charged features if the intensity of the feature with less likely adducts is smaller than that of the other feature. It is not used for features of different charge.");
  defaults_.setValidStrings("intensity_filter", {"true","false"});
  // --- mode / output / logging ---
  defaults_.setValue("negative_mode", "false", "Enable negative ionization mode.");
  defaults_.setValue("default_map_label", "decharged features", "Label of map in output consensus file where all features are put by default", {"advanced"});
  defaults_.setValue("verbose_level", 0, "Amount of debug information given during processing.", {"advanced"});
  defaults_.setMinInt("verbose_level", 0);
  defaults_.setMaxInt("verbose_level", 3);
  // propagate the defaults into param_ and trigger updateMembers_()
  defaultsToParam_();
}
void FeatureDeconvolution::updateMembers_()
{
map_label_.clear();
map_label_inverse_.clear();
map_label_inverse_[String(param_.getValue("default_map_label").toString())] = 0; // default virtual map (for unlabeled experiments)
map_label_[0] = String(param_.getValue("default_map_label").toString());
if (param_.getValue("q_try") == "feature")
q_try_ = CHARGEMODE::QFROMFEATURE;
else if (param_.getValue("q_try") == "heuristic")
q_try_ = CHARGEMODE::QHEURISTIC;
else
q_try_ = CHARGEMODE::QALL;
StringList potential_adducts_s = ListUtils::toStringList<std::string>(param_.getValue("potential_adducts"));
potential_adducts_.clear();
bool had_nonzero_RT = false; // adducts with RT-shift > 0 ?
// adducts might look like this:
// Element:Probability[:RTShift[:Label]]
for (StringList::iterator it = potential_adducts_s.begin(); it != potential_adducts_s.end(); ++it)
{
// skip disabled adducts
if (it->trim().hasPrefix("#"))
continue;
StringList adduct;
it->split(':', adduct);
if (adduct.size() != 3 && adduct.size() != 4 && adduct.size() != 5)
{
String error = "FeatureDeconvolution::potential_adducts (" + (*it) + ") does not have three, four or five entries ('Elements:Charge:Probability' or 'Elements:Charge:Probability:RTShift' or 'Elements:Charge:Probability:RTShift:Label'), but " + String(adduct.size()) + " entries!";
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, error);
}
// determine probability
float prob = adduct[2].toFloat();
if (prob > 1.0 || prob <= 0.0)
{
String error = "FeatureDeconvolution::potential_adducts (" + (*it) + ") does not have a proper probability (" + String(prob) + ") in [0,1]!";
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, error);
}
// RT Shift:
double rt_shift(0);
if (adduct.size() >= 4)
{
rt_shift = adduct[3].toDouble();
if (rt_shift != 0)
had_nonzero_RT = true;
}
// Label:
String label = "";
if (adduct.size() >= 5)
{
label = adduct[4].trim();
map_label_inverse_[label] = map_label_.size(); // add extra virtual map
map_label_[map_label_inverse_[label]] = label;
}
// determine charge of adduct (by # of '+' or '-')
Size charge_s_len = adduct[1].size();
Int pos_charge = charge_s_len - adduct[1].remove('+').size();
charge_s_len = adduct[1].size();
Int neg_charge = charge_s_len - adduct[1].remove('-').size();
if (pos_charge > 0 && neg_charge > 0)
{
String error = "FeatureDeconvolution::potential_adducts mixes charges for an adduct!";
}
else if (pos_charge > 0)
{
EmpiricalFormula ef(adduct[0]);
ef -= EmpiricalFormula("H" + String(pos_charge));
ef.setCharge(pos_charge); // effectively subtract electron masses
potential_adducts_.push_back(Adduct((Int)pos_charge, 1, ef.getMonoWeight(), adduct[0], log(prob), rt_shift, label));
}
else if (neg_charge > 0)
{
if (adduct[0] == "H-1")
{
potential_adducts_.push_back(Adduct((Int)-neg_charge, 1, -Constants::PROTON_MASS_U, adduct[0], log(prob), rt_shift,label));
}
else
{
EmpiricalFormula ef(adduct[0]);
ef.setCharge(0);//ensures we get without additional protons, now just add electron masses
potential_adducts_.push_back(Adduct((Int)-neg_charge, 1, ef.getMonoWeight() + Constants::ELECTRON_MASS_U * neg_charge, adduct[0], log(prob), rt_shift, label));
}
}
else//pos,neg == 0
{ //in principle no change because pos_charge 0 and ef.getMonoWeight() only adds for nonzero charges
EmpiricalFormula ef(adduct[0]);
ef -= EmpiricalFormula("H" + String(pos_charge));
ef.setCharge(pos_charge); // effectively subtract electron masses
potential_adducts_.push_back(Adduct((Int)pos_charge, 1, ef.getMonoWeight(), adduct[0], log(prob), rt_shift, label));
}
verbose_level_ = param_.getValue("verbose_level");
}
// RT sanity check:
double rt_diff_max = param_.getValue("retention_max_diff");
double rt_diff_max_local = param_.getValue("retention_max_diff_local");
if (!had_nonzero_RT) // only 0 RT shift:
{
if (rt_diff_max != rt_diff_max_local)
{
OPENMS_LOG_WARN << "Parameters 'retention_max_diff' and 'retention_max_diff_local' are unequal, but no RT shift of adducts has been defined. Setting parameters to minimum of the two." << std::endl;
param_.setValue("retention_max_diff", std::min(rt_diff_max, rt_diff_max_local));
param_.setValue("retention_max_diff_local", std::min(rt_diff_max, rt_diff_max_local));
}
}
else // has RT shift:
{
if (rt_diff_max < rt_diff_max_local)
{
OPENMS_LOG_WARN << "Parameters 'retention_max_diff' is smaller than 'retention_max_diff_local'. This does not make sense! Setting 'retention_max_diff_local' to 'retention_max_diff'." << std::endl;
param_.setValue("retention_max_diff_local", rt_diff_max);
}
}
// intensity filter
enable_intensity_filter_ = (param_.getValue("intensity_filter") == "true" ? true : false);
}
/// Copy constructor.
/// NOTE(review): only the parameter handler base and the members listed below
/// are copied explicitly; q_try_ and verbose_level_ are not copied here —
/// presumably they are re-derived from the copied Param via updateMembers_(),
/// TODO confirm against DefaultParamHandler's copy semantics.
FeatureDeconvolution::FeatureDeconvolution(const FeatureDeconvolution& source) :
  DefaultParamHandler(source),
  potential_adducts_(source.potential_adducts_),
  map_label_(source.map_label_),
  map_label_inverse_(source.map_label_inverse_),
  enable_intensity_filter_(source.enable_intensity_filter_)
{
}
/// Assignment operator (self-assignment safe).
/// Copies the parameter handler state and the adduct/label caches;
/// q_try_ and verbose_level_ are intentionally not assigned here,
/// mirroring the copy constructor.
inline FeatureDeconvolution& FeatureDeconvolution::operator=(const FeatureDeconvolution& source)
{
  if (this != &source)
  {
    DefaultParamHandler::operator=(source);
    potential_adducts_ = source.potential_adducts_;
    map_label_ = source.map_label_;
    map_label_inverse_ = source.map_label_inverse_;
    enable_intensity_filter_ = source.enable_intensity_filter_;
  }
  return *this;
}
/// Destructor (defaulted: all members release their resources via RAII)
FeatureDeconvolution::~FeatureDeconvolution() = default;
//@}
void FeatureDeconvolution::compute(const FeatureMap& fm_in, FeatureMap& fm_out, ConsensusMap& cons_map, ConsensusMap& cons_map_p)
{
bool is_neg = (param_.getValue("negative_mode") == "true" ? true : false);
ConsensusMap cons_map_p_neg; // tmp
cons_map = ConsensusMap();
cons_map_p = ConsensusMap();
Int q_min = param_.getValue("charge_min");
Int q_max = param_.getValue("charge_max");
Int q_span = param_.getValue("charge_span_max");
Size max_neutrals = param_.getValue("max_neutrals");
double rt_diff_max = param_.getValue("retention_max_diff");
double rt_diff_max_local = param_.getValue("retention_max_diff_local");
double mz_diff_max = param_.getValue("mass_max_diff");
double rt_min_overlap = param_.getValue("min_rt_overlap");
// sort by RT and then m/z
fm_out = fm_in;
fm_out.sortByPosition();
fm_out.applyMemberFunction(&UniqueIdInterface::ensureUniqueId);
FeatureMap fm_out_untouched = fm_out;
// search for most & least probable adduct to fix p threshold
double adduct_lowest_log_p = log(1.0);
double adduct_highest_log_p = log(0.0000000001);
for (Size i = 0; i < potential_adducts_.size(); ++i)
{
adduct_lowest_log_p = std::min(adduct_lowest_log_p, potential_adducts_[i].getLogProb());
adduct_highest_log_p = std::max(adduct_highest_log_p, potential_adducts_[i].getLogProb());
}
Int max_minority_bound = param_.getValue("max_minority_bound");
double thresh_logp = adduct_lowest_log_p * max_minority_bound +
adduct_highest_log_p * std::max(q_max - max_minority_bound, 0);
Adduct default_adduct;
if (is_neg)
{
//for negative mode, the default adduct should be deprotonation (added by the user)
default_adduct = Adduct(-1, 1, -Constants::PROTON_MASS_U, "H-1", log(1.0),0);
// e^(log prob_H)*e^(log prob_Na) = *e^(log prob_Na) * *e^(log prob_Na)
}
else
{
default_adduct = Adduct(1, 1, Constants::PROTON_MASS_U, "H1", log(1.0),0);
}
// create mass difference list
OPENMS_LOG_INFO << "Generating Masses with threshold: " << thresh_logp << " ...\n";
//make it proof for charge 1..3 and charge -3..-1
if ((q_min * q_max) < 0)
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("Min and max charge switch charge signs! Please use same charge sign."), String(q_min)+" "+String(q_max));
}
int small, large;
small = q_min;
large = q_max;
//if both negative, we assume that it goes min->max: -3 -> -1, i.e. q_max would be -1
if ((q_min < 0) && (q_max < 0))
{
small = abs(q_max);
large = abs(q_min);
}
MassExplainer me(potential_adducts_, small, large, q_span, thresh_logp, max_neutrals);
me.compute();
OPENMS_LOG_INFO << "done\n";
// holds query results for a mass difference
MassExplainer::CompomerIterator md_s, md_e;
Compomer null_compomer(0, 0, -std::numeric_limits<double>::max());
SignedSize hits(0);
CoordinateType mz1, mz2, m1;
Size possibleEdges(0), overallHits(0);
// edges
PairsType feature_relation;
// for each feature, hold the explicit adduct type induced by edges
std::map<Size, std::set<CmpInfo_> > feature_adducts;
// # compomer results that either passed or failed the feature charge constraints
Size no_cmp_hit(0), cmp_hit(0);
/*DoubleList dl_massdiff;
IntList il_chargediff;*/
for (Size i_RT = 0; i_RT < fm_out.size(); ++i_RT) // ** RT-sweep line
{
mz1 = fm_out[i_RT].getMZ();
for (Size i_RT_window = i_RT + 1
; (i_RT_window < fm_out.size())
&& ((fm_out[i_RT_window].getRT() - fm_out[i_RT].getRT()) <= rt_diff_max)
; ++i_RT_window)
{ // ** RT-window
// knock-out criterion first: RT overlap
// use sorted structure and use 2nd start--1st end / 1st start--2nd end
const Feature& f1 = fm_out[i_RT];
const Feature& f2 = fm_out[i_RT_window];
if (!(f1.getConvexHull().getBoundingBox().isEmpty() || f2.getConvexHull().getBoundingBox().isEmpty()))
{
double f_start1 = std::min(f1.getConvexHull().getBoundingBox().minX(), f2.getConvexHull().getBoundingBox().minX());
double f_start2 = std::max(f1.getConvexHull().getBoundingBox().minX(), f2.getConvexHull().getBoundingBox().minX());
double f_end1 = std::min(f1.getConvexHull().getBoundingBox().maxX(), f2.getConvexHull().getBoundingBox().maxX());
double f_end2 = std::max(f1.getConvexHull().getBoundingBox().maxX(), f2.getConvexHull().getBoundingBox().maxX());
double union_length = f_end2 - f_start1;
double intersect_length = std::max(0., f_end1 - f_start2);
if (intersect_length / union_length < rt_min_overlap)
continue;
}
// start guessing charges ...
mz2 = fm_out[i_RT_window].getMZ();
for (Int q1 = q_min; q1 <= q_max; ++q1) // ** q1
{
//We assume that ionization modes won't get mixed in pipeline -> detected features should have same charge sign as provided to decharger settings.
if (!chargeTestworthy_(f1.getCharge(), q1, true))
continue;
m1 = mz1 * abs(q1);
// additionally: forbid q1 and q2 with distance greater than q_span
for (Int q2 = std::max(q_min, q1 - q_span + 1)
; (q2 <= q_max) && (q2 <= q1 + q_span - 1)
; ++q2)
{ // ** q2
if (!chargeTestworthy_(f2.getCharge(), q2, f1.getCharge() == q1))
continue;
++possibleEdges; // internal count, not vital
// find possible adduct combinations
CoordinateType naive_mass_diff = mz2 * abs(q2) - m1;
double abs_mass_diff = mz_diff_max * abs(q1) + mz_diff_max * abs(q2); // tolerance must increase when looking at M instead of m/z, as error margins increase as well
//abs charge "3" to abs charge "1" -> simply invert charge delta for negative case?
hits = me.query(q2 - q1, naive_mass_diff, abs_mass_diff, thresh_logp, md_s, md_e);
OPENMS_PRECONDITION(hits >= 0, "FeatureDeconvolution querying #hits got negative result!");
overallHits += hits;
// choose most probable hit (TODO think of something clever here)
// for now, we take the one that has highest p in terms of the compomer structure
if (hits > 0)
{
Compomer best_hit = null_compomer;
for (; md_s != md_e; ++md_s)
{
// post-filter hits by local RT
if (fabs(f1.getRT() - f2.getRT() + md_s->getRTShift()) > rt_diff_max_local)
continue;
//std::cout << md_s->getAdductsAsString() << " neg: " << md_s->getNegativeCharges() << " pos: " << md_s->getPositiveCharges() << " p: " << md_s->getLogP() << " \n";
int left_charges, right_charges;
if (is_neg)
{
left_charges = -md_s->getPositiveCharges();
right_charges = -md_s->getNegativeCharges();//for negative, a pos charge means either losing an H-1 from the left (decreasing charge) or the Na case. (We do H-1Na as neutral, because of the pos, neg charges)
}
else
{
left_charges = md_s->getNegativeCharges();//for positive mode neutral switches still have to fulfill requirement that they have at most charge as each side
right_charges = md_s->getPositiveCharges();
}
if ( // compomer fits charge assignment of left & right feature. doesn't consider charge sign switch over span!
(abs(q1) >= abs(left_charges)) && (abs(q2) >= abs(right_charges)))
{
// compomer has better probability
if (best_hit.getLogP() < md_s->getLogP())
best_hit = *md_s;
/** testing: we just add every explaining edge
- a first estimate shows that 90% of hits are of |1|
- the remaining 10% have |2|, so the additional overhead is minimal
**/
Compomer cmp = me.getCompomerById(md_s->getID());
if (is_neg)
{
left_charges = -cmp.getPositiveCharges();
right_charges = -cmp.getNegativeCharges();
}
else
{
left_charges = cmp.getNegativeCharges();
right_charges = cmp.getPositiveCharges();
}
//this block should only be of interest if we have something multiply charges instead of protonation or deprotonation
if (((q1 - left_charges) % default_adduct.getCharge() != 0) ||
((q2 - right_charges) % default_adduct.getCharge() != 0))
{
OPENMS_LOG_WARN << "Cannot add enough default adduct (" << default_adduct.getFormula() << ") to exactly fit feature charge! Next...)\n";
continue;
}
int hc_left = (q1 - left_charges) / default_adduct.getCharge();//this should always be positive! check!!
int hc_right = (q2 - right_charges) / default_adduct.getCharge();//this should always be positive! check!!
if (hc_left < 0 || hc_right < 0)
{
throw Exception::Postcondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "WARNING!!! implicit number of default adduct is negative!!! left:" + String(hc_left) + " right: " + String(hc_right) + "\n");
}
// intensity constraint:
// no edge is drawn if low-prob feature has higher intensity
if (!intensityFilterPassed_(q1, q2, cmp, f1, f2))
continue;
// get non-default adducts of this edge
Compomer cmp_stripped(cmp.removeAdduct(default_adduct));
// save new adduct candidate
if (!cmp_stripped.getComponent()[Compomer::LEFT].empty())
{
String tmp = cmp_stripped.getAdductsAsString(Compomer::LEFT);
CmpInfo_ cmp_left(tmp, feature_relation.size(), Compomer::LEFT);
feature_adducts[i_RT].insert(cmp_left);
}
if (!cmp_stripped.getComponent()[Compomer::RIGHT].empty())
{
String tmp = cmp_stripped.getAdductsAsString(Compomer::RIGHT);
CmpInfo_ cmp_right(tmp, feature_relation.size(), Compomer::RIGHT);
feature_adducts[i_RT_window].insert(cmp_right);
}
// add implicit default adduct (H+ or H-) (if != 0)
if (hc_left > 0)
{
cmp.add(default_adduct * hc_left, Compomer::LEFT);
}
if (hc_right > 0)
{
cmp.add(default_adduct * hc_right, Compomer::RIGHT);
}
ChargePair cp(i_RT, i_RT_window, q1, q2, cmp, naive_mass_diff - md_s->getMass(), false);
feature_relation.push_back(cp);
}
} // ! hits loop
if (best_hit == null_compomer)
{
std::cout << "FeatureDeconvolution.h:: could not find a compomer which complies with assumed q1 and q2 values!\n with q1: " << q1 << " q2: " << q2 << "\n";
++no_cmp_hit;
}
else
{
++cmp_hit;
}
}
} // q2
} // q1
} // RT-window
} // RT sweep line
OPENMS_LOG_INFO << no_cmp_hit << " of " << (no_cmp_hit + cmp_hit) << " valid net charge compomer results did not pass the feature charge constraints\n";
inferMoreEdges_(feature_relation, feature_adducts);
// DEBUG:
#ifdef DC_DEVEL
std::ofstream out_m("diffs_mass.txt");
out_m << "a=c(" << dl_massdiff << ")\n";
out_m << "hist(abs(a), 150, xlab=\"delta M\", main=\"Histogram of Mass Differences\")\n";
out_m.close();
std::ofstream out_q("diffs_charge.txt");
out_q << "c=c(" << il_chargediff << ")\n";
out_q.close();
#endif
if (feature_relation.empty())
{
OPENMS_LOG_INFO << "Found NO putative edges. The output generated will be trivial (only singleton clusters and no pairings). "
<< "Your parameters might need revision or the input was ill-formed." << std::endl;
}
else
{
OPENMS_LOG_INFO << "Found " << feature_relation.size() << " putative edges (of " << possibleEdges << ")"
<< " and avg hit-size of " << (1.0 * overallHits / feature_relation.size())
<< std::endl;
// -------------------------- //
// ** compute ILP solution ** //
// -------------------------- //
// forward set of putative edges to ILP
ILPDCWrapper lp_wrapper;
// compute best solution (this will REORDER elements on feature_relation[] !) - do not rely on order afterwards!
double ilp_score = lp_wrapper.compute(fm_out, feature_relation, this->verbose_level_);
OPENMS_LOG_INFO << "ILP score is: " << ilp_score << std::endl;
}
// prepare output consensusMaps
cons_map.setProteinIdentifications(fm_out.getProteinIdentifications());
cons_map_p.setProteinIdentifications(fm_out.getProteinIdentifications());
// -------------------------- //
// ** DEBUG ** //
// -------------------------- //
//printEdgesOfConnectedFeatures_(888, 889, feature_relation);
std::map<Size, Size> features_aes, features_des; // count of adjacent active and dead edges
UInt agreeing_fcharge = 0;
std::vector<Size> f_idx_v(2);
Size aedges = 0;
StringList scores_clean_edge, scores_dirty_edge;
StringList scores_clean_edge_idx, scores_dirty_edge_idx;
EmpiricalFormula ef_clean_edge, ef_dirty_edge;
// find # edges (active and dead) for each feature
TextFile out_massdeltas;
for (Size i = 0; i < feature_relation.size(); ++i)
{
Size f0_idx = feature_relation[i].getElementIndex(0);
Size f1_idx = feature_relation[i].getElementIndex(1);
if (feature_relation[i].isActive())
{
++features_aes[f0_idx];
++features_aes[f1_idx];
}
else
{
++features_des[f0_idx];
++features_des[f1_idx];
}
// print mass delta of each edge
out_massdeltas.addLine(String(feature_relation[i].getMassDiff()) + ", " + String(feature_relation[i].getCharge(0)) + ", " + String(feature_relation[i].isActive() == 0));
}
#ifdef DC_DEVEL
out_massdeltas.store("mass_deltas.csv");
#endif
TextFile out_dead;
for (Size i = 0; i < feature_relation.size(); ++i)
{
f_idx_v[0] = feature_relation[i].getElementIndex(0);
f_idx_v[1] = feature_relation[i].getElementIndex(1);
Compomer c = feature_relation[i].getCompomer();
if (!feature_relation[i].isActive())
{
out_dead.addLine(String("dead e") + i + " (" + (c.getAdductsAsString(Compomer::LEFT)) + " -> " + (c.getAdductsAsString(Compomer::RIGHT)) + "): "
+ f_idx_v[0] + " (q_ff:" + fm_out[f_idx_v[0]].getCharge() + " q_de:" + feature_relation[i].getCharge(0) + ")"
+ f_idx_v[1] + " (q_ff:" + fm_out[f_idx_v[1]].getCharge() + " q_de:" + feature_relation[i].getCharge(1) + ")"
+ "score: " + feature_relation[i].getEdgeScore()
);
continue;
}
++aedges;
bool dirty = false;
for (Size f_idx = 0; f_idx < 2; ++f_idx)
{
// check if the local feature charges agree
if (fm_out[f_idx_v[f_idx]].getCharge() == feature_relation[i].getCharge((UInt)f_idx))
{
++agreeing_fcharge;
}
else
{
double rt_diff = fabs(fm_out[feature_relation[i].getElementIndex(0)].getRT() - fm_out[feature_relation[i].getElementIndex(1)].getRT());
if (verbose_level_ > 2)
{
OPENMS_LOG_WARN << "Conflict in f_Q! f_RT:" << fm_out[f_idx_v[f_idx]].getRT() << " f_MZ:" << fm_out[f_idx_v[f_idx]].getMZ() << " f_int:" << fm_out[f_idx_v[f_idx]].getIntensity()
<< " Q:" << fm_out[f_idx_v[f_idx]].getCharge() << " PredictedQ:" << feature_relation[i].getCharge((UInt)f_idx)
<< "[[ dRT: " << rt_diff << " dMZ: " << feature_relation[i].getMassDiff() << " score[" << i << "]:"
<< feature_relation[i].getEdgeScore() << " f#:" << fm_out[f_idx_v[f_idx]].getUniqueId() << " " << feature_relation[i].getCompomer().getAdductsAsString((UInt)f_idx)
<< "(a" << features_aes[f_idx_v[f_idx]] << ":d" << features_des[f_idx_v[f_idx]] << ") ]]\n";
}
dirty = true;
}
}
EmpiricalFormula ef(c.getAdductsAsString(Compomer::LEFT) + (c.getAdductsAsString(Compomer::RIGHT)));
// store score distribution:
if (!dirty)
{
scores_clean_edge.push_back(String(feature_relation[i].getEdgeScore()));
scores_clean_edge_idx.push_back(String(i));
ef_clean_edge += ef;
}
else
{
scores_dirty_edge.push_back(String(feature_relation[i].getEdgeScore()));
scores_dirty_edge_idx.push_back(String(i));
ef_dirty_edge += ef;
}
}
{
OPENMS_LOG_INFO << "Agreeing charges: " << agreeing_fcharge << "/" << (aedges * 2) << std::endl;
}
#ifdef DC_DEVEL
out_dead.store("ILP_dead_edges.txt"); // TODO disable
//std::cout << "Edge score distribution (clean):\n" + scores_clean_edge.concatenate(" ") + "\n(dirty)\n" + scores_dirty_edge.concatenate(" ") + "\n\n";
//std::cout << "Edge empirical formula (clean):\n" + ef_clean_edge.toString() + "\n(dirty)\n" + ef_dirty_edge.toString() + "\n\n";
#endif
// END DEBUG
// ------------------------------ //
// ** collect related features ** //
// ------------------------------ //
// fresh start for meta annotation
for (Size i = 0; i < fm_out.size(); ++i)
{
if (fm_out[i].metaValueExists(Constants::UserParam::DC_CHARGE_ADDUCTS))
fm_out[i].removeMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS);
}
// write groups to consensusXML (with type="charge_groups")
// **find cliques from pairs
// find which featureId maps to which consensusFeatureId
// if no mapping is found, make a new CF.
// if new pair spans two existing CFs -> merge CFs
typedef std::map<Size, Size> CliqueMap;
CliqueMap clique_register;
StringList scores;
StringList scores_e_inactive_idx, scores_e_active_idx;
for (Size i = 0; i < feature_relation.size(); ++i)
{
Size f0_idx = feature_relation[i].getElementIndex(0);
Size f1_idx = feature_relation[i].getElementIndex(1);
Int old_q0 = fm_out[f0_idx].getCharge();
Int old_q1 = fm_out[f1_idx].getCharge();
Int new_q0 = feature_relation[i].getCharge(0);
Int new_q1 = feature_relation[i].getCharge(1);
scores.push_back(String(feature_relation[i].getEdgeScore()));
if (feature_relation[i].isActive())
{
//std::cout << "feature #" << f0_idx << " #" << f1_idx << " ACTIVE q:" << new_q0 << ":" << new_q1 << " score: " << feature_relation[i].getEdgeScore() << " with RT: " << fm_out[f1_idx].getRT() << "\n";
//
// annotate the affected features
// ... and check consistency
//
Compomer c = feature_relation[i].getCompomer();
StringList labels;
fm_out[f0_idx].setMetaValue("map_idx", 0);
fm_out[f1_idx].setMetaValue("map_idx", 0);
// - left
EmpiricalFormula ef_l(c.getAdductsAsString(Compomer::LEFT));
if (fm_out[f0_idx].metaValueExists(Constants::UserParam::DC_CHARGE_ADDUCTS))
{
if (ef_l.toString() != fm_out[f0_idx].getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS))
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("Decharging produced inconsistent adduct annotation! [expected: ") + String(fm_out[f0_idx].getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS)) + "]", ef_l.toString());
}
else
{
fm_out[f0_idx].setMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS, ef_l.toString());
}
fm_out[f0_idx].setMetaValue("dc_charge_adduct_mass", ef_l.getMonoWeight());
fm_out[f0_idx].setMetaValue("is_backbone", Size(c.isSingleAdduct(default_adduct, Compomer::LEFT) ? 1 : 0));
if (new_q0 != old_q0)
fm_out[f0_idx].setMetaValue("old_charge", old_q0);
fm_out[f0_idx].setCharge(new_q0);
labels = c.getLabels(Compomer::LEFT);
if (labels.size() > 1)
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("Decharging produced inconsistent label annotation! [expected: a single label]"), ListUtils::concatenate(labels, ","));
if (!labels.empty())
{
fm_out[f0_idx].setMetaValue("map_idx", map_label_inverse_[labels[0]]);
}
// - right
EmpiricalFormula ef_r(c.getAdductsAsString(Compomer::RIGHT));
if (fm_out[f1_idx].metaValueExists(Constants::UserParam::DC_CHARGE_ADDUCTS))
{
if (ef_r.toString() != fm_out[f1_idx].getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS))
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("Decharging produced inconsistent adduct annotation! [expected: ") + String(fm_out[f1_idx].getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS)) + "]", ef_r.toString());
}
else
{
fm_out[f1_idx].setMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS, ef_r.toString());
}
fm_out[f1_idx].setMetaValue("dc_charge_adduct_mass", ef_r.getMonoWeight());
fm_out[f1_idx].setMetaValue("is_backbone", Size(c.isSingleAdduct(default_adduct, Compomer::RIGHT) ? 1 : 0));
if (new_q1 != old_q1)
fm_out[f1_idx].setMetaValue("old_charge", old_q1);
fm_out[f1_idx].setCharge(new_q1);
labels = c.getLabels(Compomer::RIGHT);
if (labels.size() > 1)
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("Decharging produced inconsistent label annotation! [expected: a single label]"), ListUtils::concatenate(labels, ","));
if (!labels.empty())
{
fm_out[f1_idx].setMetaValue("map_idx", map_label_inverse_[labels[0]]);
}
//
// create cliques
//
SignedSize target_cf0 = -1, target_cf1 = -1;
// find the index of the ConsensusFeatures for the current pair
if (clique_register.count(f0_idx) > 0)
{
target_cf0 = clique_register[f0_idx];
}
if (clique_register.count(f1_idx) > 0)
{
target_cf1 = clique_register[f1_idx];
}
ConsensusFeature cf(fm_out[f0_idx]);
cf.setPeptideIdentifications(PeptideIdentificationList()); // delete ID's as they are added later again
cf.setQuality(0.0);
cf.setUniqueId();
cf.insert((UInt64) fm_out[f0_idx].getMetaValue("map_idx"), fm_out[f0_idx]);
cf.insert((UInt64) fm_out[f1_idx].getMetaValue("map_idx"), fm_out[f1_idx]);
cf.setMetaValue("Local", String(old_q0) + ":" + String(old_q1));
cf.setMetaValue("CP", String(fm_out[f0_idx].getCharge()) + "(" + String(fm_out[f0_idx].getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS)) + "):"
+ String(fm_out[f1_idx].getCharge()) + "(" + String(fm_out[f1_idx].getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS)) + ") "
+ String("Score: ") + feature_relation[i].getEdgeScore());
//cf.computeDechargeConsensus(fm_out);
#if 1
// print pairs only
cons_map_p.push_back(cf);
#endif
// seen both features for the first time
if ((target_cf0 == -1) &&
(target_cf1 == -1))
{ //** new ConsensusFeature required
cons_map.push_back(cf);
clique_register[f0_idx] = cons_map.size() - 1;
clique_register[f1_idx] = cons_map.size() - 1;
//std::cout << "new: F" << f0_idx << " + F" << f1_idx << " are " << (cons_map.size()-1) << "\n";
}
else if (target_cf0 != target_cf1)
{
if (target_cf0 == -1) //** add f0 to the already existing cf of f1
{
cons_map[target_cf1].insert((UInt64) fm_out[f0_idx].getMetaValue("map_idx"), fm_out[f0_idx]);
clique_register[f0_idx] = target_cf1;
//std::cout << "add: F" << f0_idx << " to " <<target_cf1 << " due to F" << f1_idx << "\n";
}
else if (target_cf1 == -1) //** add f1 to the already existing cf of f0
{
cons_map[target_cf0].insert((UInt64) fm_out[f1_idx].getMetaValue("map_idx"), fm_out[f1_idx]);
clique_register[f1_idx] = target_cf0;
//std::cout << "add: F" << f1_idx << " to " <<target_cf0 << " due to F" << f0_idx << "\n";
}
else //** conflict: the two elements of the pair already have separate CFs --> merge
{ // take every feature from second CF and: #1 put into first CF, #2 change registration with map
ConsensusFeature::HandleSetType hst = cons_map[target_cf1].getFeatures();
for (ConsensusFeature::HandleSetType::const_iterator it = hst.begin(); it != hst.end(); ++it) //** update cf_index
{
clique_register[fm_out.uniqueIdToIndex(it->getUniqueId())] = target_cf0;
}
// insert features from cf1 to cf0
cons_map[target_cf0].insert(hst);
// clear cf1; do NOT delete cf1 (will invalidate higher indices) - do that afterwards
cons_map[target_cf1].clear();
//std::cout << "conflict: F" << f0_idx << " + F" << f1_idx << " --> "<< target_cf0 << "(" << target_cf1 << " killed)" << "\n";
}
}
scores_e_active_idx.push_back(String(i));
}
else // inactive edges
{
scores_e_inactive_idx.push_back(String(i));
// DEBUG
#ifdef DC_DEVEL
ConsensusFeature cf(fm_out[f0_idx]);
cf.setQuality(0.0);
cf.insert(0, fm_out[f0_idx].getUniqueId(), fm_out[f0_idx]);
cf.insert(0, fm_out[f1_idx].getUniqueId(), fm_out[f1_idx]);
cf.setMetaValue("Local", String(old_q0) + ":" + String(old_q1));
cf.setMetaValue("CP", String(fm_out[f0_idx].getCharge()) + "(" + String(fm_out[f0_idx].getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS)) + "):"
+ String(fm_out[f1_idx].getCharge()) + "(" + String(fm_out[f1_idx].getMetaValue(Constants::UserParam::DC_CHARGE_ADDUCTS)) + ") "
+ String("Score: ") + feature_relation[i].getEdgeScore());
cf.setUniqueId();
// print pairs only
cons_map_p_neg.push_back(cf);
cons_map_p_neg.getColumnHeaders()[0].size = fm_out.size();
cons_map_p_neg.getColumnHeaders()[0].label = "charged features pairs (inactive)";
#endif
}
} // !for feature_relation (i.e. edges)
// DEBUG
#ifdef DC_DEVEL
// todo?!: CM has no file descriptions (channels) set
ConsensusXMLFile cf_neg;
cons_map_p_neg.ensureUniqueId();
cf_neg.store("dc_pairs_neg.consensusXML", cons_map_p_neg);
// DEBUG print scores
TextFile tf;
tf.push_back("scr = c(" + scores.concatenate(", ") + ")");
tf.push_back("s_ia_idx = c(" + scores_e_inactive_idx.concatenate(", ") + ")+1");
tf.push_back("s_a_idx = c(" + scores_e_active_idx.concatenate(", ") + ")+1");
tf.push_back("s_a_idx_clean = c(" + scores_clean_edge_idx.concatenate(", ") + ")+1");
tf.push_back("s_a_idx_dirty = c(" + scores_dirty_edge_idx.concatenate(", ") + ")+1");
tf.push_back("plot( density(scr[s_ia_idx]), xlim=range( scr ), main=" ", xlab=" " )");
tf.push_back("lines(density(scr[s_a_idx_dirty]), col=2)");
tf.push_back("lines(density(scr[s_a_idx_clean]), col=3)");
tf.push_back("legend(x=\"topright\",c(\"dead\", \"active_dirty\", \"active_clean\"), text.col=c(1,2,3))");
tf.store("plot_scores.r");
#endif
// remove empty ConsensusFeatures from map
ConsensusMap cons_map_tmp(cons_map);
cons_map_tmp.clear(false); // keep other meta information (like ProteinIDs & Map)
for (ConsensusMap::Iterator it = cons_map.begin(); it != cons_map.end(); ++it)
{
// skip if empty
if (it->getFeatures().empty())
continue;
// skip if no backbone
Size backbone_count = 0;
ConsensusFeature::HandleSetType hst = it->getFeatures();
for (ConsensusFeature::HandleSetType::const_iterator it_h = hst.begin(); it_h != hst.end(); ++it_h) //** check if feature in CF has backbone
{
backbone_count += (Size)fm_out[fm_out.uniqueIdToIndex(it_h->getUniqueId())].getMetaValue("is_backbone");
}
if (backbone_count == 0)
{
for (ConsensusFeature::HandleSetType::const_iterator it_h = hst.begin(); it_h != hst.end(); ++it_h) //** remove cluster members from registry (they will become single features)
{
clique_register.erase(fm_out.uniqueIdToIndex(it_h->getUniqueId()));
}
continue;
}
// store number of distinct charges
std::set<Int> charges;
for (ConsensusFeature::HandleSetType::const_iterator it_h = hst.begin(); it_h != hst.end(); ++it_h)
{
charges.insert(it_h->getCharge());
}
IntList i_charges;
for (std::set<Int>::const_iterator it_q = charges.begin(); it_q != charges.end(); ++it_q)
{
i_charges.push_back(*it_q);
}
it->setMetaValue("distinct_charges", i_charges);
it->setMetaValue("distinct_charges_size", i_charges.size());
cons_map_tmp.push_back(*it);
// set a centroid
cons_map_tmp.back().computeDechargeConsensus(fm_out);
cons_map_tmp.back().setMetaValue("pure_proton_features", backbone_count);
}
cons_map_tmp.swap(cons_map);
// Warning: from here on cons_map indices have changes --> clique_register[]'s values are not reliable any longer (keys are still good)
// include single features without a buddy!
Size singletons_count = 0;
for (Size i = 0; i < fm_out.size(); ++i)
{
// find the index of the ConsensusFeature for the current feature
if (clique_register.count(i) > 0)
continue;
Feature f_single = fm_out_untouched[i];
f_single.setMetaValue("is_single_feature", 1);
f_single.setMetaValue("charge", f_single.getCharge());
fm_out[i] = f_single; // overwrite whatever DC has done to this feature!
ConsensusFeature cf(f_single);
cf.setQuality(0.0);
cf.setUniqueId();
cf.insert(0, f_single);
cons_map.push_back(cf);
cons_map.back().computeDechargeConsensus(fm_out_untouched);
++singletons_count;
}
OPENMS_LOG_INFO << "Single features without charge ladder: " << singletons_count << " of " << fm_out.size() << "\n";
// fill the header
//cons_map.getColumnHeaders()[0].filename = "TODO - take from FeatureMAP.getLoadedFilePath () ";
for (Size i = 0; i < map_label_.size(); ++i)
{
cons_map.getColumnHeaders()[i].size = fm_out.size();
cons_map.getColumnHeaders()[i].label = map_label_[i];
cons_map_p.getColumnHeaders()[i].size = fm_out.size();
cons_map_p.getColumnHeaders()[i].label = map_label_[i];
}
#ifdef DC_DEVEL
ChargeLadder cl;
FeatureMap fm_missing;
cl.suggestMissingFeatures(fm_out, cons_map, fm_missing);
FileHandler.storeFeatures("fm_missing.featureXML", fm_missing);
#endif
cons_map_p.applyMemberFunction(&UniqueIdInterface::ensureUniqueId);
cons_map.applyMemberFunction(&UniqueIdInterface::ensureUniqueId);
/* post processing for eventual parameter optimization */
checkSolution_(cons_map);
return;
}
//Not verified/completely adapted for negative mode -> disable there
/// test if "simple" edges have alternative
/// (more difficult explanation) supported by neighboring edges
/// e.g. (.) -> (H+) might be augmented to
/// (Na+) -> (H+Na+)
void FeatureDeconvolution::inferMoreEdges_(PairsType& edges, std::map<Size, std::set<CmpInfo_> >& feature_adducts)
{
  // The charge-carrying default adduct: deprotonation (H-1) in negative mode,
  // protonation (H+) otherwise. Its log-probability is neutral (log(1.0) == 0).
  Adduct default_adduct;
  bool is_neg = (param_.getValue("negative_mode") == "true" ? true : false);
  if (is_neg)
  {
    default_adduct = Adduct(-1, 1, -Constants::PROTON_MASS_U, "H-1", log(1.0),0);
  }
  else
  {
    default_adduct = Adduct(1, 1, Constants::PROTON_MASS_U, "H1", log(1.0), 0);
  }
  int left_charges, right_charges;
  // Iterate over the ORIGINAL edges only: inferred edges are appended behind
  // edges_size and are not themselves expanded again. Indexing (not iterators)
  // is used, so the push_back() below cannot invalidate the loop.
  Size edges_size = edges.size();
  for (Size i = 0; i < edges_size; ++i)
  {
    Size idx0 = edges[i].getElementIndex(0);
    Size idx1 = edges[i].getElementIndex(1);
    std::set<CmpInfo_> result;
    // look at the intersection of the two adjacent features
    // (we thus require the non-H adduct to be present on both sides of the edge,
    // if one is deemed enough just change to union)
    std::set_intersection(feature_adducts[idx0].begin(), feature_adducts[idx0].end(),
                          feature_adducts[idx1].begin(), feature_adducts[idx1].end(),
                          std::inserter(result, result.begin()));
    std::set<CmpInfo_>::iterator result_it = result.begin();
    // add new edge with each adduct of the intersection
    while (result_it != result.end())
    {
      // adduct set of the referenced compomer side, with the default adduct stripped
      Compomer::CompomerSide to_add = edges[result_it->idx_cp].getCompomer().removeAdduct(default_adduct).getComponent()[result_it->side_cp];
      // we currently do not punish additional two-side adducts
      for (Compomer::CompomerSide::iterator it = to_add.begin(); it != to_add.end(); ++it)
      {
        it->second.setLogProb(0);
      }
      ChargePair cp(edges[i]); // make a copy
      // augment BOTH sides of the compomer with the inferred adducts
      Compomer new_cmp = cp.getCompomer().removeAdduct(default_adduct);
      new_cmp.add(to_add, Compomer::LEFT);
      new_cmp.add(to_add, Compomer::RIGHT);
      //We again need to consider inverted behavior (but cp.getCharge(x) gets negative charges as assigned before!
      if (is_neg)
      {
        left_charges = -new_cmp.getPositiveCharges();
        right_charges = -new_cmp.getNegativeCharges();
      }
      else
      {
        left_charges = new_cmp.getNegativeCharges();
        right_charges = new_cmp.getPositiveCharges();
      }
      // refill with default adducts (usually H+):
      if (((cp.getCharge(0) - left_charges) % default_adduct.getCharge() == 0) &&
          ((cp.getCharge(1) - right_charges) % default_adduct.getCharge() == 0)) // for singly charged default_adducts this should always be true
      {
        // number of default adducts needed to restore the original feature charges
        int hc_left, hc_right;
        hc_left = (cp.getCharge(0) - left_charges) / default_adduct.getCharge();
        hc_right = (cp.getCharge(1) - right_charges) / default_adduct.getCharge();
        // we have not stepped over the charge capacity of the features
        if (hc_left >= 0 && hc_right >= 0)
        {
          // fill up with defaults:
          if (hc_left > 0)
            new_cmp.add(default_adduct * hc_left, Compomer::LEFT);
          if (hc_right > 0)
            new_cmp.add(default_adduct * hc_right, Compomer::RIGHT);
          // charge constraints of feature still fulfilled? (taking ionization mode into account)
          int left_charge, right_charge;
          if (is_neg)
          {
            left_charge = -new_cmp.getPositiveCharges();
            right_charge = -new_cmp.getNegativeCharges();
          }
          else
          {
            left_charge = new_cmp.getNegativeCharges();
            right_charge = new_cmp.getPositiveCharges();
          }
          if ((left_charge == cp.getCharge(0)) &&
              (right_charge == cp.getCharge(1)))
          {
            // consistent: register the augmented edge
            cp.setCompomer(new_cmp);
            cp.setEdgeScore(0.99); //TODO how to score this new edge?
            edges.push_back(cp); // add edge
          }
          else
          {
            throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "FeatureDeconvolution::inferMoreEdges_(): Inferred edges with wrong(switched?) charges! Left neg_charge, left feature charge, right pos_charge, right feature charge", String(new_cmp.getNegativeCharges())+","+String(cp.getCharge(0))+","+String(new_cmp.getPositiveCharges())+","+String(cp.getCharge(1)));
          }
        }
      }
      else // have nonzero modulo.SHOULD NOT HAPPEN FOR DEFAULT CHARGE 1/-1 !!
      {
        throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "FeatureDeconvolution::inferMoreEdges_(): Modulo returns leftover charge!", String(new_cmp.getNegativeCharges()));
      }
      ++result_it;
    }
  } // edge for
  OPENMS_LOG_INFO << "Inferring edges raised edge count from " << edges_size << " to " << edges.size() << "\n";
}
void FeatureDeconvolution::printEdgesOfConnectedFeatures_(Size idx_1, Size idx_2, const PairsType& feature_relation)
{
  // Debug helper: dump every edge that connects the two given feature indices
  // (in either orientation), together with its compomer, index and score.
  std::cout << " +++++ printEdgesOfConnectedFeatures_ +++++\n";
  for (Size edge = 0; edge < feature_relation.size(); ++edge)
  {
    const Size e0 = feature_relation[edge].getElementIndex(0);
    const Size e1 = feature_relation[edge].getElementIndex(1);
    const bool connects = ((e0 == idx_1) && (e1 == idx_2)) || ((e0 == idx_2) && (e1 == idx_1));
    if (!connects)
    {
      continue;
    }
    std::cout << feature_relation[edge].getCompomer() << " Edge: " << edge << " score: " << feature_relation[edge].getEdgeScore() << "\n";
  }
  std::cout << " ----- printEdgesOfConnectedFeatures_ -----\n";
}
inline bool FeatureDeconvolution::intensityFilterPassed_(const Int q1, const Int q2, const Compomer& cmp, const Feature& f1, const Feature& f2) const
{
  // Intensity sanity filter: for equally charged partners, the feature carrying
  // the less probable adduct combination (lower log-probability) is expected to
  // be the less intense one. Edges violating this ordering are rejected.
  if (!enable_intensity_filter_)
  {
    return true;
  }
  if (q1 != q2)
  {
    return true; // ordering is only comparable for equal charges
  }
  // build one-sided compomers to obtain comparable log-probabilities
  Compomer cl;
  cl.add(cmp.getComponent()[0], Compomer::LEFT);
  Compomer cr;
  cr.add(cmp.getComponent()[1], Compomer::LEFT);
  const bool prob_not_above = (cl.getLogP() <= cr.getLogP());
  const bool prob_not_below = (cl.getLogP() >= cr.getLogP());
  const bool int_not_above = (f1.getIntensity() <= f2.getIntensity());
  const bool int_not_below = (f1.getIntensity() >= f2.getIntensity());
  if ((prob_not_above && int_not_above) || (prob_not_below && int_not_below))
  {
    return true;
  }
  // forbid this edge?!
  std::cout << "intensity constraint: edge with intensity " << f1.getIntensity() << "(" << cmp.getAdductsAsString(Compomer::LEFT) << ") and " << f2.getIntensity() << "(" << cmp.getAdductsAsString(Compomer::RIGHT) << ") deleted\n";
  return false;
}
bool FeatureDeconvolution::chargeTestworthy_(const Int feature_charge, const Int putative_charge, const bool other_unchanged) const
{
  // Decide whether a putative charge is worth testing against a feature's
  // detected charge, depending on the configured charge-discovery mode.
  // A sign flip between detected and putative charge would mean the ionization
  // mode switched within one run -- treat as a fatal inconsistency.
  if (feature_charge * putative_charge < 0)
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("feature charge and putative charge switch charge direction!"), String(feature_charge)+" "+String(putative_charge));
  }
  // A feature without detected charge (0) is compatible with anything, as is
  // the "try all charges" mode.
  if ((feature_charge == 0) || (q_try_ == CHARGEMODE::QALL))
  {
    return true;
  }
  if (q_try_ == CHARGEMODE::QHEURISTIC)
  {
    // at most one endpoint of an edge may deviate from its detected charge
    if (!other_unchanged && feature_charge != putative_charge)
    {
      return false;
    }
    // deviations of up to two charge states are acceptable ...
    if (abs(feature_charge - putative_charge) <= 2)
    {
      return true;
    }
    // ... as are small integer multiples (x2, x3) in either direction
    const bool is_multiple = (feature_charge * 2 == putative_charge) || (feature_charge * 3 == putative_charge)
                             || (feature_charge == putative_charge * 2) || (feature_charge == putative_charge * 3);
    return is_multiple;
  }
  if (q_try_ == CHARGEMODE::QFROMFEATURE)
  {
    // strict mode: only the detected charge itself may be tested
    return feature_charge == putative_charge;
  }
  throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "q_try_ has unhandled enum value!", String((Int)q_try_));
}
void FeatureDeconvolution::checkSolution_(const ConsensusMap& cons_map) const
{
  // Post-hoc plausibility check of the decharging result: count charge ladders
  // (consensus features with more than one member) and how many of them contain
  // at least one odd charge state. Ladders consisting of even charges only are
  // "gapped" and hint at a charge search interval whose lower bound is too high.
  Size ladders_total(0);
  Size ladders_with_odd(0);
  for (const ConsensusFeature& cfeature : cons_map)
  {
    if (cfeature.size() == 1)
      continue; // singletons are not charge ladders
    ++ladders_total;
    IntList charges = cfeature.getMetaValue("distinct_charges");
    for (Size i = 0; i < charges.size(); ++i)
    {
      // fixed: use '% 2 != 0' instead of '% 2 == 1' so that odd NEGATIVE
      // charges (whose modulo is -1 in C++) are recognized in negative mode
      if (charges[i] % 2 != 0)
      {
        ++ladders_with_odd;
        break;
      }
    }
  }
  // if more than 5% of charge ladders have only gapped (even) charges, report
  if (ladders_with_odd < ladders_total * 0.95)
  {
    // fixed: the two message parts previously concatenated without a separator
    OPENMS_LOG_WARN << ".\n..\nWarning: a significant portion of your decharged molecules have gapped, even-numbered charge ladders (" << ladders_total - ladders_with_odd << " of " << ladders_total << ").\n";
    OPENMS_LOG_WARN << "This might indicate a too low charge interval being tested.\n..\n.\n";
  }
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/ANALYSIS/ID/PercolatorFeatureSetHelper.cpp | .cpp | 36,081 | 751 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Mathias Walzer $
// $Authors: Mathias Walzer, Matthew The $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/PercolatorFeatureSetHelper.h>
#include <OpenMS/config.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <boost/lexical_cast.hpp>
using namespace std;
namespace OpenMS
{
void PercolatorFeatureSetHelper::addMSGFFeatures(PeptideIdentificationList& peptide_ids, StringList& feature_set)
{
  // Register the core MS-GF+ scores as Percolator features. MS-GF+ does not
  // always write all of them, so any missing score is initialized with 0.0 on
  // every hit to guarantee a complete feature matrix.
  const char* const msgf_scores[] = {"MS:1002049",  // MS-GF:RawScore
                                     "MS:1002050",  // MS-GF:DeNovoScore
                                     "MS:1002052",  // MS-GF:SpecEValue
                                     "MS:1002053"}; // MS-GF:EValue
  for (const char* score : msgf_scores)
  {
    feature_set.push_back(score);
  }
  feature_set.push_back(Constants::UserParam::ISOTOPE_ERROR);
  for (auto& pep_id : peptide_ids)
  {
    for (auto& hit : pep_id.getHits())
    {
      // fill in a neutral default for every score the engine did not report
      for (const char* score : msgf_scores)
      {
        if (!hit.metaValueExists(score))
        {
          hit.setMetaValue(score, 0.0);
        }
      }
    }
  }
}
void PercolatorFeatureSetHelper::addXTANDEMFeatures(PeptideIdentificationList& peptide_ids, StringList& feature_set)
{
  //TODO annotate isotope error in Adapter and add here as well.
  // Determine which ion series X!Tandem actually reported -- probed on the
  // first hit of the first identification -- and register only those series
  // as fractional-ion features.
  const StringList ion_types = ListUtils::create<String>("a,b,c,x,y,z");
  // predicate: the probe hit carries both score and ion count for this series
  auto first_hit_has_series = [&peptide_ids](const String& ion) -> bool
  {
    return !peptide_ids.front().getHits().front().getMetaValue(ion + "_score").toString().empty() &&
           !peptide_ids.front().getHits().front().getMetaValue(ion + "_ions").toString().empty();
  };
  StringList ion_types_found;
  for (const String& ion : ion_types)
  {
    if (first_hit_has_series(ion))
    {
      feature_set.push_back("XTANDEM:frac_ion_" + ion);
      ion_types_found.push_back(ion);
    }
  }
  feature_set.push_back("XTANDEM:deltascore");
  for (auto& pep_id : peptide_ids)
  {
    PeptideHit& top_hit = pep_id.getHits().front();
    // delta between the best hyperscore and the runner-up ("nextscore")
    const double hyper_score = top_hit.getScore();
    const double delta_score = hyper_score - top_hit.getMetaValue("nextscore").toString().toDouble();
    top_hit.setMetaValue("XTANDEM:deltascore", delta_score);
    const String sequence = top_hit.getSequence().toUnmodifiedString();
    const int length = sequence.length();
    // fill in the fractional ion features for each reported series
    for (const String& ion : ion_types_found)
    {
      if (first_hit_has_series(ion)) // kept from original: re-probes the first identification
      {
        // matched ion count normalized by peptide length
        const double ion_score = top_hit.getMetaValue(ion + "_ions").toString().toDouble() / length;
        top_hit.setMetaValue("XTANDEM:frac_ion_" + ion, ion_score);
      }
    }
  }
}
void PercolatorFeatureSetHelper::addMSFRAGGERFeatures(StringList& feature_set)
{
  // MSFragger reports a fixed score set; simply register it as features.
  for (const char* feature : {"MS:1001330", // expect_score
                              "hyperscore",
                              "nextscore"})
  {
    feature_set.push_back(feature);
  }
  feature_set.push_back(Constants::UserParam::ISOTOPE_ERROR);
}
void PercolatorFeatureSetHelper::addCOMETFeatures(PeptideIdentificationList& peptide_ids, StringList& feature_set)
{
  // Register Comet-specific Percolator features and compute any that are
  // missing on the individual hits from Comet's raw CV-term meta values.
  feature_set.push_back(Constants::UserParam::ISOTOPE_ERROR);
  feature_set.push_back("COMET:deltaCn"); // recalculated deltaCn = (current_XCorr - 2nd_best_XCorr) / max(current_XCorr, 1)
  feature_set.push_back("COMET:deltaLCn"); // deltaLCn = (current_XCorr - worst_XCorr) / max(current_XCorr, 1)
  feature_set.push_back("COMET:lnExpect"); // log(E-value)
  feature_set.push_back("MS:1002252"); // unchanged XCorr
  feature_set.push_back("MS:1002255"); // unchanged Sp = number of candidate peptides
  feature_set.push_back("COMET:lnNumSP"); // log(number of candidate peptides)
  feature_set.push_back("COMET:lnRankSP"); // log(rank based on Sp score)
  feature_set.push_back("COMET:IonFrac"); // matched_ions / total_ions
  for (PeptideIdentificationList::iterator it = peptide_ids.begin(); it != peptide_ids.end(); ++it)
  {
    // First pass over this spectrum's hits: collect the runner-up and the last
    // XCorr. NOTE(review): worst_xcorr is overwritten every iteration, i.e. it
    // ends up as the XCorr of the LAST hit, and second_xcorr is that of the hit
    // at position 1 -- this assumes hits are ordered best-first; confirm
    // callers sort before invoking this.
    double worst_xcorr = 0, second_xcorr = 0;
    Int cnt = 0;
    for (vector<PeptideHit>::iterator hit = it->getHits().begin(); hit != it->getHits().end(); ++hit)
    {
      double xcorr = hit->getMetaValue("MS:1002252").toString().toDouble();
      worst_xcorr = xcorr;
      if (cnt == 1) { second_xcorr = xcorr; }
      ++cnt;
    }
    // Second pass: derive the remaining features per hit, each only if not
    // already present on the hit (except lnExpect, which is always set).
    for (vector<PeptideHit>::iterator hit = it->getHits().begin(); hit != it->getHits().end(); ++hit)
    {
      double xcorr = hit->getMetaValue("MS:1002252").toString().toDouble();
      if (!hit->metaValueExists("COMET:deltaCn"))
      {
        // distance to the second-best XCorr, normalized by the own XCorr (>= 1)
        double delta_cn = (xcorr - second_xcorr) / max(1.0, xcorr);
        hit->setMetaValue("COMET:deltaCn", delta_cn);
      }
      if (!hit->metaValueExists("COMET:deltaLCn"))
      {
        // distance to the worst XCorr, normalized likewise
        double delta_last_cn = (xcorr - worst_xcorr) / max(1.0, xcorr);
        hit->setMetaValue("COMET:deltaLCn", delta_last_cn);
      }
      // log of the Comet expectation value (MS:1002257)
      double ln_expect = log(hit->getMetaValue("MS:1002257").toString().toDouble());
      hit->setMetaValue("COMET:lnExpect", ln_expect);
      if (!hit->metaValueExists("COMET:lnNumSP"))
      {
        double ln_num_sp;
        if (hit->metaValueExists("num_matched_peptides"))
        {
          double num_sp = hit->getMetaValue("num_matched_peptides").toString().toDouble();
          ln_num_sp = log(max(1.0, num_sp)); // if recorded, one can be safely assumed
        }
        else // fallback TODO: remove?
        {
          // NOTE(review): this fallback stores the raw MS:1002255 value WITHOUT
          // taking log() -- inconsistent with the branch above and with the
          // feature's name; confirm whether this is intended.
          ln_num_sp = hit->getMetaValue("MS:1002255").toString().toDouble();
        }
        hit->setMetaValue("COMET:lnNumSP", ln_num_sp);
      }
      if (!hit->metaValueExists("COMET:lnRankSP"))
      {
        // log of the Sp rank (MS:1002256), clamped to >= 1 before the log
        double ln_rank_sp = log(max(1.0, hit->getMetaValue("MS:1002256").toString().toDouble()));
        hit->setMetaValue("COMET:lnRankSP", ln_rank_sp);
      }
      if (!hit->metaValueExists("COMET:IonFrac"))
      {
        // fraction of matched fragment ions (MS:1002258 / MS:1002259)
        double num_matched_ions = hit->getMetaValue("MS:1002258").toString().toDouble();
        double num_total_ions = hit->getMetaValue("MS:1002259").toString().toDouble();
        double ion_frac = num_matched_ions / num_total_ions;
        hit->setMetaValue("COMET:IonFrac", ion_frac);
      }
    }
  }
}
/**
Features 1-9 Represent the Basic Feature Set
feature abbreviation feature description
1. mass Calculated monoisotopic mass of the identified peptide. Present as generic feature.
2. charge Precursor ion charge. Present as generic feature.
3. mScore Mascot score. Added in this function.
4. dScore Mascot score minus Mascot score of next best non isobaric peptide hit. Added in this function.
5. deltaM Calculated minus observed peptide mass (in Dalton and ppm). Present as generic feature.
6. absDeltaM Absolute value of calculated minus observed peptide mass (in Dalton and ppm). Present as generic feature.
7. isoDeltaM Calculated minus observed peptide mass, isotope error corrected (in Dalton and ppm)
8. uniquePeps None (0), one (1), two or more (2) distinct peptide sequences match the same protein. Added in this function.
9. mc Missed tryptic cleavages. Present as generic feature.
Features 10-18 Represent the Extended Feature Set As Used in Mascot Percolator
feature abbreviation feature description
10. totInt Total ion intensity (log). Not available in mascot adapter.
11. intMatchedTot Total matched ion intensity (log). Not available in mascot adapter.
12. relIntMatchedTot Total matched ion intensity divided by total ion intensity. Not available in mascot adapter.
13. binom Peptide Score as described in ref 28. Not available in mascot adapter.
14. fragMassError Mean fragment mass error (in Dalton and ppm). Not available in mascot adapter.
15. absFragMassError Mean absolute fragment mass error (in Dalton and ppm). Not available in mascot adapter.
16. fracIonsMatched Fraction of calculated ions matched (per ion series). Not available in mascot adapter.
17. seqCov Sequence coverage of matched ions (per ion series). Not available in mascot adapter.
18. intMatched Matched ion intensity (per ion series). Not available in mascot adapter.
*/
void PercolatorFeatureSetHelper::addMASCOTFeatures(PeptideIdentificationList& peptide_ids, StringList& feature_set)
{
  // Register Mascot-specific Percolator features and compute them per hit:
  // the raw mScore (MS:1001171), the delta to the next best mScore, and a
  // boolean flag whether the peptide carries a modification.
  feature_set.push_back("MS:1001171"); // unchanged mScore
  feature_set.push_back("MASCOT:delta_score"); // delta score based on mScore
  feature_set.push_back("MASCOT:hasMod"); // bool: has post translational modification
  for (PeptideIdentificationList::iterator it = peptide_ids.begin(); it != peptide_ids.end(); ++it)
  {
    it->sort();
    // BUGFIX: operate on the hits in place (reference). The previous code
    // copied the hit vector, annotated the copy and then discarded it, so
    // neither MASCOT:delta_score nor MASCOT:hasMod ever reached the output.
    // (Matches the in-place style of addCONCATSEFeatures/addCOMETFeatures.)
    std::vector<PeptideHit>& hits = it->getHits();
    assignDeltaScore_(hits, "MS:1001171", "MASCOT:delta_score");
    for (vector<PeptideHit>::iterator hit = hits.begin(); hit != hits.end(); ++hit)
    {
      bool has_mod = hit->getSequence().isModified();
      hit->setMetaValue("MASCOT:hasMod", has_mod);
    }
  }
}
void PercolatorFeatureSetHelper::addCONCATSEFeatures(PeptideIdentificationList& peptide_ids, StringList& search_engines_used, StringList& feature_set)
{
  // One indicator feature per contributing search engine, plus the shared
  // ln(E-value) and its delta score (values were set in concatMULTISEids).
  for (const String& engine : search_engines_used)
  {
    feature_set.push_back("CONCAT:" + engine);
  }
  OPENMS_LOG_INFO << "Using " << ListUtils::concatenate(search_engines_used, ", ") << " as source for search engine specific features." << endl;
  feature_set.push_back("CONCAT:lnEvalue");
  feature_set.push_back("CONCAT:deltaLnEvalue");
  // feature values have been set in concatMULTISEids
  for (auto& pep_id : peptide_ids)
  {
    pep_id.sort();
    assignDeltaScore_(pep_id.getHits(), "CONCAT:lnEvalue", "CONCAT:deltaLnEvalue");
  }
}
void PercolatorFeatureSetHelper::mergeMULTISEPeptideIds(PeptideIdentificationList& all_peptide_ids, PeptideIdentificationList& new_peptide_ids, const String& search_engine)
{
  // Merge the PSMs of one additional search engine (new_peptide_ids) into the
  // accumulated multi-search-engine set (all_peptide_ids). Identifications of
  // the same spectrum are unified; each hit's score becomes the number of
  // search engines that identified that peptide, while every engine's native
  // score is preserved as a meta value under its PSI-MS CV accession.
  OPENMS_LOG_DEBUG << "creating spectrum map" << endl;
  // key: spectrum merge key (see getScanMergeKey_), value: merged identification
  std::map<String,PeptideIdentification> unified;
  //setup map of merge characteristics per spectrum
  for (PeptideIdentificationList::iterator pit = all_peptide_ids.begin(); pit != all_peptide_ids.end(); ++pit)
  {
    PeptideIdentification ins = *pit;
    ins.setScoreType("multiple");
    ins.setIdentifier("TopPerc_multiple_SE_input");
    String spectrum_reference = getScanMergeKey_(pit, all_peptide_ids.begin());
    unified[spectrum_reference] = ins;
  }
  OPENMS_LOG_DEBUG << "filled spectrum map with previously observed PSM: " << unified.size() << endl;
  int nc = 0; // count of spectra newly added to the unified map
  int mc = 0; // count of hits merged into an already-present spectrum
  OPENMS_LOG_DEBUG << "About to merge in:" << new_peptide_ids.size() << "PSMs." << endl;
  for (PeptideIdentificationList::iterator pit = new_peptide_ids.begin(); pit != new_peptide_ids.end(); ++pit)
  {
    PeptideIdentification ins = *pit;
    String st = pit->getScoreType();
    //prepare for merge
    for (vector<PeptideHit>::iterator hit = ins.getHits().begin(); hit != ins.getHits().end(); ++hit)
    {
      // keep the hit score as meta value
      // map the engine's score-type name to the corresponding PSI-MS CV accession
      if (st == "MS-GF:RawScore")
      {
        st = "MS:1002049";
      }
      else if (st == "XTandem")
      {
        st = "MS:1001331";
      }
      else if (st == "Mascot")
      {
        st = "MS:1001171";
      }
      else if ((st == "expect" && search_engine == "Comet" )|| st == "Comet")
      {
        st = "MS:1002257";
      }
      if (!hit->metaValueExists(st))
      {
        hit->setMetaValue(st, hit->getScore());
      }
      hit->setScore(1); // new 'multiple' score is just the number of times identified by different SE
      // rename ambiguous meta value names to PSI cv term ids
      if (search_engine == "MS-GF+" && hit->metaValueExists("EValue")) // MS-GF should have all values as PSI cv terms available anyway
      {
        hit->setMetaValue("MS:1002053", hit->getMetaValue("EValue"));
      }
      if (search_engine == "Mascot" && hit->metaValueExists("EValue"))
      {
        hit->setMetaValue("MS:1001172", hit->getMetaValue("EValue"));
      }
      if (search_engine == "Comet" && hit->metaValueExists("xcorr"))
      {
        hit->setMetaValue("MS:1002252", hit->getMetaValue("xcorr"));
      }
      if (search_engine == "XTandem" && hit->metaValueExists("E-Value"))
      {
        hit->setMetaValue("MS:1001330", hit->getMetaValue("E-Value"));
      }
    }
    ins.setScoreType("multiple");
    ins.setIdentifier("TopPerc_multiple_SE_input");
    String spectrum_reference = getScanMergeKey_(pit, new_peptide_ids.begin());
    //merge in unified map
    // insert newly identified spectra (PeptideIdentifications) or ..
    if (unified.find(spectrum_reference) == unified.end())
    {
      unified[spectrum_reference] = ins;
      ++nc;
    }
    // .. add PSMs to already identified spectrum
    else
    {
      //find corresponding hit (i.e. sequences must match)
      // NOTE(review): hits of 'ins' whose sequence is not yet present in the
      // unified identification are dropped by this matching -- confirm intended.
      for (vector<PeptideHit>::iterator hit = ins.getHits().begin(); hit != ins.getHits().end(); ++hit)
      {
        for (vector<PeptideHit>::iterator merger = unified[spectrum_reference].getHits().begin(); merger != unified[spectrum_reference].getHits().end(); ++merger)
        {
          if (hit->getSequence()==merger->getSequence())
          {
            //care for peptide evidences!! set would be okay if checked for same search db in parameters,
            // vector<PeptideEvidence> pev;
            // pev.reserve(max(hit->getPeptideEvidences().size(),merger->getPeptideEvidences().size()));
            // std::vector<ProteinHit>::iterator uni;
            // std::sort(merger->getPeptideEvidences().begin(),merger->getPeptideEvidences().end(), TopPerc::lq_PeptideEvidence);
            // std::sort(hit->getPeptideEvidences().begin(),hit->getPeptideEvidences().end(), TopPerc::lq_PeptideEvidence);
            // uni = std::set_union(swop.front().getHits().begin(), swop.front().getHits().end(),
            //                      it->front().getHits().begin(),it->front().getHits().end(), pev.begin(),
            //                      TopPerc::lq_PeptideEvidence);
            // pev.resize(uni-pev.begin());
            // merger->setPeptideEvidences(pev);
            //There is no mutable getPeptideEvidences() accessor in PeptideHit - above will not work, but so long:
            //Implying PeptideIndexer was applied (with the same search db each) will care for that all PeptideEvidences from two hits with equal AASequence are the same
            //merge meta values
            // copy over meta values the existing hit does not have yet
            StringList keys;
            hit->getKeys(keys);
            for (StringList::const_iterator kt = keys.begin(); kt != keys.end(); ++kt)
            {
              if (!merger->metaValueExists(*kt))
              {
                merger->setMetaValue(*kt, hit->getMetaValue(*kt));
              }
            }
            // adds up the number of hits, as the score of each separate (new) hit is 1
            merger->setScore(merger->getScore() + hit->getScore());
            ++mc;
            break;
          }
        }
      }
    }
  }
  OPENMS_LOG_DEBUG << "filled spectrum map" << endl;
  // flush the unified map back into a vector and hand it to the caller
  PeptideIdentificationList swip;
  swip.reserve(unified.size());
  OPENMS_LOG_DEBUG << "merging spectrum map" << endl;
  for (std::map<String,PeptideIdentification>::iterator it = unified.begin(); it != unified.end(); ++it)
  {
    swip.push_back(it->second);
  }
  all_peptide_ids.swap(swip);
  OPENMS_LOG_DEBUG << "Now containing " << all_peptide_ids.size() << " spectra identifications, new: " << nc << " merged in: " << mc << endl;
}
// references from PeptideHits to ProteinHits work with the protein accessions, so no need to update the PeptideHits
void PercolatorFeatureSetHelper::mergeMULTISEProteinIds(vector<ProteinIdentification>& all_protein_ids, vector<ProteinIdentification>& new_protein_ids)
{
  // Merges the ProteinIdentification run of one search engine (SE) into the single
  // accumulated multi-SE run in all_protein_ids:
  //  - creates the target run on first call, adopting the new run's search parameters
  //  - unions the ProteinHits (duplicates removed via lq_ProteinHit ordering)
  //  - stashes the SE-specific search parameters as "SE:<name>"/"<name>:*" meta values
  //  - concatenates the primary MS run paths
  OPENMS_LOG_DEBUG << "merging search parameters" << endl;
  String SE = new_protein_ids.front().getSearchEngine();
  if (all_protein_ids.empty())
  {
    // First SE seen: create the combined run with a fresh identifier.
    all_protein_ids.emplace_back();
    DateTime now = DateTime::now();
    String date_string = now.getDate();
    String identifier = "TopPerc_" + date_string;
    all_protein_ids.front().setDateTime(now);
    all_protein_ids.front().setIdentifier(identifier);
    all_protein_ids.front().setSearchEngine(SE);
    OPENMS_LOG_DEBUG << "Setting search engine to " << SE << endl;
    all_protein_ids.front().setSearchParameters(new_protein_ids.front().getSearchParameters());
  }
  else if (all_protein_ids.front().getSearchEngine() != SE)
  {
    // Runs from different SEs: the combined run no longer has a single engine.
    all_protein_ids.front().setSearchEngine("multiple");
  }
  std::vector<ProteinHit>& all_protein_hits = all_protein_ids.front().getHits();
  std::vector<ProteinHit>& new_protein_hits = new_protein_ids.front().getHits();
  OPENMS_LOG_DEBUG << "Sorting " << new_protein_hits.size() << " new ProteinHits." << endl;
  std::sort(new_protein_hits.begin(), new_protein_hits.end(), PercolatorFeatureSetHelper::lq_ProteinHit());
  OPENMS_LOG_DEBUG << "Melting with " << all_protein_hits.size() << " previous ProteinHits." << endl;
  if (all_protein_hits.empty())
  {
    // Nothing accumulated yet - just take over the sorted new hits.
    all_protein_hits.swap(new_protein_hits);
  }
  else
  {
    // set_union requires both inputs sorted; all_protein_hits stays sorted by construction.
    std::vector<ProteinHit> tmp_protein_hits(new_protein_hits.size() + all_protein_hits.size());
    std::vector<ProteinHit>::iterator uni = set_union(
      all_protein_hits.begin(), all_protein_hits.end(),
      new_protein_hits.begin(), new_protein_hits.end(), tmp_protein_hits.begin(),
      PercolatorFeatureSetHelper::lq_ProteinHit() );
    tmp_protein_hits.resize(uni - tmp_protein_hits.begin());
    all_protein_hits.swap(tmp_protein_hits);
  }
  OPENMS_LOG_DEBUG << "Done with next ProteinHits." << endl;
  StringList keys;
  all_protein_ids.front().getSearchParameters().getKeys(keys);
  if (!ListUtils::contains(keys, "SE:" + SE))
  {
    OPENMS_LOG_DEBUG << "Melting Parameters from " << SE << " into MetaInfo." << endl;
    // insert into MetaInfo as SE:param; sp is read-only, so bind by const reference
    const ProteinIdentification::SearchParameters& sp = new_protein_ids.front().getSearchParameters();
    ProteinIdentification::SearchParameters all_sp = all_protein_ids.front().getSearchParameters();
    all_sp.setMetaValue("SE:"+SE,new_protein_ids.front().getSearchEngineVersion());
    all_sp.setMetaValue(SE+":db",sp.db);
    all_sp.setMetaValue(SE+":db_version",sp.db_version);
    all_sp.setMetaValue(SE+":taxonomy",sp.taxonomy);
    all_sp.setMetaValue(SE+":charges",sp.charges);
    all_sp.setMetaValue(SE+":fixed_modifications",ListUtils::concatenate(sp.fixed_modifications, ","));
    all_sp.setMetaValue(SE+":variable_modifications",ListUtils::concatenate(sp.variable_modifications, ","));
    all_sp.setMetaValue(SE+":missed_cleavages",sp.missed_cleavages);
    all_sp.setMetaValue(SE+":fragment_mass_tolerance",sp.fragment_mass_tolerance);
    all_sp.setMetaValue(SE+":fragment_mass_tolerance_unit", sp.fragment_mass_tolerance_ppm ? "ppm" : "Da");
    all_sp.setMetaValue(SE+":precursor_mass_tolerance",sp.precursor_mass_tolerance);
    all_sp.setMetaValue(SE+":precursor_mass_tolerance_unit", sp.precursor_mass_tolerance_ppm ? "ppm" : "Da");
    all_sp.setMetaValue(SE+":digestion_enzyme",sp.digestion_enzyme.getName());
    all_sp.setMetaValue(SE+":enzyme_term_specificity",sp.enzyme_term_specificity);
    //TODO maybe add all the files in file origin that were searched with this SE. then we can do a lookup later
    // for every PepID based on its file_origin, with which SEs and settings it was identified.
    OPENMS_LOG_DEBUG << "Done with next Parameters." << endl;
    all_protein_ids.front().setSearchParameters(all_sp);
  }
  OPENMS_LOG_DEBUG << "Merging primaryMSRunPaths." << endl;
  try
  {
    StringList all_primary_ms_run_path;
    all_protein_ids.front().getPrimaryMSRunPath(all_primary_ms_run_path);
    StringList new_primary_ms_run_path;
    new_protein_ids.front().getPrimaryMSRunPath(new_primary_ms_run_path);
    all_primary_ms_run_path.insert(all_primary_ms_run_path.end(), new_primary_ms_run_path.begin(), new_primary_ms_run_path.end());
    all_protein_ids.front().setPrimaryMSRunPath(all_primary_ms_run_path);
    OPENMS_LOG_DEBUG << "New primary run paths: " << ListUtils::concatenate(new_primary_ms_run_path,",") << endl;
    OPENMS_LOG_DEBUG << "All primary run paths: " << ListUtils::concatenate(all_primary_ms_run_path,",") << endl;
  }
  catch (const Exception::BaseException& e)
  {
    // BUG FIX: the original constructed an Exception::ParseError here and
    // immediately discarded it (neither thrown nor logged), silently swallowing
    // the failure. Keep the best-effort behaviour (merging continues), but
    // surface the problem in the log instead of hiding it.
    OPENMS_LOG_WARN << "faulty primary MS run path: " << e.what() << endl;
  }
  OPENMS_LOG_DEBUG << "Merging for this file finished." << endl;
}
void PercolatorFeatureSetHelper::concatMULTISEPeptideIds(PeptideIdentificationList& all_peptide_ids, PeptideIdentificationList& new_peptide_ids, const String& search_engine)
{
  // Annotates each hit of new_peptide_ids with its search-engine specific main
  // score ("CONCAT:<SE>") and the natural logarithm of its e-value
  // ("CONCAT:lnEvalue"), then appends all new identifications to
  // all_peptide_ids (plain concatenation, no per-spectrum merging).
  for (PeptideIdentificationList::iterator pit = new_peptide_ids.begin(); pit != new_peptide_ids.end(); ++pit)
  {
    for (vector<PeptideHit>::iterator hit = pit->getHits().begin(); hit != pit->getHits().end(); ++hit)
    {
      double evalue = 1000.0; // pessimistic default if the SE is not recognized
      if (search_engine == "MS-GF+")
      {
        hit->setMetaValue("CONCAT:" + search_engine, hit->getMetaValue("MS:1002049")); // rawscore
        // BUG FIX: the e-value is MS:1002053 (MS-GF:EValue); the original read
        // MS:1002049 (the raw score) a second time despite the "evalue" comment.
        evalue = hit->getMetaValue("MS:1002053").toString().toDouble(); // evalue
      }
      if (search_engine == "Mascot")
      {
        hit->setMetaValue("CONCAT:" + search_engine, hit->getMetaValue("MS:1001171")); // mscore
        evalue = hit->getMetaValue("EValue").toString().toDouble();
      }
      if (search_engine == "Comet")
      {
        hit->setMetaValue("CONCAT:" + search_engine, hit->getMetaValue("MS:1002252")); // xcorr
        evalue = hit->getMetaValue("MS:1002257").toString().toDouble();
      }
      if (search_engine == "XTandem")
      {
        hit->setMetaValue("CONCAT:" + search_engine, hit->getMetaValue("XTandem_score")); // xtandem score
        evalue = hit->getMetaValue("E-Value").toString().toDouble();
      }
      // NOTE(review): evalue == 0 yields -inf here - presumably acceptable as a
      // feature value downstream; confirm Percolator handles it.
      hit->setMetaValue("CONCAT:lnEvalue", log(evalue)); // log(evalue)
    }
  }
  all_peptide_ids.insert(all_peptide_ids.end(), new_peptide_ids.begin(), new_peptide_ids.end());
}
void PercolatorFeatureSetHelper::addMULTISEFeatures(PeptideIdentificationList& peptide_ids, StringList& search_engines_used, StringList& feature_set, bool complete_only, bool limits_imputation)
{
  // Registers the per-search-engine Percolator feature names (PSI CV terms for
  // the main score and e-value of each SE in search_engines_used) in
  // feature_set, then makes every PSM 'complete' with respect to that set:
  //  - complete_only == true : PSMs missing any feature are removed
  //  - complete_only == false: missing values are imputed with the worst
  //    observed value per feature (or +/- FLT_MAX if limits_imputation is set)
  // Also sets "MULTI:numHits" per hit from its (merge-accumulated) score.
  map<String,vector<double> > extremals; // will have as keys the below SE cv terms
  vector<String> max_better, min_better;
  // This is the minimum set for each SE that should be available in all openms id files in one way or another
  if (ListUtils::contains(search_engines_used, "MS-GF+"))
  {
    feature_set.push_back("MS:1002049"); // rawscore
    feature_set.push_back("MS:1002053"); // evalue
    max_better.emplace_back("MS:1002049"); // higher is better - start high, get min
    min_better.emplace_back("MS:1002053"); // lower is better - start low, get max
  }
  if (ListUtils::contains(search_engines_used, "Mascot"))
  {
    feature_set.push_back("MS:1001171"); // score aka Mascot
    feature_set.push_back("MS:1001172"); // evalue aka EValue
    max_better.emplace_back("MS:1001171"); // higher is better - start high, get min
    min_better.emplace_back("MS:1001172"); // lower is better - start low, get max
  }
  if (ListUtils::contains(search_engines_used, "Comet"))
  {
    feature_set.push_back("MS:1002252"); // xcorr
    feature_set.push_back("MS:1002257"); // evalue
    max_better.emplace_back("MS:1002252"); // higher is better - start high, get min
    min_better.emplace_back("MS:1002257"); // lower is better - start low, get max
  }
  if (ListUtils::contains(search_engines_used, "XTandem"))
  {
    feature_set.push_back("MS:1001331"); // hyperscore aka XTandem
    feature_set.push_back("MS:1001330"); // evalue aka E-Value
    max_better.emplace_back("MS:1001331"); // higher is better - start high, get min
    min_better.emplace_back("MS:1001330"); // lower is better - start low, get max
  }
  //feature_set.push_back("MULTI:ionFrac");
  //feature_set.push_back("MULTI:numHits"); // this is not informative if we only keep PSMs with hits for all search engines
  OPENMS_LOG_INFO << "Using " << ListUtils::concatenate(search_engines_used, ", ") << " as source for search engine specific features." << endl;
  // get all the feature values (only needed for imputation, i.e. !complete_only)
  if (!complete_only)
  {
    for (PeptideIdentificationList::iterator it = peptide_ids.begin(); it != peptide_ids.end(); ++it)
    {
      for (vector<PeptideHit>::iterator hit = it->getHits().begin(); hit != it->getHits().end(); ++hit)
      {
        for (StringList::iterator feats = feature_set.begin(); feats != feature_set.end(); ++feats)
        {
          if (hit->metaValueExists(*feats))
          {
            // TODO raise issue: MS-GF raw score values are sometimes registered as string DataValues and henceforth casted defectively
            if (hit->getMetaValue(*feats).valueType() == DataValue::STRING_VALUE)
            {
              // Re-register string-typed values as numeric DataValues so the
              // double conversions below behave correctly.
              String recast = hit->getMetaValue(*feats);
              double d = boost::lexical_cast<double>(recast);
              OPENMS_LOG_DEBUG << "recast: "
                        << recast << " "
                        << double(hit->getMetaValue(*feats)) << "* ";
              hit->setMetaValue(*feats,d);
              OPENMS_LOG_DEBUG << hit->getMetaValue(*feats).valueType() << " "
                        << hit->getMetaValue(*feats)
                        << endl;
            }
            extremals[*feats].push_back(hit->getMetaValue(*feats));
          }
        }
      }
    }
    // TODO : add optional manual extremal values settings for 'data imputation' instead of min/max or numeric_limits value
    // Move the worst observed value of each feature to the front of its vector,
    // so extremals[feat].front() can be used as the imputation value below.
    for (vector<String>::iterator maxbt = max_better.begin(); maxbt != max_better.end(); ++maxbt)
    {
      map<String,vector<double> >::iterator fi = extremals.find(*maxbt);
      if (fi != extremals.end())
      {
        // higher-is-better: the minimum is the worst observed value
        vector<double>::iterator mymax = min_element(fi->second.begin(), fi->second.end());
        iter_swap(fi->second.begin(), mymax);
        if (limits_imputation)
        {
          fi->second.front() = -std::numeric_limits<float>::max();
        }
      }
    }
    for (vector<String>::iterator minbt = min_better.begin(); minbt != min_better.end(); ++minbt)
    {
      map<String,vector<double> >::iterator fi = extremals.find(*minbt);
      if (fi != extremals.end())
      {
        // lower-is-better: the maximum is the worst observed value
        vector<double>::iterator mymin = max_element(fi->second.begin(), fi->second.end());
        iter_swap(fi->second.begin(), mymin);
        if (limits_imputation)
        {
          fi->second.front() = std::numeric_limits<float>::max();
        }
      }
    }
  }
  size_t sum_removed = 0;       // PSMs removed in complete_only mode
  size_t imputed_values = 0;    // individual feature values imputed
  size_t observed_values = 0;   // individual feature values already present
  size_t complete_spectra = 0;
  size_t incomplete_spectra = 0;
  OPENMS_LOG_DEBUG << "Looking for minimum feature set:" << ListUtils::concatenate(feature_set, ", ") << "." << endl;
  for (PeptideIdentificationList::iterator pi = peptide_ids.begin(); pi != peptide_ids.end(); ++pi)
  {
    pi->sort();
    vector<vector<PeptideHit>::iterator> incompletes; // hits marked for removal (complete_only)
    size_t imputed_back = imputed_values; // snapshot to detect imputation within this spectrum
    for (vector<PeptideHit>::iterator hit = pi->getHits().begin(); hit != pi->getHits().end(); ++hit)
    {
      //double ion_frac = hit->getMetaValue("matched_intensity").toString().toDouble() / hit->getMetaValue("sum_intensity").toString().toDouble(); // also consider "matched_ion_number"/"peak_number"
      //hit->setMetaValue("MULTI:ionFrac", ion_frac);
      for (StringList::iterator feats = feature_set.begin(); feats != feature_set.end(); ++feats)
      {
        if (complete_only && !hit->metaValueExists(*feats))
        {
          incompletes.push_back(hit); // mark for removal
          break;
        }
        else if (!hit->metaValueExists(*feats))
        {
          // NOTE(review): if no PSM carried this feature at all, extremals[*feats]
          // is empty and front() is undefined behaviour - confirm upstream
          // guarantees at least one observation per registered feature.
          hit->setMetaValue(*feats, extremals[*feats].front()); // imputation
          ++imputed_values;
        }
        else
        {
          ++observed_values;
        }
      }
      // the (merged) score counts how many SEs identified this peptide
      int num_hits = hit->getScore();
      hit->setMetaValue("MULTI:numHits", num_hits);
    }
    if (complete_only)
    {
      // remove incompletes; reverse order keeps the remaining iterators valid
      for (vector<vector<PeptideHit>::iterator>::reverse_iterator rit = incompletes.rbegin(); rit != incompletes.rend(); ++rit)
      {
        pi->getHits().erase(*rit);
      }
      sum_removed += incompletes.size();
    }
    if (!incompletes.empty() || imputed_back < imputed_values)
      ++incomplete_spectra;
    else
      ++complete_spectra;
  }
  if (sum_removed > 0)
  {
    OPENMS_LOG_WARN << "Removed " << sum_removed << " incomplete cases of PSMs." << endl;
  }
  if (imputed_values > 0)
  {
    OPENMS_LOG_WARN << "Imputed " << imputed_values << " of " << observed_values+imputed_values
             << " missing values. ("
             << imputed_values*100.0/(imputed_values+observed_values)
             << "%)" << endl;
    OPENMS_LOG_WARN << "Affected " << incomplete_spectra << " of " << incomplete_spectra+complete_spectra
             << " spectra. ("
             << incomplete_spectra*100.0/(incomplete_spectra+complete_spectra)
             << "%)" << endl;
  }
}
void PercolatorFeatureSetHelper::checkExtraFeatures(const vector<PeptideHit>& psms, StringList& extra_features)
{
set<StringList::iterator> unavail;
for (vector<PeptideHit>::const_iterator hit = psms.begin(); hit != psms.end(); ++hit)
{
for (StringList::iterator ef = extra_features.begin(); ef != extra_features.end(); ++ef)
{
if (!hit->metaValueExists(*ef))
{
unavail.insert(ef);
}
}
}
for (set<StringList::iterator>::reverse_iterator rit = unavail.rbegin(); rit != unavail.rend(); ++rit)
{
OPENMS_LOG_WARN << "A extra_feature requested (" << *(*rit) << ") was not available - removed." << endl;
extra_features.erase(*rit);
}
}
// Function adapted from MSGFPlusReader in Percolator converter
double PercolatorFeatureSetHelper::rescaleFragmentFeature_(double featureValue, int NumMatchedMainIons)
{
  // Rescale the fragment features to penalize features calculated by few ions:
  // values backed by fewer than 'ion_limit' matched main ions are scaled up by
  // ((1+limit)/(1+matched))^2, values at or above the limit are unchanged.
  const int ion_limit = 7;
  const int capped_ions = (std::min)(NumMatchedMainIons, ion_limit);
  const int numerator = (1 + ion_limit) * (1 + ion_limit);
  const int denominator = (1 + capped_ions) * (1 + capped_ions);
  return featureValue * ((double)numerator / denominator);
}
void PercolatorFeatureSetHelper::assignDeltaScore_(vector<PeptideHit>& hits, const String& score_ref, const String& output_ref)
{
  // For every hit, stores under output_ref the difference between its score
  // (meta value score_ref) and the score of the directly following hit; the
  // last (or only) hit gets a delta of 0. Assumes hits are already ranked.
  if (!hits.empty())
  {
    vector<PeptideHit>::iterator prev = hits.begin();
    double prev_score = double(prev->getMetaValue(score_ref));
    for (vector<PeptideHit>::iterator hit = hits.begin()+1; hit != hits.end(); ++hit)
    {
      double cur_score = double(hit->getMetaValue(score_ref));
      double value = prev_score - cur_score;
      prev->setMetaValue(output_ref, value);
      prev = hit;
      // BUG FIX: advance the reference score along with the iterator; the
      // original never updated prev_score, so every delta was taken against
      // the FIRST hit's score instead of the preceding hit's.
      prev_score = cur_score;
    }
    (hits.end()-1)->setMetaValue(output_ref, 0.0); //if last hit or only one hit
  }
}
// TODO: this is code redundancy to PercolatorAdapter
// TODO: in case of merged idXML files from fractions and/or replicates make sure that you also consider the file origin
// this is usually stored in the map_index MetaValue of a PeptideIdentification (PSM) object.
String PercolatorFeatureSetHelper::getScanMergeKey_(PeptideIdentificationList::iterator it, PeptideIdentificationList::iterator start)
{
  // Derives a per-spectrum key (scan number, or spectrum index as fallback)
  // used to merge PSMs of the same spectrum across search engines.
  // MSGF+ uses this field, is empty if not specified
  String scan_identifier = it->getSpectrumReference();
  if (scan_identifier.empty())
  {
    // XTandem uses this (integer) field
    // these ids are 1-based in contrast to the index which is 0-based, so subtract 1.
    if (it->metaValueExists("spectrum_id") && !it->getMetaValue("spectrum_id").toString().empty())
    {
      scan_identifier = "index=" + String(it->getMetaValue("spectrum_id").toString().toInt() - 1);
    }
    else
    {
      scan_identifier = "index=" + String(it - start + 1);
      OPENMS_LOG_WARN << "no known spectrum identifiers, using index [1,n] - use at own risk." << endl;
    }
  }
  // Extract "scan=<n>" (preferred, stops the search) or "index=<n>" (kept, but
  // may be overridden by a later field) from the identifier's fields.
  Int scan = 0;
  StringList fields = ListUtils::create<String>(scan_identifier);
  for (const String& field : fields) // renamed: the original loop variable shadowed the 'it' parameter
  {
    Size pos = 0;
    if ((pos = field.find("scan=")) != String::npos)
    {
      scan = field.substr(pos + 5).toInt();
      break;
    } // only if scan number is not available, use the scan index
    else if ((pos = field.find("index=")) != String::npos)
    {
      scan = field.substr(pos + 6).toInt();
    }
  }
  return String(scan);
}
}
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/ID/OpenSearchModificationAnalysis.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/CHEMISTRY/ResidueModification.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <algorithm>
#include <cmath>
#include <fstream>
#include <numeric>
#include <unordered_map>
#include <unordered_set>
#include <boost/math/distributions/normal.hpp>
namespace OpenMS
{
std::pair<OpenSearchModificationAnalysis::DeltaMassHistogram, OpenSearchModificationAnalysis::DeltaMassToChargeCount>
  OpenSearchModificationAnalysis::analyzeDeltaMassPatterns(const PeptideIdentificationList& peptide_ids,
                                                           bool use_smoothing,
                                                           bool /*debug*/) const
{
  // Builds a histogram of precursor delta masses (meta value "DeltaMass") over
  // all peptide hits, binned to 0.0005 Da, together with the number of distinct
  // charge states observed per bin. Delta masses within 0.05 Da of zero
  // (unmodified precursors) are ignored. If use_smoothing is set, the histogram
  // is Gaussian-smoothed and reduced to its local maxima before returning.
  // Constants
  constexpr double deltamass_tolerance = 0.0005;
  constexpr double delta_mass_zero_threshold = 0.05;
  // Lambda to round values to the specified tolerance
  auto roundToTolerance = [](double value) {
    return std::round(value / deltamass_tolerance) * deltamass_tolerance;
  };
  // Data structures to store histogram and charge states
  DeltaMassHistogram histogram(FuzzyDoubleComparator(1e-9));
  DeltaMassToChargeCount charge_counts(FuzzyDoubleComparator(1e-9));
  // NOTE(review): this map is keyed by raw double with exact equality, unlike
  // the fuzzy-compared maps above; keys always come from the same rounding
  // arithmetic, so lookups should match bit-for-bit - confirm.
  std::unordered_map<double, std::unordered_set<int>> charge_states;
  // Process each peptide identification
  for (const auto& peptide_id : peptide_ids)
  {
    const auto& hits = peptide_id.getHits();
    for (const auto& hit : hits)
    {
      // Retrieve delta mass and charge
      if (!hit.metaValueExists("DeltaMass"))
        continue;
      double delta_mass = hit.getMetaValue("DeltaMass");
      int charge = hit.getCharge();
      // Ignore delta masses close to zero
      if (std::abs(delta_mass) <= delta_mass_zero_threshold)
        continue;
      // Round delta mass to bin similar values
      double rounded_mass = roundToTolerance(delta_mass);
      // Update histogram count
      histogram[rounded_mass] += 1.0;
      // Update unique charge count (insert().second is true only for new charges)
      if (charge_states[rounded_mass].insert(charge).second)
      {
        charge_counts[rounded_mass] += 1;
      }
    }
  }
  // Prepare results
  std::pair<DeltaMassHistogram, DeltaMassToChargeCount> results{histogram, charge_counts};
  // Apply smoothing if requested
  if (use_smoothing)
  {
    DeltaMassHistogram smoothed_hist = smoothDeltaMassHistogram_(histogram, 0.0001);
    DeltaMassHistogram hist_maxima = findPeaksInHistogram_(smoothed_hist, 0.0, 3.0);
    // Update charge counts for the smoothed maxima
    DeltaMassToChargeCount smoothed_charge_counts(FuzzyDoubleComparator(1e-9));
    for (const auto& [mass, _] : hist_maxima)
    {
      smoothed_charge_counts[mass] = charge_counts[mass];
    }
    // Update results with smoothed data
    results = {hist_maxima, smoothed_charge_counts};
  }
  return results;
}
std::vector<OpenSearchModificationAnalysis::ModificationSummary>
  OpenSearchModificationAnalysis::mapDeltaMassesToModifications(const DeltaMassHistogram& delta_mass_histogram,
                                                                const DeltaMassToChargeCount& charge_histogram,
                                                                PeptideIdentificationList& peptide_ids,
                                                                double precursor_mass_tolerance,
                                                                bool precursor_mass_tolerance_unit_ppm,
                                                                const String& output_file) const
{
  // Maps each delta-mass histogram bin to known modifications from the
  // ModificationsDB (single mods, +1 Da isotope variants, and pairwise
  // combinations), annotates every peptide hit with a "PTM" meta value,
  // optionally writes a TSV summary, and returns the per-modification
  // summaries sorted by (distinct charge states + count), descending.
  mass_to_modification: diff mono mass -> modification name (single mods)
  std::map<double, String, FuzzyDoubleComparator> mass_to_modification(FuzzyDoubleComparator(1e-9));
  std::map<String, ModificationPattern> modifications;     // accumulated per-name stats
  std::map<double, String> histogram_found;                // masses already explained -> name
  // Load modifications from the database
  std::vector<String> modification_names;
  ModificationsDB* mod_db = ModificationsDB::getInstance();
  mod_db->getAllSearchModifications(modification_names);
  for (const String& mod_name : modification_names)
  {
    const ResidueModification* residue = mod_db->getModification(mod_name);
    String full_name = residue->getFullName();
    double diff_mono_mass = residue->getDiffMonoMass();
    // amino-acid substitutions are excluded from the candidate set
    if (full_name.find("substitution") == std::string::npos)
    {
      mass_to_modification[diff_mono_mass] = full_name;
    }
  }
  // Generate combinations of modifications (all unordered pairs, incl. self-pairs)
  std::map<double, String, FuzzyDoubleComparator> combo_modifications(FuzzyDoubleComparator(1e-9));
  for (auto it1 = mass_to_modification.begin(); it1 != mass_to_modification.end(); ++it1)
  {
    for (auto it2 = it1; it2 != mass_to_modification.end(); ++it2)
    {
      combo_modifications[it1->first + it2->first] = it1->second + "++" + it2->second;
    }
  }
  // Helper function to add or update modifications.
  // NOTE(review): on update only the count/charge stats change; the additional
  // mass is NOT appended to pattern.masses - confirm intended.
  auto addOrUpdateModification = [&](const String& mod_name, double mass, double count, int num_charges)
  {
    if (modifications.find(mod_name) == modifications.end())
    {
      ModificationPattern pattern;
      pattern.masses.push_back(mass);
      pattern.count = count;
      pattern.num_charge_states = num_charges;
      modifications[mod_name] = pattern;
    }
    else
    {
      modifications[mod_name].count += count;
      modifications[mod_name].num_charge_states = std::max(num_charges, modifications[mod_name].num_charge_states);
    }
  };
  // Map delta masses to modifications
  for (const auto& hist_entry : delta_mass_histogram)
  {
    double cluster_mass = hist_entry.first;
    double count = hist_entry.second;
    double lower_bound, upper_bound;
    const double epsilon = 1e-8;
    // search window around the cluster mass, in ppm or Da
    if (precursor_mass_tolerance_unit_ppm)
    {
      double tolerance = cluster_mass * precursor_mass_tolerance * 1e-6;
      lower_bound = cluster_mass - tolerance;
      upper_bound = cluster_mass + tolerance;
    }
    else
    {
      lower_bound = cluster_mass - precursor_mass_tolerance;
      upper_bound = cluster_mass + precursor_mass_tolerance;
    }
    // Search for modifications within bounds
    bool mapping_found = false;
    String mod_name;
    double mod_mass = 0.0;
    // Search in single modifications: probe the window from both ends.
    // NOTE(review): the acceptance check uses precursor_mass_tolerance in Da
    // even when the window was computed in ppm - confirm intended.
    auto it_lower = mass_to_modification.lower_bound(lower_bound - epsilon);
    bool found_lower = false;
    if (it_lower != mass_to_modification.end() &&
        std::abs(it_lower->first - cluster_mass) <= precursor_mass_tolerance)
    {
      found_lower = true;
    }
    auto it_upper = mass_to_modification.upper_bound(upper_bound + epsilon);
    bool found_upper = false;
    if (it_upper != mass_to_modification.begin())
    {
      --it_upper;
      if (std::abs(it_upper->first - cluster_mass) <= precursor_mass_tolerance)
      {
        found_upper = true;
      }
    }
    // Compare results from lower_bound and upper_bound
    if (found_lower && found_upper)
    {
      if (it_lower->first == it_upper->first && it_lower->second == it_upper->second)
      {
        // unambiguous: both probes agree on a single modification
        mod_name = it_lower->second;
        mod_mass = it_lower->first;
        histogram_found[mod_mass] = mod_name;
        mapping_found = true;
      }
      else
      {
        // ambiguous: two candidates within the window; record both as "A//B"
        mod_name = it_lower->second + "//" + it_upper->second;
        mod_mass = cluster_mass;
        histogram_found[it_lower->first] = it_lower->second;
        histogram_found[it_upper->first] = it_upper->second;
        mapping_found = true;
      }
    }
    else
    {
      // Check if modification can be explained by known modifications
      for (const auto& hit : histogram_found)
      {
        if (std::abs(hit.first - cluster_mass) < precursor_mass_tolerance)
        {
          addOrUpdateModification(hit.second, hit.first, count, charge_histogram.at(cluster_mass));
          mapping_found = true;
          break;
        }
        // Check if modification can be explained by a +1 isotope variant
        else if (std::abs((hit.first + 1.0) - cluster_mass) < precursor_mass_tolerance)
        {
          String temp_mod_name = hit.second + "+1Da";
          addOrUpdateModification(temp_mod_name, hit.first + 1.0, count, charge_histogram.at(cluster_mass));
          // inserting into histogram_found while iterating is safe for std::map
          // (no iterator invalidation) and we break immediately afterwards
          histogram_found[hit.first + 1.0] = temp_mod_name;
          mapping_found = true;
          break;
        }
      }
      // Search in combination modifications (stricter tolerance: 1/10th)
      if (!mapping_found)
      {
        auto it = combo_modifications.lower_bound(cluster_mass - epsilon);
        if (it != combo_modifications.end() &&
            std::abs(it->first - cluster_mass) <= precursor_mass_tolerance / 10.0)
        {
          mod_name = it->second;
          mod_mass = it->first;
          mapping_found = true;
        }
      }
    }
    // NOTE(review): when no mapping was found, mod_mass is still 0.0, so this
    // 'continue' also skips the 'Unknown' fallback below, making that branch
    // unreachable for genuinely unexplained masses - confirm intended. It does
    // prevent double-adding mods already registered in the loop above.
    if (std::abs(mod_mass) < precursor_mass_tolerance)
      continue; // Skip if closest mod_mass is too close to 0
    if (mapping_found)
    {
      addOrUpdateModification(mod_name, mod_mass, count, charge_histogram.at(cluster_mass));
    }
    else
    {
      // Unknown modification
      String unknown_mod_name = "Unknown" + String(std::round(cluster_mass));
      addOrUpdateModification(unknown_mod_name, cluster_mass, count, charge_histogram.at(cluster_mass));
    }
  }
  // Collect all modification data into a vector
  std::vector<ModificationSummary> modification_summaries;
  for (const auto& mod_pair : modifications)
  {
    ModificationSummary summary;
    summary.count = static_cast<int>(std::round(mod_pair.second.count));
    summary.name = mod_pair.first;
    summary.num_charge_states = mod_pair.second.num_charge_states;
    summary.masses = mod_pair.second.masses;
    modification_summaries.push_back(summary);
  }
  // Sort modifications by (num_charge_states + count) in descending order
  std::sort(modification_summaries.begin(), modification_summaries.end(),
            [](const ModificationSummary& a, const ModificationSummary& b)
            {
              return (a.num_charge_states + a.count) > (b.num_charge_states + b.count);
            });
  // Add modifications to peptide identifications: annotate each hit with "PTM"
  for (auto& peptide_id : peptide_ids)
  {
    auto& hits = peptide_id.getHits();
    for (auto& hit : hits)
    {
      if (!hit.metaValueExists("DeltaMass"))
        continue;
      double delta_mass = hit.getMetaValue("DeltaMass");
      String ptm = "";
      // Check if too close to zero (unmodified -> empty PTM annotation)
      if (std::abs(delta_mass) < 0.05)
      {
        hit.setMetaValue("PTM", ptm);
        continue;
      }
      bool found = false;
      // Check with error tolerance if already present in histogram
      for (const auto& entry : histogram_found)
      {
        if (std::abs(delta_mass - entry.first) < precursor_mass_tolerance)
        {
          ptm = entry.second;
          found = true;
          break;
        }
      }
      // Otherwise assign unknown
      if (!found)
      {
        ptm = "Unknown" + String(delta_mass);
      }
      hit.setMetaValue("PTM", ptm);
    }
  }
  // Write modification summary table if output file is specified
  if (!output_file.empty())
  {
    writeModificationSummary_(modification_summaries, output_file);
  }
  return modification_summaries;
}
std::vector<OpenSearchModificationAnalysis::ModificationSummary>
  OpenSearchModificationAnalysis::analyzeModifications(PeptideIdentificationList& peptide_ids,
                                                       double precursor_mass_tolerance,
                                                       bool precursor_mass_tolerance_unit_ppm,
                                                       bool use_smoothing,
                                                       const String& output_file) const
{
  // Convenience entry point: build the delta-mass histogram and charge counts,
  // then map the observed delta masses to modifications and annotate the hits.
  const auto pattern_result = analyzeDeltaMassPatterns(peptide_ids, use_smoothing, false);
  return mapDeltaMassesToModifications(pattern_result.first, pattern_result.second, peptide_ids,
                                       precursor_mass_tolerance, precursor_mass_tolerance_unit_ppm,
                                       output_file);
}
// Private helper functions
double OpenSearchModificationAnalysis::gaussian_(double x, double sigma)
{
  // PDF of a zero-mean normal distribution with standard deviation sigma,
  // evaluated at x: exp(-x^2 / (2*sigma^2)) / (sigma * sqrt(2*pi)).
  // Computed in closed form to avoid constructing a boost distribution object
  // on every call (this is invoked O(n^2) times during histogram smoothing).
  const double inv_sqrt_2pi = 0.39894228040143267794; // 1 / sqrt(2*pi)
  return (inv_sqrt_2pi / sigma) * std::exp(-(x * x) / (2.0 * sigma * sigma));
}
OpenSearchModificationAnalysis::DeltaMassHistogram
  OpenSearchModificationAnalysis::smoothDeltaMassHistogram_(const DeltaMassHistogram& histogram, double sigma)
{
  // Gaussian kernel smoothing of the delta-mass histogram: each bin's count is
  // replaced by the normalized, Gaussian-weighted average of all bins within
  // 3*sigma of it. O(n^2) over the number of bins (the 3-sigma cutoff only
  // skips the weight computation, not the scan). Bin positions are unchanged.
  if (histogram.size() < 3)
  {
    return histogram; // Not enough data points for smoothing
  }
  DeltaMassHistogram smoothed_histogram(FuzzyDoubleComparator(1e-9));
  // Extract delta masses and counts into vectors for efficient access
  std::vector<double> deltas;
  std::vector<double> counts;
  deltas.reserve(histogram.size());
  counts.reserve(histogram.size());
  for (const auto& [delta, count] : histogram)
  {
    deltas.push_back(delta);
    counts.push_back(count);
  }
  const size_t n = deltas.size();
  std::vector<double> smoothed_counts(n, 0.0);
  // Perform Gaussian smoothing
  for (size_t i = 0; i < n; ++i)
  {
    double weight_sum = 0.0;
    for (size_t j = 0; j < n; ++j)
    {
      double mz_diff = deltas[i] - deltas[j];
      // Ignore points beyond 3 standard deviations
      if (std::abs(mz_diff) > 3.0 * sigma)
        continue;
      double weight = gaussian_(mz_diff, sigma);
      smoothed_counts[i] += weight * counts[j];
      weight_sum += weight;
    }
    // normalize by the total kernel weight (the i==j term guarantees
    // weight_sum > 0 here, so the guard is just belt-and-braces)
    if (weight_sum != 0.0)
    {
      smoothed_counts[i] /= weight_sum;
    }
  }
  // Populate the smoothed histogram
  for (size_t i = 0; i < n; ++i)
  {
    smoothed_histogram[deltas[i]] = smoothed_counts[i];
  }
  return smoothed_histogram;
}
OpenSearchModificationAnalysis::DeltaMassHistogram
  OpenSearchModificationAnalysis::findPeaksInHistogram_(const DeltaMassHistogram& histogram,
                                                        double count_threshold,
                                                        double snr)
{
  // Reduces the histogram to its local maxima: interior bins whose count is
  // >= both neighbors, exceeds count_threshold, and exceeds snr times the
  // noise level (the median bin count). With fewer than three bins there are
  // no interior points, so the input is returned unchanged.
  if (histogram.size() < 3)
  {
    return histogram; // Not enough data points to find peaks
  }
  // Noise level = median of all bin counts (via partial selection).
  std::vector<double> all_counts;
  all_counts.reserve(histogram.size());
  for (const auto& bin : histogram)
  {
    all_counts.push_back(bin.second);
  }
  const size_t mid = all_counts.size() / 2;
  std::nth_element(all_counts.begin(), all_counts.begin() + mid, all_counts.end());
  const double noise_level = all_counts[mid];
  // Linearize the (sorted) histogram for neighbor access, then scan the interior.
  const std::vector<std::pair<double, double>> bins(histogram.begin(), histogram.end());
  DeltaMassHistogram peaks(FuzzyDoubleComparator(1e-9));
  for (size_t i = 1; i + 1 < bins.size(); ++i)
  {
    const double count = bins[i].second;
    const bool local_max = (count >= bins[i - 1].second) && (count >= bins[i + 1].second);
    if (local_max && count > count_threshold && count / noise_level > snr)
    {
      peaks[bins[i].first] = count;
    }
  }
  return peaks;
}
void OpenSearchModificationAnalysis::writeModificationSummary_(const std::vector<ModificationSummary>& modifications,
                                                               const String& output_file) const
{
  // Writes the per-modification summary as a TSV table. The table path is
  // derived from output_file: a trailing '.idXML'/'.idxml' is replaced by
  // '_OutputTable.tsv', otherwise the suffix is simply appended.
  String output_table = output_file;
  // Merged the two byte-identical '.idXML'/'.idxml' branches of the original.
  if (output_table.hasSuffix(".idXML") || output_table.hasSuffix(".idxml"))
  {
    output_table = output_table.substr(0, output_table.size() - 6) + "_OutputTable.tsv";
  }
  else
  {
    output_table += "_OutputTable.tsv";
  }
  std::ofstream output_stream(output_table);
  if (!output_stream.is_open())
  {
    OPENMS_LOG_ERROR << "Error opening file: " << output_table << std::endl;
    return;
  }
  output_stream << "Name\tMass\tModified Peptides (incl. charge variants)\tModified Peptides\n";
  for (const auto& mod_data : modifications)
  {
    output_stream << mod_data.name << '\t';
    // Output mass or masses; 'masses' holds at least one entry - it is filled
    // when the modification is first registered (presumably guaranteed by the
    // caller; .at() would throw otherwise).
    if (mod_data.masses.size() < 2)
    {
      output_stream << mod_data.masses.at(0) << '\t';
    }
    else
    {
      output_stream << mod_data.masses.at(0) << "/" << mod_data.masses.at(1) << '\t';
    }
    // Output counts
    output_stream << mod_data.num_charge_states + mod_data.count << '\t'
                  << mod_data.count << '\n';
  }
  // RAII closes (and flushes) the stream; the explicit close() was redundant.
}
} // namespace OpenMS