keyword stringclasses 7 values | repo_name stringlengths 8 98 | file_path stringlengths 4 244 | file_extension stringclasses 29 values | file_size int64 0 84.1M | line_count int64 0 1.6M | content stringlengths 1 84.1M ⌀ | language stringclasses 14 values |
|---|---|---|---|---|---|---|---|
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/AdductInfo.cpp | .cpp | 8,427 | 243 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Erhan Kenar, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/AdductInfo.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
namespace OpenMS
{
/// C'tor: validates the inputs and caches the monoisotopic mass of the (uncharged) adduct formula.
/// @param name            display name, e.g. "M+2K-H;1+"
/// @param adduct          empirical formula of the adduct; must be uncharged (see below)
/// @param charge          signed charge of the ion; must not be 0
/// @param mol_multiplier  number of molecules per ion (e.g. 2 for dimers); must not be 0
/// @throws Exception::InvalidParameter on zero charge, charged formula, or zero multiplier
AdductInfo::AdductInfo(const String& name, const EmpiricalFormula& adduct, int charge, UInt mol_multiplier)
  :
  name_(name),
  ef_(adduct),
  charge_(charge),
  mol_multiplier_(mol_multiplier)
{
  // a charge of 0 would make getNeutralMass()/getMZ() meaningless (division by |charge|)
  if (charge_ == 0)
  {
    throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Charge of 0 is not allowed for an adduct (" + ef_.toString() + ")");
  }
  if (adduct.getCharge() != 0)
  { // EmpiricalFormula adds/subtracts protons to make up the charge;
    // we just use the uncharged formula and take care of charge ourselves
    throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "EmpiricalFormula must not have a charge (" + ef_.toString() + "), since the internal weight computation of EF is unsuitable for adducts.");
  }
  if (mol_multiplier_ == 0)
  {
    throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Mol. multiplier of 0 is not allowed for an adduct (" + ef_.toString() + ")");
  }
  // cache the monoisotopic weight of the adduct formula for later mass computations
  mass_ = ef_.getMonoWeight();
}
double AdductInfo::getNeutralMass(double observed_mz) const
{
// decharge and remove adduct (charge is guaranteed != 0; see C'tor)
double mass = observed_mz * abs(charge_) - mass_;
// correct for electron masses
// (positive charge means there are electrons missing!)
// (negative charge requires increasing the mass by X electrons)
// --> looking at observed m/z, we thus need to decharge to get equal protons and electrons
mass += charge_ * 1 * Constants::ELECTRON_MASS_U;
// the Mol multiplier determines if we assume to be looking at dimers or higher
// Currently, we just want the monomer, to compare its mass to a DB entry
mass /= mol_multiplier_;
return mass;
}
/// Computes the expected m/z for a given neutral monomer mass.
/// Inverse operation of getNeutralMass().
double AdductInfo::getMZ(double neutral_mass) const
{
  // scale the monomer up to the n-mer and attach the adduct: [nM+adduct]
  double total = neutral_mass * mol_multiplier_ + mass_;
  // electron correction:
  // a positive charge means electrons are missing, a negative charge means surplus electrons
  total -= charge_ * Constants::ELECTRON_MASS_U;
  // divide by the absolute charge to obtain m/z
  return total / abs(charge_);
}
double AdductInfo::getMassShift(bool use_avg_mass) const
{
double mass = use_avg_mass ? ef_.getAverageWeight() : mass_;
// intrinsic adduct charge comes from additional/missing electrons, but for
// mass shift must be compensated by adding/removing hydrogens:
return mass - charge_ * (Constants::PROTON_MASS_U + Constants::ELECTRON_MASS_U);
}
/// checks if an adduct (e.g.a 'M+2K-H;1+') is valid, i.e. if the losses (==negative amounts) can actually be lost by the compound given in @p db_entry.
/// If the negative parts are present in @p db_entry, true is returned.
bool AdductInfo::isCompatible(const EmpiricalFormula& db_entry) const
{
  // ef_ * -1 flips the sign of every element count; contains() then verifies that
  // all (originally negative) losses are covered by the compound's formula
  return db_entry.contains(ef_ * -1);
}
/// returns the signed charge of the adduct (never 0; enforced in the c'tor)
int AdductInfo::getCharge() const
{
  return charge_;
}

/// returns the name given at construction (parseAdductString() stores the full adduct string here)
const String& AdductInfo::getName() const
{
  return name_;
}

/// returns the (uncharged) empirical formula of the adduct
const EmpiricalFormula& AdductInfo::getEmpiricalFormula() const
{
  return ef_;
}

/// returns the molecular multiplier (e.g. 2 for a dimer '2M+...')
UInt AdductInfo::getMolMultiplier() const
{
  return mol_multiplier_;
}

/// equality: name, formula, charge and multiplier must all match
bool AdductInfo::operator==(const AdductInfo& other) const
{
  return (name_ == other.name_) && (ef_ == other.ef_) &&
         (charge_ == other.charge_) && (mol_multiplier_ == other.mol_multiplier_);
}
/// Parses an adduct string of the form "M+2K-H;1+" or "2M+CH3CN+Na;1+" (multimers supported)
/// into an AdductInfo (formula, charge, molecular multiplier).
/// @throws Exception::InvalidValue on malformed input; Exception::ConversionError on
///         non-integer charge, multiplier or stoichiometry factors
AdductInfo AdductInfo::parseAdductString(const String& adduct)
{
  // adduct string looks like this:
  // M+2K-H;1+ or
  // 2M+CH3CN+Na;1+ (i.e. multimers are supported)
  // do some sanity checks on the string
  // retrieve adduct and charge
  String cp_str(adduct);
  cp_str.removeWhitespaces();
  StringList list;
  cp_str.split(";", list);
  // split term into formula and charge, e.g. "M-H" and "1-"
  String mol_formula, charge_str;
  if (list.size() == 2)
  {
    mol_formula = list[0];
    charge_str = list[1];
  }
  else
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Could not detect molecular ion; charge in '" + cp_str + "'. Got semicolon right?", cp_str);
  }
  // check if charge string is formatted correctly
  if ((!charge_str.hasSuffix("+")) && (!charge_str.hasSuffix("-")))
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Charge sign +/- in the end of the string is missing! ", charge_str);
  }
  // get charge magnitude (throws ConversionError if not an integer) ...
  int charge = charge_str.substr(0, charge_str.size() - 1).toInt();
  // ... and force its sign to agree with the trailing '+'/'-'
  if (charge_str.suffix(1) == "+")
  {
    if (charge < 0)
    {
      charge *= -1;
    }
  }
  else
  {
    if (charge > 0)
    {
      charge *= -1;
    }
  }
  // not allowing double ++ or -- or +- or -+ (normalize '-' to '+' to test all combinations at once)
  String op_str(mol_formula);
  op_str.substitute('-', '+');
  if (op_str.hasSubstring("++") || op_str.hasSuffix("+") || op_str.hasPrefix("+"))
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "+/- operator must be surrounded by a chemical formula. Offending string: ", mol_formula);
  }
  // split by + and -
  op_str = mol_formula;
  if (op_str.has('%'))
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Character '%' not allowed within chemical formula. Offending string: ", mol_formula);
  }
  // ... we want to keep the - and +, so we add extra chars around, which we use as splitter later
  op_str.substitute("-", "%-%");
  op_str.substitute("+", "%+%");
  // split while keeping + and - as separate entries
  op_str.split("%", list);
  // guard against an empty formula part (e.g. ";1+"), which would make list[0] below undefined
  if (list.empty())
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "First term of adduct string must contain the molecular entity 'M', optionally prefixed by a multiplier (e.g. '2M'); not found in ", mol_formula);
  }
  // some further sanity check if adduct formula is correct
  String m_part(list[0]);
  if (!m_part.hasSuffix("M"))
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "First term of adduct string must contain the molecular entity 'M', optionally prefixed by a multiplier (e.g. '2M'); not found in ", m_part);
  }
  int mol_multiplier(1);
  // check if M has a multiplier in front
  if (m_part.length() > 1)
  { // will throw a ConversionError if not an integer
    // (bugfix: was toDouble(), which silently truncated e.g. '2.5M' to 2 instead of failing)
    mol_multiplier = m_part.prefix(m_part.length() - 1).toInt();
  }
  // evaluate the adduct string ...
  // ... add/subtract each adduct compound
  bool op_plus(false);
  EmpiricalFormula ef; // will remain empty if there are no explicit adducts (e.g. 'M;+1')
  for (Size part_idx = 1 /* omit 0 index, since its 'M' */; part_idx < list.size(); ++part_idx)
  {
    if (list[part_idx] == "+")
    {
      op_plus = true;
      continue;
    }
    else if (list[part_idx] == "-")
    {
      op_plus = false;
      continue;
    }
    // check if formula has got a stoichiometry factor in front
    String formula_str(list[part_idx]);
    int stoichio_factor(1);
    int idx(0);
    // bounds-checked digit scan; cast to unsigned char avoids UB of isdigit() on negative chars
    while (idx < static_cast<int>(formula_str.size()) && isdigit(static_cast<unsigned char>(formula_str[idx]))) ++idx;
    if (idx > 0)
    {
      stoichio_factor = formula_str.substr(0, idx).toInt();
      formula_str = formula_str.substr(idx, formula_str.size());
    }
    EmpiricalFormula ef_part(formula_str);
    OPENMS_LOG_DEBUG << "Adducts: " << stoichio_factor << "*" << formula_str << " == " << stoichio_factor * ef_part.getMonoWeight() << std::endl;
    if (op_plus)
    {
      ef += ef_part * stoichio_factor;
    }
    else // "-" operator
    {
      ef -= ef_part * stoichio_factor;
    }
  }
  return AdductInfo(cp_str, ef, charge, mol_multiplier);
}
} // closing namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/NASequence.cpp | .cpp | 14,038 | 452 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Samuel Wein $
// $Authors: Samuel Wein, Timo Sachsenberg, Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/NASequence.h>
#include <OpenMS/CHEMISTRY/RibonucleotideDB.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/Macros.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <map>
#include <string>
#include <utility>
using namespace std;
namespace OpenMS
{
/// C'tor from a residue vector (taken by value and moved) plus optional 5'/3' terminal
/// modifications (nullptr means "no modification").
NASequence::NASequence(vector<const Ribonucleotide*> seq, const RibonucleotideChainEnd* five_prime, const RibonucleotideChainEnd* three_prime) :
  seq_(std::move(seq)),
  five_prime_(five_prime),
  three_prime_(three_prime)
{
}
/// equality: residue pointers and both terminal-modification pointers must match exactly
bool NASequence::operator==(const NASequence& rhs) const
{
  return (tie(seq_, five_prime_, three_prime_) == tie(rhs.seq_, rhs.five_prime_, rhs.three_prime_));
}

/// negation of operator==
bool NASequence::operator!=(const NASequence& rhs) const
{
  return !(operator==(rhs));
}
/// ordering: 5' mod (pointer), then length, then residues (pointer identity with
/// string-code tie-break), then 3' mod (pointer)
bool NASequence::operator<(const NASequence& rhs) const
{
  // can't use std::tie here as we might prefer sorting by string instead of pointer address
  // compare 5' mod
  // NOTE(review): terminal mods are compared by raw pointer address, so their relative
  // order is stable within one process but not across runs — confirm callers don't
  // rely on a reproducible order
  if (five_prime_ != rhs.five_prime_)
  {
    return (five_prime_ < rhs.five_prime_);
  }
  // compare sequence length
  if (seq_.size() != rhs.seq_.size())
  {
    return (seq_.size() < rhs.seq_.size());
  }
  // compare pointers. If different, we compare the more expensive code (string)
  for (size_t i = 0; i != seq_.size(); ++i)
  {
    if (seq_[i] != rhs.seq_[i])
    {
      return (seq_[i]->getCode() < rhs.seq_[i]->getCode());
    }
  }
  // compare 3' mod
  if (three_prime_ != rhs.three_prime_)
  {
    return (three_prime_ < rhs.three_prime_);
  }
  // exactly equal
  return false;
}
/// replace the whole residue sequence (terminal modifications are left untouched)
void NASequence::setSequence(const vector<const Ribonucleotide*>& seq)
{
  seq_ = seq;
}

/// true if the sequence contains no ribonucleotides (terminal mods are not considered)
bool NASequence::empty() const
{
  return seq_.empty();
}
/// Returns the proper prefix of the given length, keeping the 5' modification
/// and dropping the 3' one (the cut removes that terminus).
/// @throws Exception::IndexOverflow if @p length is not strictly smaller than size()
NASequence NASequence::getPrefix(Size length) const
{
  if (length >= seq_.size())
  {
    throw Exception::IndexOverflow(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, length, seq_.size() - 1);
  }
  auto first = seq_.begin();
  return NASequence({first, first + length}, five_prime_, nullptr);
}
/// Returns the proper suffix of the given length, keeping the 3' modification.
/// If the linkage that gets cut carries a phosphorothioate (residue code ending in '*'),
/// the new 5' end keeps it (needed for calculating X and W ions).
/// @throws Exception::IndexOverflow if @p length is not strictly smaller than size()
NASequence NASequence::getSuffix(Size length) const
{
  if (length >= seq_.size())
  {
    throw Exception::IndexOverflow(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, length, seq_.size() - 1);
  }
  // residue immediately before the cut; its '*' marks a thio linkage
  ConstRibonucleotidePtr new_five_prime = nullptr;
  const Size cut_pos = seq_.size() - length - 1;
  if (seq_[cut_pos]->getCode().back() == '*')
  {
    static RibonucleotideDB* rdb = RibonucleotideDB::getInstance();
    new_five_prime = rdb->getRibonucleotide("5'-p*");
  }
  return NASequence({seq_.end() - length, seq_.end()}, new_five_prime, three_prime_);
}
/// Returns the subsequence [start, start + length), clipping @p length to the end of the
/// sequence. Terminal modifications are kept only if the subsequence includes that terminus;
/// a phosphorothioate linkage cut at the 5' side is preserved as a "5'-p*" modification
/// (necessary for calculating X and W ions).
/// @throws Exception::IndexOverflow if @p start is out of range
NASequence NASequence::getSubsequence(Size start, Size length) const
{
  if (start >= size())
  {
    throw Exception::IndexOverflow(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, start, size());
  }
  // clip the requested length to the available residues
  if (length > size() - start)
    length = size() - start;
  // keep terminal mods only when the respective terminus is part of the subsequence
  const RibonucleotideChainEnd* five_prime = ((start == 0) ? five_prime_ : nullptr);
  const RibonucleotideChainEnd* three_prime = ((start + length == size()) ? three_prime_ : nullptr);
  // handle situation where we have a thiol at the 5' of our new NASequence (necessary for calculating X and W ions)
  // (bugfix: removed a leftover debug 'cout << seq_[start - 1]->getCode();' that polluted stdout)
  if (start > 0 && seq_[start - 1]->getCode().back() == '*')
  {
    static RibonucleotideDB* rdb = RibonucleotideDB::getInstance();
    five_prime = rdb->getRibonucleotide("5'-p*");
    if (five_prime == nullptr)
    {
      OPENMS_LOG_WARN << "NASequence::getSubsequence: subsequence would have both phosphorothiol and other modification at 5', discarding other mod" << endl;
    }
  }
  vector<const Ribonucleotide*>::const_iterator it = seq_.begin() + start;
  return NASequence({it, it + length}, five_prime, three_prime);
}
/// Computes the empirical formula of the (possibly fragmented) sequence.
/// @param type    fragment ion type (Full, AminusB, A/B/C/D on the 5' side, W/X/Y/Z on the 3' side)
/// @param charge  number of hydrogens added (positive mode) or removed (negative mode)
/// @return the formula; empty if the sequence is empty; falls back to the full-chain formula
///         (with an error log) for unsupported fragment types
EmpiricalFormula NASequence::getFormula(NASFragmentType type, Int charge) const
{
  static const EmpiricalFormula H_form = EmpiricalFormula::hydrogen();
  static const EmpiricalFormula phosphate_form = EmpiricalFormula("HPO3");
  static const EmpiricalFormula thiophosphate_form = EmpiricalFormula("HPO2S1");
  static const EmpiricalFormula internal_to_full = EmpiricalFormula::water();
  // static const EmpiricalFormula five_prime_to_full = EmpiricalFormula("HPO3");
  // static const EmpiricalFormula three_prime_to_full = EmpiricalFormula("");
  // per-ion-type correction terms relative to the full chain
  static const EmpiricalFormula a_ion_to_full = EmpiricalFormula::water(-1);
  static const EmpiricalFormula b_ion_to_full = EmpiricalFormula();
  static const EmpiricalFormula c_ion_to_full = EmpiricalFormula("H-1PO2");
  static const EmpiricalFormula d_ion_to_full = phosphate_form;
  static const EmpiricalFormula w_ion_to_full = d_ion_to_full;
  static const EmpiricalFormula x_ion_to_full = c_ion_to_full;
  static const EmpiricalFormula y_ion_to_full = b_ion_to_full;
  static const EmpiricalFormula z_ion_to_full = a_ion_to_full;
  static const EmpiricalFormula aminusB_ion_to_full = EmpiricalFormula::water(-2);
  // static const EmpiricalFormula abasicform_RNA = EmpiricalFormula("C5H8O4");
  // static const EmpiricalFormula abasicform_DNA = EmpiricalFormula("C5H7O5P");
  if (seq_.empty())
    return EmpiricalFormula();
  EmpiricalFormula our_form;
  // Add all the ribonucleotide masses
  for (const auto& i : seq_)
  {
    our_form += i->getFormula();
    // Add the phosphate (or thiophosphate) per linkage;
    // a residue code ending in '*' marks a phosphorothioate linkage
    if (&i != &seq_.back()) // no linkage at last base
    {
      if (i->getCode().back() == '*')
      {
        our_form += (thiophosphate_form - internal_to_full);
      }
      else
      {
        our_form += (phosphate_form - internal_to_full);
      }
    }
  }
  EmpiricalFormula local_three_prime, local_five_prime;
  // Make local copies of the formulas for the terminal mods so we don't get into trouble dereferencing null ptrs
  if (three_prime_ != nullptr)
  {
    local_three_prime = three_prime_->getFormula() - H_form;
  }
  if (five_prime_ != nullptr)
  {
    local_five_prime = five_prime_->getFormula() - H_form;
  }
  // 5'-side ions (a/b/c/d, a-B) keep the 5' mod; 3'-side ions (w/x/y/z) keep the 3' mod.
  // The "SO-1" term swaps an oxygen for a sulfur when the broken linkage is a thiophosphate.
  switch (type)
  {
    case Full:
      return our_form + (H_form * charge) + local_five_prime + local_three_prime;
    // case FivePrime:
    //   return our_form - five_prime_to_full + OH_form + (H_form * charge) + local_three_prime;
    case AminusB:
      return our_form + (H_form * charge) + local_five_prime + aminusB_ion_to_full - seq_.back()->getFormula() + seq_.back()->getBaselossFormula();
    case AIon:
      return our_form + (H_form * charge) + local_five_prime + a_ion_to_full;
    case BIon:
      return our_form + (H_form * charge) + local_five_prime + b_ion_to_full;
    case CIon:
      return our_form + (H_form * charge) + local_five_prime + c_ion_to_full + ((seq_.back()->getCode().back() == '*') ? EmpiricalFormula("SO-1") : EmpiricalFormula(""));
    case DIon:
      return our_form + (H_form * charge) + local_five_prime + d_ion_to_full + ((seq_.back()->getCode().back() == '*') ? EmpiricalFormula("SO-1") : EmpiricalFormula(""));
    case WIon:
      return our_form + (H_form * charge) + local_three_prime + w_ion_to_full + ((local_five_prime == EmpiricalFormula("HPO2S")) ? EmpiricalFormula("SO-1") : EmpiricalFormula(""));
    case XIon:
      return our_form + (H_form * charge) + local_three_prime + x_ion_to_full + ((local_five_prime == EmpiricalFormula("HPO2S")) ? EmpiricalFormula("SO-1") : EmpiricalFormula(""));
    case YIon:
      return our_form + (H_form * charge) + local_three_prime + y_ion_to_full;
    case ZIon:
      return our_form + (H_form * charge) + local_three_prime + z_ion_to_full;
    default:
      OPENMS_LOG_ERROR << "NASequence::getFormula: unsupported NASFragmentType" << endl;
  }
  return our_form;
}
/// replace the ribonucleotide at position @p index (unchecked vector access)
void NASequence::set(size_t index, const Ribonucleotide* r)
{
  seq_[index] = r;
}

/// true if a 5' terminal modification is set
bool NASequence::hasFivePrimeMod() const
{
  return (five_prime_ != nullptr);
}

/// set (or clear, with nullptr) the 5' terminal modification
void NASequence::setFivePrimeMod(const RibonucleotideChainEnd* r)
{
  five_prime_ = r;
}

/// returns the 5' terminal modification (may be nullptr)
const RibonucleotideChainEnd* NASequence::getFivePrimeMod() const
{
  return five_prime_;
}

/// true if a 3' terminal modification is set
bool NASequence::hasThreePrimeMod() const
{
  return (three_prime_ != nullptr);
}

/// set (or clear, with nullptr) the 3' terminal modification
void NASequence::setThreePrimeMod(const RibonucleotideChainEnd* r)
{
  three_prime_ = r;
}

/// returns the 3' terminal modification (may be nullptr)
const RibonucleotideChainEnd* NASequence::getThreePrimeMod() const
{
  return three_prime_;
}
/// monoisotopic weight of the (possibly fragmented, charged) sequence
double NASequence::getMonoWeight(NASFragmentType type, Int charge) const
{
  //getFormula adds (or subtracts in negative mode) Hydrogens, not protons, so we need to subtract (or add in negative mode) the electrons
  return getFormula(type, charge).getMonoWeight() - charge * Constants::ELECTRON_MASS_U;
}

/// average weight of the (possibly fragmented, charged) sequence
double NASequence::getAverageWeight(NASFragmentType type, Int charge) const
{
  //getFormula adds (or subtracts in negative mode) Hydrogens, not protons, so we need to subtract (or add in negative mode) the electrons
  return getFormula(type, charge).getAverageWeight() - charge * Constants::ELECTRON_MASS_U;
}
/// number of ribonucleotides (terminal modifications are not counted)
size_t NASequence::size() const
{
  return seq_.size();
}

/// parse a sequence from a C string (see parseString_ for the accepted format)
NASequence NASequence::fromString(const char* s)
{
  NASequence nas;
  parseString_(String(s), nas);
  return nas;
}

/// parse a sequence from a String (see parseString_ for the accepted format)
NASequence NASequence::fromString(const String& s)
{
  NASequence nas;
  parseString_(s, nas);
  return nas;
}
/// Renders the sequence as a string: shortcut prefixes 'p'/'*' for 5' (thio)phosphate,
/// one character per standard residue, "[code]" for everything non-standard, and
/// shortcut suffixes 'p'/'c' for 3' phosphate / cyclo-phosphate.
string NASequence::toString() const
{
  string result;
  // 5' terminal modification
  if (five_prime_ != nullptr)
  {
    const String& code = five_prime_->getCode();
    if (code == "5'-p")
    {
      result = "p";
    }
    else if (code == "5'-p*")
    {
      result = "*";
    }
    else
    {
      result = "[" + code + "]";
    }
  }
  // residue bodies
  for (const auto& ribo : seq_)
  {
    const String& code = ribo->getCode();
    if (code.size() == 1)
    {
      result += code;
    }
    else
    {
      result += "[" + code + "]"; // add brackets around non-standard ribos
    }
  }
  // 3' terminal modification
  if (three_prime_ != nullptr)
  {
    const String& code = three_prime_->getCode();
    if (code == "3'-p")
    {
      result += "p";
    }
    else if (code == "3'-c")
    {
      result += "c";
    }
    else
    {
      result += "[" + code + "]";
    }
  }
  return result;
}
/// Resets to the empty, unmodified sequence.
void NASequence::clear()
{
  seq_.clear();
  five_prime_ = nullptr;
  three_prime_ = nullptr;
}
/// Parses @p s into @p nas (which is cleared first).
/// Format: optional 5' prefix 'p' (phosphate) or '*' (phosphorothioate), then one character
/// per standard ribonucleotide or "[code]" for modified ones, then optional 3' suffix
/// 'p' (phosphate) or 'c' (cyclo-phosphate). Spaces are skipped.
/// @throws Exception::ParseError on unknown residue characters
void NASequence::parseString_(const String& s, NASequence& nas)
{
  nas.clear();
  if (s.empty())
    return;
  static RibonucleotideDB* rdb = RibonucleotideDB::getInstance();
  String::ConstIterator str_it = s.begin();
  if (*str_it == 'p') // special case for 5' phosphate
  {
    nas.setFivePrimeMod(rdb->getRibonucleotide("5'-p"));
    ++str_it;
  }
  else if (*str_it == '*') // special case for 5' phosphorothioate
  {
    nas.setFivePrimeMod(rdb->getRibonucleotide("5'-p*"));
    ++str_it;
  }
  // the 3' suffix is consumed by moving 'stop' one position back;
  // the size() > 1 guard keeps a lone "p"/"c" from being treated as a terminal mod
  String::ConstIterator stop = s.end();
  if ((s.size() > 1) && (s.back() == 'p')) // special case for 3' phosphate
  {
    nas.setThreePrimeMod(rdb->getRibonucleotide("3'-p"));
    --stop;
  }
  else if ((s.size() > 1) && (s.back() == 'c')) // special case for 3' cyclo-phosphate
  {
    nas.setThreePrimeMod(rdb->getRibonucleotide("3'-c"));
    --stop;
  }
  for (; str_it != stop; ++str_it)
  {
    // skip spaces
    if (*str_it == ' ')
      continue;
    // default case: add unmodified, standard ribonucleotide
    if (*str_it != '[')
    {
      try
      {
        ConstRibonucleotidePtr r = rdb->getRibonucleotide(string(1, *str_it));
        nas.seq_.push_back(r);
      }
      catch (Exception::ElementNotFound&)
      {
        String msg = "Cannot convert string to nucleic acid sequence: invalid character '" + String(*str_it) + "'";
        throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, s, msg);
      }
    }
    else // if (*str_it == '[') // non-standard ribonucleotide
    {
      // parse modified ribonucleotide and add it to the sequence:
      // (parseMod_ returns the iterator at the closing ']', which the loop increment skips)
      str_it = parseMod_(str_it, s, nas);
    }
  }
}
/// Parses one bracketed modification "[code]" starting at @p str_it (which must point at '[');
/// appends it to @p nas (or sets it as a terminal mod, depending on its term specificity)
/// and returns the iterator at the closing ']'.
/// @throws Exception::ParseError if the closing ']' is missing
String::ConstIterator NASequence::parseMod_(const String::ConstIterator str_it, const String& str, NASequence& nas)
{
  static RibonucleotideDB* rdb = RibonucleotideDB::getInstance();
  OPENMS_PRECONDITION(*str_it == '[', "Modification must start with '['.");
  // mod_start is advanced past the '[' before mod_end starts scanning
  String::ConstIterator mod_start(str_it);
  String::ConstIterator mod_end(++mod_start);
  while ((mod_end != str.end()) && (*mod_end != ']'))
  {
    ++mod_end; // advance to closing bracket
  }
  // the code between the brackets (excludes '[' and ']')
  string mod(mod_start, mod_end);
  if (mod_end == str.end())
  {
    throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, str, "Cannot convert string to modified ribonucleotide: missing ']'");
  }
  ConstRibonucleotidePtr r = rdb->getRibonucleotide(mod);
  // @TODO: check if position is actually 5'/3' and there's no mod already
  if (r->getTermSpecificity() == Ribonucleotide::FIVE_PRIME)
  {
    nas.setFivePrimeMod(r);
  }
  else if (r->getTermSpecificity() == Ribonucleotide::THREE_PRIME)
  {
    nas.setThreePrimeMod(r);
  }
  else
  {
    nas.seq_.push_back(r);
  }
  return mod_end;
}
/// Streams the bracketed string representation (see NASequence::toString()).
OPENMS_DLLAPI ostream& operator<<(ostream& os, const NASequence& seq)
{
  os << seq.toString();
  return os;
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/Element.cpp | .cpp | 3,926 | 166 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
//
#include <OpenMS/KERNEL/Peak1D.h>
#include <OpenMS/CHEMISTRY/Element.h>
#include <ostream>
#include <algorithm>
#include <cassert>
using namespace std;
namespace OpenMS
{
/// Default C'tor: library-wide default name/symbol/atomic number and default weights;
/// the isotope distribution remains empty.
Element::Element() :
  name_(OPENMS_CHEMISTRY_ELEMENT_NAME_DEFAULT),
  symbol_(OPENMS_CHEMISTRY_ELEMENT_SYMBOL_DEFAULT),
  atomic_number_(OPENMS_CHEMISTRY_ELEMENT_ATOMICNUMBER_DEFAULT),
  average_weight_(OPENMS_CHEMISTRY_ELEMENT_WEIGHT_DEFAULT),
  mono_weight_(OPENMS_CHEMISTRY_ELEMENT_WEIGHT_DEFAULT)
{
}
/// Copy C'tor (memberwise copy is sufficient)
Element::Element(const Element & e) = default;

/// Detailed C'tor; the isotope distribution goes through setIsotopeDistribution(),
/// which asserts that the isotopes are sorted by m/z
Element::Element(const string & name,
                 const string & symbol,
                 unsigned int atomic_number,
                 double average_weight,
                 double mono_weight,
                 const IsotopeDistribution & isotopes) :
  name_(name),
  symbol_(symbol),
  atomic_number_(atomic_number),
  average_weight_(average_weight),
  mono_weight_(mono_weight)
{
  this->setIsotopeDistribution(isotopes);
}

/// D'tor (no owned resources)
Element::~Element() = default;
/// sets the atomic number (number of protons)
void Element::setAtomicNumber(unsigned int atomic_number)
{
  atomic_number_ = atomic_number;
}

/// returns the atomic number
unsigned int Element::getAtomicNumber() const
{
  return atomic_number_;
}

/// sets the average weight (in u)
void Element::setAverageWeight(double weight)
{
  average_weight_ = weight;
}

/// returns the average weight (in u)
double Element::getAverageWeight() const
{
  return average_weight_;
}

/// sets the monoisotopic weight (in u)
void Element::setMonoWeight(double weight)
{
  mono_weight_ = weight;
}

/// returns the monoisotopic weight (in u)
double Element::getMonoWeight() const
{
  return mono_weight_;
}

/// sets the isotope distribution; input must already be sorted by m/z
void Element::setIsotopeDistribution(const IsotopeDistribution & distribution)
{
  //force sortedness by mz. A lot of code relies on this.
  assert(std::is_sorted(distribution.begin(), distribution.end(), Peak1D::MZLess()));
  isotopes_ = distribution;
}

/// returns the isotope distribution
const IsotopeDistribution & Element::getIsotopeDistribution() const
{
  return isotopes_;
}

/// sets the element name (e.g. "Carbon")
void Element::setName(const string & name)
{
  name_ = name;
}

/// returns the element name
const string & Element::getName() const
{
  return name_;
}

/// sets the element symbol (e.g. "C")
void Element::setSymbol(const string & symbol)
{
  symbol_ = symbol;
}

/// returns the element symbol
const string & Element::getSymbol() const
{
  return symbol_;
}
/// assignment (memberwise copy is sufficient)
Element & Element::operator=(const Element & element) = default;

/// equality: all members must match
/// NOTE(review): weights are compared with exact floating-point equality — fine for
/// identity of shared Element instances, but not for tolerance-based comparison
bool Element::operator==(const Element & element) const
{
  return name_ == element.name_ &&
         symbol_ == element.symbol_ &&
         atomic_number_ == element.atomic_number_ &&
         average_weight_ == element.average_weight_ &&
         mono_weight_ == element.mono_weight_ &&
         isotopes_ == element.isotopes_;
}

/// strict weak ordering over all members (atomic number first, then mono weight, ...)
bool Element::operator<(const Element & rhs) const
{
  return std::tie(
           atomic_number_,
           mono_weight_,
           symbol_,
           name_,
           average_weight_,
           isotopes_)
         <
         std::tie(
           rhs.atomic_number_,
           rhs.mono_weight_,
           rhs.symbol_,
           rhs.name_,
           rhs.average_weight_,
           rhs.isotopes_);
}

/// negation of operator==
bool Element::operator!=(const Element & element) const
{
  return !(*this == element);
}
/// Streams "name symbol atomic_number avg_weight mono_weight" followed by
/// "position=intensity%" for every isotope with non-zero intensity.
std::ostream & operator<<(std::ostream & os, const Element & element)
{
  os << element.name_ << " " << element.symbol_ << " " << element.atomic_number_
     << " " << element.average_weight_ << " " << element.mono_weight_;
  for (const auto& iso : element.isotopes_)
  {
    // skip isotopes that do not occur
    if (iso.getIntensity() > 0.0f)
    {
      os << " " << iso.getPosition() << "=" << iso.getIntensity() * 100 << "%";
    }
  }
  return os;
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/ModifiedPeptideGenerator.cpp | .cpp | 13,460 | 364 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/AASequence.h>
#include <OpenMS/CHEMISTRY/ResidueModification.h>
#include <OpenMS/CHEMISTRY/ModifiedPeptideGenerator.h>
#include <OpenMS/CHEMISTRY/ResidueDB.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <boost/math/special_functions/binomial.hpp>
using std::vector;
using std::pair;
using std::map;
namespace OpenMS
{
constexpr int ModifiedPeptideGenerator::N_TERM_MODIFICATION_INDEX = -1; // magic constant to distinguish N_TERM only modifications from ANYWHERE modifications placed at N-term residue
constexpr int ModifiedPeptideGenerator::C_TERM_MODIFICATION_INDEX = -2; // magic constant to distinguish C_TERM only modifications from ANYWHERE modifications placed at C-term residue

// static
/// Resolves modification names (e.g. "Oxidation (M)") via ModificationsDB and builds the
/// lookup map from ResidueModification* to modified Residue*.
/// NOTE(review): the sort is over raw pointers, so the map's insertion order is only
/// stable within one process — confirm no caller depends on a reproducible order
ModifiedPeptideGenerator::MapToResidueType ModifiedPeptideGenerator::getModifications(const StringList& modNames)
{
  vector<const ResidueModification*> modifications;
  for (const String& modification : modNames)
  {
    const ResidueModification* rm = ModificationsDB::getInstance()->getModification(modification);
    modifications.push_back(rm);
  }
  std::sort(modifications.begin(), modifications.end());
  return createResidueModificationToResidueMap_(modifications);
}
// static
// static
/// Builds the map from each ResidueModification to the corresponding modified Residue in
/// ResidueDB. Strictly terminal modifications without an associated residue (origin 'X')
/// map to nullptr.
ModifiedPeptideGenerator::MapToResidueType ModifiedPeptideGenerator::createResidueModificationToResidueMap_(const vector<const ResidueModification*>& mods)
{
  // create a lookup structure from ResidueModification (e.g., "Oxidation (M)" to the modified Residue* in ResidueDB"
  ModifiedPeptideGenerator::MapToResidueType m;
  for (auto const & r : mods)
  {
    String name = r->getFullId();
    bool is_terminal = r->getTermSpecificity() == ResidueModification::N_TERM || r->getTermSpecificity() == ResidueModification::C_TERM || r->getTermSpecificity() == ResidueModification::PROTEIN_N_TERM || r->getTermSpecificity() == ResidueModification::PROTEIN_C_TERM;
    if (!is_terminal)
    {
      auto residue = ResidueDB::getInstance()->getResidue(r->getOrigin());
      m.val[r] = ResidueDB::getInstance()->getModifiedResidue(residue, name);
    }
    else // terminal modification
    {
      if (r->getOrigin() == 'X')
      { // no residue associated with strictly terminal modification
        m.val[r] = nullptr;
      }
      else
      { // specific residue associated with strictly terminal modification
        auto residue = ResidueDB::getInstance()->getResidue(r->getOrigin());
        m.val[r] = ResidueDB::getInstance()->getModifiedResidue(residue, name);
      }
    }
  }
  return m;
}
// static
// static
/// Applies all fixed modifications to @p peptide in place: terminal mods first (if the
/// terminus is still unmodified), then per-residue mods wherever the origin amino acid matches.
void ModifiedPeptideGenerator::applyFixedModifications(
  const MapToResidueType& fixed_mods,
  AASequence& peptide)
{
  // set terminal modifications for modifications without amino acid preference
  // NOTE(review): this loop does not check f->getOrigin(), so residue-specific terminal
  // mods are also applied here regardless of the terminal residue — confirm intended
  for (auto const& mr : fixed_mods.val)
  {
    const ResidueModification* f = mr.first;
    if (f->getTermSpecificity() == ResidueModification::N_TERM)
    {
      if (!peptide.hasNTerminalModification())
      {
        peptide.setNTerminalModification(f);
      }
    }
    else if (f->getTermSpecificity() == ResidueModification::C_TERM)
    {
      if (!peptide.hasCTerminalModification())
      {
        peptide.setCTerminalModification(f);
      }
    }
  }
  //iterate over each residue
  for (auto residue_it = peptide.begin(); residue_it != peptide.end(); ++residue_it)
  {
    // skip already modified residue
    if (residue_it->isModified())
    {
      continue;
    }
    Size residue_index = residue_it - peptide.begin();
    //set fixed modifications
    for (auto const& mr : fixed_mods.val)
    {
      const ResidueModification* f = mr.first;
      // check if amino acid match between modification and current residue
      if (residue_it->getOneLetterCode()[0] != f->getOrigin()) { continue; }
      // Term specificity is ANYWHERE on the peptide, C_TERM or N_TERM (currently no explicit support in OpenMS for protein C-term and protein N-term)
      const ResidueModification::TermSpecificity& term_spec = f->getTermSpecificity();
      if (term_spec == ResidueModification::ANYWHERE)
      {
        const Residue* r = mr.second; // map modification to the modified residue
        peptide.setModification(residue_index, r);
      }
      else if (term_spec == ResidueModification::C_TERM && residue_index == (peptide.size() - 1))
      {
        peptide.setCTerminalModification(f);
      }
      else if (term_spec == ResidueModification::N_TERM && residue_index == 0)
      {
        peptide.setNTerminalModification(f);
      }
    }
  }
}
// static
// static
/// Generates all peptide variants carrying up to @p max_variable_mods_per_peptide variable
/// modifications and appends them to @p all_modified_peptides. If @p keep_unmodified is
/// true, the unmodified input peptide is included as well.
void ModifiedPeptideGenerator::applyVariableModifications(
  const MapToResidueType& var_mods,
  const AASequence& peptide,
  Size max_variable_mods_per_peptide,
  vector<AASequence>& all_modified_peptides,
  bool keep_unmodified)
{
  // no variable modifications specified or no variable mods allowed? no compatibility map needs to be build
  if (var_mods.val.empty() || max_variable_mods_per_peptide == 0)
  {
    // if unmodified peptides should be kept return the original list of digested peptides
    if (keep_unmodified) { all_modified_peptides.push_back(peptide); }
    return;
  }
  // if there is at most one variable modification allowed for a peptide we don't need combinatoric placement and can reside to a faster implementation
  if (max_variable_mods_per_peptide == 1)
  {
    applyAtMostOneVariableModification_(
      var_mods,
      peptide,
      all_modified_peptides,
      keep_unmodified);
    return;
  }
  // iterate over each residue and build compatibility mapping describing
  // which amino acid (peptide index) is compatible with which modification
  // (negative keys are the N_TERM/C_TERM magic constants defined at the top of this file)
  map<int, vector<const ResidueModification*> > mod_compatibility;
  // set terminal modifications for modifications without amino acid preference
  for (auto const& mr : var_mods.val)
  {
    const ResidueModification* v = mr.first;
    if (v->getTermSpecificity() == ResidueModification::N_TERM)
    {
      if (!peptide.hasNTerminalModification())
      {
        mod_compatibility[N_TERM_MODIFICATION_INDEX].push_back(v);
      }
    }
    else if (v->getTermSpecificity() == ResidueModification::C_TERM)
    {
      if (!peptide.hasCTerminalModification())
      {
        mod_compatibility[C_TERM_MODIFICATION_INDEX].push_back(v);
      }
    }
  }
  for (auto residue_it = peptide.begin(); residue_it != peptide.end(); ++residue_it)
  {
    // skip already modified residues
    if (residue_it->isModified())
    {
      continue;
    }
    Size residue_index = residue_it - peptide.begin();
    //determine compatibility of variable modifications
    for (auto const& mr : var_mods.val)
    {
      const ResidueModification* v = mr.first;
      // check if amino acid match between modification and current residue
      if (residue_it->getOneLetterCode()[0] != v->getOrigin())
      {
        continue;
      }
      // Term specificity is ANYWHERE on the peptide, C_TERM or N_TERM
      // (currently no explicit support in OpenMS for protein C-term and
      // protein N-term)
      // TODO This is not true anymore!
      const ResidueModification::TermSpecificity& term_spec = v->getTermSpecificity();
      if (term_spec == ResidueModification::ANYWHERE)
      {
        mod_compatibility[residue_index].push_back(v);
      }
      // TODO think about if it really is the same case as the one above.
      else if (term_spec == ResidueModification::C_TERM && residue_index == (peptide.size() - 1))
      {
        mod_compatibility[C_TERM_MODIFICATION_INDEX].push_back(v);
      }
      else if (term_spec == ResidueModification::N_TERM && residue_index == 0)
      {
        mod_compatibility[N_TERM_MODIFICATION_INDEX].push_back(v);
      }
    }
  }
  // cap the number of placements by the number of compatible positions
  Size max_placements = std::min(max_variable_mods_per_peptide, mod_compatibility.size());
  // stores all variants with how many modifications they already have
  vector<pair<size_t, vector<AASequence>>> mod_peps_w_depth = {{0, {peptide}}};
  // upper bound on the number of entries: sum over binomial(positions, s) for s = 0..max_placements
  Size num_res = 0;
  for (Size s(0); s <= max_placements; ++s)
  {
    num_res += boost::math::binomial_coefficient<double>(mod_compatibility.size(), s);
  }
  mod_peps_w_depth.reserve(num_res);
  // iterate positions from the highest index down; each round extends every existing
  // variant (below max depth) with all mods compatible at that position
  auto rit = mod_compatibility.rbegin();
  for (; rit != mod_compatibility.rend(); ++rit)
  {
    const auto& idx = rit->first;
    const auto& mods = rit->second;
    // copy the complete sequences from last iteration
    auto tmp = mod_peps_w_depth;
    for (auto& [old_depth, old_variants] : tmp)
    {
      // extends mod_peps_w_depth by adding variants with the next mod, if max_placements is not reached
      if (old_depth < max_placements)
      {
        applyAllModsAtIdxAndExtend_(old_variants, idx, mods, var_mods);
        mod_peps_w_depth.emplace_back(old_depth + 1, std::move(old_variants));
      }
    }
  }
  // move sequences from mod_peps_w_depth into result. Skip the initial peptide if desired.
  for (auto& [depth, seqs] : mod_peps_w_depth)
  {
    if (depth != 0 || keep_unmodified)
    {
      all_modified_peptides.insert(
        all_modified_peptides.end(),
        make_move_iterator(seqs.begin()),
        make_move_iterator(seqs.end()));
    }
  }
}
// static
// Fast path for max_variable_mods_per_peptide == 1: emits, for every
// compatible (residue, modification) pair, one copy of 'peptide' carrying
// exactly that single modification. Optionally keeps the unmodified peptide.
void ModifiedPeptideGenerator::applyAtMostOneVariableModification_(
  const MapToResidueType& var_mods,
  const AASequence& peptide,
  vector<AASequence>& all_modified_peptides,
  bool keep_unmodified)
{
  if (keep_unmodified)
  {
    all_modified_peptides.push_back(peptide);
  }
  // We want the same output order as the slower function, i.e. residues are
  // visited from the C-terminus to the N-terminus. Iterating by index avoids
  // forming 'peptide.begin() - 1' as a loop sentinel, which is undefined
  // behavior (an iterator must never be moved before begin()).
  for (Size residue_index = peptide.size(); residue_index > 0; )
  {
    --residue_index;
    const auto residue_it = peptide.begin() + residue_index;
    // skip already modified residues
    if (residue_it->isModified())
    {
      continue;
    }
    // determine compatibility of variable modifications
    for (auto const& mr : var_mods.val)
    {
      const ResidueModification* v = mr.first;
      const char r = residue_it->getOneLetterCode()[0];
      // check if amino acid matches between modification and current residue
      if (r != v->getOrigin()) { continue; }
      // Term specificity is ANYWHERE on the peptide, C_TERM or N_TERM (currently no explicit support in OpenMS for protein C-term and protein N-term)
      const ResidueModification::TermSpecificity& term_spec = v->getTermSpecificity();
      bool is_compatible(false);
      if (term_spec == ResidueModification::ANYWHERE)
      {
        is_compatible = true;
      }
      else if (term_spec == ResidueModification::C_TERM && residue_index == (peptide.size() - 1))
      {
        is_compatible = true;
      }
      else if (term_spec == ResidueModification::N_TERM && residue_index == 0)
      {
        is_compatible = true;
      }
      // residue modification can be placed at current position? Then generate modified peptide.
      if (is_compatible)
      {
        AASequence new_peptide = peptide;
        new_peptide.setModification(residue_index, mr.second); // set modified Residue
        all_modified_peptides.push_back(std::move(new_peptide));
      }
    }
  }
}
void ModifiedPeptideGenerator::applyAllModsAtIdxAndExtend_(vector<AASequence>& original_sequences, int idx_to_modify, const vector<const ResidueModification*>& mods, const ModifiedPeptideGenerator::MapToResidueType& var_mods)
{
  // Replicates the current set of sequences once per modification in 'mods'
  // and then applies the cnt-th modification to every sequence of the cnt-th
  // replica, so each resulting variant carries exactly one of the alternative
  // mods at 'idx_to_modify' (a residue index or the special N-/C-term index).
  Size end = original_sequences.size();
  // reserve up-front so the self-insertions below cannot reallocate; a
  // reallocation would invalidate the source iterators passed to insert().
  // NOTE(review): inserting a vector's own range into itself is formally
  // questionable even without reallocation — a temporary copy would be the
  // strictly standard-conformant alternative.
  original_sequences.reserve(end * mods.size());
  for (Size s(1); s < mods.size(); ++s)
  {
    original_sequences.insert(original_sequences.end(),original_sequences.begin(), original_sequences.begin()+end);
  }
  for (Size cnt(0); cnt < mods.size(); ++cnt) // chunk cnt receives mods[cnt]
  {
    for (Size i(0); i < end; i++)
    {
      applyModToPep_(original_sequences[cnt * end + i], idx_to_modify, mods[cnt], var_mods);
    }
  }
}
// Places modification 'm' on 'current_peptide' at 'current_index'; the two
// sentinel indices select the peptide termini instead of a residue position.
void ModifiedPeptideGenerator::applyModToPep_(AASequence& current_peptide, int current_index, const ResidueModification* m, const ModifiedPeptideGenerator::MapToResidueType& var_mods)
{
  // terminal pseudo-indices: delegate to the dedicated terminus setters
  if (current_index == C_TERM_MODIFICATION_INDEX)
  {
    current_peptide.setCTerminalModification(m);
    return;
  }
  if (current_index == N_TERM_MODIFICATION_INDEX)
  {
    current_peptide.setNTerminalModification(m);
    return;
  }
  // internal position: look up the pre-built modified residue and place it
  const Residue* modified_residue = var_mods.val.at(m);
  current_peptide.setModification(current_index, modified_residue);
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/Residue.cpp | .cpp | 18,062 | 648 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Andreas Bertsch, Jang Jang Jin$
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/Residue.h>
#include <OpenMS/CHEMISTRY/ResidueModification.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/CONCEPT/Macros.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <iostream>
using namespace std;
namespace OpenMS
{
Residue::Residue() = default;
// Full constructor: caches average/mono weights from the given formula and
// pre-computes the internal (chain) formula.
Residue::Residue(const String& name,
  const String& three_letter_code,
  const String& one_letter_code,
  const EmpiricalFormula& formula,
  double pka,
  double pkb,
  double pkc,
  double gb_sc,
  double gb_bb_l,
  double gb_bb_r,
  const set<String>& synonyms):
  name_(name),
  synonyms_(synonyms),
  three_letter_code_(three_letter_code),
  one_letter_code_(one_letter_code),
  formula_(formula),
  average_weight_(formula.getAverageWeight()),
  mono_weight_(formula.getMonoWeight()),
  pka_(pka),
  pkb_(pkb),
  pkc_(pkc),
  gb_sc_(gb_sc),
  gb_bb_l_(gb_bb_l),
  gb_bb_r_(gb_bb_r)
{
  // derive the internal residue formula by subtracting the internal->full
  // conversion formula (presumably H2O released on peptide-bond formation —
  // see getInternalToFull(); declared in the header)
  if (!formula_.isEmpty())
  {
    internal_formula_ = formula_ - getInternalToFull();
  }
}
Residue::~Residue() = default;
// --- name / synonym accessors ---
void Residue::setName(const String& name)
{
  name_ = name;
}
const String& Residue::getName() const
{
  return name_;
}
// Maps a ResidueType enum value to its human-readable name (static table).
String Residue::getResidueTypeName(const Residue::ResidueType res_type)
{
  return names_of_residuetype[res_type];
}
void Residue::setSynonyms(const set<String>& synonyms)
{
  synonyms_ = synonyms;
}
void Residue::addSynonym(const String& synonym)
{
  synonyms_.insert(synonym);
}
const set<String>& Residue::getSynonyms() const
{
  return synonyms_;
}
// --- letter-code accessors; size is only checked in debug builds ---
void Residue::setThreeLetterCode(const String& three_letter_code)
{
  OPENMS_PRECONDITION(three_letter_code.empty() || three_letter_code.size() == 3, "Three letter code needs to be a String of size 3")
  three_letter_code_ = three_letter_code;
}
const String& Residue::getThreeLetterCode() const
{
  OPENMS_POSTCONDITION(three_letter_code_.empty() || three_letter_code_.size() == 3, "Three letter code needs to be a String of size 3")
  return three_letter_code_;
}
void Residue::setOneLetterCode(const String& one_letter_code)
{
  OPENMS_PRECONDITION(one_letter_code.empty() || one_letter_code.size() == 1, "One letter code needs to be a String of size 1")
  one_letter_code_ = one_letter_code;
}
const String& Residue::getOneLetterCode() const
{
  OPENMS_POSTCONDITION(one_letter_code_.empty() || one_letter_code_.size() == 1, "One letter code needs to be a String of size 1")
  return one_letter_code_;
}
// --- acid dissociation constants ---
double Residue::getPka() const
{
  return pka_;
}
double Residue::getPkb() const
{
  return pkb_;
}
double Residue::getPkc() const
{
  return pkc_;
}
// Estimates the isoelectric point (pI) as the arithmetic mean of the two
// dissociation constants that bracket it, chosen from pKa/pKb/pKc.
double Residue::getPiValue() const
{
  const double pka = getPka();
  const double pkb = getPkb();
  const double pkc = getPkc();
  if (pkc >= 0 && pkc < pka)
  {
    return (pkc + pkb) / 2;
  }
  if (pkc >= pkb)
  {
    return (pka + pkc) / 2;
  }
  return (pka + pkb) / 2;
}
void Residue::setPka(double value)
{
  pka_ = value;
}
void Residue::setPkb(double value)
{
  pkb_ = value;
}
void Residue::setPkc(double value)
{
  pkc_ = value;
}
// --- neutral-loss formulas/names (parallel vectors; callers must keep them
// --- in sync) ---
void Residue::setLossFormulas(const vector<EmpiricalFormula>& loss_formulas)
{
  loss_formulas_ = loss_formulas;
}
void Residue::addLossFormula(const EmpiricalFormula& loss_formula)
{
  loss_formulas_.push_back(loss_formula);
}
const vector<EmpiricalFormula>& Residue::getLossFormulas() const
{
  return loss_formulas_;
}
void Residue::addLossName(const String& name)
{
  loss_names_.push_back(name);
}
void Residue::setLossNames(const vector<String>& names)
{
  loss_names_ = names;
}
const vector<String>& Residue::getLossNames() const
{
  return loss_names_;
}
// --- N-terminal neutral-loss formulas/names ---
void Residue::setNTermLossFormulas(const vector<EmpiricalFormula>& NTerm_loss_formulas)
{
  NTerm_loss_formulas_ = NTerm_loss_formulas;
}
void Residue::addNTermLossFormula(const EmpiricalFormula& NTerm_loss_formula)
{
  NTerm_loss_formulas_.push_back(NTerm_loss_formula);
}
const vector<EmpiricalFormula> & Residue::getNTermLossFormulas() const
{
  return NTerm_loss_formulas_;
}
void Residue::addNTermLossName(const String& name)
{
  NTerm_loss_names_.push_back(name);
}
void Residue::setNTermLossNames(const vector<String>& names)
{
  NTerm_loss_names_ = names;
}
const vector<String>& Residue::getNTermLossNames() const
{
  return NTerm_loss_names_;
}
// Sets the full residue formula and refreshes all derived members
// (internal formula and both cached weights).
void Residue::setFormula(const EmpiricalFormula& formula)
{
  formula_ = formula;
  internal_formula_ = formula_ - getInternalToFull();
  average_weight_ = formula_.getAverageWeight();
  mono_weight_ = formula_.getMonoWeight();
}
// Returns the elemental formula of this residue in the given fragment or
// terminus context; contexts other than Full are derived from the internal
// formula plus the respective conversion formula.
EmpiricalFormula Residue::getFormula(ResidueType res_type) const
{
  switch (res_type)
  {
  case Full:
    return formula_;
  case Internal:
    return internal_formula_;
  case NTerminal:
    return internal_formula_ + getInternalToNTerm();
  case CTerminal:
    return internal_formula_ + getInternalToCTerm();
  case BIon:
    return internal_formula_ + getInternalToBIon();
  case AIon:
    return internal_formula_ + getInternalToAIon();
  case CIon:
    return internal_formula_ + getInternalToCIon();
  case XIon:
    return internal_formula_ + getInternalToXIon();
  case YIon:
    return internal_formula_ + getInternalToYIon();
  case ZIon:
    return internal_formula_ + getInternalToZIon();
  case Zp1Ion:
    return internal_formula_ + getInternalToZp1Ion();
  case Zp2Ion:
    return internal_formula_ + getInternalToZp2Ion();
  default:
    // unknown type: report and fall back to the full formula
    cerr << "Residue::getFormula: unknown ResidueType" << endl;
    return formula_;
  }
}
void Residue::setAverageWeight(double weight)
{
  average_weight_ = weight;
}
// Average (isotope-abundance weighted) mass of this residue in the given
// fragment/terminus context. Mirrors getFormula()/getMonoWeight(): the cached
// weight refers to the Full residue and is corrected by the difference of the
// respective conversion formulas.
double Residue::getAverageWeight(ResidueType res_type) const
{
  switch (res_type)
  {
  case Full:
    return average_weight_;
  case Internal:
    return average_weight_ - getInternalToFull().getAverageWeight();
  case NTerminal:
    return average_weight_ + (getInternalToNTerm() - getInternalToFull()).getAverageWeight();
  case CTerminal:
    return average_weight_ + (getInternalToCTerm() - getInternalToFull()).getAverageWeight();
  case BIon:
    return average_weight_ + (getInternalToBIon() - getInternalToFull()).getAverageWeight();
  case AIon:
    return average_weight_ + (getInternalToAIon() - getInternalToFull()).getAverageWeight();
  case CIon:
    return average_weight_ + (getInternalToCIon() - getInternalToFull()).getAverageWeight();
  case XIon:
    return average_weight_ + (getInternalToXIon() - getInternalToFull()).getAverageWeight();
  case YIon:
    return average_weight_ + (getInternalToYIon() - getInternalToFull()).getAverageWeight();
  case ZIon:
    return average_weight_ + (getInternalToZIon() - getInternalToFull()).getAverageWeight();
  // previously missing: Zp1Ion/Zp2Ion fell through to the error default,
  // although getFormula() and getMonoWeight() both support these types
  case Zp1Ion:
    return average_weight_ + (getInternalToZp1Ion() - getInternalToFull()).getAverageWeight();
  case Zp2Ion:
    return average_weight_ + (getInternalToZp2Ion() - getInternalToFull()).getAverageWeight();
  default:
    cerr << "Residue::getAverageWeight: unknown ResidueType" << endl;
    return average_weight_;
  }
}
void Residue::setMonoWeight(double weight)
{
  mono_weight_ = weight;
  return;
}
// Monoisotopic mass of this residue in the given fragment/terminus context.
// Uses the precomputed static conversion weights (see bottom of file) instead
// of recomputing formula differences on every call.
double Residue::getMonoWeight(ResidueType res_type) const
{
  switch (res_type)
  {
  case Full:
    return mono_weight_;
  case Internal:
    return mono_weight_ - internal_to_full_monoweight_;
  case NTerminal:
    return mono_weight_ - internal_to_full_monoweight_ + internal_to_nterm_monoweight_;
  case CTerminal:
    return mono_weight_ - internal_to_full_monoweight_ + internal_to_cterm_monoweight_;
  case BIon:
    return mono_weight_ - internal_to_full_monoweight_ + internal_to_b_monoweight_;
  case AIon:
    return mono_weight_ - internal_to_full_monoweight_ + internal_to_a_monoweight_;
  case CIon:
    return mono_weight_ - internal_to_full_monoweight_ + internal_to_c_monoweight_;
  case XIon:
    return mono_weight_ - internal_to_full_monoweight_ + internal_to_x_monoweight_;
  case YIon:
    return mono_weight_ - internal_to_full_monoweight_ + internal_to_y_monoweight_;
  case ZIon:
    return mono_weight_ - internal_to_full_monoweight_ + internal_to_z_monoweight_;
  case Zp1Ion:
    return mono_weight_ - internal_to_full_monoweight_ + internal_to_zp1_monoweight_;
  case Zp2Ion:
    return mono_weight_ - internal_to_full_monoweight_ + internal_to_zp2_monoweight_;
  default:
    // unknown type: report and fall back to the full mono weight
    cerr << "Residue::getMonoWeight: unknown ResidueType" << endl;
    return mono_weight_;
  }
}
void Residue::setModification(const ResidueModification* mod)
{
modification_ = mod;
// update all the members
if (mod->getAverageMass() != 0)
{
average_weight_ = mod->getAverageMass();
}
if (mod->getMonoMass() != 0)
{
mono_weight_ = mod->getMonoMass();
}
// update mono_weight_ by DiffMonoMass, if MonoMass is not known, but DiffMonoMass is
// as in the case of XLMOD.obo modifications
if ( (mod->getMonoMass() == 0) && (mod->getDiffMonoMass() != 0) )
{
mono_weight_ += mod->getDiffMonoMass();
}
if (!mod->getDiffFormula().isEmpty())
{
setFormula(getFormula() + mod->getDiffFormula());
}
else if (!mod->getFormula().empty())
{
String formula = mod->getFormula();
formula.removeWhitespaces();
setFormula(EmpiricalFormula(formula));
}
// neutral losses
loss_formulas_.clear();
loss_names_.clear();
if (mod->hasNeutralLoss())
{
loss_formulas_.insert(loss_formulas_.end(), mod->getNeutralLossDiffFormulas().begin(), mod->getNeutralLossDiffFormulas().end());
loss_names_.insert(loss_names_.end(), loss_names_.begin(), loss_names_.end());
}
}
const ResidueModification* Residue::getModification() const
{
  return modification_;
}
// Convenience overload: resolves 'name' against ModificationsDB (restricted
// to this residue's one-letter code, ANYWHERE specificity) and applies it.
void Residue::setModification(const String& name)
{
  ModificationsDB* mod_db = ModificationsDB::getInstance();
  const ResidueModification* mod = mod_db->getModification(name, one_letter_code_, ResidueModification::ANYWHERE);
  setModification(mod);
}
// Convenience overload: looks the modification up in ModificationsDB (adding
// it there if unknown) so the stored pointer is DB-owned.
void Residue::setModification(const ResidueModification& mod)
{
  ModificationsDB* mod_db = ModificationsDB::getInstance();
  //TODO think again. Most functions here or in ModificationsDB only check for fullID
  const ResidueModification* modindb = mod_db->searchModification(mod);
  if (modindb == nullptr)
  {
    modindb = mod_db->addNewModification_(mod);
  }
  setModification(modindb);
}
// Resolves a modification from a monoisotopic mass difference and applies it.
// Lookup order: (1) fast search for an already-registered user-defined
// modification (e.g. "M[+12321]"), (2) best DB match within 'tol' Da,
// (3) register a new "unknown" modification as a last resort.
void Residue::setModificationByDiffMonoMass(double diffMonoMass)
{
  ModificationsDB* mod_db = ModificationsDB::getInstance();
  bool multimatch = false;  // set by searchModificationsFast; value not evaluated here
  // quickly check for user-defined modification added by createUnknownFromMassString (e.g. M[+12321])
  String diffMonoMassStr = ResidueModification::getDiffMonoMassWithBracket(diffMonoMass);
  const ResidueModification* mod = mod_db->searchModificationsFast(one_letter_code_ + diffMonoMassStr, multimatch);
  const double tol = 0.002;  // Da; tolerance for the DB mass search
  if (mod == nullptr)
  {
    mod = mod_db->getBestModificationByDiffMonoMass(diffMonoMass, tol, one_letter_code_, ResidueModification::ANYWHERE);
  }
  if (mod == nullptr)
  {
    OPENMS_LOG_WARN << "Modification with monoisotopic mass diff. of " << diffMonoMassStr << " not found in databases with tolerance " << tol << ". Adding unknown modification." << std::endl;
    mod = ResidueModification::createUnknownFromMassString(String(diffMonoMass),
                                                           diffMonoMass,
                                                           true,
                                                           ResidueModification::ANYWHERE,
                                                           this);
  }
  setModification(mod);
}
// Returns the modification id, or the empty string if unmodified.
const String& Residue::getModificationName() const
{
  if (!isModified()) return String::EMPTY;
  return modification_->getId();
}
void Residue::setLowMassIons(const vector<EmpiricalFormula>& low_mass_ions)
{
  low_mass_ions_ = low_mass_ions;
}
const vector<EmpiricalFormula>& Residue::getLowMassIons() const
{
  return low_mass_ions_;
}
// --- gas-phase basicity accessors (backbone left/right, side chain) ---
double Residue::getBackboneBasicityRight() const
{
  return gb_bb_r_;
}
void Residue::setBackboneBasicityRight(double gb_bb_r)
{
  gb_bb_r_ = gb_bb_r;
}
double Residue::getBackboneBasicityLeft() const
{
  return gb_bb_l_;
}
void Residue::setBackboneBasicityLeft(double gb_bb_l)
{
  gb_bb_l_ = gb_bb_l;
}
double Residue::getSideChainBasicity() const
{
  return gb_sc_;
}
void Residue::setSideChainBasicity(double gb_sc)
{
  gb_sc_ = gb_sc;
}
// --- residue-set membership (e.g. "Natural20"; names defined by ResidueDB) ---
void Residue::setResidueSets(const set<String>& residue_sets)
{
  residue_sets_ = residue_sets;
}
const set<String> & Residue::getResidueSets() const
{
  return residue_sets_;
}
void Residue::addResidueSet(const String& residue_set)
{
  residue_sets_.insert(residue_set);
}
// --- simple predicates ---
bool Residue::isModified() const
{
  return modification_ != nullptr;
}
bool Residue::hasNeutralLoss() const
{
  return !loss_formulas_.empty();
}
bool Residue::hasNTermNeutralLosses() const
{
  return !NTerm_loss_formulas_.empty();
}
// Deep equality over all members; fast-path for identical addresses since
// residues are usually shared singletons from ResidueDB.
bool Residue::operator==(const Residue& residue) const
{
  // usually, its the same address (from ResidueDB)
  if (this == &residue) return true;
  // otherwise compare members
  return name_ == residue.name_ &&
         synonyms_ == residue.synonyms_ &&
         three_letter_code_ == residue.three_letter_code_ &&
         one_letter_code_ == residue.one_letter_code_ &&
         formula_ == residue.formula_ &&
         average_weight_ == residue.average_weight_ &&
         mono_weight_ == residue.mono_weight_ &&
         modification_ == residue.modification_ &&
         loss_names_ == residue.loss_names_ &&
         loss_formulas_ == residue.loss_formulas_ &&
         NTerm_loss_names_ == residue.NTerm_loss_names_ &&
         NTerm_loss_formulas_ == residue.NTerm_loss_formulas_ &&
         low_mass_ions_ == residue.low_mass_ions_ &&
         pka_ == residue.pka_ &&
         pkb_ == residue.pkb_ &&
         pkc_ == residue.pkc_ &&
         gb_sc_ == residue.gb_sc_ &&
         gb_bb_l_ == residue.gb_bb_l_ &&
         gb_bb_r_ == residue.gb_bb_r_ &&
         residue_sets_ == residue.residue_sets_;
}
// Compare by one-letter code only.
// NOTE(review): assumes one_letter_code_ is non-empty — [0] on an empty
// String is questionable; verify callers.
bool Residue::operator==(char one_letter_code) const
{
  return one_letter_code_[0] == one_letter_code;
}
bool Residue::operator!=(char one_letter_code) const
{
  return one_letter_code_[0] != one_letter_code;
}
bool Residue::operator!=(const Residue& residue) const
{
  return !(*this == residue);
}
bool Residue::isInResidueSet(const String& residue_set)
{
  return residue_sets_.find(residue_set) != residue_sets_.end();
}
// Maps a backbone fragment type to its conventional lower-case ion letter
// (a/b/c/x/y/z, "z." for z+1, "z'" for z+2); logs an error and returns ""
// for types without an ion letter.
std::string Residue::residueTypeToIonLetter(const Residue::ResidueType& res_type)
{
  if (res_type == Residue::AIon) { return "a"; }
  if (res_type == Residue::BIon) { return "b"; }
  if (res_type == Residue::CIon) { return "c"; }
  if (res_type == Residue::XIon) { return "x"; }
  if (res_type == Residue::YIon) { return "y"; }
  if (res_type == Residue::ZIon) { return "z"; }
  if (res_type == Residue::Zp1Ion) { return "z."; }
  if (res_type == Residue::Zp2Ion) { return "z'"; }
  OPENMS_LOG_ERROR << "Unknown residue type encountered. Can't map to ion letter." << endl;
  return "";
}
// String representation: the plain one-letter code for unmodified residues,
// otherwise the modification's own representation (which includes the origin).
String Residue::toString() const
{
  if (getOneLetterCode().empty())
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Residue does not have a OneLetterCode. This is a bug. Please report it!", "");
  }
  if (!isModified())
  {
    return one_letter_code_;
  }
  else
  { // this already contains the origin!
    return modification_->toString();
  }
}
// Debug/stream output: name, codes and full formula, space-separated.
ostream& operator<<(ostream& os, const Residue& residue)
{
  os << residue.name_ << ' '
     << residue.three_letter_code_ << ' '
     << residue.one_letter_code_ << ' '
     << residue.formula_;
  return os;
}
// static members
// Precomputed monoisotopic weights of the internal->X conversion formulas,
// used by getMonoWeight() to avoid rebuilding EmpiricalFormula objects per call.
// TODO They could actually be constexpr but EmpiricalFormula of a string literal is not constexpr yet
// not sure if possible with current C++ standard
const double Residue::internal_to_full_monoweight_ = Residue::getInternalToFull().getMonoWeight();
const double Residue::internal_to_nterm_monoweight_ = Residue::getInternalToNTerm().getMonoWeight();
const double Residue::internal_to_cterm_monoweight_ = Residue::getInternalToCTerm().getMonoWeight();
const double Residue::internal_to_a_monoweight_ = Residue::getInternalToAIon().getMonoWeight();
const double Residue::internal_to_b_monoweight_ = Residue::getInternalToBIon().getMonoWeight();
const double Residue::internal_to_c_monoweight_ = Residue::getInternalToCIon().getMonoWeight();
const double Residue::internal_to_x_monoweight_ = Residue::getInternalToXIon().getMonoWeight();
const double Residue::internal_to_y_monoweight_ = Residue::getInternalToYIon().getMonoWeight();
const double Residue::internal_to_z_monoweight_ = Residue::getInternalToZIon().getMonoWeight();
const double Residue::internal_to_zp1_monoweight_ = Residue::getInternalToZp1Ion().getMonoWeight();
const double Residue::internal_to_zp2_monoweight_ = Residue::getInternalToZp2Ion().getMonoWeight();
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/TheoreticalSpectrumGenerator.cpp | .cpp | 48,296 | 1,295 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg, Eugen Netz $
// $Authors: Andreas Bertsch $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/TheoreticalSpectrumGenerator.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/CoarseIsotopePatternGenerator.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/FineIsotopePatternGenerator.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CHEMISTRY/AASequence.h>
#include <OpenMS/CHEMISTRY/ResidueDB.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/CONCEPT/RAIICleanup.h>
using namespace std;
namespace OpenMS
{
// Registers all parameters (ion series toggles, isotope model, intensities)
// with their defaults and valid values, then syncs members via defaultsToParam_().
TheoreticalSpectrumGenerator::TheoreticalSpectrumGenerator() :
  DefaultParamHandler("TheoreticalSpectrumGenerator")
{
  defaults_.setValue("isotope_model", "none", "Model to use for isotopic peaks ('none' means no isotopic peaks are added, 'coarse' adds isotopic peaks in unit mass distance, 'fine' uses the hyperfine isotopic generator to add accurate isotopic peaks. Note that adding isotopic peaks is very slow.");
  defaults_.setValidStrings("isotope_model", {"none","coarse","fine"});
  defaults_.setValue("max_isotope", 2, "Defines the maximal isotopic peak which is added if 'isotope_model' is 'coarse'");
  defaults_.setValue("max_isotope_probability", 0.05, "Defines the maximal isotopic probability to cover if 'isotope_model' is 'fine'");
  defaults_.setValue("add_metainfo", "false", "Adds the type of peaks as metainfo to the peaks, like y8+, [M-H2O+2H]++");
  defaults_.setValidStrings("add_metainfo", {"true","false"});
  defaults_.setValue("add_losses", "false", "Adds common losses to those ion expect to have them, only water and ammonia loss is considered");
  defaults_.setValidStrings("add_losses", {"true","false"});
  defaults_.setValue("add_term_losses", "false", "Adds common N- and C-term losses (only if add_losses=true and isotope_model=none), only water and ammonia loss is considered.");
  defaults_.setValidStrings("add_term_losses", {"true","false"});
  defaults_.setValue("add_internal_fragments", "false", "Add b and a type fragments from internal cleavage events.");
  defaults_.setValidStrings("add_internal_fragments", {"true","false"});
  defaults_.setValue("sort_by_position", "true", "Sort output by position");
  defaults_.setValidStrings("sort_by_position", {"true","false"});
  defaults_.setValue("add_precursor_peaks", "false", "Adds peaks of the unfragmented precursor ion to the spectrum");
  defaults_.setValidStrings("add_precursor_peaks", {"true","false"});
  defaults_.setValue("add_all_precursor_charges", "false", "Adds precursor peaks with all charges in the given range");
  defaults_.setValidStrings("add_all_precursor_charges", {"true","false"});
  defaults_.setValue("add_abundant_immonium_ions", "false", "Add most abundant immonium ions (for Proline, Cystein, Iso/Leucine, Histidin, Phenylalanin, Tyrosine, Tryptophan)");
  defaults_.setValidStrings("add_abundant_immonium_ions", {"true","false"});
  defaults_.setValue("add_first_prefix_ion", "false", "If set to true e.g. b1 ions are added");
  defaults_.setValidStrings("add_first_prefix_ion", {"true","false"});
  // per-series on/off switches
  defaults_.setValue("add_y_ions", "true", "Add peaks of y-ions to the spectrum");
  defaults_.setValidStrings("add_y_ions", {"true","false"});
  defaults_.setValue("add_b_ions", "true", "Add peaks of b-ions to the spectrum");
  defaults_.setValidStrings("add_b_ions", {"true","false"});
  defaults_.setValue("add_a_ions", "false", "Add peaks of a-ions to the spectrum");
  defaults_.setValidStrings("add_a_ions", {"true","false"});
  defaults_.setValue("add_c_ions", "false", "Add peaks of c-ions to the spectrum");
  defaults_.setValidStrings("add_c_ions", {"true","false"});
  defaults_.setValue("add_x_ions", "false", "Add peaks of x-ions to the spectrum");
  defaults_.setValidStrings("add_x_ions", {"true","false"});
  defaults_.setValue("add_z_ions", "false", "Add peaks of z-ions to the spectrum (sometimes observed in CID and for some AAs in ExD due to H abstraction)");
  defaults_.setValidStrings("add_z_ions", {"true","false"});
  defaults_.setValue("add_zp1_ions", "false", "Add peaks of z+1-radical cations (also [z+H]*^{+} or simply z*) to the spectrum (often observed in ExD)");
  defaults_.setValidStrings("add_zp1_ions", {"true","false"});
  defaults_.setValue("add_zp2_ions", "false", "Add peaks of z+2-radical cations (also [z+2H]*^{2+} or simply z') to the spectrum (often observed in ExD esp. with higher precursor charges >3 and smaller z-ions.)");
  defaults_.setValidStrings("add_zp2_ions", {"true","false"});
  // intensity options of the ions
  defaults_.setValue("y_intensity", 1.0, "Intensity of the y-ions");
  defaults_.setMinFloat("y_intensity", 0.0);
  defaults_.setValue("b_intensity", 1.0, "Intensity of the b-ions");
  defaults_.setMinFloat("b_intensity", 0.0);
  defaults_.setValue("a_intensity", 1.0, "Intensity of the a-ions");
  defaults_.setMinFloat("a_intensity", 0.0);
  defaults_.setValue("c_intensity", 1.0, "Intensity of the c-ions");
  defaults_.setMinFloat("c_intensity", 0.0);
  defaults_.setValue("x_intensity", 1.0, "Intensity of the x-ions");
  defaults_.setMinFloat("x_intensity", 0.0);
  defaults_.setValue("z_intensity", 1.0, "Intensity of the z-ions");
  defaults_.setMinFloat("z_intensity", 0.0);
  defaults_.setValue("relative_loss_intensity", 0.1, "Intensity of loss ions, in relation to the intact ion intensity");
  defaults_.setMinFloat("relative_loss_intensity", 0.0);
  defaults_.setMaxFloat("relative_loss_intensity", 1.0);
  // precursor intensity
  defaults_.setValue("precursor_intensity", 1.0, "Intensity of the precursor peak");
  defaults_.setMinFloat("precursor_intensity", 0.0);
  defaults_.setValue("precursor_H2O_intensity", 1.0, "Intensity of the H2O loss peak of the precursor");
  defaults_.setMinFloat("precursor_H2O_intensity", 0.0);
  defaults_.setValue("precursor_NH3_intensity", 1.0, "Intensity of the NH3 loss peak of the precursor");
  defaults_.setMinFloat("precursor_NH3_intensity", 0.0);
  defaultsToParam_();
}
// Copy construction/assignment only need to copy the parameter state; all
// other members are derived from it via updateMembers_().
TheoreticalSpectrumGenerator::TheoreticalSpectrumGenerator(const TheoreticalSpectrumGenerator& rhs) :
  DefaultParamHandler(rhs)
{
}
TheoreticalSpectrumGenerator& TheoreticalSpectrumGenerator::operator=(const TheoreticalSpectrumGenerator& rhs)
{
  DefaultParamHandler::operator=(rhs);
  return *this;
}
TheoreticalSpectrumGenerator::~TheoreticalSpectrumGenerator() = default;
// Appends all enabled ion series of 'peptide' for charges min..max to
// 'spectrum', optionally adds precursor/immonium peaks, attaches meta-data
// arrays (ion names, charges) when add_metainfo_ is set, and registers the
// precursor. precursor_charge == 0 defaults to max_charge + 1.
void TheoreticalSpectrumGenerator::getSpectrum(PeakSpectrum& spectrum, const AASequence& peptide, Int min_charge, Int max_charge, Int precursor_charge) const
{
  if (peptide.empty())
  {
    return;
  }
  // chunk bookkeeping: each addPeaks_ call appends a sorted run, enabling a
  // cheap presorted merge at the end instead of a full sort
  MSSpectrum::Chunks chunks(spectrum);
  PeakSpectrum::StringDataArray* ion_names;
  PeakSpectrum::IntegerDataArray* charges;
  bool charges_dynamic = false;
  bool ion_names_dynamic = false;
  // Assure memory is freed even if an exception occurs.
  RAIICleanup _(
    [&]
    {
      if (charges_dynamic) delete charges;
      if (ion_names_dynamic) delete ion_names;
    }
  );
  // reuse the spectrum's first meta-data arrays if present; otherwise fill
  // heap temporaries that are only moved into the spectrum if add_metainfo_
  if (spectrum.getIntegerDataArrays().empty())
  {
    charges = new PeakSpectrum::IntegerDataArray();
    charges_dynamic = true;
  }
  else
  {
    charges = &(spectrum.getIntegerDataArrays()[0]);
  }
  if (spectrum.getStringDataArrays().empty())
  {
    ion_names = new PeakSpectrum::StringDataArray();
    ion_names_dynamic = true;
  }
  else
  {
    ion_names = &(spectrum.getStringDataArrays()[0]);
  }
  ion_names->setName(Constants::UserParam::IonNames);
  charges->setName("Charges");
  // backbone fragment series for every requested charge state
  for (Int z = min_charge; z <= max_charge; ++z)
  {
    if (add_b_ions_) addPeaks_(spectrum, peptide, *ion_names, *charges, chunks, Residue::BIon, z);
    if (add_y_ions_) addPeaks_(spectrum, peptide, *ion_names, *charges, chunks, Residue::YIon, z);
    if (add_a_ions_) addPeaks_(spectrum, peptide, *ion_names, *charges, chunks, Residue::AIon, z);
    if (add_c_ions_) addPeaks_(spectrum, peptide, *ion_names, *charges, chunks, Residue::CIon, z);
    if (add_x_ions_) addPeaks_(spectrum, peptide, *ion_names, *charges, chunks, Residue::XIon, z);
    if (add_z_ions_) addPeaks_(spectrum, peptide, *ion_names, *charges, chunks, Residue::ZIon, z);
    if (add_zp1_ions_) addPeaks_(spectrum, peptide, *ion_names, *charges, chunks, Residue::Zp1Ion, z);
    if (add_zp2_ions_) addPeaks_(spectrum, peptide, *ion_names, *charges, chunks, Residue::Zp2Ion, z);
  }
  if (add_internal_fragments_)
  {
    for (Int z = min_charge; z <= max_charge; ++z)
    {
      addInternalFragmentPeaks_(spectrum, peptide, *ion_names, *charges, chunks, Residue::BIon, z);
      addInternalFragmentPeaks_(spectrum, peptide, *ion_names, *charges, chunks, Residue::AIon, z);
    }
  }
  if (add_precursor_peaks_)
  {
    if (add_all_precursor_charges_)
    {
      for (Int z = min_charge; z <= max_charge; ++z)
      {
        addPrecursorPeaks_(spectrum, peptide, *ion_names, *charges, z);
        chunks.add(false); // precursor chunk is not sorted
      }
    }
    else // add_all_precursor_charges_ = false, only add precursor with highest charge
    {
      addPrecursorPeaks_(spectrum, peptide, *ion_names, *charges, max_charge);
      chunks.add(false);
    }
  }
  if (add_abundant_immonium_ions_)
  {
    addAbundantImmoniumIons_(spectrum, peptide, *ion_names, *charges);
    chunks.add(true); // this chunk is ordered, as the if-statements in addAbundantImmoniumIons_() are in ascending order (by MZ)
  }
  if (add_metainfo_)
  {
    // only push if the arrays were dynamic temporaries; otherwise the data is
    // already inside the spectrum's arrays
    if (spectrum.getIntegerDataArrays().empty())
    {
      spectrum.getIntegerDataArrays().push_back(std::move(*charges));
    }
    if (spectrum.getStringDataArrays().empty())
    {
      spectrum.getStringDataArrays().push_back(std::move(*ion_names));
    }
  }
  if (sort_by_position_) spectrum.sortByPositionPresorted(chunks.getChunks());
  // set MS Level
  spectrum.setMSLevel(2);
  // set spectrum type
  spectrum.setType(MSSpectrum::SpectrumSettings::SpectrumType::CENTROID);
  // set precursor
  Precursor prec;
  if (precursor_charge == 0)
  {
    precursor_charge = max_charge +1;
  }
  // NOTE(review): this validation happens after all peaks were generated; an
  // invalid charge throws with 'spectrum' already (partially) filled.
  if (precursor_charge < max_charge)
  {
    throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "'precursor_charge' has to be higher than or equal to 'max_charge'.");
  }
  prec.setCharge(precursor_charge);
  prec.setMZ(peptide.getMZ(precursor_charge, Residue::Full));
  spectrum.getPrecursors().push_back(prec);
}
// Convenience factory: configures a generator with ion series typical for the
// given fragmentation method (CID/HCD -> b/y(+a); ECD/ETD -> c/z+1/z+2;
// ETciD/EThcD -> a/b/c/x/y/z/z+1/z+2) and returns the spectrum for 'seq'.
// Fragment charges used: 1 only if precursor_charge <= 2, else 1-2.
MSSpectrum TheoreticalSpectrumGenerator::generateSpectrum(const Precursor::ActivationMethod& fm, const AASequence& seq, int precursor_charge)
{
  if (precursor_charge == 0)
  {
    OPENMS_LOG_WARN << "Precursor charge can't be 0. Using 2 instead." << endl;
    precursor_charge = 2;
  }
  // initialize a TheoreticalSpectrumGenerator
  TheoreticalSpectrumGenerator theo_gen;
  // get current parameters (default)
  // default with b and y ions
  Param theo_gen_settings = theo_gen.getParameters();
  if (fm == Precursor::ActivationMethod::CID || fm == Precursor::ActivationMethod::HCID || fm == Precursor::ActivationMethod::HCD)
  {
    theo_gen_settings.setValue("add_b_ions", "true");
    theo_gen_settings.setValue("add_y_ions", "true");
    if (fm == Precursor::ActivationMethod::HCD || fm == Precursor::ActivationMethod::HCID)
    {
      theo_gen_settings.setValue("add_a_ions", "true");
    }
  }
  else if (fm == Precursor::ActivationMethod::ECD || fm == Precursor::ActivationMethod::ETD)
  {
    theo_gen_settings.setValue("add_c_ions", "true");
    theo_gen_settings.setValue("add_z_ions", "false");
    theo_gen_settings.setValue("add_zp1_ions", "true");
    theo_gen_settings.setValue("add_zp2_ions", "true");
    theo_gen_settings.setValue("add_b_ions", "false");
    theo_gen_settings.setValue("add_y_ions", "false");
  }
  else if (fm == Precursor::ActivationMethod::ETciD || fm == Precursor::ActivationMethod::EThcD)
  {
    // hybrid methods: enable everything
    theo_gen_settings.setValue("add_a_ions", "true");
    theo_gen_settings.setValue("add_b_ions", "true");
    theo_gen_settings.setValue("add_c_ions", "true");
    theo_gen_settings.setValue("add_x_ions", "true");
    theo_gen_settings.setValue("add_y_ions", "true");
    theo_gen_settings.setValue("add_z_ions", "true");
    theo_gen_settings.setValue("add_zp1_ions", "true");
    theo_gen_settings.setValue("add_zp2_ions", "true");
  }
  else
  {
    throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Fragmentation method is not supported.");
  }
  // set changed parameters
  theo_gen.setParameters(theo_gen_settings);
  // generate b/y or c/z-ion spectrum of peptide seq
  PeakSpectrum theo_spectrum;
  theo_gen.getSpectrum(theo_spectrum, seq, 1, precursor_charge <= 2 ? 1 : 2);
  return theo_spectrum;
}
// Adds one singly-charged immonium marker peak (intensity 1.0) for each of the
// characteristic residue types present in 'peptide'. Peaks are appended in
// ascending m/z order — getSpectrum() relies on this to mark the chunk sorted.
void TheoreticalSpectrumGenerator::addAbundantImmoniumIons_(PeakSpectrum& spectrum, const AASequence& peptide, DataArrays::StringDataArray& ion_names, DataArrays::IntegerDataArray& charges) const
{
  // helper: emit one immonium peak if the residue occurs in the peptide
  auto add_immonium = [&](char residue, const char* name, double mz)
  {
    if (!peptide.has(*ResidueDB::getInstance()->getResidue(residue)))
    {
      return;
    }
    if (add_metainfo_)
    {
      ion_names.emplace_back(name);
      charges.push_back(1);
    }
    spectrum.emplace_back(mz, 1.0); // emplace_back(MZ, intensity)
  };
  // keep ascending m/z order!
  add_immonium('P', "iP+", 70.0656);    // Proline (C4H8N)
  add_immonium('C', "iC+", 76.0221);    // Cysteine (C2H6NS)
  add_immonium('L', "iL/I+", 86.09698); // Iso/Leucine (same immonium mass)
  add_immonium('H', "iH+", 110.0718);   // Histidine (C5H8N3)
  add_immonium('F', "iF+", 120.0813);   // Phenylalanine (C8H10N)
  add_immonium('Y', "iY+", 136.0762);   // Tyrosine (C8H10NO)
  add_immonium('W', "iW+", 159.0922);   // Tryptophan
}
void TheoreticalSpectrumGenerator::addIsotopeCluster_(PeakSpectrum& spectrum,
                                                      const AASequence& ion,
                                                      DataArrays::StringDataArray& ion_names,
                                                      DataArrays::IntegerDataArray& charges,
                                                      const Residue::ResidueType res_type,
                                                      Int charge,
                                                      double intensity) const
{
  // Adds the full isotope cluster of one fragment ion.
  // Manually compute the correct sum formula (instead of using the built-in
  // assumption of a hydrogen adduct), then neutralize before isotope expansion.
  EmpiricalFormula sum_formula = ion.getFormula(res_type, charge) + EmpiricalFormula("H") * charge;
  sum_formula.setCharge(0);
  IsotopeDistribution isotopes;
  switch (isotope_model_)
  {
    case 1: isotopes = sum_formula.getIsotopeDistribution(CoarseIsotopePatternGenerator(max_isotope_)); break;
    case 2: isotopes = sum_formula.getIsotopeDistribution(FineIsotopePatternGenerator(max_isotope_probability_)); break;
    default: break; // no model selected: empty distribution, nothing is added
  }
  const String cluster_name = String(Residue::residueTypeToIonLetter(res_type)) + String(ion.size());
  for (const auto& iso_peak : isotopes)
  {
    if (add_metainfo_) // one entry per peak
    {
      // TODO find naming scheme for isotopes of fragments
      ion_names.push_back(cluster_name);
      charges.push_back(charge);
    }
    spectrum.emplace_back(iso_peak.getMZ() / charge, intensity * iso_peak.getIntensity());
  }
}
void TheoreticalSpectrumGenerator::addLossesFaster_(PeakSpectrum& spectrum,
                                                    double mz,
                                                    const std::set<EmpiricalFormula>& f_losses,
                                                    int ion_ordinal,
                                                    DataArrays::StringDataArray& ion_names,
                                                    DataArrays::IntegerDataArray& charges,
                                                    const std::map<EmpiricalFormula, String>& formula_str_cache,
                                                    double intensity,
                                                    const String& ion_type_string,
                                                    bool add_metainfo,
                                                    int charge) const
{
  // Adds one loss peak per accumulated loss formula for the ion at (uncharged
  // cumulative) mass 'mz'. 'formula_str_cache' maps loss formulas to their
  // precomputed string representation to avoid repeated toString() calls.
  const String charge_str((Size)abs(charge), '+');
  // negative ordinals (internal fragments) carry no ion number in the annotation
  const String ion_ordinal_str = ion_ordinal < 0 ? "-" : String(ion_ordinal) + "-";
  // TODO why do you need a separate set for the losses? Just use the keys from the formula_str_cache?
  for (const auto& formula : f_losses)
  {
    spectrum.emplace_back((mz - formula.getMonoWeight()) / (double)charge, intensity);
    if (add_metainfo)
    {
      const auto it = formula_str_cache.find(formula);
      // BUGFIX: do not dereference end() on a cache miss; fall back to
      // formatting the formula on the fly (slower, but well-defined).
      const String loss_name = (it != formula_str_cache.end()) ? it->second : formula.toString();
      ion_names.push_back(ion_type_string);
      ion_names.back().reserve(2 + ion_ordinal_str.size() + loss_name.size() + charge_str.size());
      ((ion_names.back() += ion_ordinal_str) += loss_name) += charge_str;
      charges.push_back(charge);
    }
  }
}
// Adds neutral-loss peaks (optionally as isotope clusters) for one fragment ion.
// Collects the set of distinct loss formulas over all residues of 'ion' and
// subtracts each from the ion's sum formula; chemically impossible losses
// (negative element counts) are skipped. Loss peaks are scaled by
// rel_loss_intensity_ relative to 'intensity'.
void TheoreticalSpectrumGenerator::addLosses_(PeakSpectrum& spectrum,
const AASequence& ion,
DataArrays::StringDataArray& ion_names,
DataArrays::IntegerDataArray& charges,
double intensity,
const Residue::ResidueType res_type,
int charge) const
{
const String charge_str((Size)abs(charge), '+');
const String ion_type_str(Residue::residueTypeToIonLetter(res_type));
const String ion_ordinal_str(String(ion.size()) + "-");
// deduplicate losses by their string representation
std::set<String> losses;
for (const auto& it : ion)
{
if (it.hasNeutralLoss())
{
for (const auto& formula : it.getLossFormulas())
{
losses.insert(formula.toString());
}
}
}
spectrum.reserve(spectrum.size() + losses.size());
String ion_name;
for (const auto& it : losses)
{
EmpiricalFormula loss_ion = ion.getFormula(res_type, charge) - EmpiricalFormula(it);
// see 74e2ce6761e4a273164b29b8be487
// thanks to Chris and Sandro
// check for negative element frequencies (might happen if losses are not allowed for specific ions)
bool negative_elements(false);
for (const auto& eit : loss_ion)
{
if (eit.second < 0)
{
negative_elements = true;
break;
}
}
if (negative_elements)
{
// loss not chemically possible for this ion -> skip
continue;
}
double loss_pos = loss_ion.getMonoWeight();
const String& loss_name = it;
// annotation, e.g. "y4-H2O++"
ion_name = ion_type_str + ion_ordinal_str + loss_name + charge_str;
if (add_isotopes_)
{
// manually compute correct sum formula (instead of using built-in assumption of hydrogen adduct)
loss_ion += EmpiricalFormula("H") * charge;
loss_ion.setCharge(0);
IsotopeDistribution dist;
if (isotope_model_ == 1)
{
dist = loss_ion.getIsotopeDistribution(CoarseIsotopePatternGenerator(max_isotope_));
}
else if (isotope_model_ == 2)
{
dist = loss_ion.getIsotopeDistribution(FineIsotopePatternGenerator(max_isotope_probability_));
}
// one peak (and one annotation entry) per isotope
for (const auto& iso : dist)
{
if (add_metainfo_)
{
ion_names.push_back(ion_name);
charges.push_back(charge);
}
spectrum.emplace_back(iso.getMZ() / (double)charge, intensity * rel_loss_intensity_ * iso.getIntensity());
}
}
else
{
if (add_metainfo_)
{
ion_names.push_back(ion_name);
charges.push_back(charge);
}
spectrum.emplace_back(loss_pos / (double)charge, intensity * rel_loss_intensity_);
}
}
}
// Adds internal fragment (a/b-type) peaks: for every internal start position,
// cumulative fragments of 2..10 residues are generated (shorter than full
// peptide, excluding termini). Internal a-ions are annotated with a "-CO"
// suffix. Optionally adds neutral-loss peaks for each internal fragment.
void TheoreticalSpectrumGenerator::addInternalFragmentPeaks_(PeakSpectrum& spectrum,
const AASequence& peptide,
DataArrays::StringDataArray& ion_names,
DataArrays::IntegerDataArray& charges,
MSSpectrum::Chunks& chunks,
const Residue::ResidueType res_type,
Int charge) const
{
// mass offsets converting an internal fragment to an a-/b-ion (computed once)
static double stat_a = Residue::getInternalToAIon().getMonoWeight();
static double stat_b = Residue::getInternalToBIon().getMonoWeight();
int f = 1 + int(add_isotopes_) + int(add_losses_); // TODO: calculate number of internal fragments
spectrum.reserve(spectrum.size() + f * peptide.size());
// precompute formula_str_cache
// NOTE(review): unlike addPeaks_, the cache is built whenever add_losses_ is
// set (not only with add_metainfo_) — presumably harmless, but confirm intent.
std::map<EmpiricalFormula, String> formula_str_cache;
if (add_losses_)
{
for (auto& p : peptide)
{
for (auto& formula : p.getLossFormulas())
{
String& loss_name = formula_str_cache[formula];
if (loss_name.empty())
{
loss_name = formula.toString();
}
}
}
if (add_term_losses_)
{
{
auto formula = EmpiricalFormula("H2O");
String& loss_name = formula_str_cache[formula];
if (loss_name.empty())
{
loss_name = formula.toString();
}
}
{
auto formula = EmpiricalFormula("NH3");
String& loss_name = formula_str_cache[formula];
if (loss_name.empty())
{
loss_name = formula.toString();
}
}
}
}
for (Size l = 1; l < peptide.size() - 1 - 2; ++l) // start at a2/b2, stop at n-1 a/b ion with min length of 2
{
double intensity(1);
// support for b and a type internal ions, TODO: own intensity for these type of ions?
switch (res_type)
{
case Residue::AIon: intensity = a_intensity_; break;
case Residue::BIon: intensity = b_intensity_; break;
default: break;
}
// start mass: charge protons; residue masses are accumulated below
double mono_weight(Constants::PROTON_MASS_U * charge);
std::set<EmpiricalFormula> fx_losses;
double initial_mono_weight(mono_weight);
String ion_name;
for (size_t i = l; i < peptide.size() - 1; ++i)
{
if (i-l >= 10) break; // unlikely to observe longer internal fragments
mono_weight += peptide[i].getMonoWeight(Residue::Internal); // standard internal residue including named modifications: c
ion_name += peptide[i].getOneLetterCode();
if (i==l) continue; // don't generate peak for single AA
double pos(mono_weight);
double ion_offset = 0;
switch (res_type)
{
case Residue::AIon: ion_offset = stat_a; break;
case Residue::BIon: ion_offset = stat_b; break;
default: break;
}
pos = (pos + ion_offset) / charge;
spectrum.emplace_back(pos, intensity);
if (add_metainfo_)
{
if (res_type == Residue::AIon)
{
// internal a-ions are named by their sequence plus "-CO"
ion_names.emplace_back(ion_name + "-CO");
}
else // Residue::BIon
{
ion_names.emplace_back(ion_name);
}
charges.push_back(charge);
}
}
chunks.add(true);
if (add_losses_)
{
// second pass over the same fragment window: loss peaks
mono_weight = initial_mono_weight;
String ion_name;
for (size_t i = l; i < peptide.size() - 1; ++i)
{
if (i-l >= 10) break; // unlikely to observe longer internal fragments
mono_weight += peptide[i].getMonoWeight(Residue::Internal); // standard internal residue including named modifications: c
ion_name += peptide[i].getOneLetterCode();
if (i==l) continue; // don't generate peak for single AA
double ion_offset = 0;
switch (res_type)
{
case Residue::AIon: ion_offset = stat_a; break;
case Residue::BIon: ion_offset = stat_b; break;
default: break;
}
// losses accumulate: once a residue with a loss is part of the fragment,
// the loss applies to all longer fragments as well
if (peptide[i].hasNeutralLoss())
{
for (const auto& formula : peptide[i].getLossFormulas()) fx_losses.insert(formula);
}
const String annotation_prefix_string = (res_type == Residue::AIon) ? ion_name + "-CO" : ion_name; // add string indicating a-ion
addLossesFaster_(spectrum, mono_weight + ion_offset, fx_losses,
-1, ion_names, charges, formula_str_cache, intensity * rel_loss_intensity_, // -1 = don't add ion number for internal ions
annotation_prefix_string, add_metainfo_, charge);
chunks.add(false); // unfortunately, the losses are not always inserted in sorted order
}
}
}
}
// Generates the peaks of one ion series (a/b/c or x/y/z(+1/+2)) at one charge
// state. Prefix series accumulate residue masses N->C, suffix series C->N;
// per peak either a single monoisotopic peak (fast path) or a full isotope
// cluster (slow path) is added, optionally followed by neutral-loss peaks.
void TheoreticalSpectrumGenerator::addPeaks_(PeakSpectrum& spectrum,
const AASequence& peptide,
DataArrays::StringDataArray& ion_names,
DataArrays::IntegerDataArray& charges,
MSSpectrum::Chunks& chunks,
const Residue::ResidueType res_type,
Int charge) const
{
const String charge_str((Size)abs(charge), '+');
const String ion_name_str(Residue::residueTypeToIonLetter(res_type));
int min_nr_new_peaks = 1 + int(add_isotopes_) + int(add_losses_);
spectrum.reserve(spectrum.size() + min_nr_new_peaks * peptide.size());
// Generate the ion peaks:
// Does not generate peaks of full peptide (therefore "<").
// They are added via precursor mass (and neutral losses).
// Could be changed in the future.
double intensity(1);
switch (res_type)
{
case Residue::AIon: intensity = a_intensity_; break;
case Residue::BIon: intensity = b_intensity_; break;
case Residue::CIon: if (peptide.size() < 2) throw Exception::InvalidSize(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, 1, "peptide must have at least 2 residues for c-ion generation"); intensity = c_intensity_; break;
case Residue::XIon: if (peptide.size() < 2) throw Exception::InvalidSize(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, 1, "peptide must have at least 2 residues for x-ion generation"); intensity = x_intensity_; break;
case Residue::YIon: intensity = y_intensity_; break;
case Residue::ZIon: intensity = z_intensity_; break;
// TODO use different intensities?
case Residue::Zp1Ion: intensity = z_intensity_; break;
case Residue::Zp2Ion: intensity = z_intensity_; break;
default: break;
}
// start mass: charge protons; residue masses are accumulated in the loops below
double mono_weight(Constants::PROTON_MASS_U * charge);
std::set<EmpiricalFormula> fx_losses;
// note: we will use a map instead of unordered_map because hashing the
// formula would be basically equivalent to calling toString()
// which we are trying to avoid here, while the less than operator
// in a map can check for size first and check the element map of a formula one-by-one
std::map<EmpiricalFormula, String> formula_str_cache;
// pre-compute formula_str_cache
if (add_losses_ && add_metainfo_)
{
for (auto& p : peptide)
{
for (auto& formula : p.getLossFormulas())
{
String& loss_name = formula_str_cache[formula];
if (loss_name.empty())
{
loss_name = formula.toString();
}
}
}
if (add_term_losses_)
{
{
auto formula = EmpiricalFormula("H2O");
String& loss_name = formula_str_cache[formula];
if (loss_name.empty())
{
loss_name = formula.toString();
}
}
{
auto formula = EmpiricalFormula("NH3");
String& loss_name = formula_str_cache[formula];
if (loss_name.empty())
{
loss_name = formula.toString();
}
}
}
}
if (res_type == Residue::AIon || res_type == Residue::BIon || res_type == Residue::CIon)
{
// prefix (N-terminal) series
if (peptide.hasNTerminalModification())
{
mono_weight += peptide.getNTerminalModification()->getDiffMonoMass();
}
double initial_mono_weight(mono_weight);
// mass offsets converting an internal fragment to an a-/b-/c-ion (computed once)
static double stat_a = Residue::getInternalToAIon().getMonoWeight();
static double stat_b = Residue::getInternalToBIon().getMonoWeight();
static double stat_c = Residue::getInternalToCIon().getMonoWeight();
if (!add_isotopes_) // add single peak
{
// skip the first prefix ion (a1/b1/c1) if so configured
Size i = Size(!add_first_prefix_ion_);
if (i == 1)
{
mono_weight += peptide[0].getMonoWeight(Residue::Internal);
if (peptide[0].hasNeutralLoss())
{
for (const auto& formula : peptide[0].getLossFormulas()) fx_losses.insert(formula);
}
}
for (; i < peptide.size() - 1; ++i)
{
mono_weight += peptide[i].getMonoWeight(Residue::Internal); // standard internal residue including named modifications: c
double pos(mono_weight);
double ion_offset = 0;
switch (res_type)
{
case Residue::AIon: ion_offset = stat_a; break;
case Residue::BIon: ion_offset = stat_b; break;
case Residue::CIon: ion_offset = stat_c; break;
default: break;
}
pos = (pos + ion_offset) / charge;
spectrum.emplace_back(pos, intensity);
if (add_metainfo_)
{
// annotation, e.g. "b3++"
ion_names.emplace_back(ion_name_str);
ion_names.back().reserve(2 + 2 + charge_str.size());
(ion_names.back() += (i + 1)) += charge_str;
charges.push_back(charge);
}
}
chunks.add(true);
mono_weight = initial_mono_weight;
if (add_losses_)
{
// second pass over the series: loss peaks (losses accumulate N->C)
const String annotation_prefix_string(Residue::residueTypeToIonLetter(res_type));
if (add_term_losses_)
{
fx_losses.insert(EmpiricalFormula("H2O")); // HCD water loss at N-term
}
for (i = Size(!add_first_prefix_ion_); i < peptide.size() - 1; ++i)
{
mono_weight += peptide[i].getMonoWeight(Residue::Internal); // standard internal residue including named modifications: c
double ion_offset = 0;
switch (res_type)
{
case Residue::AIon: ion_offset = stat_a; break;
case Residue::BIon: ion_offset = stat_b; break;
case Residue::CIon: ion_offset = stat_c; break;
default: break;
}
if (peptide[i].hasNeutralLoss())
{
for (const auto& formula : peptide[i].getLossFormulas()) fx_losses.insert(formula);
}
addLossesFaster_(spectrum, mono_weight + ion_offset, fx_losses,
i + 1, ion_names, charges, formula_str_cache, intensity * rel_loss_intensity_,
annotation_prefix_string, add_metainfo_, charge);
chunks.add(false); // unfortunately, the losses are not always inserted in sorted order
}
}
}
else // add isotope clusters (slow)
{
Size i = add_first_prefix_ion_ ? 1 : 2;
for (; i < peptide.size(); ++i)
{
const AASequence ion = peptide.getPrefix(i);
addIsotopeCluster_(spectrum, ion, ion_names, charges, res_type, charge, intensity);
}
chunks.add(true);
if (add_losses_)
{
// add loss peaks (slow)
i = add_first_prefix_ion_ ? 1 : 2;
for (; i < peptide.size(); ++i)
{
const AASequence ion = peptide.getPrefix(i);
addLosses_(spectrum, ion, ion_names, charges, intensity, res_type, charge);
}
chunks.add(true);
}
}
}
else // if (res_type == Residue::XIon || res_type == Residue::YIon || res_type == Residue::ZIon)
{
// suffix (C-terminal) series
if (peptide.hasCTerminalModification())
{
mono_weight += peptide.getCTerminalModification()->getDiffMonoMass();
}
double initial_mono_weight(mono_weight);
static double stat_x = Residue::getInternalToXIon().getMonoWeight();
static double stat_y = Residue::getInternalToYIon().getMonoWeight();
static double stat_z = Residue::getInternalToZIon().getMonoWeight();
static double stat_zp1 = Residue::getInternalToZp1Ion().getMonoWeight();
static double stat_zp2 = Residue::getInternalToZp2Ion().getMonoWeight();
if (!add_isotopes_) // add single peak
{
for (Size i = peptide.size() - 1; i > 0; --i)
{
mono_weight += peptide[i].getMonoWeight(Residue::Internal); // standard internal residue including named modifications: c
double pos(mono_weight);
double ion_offset = 0;
switch (res_type)
{
case Residue::XIon: ion_offset = stat_x; break;
case Residue::YIon: ion_offset = stat_y; break;
case Residue::ZIon: ion_offset = stat_z; break;
case Residue::Zp1Ion: ion_offset = stat_zp1; break;
case Residue::Zp2Ion: ion_offset = stat_zp2; break;
default: break;
}
pos = (pos + ion_offset) / charge;
spectrum.emplace_back(pos, intensity);
if (add_metainfo_)
{
ion_names.emplace_back(ion_name_str);
//note: size of Residue::residueTypeToIonLetter(res_type) => 1, size of String(peptide.size() - i) => 3;
ion_names.back().reserve(2 + 3 + charge_str.size());
(ion_names.back() += Size(peptide.size() - i)) += charge_str;
charges.push_back(charge);
}
}
chunks.add(true);
if (add_losses_)
{
// second pass over the series: loss peaks (losses accumulate C->N)
const String annotation_prefix_string(Residue::residueTypeToIonLetter(res_type));
if (add_term_losses_)
{
fx_losses.insert(EmpiricalFormula("H2O")); // HCD water and ammonia loss at C-term
fx_losses.insert(EmpiricalFormula("NH3"));
}
mono_weight = initial_mono_weight;
for (Size i = peptide.size() - 1; i > 0; --i)
{
mono_weight += peptide[i].getMonoWeight(Residue::Internal); // standard internal residue including named modifications: c
double ion_offset = 0;
switch (res_type)
{
case Residue::XIon: ion_offset = stat_x; break;
case Residue::YIon: ion_offset = stat_y; break;
case Residue::ZIon: ion_offset = stat_z; break;
case Residue::Zp1Ion: ion_offset = stat_zp1; break;
case Residue::Zp2Ion: ion_offset = stat_zp2; break;
default: break;
}
if (peptide[i].hasNeutralLoss())
{
for (const auto& formula : peptide[i].getLossFormulas()) fx_losses.insert(formula);
}
addLossesFaster_(spectrum, mono_weight + ion_offset, fx_losses,
peptide.size() - i, ion_names, charges, formula_str_cache, intensity * rel_loss_intensity_,
annotation_prefix_string, add_metainfo_, charge);
chunks.add(false); // losses are not always added in sorted order
}
}
}
else // add isotope clusters
{
for (Size i = 1; i < peptide.size(); ++i)
{
const AASequence ion = peptide.getSuffix(i);
addIsotopeCluster_(spectrum, ion, ion_names, charges, res_type, charge, intensity);
}
chunks.add(true);
if (add_losses_)
{
// add loss peaks (slow)
for (Size i = 1; i < peptide.size(); ++i)
{
const AASequence ion = peptide.getSuffix(i);
addLosses_(spectrum, ion, ion_names, charges, intensity, res_type, charge);
}
chunks.add(true);
}
}
}
}
// Adds the precursor peak [M+zH] plus its water- and ammonia-loss peaks,
// each either as a single monoisotopic peak or as a full isotope cluster.
// Fix/cleanup: removed the unused locals 'ion_name_h2o'/'ion_name_nh3' and
// deduplicated the six copy-pasted name-construction / peak-addition branches
// into two local lambdas. Peak values and annotations are unchanged.
void TheoreticalSpectrumGenerator::addPrecursorPeaks_(PeakSpectrum& spectrum,
                                                      const AASequence& peptide,
                                                      DataArrays::StringDataArray& ion_names,
                                                      DataArrays::IntegerDataArray& charges,
                                                      Int charge) const
{
  const String charge_str((Size)abs(charge), '+');
  // builds the annotation, e.g. "[M+2H-H2O]++"; 'loss' is "" or "-H2O"/"-NH3"
  auto make_ion_name = [&](const String& loss) -> String
  {
    if (charge == 1)
    {
      return "[M+H" + loss + "]" + charge_str;
    }
    return "[M+" + String(charge) + "H" + loss + "]" + charge_str;
  };
  // adds one precursor(-loss) peak: either the monoisotopic peak at
  // 'mono_pos' or the isotope cluster of the (neutralized) formula 'f'
  auto add_peak = [&](EmpiricalFormula f, double mono_pos, double peak_intensity, const String& ion_name)
  {
    if (add_isotopes_)
    {
      // manually compute correct sum formula (instead of using built-in assumption of hydrogen adduct)
      f += EmpiricalFormula("H") * charge;
      f.setCharge(0);
      IsotopeDistribution dist;
      if (isotope_model_ == 1)
      {
        dist = f.getIsotopeDistribution(CoarseIsotopePatternGenerator(max_isotope_));
      }
      else if (isotope_model_ == 2)
      {
        dist = f.getIsotopeDistribution(FineIsotopePatternGenerator(max_isotope_probability_));
      }
      for (const auto& iso : dist)
      {
        if (add_metainfo_)
        {
          ion_names.push_back(ion_name);
          charges.push_back(charge);
        }
        spectrum.emplace_back(iso.getMZ() / (double)charge, peak_intensity * iso.getIntensity());
      }
    }
    else
    {
      if (add_metainfo_)
      {
        ion_names.push_back(ion_name);
        charges.push_back(charge);
      }
      spectrum.emplace_back(mono_pos / (double)charge, peak_intensity);
    }
  };
  // precursor peak [M+zH]
  add_peak(peptide.getFormula(Residue::Full, charge),
           peptide.getMonoWeight(Residue::Full, charge),
           pre_int_, make_ion_name(""));
  // loss of water [M+zH-H2O]
  EmpiricalFormula ion = peptide.getFormula(Residue::Full, charge) - EmpiricalFormula("H2O");
  add_peak(ion, ion.getMonoWeight(), pre_int_H2O_, make_ion_name("-H2O"));
  // loss of ammonia [M+zH-NH3]
  ion = peptide.getFormula(Residue::Full, charge) - EmpiricalFormula("NH3");
  add_peak(ion, ion.getMonoWeight(), pre_int_NH3_, make_ion_name("-NH3"));
}
void TheoreticalSpectrumGenerator::getPrefixAndSuffixIonsMZ(std::vector<float>& spectrum, const AASequence& peptide, int charge) const
{
for (Int z = charge; z >= 1; --z)
{
if (add_b_ions_)
{
addPrefixAndSuffixIons_(spectrum, peptide, Residue::BIon, z);
}
if (add_y_ions_)
{
addPrefixAndSuffixIons_(spectrum, peptide, Residue::YIon, z);
}
if (add_a_ions_)
{
addPrefixAndSuffixIons_(spectrum, peptide, Residue::AIon, z );
}
if (add_x_ions_)
{
addPrefixAndSuffixIons_(spectrum, peptide, Residue::XIon, z);
}
if (add_c_ions_)
{
addPrefixAndSuffixIons_(spectrum, peptide, Residue::CIon, z);
}
if (add_z_ions_)
{
addPrefixAndSuffixIons_(spectrum, peptide, Residue::ZIon, z);
}
}
std::sort(spectrum.begin(), spectrum.end());
return;
}
void TheoreticalSpectrumGenerator::addPrefixAndSuffixIons_(std::vector<float>& spectrum, const OpenMS::AASequence& peptide, Residue::ResidueType res_type, int charge)
{
  // Appends the m/z of every cumulative prefix (a/b/c) or suffix (x/y/z)
  // fragment of 'peptide' at the given charge to 'spectrum' (unsorted).
  if (peptide.empty())
  {
    cout << "Warning: Attempt at creating Prefix and Suffix Ions Spectrum from empty string!" << endl;
    return;
  }
  // start mass: charge protons plus the series-specific terminus offset
  double running_mass = Constants::PROTON_MASS_U * charge;
  const bool is_prefix = (res_type == Residue::AIon || res_type == Residue::BIon || res_type == Residue::CIon);
  if (is_prefix)
  {
    if (peptide.hasNTerminalModification())
    {
      running_mass += peptide.getNTerminalModification()->getDiffMonoMass();
    }
    switch (res_type)
    {
      case Residue::AIon: running_mass += Residue::getInternalToAIon().getMonoWeight(); break;
      case Residue::BIon: running_mass += Residue::getInternalToBIon().getMonoWeight(); break;
      case Residue::CIon: running_mass += Residue::getInternalToCIon().getMonoWeight(); break;
      default: break;
    }
    // accumulate residue masses N -> C
    for (Size idx = 0; idx < peptide.size(); ++idx)
    {
      running_mass += peptide[idx].getMonoWeight(Residue::Internal);
      spectrum.emplace_back(running_mass / charge);
    }
  }
  else // x/y/z series
  {
    if (peptide.hasCTerminalModification())
    {
      running_mass += peptide.getCTerminalModification()->getDiffMonoMass();
    }
    switch (res_type)
    {
      case Residue::XIon: running_mass += Residue::getInternalToXIon().getMonoWeight(); break;
      case Residue::YIon: running_mass += Residue::getInternalToYIon().getMonoWeight(); break;
      case Residue::ZIon: running_mass += Residue::getInternalToZIon().getMonoWeight(); break;
      default: break;
    }
    // accumulate residue masses C -> N
    for (Size idx = peptide.size(); idx >= 1; --idx)
    {
      running_mass += peptide[idx - 1].getMonoWeight(Residue::Internal);
      spectrum.emplace_back(running_mass / charge);
    }
  }
}
// Caches all user parameters in member variables, so the hot peak-generation
// code paths avoid repeated string-based parameter lookups.
void TheoreticalSpectrumGenerator::updateMembers_()
{
// ion series toggles
add_b_ions_ = param_.getValue("add_b_ions").toBool();
add_y_ions_ = param_.getValue("add_y_ions").toBool();
add_a_ions_ = param_.getValue("add_a_ions").toBool();
add_c_ions_ = param_.getValue("add_c_ions").toBool();
add_x_ions_ = param_.getValue("add_x_ions").toBool();
add_z_ions_ = param_.getValue("add_z_ions").toBool();
add_zp1_ions_ = param_.getValue("add_zp1_ions").toBool();
add_zp2_ions_ = param_.getValue("add_zp2_ions").toBool();
add_first_prefix_ion_ = param_.getValue("add_first_prefix_ion").toBool();
add_losses_ = param_.getValue("add_losses").toBool();
add_term_losses_ = param_.getValue("add_term_losses").toBool();
add_metainfo_ = param_.getValue("add_metainfo").toBool();
// any isotope model other than "none" implies isotope clusters are generated
add_isotopes_ = param_.getValue("isotope_model") != "none";
add_internal_fragments_ = param_.getValue("add_internal_fragments").toBool();
// isotope_model_: 1 = coarse, 2 = fine (unchanged otherwise)
if (param_.getValue("isotope_model") == "coarse") isotope_model_ = 1;
else if (param_.getValue("isotope_model") == "fine") isotope_model_ = 2;
sort_by_position_ = param_.getValue("sort_by_position").toBool();
add_precursor_peaks_ = param_.getValue("add_precursor_peaks").toBool();
add_all_precursor_charges_ = param_.getValue("add_all_precursor_charges").toBool();
add_abundant_immonium_ions_ = param_.getValue("add_abundant_immonium_ions").toBool();
// per-series and special-peak intensities
a_intensity_ = (double)param_.getValue("a_intensity");
b_intensity_ = (double)param_.getValue("b_intensity");
c_intensity_ = (double)param_.getValue("c_intensity");
x_intensity_ = (double)param_.getValue("x_intensity");
y_intensity_ = (double)param_.getValue("y_intensity");
z_intensity_ = (double)param_.getValue("z_intensity");
max_isotope_ = (Int)param_.getValue("max_isotope");
max_isotope_probability_ = param_.getValue("max_isotope_probability");
rel_loss_intensity_ = (double)param_.getValue("relative_loss_intensity");
pre_int_ = (double)param_.getValue("precursor_intensity");
pre_int_H2O_ = (double)param_.getValue("precursor_H2O_intensity");
pre_int_NH3_ = (double)param_.getValue("precursor_NH3_intensity");
}
} // end namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/Ribonucleotide.cpp | .cpp | 4,193 | 175 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/Ribonucleotide.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <iostream>
using namespace std;
namespace OpenMS
{
// default neutral-loss formula for losing the base: C5H10O5 (the ribose unit),
// used when a ribonucleotide does not specify its own base-loss formula
const EmpiricalFormula Ribonucleotide::default_baseloss_ =
EmpiricalFormula("C5H10O5");
ostream& operator<<(ostream& os, const Ribonucleotide& ribo)
{
  // short human-readable summary: code (name, formula)
  return os << "Ribonucleotide '"
            << ribo.code_ << "' ("
            << ribo.name_ << ", "
            << ribo.formula_ << ")";
}
// Full-field constructor; all members are initialized directly from the
// arguments (no validation is performed here).
Ribonucleotide::Ribonucleotide(
const String& name, const String& code, const String& new_code,
const String& html_code, const EmpiricalFormula& formula, char origin,
double mono_mass, double avg_mass, enum TermSpecificityNuc term_spec,
const EmpiricalFormula& baseloss_formula):
name_(name), code_(code), new_code_(new_code), html_code_(html_code),
formula_(formula), origin_(origin), mono_mass_(mono_mass),
avg_mass_(avg_mass), term_spec_(term_spec),
baseloss_formula_(baseloss_formula)
{
}
Ribonucleotide::~Ribonucleotide() = default;
bool Ribonucleotide::operator==(const Ribonucleotide& other) const
{
  // full member-wise equality; bail out on the first mismatch
  if (name_ != other.name_) return false;
  if (code_ != other.code_) return false;
  if (new_code_ != other.new_code_) return false;
  if (html_code_ != other.html_code_) return false;
  if (formula_ != other.formula_) return false;
  if (origin_ != other.origin_) return false;
  if (mono_mass_ != other.mono_mass_) return false;
  if (avg_mass_ != other.avg_mass_) return false;
  if (term_spec_ != other.term_spec_) return false;
  return baseloss_formula_ == other.baseloss_formula_;
}
// returns the (short) modification code, e.g. "Am"
// NOTE(review): returns a const value (copy), not a reference — presumably
// historical; changing the signature would touch the header.
const String Ribonucleotide::getCode() const
{
return code_;
}
// sets the (short) modification code
void Ribonucleotide::setCode(const String& code)
{
code_ = code;
}
// returns the full name of the (modified) ribonucleotide
const String Ribonucleotide::getName() const
{
return name_;
}
// sets the full name
void Ribonucleotide::setName(const String& name)
{
name_ = name;
}
// returns the monoisotopic mass (Da)
double Ribonucleotide::getMonoMass() const
{
return mono_mass_;
}
// sets the monoisotopic mass (Da)
void Ribonucleotide::setMonoMass(double mono_mass)
{
mono_mass_ = mono_mass;
}
// returns the average mass (Da)
double Ribonucleotide::getAvgMass() const
{
return avg_mass_;
}
// sets the average mass (Da)
void Ribonucleotide::setAvgMass(double avg_mass)
{
avg_mass_ = avg_mass;
}
// returns the alternative ("new") code
const String Ribonucleotide::getNewCode() const
{
return new_code_;
}
// sets the alternative ("new") code
void Ribonucleotide::setNewCode(const String& new_code)
{
new_code_ = new_code;
}
// returns the one-letter code of the unmodified parent ribonucleotide
char Ribonucleotide::getOrigin() const
{
return origin_;
}
// sets the one-letter code of the unmodified parent ribonucleotide
void Ribonucleotide::setOrigin(char origin)
{
origin_ = origin;
}
// returns the HTML (XHTML entity) representation of the code
String Ribonucleotide::getHTMLCode() const
{
return html_code_;
}
// sets the HTML representation of the code
void Ribonucleotide::setHTMLCode(const String& html_code)
{
html_code_ = html_code;
}
// returns the empirical (sum) formula of the ribonucleotide
const EmpiricalFormula Ribonucleotide::getFormula() const
{
return formula_;
}
// sets the empirical (sum) formula
void Ribonucleotide::setFormula(const EmpiricalFormula& formula)
{
formula_ = formula;
}
// returns the terminal specificity (e.g. anywhere / 5' / 3')
enum Ribonucleotide::TermSpecificityNuc Ribonucleotide::getTermSpecificity() const
{
return term_spec_;
}
// sets the terminal specificity
// @throws Exception::InvalidValue if the sentinel NUMBER_OF_TERM_SPECIFICITY is passed
void Ribonucleotide::setTermSpecificity(enum TermSpecificityNuc term_spec)
{
// reject the enum's end-marker, which is not a valid specificity
if (term_spec == NUMBER_OF_TERM_SPECIFICITY)
{
String msg = "invalid terminal specificity";
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
msg, "NUMBER_OF_TERM_SPECIFICITY");
}
term_spec_ = term_spec;
}
// returns the neutral-loss formula for losing the base
// (see default_baseloss_ for the fallback value)
const EmpiricalFormula Ribonucleotide::getBaselossFormula() const
{
return baseloss_formula_;
}
// sets the neutral-loss formula for losing the base
void Ribonucleotide::setBaselossFormula(const EmpiricalFormula& formula)
{
baseloss_formula_ = formula;
}
bool Ribonucleotide::isModified() const
{
  // an unmodified ribonucleotide has a one-character code equal to its origin
  const bool is_unmodified = (code_.length() == 1) && (code_[0] == origin_);
  return !is_unmodified;
}
bool Ribonucleotide::isAmbiguous() const
{
  // Ambiguous modifications are marked by a trailing '?' in the code.
  // BUGFIX: guard against an empty code — calling back() on an empty
  // string is undefined behavior.
  return !code_.empty() && code_.back() == '?';
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/Tagger.cpp | .cpp | 6,454 | 196 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Eugen Netz $
// $Authors: Timo Sachsenberg, Eugen Netz $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/Tagger.h>
#include <OpenMS/CHEMISTRY/ResidueDB.h>
#include <OpenMS/CHEMISTRY/Residue.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#ifdef _OPENMP
#include <omp.h>
#endif
namespace OpenMS
{
// Returns the one-letter code of the amino acid whose monoisotopic (internal)
// mass lies closest to 'm' within the configured tolerance, or ' ' (space)
// if no residue matches. Tolerance is absolute (Da) or ppm-scaled, depending
// on tol_is_ppm_.
char Tagger::getAAByMass_(double m) const
{
// fast check for border cases
if (m < min_gap_ || m > max_gap_) return ' ';
// absolute tolerance window around m (converted from ppm if requested)
const double delta = tol_is_ppm_ ? Math::ppmToMass(tolerance_, m) : tolerance_;
// first candidate mass at or above the lower edge of the window
auto left = mass2aa_.lower_bound(m - delta);
if (left == mass2aa_.end()) return ' ';
if (fabs(left->first - m) >= delta) return ' ';
// return the most exact one.
auto best_aa = left;
double min_delta = fabs(left->first - m);
// scan all further candidates still inside the window, keeping the closest
while (fabs(left->first - m) < delta)
{
left++;
if (left == mass2aa_.end()) break;
if (min_delta > fabs(left->first - m))
{
best_aa = left;
min_delta = fabs(left->first - m);
}
}
return best_aa->second;
}
// Recursive depth-first extension of a sequence tag: starting at peak index
// 'i', tries to match the (charge-scaled) m/z gap to every later peak to an
// amino acid mass; each successful match extends 'tag' and recurses. Every
// extension reaching min_tag_length_ is recorded in 'tags'.
void Tagger::getTag_(std::string & tag, const std::vector<double>& mzs, const size_t i, std::vector<std::string>& tags, const size_t charge) const
{
const size_t N = mzs.size();
size_t j = i + 1;
while (j < N)
{
if (tag.size() == max_tag_length_)
{
return; // maximum tag size reached? - continue with next parent
}
// gap between current and candidate peak; scaled by charge to get a mass
const double gap = mzs[j] - mzs[i];
if ((gap * charge) > max_gap_)
{
return; // already too far away - continue with next parent
}
const char aa = getAAByMass_(gap * charge);
if (aa == ' ') { ++j; continue; } // can't extend tag
tag += aa;
if (tag.size() >= min_tag_length_)
{
tags.push_back(tag);
}
getTag_(tag, mzs, j, tags, charge);
// if aa is "L", then also add "I" as an alternative residue and extend the tag again
// this will add redundancy, (and redundant runtime) but we avoid dealing with J and ambiguous matching to I and L later on
if (aa == 'L')
{
tag.pop_back();
tag.push_back('I');
if (tag.size() >= min_tag_length_)
{
tags.push_back(tag);
}
getTag_(tag, mzs, j, tags, charge);
}
tag.pop_back(); // remove last string
++j;
}
}
Tagger::Tagger(size_t min_tag_length, double tolerance, size_t max_tag_length, size_t min_charge, size_t max_charge, const StringList& fixed_mods, const StringList& var_mods, bool tol_is_ppm)
  : tolerance_{fabs(tolerance)}, tol_is_ppm_{tol_is_ppm}, min_tag_length_{min_tag_length}, max_tag_length_{max_tag_length}, min_charge_{min_charge}, max_charge_{max_charge}
{
  // Build the mass -> amino acid lookup table from the 19 natural residues
  // (the residue set excludes I; getTag_ adds I as an alternative for L).
  const std::set<const Residue*> aas = ResidueDB::getInstance()->getResidues("Natural19WithoutI");
  for (const auto& r : aas)
  {
    const char letter = r->getOneLetterCode()[0];
    const double mass = r->getMonoWeight(Residue::Internal);
    mass2aa_[mass] = letter;
  }
  // for fixed modifications, replace the unmodified residue with the modified one
  for (const auto& mod : fixed_mods)
  {
    const ResidueModification* rm = ModificationsDB::getInstance()->getModification(mod);
    Residue r = *(ResidueDB::getInstance()->getResidue(rm->getOrigin()));
    r.setModification(rm->getId());
    // remove the unmodified residue
    // this requires searching the map by value, but this is only done once when the Tagger is initialized
    for (std::map<double, char>::iterator it = mass2aa_.begin(); it != mass2aa_.end(); ++it)
    {
      if (it->second == rm->getOrigin())
      {
        mass2aa_.erase(it);
        break;
      }
    }
    const char name = rm->getOrigin();
    const double mass = r.getMonoWeight(Residue::Internal);
    mass2aa_[mass] = name;
  }
  // for variable modifications, add an additional instance of the residue with the modified mass to the list
  for (const auto& mod : var_mods)
  {
    const ResidueModification* rm = ModificationsDB::getInstance()->getModification(mod);
    Residue r = *(ResidueDB::getInstance()->getResidue(rm->getOrigin()));
    r.setModification(rm->getId());
    const char name = rm->getOrigin();
    const double mass = r.getMonoWeight(Residue::Internal);
    mass2aa_[mass] = name;
  }
  // cache the smallest/largest matchable mass gap, widened by the tolerance
  if (tol_is_ppm_)
  {
    min_gap_ = mass2aa_.begin()->first - Math::ppmToMass(tolerance_, mass2aa_.begin()->first);
    max_gap_ = mass2aa_.rbegin()->first + Math::ppmToMass(tolerance_, mass2aa_.rbegin()->first);
  }
  else
  {
    min_gap_ = mass2aa_.begin()->first - tolerance_;
    max_gap_ = mass2aa_.rbegin()->first + tolerance_;
  }
}
void Tagger::getTag(const std::vector<double>& mzs, std::vector<std::string>& tags) const
{
  // Generates all sequence tags from the m/z positions, trying every
  // eligible peak as a start point and every charge in [min_charge_, max_charge_].
  // Gaps are computed as mzs[j] - mzs[i]; assumes ascending m/z order -- TODO confirm.
  // start peak
  if (min_tag_length_ > mzs.size()) return; // avoid segfault
#pragma omp parallel
  {
    std::vector<std::string> tags_local; // thread-local buffer, merged below
#pragma omp for schedule(guided)
    for (int i = 0; i < static_cast<int>(mzs.size() - min_tag_length_); ++i)
    {
      for (size_t charge = min_charge_; charge <= max_charge_; ++charge)
      {
        std::string tag;
        getTag_(tag, mzs, i, tags_local, charge);
      }
    } // end of loop over starting peaks
#pragma omp critical (join_tags)
    tags.insert(tags.end(), tags_local.begin(), tags_local.end());
  } // end of parallel section
  // make tags unique
  sort(tags.begin(), tags.end());
  auto last_unique_tag = unique(tags.begin(), tags.end());
  if (last_unique_tag != tags.end())
  {
    tags.erase(last_unique_tag, tags.end());
  }
}
void Tagger::getTag(const MSSpectrum& spec, std::vector<std::string>& tags) const
{
const size_t N = spec.size();
if (N < min_tag_length_) { return; }
// copy to double vector (speed)
std::vector<double> mzs;
mzs.reserve(N);
for (auto const& p : spec) { mzs.push_back(p.getMZ()); }
getTag(mzs, tags);
}
void Tagger::setMaxCharge(size_t max_charge)
{
  // Sets the maximal fragment charge considered during tag extraction.
  max_charge_ = max_charge;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/DigestionEnzymeProtein.cpp | .cpp | 5,549 | 222 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Xiao Liang $
// $Authors: Xiao Liang $
// --------------------------------------------------------------------------
//
#include <OpenMS/CHEMISTRY/DigestionEnzymeProtein.h>
#include <iostream>
using namespace std;
namespace OpenMS
{
// Default constructor: empty terminal gains and names, numeric search-engine
// IDs initialized to -1 ("not assigned").
DigestionEnzymeProtein::DigestionEnzymeProtein() :
  DigestionEnzyme(),
  n_term_gain_(""),
  c_term_gain_(""),
  psi_id_(""),
  xtandem_id_(""),
  comet_id_(-1),
  msgf_id_(-1),
  omssa_id_(-1)
{
}

// Constructs from a generic DigestionEnzyme; all protein-specific fields
// start out unassigned.
DigestionEnzymeProtein::DigestionEnzymeProtein(const DigestionEnzyme& d) :
  DigestionEnzyme(d),
  n_term_gain_(""),
  c_term_gain_(""),
  psi_id_(""),
  xtandem_id_(""),
  comet_id_(-1),
  msgf_id_(-1),
  omssa_id_(-1)
{
}

// Detailed constructor; by-value parameters are moved into the members.
DigestionEnzymeProtein::DigestionEnzymeProtein(const String& name,
                                               const String& cleavage_regex,
                                               const std::set<String>& synonyms,
                                               String regex_description,
                                               EmpiricalFormula n_term_gain,
                                               EmpiricalFormula c_term_gain,
                                               String psi_id,
                                               String xtandem_id,
                                               Int comet_id,
                                               Int msgf_id,
                                               Int omssa_id) :
  DigestionEnzyme(name, cleavage_regex, synonyms, std::move(regex_description)),
  n_term_gain_(std::move(n_term_gain)),
  c_term_gain_(std::move(c_term_gain)),
  psi_id_(std::move(psi_id)),
  xtandem_id_(std::move(xtandem_id)),
  comet_id_(comet_id),
  msgf_id_(msgf_id),
  omssa_id_(omssa_id)
{
}
DigestionEnzymeProtein::~DigestionEnzymeProtein() = default;

// --- Plain accessors for the terminal formula gains and the various
// --- search-engine-specific enzyme identifiers. ---

// Chemical formula gained at the N-terminus of a cleavage product.
void DigestionEnzymeProtein::setNTermGain(const EmpiricalFormula& value)
{
  n_term_gain_ = value;
}

// Chemical formula gained at the C-terminus of a cleavage product.
void DigestionEnzymeProtein::setCTermGain(const EmpiricalFormula& value)
{
  c_term_gain_ = value;
}

EmpiricalFormula DigestionEnzymeProtein::getNTermGain() const
{
  return n_term_gain_;
}

EmpiricalFormula DigestionEnzymeProtein::getCTermGain() const
{
  return c_term_gain_;
}

// PSI-MS controlled-vocabulary identifier of the enzyme.
void DigestionEnzymeProtein::setPSIID(const String& value)
{
  psi_id_ = value;
}

String DigestionEnzymeProtein::getPSIID() const
{
  return psi_id_;
}

// X!Tandem enzyme identifier.
void DigestionEnzymeProtein::setXTandemID(const String& value)
{
  xtandem_id_ = value;
}

String DigestionEnzymeProtein::getXTandemID() const
{
  return xtandem_id_;
}

// Comet numeric enzyme identifier (-1 if unassigned).
void DigestionEnzymeProtein::setCometID(Int value)
{
  comet_id_ = value;
}

Int DigestionEnzymeProtein::getCometID() const
{
  return comet_id_;
}

// OMSSA numeric enzyme identifier (-1 if unassigned).
void DigestionEnzymeProtein::setOMSSAID(Int value)
{
  omssa_id_ = value;
}

Int DigestionEnzymeProtein::getOMSSAID() const
{
  return omssa_id_;
}

// MS-GF+ numeric enzyme identifier (-1 if unassigned).
void DigestionEnzymeProtein::setMSGFID(Int value)
{
  msgf_id_ = value;
}

Int DigestionEnzymeProtein::getMSGFID() const
{
  return msgf_id_;
}
bool DigestionEnzymeProtein::operator==(const DigestionEnzymeProtein& enzyme) const
{
  // Equal iff the base-class data and every protein-specific attribute agree.
  if (!DigestionEnzyme::operator==(enzyme)) { return false; }
  if (!(n_term_gain_ == enzyme.n_term_gain_)) { return false; }
  if (!(c_term_gain_ == enzyme.c_term_gain_)) { return false; }
  if (!(psi_id_ == enzyme.psi_id_)) { return false; }
  if (!(xtandem_id_ == enzyme.xtandem_id_)) { return false; }
  if (comet_id_ != enzyme.comet_id_) { return false; }
  if (msgf_id_ != enzyme.msgf_id_) { return false; }
  return omssa_id_ == enzyme.omssa_id_;
}
// Note: comparison operators are not inherited. TODO rename it and make virtual
// Compares ONLY the cleavage regex, not any other attribute.
bool DigestionEnzymeProtein::operator==(const String& cleavage_regex) const
{
  return cleavage_regex_ == cleavage_regex;
}

bool DigestionEnzymeProtein::operator!=(const String& cleavage_regex) const
{
  return cleavage_regex_ != cleavage_regex;
}

bool DigestionEnzymeProtein::operator!=(const DigestionEnzymeProtein& enzyme) const
{
  return !(*this == enzyme);
}

// Ordering by enzyme name only (used e.g. for sorted containers).
bool DigestionEnzymeProtein::operator<(const DigestionEnzymeProtein& enzyme) const
{
  return this->getName() < enzyme.getName();
}
// Sets one attribute from a "key: value" pair read from the enzyme database
// file. Tries the base class first, then dispatches on the key suffix.
// Returns true if the key was recognized and the value applied.
bool DigestionEnzymeProtein::setValueFromFile(const String& key, const String& value)
{
  if (DigestionEnzyme::setValueFromFile(key, value))
  {
    return true;
  }
  if (key.hasSuffix(":NTermGain"))
  {
    setNTermGain(EmpiricalFormula(value));
    return true;
  }
  if (key.hasSuffix(":CTermGain"))
  {
    setCTermGain(EmpiricalFormula(value));
    return true;
  }
  if (key.hasSuffix(":PSIID"))
  {
    setPSIID(value);
    return true;
  }
  if (key.hasSuffix(":XTandemID"))
  {
    setXTandemID(value);
    return true;
  }
  if (key.hasSuffix(":CometID"))
  {
    setCometID(value.toInt());
    return true;
  }
  if (key.hasSuffix(":OMSSAID"))
  {
    setOMSSAID(value.toInt());
    return true;
  }
  if (key.hasSuffix(":MSGFID"))
  {
    setMSGFID(value.toInt());
    return true;
  }
  return false; // unknown key
}
// Streams the base-class representation followed by the PSI ID.
ostream& operator<<(ostream& os, const DigestionEnzymeProtein& enzyme)
{
  os << static_cast<const DigestionEnzyme&>(enzyme) << " "
     << enzyme.psi_id_;
  return os;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/ModificationDefinition.cpp | .cpp | 3,090 | 117 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Andreas Bertsch $
// --------------------------------------------------------------------------
//
#include <OpenMS/CHEMISTRY/ModificationDefinition.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/CONCEPT/Exception.h>
using namespace std;
namespace OpenMS
{
// Default: no modification set, treated as fixed, unlimited occurrences (0).
ModificationDefinition::ModificationDefinition() :
  mod_(nullptr),
  fixed_modification_(true),
  max_occurrences_(0)
{
}

ModificationDefinition::ModificationDefinition(const ModificationDefinition& rhs) = default;

// Looks up the modification by name in ModificationsDB (may throw from
// setModification if the name is unknown).
ModificationDefinition::ModificationDefinition(const String& mod, bool fixed, UInt max_occur) :
  mod_(nullptr),
  fixed_modification_(fixed),
  max_occurrences_(max_occur)
{
  setModification(mod);
}

// Stores a pointer to the given modification (no copy; the referenced object
// must outlive this definition).
ModificationDefinition::ModificationDefinition(const ResidueModification& mod, bool fixed, UInt max_occur) :
  mod_(&mod),
  fixed_modification_(fixed),
  max_occurrences_(max_occur)
{
}
ModificationDefinition& ModificationDefinition::operator=(const ModificationDefinition& rhs)
{
  // Self-assignment-safe member-wise copy; the modification pointer is
  // shared (it refers into ModificationsDB) and is copied shallowly.
  if (this == &rhs)
  {
    return *this;
  }
  mod_ = rhs.mod_;
  fixed_modification_ = rhs.fixed_modification_;
  max_occurrences_ = rhs.max_occurrences_;
  return *this;
}
// Equality compares the modification POINTER (identity, not content) plus
// the fixed flag and occurrence limit.
bool ModificationDefinition::operator==(const ModificationDefinition& rhs) const
{
  return mod_ == rhs.mod_ &&
         fixed_modification_ == rhs.fixed_modification_ &&
         max_occurrences_ == rhs.max_occurrences_;
}

bool ModificationDefinition::operator!=(const ModificationDefinition& rhs) const
{
  return !(*this == rhs);
}

ModificationDefinition::~ModificationDefinition() = default;

// Ordering by the modification's full ID string (empty if none is set).
bool ModificationDefinition::operator<(const ModificationDefinition& rhs) const
{
  return this->getModificationName() < rhs.getModificationName();
}
// true = fixed modification, false = variable modification.
void ModificationDefinition::setFixedModification(bool fixed_mod)
{
  fixed_modification_ = fixed_mod;
}

bool ModificationDefinition::isFixedModification() const
{
  return fixed_modification_;
}

// Resolves the modification by name via ModificationsDB (throws there if the
// name is unknown) and stores the resulting pointer.
void ModificationDefinition::setModification(const String& modification)
{
  mod_ = ModificationsDB::getInstance()->getModification(modification);
}

// Returns the stored modification; throws InvalidValue if none was set.
const ResidueModification& ModificationDefinition::getModification() const
{
  if (!mod_)
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                  "No modification defined", nullptr);
  }
  return *mod_;
}
String ModificationDefinition::getModificationName() const
{
  // Full identifier of the modification, or "" if none is set.
  return (mod_ != nullptr) ? mod_->getFullId() : String("");
}
// Maximal number of occurrences per peptide (0 = unlimited, per default ctor).
void ModificationDefinition::setMaxOccurrences(UInt max_occurrences)
{
  max_occurrences_ = max_occurrences;
}

UInt ModificationDefinition::getMaxOccurrences() const
{
  return max_occurrences_;
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/NucleicAcidSpectrumGenerator.cpp | .cpp | 20,053 | 519 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/NucleicAcidSpectrumGenerator.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CONCEPT/LogStream.h>
using namespace std;
namespace OpenMS
{
// Registers all user parameters (which ion series to generate, their
// intensities, precursor handling) with their defaults; defaultsToParam_()
// then triggers updateMembers_() to cache the values.
NucleicAcidSpectrumGenerator::NucleicAcidSpectrumGenerator() : DefaultParamHandler("NucleicAcidSpectrumGenerator")
{
  defaults_.setValue("add_metainfo", "false", "Adds the type of peaks as meta information to the peaks, e.g. c1, y2, a3-B");
  defaults_.setValidStrings("add_metainfo", {"true", "false"});
  defaults_.setValue("add_precursor_peaks", "false", "Adds peaks of the unfragmented precursor ion to the spectrum");
  defaults_.setValidStrings("add_precursor_peaks", {"true", "false"});
  defaults_.setValue("add_all_precursor_charges", "false", "Adds precursor peaks with all charges in the given range");
  defaults_.setValidStrings("add_all_precursor_charges", {"true", "false"});
  defaults_.setValue("add_first_prefix_ion", "false", "If set to true a1, b1, ..., z1 ions are added");
  defaults_.setValidStrings("add_first_prefix_ion", {"true", "false"});
  defaults_.setValue("add_a_ions", "false", "Add peaks of a-ions to the spectrum");
  defaults_.setValidStrings("add_a_ions", {"true", "false"});
  defaults_.setValue("add_b_ions", "true", "Add peaks of b-ions to the spectrum");
  defaults_.setValidStrings("add_b_ions", {"true", "false"});
  defaults_.setValue("add_c_ions", "false", "Add peaks of c-ions to the spectrum");
  defaults_.setValidStrings("add_c_ions", {"true", "false"});
  defaults_.setValue("add_d_ions", "false", "Add peaks of d-ions to the spectrum"); // only for nucleotide sequences
  defaults_.setValidStrings("add_d_ions", {"true", "false"});
  defaults_.setValue("add_w_ions", "false", "Add peaks of w-ions to the spectrum"); // only for nucleotide sequences
  defaults_.setValidStrings("add_w_ions", {"true", "false"});
  defaults_.setValue("add_x_ions", "false", "Add peaks of x-ions to the spectrum");
  defaults_.setValidStrings("add_x_ions", {"true", "false"});
  defaults_.setValue("add_y_ions", "true", "Add peaks of y-ions to the spectrum");
  defaults_.setValidStrings("add_y_ions", {"true", "false"});
  defaults_.setValue("add_z_ions", "false", "Add peaks of z-ions to the spectrum");
  defaults_.setValidStrings("add_z_ions", {"true", "false"});
  defaults_.setValue("add_a-B_ions", "false", "Add peaks of a-B-ions to the spectrum"); // only for nucleotide sequences
  defaults_.setValidStrings("add_a-B_ions", {"true", "false"});
  // intensity options of the ions
  defaults_.setValue("a_intensity", 1.0, "Intensity of the a-ions");
  defaults_.setValue("b_intensity", 1.0, "Intensity of the b-ions");
  defaults_.setValue("c_intensity", 1.0, "Intensity of the c-ions");
  defaults_.setValue("d_intensity", 1.0, "Intensity of the d-ions");
  defaults_.setValue("w_intensity", 1.0, "Intensity of the w-ions");
  defaults_.setValue("x_intensity", 1.0, "Intensity of the x-ions");
  defaults_.setValue("y_intensity", 1.0, "Intensity of the y-ions");
  defaults_.setValue("z_intensity", 1.0, "Intensity of the z-ions");
  defaults_.setValue("a-B_intensity", 1.0, "Intensity of the a-B-ions");
  // precursor intensity
  defaults_.setValue("precursor_intensity", 1.0, "Intensity of the precursor peak");
  defaultsToParam_();
}
// Copy operations are defaulted. NOTE(review): besides the base class
// (parameters), the defaulted versions also copy the cached ion
// flags/intensity members; the previous hand-written versions copied only
// the DefaultParamHandler part, apparently leaving the cached members of
// the copy unset until updateMembers_() ran -- defaulting is a
// behavior-compatible superset.
NucleicAcidSpectrumGenerator::NucleicAcidSpectrumGenerator(const NucleicAcidSpectrumGenerator& source) = default;

NucleicAcidSpectrumGenerator& NucleicAcidSpectrumGenerator::operator=(const NucleicAcidSpectrumGenerator& source) = default;

NucleicAcidSpectrumGenerator::~NucleicAcidSpectrumGenerator() = default;
void NucleicAcidSpectrumGenerator::addFragmentPeaks_(MSSpectrum& spectrum, const vector<double>& fragment_masses, const String& ion_type, double offset, double intensity, Size start) const
{
  // Appends one peak per fragment mass (shifted by the ion-type offset).
  // When meta info is enabled, the matching ion label (e.g. "b3") is pushed
  // to the first string data array in the same order as the peaks.
  for (Size idx = start; idx < fragment_masses.size(); ++idx)
  {
    spectrum.push_back(Peak1D(fragment_masses[idx] + offset, intensity));
    if (add_metainfo_)
    {
      spectrum.getStringDataArrays()[0].push_back(ion_type + String(idx + 1));
    }
  }
}
// Adds "a-B" ions (a ions that additionally lost the nucleobase at the
// fragmentation position). 'fragment_masses' are the cumulative 5' prefix
// masses computed by the caller (getUnchargedSpectrum_).
void NucleicAcidSpectrumGenerator::addAMinusBPeaks_(MSSpectrum& spectrum, const vector<double>& fragment_masses, const NASequence& oligo, Size start) const
{
  // offset: phosphate (from bond) minus 3 water (from various reactions)
  static const double offset = EmpiricalFormula("H-5P").getMonoWeight();
  // offset for first ("a1-B") ion: loss of 2 water
  static const double initial_offset = -EmpiricalFormula("H4O2").getMonoWeight();
  // methyl group may be retained on ribose for "ambiguous" mods:
  static const double methyl_mass = EmpiricalFormula("CH2").getMonoWeight();
  for (Size i = start; i < fragment_masses.size(); ++i)
  {
    // start from the part of residue i that remains after base loss
    double mass = oligo[i]->getBaselossFormula().getMonoWeight();
    if (i > 0)
    {
      // base at position "i" is lost, so use fragment up to pos. "i - 1":
      mass += fragment_masses[i - 1] + offset;
      // check if the offset should be thiol or not
      if (oligo[i-1]->getCode().back() == '*') // '*' marks a phosphorothioate linkage
      {
        mass += EmpiricalFormula("SO-1").getMonoWeight();
      }
    }
    else // first ribonucleotide
    {
      mass += initial_offset;
    }
    Peak1D peak(mass, aB_intensity_);
    if (oligo[i]->isAmbiguous())
    {
      // special treatment for a-B ions of "ambiguous" modifications:
      // create two peaks with half intensity, representing methyl group
      // lost/retained on backbone:
      peak.setIntensity(aB_intensity_ * 0.5);
      spectrum.push_back(peak);
      mass += methyl_mass;
      peak.setMZ(mass);
    }
    spectrum.push_back(peak);
  }
  if (add_metainfo_)
  {
    // ion names are appended in the same order as the peaks above
    for (Size i = start; i < fragment_masses.size(); ++i)
    {
      String ion_name = "a" + String(i + 1) + "-B";
      spectrum.getStringDataArrays()[0].push_back(ion_name);
      if (oligo[i]->isAmbiguous()) // two peaks were added
      {
        spectrum.getStringDataArrays()[0].push_back(ion_name);
      }
    }
  }
}
// Computes all requested fragment ion masses of 'oligo' as NEUTRAL masses;
// charges are applied later by addChargedSpectrum_(). If precursor peaks are
// requested, the precursor mass is appended as the LAST peak (labelled "M").
MSSpectrum NucleicAcidSpectrumGenerator::getUnchargedSpectrum_(const NASequence& oligo) const
{
  static const double H_mass = EmpiricalFormula("H").getMonoWeight();
  // phosphate minus water:
  static const double backbone_mass = EmpiricalFormula("H-1PO2").getMonoWeight();
  static const double a_ion_offset = -EmpiricalFormula("H2O").getMonoWeight();
  static const double b_ion_offset = 0.0;
  static const double c_ion_offset = backbone_mass;
  static const double d_ion_offset = EmpiricalFormula("HPO3").getMonoWeight();
  static const double w_ion_offset = d_ion_offset;
  static const double x_ion_offset = c_ion_offset;
  static const double y_ion_offset = b_ion_offset;
  static const double z_ion_offset = a_ion_offset;
  // a a-B w x ions have different offsets if we have phosphorothioate linkages,
  MSSpectrum spectrum;
  if (oligo.empty())
  {
    return spectrum;
  }
  // mass contributions of the terminal modifications (if any)
  double three_prime_mass = 0.0, five_prime_mass = 0.0;
  if (oligo.getThreePrimeMod() != nullptr)
  {
    three_prime_mass = oligo.getThreePrimeMod()->getMonoMass() - H_mass;
  }
  if (oligo.getFivePrimeMod() != nullptr)
  {
    five_prime_mass = oligo.getFivePrimeMod()->getMonoMass() - H_mass;
  }
  vector<double> ribo_masses(oligo.size());
  // Create a vector of doubles to represent the phosorothioate linkage mass shift
  vector<double> thiols(oligo.size(), 0.0);
  Size index = 0;
  for (const auto& ribo : oligo)
  {
    ribo_masses[index] = ribo.getMonoMass();
    // * at the end means phosphorothioate
    if (ribo.getCode().back() == '*')
    {
      thiols[index] = EmpiricalFormula("SO-1").getMonoWeight();
    }
    ++index;
  }
  spectrum.getStringDataArrays().resize(1);
  spectrum.getStringDataArrays()[0].setName(Constants::UserParam::IonNames);
  vector<double> fragments_left, fragments_right, thiol_offsets;
  // optionally skip the length-1 prefix fragments (a1, b1, ...)
  Size start = add_first_prefix_ion_ ? 0 : 1;
  // Drop the final thiol, 'cause its not linking anything
  thiols.resize(oligo.size() - 1);
  // 5' fragment series (a/b/c/d and a-B): cumulative prefix masses
  if ((add_a_ions_ || add_b_ions_ || add_c_ions_ || add_d_ions_ || add_aB_ions_) && (oligo.size() > start + 1))
  {
    fragments_left.resize(oligo.size() - 1);
    fragments_left[0] = ribo_masses[0] + five_prime_mass;
    for (Size i = 1; i < oligo.size() - 1; ++i)
    {
      fragments_left[i] = (fragments_left[i - 1] + ribo_masses[i] + backbone_mass + thiols[i - 1]);
    }
    // with thiols c and d ions have a 15.99 mass shift, we calculated that above now we add it
    vector<double> frag_l_thiol(fragments_left.size());
    std::transform(fragments_left.begin(), fragments_left.end(), thiols.begin(), frag_l_thiol.begin(), std::plus<double>());
    if (add_a_ions_)
    {
      addFragmentPeaks_(spectrum, fragments_left, "a", a_ion_offset, a_intensity_, start);
    }
    if (add_b_ions_)
    {
      addFragmentPeaks_(spectrum, fragments_left, "b", b_ion_offset, b_intensity_, start);
    }
    if (add_c_ions_)
    {
      addFragmentPeaks_(spectrum, frag_l_thiol, "c", c_ion_offset, c_intensity_, start);
    }
    if (add_d_ions_)
    {
      addFragmentPeaks_(spectrum, frag_l_thiol, "d", d_ion_offset, d_intensity_, start);
    }
    if (add_aB_ions_) // special case
    {
      addAMinusBPeaks_(spectrum, fragments_left, oligo, start);
    }
  }
  // 3' fragment series (w/x/y/z): cumulative suffix masses
  if ((add_w_ions_ || add_x_ions_ || add_y_ions_ || add_z_ions_) && (oligo.size() > 1))
  {
    fragments_right.resize(oligo.size() - 1);
    fragments_right[0] = ribo_masses.back() + three_prime_mass;
    for (Size i = 1; i < oligo.size() - 1; ++i)
    {
      Size ribo_index = oligo.size() - i - 1;
      fragments_right[i] = (fragments_right[i - 1] + ribo_masses[ribo_index] + backbone_mass + thiols[ribo_index]);
    }
    // with thiols a and b ions have a 15.99 mass shift, we calculated that above now we add it
    vector<double> frag_r_thiol(fragments_right.size());
    std::reverse(thiols.begin(), thiols.end()); // Reverse, since we go from the other side
    std::transform(fragments_right.begin(), fragments_right.end(), thiols.begin(), frag_r_thiol.begin(), std::plus<double>());
    if (add_w_ions_)
    {
      addFragmentPeaks_(spectrum, frag_r_thiol, "w", w_ion_offset, w_intensity_);
    }
    if (add_x_ions_)
    {
      addFragmentPeaks_(spectrum, frag_r_thiol, "x", x_ion_offset, x_intensity_);
    }
    if (add_y_ions_)
    {
      addFragmentPeaks_(spectrum, fragments_right, "y", y_ion_offset, y_intensity_);
    }
    if (add_z_ions_)
    {
      addFragmentPeaks_(spectrum, fragments_right, "z", z_ion_offset, z_intensity_);
    }
  }
  if (add_precursor_peaks_) // re-use what we've already calculated
  {
    Peak1D peak(0.0, precursor_intensity_);
    bool have_left = !fragments_left.empty();
    bool have_right = !fragments_right.empty();
    if (have_left && have_right)
    {
      // shortest 5' fragment + longest 3' fragment + linking phosphate
      peak.setMZ(fragments_left[0] + fragments_right.back() + backbone_mass);
    }
    else if (have_left)
    {
      peak.setMZ(fragments_left.back() + ribo_masses.back() + backbone_mass + three_prime_mass);
    }
    else if (have_right)
    {
      peak.setMZ(fragments_right.back() + ribo_masses[0] + backbone_mass + five_prime_mass);
    }
    else // really, no fragment ions?
    {
      peak.setMZ(oligo.getMonoWeight(NASequence::Full, 0));
    }
    spectrum.push_back(peak);
    if (add_metainfo_)
    {
      spectrum.getStringDataArrays()[0].push_back("M");
    }
  }
  return spectrum;
}
// Converts the neutral masses in 'uncharged_spectrum' to m/z values for the
// given (signed) charge and appends them to 'spectrum'. Assumes that, if
// precursor peaks are enabled, the precursor is the LAST entry of
// 'uncharged_spectrum' (as produced by getUnchargedSpectrum_).
void NucleicAcidSpectrumGenerator::addChargedSpectrum_(MSSpectrum& spectrum, const MSSpectrum& uncharged_spectrum, Int charge, bool add_precursor) const
{
  if (uncharged_spectrum.empty())
    return;
  Size size = uncharged_spectrum.size();
  if (add_precursor_peaks_ && !add_precursor)
  {
    --size; // uncharged spectrum contains precursor peak - exclude it
  }
  for (Size i = 0; i < size; ++i)
  {
    spectrum.push_back(uncharged_spectrum[i]);
    // m/z = M/z + proton mass; fabs because the quotient is negative in
    // negative mode (charge < 0)
    spectrum.back().setMZ(std::fabs(spectrum.back().getMZ() / charge + Constants::PROTON_MASS_U));
  }
  if (add_metainfo_)
  {
    // copy the matching ion names and record the charge for each new peak
    auto& ions = spectrum.getStringDataArrays()[0];
    auto source_it = uncharged_spectrum.getStringDataArrays()[0].begin();
    ions.insert(ions.end(), source_it, source_it + size);
    auto& charges = spectrum.getIntegerDataArrays()[0];
    charges.resize(charges.size() + size, charge);
  }
}
// Generates the theoretical spectrum of 'oligo' for all charge states between
// min_charge and max_charge. Both charges must have the same sign; negative
// values select negative ionization mode.
void NucleicAcidSpectrumGenerator::getSpectrum(MSSpectrum& spectrum, const NASequence& oligo, Int min_charge, Int max_charge) const
{
  Int sign = 1;
  if (max_charge < 0 && min_charge < 0) // negative mode
  {
    sign = -1;
  }
  else if (max_charge * min_charge < 0)
  {
    // Signs don't match - we need to quit and throw error here to avoid messing up for loops below
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "min. and max. charge must both be either positive or negative");
  }
  if (abs(max_charge) < abs(min_charge))
  {
    swap(max_charge, min_charge);
  }
  if (add_metainfo_)
  {
    // @TODO: what if arrays already exist, but contain different data?
    if (spectrum.getIntegerDataArrays().empty())
    {
      spectrum.getIntegerDataArrays().resize(1);
      spectrum.getIntegerDataArrays()[0].setName("Charges");
    }
    if (spectrum.getStringDataArrays().empty())
    {
      spectrum.getStringDataArrays().resize(1);
      spectrum.getStringDataArrays()[0].setName(Constants::UserParam::IonNames);
    }
  }
  // compute the neutral fragment masses once, then charge them per z;
  // the fragment charge is capped by the number of residues (z < length)
  MSSpectrum uncharged_spectrum = getUnchargedSpectrum_(oligo);
  for (UInt z = (UInt)abs(min_charge); z <= (UInt)abs(max_charge) && z < (UInt)oligo.size(); ++z)
  {
    // precursor peaks: for every charge, or only for the highest one
    bool add_precursor = ((add_precursor_peaks_ && add_all_precursor_charges_) || (add_precursor_peaks_ && (z == (UInt)abs(max_charge))));
    addChargedSpectrum_(spectrum, uncharged_spectrum, z * sign, add_precursor);
  }
  spectrum.sortByPosition();
}
// Generates one spectrum per requested charge state, starting at
// 'base_charge'; each spectrum for a higher requested charge contains the
// peaks of all lower charge states as well (built incrementally by copying).
// All requested charges must share one sign (sign of the first entry decides
// the mode).
void NucleicAcidSpectrumGenerator::getMultipleSpectra(map<Int, MSSpectrum>& spectra, const NASequence& oligo, const set<Int>& charges, Int base_charge) const
{
  spectra.clear();
  if (charges.empty())
    return;
  bool negative_mode = *charges.begin() < 0;
  bool add_all_precursors = (add_precursor_peaks_ && add_all_precursor_charges_);
  bool add_final_precursor = (add_precursor_peaks_ && !add_all_precursor_charges_);
  if (add_metainfo_)
  {
    // prepare the meta-data arrays for every output spectrum up front
    for (Int charge : charges)
    {
      MSSpectrum& spectrum = spectra[charge];
      spectrum.getIntegerDataArrays().resize(1);
      spectrum.getIntegerDataArrays()[0].setName("Charges");
      spectrum.getStringDataArrays().resize(1);
      spectrum.getStringDataArrays()[0].setName(Constants::UserParam::IonNames);
    }
  }
  // neutral fragment masses are computed once and re-used for all charges
  MSSpectrum uncharged_spectrum = getUnchargedSpectrum_(oligo);
  if (negative_mode)
  {
    if (base_charge > 0)
      base_charge = -base_charge;
    // in negative mode, charges are ordered high to low - iterate in reverse:
    set<Int>::const_reverse_iterator charge_it = charges.rbegin();
    // skip requested charges that are lower than "base_charge":
    while (*charge_it > base_charge) // ">" because of negative mode
    {
      ++charge_it;
      if (charge_it == charges.rend())
        return;
    }
    Int charge = base_charge;
    while (charge_it != charges.rend())
    {
      MSSpectrum& spectrum = spectra[*charge_it];
      for (; charge >= *charge_it; --charge)
      {
        addChargedSpectrum_(spectrum, uncharged_spectrum, charge, add_all_precursors);
      }
      ++charge_it;
      if (charge_it != charges.rend())
      {
        spectra[*charge_it] = spectrum; // initialize next spectrum
      }
      // if we want precursor peaks only for selected charge states, add them
      // after the next spectrum has been initialized:
      if (add_final_precursor)
      {
        // NOTE(review): at this point the for loop above has moved 'charge'
        // one step PAST the requested charge state - verify this is the
        // intended charge for the final precursor peak (possible off-by-one).
        spectrum.push_back(uncharged_spectrum.back());
        spectrum.back().setMZ(std::fabs(spectrum.back().getMZ() / charge + Constants::PROTON_MASS_U));
        if (add_metainfo_)
        {
          spectrum.getStringDataArrays()[0].push_back("M");
          spectrum.getIntegerDataArrays()[0].push_back(charge);
        }
      }
      spectrum.sortByPosition();
    }
  }
  else // positive mode
  {
    set<Int>::const_iterator charge_it = charges.begin();
    // skip requested charges that are lower than "base_charge":
    while (*charge_it < base_charge)
    {
      ++charge_it;
      if (charge_it == charges.end())
        return;
    }
    Int charge = base_charge;
    while (charge_it != charges.end())
    {
      MSSpectrum& spectrum = spectra[*charge_it];
      for (; charge <= *charge_it; ++charge)
      {
        addChargedSpectrum_(spectrum, uncharged_spectrum, charge, add_all_precursors);
      }
      ++charge_it;
      if (charge_it != charges.end())
      {
        spectra[*charge_it] = spectrum; // initialize next spectrum
      }
      // if we want precursor peaks only for selected charge states, add them
      // after the next spectrum has been initialized:
      if (add_final_precursor)
      {
        // NOTE(review): same as in negative mode - 'charge' is one step past
        // the requested charge state here; confirm this is intentional.
        spectrum.push_back(uncharged_spectrum.back());
        spectrum.back().setMZ(spectrum.back().getMZ() / charge + Constants::PROTON_MASS_U);
        if (add_metainfo_)
        {
          spectrum.getStringDataArrays()[0].push_back("M");
          spectrum.getIntegerDataArrays()[0].push_back(charge);
        }
      }
      spectrum.sortByPosition();
    }
  }
}
// Caches all parameter values in plain members for fast access during
// spectrum generation (called by DefaultParamHandler on parameter changes).
void NucleicAcidSpectrumGenerator::updateMembers_()
{
  add_a_ions_ = param_.getValue("add_a_ions").toBool();
  add_b_ions_ = param_.getValue("add_b_ions").toBool();
  add_c_ions_ = param_.getValue("add_c_ions").toBool();
  add_d_ions_ = param_.getValue("add_d_ions").toBool();
  add_w_ions_ = param_.getValue("add_w_ions").toBool();
  add_x_ions_ = param_.getValue("add_x_ions").toBool();
  add_y_ions_ = param_.getValue("add_y_ions").toBool();
  add_z_ions_ = param_.getValue("add_z_ions").toBool();
  add_aB_ions_ = param_.getValue("add_a-B_ions").toBool();
  add_first_prefix_ion_ = param_.getValue("add_first_prefix_ion").toBool();
  add_metainfo_ = param_.getValue("add_metainfo").toBool();
  add_precursor_peaks_ = param_.getValue("add_precursor_peaks").toBool();
  add_all_precursor_charges_ = param_.getValue("add_all_precursor_charges").toBool();
  a_intensity_ = (double)param_.getValue("a_intensity");
  b_intensity_ = (double)param_.getValue("b_intensity");
  c_intensity_ = (double)param_.getValue("c_intensity");
  d_intensity_ = (double)param_.getValue("d_intensity");
  w_intensity_ = (double)param_.getValue("w_intensity");
  x_intensity_ = (double)param_.getValue("x_intensity");
  y_intensity_ = (double)param_.getValue("y_intensity");
  z_intensity_ = (double)param_.getValue("z_intensity");
  aB_intensity_ = (double)param_.getValue("a-B_intensity");
  precursor_intensity_ = (double)param_.getValue("precursor_intensity");
}
} // end namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/AASequence.cpp | .cpp | 55,738 | 1,706 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Andreas Bertsch $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/ResidueModification.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/CHEMISTRY/AASequence.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CHEMISTRY/ResidueDB.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/Macros.h>
#include <OpenMS/CONCEPT/PrecisionWrapper.h>
#include <cmath>
#include <algorithm>
#include <map>
using namespace std;
namespace OpenMS
{
// Resolves a PROTEIN-terminal modification ('n' or 'c' terminus) from the
// modification database. Any lookup failure is rethrown as InvalidValue with
// the offending input string attached.
const ResidueModification* proteinTerminalResidueHelper( ModificationsDB* mod_db,
                                                         const char term,
                                                         const std::string& str,
                                                         const std::string& mod,
                                                         const std::string& res)
{
  const ResidueModification::TermSpecificity protein_term_spec =
      (term == 'c') ? ResidueModification::PROTEIN_C_TERM :
      (term == 'n') ? ResidueModification::PROTEIN_N_TERM :
                      ResidueModification::NUMBER_OF_TERM_SPECIFICITY;
  try
  {
    return mod_db->getModification(mod, res, protein_term_spec);
  }
  catch (...)
  {
    // catch and rethrow with some additional information on term
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, str,
        "Cannot convert string to peptide modification. No terminal modification matches to term specificity and origin.");
  }
}
// Resolves a peptide-terminal modification ('n' or 'c' terminus). If the
// peptide-terminal lookup fails and 'protein_term' is true, falls back to the
// corresponding protein-terminal specificity. Any final failure is rethrown
// as InvalidValue with the offending input string attached.
// (Fixes a stray double semicolon and deduplicates the identical throw sites.)
const ResidueModification* terminalResidueHelper( ModificationsDB* mod_db,
                                                  const char term,
                                                  bool protein_term,
                                                  const std::string& str,
                                                  const std::string& mod,
                                                  const std::string& res)
{
  ResidueModification::TermSpecificity term_spec = ResidueModification::NUMBER_OF_TERM_SPECIFICITY;
  ResidueModification::TermSpecificity protein_term_spec = ResidueModification::NUMBER_OF_TERM_SPECIFICITY;
  if (term == 'c')
  {
    term_spec = ResidueModification::C_TERM;
    protein_term_spec = ResidueModification::PROTEIN_C_TERM;
  }
  else if (term == 'n')
  {
    term_spec = ResidueModification::N_TERM;
    protein_term_spec = ResidueModification::PROTEIN_N_TERM;
  }
  // single definition of the error raised on any failed lookup
  auto throwInvalid = [&str]() {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, str,
        "Cannot convert string to peptide modification. No terminal modification matches to term specificity and origin.");
  };
  try
  {
    return mod_db->getModification(mod, res, term_spec);
  }
  catch (...)
  {
    if (!protein_term)
    {
      throwInvalid();
    }
    try
    {
      return mod_db->getModification(mod, res, protein_term_spec);
    }
    catch (...)
    {
      throwInvalid();
    }
  }
  return nullptr; // unreachable: every failure path above throws
}
// Construct from a sequence string (e.g. "PEPTM(Oxidation)IDE").
// Parses permissively: stop-codon characters ('*', '#', '+') become residue
// 'X' and spaces are skipped (see parseString_()).
AASequence::AASequence(const String& s)
{
parseString_(s, *this, true);
}
// Construct from a C-string; same permissive parsing as the String overload.
AASequence::AASequence(const char* s)
{
parseString_(s, *this, true);
}
// Construct from a sequence string with explicit parsing mode.
// @param permissive if true, stop codons ('*', '#', '+') are mapped to 'X'
//        and spaces are ignored; if false, such characters raise a ParseError
AASequence::AASequence(const String& s, bool permissive)
{
parseString_(s, *this, permissive);
}
// Construct from a C-string with explicit parsing mode (see String overload).
AASequence::AASequence(const char* s, bool permissive)
{
parseString_(s, *this, permissive);
}
// Bounds-checked access to the residue at @p index.
// @throws Exception::IndexOverflow if index is out of range
const Residue& AASequence::getResidue(Size index) const
{
  if (index < peptide_.size())
  {
    return *peptide_[index];
  }
  throw Exception::IndexOverflow(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, index, peptide_.size());
}
// Render the full sequence (including modifications) by delegating to
// operator<<, which implements the actual formatting.
String AASequence::toString() const
{
  std::ostringstream oss;
  oss << *this;
  return String(oss.str());
}
// Concatenate the one-letter codes of all residues, dropping any
// modification information.
String AASequence::toUnmodifiedString() const
{
  String result;
  for (const Residue& res : *this)
  {
    result += res.getOneLetterCode();
  }
  return result;
}
// Render the sequence in UniMod notation: modifications with a UniMod record
// are written as "(<UniMod accession>)"; modifications without one fall back
// to an absolute monoisotopic mass in square brackets.
// Improvement over the previous version: the fallback mass is now computed
// lazily inside the else-branches instead of unconditionally, avoiding
// needless getDiffMonoMass()/getMonoWeight() calls for UniMod-known mods.
String AASequence::toUniModString() const
{
  const AASequence& seq = *this;
  String bs;
  if (seq.empty()) return bs;

  if (seq.hasNTerminalModification())
  {
    const ResidueModification& mod = *(seq.getNTerminalModification());
    if (mod.getUniModRecordId() > -1)
    {
      bs += ".(" + mod.getUniModAccession() + ")";
    }
    else
    { // no UniMod record: absolute N-terminal mass (terminus + mass diff)
      const double nominal_mass = Residue::getInternalToNTerm().getMonoWeight() + mod.getDiffMonoMass();
      bs += ".[" + String(nominal_mass) + "]";
    }
  }
  for (Size i = 0; i != seq.size(); ++i)
  {
    const Residue& r = seq[i];
    const String& aa = r.getOneLetterCode();
    if (r.isModified())
    {
      const ResidueModification& mod = *(r.getModification());
      if (mod.getUniModRecordId() > -1)
      {
        bs += aa + "(" + mod.getUniModAccession() + ")";
      }
      else
      { // no UniMod record: absolute internal monoisotopic mass of the residue
        const double nominal_mass = r.getMonoWeight(Residue::Internal);
        bs += aa + "[" + String(nominal_mass) + "]";
      }
    }
    else // amino acid not modified
    {
      bs += aa;
    }
  }
  if (seq.hasCTerminalModification())
  {
    const ResidueModification& mod = *(seq.getCTerminalModification());
    if (mod.getUniModRecordId() > -1)
    {
      bs += ".(" + mod.getUniModAccession() + ")";
    }
    else
    { // no UniMod record: absolute C-terminal mass (terminus + mass diff)
      const double nominal_mass = Residue::getInternalToCTerm().getMonoWeight() + mod.getDiffMonoMass();
      bs += ".[" + String(nominal_mass) + "]";
    }
  }
  return bs;
}
// Render the sequence in "bracket" notation, e.g. "n[43]PEPC[160]TIDEc[17]".
// @param integer_mass write masses rounded to the nearest integer
// @param mass_delta   write mass differences (prefixed '+'/'-') instead of
//                     absolute masses
// @param fixed_modifications full IDs (e.g. "Carbamidomethyl (C)") of
//                     modifications that are implied and must NOT be printed
String AASequence::toBracketString(bool integer_mass, bool mass_delta, const vector<String> & fixed_modifications) const
{
const AASequence& seq = *this;
String bs;
if (seq.empty())
{
return bs;
}
if (seq.hasNTerminalModification())
{
const ResidueModification& mod = *(seq.getNTerminalModification());
const String & nterm_mod_name = mod.getFullId(); // e.g. "Acetyl (N-term)"
// only add to string if not a fixed modification
if (std::find(fixed_modifications.begin(), fixed_modifications.end(), nterm_mod_name) == fixed_modifications.end())
{
double nominal_mass = mod.getDiffMonoMass();
if (!mass_delta)
{
// absolute mode: add the mass of the N-terminal group itself
nominal_mass += Residue::getInternalToNTerm().getMonoWeight();
}
String sign = (mass_delta && nominal_mass > 0) ? "+" : ""; // the '-' will be printed automatically by conversion to string
if (integer_mass)
{
bs += String("n[") + sign + static_cast<int>(std::round(nominal_mass)) + "]";
}
else
{
bs += "n[" + sign + String(nominal_mass) + "]";
}
}
}
for (Size i = 0; i != seq.size(); ++i)
{
const Residue& r = seq[i];
// residues without a one-letter code (e.g. mass tags) are printed as 'X'
const String aa = !r.getOneLetterCode().empty() ? r.getOneLetterCode() : "X";
if (r.isModified())
{
const ResidueModification& mod = *(r.getModification());
const String & mod_name = mod.getFullId();
if (std::find(fixed_modifications.begin(), fixed_modifications.end(), mod_name) == fixed_modifications.end())
{
double nominal_mass;
if (mass_delta)
{
nominal_mass = mod.getDiffMonoMass();
}
else
{
nominal_mass = r.getMonoWeight(Residue::Internal);
}
String sign = (mass_delta && nominal_mass > 0) ? "+" : "";
if (aa == "X")
{
// cannot have delta mass for X: the base mass is unknown, so always
// write the absolute internal mass without a sign
nominal_mass = r.getMonoWeight(Residue::Internal);
sign = "";
}
if (integer_mass)
{
bs += aa + String("[") + sign + static_cast<int>(std::round(nominal_mass)) + "]";
}
else
{
bs += aa + "[" + sign + String(nominal_mass) + "]";
}
}
else
{
bs += aa; // don't print fixed modification
}
}
else // amino acid not modified
{
bs += aa;
}
}
if (seq.hasCTerminalModification())
{
const ResidueModification& mod = *(seq.getCTerminalModification());
const String & cterm_mod_name = mod.getFullId();
// only add to string if not a fixed modification
if (std::find(fixed_modifications.begin(), fixed_modifications.end(), cterm_mod_name) == fixed_modifications.end())
{
double nominal_mass = mod.getDiffMonoMass();
if (!mass_delta)
{
// absolute mode: add the mass of the C-terminal group itself
nominal_mass += Residue::getInternalToCTerm().getMonoWeight();
}
String sign = (mass_delta && nominal_mass > 0) ? "+" : "";
if (integer_mass)
{
bs += String("c[") + sign + static_cast<int>(std::round(nominal_mass)) + "]";
}
else
{
bs += "c[" + sign + String(nominal_mass) + "]";
}
}
}
return bs;
}
// Strict weak ordering for use in sorted containers. Order of criteria:
// 1. sequence length, 2. N-terminal modification ("no mod" < "any mod",
// otherwise by modification ID), 3. per-residue one-letter code, then
// modification, 4. C-terminal modification.
// NOTE(review): at equal one-letter codes, residues are ordered by their
// getModification() result; this looks like a pointer comparison, which
// would make the relative order of differently-modified residues
// unspecified across runs (though still a consistent ordering within one
// process, since modified residues are shared singletons) — confirm.
bool AASequence::operator<(const AASequence& rhs) const
{
// check size
if (peptide_.size() != rhs.peptide_.size())
{
return (peptide_.size() < rhs.peptide_.size());
}
// when checking terminal mods, "no mod" is less than "any mod"
if (n_term_mod_ && !rhs.n_term_mod_)
{
return false;
}
else if (!n_term_mod_ && rhs.n_term_mod_)
{
return true;
}
else if (n_term_mod_ && rhs.n_term_mod_ && (n_term_mod_ != rhs.n_term_mod_))
{
return (n_term_mod_->getId() < rhs.n_term_mod_->getId());
}
ConstIterator a = begin();
ConstIterator b = rhs.begin();
// check one letter codes
for (; a != end(); ++a, ++b)
{
if (a->getOneLetterCode() != b->getOneLetterCode())
{
return (a->getOneLetterCode() < b->getOneLetterCode());
}
else if (a->getModification() != b->getModification())
{
return (a->getModification() < b->getModification());
}
}
// c-term
if (c_term_mod_ && !rhs.c_term_mod_)
{
return false;
}
else if (!c_term_mod_ && rhs.c_term_mod_)
{
return true;
}
else if (c_term_mod_ && rhs.c_term_mod_ && (c_term_mod_ != rhs.c_term_mod_))
{
return (c_term_mod_->getId() < rhs.c_term_mod_->getId());
}
return false;
}
// Compute the elemental formula of the (possibly charged) peptide or of one
// of its fragment ion types.
// @param type   which part/ion of the peptide to compute (Full, B/Y ion, ...)
// @param charge charge state; stored on the returned formula (protons are
//               NOT added as atoms here, only the charge is set)
// @return the summed formula; an empty formula (with an error log) for
//         zero-length sequences or unknown ResidueTypes
// @throws Exception::InvalidValue if the sequence contains a plain 'X'
//         residue without an attached mass
EmpiricalFormula AASequence::getFormula(Residue::ResidueType type, Int charge) const
{
if (!peptide_.empty())
{
// Initialize with the missing/additional protons
EmpiricalFormula ef; // = EmpiricalFormula("H") * charge; ??
ef.setCharge(charge);
// terminal modifications only contribute to ion types that contain the
// respective terminus
if (n_term_mod_ != nullptr &&
(type == Residue::Full || type == Residue::AIon ||
type == Residue::BIon || type == Residue::CIon ||
type == Residue::NTerminal))
{
ef += n_term_mod_->getDiffFormula();
}
if (c_term_mod_ != nullptr &&
(type == Residue::Full || type == Residue::XIon ||
type == Residue::YIon || type == Residue::ZIon ||
type == Residue::CTerminal))
{
ef += c_term_mod_->getDiffFormula();
}
static auto const rx = ResidueDB::getInstance()->getResidue("X");
for (auto const& e : peptide_)
{
// While PEPTIX[123]DE makes sense and represents an unknown mass of 123.0
// Da, the sequence PEPTIXDE does not make sense as it is unclear what
// formula the plain, unmodified 'X' residue should represent.
if (e == rx)
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Cannot get EF of sequence with unknown AA 'X'.", toString());
}
ef += e->getFormula(Residue::Internal);
}
// add the missing formula part (residues were summed as "internal";
// each ion type adds/removes a fixed terminus/proton combination)
switch (type)
{
case Residue::Full:
{
return ef + Residue::getInternalToFull();
}
case Residue::Internal:
{
return ef;
}
case Residue::NTerminal:
{
return ef + Residue::getInternalToNTerm();
}
case Residue::CTerminal:
{
return ef + Residue::getInternalToCTerm();
}
case Residue::AIon:
{
return ef + Residue::getInternalToAIon();
}
case Residue::BIon:
{
return ef + Residue::getInternalToBIon();
}
case Residue::CIon:
{
return ef + Residue::getInternalToCIon();
}
case Residue::XIon:
{
return ef + Residue::getInternalToXIon();
}
case Residue::YIon:
{
return ef + Residue::getInternalToYIon();
}
case Residue::ZIon:
{
return ef + Residue::getInternalToZIon();
}
default:
OPENMS_LOG_ERROR << "AASequence::getFormula: unknown ResidueType\n";
}
return ef;
}
else
{
OPENMS_LOG_ERROR << "AASequence::getFormula: Formula for ResidueType " << type << " not defined for sequences of length 0.\n";
return EmpiricalFormula("");
}
}
// Average (isotope-weighted) mass of the peptide/fragment.
// Residues without a one-letter code (user-defined mass tags) have no
// formula and are therefore summed separately and added on top of the
// formula-based weight.
// @throws Exception::InvalidValue for a plain 'X' residue (unknown mass)
double AASequence::getAverageWeight(Residue::ResidueType type, Int charge) const
{
  double tag_offset = 0.0;
  static auto const unknown_res = ResidueDB::getInstance()->getResidue("X");
  for (const Residue* res : peptide_)
  {
    // While PEPTIX[123]DE makes sense and represents an unknown mass of 123.0
    // Da, a plain 'X' has no defined mass at all.
    if (res == unknown_res)
    {
      throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Cannot get weight of sequence with unknown AA 'X' with unknown mass.", toString());
    }
    if (res->getOneLetterCode().empty())
    {
      tag_offset += res->getAverageWeight(Residue::Internal);
    }
  }
  // TODO inefficient, if averageWeight is already set in the Residue
  return tag_offset + getFormula(type, charge).getAverageWeight();
}
// Mass-to-charge ratio of the peptide (or fragment ion) at the given charge.
// @throws Exception::InvalidValue for charge == 0 (m/z undefined)
double AASequence::getMZ(Int charge, Residue::ResidueType type) const
{
  if (charge == 0)
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Can't calculate mass-to-charge ratio for charge=0.", toString());
  }
  const double mono_weight = getMonoWeight(type, charge);
  return mono_weight / charge;
}
// Monoisotopic mass of the peptide or one of its fragment ion types.
// Starts from the proton mass times charge, adds terminal-modification mass
// differences (only for ion types containing that terminus), the internal
// residue masses, and finally the terminus/ion-type specific formula part.
// @return 0.0 (with an error log) for zero-length sequences
// @throws Exception::InvalidValue if the sequence contains a plain 'X'
//         residue without an attached mass
double AASequence::getMonoWeight(Residue::ResidueType type, Int charge) const
{
if (!peptide_.empty())
{
double mono_weight(Constants::PROTON_MASS_U * charge);
// terminal modifications
if (n_term_mod_ != nullptr &&
(type == Residue::Full || type == Residue::AIon ||
type == Residue::BIon || type == Residue::CIon ||
type == Residue::NTerminal))
{
mono_weight += n_term_mod_->getDiffMonoMass();
}
if (c_term_mod_ != nullptr &&
(type == Residue::Full || type == Residue::XIon ||
type == Residue::YIon || type == Residue::ZIon ||
type == Residue::CTerminal))
{
mono_weight += c_term_mod_->getDiffMonoMass();
}
static auto const rx = ResidueDB::getInstance()->getResidue("X");
for (auto const& e : peptide_)
{
// While PEPTIX[123]DE makes sense and represents an unknown mass of 123.0
// Da, the sequence PEPTIXDE does not make sense as it is unclear what mass
// a plain, unmodified 'X' residue should represent.
if (e == rx)
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Cannot get weight of sequence with unknown AA 'X' with unknown mass.", toString());
}
mono_weight += e->getMonoWeight(Residue::Internal);
}
// add the missing mass part for the requested terminus/ion type
switch (type)
{
case Residue::Full:
{
return mono_weight + Residue::getInternalToFull().getMonoWeight();
}
case Residue::Internal:
{
return mono_weight;
}
case Residue::NTerminal:
{
return mono_weight + Residue::getInternalToNTerm().getMonoWeight();
}
case Residue::CTerminal:
{
return mono_weight + Residue::getInternalToCTerm().getMonoWeight();
}
case Residue::AIon:
{
return mono_weight + Residue::getInternalToAIon().getMonoWeight();
}
case Residue::BIon:
{
return mono_weight + Residue::getInternalToBIon().getMonoWeight();
}
case Residue::CIon:
{
return mono_weight + Residue::getInternalToCIon().getMonoWeight();
}
case Residue::XIon:
{
return mono_weight + Residue::getInternalToXIon().getMonoWeight();
}
case Residue::YIon:
{
return mono_weight + Residue::getInternalToYIon().getMonoWeight();
}
case Residue::ZIon:
{
return mono_weight + Residue::getInternalToZIon().getMonoWeight();
}
default:
OPENMS_LOG_ERROR << "AASequence::getMonoWeight: unknown ResidueType\n";
}
return mono_weight;
}
else
{
OPENMS_LOG_ERROR << "AASequence::getMonoWeight: Mass for ResidueType " << type << " not defined for sequences of length 0.\n";
return 0.0;
}
}
/*void AASequence::getNeutralLosses(Map<const EmpiricalFormula, UInt) const
{
// the following losses are from the Zhang paper (AC, 76, 14, 2004)
// charge directed*/
/*
static const EmpiricalFormula R_44("NH2CHNH");
static const EmpiricalFormula R_59("CN3H5"); // guanidine
static const EmpiricalFormula R_61("N2H4CH");
// charge remote
static const EmpiricalFormula R_60("N2H4CO"); // combination of NH=C=NH + C-terminal H2O
static const EmpiricalFormula H2O("H2O"); // loss from the C-terminus
static const EmpiricalFormula NH3("NH3");
Map<const EmpiricalFormula*, UInt> losses;
for (Size i=0;i!=peptide_.size();++i)
{
if (peptide_[i]->hasNeutralLoss())
{
const EmpiricalFormula* loss = peptide_[i]->getLossFormulas();
if (losses.find(loss) != losses.end())
{
losses[loss]++;
}
else
{
losses[loss] = 1;
}
}
// TODO: hack this should be in the data file
if (peptide_[i]->getOneLetterCode() == "R")
{
losses[&R_44] = 1;
losses[&R_59] = 1;
losses[&R_61] = 1;
losses[&R_60] = 1;
}
losses[&H2O] = 1;
losses[&NH3] = 1;
}
return losses;
}*/
// Bounds-checked element access (same contract as getResidue()).
// @throws Exception::IndexOverflow if index is out of range
const Residue& AASequence::operator[](Size index) const
{
  if (index < size())
  {
    return *peptide_[index];
  }
  throw Exception::IndexOverflow(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, index, size());
}
// Append all residues of @p sequence to this sequence.
// NOTE: as before, terminal modifications of @p sequence are NOT copied;
// only its residue list is appended.
AASequence& AASequence::operator+=(const AASequence& sequence)
{
  // single range-insert instead of element-by-element push_back
  peptide_.insert(peptide_.end(), sequence.peptide_.begin(), sequence.peptide_.end());
  return *this;
}
// Concatenate two sequences into a new one.
// NOTE: as before, terminal modifications of BOTH operands are dropped in
// the result; only the residue lists are concatenated.
AASequence AASequence::operator+(const AASequence& sequence) const
{
  AASequence seq;
  // reserve once, then two range-inserts (avoids per-element growth)
  seq.peptide_.reserve(peptide_.size() + sequence.peptide_.size());
  seq.peptide_.insert(seq.peptide_.end(), peptide_.begin(), peptide_.end());
  seq.peptide_.insert(seq.peptide_.end(), sequence.peptide_.begin(), sequence.peptide_.end());
  return seq;
}
// Return a copy of this sequence with @p residue appended.
// @throws Exception::ElementNotFound if the residue is not registered in the
//         ResidueDB (i.e. not a valid, DB-managed residue pointer)
AASequence AASequence::operator+(const Residue* residue) const
{
  if (!ResidueDB::getInstance()->hasResidue(residue))
  {
    throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "given residue");
  }
  AASequence result(*this);
  result.peptide_.push_back(residue);
  return result;
}
// Append a single residue to this sequence.
// @throws Exception::ElementNotFound if the residue is not registered in the
//         ResidueDB
AASequence& AASequence::operator+=(const Residue* residue)
{
  if (ResidueDB::getInstance()->hasResidue(residue))
  {
    peptide_.push_back(residue);
    return *this;
  }
  throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "given residue");
}
// Number of residues in the sequence.
Size AASequence::size() const
{
return peptide_.size();
}
// Return the first @p index residues as a new sequence.
// The N-terminal modification is always carried over; the C-terminal one
// only when the prefix is the whole sequence.
// @throws Exception::IndexOverflow if index > size()
AASequence AASequence::getPrefix(Size index) const
{
  if (index > size())
  {
    throw Exception::IndexOverflow(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, index, size());
  }
  if (index == size())
  {
    return *this; // full copy keeps both terminal modifications
  }
  AASequence prefix;
  prefix.n_term_mod_ = n_term_mod_;
  prefix.peptide_.assign(peptide_.begin(), peptide_.begin() + index);
  return prefix;
}
// Return the last @p index residues as a new sequence.
// The C-terminal modification is always carried over; the N-terminal one
// only when the suffix is the whole sequence.
// @throws Exception::IndexOverflow if index > size()
AASequence AASequence::getSuffix(Size index) const
{
  if (index > size())
  {
    throw Exception::IndexOverflow(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, index, size());
  }
  if (index == size())
  {
    return *this; // full copy keeps both terminal modifications
  }
  AASequence suffix;
  suffix.c_term_mod_ = c_term_mod_;
  suffix.peptide_.assign(peptide_.end() - index, peptide_.end());
  return suffix;
}
// Return @p num residues starting at @p index as a new sequence.
// Terminal modifications are only kept if the subsequence touches the
// respective terminus of the original sequence.
// @throws Exception::IndexOverflow if index >= size() or index+num > size()
AASequence AASequence::getSubsequence(Size index, UInt num) const
{
  if (index >= size())
  {
    throw Exception::IndexOverflow(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, index, size());
  }
  if (index + num > size())
  {
    throw Exception::IndexOverflow(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, index + num, size());
  }
  AASequence sub;
  if (index == 0)
  {
    sub.n_term_mod_ = n_term_mod_; // subsequence includes the N-terminus
  }
  if (index + num == this->size())
  {
    sub.c_term_mod_ = c_term_mod_; // subsequence includes the C-terminus
  }
  sub.peptide_.assign(peptide_.begin() + index, peptide_.begin() + index + num);
  return sub;
}
bool AASequence::has(const Residue& residue) const
{
for (const Residue* rp : peptide_)
{
if (*rp == residue) return true;
}
return false;
}
// True if @p sub occurs as a contiguous run of residues in this sequence.
// Residues are compared by pointer, exactly as the previous hand-rolled
// scan did (modified residues are distinct pointers, so modifications must
// match too). Terminal modifications are ignored, as before.
bool AASequence::hasSubsequence(const AASequence& sub) const
{
  // the empty sequence is a subsequence of anything
  if (sub.empty())
  {
    return true;
  }
  if (sub.peptide_.size() > peptide_.size())
  {
    return false;
  }
  // std::search replaces the previous explicit double loop
  return std::search(peptide_.begin(), peptide_.end(),
                     sub.peptide_.begin(), sub.peptide_.end()) != peptide_.end();
}
// True if @p sequence matches the start of this sequence (residue pointers
// must match). The N-terminal modifications must agree; the C-terminal
// modifications only matter when the two sequences have equal length.
bool AASequence::hasPrefix(const AASequence& sequence) const
{
  if (sequence.empty())
  {
    return true;
  }
  if (sequence.size() > peptide_.size())
  {
    return false;
  }
  if (sequence.n_term_mod_ != n_term_mod_)
  {
    return false;
  }
  if (sequence.size() == peptide_.size() && sequence.c_term_mod_ != c_term_mod_)
  {
    return false;
  }
  return std::equal(sequence.peptide_.begin(), sequence.peptide_.end(), peptide_.begin());
}
// True if @p sequence matches the end of this sequence (residue pointers
// must match). The C-terminal modifications must agree; the N-terminal
// modifications only matter when the two sequences have equal length.
bool AASequence::hasSuffix(const AASequence& sequence) const
{
  if (sequence.empty())
  {
    return true;
  }
  if (sequence.size() > peptide_.size())
  {
    return false;
  }
  if (sequence.c_term_mod_ != c_term_mod_)
  {
    return false;
  }
  if (sequence.size() == peptide_.size() && sequence.n_term_mod_ != n_term_mod_)
  {
    return false;
  }
  // compare back-to-front via reverse iterators
  return std::equal(sequence.peptide_.rbegin(), sequence.peptide_.rend(), peptide_.rbegin());
}
// Equality: same length, same residues (pointer comparison) and same
// terminal modifications.
// Improvement: the previous per-element modification comparison was dead
// code — it was only reached when the residue pointers were already equal,
// i.e. the very same Residue object, whose getModification() trivially
// compares equal. Pointer equality of the residues covers both the amino
// acid and its modification state.
bool AASequence::operator==(const AASequence& peptide) const
{
  if (peptide_.size() != peptide.peptide_.size())
  {
    return false;
  }
  for (Size i = 0; i != size(); ++i)
  {
    if (peptide_[i] != peptide.peptide_[i])
    {
      return false;
    }
  }
  if (n_term_mod_ != peptide.n_term_mod_)
  {
    return false;
  }
  if (c_term_mod_ != peptide.c_term_mod_)
  {
    return false;
  }
  return true;
}
// Inequality, defined as the negation of operator==.
bool AASequence::operator!=(const AASequence& peptide) const
{
return !(*this == peptide);
}
// True if the sequence contains no residues.
bool AASequence::empty() const
{
return peptide_.empty();
}
bool AASequence::isModified() const
{
if (n_term_mod_ != nullptr || c_term_mod_ != nullptr)
{
return true;
}
for (std::vector<const Residue*>::const_iterator it = peptide_.begin(); it != peptide_.end(); ++it)
{
if ((*it)->isModified())
{
return true;
}
}
return false;
}
std::ostream& operator<<(std::ostream& os, const AASequence& peptide)
{
// this is basically the implementation of toString
// deal with N-terminal modifications first
if (peptide.n_term_mod_ != nullptr)
{
os << peptide.n_term_mod_->toString();
}
for (const auto& aa : peptide)
{
os << aa.toString();
}
// deal with C-terminal modifications
if (peptide.c_term_mod_ != nullptr)
{
os << peptide.c_term_mod_->toString();
}
return os;
}
// Parse a named modification in round brackets, e.g. "(Oxidation)", starting
// at @p str_it (which must point at '('), and attach it to @p aas as an
// N-/C-terminal or internal modification depending on @p specificity and
// position in the string.
// @return iterator pointing at the closing ')' (the caller's loop ++ skips it)
// @throws Exception::ParseError on a missing ')'
// @throws Exception::InvalidValue if no matching modification is found
String::ConstIterator AASequence::parseModRoundBrackets_(const String::ConstIterator str_it,
const String& str,
AASequence& aas,
const ResidueModification::TermSpecificity& specificity)
{
OPENMS_PRECONDITION(*str_it == '(', "Modification must start with '('.");
String::ConstIterator mod_start = str_it;
String::ConstIterator mod_end = ++mod_start;
// scan to the matching ')' — brackets may be nested, e.g. "(Label:13C(6))"
Size open_brackets = 1;
ModificationsDB* mod_db = ModificationsDB::getInstance();
while (mod_end != str.end())
{
if (*mod_end == ')')
{
--open_brackets;
}
else if (*mod_end == '(')
{
++open_brackets;
}
if (!open_brackets)
{
break;
}
++mod_end;
}
// Extract the actual modification as string
std::string mod(mod_start, mod_end);
if (mod_end == str.end())
{
throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, str,
"Cannot convert string to peptide modification: missing ')'");
}
// First search for N or C terminal modifications (start of peptide indicates N-terminal modification as well)
if (aas.peptide_.empty() || specificity == ResidueModification::N_TERM ||
specificity == ResidueModification::PROTEIN_N_TERM)
{
// Advance iterator one or two positions (we may or may not have a dot
// after the closing bracket) to point to the first AA of the peptide.
// NOTE(review): if the bracket is the last character of the string, next_aa
// is str.end() and the '.' check dereferences it — confirm that callers
// cannot reach this with an N-terminal mod at end-of-string.
String::ConstIterator next_aa = mod_end;
++next_aa;
if (*next_aa == '.') ++next_aa;
if (specificity == ResidueModification::PROTEIN_N_TERM)
{
aas.n_term_mod_ = proteinTerminalResidueHelper(mod_db, 'n', str, mod, String(*next_aa));
return mod_end;
}
else
{
//TODO why are we allowing Protein Term here?
aas.n_term_mod_ = terminalResidueHelper(mod_db, 'n', true, str, mod, String(*next_aa));
return mod_end;
}
}
// get one letter code of unmodified version
const String& res = aas.peptide_.back()->getOneLetterCode();
if (specificity == ResidueModification::PROTEIN_C_TERM)
{
aas.c_term_mod_ = proteinTerminalResidueHelper(mod_db, 'c', str, mod, res);
return mod_end;
}
else if (specificity == ResidueModification::C_TERM)
{
//TODO why are we allowing Protein Term here?
aas.c_term_mod_ = terminalResidueHelper(mod_db, 'c', true, str, mod, res);
return mod_end;
}
// internal modification: replace the last residue by its modified variant
try
{
const Residue* internal = ResidueDB::getInstance()->getModifiedResidue(aas.peptide_.back(), mod);
aas.peptide_.back() = internal;
}
catch(...) // no internal mod for this residue
{
// TODO: get rid of this code path, its deprecated and is only a hack for
// C/N-terminal modifications that don't use the dot notation
// distance == -1 means str_it is at string position 1, i.e. the bracket
// directly follows the first amino acid
if (std::distance(str_it, str.begin()) == -1)
{
// old ambiguous notation: Modification might be at first amino acid or at N-terminus
aas.n_term_mod_ = terminalResidueHelper(mod_db, 'n', true, str, mod, res);
}
else if (std::distance(mod_end, str.end()) == 1) // potentially a C-terminal mod without explicitly declaring it using dot notation?
{
// old ambiguous notation: Modification might be at last amino acid or at C-terminus
aas.c_term_mod_ = terminalResidueHelper(mod_db, 'c', true, str, mod, res);
}
else
{
// neither internal nor terminal modification matches to our database
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Cannot convert string to peptide modification. No modification matches in our database.", mod);
}
}
return mod_end;
}
// Parse a mass-based modification in square brackets, e.g. "[+15.99]" or
// "[147]", starting at @p str_it (which must point at '[').
// Masses may be deltas (leading '+'/'-') or absolute; integer masses are
// matched with 0.5 Da tolerance, float masses with a tolerance derived from
// the number of decimals. If no database modification matches, a new
// "unknown" modification is created and registered.
// @return iterator pointing at the closing ']' (the caller's loop ++ skips it)
// @throws Exception::ParseError on a missing ']' or a delta mass on a
//         residue of unknown mass
String::ConstIterator AASequence::parseModSquareBrackets_(const String::ConstIterator str_it,
const String& str,
AASequence& aas,
const ResidueModification::TermSpecificity& specificity)
{
OPENMS_PRECONDITION(*str_it == '[', "Modification must start with '['.");
String::ConstIterator mod_start = str_it;
String::ConstIterator mod_end = ++mod_start;
while ((mod_end != str.end()) && (*mod_end != ']')) {++mod_end;}
// Extract the actual modification as string
String mod(mod_start, mod_end);
if (mod_end == str.end())
{
throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, str,
"Cannot convert string to peptide modification: missing ']'");
}
double mass = mod.toDouble();
size_t decimal_pos = mod.find('.');
bool integer_mass = (decimal_pos == std::string::npos);
double tolerance = 0.5; // for integer mass values
if (!integer_mass) // float mass values -> adapt tolerance to decimal precision
{
// NOTE(review): '-2' counts one decimal fewer than '- 1' would; this widens
// the tolerance by a factor of 10 — confirm whether this is intentional
size_t n_decimals = mod.size() - decimal_pos - 2;
tolerance = std::pow(10.0, -int(n_decimals));
}
bool delta_mass = (mod[0] == '+') || (mod[0] == '-');
ModificationsDB* mod_db = ModificationsDB::getInstance();
const Residue* residue = nullptr;
const ResidueModification* residue_mod = nullptr;
// handle N-term modification
if (specificity == ResidueModification::N_TERM)
{
// Advance iterator one or two positions (we may or may not have a dot
// after the closing bracket) to point to the first AA of the peptide.
String::ConstIterator next_aa = mod_end;
++next_aa;
if (*next_aa == '.') ++next_aa;
std::vector<String> term_mods;
if (!integer_mass) // for non-integer mass we just pick the closest inside the tolerance
{
if (delta_mass) // N-terminal mod specified by delta mass [+123.4]
{
residue_mod = mod_db->getBestModificationByDiffMonoMass(mass, tolerance, String(*next_aa), ResidueModification::N_TERM);
}
else // N-terminal mod specified by absolute mass [123.4]
{
double mod_mass = mass - Residue::getInternalToNTerm().getMonoWeight(); // here we need to subtract the N-Term mass
residue_mod = mod_db->getBestModificationByDiffMonoMass(mod_mass, tolerance, String(*next_aa), ResidueModification::N_TERM);
}
if (residue_mod != nullptr)
{
aas.n_term_mod_ = residue_mod;
return mod_end;
}
}
else // for integer mass we report on the modification in the tolerance and report which we picked.
{
if (delta_mass) // N-terminal mod specified by delta mass [+123.4]
{
mod_db->searchModificationsByDiffMonoMass(term_mods, mass, tolerance, String(*next_aa), ResidueModification::N_TERM);
}
else // N-terminal mod specified by absolute mass [123.4]
{
double mod_mass = mass - Residue::getInternalToNTerm().getMonoWeight(); // here we need to subtract the N-Term mass
mod_db->searchModificationsByDiffMonoMass(term_mods, mod_mass, tolerance, String(*next_aa), ResidueModification::N_TERM);
}
if (!term_mods.empty())
{
aas.n_term_mod_ = mod_db->getModification(term_mods[0], String(*next_aa), ResidueModification::N_TERM);
return mod_end;
}
}
OPENMS_LOG_WARN << "Warning: unknown N-terminal modification '" + mod + "' - adding it to the database\n";
}
else if (specificity == ResidueModification::ANYWHERE) // internal (not exclusively terminal) modification
{
residue = aas.peptide_.back();
if (delta_mass && (residue->getMonoWeight() <= 0.0)) // not allowed
{
throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, str,
"Using a mass difference to specify a modification on a residue of unknown mass is not supported in '" + \
residue->getOneLetterCode() + "[" + mod + "]'");
}
if (!delta_mass) // compute delta mass based on residue mass
{
// we expect that all masses are relative to the full mass of the
// residue, not its "internal" mass in the peptide (loss of H2O)!
mass -= residue->getMonoWeight(Residue::Internal);
delta_mass = true; // in case we need to create a new residue below
}
if (integer_mass) // use first modification that matches approximately
{
std::vector<String> res_mods;
// try an internal modification first, then (at the string boundaries)
// fall back to N-/C-terminal interpretations
mod_db->searchModificationsByDiffMonoMass(res_mods, mass, tolerance, residue->getOneLetterCode(), ResidueModification::ANYWHERE);
if (!res_mods.empty())
{
aas.peptide_.back() = ResidueDB::getInstance()->getModifiedResidue(residue, res_mods[0]);
return mod_end;
}
else if (aas.size() == 1) // N-terminal mod.?
{
std::vector<String> term_mods;
mod_db->searchModificationsByDiffMonoMass(term_mods, mass, tolerance, residue->getOneLetterCode(), ResidueModification::N_TERM);
if (!term_mods.empty())
{
aas.n_term_mod_ = mod_db->getModification(term_mods[0], residue->getOneLetterCode(), ResidueModification::N_TERM);
return mod_end;
}
}
else if (std::distance(mod_end, str.end()) == 1) // C-terminal mod.?
{
mod_db->searchModificationsByDiffMonoMass(res_mods, mass, tolerance, residue->getOneLetterCode(), ResidueModification::C_TERM);
if (!res_mods.empty())
{
aas.c_term_mod_ = mod_db->getModification(res_mods[0], residue->getOneLetterCode(), ResidueModification::C_TERM);
return mod_end;
}
}
}
else // float mass -> use best-matching modification
{
const ResidueModification* res_mod = mod_db->getBestModificationByDiffMonoMass(
mass, tolerance, residue->getOneLetterCode(),
ResidueModification::ANYWHERE);
if (res_mod)
{
String id = res_mod->getId();
if (id.empty()) id = res_mod->getFullId();
aas.peptide_.back() = ResidueDB::getInstance()->getModifiedResidue(residue, id);
return mod_end;
}
else if (aas.size() == 1) // N-terminal mod.?
{
res_mod = mod_db->getBestModificationByDiffMonoMass(mass, tolerance, residue->getOneLetterCode(), ResidueModification::N_TERM);
if (res_mod)
{
aas.n_term_mod_ = res_mod;
return mod_end;
}
}
else if (std::distance(mod_end, str.end()) == 1) // C-terminal mod.?
{
res_mod = mod_db->getBestModificationByDiffMonoMass(mass, tolerance, residue->getOneLetterCode(), ResidueModification::C_TERM);
if (res_mod)
{
aas.c_term_mod_ = res_mod;
return mod_end;
}
}
}
if (residue->getOneLetterCode() != "X") // don't warn for mass tags
{
OPENMS_LOG_WARN << "Warning: unknown modification '" + mod + "' of residue '" +
residue->getOneLetterCode() + "' - adding it to the database\n";
}
}
else if (specificity == ResidueModification::C_TERM)
{
residue = aas.peptide_.back();
std::vector<String> term_mods;
if (!integer_mass) // for non-integer mass we just pick the closest inside the tolerance
{
if (delta_mass) // C-terminal mod specified by delta mass [+123.4]
{
residue_mod = mod_db->getBestModificationByDiffMonoMass(mass, tolerance, residue->getOneLetterCode(), ResidueModification::C_TERM);
}
else // C-terminal mod specified by absolute mass [123.4]
{
double mod_mass = mass - Residue::getInternalToCTerm().getMonoWeight(); // here we need to subtract the C-Term mass
residue_mod = mod_db->getBestModificationByDiffMonoMass(mod_mass, tolerance, residue->getOneLetterCode(), ResidueModification::C_TERM);
}
if (residue_mod != nullptr)
{
aas.c_term_mod_ = residue_mod;
return mod_end;
}
}
else // for integer mass we report on the modification in the tolerance and report which we picked.
{
if (delta_mass) // C-terminal mod specified by delta mass [+123]
{
mod_db->searchModificationsByDiffMonoMass(term_mods, mass, tolerance, residue->getOneLetterCode(), ResidueModification::C_TERM);
}
else // C-terminal mod specified by absolute mass [123]
{
double mod_mass = mass - Residue::getInternalToCTerm().getMonoWeight(); // here we need to subtract the C-Term mass
mod_db->searchModificationsByDiffMonoMass(term_mods, mod_mass, tolerance, residue->getOneLetterCode(), ResidueModification::C_TERM);
}
if (!term_mods.empty())
{
aas.c_term_mod_ = mod_db->getModification(term_mods[0], residue->getOneLetterCode(), ResidueModification::C_TERM);
return mod_end;
}
}
OPENMS_LOG_WARN << "Warning: unknown C-terminal modification '" + mod + "' - adding it to the database\n";
}
// -----------------------------------
// Dealing with an unknown modification
// -----------------------------------
const ResidueModification* new_mod = ResidueModification::createUnknownFromMassString(mod,
mass,
delta_mass,
specificity,
residue);
// Notes on mass calculation: AASequence::getMonoWeight uses DiffMonoMass
// for its calculation of C/N-terminal modification mass and it uses
// getMonoWeight(Residue::Internal) for each Residue. The Residue weight is
// set when adding a modification using setModification_
if (specificity == ResidueModification::N_TERM)
{
aas.n_term_mod_ = new_mod;
return mod_end;
}
else if (specificity == ResidueModification::C_TERM)
{
aas.c_term_mod_ = new_mod;
return mod_end;
}
else
{
// Note: this calls setModification_ on a new Residue which changes its
// weight to the weight of the modification (set above)
aas.peptide_.back() = ResidueDB::getInstance()->
getModifiedResidue(residue, new_mod->getFullId());
return mod_end;
}
}
// Parse a peptide string (with optional modifications and optional dot
// notation for termini) into @p aas.
// Supported syntax: one-letter residues, "(Name)" round-bracket mods,
// "[mass]" square-bracket mods, optional leading 'n' / trailing 'c' terminus
// markers, and '.' separators for the new dot notation.
// @param permissive if true, map stop codons ('*', '#', '+') to residue 'X'
//        and skip spaces instead of throwing
// @throws Exception::ParseError on unexpected characters (when !permissive)
//         or malformed modifications
void AASequence::parseString_(const String& pep, AASequence& aas,
bool permissive)
{
// Reserving space, populate it and then shrink again (since we probably
// over-allocate due to modifications). This substantially speeds up the
// function for unmodified sequences (3x speedup).
aas.peptide_.clear();
String peptide(pep);
peptide.trim();
aas.peptide_.reserve(peptide.size());
if (peptide.empty()) return;
// remove optional n and c at start and end of string
if (peptide[0] == 'n')
{
peptide.erase(0,1);
}
if (peptide.empty())
{
return;
}
if (peptide.back() == 'c')
{
peptide.pop_back();
}
if (peptide.empty()) return;
// detect if this is the new dot notation containing dots for termini and
// track if last char denoted a terminus
bool dot_terminal(false), dot_notation(false);
static ResidueDB* rdb = ResidueDB::getInstance();
for (String::ConstIterator str_it = peptide.begin();
str_it != peptide.end(); ++str_it)
{
// skip (optional) terminal delimiters, but remember that last character was a terminal one
if (*str_it == '.')
{
dot_notation = true;
dot_terminal = true;
continue;
}
// 1. default case: add unmodified, standard residue
const Residue* r = rdb->getResidue(*str_it); // "isalpha" check not needed
if (r)
{
dot_terminal = false; // since we found an AA, we are not at a terminal position any more
aas.peptide_.push_back(r);
continue;
}
// 2. modification:
// determine specificity:
// - at termini we first assume we are dealing with a N- or C-terminal modifications
// and fall back to (internal) modifications if there is none in our DB
// - otherwise we can be sure we are dealing with an internal modification
ResidueModification::TermSpecificity specificity = ResidueModification::ANYWHERE;
if (str_it == peptide.begin() || (dot_notation && dot_terminal && aas.peptide_.empty()) )
{
specificity = ResidueModification::N_TERM;
}
else if (*str_it == 'c')
{
// note that still c[...] type substring remains as only single c have been erased before
// skip 'c', record that we are dealing with a C-terminal
++str_it;
specificity = ResidueModification::C_TERM;
}
else if (dot_notation && dot_terminal && !aas.peptide_.empty())
{
specificity = ResidueModification::C_TERM;
}
// dispatch on the bracket style; both helpers return an iterator at the
// closing bracket, which the loop's ++ then skips
if (*str_it == '(')
{
str_it = parseModRoundBrackets_(str_it, peptide, aas, specificity);
}
else if (*str_it == '[')
{
str_it = parseModSquareBrackets_(str_it, peptide, aas, specificity);
}
else
{
if (permissive && ((*str_it == '*') || (*str_it == '#') ||
(*str_it == '+')))
{ // stop codons
aas.peptide_.push_back(rdb->getResidue('X'));
}
else if (permissive && (*str_it == ' '))
{ // skip, i.e. do nothing here
}
else
{
throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, peptide,
"Cannot convert string to amino acid sequence: unexpected character '" + String(*str_it) + "'");
}
}
dot_terminal = false; // previous char was no dot
}
// We do NOT deal with a single, unmodified X residue here,
// since the user might just want to represent the sequence (including modifications on other AA's),
// e.g. when digesting a peptide
// We check for 'weightless' X in places where a mass is needed, e.g. during getMonoMass()
aas.peptide_.shrink_to_fit();
}
void AASequence::getAAFrequencies(std::map<String, Size>& frequency_table) const
{
frequency_table.clear();
for (std::vector<const Residue*>::const_iterator it = peptide_.begin(); it != peptide_.end(); ++it)
{
++frequency_table[(*it)->getOneLetterCode()];
}
}
void AASequence::setModification(Size index, const String& modification)
{
if (index >= peptide_.size())
{
throw Exception::IndexOverflow(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, index, peptide_.size());
}
if (!modification.empty())
{
peptide_[index] = ResidueDB::getInstance()->getModifiedResidue(peptide_[index], modification);
}
else // remove modification
{
peptide_[index] = ResidueDB::getInstance()->getResidue(peptide_[index]->getOneLetterCode());
}
}
void AASequence::setModification(Size index, const ResidueModification* modindb)
{
if (index >= peptide_.size())
{
throw Exception::IndexOverflow(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, index, peptide_.size());
}
peptide_[index] = ResidueDB::getInstance()->getModifiedResidue(peptide_[index], modindb);
}
void AASequence::setModification(Size index, const ResidueModification& modification)
{
if (index >= peptide_.size())
{
throw Exception::IndexOverflow(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, index, peptide_.size());
}
const auto& db = ModificationsDB::getInstance();
const ResidueModification* modindb = db->searchModification(modification);
if (modindb == nullptr)
{
modindb = db->addNewModification_(modification);
}
peptide_[index] = ResidueDB::getInstance()->getModifiedResidue(peptide_[index], modindb);
}
  /// Sets the modification at position 'index' from a mono-isotopic mass
  /// difference (Da). Lookup order: (1) fast search for a user-defined mod
  /// previously registered via createUnknownFromMassString (e.g. "M[+12321]"),
  /// (2) best database match within 0.002 Da for this residue, (3) create and
  /// register a new 'unknown' modification as last resort (with a warning).
  /// @throws Exception::IndexOverflow if 'index' is out of range
  void AASequence::setModificationByDiffMonoMass(Size index, double diffMonoMass)
  {
    if (index >= peptide_.size())
    {
      throw Exception::IndexOverflow(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, index, peptide_.size());
    }
    ModificationsDB* mod_db = ModificationsDB::getInstance();
    bool multimatch = false;
    // quickly check for user-defined modification added by createUnknownFromMassString (e.g. M[+12321])
    String diffMonoMassStr = ResidueModification::getDiffMonoMassWithBracket(diffMonoMass);
    const ResidueModification* mod = mod_db->searchModificationsFast(peptide_[index]->getOneLetterCode() + diffMonoMassStr, multimatch);
    const double tol = 0.002;  // mass tolerance (Da) for the database search below
    if (mod == nullptr)
    {
      // no user-defined mod found: search the database by mass difference
      mod = mod_db->getBestModificationByDiffMonoMass(diffMonoMass, tol, peptide_[index]->getOneLetterCode(), ResidueModification::ANYWHERE);
    }
    if (mod == nullptr)
    {
      // still nothing: register a brand-new 'unknown' modification
      OPENMS_LOG_WARN << "Modification with monoisotopic mass diff. of " << diffMonoMassStr << " not found in databases with tolerance " << tol << ". Adding unknown modification.\n";
      mod = ResidueModification::createUnknownFromMassString(String(diffMonoMass),
                                                             diffMonoMass,
                                                             true,
                                                             ResidueModification::ANYWHERE,
                                                             peptide_[index]);
    }
    peptide_[index] = ResidueDB::getInstance()->getModifiedResidue(peptide_[index], mod);
  }
void AASequence::setModification(Size index, const Residue* modification)
{
peptide_[index] = modification;
}
void AASequence::setNTerminalModification(const String& modification)
{
if (modification.empty())
{
n_term_mod_ = nullptr;
return;
}
String residue = "";
if (modification.size() > 3 && modification.hasSuffix(")"))
{
char last_char_no_parentheses = modification[modification.length()-2];
if (isupper(last_char_no_parentheses))
{
residue = last_char_no_parentheses;
}
}
// For strings in our most common UniMod format
if (!modification.hasSubstring("Protein N-term"))
{
// since this method is called setNTerminalModification without further specification
// we have to look for both general terminus and Protein terminus.
// For backwards compatibility we look for the general terminus first
// We have to use try-catch since getModification unfortunately throws Exceptions.
try
{
n_term_mod_ = ModificationsDB::getInstance()
->getModification(modification, residue, ResidueModification::N_TERM);
return; // we found a mod. return
}
catch (...) {}
}
n_term_mod_ = ModificationsDB::getInstance()->getModification(modification, residue, ResidueModification::PROTEIN_N_TERM);
}
void AASequence::setCTerminalModification(const String& modification)
{
if (modification.empty())
{
c_term_mod_ = nullptr;
return;
}
String residue = "";
if (modification.size() > 3 && modification.hasSuffix(")"))
{
char last_char_no_parentheses = modification[modification.length()-2];
if (isupper(last_char_no_parentheses))
{
residue = last_char_no_parentheses;
}
}
// For strings in our most common UniMod format
if (!modification.hasSubstring("Protein C-term"))
{
// since this method is called setCTerminalModification without further specification
// we have to look for both general terminus and Protein terminus.
// For backwards compatibility we look for the general terminus first
// We have to use try-catch since getModification unfortunately throws Exceptions.
try
{
c_term_mod_ = ModificationsDB::getInstance()->getModification(modification, residue, ResidueModification::C_TERM);
return; // we found a mod. return
}
catch (...) {}
}
c_term_mod_ = ModificationsDB::getInstance()->getModification(modification, residue, ResidueModification::PROTEIN_C_TERM);
}
void AASequence::setCTerminalModification(const ResidueModification& mod)
{
ModificationsDB* mod_db = ModificationsDB::getInstance();
//TODO think again. Most functions here or in ModificationsDB only check for fullID
c_term_mod_ = mod_db->searchModification(mod);
if (c_term_mod_ == nullptr)
{
c_term_mod_ = mod_db->addNewModification_(mod);
}
}
void AASequence::setNTerminalModification(const ResidueModification& mod)
{
ModificationsDB* mod_db = ModificationsDB::getInstance();
//TODO think again. Most functions here or in ModificationsDB only check for fullID
n_term_mod_ = mod_db->searchModification(mod);
if (n_term_mod_ == nullptr)
{
n_term_mod_ = mod_db->addNewModification_(mod);
}
}
  /// Sets the C-terminal modification directly from a (DB-owned) pointer;
  /// nullptr clears it. No lookup or validation is performed.
  void AASequence::setCTerminalModification(const ResidueModification* modification)
  {
    c_term_mod_ = modification;
  }
  /// Sets the N-terminal modification directly from a (DB-owned) pointer;
  /// nullptr clears it. No lookup or validation is performed.
  void AASequence::setNTerminalModification(const ResidueModification* modification)
  {
    n_term_mod_ = modification;
  }
void AASequence::setCTerminalModificationByDiffMonoMass(double diffMonoMass, bool protein_term)
{
// since this method is called setCTerminalModification without further specification
// we have to look for both general terminus and Protein terminus.
// For backwards compatibility we look for the general terminus first
ResidueModification::TermSpecificity term = protein_term ? ResidueModification::PROTEIN_C_TERM : ResidueModification::C_TERM;
double tol = 0.002;
ModificationsDB* mod_db = ModificationsDB::getInstance();
bool multimatch = false;
// quickly check for user-defined modification added by createUnknownFromMassString (e.g. M[+12321])
String diffMonoMassStr = ResidueModification::getDiffMonoMassWithBracket(diffMonoMass);
// TODO make a distinction in the FullID about protein vs peptide term??
const ResidueModification* n_term_mod_ = mod_db->searchModificationsFast(".c"+diffMonoMassStr, multimatch);
String residue = "";
if (n_term_mod_ == nullptr)
{
n_term_mod_ = ModificationsDB::getInstance()
->getBestModificationByDiffMonoMass(diffMonoMass, tol, residue, term);
}
if (n_term_mod_ == nullptr)
{
OPENMS_LOG_WARN << "Modification with monoisotopic mass diff. of " << diffMonoMassStr << " not found in databases with tolerance " << tol << ". Adding unknown modification.\n";
n_term_mod_ = ResidueModification::createUnknownFromMassString(String(diffMonoMass),
diffMonoMass,
true,
term);
}
}
void AASequence::setNTerminalModificationByDiffMonoMass(double diffMonoMass, bool protein_term)
{
// since this method is called setNTerminalModification without further specification
// we have to look for both general terminus and Protein terminus.
// For backwards compatibility we look for the general terminus first
ResidueModification::TermSpecificity term = protein_term ? ResidueModification::PROTEIN_N_TERM : ResidueModification::N_TERM;
double tol = 0.002;
ModificationsDB* mod_db = ModificationsDB::getInstance();
bool multimatch = false;
// quickly check for user-defined modification added by createUnknownFromMassString (e.g. M[+12321])
String diffMonoMassStr = ResidueModification::getDiffMonoMassWithBracket(diffMonoMass);
// TODO make a distinction in the FullID about protein vs peptide term??
const ResidueModification* n_term_mod_ = mod_db->searchModificationsFast(".n"+diffMonoMassStr, multimatch);
String residue = "";
if (n_term_mod_ == nullptr)
{
n_term_mod_ = ModificationsDB::getInstance()
->getBestModificationByDiffMonoMass(diffMonoMass, tol, residue, term);
}
if (n_term_mod_ == nullptr)
{
OPENMS_LOG_WARN << "Modification with monoisotopic mass diff. of " << diffMonoMassStr << " not found in databases with tolerance " << tol << ". Adding unknown modification.\n";
n_term_mod_ = ResidueModification::createUnknownFromMassString(String(diffMonoMass),
diffMonoMass,
true,
term);
}
}
  /// Returns the Id of the N-terminal modification, or an empty string if none is set.
  const String& AASequence::getNTerminalModificationName() const
  {
    if (n_term_mod_ == nullptr) return String::EMPTY;
    return n_term_mod_->getId();
  }
  /// Returns the N-terminal modification (nullptr if none is set).
  const ResidueModification* AASequence::getNTerminalModification() const
  {
    return n_term_mod_;
  }
  /// Returns the C-terminal modification (nullptr if none is set).
  const ResidueModification* AASequence::getCTerminalModification() const
  {
    return c_term_mod_;
  }
  /// Returns the Id of the C-terminal modification, or an empty string if none is set.
  const String& AASequence::getCTerminalModificationName() const
  {
    if (c_term_mod_ == nullptr) return String::EMPTY;
    return c_term_mod_->getId();
  }
  /// True iff an N-terminal modification is set.
  bool AASequence::hasNTerminalModification() const
  {
    return n_term_mod_ != nullptr;
  }
  /// True iff a C-terminal modification is set.
  bool AASequence::hasCTerminalModification() const
  {
    return c_term_mod_ != nullptr;
  }
AASequence AASequence::fromString(const String& s, bool permissive)
{
AASequence aas;
parseString_(s, aas, permissive);
return aas;
}
AASequence AASequence::fromString(const char* s, bool permissive)
{
AASequence aas;
parseString_(String(s), aas, permissive);
return aas;
}
}
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Eugen Netz $
// $Authors: Eugen Netz $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/TheoreticalSpectrumGeneratorXLMS.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CHEMISTRY/AASequence.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
using namespace std;
namespace OpenMS
{
  /// Default constructor: registers all parameters (ion types, intensities,
  /// isotope/loss/precursor options) with DefaultParamHandler and pre-computes
  /// 'loss_db_', a per-residue lookup of possible H2O / NH3 neutral losses.
  TheoreticalSpectrumGeneratorXLMS::TheoreticalSpectrumGeneratorXLMS() :
    DefaultParamHandler("TheoreticalSpectrumGeneratorXLMS")
  {
    // TODO only partly functional (second isotopic peak if max_isotope = 2)
    defaults_.setValue("add_isotopes", "false", "If set to 1 isotope peaks of the product ion peaks are added");
    defaults_.setValidStrings("add_isotopes", {"true","false"});
    defaults_.setValue("max_isotope", 2, "Defines the maximal isotopic peak which is added, add_isotopes must be set to 1");
    defaults_.setValue("add_metainfo", "true", "Adds the type of peaks as metainfo to the peaks, like y8+, [M-H2O+2H]++");
    defaults_.setValidStrings("add_metainfo", {"true","false"});
    defaults_.setValue("add_charges", "true", "Adds the charges to a DataArray of the spectrum");
    defaults_.setValidStrings("add_charges", {"true","false"});
    defaults_.setValue("add_losses", "false", "Adds common losses to those ion expect to have them, only water and ammonia loss is considered");
    defaults_.setValidStrings("add_losses", {"true","false"});
    defaults_.setValue("add_precursor_peaks", "true", "Adds peaks of the precursor to the spectrum, which happen to occur sometimes");
    defaults_.setValidStrings("add_precursor_peaks", {"true","false"});
    // TODO not functional yet
    defaults_.setValue("add_abundant_immonium_ions", "false", "Add most abundant immonium ions");
    defaults_.setValidStrings("add_abundant_immonium_ions", {"true","false"});
    defaults_.setValue("add_k_linked_ions", "true", "Add RES-Linked ions, which are specific to XLMS");
    defaults_.setValidStrings("add_k_linked_ions", {"true","false"});
    // TODO not functional yet
    defaults_.setValue("add_first_prefix_ion", "true", "If set to true e.g. b1 ions are added");
    defaults_.setValidStrings("add_first_prefix_ion", {"true","false"});
    defaults_.setValue("add_y_ions", "true", "Add peaks of y-ions to the spectrum");
    defaults_.setValidStrings("add_y_ions", {"true","false"});
    defaults_.setValue("add_b_ions", "true", "Add peaks of b-ions to the spectrum");
    defaults_.setValidStrings("add_b_ions", {"true","false"});
    defaults_.setValue("add_a_ions", "true", "Add peaks of a-ions to the spectrum");
    defaults_.setValidStrings("add_a_ions", {"true","false"});
    defaults_.setValue("add_c_ions", "false", "Add peaks of c-ions to the spectrum");
    defaults_.setValidStrings("add_c_ions", {"true","false"});
    defaults_.setValue("add_x_ions", "false", "Add peaks of x-ions to the spectrum");
    defaults_.setValidStrings("add_x_ions", {"true","false"});
    defaults_.setValue("add_z_ions", "false", "Add peaks of z-ions to the spectrum");
    defaults_.setValidStrings("add_z_ions", {"true","false"});
    // intensity options of the ions
    defaults_.setValue("y_intensity", 1.0, "Intensity of the y-ions");
    defaults_.setValue("b_intensity", 1.0, "Intensity of the b-ions");
    defaults_.setValue("a_intensity", 1.0, "Intensity of the a-ions");
    defaults_.setValue("c_intensity", 1.0, "Intensity of the c-ions");
    defaults_.setValue("x_intensity", 1.0, "Intensity of the x-ions");
    defaults_.setValue("z_intensity", 1.0, "Intensity of the z-ions");
    defaults_.setValue("relative_loss_intensity", 0.1, "Intensity of loss ions, in relation to the intact ion intensity");
    // precursor intensity
    defaults_.setValue("precursor_intensity", 1.0, "Intensity of the precursor peak");
    defaults_.setValue("precursor_H2O_intensity", 1.0, "Intensity of the H2O loss peak of the precursor");
    defaults_.setValue("precursor_NH3_intensity", 1.0, "Intensity of the NH3 loss peak of the precursor");
    defaultsToParam_();
    // preprocess loss_db_, a database of H2O and NH3 losses for all residues
    AASequence residues = AASequence::fromString("RHKDESTNQCUGPAVILMFYW");
    for (Size i = 0; i < residues.size(); ++i)
    {
      LossIndex residue_losses;
      loss_db_.insert(std::make_pair(residues[i].getOneLetterCode(), residue_losses));
      if (residues[i].hasNeutralLoss())
      {
        vector<EmpiricalFormula> loss_formulas = residues[i].getLossFormulas();
        for (Size k = 0; k != loss_formulas.size(); ++k)
        {
          String loss_name = loss_formulas[k].toString();
          if (loss_name == "H2O1") // for now only these most common losses are considered
          {
            // remember the loss mass once; it is the same for all residues
            if (loss_H2O_ < 1)
            {
              loss_H2O_ = loss_formulas[k].getMonoWeight();
            }
            loss_db_[residues[i].getOneLetterCode()].has_H2O_loss = true;
          }
          if (loss_name == "H3N1")
          {
            if (loss_NH3_ < 1)
            {
              loss_NH3_ = loss_formulas[k].getMonoWeight();
            }
            loss_db_[residues[i].getOneLetterCode()].has_NH3_loss = true;
          }
        }
      }
    }
  }
  /// Copy constructor: copies the parameter state via the DefaultParamHandler base.
  // NOTE(review): member caches (add_*_ flags, intensities, loss_db_) are not
  // copied here explicitly -- presumably they are re-synced from the Param
  // object (e.g. via updateMembers_); verify against DefaultParamHandler.
  TheoreticalSpectrumGeneratorXLMS::TheoreticalSpectrumGeneratorXLMS(const TheoreticalSpectrumGeneratorXLMS & rhs) :
    DefaultParamHandler(rhs)
  {
  }
  /// Assignment operator: self-assignment safe; delegates the parameter state
  /// to the DefaultParamHandler base.
  TheoreticalSpectrumGeneratorXLMS & TheoreticalSpectrumGeneratorXLMS::operator=(const TheoreticalSpectrumGeneratorXLMS & rhs)
  {
    if (this != &rhs)
    {
      DefaultParamHandler::operator=(rhs);
    }
    return *this;
  }
  /// Destructor: nothing to release beyond the base class.
  TheoreticalSpectrumGeneratorXLMS::~TheoreticalSpectrumGeneratorXLMS() = default;
  /// Generates the spectrum of linear (non-cross-linked) fragment ions of
  /// 'peptide' for all charges 1..'charge' and all enabled ion types,
  /// appending to 'spectrum'. 'link_pos' (and 'link_pos_2' for loop-links)
  /// delimit the fragment range (see addLinearPeaks_); 'frag_alpha' selects the
  /// "alpha"/"beta" annotation prefix. The spectrum is sorted by m/z at the end.
  void TheoreticalSpectrumGeneratorXLMS::getLinearIonSpectrum(PeakSpectrum & spectrum, AASequence & peptide, Size link_pos, bool frag_alpha, int charge, Size link_pos_2) const
  {
    // Work on local copies of the data arrays; existing content (index 0) is
    // preserved so repeated calls keep extending the same annotation arrays.
    PeakSpectrum::IntegerDataArray charges;
    PeakSpectrum::StringDataArray ion_names;
    if (add_charges_)
    {
      if (!spectrum.getIntegerDataArrays().empty())
      {
        charges = spectrum.getIntegerDataArrays()[0];
      }
      charges.setName("charge");
    }
    if (add_metainfo_)
    {
      if (!spectrum.getStringDataArrays().empty())
      {
        ion_names = spectrum.getStringDataArrays()[0];
      }
      ion_names.setName(Constants::UserParam::IonNames);
    }
    // per-position loss candidates, computed once and reused for every ion type
    std::vector< LossIndex > forward_losses;
    std::vector< LossIndex > backward_losses;
    if (add_losses_)
    {
      forward_losses = getForwardLosses_(peptide);
      backward_losses = getBackwardLosses_(peptide);
    }
    for (Int z = 1; z <= charge; ++z)
    {
      if (add_b_ions_)
      {
        addLinearPeaks_(spectrum, charges, ion_names, peptide, link_pos, frag_alpha, Residue::BIon, forward_losses, backward_losses, z, link_pos_2);
      }
      if (add_y_ions_)
      {
        addLinearPeaks_(spectrum, charges, ion_names, peptide, link_pos, frag_alpha, Residue::YIon, forward_losses, backward_losses, z, link_pos_2);
      }
      if (add_a_ions_)
      {
        addLinearPeaks_(spectrum, charges, ion_names, peptide, link_pos, frag_alpha, Residue::AIon, forward_losses, backward_losses, z, link_pos_2);
      }
      if (add_x_ions_)
      {
        addLinearPeaks_(spectrum, charges, ion_names, peptide, link_pos, frag_alpha, Residue::XIon, forward_losses, backward_losses, z, link_pos_2);
      }
      if (add_c_ions_)
      {
        addLinearPeaks_(spectrum, charges, ion_names, peptide, link_pos, frag_alpha, Residue::CIon, forward_losses, backward_losses, z, link_pos_2);
      }
      if (add_z_ions_)
      {
        addLinearPeaks_(spectrum, charges, ion_names, peptide, link_pos, frag_alpha, Residue::ZIon, forward_losses, backward_losses, z, link_pos_2);
      }
    }
    // write the (possibly extended) data arrays back into slot 0
    if (add_charges_)
    {
      if (!spectrum.getIntegerDataArrays().empty())
      {
        spectrum.getIntegerDataArrays()[0] = charges;
      }
      else
      {
        spectrum.getIntegerDataArrays().push_back(charges);
      }
    }
    if (add_metainfo_)
    {
      if (!spectrum.getStringDataArrays().empty())
      {
        spectrum.getStringDataArrays()[0] = ion_names;
      }
      else
      {
        spectrum.getStringDataArrays().push_back(ion_names);
      }
    }
    spectrum.sortByPosition();
    return;
  }
void TheoreticalSpectrumGeneratorXLMS::addLinearPeaks_(PeakSpectrum & spectrum, DataArrays::IntegerDataArray & charges, DataArrays::StringDataArray & ion_names, AASequence & peptide, Size link_pos, bool frag_alpha, Residue::ResidueType res_type, std::vector< LossIndex > & forward_losses, std::vector< LossIndex > & backward_losses, int charge, Size link_pos_2) const
{
if (peptide.empty())
{
cout << "Warning: Attempt at creating XLink Ions Spectrum from empty string!" << endl;
return;
}
String ion_type;
if (frag_alpha)
{
ion_type = "alpha|ci";
}
else
{
ion_type = "beta|ci";
}
// second link position, in case of a loop-link
Size link_pos_B = link_pos_2;
if (link_pos_2 == 0)
{
link_pos_B = link_pos;
}
double intensity(1);
switch (res_type)
{
case Residue::AIon: intensity = a_intensity_; break;
case Residue::BIon: intensity = b_intensity_; break;
case Residue::CIon: if (peptide.size() < 2) throw Exception::InvalidSize(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, 1, "peptide must have at least 2 residues for c-ion generation"); intensity = c_intensity_; break;
case Residue::XIon: if (peptide.size() < 2) throw Exception::InvalidSize(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, 1, "peptide must have at least 2 residues for x-ion generation"); intensity = x_intensity_; break;
case Residue::YIon: intensity = y_intensity_; break;
case Residue::ZIon: intensity = z_intensity_; break;
default: break;
}
if (res_type == Residue::AIon || res_type == Residue::BIon || res_type == Residue::CIon)
{
double mono_weight(Constants::PROTON_MASS_U * static_cast<double>(charge));
if (peptide.hasNTerminalModification())
{
mono_weight += peptide.getNTerminalModification()->getDiffMonoMass();
}
switch (res_type)
{
case Residue::AIon: mono_weight += Residue::getInternalToAIon().getMonoWeight(); break;
case Residue::BIon: mono_weight += Residue::getInternalToBIon().getMonoWeight(); break;
case Residue::CIon: mono_weight += Residue::getInternalToCIon().getMonoWeight(); break;
default: break;
}
Size i = 0;
for (; i < link_pos; ++i)
{
mono_weight += peptide[i].getMonoWeight(Residue::Internal);
double pos(mono_weight / static_cast<double>(charge));
int frag_index = i+1;
addPeak_(spectrum, charges, ion_names, pos, intensity, res_type, frag_index, charge, ion_type);
if (add_losses_)
{
addLinearIonLosses_(spectrum, charges, ion_names, mono_weight, res_type, frag_index, intensity, charge, ion_type, forward_losses[i]);
}
if (add_isotopes_ && max_isotope_ >= 2) // add second isotopic peak with fast method, if two or more peaks are asked for
{
pos += Constants::C13C12_MASSDIFF_U / static_cast<double>(charge);
addPeak_(spectrum, charges, ion_names, pos, intensity, res_type, frag_index, charge, ion_type);
}
}
}
else // if (res_type == Residue::XIon || res_type == Residue::YIon || res_type == Residue::ZIon)
{
double mono_weight(Constants::PROTON_MASS_U * static_cast<double>(charge));
if (peptide.hasCTerminalModification())
{
mono_weight += peptide.getCTerminalModification()->getDiffMonoMass();
}
switch (res_type)
{
case Residue::XIon: mono_weight += Residue::getInternalToXIon().getMonoWeight(); break;
case Residue::YIon: mono_weight += Residue::getInternalToYIon().getMonoWeight(); break;
case Residue::ZIon: mono_weight += Residue::getInternalToZIon().getMonoWeight(); break;
default: break;
}
for (Size i = peptide.size()-1; i > link_pos_B; --i)
{
mono_weight += peptide[i].getMonoWeight(Residue::Internal);
double pos(mono_weight / static_cast<double>(charge));
int frag_index = peptide.size() - i;
addPeak_(spectrum, charges, ion_names, pos, intensity, res_type, frag_index, charge, ion_type);
if (add_losses_)
{
addLinearIonLosses_(spectrum, charges, ion_names, pos, res_type, frag_index, intensity, charge, ion_type, backward_losses[i]);
}
if (add_isotopes_ && max_isotope_ >= 2) // add second isotopic peak with fast method, if two or more peaks are asked for
{
pos += Constants::C13C12_MASSDIFF_U / static_cast<double>(charge);
addPeak_(spectrum, charges, ion_names, pos, intensity, res_type, frag_index, charge, ion_type);
}
}
}
return;
}
  /// Generates the spectrum of cross-linker-containing fragment ions of
  /// 'peptide' for all charges 'mincharge'..'maxcharge' and all enabled ion
  /// types, appending to 'spectrum'. 'precursor_mass' is the full mass of both
  /// linked peptides plus linker; optionally adds K-linked ions and precursor
  /// peaks. The spectrum is sorted by m/z at the end.
  void TheoreticalSpectrumGeneratorXLMS::getXLinkIonSpectrum(PeakSpectrum & spectrum, AASequence & peptide, Size link_pos, double precursor_mass, bool frag_alpha, int mincharge, int maxcharge, Size link_pos_2) const
  {
    // Work on local copies of the data arrays; existing content (index 0) is
    // preserved so repeated calls keep extending the same annotation arrays.
    PeakSpectrum::IntegerDataArray charges;
    PeakSpectrum::StringDataArray ion_names;
    if (add_charges_)
    {
      if (!spectrum.getIntegerDataArrays().empty())
      {
        charges = spectrum.getIntegerDataArrays()[0];
      }
      charges.setName("charge");
    }
    if (add_metainfo_)
    {
      if (!spectrum.getStringDataArrays().empty())
      {
        ion_names = spectrum.getStringDataArrays()[0];
      }
      ion_names.setName(Constants::UserParam::IonNames);
    }
    // per-position loss candidates, computed once and reused for every ion type
    std::vector< LossIndex > forward_losses;
    std::vector< LossIndex > backward_losses;
    if (add_losses_)
    {
      forward_losses = getForwardLosses_(peptide);
      backward_losses = getBackwardLosses_(peptide);
    }
    for (Int z = mincharge; z <= maxcharge; ++z)
    {
      if (add_b_ions_)
      {
        addXLinkIonPeaks_(spectrum, charges, ion_names, peptide, link_pos, precursor_mass, frag_alpha, Residue::BIon, forward_losses, backward_losses, z, link_pos_2);
      }
      if (add_y_ions_)
      {
        addXLinkIonPeaks_(spectrum, charges, ion_names, peptide, link_pos, precursor_mass, frag_alpha, Residue::YIon, forward_losses, backward_losses, z, link_pos_2);
      }
      if (add_a_ions_)
      {
        addXLinkIonPeaks_(spectrum, charges, ion_names, peptide, link_pos, precursor_mass, frag_alpha, Residue::AIon, forward_losses, backward_losses, z, link_pos_2);
      }
      if (add_x_ions_)
      {
        addXLinkIonPeaks_(spectrum, charges, ion_names, peptide, link_pos, precursor_mass, frag_alpha, Residue::XIon, forward_losses, backward_losses, z, link_pos_2);
      }
      if (add_c_ions_)
      {
        addXLinkIonPeaks_(spectrum, charges, ion_names, peptide, link_pos, precursor_mass, frag_alpha, Residue::CIon, forward_losses, backward_losses, z, link_pos_2);
      }
      if (add_z_ions_)
      {
        addXLinkIonPeaks_(spectrum, charges, ion_names, peptide, link_pos, precursor_mass, frag_alpha, Residue::ZIon, forward_losses, backward_losses, z, link_pos_2);
      }
      if (add_k_linked_ions_)
      {
        addKLinkedIonPeaks_(spectrum, charges, ion_names, peptide, link_pos, precursor_mass, frag_alpha, z);
      }
    }
    if (add_precursor_peaks_)
    {
      // precursor peaks only at the highest requested charge
      addPrecursorPeaks_(spectrum, charges, ion_names, precursor_mass, maxcharge);
    }
    // write the (possibly extended) data arrays back into slot 0
    if (add_charges_)
    {
      if (!spectrum.getIntegerDataArrays().empty())
      {
        spectrum.getIntegerDataArrays()[0] = charges;
      }
      else
      {
        spectrum.getIntegerDataArrays().push_back(charges);
      }
    }
    if (add_metainfo_)
    {
      if (!spectrum.getStringDataArrays().empty())
      {
        spectrum.getStringDataArrays()[0] = ion_names;
      }
      else
      {
        spectrum.getStringDataArrays().push_back(ion_names);
      }
    }
    spectrum.sortByPosition();
    return;
  }
  /// Adds peaks for cross-linker-containing fragments of one ion type and one
  /// charge. Masses are computed top-down: start from the full precursor mass
  /// (both peptides + linker) and subtract residues from the unlinked end, so
  /// every generated fragment still contains the linker. "xi" in the annotation
  /// marks cross-linked ions.
  void TheoreticalSpectrumGeneratorXLMS::addXLinkIonPeaks_(PeakSpectrum & spectrum, DataArrays::IntegerDataArray & charges, DataArrays::StringDataArray & ion_names, AASequence & peptide, Size link_pos, double precursor_mass, bool frag_alpha, Residue::ResidueType res_type, std::vector< LossIndex > & forward_losses, std::vector< LossIndex > & backward_losses, int charge, Size link_pos_2) const
  {
    if (peptide.empty())
    {
      cout << "Warning: Attempt at creating XLink Ions Spectrum from empty string!" << endl;
      return;
    }
    String ion_type;
    if (frag_alpha)
    {
      ion_type = "alpha|xi";
    }
    else
    {
      ion_type = "beta|xi";
    }
    // second link position, in case of a loop-link
    Size link_pos_B = link_pos_2;
    if (link_pos_2 == 0)
    {
      link_pos_B = link_pos;
    }
    double intensity(1);
    switch (res_type)
    {
      case Residue::AIon: intensity = a_intensity_; break;
      case Residue::BIon: intensity = b_intensity_; break;
      case Residue::CIon: if (peptide.size() < 2) throw Exception::InvalidSize(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, 1, "peptide must have at least 2 residues for c-ion generation"); intensity = c_intensity_; break;
      case Residue::XIon: if (peptide.size() < 2) throw Exception::InvalidSize(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, 1, "peptide must have at least 2 residues for x-ion generation"); intensity = x_intensity_; break;
      case Residue::YIon: intensity = y_intensity_; break;
      case Residue::ZIon: intensity = z_intensity_; break;
      default: break;
    }
    if (res_type == Residue::AIon || res_type == Residue::BIon || res_type == Residue::CIon)
    {
      // whole mass of both peptides + cross-link (or peptide + mono-link), converted to an internal ion
      double mono_weight((Constants::PROTON_MASS_U * static_cast<double>(charge)) + precursor_mass - Residue::getInternalToFull().getMonoWeight());
      if (peptide.hasCTerminalModification())
      {
        // the C-terminal residues are peeled off below, so their terminal mod goes too
        mono_weight -= peptide.getCTerminalModification()->getDiffMonoMass();
      }
      // adjust mass to given residue type
      switch (res_type)
      {
        case Residue::AIon: mono_weight += Residue::getInternalToAIon().getMonoWeight(); break;
        case Residue::BIon: mono_weight += Residue::getInternalToBIon().getMonoWeight(); break;
        case Residue::CIon: mono_weight += Residue::getInternalToCIon().getMonoWeight(); break;
        default: break;
      }
      // subtract one residue at a time
      for (Size i = peptide.size()-1; i > link_pos_B; --i)
      {
        mono_weight -= peptide[i].getMonoWeight(Residue::Internal);
        double pos(mono_weight / static_cast<double>(charge));
        int frag_index = i;  // prefix ion containing residues [0, i)
        addPeak_(spectrum, charges, ion_names, pos, intensity, res_type, frag_index, charge, ion_type);
        if (add_losses_ && forward_losses.size() >= i)  // guard: losses up to residue i-1 must exist
        {
          String ion_name = "[" + ion_type + "$" + String(Residue::residueTypeToIonLetter(res_type)) + String(frag_index) + "]";
          addXLinkIonLosses_(spectrum, charges, ion_names, mono_weight, intensity, charge, ion_name, forward_losses[i-1]);
        }
        if (add_isotopes_ && max_isotope_ >= 2) // add second isotopic peak with fast method, if two or more peaks are asked for
        {
          pos += Constants::C13C12_MASSDIFF_U / static_cast<double>(charge);
          addPeak_(spectrum, charges, ion_names, pos, intensity, res_type, frag_index, charge, ion_type);
        }
      }
    }
    else // if (res_type == Residue::XIon || res_type == Residue::YIon || res_type == Residue::ZIon)
    {
      // whole mass of both peptides + cross-link (or peptide + mono-link), converted to an internal ion
      double mono_weight((Constants::PROTON_MASS_U * static_cast<double>(charge)) + precursor_mass - Residue::getInternalToFull().getMonoWeight()); // whole mass
      if (peptide.hasNTerminalModification())
      {
        // the N-terminal residues are peeled off below, so their terminal mod goes too
        mono_weight -= peptide.getNTerminalModification()->getDiffMonoMass();
      }
      // adjust mass to given residue type
      switch (res_type)
      {
        case Residue::XIon: mono_weight += Residue::getInternalToXIon().getMonoWeight(); break;
        case Residue::YIon: mono_weight += Residue::getInternalToYIon().getMonoWeight(); break;
        case Residue::ZIon: mono_weight += Residue::getInternalToZIon().getMonoWeight(); break;
        default: break;
      }
      // subtract one residue at a time
      for (Size i = 0; i < link_pos; ++i)
      {
        mono_weight -= peptide[i].getMonoWeight(Residue::Internal);
        double pos(mono_weight / static_cast<double>(charge));
        int frag_index = peptide.size() - 1 - i;  // suffix ion containing residues (i, end]
        addPeak_(spectrum, charges, ion_names, pos, intensity, res_type, frag_index, charge, ion_type);
        if (add_losses_ && backward_losses.size() >= i+2)  // guard: losses from residue i+1 onwards must exist
        {
          String ion_name = "[" + ion_type + "$" + String(Residue::residueTypeToIonLetter(res_type)) + String(frag_index) + "]";
          addXLinkIonLosses_(spectrum, charges, ion_names, mono_weight, intensity, charge, ion_name, backward_losses[i+1]);
        }
        if (add_isotopes_ && max_isotope_ >= 2) // add second isotopic peak with fast method, if two or more peaks are asked for
        {
          pos += Constants::C13C12_MASSDIFF_U / static_cast<double>(charge);
          addPeak_(spectrum, charges, ion_names, pos, intensity, res_type, frag_index, charge, ion_type);
        }
      }
    }
    return;
  }
// helper to add a single peak to a spectrum (simple fragmentation)
void TheoreticalSpectrumGeneratorXLMS::addPeak_(PeakSpectrum & spectrum, DataArrays::IntegerDataArray & charges, DataArrays::StringDataArray & ion_names, double pos, double intensity, Residue::ResidueType res_type, Size frag_index, int charge, String ion_type) const
{
if (pos < 0) {return;}
Peak1D p;
p.setMZ(pos);
p.setIntensity(intensity);
spectrum.push_back(p);
if (add_metainfo_)
{
ion_names.emplace_back("[" + ion_type + "$" + String(Residue::residueTypeToIonLetter(res_type)) + String(frag_index) + "]");
}
if (add_charges_)
{
charges.push_back(charge);
}
}
void TheoreticalSpectrumGeneratorXLMS::addLinearIonLosses_(PeakSpectrum & spectrum, DataArrays::IntegerDataArray& charges, DataArrays::StringDataArray& ion_names, double mono_weight, Residue::ResidueType res_type, Size frag_index, double intensity, int charge, String ion_type, LossIndex & losses) const
{
Peak1D p;
p.setIntensity(intensity * rel_loss_intensity_);
if (losses.has_H2O_loss)
{
double mass_with_loss = mono_weight - loss_H2O_;
if (mass_with_loss > 0.0)
{
p.setMZ(mass_with_loss / static_cast<double>(charge));
if (add_metainfo_)
{
// remove final bracket, insert loss name and add the bracket again
ion_names.emplace_back("[" + ion_type + "$" + String(Residue::residueTypeToIonLetter(res_type)) + String(frag_index) + "-H2O1]");
}
if (add_charges_)
{
charges.push_back(charge);
}
spectrum.push_back(p);
}
}
if (losses.has_NH3_loss)
{
double mass_with_loss = mono_weight - loss_NH3_;
if (mass_with_loss > 0.0)
{
p.setMZ(mass_with_loss / static_cast<double>(charge));
if (add_metainfo_)
{
// remove final bracket, insert loss name and add the bracket again
ion_names.emplace_back("[" + ion_type + "$" + String(Residue::residueTypeToIonLetter(res_type)) + String(frag_index) + "-H3N1]");
}
if (add_charges_)
{
charges.push_back(charge);
}
spectrum.push_back(p);
}
}
}
void TheoreticalSpectrumGeneratorXLMS::addPrecursorPeaks_(PeakSpectrum & spectrum, DataArrays::IntegerDataArray & charges, DataArrays::StringDataArray & ion_names, double precursor_mass, int charge) const
{
Peak1D p;
// precursor peak
double mono_pos = precursor_mass + (Constants::PROTON_MASS_U * static_cast<double>(charge));
p.setMZ(mono_pos / static_cast<double>(charge));
p.setIntensity(pre_int_);
if (add_metainfo_)
{
ion_names.emplace_back("[M+H]");
}
if (add_charges_)
{
charges.push_back(charge);
}
spectrum.push_back(p);
if (add_isotopes_ && max_isotope_ >= 2) // add second isotopic peak with fast method, if two or more peaks are asked for
{
double pos = mono_pos + (Constants::C13C12_MASSDIFF_U / static_cast<double>(charge));
p.setMZ(pos);
p.setIntensity(pre_int_);
if (add_metainfo_)
{
ion_names.emplace_back("[M+H]");
}
if (add_charges_)
{
charges.push_back(charge);
}
spectrum.push_back(p);
}
// loss peaks of the precursor
// loss of water
mono_pos = precursor_mass + (Constants::PROTON_MASS_U * static_cast<double>(charge)) - EmpiricalFormula("H2O").getMonoWeight();
p.setMZ(mono_pos / static_cast<double>(charge));
p.setIntensity(pre_int_H2O_);
if (add_metainfo_)
{
ion_names.emplace_back("[M+H]-H2O");
}
if (add_charges_)
{
charges.push_back(charge);
}
spectrum.push_back(p);
if (add_isotopes_ && max_isotope_ >= 2) // add second isotopic peak with fast method, if two or more peaks are asked for
{
double pos = mono_pos + (Constants::C13C12_MASSDIFF_U / static_cast<double>(charge));
p.setMZ(pos);
p.setIntensity(pre_int_H2O_);
if (add_metainfo_)
{
ion_names.emplace_back("[M+H]-H2O");
}
if (add_charges_)
{
charges.push_back(charge);
}
spectrum.push_back(p);
}
//loss of ammonia
mono_pos = precursor_mass + (Constants::PROTON_MASS_U * static_cast<double>(charge)) - EmpiricalFormula("NH3").getMonoWeight();
p.setMZ(mono_pos / static_cast<double>(charge));
p.setIntensity(pre_int_NH3_);
if (add_metainfo_)
{
ion_names.emplace_back("[M+H]-NH3");
}
if (add_charges_)
{
charges.push_back(charge);
}
spectrum.push_back(p);
if (add_isotopes_ && max_isotope_ >= 2) // add second isotopic peak with fast method, if two or more peaks are asked for
{
double pos = mono_pos + (Constants::C13C12_MASSDIFF_U / static_cast<double>(charge));
p.setMZ(pos);
p.setIntensity(pre_int_NH3_);
if (add_metainfo_)
{
ion_names.emplace_back("[M+H]-NH3");
}
if (add_charges_)
{
charges.push_back(charge);
}
spectrum.push_back(p);
}
}
void TheoreticalSpectrumGeneratorXLMS::addKLinkedIonPeaks_(PeakSpectrum & spectrum, DataArrays::IntegerDataArray & charges, DataArrays::StringDataArray & ion_names, AASequence & peptide, Size link_pos, double precursor_mass, bool frag_alpha, int charge) const
{
double mono_weight = precursor_mass;
// link_pos can be zero, if the cross-link is N-terminal
if (link_pos > 0)
{
mono_weight -= peptide.getPrefix(link_pos).getMonoWeight(Residue::BIon);
}
else
{
return; // this fragment type is not necessary for links on peptide terminal residues
}
// same here for C-terminal links
if (link_pos < peptide.size())
{
mono_weight -= peptide.getSuffix(peptide.size() - link_pos - 1).getMonoWeight(Residue::XIon);
}
else
{
return;
}
mono_weight += Constants::PROTON_MASS_U * static_cast<double>(charge);
if (mono_weight < 0)
{
return;
}
double pos(mono_weight / static_cast<double>(charge));
Peak1D p;
p.setMZ(pos);
p.setIntensity(1.0);
spectrum.push_back(p);
// here the ion type is reversed compared to other peak types,
// because for this special ion type, it would not make sense to call it alpha$y(n)-alpha$a(n)
// Only one residue is left of the fragmented Peptide, so we call it a RES-linked beta
String ion_type;
String ion_name;
if (add_metainfo_)
{
if (frag_alpha)
{
ion_type = "beta";
}
else
{
ion_type = "alpha";
}
int l_pos = link_pos;
if (l_pos < 1)
{
l_pos = 0;
}
ion_name = "[" + peptide[l_pos].getOneLetterCode() + "-linked-" + ion_type + "]";
ion_names.push_back(ion_name);
}
if (add_charges_)
{
charges.push_back(charge);
}
if (add_isotopes_ && max_isotope_ >= 2) // add second isotopic peak with fast method, if two or more peaks are asked for
{
pos += Constants::C13C12_MASSDIFF_U / static_cast<double>(charge);
p.setMZ(pos);
spectrum.push_back(p);
if (add_metainfo_)
{
ion_names.push_back(ion_name);
}
if (add_charges_)
{
charges.push_back(charge);
}
}
}
void TheoreticalSpectrumGeneratorXLMS::addXLinkIonLosses_(PeakSpectrum & spectrum, DataArrays::IntegerDataArray& charges, DataArrays::StringDataArray& ion_names, double mono_weight, double intensity, int charge, String ion_name, LossIndex & losses) const
{
Peak1D p;
p.setIntensity(intensity * rel_loss_intensity_);
if (losses.has_H2O_loss)
{
double mass_with_loss = mono_weight - loss_H2O_;
if (mass_with_loss > 0.0)
{
p.setMZ(mass_with_loss / static_cast<double>(charge));
if (add_metainfo_)
{
// remove final bracket, insert loss name and add the bracket again
ion_names.emplace_back(ion_name.prefix(ion_name.size()-1) + "-H2O1]");
}
if (add_charges_)
{
charges.push_back(charge);
}
spectrum.push_back(p);
}
}
if (losses.has_NH3_loss)
{
double mass_with_loss = mono_weight - loss_NH3_;
if (mass_with_loss > 0.0)
{
p.setMZ(mass_with_loss / static_cast<double>(charge));
if (add_metainfo_)
{
// remove final bracket, insert loss name and add the bracket again
ion_names.emplace_back(ion_name.prefix(ion_name.size()-1) + "-H3N1]");
}
if (add_charges_)
{
charges.push_back(charge);
}
spectrum.push_back(p);
}
}
}
void TheoreticalSpectrumGeneratorXLMS::getXLinkIonSpectrum(PeakSpectrum & spectrum, OPXLDataStructs::ProteinProteinCrossLink & crosslink, bool frag_alpha, int mincharge, int maxcharge) const
{
PeakSpectrum::IntegerDataArray charges;
PeakSpectrum::StringDataArray ion_names;
if (add_charges_)
{
if (!spectrum.getIntegerDataArrays().empty())
{
charges = spectrum.getIntegerDataArrays()[0];
}
charges.setName("charge");
}
if (add_metainfo_)
{
if (!spectrum.getStringDataArrays().empty())
{
ion_names = spectrum.getStringDataArrays()[0];
}
ion_names.setName(Constants::UserParam::IonNames);
}
std::vector< LossIndex > forward_losses;
std::vector< LossIndex > backward_losses;
LossIndex losses_peptide2;
if (!crosslink.alpha)
{
return;
}
AASequence alpha = *crosslink.alpha;
AASequence beta;
if (crosslink.beta) { beta = *crosslink.beta; }
if (add_losses_)
{
if (frag_alpha)
{
losses_peptide2 = getBackwardLosses_(beta)[0];
forward_losses = getForwardLosses_(alpha);
backward_losses = getBackwardLosses_(alpha);
}
else
{
losses_peptide2 = getBackwardLosses_(alpha)[0];
forward_losses = getForwardLosses_(beta);
backward_losses = getBackwardLosses_(beta);
}
}
for (Int z = mincharge; z <= maxcharge; ++z)
{
if (add_b_ions_)
{
addXLinkIonPeaks_(spectrum, charges, ion_names, crosslink, frag_alpha, Residue::BIon, forward_losses, backward_losses, losses_peptide2, z);
}
if (add_y_ions_)
{
addXLinkIonPeaks_(spectrum, charges, ion_names, crosslink, frag_alpha, Residue::YIon, forward_losses, backward_losses, losses_peptide2, z);
}
if (add_a_ions_)
{
addXLinkIonPeaks_(spectrum, charges, ion_names, crosslink, frag_alpha, Residue::AIon, forward_losses, backward_losses, losses_peptide2, z);
}
if (add_x_ions_)
{
addXLinkIonPeaks_(spectrum, charges, ion_names, crosslink, frag_alpha, Residue::XIon, forward_losses, backward_losses, losses_peptide2, z);
}
if (add_c_ions_)
{
addXLinkIonPeaks_(spectrum, charges, ion_names, crosslink, frag_alpha, Residue::CIon, forward_losses, backward_losses, losses_peptide2, z);
}
if (add_z_ions_)
{
addXLinkIonPeaks_(spectrum, charges, ion_names, crosslink, frag_alpha, Residue::ZIon, forward_losses, backward_losses, losses_peptide2, z);
}
if (add_k_linked_ions_ && !beta.empty())
{
double precursor_mass = alpha.getMonoWeight() + crosslink.cross_linker_mass;
precursor_mass += beta.getMonoWeight();
AASequence peptide;
Size link_pos;
if (frag_alpha)
{
peptide = alpha;
link_pos = crosslink.cross_link_position.first;
}
else
{
peptide = beta;
link_pos = crosslink.cross_link_position.second;
}
addKLinkedIonPeaks_(spectrum, charges, ion_names, peptide, link_pos, precursor_mass, frag_alpha, z);
}
}
if (add_precursor_peaks_)
{
double precursor_mass = alpha.getMonoWeight() + crosslink.cross_linker_mass;
if (!beta.empty())
{
precursor_mass += beta.getMonoWeight();
}
addPrecursorPeaks_(spectrum, charges, ion_names, precursor_mass, maxcharge);
}
if (add_charges_)
{
if (!spectrum.getIntegerDataArrays().empty())
{
spectrum.getIntegerDataArrays()[0] = charges;
}
else
{
spectrum.getIntegerDataArrays().push_back(charges);
}
}
if (add_metainfo_)
{
if (!spectrum.getStringDataArrays().empty())
{
spectrum.getStringDataArrays()[0] = ion_names;
}
else
{
spectrum.getStringDataArrays().push_back(ion_names);
}
}
spectrum.sortByPosition();
return;
}
  // Adds cross-link containing fragment ions of one ion series (a/b/c or x/y/z)
  // for the fragmented peptide of a cross-link. These fragments contain the
  // linker and the intact second peptide, so masses are computed by starting
  // from the full precursor mass and subtracting residues of the fragmented
  // peptide one at a time (from the far terminus towards the link position).
  // forward_losses / backward_losses hold cumulative neutral-loss flags per
  // prefix / suffix of the fragmented peptide; losses_peptide2 holds the losses
  // of the intact second peptide.
  void TheoreticalSpectrumGeneratorXLMS::addXLinkIonPeaks_(PeakSpectrum & spectrum, DataArrays::IntegerDataArray & charges, DataArrays::StringDataArray & ion_names, OPXLDataStructs::ProteinProteinCrossLink & crosslink, bool frag_alpha, Residue::ResidueType res_type, std::vector< LossIndex > & forward_losses, std::vector< LossIndex > & backward_losses, LossIndex & losses_peptide2, int charge) const
  {
    if (!crosslink.alpha || crosslink.alpha->empty())
    {
      cout << "Warning: Attempt at creating XLink Ions Spectrum from empty string!" << endl;
      return;
    }
    AASequence alpha = *crosslink.alpha;
    AASequence beta;
    if (crosslink.beta) { beta = *crosslink.beta; }
    // full precursor mass: both peptides plus the linker (beta is empty for mono-links)
    double precursor_mass = alpha.getMonoWeight() + crosslink.cross_linker_mass;
    if (!beta.empty())
    {
      precursor_mass += beta.getMonoWeight();
    }
    String ion_type;
    AASequence peptide;   // the peptide being fragmented
    AASequence peptide2;  // the intact partner peptide
    Size link_pos;
    if (frag_alpha)
    {
      ion_type = "alpha|xi";
      peptide = alpha;
      peptide2 = beta;
      link_pos = crosslink.cross_link_position.first;
    }
    else
    {
      ion_type = "beta|xi";
      peptide = beta;
      peptide2 = alpha;
      link_pos = crosslink.cross_link_position.second;
    }
    // pick the configured intensity for this ion series; c and x ions need at
    // least two residues (size checks throw otherwise)
    double intensity(1);
    switch (res_type)
    {
      case Residue::AIon: intensity = a_intensity_; break;
      case Residue::BIon: intensity = b_intensity_; break;
      case Residue::CIon: if (peptide.size() < 2) throw Exception::InvalidSize(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, 1, "peptide must have at least 2 residues for c-ion generation"); intensity = c_intensity_; break;
      case Residue::XIon: if (peptide.size() < 2) throw Exception::InvalidSize(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, 1, "peptide must have at least 2 residues for x-ion generation"); intensity = x_intensity_; break;
      case Residue::YIon: intensity = y_intensity_; break;
      case Residue::ZIon: intensity = z_intensity_; break;
      default: break;
    }
    if (res_type == Residue::AIon || res_type == Residue::BIon || res_type == Residue::CIon)
    {
      // N-terminal series: start from the protonated precursor (as internal ion)
      double mono_weight((Constants::PROTON_MASS_U * static_cast<double>(charge)) + precursor_mass - Residue::getInternalToFull().getMonoWeight());
      if (peptide.hasCTerminalModification())
      {
        mono_weight -= peptide.getCTerminalModification()->getDiffMonoMass();
      }
      // adjust mass to given residue type
      switch (res_type)
      {
        case Residue::AIon: mono_weight += Residue::getInternalToAIon().getMonoWeight(); break;
        case Residue::BIon: mono_weight += Residue::getInternalToBIon().getMonoWeight(); break;
        case Residue::CIon: mono_weight += Residue::getInternalToCIon().getMonoWeight(); break;
        default: break;
      }
      // subtract one residue at a time, from the C-terminus down to (but not
      // including) the linked residue — shorter fragments would lose the linker
      for (Size i = peptide.size()-1; i > link_pos; --i)
      {
        mono_weight -= peptide[i].getMonoWeight(Residue::Internal);
        double pos(mono_weight / static_cast<double>(charge));
        int frag_index = i;
        addPeak_(spectrum, charges, ion_names, pos, intensity, res_type, frag_index, charge, ion_type);
        if (add_losses_ && forward_losses.size() >= i)
        {
          String ion_name = "[" + ion_type + "$" + String(Residue::residueTypeToIonLetter(res_type)) + String(frag_index) + "]";
          // losses of the intact partner peptide combined with the cumulative
          // losses of the remaining prefix of the fragmented peptide
          LossIndex losses = losses_peptide2;
          losses.has_H2O_loss = losses_peptide2.has_H2O_loss || forward_losses[i-1].has_H2O_loss;
          losses.has_NH3_loss = losses_peptide2.has_NH3_loss || forward_losses[i-1].has_NH3_loss;
          addXLinkIonLosses_(spectrum, charges, ion_names, mono_weight, intensity, charge, ion_name, losses);
        }
        if (add_isotopes_ && max_isotope_ >= 2) // add second isotopic peak with fast method, if two or more peaks are asked for
        {
          pos += Constants::C13C12_MASSDIFF_U / static_cast<double>(charge);
          addPeak_(spectrum, charges, ion_names, pos, intensity, res_type, frag_index, charge, ion_type);
        }
      }
    }
    else // if (res_type == Residue::XIon || res_type == Residue::YIon || res_type == Residue::ZIon)
    {
      // whole mass of both peptides + cross-link (or peptide + mono-link), converted to an internal ion
      double mono_weight((Constants::PROTON_MASS_U * static_cast<double>(charge)) + precursor_mass - Residue::getInternalToFull().getMonoWeight()); // whole mass
      if (peptide.hasNTerminalModification())
      {
        mono_weight -= peptide.getNTerminalModification()->getDiffMonoMass();
      }
      // adjust mass to given residue type
      switch (res_type)
      {
        case Residue::XIon: mono_weight += Residue::getInternalToXIon().getMonoWeight(); break;
        case Residue::YIon: mono_weight += Residue::getInternalToYIon().getMonoWeight(); break;
        case Residue::ZIon: mono_weight += Residue::getInternalToZIon().getMonoWeight(); break;
        default: break;
      }
      // subtract one residue at a time, from the N-terminus up to (but not
      // including) the linked residue
      for (Size i = 0; i < link_pos; ++i)
      {
        mono_weight -= peptide[i].getMonoWeight(Residue::Internal);
        double pos(mono_weight / static_cast<double>(charge));
        // fragment index counts from the C-terminus for x/y/z ions
        int frag_index = peptide.size() - 1 - i;
        addPeak_(spectrum, charges, ion_names, pos, intensity, res_type, frag_index, charge, ion_type);
        if (add_losses_ && backward_losses.size() >= i+2)
        {
          String ion_name = "[" + ion_type + "$" + String(Residue::residueTypeToIonLetter(res_type)) + String(frag_index) + "]";
          // losses of the intact partner peptide combined with the cumulative
          // losses of the remaining suffix of the fragmented peptide
          LossIndex losses = losses_peptide2;
          losses.has_H2O_loss = losses_peptide2.has_H2O_loss || backward_losses[i+1].has_H2O_loss;
          losses.has_NH3_loss = losses_peptide2.has_NH3_loss || backward_losses[i+1].has_NH3_loss;
          addXLinkIonLosses_(spectrum, charges, ion_names, mono_weight, intensity, charge, ion_name, losses);
        }
        if (add_isotopes_ && max_isotope_ >= 2) // add second isotopic peak with fast method, if two or more peaks are asked for
        {
          pos += Constants::C13C12_MASSDIFF_U / static_cast<double>(charge);
          addPeak_(spectrum, charges, ion_names, pos, intensity, res_type, frag_index, charge, ion_type);
        }
      }
    }
    return;
  }
std::vector< TheoreticalSpectrumGeneratorXLMS::LossIndex > TheoreticalSpectrumGeneratorXLMS::getForwardLosses_(AASequence & peptide) const
{
// this gives us a "forward set" with incremental losses from the first to the last residue
std::vector< LossIndex > ion_losses(peptide.size());
ion_losses[0] = loss_db_.at(peptide[0].getOneLetterCode());
for (Size i = 1; i < peptide.size(); ++i)
{
ion_losses[i].has_H2O_loss = ion_losses[i-1].has_H2O_loss || loss_db_.at(peptide[i].getOneLetterCode()).has_H2O_loss;
ion_losses[i].has_NH3_loss = ion_losses[i-1].has_NH3_loss || loss_db_.at(peptide[i].getOneLetterCode()).has_NH3_loss;
}
return ion_losses;
}
std::vector< TheoreticalSpectrumGeneratorXLMS::LossIndex > TheoreticalSpectrumGeneratorXLMS::getBackwardLosses_(AASequence & peptide) const
{
// this gives us a "backward set" with incremental losses from the last to the first residue
std::vector< LossIndex > ion_losses(peptide.size());
ion_losses[ion_losses.size()-1] = loss_db_.at(peptide[peptide.size()-1].getOneLetterCode());
for (Size i = ion_losses.size()-1; i > 0; --i)
{
ion_losses[i-1].has_H2O_loss = ion_losses[i].has_H2O_loss || loss_db_.at(peptide[i-1].getOneLetterCode()).has_H2O_loss;
ion_losses[i-1].has_NH3_loss = ion_losses[i].has_NH3_loss || loss_db_.at(peptide[i-1].getOneLetterCode()).has_NH3_loss;
}
return ion_losses;
}
void TheoreticalSpectrumGeneratorXLMS::updateMembers_()
{
add_b_ions_ = param_.getValue("add_b_ions").toBool();
add_y_ions_ = param_.getValue("add_y_ions").toBool();
add_a_ions_ = param_.getValue("add_a_ions").toBool();
add_c_ions_ = param_.getValue("add_c_ions").toBool();
add_x_ions_ = param_.getValue("add_x_ions").toBool();
add_z_ions_ = param_.getValue("add_z_ions").toBool();
add_first_prefix_ion_ = param_.getValue("add_first_prefix_ion").toBool();
add_losses_ = param_.getValue("add_losses").toBool();
add_metainfo_ = param_.getValue("add_metainfo").toBool();
add_charges_ = param_.getValue("add_charges").toBool();
add_isotopes_ = param_.getValue("add_isotopes").toBool();
add_precursor_peaks_ = param_.getValue("add_precursor_peaks").toBool();
add_abundant_immonium_ions_ = param_.getValue("add_abundant_immonium_ions").toBool();
a_intensity_ = static_cast<double>(param_.getValue("a_intensity"));
b_intensity_ = static_cast<double>(param_.getValue("b_intensity"));
c_intensity_ = static_cast<double>(param_.getValue("c_intensity"));
x_intensity_ = static_cast<double>(param_.getValue("x_intensity"));
y_intensity_ = static_cast<double>(param_.getValue("y_intensity"));
z_intensity_ = static_cast<double>(param_.getValue("z_intensity"));
max_isotope_ = static_cast<Int>(param_.getValue("max_isotope"));
rel_loss_intensity_ = static_cast<double>(param_.getValue("relative_loss_intensity"));
pre_int_ = static_cast<double>(param_.getValue("precursor_intensity"));
pre_int_H2O_ = static_cast<double>(param_.getValue("precursor_H2O_intensity"));
pre_int_NH3_ = static_cast<double>(param_.getValue("precursor_NH3_intensity"));
add_k_linked_ions_ = param_.getValue("add_k_linked_ions").toBool();
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/ProteaseDigestion.cpp | .cpp | 5,603 | 162 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow, Xiao Liang $
// $Authors: Marc Sturm, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/ProteaseDigestion.h>
#include <OpenMS/CHEMISTRY/ProteaseDB.h>
#include <OpenMS/SYSTEM/File.h>
#include <algorithm>
#include <boost/regex.hpp>
#include <limits>
using namespace std;
namespace OpenMS
{
void ProteaseDigestion::setEnzyme(const String& enzyme_name)
{
enzyme_ = ProteaseDB::getInstance()->getEnzyme(enzyme_name);
re_.reset(new boost::regex(enzyme_->getRegEx()));
}
  // Checks whether the peptide at [pos, pos+length) of 'protein' is a valid
  // digestion product under the current enzyme/specificity settings; thin
  // public wrapper around the protected isValidProduct_ implementation.
  bool ProteaseDigestion::isValidProduct(const String& protein,
                                         int pos,
                                         int length,
                                         bool ignore_missed_cleavages,
                                         bool allow_nterm_protein_cleavage,
                                         bool allow_random_asp_pro_cleavage) const
  {
    return isValidProduct_(protein, pos, length, ignore_missed_cleavages, allow_nterm_protein_cleavage, allow_random_asp_pro_cleavage);
  }
bool ProteaseDigestion::isValidProduct(const AASequence& protein,
int pep_pos,
int pep_length,
bool ignore_missed_cleavages,
bool allow_nterm_protein_cleavage,
bool allow_random_asp_pro_cleavage) const
{
String seq = protein.toUnmodifiedString();
return isValidProduct_(seq, pep_pos, pep_length, ignore_missed_cleavages, allow_nterm_protein_cleavage, allow_random_asp_pro_cleavage);
}
Size ProteaseDigestion::peptideCount(const AASequence& protein)
{
// For unspecific cleavage every cutting position may be skipped. Thus, we get (n + 1) \choose 2 products.
if (enzyme_->getName() == UnspecificCleavage)
{
return (protein.size() + 1) * protein.size() / 2;
};
std::vector<int> pep_positions = tokenize_(protein.toUnmodifiedString());
Size count = pep_positions.size();
// missed cleavages
Size sum = count;
for (Size i = 1; i < count; ++i)
{
if (i > missed_cleavages_)
{
break;
}
sum += count - i;
}
return sum;
}
Size ProteaseDigestion::digest(const AASequence& protein, vector<AASequence>& output, Size min_length, Size max_length) const
{
// initialization
output.clear();
std::vector<std::pair<size_t,size_t>> idcs; // small overhead filling intermediate vector first and iterating again
Size wrong_size = digest(protein, idcs, min_length, max_length);
output.reserve(idcs.size());
std::transform(idcs.begin(), idcs.end(), std::back_inserter(output),
[&protein](std::pair<size_t, size_t>& start_end)
{
return protein.getSubsequence(start_end.first, UInt(start_end.second - start_end.first));
}
);
return wrong_size;
}
  // Digests 'protein' and stores the [start, end) index pairs of the resulting
  // peptides in 'output' (fully cleaved first, then per missed-cleavage level,
  // then semi-specific variants if enabled). Returns the number of products
  // discarded because they fall outside [min_length, max_length].
  Size ProteaseDigestion::digest(const AASequence& protein, vector<std::pair<size_t,size_t>>& output, Size min_length, Size max_length) const
  {
    // initialization
    output.clear();
    // verify if currently set specificity is supported
    if (! (specificity_ == Specificity::SPEC_FULL ||
        specificity_ == Specificity::SPEC_SEMI))
    {
      throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
        String("Specificity value set on current ProteaseDigestion object is not supported by ProteaseDigestion::digest()."), String(specificity_));
    }
    // disable max length filter by setting to maximum length
    if (max_length == 0 || max_length > protein.size())
    {
      max_length = protein.size();
    }
    // unspecific cleavage: every position cuts, i.e. unlimited missed cleavages
    Size mc = (enzyme_->getName() == UnspecificCleavage) ? std::numeric_limits<Size>::max() : missed_cleavages_;
    Size wrong_size(0);
    // naive cleavage sites
    std::vector<int> pep_positions = tokenize_(protein.toUnmodifiedString());
    pep_positions.push_back(protein.size()); // positions now contains 0, x1, ... xn, end
    Size count = pep_positions.size();
    // fully cleaved peptides: each pair of adjacent cleavage positions
    Size begin = pep_positions[0];
    for (Size i = 1; i < count; ++i)
    {
      Size l = pep_positions[i] - begin;
      if (l >= min_length && l <= max_length)
      {
        output.emplace_back(begin, pep_positions[i]);
      }
      else
      {
        ++wrong_size;
      }
      begin = pep_positions[i];
    }
    // missed cleavages
    if (pep_positions.size() > 1 && mc != 0) // there is at least one cleavage site (in addition to last position)!
    {
      // generate fragments with missed cleavages: level 'mcs' spans mcs+1
      // adjacent fully-cleaved peptides
      for (Size mcs = 1; ((mcs <= mc) && (mcs < count - 1)); ++mcs)
      {
        begin = pep_positions[0];
        for (Size j = 1; j < count - mcs; ++j)
        {
          Size l = pep_positions[j + mcs] - begin;
          if (l >= min_length && l <= max_length)
          {
            output.emplace_back(begin, pep_positions[j + mcs]);
          }
          else
          {
            ++wrong_size;
          }
          begin = pep_positions[j];
        }
      }
    }
    // semi-specific variants
    if (specificity_ == SPEC_SEMI)
    {
      wrong_size = wrong_size + semiSpecificDigestion_(pep_positions, output, min_length, max_length);
    }
    return wrong_size;
  }
} //namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/ProteaseDB.cpp | .cpp | 2,168 | 79 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Xiao Liang $
// $Authors: Xiao Liang, Chris Bielow $
// --------------------------------------------------------------------------
//
#include <OpenMS/CHEMISTRY/ProteaseDB.h>
#include <fstream>
using namespace std;
namespace OpenMS
{
  // Constructs the singleton enzyme database, loading all protease definitions
  // from the shipped CHEMISTRY/Enzymes.xml resource via the DigestionEnzymeDB base.
  ProteaseDB::ProteaseDB():
    DigestionEnzymeDB<DigestionEnzymeProtein, ProteaseDB>("CHEMISTRY/Enzymes.xml")
  {
  }
void ProteaseDB::getAllXTandemNames(vector<String>& all_names) const
{
all_names.clear();
for (ConstEnzymeIterator it = const_enzymes_.begin(); it != const_enzymes_.end(); ++it)
{
if (!(*it)->getXTandemID().empty())
{
all_names.push_back((*it)->getName());
}
}
}
void ProteaseDB::getAllCometNames(vector<String>& all_names) const
{
all_names.clear();
for (ConstEnzymeIterator it = const_enzymes_.begin(); it != const_enzymes_.end(); ++it)
{
if ((*it)->getCometID() != -1)
{
all_names.push_back((*it)->getName());
}
}
}
void ProteaseDB::getAllOMSSANames(vector<String>& all_names) const
{
all_names.clear();
for (ConstEnzymeIterator it = const_enzymes_.begin(); it != const_enzymes_.end(); ++it)
{
if ((*it)->getOMSSAID() != -1)
{
all_names.push_back((*it)->getName());
}
}
}
void ProteaseDB::getAllMSGFNames(vector<String>& all_names) const
{
all_names.clear();
for (ConstEnzymeIterator it = const_enzymes_.begin(); it != const_enzymes_.end(); ++it)
{
if ((*it)->getMSGFID() != -1) // MS-GF+ starts enzyme numbering at 0
{
all_names.push_back((*it)->getName());
}
}
}
void ProteaseDB::writeTSV(String const& filename)
{
std::ofstream ofs(filename, std::ofstream::out);
ofs << "OpenMS_AllowedEnzymes" << "\n";
for (ConstEnzymeIterator it = const_enzymes_.begin(); it != const_enzymes_.end(); ++it)
{
ofs << (*it)->getName() << "\n";
}
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/SpectrumAnnotator.cpp | .cpp | 19,023 | 494 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Mathias Walzer $
// $Authors: Mathias Walzer $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/SpectrumAnnotator.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/MATH/MathFunctions.h>
using namespace std;
namespace OpenMS
{
const boost::regex SpectrumAnnotator::nt_regex_("[a,b,c][[:digit:]]+[+]*");
const boost::regex SpectrumAnnotator::ct_regex_("[x,y,z][[:digit:]]+[+]*");
const boost::regex SpectrumAnnotator::noloss_regex_("[a,b,c,x,y,z][[:digit:]]+[+]*");
const boost::regex SpectrumAnnotator::seriesposition_regex_("[a,b,c,x,y,z]([[:digit:]]+)[+,-]*[[:word:]]*[+]*");
SpectrumAnnotator::SpectrumAnnotator() :
DefaultParamHandler("SpectrumAnnotator")
{
defaults_.setValue("basic_statistics", "true", "If set, meta values for peak_number, sum_intensity, matched_ion_number, matched_intensity are added");
defaults_.setValidStrings("basic_statistics", {"true","false"});
/** adds meta values
peak_number
sum_intensity
matched_ion_number
matched_intensity
*/
defaults_.setValue("list_of_ions_matched", "true", "If set, meta values for matched_ions are added");
defaults_.setValidStrings("list_of_ions_matched", {"true","false"});
/** adds meta values
matched_ions
*/
defaults_.setValue("max_series", "true", "If set, meta values for max_series_type, max_series_size are added");
defaults_.setValidStrings("max_series", {"true","false"});
/** adds meta values
max_series_type
max_series_size
*/
defaults_.setValue("S/N_statistics", "true", "If set to 1 isotope peaks of the product ion peaks are added");
defaults_.setValidStrings("S/N_statistics", {"true","false"});
/** adds meta values
sn_by_matched_intensity
sn_by_median_intensity
*/
defaults_.setValue("precursor_statistics", "true", "If set, meta values for precursor_in_ms2 are added");
defaults_.setValidStrings("precursor_statistics", {"true","false"});
/** adds meta values
precursor_in_ms2
*/
defaults_.setValue("topNmatch_fragmenterrors", unsigned(7), "If set n > 0, meta values for topN_meanfragmenterror, topN_MSEfragmenterror, topN_stddevfragmenterror are added");
/** adds meta values
topN_meanfragmenterror
topN_MSEfragmenterror
topN_stddevfragmenterror
*/
// TODO right now topN matched, but additional information would be gained if the topN would be assembled from NOT ONLY the matched (high intense peak not identified?)
defaults_.setValue("fragmenterror_statistics", "true", "If set, meta values for median_fragment_error, IQR_fragment_error are added");
defaults_.setValidStrings("fragmenterror_statistics", {"true","false"});
/** adds meta values
median_fragment_error
IQR_fragment_error
*/
defaults_.setValue("terminal_series_match_ratio", "true", "If set, meta values for NTermIonCurrentRatio, CTermIonCurrentRatio are added");
defaults_.setValidStrings("terminal_series_match_ratio", {"true","false"});
/** adds meta values
NTermIonCurrentRatio
CTermIonCurrentRatio
*/
defaultsToParam_();
}
SpectrumAnnotator::SpectrumAnnotator(const SpectrumAnnotator & rhs) :
DefaultParamHandler(rhs)
{
}
SpectrumAnnotator & SpectrumAnnotator::operator=(const SpectrumAnnotator & rhs)
{
if (this != &rhs)
{
DefaultParamHandler::operator=(rhs);
}
return *this;
}
SpectrumAnnotator::~SpectrumAnnotator() = default;
  // Aligns 'spec' against the theoretical spectrum of the peptide hit and writes
  // per-peak annotations (ion name, charge, absolute m/z error) into the
  // spectrum's data arrays, replacing any existing arrays.
  // NOTE(review): assumes 'tg' is configured to produce string and integer data
  // arrays (ion names / charges) at index 0 — confirm add_metainfo-style options
  // are enabled on the generator passed in by the caller.
  void SpectrumAnnotator::annotateMatches(PeakSpectrum& spec, const PeptideHit& ph, const TheoreticalSpectrumGenerator& tg, const SpectrumAlignment& sa) const
  {
    PeakSpectrum theoretical_spec;
    vector<pair<Size, Size> > al;
    // theoretical fragments are generated for charges 1 up to min(hit charge, 2)
    const Int zmin = 1;
    const Int zmax = 2;
    tg.getSpectrum(theoretical_spec, ph.getSequence(), zmin, min(ph.getCharge(), zmax));
    OPENMS_PRECONDITION(theoretical_spec.isSorted(), "TheoreticalSpectrumGenerator::getSpectrum did not yield a sorted spectrum!")
    // alignment requires both spectra sorted by position
    if (!spec.isSorted()) { spec.sortByPosition(); }
    sa.getSpectrumAlignment(al, theoretical_spec, spec); // peaks from theor. may be matched to none or one in spec!
    // per-peak annotation arrays, default-initialized for unmatched peaks
    PeakSpectrum::FloatDataArray error_annotations;
    error_annotations.setName("IonMatchError");
    error_annotations.resize(spec.size());
    PeakSpectrum::StringDataArray type_annotations;
    type_annotations.setName("IonNames");
    type_annotations.resize(spec.size());
    PeakSpectrum::IntegerDataArray charge_annotations;
    charge_annotations.setName("Charges");
    charge_annotations.resize(spec.size());
    // transfer annotation of each matched theoretical peak (it->first) to the
    // corresponding experimental peak (it->second)
    for (auto it = al.begin(); it != al.end(); ++it)
    {
      error_annotations[it->second] = std::fabs(spec[it->second].getMZ() - theoretical_spec[it->first].getMZ());
      type_annotations[it->second] = theoretical_spec.getStringDataArrays()[0][it->first];
      charge_annotations[it->second] = theoretical_spec.getIntegerDataArrays()[0][it->first];
    }
    // record the tolerance settings used for the alignment on the spectrum
    const Param& sap = sa.getParameters();
    spec.setMetaValue("fragment_mass_tolerance", sap.getValue("tolerance"));
    spec.setMetaValue("fragment_mass_tolerance_ppm", sap.getValue("is_relative_tolerance").toBool());
    spec.setFloatDataArrays(PeakSpectrum::FloatDataArrays(1, std::move(error_annotations)));
    spec.setStringDataArrays(PeakSpectrum::StringDataArrays(1, std::move(type_annotations)));
    spec.setIntegerDataArrays(PeakSpectrum::IntegerDataArrays(1, std::move(charge_annotations)));
  }
void SpectrumAnnotator::addIonMatchStatistics(PeptideIdentification& pi, MSSpectrum& spec, const TheoreticalSpectrumGenerator& tg, const SpectrumAlignment& sa) const
{
if (spec.empty()) return;
for (auto ph = pi.getHits().begin(); ph != pi.getHits().end(); ++ph)
{
annotateMatches(spec, *ph, tg, sa);
spec.sortByIntensity();
StringList ions;
double sum_intensity = 0;
double match_intensity = 0;
vector<double> fragmenterrors, intensities, mzs; // sorted by ascending intensity via spec.sortByIntensity for topN statistics
fragmenterrors.reserve(spec.size());
intensities.reserve(spec.size());
mzs.reserve(spec.size());
double nint = 0;
double cint = 0;
StringList allowed_types = ListUtils::create<String>("y,b,a,c,x,z");
map<String, vector<bool> > ion_series;
for (StringList::iterator st = allowed_types.begin(); st != allowed_types.end(); ++st)
{
ion_series.insert(make_pair(*st, vector<bool>(ph->getSequence().size()-1, false)));
}
PeakSpectrum::StringDataArray type_annotations;
PeakSpectrum::FloatDataArray error_annotations;
PeakSpectrum::IntegerDataArray charge_annotations;
for (auto it = spec.getStringDataArrays().begin(); it != spec.getStringDataArrays().end(); ++it)
{
if (it->getName() == "IonNames") { type_annotations = *it; break; }
}
for (auto it = spec.getFloatDataArrays().begin(); it != spec.getFloatDataArrays().end(); ++it)
{
if (it->getName() == "IonMatchError") { error_annotations = *it; break; }
}
for (auto it = spec.getIntegerDataArrays().begin(); it != spec.getIntegerDataArrays().end(); ++it)
{
if (it->getName() == "Charges") { charge_annotations = *it; break; }
}
for (size_t i = 0; i < spec.size(); ++i)
{
sum_intensity += spec[i].getIntensity();
String ion_name = type_annotations.at(i);
if (ion_name.empty()) continue;
fragmenterrors.push_back(error_annotations.at(i));
intensities.push_back(spec[i].getIntensity());
match_intensity += spec[i].getIntensity();
mzs.push_back(spec[i].getMZ());
if (terminal_series_match_ratio_)
{
if (boost::regex_match(ion_name, nt_regex_))
{
nint += spec[i].getIntensity();
}
else if (boost::regex_match(ion_name, ct_regex_))
{
cint += spec[i].getIntensity();
}
}
if (max_series_) // without loss max series is sometimes pretty crummy
{
const String& ion_type = ion_name.prefix(1);
boost::cmatch what;
if (boost::regex_match(ion_name.c_str(), what, seriesposition_regex_) &&
ListUtils::contains(allowed_types, ion_type))
{
// what[0] contains the whole string
// what[1] contains the response code
try
{
int i = std::atoi(what[1].first);
ion_series[ion_type].at(i-1) = true;
}
catch (std::out_of_range&)
{
OPENMS_LOG_WARN << "Note: Ions of " << ion_type << ion_name.substr(1).remove('+').toInt()
<< " will be ignored for max_series " << ph->getSequence().toString() << endl;
continue;
}
}
}
ions.emplace_back(std::move(ion_name));
}
if (basic_statistics_)
{
ph->setMetaValue("matched_ions", ListUtils::concatenate(ions, ","));
ph->setMetaValue("matched_intensity", match_intensity);
ph->setMetaValue("matched_ion_number", ions.size());
ph->setMetaValue("peak_number", spec.size());
ph->setMetaValue("sum_intensity", sum_intensity);
}
if (terminal_series_match_ratio_)
{
ph->setMetaValue("NTermIonCurrentRatio", nint/match_intensity);
ph->setMetaValue("CTermIonCurrentRatio", cint/match_intensity);
}
if (topNmatch_fragmenterrors_)
{
if (fragmenterrors.empty())
{
ph->setMetaValue("median_fragment_error", 0);
ph->setMetaValue("IQR_fragment_error", 0);
ph->setMetaValue("topN_meanfragmenterror", 0);
ph->setMetaValue("topN_MSEfragmenterror", 0);
ph->setMetaValue("topN_stddevfragmenterror", 0);
}
else
{
vector<double> fe(fragmenterrors);
std::size_t mid = fe.size() / 2;
std::size_t lq = fe.size() / 4;
std::size_t uq = lq + mid;
std::nth_element(fe.begin(), fe.begin()+mid, fe.end());
if (fe.size() % 2 != 0)
{
ph->setMetaValue("median_fragment_error", fe[mid]);
}
else
{
double right2mid = fe[mid];
std::nth_element(fe.begin(), fe.begin() + mid-1, fe.end());
ph->setMetaValue("median_fragment_error", (right2mid + fe[mid-1]) / 2.0);
}
std::nth_element(fe.begin(), fe.begin() + lq, fe.end());
std::nth_element(fe.begin() + lq + 1, fe.begin() + mid, fe.end());
std::nth_element(fe.begin() + mid + 1, fe.begin() + uq, fe.end());
ph->setMetaValue("IQR_fragment_error", fe[uq]-fe[lq]);
vector<double> topn_fe;
topn_fe.resize(fragmenterrors.size());
std::reverse_copy(fragmenterrors.begin(), fragmenterrors.end(), topn_fe.begin()); // fragmenterrors is sortByIntensity before, get TopN from the back of the vector
topn_fe.resize(topNmatch_fragmenterrors_);
double mean = Math::mean(topn_fe.begin(), topn_fe.end());
double stdev = Math::sd(topn_fe.begin(), topn_fe.end(), mean);
double sq_sum = 0;
for (std::vector<double>::iterator it = topn_fe.begin(); it != topn_fe.end(); ++it)
{
sq_sum += *it * *it;
}
double m_sq_sum = (sq_sum / topn_fe.size());
ph->setMetaValue("topN_meanfragmenterror", mean);
ph->setMetaValue("topN_MSEfragmenterror", m_sq_sum);
ph->setMetaValue("topN_stddevfragmenterror", stdev);
}
}
if (max_series_)
{
String max_series;
int max_stretch = 0;
for (map<String, vector<bool> >::iterator tt = ion_series.begin(); tt != ion_series.end(); ++tt)
{
int stretch = 0;
for (vector<bool>::iterator it = tt->second.begin(); it != tt->second.end(); ++it)
{
if (*it)
{
++stretch;
}
else
{
stretch = 0;
}
if (stretch > max_stretch)
{
max_stretch = stretch;
max_series = tt->first;
}
}
}
ph->setMetaValue("max_series_type", max_series);
ph->setMetaValue("max_series_size", max_stretch);
}
//TODO parent peak intensity complement pairs number
if (SN_statistics_)
{
float sn_by_matched_intensity = (match_intensity / ions.size()) /
((sum_intensity-match_intensity) / (spec.size()-ions.size()));
if (spec.size() - ions.size() == 0)
{
sn_by_matched_intensity = 0;
}
ph->setMetaValue("sn_by_matched_intensity", sn_by_matched_intensity);
float median = 0;
// spec is already in sorted order of intensity
if (spec.size() % 2 == 0)
median = (spec[spec.size() / 2 - 1].getIntensity() + spec[spec.size() / 2].getIntensity()) / 2;
else
median = spec[spec.size() / 2].getIntensity();
float sign_int= 0;
float nois_int = 0;
size_t sign_count= 0;
size_t nois_count = 0;
for (MSSpectrum::const_iterator pt = spec.begin(); pt != spec.end(); ++pt)
{
if (pt->getIntensity() <= median)
{
++nois_count;
nois_int += pt->getIntensity();
}
else
{
++sign_count;
sign_int += pt->getIntensity();
}
}
float sn_by_median_intensity = (sign_int / sign_count) / (nois_int / nois_count);
if (nois_count == 0 || sign_count == 0)
{
sn_by_median_intensity = 0;
}
ph->setMetaValue("sn_by_median_intensity", sn_by_median_intensity);
}
//TODO charge related features might be worth looking at in the future
if (precursor_statistics_)
{
bool precursor = false;
for (std::vector<Precursor>::const_iterator pit = spec.getPrecursors().begin(); pit != spec.getPrecursors().end(); ++pit)
{
spec.sortByPosition();
//TODO what about precursor_H2O_loss and precursor_NH3_loss
if (spec.findNearest(pit->getMZ(),sa.getParameters().getValue("tolerance"),
sa.getParameters().getValue("tolerance")) > -1)
{
precursor = true;
}
}
ph->setMetaValue("precursor_in_ms2", precursor);
}
//TODO add "FragmentArray"s
Param sap = sa.getParameters();
pi.setMetaValue("fragment_match_tolerance", (double)sap.getValue("tolerance"));
}
}
/// Annotate the peaks of @p spec on the given peptide hit.
/// A theoretical spectrum is generated for the hit's sequence (charges 1 to
/// min(hit charge, 2)), aligned against a position-sorted copy of @p spec,
/// and one PeakAnnotation is stored per matched peak (or per every peak if
/// @p include_unmatched_peaks is set; unmatched peaks then keep an empty
/// annotation string and charge 0).
/// @param ph Peptide hit providing the sequence; receives the annotations
/// @param spec Measured spectrum (left unmodified; a sorted copy is used)
/// @param tg Generator used to build the theoretical spectrum
/// @param sa Aligner matching theoretical peaks to measured peaks
/// @param include_unmatched_peaks Keep unmatched measured peaks as well
void SpectrumAnnotator::addPeakAnnotationsToPeptideHit(PeptideHit& ph, const PeakSpectrum& spec, const TheoreticalSpectrumGenerator& tg, const SpectrumAlignment& sa, bool include_unmatched_peaks) const
{
PeakSpectrum theoretical_spec;
vector<pair<Size, Size>> al;
// fragment charges considered for the theoretical spectrum
const Int zmin = 1;
const Int zmax = 2;
tg.getSpectrum(theoretical_spec, ph.getSequence(), zmin, min(ph.getCharge(), zmax));
OPENMS_PRECONDITION(theoretical_spec.isSorted(), "TheoreticalSpectrumGenerator::getSpectrum did not yield a sorted spectrum!")
// alignment requires both spectra sorted by position; work on a copy so the
// caller's spectrum is not reordered
PeakSpectrum spec_copy = spec;
if (!spec_copy.isSorted()) { spec_copy.sortByPosition(); }
sa.getSpectrumAlignment(al, theoretical_spec, spec_copy);
// Get the ion name and charge data arrays from the theoretical spectrum
const PeakSpectrum::StringDataArray* ion_names = nullptr;
const PeakSpectrum::IntegerDataArray* ion_charges = nullptr;
for (const auto& sda : theoretical_spec.getStringDataArrays())
{
if (sda.getName() == "IonNames")
{
ion_names = &sda;
break;
}
}
for (const auto& ida : theoretical_spec.getIntegerDataArrays())
{
if (ida.getName() == "Charges")
{
ion_charges = &ida;
break;
}
}
// Build a map from spectrum index to alignment match (if any)
std::map<Size, Size> spec_idx_to_theo_idx;
for (const auto& match : al)
{
// match.first = index into theoretical spectrum, match.second = measured
spec_idx_to_theo_idx[match.second] = match.first;
}
// Build the PeakAnnotation vector
std::vector<PeptideHit::PeakAnnotation> peak_annotations;
if (include_unmatched_peaks)
{
// Include all spectrum peaks
peak_annotations.reserve(spec_copy.size());
for (Size i = 0; i < spec_copy.size(); ++i)
{
PeptideHit::PeakAnnotation pa;
pa.mz = spec_copy[i].getMZ();
pa.intensity = spec_copy[i].getIntensity();
// Check if this peak was matched
auto it = spec_idx_to_theo_idx.find(i);
if (it != spec_idx_to_theo_idx.end())
{
Size theo_idx = it->second;
// bounds checks guard against data arrays shorter than the spectrum
if (ion_names != nullptr && theo_idx < ion_names->size())
{
pa.annotation = (*ion_names)[theo_idx];
}
if (ion_charges != nullptr && theo_idx < ion_charges->size())
{
pa.charge = (*ion_charges)[theo_idx];
}
}
// Unmatched peaks will have default empty annotation and charge=0
peak_annotations.push_back(std::move(pa));
}
}
else
{
// Only include matched peaks (original behavior)
peak_annotations.reserve(al.size());
for (const auto& match : al)
{
PeptideHit::PeakAnnotation pa;
pa.mz = spec_copy[match.second].getMZ();
pa.intensity = spec_copy[match.second].getIntensity();
if (ion_names != nullptr && match.first < ion_names->size())
{
pa.annotation = (*ion_names)[match.first];
}
if (ion_charges != nullptr && match.first < ion_charges->size())
{
pa.charge = (*ion_charges)[match.first];
}
peak_annotations.push_back(std::move(pa));
}
}
ph.setPeakAnnotations(std::move(peak_annotations));
}
/// Synchronize the cached member flags with the current parameter values so
/// the per-PSM annotation loops do not query the Param object repeatedly.
void SpectrumAnnotator::updateMembers_()
{
  // simple on/off switches for the individual statistics groups
  basic_statistics_ = param_.getValue("basic_statistics").toBool();
  fragmenterror_statistics_ = param_.getValue("fragmenterror_statistics").toBool();
  list_of_ions_matched_ = param_.getValue("list_of_ions_matched").toBool();
  max_series_ = param_.getValue("max_series").toBool();
  precursor_statistics_ = param_.getValue("precursor_statistics").toBool();
  SN_statistics_ = param_.getValue("S/N_statistics").toBool();
  terminal_series_match_ratio_ = param_.getValue("terminal_series_match_ratio").toBool();
  // number of top-intensity matches used for fragment error statistics
  topNmatch_fragmenterrors_ = (unsigned)param_.getValue("topNmatch_fragmenterrors");
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/RibonucleotideDB.cpp | .cpp | 17,618 | 469 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/RibonucleotideDB.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/SYSTEM/File.h>
#include <QtCore/QFile>
#include <QtCore/QTextStream>
#include <nlohmann/json.hpp>
// This is the only place wherein Nlohmann/json is used. It is updating its requirements to work with explicit
// conversions only.
using namespace std;
/// @brief Specialize nlohmann::adl_serializer for OpenMS::EmpiricalFormula so that nlohmann::json
// knows how to (de)serialize it: from_json constructs an EmpiricalFormula from a JSON string,to_json
// converts it back to its string form, all explicitly.
namespace nlohmann {
  /// Teach nlohmann::json how to (de)serialize OpenMS::EmpiricalFormula
  /// explicitly: a JSON string is parsed into a formula, and a formula is
  /// serialized back to its string representation.
  template <>
  struct adl_serializer<OpenMS::EmpiricalFormula>
  {
    static void from_json(const json& j, OpenMS::EmpiricalFormula& ef)
    {
      // construct the formula directly from the JSON string payload
      ef = OpenMS::EmpiricalFormula(j.get<std::string>());
    }
    static void to_json(json& j, const OpenMS::EmpiricalFormula& ef)
    {
      j = ef.toString();
    }
  };
}
namespace OpenMS
{
// A structure for storing a pointer to a ribo in the database, as well as the possible alternatives if it is ambiguous (eg a methyl group that for which we can't determine the localization)
// A structure for storing a pointer to a ribo in the database, as well as the
// possible alternatives if it is ambiguous (e.g. a methyl group for which we
// can't determine the localization).
struct ParsedEntry_
{
  unique_ptr<Ribonucleotide> ribo; // owning pointer to the parsed entry
  String alternative_1; // short code of the first alternative (empty if unambiguous)
  String alternative_2; // short code of the second alternative
  // An entry is ambiguous iff alternatives were parsed (alternative_1 set).
  // Marked const: the query does not modify the entry.
  bool isAmbiguous () const { return !alternative_1.empty(); }
};
/// Construct the database: loads the Modomics JSON first, then the custom
/// TSV modifications (load order matters, since ambiguous entries reference
/// previously registered codes via getRibonucleotide()).
RibonucleotideDB::RibonucleotideDB() : max_code_length_(0)
{
// Modomics mods were retreived from https://www.genesilico.pl/modomics/api/modifications
readFromJSON_("CHEMISTRY/Modomics.json");
OPENMS_LOG_DEBUG << "Loading modomics RNA Modifications from "<< File::find("CHEMISTRY/Modomics.json") <<"\n";
// We still use the old tsv format for custom mods
readFromFile_("CHEMISTRY/Custom_RNA_modifications.tsv");
OPENMS_LOG_DEBUG << "Loading custom RNA Modifications from "<< File::find("CHEMISTRY/Custom_RNA_modifications.tsv") <<"\n";
// NOTE(review): the two blocks below only LOG that user modification files
// exist -- no readFromFile_/readFromJSON_ call follows. Confirm whether
// actually loading these files was intended.
if (File::exists("CHEMISTRY/User_Modifications.tsv"))
{
OPENMS_LOG_INFO << "Loading user specified Modifications from TSV\n";
}
if (File::exists("CHEMISTRY/User_Modifications.json"))
{
OPENMS_LOG_INFO << "Loading user specified Modifications from JSON\n";
}
}
/// Access the process-wide singleton instance.
/// The function-local static is initialized in a thread-safe way (C++11);
/// the instance is deliberately leaked (never deleted).
RibonucleotideDB* RibonucleotideDB::getInstance()
{
static RibonucleotideDB* db_ = new RibonucleotideDB(); // Meyers' singleton -> thread safe
return db_;
}
// All valid JSON ribonucleotides must at minimum have elements defining name, short_name, reference_moiety, and formula
// @throw Exception::MissingInformation if some of the required info for the entry is missing
// All valid JSON ribonucleotides must at minimum have elements defining name,
// short_name, reference_moiety, and formula.
// @throw Exception::MissingInformation if some of the required info for the entry is missing
void entryIsWellFormed_(const nlohmann::json::value_type& entry)
{
  // check the required keys in a fixed order (same order and messages as the
  // former copy-pasted checks, collapsed into one loop)
  static const char* const required_keys[] = {"name", "short_name", "reference_moiety", "formula"};
  for (const char* const key : required_keys)
  {
    if (entry.find(key) == entry.cend())
    {
      String msg = String("\"") + key + "\" entry missing for ribonucleotide";
      throw Exception::MissingInformation(__FILE__, __LINE__,
                                          OPENMS_PRETTY_FUNCTION, msg);
    }
  }
}
// Return the Empirical formula for the ribo with a base-loss. Ideally we store these in the JSON, otherwise its guessed from the code.
// Return the Empirical formula for the ribo with a base-loss. Ideally we store
// these in the JSON, otherwise it is guessed from the short code.
EmpiricalFormula getBaseLossFormula_(const nlohmann::json::value_type& entry)
{
String code = entry.at("short_name").get<std::string>();
// If we have an explicitly defined baseloss_formula
if (auto e = entry.find("baseloss_formula"); e != entry.cend() && !e->is_null())
{
return EmpiricalFormula(e->get<std::string>());
}
//TODO: Calculate base loss formula from SMILES
else // If we don't have a defined baseloss_formula calculate it from our shortCode
{
if (code.hasPrefix('d')) // handle deoxyribose, possibly with methyl mod
{
return EmpiricalFormula("C5H10O4");
}
else if (code.hasSuffix('m')) // mod. attached to the ribose, not base
{
return EmpiricalFormula("C6H12O5");
}
// reachable despite the branch above: a code ending in "m*" has last
// character '*', so it does not match hasSuffix('m')
else if (code.hasSuffix("m*")) // check if we have both a sulfer and a 2'-O methyl
{
return EmpiricalFormula("C6H12O5");
}
else if (code.hasSuffix("Ar(p)") || code.hasSuffix("Gr(p)"))
{
return EmpiricalFormula("C10H19O21P");
}
else
{
// default: plain ribose
return EmpiricalFormula("C5H10O5");
}
}
}
// Generate an entry from a JSON object.
ParsedEntry_ parseEntry_(const nlohmann::json::value_type& entry)
{
ParsedEntry_ parsed;
auto ribo = std::make_unique<Ribonucleotide>();
ribo->setName(entry.at("name").template get<std::string>());
String code = entry.at("short_name").get<std::string>();
ribo->setCode(code);
// NewCode doesn't exist any more, we use the same shortname for compatibility
ribo->setNewCode(code);
// Handle moiety
if (entry["reference_moiety"].size() == 1 && string(entry.at("reference_moiety").at(0)).length() == 1)
{
ribo->setOrigin(string(entry.at("reference_moiety").at(0))[0]);
ribo->setTermSpecificity(Ribonucleotide::ANYWHERE); // due to format changes we get the terminal specificity from the moieties, modomics contains base specific terminals, but they can be represented by the wild-card ones
}
else if (entry["reference_moiety"].size() == 4) // if all moieties are possible it might be a terminal
{
ribo->setOrigin('X'); // Use X as any unmodified
if (code.hasSuffix("pN"))
{
ribo->setTermSpecificity(Ribonucleotide::FIVE_PRIME);
}
else if (code.hasSuffix("p") && code.hasPrefix("N"))
{
ribo->setTermSpecificity(Ribonucleotide::THREE_PRIME);
}
else
{
ribo->setTermSpecificity(Ribonucleotide::ANYWHERE); //other nonspecific mods
}
}
else
{
String msg = "we don't support bases with multiple reference moieties or multicharacter moieties.";
throw Exception::InvalidValue(__FILE__, __LINE__,
OPENMS_PRETTY_FUNCTION, msg, entry["reference_moiety"].dump());
}
if (entry.find("abbrev") != entry.cend())
{
ribo->setHTMLCode(entry.at("abbrev").get<std::string>()); //This is the single letter unicode representation that only SOME mods have
}
ribo->setFormula(entry.at("formula").get<OpenMS::EmpiricalFormula>());
if (auto e = entry.find("mass_avg"); e != entry.cend() && !e->is_null())
{
ribo->setAvgMass(e->get<double>());
}
if (std::abs(ribo->getAvgMass() - ribo->getFormula().getAverageWeight()) >= 0.01)
{
OPENMS_LOG_DEBUG << "Average mass of " << code << " differs substantially from its formula mass.\n";
}
if (auto e = entry.find("mass_monoiso"); e != entry.cend() && !e->is_null())
{
ribo->setMonoMass(e->get<double>());
}
else
{
OPENMS_LOG_DEBUG << "Monoisotopic mass of " << code << " is not defined. Calculating from formula\n";
ribo->setMonoMass(ribo->getFormula().getMonoWeight());
}
if ( std::abs(ribo->getMonoMass() - ribo->getFormula().getMonoWeight()) >= 0.01)
{
OPENMS_LOG_DEBUG << "Average mass of " << code << " differs substantially from its formula mass.\n";
}
// Handle base loss formula
ribo->setBaselossFormula(getBaseLossFormula_(entry));
// Handle ambiguities
if (code.hasSuffix('?') || code.hasSuffix("?*")) // ambiguity code -> fill the map
{
if (!entry.contains("alternatives"))
{
String msg = "Ambiguous mod without alternative found in " + code;
throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, code, msg);
}
parsed.alternative_1 = string(entry.at("alternatives").at(0)), parsed.alternative_2 = string(entry.at("alternatives").at(1)); // we always have exactly two ambiguities
}
parsed.ribo = std::move(ribo);
return parsed;
}
// Read from a JSON file into a RibonucleotideDB
void RibonucleotideDB::readFromJSON_(const std::string& path)
{
using json = nlohmann::json;
String full_path = File::find(path);
// the input file is Unicode encoded, so we need Qt to read it:
QFile file(full_path.toQString());
if (!file.open(QIODevice::ReadOnly | QIODevice::Text))
{
throw Exception::FileNotReadable(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, full_path);
}
QTextStream source(&file);
source.setAutoDetectUnicode(true);
Size line_count = 0;
json mod_obj;
try
{
mod_obj = json::parse(String(source.readAll()));
}
catch (Exception::ParseError& e)
{
OPENMS_LOG_ERROR << "Error: Failed to parse Modomics JSON. Reason:\n" << e.getName() << " - " << e.what() << endl;
throw;
}
for (auto& element : mod_obj)
{
line_count++;
try
{
// Throw an exception if we are straight up missing necessary elements of the JSON
entryIsWellFormed_(element);
ParsedEntry_ entry = parseEntry_(element);
unique_ptr<Ribonucleotide> ribo = std::move(entry.ribo);
if (entry.isAmbiguous()) // Handle the ambiguity map
{
ambiguity_map_[ribo->getCode()] = make_pair(getRibonucleotide(entry.alternative_1), getRibonucleotide(entry.alternative_2));
}
// there are some weird exotic mods in modomics that don't have codes. We ignore them
if (ribo->getCode() != "")
{
code_map_[ribo->getCode()] = ribonucleotides_.size();
max_code_length_ = max(max_code_length_, ribo->getCode().size());
ribonucleotides_.push_back(std::move(ribo));
}
}
catch (Exception::BaseException& e)
{
OPENMS_LOG_ERROR << "Error: Failed to parse input element " << line_count << ". Reason:\n" << e.getName() << " - " << e.what() << "\nSkipping this line." << endl;
}
}
}
// Read entries from a TSV file
void RibonucleotideDB::readFromFile_(const std::string& path)
{
String full_path = File::find(path);
String header = "name\tshort_name\tnew_nomenclature\toriginating_base\trnamods_abbrev\thtml_abbrev\tformula\tmonoisotopic_mass\taverage_mass";
// the input file is Unicode encoded, so we need Qt to read it:
QFile file(full_path.toQString());
if (!file.open(QIODevice::ReadOnly | QIODevice::Text))
{
throw Exception::FileNotReadable(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, full_path);
}
QTextStream source(&file);
source.setAutoDetectUnicode(true);
Size line_count = 1;
String line = source.readLine();
while (line[0] == '#') // skip leading comments
{
line = source.readLine();
++line_count;
}
if (!line.hasPrefix(header)) // additional columns are allowed
{
String msg = "expected header line starting with: '" + header + "'";
throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, line, msg);
}
QChar prime(0x2032); // Unicode "prime" character
while (!source.atEnd())
{
line_count++;
QString row = source.readLine();
// replace all "prime" characters with apostrophes (e.g. in "5'", "3'"):
row.replace(prime, '\'');
try
{
unique_ptr<Ribonucleotide> ribo = parseRow_(row.toStdString(), line_count);
code_map_[ribo->getCode()] = ribonucleotides_.size();
max_code_length_ = max(max_code_length_, ribo->getCode().size());
ribonucleotides_.push_back(std::move(ribo));
}
catch (Exception::BaseException& e)
{
OPENMS_LOG_ERROR << "Error: Failed to parse input line " << line_count << ". Reason:\n" << e.getName() << " - " << e.what() << "\nSkipping this line." << endl;
}
}
}
//Parse a row in a TSV file
const unique_ptr<Ribonucleotide> RibonucleotideDB::parseRow_(const std::string& row, Size line_count)
{
vector<String> parts;
String(row).split('\t', parts);
if (parts.size() < 9)
{
String msg = "9 tab-separated fields expected, found " + String(parts.size()) + " in line " + String(line_count);
throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, row, msg);
}
unique_ptr<Ribonucleotide> ribo (new Ribonucleotide());
ribo->setName(parts[0]);
if (parts[1].hasSuffix("QtRNA")) // use just "Q" instead of "QtRNA"
{
ribo->setCode(parts[1].chop(4));
}
else
{
ribo->setCode(parts[1]);
}
ribo->setNewCode(parts[2]);
if (parts[3] == "preQ0base") // queuosine and its derivatives
{
ribo->setOrigin('G'); // queuosine replaces "G" in tRNA-Asp/Asn
}
else if (parts[3].size() == 1) // A, C, G, U
{
ribo->setOrigin(parts[3][0]);
}
// "parts[4]" is the Unicode equivalent to "parts[5]", so we can skip it
ribo->setHTMLCode(parts[5]);
if (!parts[6].empty() && (parts[6] != "-"))
{
ribo->setFormula(EmpiricalFormula(parts[6]));
}
if (!parts[7].empty() && (parts[7] != "None"))
{
ribo->setMonoMass(parts[7].toDouble());
if ((ribo->getMonoMass() == 0.0) && (!ribo->getFormula().isEmpty()))
{
ribo->setMonoMass(ribo->getFormula().getMonoWeight());
}
}
if (!parts[8].empty() && (parts[8] != "None"))
{
ribo->setAvgMass(parts[8].toDouble());
if ((ribo->getAvgMass() == 0.0) && (!ribo->getFormula().isEmpty()))
{
ribo->setAvgMass(ribo->getFormula().getAverageWeight());
}
}
// Modomics' "new code" contains information on terminal specificity:
if ((!parts[2].empty()) && parts[2].back() == 'N') // terminal mod., exception: "GN"
{
if (parts[2].hasSubstring("55") || (parts[2] == "N"))
{
ribo->setTermSpecificity(Ribonucleotide::FIVE_PRIME);
}
else if (parts[2].hasSubstring("33"))
{
ribo->setTermSpecificity(Ribonucleotide::THREE_PRIME);
}
}
else // default specificity is "ANYWHERE"; now set formula after base loss:
{
if (parts[1].front() == 'd') // handle deoxyribose, possibly with methyl mod
{
ribo->setBaselossFormula(EmpiricalFormula("C5H10O4"));
}
else if (parts[1].back() == 'm') // mod. attached to the ribose, not base
{
ribo->setBaselossFormula(EmpiricalFormula("C6H12O5"));
}
else if (parts[1].substr(parts[1].size() - 2) == "m*") // check if we have both a sulfer and a 2'-O methyl
{
ribo->setBaselossFormula(EmpiricalFormula("C6H12O5"));
}
else if (parts[1].back() == '?') // ambiguity code -> fill the map
{
if (parts.size() < 10)
{
String msg = "10th field expected for ambiguous modification in line " + String(line_count);
throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, row, msg);
}
String code1 = parts[9].prefix(' '), code2 = parts[9].suffix(' ');
ambiguity_map_[parts[1]] = make_pair(getRibonucleotide(code1), getRibonucleotide(code2));
}
else if ((parts[1] == "Ar(p)") || (parts[1] == "Gr(p)"))
{
ribo->setBaselossFormula(EmpiricalFormula("C10H19O21P"));
}
}
return ribo;
}
/// Look up a ribonucleotide by its exact short code.
/// @throw Exception::ElementNotFound for unknown codes
RibonucleotideDB::ConstRibonucleotidePtr RibonucleotideDB::getRibonucleotide(const std::string& code)
{
  const auto pos = code_map_.find(code);
  if (pos != code_map_.end())
  {
    return ribonucleotides_[pos->second].get();
  }
  throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, code);
}
/// Find the longest registered code that is a prefix of @p seq.
/// Tries prefixes from the maximum code length down to a single character.
/// @throw Exception::ElementNotFound if no prefix of @p seq is a known code
RibonucleotideDB::ConstRibonucleotidePtr RibonucleotideDB::getRibonucleotidePrefix(const std::string& seq)
{
  // substr() clamps the count, so this is safe for sequences shorter than
  // the longest code; shrink the candidate one character at a time
  for (std::string candidate = seq.substr(0, max_code_length_); !candidate.empty(); candidate.pop_back())
  {
    const auto pos = code_map_.find(candidate);
    if (pos != code_map_.end())
    {
      return ribonucleotides_[pos->second].get();
    }
  }
  throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, seq);
}
/// Return the two alternative ribonucleotides registered for an ambiguity
/// code (e.g. a methylation whose position could not be localized).
/// @throw Exception::ElementNotFound if @p code has no registered alternatives
pair<RibonucleotideDB::ConstRibonucleotidePtr, RibonucleotideDB::ConstRibonucleotidePtr> RibonucleotideDB::getRibonucleotideAlternatives(const std::string& code)
{
  const auto entry = ambiguity_map_.find(code);
  if (entry != ambiguity_map_.end())
  {
    return entry->second;
  }
  throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, code);
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/AAIndex.cpp | .cpp | 435 | 14 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/AAIndex.h>
namespace OpenMS
{
} // closing namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/ModificationsDB.cpp | .cpp | 30,247 | 907 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Andreas Bertsch $
// --------------------------------------------------------------------------
//
#include <OpenMS/CHEMISTRY/ResidueModification.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/FORMAT/UnimodXMLFile.h>
#include <OpenMS/SYSTEM/File.h>
#include <OpenMS/CHEMISTRY/Residue.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/Macros.h>
#include <fstream>
#include <limits>
#include <utility>
using namespace std;
namespace OpenMS
{
/// Decide whether @p residue is compatible with the origin of @p curr_mod.
/// '?' stands for "unspecified residue"; 'X' and '.' act as wildcards.
bool ModificationsDB::residuesMatch_(const char residue, const ResidueModification* curr_mod) const
{
  const char origin = curr_mod->getOrigin();
  if (origin != 'X')
  {
    // match on equality, or when the queried residue is a wildcard
    return (origin == residue) || (residue == 'X') || (residue == '.') || (residue == '?');
  }
  // Origin is X, which usually means the modification can sit on any amino
  // acid. The exception: a user-defined modification with literal origin 'X'
  // must not match a different concrete residue, so that e.g. PEPN[400] and
  // PEPX[400] (very different masses) are kept apart.
  if (curr_mod->isUserDefined() && residue != '?' && residue != origin)
  {
    return false;
  }
  return true;
}
// Definition of the static flag; flipped to true once a ModificationsDB has
// been constructed (see the constructor and isInstantiated()).
bool ModificationsDB::is_instantiated_ = false;
/// Access the process-wide singleton, created on first use with the default
/// data files. Function-local static initialization is thread-safe (C++11).
ModificationsDB* ModificationsDB::getInstance()
{
static ModificationsDB* db_ = ModificationsDB::initializeModificationsDB();
return db_;
}
/// Create the singleton with custom data files on FIRST call; later calls
/// (including via getInstance()) return the already-created instance and
/// silently ignore their arguments.
ModificationsDB* ModificationsDB::initializeModificationsDB(OpenMS::String unimod_file, OpenMS::String custommod_file, OpenMS::String psimod_file, OpenMS::String xlmod_file)
{
// Currently its not possible to check for double initialization since getInstance() also calls this function.
// if (is_instantiated_)
// {
// throw Exception::FailedAPICall(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Cannot initialize ModificationsDB twice");
// }
static ModificationsDB* db_ = new ModificationsDB(std::move(unimod_file), std::move(custommod_file), std::move(psimod_file), std::move(xlmod_file));
return db_;
}
/// Construct the database from the given files; empty file names skip the
/// corresponding source. Load order: Unimod XML, custom Unimod XML, then the
/// OBO files (PSI-MOD, XLMOD).
ModificationsDB::ModificationsDB(const OpenMS::String& unimod_file, const OpenMS::String& custommod_file, const OpenMS::String& psimod_file, const OpenMS::String& xlmod_file)
{
if (!unimod_file.empty())
{
readFromUnimodXMLFile(unimod_file);
}
if(!custommod_file.empty())
{
readFromUnimodXMLFile(custommod_file);
}
if (!psimod_file.empty())
{
readFromOBOFile(psimod_file);
}
if (!xlmod_file.empty())
{
readFromOBOFile(xlmod_file);
}
// mark the singleton as created (queried via isInstantiated())
is_instantiated_ = true;
}
/// Destructor: releases all owned ResidueModification objects.
/// mods_ holds raw owning pointers, so they must be deleted explicitly.
ModificationsDB::~ModificationsDB()
{
  // clear the name index first; it only holds non-owning pointers
  modification_names_.clear();
  // range-for instead of an explicit iterator loop (idiomatic, same effect)
  for (auto* mod : mods_)
  {
    delete mod;
  }
}
/// Whether a ModificationsDB instance has been constructed in this process.
bool ModificationsDB::isInstantiated()
{
return is_instantiated_;
}
/// Number of modifications currently registered.
/// Reads mods_ inside the shared critical section so concurrent writers
/// (e.g. adding user-defined mods) cannot race with the size query.
Size ModificationsDB::getNumberOfModifications() const
{
Size s;
#pragma omp critical (OpenMS_ModificationsDB)
{
s = mods_.size();
}
return s;
}
/// Look up a modification by name, optionally constrained by residue and
/// terminal specificity.
/// @param mod_name_ Name to search; a lowercase "unimod:N" prefix is
///   normalized to "UniMod:N" (Skyline compatibility)
/// @param multiple_matches Set to true if more than one candidate matched;
///   in that case the LAST matching entry is returned (the loop keeps
///   overwriting @c mod without breaking)
/// @param residue Optional residue constraint (first character is used;
///   empty means "unspecified", i.e. '?')
/// @param term_spec Terminal specificity; NUMBER_OF_TERM_SPECIFICITY acts as
///   a wildcard
/// @return The matching modification, or nullptr if none was found
const ResidueModification* ModificationsDB::searchModificationsFast(const String& mod_name_,
bool& multiple_matches,
const String& residue,
ResidueModification::TermSpecificity term_spec
) const
{
const ResidueModification* mod(nullptr);
String mod_name = mod_name_;
multiple_matches = false;
char res = '?'; // empty
if (!residue.empty()) res = residue[0];
#pragma omp critical(OpenMS_ModificationsDB)
{
bool found = true;
auto modifications = modification_names_.find(mod_name);
if (modifications == modification_names_.end())
{
// Try to fix things, Skyline for example uses unimod:10 and not UniMod:10 syntax
if (mod_name.size() > 6 && mod_name.prefix(6).toLower() == "unimod")
{
mod_name = "UniMod" + mod_name.substr(6, mod_name.size() - 6);
}
modifications = modification_names_.find(mod_name);
if (modifications == modification_names_.end())
{
OPENMS_LOG_WARN << OPENMS_PRETTY_FUNCTION << "Modification not found: " << mod_name << endl;
found = false;
}
}
int nr_mods = 0;
if (found)
{
for (const auto& it : modifications->second)
{
// candidate must satisfy both the residue and the term-spec filter
if ( residuesMatch_(res, it) &&
(term_spec == ResidueModification::NUMBER_OF_TERM_SPECIFICITY ||
(term_spec == it->getTermSpecificity())))
{
mod = it;
nr_mods++;
}
}
}
if (nr_mods > 1) multiple_matches = true;
}
return mod;
}
/// Find the database-owned instance equal to @p mod_in.
/// Candidates are selected via the full ID, then compared with operator==;
/// returns the first equal entry or nullptr if none exists.
const ResidueModification* ModificationsDB::searchModification(const ResidueModification& mod_in) const
{
const ResidueModification* mod(nullptr);
const String& mod_name = mod_in.getFullId();
#pragma omp critical(OpenMS_ModificationsDB)
{
bool found = true;
auto modifications = modification_names_.find(mod_name);
if (modifications == modification_names_.end())
{
OPENMS_LOG_WARN << OPENMS_PRETTY_FUNCTION << "Modification not found: " << mod_name << endl;
found = false;
}
if (found)
{
for (const auto& mod_indb : modifications->second)
{
if (mod_in == *mod_indb)
{
mod = mod_indb;
break;
}
}
}
}
return mod;
}
/// Direct access by index into the internal modification list.
/// Bounds are only checked via the precondition macro (debug builds).
const ResidueModification* ModificationsDB::getModification(Size index) const
{
OPENMS_PRECONDITION(index < mods_.size(), "Index out of bounds in ModificationsDB::getModification(Size index)." );
return mods_[index];
}
/// Collect ALL modifications matching @p mod_name_ (with the same
/// "unimod:"->"UniMod:" normalization as searchModificationsFast), filtered
/// by residue and terminal specificity.
/// @param mods Output set, cleared first; empty if nothing matched
/// @param residue Optional residue constraint (first character; empty = '?')
/// @param term_spec NUMBER_OF_TERM_SPECIFICITY acts as a wildcard
void ModificationsDB::searchModifications(set<const ResidueModification*>& mods,
const String& mod_name_,
const String& residue,
ResidueModification::TermSpecificity term_spec) const
{
mods.clear();
String mod_name = mod_name_;
char res = '?'; // empty
if (!residue.empty()) res = residue[0];
#pragma omp critical(OpenMS_ModificationsDB)
{
bool found = true;
auto modifications = modification_names_.find(mod_name);
if (modifications == modification_names_.end())
{
// Try to fix things, Skyline for example uses unimod:10 and not UniMod:10 syntax
if (mod_name.size() > 6 && mod_name.prefix(6).toLower() == "unimod")
{
mod_name = "UniMod" + mod_name.substr(6, mod_name.size() - 6);
}
modifications = modification_names_.find(mod_name);
if (modifications == modification_names_.end())
{
OPENMS_LOG_WARN << OPENMS_PRETTY_FUNCTION << "Modification not found: " << mod_name << endl;
found = false;
}
}
if (found)
{
for (const auto& it : modifications->second)
{
// same residue/term-spec filter as searchModificationsFast, but
// collecting every match instead of the last one
if ( residuesMatch_(res, it) &&
(term_spec == ResidueModification::NUMBER_OF_TERM_SPECIFICITY ||
(term_spec == it->getTermSpecificity())))
{
mods.insert(it);
}
}
}
}
}
/// Retrieve a single modification by name; throws if nothing matches.
/// If a residue is given without an explicit terminal specificity, a
/// residue-specific (ANYWHERE) search is tried first to resolve ambiguities
/// such as "Carbamidomethyl (N-term)" vs. "Carbamidomethyl (C)".
/// If several candidates match, the one returned by searchModificationsFast
/// is used and a warning is logged.
/// @throw Exception::InvalidValue if no modification matches
const ResidueModification* ModificationsDB::getModification(const String& mod_name, const String& residue, ResidueModification::TermSpecificity term_spec) const
{
const ResidueModification* mod(nullptr);
// if residue is specified, try residue-specific search first to avoid
// ambiguities (e.g. "Carbamidomethyl (N-term)"/"Carbamidomethyl (C)"):
bool multiple_matches = false;
if (!residue.empty() &&
(term_spec == ResidueModification::NUMBER_OF_TERM_SPECIFICITY))
{
mod = searchModificationsFast(mod_name, multiple_matches, residue,
ResidueModification::ANYWHERE);
}
if (mod == nullptr) mod = searchModificationsFast(mod_name, multiple_matches, residue, term_spec);
if (mod == nullptr)
{
String message = String("Retrieving the modification failed. It is not available for the residue '") + residue
+ "' and term specificity '" + ResidueModification().getTermSpecificityName(term_spec) + "'. ";
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, message, mod_name);
}
if (multiple_matches)
{
OPENMS_LOG_WARN << "Warning (ModificationsDB::getModification): more than one modification with name '" + mod_name + "', residue '" + residue + "', specificity '" + String(Int(term_spec)) << "' found, picking the first one only.";
// for (auto it = mods.begin(); it != mods.end(); ++it)
// {
// OPENMS_LOG_WARN << " " << (*it)->getFullId();
// }
OPENMS_LOG_WARN << "\n";
}
return mod;
}
// Thread-safe check whether a modification with the given name is registered.
bool ModificationsDB::has(const String & modification) const
{
  bool result = false;
#pragma omp critical(OpenMS_ModificationsDB)
  {
    result = (modification_names_.count(modification) > 0);
  }
  return result;
}
// Returns the index (into mods_) of the modification with the given name.
// Throws Exception::ElementNotFound if the name is unknown, ambiguous (maps
// to more than one modification), or the mapped pointer is not in mods_.
//
// BUGFIX: the previous implementation used three separate OpenMP critical
// sections (existence check, ambiguity check, index search), so another
// thread could modify the database between the check and the use
// (check-then-act race). All lookups now happen under a single lock;
// since throwing out of an 'omp critical' block is not allowed, the
// exceptions are raised after the lock is released.
Size ModificationsDB::findModificationIndex(const String & mod_name) const
{
  bool found(false);
  bool one_mod(true);
  Size index(numeric_limits<Size>::max());
#pragma omp critical(OpenMS_ModificationsDB)
  {
    auto it = modification_names_.find(mod_name);
    if (it != modification_names_.end())
    {
      found = true;
      if (it->second.size() > 1)
      {
        one_mod = false;
      }
      else
      {
        // linear scan: mods_ is small enough that this is not a bottleneck
        const ResidueModification* mod = *(it->second.begin());
        for (Size i = 0; i != mods_.size(); ++i)
        {
          if (mods_[i] == mod)
          {
            index = i;
            break;
          }
        }
      }
    }
  }
  if (!found)
  {
    throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Modification not found: " + mod_name);
  }
  if (!one_mod)
  {
    throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "More than one modification with name: " + mod_name);
  }
  if (index == numeric_limits<Size>::max())
  {
    throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Modification name found but modification not found: " + mod_name);
  }
  return index;
}
// Collects the full IDs of all modifications whose mass difference lies
// within max_error of 'mass', optionally restricted to a residue and a
// terminal specificity. Results are appended in database order.
void ModificationsDB::searchModificationsByDiffMonoMass(vector<String>& mods, double mass, double max_error, const String& residue, ResidueModification::TermSpecificity term_spec)
{
  mods.clear();
  // '?' encodes "no residue restriction"
  const char res = residue.empty() ? '?' : residue[0];
#pragma omp critical(OpenMS_ModificationsDB)
  {
    for (const auto& mod : mods_)
    {
      const bool mass_ok = fabs(mod->getDiffMonoMass() - mass) <= max_error;
      const bool spec_ok = (term_spec == ResidueModification::NUMBER_OF_TERM_SPECIFICITY) ||
                           (term_spec == mod->getTermSpecificity());
      if (mass_ok && residuesMatch_(res, mod) && spec_ok)
      {
        mods.push_back(mod->getFullId());
      }
    }
  }
}
// Pointer-returning overload: collects all modifications whose mass
// difference lies within max_error of 'mass', optionally restricted to a
// residue and a terminal specificity. Results are appended in database order.
void ModificationsDB::searchModificationsByDiffMonoMass(vector<const ResidueModification*>& mods, double mass, double max_error, const String& residue, ResidueModification::TermSpecificity term_spec)
{
  mods.clear();
  // '?' encodes "no residue restriction"
  const char res = residue.empty() ? '?' : residue[0];
#pragma omp critical(OpenMS_ModificationsDB)
  {
    for (const auto& mod : mods_)
    {
      const bool mass_ok = fabs(mod->getDiffMonoMass() - mass) <= max_error;
      const bool spec_ok = (term_spec == ResidueModification::NUMBER_OF_TERM_SPECIFICITY) ||
                           (term_spec == mod->getTermSpecificity());
      if (mass_ok && residuesMatch_(res, mod) && spec_ok)
      {
        mods.push_back(mod);
      }
    }
  }
}
// Like searchModificationsByDiffMonoMass, but results are sorted by
// increasing mass error (ties broken by database/insertion order).
//
// BUGFIX: the mapped type of the sorting map used to be 'const String&',
// i.e. a reference stored inside a std::map. Standard containers require a
// complete object type as mapped type (reference types do not meet the
// allocator/Erasable requirements), and the reference would dangle if
// getFullId() ever returned by value. Store the name by value instead.
void ModificationsDB::searchModificationsByDiffMonoMassSorted(vector<String>& mods, double mass, double max_error, const String& residue, ResidueModification::TermSpecificity term_spec)
{
  mods.clear();
  // key = (mass error, insertion counter): unique, ordered by error first
  std::map<std::pair<double, Size>, String> diff_idx2mods;
  char res = '?'; // '?' = no residue restriction
  if (!residue.empty()) res = residue[0];
  double diff = 0;
  Size cnt = 0;
#pragma omp critical(OpenMS_ModificationsDB)
  {
    for (auto const & m : mods_)
    {
      diff = fabs(m->getDiffMonoMass() - mass);
      if ((diff <= max_error) &&
          residuesMatch_(res, m) &&
          ((term_spec == ResidueModification::NUMBER_OF_TERM_SPECIFICITY) ||
           (term_spec == m->getTermSpecificity())))
      {
        diff_idx2mods.emplace(make_pair(diff, cnt++), m->getFullId());
      }
    }
  }
  // copy out in (error, insertion) order
  for (const auto& entry : diff_idx2mods)
  {
    mods.push_back(entry.second);
  }
}
// Pointer-returning overload: collects all matching modifications and
// returns them sorted by increasing mass error; ties are broken by
// insertion (database) order, which keeps the result deterministic.
void ModificationsDB::searchModificationsByDiffMonoMassSorted(vector<const ResidueModification*>& mods, double mass, double max_error, const String& residue, ResidueModification::TermSpecificity term_spec)
{
  mods.clear();
  // gather (error, insertion index) keyed hits first, sort afterwards;
  // keys are unique because the counter is part of the key
  std::vector< std::pair< std::pair<double, Size>, const ResidueModification*> > hits;
  const char res = residue.empty() ? '?' : residue[0];
  Size cnt = 0;
#pragma omp critical(OpenMS_ModificationsDB)
  {
    for (const auto& m : mods_)
    {
      const double diff = fabs(m->getDiffMonoMass() - mass);
      if ((diff <= max_error) &&
          residuesMatch_(res, m) &&
          ((term_spec == ResidueModification::NUMBER_OF_TERM_SPECIFICITY) ||
           (term_spec == m->getTermSpecificity())))
      {
        hits.emplace_back(std::make_pair(diff, cnt++), m);
      }
    }
  }
  std::sort(hits.begin(), hits.end(),
            [](const auto& a, const auto& b) { return a.first < b.first; });
  for (const auto& hit : hits)
  {
    mods.push_back(hit.second);
  }
}
// Returns the single modification whose mass difference is closest to
// 'mass' (strictly less than 'max_error' away), subject to the optional
// residue and terminal-specificity constraints; nullptr if none qualifies.
const ResidueModification* ModificationsDB::getBestModificationByDiffMonoMass(double mass, double max_error, const String& residue, ResidueModification::TermSpecificity term_spec)
{
  double min_error = max_error;
  const ResidueModification* mod = nullptr;
  char res = '?'; // '?' = no residue restriction
  if (!residue.empty())
  {
    res = residue[0];
  }
#pragma omp critical(OpenMS_ModificationsDB)
  {
    for (auto const & m : mods_)
    {
      // using less instead of less-or-equal will pick the first matching
      // modification of equally heavy modifications (in our case this is the
      // first matching UniMod entry)
      double mass_error = fabs(m->getDiffMonoMass() - mass);
      if ((mass_error < min_error) &&
          residuesMatch_(res, m) &&
          ((term_spec == ResidueModification::NUMBER_OF_TERM_SPECIFICITY) ||
           (term_spec == m->getTermSpecificity())))
      {
        min_error = mass_error;
        mod = m;
      }
    }
  }
  return mod;
}
// Loads all modifications from a Unimod XML file and registers each one in
// the name index under all of its known names. Ownership of the raw
// pointers created by UnimodXMLFile::load() is taken over by mods_.
void ModificationsDB::readFromUnimodXMLFile(const String& filename)
{
  vector<ResidueModification*> new_mods;
  UnimodXMLFile().load(filename, new_mods);
  for (auto & m : new_mods)
  {
    // create full ID based on other information:
    m->setFullId();
#pragma omp critical(OpenMS_ModificationsDB)
    {
      // e.g. Oxidation (M)
      modification_names_[m->getFullId()].insert(m);
      // e.g. Oxidation
      modification_names_[m->getId()].insert(m);
      // e.g. Oxidized
      modification_names_[m->getFullName()].insert(m);
      // e.g. UniMod:312
      modification_names_[m->getUniModAccession()].insert(m);
      mods_.push_back(m);
    }
  }
}
// Adds a modification to the database, taking ownership. If a modification
// with the same full ID is already registered, the argument is discarded
// (the unique_ptr deletes it on scope exit) and the existing entry is
// returned instead.
const ResidueModification* ModificationsDB::addModification(std::unique_ptr<ResidueModification> new_mod)
{
  const ResidueModification* ret;
#pragma omp critical(OpenMS_ModificationsDB)
  {
    auto it = modification_names_.find(new_mod->getFullId());
    if (it != modification_names_.end())
    {
      OPENMS_LOG_WARN << "Modification already exists in ModificationsDB. Skipping." << new_mod->getFullId() << endl;
      ret = *(it->second.begin()); // returning from omp critical is not allowed
    }
    else
    {
      // register under all known names; mods_ becomes the owner of the raw pointer
      modification_names_[new_mod->getFullId()].insert(new_mod.get());
      modification_names_[new_mod->getId()].insert(new_mod.get());
      modification_names_[new_mod->getFullName()].insert(new_mod.get());
      modification_names_[new_mod->getUniModAccession()].insert(new_mod.get());
      mods_.push_back(new_mod.get());
      new_mod.release(); // do not delete the object;
      ret = mods_.back();
    }
  }
  return ret;
}
// Adds a copy of the given modification to the database. If a modification
// with the same full ID is already registered, the existing entry is
// returned and nothing is added.
//
// BUGFIX: the previous implementation allocated the copy with 'new' before
// checking for duplicates and never freed it in the already-exists branch,
// leaking one ResidueModification per duplicate call. The copy is now only
// allocated once we know it will actually be stored.
const ResidueModification* ModificationsDB::addModification(const ResidueModification& new_mod)
{
  const ResidueModification* ret = nullptr;
#pragma omp critical(OpenMS_ModificationsDB)
  {
    auto it = modification_names_.find(new_mod.getFullId());
    if (it != modification_names_.end())
    {
      OPENMS_LOG_WARN << "Modification already exists in ModificationsDB. Skipping." << new_mod.getFullId() << endl;
      ret = *(it->second.begin()); // returning from omp critical is not allowed
    }
    else
    {
      // register under all known names; mods_ owns the allocated copy
      ResidueModification* copy = new ResidueModification(new_mod);
      modification_names_[copy->getFullId()].insert(copy);
      modification_names_[copy->getId()].insert(copy);
      modification_names_[copy->getFullName()].insert(copy);
      modification_names_[copy->getUniModAccession()].insert(copy);
      mods_.push_back(copy);
      ret = copy;
    }
  }
  return ret;
}
// Unconditionally adds a copy of the given modification (no duplicate
// check) and registers it under all of its known names: full ID, short ID,
// full name and UniMod accession. mods_ owns the allocated copy.
const ResidueModification* ModificationsDB::addNewModification_(const ResidueModification& new_mod)
{
  ResidueModification* copy = new ResidueModification(new_mod);
  const ResidueModification* ret = nullptr;
#pragma omp critical(OpenMS_ModificationsDB)
  {
    modification_names_[copy->getFullId()].insert(copy);
    modification_names_[copy->getId()].insert(copy);
    modification_names_[copy->getFullName()].insert(copy);
    modification_names_[copy->getUniModAccession()].insert(copy);
    mods_.push_back(copy);
    ret = copy;
  }
  return ret;
}
// Parses an OBO ontology file (PSI-MOD or XLMOD) and registers every term
// as one or more ResidueModifications (one per origin residue / terminus).
// Terms describing cross-links with two reaction sites are skipped here;
// they belong in CrossLinksDB. Terms that map to a UniMod entry are not
// duplicated: only their PSI-MOD accession is linked to the existing entry.
void ModificationsDB::readFromOBOFile(const String& filename)
{
  ResidueModification mod;
  // add multiple mods for multiple specificities
  //Map<String, ResidueModification> all_mods;
  multimap<String, ResidueModification> all_mods;
  ifstream is(File::find(filename).c_str());
  String line, line_wo_spaces, id;
  String origin = "";
  bool reading_cross_link = false;
  //parse file
  while (getline(is, line, '\n'))
  {
    line.trim();
    line_wo_spaces = line;
    line_wo_spaces.removeWhitespaces();
    if (line.empty() || line[0] == '!') //skip empty lines and comments
    {
      continue;
    }
    if (line_wo_spaces == "[Term]") //new term
    {
      // if the last [Term] was a cross-link (not a mono-link), then it does not belong in CrossLinksDB
      if (!id.empty() && !reading_cross_link) //store last term
      {
        // split into single residues and make unique (for XL-MS, where equal specificities for both sides are possible)
        vector<String> origins;
        origin.split(",", origins);
        std::sort(origins.begin(), origins.end());
        vector<String>::iterator unique_end = unique(origins.begin(), origins.end());
        origins.resize(distance(origins.begin(), unique_end));
        for (vector<String>::iterator orig_it = origins.begin(); orig_it != origins.end(); ++orig_it)
        {
          // we don't allow modifications with ambiguity codes as origin (except "X"):
          if ((orig_it->size() == 1) && (*orig_it != "B") && (*orig_it != "J") && (*orig_it != "Z"))
          {
            mod.setOrigin((*orig_it)[0]);
            all_mods.insert(make_pair(id, mod));
          }
        }
        // for mono-links from XLMOD.obo: terminal specificities are encoded
        // as pseudo-origins in the 'specificities' field
        if (origin.hasSubstring("ProteinN-term"))
        {
          mod.setTermSpecificity(ResidueModification::PROTEIN_N_TERM);
          mod.setOrigin('X');
          all_mods.insert(make_pair(id, mod));
        }
        if (origin.hasSubstring("ProteinC-term"))
        {
          mod.setTermSpecificity(ResidueModification::PROTEIN_C_TERM);
          mod.setOrigin('X');
          all_mods.insert(make_pair(id, mod));
        }
        // reset parser state for the next term
        id = "";
        origin = "";
        mod = ResidueModification();
      }
      else if (reading_cross_link) // re-initialize before reading next [Term]
      {
        id = "";
        origin = "";
        mod = ResidueModification();
        reading_cross_link = false;
      }
    }
    //new id line
    else if (line_wo_spaces.hasPrefix("id:"))
    {
      id = line.substr(line.find(':') + 1).trim();
      mod.setId(id);
      mod.setPSIMODAccession(id);
    }
    else if (line_wo_spaces.hasPrefix("name:"))
    {
      String name = line.substr(line.find(':') + 1).trim();
      mod.setFullName(name);
      // XLMOD terms use the human-readable name as their primary ID
      if (mod.getId().hasSubstring("XLMOD"))
      {
        mod.setName(name);
        mod.setId(name);
        mod.setFullName(name);
      }
    }
    else if (line_wo_spaces.hasPrefix("is_a:"))
    {
      // TODO
    }
    else if (line_wo_spaces.hasPrefix("def:"))
    {
      // definition line may carry a cross-reference to UniMod, e.g. [UniMod:35]
      line.remove('[');
      line.remove(']');
      line.remove(',');
      vector<String> split;
      line.split(' ', split);
      for (Size i = 0; i != split.size(); ++i)
      {
        if (split[i].hasPrefix("UniMod:"))
        {
          // Parse UniMod identifier to int
          String identifier = split[i].substr(7, split[i].size());
          mod.setUniModRecordId(identifier.toInt());
        }
      }
    }
    else if (line_wo_spaces.hasPrefix("comment:"))
    {
      // TODO
    }
    else if (line_wo_spaces.hasPrefix("synonym:"))
    {
      // synonyms are quoted; the PSI-MOD-label synonym becomes the short name
      vector<String> val_split;
      line.split('"', val_split);
      if (val_split.size() < 3)
      {
        throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, line, "missing \" characters to enclose argument!");
      }
      mod.addSynonym(val_split[1]);
      if (line_wo_spaces.hasSubstring("PSI-MOD-label"))
      {
        mod.setName(val_split[1]);
      }
    }
    else if (line_wo_spaces.hasPrefix("property_value:"))
    {
      // property values look like: property_value: Key: "value" xsd:type
      String val = line_wo_spaces.substr(15, line_wo_spaces.size() - 15);
      val.trim();
      if (val.hasSubstring("\"none\""))
      {
        continue;
      }
      vector<String> val_split;
      val.split('"', val_split);
      if (val_split.size() != 3)
      {
        throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, line, "missing \" characters to enclose argument!");
      }
      if (val.hasPrefix("DiffAvg:"))
      {
        mod.setDiffAverageMass(val_split[1].toDouble());
      }
      else if (val.hasPrefix("DiffFormula:"))
      {
        // re-split the original line: the formula may contain spaces that
        // were removed in line_wo_spaces
        vector<String> tmp_split;
        line.split('"', tmp_split);
        tmp_split[1].removeWhitespaces();
        mod.setDiffFormula(EmpiricalFormula(tmp_split[1]));
      }
      else if (val.hasPrefix("DiffMono:"))
      {
        mod.setDiffMonoMass(val_split[1].toDouble());
      }
      else if (val.hasPrefix("Formula:"))
      {
        mod.setFormula(val_split[1]);
      }
      else if (val.hasPrefix("MassAvg:"))
      {
        mod.setAverageMass(val_split[1].toDouble());
      }
      else if (val.hasPrefix("MassMono:"))
      {
        mod.setMonoMass(val_split[1].toDouble());
      }
      else if (val.hasPrefix("Origin:"))
      {
        // origin is applied when the term is stored (may list several residues)
        //mod.setOrigin(val_split[1]);
        origin = val_split[1];
      }
      else if (val.hasPrefix("Source:"))
      {
        mod.setSourceClassification(val_split[1]);
      }
      else if (val.hasPrefix("TermSpec:"))
      {
        mod.setTermSpecificity(val_split[1]);
      }
      // XLMOD specific fields
      else if (val.hasPrefix("reactionSites:"))
      {
        // two reaction sites = real cross-link -> not stored here
        if (val_split[1] == "2")
        {
          reading_cross_link = true;
        }
      }
      else if (val.hasPrefix("monoisotopicMass:"))
      {
        mod.setDiffMonoMass(val_split[1].toDouble());
      }
      else if (val.hasPrefix("specificities:"))
      {
        // TODO cross-linker specificities can be different for both chain sides, right now the union of both sides is used
        // Input parameters of the cross-link search tool make sure, that the chemistry is not violated
        origin = val_split[1];
        // remove brackets
        origin.remove('(');
        origin.remove(')');
        origin.substitute("&", ",");
      }
    }
  }
  if (!id.empty()) //store last term
  {
    // split into single residues and make unique (for XL-MS, where equal specificities for both sides are possible)
    vector<String> origins;
    origin.split(",", origins);
    std::sort(origins.begin(), origins.end());
    vector<String>::iterator unique_end = unique(origins.begin(), origins.end());
    origins.resize(distance(origins.begin(), unique_end));
    for (vector<String>::iterator orig_it = origins.begin(); orig_it != origins.end(); ++orig_it)
    {
      // we don't allow modifications with ambiguity codes as origin (except "X"):
      if ((orig_it->size() == 1) && (*orig_it != "B") && (*orig_it != "J") && (*orig_it != "Z"))
      {
        mod.setOrigin((*orig_it)[0]);
        all_mods.insert(make_pair(id, mod));
      }
    }
    // for mono-links from XLMOD.obo:
    // NOTE(review): this duplicated "store last term" block uses N_TERM /
    // C_TERM here, while the identical block inside the parse loop uses
    // PROTEIN_N_TERM / PROTEIN_C_TERM — looks inconsistent; confirm which
    // terminus specificity is intended for protein-terminal mono-links.
    if (origin.hasSubstring("ProteinN-term"))
    {
      mod.setTermSpecificity(ResidueModification::N_TERM);
      mod.setOrigin('X');
      all_mods.insert(make_pair(id, mod));
    }
    if (origin.hasSubstring("ProteinC-term"))
    {
      mod.setTermSpecificity(ResidueModification::C_TERM);
      mod.setOrigin('X');
      all_mods.insert(make_pair(id, mod));
    }
    id = "";
    origin = "";
    mod = ResidueModification();
  }
  // now use the term and all synonyms to build the database
#pragma omp critical(OpenMS_ModificationsDB)
  {
    for (multimap<String, ResidueModification>::const_iterator it = all_mods.begin(); it != all_mods.end(); ++it)
    {
      // check whether a unimod definition already exists, then simply add synonyms to it
      if (it->second.getUniModRecordId() > 0)
      {
        //cerr << "Found UniMod PSI-MOD mapping: " << it->second.getPSIMODAccession() << " " << it->second.getUniModAccession() << endl;
        set<const ResidueModification*> mods = modification_names_[it->second.getUniModAccession()];
        for (set<const ResidueModification*>::const_iterator mit = mods.begin(); mit != mods.end(); ++mit)
        {
          //cerr << "Adding PSIMOD accession: " << it->second.getPSIMODAccession() << " " << it->second.getUniModAccession() << endl;
          modification_names_[it->second.getPSIMODAccession()].insert(*mit);
        }
      }
      else
      {
        // the mod has so far not been mapped to a unimod mod
        // first check whether the mod is specific
        if ((it->second.getOrigin() != 'X') ||
            ((it->second.getTermSpecificity() != ResidueModification::ANYWHERE) &&
             (it->second.getDiffMonoMass() != 0)))
        {
          mods_.push_back(new ResidueModification(it->second));
          set<String> synonyms = it->second.getSynonyms();
          synonyms.insert(it->first);
          synonyms.insert(it->second.getFullName());
          //synonyms.insert(it->second.getUniModAccession());
          synonyms.insert(it->second.getPSIMODAccession());
          // full ID is auto-generated based on (short) ID, but we want the name instead:
          mods_.back()->setId(it->second.getFullName());
          mods_.back()->setFullId();
          mods_.back()->setId(it->second.getId());
          synonyms.insert(mods_.back()->getFullId());
          // now check each of the names and link it to the residue modification
          for (set<String>::const_iterator nit = synonyms.begin(); nit != synonyms.end(); ++nit)
          {
            modification_names_[*nit].insert(mods_.back());
          }
        }
      }
    }
  }
}
// Fills 'modifications' with the full IDs of all modifications that have a
// UniMod record (the set usable as search modifications), sorted
// case-insensitively; ties between equal (ignoring case) prefixes are
// broken by length, shorter first.
void ModificationsDB::getAllSearchModifications(vector<String>& modifications) const
{
  modifications.clear();
#pragma omp critical(OpenMS_ModificationsDB)
  {
    for (auto const & m : mods_)
    {
      if (m->getUniModRecordId() > 0)
      {
        modifications.push_back(m->getFullId());
      }
    }
  }
  // case-insensitive lexicographic comparison, equivalent to walking the
  // common prefix and comparing lowered characters
  auto case_insensitive_less = [](const String& a, const String& b)
  {
    return std::lexicographical_compare(
        a.begin(), a.end(), b.begin(), b.end(),
        [](char x, char y) { return tolower(x) < tolower(y); });
  };
  sort(modifications.begin(), modifications.end(), case_insensitive_less);
}
// Writes all registered modifications to 'filename' as a TSV table, one
// modification per line, with a header row.
// NOTE(review): mods_ is read here without the usual OpenMP critical
// section — presumably only called from single-threaded contexts; confirm
// before invoking concurrently with database updates.
void ModificationsDB::writeTSV(String const& filename)
{
  std::ofstream ofs(filename, std::ofstream::out);
  ofs << "FullId\tFullName\tUnimodAccession\tOrigin/AA\tTerminusSpecificity\tDiffMonoMass\n";
  ResidueModification tmp; // only needed to map the term-specificity enum to its name
  for (const auto& mod : mods_)
  {
    ofs << mod->getFullId() << "\t" << mod->getFullName() << "\t" << mod->getUniModAccession() << "\t" << mod->getOrigin() << "\t"
        << tmp.getTermSpecificityName(mod->getTermSpecificity()) << "\t"
        << mod->getDiffMonoMass() << "\n";
  }
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/DigestionEnzymeRNA.cpp | .cpp | 1,876 | 86 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hendrik Weisser $
// --------------------------------------------------------------------------
//
#include <OpenMS/CHEMISTRY/DigestionEnzymeRNA.h>
#include <iostream>
using namespace std;
namespace OpenMS
{
// Sets one enzyme property from a (key, value) pair read from the enzyme
// database file. Returns true if the key was recognized (either by the
// base class or by one of the RNA-specific suffixes), false otherwise.
bool DigestionEnzymeRNA::setValueFromFile(const String& key, const String& value)
{
  // generic keys are handled by the base class first
  if (DigestionEnzyme::setValueFromFile(key, value))
  {
    return true;
  }
  // RNA-specific keys are recognized by their suffix
  if (key.hasSuffix(":CutsAfter"))
  {
    setCutsAfterRegEx(value);
  }
  else if (key.hasSuffix(":CutsBefore"))
  {
    setCutsBeforeRegEx(value);
  }
  else if (key.hasSuffix(":ThreePrimeGain"))
  {
    setThreePrimeGain(value);
  }
  else if (key.hasSuffix(":FivePrimeGain"))
  {
    setFivePrimeGain(value);
  }
  else
  {
    return false; // unknown key
  }
  return true;
}
// Sets the regular expression matching the nucleotide(s) the enzyme cuts after.
void DigestionEnzymeRNA::setCutsAfterRegEx(const String& value)
{
  cuts_after_regex_ = value;
}
// Returns the regular expression matching the nucleotide(s) the enzyme cuts after.
String DigestionEnzymeRNA::getCutsAfterRegEx() const
{
  return cuts_after_regex_;
}
// Sets the regular expression matching the nucleotide(s) the enzyme cuts before.
void DigestionEnzymeRNA::setCutsBeforeRegEx(const String& value)
{
  cuts_before_regex_ = value;
}
// Returns the regular expression matching the nucleotide(s) the enzyme cuts before.
String DigestionEnzymeRNA::getCutsBeforeRegEx() const
{
  return cuts_before_regex_;
}
// Sets the chemical group gained at the 3' end of a fragment after digestion.
void DigestionEnzymeRNA::setThreePrimeGain(const String& value)
{
  three_prime_gain_ = value;
}
// Returns the chemical group gained at the 3' end of a fragment after digestion.
String DigestionEnzymeRNA::getThreePrimeGain() const
{
  return three_prime_gain_;
}
// Sets the chemical group gained at the 5' end of a fragment after digestion.
void DigestionEnzymeRNA::setFivePrimeGain(const String& value)
{
  five_prime_gain_ = value;
}
// Returns the chemical group gained at the 5' end of a fragment after digestion.
String DigestionEnzymeRNA::getFivePrimeGain() const
{
  return five_prime_gain_;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/SimpleTSGXLMS.cpp | .cpp | 27,829 | 734 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Eugen Netz $
// $Authors: Eugen Netz $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/SimpleTSGXLMS.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CHEMISTRY/AASequence.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#if OPENMS_BOOST_VERSION_MINOR >= 67 && OPENMS_BOOST_VERSION_MAJOR == 1
#define OPENMS_USE_PDQSORT
#include <boost/sort/pdqsort/pdqsort.hpp>
#endif
using namespace std;
namespace OpenMS
{
// Default constructor: registers all generator parameters with their
// defaults and precomputes loss_db_, the per-residue table of possible
// H2O / NH3 neutral losses, plus the monoisotopic masses of both losses.
SimpleTSGXLMS::SimpleTSGXLMS() :
  DefaultParamHandler("SimpleTSGXLMS")
{
  // TODO only partly functional (second isotopic peak if max_isotope = 2)
  defaults_.setValue("add_isotopes", "false", "If set to 1 isotope peaks of the product ion peaks are added");
  defaults_.setValidStrings("add_isotopes", {"true","false"});
  defaults_.setValue("max_isotope", 2, "Defines the maximal isotopic peak which is added, add_isotopes must be set to 1");
  defaults_.setValue("add_losses", "false", "Adds common losses to those ion expect to have them, only water and ammonia loss is considered");
  defaults_.setValidStrings("add_losses", {"true","false"});
  defaults_.setValue("add_precursor_peaks", "true", "Adds peaks of the precursor to the spectrum, which happen to occur sometimes");
  defaults_.setValidStrings("add_precursor_peaks", {"true","false"});
  // TODO not functional yet
  defaults_.setValue("add_abundant_immonium_ions", "false", "Add most abundant immonium ions");
  defaults_.setValidStrings("add_abundant_immonium_ions", {"true","false"});
  defaults_.setValue("add_k_linked_ions", "true", "Add RES-Linked ions, which are specific to XLMS");
  defaults_.setValidStrings("add_k_linked_ions", {"true","false"});
  // TODO not functional yet
  defaults_.setValue("add_first_prefix_ion", "true", "If set to true e.g. b1 ions are added");
  defaults_.setValidStrings("add_first_prefix_ion", {"true","false"});
  defaults_.setValue("add_y_ions", "true", "Add peaks of y-ions to the spectrum");
  defaults_.setValidStrings("add_y_ions", {"true","false"});
  defaults_.setValue("add_b_ions", "true", "Add peaks of b-ions to the spectrum");
  defaults_.setValidStrings("add_b_ions", {"true","false"});
  defaults_.setValue("add_a_ions", "true", "Add peaks of a-ions to the spectrum");
  defaults_.setValidStrings("add_a_ions", {"true","false"});
  defaults_.setValue("add_c_ions", "false", "Add peaks of c-ions to the spectrum");
  defaults_.setValidStrings("add_c_ions", {"true","false"});
  defaults_.setValue("add_x_ions", "false", "Add peaks of x-ions to the spectrum");
  defaults_.setValidStrings("add_x_ions", {"true","false"});
  defaults_.setValue("add_z_ions", "false", "Add peaks of z-ions to the spectrum");
  defaults_.setValidStrings("add_z_ions", {"true","false"});
  defaultsToParam_();
  // preprocess loss_db_, a database of H2O and NH3 losses for all residues
  AASequence residues = AASequence::fromString("RHKDESTNQCUGPAVILMFYW");
  for (Size i = 0; i < residues.size(); ++i)
  {
    LossIndex residue_losses;
    loss_db_.insert(std::make_pair(residues[i].getOneLetterCode(), residue_losses));
    if (residues[i].hasNeutralLoss())
    {
      vector<EmpiricalFormula> loss_formulas = residues[i].getLossFormulas();
      for (Size k = 0; k != loss_formulas.size(); ++k)
      {
        String loss_name = loss_formulas[k].toString();
        if (loss_name == "H2O1") // for now only these most common losses are considered
        {
          // cache the loss mass once (same for every residue)
          if (loss_H2O_ < 1)
          {
            loss_H2O_ = loss_formulas[k].getMonoWeight();
          }
          loss_db_[residues[i].getOneLetterCode()].has_H2O_loss = true;
        }
        if (loss_name == "H3N1")
        {
          if (loss_NH3_ < 1)
          {
            loss_NH3_ = loss_formulas[k].getMonoWeight();
          }
          loss_db_[residues[i].getOneLetterCode()].has_NH3_loss = true;
        }
      }
    }
  }
}
// Copy constructor.
// BUGFIX: the previous version only copied the DefaultParamHandler base, so
// loss_db_, loss_H2O_ and loss_NH3_ (built in the default constructor) were
// left default-initialized in the copy — a copied generator would emit
// missing/wrong neutral-loss peaks. Copy them explicitly.
SimpleTSGXLMS::SimpleTSGXLMS(const SimpleTSGXLMS & rhs) :
  DefaultParamHandler(rhs)
{
  loss_db_ = rhs.loss_db_;
  loss_H2O_ = rhs.loss_H2O_;
  loss_NH3_ = rhs.loss_NH3_;
}
// Copy assignment.
// BUGFIX: mirrors the copy-constructor fix — the loss bookkeeping members
// (loss_db_, loss_H2O_, loss_NH3_) were previously not copied on
// assignment, leaving the target with its own (possibly stale) values.
SimpleTSGXLMS & SimpleTSGXLMS::operator=(const SimpleTSGXLMS & rhs)
{
  if (this != &rhs)
  {
    DefaultParamHandler::operator=(rhs);
    loss_db_ = rhs.loss_db_;
    loss_H2O_ = rhs.loss_H2O_;
    loss_NH3_ = rhs.loss_NH3_;
  }
  return *this;
}
// Destructor: nothing to release explicitly; all members clean up via RAII.
SimpleTSGXLMS::~SimpleTSGXLMS() = default;
// Generates all linear fragment ions (fragments NOT containing the
// cross-linker) of 'peptide' for every charge from 1 up to 'charge', for
// each enabled ion type. link_pos (and link_pos_2 for loop-links) bound the
// linked region; only fragments outside of it are produced. The peak list
// is sorted by m/z before returning.
void SimpleTSGXLMS::getLinearIonSpectrum(std::vector< SimplePeak >& spectrum, AASequence& peptide, Size link_pos, int charge, Size link_pos_2) const
{
  std::vector< LossIndex > forward_losses;
  std::vector< LossIndex > backward_losses;
  if (add_losses_)
  {
    // precompute which neutral losses are possible for each prefix / suffix
    forward_losses = getForwardLosses_(peptide);
    backward_losses = getBackwardLosses_(peptide);
  }
  for (Int z = charge; z >= 1; --z)
  {
    if (add_b_ions_)
    {
      addLinearPeaks_(spectrum, peptide, link_pos, Residue::BIon, forward_losses, backward_losses, z, link_pos_2);
    }
    if (add_y_ions_)
    {
      addLinearPeaks_(spectrum, peptide, link_pos, Residue::YIon, forward_losses, backward_losses, z, link_pos_2);
    }
    if (add_a_ions_)
    {
      addLinearPeaks_(spectrum, peptide, link_pos, Residue::AIon, forward_losses, backward_losses, z, link_pos_2);
    }
    if (add_x_ions_)
    {
      addLinearPeaks_(spectrum, peptide, link_pos, Residue::XIon, forward_losses, backward_losses, z, link_pos_2);
    }
    if (add_c_ions_)
    {
      addLinearPeaks_(spectrum, peptide, link_pos, Residue::CIon, forward_losses, backward_losses, z, link_pos_2);
    }
    if (add_z_ions_)
    {
      addLinearPeaks_(spectrum, peptide, link_pos, Residue::ZIon, forward_losses, backward_losses, z, link_pos_2);
    }
  }
  // sort the collected peaks by m/z; pdqsort is used when a recent boost is available
#ifdef OPENMS_USE_PDQSORT
  boost::sort::pdqsort_branchless(spectrum.begin(), spectrum.end(), [](const SimplePeak& a, const SimplePeak& b) {return a.mz < b.mz;});
#else
  std::stable_sort(spectrum.begin(), spectrum.end(), [](const SimplePeak& a, const SimplePeak& b) {return a.mz < b.mz;});
#endif
  return;
}
void SimpleTSGXLMS::addLinearPeaks_(std::vector< SimplePeak >& spectrum, AASequence& peptide, Size link_pos, Residue::ResidueType res_type, std::vector< LossIndex >& forward_losses, std::vector< LossIndex >& backward_losses, int charge, Size link_pos_2) const
{
if (peptide.empty())
{
cout << "Warning: Attempt at creating XLink Ions Spectrum from empty string!" << endl;
return;
}
// second link position, in case of a loop-link
Size link_pos_B = link_pos_2;
if (link_pos_2 == 0)
{
link_pos_B = link_pos;
}
if (res_type == Residue::AIon || res_type == Residue::BIon || res_type == Residue::CIon)
{
double mono_weight(Constants::PROTON_MASS_U * charge);
if (peptide.hasNTerminalModification())
{
mono_weight += peptide.getNTerminalModification()->getDiffMonoMass();
}
switch (res_type)
{
case Residue::AIon: mono_weight += Residue::getInternalToAIon().getMonoWeight(); break;
case Residue::BIon: mono_weight += Residue::getInternalToBIon().getMonoWeight(); break;
case Residue::CIon: mono_weight += Residue::getInternalToCIon().getMonoWeight(); break;
default: break;
}
Size i = 0;
for (; i < link_pos; ++i)
{
mono_weight += peptide[i].getMonoWeight(Residue::Internal);
double pos(mono_weight / charge);
if (add_losses_)
{
addLosses_(spectrum, mono_weight, charge, forward_losses[i]);
}
spectrum.emplace_back(pos, charge);
if (add_isotopes_ && max_isotope_ >= 2) // add second isotopic peak with fast method, if two or more peaks are asked for
{
spectrum.emplace_back(pos+(Constants::C13C12_MASSDIFF_U / charge), charge);
}
}
}
else // if (res_type == Residue::XIon || res_type == Residue::YIon || res_type == Residue::ZIon)
{
double mono_weight(Constants::PROTON_MASS_U * charge);
if (peptide.hasCTerminalModification())
{
mono_weight += peptide.getCTerminalModification()->getDiffMonoMass();
}
switch (res_type)
{
case Residue::XIon: mono_weight += Residue::getInternalToXIon().getMonoWeight(); break;
case Residue::YIon: mono_weight += Residue::getInternalToYIon().getMonoWeight(); break;
case Residue::ZIon: mono_weight += Residue::getInternalToZIon().getMonoWeight(); break;
default: break;
}
for (Size i = peptide.size()-1; i > link_pos_B; --i)
{
mono_weight += peptide[i].getMonoWeight(Residue::Internal);
double pos(mono_weight / charge);
if (add_losses_)
{
addLosses_(spectrum, pos, charge, backward_losses[i]);
}
spectrum.emplace_back(pos, charge);
if (add_isotopes_ && max_isotope_ >= 2) // add second isotopic peak with fast method, if two or more peaks are asked for
{
spectrum.emplace_back(pos+(Constants::C13C12_MASSDIFF_U / charge), charge);
}
}
}
return;
}
// Generates all cross-linker-containing fragment ions of 'peptide' for the
// charge range [mincharge, maxcharge]. precursor_mass is the full mass of
// the cross-linked construct (both peptides plus linker, or peptide plus
// mono-link), which the fragments are derived from by subtracting residues.
// Optionally adds K-linked ions and precursor (loss) peaks; the peak list
// is sorted by m/z before returning.
void SimpleTSGXLMS::getXLinkIonSpectrum(std::vector< SimplePeak >& spectrum, AASequence& peptide, Size link_pos, double precursor_mass, int mincharge, int maxcharge, Size link_pos_2) const
{
  std::vector< LossIndex > forward_losses;
  std::vector< LossIndex > backward_losses;
  if (add_losses_)
  {
    // precompute which neutral losses are possible for each prefix / suffix
    forward_losses = getForwardLosses_(peptide);
    backward_losses = getBackwardLosses_(peptide);
  }
  for (Int z = mincharge; z <= maxcharge; ++z)
  {
    if (add_b_ions_)
    {
      addXLinkIonPeaks_(spectrum, peptide, link_pos, precursor_mass, Residue::BIon, forward_losses, backward_losses, z, link_pos_2);
    }
    if (add_y_ions_)
    {
      addXLinkIonPeaks_(spectrum, peptide, link_pos, precursor_mass, Residue::YIon, forward_losses, backward_losses, z, link_pos_2);
    }
    if (add_a_ions_)
    {
      addXLinkIonPeaks_(spectrum, peptide, link_pos, precursor_mass, Residue::AIon, forward_losses, backward_losses, z, link_pos_2);
    }
    if (add_x_ions_)
    {
      addXLinkIonPeaks_(spectrum, peptide, link_pos, precursor_mass, Residue::XIon, forward_losses, backward_losses, z, link_pos_2);
    }
    if (add_c_ions_)
    {
      addXLinkIonPeaks_(spectrum, peptide, link_pos, precursor_mass, Residue::CIon, forward_losses, backward_losses, z, link_pos_2);
    }
    if (add_z_ions_)
    {
      addXLinkIonPeaks_(spectrum, peptide, link_pos, precursor_mass, Residue::ZIon, forward_losses, backward_losses, z, link_pos_2);
    }
    if (add_k_linked_ions_)
    {
      addKLinkedIonPeaks_(spectrum, peptide, link_pos, precursor_mass, z);
    }
  }
  if (add_precursor_peaks_)
  {
    addPrecursorPeaks_(spectrum, precursor_mass, maxcharge);
  }
#ifdef OPENMS_USE_PDQSORT
  // peaks were generated in roughly descending m/z order; reversing first
  // presumably helps pdqsort's pattern detection — TODO confirm intent
  std::reverse(spectrum.begin(), spectrum.end());
  boost::sort::pdqsort_branchless(spectrum.begin(), spectrum.end(), [](const SimplePeak& a, const SimplePeak& b) {return a.mz < b.mz;});
#else
  std::sort(spectrum.begin(), spectrum.end(), [](const SimplePeak& a, const SimplePeak& b) {return a.mz < b.mz;});
#endif
  return;
}
// Adds cross-linker-containing fragment ion peaks of one residue type and
// charge. Unlike addLinearPeaks_, these fragments are computed top-down:
// starting from the full precursor mass, residues outside the linked region
// are subtracted one at a time (a/b/c ions lose C-terminal residues, x/y/z
// ions lose N-terminal residues).
void SimpleTSGXLMS::addXLinkIonPeaks_(std::vector< SimplePeak >& spectrum, AASequence& peptide, Size link_pos, double precursor_mass, Residue::ResidueType res_type, std::vector< LossIndex >& forward_losses, std::vector< LossIndex >& backward_losses, int charge, Size link_pos_2) const
{
  if (peptide.empty())
  {
    cout << "Warning: Attempt at creating XLink Ions Spectrum from empty string!" << endl;
    return;
  }
  // second link position, in case of a loop-link
  Size link_pos_B = link_pos_2;
  if (link_pos_2 == 0)
  {
    link_pos_B = link_pos;
  }
  if (res_type == Residue::AIon || res_type == Residue::BIon || res_type == Residue::CIon)
  {
    // whole mass of both peptides + cross-link (or peptide + mono-link), converted to an internal ion
    double mono_weight((Constants::PROTON_MASS_U * charge) + precursor_mass - Residue::getInternalToFull().getMonoWeight());
    // the C-terminal modification is lost with the C-terminal residues
    if (peptide.hasCTerminalModification())
    {
      mono_weight -= peptide.getCTerminalModification()->getDiffMonoMass();
    }
    // adjust mass to given residue type
    switch (res_type)
    {
      case Residue::AIon: mono_weight += Residue::getInternalToAIon().getMonoWeight(); break;
      case Residue::BIon: mono_weight += Residue::getInternalToBIon().getMonoWeight(); break;
      case Residue::CIon: mono_weight += Residue::getInternalToCIon().getMonoWeight(); break;
      default: break;
    }
    // subtract one residue at a time, from the C-terminus down to the linked region
    for (Size i = peptide.size()-1; i > link_pos_B; --i)
    {
      mono_weight -= peptide[i].getMonoWeight(Residue::Internal);
      double pos(mono_weight / charge);
      if (add_isotopes_ && max_isotope_ >= 2) // add second isotopic peak with fast method, if two or more peaks are asked for
      {
        spectrum.emplace_back(pos+(Constants::C13C12_MASSDIFF_U / charge), charge);
      }
      spectrum.emplace_back(pos, charge);
      // losses of the remaining prefix (note the index shift: losses up to residue i-1)
      if (add_losses_ && forward_losses.size() >= i)
      {
        addLosses_(spectrum, mono_weight, charge, forward_losses[i-1]);
      }
    }
  }
  else // if (res_type == Residue::XIon || res_type == Residue::YIon || res_type == Residue::ZIon)
  {
    // whole mass of both peptides + cross-link (or peptide + mono-link), converted to an internal ion
    double mono_weight((Constants::PROTON_MASS_U * charge) + precursor_mass - Residue::getInternalToFull().getMonoWeight()); // whole mass
    // the N-terminal modification is lost with the N-terminal residues
    if (peptide.hasNTerminalModification())
    {
      mono_weight -= peptide.getNTerminalModification()->getDiffMonoMass();
    }
    // adjust mass to given residue type
    switch (res_type)
    {
      case Residue::XIon: mono_weight += Residue::getInternalToXIon().getMonoWeight(); break;
      case Residue::YIon: mono_weight += Residue::getInternalToYIon().getMonoWeight(); break;
      case Residue::ZIon: mono_weight += Residue::getInternalToZIon().getMonoWeight(); break;
      default: break;
    }
    // subtract one residue at a time, from the N-terminus up to the linked region
    for (Size i = 0; i < link_pos; ++i)
    {
      mono_weight -= peptide[i].getMonoWeight(Residue::Internal);
      double pos(mono_weight / charge);
      if (add_isotopes_ && max_isotope_ >= 2) // add second isotopic peak with fast method, if two or more peaks are asked for
      {
        spectrum.emplace_back(pos+(Constants::C13C12_MASSDIFF_U / charge), charge);
      }
      spectrum.emplace_back(pos, charge);
      // losses of the remaining suffix (index shift: losses from residue i+1 on)
      if (add_losses_ && backward_losses.size() >= i+2)
      {
        addLosses_(spectrum, mono_weight, charge, backward_losses[i+1]);
      }
    }
  }
  return;
}
// Adds the precursor peak and its two common neutral-loss peaks (water and
// ammonia) at the given charge. For each of the three species, the second
// isotopic peak is added first when isotopes are enabled, matching the
// ordering used elsewhere in this generator.
void SimpleTSGXLMS::addPrecursorPeaks_(std::vector< SimplePeak >& spectrum, double precursor_mass, int charge) const
{
  // a neutral loss of 0 denotes the plain precursor peak
  const double neutral_losses[3] = { 0.0, loss_H2O_, loss_NH3_ };
  for (double loss : neutral_losses)
  {
    const double mono_pos = precursor_mass + (Constants::PROTON_MASS_U * charge) - loss;
    if (add_isotopes_ && max_isotope_ >= 2) // fast method: only the second isotopic peak is added
    {
      spectrum.emplace_back((mono_pos + Constants::C13C12_MASSDIFF_U) / charge, charge);
    }
    spectrum.emplace_back(mono_pos / charge, charge);
  }
}
void SimpleTSGXLMS::addKLinkedIonPeaks_(std::vector< SimplePeak >& spectrum, AASequence& peptide, Size link_pos, double precursor_mass, int charge) const
{
  // Adds a "K-linked" ion: the cross-linked residue itself plus the linker and
  // the second peptide, i.e. the precursor minus the B-ion prefix and X-ion
  // suffix around the link position.
  // This ion type does not exist for links on terminal residues, so bail out
  // early for an N-terminal (link_pos == 0) or C-terminal (link_pos == size) link.
  if (link_pos == 0 || link_pos >= peptide.size())
  {
    return;
  }
  double ion_mass = precursor_mass;
  ion_mass -= peptide.getPrefix(link_pos).getMonoWeight(Residue::BIon);
  ion_mass -= peptide.getSuffix(peptide.size() - link_pos - 1).getMonoWeight(Residue::XIon);
  ion_mass += Constants::PROTON_MASS_U * charge;
  // a negative mass means the inputs were inconsistent; emit nothing
  if (ion_mass < 0)
  {
    return;
  }
  // add second isotopic peak with fast method, if two or more peaks are asked for
  if (add_isotopes_ && max_isotope_ >= 2)
  {
    spectrum.emplace_back((ion_mass + Constants::C13C12_MASSDIFF_U) / charge, charge);
  }
  spectrum.emplace_back(ion_mass / charge, charge);
}
void SimpleTSGXLMS::addLosses_(std::vector< SimplePeak >& spectrum, double mono_weight, int charge, LossIndex& losses) const
{
  // Appends neutral-loss peaks (H2O and/or NH3) for an ion of the given
  // uncharged mass, according to the flags in 'losses'.
  if (losses.has_H2O_loss)
  {
    const double mz_h2o = (mono_weight - loss_H2O_) / charge;
    spectrum.emplace_back(mz_h2o, charge);
  }
  if (losses.has_NH3_loss)
  {
    const double mz_nh3 = (mono_weight - loss_NH3_) / charge;
    spectrum.emplace_back(mz_nh3, charge);
  }
}
// Generates the theoretical spectrum of cross-linked fragment ions (ions that
// still carry the linker and the second peptide) for one peptide of a
// cross-link, over the charge range [mincharge, maxcharge].
// @param spectrum    output; peaks are appended and finally sorted by m/z
// @param crosslink   the cross-link (alpha required; beta absent for mono-links)
// @param frag_alpha  true: fragment the alpha peptide, false: fragment beta
void SimpleTSGXLMS::getXLinkIonSpectrum(std::vector< SimplePeak >& spectrum, OPXLDataStructs::ProteinProteinCrossLink& crosslink, bool frag_alpha, int mincharge, int maxcharge) const
{
  std::vector< LossIndex > forward_losses;
  std::vector< LossIndex > backward_losses;
  LossIndex losses_peptide2;
  if (!crosslink.alpha)
  {
    return;
  }
  AASequence alpha = *crosslink.alpha;
  AASequence beta;
  if (crosslink.beta) { beta = *crosslink.beta; }
  if (add_losses_)
  {
    // BUGFIX: guard against empty sequences. getForwardLosses_ / getBackwardLosses_
    // index into the peptide, so calling them with an empty AASequence (e.g. a
    // mono-link, where there is no beta peptide) reads out of bounds.
    if (frag_alpha)
    {
      if (!beta.empty()) { losses_peptide2 = getBackwardLosses_(beta)[0]; }
      if (!alpha.empty())
      {
        forward_losses = getForwardLosses_(alpha);
        backward_losses = getBackwardLosses_(alpha);
      }
    }
    else
    {
      if (!alpha.empty()) { losses_peptide2 = getBackwardLosses_(alpha)[0]; }
      if (!beta.empty())
      {
        forward_losses = getForwardLosses_(beta);
        backward_losses = getBackwardLosses_(beta);
      }
    }
  }
  for (Int z = mincharge; z <= maxcharge; ++z)
  {
    if (add_b_ions_)
    {
      addXLinkIonPeaks_(spectrum, crosslink, frag_alpha, Residue::BIon, forward_losses, backward_losses, losses_peptide2, z);
    }
    if (add_y_ions_)
    {
      addXLinkIonPeaks_(spectrum, crosslink, frag_alpha, Residue::YIon, forward_losses, backward_losses, losses_peptide2, z);
    }
    if (add_a_ions_)
    {
      addXLinkIonPeaks_(spectrum, crosslink, frag_alpha, Residue::AIon, forward_losses, backward_losses, losses_peptide2, z);
    }
    if (add_x_ions_)
    {
      addXLinkIonPeaks_(spectrum, crosslink, frag_alpha, Residue::XIon, forward_losses, backward_losses, losses_peptide2, z);
    }
    if (add_c_ions_)
    {
      addXLinkIonPeaks_(spectrum, crosslink, frag_alpha, Residue::CIon, forward_losses, backward_losses, losses_peptide2, z);
    }
    if (add_z_ions_)
    {
      addXLinkIonPeaks_(spectrum, crosslink, frag_alpha, Residue::ZIon, forward_losses, backward_losses, losses_peptide2, z);
    }
    // K-linked ions only make sense for real cross-links (two peptides)
    if (add_k_linked_ions_ && !beta.empty())
    {
      double precursor_mass = alpha.getMonoWeight() + beta.getMonoWeight() + crosslink.cross_linker_mass;
      AASequence peptide;
      Size link_pos;
      if (frag_alpha)
      {
        peptide = alpha;
        link_pos = crosslink.cross_link_position.first;
      }
      else
      {
        peptide = beta;
        link_pos = crosslink.cross_link_position.second;
      }
      addKLinkedIonPeaks_(spectrum, peptide, link_pos, precursor_mass, z);
    }
  }
  if (add_precursor_peaks_)
  {
    // precursor peaks are only generated once, for the highest charge state
    double precursor_mass = alpha.getMonoWeight() + crosslink.cross_linker_mass;
    if (!beta.empty())
    {
      precursor_mass += beta.getMonoWeight();
    }
    addPrecursorPeaks_(spectrum, precursor_mass, maxcharge);
  }
  // sort peaks by m/z; pdqsort degrades on descending input, so reverse first
#ifdef OPENMS_USE_PDQSORT
  std::reverse(spectrum.begin(), spectrum.end());
  boost::sort::pdqsort_branchless(spectrum.begin(), spectrum.end(), [](const SimplePeak& a, const SimplePeak& b) {return a.mz < b.mz;});
#else
  std::sort(spectrum.begin(), spectrum.end(), [](const SimplePeak& a, const SimplePeak& b) {return a.mz < b.mz;});
#endif
  return;
}
// Adds cross-linked fragment ion peaks of one ion type (a/b/c/x/y/z) and one
// charge state to 'spectrum'. These ions contain the cross-linker and the whole
// second peptide, so masses are computed top-down: start from the full
// precursor mass and subtract residues of the fragmented peptide one at a time.
// @param forward_losses  cumulative loss flags of the fragmented peptide, N- to C-terminal
// @param backward_losses cumulative loss flags of the fragmented peptide, C- to N-terminal
// @param losses_peptide2 loss flags contributed by the intact second peptide
void SimpleTSGXLMS::addXLinkIonPeaks_(std::vector< SimplePeak >& spectrum, OPXLDataStructs::ProteinProteinCrossLink& crosslink, bool frag_alpha, Residue::ResidueType res_type, std::vector< LossIndex >& forward_losses, std::vector< LossIndex >& backward_losses, LossIndex& losses_peptide2, int charge) const
{
  if (!crosslink.alpha || crosslink.alpha->empty())
  {
    cout << "Warning: Attempt at creating XLink Ions Spectrum from empty string!" << endl;
    return;
  }
  AASequence alpha = *crosslink.alpha;
  AASequence beta;
  if (crosslink.beta) { beta = *crosslink.beta; }
  // full precursor: both peptides (beta empty for mono-links) plus the linker
  double precursor_mass = alpha.getMonoWeight() + crosslink.cross_linker_mass;
  if (!beta.empty())
  {
    precursor_mass += beta.getMonoWeight();
  }
  // 'peptide' is the one being fragmented; 'peptide2' stays intact
  AASequence peptide;
  AASequence peptide2;
  Size link_pos;
  if (frag_alpha)
  {
    peptide = alpha;
    peptide2 = beta;
    link_pos = crosslink.cross_link_position.first;
  }
  else
  {
    peptide = beta;
    peptide2 = alpha;
    link_pos = crosslink.cross_link_position.second;
  }
  if (res_type == Residue::AIon || res_type == Residue::BIon || res_type == Residue::CIon)
  {
    // prefix ions: start from the charged full mass as an internal ion ...
    double mono_weight((Constants::PROTON_MASS_U * charge) + precursor_mass - Residue::getInternalToFull().getMonoWeight());
    // ... drop the C-terminal modification, since the C-terminus is cut away below
    if (peptide.hasCTerminalModification())
    {
      mono_weight -= peptide.getCTerminalModification()->getDiffMonoMass();
    }
    // adjust mass to given residue type
    switch (res_type)
    {
      case Residue::AIon: mono_weight += Residue::getInternalToAIon().getMonoWeight(); break;
      case Residue::BIon: mono_weight += Residue::getInternalToBIon().getMonoWeight(); break;
      case Residue::CIon: mono_weight += Residue::getInternalToCIon().getMonoWeight(); break;
      default: break;
    }
    // subtract one residue at a time from the C-terminus, stopping at the link
    // position (fragments must still contain the cross-linked residue)
    for (Size i = peptide.size()-1; i > link_pos; --i)
    {
      mono_weight -= peptide[i].getMonoWeight(Residue::Internal);
      double pos(mono_weight / charge);
      if (add_isotopes_ && max_isotope_ >= 2) // add second isotopic peak with fast method, if two or more peaks are asked for
      {
        spectrum.emplace_back(pos+(Constants::C13C12_MASSDIFF_U / charge), charge);
      }
      spectrum.emplace_back(pos, charge);
      if (add_losses_ && forward_losses.size() >= i)
      {
        // losses can come from the remaining prefix (forward_losses[i-1])
        // or from anywhere on the intact second peptide (losses_peptide2)
        SimpleTSGXLMS::LossIndex losses;
        losses.has_H2O_loss = losses_peptide2.has_H2O_loss || forward_losses[i-1].has_H2O_loss;
        losses.has_NH3_loss = losses_peptide2.has_NH3_loss || forward_losses[i-1].has_NH3_loss;
        addLosses_(spectrum, mono_weight, charge, losses);
      }
    }
  }
  else // if (res_type == Residue::XIon || res_type == Residue::YIon || res_type == Residue::ZIon)
  {
    // whole mass of both peptides + cross-link (or peptide + mono-link), converted to an internal ion
    double mono_weight((Constants::PROTON_MASS_U * charge) + precursor_mass - Residue::getInternalToFull().getMonoWeight()); // whole mass
    // drop the N-terminal modification, since the N-terminus is cut away below
    if (peptide.hasNTerminalModification())
    {
      mono_weight -= peptide.getNTerminalModification()->getDiffMonoMass();
    }
    // adjust mass to given residue type
    switch (res_type)
    {
      case Residue::XIon: mono_weight += Residue::getInternalToXIon().getMonoWeight(); break;
      case Residue::YIon: mono_weight += Residue::getInternalToYIon().getMonoWeight(); break;
      case Residue::ZIon: mono_weight += Residue::getInternalToZIon().getMonoWeight(); break;
      default: break;
    }
    // subtract one residue at a time from the N-terminus up to the link position
    for (Size i = 0; i < link_pos; ++i)
    {
      mono_weight -= peptide[i].getMonoWeight(Residue::Internal);
      double pos(mono_weight / charge);
      if (add_isotopes_ && max_isotope_ >= 2) // add second isotopic peak with fast method, if two or more peaks are asked for
      {
        spectrum.emplace_back(pos+(Constants::C13C12_MASSDIFF_U / charge), charge);
      }
      spectrum.emplace_back(pos, charge);
      if (add_losses_ && backward_losses.size() >= i+2)
      {
        // losses from the remaining suffix (backward_losses[i+1]) or the second peptide
        SimpleTSGXLMS::LossIndex losses;
        losses.has_H2O_loss = losses_peptide2.has_H2O_loss || backward_losses[i+1].has_H2O_loss;
        losses.has_NH3_loss = losses_peptide2.has_NH3_loss || backward_losses[i+1].has_NH3_loss;
        addLosses_(spectrum, mono_weight, charge, losses);
      }
    }
  }
  return;
}
// Builds a "forward set" of cumulative loss flags: entry i tells whether any
// residue in peptide[0..i] can lose H2O or NH3.
// @throws std::out_of_range via loss_db_.at() if a residue code is not in loss_db_
std::vector< SimpleTSGXLMS::LossIndex > SimpleTSGXLMS::getForwardLosses_(AASequence& peptide) const
{
  // this gives us a "forward set" with incremental losses from the first to the last residue
  std::vector< LossIndex > ion_losses(peptide.size());
  // BUGFIX: an empty peptide (e.g. the missing beta peptide of a mono-link)
  // previously caused an out-of-bounds write to ion_losses[0]
  if (ion_losses.empty())
  {
    return ion_losses;
  }
  ion_losses[0] = loss_db_.at(peptide[0].getOneLetterCode());
  for (Size i = 1; i < peptide.size(); ++i)
  {
    ion_losses[i].has_H2O_loss = ion_losses[i-1].has_H2O_loss || loss_db_.at(peptide[i].getOneLetterCode()).has_H2O_loss;
    ion_losses[i].has_NH3_loss = ion_losses[i-1].has_NH3_loss || loss_db_.at(peptide[i].getOneLetterCode()).has_NH3_loss;
  }
  return ion_losses;
}
// Builds a "backward set" of cumulative loss flags: entry i tells whether any
// residue in peptide[i..end] can lose H2O or NH3.
// @throws std::out_of_range via loss_db_.at() if a residue code is not in loss_db_
std::vector< SimpleTSGXLMS::LossIndex > SimpleTSGXLMS::getBackwardLosses_(AASequence& peptide) const
{
  // this gives us a "backward set" with incremental losses from the last to the first residue
  std::vector< LossIndex > ion_losses(peptide.size());
  // BUGFIX: for an empty peptide, ion_losses.size()-1 wraps around (Size is
  // unsigned) and the first assignment indexed far out of bounds
  if (ion_losses.empty())
  {
    return ion_losses;
  }
  ion_losses[ion_losses.size()-1] = loss_db_.at(peptide[peptide.size()-1].getOneLetterCode());
  for (Size i = ion_losses.size()-1; i > 0; --i)
  {
    ion_losses[i-1].has_H2O_loss = ion_losses[i].has_H2O_loss || loss_db_.at(peptide[i-1].getOneLetterCode()).has_H2O_loss;
    ion_losses[i-1].has_NH3_loss = ion_losses[i].has_NH3_loss || loss_db_.at(peptide[i-1].getOneLetterCode()).has_NH3_loss;
  }
  return ion_losses;
}
void SimpleTSGXLMS::updateMembers_()
{
add_b_ions_ = param_.getValue("add_b_ions").toBool();
add_y_ions_ = param_.getValue("add_y_ions").toBool();
add_a_ions_ = param_.getValue("add_a_ions").toBool();
add_c_ions_ = param_.getValue("add_c_ions").toBool();
add_x_ions_ = param_.getValue("add_x_ions").toBool();
add_z_ions_ = param_.getValue("add_z_ions").toBool();
add_first_prefix_ion_ = param_.getValue("add_first_prefix_ion").toBool();
add_losses_ = param_.getValue("add_losses").toBool();
add_isotopes_ = param_.getValue("add_isotopes").toBool();
add_precursor_peaks_ = param_.getValue("add_precursor_peaks").toBool();
add_abundant_immonium_ions_ = param_.getValue("add_abundant_immonium_ions").toBool();
max_isotope_ = static_cast<Int>(param_.getValue("max_isotope"));
add_k_linked_ions_ = param_.getValue("add_k_linked_ions").toBool();
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/ElementDB.cpp | .cpp | 37,303 | 705 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Andreas Bertsch, Timo Sachsenberg, Chris Bielow, Jang Jang Jin$
// --------------------------------------------------------------------------
//
#include <OpenMS/CHEMISTRY/ElementDB.h>
#include <OpenMS/DATASTRUCTURES/String.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/CHEMISTRY/Element.h>
#include <iostream>
#include <cmath>
#include <memory>
using namespace std;
namespace OpenMS
{
// Constructor: populates the lookup tables (names_, symbols_, atomic_numbers_)
// with the built-in element/isotope data via storeElements_().
ElementDB::ElementDB()
{
  storeElements_();
}
// Destructor: clear_() tears down the internal element storage.
// NOTE(review): presumably this deletes the owned Element instances — confirm in clear_().
ElementDB::~ElementDB()
{
  clear_();
}
// Singleton accessor. The instance is heap-allocated on first use and never
// deleted; NOTE(review): this looks like an intentional leak to avoid
// static-destruction-order issues — confirm before "fixing".
ElementDB* ElementDB::getInstance()
{
  static ElementDB* db_ = new ElementDB;
  return db_;
}
// Returns the map from full element name (e.g. "Hydrogen") to Element pointer.
const unordered_map<string, const Element*>& ElementDB::getNames() const
{
  return names_;
}
// Returns the map from element symbol (e.g. "H") to Element pointer.
const unordered_map<string, const Element*>& ElementDB::getSymbols() const
{
  return symbols_;
}
// Returns the map from atomic number (e.g. 1) to Element pointer.
const unordered_map<unsigned int, const Element*>& ElementDB::getAtomicNumbers() const
{
  return atomic_numbers_;
}
// Looks up an element by symbol ("H") or, failing that, by full name
// ("Hydrogen"). Returns nullptr if neither table contains the key.
const Element* ElementDB::getElement(const string& name) const
{
  auto by_symbol = symbols_.find(name);
  if (by_symbol != symbols_.end())
  {
    return by_symbol->second;
  }
  auto by_name = names_.find(name);
  if (by_name != names_.end())
  {
    return by_name->second;
  }
  return nullptr; // unknown element
}
// Looks up an element by atomic number; returns nullptr if unknown.
const Element* ElementDB::getElement(unsigned int atomic_number) const
{
  auto pos = atomic_numbers_.find(atomic_number);
  return (pos == atomic_numbers_.end()) ? nullptr : pos->second;
}
// True if 'name' is a known element name or symbol.
bool ElementDB::hasElement(const string& name) const
{
  return names_.find(name) != names_.end() || symbols_.find(name) != symbols_.end();
}
// True if an element with the given atomic number is registered.
bool ElementDB::hasElement(unsigned int atomic_number) const
{
  return atomic_numbers_.count(atomic_number) > 0;
}
// Registers a (possibly user-defined) element.
// @param abundance  isotope number -> relative abundance
// @param mass       isotope number -> monoisotopic mass
// @throws Exception::IllegalArgument if the atomic number is already taken
//         and replace_existing is false
void ElementDB::addElement(const std::string& name,
                           const std::string& symbol,
                           const unsigned int an,
                           const std::map<unsigned int, double>& abundance,
                           const std::map<unsigned int, double>& mass,
                           bool replace_existing)
{
  const bool already_present = hasElement(an);
  if (already_present && !replace_existing)
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("Element with atomic number ") + an + " already exists");
  }
  buildElement_(name, symbol, an, abundance, mass);
}
// Computes the abundance-weighted average atomic mass.
// @param abundance  isotope number -> relative abundance (should sum to ~1)
// @param mass       isotope number -> monoisotopic mass
// @throws std::out_of_range if an isotope in 'abundance' is missing from 'mass'
double ElementDB::calculateAvgWeight_(const map<unsigned int, double>& abundance, const map<unsigned int, double>& mass)
{
  double avg = 0;
  // calculate weighted average; use the iterated value directly instead of
  // the redundant abundance.at(it.first) lookup the old code performed
  for (const auto& [isotope, fraction] : abundance)
  {
    avg += mass.at(isotope) * fraction;
  }
  return avg;
}
double ElementDB::calculateMonoWeight_(const map<unsigned int, double>& abundance, const map<unsigned int, double>& mass)
{
double highest_abundance = -1.0;
int highest_abundance_isotope = -1;
// the monoisotopic weight is the *most abundant* isotope of an element
for (const auto& it : abundance)
{
if (it.second > highest_abundance)
{
highest_abundance = it.second;
highest_abundance_isotope = it.first;
}
}
if (highest_abundance_isotope != -1) return mass.at(highest_abundance_isotope);
else return 0.0;
}
// Inserts a non-owning Element pointer into 'container' under 'key'.
// @throws Exception::InvalidValue if 'key' is already present
template<class CONT, class KEY>
void addIfUniqueOrThrow(CONT& container, const KEY& key, unique_ptr<const Element>& replacement)
{
  if (container.find(key) != container.end())
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String(key), "Already exists!");
  }
  container[key] = replacement.get();
}
void ElementDB::storeElements_()
{
map<unsigned int, double> hydrogen_abundance = {{1u, 0.999885}, {2u, 0.000115}, {3u, 0.0}};
map<unsigned int, double> hydrogen_mass = {{1u, 1.0078250319}, {2u, 2.01410178}, {3u, 3.01604927}};
buildElement_("Hydrogen", "H", 1u, hydrogen_abundance, hydrogen_mass);
map<unsigned int, double> helium_abundance = {{3u, 1.34e-06}, {4u, 0.9999986599999999}};
map<unsigned int, double> helium_mass = {{3u, 3.0160293191}, {4u, 4.00260325415}};
buildElement_("Helium", "He", 2u, helium_abundance, helium_mass);
map<unsigned int, double> lithium_abundance = {{6u, 0.0759}, {7u, 0.9240999999999999}};
map<unsigned int, double> lithium_mass = {{6u, 6.015122}, {7u, 7.016004}};
buildElement_("Lithium", "Li", 3u, lithium_abundance, lithium_mass);
map<unsigned int, double> beryllium_abundance = {{9u, 1.0}};
map<unsigned int, double> beryllium_mass = {{9u, 9.0121822}};
buildElement_("Beryllium", "Be", 4u, beryllium_abundance, beryllium_mass);
map<unsigned int, double> bor_abundance = {{10u, 0.19899999999999998}, {11u, 0.8009999999999999}};
map<unsigned int, double> bor_mass = {{10u, 10.012937000000001}, {11u, 11.009304999999999}};
buildElement_("Boron", "B", 5u, bor_abundance, bor_mass);
map<unsigned int, double> carbon_abundance = {{12u, 0.9893000000000001}, {13u, 0.010700000000000001}};
map<unsigned int, double> carbon_mass = {{12u, 12.0}, {13u, 13.003355000000001}};
buildElement_("Carbon", "C", 6u, carbon_abundance, carbon_mass);
map<unsigned int, double> nitrogen_abundance = {{14u, 0.9963200000000001}, {15u, 0.00368}};
map<unsigned int, double> nitrogen_mass = {{14u, 14.003074}, {15u, 15.000109}};
buildElement_("Nitrogen", "N", 7u, nitrogen_abundance, nitrogen_mass);
map<unsigned int, double> oxygen_abundance = {{16u, 0.9975700000000001}, {17u, 0.00037999999999999997}, {18u, 0.0020499999999999997}};
map<unsigned int, double> oxygen_mass = {{16u, 15.994915000000001}, {17u, 16.999132}, {18u, 17.999168999999998}};
buildElement_("Oxygen", "O", 8u, oxygen_abundance, oxygen_mass);
map<unsigned int, double> fluorine_abundance = {{19u, 1.0}};
map<unsigned int, double> fluorine_mass = {{19u, 18.99840322}};
buildElement_("Fluorine", "F", 9u, fluorine_abundance, fluorine_mass);
map<unsigned int, double> neon_abundance = {{20u, 0.9048}, {21u, 0.0027}, {22u, 0.0925}};
map<unsigned int, double> neon_mass = {{20u, 19.99244018}, {21u, 20.9938467}, {22u, 21.9913851}};
buildElement_("Neon", "Ne", 10u, neon_abundance, neon_mass);
map<unsigned int, double> sodium_abundance = {{23u, 1.0}};
map<unsigned int, double> sodium_mass = {{23u, 22.989769280899999}};
buildElement_("Sodium", "Na", 11u, sodium_abundance, sodium_mass);
map<unsigned int, double> magnesium_abundance = {{24u, 0.7898999999999999}, {25u, 0.1}, {26u, 0.1101}};
map<unsigned int, double> magnesium_mass = {{24u, 23.985042}, {25u, 24.985837}, {26u, 25.982593000000001}};
buildElement_("Magnesium", "Mg", 12u, magnesium_abundance, magnesium_mass);
map<unsigned int, double> aluminium_abundance = {{27u, 1.0}};
map<unsigned int, double> aluminium_mass = {{27u, 26.981538629999999}};
buildElement_("Aluminium", "Al", 13u, aluminium_abundance, aluminium_mass);
map<unsigned int, double> silicon_abundance = {{28u, 0.9220999999999999}, {29u, 0.0467}, {30u, 0.031}};
map<unsigned int, double> silicon_mass = {{28u, 27.976926532499999}, {29u, 28.9764947}, {30u, 29.973770170000002}};
buildElement_("Silicon", "Si", 14u, silicon_abundance, silicon_mass);
map<unsigned int, double> phosphorus_abundance = {{31u, 1.0}};
map<unsigned int, double> phosphorus_mass = {{31u, 30.973761490000001}};
buildElement_("Phosphorus", "P", 15u, phosphorus_abundance, phosphorus_mass);
map<unsigned int, double> sulfur_abundance = {{32u, 0.9493}, {33u, 0.0076}, {34u, 0.0429}, {36u, 0.0002}};
map<unsigned int, double> sulfur_mass = {{32u, 31.972070729999999}, {33u, 32.971457999999998}, {34u, 33.967866999999998}, {36u, 35.967081}};
buildElement_("Sulfur", "S", 16u, sulfur_abundance, sulfur_mass);
map<unsigned int, double> chlorine_abundance = {{35u, 0.7576}, {37u, 0.24239999999999998}};
map<unsigned int, double> chlorine_mass = {{35u, 34.968852679999998}, {37u, 36.965902589999999}};
buildElement_("Chlorine", "Cl", 17u, chlorine_abundance, chlorine_mass);
map<unsigned int, double> argon_abundance = {{36u, 0.003336}, {38u, 0.000629}, {40u, 0.996035}};
map<unsigned int, double> argon_mass = {{36u, 35.967545106000003}, {38u, 37.9627324}, {40u, 39.9623831225}};
buildElement_("Argon", "Ar", 18u, argon_abundance, argon_mass);
map<unsigned int, double> potassium_abundance = {{39u, 0.932581}, {40u, 0.000117}, {41u, 0.067302}};
map<unsigned int, double> potassium_mass = {{39u, 38.963706680000001}, {40u, 39.963998480000001}, {41u, 40.961825760000004}};
buildElement_("Potassium", "K", 19u, potassium_abundance, potassium_mass);
map<unsigned int, double> calcium_abundance = {{40u, 0.96941}, {42u, 0.00647}, {43u, 0.00135}, {44u, 0.02086}, {46u, 4e-05}, {48u, 0.00187}};
map<unsigned int, double> calcium_mass = {{40u, 39.962590980000002}, {42u, 41.958618010000002}, {43u, 42.958766599999997}, {44u, 43.955481800000001}, {46u, 45.953692599999997}, {48u, 47.952534}};
buildElement_("Calcium", "Ca", 20u, calcium_abundance, calcium_mass);
map<unsigned int, double> scandium_abundance = {{45u, 1.0}};
map<unsigned int, double> scandium_mass = {{45u, 44.955910000000003}};
buildElement_("Scandium", "Sc", 21u, scandium_abundance, scandium_mass);
map<unsigned int, double> titanium_abundance = {{46u, 0.0825}, {47u, 0.07440000000000001}, {48u, 0.7372}, {49u, 0.0541}, {50u, 0.0518}};
map<unsigned int, double> titanium_mass = {{46u, 45.952631599999997}, {47u, 46.951763100000001}, {48u, 47.947946299999998}, {49u, 48.947870000000002}, {50u, 49.944791199999997}};
buildElement_("Titanium", "Ti", 22u, titanium_abundance, titanium_mass);
map<unsigned int, double> vanadium_abundance = {{50u, 0.0025}, {51u, 0.9975}};
map<unsigned int, double> vanadium_mass = {{50u, 49.947158500000001}, {51u, 50.943959499999998}};
buildElement_("Vanadium", "V", 23u, vanadium_abundance, vanadium_mass);
map<unsigned int, double> chromium_abundance = {{50u, 0.043449999999999996}, {52u, 0.83789}, {53u, 0.09501}, {54u, 0.02365}};
map<unsigned int, double> chromium_mass = {{50u, 49.946044200000003}, {52u, 51.940507500000003}, {53u, 52.940649399999998}, {54u, 53.938880400000002}};
buildElement_("Chromium", "Cr", 24u, chromium_abundance, chromium_mass);
map<unsigned int, double> manganese_abundance = {{55u, 1.0}};
map<unsigned int, double> manganese_mass = {{55u, 54.938049999999997}};
buildElement_("Manganese", "Mn", 25u, manganese_abundance, manganese_mass);
map<unsigned int, double> ferrum_abundance = {{54u, 0.058449999999999995}, {56u, 0.91754}, {57u, 0.021191}, {58u, 0.002819}};
map<unsigned int, double> ferrum_mass = {{54u, 53.939610500000001}, {56u, 55.934937499999997}, {57u, 56.935394000000002}, {58u, 57.933275600000002}};
buildElement_("Ferrum", "Fe", 26u, ferrum_abundance, ferrum_mass);
map<unsigned int, double> cobalt_abundance = {{59u, 1.0}};
map<unsigned int, double> cobalt_mass = {{59u, 58.933194999999998}};
buildElement_("Cobalt", "Co", 27u, cobalt_abundance, cobalt_mass);
map<unsigned int, double> nickel_abundance = {{58u, 0.680169}, {60u, 0.262231}, {61u, 0.011399}, {62u, 0.036345}, {64u, 0.009256}};
map<unsigned int, double> nickel_mass = {{58u, 57.935347999999998}, {60u, 59.930790999999999}, {61u, 60.931060000000002}, {62u, 61.928348999999997}, {64u, 63.927970000000002}};
buildElement_("Nickel", "Ni", 28u, nickel_abundance, nickel_mass);
map<unsigned int, double> copper_abundance = {{63u, 0.6917}, {65u, 0.30829999999999996}};
map<unsigned int, double> copper_mass = {{63u, 62.929600999999998}, {65u, 64.927794000000006}};
buildElement_("Copper", "Cu", 29u, copper_abundance, copper_mass);
map<unsigned int, double> zinc_abundance = {{64u, 0.4863}, {66u, 0.27899999999999997}, {67u, 0.040999999999999995}, {68u, 0.1875}, {70u, 0.0062}};
map<unsigned int, double> zinc_mass = {{64u, 63.929147}, {66u, 65.926036999999994}, {67u, 66.927131000000003}, {68u, 67.924847999999997}, {70u, 69.925325000000001}};
buildElement_("Zinc", "Zn", 30u, zinc_abundance, zinc_mass);
map<unsigned int, double> gallium_abundance = {{69u, 0.60108}, {71u, 0.39892000000000005}};
map<unsigned int, double> gallium_mass = {{69u, 68.925573600000007}, {71u, 70.924701299999995}};
buildElement_("Gallium", "Ga", 31u, gallium_abundance, gallium_mass);
map<unsigned int, double> germanium_abundance = {{70u, 0.20379999999999998}, {72u, 0.2731}, {73u, 0.0776}, {74u, 0.36719999999999997}, {76u, 0.0776}};
map<unsigned int, double> germanium_mass = {{70u, 69.924247399999999}, {72u, 71.922075800000002}, {73u, 72.9234589}, {74u, 73.921177799999995}, {76u, 75.921401}};
buildElement_("Germanium", "Ge", 32u, germanium_abundance, germanium_mass);
map<unsigned int, double> arsenic_abundance = {{75u, 1.0}};
map<unsigned int, double> arsenic_mass = {{75u, 74.921596500000007}};
buildElement_("Arsenic", "As", 33u, arsenic_abundance, arsenic_mass);
map<unsigned int, double> selenium_abundance = {{74u, 0.00889}, {76u, 0.09366}, {77u, 0.07635}, {78u, 0.23772}, {80u, 0.49607}, {82u, 0.08731}};
map<unsigned int, double> selenium_mass = {{74u, 73.922476399999994}, {76u, 75.919213600000006}, {77u, 76.919914000000006}, {78u, 77.917309099999997}, {80u, 79.916521299999999}, {82u, 81.916699399999999}};
buildElement_("Selenium", "Se", 34u, selenium_abundance, selenium_mass);
map<unsigned int, double> bromine_abundance = {{79u, 0.5069}, {81u, 0.49310000000000004}};
map<unsigned int, double> bromine_mass = {{79u, 78.918337100000002}, {81u, 80.916290599999996}};
buildElement_("Bromine", "Br", 35u, bromine_abundance, bromine_mass);
map<unsigned int, double> krypton_abundance = {{78u, 0.0034999999999999996}, {80u, 0.0225}, {82u, 0.11599999999999999}, {83u, 0.115}, {84u, 0.57}, {86u, 0.17300000000000001}};
map<unsigned int, double> krypton_mass = {{78u, 77.920400000000001}, {80u, 79.916380000000004}, {82u, 81.913482000000002}, {83u, 82.914135000000002}, {84u, 83.911507}, {86u, 85.910616000000005}};
buildElement_("Krypton", "Kr", 36u, krypton_abundance, krypton_mass);
map<unsigned int, double> rubidium_abundance = {{85u, 0.7217}};
map<unsigned int, double> rubidium_mass = {{85u, 84.911789737999996}};
buildElement_("Rubidium", "Rb", 37u, rubidium_abundance, rubidium_mass);
map<unsigned int, double> strontium_abundance = {{84u, 0.005600000000000001}, {86u, 0.0986}, {87u, 0.07}, {88u, 0.8258}};
map<unsigned int, double> strontium_mass = {{84u, 83.913425000000004}, {86u, 85.909260730900002}, {87u, 86.908877497000006}, {88u, 87.905612257100003}};
buildElement_("Strontium", "Sr", 38u, strontium_abundance, strontium_mass);
map<unsigned int, double> yttrium_abundance = {{89u, 1.0}};
map<unsigned int, double> yttrium_mass = {{89u, 88.905850000000001}};
buildElement_("Yttrium", "Y", 39u, yttrium_abundance, yttrium_mass);
map<unsigned int, double> zirconium_abundance = {{90u, 0.5145000000000001}, {91u, 0.11220000000000001}, {92u, 0.17149999999999999}, {94u, 0.17379999999999998}, {96u, 0.0280}};
map<unsigned int, double> zirconium_mass = {{90u, 89.9047044}, {91u, 90.905645800000002}, {92u, 91.905040799999995}, {94u, 93.906315199999995}, {96u, 95.9082776}};
buildElement_("Zirconium", "Zr", 40u, zirconium_abundance, zirconium_mass);
map<unsigned int, double> nibium_abundance = {{93u, 1.0}};
map<unsigned int, double> nibium_mass = {{93u, 92.906378099999998}};
buildElement_("Nibium", "Nb", 41u, nibium_abundance, nibium_mass);
map<unsigned int, double> molybdenum_abundance = {{92u, 0.1484}, {94u, 0.0925}, {95u, 0.1592}, {96u, 0.1668}, {97u, 0.0955}, {98u, 0.2413}, {100u, 0.09630000000000001}};
map<unsigned int, double> molybdenum_mass = {{92u, 91.906809999999993}, {94u, 93.905088000000006}, {95u, 94.905840999999995}, {96u, 95.904679000000002}, {97u, 96.906020999999996}, {98u, 97.905407999999994}, {100u, 99.907477}};
buildElement_("Molybdenum", "Mo", 42u, molybdenum_abundance, molybdenum_mass);
// Technitium(Tc) abundance is not known.
map<unsigned int, double> ruthenium_abundance = {{96u, 0.0554}, {98u, 0.0187}, {99u, 0.1276}, {100u, 0.126}, {101u, 0.17059999999999997}, {102u, 0.3155}, {104u, 0.1862}};
map<unsigned int, double> ruthenium_mass = {{96u, 95.907597999999993}, {98u, 97.905287000000001}, {99u, 98.9059393}, {100u, 99.904219499999996}, {101u, 100.905582100000004}, {102u, 101.904349300000007}, {104u, 103.905433000000002}};
buildElement_("Ruthenium", "Ru", 44u, ruthenium_abundance, ruthenium_mass);
map<unsigned int, double> rhodium_abundance = {{103u, 1.0}};
map<unsigned int, double> rhodium_mass = {{103u, 102.905500000000004}};
buildElement_("Rhodium", "Rh", 45u, rhodium_abundance, rhodium_mass);
map<unsigned int, double> palladium_abundance = {{102u, 0.0102}, {104u, 0.1114}, {105u, 0.22329999999999997}, {106u, 0.2733}, {108u, 0.2646}, {110u, 0.11720000000000001}};
map<unsigned int, double> palladium_mass = {{102u, 101.905608999999998}, {104u, 103.904036000000005}, {105u, 104.905085}, {106u, 105.903486000000001}, {108u, 107.903891999999999}, {110u, 109.905152999999999}};
buildElement_("Palladium", "Pd", 46u, palladium_abundance, palladium_mass);
map<unsigned int, double> silver_abundance = {{107u, 0.51839}, {109u, 0.48161000000000004}};
map<unsigned int, double> silver_mass = {{107u, 106.905092999999994}, {109u, 108.904756000000006}};
buildElement_("Silver", "Ag", 47u, silver_abundance, silver_mass);
map<unsigned int, double> cadmium_abundance = {{106u, 0.0125}, {108u, 0.0089}, {110u, 0.1249}, {111u, 0.128}, {112u, 0.2413}, {113u, 0.1222}, {114u, 0.2873}, {116u, 0.07490000000000001}};
map<unsigned int, double> cadmium_mass = {{106u, 105.906458000000001}, {108u, 107.904184000000001}, {110u, 109.903002099999995}, {111u, 110.904178099999996}, {112u, 111.902757800000003}, {113u, 112.904401699999994}, {114u, 113.903358499999996}, {116u, 115.904756000000006}};
buildElement_("Cadmium", "Cd", 48u, cadmium_abundance, cadmium_mass);
map<unsigned int, double> indium_abundance = {{113u, 0.0429}, {115u, 0.9571}};
map<unsigned int, double> indium_mass = {{113u, 112.904060000000001}, {115u, 114.903878000000006}};
buildElement_("Indium", "In", 49u, indium_abundance, indium_mass);
map<unsigned int, double> tin_abundance = {{112u, 0.0097}, {114u, 0.0066}, {115u, 0.0034000000000000002}, {116u, 0.1454}, {117u, 0.0768}, {118u, 0.2422}, {119u, 0.0859}, {120u, 0.3258}, {122u, 0.0463}, {124u, 0.0579}};
map<unsigned int, double> tin_mass = {{112u, 111.904818000000006}, {114u, 113.902777900000004}, {115u, 114.903341999999995}, {116u, 115.901741000000001}, {117u, 116.902951999999999}, {118u, 117.901602999999994}, {119u, 118.903307999999996}, {120u, 119.902194699999996}, {122u, 121.903439000000006}, {124u, 123.905273899999997}};
buildElement_("Tin", "Sn", 50u, tin_abundance, tin_mass);
map<unsigned int, double> antimony_abundance = {{121u, 0.5721}, {123u, 0.4279}};
map<unsigned int, double> antimony_mass = {{121u, 120.903815699999996}, {123u, 122.904213999999996}};
buildElement_("Antimony", "Sb", 51u, antimony_abundance, antimony_mass);
map<unsigned int, double> tellurium_abundance = {{120u, 0.0009}, {122u, 0.0255}, {124u, 0.047400000000000005}, {125u, 0.0707}, {126u, 0.1884}, {128u, 0.31739999999999996}, {130u, 0.3408}};
map<unsigned int, double> tellurium_mass = {{120u, 119.904020000000003}, {122u, 121.9030439}, {124u, 123.902817900000002}, {125u, 124.904430700000006}, {126u, 125.903311700000003}, {128u, 127.904463100000001}, {130u, 129.906224400000014}};
buildElement_("Tellurium", "Te", 52u, tellurium_abundance, tellurium_mass);
map<unsigned int, double> iodine_abundance = {{127u, 1.0}};
map<unsigned int, double> iodine_mass = {{127u, 126.904472999999996}};
buildElement_("Iodine", "I", 53u, iodine_abundance, iodine_mass);
map<unsigned int, double> xenon_abundance = {{128u, 0.0191}, {129u, 0.264}, {130u, 0.040999999999999995}, {131u, 0.212}, {132u, 0.26899999999999996}, {134u, 0.10400000000000001}, {136u, 0.08900000000000001}};
map<unsigned int, double> xenon_mass = {{128u, 127.903531000000001}, {129u, 128.904779999999988}, {130u, 129.903509000000014}, {131u, 130.90507199999999}, {132u, 131.904144000000002}, {134u, 133.905394999999999}, {136u, 135.90721400000001}};
buildElement_("Xenon", "Xe", 54u, xenon_abundance, xenon_mass);
map<unsigned int, double> caesium_abundance = {{133u, 1.0}};
map<unsigned int, double> caesium_mass = {{133u, 132.905451932999995}};
buildElement_("Caesium", "Cs", 55u, caesium_abundance, caesium_mass);
map<unsigned int, double> barium_abundance = {{132u, 0.00101}, {134u, 0.024169999999999997}, {135u, 0.06591999999999999}, {136u, 0.07854}, {137u, 0.11231999999999999}, {138u, 0.71698}};
map<unsigned int, double> barium_mass = {{132u, 131.9050613}, {134u, 133.904508399999997}, {135u, 134.905688599999991}, {136u, 135.904575899999998}, {137u, 136.905827399999993}, {138u, 137.905247199999991}};
buildElement_("Barium", "Ba", 56u, barium_abundance, barium_mass);
map<unsigned int, double> lanthanum_abundance = {{138u, 0.00089}, {139u, 0.99911}};
map<unsigned int, double> lanthanum_mass = {{138u, 137.907112000000012}, {139u, 138.906353300000006}};
buildElement_("Lanthanum", "La", 57u, lanthanum_abundance, lanthanum_mass);
map<unsigned int, double> cerium_abundance = {{136u, 0.00185}, {138u, 0.00251}, {140u, 0.8845000000000001}, {142u, 0.11114}};
map<unsigned int, double> cerium_mass = {{136u, 135.907172000000003}, {138u, 137.905991}, {140u, 139.905438699999991}, {142u, 141.909244000000001}};
buildElement_("Cerium", "Ce", 58u, cerium_abundance, cerium_mass);
map<unsigned int, double> praseodymium_abundance = {{141u, 1.0}};
map<unsigned int, double> praseodymium_mass = {{141u, 140.907646999999997}};
buildElement_("Praseodymium", "Pr", 59u, praseodymium_abundance, praseodymium_mass);
map<unsigned int, double> neodymium_abundance = {{142u, 0.272}, {143u, 0.122}, {144u, 0.23800000000000002}, {145u, 0.083}, {146u, 0.172}, {148u, 0.057999999999999996}, {150u, 0.055999999999999994}};
map<unsigned int, double> neodymium_mass = {{142u, 141.907723299999987}, {143u, 142.909814299999994}, {144u, 143.910087299999987}, {145u, 144.912573600000002}, {146u, 145.913116900000006}, {148u, 147.916892999999988}, {150u, 149.920891000000012}};
buildElement_("Neodymium", "Nd", 60u, neodymium_abundance, neodymium_mass);
// Promethium(Pm) abundance is not known.
map<unsigned int, double> samarium_abundance = {{144u, 0.0308}, {147u, 0.15}, {148u, 0.1125}, {149u, 0.1382}, {150u, 0.0737}, {152u, 0.26739999999999997}, {154u, 0.2274}};
map<unsigned int, double> samarium_mass = {{144u, 143.911999000000009}, {147u, 146.9148979}, {148u, 147.914822700000002}, {149u, 148.917184700000007}, {150u, 149.917275499999988}, {152u, 151.919732399999987}, {154u, 153.92220929999999}};
buildElement_("Samarium", "Sm", 62u, samarium_abundance, samarium_mass);
map<unsigned int, double> europium_abundance = {{151u, 0.4781}, {153u, 0.5219}};
map<unsigned int, double> europium_mass = {{151u, 150.919857}, {153u, 152.921237}};
buildElement_("Europium", "Eu", 63u, europium_abundance, europium_mass);
map<unsigned int, double> gadolinium_abundance = {{152u, 0.002}, {154u, 0.0218}, {155u, 0.14800000000000002}, {156u, 0.2047}, {157u, 0.1565}, {158u, 0.2484}, {160u, 0.2186}};
map<unsigned int, double> gadolinium_mass = {{152u, 151.919791000000004}, {154u, 153.920865600000013}, {155u, 154.92262199999999}, {156u, 155.922122699999989}, {157u, 156.923960099999988}, {158u, 157.924103900000006}, {160u, 159.927054099999992}};
buildElement_("Gadolinium", "Gd", 64u, gadolinium_abundance, gadolinium_mass);
map<unsigned int, double> terbium_abundance = {{159u, 1.0}};
map<unsigned int, double> terbium_mass = {{159u, 158.925354}};
buildElement_("Terbium", "Tb", 65u, terbium_abundance, terbium_mass);
map<unsigned int, double> dysprosium_abundance = {{156u, 0.00056}, {158u, 0.00095}, {160u, 0.02329}, {161u, 0.18889}, {162u, 0.25475}, {163u, 0.24896}, {164u, 0.28260}};
map<unsigned int, double> dysprosium_mass = {{156u, 155.924284}, {158u, 157.92441}, {160u, 159.925203}, {161u, 160.926939}, {162u, 161.926804}, {163u, 162.928737}, {164u, 163.929181}};
buildElement_("Dysprosium", "Dy", 66u, dysprosium_abundance, dysprosium_mass);
map<unsigned int, double> holmium_abundance = {{165u, 1.0}};
map<unsigned int, double> holmium_mass = {{165u, 164.930328}};
buildElement_("Holmium", "Ho", 67u, holmium_abundance, holmium_mass);
map<unsigned int, double> erbium_abundance = {{162u, 0.00056}, {164u, 0.01601}, {166u, 0.33503}, {167u, 0.22869}, {168u, 0.26978}, {170u, 0.14910}};
map<unsigned int, double> erbium_mass = {{162u, 161.928787}, {164u, 163.929207}, {166u, 165.930299}, {167u, 166.932054}, {168u, 167.932376}, {170u, 169.93547}};
buildElement_("Erbium", "Er", 68u, erbium_abundance, erbium_mass);
map<unsigned int, double> thulium_abundance = {{169u, 1.0}};
map<unsigned int, double> thulium_mass = {{169u, 168.934218}};
buildElement_("Thulium", "Tm", 69u, thulium_abundance, thulium_mass);
map<unsigned int, double> ytterbium_abundance = {{168u, 0.00126}, {170u, 0.03023}, {171u, 0.14216}, {172u, 0.21754}, {173u, 0.16098}, {174u, 0.31896}, {176u, 0.12887}};
map<unsigned int, double> ytterbium_mass = {{168u, 167.933889}, {170u, 169.93476725}, {171u, 170.93633152}, {172u, 171.93638666}, {173u, 172.93821622}, {174u, 173.93886755}, {176u, 175.9425747}};
buildElement_("Ytterbium", "Yb", 70u, ytterbium_abundance, ytterbium_mass);
map<unsigned int, double> lutetium_abundance = {{175u, 0.97401}, {176u, 0.02599}};
map<unsigned int, double> lutetium_mass = {{175u, 174.940777}, {176u, 175.942692}};
buildElement_("Lutetium", "Lu", 71u, lutetium_abundance, lutetium_mass);
map<unsigned int, double> hafnium_abundance = {{176u, 0.0526}, {177u, 0.18600000000000003}, {178u, 0.2728}, {179u, 0.1362}, {180u, 0.3508}};
map<unsigned int, double> hafnium_mass = {{176u, 175.941408599999988}, {177u, 176.943220700000012}, {178u, 177.943698799999993}, {179u, 178.945816100000002}, {180u, 179.946550000000002}};
buildElement_("Hafnium", "Hf", 72u, hafnium_abundance, hafnium_mass);
map<unsigned int, double> tantalum_abundance = {{180u, 0.0001176}, {181u, 0.99988}};
map<unsigned int, double> tantalum_mass = {{180u, 179.94747}, {181u, 180.947995800000001}};
buildElement_("Tantalum", "Ta", 73u, tantalum_abundance, tantalum_mass);
map<unsigned int, double> tungsten_abundance = {{180u, 0.0012}, {182u, 0.265}, {183u, 0.1431}, {184u, 0.3064}, {186u, 0.2843}};
map<unsigned int, double> tungsten_mass = {{180u, 179.946704000000011}, {182u, 181.948204199999992}, {183u, 182.950222999999994}, {184u, 183.950930999999997}, {186u, 185.954364099999992}};
buildElement_("Tungsten", "W", 74u, tungsten_abundance, tungsten_mass);
map<unsigned int, double> rhenium_abundance = {{185u, 0.374}, {187u, 0.626}};
map<unsigned int, double> rhenium_mass = {{185u, 184.952955000000003}, {187u, 186.95575310000001}};
buildElement_("Rhenium", "Re", 75u, rhenium_abundance, rhenium_mass);
map<unsigned int, double> osmium_abundance = {{184u, 0.0002}, {186u, 0.0159}, {187u, 0.0196}, {188u, 0.1324}, {189u, 0.1615}, {190u, 0.2626}, {192u, 0.4078}};
map<unsigned int, double> osmium_mass = {{184u, 183.952493}, {186u, 185.953838}, {187u, 186.955750}, {188u, 187.955837}, {189u, 188.958146}, {190u, 189.958446}, {192u, 191.96148}};
buildElement_("Osmium", "Os", 76u, osmium_abundance, osmium_mass);
map<unsigned int, double> iridium_abundance = {{191u, 0.3723}, {193u, 0.6277}};
map<unsigned int, double> iridium_mass = {{191u, 190.960591}, {193u, 192.962924}};
// BUGFIX: the rhenium maps were passed here before, so "Ir" was registered with
// Re isotope data (185/187) instead of its own isotopes (191/193) defined above.
buildElement_("Iridium", "Ir", 77u, iridium_abundance, iridium_mass);
// Pt-190 is radioactive but with a very long half-life. Since its natural occurence is very low, we neglect it by default (m=189.959930 abund.frac.=0.00014)
// TODO re-evaluate inclusion?
map<unsigned int, double> platinum_abundance = {{192u, 0.00782}, {194u, 0.32966999999999996}, {195u, 0.33832}, {196u, 0.25242000000000003}, {198u, 0.07163}};
map<unsigned int, double> platinum_mass = {{192u, 191.961038000000002}, {194u, 193.962680299999988}, {195u, 194.964791100000014}, {196u, 195.964951500000012}, {198u, 197.967893000000004}};
buildElement_("Platinum", "Pt", 78u, platinum_abundance, platinum_mass);
map<unsigned int, double> gold_abundance = {{197u, 1.0}};
map<unsigned int, double> gold_mass = {{197u, 196.96655100000001}};
buildElement_("Gold", "Au", 79u, gold_abundance, gold_mass);
map<unsigned int, double> mercury_abundance = {{196u, 0.0015}, {198u, 0.09970000000000001}, {199u, 0.16870000000000002}, {200u, 0.231}, {201u, 0.1318}, {202u, 0.2986}, {204u, 0.0687}};
map<unsigned int, double> mercury_mass = {{196u, 195.965833000000004}, {198u, 197.966768999999999}, {199u, 198.968279899999999}, {200u, 199.968325999999991}, {201u, 200.970302299999986}, {202u, 201.970642999999996}, {204u, 203.973493899999994}};
buildElement_("Mercury", "Hg", 80u, mercury_abundance, mercury_mass);
map<unsigned int, double> thallium_abundance = {{203u, 0.2952}, {205u, 0.7048000000000001}};
map<unsigned int, double> thallium_mass = {{203u, 202.972344200000009}, {205u, 204.97442749999999}};
buildElement_("Thallium", "Tl", 81u, thallium_abundance, thallium_mass);
map<unsigned int, double> lead_abundance = {{204u, 0.013999999999999999}, {206u, 0.24100000000000002}, {207u, 0.221}, {208u, 0.524}};
map<unsigned int, double> lead_mass = {{204u, 203.973043600000011}, {206u, 205.974465299999991}, {207u, 206.975896900000009}, {208u, 207.976653800000008}};
buildElement_("Lead", "Pb", 82u, lead_abundance, lead_mass);
map<unsigned int, double> bismuth_abundance = {{209u, 1.0}};
map<unsigned int, double> bismuth_mass = {{209u, 208.980398699999995}};
buildElement_("Bismuth", "Bi", 83u, bismuth_abundance, bismuth_mass);
// Polonium (Po) abundance is not known.
// Astatine(At) abundance is not known.
// Radon(Rn) abundance is not known.
// Radium(Ra) abundance is not known.
map<unsigned int, double> thorium_abundance = {{230u, 0.0002}, {232u, 0.9998}};
map<unsigned int, double> thorium_mass = {{230u, 230.033133800000002}, {232u, 232.038055299999996}};
buildElement_("Thorium", "Th", 90u, thorium_abundance, thorium_mass);
map<unsigned int, double> protactinium_abundance = {{231u, 1.0}};
map<unsigned int, double> protactinium_mass = {{231u, 231.03588}};
buildElement_("Protactinium", "Pa", 91u, protactinium_abundance, protactinium_mass);
map<unsigned int, double> uranium_abundance = {{234u, 0.000054}, {235u, 0.007204}, {238u, 0.992742}};
map<unsigned int, double> uranium_mass = {{234u, 234.040950}, {235u, 235.043928}, {238u, 238.05079}};
buildElement_("Uranium", "U", 92u, uranium_abundance, uranium_mass);
// special case for deuterium and tritium: add symbol alias
const Element* deuterium = getElement("(2)H");
symbols_["D"] = deuterium;
const Element* tritium = getElement("(3)H");
symbols_["T"] = tritium;
// Pu, Am, Cm, Bk, Cf, Es, Fm, Md, No, Lr, Rf, Db, Sg, Bh, Hs, Mt, Ds, Rg, Cn, Nh, Fl, Mc, Lv, Ts and Og Abundances are not known.
}
void ElementDB::buildElement_(const string& name, const string& symbol, const unsigned int an, const map<unsigned int, double>& abundance, const map<unsigned int, double>& mass)
{
IsotopeDistribution isotopes = parseIsotopeDistribution_(abundance, mass);
double avg_weight = calculateAvgWeight_(abundance, mass);
double mono_weight = calculateMonoWeight_(abundance, mass);
addElementToMaps_(name, symbol, an, make_unique<const Element>(name, symbol, an, avg_weight, mono_weight, isotopes));
storeIsotopes_(name, symbol, an, mass, isotopes);
}
// Replace the contents of an existing Element ('old') with those of 'new_e' while
// keeping the memory address stable, so pointers handed out earlier remain valid.
// The element's identity (symbol, name, atomic number) must not change, otherwise
// the lookup maps would silently disagree with the stored objects.
void overwrite(const Element* old, unique_ptr<const Element>& new_e)
{
  if (old->getSymbol() != new_e->getSymbol())
  { // -- this would invalidate the lookup, since e_ptr->getSymbols().at("O")->getSymbol() == 'P'
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, new_e->getSymbol(),
                                  "Replacing element with name " + old->getName() + " and symbol " + old->getSymbol() + " has different new symbol: " + new_e->getSymbol());
  }
  if (old->getName() != new_e->getName())
  { // -- this would invalidate the lookup, since e_ptr->getName().at("Oxygen")->getName() == 'Something'
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, new_e->getSymbol(), "Replacing element with name " + old->getName() + " has different new name: " + new_e->getName());
  }
  if (old->getAtomicNumber() != new_e->getAtomicNumber())
  { // -- this would invalidate the lookup, since e_ptr->getAtomicNumbers().at(12)->getAtomicNumber() == 14
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, new_e->getSymbol(),
                                  "Replacing element with atomic number " + String(old->getAtomicNumber()) + " has different new atomic number: " + String(new_e->getAtomicNumber()));
  }
  // identity verified: copy the new state into the existing (const-stored) object
  *(const_cast<Element*>(old)) = *new_e;
}
// Register an element under its name, symbol and atomic number; if an element with
// this atomic number already exists, it is updated in place so that previously
// handed-out pointers stay valid.
void ElementDB::addElementToMaps_(const string& name, const string& symbol, const unsigned int an, unique_ptr<const Element> e)
{
  // find() has to be protected here in a parallel context
  const auto existing = atomic_numbers_.find(an);
  if (existing != atomic_numbers_.end())
  {
    // in order to ensure that existing elements are still valid and memory
    // addresses do not change, modify the Element in place instead of replacing it
    overwrite(existing->second, e);
    // 'e' is intentionally NOT released: it is freed when it goes out of scope
  }
  else
  {
    addIfUniqueOrThrow(names_, name, e);
    addIfUniqueOrThrow(symbols_, symbol, e);
    addIfUniqueOrThrow(atomic_numbers_, an, e);
    e.release(); // ownership is now tracked via the maps; freed in ~ElementDB
  }
}
// Register every isotope of an element as a standalone Element entry (named e.g.
// "(133)Caesium" / "(133)Cs"), so individual isotopes can be looked up by
// name/symbol like regular elements.
void ElementDB::storeIsotopes_(const string& name, const string& symbol, const unsigned int an, const map<unsigned int, double>& mass, const IsotopeDistribution& isotopes)
{
  for (const auto& isotope : isotopes)
  {
    double atomic_mass = isotope.getMZ();
    // nearest integer of the isotope mass is its mass number (key into 'mass')
    unsigned int mass_number = std::round(atomic_mass);
    string iso_name = "(" + std::to_string(mass_number) + ")" + name;
    string iso_symbol = "(" + std::to_string(mass_number) + ")" + symbol;
    // set avg and mono to same value for isotopes (old hack...)
    double iso_avg_weight = mass.at(mass_number);
    double iso_mono_weight = iso_avg_weight;
    // a single isotope gets a trivial distribution: one peak with abundance 1.0
    IsotopeDistribution iso_isotopes;
    IsotopeDistribution::ContainerType iso_container;
    iso_container.push_back(Peak1D(atomic_mass, 1.0));
    iso_isotopes.set(iso_container);
    auto iso_element = make_unique<const Element>(iso_name, iso_symbol, an, iso_avg_weight, iso_mono_weight, iso_isotopes);
    if (auto has_elem = names_.find(iso_name); has_elem != names_.end())
    { // already exists: overwrite in place (affects all maps, since they all point to the same object)
      overwrite(has_elem->second, iso_element);
      // do not release 'iso_element' here; it needs to be deleted when it goes out of scope
    }
    else
    {
      addIfUniqueOrThrow(names_, iso_name, iso_element);
      addIfUniqueOrThrow(symbols_, iso_symbol, iso_element);
      iso_element.release(); // allocation will be cleaned up by ~ElementDB now
    }
  }
}
// Build an IsotopeDistribution from parallel maps of mass number -> abundance
// and mass number -> exact mass.
IsotopeDistribution ElementDB::parseIsotopeDistribution_(const map<unsigned int, double>& abundance, const map<unsigned int, double>& mass)
{
  IsotopeDistribution::ContainerType peaks;
  peaks.reserve(abundance.size());
  for (const auto& [mass_number, abundance_fraction] : abundance)
  {
    // pair each abundance with the exact mass of the same mass number
    peaks.push_back(Peak1D(mass.at(mass_number), abundance_fraction));
  }
  IsotopeDistribution result;
  result.set(peaks);
  return result;
}
// Free all Element allocations and empty the lookup maps.
void ElementDB::clear_()
{
  // names_ holds the union of all Element pointers (incl. per-isotope entries);
  // deleting via names_ alone therefore frees every allocation exactly once
  for (const auto& entry : names_)
  {
    delete entry.second;
  }
  names_.clear();
  symbols_.clear();
  atomic_numbers_.clear();
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/DecoyGenerator.cpp | .cpp | 7,412 | 220 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/AASequence.h>
#include <OpenMS/CHEMISTRY/ProteaseDigestion.h>
#include <OpenMS/CHEMISTRY/DecoyGenerator.h>
#include <OpenMS/CONCEPT/Macros.h>
#include <chrono>
#include <algorithm>
using namespace OpenMS;
// Default constructor: seed the internal shuffler from the current time, so every
// DecoyGenerator instance produces a different shuffle sequence by default.
DecoyGenerator::DecoyGenerator()
{
  const auto now = std::chrono::high_resolution_clock::now();
  shuffler_.seed(static_cast<UInt64>(now.time_since_epoch().count()));
}
// Set the seed of the internal random shuffler (for reproducible decoy generation)
void DecoyGenerator::setSeed(UInt64 seed)
{
  shuffler_.seed(seed);
}
// Return the full reverse of the (unmodified) protein sequence.
AASequence DecoyGenerator::reverseProtein(const AASequence& protein) const
{
  OPENMS_PRECONDITION(!protein.isModified(), "Decoy generation only supports unmodified proteins.")
  const String forward = protein.toUnmodifiedString();
  const std::string reversed(forward.rbegin(), forward.rend());
  return AASequence::fromString(String(reversed));
}
// Reverse the protein peptide-wise: digest with 'protease' (no missed cleavages),
// reverse each peptide while keeping its C-terminal residue (the cutting site) in
// place, and fully reverse the last peptide (which has no cutting site at its end).
// @param protein  unmodified protein sequence
// @param protease name of the digestion enzyme
// @return concatenation of the pseudo-reversed peptides as a new AASequence
AASequence DecoyGenerator::reversePeptides(const AASequence& protein, const String& protease) const
{
  OPENMS_PRECONDITION(!protein.isModified(), "Decoy generation only supports unmodified proteins.")
  std::vector<AASequence> peptides;
  ProteaseDigestion ed;
  ed.setMissedCleavages(0); // important as we want to reverse between all cutting sites
  ed.setEnzyme(protease);
  ed.setSpecificity(EnzymaticDigestion::SPEC_FULL);
  ed.digest(protein, peptides);
  // BUGFIX: guard against an empty digest result (e.g. empty protein); indexing
  // peptides[peptides.size() - 1] below would otherwise be undefined behavior
  if (peptides.empty())
  {
    return AASequence();
  }
  String pseudo_reversed;
  for (Size i = 0; i + 1 < peptides.size(); ++i)
  {
    std::string s = peptides[i].toUnmodifiedString();
    // don't reverse the enzymatic cutting site (last residue)
    std::reverse(s.begin(), s.end() - 1);
    pseudo_reversed += s;
  }
  // the last peptide of a protein is not an enzymatic cutting site so we do a full reverse
  std::string s = peptides.back().toUnmodifiedString();
  std::reverse(s.begin(), s.end());
  pseudo_reversed += s;
  return AASequence::fromString(pseudo_reversed);
}
// Generate 'decoy_factor' decoy protein sequences: digest the protein with
// 'protease' (no missed cleavages), shuffle each peptide and concatenate the
// results back into a full-length decoy protein.
std::vector<AASequence> DecoyGenerator::shuffle(const AASequence& protein, const String& protease, int decoy_factor)
{
  OPENMS_PRECONDITION(!protein.isModified(), "Decoy generation only supports unmodified proteins.");
  ProteaseDigestion digestor;
  digestor.setEnzyme(protease);
  digestor.setMissedCleavages(0); // for decoy generation disable missed cleavages
  digestor.setSpecificity(EnzymaticDigestion::SPEC_FULL);
  std::vector<AASequence> output;
  digestor.digest(protein, output);
  // generate decoy_factor number of complete decoy proteins
  std::vector<AASequence> decoy_proteins;
  for (int variant = 0; variant < decoy_factor; ++variant)
  {
    String decoy_sequence;
    for (const auto & aas : output)
    {
      // peptides of length <= 2 cannot be meaningfully shuffled; keep them as-is
      if (aas.size() <= 2)
      {
        decoy_sequence += aas.toUnmodifiedString();
        continue;
      }
      // Important: create DecoyGenerator instance per peptide with same seed
      // Otherwise same peptides end up creating different decoys -> much more decoys than targets
      // But: we add variant to seed to get different decoys in multiple decoy generation
      DecoyGenerator dg;
      dg.setSeed(4711 + variant); // + variant to get different decoys in multiple decoy generation
      decoy_sequence += dg.shufflePeptides(aas, protease).toUnmodifiedString();
    }
    decoy_proteins.push_back(AASequence::fromString(decoy_sequence));
  }
  return decoy_proteins;
}
// Shuffle each peptide of 'protein' (digested with 'protease', no missed
// cleavages), keeping the C-terminal cutting-site residue of all but the last
// peptide fixed. For each peptide, up to 'max_attempts' shuffles are tried and
// the one with the lowest sequence identity to the target is kept. Results are
// cached (target -> decoy) so identical peptides always map to the same decoy;
// cache access is guarded by OpenMP named critical sections for parallel use.
AASequence DecoyGenerator::shufflePeptides(
  const AASequence& protein,
  const String& protease,
  const int max_attempts)
{
  OPENMS_PRECONDITION(!protein.isModified(), "Decoy generation only supports unmodified proteins.");
  std::vector<AASequence> peptides;
  ProteaseDigestion ed;
  ed.setMissedCleavages(0); // important as we want to reverse between all cutting sites
  ed.setEnzyme(protease);
  ed.setSpecificity(EnzymaticDigestion::SPEC_FULL);
  ed.digest(protein, peptides);
  String protein_shuffled;
  // all peptides except the last one: keep the cutting site in place
  for (int i = 0; i < static_cast<int>(peptides.size()) - 1; ++i)
  {
    const std::string peptide_string = peptides[i].toUnmodifiedString();
    // add from cache if available
    bool cached(false);
    #pragma omp critical (td_cache_)
    {
      auto it = td_cache_.find(peptide_string);
      if (it != td_cache_.end())
      {
        protein_shuffled += it->second; // add if cached
        cached = true;
      }
    }
    if (cached) continue;
    String peptide_string_shuffled = peptide_string;
    auto last = --peptide_string_shuffled.end(); // exclude the cutting site from shuffling
    double lowest_identity(1.0);
    String lowest_identity_string(peptide_string_shuffled);
    // NOTE(review): the inner loop variable 'i' shadows the outer peptide index 'i'
    for (int i = 0; i < max_attempts; ++i) // try to find sequence with low identity
    {
      shuffler_.portable_random_shuffle(std::begin(peptide_string_shuffled), last);
      double identity = SequenceIdentity_(peptide_string_shuffled, peptide_string);
      if (identity < lowest_identity)
      {
        lowest_identity = identity;
        lowest_identity_string = peptide_string_shuffled;
        if (identity <= (1.0/peptide_string_shuffled.size() + 1e-6))
        {
          break; // found perfect shuffle (only 1 (=cutting site) of all AAs match)
        }
      }
    }
    protein_shuffled += lowest_identity_string;
    #pragma omp critical (td_cache_)
    {
      td_cache_[peptide_string] = lowest_identity_string;
    }
  }
  // the last peptide of a protein is not an enzymatic cutting site so we do a full shuffle
  const std::string peptide_string = peptides[peptides.size() - 1 ].toUnmodifiedString();
  bool cached(false);
  #pragma omp critical (td_cache_)
  {
    auto it = td_cache_.find(peptide_string);
    if (it != td_cache_.end())
    {
      protein_shuffled += it->second; // add if cached
      cached = true;
    }
  }
  if (cached) return AASequence::fromString(protein_shuffled);
  String peptide_string_shuffled = peptide_string;
  double lowest_identity(1.0);
  String lowest_identity_string(peptide_string_shuffled);
  for (int i = 0; i < max_attempts; ++i) // try to find sequence with low identity
  {
    shuffler_.portable_random_shuffle(std::begin(peptide_string_shuffled), std::end(peptide_string_shuffled));
    double identity = SequenceIdentity_(peptide_string_shuffled, peptide_string);
    if (identity < lowest_identity)
    {
      lowest_identity = identity;
      lowest_identity_string = peptide_string_shuffled;
      if (identity == 0)
      {
        break; // found best shuffle
      }
    }
  }
  protein_shuffled += lowest_identity_string;
  #pragma omp critical (td_cache_)
  {
    td_cache_[peptide_string] = lowest_identity_string;
  }
  return AASequence::fromString(protein_shuffled);
}
// static
double DecoyGenerator::SequenceIdentity_(const String& decoy, const String& target)
{
int match = 0;
for (Size i = 0; i < target.size(); ++i)
{
if (target[i] == decoy[i]) { ++match; }
}
double identity = (double) match / target.size();
// also compare against reverse
match = 0;
for (int i = (int)target.size() - 1; i >= 0; --i)
{
int j = (int)target.size() - 1 - i;
if (target[j] == decoy[i]) { ++match; }
}
double rev_identity = (double) match / target.size();
return std::max(identity, rev_identity);
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/RNaseDigestion.cpp | .cpp | 7,491 | 232 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow, Xiao Liang $
// $Authors: Marc Sturm, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/RNaseDB.h>
#include <OpenMS/CHEMISTRY/RNaseDigestion.h>
#include <OpenMS/CHEMISTRY/RibonucleotideDB.h>
using namespace std;
namespace OpenMS
{
// Set the digestion enzyme and derive the RNase-specific state from it:
// the 5'/3' terminal gains (looked up in RibonucleotideDB) and the regular
// expressions defining the cleavage specificity.
void RNaseDigestion::setEnzyme(const DigestionEnzyme* enzyme)
{
  EnzymaticDigestion::setEnzyme(enzyme);
  // NOTE(review): the cast result is dereferenced below without a null check -
  // this assumes 'enzyme' is always a DigestionEnzymeRNA (e.g. from RNaseDB);
  // a non-RNA enzyme would cause a null-pointer dereference. TODO confirm/guard.
  const DigestionEnzymeRNA* rnase =
    dynamic_cast<const DigestionEnzymeRNA*>(enzyme_);
  // expand shorthand gain codes from the enzyme definition into the codes used
  // by RibonucleotideDB; other non-empty codes are wrapped in brackets
  String five_prime_code = rnase->getFivePrimeGain();
  if (five_prime_code == "p")
  {
    five_prime_code = "5'-p";
  }
  String three_prime_code = rnase->getThreePrimeGain();
  if (three_prime_code == "p")
  {
    three_prime_code = "3'-p";
  }
  else if (three_prime_code == "c")
  {
    three_prime_code = "3'-c";
  }
  else if (three_prime_code != "")
  {
    three_prime_code = '['+three_prime_code+']';
  }
  static RibonucleotideDB* ribo_db = RibonucleotideDB::getInstance();
  // an empty code means no terminal gain on that side (nullptr)
  five_prime_gain_ = five_prime_code.empty() ?
    nullptr :
    ribo_db->getRibonucleotide(five_prime_code);
  three_prime_gain_ = three_prime_code.empty() ?
    nullptr :
    ribo_db->getRibonucleotide(three_prime_code);
  // split the comma-separated regex lists defining "cuts after"/"cuts before"
  cuts_after_regexes_.clear();
  cuts_before_regexes_.clear();
  StringList CAregexes, CBregexes;
  rnase->getCutsAfterRegEx().split(',', CAregexes);
  rnase->getCutsBeforeRegEx().split(',', CBregexes);
  for (auto it = std::begin(CAregexes); it != std::end(CAregexes); ++it)
  {
    cuts_after_regexes_.emplace_back(*it);
  }
  for (auto it = std::begin(CBregexes); it != std::end(CBregexes); ++it)
  {
    cuts_before_regexes_.emplace_back(*it);
  }
}
// Convenience overload: look up the enzyme by name in RNaseDB and delegate
void RNaseDigestion::setEnzyme(const String& enzyme_name)
{
  setEnzyme(RNaseDB::getInstance()->getEnzyme(enzyme_name));
}
// Compute (start, length) pairs of all digestion fragments of 'rna' whose length
// lies in [min_length, max_length] (0 meaning "no constraint"). Handles the
// special enzymes "no cleavage" and "unspecific cleavage", otherwise applies the
// cuts-after/cuts-before regexes configured via setEnzyme().
vector<pair<Size, Size>> RNaseDigestion::getFragmentPositions_(
  const NASequence& rna, Size min_length, Size max_length) const
{
  // normalize the length bounds
  if (min_length == 0)
  {
    min_length = 1;
  }
  if ((max_length == 0) || (max_length > rna.size()))
  {
    max_length = rna.size();
  }
  vector<pair<Size, Size>> result;
  // BUGFIX: if min_length exceeds max_length (e.g. min_length > rna.size()), no
  // fragment can qualify - bail out early. Previously the unspecific-cleavage
  // branch evaluated 'rna.size() - min_length' with unsigned underflow, turning
  // the loop bound into a huge number (quasi-endless loop).
  if (min_length > max_length)
  {
    return result;
  }
  if (enzyme_->getName() == NoCleavage) // no cleavage
  {
    Size length = rna.size();
    if ((length >= min_length) && (length <= max_length))
    {
      result.emplace_back(0, length);
    }
  }
  else if (enzyme_->getName() == UnspecificCleavage) // unspecific cleavage
  {
    result.reserve(rna.size() * (max_length - min_length + 1));
    for (Size i = 0; i <= rna.size() - min_length; ++i)
    {
      const Size right = std::min(i + max_length, rna.size());
      for (Size j = i + min_length; j <= right; ++j)
      {
        result.emplace_back(i, j - i);
      }
    }
  }
  else // proper enzyme cleavage
  {
    // collect all cleavage positions; 0 and rna.size() act as sentinels
    vector<Size> fragment_pos(1, 0);
    for (Size i = 1; i < rna.size(); ++i)
    {
      bool is_match = true;
      // can't match if we don't have enough bases before or after the site
      if (i < cuts_after_regexes_.size() || rna.size() - i < cuts_before_regexes_.size())
      {
        is_match = false;
      }
      // all "cuts after" regexes must match the bases directly upstream of position i
      for (auto it = cuts_after_regexes_.begin(); it != cuts_after_regexes_.end() && is_match; ++it)
      {
        if (!boost::regex_search(rna[i - cuts_after_regexes_.size() + (it - cuts_after_regexes_.begin())]->getCode(), *it))
        {
          is_match = false;
        }
      }
      // all "cuts before" regexes must match the bases at/after position i
      for (auto it = cuts_before_regexes_.begin(); it != cuts_before_regexes_.end() && is_match; ++it)
      {
        if (!boost::regex_search(rna[i + (it - cuts_before_regexes_.begin())]->getCode(), *it))
        {
          is_match = false;
        }
      }
      if (is_match)
      {
        fragment_pos.push_back(i);
      }
    }
    fragment_pos.push_back(rna.size());
    // "fragment_pos" has at least two elements (zero and "rna.size()"):
    // enumerate fragments between cleavage sites, allowing up to
    // 'missed_cleavages_' skipped sites per fragment
    for (Size start_it = 0; start_it < fragment_pos.size() - 1; ++start_it)
    {
      Size start_pos = fragment_pos[start_it];
      for (Size offset = 0; offset <= missed_cleavages_; ++offset)
      {
        Size end_it = start_it + offset + 1;
        if (end_it >= fragment_pos.size())
        {
          break;
        }
        Size end_pos = fragment_pos[end_it];
        Size length = end_pos - start_pos;
        if ((length >= min_length) && (length <= max_length))
        {
          result.emplace_back(start_pos, length);
        }
      }
    }
  }
  return result;
}
void RNaseDigestion::digest(const NASequence& rna, vector<NASequence>& output,
Size min_length, Size max_length) const
{
output.clear();
if (rna.empty())
return;
vector<pair<Size, Size>> positions = getFragmentPositions_(rna, min_length,
max_length);
for (const auto& pos : positions)
{
NASequence fragment = rna.getSubsequence(pos.first, pos.second);
if (pos.first > 0)
{
fragment.setFivePrimeMod(five_prime_gain_);
}
if (pos.first + pos.second < rna.size())
{
fragment.setThreePrimeMod(three_prime_gain_);
}
output.push_back(fragment);
}
}
// Digest all RNA parent sequences registered in 'id_data' and register each
// resulting fragment as an identified oligo, together with its match position
// (and neighboring bases) on the parent. Non-RNA parents are skipped.
void RNaseDigestion::digest(IdentificationData& id_data, Size min_length,
                            Size max_length) const
{
  for (IdentificationData::ParentSequenceRef parent_ref = id_data.getParentSequences().begin();
       parent_ref != id_data.getParentSequences().end(); ++parent_ref)
  {
    if (parent_ref->molecule_type != IdentificationData::MoleculeType::RNA)
    {
      continue;
    }
    NASequence rna = NASequence::fromString(parent_ref->sequence);
    vector<pair<Size, Size>> positions =
      getFragmentPositions_(rna, min_length, max_length);
    for (const auto& pos : positions)
    {
      // internal fragment ends receive the enzyme's 5'/3' gains (as in digest())
      NASequence fragment = rna.getSubsequence(pos.first, pos.second);
      if (pos.first > 0)
      {
        fragment.setFivePrimeMod(five_prime_gain_);
      }
      if (pos.first + pos.second < rna.size())
      {
        fragment.setThreePrimeMod(three_prime_gain_);
      }
      IdentificationData::IdentifiedOligo oligo(fragment);
      Size end_pos = pos.first + pos.second; // past-the-end position!
      // ParentMatch is constructed with "end_pos - 1", i.e. an inclusive end
      IdentificationData::ParentMatch match(pos.first, end_pos - 1);
      // record neighboring bases (or terminus markers) of the match
      match.left_neighbor = ((pos.first > 0) ?
                             rna[pos.first - 1]->getCode() :
                             IdentificationData::ParentMatch::LEFT_TERMINUS);
      match.right_neighbor = ((end_pos < rna.size()) ?
                              rna[end_pos]->getCode() :
                              IdentificationData::ParentMatch::RIGHT_TERMINUS);
      oligo.parent_matches[parent_ref].insert(match);
      id_data.registerIdentifiedOligo(oligo);
    }
  }
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/ModificationDefinitionsSet.cpp | .cpp | 12,665 | 386 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Andreas Bertsch $
// --------------------------------------------------------------------------
//
#include <OpenMS/CHEMISTRY/ModificationDefinitionsSet.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/CHEMISTRY/ResidueDB.h>
using namespace std;
namespace OpenMS
{
// Default constructor: empty modification set, max. modifications per peptide = 0
ModificationDefinitionsSet::ModificationDefinitionsSet() :
  max_mods_per_peptide_(0)
{
}

// Copy constructor (memberwise copy)
ModificationDefinitionsSet::ModificationDefinitionsSet(const ModificationDefinitionsSet& rhs) = default;

// Detailed constructor: initializes the set from lists of fixed and variable
// modification names (delegates to setModifications())
ModificationDefinitionsSet::ModificationDefinitionsSet(const StringList& fixed_modifications, const StringList& variable_modifications) :
  max_mods_per_peptide_(0)
{
  setModifications(fixed_modifications, variable_modifications);
}

ModificationDefinitionsSet::~ModificationDefinitionsSet() = default;
// Set the maximal number of modifications allowed per peptide
void ModificationDefinitionsSet::setMaxModifications(Size max_mod)
{
  max_mods_per_peptide_ = max_mod;
}

// Return the maximal number of modifications allowed per peptide
Size ModificationDefinitionsSet::getMaxModifications() const
{
  return max_mods_per_peptide_;
}

// Return the total number of modifications (variable + fixed)
Size ModificationDefinitionsSet::getNumberOfModifications() const
{
  return variable_mods_.size() + fixed_mods_.size();
}

// Return the number of fixed modifications
Size ModificationDefinitionsSet::getNumberOfFixedModifications() const
{
  return fixed_mods_.size();
}

// Return the number of variable modifications
Size ModificationDefinitionsSet::getNumberOfVariableModifications() const
{
  return variable_mods_.size();
}
// Add a single modification definition, routed into the fixed or variable
// container depending on its fixed/variable flag.
void ModificationDefinitionsSet::addModification(const ModificationDefinition& mod_def)
{
  auto& target = mod_def.isFixedModification() ? fixed_mods_ : variable_mods_;
  target.insert(mod_def);
}
void ModificationDefinitionsSet::setModifications(const set<ModificationDefinition>& mods)
{
fixed_mods_.clear();
variable_mods_.clear();
for (set<ModificationDefinition>::const_iterator it = mods.begin(); it != mods.end(); ++it)
{
if (it->isFixedModification())
{
fixed_mods_.insert(*it);
}
else
{
variable_mods_.insert(*it);
}
}
return;
}
// Convenience overload: splits the modification list strings into StringLists
// (via ListUtils::create) and delegates to the StringList overload
void ModificationDefinitionsSet::setModifications(const String& fixed_modifications, const String& variable_modifications)
{
  setModifications(ListUtils::create<String>(fixed_modifications), ListUtils::create<String>(variable_modifications));
}
void ModificationDefinitionsSet::setModifications(const StringList& fixed_modifications, const StringList& variable_modifications)
{
fixed_mods_.clear();
variable_mods_.clear();
for (StringList::const_iterator it = fixed_modifications.begin(); it != fixed_modifications.end(); ++it)
{
ModificationDefinition def(*it, true);
fixed_mods_.insert(def);
}
for (StringList::const_iterator it = variable_modifications.begin(); it != variable_modifications.end(); ++it)
{
ModificationDefinition def(*it, false);
variable_mods_.insert(def);
}
}
// Return the union of fixed and variable modification definitions.
set<ModificationDefinition> ModificationDefinitionsSet::getModifications() const
{
  set<ModificationDefinition> all_mods(fixed_mods_);
  all_mods.insert(variable_mods_.begin(), variable_mods_.end());
  return all_mods;
}
// Return the names of all modifications (variable and fixed) as a set.
set<String> ModificationDefinitionsSet::getModificationNames() const
{
  set<String> names;
  for (const ModificationDefinition& mod : variable_mods_)
  {
    names.insert(mod.getModificationName());
  }
  for (const ModificationDefinition& mod : fixed_mods_)
  {
    names.insert(mod.getModificationName());
  }
  return names;
}
// Write the fixed and variable modification names into the two output lists
// (existing content is discarded).
void ModificationDefinitionsSet::getModificationNames(StringList& fixed_modifications, StringList& variable_modifications) const
{
  fixed_modifications.clear();
  fixed_modifications.reserve(fixed_mods_.size());
  for (const ModificationDefinition& mod : fixed_mods_)
  {
    fixed_modifications.push_back(mod.getModificationName());
  }
  variable_modifications.clear();
  variable_modifications.reserve(variable_mods_.size());
  for (const ModificationDefinition& mod : variable_mods_)
  {
    variable_modifications.push_back(mod.getModificationName());
  }
}
// Return the set of fixed modification definitions
const set<ModificationDefinition>& ModificationDefinitionsSet::getFixedModifications() const
{
  return fixed_mods_;
}

// Return the set of variable modification definitions
const set<ModificationDefinition>& ModificationDefinitionsSet::getVariableModifications() const
{
  return variable_mods_;
}
// Return the names of all fixed modifications as a set.
set<String> ModificationDefinitionsSet::getFixedModificationNames() const
{
  set<String> names;
  for (const ModificationDefinition& mod : fixed_mods_)
  {
    names.insert(mod.getModificationName());
  }
  return names;
}
// Return the names of all variable modifications as a set.
set<String> ModificationDefinitionsSet::getVariableModificationNames() const
{
  set<String> names;
  for (const ModificationDefinition& mod : variable_mods_)
  {
    names.insert(mod.getModificationName());
  }
  return names;
}
// Assignment operator (self-assignment safe; memberwise copy)
ModificationDefinitionsSet& ModificationDefinitionsSet::operator=(const ModificationDefinitionsSet& rhs)
{
  if (this != &rhs)
  {
    variable_mods_ = rhs.variable_mods_;
    fixed_mods_ = rhs.fixed_mods_;
    max_mods_per_peptide_ = rhs.max_mods_per_peptide_;
  }
  return *this;
}
// Check whether 'peptide' is explainable by this modification set:
// (1) every fixed modification with a single-residue origin must be present on
//     all matching residues, and
// (2) every modification found on the peptide (residue or terminal) must be one
//     of the defined fixed or variable modifications.
bool ModificationDefinitionsSet::isCompatible(const AASequence& peptide) const
{
  set<String> var_names(getVariableModificationNames()), fixed_names(getFixedModificationNames());
  // no modifications present and needed
  if (fixed_names.empty() && !peptide.isModified())
  {
    return true;
  }
  // check whether the fixed modifications are fulfilled
  for (set<String>::const_iterator it1 = fixed_names.begin(); it1 != fixed_names.end(); ++it1)
  {
    String origin = ModificationsDB::getInstance()->getModification(*it1)->getOrigin();
    // only single 1lc amino acids are allowed
    // (i.e. fixed mods with non-residue origins are not enforced by this loop)
    if (origin.size() != 1) continue;
    for (AASequence::ConstIterator it2 = peptide.begin(); it2 != peptide.end(); ++it2)
    {
      if (origin == it2->getOneLetterCode())
      {
        // check whether the residue is modified (has to be)
        if (!it2->isModified())
        {
          return false;
        }
        // check whether the modification is the same
        if (ModificationsDB::getInstance()->getModification(*it1)->getId() != it2->getModificationName())
        {
          return false;
        }
      }
    }
  }
  // check whether other modifications than the variable are present
  for (AASequence::ConstIterator it = peptide.begin(); it != peptide.end(); ++it)
  {
    if (it->isModified())
    {
      String mod = it->getModification()->getFullId();
      if (var_names.find(mod) == var_names.end() &&
          fixed_names.find(mod) == fixed_names.end())
      {
        return false;
      }
    }
  }
  // terminal modifications must also be covered by the defined fixed/variable mods
  if (peptide.hasNTerminalModification())
  {
    String mod = peptide.getNTerminalModification()->getFullId();
    if (var_names.find(mod) == var_names.end() &&
        fixed_names.find(mod) == fixed_names.end())
    {
      return false;
    }
  }
  if (peptide.hasCTerminalModification())
  {
    String mod = peptide.getCTerminalModification()->getFullId();
    if (var_names.find(mod) == var_names.end() &&
        fixed_names.find(mod) == fixed_names.end())
    {
      return false;
    }
  }
  return true;
}
  // Two definition sets are equal if both modification sets and the
  // per-peptide limit match.
  bool ModificationDefinitionsSet::operator==(const ModificationDefinitionsSet& rhs) const
  {
    return variable_mods_ == rhs.variable_mods_ &&
           fixed_mods_ == rhs.fixed_mods_ &&
           max_mods_per_peptide_ == rhs.max_mods_per_peptide_;
  }
  // Inequality, defined via operator== to keep the two consistent.
  bool ModificationDefinitionsSet::operator!=(const ModificationDefinitionsSet& rhs) const
  {
    return !(*this == rhs);
  }
  // Helper for findMatches(): appends to 'matches' all modifications from
  // 'source' that fit the given residue, terminal specificity and mass
  // (within 'tolerance'), keyed by absolute mass error.
  void ModificationDefinitionsSet::addMatches_(multimap<double, ModificationDefinition>& matches, double mass, const String& residue, ResidueModification::TermSpecificity term_spec, const set<ModificationDefinition>& source, bool is_delta, double tolerance)
  {
    for (set<ModificationDefinition>::const_iterator it = source.begin();
         it != source.end(); ++it)
    {
      const ResidueModification& mod = it->getModification();
      // do the residues match? ('X' / '.' / empty act as wildcards)
      char origin = mod.getOrigin();
      if (!(residue.empty() || (origin == 'X') || (residue[0] == origin) ||
            (residue == ".") || (residue == "X"))) continue;
      // do the term specificities match? (NUMBER_OF_TERM_SPECIFICITY = any)
      if (!((term_spec == ResidueModification::NUMBER_OF_TERM_SPECIFICITY) ||
            (term_spec == mod.getTermSpecificity()))) continue;
      // do the masses match?
      double mass_error = tolerance;
      if (is_delta)
      {
        // 'mass' is a mass shift -> compare against the difference mass
        mass_error = fabs(mod.getDiffMonoMass() - mass);
        if (mass_error > tolerance) continue;
      }
      else
      {
        double mod_mass = mod.getMonoMass();
        if ((mod_mass <= 0) && !residue.empty())
        {
          // no absolute mass stored? - calculate it based on the residue
          // (see 'ModificationsDB::getBestModificationByMonoMass'):
          const Residue* res = ResidueDB::getInstance()->getResidue(residue);
          if (res == nullptr) continue;
          double weight = (res->getMonoWeight() -
                           res->getInternalToFull().getMonoWeight());
          mod_mass = mod.getDiffMonoMass() + weight;
        }
        mass_error = fabs(mod_mass - mass);
        if (mass_error > tolerance) continue;
      }
      matches.insert(make_pair(mass_error, *it));
    }
  }
void ModificationDefinitionsSet::findMatches(multimap<double, ModificationDefinition>& matches, double mass, const String& residue, ResidueModification::TermSpecificity term_spec, bool consider_fixed, bool consider_variable, bool is_delta, double tolerance) const
{
if (!consider_variable && !consider_fixed)
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No modifications to consider - set 'consider_variable' and/or 'consider_fixed' to true.");
}
matches.clear();
if (consider_fixed)
{
addMatches_(matches, mass, residue, term_spec, fixed_mods_, is_delta, tolerance);
}
if (consider_variable)
{
addMatches_(matches, mass, residue, term_spec, variable_mods_, is_delta, tolerance);
}
}
// @TODO: should this function handle "max_mods_per_peptide_" as well?
void ModificationDefinitionsSet::inferFromPeptides(const PeptideIdentificationList& peptides)
{
// amino acid (or terminus) -> set of modifications (incl. no mod. = 0):
map<String, set<const ResidueModification*> > mod_map;
for (const PeptideIdentification& pep : peptides)
{
for (const PeptideHit& hit : pep.getHits())
{
const AASequence& seq = hit.getSequence();
mod_map["N-term"].insert(seq.getNTerminalModification());
mod_map["C-term"].insert(seq.getCTerminalModification());
for (AASequence::ConstIterator seq_it = seq.begin();
seq_it != seq.end(); ++seq_it)
{
mod_map[seq_it->getOneLetterCode()].insert(seq_it->getModification());
}
}
}
fixed_mods_.clear();
variable_mods_.clear();
for (map<String, set<const ResidueModification*> >::const_iterator map_it =
mod_map.begin(); map_it != mod_map.end(); ++map_it)
{
set<const ResidueModification*>::const_iterator set_it =
map_it->second.begin();
// if there's only one mod, it's probably a fixed one:
if ((map_it->second.size() == 1) && (*set_it != 0))
{
ModificationDefinition mod_def(**set_it, true);
fixed_mods_.insert(mod_def);
}
else // variable mod(s)
{
for (; set_it != map_it->second.end(); ++set_it)
{
if (*set_it != 0)
{
ModificationDefinition mod_def(**set_it, false);
variable_mods_.insert(mod_def);
}
}
}
}
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/EmpiricalFormula.cpp | .cpp | 19,105 | 691 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow, Ahmed Khalil $
// $Authors: Andreas Bertsch, Chris Bielow $
// --------------------------------------------------------------------------
//
#include <OpenMS/CHEMISTRY/EmpiricalFormula.h>
#include <OpenMS/CHEMISTRY/Element.h>
#include <OpenMS/CHEMISTRY/ElementDB.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/IsotopePatternGenerator.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/IsotopeDistribution.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/CoarseIsotopePatternGenerator.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <boost/math/special_functions/binomial.hpp>
#include <iostream>
#include <algorithm>
using namespace std;
namespace OpenMS
{
  // Default constructor: empty formula, neutral charge.
  EmpiricalFormula::EmpiricalFormula() :
    charge_(0)
  {}
  // Construct from a textual formula, e.g. "C6H12O6" or "H2O+"; the parser
  // also extracts an optional trailing charge part.
  EmpiricalFormula::EmpiricalFormula(const String& formula)
  {
    charge_ = parseFormula_(formula_, formula);
  }
  // Construct a formula consisting of 'number' atoms of a single element
  // and the given net charge.
  EmpiricalFormula::EmpiricalFormula(SignedSize number, const Element* element, SignedSize charge)
  {
    formula_[element] = number;
    charge_ = charge;
  }
  // Defaulted destructor (formula map owns no raw resources).
  EmpiricalFormula::~EmpiricalFormula() = default;
double EmpiricalFormula::getMonoWeight() const
{
double weight = Constants::PROTON_MASS_U * charge_;
for (const auto& it : formula_)
{
weight += it.first->getMonoWeight() * (double)it.second;
}
return weight;
}
double EmpiricalFormula::getLightestIsotopeWeight() const
{
double weight = Constants::PROTON_MASS_U * charge_;
for (const auto& it : formula_)
{
// Isotopes should be filled sorted by mz in Elements, so we use
// the first element instead of getMin()
weight += it.first->getIsotopeDistribution()[0].getMZ() * (double)it.second;
}
return weight;
}
double EmpiricalFormula::getAverageWeight() const
{
double weight = Constants::PROTON_MASS_U * charge_;
for (const auto& it : formula_)
{
weight += it.first->getAverageWeight() * (double)it.second;
}
return weight;
}
  // Estimates the number of distinct isotopologues of this formula, counting
  // per element only isotopes with non-zero natural abundance.
  double EmpiricalFormula::calculateTheoreticalIsotopesNumber() const
  {
    double total = 1;
    for (const auto& element : formula_)
    {
      // count isotopes of this element that have non-zero abundance
      UInt non_trace_isotopes = 0;
      const auto& distr = element.first->getIsotopeDistribution();
      for (const auto& isotope : distr)
      {
        if (isotope.getIntensity() != 0)
        {
          non_trace_isotopes++;
        }
      }
      if (non_trace_isotopes>1 && element.second!=1)
      {
        // NOTE(review): boost's binomial_coefficient(n, k) requires k <= n and
        // throws otherwise; also, the count of multisets of k isotopes over n
        // atoms is usually C(n + k - 1, k - 1) - confirm this approximation
        // (and the behavior for element counts < #isotopes) is intended.
        total *= boost::math::binomial_coefficient<double>(UInt(element.second), non_trace_isotopes);
      }
      else
      {
        // single isotope or single atom: simple product
        total *= element.second*non_trace_isotopes;
      }
    }
    return total;
  }
  // Like estimateFromWeightAndComp(), but with an exact (given) number of
  // sulfur atoms; the sulfur mass is subtracted up front and the remainder
  // is approximated from the other elemental ratios.
  bool EmpiricalFormula::estimateFromWeightAndCompAndS(double average_weight, UInt S, double C, double H, double N, double O, double P)
  {
    const ElementDB* db = ElementDB::getInstance();
    double remaining_weight = average_weight - S * db->getElement("S")->getAverageWeight();
    // The number of sulfurs is set to 0 because we're explicitly specifying their count.
    // We propagate the return value to let the programmer know if the approximation succeeded
    // without requesting a negative number of hydrogens.
    bool ret = estimateFromWeightAndComp(remaining_weight, C, H, N, O, 0.0, P);
    // 'at' is safe here: estimateFromWeightAndComp() always inserts an "S"
    // entry (with count 0, since S ratio 0 was passed above).
    formula_.at(db->getElement("S")) = S;
    return ret;
  }
bool EmpiricalFormula::estimateFromWeightAndComp(double average_weight, double C, double H, double N, double O, double S, double P)
{
const ElementDB* db = ElementDB::getInstance();
double avgTotal = (C * db->getElement("C")->getAverageWeight() +
H * db->getElement("H")->getAverageWeight() +
N * db->getElement("N")->getAverageWeight() +
O * db->getElement("O")->getAverageWeight() +
S * db->getElement("S")->getAverageWeight() +
P * db->getElement("P")->getAverageWeight());
double factor = average_weight / avgTotal;
formula_.clear();
formula_.insert(make_pair(db->getElement("C"), (SignedSize) Math::round(C * factor)));
formula_.insert(make_pair(db->getElement("N"), (SignedSize) Math::round(N * factor)));
formula_.insert(make_pair(db->getElement("O"), (SignedSize) Math::round(O * factor)));
formula_.insert(make_pair(db->getElement("S"), (SignedSize) Math::round(S * factor)));
formula_.insert(make_pair(db->getElement("P"), (SignedSize) Math::round(P * factor)));
double remaining_mass = average_weight-getAverageWeight();
SignedSize adjusted_H = Math::round(remaining_mass / db->getElement("H")->getAverageWeight());
// It's possible for a very small mass to get a negative value here.
if (adjusted_H < 0)
{
// The approximation can still be useful, but we set the return flag to false to explicitly notify the programmer.
return false;
}
// Only insert hydrogens if their number is not negative.
formula_.insert(make_pair(db->getElement("H"), adjusted_H));
// The approximation had no issues.
return true;
}
bool EmpiricalFormula::estimateFromMonoWeightAndComp(double mono_weight, double C, double H, double N, double O, double S, double P)
{
const ElementDB* db = ElementDB::getInstance();
double monoTotal = (C * db->getElement("C")->getMonoWeight() +
H * db->getElement("H")->getMonoWeight() +
N * db->getElement("N")->getMonoWeight() +
O * db->getElement("O")->getMonoWeight() +
S * db->getElement("S")->getMonoWeight() +
P * db->getElement("P")->getMonoWeight());
double factor = mono_weight / monoTotal;
formula_.clear();
formula_.insert(make_pair(db->getElement("C"), (SignedSize) Math::round(C * factor)));
formula_.insert(make_pair(db->getElement("N"), (SignedSize) Math::round(N * factor)));
formula_.insert(make_pair(db->getElement("O"), (SignedSize) Math::round(O * factor)));
formula_.insert(make_pair(db->getElement("S"), (SignedSize) Math::round(S * factor)));
formula_.insert(make_pair(db->getElement("P"), (SignedSize) Math::round(P * factor)));
double remaining_mass = mono_weight-getMonoWeight();
SignedSize adjusted_H = Math::round(remaining_mass / db->getElement("H")->getMonoWeight());
// It's possible for a very small mass to get a negative value here.
if (adjusted_H < 0)
{
// The approximation can still be useful, but we set the return flag to false to explicitly notify the programmer.
return false;
}
// Only insert hydrogens if their number is not negative.
formula_.insert(make_pair(db->getElement("H"), adjusted_H));
// The approximation had no issues.
return true;
}
  // Compute the isotope distribution of this formula with the given pattern
  // generator (e.g. coarse or fine-grained).
  IsotopeDistribution EmpiricalFormula::getIsotopeDistribution(const IsotopePatternGenerator& solver) const
  {
    return solver.run(*this);
  }
  // Computes the isotope distribution of this fragment, conditioned on the
  // precursor isotopes that were isolated. '*this' is the fragment formula;
  // 'precursor - *this' is the complementary fragment.
  IsotopeDistribution EmpiricalFormula::getConditionalFragmentIsotopeDist(const EmpiricalFormula& precursor,
                                                                          const std::set<UInt>& precursor_isotopes,
                                                                          const CoarseIsotopePatternGenerator& solver) const
  {
    // A fragment's isotopes can only be as high as the largest isolated precursor isotope.
    UInt max_depth = *std::max_element(precursor_isotopes.begin(), precursor_isotopes.end())+1;
    // Treat *this as the fragment molecule
    EmpiricalFormula complementary_fragment = precursor-*this;
    IsotopeDistribution fragment_isotope_dist = getIsotopeDistribution(CoarseIsotopePatternGenerator(max_depth));
    IsotopeDistribution comp_fragment_isotope_dist = complementary_fragment.getIsotopeDistribution(CoarseIsotopePatternGenerator(max_depth));
    IsotopeDistribution result = solver.calcFragmentIsotopeDist(fragment_isotope_dist, comp_fragment_isotope_dist, precursor_isotopes, getMonoWeight());
    // Renormalize to make these conditional probabilities (conditioned on the isolated precursor isotopes)
    result.renormalize();
    return result;
  }
SignedSize EmpiricalFormula::getNumberOf(const Element* element) const
{
const auto& it = formula_.find(element);
if (it != formula_.end())
{
return it->second;
}
return 0;
}
SignedSize EmpiricalFormula::getNumberOfAtoms() const
{
SignedSize num_atoms(0);
for (const auto& it : formula_) num_atoms += it.second;
return num_atoms;
}
  // Set the net charge (does not alter the element counts).
  void EmpiricalFormula::setCharge(Int charge)
  {
    charge_ = charge;
  }
  // Get the net charge.
  Int EmpiricalFormula::getCharge() const
  {
    return charge_;
  }
  // Render the formula as a string of symbol/count pairs, sorted by element
  // symbol. Note: the count is always printed, even when it is 1 (e.g. "H1"),
  // unlike operator<< which omits counts of 1.
  String EmpiricalFormula::toString() const
  {
    String formula;
    auto formula_map = toMap();
    for (const auto& it : formula_map)
    {
      (formula += it.first) += String(it.second);
    }
    return formula;
  }
std::map<std::string, int> EmpiricalFormula::toMap() const
{
std::map<std::string, int> formula_map;
for (const auto & it : formula_)
{
formula_map[it.first->getSymbol()] = it.second;
}
return formula_map;
}
  // Multiply every atom count and the charge by 'times'; multiplying by 0
  // yields an empty (zero-pruned) formula.
  EmpiricalFormula EmpiricalFormula::operator*(const SignedSize& times) const
  {
    EmpiricalFormula ef(*this);
    for (const auto& it : formula_) ef.formula_[it.first] *= times;
    ef.charge_ *= times;
    ef.removeZeroedElements_();
    return ef;
  }
EmpiricalFormula EmpiricalFormula::operator+(const EmpiricalFormula& formula) const
{
EmpiricalFormula ef;
ef.formula_ = formula.formula_;
for (const auto& it : formula_)
{
auto ef_it = ef.formula_.find(it.first);
if (ef_it != ef.formula_.end())
{
ef_it->second += it.second;
}
else
{
ef.formula_.insert(it);
}
}
ef.charge_ = charge_ + formula.charge_;
ef.removeZeroedElements_();
return ef;
}
EmpiricalFormula& EmpiricalFormula::operator+=(const EmpiricalFormula& formula)
{
for (const auto& it : formula.formula_)
{
auto f_it = formula_.find(it.first);
if (f_it != formula_.end())
{
f_it->second += it.second;
}
else
{
formula_.insert(it);
}
}
charge_ += formula.charge_;
removeZeroedElements_();
return *this;
}
EmpiricalFormula EmpiricalFormula::operator-(const EmpiricalFormula& formula) const
{
EmpiricalFormula ef(*this);
for (const auto& it : formula.formula_)
{
const Element* e = it.first;
SignedSize num = it.second;
auto ef_it = ef.formula_.find(e);
if (ef_it != ef.formula_.end())
{
ef_it->second -= num;
}
else
{
ef.formula_[e] = -num;
}
}
ef.charge_ = charge_ - formula.charge_;
ef.removeZeroedElements_();
return ef;
}
EmpiricalFormula& EmpiricalFormula::operator-=(const EmpiricalFormula& formula)
{
for (const auto& it : formula.formula_)
{
auto f_it = formula_.find(it.first);
if (f_it != formula_.end())
{
f_it->second -= it.second;
}
else
{
formula_[it.first] = -it.second;
}
}
charge_ -= formula.charge_;
removeZeroedElements_();
return *this;
}
  // True if the net charge is non-zero.
  bool EmpiricalFormula::isCharged() const
  {
    return charge_ != 0;
  }
  // True if the formula contains no elements (charge is ignored).
  bool EmpiricalFormula::isEmpty() const
  {
    return formula_.empty();
  }
  // True if the formula has an entry for 'element' (count may be negative).
  bool EmpiricalFormula::hasElement(const Element* element) const
  {
    return formula_.find(element) != formula_.end();
  }
bool EmpiricalFormula::contains(const EmpiricalFormula& ef) const
{
for (const auto& it : ef)
{
if (this->getNumberOf(it.first) < it.second)
{
return false;
}
}
return true;
}
  // Equal if both element counts and net charge match.
  bool EmpiricalFormula::operator==(const EmpiricalFormula& formula) const
  {
    return formula_ == formula.formula_ && charge_ == formula.charge_;
  }
bool EmpiricalFormula::operator!=(const EmpiricalFormula& formula) const
{
return formula_ != formula.formula_ || charge_ != formula.charge_;
}
ostream& operator<<(ostream& os, const EmpiricalFormula& formula)
{
std::map<String, SignedSize> new_formula;
for (const auto& it : formula.formula_)
{
new_formula[it.first->getSymbol()] = it.second;
}
for (const auto& it : new_formula)
{
os << it.first;
if (it.second > 1) os << it.second;
}
if (formula.charge_ == 0)
{
return os;
}
if (formula.charge_ > 0)
{
if (formula.charge_ == 1)
{
os << "+";
}
else
{
os << "+" << formula.charge_;
}
}
else
{
if (formula.charge_ == -1)
{
os << "-";
}
else
{
os << "-" << formula.charge_;
}
}
return os;
}
  // Parses a textual formula like "C6H12O6", "H2O+" or "(13)C2H6" into the
  // element->count map 'ef' and returns the charge encoded in the trailing
  // "+n"/"-n" suffix (0 if none). Throws ParseError on malformed input.
  Int EmpiricalFormula::parseFormula_(std::map<const Element*, SignedSize>& ef, const String& input_formula) const
  {
    Int charge{0};
    String formula(input_formula);
    formula.trim();
    // we start with the charge part, read until the begin of the formula or a element symbol occurs
    String suffix;
    for (SignedSize reverse_i(formula.size() - 1); reverse_i >= 0; --reverse_i)
    {
      if (!isalpha(formula[reverse_i]))
      {
        suffix.insert(0,1, formula[reverse_i]); // pre-pend
      }
      else
      {
        break;
      }
    }
    // determine charge
    if (!suffix.empty())
    {
      // skip over the trailing digits of the last element count, up to the
      // first non-digit ('+'/'-') which starts the charge part
      Size i = 1;
      for (; i < suffix.size(); ++i)
      {
        if (!isdigit(suffix[i]))
        {
          break;
        }
      }
      if (i != suffix.size())
      {
        // we found the charge part
        String charge_str;
        for (Size j = i + 1; j < suffix.size(); ++j)
        {
          charge_str += suffix[j];
        }
        // a bare sign means charge magnitude 1
        Int tmp_charge = 1;
        if (!charge_str.empty())
        {
          tmp_charge = charge_str.toInt();
        }
        if (suffix[i] == '-')
        {
          charge = -1 * tmp_charge;
        }
        else
        {
          if (suffix[i] == '+')
          {
            charge = tmp_charge;
          }
          else
          {
            throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, formula, "Cannot parse charge part of formula!");
          }
        }
        // now remove the charge part from the formula
        formula.resize(formula.size() - charge_str.size() - 1);
      }
    }
    // special cases: formula ending in a bare '+', or a formula that is
    // nothing but a charge (e.g. "+2", "-")
    if (suffix.size() == 1 && suffix[0] == '+')
    {
      charge = 1;
      formula.resize(formula.size() - 1);
    }
    else if (suffix.size() == formula.size())
    {
      if (suffix.size() > 1)
      {
        if (suffix[0] == '-' || suffix[0] == '+')
        {
          charge = suffix.toInt();
          return charge;
        }
      }
      else
      {
        if (suffix == "-")
        {
          charge = -1;
          return charge;
        }
      }
    }
    // split the formula into per-element chunks (e.g. "C6", "H12", "(13)C2")
    std::vector<std::string> splitter;
    splitter.reserve(formula.size() / 2); // reasonable estimate for small formulae like C6H12O6
    if (!formula.empty())
    {
      if (!isdigit(formula[0]) || formula[0] == '(')
      {
        // a chunk starts at an upper-case letter (new element symbol) or at
        // '(' (isotope specification like "(13)C")
        bool is_isotope(false), is_symbol(false);
        bool char_is_upper, is_bracket;
        std::string split;
        for (const auto& curr : formula)
        {
          char_is_upper = isupper(curr);
          is_bracket = (curr == '(');
          if ((char_is_upper && (!is_isotope || is_symbol)) || is_bracket)
          {
            if (!split.empty())
            {
              splitter.push_back(std::move(split));
              is_isotope = false;
              is_symbol = false;
            }
            split = curr;
          }
          else
          {
            split += curr;
          }
          if (is_bracket)
          {
            is_isotope = true;
          }
          if (char_is_upper)
          {
            is_symbol = true;
          }
        }
        splitter.push_back(std::move(split));
      }
      else
      {
        throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, formula, "This formula does not begin with an element!");
      }
    }
    // add up the elements
    const ElementDB* db = ElementDB::getInstance();
    for (Size i = 0; i != splitter.size(); ++i)
    {
      const String& split = splitter[i];
      // separate the chunk into trailing count (may be negative) and symbol
      String number;
      String symbol;
      bool had_symbol(false);
      for (SignedSize j = split.size() - 1; j >= 0; --j)
      {
        if (!had_symbol && (isdigit(split[j]) || split[j] == '-'))
        {
          number.insert(0,1, split[j]); // pre-pend
        }
        else
        {
          symbol.insert(0,1, split[j]); // pre-pend
          had_symbol = true;
        }
      }
      // missing count means one atom
      SignedSize num(1);
      if (!number.empty())
      {
        num = number.toInt();
      }
      const Element* e = db->getElement(symbol);
      if (e != nullptr)
      {
        if (num != 0)
        {
          auto it = ef.find(e);
          if (it != ef.end())
          {
            it->second += num;
          }
          else
          {
            ef.insert(std::make_pair(e, num));
          }
        }
      }
      else
      {
        throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Unknown element '" + split + "'", "'" + symbol + "' found.");
      }
    }
    // remove elements with 0 counts
    auto it = ef.begin();
    while (it != ef.end())
    {
      if (it->second == 0)
      {
        ef.erase(it++); // Note: post increment needed! Otherwise iterator is invalidated
      }
      else
      {
        ++it;
      }
    }
    return charge;
  }
void EmpiricalFormula::removeZeroedElements_()
{
MapType_::iterator it = formula_.begin();
while (it != formula_.end())
{
if (it->second == 0)
{
formula_.erase(it++); // Note: post increment needed! Otherwise iterator is invalidated
}
else
{
++it;
}
}
}
  // Strict weak ordering for use in sorted containers: compares by number of
  // distinct elements first, then by charge, then by the element map itself.
  // This is an arbitrary but consistent order, not a chemical one.
  bool EmpiricalFormula::operator<(const EmpiricalFormula& rhs) const
  {
    if (formula_.size() != rhs.formula_.size())
    {
      return formula_.size() < rhs.formula_.size();
    }
    if (charge_ != rhs.charge_)
    {
      return charge_ < rhs.charge_;
    }
    return formula_ < rhs.formula_;
  }
  // Convenience factory: a neutral formula of 'n_atoms' hydrogen atoms
  // (element looked up by atomic number 1).
  EmpiricalFormula EmpiricalFormula::hydrogen(int n_atoms)
  {
    const ElementDB* db = ElementDB::getInstance();
    return EmpiricalFormula(n_atoms, db->getElement(1));
  }
  // Convenience factory: a neutral formula of 'n_molecules' water molecules
  // (elements looked up by atomic number: 1 = H, 8 = O).
  EmpiricalFormula EmpiricalFormula::water(int n_molecules)
  {
    const ElementDB* db = ElementDB::getInstance();
    EmpiricalFormula formula;
    formula.formula_[db->getElement(1)] = n_molecules * 2; // hydrogen
    formula.formula_[db->getElement(8)] = n_molecules; // oxygen
    return formula;
  }
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/RNaseDB.cpp | .cpp | 570 | 22 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hendrik Weisser $
// --------------------------------------------------------------------------
//
#include <OpenMS/CHEMISTRY/RNaseDB.h>
using namespace std;
namespace OpenMS
{
  // Singleton database of RNA-cleaving enzymes, populated from the bundled
  // Enzymes_RNA.xml file via the generic DigestionEnzymeDB base.
  RNaseDB::RNaseDB():
    DigestionEnzymeDB<DigestionEnzymeRNA, RNaseDB>("CHEMISTRY/Enzymes_RNA.xml")
  {
  }
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/DigestionEnzyme.cpp | .cpp | 4,992 | 200 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Xiao Liang $
// $Authors: Xiao Liang $
// --------------------------------------------------------------------------
//
#include <OpenMS/CHEMISTRY/DigestionEnzyme.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <iostream>
#include <utility>
using namespace std;
namespace OpenMS
{
  // Default constructor: placeholder name, no cleavage regex, no synonyms.
  DigestionEnzyme::DigestionEnzyme() :
    name_("unknown_enzyme"),
    cleavage_regex_(""),
    synonyms_(),
    regex_description_("")
  {
  }
  // Construct from an explicit, ready-made cleavage regex.
  DigestionEnzyme::DigestionEnzyme(const String& name,
                                   const String& cleavage_regex,
                                   const std::set<String>& synonyms,
                                   String regex_description) :
    name_(name),
    cleavage_regex_(cleavage_regex),
    synonyms_(synonyms),
    regex_description_(std::move(regex_description))
  {
  }
DigestionEnzyme::DigestionEnzyme(const String& name,
String cut_before,
const String& nocut_after,
String sense,
const std::set<String>& synonyms,
String regex_description) :
name_(name),
synonyms_(synonyms),
regex_description_(std::move(regex_description))
{
//TODO check if all letters are A-Z?
if (cut_before.empty())
{
//Maybe assertion?
throw Exception::MissingInformation(
__FILE__,
__LINE__,
OPENMS_PRETTY_FUNCTION,
"No cleavage position given when trying to construct a DigestionEnzyme.");
}
else if (!cut_before.hasSuffix("X"))
{
//TODO think about this
cut_before = cut_before + "X";
}
cleavage_regex_ = "";
if (sense.toLower() == "c")
{
cleavage_regex_ += "(?<=[" + cut_before + "]";
if (!nocut_after.empty())
{
cleavage_regex_ += "(?!" + nocut_after + "])";
}
}
else if (sense.toLower() == "n")
{
if (!nocut_after.empty())
{
cleavage_regex_ += "(?<![" + nocut_after + "])";
}
cleavage_regex_ += "(?=[" + cut_before + "]";
}
else
{
throw Exception::MissingInformation(
__FILE__,
__LINE__,
OPENMS_PRETTY_FUNCTION,
"Cannot infer cleavage sense when constructing DigestionEnzyme. Has to be N or C.");
}
}
  // Defaulted destructor (all members manage their own resources).
  DigestionEnzyme::~DigestionEnzyme() = default;
  // Set the enzyme name.
  void DigestionEnzyme::setName(const String& name)
  {
    name_ = name;
  }
  // Get the enzyme name.
  const String& DigestionEnzyme::getName() const
  {
    return name_;
  }
  // Replace the full set of synonyms.
  void DigestionEnzyme::setSynonyms(const set<String>& synonyms)
  {
    synonyms_ = synonyms;
  }
  // Add a single synonym (duplicates are ignored by the set).
  void DigestionEnzyme::addSynonym(const String& synonym)
  {
    synonyms_.insert(synonym);
  }
  // Read-only access to the synonyms.
  const set<String>& DigestionEnzyme::getSynonyms() const
  {
    return synonyms_;
  }
  // Set the cleavage regular expression.
  void DigestionEnzyme::setRegEx(const String& cleavage_regex)
  {
    cleavage_regex_ = cleavage_regex;
  }
  // Get the cleavage regular expression.
  const String& DigestionEnzyme::getRegEx() const
  {
    return cleavage_regex_;
  }
  // Set the human-readable description of the cleavage regex.
  void DigestionEnzyme::setRegExDescription(const String& value)
  {
    regex_description_ = value;
  }
  // Get the human-readable description of the cleavage regex.
  const String& DigestionEnzyme::getRegExDescription() const
  {
    return regex_description_;
  }
  // Full equality: name, synonyms, regex and regex description must match.
  bool DigestionEnzyme::operator==(const DigestionEnzyme& enzyme) const
  {
    return name_ == enzyme.name_ &&
           synonyms_ == enzyme.synonyms_ &&
           cleavage_regex_ == enzyme.cleavage_regex_ &&
           regex_description_ == enzyme.regex_description_;
  }
  // Compare only against a cleavage regex string.
  bool DigestionEnzyme::operator==(const String& cleavage_regex) const
  {
    return cleavage_regex_ == cleavage_regex;
  }
  bool DigestionEnzyme::operator!=(const String& cleavage_regex) const
  {
    return cleavage_regex_ != cleavage_regex;
  }
  bool DigestionEnzyme::operator!=(const DigestionEnzyme& enzyme) const
  {
    return !(*this == enzyme);
  }
  // Ordering by name only (for sorted containers).
  bool DigestionEnzyme::operator<(const DigestionEnzyme& enzyme) const
  {
    return this->getName() < enzyme.getName();
  }
bool DigestionEnzyme::setValueFromFile(const String& key, const String& value)
{
if (key.hasSuffix(":Name"))
{
setName(value);
return true;
}
if (key.hasSuffix(":RegEx"))
{
setRegEx(value);
return true;
}
if (key.hasSuffix(":RegExDescription"))
{
setRegExDescription(value);
return true;
}
if (key.hasSubstring(":Synonyms:"))
{
addSynonym(value);
return true;
}
return false;
}
  // Human-readable one-line summary: name, cleavage regex and description.
  ostream& operator<<(ostream& os, const DigestionEnzyme& enzyme)
  {
    os << "digestion enzyme:" << enzyme.name_ << " (cleavage: "
       << enzyme.cleavage_regex_ << " - " << enzyme.regex_description_ << ")";
    return os;
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/CrossLinksDB.cpp | .cpp | 11,569 | 350 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Eugen Netz $
// $Authors: Eugen Netz $
// --------------------------------------------------------------------------
//
#include <OpenMS/CHEMISTRY/CrossLinksDB.h>
#include <OpenMS/SYSTEM/File.h>
#include <fstream>
using namespace std;
namespace OpenMS
{
  // Cross-link modification database: starts from an empty ModificationsDB
  // and is populated exclusively from the XLMOD ontology file.
  CrossLinksDB::CrossLinksDB():
    ModificationsDB("","","") // if you clear the mods you don't need to read them in the first place! Also avoids easy memory leaks.
  {
    mods_.clear();
    modification_names_.clear();
    readFromOBOFile("CHEMISTRY/XLMOD.obo"); //TODO please comment why this is needed! Why not use the one from ModificationsDB
  }
CrossLinksDB::~CrossLinksDB()
{
modification_names_.clear();
for (vector<ResidueModification*>::iterator it = mods_.begin(); it != mods_.end(); ++it)
{
delete *it;
}
}
void CrossLinksDB::readFromOBOFile(const String& filename)
{
ResidueModification mod;
// add multiple mods for multiple specificities
//Map<String, ResidueModification> all_mods;
multimap<String, ResidueModification> all_mods;
ifstream is(File::find(filename).c_str());
String line, line_wo_spaces, id;
String origin = "";
bool reading_mono_link = false;
//parse file
while (getline(is, line, '\n'))
{
line.trim();
line_wo_spaces = line;
line_wo_spaces.removeWhitespaces();
if (line.empty() || line[0] == '!') //skip empty lines and comments
{
continue;
}
if (line_wo_spaces == "[Term]") //new term
{
// if the last [Term] was a moon-link, then it does not belong in CrossLinksDB
if (!id.empty() && !reading_mono_link) //store last term
{
// split into single residues and make unique (for XL-MS, where equal specificities for both sides are possible)
vector<String> origins;
origin.split(",", origins);
std::sort(origins.begin(), origins.end());
vector<String>::iterator unique_end = unique(origins.begin(), origins.end());
origins.resize(distance(origins.begin(), unique_end));
for (vector<String>::iterator orig_it = origins.begin(); orig_it != origins.end(); ++orig_it)
{
// we don't allow modifications with ambiguity codes as origin (except "X"):
if ((orig_it->size() == 1) && (*orig_it != "B") && (*orig_it != "J") && (*orig_it != "Z"))
{
mod.setOrigin((*orig_it)[0]);
all_mods.insert(make_pair(id, mod));
}
}
if (origin.hasSubstring("ProteinN-term"))
{
mod.setTermSpecificity(ResidueModification::N_TERM);
mod.setOrigin('X');
all_mods.insert(make_pair(id, mod));
}
if (origin.hasSubstring("ProteinC-term"))
{
mod.setTermSpecificity(ResidueModification::C_TERM);
mod.setOrigin('X');
all_mods.insert(make_pair(id, mod));
}
id = "";
origin = "";
mod = ResidueModification();
}
else if (reading_mono_link) // re-initialize before reading next [Term]
{
id = "";
origin = "";
mod = ResidueModification();
reading_mono_link = false;
}
}
//new id line
else if (line_wo_spaces.hasPrefix("id:"))
{
id = line.substr(line.find(':') + 1).trim();
mod.setId(id);
mod.setPSIMODAccession(id);
}
else if (line_wo_spaces.hasPrefix("name:"))
{
String name = line.substr(line.find(':') + 1).trim();
mod.setFullName(name);
if (mod.getId().hasSubstring("XLMOD"))
{
mod.setName(name);
mod.setId(name);
mod.setFullName(name);
}
}
else if (line_wo_spaces.hasPrefix("is_a:"))
{
// TODO
}
else if (line_wo_spaces.hasPrefix("def:"))
{
line.remove('[');
line.remove(']');
line.remove(',');
vector<String> split;
line.split(' ', split);
for (Size i = 0; i != split.size(); ++i)
{
if (split[i].hasPrefix("UniMod:"))
{
// Parse UniMod identifier to int
String identifier = split[i].substr(7, split[i].size());
mod.setUniModRecordId(identifier.toInt());
}
}
}
else if (line_wo_spaces.hasPrefix("comment:"))
{
// TODO
}
else if (line_wo_spaces.hasPrefix("synonym:"))
{
vector<String> val_split;
line.split('"', val_split);
if (val_split.size() < 3)
{
throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, line, "missing \" characters to enclose argument!");
}
mod.addSynonym(val_split[1]);
if (line_wo_spaces.hasSubstring("PSI-MOD-label"))
{
mod.setName(val_split[1]);
}
}
else if (line_wo_spaces.hasPrefix("property_value:"))
{
String val = line_wo_spaces.substr(15, line_wo_spaces.size() - 15);
val.trim();
if (val.hasSubstring("\"none\""))
{
continue;
}
vector<String> val_split;
val.split('"', val_split);
if (val_split.size() != 3)
{
throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, line, "missing \" characters to enclose argument!");
}
if (val.hasPrefix("DiffAvg:"))
{
mod.setDiffAverageMass(val_split[1].toDouble());
}
else if (val.hasPrefix("DiffFormula:"))
{
vector<String> tmp_split;
line.split('"', tmp_split);
tmp_split[1].removeWhitespaces();
mod.setDiffFormula(EmpiricalFormula(tmp_split[1]));
}
else if (val.hasPrefix("DiffMono:"))
{
mod.setDiffMonoMass(val_split[1].toDouble());
}
else if (val.hasPrefix("Formula:"))
{
mod.setFormula(val_split[1]);
}
else if (val.hasPrefix("MassAvg:"))
{
mod.setAverageMass(val_split[1].toDouble());
}
else if (val.hasPrefix("MassMono:"))
{
mod.setMonoMass(val_split[1].toDouble());
}
else if (val.hasPrefix("Origin:"))
{
//mod.setOrigin(val_split[1]);
origin = val_split[1];
}
else if (val.hasPrefix("Source:"))
{
mod.setSourceClassification(val_split[1]);
}
else if (val.hasPrefix("TermSpec:"))
{
mod.setTermSpecificity(val_split[1]);
}
// XLMOD specific fields
else if (val.hasPrefix("reactionSites:"))
{
if (val_split[1] == "1")
{
reading_mono_link = true;
}
}
else if (val.hasPrefix("monoisotopicMass:"))
{
mod.setDiffMonoMass(val_split[1].toDouble());
}
else if (val.hasPrefix("specificities:"))
{
// TODO cross-linker specificities can be different for both chain sides, right now the union of both sides is used
// Input parameters of the cross-link search tool make sure, that the chemistry is not violated
origin = val_split[1];
// remove brackets
origin.remove('(');
origin.remove(')');
origin.substitute("&", ",");
}
}
}
if (!id.empty()) //store last term
{
// split into single residues and make unique (for XL-MS, where equal specificities for both sides are possible)
vector<String> origins;
origin.split(",", origins);
std::sort(origins.begin(), origins.end());
vector<String>::iterator unique_end = unique(origins.begin(), origins.end());
origins.resize(distance(origins.begin(), unique_end));
for (vector<String>::iterator orig_it = origins.begin(); orig_it != origins.end(); ++orig_it)
{
// we don't allow modifications with ambiguity codes as origin (except "X"):
if ((orig_it->size() == 1) && (*orig_it != "B") && (*orig_it != "J") && (*orig_it != "Z"))
{
mod.setOrigin((*orig_it)[0]);
all_mods.insert(make_pair(id, mod));
}
}
if (origin.hasSubstring("ProteinN-term"))
{
mod.setTermSpecificity(ResidueModification::N_TERM);
mod.setOrigin('X');
all_mods.insert(make_pair(id, mod));
}
if (origin.hasSubstring("ProteinC-term"))
{
mod.setTermSpecificity(ResidueModification::C_TERM);
mod.setOrigin('X');
all_mods.insert(make_pair(id, mod));
}
id = "";
origin = "";
mod = ResidueModification();
}
// now use the term and all synonyms to build the database
for (multimap<String, ResidueModification>::const_iterator it = all_mods.begin(); it != all_mods.end(); ++it)
{
// check whether a unimod definition already exists, then simply add synonyms to it
if (it->second.getUniModRecordId() > 0)
{
//cerr << "Found UniMod PSI-MOD mapping: " << it->second.getPSIMODAccession() << " " << it->second.getUniModAccession() << endl;
set<const ResidueModification*> mods = modification_names_[it->second.getUniModAccession()];
for (set<const ResidueModification*>::const_iterator mit = mods.begin(); mit != mods.end(); ++mit)
{
//cerr << "Adding PSIMOD accession: " << it->second.getPSIMODAccession() << " " << it->second.getUniModAccession() << endl;
modification_names_[it->second.getPSIMODAccession()].insert(*mit);
}
}
else
{
// the mod has so far not been mapped to a unimod mod
// first check whether the mod is specific
if ((it->second.getOrigin() != 'X') ||
((it->second.getTermSpecificity() != ResidueModification::ANYWHERE) &&
(it->second.getDiffMonoMass() != 0)))
{
mods_.push_back(new ResidueModification(it->second));
set<String> synonyms = it->second.getSynonyms();
synonyms.insert(it->first);
synonyms.insert(it->second.getFullName());
//synonyms.insert(it->second.getUniModAccession());
synonyms.insert(it->second.getPSIMODAccession());
// full ID is auto-generated based on (short) ID, but we want the name instead:
mods_.back()->setId(it->second.getFullName());
mods_.back()->setFullId();
mods_.back()->setId(it->second.getId());
synonyms.insert(mods_.back()->getFullId());
// now check each of the names and link it to the residue modification
for (set<String>::const_iterator nit = synonyms.begin(); nit != synonyms.end(); ++nit)
{
modification_names_[*nit].insert(mods_.back());
}
}
}
}
}
void CrossLinksDB::getAllSearchModifications(vector<String>& modifications) const
{
  // Collect the full IDs of all registered modifications that carry a
  // PSI-MOD accession; the result is returned sorted alphabetically.
  modifications.clear();
  for (const ResidueModification* mod : mods_)
  {
    if (!mod->getPSIMODAccession().empty())
    {
      modifications.push_back(mod->getFullId());
    }
  }
  sort(modifications.begin(), modifications.end());
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/ModifiedNASequenceGenerator.cpp | .cpp | 10,184 | 287 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/ModifiedNASequenceGenerator.h>
#include <OpenMS/CHEMISTRY/Ribonucleotide.h>
#include <OpenMS/CHEMISTRY/NASequence.h>
#include <vector>
#include <map>
#include <algorithm>
using std::vector;
using std::set;
using std::map;
namespace OpenMS
{
// static
void ModifiedNASequenceGenerator::applyFixedModifications(
const set<ConstRibonucleotidePtr>& fixed_mods,
NASequence& seq)
{
// apply modifications at chain ends
std::for_each(fixed_mods.begin(), fixed_mods.end(), [&seq] (const ConstRibonucleotidePtr& f)
{
if (f->getTermSpecificity() == Ribonucleotide::FIVE_PRIME)
{
if (!seq.hasFivePrimeMod()) { seq.setFivePrimeMod(f); }
}
else if (f->getTermSpecificity() == Ribonucleotide::THREE_PRIME)
{
if (!seq.hasThreePrimeMod()) { seq.setThreePrimeMod(f); }
}
}
);
// iterate over each nucleotide
size_t residue_index(0);
for (auto const & r : seq)
{
// skip already modified residue
if (r.isModified()) { ++residue_index; continue; }
//set fixed modifications
std::for_each(fixed_mods.begin(), fixed_mods.end(), [&seq, &residue_index, r] (ConstRibonucleotidePtr const & f)
{
// check if modification and current ribo match
const String& code = r.getCode();
if (code.size() == 1 && code[0] == f->getOrigin())
{
// replace the nucleoside with the modified version (skip 5'/3' modifications)
if (f->getTermSpecificity() == Ribonucleotide::ANYWHERE)
{
seq.set(residue_index, f);
}
}
}
);
++residue_index;
}
}
// static
/**
  Generates all variants of @p seq carrying up to @p max_variable_mods_per_seq
  variable modifications and appends them to @p all_modified_seqs.
  Terminal (5'/3') and internal placements are enumerated combinatorially.

  @param var_mods set of candidate variable modifications
  @param seq the (possibly already partially modified) input sequence
  @param max_variable_mods_per_seq upper bound of variable mods per sequence
  @param all_modified_seqs output; modified sequences are appended
  @param keep_unmodified if true, the unmodified input is also appended
*/
void ModifiedNASequenceGenerator::applyVariableModifications(
  const set<ConstRibonucleotidePtr>& var_mods,
  const NASequence& seq,
  size_t max_variable_mods_per_seq,
  vector<NASequence>& all_modified_seqs,
  bool keep_unmodified)
{
  // no variable modifications specified or no variable mods allowed? no compatibility map needs to be build
  if (var_mods.empty() || max_variable_mods_per_seq == 0)
  {
    // if unmodified seqs should be kept return the original list of digested seqs
    if (keep_unmodified) { all_modified_seqs.push_back(seq); }
    return;
  }

  // if there is at most one variable modification allowed for a seq we don't need combinatoric placement and can resort to a faster implementation
  if (max_variable_mods_per_seq == 1)
  {
    applyAtMostOneVariableModification_(var_mods,
                                        seq,
                                        all_modified_seqs,
                                        keep_unmodified);
    return;
  }

  // keep a list of all possible modifications of this seq
  vector<NASequence> modified_seqs;

  // only add unmodified version if flag is set (default)
  if (keep_unmodified) { modified_seqs.push_back(seq); }

  // iterate over each nucleotide and build compatibility mapping describing
  // which ribonucleotide (seq index) is compatible with which modification
  map<int, vector<ConstRibonucleotidePtr>> map_compatibility;

  // sentinel keys for the termini; cannot collide with residue indices (>= 0)
  const int FIVE_PRIME_MODIFICATION_INDEX = -1;
  const int THREE_PRIME_MODIFICATION_INDEX = -2;

  // set terminal modifications, if any are specified; a terminal mod is only
  // compatible if the sequence does not already carry one at that terminus
  std::for_each(var_mods.begin(), var_mods.end(), [&seq, &map_compatibility, &FIVE_PRIME_MODIFICATION_INDEX, &THREE_PRIME_MODIFICATION_INDEX] (ConstRibonucleotidePtr const & v)
  {
    if (v->getTermSpecificity() == Ribonucleotide::FIVE_PRIME)
    {
      if (!seq.hasFivePrimeMod()) { map_compatibility[FIVE_PRIME_MODIFICATION_INDEX].push_back(v); }
    }
    else if (v->getTermSpecificity() == Ribonucleotide::THREE_PRIME)
    {
      if (!seq.hasThreePrimeMod()) { map_compatibility[THREE_PRIME_MODIFICATION_INDEX].push_back(v); }
    }
  });

  size_t residue_index(0);
  for (auto const & r : seq)
  {
    // skip already modified residues
    if (r.isModified())
    {
      ++residue_index;
      continue;
    }

    // determine compatibility of variable modifications
    std::for_each(var_mods.begin(), var_mods.end(), [&residue_index, &r, &map_compatibility](ConstRibonucleotidePtr const & v)
    {
      // check if modification and current ribo match
      const String& code = r.getCode();
      if (code.size() == 1 && code[0] == v->getOrigin())
      {
        if (v->getTermSpecificity() == Ribonucleotide::ANYWHERE)
        {
          map_compatibility[static_cast<int>(residue_index)].push_back(v);
        }
      }
    });
    ++residue_index;
  }

  // Check if no compatible site that can be modified by variable
  // modification. If so just return seqs without variable modifications.
  // NOTE(review): const ref bound to a temporary (lifetime-extended); a plain value would be clearer.
  const size_t & compatible_mod_sites = map_compatibility.size();
  if (compatible_mod_sites == 0)
  {
    if (keep_unmodified) { all_modified_seqs.push_back(seq); }
    return;
  }

  // generate powerset of max_variable_mods_per_seq sized subset of all compatible modification sites
  size_t max_placements = std::min(max_variable_mods_per_seq, compatible_mod_sites);
  for (size_t n_var_mods = 1; n_var_mods <= max_placements; ++n_var_mods)
  {
    // enumerate all modified seqs with n_var_mods variable modified residues
    // (subtraction cannot underflow here: n_var_mods <= compatible_mod_sites)
    size_t zeros = std::max((size_t)0, compatible_mod_sites - n_var_mods);
    vector<bool> subset_mask;
    for (size_t i = 0; i != compatible_mod_sites; ++i)
    {
      // create mask 000011 to select last (e.g. n_var_mods = 2) two compatible sites as subset from the set of all compatible sites
      if (i < zeros)
      {
        subset_mask.push_back(false);
      }
      else
      {
        subset_mask.push_back(true);
      }
    }

    // generate all subsets of compatible sites {000011, ... , 101000, 110000} with current number of allowed variable modifications per seq
    do
    {
      // create subset indices e.g.{4,12} from subset mask e.g. 1010000 corresponding to the positions in the seq sequence
      vector<int> subset_indices;
      map<int, vector<ConstRibonucleotidePtr>>::const_iterator mit = map_compatibility.begin();
      for (size_t i = 0; i != compatible_mod_sites; ++i, ++mit)
      {
        if (subset_mask[i])
        {
          subset_indices.push_back(mit->first);
        }
      }

      // now enumerate all modifications
      recurseAndGenerateVariableModifiedSequences_(subset_indices, map_compatibility, 0, seq, modified_seqs);
    }
    while (next_permutation(subset_mask.begin(), subset_mask.end()));
  }

  // add modified version of the current seq to the list of all seqs
  all_modified_seqs.insert(all_modified_seqs.end(), modified_seqs.begin(), modified_seqs.end());
}
// static
/**
  Depth-first enumeration: places, at each site listed in @p subset_indices,
  every modification recorded for that site in @p map_compatibility, and
  appends each fully placed combination to @p modified_seqs.
  Negative indices -1/-2 denote the 5'/3' terminus (see caller).
*/
void ModifiedNASequenceGenerator::recurseAndGenerateVariableModifiedSequences_(
  const vector<int>& subset_indices,
  const map<int, vector<ConstRibonucleotidePtr>>& map_compatibility,
  int depth,
  const NASequence& current_seq,
  vector<NASequence>& modified_seqs)
{
  // sentinel indices; must match the values used when building map_compatibility
  const int FIVE_PRIME_MODIFICATION_INDEX = -1;
  const int THREE_PRIME_MODIFICATION_INDEX = -2;
  // cout << depth << " " << subset_indices.size() << " " << current_seq.toString() << endl;

  // end of recursion. Add the modified seq and return
  if (depth == (int)subset_indices.size())
  {
    modified_seqs.push_back(current_seq);
    return;
  }

  // get modifications compatible to residue at current seq position
  const int current_index = subset_indices[depth];
  auto const pos_mod_it = map_compatibility.find(current_index);
  const vector<ConstRibonucleotidePtr>& mods = pos_mod_it->second; // we don't need to check for .end as entry is guaranteed to exist

  for (auto const & m : mods)
  {
    // copy seq and apply modification
    NASequence new_seq = current_seq;
    if (current_index == THREE_PRIME_MODIFICATION_INDEX)
    {
      new_seq.setThreePrimeMod(m);
    }
    else if (current_index == FIVE_PRIME_MODIFICATION_INDEX)
    {
      new_seq.setFivePrimeMod(m);
    }
    else
    {
      new_seq.set(current_index, m);
    }
    // recurse with modified seq
    recurseAndGenerateVariableModifiedSequences_(subset_indices, map_compatibility, depth + 1, new_seq, modified_seqs);
  }
}
// static
/**
  Fast path for max_variable_mods_per_seq == 1: for every unmodified residue
  (visited from the last to the first, matching the slower combinatorial
  implementation) and every compatible variable modification, appends one copy
  of @p seq carrying exactly that single modification.

  @param var_mods set of candidate variable modifications
  @param seq input sequence
  @param all_modified_seqs output; singly-modified sequences are appended
  @param keep_unmodified if true, the unmodified input is appended first
*/
void ModifiedNASequenceGenerator::applyAtMostOneVariableModification_(
  const set<ConstRibonucleotidePtr>& var_mods,
  const NASequence& seq,
  vector<NASequence>& all_modified_seqs,
  bool keep_unmodified)
{
  if (keep_unmodified)
  {
    all_modified_seqs.push_back(seq);
  }

  // Iterate from the last residue to the first. The previous implementation
  // compared against seq.cbegin() - 1, which is undefined behavior (an
  // iterator may not be moved before begin). Decrementing at the top of the
  // loop visits the same elements in the same (reverse) order without ever
  // forming an out-of-range iterator, and is safe for empty sequences.
  for (NASequence::ConstIterator ribo_it = seq.cend(); ribo_it != seq.cbegin();)
  {
    --ribo_it;

    // skip already modified residues
    if (ribo_it->isModified())
    {
      continue;
    }

    size_t residue_index = ribo_it - seq.cbegin();

    // matches every variable modification to every site and return the new sequence with single modification
    std::for_each(var_mods.begin(), var_mods.end(),
      [ribo_it, residue_index, &all_modified_seqs, &seq](ConstRibonucleotidePtr const & v)
      {
        // check if modification and current ribo match
        const String& code = ribo_it->getCode();
        if (code.size() == 1 && code[0] == v->getOrigin())
        {
          NASequence new_seq = seq;
          new_seq.set(residue_index, v);
          all_modified_seqs.push_back(new_seq);
        }
      });
  }
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/MASSDECOMPOSITION/MassDecomposition.cpp | .cpp | 5,056 | 209 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Andreas Bertsch $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/MASSDECOMPOSITION/MassDecomposition.h>
#include <OpenMS/DATASTRUCTURES/String.h>
#include <iostream>
using namespace std;
namespace OpenMS
{
// Creates an empty decomposition: no amino acid counts, maximum count 0.
MassDecomposition::MassDecomposition() :
  number_of_max_aa_(0)
{
}
// Parses a decomposition string of the form "A2 C1 ..." (one-letter amino
// acid code immediately followed by its count, space-separated). An optional
// trailing "(...)" annotation is stripped before parsing.
MassDecomposition::MassDecomposition(const String& deco) :
  number_of_max_aa_(0)
{
  String cleaned = deco;
  vector<String> tokens;

  // some more info per line: cut everything from the first '(' on
  if (cleaned.has('('))
  {
    cleaned = cleaned.substr(0, cleaned.find('(', 0));
    cleaned.trim();
  }

  cleaned.split(' ', tokens);

  // each token is <aa-letter><count>, e.g. "K3"
  for (Size t = 0; t < tokens.size(); ++t)
  {
    const char aa = tokens[t][0];
    String count_str = tokens[t];
    count_str.erase(0, 1);
    const Size count = (Size)count_str.toInt();
    if (count > number_of_max_aa_)
    {
      number_of_max_aa_ = count;
    }
    decomp_[aa] = count;
  }
}
// Memberwise copy suffices: only a map and a plain counter are held.
MassDecomposition::MassDecomposition(const MassDecomposition& rhs) = default;
// Copy-assignment with self-assignment guard.
MassDecomposition& MassDecomposition::operator=(const MassDecomposition& rhs)
{
  if (&rhs == this)
  {
    return *this;
  }
  decomp_ = rhs.decomp_;
  number_of_max_aa_ = rhs.number_of_max_aa_;
  return *this;
}
// Merges another decomposition into this one: counts of shared amino acids
// are added, new amino acids are inserted, and the running maximum count is
// kept up to date.
MassDecomposition& MassDecomposition::operator+=(const MassDecomposition& d)
{
  for (const auto& entry : d.decomp_)
  {
    auto pos = decomp_.find(entry.first);
    if (pos == decomp_.end())
    {
      decomp_.insert(entry);
      if (entry.second > number_of_max_aa_)
      {
        number_of_max_aa_ = entry.second;
      }
    }
    else
    {
      pos->second += entry.second;
      if (pos->second > number_of_max_aa_)
      {
        number_of_max_aa_ = pos->second;
      }
    }
  }
  return *this;
}
// Strict weak ordering via lexicographic comparison of the underlying
// (ordered) aa -> count maps; number_of_max_aa_ is derived data and ignored.
bool MassDecomposition::operator<(const MassDecomposition& rhs) const
{
  return decomp_ < rhs.decomp_;
}
// Compares against a decomposition given in string form by parsing it first.
bool MassDecomposition::operator==(const String& deco) const
{
  const MassDecomposition other(deco);
  return decomp_ == other.decomp_ && number_of_max_aa_ == other.number_of_max_aa_;
}
// Renders the decomposition as "A2 C1 ..." (aa letter followed by its count,
// space-separated, trailing whitespace trimmed).
String MassDecomposition::toString() const
{
  String out;
  for (const auto& entry : decomp_)
  {
    out += entry.first + String(entry.second) + String(" ");
  }
  return out.trim();
}
// Renders each amino acid repeated as often as its count,
// e.g. {A:2, C:1} -> "AAC".
String MassDecomposition::toExpandedString() const
{
  String out;
  for (const auto& entry : decomp_)
  {
    out += String(entry.second, entry.first); // String(n, c): char c repeated n times
  }
  return out;
}
bool MassDecomposition::containsTag(const String& tag) const
{
map<char, Size> tmp;
for (String::ConstIterator it = tag.begin(); it != tag.end(); ++it)
{
char aa = *it;
map<char, Size>::const_iterator it2 = decomp_.find(aa);
if (it2 == decomp_.end())
{
return false;
}
map<char, Size>::iterator it3 = tmp.find(aa);
if (it3 != tmp.end())
{
++(it3->second);
}
else
{
tmp[aa] = 1;
}
}
// check if tag decomp_ is compatible with decomp_
for (map<char, Size>::const_iterator it = tmp.begin(); it != tmp.end(); ++it)
{
if (decomp_.find(it->first)->second < it->second)
{
return false;
}
}
return true;
}
/**
  Checks whether @p deco is contained in this decomposition, i.e. this
  decomposition holds at least as many of every amino acid as @p deco.

  Fixes: removed a leftover debug print to std::cerr that polluted stderr on
  every mismatch, and reuses the already-found iterator instead of performing
  a redundant second map lookup.
*/
bool MassDecomposition::compatible(const MassDecomposition& deco) const
{
  for (map<char, Size>::const_iterator it = deco.decomp_.begin(); it != deco.decomp_.end(); ++it)
  {
    map<char, Size>::const_iterator it2 = decomp_.find(it->first);
    if (it2 == decomp_.end() || it2->second < it->second)
    {
      return false;
    }
  }
  return true;
}
/**
  Returns the sum of this decomposition and @p rhs (counts added per amino
  acid) with number_of_max_aa_ updated to the largest resulting count.

  Fixes a bug in the previous hand-rolled merge: it compared new counts
  against this object's (stale) number_of_max_aa_ instead of the result's,
  so the result's maximum could be *lowered* by a later, smaller count.
  Delegating to operator+= (which implements the merge correctly) fixes the
  defect and removes the duplicated logic.
*/
MassDecomposition MassDecomposition::operator+(const MassDecomposition& rhs) const
{
  MassDecomposition d(*this);
  d += rhs;
  return d;
}
// Returns the largest per-amino-acid count in this decomposition
// (maintained incrementally by the constructors and operator+=).
Size MassDecomposition::getNumberOfMaxAA() const
{
  return number_of_max_aa_;
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/MASSDECOMPOSITION/MassDecompositionAlgorithm.cpp | .cpp | 7,258 | 191 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Andreas Bertsch $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/MASSDECOMPOSITION/MassDecompositionAlgorithm.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/CHEMISTRY/ResidueDB.h>
#include <OpenMS/CHEMISTRY/ModificationDefinition.h>
#include <OpenMS/CHEMISTRY/ModificationDefinitionsSet.h>
#include <iostream>
#include <map>
using namespace std;
namespace OpenMS
{
// Registers all algorithm parameters (precision, tolerance, fixed/variable
// modifications, residue set). The owned alphabet_/decomposer_ are built
// lazily in updateMembers_(), which runs once the defaults are synced below.
MassDecompositionAlgorithm::MassDecompositionAlgorithm() :
  DefaultParamHandler("MassDecompositionAlgorithm"),
  alphabet_(nullptr),
  decomposer_(nullptr)
{
  defaults_.setValue("decomp_weights_precision", 0.01, "precision used to calculate the decompositions, this only affects cache usage!", {"advanced"});
  defaults_.setValue("tolerance", 0.3, "tolerance which is allowed for the decompositions");

  // offer every modification known to ModificationsDB as a valid choice
  vector<String> all_mods;
  ModificationsDB::getInstance()->getAllSearchModifications(all_mods);
  defaults_.setValue("fixed_modifications", std::vector<std::string>(), "fixed modifications, specified using UniMod (www.unimod.org) terms, e.g. 'Carbamidomethyl (C)' or 'Oxidation (M)'");
  defaults_.setValidStrings("fixed_modifications", ListUtils::create<std::string>(all_mods));
  defaults_.setValue("variable_modifications", std::vector<std::string>(), "variable modifications, specified using UniMod (www.unimod.org) terms, e.g. 'Carbamidomethyl (C)' or 'Oxidation (M)'");
  defaults_.setValidStrings("variable_modifications", ListUtils::create<std::string>(all_mods));

  defaults_.setValue("residue_set", "Natural19WithoutI", "The predefined amino acid set that should be used, see doc of ResidueDB for possible residue sets", {"advanced"});
  // any residue set known to ResidueDB is a valid value
  set<String> residue_sets = ResidueDB::getInstance()->getResidueSets();
  vector<std::string> valid_strings;
  for (set<String>::const_iterator it = residue_sets.begin(); it != residue_sets.end(); ++it)
  {
    valid_strings.push_back(*it);
  }
  defaults_.setValidStrings("residue_set", valid_strings);

  // copy the registered defaults into the active parameters
  defaultsToParam_();
}
// alphabet_ and decomposer_ are owned raw pointers (re-)allocated in
// updateMembers_(); deleting nullptr is a safe no-op before the first update.
MassDecompositionAlgorithm::~MassDecompositionAlgorithm()
{
  delete alphabet_;
  delete decomposer_;
}
// Decomposes @p mass (within the configured tolerance) into amino acid
// compositions and appends the results to @p decomps. Each integer
// decomposition is rendered as "A2 C1 ..." and re-parsed into a
// MassDecomposition.
void MassDecompositionAlgorithm::getDecompositions(vector<MassDecomposition> & decomps, double mass)
{
  const double tolerance((double) param_.getValue("tolerance"));

  ims::RealMassDecomposer::decompositions_type decompositions = decomposer_->getDecompositions(mass, tolerance);

  for (const auto& decomposition : decompositions)
  {
    String d;
    // append "<name><count> " for every alphabet entry with a non-zero count
    for (ims::IMSAlphabet::size_type i = 0; i < alphabet_->size(); ++i)
    {
      if (decomposition[i] > 0)
      {
        d += alphabet_->getName(i) + String(decomposition[i]) + " ";
      }
    }
    d.trim();
    decomps.push_back(MassDecomposition(d));
  }
}
// Rebuilds the weight alphabet and the mass decomposer from the current
// parameters: residue set weights first, then fixed modifications (which
// overwrite/shift their target residue's weight), then variable modifications
// (added as extra pseudo-residues named 'a'..'z').
void MassDecompositionAlgorithm::updateMembers_()
{
  // todo add accessor to tolerance, it is called very often in CID mode
  std::map<char, double> aa_to_weight;

  // base weights: monoisotopic internal residue masses of the configured set
  set<const Residue *> residues = ResidueDB::getInstance()->getResidues(String(param_.getValue("residue_set").toString()));

  for (set<const Residue *>::const_iterator it = residues.begin(); it != residues.end(); ++it)
  {
    aa_to_weight[(*it)->getOneLetterCode()[0]] = (*it)->getMonoWeight(Residue::Internal);
  }

  // now handle the modifications
  ModificationDefinitionsSet mod_set(ListUtils::toStringList<std::string>(param_.getValue("fixed_modifications")), ListUtils::toStringList<std::string>(param_.getValue("variable_modifications")));

  // fixed modifications: adjust the weight of the affected residue in place
  const set<ModificationDefinition>& fixed_mods = mod_set.getFixedModifications();
  for (set<ModificationDefinition>::const_iterator it = fixed_mods.begin(); it != fixed_mods.end(); ++it)
  {
    const ResidueModification& mod = it->getModification();

    char aa = ' ';
    if (mod.getOrigin() == 'X')
    {
      // ambiguous origin: the mass shift cannot be assigned to a single residue
      cerr << "MassDecompositionAlgorithm: Warning: cannot handle modification " << mod.getName() << ", because aa is ambiguous (" << mod.getOrigin() << "), ignoring modification!" << endl;
      continue;
    }
    else
    {
      aa = mod.getOrigin();
    }

    if (mod.getMonoMass() != 0)
    {
      // absolute mass given: overwrite the residue weight
      aa_to_weight[aa] = mod.getMonoMass();
    }
    else
    {
      if (mod.getDiffMonoMass() != 0)
      {
        // only a mass difference given: shift the residue weight
        aa_to_weight[aa] += mod.getDiffMonoMass();
      }
      else
      {
        cerr << "MassDecompositionAlgorithm: Warning: cannot handle modification " << mod.getName() << ", because no monoisotopic mass value was found! Ignoring modification!" << endl;
        continue;
      }
    }
  }

  // variable modifications: each one gets its own pseudo-residue letter so it
  // can be chosen independently of the unmodified residue
  const StringList mod_names(ListUtils::create<String>("a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z"));
  vector<String>::const_iterator actual_mod_name = mod_names.begin();
  const set<ModificationDefinition>& var_mods = mod_set.getVariableModifications();
  for (set<ModificationDefinition>::const_iterator it = var_mods.begin(); it != var_mods.end(); ++it)
  {
    ResidueModification mod = it->getModification();
    //cerr << it->getModification() << " " << mod.getOrigin() << " " << mod.getId() << " " << mod.getFullId() << " " << mod.getUniModAccession() << " " << mod.getPSIMODAccession() << endl;
    char aa = (*actual_mod_name)[0];
    char origin_aa = ' ';
    // NOTE(review): a pseudo-residue letter is consumed even when the mod is
    // skipped below, and >26 variable mods would run past mod_names -- confirm.
    ++actual_mod_name;
    if (mod.getOrigin() == 'X')
    {
      cerr << "MassDecompositionAlgorithm: Warning: cannot handle modification " << mod.getName() << ", because aa is ambiguous (" << mod.getOrigin() << "), ignoring modification!" << endl;
      continue;
    }
    else
    {
      origin_aa = mod.getOrigin();
    }

    if (mod.getMonoMass() != 0)
    {
      aa_to_weight[aa] = mod.getMonoMass();
    }
    else
    {
      if (mod.getDiffMonoMass() != 0)
      {
        // pseudo-residue weight = unmodified origin residue weight + mass difference
        aa_to_weight[aa] = aa_to_weight[origin_aa] + mod.getDiffMonoMass();
      }
      else
      {
        cerr << "Warning: cannot handle modification " << mod.getName() << ", because no monoisotopic mass value was found! Ignoring modification!" << endl;
        continue;
      }
    }
  }

  // discard the previous alphabet/decomposer before rebuilding (owned raw pointers)
  if (alphabet_ != nullptr)
  {
    delete alphabet_;
  }
  if (decomposer_ != nullptr)
  {
    delete decomposer_;
  }

  // init mass decomposer
  alphabet_ = new ims::IMSAlphabet();
  for (std::map<char, double>::const_iterator it = aa_to_weight.begin(); it != aa_to_weight.end(); ++it)
  {
    alphabet_->push_back(String(it->first), it->second);
  }

  // initializes weights
  ims::Weights weights(alphabet_->getMasses(), (double) param_.getValue("decomp_weights_precision"));

  // optimize alphabet by dividing by gcd
  weights.divideByGCD();

  // decomposes real values
  decomposer_ = new ims::RealMassDecomposer(weights);
  return;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/MASSDECOMPOSITION/IMS/IMSElement.cpp | .cpp | 1,705 | 55 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Anton Pervukhin <Anton.Pervukhin@CeBiTec.Uni-Bielefeld.DE> $
// --------------------------------------------------------------------------
//
#include <OpenMS/CHEMISTRY/MASSDECOMPOSITION/IMS/IMSElement.h>
#include <ostream>
namespace OpenMS::ims
{
/**
  @brief Mass of a single electron in unified atomic mass units (u).

  @note Value for electron mass is taken from
  @link www.mcelwee.net/html/table_of_physical_constants.html
*/
const IMSElement::mass_type IMSElement::ELECTRON_MASS_IN_U = 0.00054858;
// Copy-assignment: guards against self-assignment, then copies all members.
IMSElement & IMSElement::operator=(const IMSElement & element)
{
  if (this == &element)
  {
    return *this;
  }
  name_ = element.name_;
  sequence_ = element.sequence_;
  isotopes_ = element.isotopes_;
  return *this;
}
// Two elements are equal if they are the same object or if name, sequence
// and isotope distribution all match.
bool IMSElement::operator==(const IMSElement & element) const
{
  if (this == &element)
  {
    return true;
  }
  return name_ == element.name_
         && sequence_ == element.sequence_
         && isotopes_ == element.isotopes_;
}
// Inequality is the negation of operator==.
bool IMSElement::operator!=(const IMSElement & element) const
{
  return !(*this == element);
}
// Streams a human-readable dump: name, sequence, and the full isotope
// distribution (one peak per line, via the distribution's operator<<).
std::ostream & operator<<(std::ostream & os, const IMSElement & element)
{
  os << "name:\t" << element.getName() << "\nsequence:\t" << element.getSequence()
     << "\nisotope distribution:\n" << element.getIsotopeDistribution() << '\n';
  return os;
}
} // namespace OpenMS // namespace ims
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/MASSDECOMPOSITION/IMS/IMSIsotopeDistribution.cpp | .cpp | 7,626 | 243 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Anton Pervukhin <Anton.Pervukhin@CeBiTec.Uni-Bielefeld.DE> $
// --------------------------------------------------------------------------
//
#include <OpenMS/CHEMISTRY/MASSDECOMPOSITION/IMS/IMSIsotopeDistribution.h>
#include <functional>
#include <numeric>
#include <algorithm>
#include <iostream>
#include <cmath>
namespace OpenMS::ims
{
// Out-of-class definitions of the static members declared in the header:
// SIZE (minimum number of peaks kept when folding distributions) and
// ABUNDANCES_SUM_ERROR (tolerance used by normalize()). They are
// zero-initialized here; presumably their actual values come from the header
// declaration or from client code -- TODO confirm.
IMSIsotopeDistribution::size_type IMSIsotopeDistribution::SIZE;
IMSIsotopeDistribution::abundance_type IMSIsotopeDistribution::ABUNDANCES_SUM_ERROR;
/**
* Constructor with single isotope. It sets isotopes consist of one entry
* with given mass and 100% abundance.
*/
/*
IsotopeDistribution::IsotopeDistribution(mass_type mass): nominalMass(0) {
peaks.push_back(peaks_container::value_type(mass, 1.0));
}
*/
// Copy-assignment with self-assignment guard; copies peaks and nominal mass.
IMSIsotopeDistribution & IMSIsotopeDistribution::operator=(const IMSIsotopeDistribution & distribution)
{
  if (this == &distribution)
  {
    return *this;
  }
  peaks_ = distribution.peaks_;
  nominal_mass_ = distribution.nominal_mass_;
  return *this;
}
// Equal if same object, or if peak list and nominal mass both match.
bool IMSIsotopeDistribution::operator==(const IMSIsotopeDistribution & distribution) const
{
  if (this == &distribution)
  {
    return true;
  }
  return peaks_ == distribution.peaks_
         && nominal_mass_ == distribution.nominal_mass_;
}
// Inequality is the negation of operator==.
bool IMSIsotopeDistribution::operator!=(const IMSIsotopeDistribution & distribution) const
{
  return !(*this == distribution);
}
/**
  Folds (convolves) this distribution with @p distribution in place and
  renormalizes the result. The resulting peak k sums contributions from all
  peak pairs (i, j) with i + j == k; its mass is the abundance-weighted mean
  of the pairwise mass sums. The result is truncated to SIZE peaks.
*/
IMSIsotopeDistribution & IMSIsotopeDistribution::operator*=(const IMSIsotopeDistribution & distribution)
{
  // folding with an empty distribution leaves this one unchanged
  if (distribution.empty())
  {
    return *this;
  }
  if (this->empty())
  {
    return operator=(distribution);
  }

  // creates a temporary destination container to store peaks
  // (abundances and masses)
  peaks_container dest(SIZE);

  // checks if the size of abundances and masses containers coincides with
  // the static variable SIZE (meant to be set by client for distribution)
  setMinimumSize_();

  // creates a non-const equivalent of a const parameter - it's needed to
  // get non-const iterators out of it
  // NOTE(review): this const_cast lets setMinimumSize_() resize the caller's
  // const argument (padding it to SIZE peaks) -- undefined behavior if the
  // argument object is actually const; consider a local copy instead.
  IMSIsotopeDistribution & non_const_distribution =
    const_cast<IMSIsotopeDistribution &>(distribution);
  non_const_distribution.setMinimumSize_();

  // sets up different iterators for an efficient folding:
  // it2_dest - iterator on a destination container,
  // it2_begin, it2_end - iterators on begin and end of the second
  // source container (function parameter)
  // it1 - iterator on the first source container
  // it2 - iterator on the second source container
  peaks_iterator it2_begin = non_const_distribution.peaks_.begin();
  peaks_iterator it1, it2,
    it_dest = dest.begin(),
    it2_end = it2_begin;

  abundance_type abundances_sum, masses_mult_abundances_sum;

  for (; it_dest != dest.end(); ++it_dest, ++it2_end)
  {
    abundances_sum = 0;
    masses_mult_abundances_sum = 0;
    // it1 walks forward while it2 walks backward, so i + j stays constant:
    // this inner loop accumulates all pairs contributing to the current peak
    it1 = peaks_.begin();
    it2 = it2_end;
    for (; it2 != it2_begin; ++it1, --it2)
    {
      abundances_sum += it1->abundance * it2->abundance;
      masses_mult_abundances_sum +=
        it1->abundance * it2->abundance * (it1->mass + it2->mass);
    }
    // adds last element
    abundances_sum += it1->abundance * it2->abundance;
    masses_mult_abundances_sum += it1->abundance * it2->abundance * (it1->mass + it2->mass);

    // assigns results to containers through iterators
    it_dest->abundance = abundances_sum;
    it_dest->mass = (abundances_sum != 0) ?
                    masses_mult_abundances_sum / abundances_sum : 0;
  }

  // nominal (integer) masses add up under folding
  nominal_mass_ += distribution.nominal_mass_;

  peaks_.swap(dest);
  this->normalize();
  return *this;
}
/**
  Folds the distribution with itself @c power times. Implements
  Russian Multiplication Scheme by this reducing the number of
  folding operations. For the sake of performance folding is
  implemented iteratively, not recursively.

  @return The distribution folded with itself @c power times.
*/
IMSIsotopeDistribution & IMSIsotopeDistribution::operator*=(unsigned int power)
{
  // power 0 or 1: nothing to fold (note: power == 0 returns *this unchanged,
  // not an identity distribution)
  if (power <= 1)
  {
    return *this;
  }

  // folding proceeds a following:
  // - first, binary representation of power is calculated, i.e.
  //   power = 138 -----> binary representation = [0, 1, 0, 1, 0, 0, 0, 1]
  // - then, one loops through array every time folding the copy
  //   of this distribution with itself into lets say this_power_two_index distribution.
  //   Additionally, if the current index is equal to 1, then result distribution
  //   is folded with the current this_power_two_index distribution.
  //   At the end, the result distribution is outputted.

  // calculates binary representation of power (least significant bit first)
  std::vector<unsigned int> binary;
  while (power > 0)
  {
    binary.push_back(power % 2);
    power >>= 1;
  }

  // initializes distribution which will folded iteratively up to each entry
  IMSIsotopeDistribution this_power_two_index(*this);
  // initializes result distribution where foldings will be collected
  IMSIsotopeDistribution result;

  // starts folding based on binary representation
  if (binary[0])
  {
    result = this_power_two_index;
  }
  std::vector<unsigned int>::size_type index = 0;
  while (++index < binary.size())
  {
    // folds distribution with itself iteratively (squaring step)
    this_power_two_index *= this_power_two_index;
    if (binary[index])
    {
      // collects distribution in the result
      result *= this_power_two_index;
    }
  }
  return operator=(result);
}
// Abundance-weighted mean of the peak masses.
IMSIsotopeDistribution::mass_type IMSIsotopeDistribution::getAverageMass() const
{
  mass_type weighted_sum = 0.0;
  for (size_type index = 0; index < peaks_.size(); ++index)
  {
    weighted_sum += getMass(index) * getAbundance(index);
  }
  return weighted_sum;
}
// Copies all peak abundances (in peak order) into a fresh container.
IMSIsotopeDistribution::abundances_container IMSIsotopeDistribution::getAbundances() const
{
  abundances_container result;
  for (size_type index = 0; index < size(); ++index)
  {
    result.push_back(getAbundance(index));
  }
  return result;
}
// Copies all peak masses (in peak order) into a fresh container.
IMSIsotopeDistribution::masses_container IMSIsotopeDistribution::getMasses() const
{
  masses_container result;
  for (size_type index = 0; index < size(); ++index)
  {
    result.push_back(getMass(index));
  }
  return result;
}
void IMSIsotopeDistribution::normalize()
{
abundance_type sum = 0.0;
for (const_peaks_iterator cit = peaks_.begin(); cit < peaks_.end(); ++cit)
{
sum += cit->abundance;
}
if (sum > 0 && std::fabs(sum - 1) > ABUNDANCES_SUM_ERROR)
{
abundance_type scale = 1 / sum;
for (peaks_iterator it = peaks_.begin(); it < peaks_.end(); ++it)
{
it->abundance *= scale;
}
}
}
// Pads the peak list with default-constructed (zero-abundance) peaks up to
// the configured minimum SIZE; never shrinks an already larger container.
void IMSIsotopeDistribution::setMinimumSize_()
{
  if (peaks_.size() < SIZE)
  {
    peaks_.resize(SIZE);
  }
}
// Streams one "mass abundance" pair per line, in peak order.
std::ostream & operator<<(std::ostream & os, const IMSIsotopeDistribution & distribution)
{
  for (IMSIsotopeDistribution::size_type i = 0; i < distribution.size(); ++i)
  {
    os << distribution.getMass(i) << ' '
       << distribution.getAbundance(i) << '\n';
  }
  return os;
}
} // namespace OpenMS // namespace ims
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/MASSDECOMPOSITION/IMS/RealMassDecomposer.cpp | .cpp | 6,041 | 159 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Anton Pervukhin <Anton.Pervukhin@CeBiTec.Uni-Bielefeld.DE> $
// --------------------------------------------------------------------------
//
#include <OpenMS/CHEMISTRY/MASSDECOMPOSITION/IMS/RealMassDecomposer.h>
#include <iostream>
#include <memory>
namespace OpenMS::ims
{
RealMassDecomposer::RealMassDecomposer(const Weights & weights) :
weights_(weights)
{
rounding_errors_ = std::make_pair(weights.getMinRoundingError(), weights.getMaxRoundingError());
precision_ = weights.getPrecision();
decomposer_ = std::make_shared<integer_decomposer_type>(
weights);
}
RealMassDecomposer::decompositions_type RealMassDecomposer::getDecompositions(double mass, double error)
{
// defines the range of integers to be decomposed
integer_value_type start_integer_mass = static_cast<integer_value_type>(
ceil((1 + rounding_errors_.first) * (mass - error) / precision_));
integer_value_type end_integer_mass = static_cast<integer_value_type>(
floor((1 + rounding_errors_.second) * (mass + error) / precision_));
decompositions_type all_decompositions_from_range;
// loops and finds decompositions for every integer mass,
// then checks if real mass of decomposition lays in the allowed
// error interval [mass-error; mass+error]
for (integer_value_type integer_mass = start_integer_mass;
integer_mass < end_integer_mass; ++integer_mass)
{
decompositions_type decompositions =
decomposer_->getAllDecompositions(integer_mass);
for (decompositions_type::iterator pos = decompositions.begin();
pos != decompositions.end(); )
{
double parent_mass = weights_.getParentMass(*pos);
if (fabs(parent_mass - mass) > error)
{
pos = decompositions.erase(pos);
}
else
{
++pos;
}
}
all_decompositions_from_range.insert(all_decompositions_from_range.end(),
decompositions.begin(), decompositions.end());
}
return all_decompositions_from_range;
}
RealMassDecomposer::decompositions_type RealMassDecomposer::getDecompositions(double mass, double error,
const constraints_type & constraints)
{
// defines the range of integers to be decomposed
integer_value_type start_integer_mass = static_cast<integer_value_type>(
ceil((1 + rounding_errors_.first) * (mass - error) / precision_));
integer_value_type end_integer_mass = static_cast<integer_value_type>(
floor((1 + rounding_errors_.second) * (mass + error) / precision_));
decompositions_type all_decompositions_from_range;
// loops and finds decompositions for every integer mass,
// then checks if real mass of decomposition lays in the allowed
// error interval [mass-error; mass+error]
for (integer_value_type integer_mass = start_integer_mass;
integer_mass < end_integer_mass; ++integer_mass)
{
decompositions_type decompositions =
decomposer_->getAllDecompositions(integer_mass);
for (decompositions_type::iterator pos = decompositions.begin();
pos != decompositions.end(); )
{
double parent_mass = weights_.getParentMass(*pos);
if (fabs(parent_mass - mass) > error)
{
pos = decompositions.erase(pos);
}
else
{
bool to_erase = false;
if (!constraints.empty())
{
for (constraints_type::const_iterator it =
constraints.begin(); it != constraints.end(); ++it)
{
if ((*pos)[it->first] < it->second.first ||
(*pos)[it->first] > it->second.second)
{
to_erase = true;
break;
}
}
}
if (to_erase)
{
pos = decompositions.erase(pos);
}
else
{
++pos;
}
}
}
all_decompositions_from_range.insert(all_decompositions_from_range.end(),
decompositions.begin(), decompositions.end());
}
return all_decompositions_from_range;
}
RealMassDecomposer::number_of_decompositions_type RealMassDecomposer::getNumberOfDecompositions(double mass, double error)
{
// defines the range of integers to be decomposed
integer_value_type start_integer_mass = static_cast<integer_value_type>(1);
if (mass - error > 0)
{
start_integer_mass = static_cast<integer_value_type>(
ceil((1 + rounding_errors_.first) * (mass - error) / precision_));
}
integer_value_type end_integer_mass = static_cast<integer_value_type>(
floor((1 + rounding_errors_.second) * (mass + error) / precision_));
number_of_decompositions_type number_of_decompositions = static_cast<number_of_decompositions_type>(0);
// loops and finds decompositions for every integer mass,
// then checks if real mass of decomposition lays in the allowed
// error interval [mass-error; mass+error]
for (integer_value_type integer_mass = start_integer_mass;
integer_mass < end_integer_mass; ++integer_mass)
{
decompositions_type decompositions =
decomposer_->getAllDecompositions(integer_mass);
for (decompositions_type::iterator pos = decompositions.begin();
pos != decompositions.end(); ++pos)
{
double parent_mass = weights_.getParentMass(*pos);
if (fabs(parent_mass - mass) <= error)
{
++number_of_decompositions;
}
}
}
return number_of_decompositions;
}
} // namespace OpenMS // namespace ims
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/MASSDECOMPOSITION/IMS/Weights.cpp | .cpp | 4,049 | 134 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Anton Pervukhin <Anton.Pervukhin@CeBiTec.Uni-Bielefeld.DE> $
// --------------------------------------------------------------------------
//
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <OpenMS/CHEMISTRY/MASSDECOMPOSITION/IMS/Weights.h>
#include <OpenMS/DATASTRUCTURES/String.h>
namespace OpenMS::ims
{
Weights & Weights::operator=(const Weights & other)
{
if (this != &other)
{
alphabet_masses_ = other.alphabet_masses_;
precision_ = other.precision_;
weights_ = other.weights_;
}
return *this;
}
void Weights::setPrecision(Weights::alphabet_mass_type precision)
{
this->precision_ = precision;
weights_.clear();
// convert alphabet masses (double) to integer masses (weights) with the given precision
for (alphabet_masses_type::size_type i = 0; i < alphabet_masses_.size(); ++i)
{
weights_.push_back(static_cast<weight_type>(floor((alphabet_masses_[i] / precision) + 0.5)));
}
}
void Weights::swap(size_type index1, size_type index2)
{
weight_type weight = weights_[index1];
weights_[index1] = weights_[index2];
weights_[index2] = weight;
alphabet_mass_type mass = alphabet_masses_[index1];
alphabet_masses_[index1] = alphabet_masses_[index2];
alphabet_masses_[index2] = mass;
}
Weights::alphabet_mass_type Weights::getParentMass(const std::vector<unsigned int> & decomposition) const
{
// checker whether the passed decomposition is applicable
if (alphabet_masses_.size() != decomposition.size())
{
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("The passed decomposition has the wrong size. Expected ") + String(alphabet_masses_.size()) + String(" but got ") + String(decomposition.size()) + String("."));
}
alphabet_mass_type parent_mass = 0;
for (std::vector<unsigned int>::size_type i = 0; i < decomposition.size(); ++i)
{
parent_mass += alphabet_masses_[i] * decomposition[i];
}
return parent_mass;
}
bool Weights::divideByGCD()
{
if (weights_.size() < 2)
{
return false;
}
weight_type d = Math::gcd(weights_[0], weights_[1]);
for (weights_type::size_type i = 2; i < weights_.size(); ++i)
{
d = Math::gcd(d, weights_[i]);
if (d == 1)
{
return false;
}
}
// if we're here: d != 1
precision_ *= d;
// rescales the integer weights. Don't use setPrecision() here since
// the result could be different due to rounding errors.
for (weights_type::size_type i = 0; i < weights_.size(); ++i)
{
weights_[i] /= d;
}
return true;
}
Weights::alphabet_mass_type Weights::getMinRoundingError() const
{
alphabet_mass_type min_error = 0;
for (size_type i = 0; i < weights_.size(); ++i)
{
alphabet_mass_type error = (precision_ * static_cast<alphabet_mass_type>(weights_[i]) - alphabet_masses_[i]) / alphabet_masses_[i];
if (error < 0 && error < min_error)
{
min_error = error;
}
}
return min_error;
}
Weights::alphabet_mass_type Weights::getMaxRoundingError() const
{
alphabet_mass_type max_error = 0;
for (size_type i = 0; i < weights_.size(); ++i)
{
alphabet_mass_type error = (precision_ * static_cast<alphabet_mass_type>(weights_[i]) - alphabet_masses_[i]) / alphabet_masses_[i];
if (error > 0 && error > max_error)
{
max_error = error;
}
}
return max_error;
}
std::ostream & operator<<(std::ostream & os, const Weights & weights)
{
for (Weights::size_type i = 0; i < weights.size(); ++i)
{
os << weights.getWeight(i) << std::endl;
}
return os;
}
} // namespace OpenMS // namespace ims
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/MASSDECOMPOSITION/IMS/IMSAlphabet.cpp | .cpp | 4,006 | 149 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Anton Pervukhin <Anton.Pervukhin@CeBiTec.Uni-Bielefeld.DE> $
// --------------------------------------------------------------------------
//
#include <functional>
#include <algorithm>
#include <OpenMS/DATASTRUCTURES/String.h>
#include <OpenMS/CHEMISTRY/MASSDECOMPOSITION/IMS/IMSAlphabet.h>
#include <OpenMS/CHEMISTRY/MASSDECOMPOSITION/IMS/IMSAlphabetTextParser.h>
namespace OpenMS::ims
{
  // Returns the name of the element at position 'index' (no bounds check here).
  const IMSAlphabet::name_type & IMSAlphabet::getName(size_type index) const
  {
    return getElement(index).getName();
  }
  // Returns the mass of the element at position 'index' (no bounds check here).
  IMSAlphabet::mass_type IMSAlphabet::getMass(size_type index) const
  {
    return getElement(index).getMass();
  }
  // Returns the mass of the element with the given name; getElement() throws
  // InvalidValue if the name is unknown.
  IMSAlphabet::mass_type IMSAlphabet::getMass(const name_type & name) const
  {
    return getElement(name).getMass();
  }
bool IMSAlphabet::hasName(const name_type & name) const
{
return std::find_if(elements_.begin(), elements_.end(),
[&name](const element_type& e) { return e.getName() == name; })
!= elements_.end();
}
const IMSAlphabet::element_type & IMSAlphabet::getElement(const name_type & name) const
{
for (const IMSElement& cit : elements_)
{
if (cit.getName() == name)
{
return cit;
}
}
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name + " was not found in IMSAlphabet!", String(name));
}
void IMSAlphabet::setElement(const name_type & name, mass_type mass, bool forced)
{
bool found = false;
for (size_type i = 0; i < elements_.size(); ++i)
{
if (name == elements_[i].getName())
{
element_type element(name, mass);
elements_[i] = element;
found = true;
break;
}
}
if (!found && forced)
{
this->push_back(name, mass);
}
}
bool IMSAlphabet::erase(const name_type & name)
{
bool found = false;
for (iterator it = elements_.begin(); it != elements_.end(); ++it)
{
if (it->getName() == name)
{
elements_.erase(it);
found = true;
break;
}
}
return found;
}
IMSAlphabet::masses_type IMSAlphabet::getMasses(size_type index) const
{
masses_type masses;
for (const IMSElement& cit : elements_)
{
masses.push_back(cit.getMass(index));
}
return masses;
}
IMSAlphabet::masses_type IMSAlphabet::getAverageMasses() const
{
masses_type masses;
for (const IMSElement& cit : elements_)
{
masses.push_back(cit.getAverageMass());
}
return masses;
}
void IMSAlphabet::sortByNames()
{
std::sort(elements_.begin(), elements_.end(),
[&](const element_type& a, const element_type& b)
{ return a.getName() < b.getName(); });
}
  // Sorts elements using the class-defined MassSortingCriteria_ comparator.
  void IMSAlphabet::sortByValues()
  {
    std::sort(elements_.begin(), elements_.end(), MassSortingCriteria_());
  }
  // Convenience overload: parse 'fname' with the default text parser.
  void IMSAlphabet::load(const std::string & fname)
  {
    IMSAlphabetTextParser parser;
    this->load(fname, parser);
  }
void IMSAlphabet::load(const std::string & fname, IMSAlphabetParser<> & parser)
{
parser.load(fname);
this->clear();
for (const auto & pos : parser.getElements())
{
this->push_back(pos.first, pos.second);
}
this->sortByValues();
}
std::ostream & operator<<(std::ostream & os, const IMSAlphabet & alphabet)
{
for (IMSAlphabet::size_type i = 0; i < alphabet.size(); ++i)
{
os << alphabet.getElement(i) << '\n';
}
return os;
}
} // namespace OpenMS // namespace ims
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/MASSDECOMPOSITION/IMS/IMSAlphabetTextParser.cpp | .cpp | 1,378 | 43 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Anton Pervukhin <Anton.Pervukhin@CeBiTec.Uni-Bielefeld.DE> ?? $
// --------------------------------------------------------------------------
//
#include <sstream>
#include <OpenMS/CHEMISTRY/MASSDECOMPOSITION/IMS/IMSAlphabetTextParser.h>
/**
Parses the data from the stream @c is .
While loading the following is ignored:
- white space
- lines containing only white space
- lines starting with '#' (even after leading whitespace, but not after anything else)
@param[in] is The input stream to be parsed.
*/
void OpenMS::ims::IMSAlphabetTextParser::parse(std::istream & is)
{
  // Parses whitespace-separated "<name> <mass>" pairs, one per line.
  // Lines that are empty, all-whitespace, or whose first non-blank character
  // is '#' are skipped.
  // Fix: the extraction result is now checked — previously a malformed line
  // (missing or non-numeric mass) inserted the stale 'mass' value left over
  // from an earlier iteration (or an uninitialized double on the first line).
  elements_.clear(); // first make sure the store is empty
  std::string line;
  std::string name;
  const std::string delimits(" \t"), comments("#");
  double mass;
  while (std::getline(is, line))
  {
    std::string::size_type i = line.find_first_not_of(delimits);
    if (i == std::string::npos || comments.find(line[i]) != std::string::npos)
    {
      continue; // skip blank and comment lines
    }
    std::istringstream input(line);
    // keep the line only if both the name and a numeric mass parsed cleanly
    if (input >> name >> mass)
    {
      elements_.insert(std::make_pair(name, mass));
    }
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/ISOTOPEDISTRIBUTION/IsotopeDistribution.cpp | .cpp | 7,695 | 278 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Clemens Groepl, Andreas Bertsch, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/IsotopeDistribution.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/CHEMISTRY/EmpiricalFormula.h>
#include <OpenMS/CHEMISTRY/Element.h>
#include <OpenMS/KERNEL/Peak1D.h>
#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <fstream>
#include <functional>
#include <iostream>
#include <limits>
#include <numeric>
#include <tuple>
#include <utility>
using namespace std;
namespace OpenMS
{
  // Default distribution: a single peak at (m/z 0, intensity 1).
  IsotopeDistribution::IsotopeDistribution()
  {
    distribution_.push_back(Peak1D(0, 1));
  }
IsotopeDistribution& IsotopeDistribution::operator=(const IsotopeDistribution & iso)
{
if (this != &iso)
{
distribution_ = iso.distribution_;
}
return *this;
}
  // Replaces the peak container with a copy of 'distribution'.
  void IsotopeDistribution::set(const ContainerType & distribution)
  {
    distribution_ = distribution;
  }
  // Replaces the peak container, moving from the argument (no copy).
  void IsotopeDistribution::set(ContainerType && distribution)
  {
    distribution_ = std::move(distribution);
  }
  // Read-only access to the underlying peak container.
  const IsotopeDistribution::ContainerType& IsotopeDistribution::getContainer() const
  {
    return distribution_;
  }
Peak1D::CoordinateType IsotopeDistribution::getMax() const
{
if (distribution_.empty())
{
return 0;
}
return std::max_element(begin(), end(), MassAbundance::MZLess())->getMZ();
}
Peak1D::CoordinateType IsotopeDistribution::getMin() const
{
if (distribution_.empty())
{
return 0;
}
return std::min_element(begin(), end(), MassAbundance::MZLess())->getMZ();
}
Peak1D IsotopeDistribution::getMostAbundant() const
{
if (distribution_.empty())
{
return Peak1D(0, 1);
}
return *std::max_element(begin(), end(), MassAbundance::IntensityLess());
}
  // Number of peaks in the distribution.
  Size IsotopeDistribution::size() const
  {
    return distribution_.size();
  }
  // Removes all peaks.
  void IsotopeDistribution::clear()
  {
    distribution_.clear();
  }
  // Resizes the peak container; new peaks are value-initialized.
  void IsotopeDistribution::resize(UInt new_size)
  {
    distribution_.resize(new_size);
  }
void IsotopeDistribution::trimIntensities(double cutoff)
{
distribution_.erase(
remove_if(distribution_.begin(),
distribution_.end(),
[&cutoff](const MassAbundance& sample)
{
return sample.getIntensity() < cutoff;
}), distribution_.end());
}
  // Sorts the peak container with the supplied comparator.
  void IsotopeDistribution::sort_(
    function<bool(const MassAbundance& p1, const MassAbundance& p2)> sorter)
  {
    sort(distribution_.begin(), distribution_.end(), std::move(sorter));
  }
void IsotopeDistribution::sortByIntensity()
{
sort_([](const MassAbundance& p1, const MassAbundance& p2){
return p1.getIntensity() > p2.getIntensity();
});
}
void IsotopeDistribution::sortByMass()
{
sort_([](const MassAbundance& p1, const MassAbundance& p2){
return p1.getMZ() < p2.getMZ();
});
}
  // Applies 'lambda' to every peak in place.
  void IsotopeDistribution::transform_(function<void(MassAbundance&)> lambda)
  {
    for_each(distribution_.begin(), distribution_.end(), std::move(lambda));
  }
  // Element-wise equality of the two peak containers.
  bool IsotopeDistribution::operator==(const IsotopeDistribution & isotope_distribution) const
  {
    return distribution_ == isotope_distribution.distribution_;
  }
bool IsotopeDistribution::operator<(const IsotopeDistribution & rhs) const
{
if (distribution_.size() != rhs.distribution_.size())
{
return distribution_.size() < rhs.distribution_.size();
}
// both vectors have same size
auto it = distribution_.begin();
auto rhs_it = rhs.distribution_.begin();
for (; it != distribution_.end(); ++it, ++rhs_it)
{
if (*it != *rhs_it)
{
const double mz = it->getMZ();
const double in = it->getIntensity();
const double rhs_mz = rhs_it->getMZ();
const double rhs_in = rhs_it->getIntensity();
return tie(mz, in) < tie(rhs_mz, rhs_in);
}
}
return false;
}
bool IsotopeDistribution::operator!=(const IsotopeDistribution & isotope_distribution) const
{
return !(isotope_distribution == *this);
}
void IsotopeDistribution::renormalize()
{
if (!distribution_.empty())
{
double sum(0);
// loop backwards as most distributions contains a lot of small values at the end
for (auto it = distribution_.rbegin(); it != distribution_.rend(); ++it)
{
sum += it->getIntensity();
}
for (Iterator it = distribution_.begin(); it != distribution_.end(); ++it)
{
it->setIntensity(it->getIntensity() / sum);
}
}
}
  // Removes trailing peaks whose intensity is below 'cutoff'.
  void IsotopeDistribution::trimRight(double cutoff)
  {
    auto riter = distribution_.rbegin();
    // loop from right to left until an entry is larger than the cutoff
    for (; riter != distribution_.rend(); ++riter)
    {
      if (riter->getIntensity() >= cutoff)
      {
        break;
      }
    }
    // trim the container
    // riter.base() points one past the last kept element, so this keeps
    // everything up to (and including) the rightmost peak >= cutoff; if no
    // peak reaches the cutoff, the container becomes empty.
    distribution_.resize(riter.base() - distribution_.begin());
  }
  // Removes leading peaks whose intensity is below 'cutoff', stopping at the
  // first peak that reaches it.
  // NOTE(review): if *no* peak reaches the cutoff, nothing is erased here (the
  // loop ends without hitting the break), whereas trimRight() empties the
  // container in the same situation — confirm this asymmetry is intended.
  void IsotopeDistribution::trimLeft(double cutoff)
  {
    for (auto iter = distribution_.begin(); iter != distribution_.end(); ++iter)
    {
      if (iter->getIntensity() >= cutoff)
      {
        distribution_.erase(distribution_.begin(), iter);
        break;
      }
    }
  }
  // Intensity-weighted mean m/z. The first pass sums the intensities so the
  // second pass can use normalized weights; this also makes the result
  // independent of whether the distribution was renormalized beforehand.
  // NOTE(review): an empty distribution yields 0.0/0.0 accumulations (prob_sum
  // is 0 and the second accumulate returns 0) — callers should not rely on a
  // meaningful value in that case.
  double IsotopeDistribution::averageMass() const
  {
    double prob_sum = accumulate(distribution_.begin(),
                                 distribution_.end(),
                                 0.0,
                                 [](double total_prob, const Peak1D& iso)
                                 {
                                   return total_prob + iso.getIntensity();
                                 });
    return accumulate(distribution_.begin(), distribution_.end(), 0.0,
                      [&prob_sum](double average_mass, const Peak1D& iso)
                      {
                        return average_mass +
                               iso.getMZ() * (iso.getIntensity() / prob_sum);
                      });
  }
void IsotopeDistribution::merge(double resolution, double min_prob)
{
// Sort by mass and trim the tails of the container
sortByMass();
trimLeft(min_prob);
trimRight(min_prob);
ContainerType raw = distribution_;
double mass_range = (raw.back().getMZ() - raw.front().getMZ());
UInt output_size = ceil(mass_range / resolution);
if (output_size > distribution_.size())
{
throw Exception::IllegalArgument(__FILE__,
__LINE__,
OPENMS_PRETTY_FUNCTION,
"New Isotope Distribution "
"has more points than the old one.");
}
distribution_.clear();
ContainerType distribution(output_size, Peak1D(0, 0));
double delta = mass_range / output_size;
for (auto& p : raw)
{
UInt index = round((p.getMZ() - raw.front().getMZ())/resolution);
if (index >= distribution.size()) {continue;}
double mass = raw.front().getMZ() + (index * delta);
distribution[index].setMZ(mass);
distribution[index].setIntensity(distribution[index].getIntensity() + p.getIntensity());
}
distribution_ = distribution;
trimIntensities(min_prob);
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/ISOTOPEDISTRIBUTION/FineIsotopePatternGenerator.cpp | .cpp | 1,874 | 52 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Rost $
// $Authors: Hannes Rost, Michał Startek, Mateusz Łącki $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/FineIsotopePatternGenerator.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/IsotopeDistribution.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/IsoSpecWrapper.h>
#include <OpenMS/CHEMISTRY/ElementDB.h>
namespace OpenMS
{
IsotopeDistribution FineIsotopePatternGenerator::run(const EmpiricalFormula& formula) const
{
if (formula.getCharge() < 0)
{
throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"FineIsotopePatternGenerator does not support negative charges (formula: " + formula.toString() + ").");
}
if (formula.getCharge() > 0)
{
// add hydrogen atoms to the formula to match the charge
EmpiricalFormula charged_formula = formula;
charged_formula += EmpiricalFormula(formula.getCharge(), ElementDB::getInstance()->getElement("H"));
charged_formula.setCharge(0); // reset charge, since we added H atoms to match the charge
/// note: technically, the masses are off by q*electron mass (do we care?)
return run(charged_formula);
}
if (use_total_prob_)
{
IsotopeDistribution result(IsoSpecTotalProbWrapper(formula, 1.0-stop_condition_, true).run());
result.sortByMass();
return result;
}
else
{
IsotopeDistribution result(IsoSpecThresholdWrapper(formula, stop_condition_, absolute_).run());
result.sortByMass();
return result;
}
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/ISOTOPEDISTRIBUTION/CoarseIsotopePatternGenerator.cpp | .cpp | 20,913 | 521 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Clemens Groepl, Andreas Bertsch, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/CoarseIsotopePatternGenerator.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/IsotopeDistribution.h>
#include <OpenMS/CHEMISTRY/ElementDB.h>
#include <OpenMS/CHEMISTRY/EmpiricalFormula.h>
#include <OpenMS/CHEMISTRY/Element.h>
#include <include/OpenMS/CONCEPT/Constants.h>
#include <cmath>
#include <iostream>
#include <cstdlib>
#include <algorithm>
#include <limits>
#include <functional>
#include <numeric>
using namespace std;
namespace OpenMS
{
  // Stores the maximum number of isotope peaks to compute and the
  // round-masses flag (see getRoundMasses()).
  CoarseIsotopePatternGenerator::CoarseIsotopePatternGenerator(const Size max_isotope, const bool round_masses) :
    IsotopePatternGenerator(),
    max_isotope_(max_isotope),
    round_masses_(round_masses)
  {
  }
  // Defaulted destructor: no owned resources.
  CoarseIsotopePatternGenerator::~CoarseIsotopePatternGenerator() = default;
  // Sets the maximum number of isotope peaks to compute.
  void CoarseIsotopePatternGenerator::setMaxIsotope(const Size& max_isotope)
  {
    max_isotope_ = max_isotope;
  }
  // Returns the maximum number of isotope peaks to compute.
  Size CoarseIsotopePatternGenerator::getMaxIsotope() const
  {
    return max_isotope_;
  }
  // Sets the round-masses flag (used by run() when correcting masses).
  void CoarseIsotopePatternGenerator::setRoundMasses(const bool round_masses)
  {
    round_masses_ = round_masses;
  }
  // Returns the round-masses flag.
  bool CoarseIsotopePatternGenerator::getRoundMasses() const
  {
    return round_masses_;
  }
  // Computes the coarse (unit-mass) isotope distribution of 'formula' by
  // convolving the per-element isotope distributions, raising each to the
  // power of the element count. Throws Precondition for negative charges.
  IsotopeDistribution CoarseIsotopePatternGenerator::run(const EmpiricalFormula& formula) const
  {
    if (formula.getCharge() < 0)
    {
      throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "CoarseIsotopePatternGenerator does not support negative charges (formula: " + formula.toString() + ").");
    }
    IsotopeDistribution result;
    // Convolve in the distribution of each element, raised to its count.
    auto it = formula.begin();
    for (; it != formula.end(); ++it)
    {
      IsotopeDistribution tmp = it->first->getIsotopeDistribution();
      result.set(convolve(result.getContainer(),
                          convolvePow_(tmp.getContainer(), it->second)));
    }
    // charged adducts are assumed to be H+, but are not part of the actual formula, yet are used in EmpiricalFormula::getMonoWeight();
    auto proton_charge = ElementDB::getInstance()->getElement("H")->getIsotopeDistribution();
    result.set(convolve(result.getContainer(), convolvePow_(proton_charge.getContainer(), formula.getCharge())));
    // replace atomic numbers with masses.
    result.set(correctMass_(result.getContainer(), formula.getLightestIsotopeWeight()));
    result.renormalize();
    return result;
  }
  // Estimates a peptide isotope pattern from its average weight using
  // averagine element frequencies (Senko et al.).
  IsotopeDistribution CoarseIsotopePatternGenerator::estimateFromPeptideWeight(double average_weight)
  {
    // Element counts are from Senko's Averagine model
    return estimateFromWeightAndComp(average_weight, 4.9384, 7.7583, 1.3577, 1.4773, 0.0417, 0);
  }
  // Same as estimateFromPeptideWeight(), but starting from the monoisotopic weight.
  IsotopeDistribution CoarseIsotopePatternGenerator::estimateFromPeptideMonoWeight(double mono_weight)
  {
    // Element counts are from Senko's Averagine model
    return estimateFromMonoWeightAndComp(mono_weight, 4.9384, 7.7583, 1.3577, 1.4773, 0.0417, 0);
  }
  // Averagine-based estimate with an explicitly known sulfur count S.
  IsotopeDistribution CoarseIsotopePatternGenerator::estimateFromPeptideWeightAndS(double average_weight, UInt S)
  {
    // Element counts are from Senko's Averagine model, excluding sulfur.
    return estimateFromWeightAndCompAndS(average_weight, S, 4.9384, 7.7583, 1.3577, 1.4773, 0);
  }
  // Averagine-like estimate for RNA from the average weight.
  IsotopeDistribution CoarseIsotopePatternGenerator::estimateFromRNAWeight(double average_weight)
  {
    return estimateFromWeightAndComp(average_weight, 9.75, 12.25, 3.75, 7, 0, 1);
  }
  // Averagine-like estimate for RNA from the monoisotopic weight.
  IsotopeDistribution CoarseIsotopePatternGenerator::estimateFromRNAMonoWeight(double mono_weight)
  {
    return estimateFromMonoWeightAndComp(mono_weight, 9.75, 12.25, 3.75, 7, 0, 1);
  }
  // Averagine-like estimate for DNA from the average weight (O count 6 vs 7 for RNA).
  IsotopeDistribution CoarseIsotopePatternGenerator::estimateFromDNAWeight(double average_weight)
  {
    return estimateFromWeightAndComp(average_weight, 9.75, 12.25, 3.75, 6, 0, 1);
  }
  // Builds an empirical formula matching the average weight and the given
  // relative C/H/N/O/S/P composition, then computes its isotope distribution.
  IsotopeDistribution CoarseIsotopePatternGenerator::estimateFromWeightAndComp(double average_weight, double C, double H, double N, double O, double S, double P)
  {
    EmpiricalFormula ef;
    ef.estimateFromWeightAndComp(average_weight, C, H, N, O, S, P);
    return ef.getIsotopeDistribution(*this);
  }
  // Same as estimateFromWeightAndComp(), but matching the monoisotopic weight.
  IsotopeDistribution CoarseIsotopePatternGenerator::estimateFromMonoWeightAndComp(double mono_weight, double C, double H, double N, double O, double S, double P)
  {
    EmpiricalFormula ef;
    ef.estimateFromMonoWeightAndComp(mono_weight, C, H, N, O, S, P);
    return ef.getIsotopeDistribution(*this);
  }
  // Like estimateFromWeightAndComp(), but with a fixed sulfur count S.
  IsotopeDistribution CoarseIsotopePatternGenerator::estimateFromWeightAndCompAndS(double average_weight, UInt S, double C, double H, double N, double O, double P)
  {
    EmpiricalFormula ef;
    ef.estimateFromWeightAndCompAndS(average_weight, S, C, H, N, O, P);
    return ef.getIsotopeDistribution(*this);
  }
  // Fragment isotope estimate for peptides using averagine composition;
  // 'precursor_isotopes' selects which precursor isotopes were isolated.
  IsotopeDistribution CoarseIsotopePatternGenerator::estimateForFragmentFromPeptideWeight(double average_weight_precursor, double average_weight_fragment, const std::set<UInt>& precursor_isotopes)
  {
    // Element counts are from Senko's Averagine model
    return estimateForFragmentFromWeightAndComp(average_weight_precursor, average_weight_fragment, precursor_isotopes, 4.9384, 7.7583, 1.3577, 1.4773, 0.0417, 0);
  }
  // Fragment isotope estimate with known sulfur counts for precursor and fragment.
  // NOTE(review): 'S_precursor - S_fragment' is unsigned arithmetic; if
  // S_fragment > S_precursor this wraps to a huge value before the conversion
  // to double — confirm callers guarantee S_precursor >= S_fragment.
  // NOTE(review): max_element on an empty 'precursor_isotopes' dereferences
  // the end iterator — confirm callers pass a non-empty set.
  IsotopeDistribution CoarseIsotopePatternGenerator::estimateForFragmentFromPeptideWeightAndS(double average_weight_precursor, UInt S_precursor, double average_weight_fragment, UInt S_fragment, const std::set<UInt>& precursor_isotopes) const
  {
    UInt max_depth = *std::max_element(precursor_isotopes.begin(), precursor_isotopes.end())+1;
    double average_weight_comp_fragment = average_weight_precursor - average_weight_fragment;
    double S_comp_fragment = S_precursor - S_fragment;
    // We need the solver to return atomic numbers to be compatible with calcFragmentIsotopeDist
    CoarseIsotopePatternGenerator solver(max_depth, false);
    EmpiricalFormula ef_fragment;
    ef_fragment.estimateFromWeightAndCompAndS(average_weight_fragment, S_fragment, 4.9384, 7.7583, 1.3577, 1.4773, 0);
    IsotopeDistribution id_fragment(ef_fragment.getIsotopeDistribution(solver));
    IsotopeDistribution id_comp_fragment(solver.estimateFromPeptideWeightAndS(average_weight_comp_fragment, S_comp_fragment));
    IsotopeDistribution result = calcFragmentIsotopeDist(id_fragment, id_comp_fragment, precursor_isotopes, ef_fragment.getLightestIsotopeWeight());
    return result;
  }
// static
IsotopeDistribution CoarseIsotopePatternGenerator::approximateFromPeptideWeight(double mass, UInt num_peaks, UInt charge)
{
IsotopeDistribution result;
result.resize(num_peaks);
// lambda * mass. Lambda is the parameter for Poisson distribution. Value (1/1800) taken from Bellew et al
double factor = mass / 1800.0;
// for k=0, non-normalized value is always 1
result[0] = Peak1D(mass, 1.0f);
float curr_intensity = 1.0f;
for (UInt k = 1; k < num_peaks; ++k) // result[0] is always 1 anyway
{
curr_intensity *= factor / k; // represents (m * lambda)^k / k!
// at some point, curr_intensity will become too small for float (which is the intensity type of Peak1D)
result[k] = Peak1D(mass + (k * OpenMS::Constants::NEUTRON_MASS_U / charge),
curr_intensity != curr_intensity ? 0.0f : curr_intensity);
}
result.renormalize();
return result;
}
  // Normalized Poisson-approximated isotope intensities (no masses), same
  // model as approximateFromPeptideWeight().
  std::vector<double> CoarseIsotopePatternGenerator::approximateIntensities(double mass, UInt num_peaks)
  {
    std::vector<double> result(num_peaks, 1.0);
    // lambda * mass. Lambda is the parameter of Poisson distribution. Value (1/1800) taken from Bellew et al
    double factor = mass / 1800.0;
    double curr_intensity = 1.0;
    double sum = 1.0; // result[0] is always factor^0/1 = 1, which is the reason why we start the loop at 1
    for (UInt k = 1; k < num_peaks; ++k)
    {
      curr_intensity *= factor / k; // represents (m * lambda)^k / k!
      // at some point, curr_intensity will become too small for float (which is the intensity type of Peak1D)
      // 'x != x' is true only for NaN, so non-finite terms are replaced by 0
      result[k] = curr_intensity != curr_intensity ? 0.0: curr_intensity;
      sum += result[k];
    }
    // normalize
    for (UInt k = 0; k != result.size(); ++k)
    {
      result[k] /= sum;
    }
    return result;
  }
  // Fragment isotope estimate for RNA (averagine-like RNA composition).
  IsotopeDistribution CoarseIsotopePatternGenerator::estimateForFragmentFromRNAWeight(double average_weight_precursor, double average_weight_fragment, const std::set<UInt>& precursor_isotopes)
  {
    return estimateForFragmentFromWeightAndComp(average_weight_precursor, average_weight_fragment, precursor_isotopes, 9.75, 12.25, 3.75, 7, 0, 1);
  }
  // Fragment isotope estimate for DNA (averagine-like DNA composition).
  IsotopeDistribution CoarseIsotopePatternGenerator::estimateForFragmentFromDNAWeight(double average_weight_precursor, double average_weight_fragment, const std::set<UInt>& precursor_isotopes)
  {
    return estimateForFragmentFromWeightAndComp(average_weight_precursor, average_weight_fragment, precursor_isotopes, 9.75, 12.25, 3.75, 6, 0, 1);
  }
  // Generic fragment isotope estimate: builds averagine formulas for the
  // fragment and its complement (precursor minus fragment), then combines
  // their distributions via calcFragmentIsotopeDist().
  // NOTE(review): max_element on an empty 'precursor_isotopes' dereferences
  // the end iterator — confirm callers pass a non-empty set.
  IsotopeDistribution CoarseIsotopePatternGenerator::estimateForFragmentFromWeightAndComp(double average_weight_precursor, double average_weight_fragment, const std::set<UInt>& precursor_isotopes, double C, double H, double N, double O, double S, double P) const
  {
    UInt max_depth = *std::max_element(precursor_isotopes.begin(), precursor_isotopes.end()) + 1;
    // We need the solver to return atomic numbers to be compatible with calcFragmentIsotopeDist
    CoarseIsotopePatternGenerator solver(max_depth, false);
    EmpiricalFormula ef_fragment;
    ef_fragment.estimateFromWeightAndComp(average_weight_fragment, C, H, N, O, S, P);
    IsotopeDistribution id_fragment = ef_fragment.getIsotopeDistribution(solver);
    EmpiricalFormula ef_comp_frag;
    ef_comp_frag.estimateFromWeightAndComp(average_weight_precursor-average_weight_fragment, C, H, N, O, S, P);
    IsotopeDistribution id_comp_fragment = ef_comp_frag.getIsotopeDistribution(solver);
    IsotopeDistribution result = calcFragmentIsotopeDist(id_fragment, id_comp_fragment, precursor_isotopes, ef_fragment.getLightestIsotopeWeight());
    return result;
  }
  // Combines fragment and complementary-fragment distributions for the
  // selected precursor isotopes, then converts atomic numbers to masses.
  IsotopeDistribution CoarseIsotopePatternGenerator::calcFragmentIsotopeDist(const IsotopeDistribution& fragment_isotope_dist, const IsotopeDistribution& comp_fragment_isotope_dist, const std::set<UInt>& precursor_isotopes, const double fragment_lightest_iso_mass) const
  {
    IsotopeDistribution result = calcFragmentIsotopeDist_(fragment_isotope_dist.getContainer(), comp_fragment_isotope_dist.getContainer(), precursor_isotopes);
    // replace atomic numbers with masses.
    result.set(correctMass_(result.getContainer(), fragment_lightest_iso_mass));
    return result;
  }
// Discrete convolution of two coarse isotope distributions:
//   result[k] = sum over i+j==k of left[i] * right[j]
// Positions are treated as consecutive integer nucleon counts; the result is truncated
// to max_isotope_ entries when a maximum number of isotopes is configured.
// Returns an empty container if either input is empty.
IsotopeDistribution::ContainerType CoarseIsotopePatternGenerator::convolve(const IsotopeDistribution::ContainerType& left, const IsotopeDistribution::ContainerType& right) const
{
IsotopeDistribution::ContainerType result;
if (left.empty() || right.empty())
{
result.clear();
return result;
}
// ensure the isotope cluster has no gaps
// (e.g. from Bromine there is only Bromine-79 & Bromine-81, so we need to insert Bromine-80 with zero probability)
IsotopeDistribution::ContainerType left_l = fillGaps_(left);
IsotopeDistribution::ContainerType right_l = fillGaps_(right);
// full convolution length; optionally truncated to max_isotope_
IsotopeDistribution::ContainerType::size_type r_max = left_l.size() + right_l.size() - 1;
if ((IsotopeDistribution::ContainerType::size_type)max_isotope_ != 0 && r_max > (IsotopeDistribution::ContainerType::size_type)max_isotope_)
{
r_max = (IsotopeDistribution::ContainerType::size_type)max_isotope_;
}
// pre-fill result with masses
result.resize(r_max);
for (IsotopeDistribution::ContainerType::size_type i = 0; i != r_max; ++i)
{
// positions are consecutive, starting at the sum of the two lightest positions
result[i] = Peak1D(left_l[0].getMZ() + right_l[0].getMZ() + i, 0);
}
// fill result with probabilities
// (we loop backwards because then the small products tend to come first, for better numerics)
for (SignedSize i = left_l.size() - 1; i >= 0; --i)
{
// j is bounded so that i + j stays inside the (possibly truncated) result
for (SignedSize j = min<SignedSize>(r_max - i, right_l.size()) - 1; j >= 0; --j)
{
Peak1D& peak = result[i + j];
Peak1D::IntensityType p_intensity = peak.getIntensity();
peak.setIntensity( p_intensity + left_l[i].getIntensity() * right_l[j].getIntensity());
}
}
return result;
}
// Computes the n-fold convolution of 'input' with itself using binary exponentiation
// (square-and-multiply over the convolution operation). Returns an empty container for
// empty input. For n == 1 the (gap-unfilled) input is returned unchanged.
IsotopeDistribution::ContainerType CoarseIsotopePatternGenerator::convolvePow_(const IsotopeDistribution::ContainerType& input, Size n) const
{
IsotopeDistribution::ContainerType result;
if (input.empty())
{
return result;
}
// TODO: use FFT convolve?
if (n == 1)
{
result = input; // Not needed copy
return result;
}
Size log2n = 0;
// modification by Chris to prevent infinite loop when n > 2^63
if (n > (Size(1) << (std::numeric_limits<Size>::digits - 1)))
{
log2n = std::numeric_limits<Size>::digits;
}
else
{
// find binary logarithm of n
for (; (Size(1) << log2n) < n; ++log2n)
{
}
}
IsotopeDistribution::ContainerType input_l = fillGaps_(input);
// get started: seed with input if the lowest bit of n is set, otherwise with the
// identity element of convolution (single peak at position 0 with probability 1)
if (n & 1)
{
result = input_l;
}
else
{
result.clear();
result.push_back(IsotopeDistribution::MassAbundance(0, 1.0));
}
// to avoid taking unnecessary squares, we check the loop condition
// somewhere in the middle
IsotopeDistribution::ContainerType convolution_power = convolveSquare_(input_l);
// classic square-and-multiply: convolution_power holds input^(2^i)
for (Size i = 1;; ++i)
{
if (n & (Size(1) << i))
{
result = convolve(result, convolution_power);
}
// check the loop condition
if (i >= log2n)
{
break;
}
// prepare next round
convolution_power = convolveSquare_(convolution_power);
}
return result;
}
// Convolution of 'input' with itself (specialization of convolve() for left == right).
// The result length is 2*n-1, optionally truncated to max_isotope_ + 1 entries.
// Fix: guard against empty input — previously '2 * input.size() - 1' underflowed the
// unsigned size type for an empty container (convolve() and convolvePow_() already guard;
// current callers happen to pass non-empty input, but the guard makes this safe standalone).
IsotopeDistribution::ContainerType CoarseIsotopePatternGenerator::convolveSquare_(const IsotopeDistribution::ContainerType& input) const
{
IsotopeDistribution::ContainerType result;
if (input.empty())
{
return result;
}
IsotopeDistribution::ContainerType::size_type r_max = 2 * input.size() - 1;
if ((IsotopeDistribution::ContainerType::size_type)max_isotope_ != 0 && (IsotopeDistribution::ContainerType::size_type)(max_isotope_ + 1) < r_max)
{
r_max = (IsotopeDistribution::ContainerType::size_type)(max_isotope_ + 1);
}
// pre-fill result with positions (consecutive, starting at twice the lightest position)
result.resize(r_max);
for (IsotopeDistribution::ContainerType::size_type i = 0; i != r_max; ++i)
{
result[i] = Peak1D(2 * input[0].getMZ() + i, 0);
}
// we loop backwards because then the small products tend to come first
// (for better numerics)
for (SignedSize i = input.size() - 1; i >= 0; --i)
{
// j is bounded so that i + j stays inside the (possibly truncated) result
for (SignedSize j = min<SignedSize>(r_max - i, input.size()) - 1; j >= 0; --j)
{
result[i + j].setIntensity( result[i + j].getIntensity() + input[i].getIntensity() * input[j].getIntensity());
}
}
return result;
}
// Computes the (unnormalized) fragment isotope distribution conditioned on the set of
// isolated precursor isotopes, via Bayes' theorem applied to the fragment and its
// complementary fragment distributions (see worked example in the comments below).
// Positions in the result are atomic numbers (offsets from the fragment's lightest isotope);
// they are converted to masses later by correctMass_(). Returns empty if either input is empty.
IsotopeDistribution CoarseIsotopePatternGenerator::calcFragmentIsotopeDist_(const IsotopeDistribution::ContainerType& fragment_isotope_dist, const IsotopeDistribution::ContainerType& comp_fragment_isotope_dist, const std::set<UInt>& precursor_isotopes) const
{
IsotopeDistribution result;
if (fragment_isotope_dist.empty() || comp_fragment_isotope_dist.empty())
{
result.clear();
return result;
}
// result length = fragment distribution length, optionally truncated to max_isotope_
IsotopeDistribution::ContainerType::size_type r_max = fragment_isotope_dist.size();
if ((IsotopeDistribution::ContainerType::size_type)max_isotope_ != 0 && r_max > (IsotopeDistribution::ContainerType::size_type)max_isotope_)
{
r_max = (IsotopeDistribution::ContainerType::size_type)max_isotope_;
}
// pre-fill result with masses
result.resize(r_max);
for (IsotopeDistribution::ContainerType::size_type i = 0; i != r_max; ++i)
{
result[i] = Peak1D(fragment_isotope_dist[0].getMZ() + i, 0);
}
// Example: Let the Precursor formula be C2, and assume precursors M0, M+1, and M+2 were isolated.
// Let the fragment formula be C1, and therefore the complementary fragment formula is also C1
//
// let fi = fragment formula's isotope, pi = precursor formula's isotope, ci = complementary fragment formula's isotope
// let P(fi=x) be the probability of the formula existing as isotope x in precursor form (i.e. random sample from the universe)
//
// We want to calculate the probability the fragment will be isotope x given that we isolated precursors M0,M+1,M+2
//
// P(fi=0|pi=0 or pi=1 or pi=2) = P(fi=0) * P(pi=0 or pi=1 or pi=2|fi=0) / P(pi=0 or pi=1 or pi=2) // Bayes' theorem
// = P(fi=0) * (P(pi=0|fi=0) + P(pi=1|fi=0) + P(pi=2|fi=0)) / (P(pi=0) + P(pi=1) + P(pi=2)) // mutually exclusive events
// = P(fi=0) * (P(ci=0) + P(ci=1) + P(ci=2)) / (P(pi=0) + P(pi=1) + P(pi=2)) // The only way pi=x|fi=y, is if ci=x-y
// = P(fi=0) * (P(ci=0) + P(ci=1) + P(ci=2)) // ignore normalization for now
// ^this is the form we're calculating in the code, which is technically P(fi=0 and (pi=0 or pi=1 or pi=2)) because we didn't normalize
// = 0.9893 * (0.9893 + 0.0107 + 0)
// Note: In this example, P(ci=2)=0 because the complementary fragment is just C and cannot exist with 2 extra neutrons
//
// P(fi=1|pi=0 or pi=1 or pi=2) = P(fi=1) * P(pi=0 or pi=1 or pi=2|fi=1) / P(pi=0 or pi=1 or pi=2)
// = P(fi=1) * (P(pi=0|fi=1) + P(pi=1|fi=1) + P(pi=2|fi=1)) / (P(pi=0) + P(pi=1) + P(pi=2))
// = P(fi=1) * (P(ci=-1) + P(ci=0) + P(ci=1)) / (P(pi=0) + P(pi=1) + P(pi=2))
// Note: P(ci<0)=0
// = P(fi=1) * (P(ci=0) + P(ci=1))
// ^this is the form we're calculating in the code
// = 0.0107 * (0.9893 + 0.0107)
//
// P(fi=2|pi=0 or pi=1 or pi=2) = P(fi=2) * P(pi=0 or pi=1 or pi=2|fi=2) / P(pi=0 or pi=1 or pi=2)
// = P(fi=2) * (P(pi=0|fi=2) + P(pi=1|fi=2) + P(pi=2|fi=2)) / (P(pi=0) + P(pi=1) + P(pi=2))
// = P(fi=2) * (P(ci=-2) + P(ci=-1) + P(ci=0)) / (P(pi=0) + P(pi=1) + P(pi=2))
// = P(fi=2) * P(ci=0)
// ^this is the form we're calculating in the code
// = 0 * (0.9893)
// Note: In this example, P(fi=2)=0 because the fragment is just C and cannot exist with 2 extra neutrons.
//
// normalization is needed to get true conditional probabilities if desired.
//
for (Size i = 0; i < fragment_isotope_dist.size(); ++i)
{
// sum P(ci = precursor - i) over all isolated precursor isotopes (skipping impossible ci)
for (std::set<UInt>::const_iterator precursor_itr = precursor_isotopes.begin(); precursor_itr != precursor_isotopes.end(); ++precursor_itr)
{
if (*precursor_itr >= i &&
(*precursor_itr-i) < comp_fragment_isotope_dist.size())
{
result[i].setIntensity( result[i].getIntensity() + comp_fragment_isotope_dist[*precursor_itr-i].getIntensity());
}
}
// multiply by P(fi = i)
result[i].setIntensity(result[i].getIntensity() * fragment_isotope_dist[i].getIntensity());
}
return result;
}
// Returns a copy of 'id' in which every integer position between the first and last peak
// is present; positions missing from the input get zero intensity (e.g. Br-80 between
// Br-79 and Br-81). Positions are rounded to the nearest integer. Empty input is returned as-is.
IsotopeDistribution::ContainerType CoarseIsotopePatternGenerator::fillGaps_(const IsotopeDistribution::ContainerType& id) const
{
if (id.empty()) return id;
// integer positions of the first and last peak
const Size first_mass = round(id.front().getMZ());
const Size last_mass = round(id.rbegin()->getMZ());
// one slot per consecutive integer position, intensities default to zero
IsotopeDistribution::ContainerType gapless(last_mass - first_mass + 1);
Size pos = first_mass;
for (IsotopeDistribution::ContainerType::iterator it = gapless.begin(); it != gapless.end(); ++it, ++pos)
{
it->setMZ(pos);
}
// copy the known intensities into their slots
for (IsotopeDistribution::ContainerType::const_iterator it = id.begin(); it != id.end(); ++it)
{
gapless[Size(round(it->getMZ())) - first_mass].setIntensity(it->getIntensity());
}
return gapless;
}
// Replaces the integer (atomic-number) positions of a coarse distribution with real masses:
// peak i gets mass = lightest isotopologue mass + i * (C13 - C12), optionally rounded
// when round masses are requested. Intensities are copied unchanged.
IsotopeDistribution::ContainerType CoarseIsotopePatternGenerator::correctMass_(const IsotopeDistribution::ContainerType& input, const double lighest_iso_weight) const
{
IsotopeDistribution::ContainerType corrected;
corrected.reserve(input.size());
Size idx = 0;
for (const auto& peak : input)
{
// We assume that a coarse isotopic peak is mostly composed of carbon-13's
// and therefore use the mass difference between carbon-13 and carbon-12
// to determine the expected mass of a coarse isotopic peak.
double mass = lighest_iso_weight + (idx * Constants::C13C12_MASSDIFF_U);
if (getRoundMasses())
{
mass = round(mass);
}
corrected.emplace_back(mass, peak.getIntensity());
++idx;
}
return corrected;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/ISOTOPEDISTRIBUTION/IsotopePatternGenerator.cpp | .cpp | 859 | 34 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Nikos Patikos $
// $Authors: Nikos Patikos $
// --------------------------------------------------------------------------
//
#include <OpenMS/CHEMISTRY/Element.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/IsotopePatternGenerator.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <cmath>
#include <fstream>
using namespace std;
namespace OpenMS
{
// Constructs a generator that discards isotope peaks whose probability falls below
// the given cutoff.
IsotopePatternGenerator::IsotopePatternGenerator(double probability_cutoff) :
min_prob_(probability_cutoff)
{
}
// Default constructor: uses a probability cutoff of 1e-15.
// Delegates to the main constructor so the cutoff initialization lives in one place.
IsotopePatternGenerator::IsotopePatternGenerator() :
IsotopePatternGenerator(1e-15)
{
}
IsotopePatternGenerator::~IsotopePatternGenerator() = default;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/CHEMISTRY/ISOTOPEDISTRIBUTION/IsoSpecWrapper.cpp | .cpp | 13,220 | 316 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: $
// $Authors: Hannes Rost, Michał Startek, Mateusz Łącki $
// --------------------------------------------------------------------------
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/IsoSpecWrapper.h>
#include <OpenMS/CONCEPT/Macros.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/KERNEL/Peak1D.h>
#include <OpenMS/CHEMISTRY/Element.h>
#include <iterator>
#include <memory>
#include <string>
#include <utility>
// Override IsoSpec's use of mmap whenever it is available
#define ISOSPEC_GOT_SYSTEM_MMAN false
#define ISOSPEC_GOT_MMAN false
#define ISOSPEC_BUILDING_OPENMS true
// TODO Fix this weird way of including. Just make a library out of it,
// link to it and declare it PUBLIC such that it gets linked to dependents of OpenMS lib
// But since it is PUBLIC, you should export the library also for installation (see evergreen thirdparty)
#include "IsoSpec/allocator.cpp"
#include "IsoSpec/dirtyAllocator.cpp"
#include "IsoSpec/isoSpec++.cpp"
#include "IsoSpec/isoMath.cpp"
#include "IsoSpec/marginalTrek++.cpp"
#include "IsoSpec/operators.cpp"
#include "IsoSpec/element_tables.cpp"
#include "IsoSpec/misc.cpp"
#include "IsoSpec/fasta.cpp"
using namespace std;
using namespace IsoSpec;
namespace OpenMS
{
/// Convert a set of isotope probabilities to IsoSpec input.
/// @param isotopeNr number of isotopes per element
/// @param atomCounts number of atoms per element
/// @param isotopeMasses per-element isotope masses
/// @param isotopeProbabilities per-element isotope probabilities (all must be > 0)
/// @throws Exception::IllegalArgument if any probability is not strictly positive
Iso _OMS_IsoFromParameters(const std::vector<int>& isotopeNr,
const std::vector<int>& atomCounts,
const std::vector<std::vector<double> >& isotopeMasses,
const std::vector<std::vector<double> >& isotopeProbabilities)
{
OPENMS_PRECONDITION(isotopeNr.size() == atomCounts.size(), "Vectors need to be of the same size")
OPENMS_PRECONDITION(isotopeNr.size() == isotopeMasses.size(), "Vectors need to be of the same size")
OPENMS_PRECONDITION(isotopeNr.size() == isotopeProbabilities.size(), "Vectors need to be of the same size")
// Check that all probabilities are non-zero (IsoSpec cannot handle zero probabilities).
// The inner vectors are taken by const reference; taking them by value copied every
// probability vector on each call.
if (!std::all_of(std::begin(isotopeProbabilities), std::end(isotopeProbabilities), [](const std::vector<double>& prob){
return std::all_of(std::begin(prob), std::end(prob), [](double p){return p > 0.0;});
}))
{
// pass the message directly; building a std::string just to call c_str() was redundant
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"All probabilities need to be larger than zero");
}
const int dimNumber = static_cast<int>(isotopeNr.size());
// Convert vector of vector to double** (array of pointers into the input vectors)
std::unique_ptr<const double*[]> IM(new const double*[dimNumber]);
std::unique_ptr<const double*[]> IP(new const double*[dimNumber]);
for (int i = 0; i < dimNumber; i++)
{
IM[i] = isotopeMasses[i].data();
IP[i] = isotopeProbabilities[i].data();
}
// IsoSpec will *copy* these values, so once provided we are safe to
// destroy them here on the OpenMS side
Iso ret(dimNumber, isotopeNr.data(), atomCounts.data(), IM.get(), IP.get());
return ret;
}
/// Convert an OpenMS EmpiricalFormula to the input format for IsoSpec.
/// Uses OpenMS' own isotopic tables (not IsoSpec's built-in ones) and drops
/// zero-probability isotopes, which IsoSpec cannot handle.
Iso _OMS_IsoFromEmpiricalFormula(const EmpiricalFormula& formula)
{
// Use our own isotopic tables
std::vector<int> isotopeNumbers, atomCounts;
std::vector<std::vector<double> > isotopeMasses, isotopeProbabilities;
// Iterate through all elements in the molecular formula
for (const auto& elem : formula)
{
// elem: pair of (Element*, atom count)
atomCounts.push_back(elem.second);
std::vector<double> masses;
std::vector<double> probs;
for (const auto& iso : elem.first->getIsotopeDistribution())
{
if (iso.getIntensity() <= 0.0) continue; // Note: there will be a segfault if one of the intensities is zero!
masses.push_back(iso.getMZ());
probs.push_back(iso.getIntensity());
}
// For each element store how many isotopes it has and their masses/probabilities
isotopeNumbers.push_back( masses.size() );
isotopeMasses.push_back(masses);
isotopeProbabilities.push_back(probs);
}
// Store the data in a IsotopeDistribution
return _OMS_IsoFromParameters(isotopeNumbers, atomCounts, isotopeMasses, isotopeProbabilities);
}
// Generator wrapper around IsoSpec's IsoThresholdGenerator: iterates all configurations
// whose probability exceeds 'threshold' (absolute, or relative to the most probable one).
IsoSpecThresholdGeneratorWrapper::IsoSpecThresholdGeneratorWrapper(const std::vector<int>& isotopeNr,
const std::vector<int>& atomCounts,
const std::vector<std::vector<double> >& isotopeMasses,
const std::vector<std::vector<double> >& isotopeProbabilities,
double threshold,
bool absolute) :
ITG(std::make_unique<IsoSpec::IsoThresholdGenerator>(
_OMS_IsoFromParameters(isotopeNr, atomCounts, isotopeMasses, isotopeProbabilities),
threshold,
absolute))
{};
// Convenience constructor from an OpenMS EmpiricalFormula.
IsoSpecThresholdGeneratorWrapper::IsoSpecThresholdGeneratorWrapper(const EmpiricalFormula& formula,
double threshold,
bool absolute) :
ITG(std::make_unique<IsoSpec::IsoThresholdGenerator>(_OMS_IsoFromEmpiricalFormula(formula), threshold, absolute))
{};
// Thin forwarding accessors for the current configuration of the underlying generator.
bool IsoSpecThresholdGeneratorWrapper::nextConf() { return ITG->advanceToNextConfiguration(); };
Peak1D IsoSpecThresholdGeneratorWrapper::getConf() { return Peak1D(ITG->mass(), ITG->prob()); };
double IsoSpecThresholdGeneratorWrapper::getMass() { return ITG->mass(); };
double IsoSpecThresholdGeneratorWrapper::getIntensity() { return ITG->prob(); };
double IsoSpecThresholdGeneratorWrapper::getLogIntensity() { return ITG->lprob(); };
// in this special case it needs to go in cpp file (see e.g., https://stackoverflow.com/questions/38242200/where-should-a-default-destructor-c11-style-go-header-or-cpp)
IsoSpecThresholdGeneratorWrapper::~IsoSpecThresholdGeneratorWrapper() = default;
// --------------------------------------------------------------------------------
// Generator wrapper around IsoSpec's IsoLayeredGenerator: iterates configurations
// layer by layer until (approximately) the given total probability is covered.
IsoSpecTotalProbGeneratorWrapper::IsoSpecTotalProbGeneratorWrapper(const std::vector<int>& isotopeNr,
const std::vector<int>& atomCounts,
const std::vector<std::vector<double> >& isotopeMasses,
const std::vector<std::vector<double> >& isotopeProbabilities,
double total_prob_hint) :
ILG(std::make_unique<IsoSpec::IsoLayeredGenerator>(_OMS_IsoFromParameters(isotopeNr, atomCounts, isotopeMasses, isotopeProbabilities), 1024, 1024, true, total_prob_hint))
{};
// Convenience constructor from an OpenMS EmpiricalFormula.
IsoSpecTotalProbGeneratorWrapper::IsoSpecTotalProbGeneratorWrapper(const EmpiricalFormula& formula,
double total_prob_hint) :
ILG(std::make_unique<IsoSpec::IsoLayeredGenerator>(_OMS_IsoFromEmpiricalFormula(formula), 1024, 1024, true, total_prob_hint))
{};
// Defaulted out-of-line destructor (unique_ptr member over an incomplete type in the header).
IsoSpecTotalProbGeneratorWrapper::~IsoSpecTotalProbGeneratorWrapper() = default;
// Thin forwarding accessors for the current configuration of the underlying generator.
bool IsoSpecTotalProbGeneratorWrapper::nextConf() { return ILG->advanceToNextConfiguration(); };
Peak1D IsoSpecTotalProbGeneratorWrapper::getConf() { return Peak1D(ILG->mass(), ILG->prob()); };
double IsoSpecTotalProbGeneratorWrapper::getMass() { return ILG->mass(); };
double IsoSpecTotalProbGeneratorWrapper::getIntensity() { return ILG->prob(); };
double IsoSpecTotalProbGeneratorWrapper::getLogIntensity() { return ILG->lprob(); };
// --------------------------------------------------------------------------------
// Generator wrapper around IsoSpec's IsoOrderedGenerator: iterates configurations
// in order of decreasing probability.
IsoSpecOrderedGeneratorWrapper::IsoSpecOrderedGeneratorWrapper(const std::vector<int>& isotopeNr,
const std::vector<int>& atomCounts,
const std::vector<std::vector<double> >& isotopeMasses,
const std::vector<std::vector<double> >& isotopeProbabilities) :
IOG(std::make_unique<IsoSpec::IsoOrderedGenerator>(_OMS_IsoFromParameters(isotopeNr, atomCounts, isotopeMasses, isotopeProbabilities)))
{};
// Convenience constructor from an OpenMS EmpiricalFormula.
IsoSpecOrderedGeneratorWrapper::IsoSpecOrderedGeneratorWrapper(const EmpiricalFormula& formula) :
IOG(std::make_unique<IsoSpec::IsoOrderedGenerator>(_OMS_IsoFromEmpiricalFormula(formula)))
{};
IsoSpecOrderedGeneratorWrapper::~IsoSpecOrderedGeneratorWrapper() = default; // needs to be in cpp file because of incomplete types!
// Thin forwarding accessors for the current configuration of the underlying generator.
bool IsoSpecOrderedGeneratorWrapper::nextConf() { return IOG->advanceToNextConfiguration(); };
Peak1D IsoSpecOrderedGeneratorWrapper::getConf() { return Peak1D(IOG->mass(), IOG->prob()); };
double IsoSpecOrderedGeneratorWrapper::getMass() { return IOG->mass(); };
double IsoSpecOrderedGeneratorWrapper::getIntensity() { return IOG->prob(); };
double IsoSpecOrderedGeneratorWrapper::getLogIntensity() { return IOG->lprob(); };
// --------------------------------------------------------------------------------
// One-shot wrapper around IsoSpec's IsoThresholdGenerator: run() materializes the full
// distribution above the threshold into an IsotopeDistribution.
IsoSpecThresholdWrapper::IsoSpecThresholdWrapper(const std::vector<int>& isotopeNr,
const std::vector<int>& atomCounts,
const std::vector<std::vector<double> >& isotopeMasses,
const std::vector<std::vector<double> >& isotopeProbabilities,
double threshold,
bool absolute) :
ITG(std::make_unique<IsoSpec::IsoThresholdGenerator>(
_OMS_IsoFromParameters(isotopeNr, atomCounts, isotopeMasses, isotopeProbabilities),
threshold,
absolute))
{}
// Convenience constructor from an OpenMS EmpiricalFormula.
IsoSpecThresholdWrapper::IsoSpecThresholdWrapper(const EmpiricalFormula& formula,
double threshold,
bool absolute) :
ITG(std::make_unique<IsoSpec::IsoThresholdGenerator>(
_OMS_IsoFromEmpiricalFormula(formula),
threshold,
absolute))
{};
// Materializes all configurations above the configured threshold into an
// IsotopeDistribution. The generator is reset first, and the output vector is
// pre-sized via count_confs() to avoid reallocations.
IsotopeDistribution IsoSpecThresholdWrapper::run()
{
std::vector<Peak1D> distribution;
distribution.reserve(ITG->count_confs());
ITG->reset();
while (ITG->advanceToNextConfiguration())
{
// construct the peak in place; emplace_back(Peak1D(...)) built a temporary and moved it
distribution.emplace_back(ITG->mass(), ITG->prob());
}
IsotopeDistribution ID;
ID.set(std::move(distribution));
return ID;
}
IsoSpecThresholdWrapper::~IsoSpecThresholdWrapper() = default;
// --------------------------------------------------------------------------------
// One-shot wrapper around IsoSpec's IsoLayeredGenerator: run() collects configurations
// until the requested total probability is covered, optionally trimming the last layer.
IsoSpecTotalProbWrapper::IsoSpecTotalProbWrapper(const std::vector<int>& isotopeNr,
const std::vector<int>& atomCounts,
const std::vector<std::vector<double> >& isotopeMasses,
const std::vector<std::vector<double> >& isotopeProbabilities,
double _total_prob,
bool _do_p_trim) :
ILG(std::make_unique<IsoSpec::IsoLayeredGenerator>(
_OMS_IsoFromParameters(isotopeNr, atomCounts, isotopeMasses, isotopeProbabilities),
1024,
1024,
true,
_total_prob)),
target_prob(_total_prob),
do_p_trim(_do_p_trim)
{};
// Convenience constructor from an OpenMS EmpiricalFormula.
IsoSpecTotalProbWrapper::IsoSpecTotalProbWrapper(const EmpiricalFormula& formula,
double _total_prob,
bool _do_p_trim) :
ILG(std::make_unique<IsoSpec::IsoLayeredGenerator>(_OMS_IsoFromEmpiricalFormula(formula), 1024, 1024, true, _total_prob)),
target_prob(_total_prob),
do_p_trim(_do_p_trim)
{};
IsoSpecTotalProbWrapper::~IsoSpecTotalProbWrapper() = default;
// Collects configurations until the accumulated probability reaches target_prob.
// If do_p_trim is set, the remainder of the last probability layer is pulled in and a
// quickselect-style partition keeps only the most probable configurations whose
// cumulative probability just reaches the target.
IsotopeDistribution IsoSpecTotalProbWrapper::run()
{
std::vector<Peak1D> distribution;
// There is no sensible way to precalculate the number of configurations
// in IsoLayeredGenerator
double acc_prob = 0.0;
while (acc_prob < target_prob && ILG->advanceToNextConfiguration())
{
double p = ILG->prob();
acc_prob += p;
distribution.emplace_back(Peak1D(ILG->mass(), p));
}
if (do_p_trim)
{
// the p_trim: extract the rest of the last layer, and perform quickselect
while (ILG->advanceToNextConfigurationWithinLayer())
distribution.emplace_back(Peak1D(ILG->mass(), ILG->prob()));
// quickselect on [start, end): find the cut so that the kept (higher-probability)
// prefix just reaches target_prob; sum_to_start tracks the probability mass
// already known to lie before 'start'
size_t start = 0;
size_t end = distribution.size();
double sum_to_start = 0.0;
while (start < end)
{
// Partition part
size_t pivot = start + (end-start)/2; // middle
double pprob = distribution[pivot].getIntensity();
std::swap(distribution[pivot], distribution[end-1]);
double new_csum = sum_to_start;
size_t loweridx = start;
// move all peaks more probable than the pivot to the front, accumulating their mass
for (size_t ii = start; ii < end-1; ii++)
if (distribution[ii].getIntensity() > pprob)
{
std::swap(distribution[ii], distribution[loweridx]);
new_csum += distribution[loweridx].getIntensity();
loweridx++;
}
std::swap(distribution[end-1], distribution[loweridx]);
// Selection part: recurse into the half that contains the probability cut
if (new_csum < target_prob)
{
start = loweridx + 1;
sum_to_start = new_csum + distribution[loweridx].getIntensity();
}
else
end = loweridx;
}
// drop everything after the cut
distribution.resize(end);
}
IsotopeDistribution ID;
ID.set(std::move(distribution));
return ID;
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/FeatureFinderMultiplexAlgorithm.cpp | .cpp | 49,432 | 1,153 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Lars Nilse $
// $Authors: Lars Nilse $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/FeatureFinderMultiplexAlgorithm.h>
#include <OpenMS/FEATUREFINDER/MultiplexDeltaMassesGenerator.h>
#include <OpenMS/FEATUREFINDER/MultiplexDeltaMasses.h>
#include <OpenMS/FEATUREFINDER/MultiplexIsotopicPeakPattern.h>
#include <OpenMS/FEATUREFINDER/MultiplexFilteredMSExperiment.h>
#include <OpenMS/FEATUREFINDER/MultiplexFilteringCentroided.h>
#include <OpenMS/FEATUREFINDER/MultiplexFilteringProfile.h>
#include <OpenMS/FEATUREFINDER/MultiplexClustering.h>
#include <OpenMS/PROCESSING/CENTROIDING/PeakPickerHiRes.h>
#include <OpenMS/ML/CLUSTERING/GridBasedCluster.h>
#include <OpenMS/ANALYSIS/OPENSWATH/PeakIntegrator.h>
#include <OpenMS/PROCESSING/MISC/SplinePackage.h>
#include <OpenMS/PROCESSING/MISC/SplineInterpolatedPeaks.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/ML/REGRESSION/LinearRegressionWithoutIntercept.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/KERNEL/MSChromatogram.h>
#include <OpenMS/KERNEL/ChromatogramPeak.h>
#include <OpenMS/KERNEL/SpectrumHelper.h>
#include <OpenMS/KERNEL/ConsensusMap.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <vector>
#include <numeric>
#include <fstream>
#include <iostream>
#include <ostream>
#include <algorithm>
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string/replace.hpp>
#include <boost/algorithm/string/classification.hpp>
//#define DEBUG
using namespace std;
namespace OpenMS
{
// Registers all algorithm and label parameters with their defaults/restrictions and
// caches the selected charge and isotopes-per-peptide ranges.
// Fix: removed the hard-coded placeholder entry 'labels:Arg6' (value 12.08, description
// "description") — all labels, including Arg6 with its correct mass shift, are registered
// by the MultiplexDeltaMassesGenerator loop below, so the placeholder was wrong and dead.
FeatureFinderMultiplexAlgorithm::FeatureFinderMultiplexAlgorithm() :
DefaultParamHandler("FeatureFinderMultiplexAlgorithm")
{
// parameter section: algorithm
defaults_.setValue("algorithm:labels", "[][Lys8,Arg10]", "Labels used for labelling the samples. If the sample is unlabelled (i.e. you want to detect only single peptide features) please leave this parameter empty. [...] specifies the labels for a single sample. For example\n\n[][Lys8,Arg10] ... SILAC\n[][Lys4,Arg6][Lys8,Arg10] ... triple-SILAC\n[Dimethyl0][Dimethyl6] ... Dimethyl\n[Dimethyl0][Dimethyl4][Dimethyl8] ... triple Dimethyl\n[ICPL0][ICPL4][ICPL6][ICPL10] ... ICPL");
defaults_.setValue("algorithm:charge", "1:4", "Range of charge states in the sample, i.e. min charge : max charge.");
defaults_.setValue("algorithm:isotopes_per_peptide", "3:6", "Range of isotopes per peptide in the sample. For example 3:6, if isotopic peptide patterns in the sample consist of either three, four, five or six isotopic peaks. ", {"advanced"});
defaults_.setValue("algorithm:rt_typical", 40.0, "Typical retention time [s] over which a characteristic peptide elutes. (This is not an upper bound. Peptides that elute for longer will be reported.)");
defaults_.setMinFloat("algorithm:rt_typical", 0.0);
defaults_.setValue("algorithm:rt_band", 0.0, "The algorithm searches for characteristic isotopic peak patterns, spectrum by spectrum. For some low-intensity peptides, an important peak might be missing in one spectrum but be present in one of the neighbouring ones. The algorithm takes a bundle of neighbouring spectra with width rt_band into account. For example with rt_band = 0, all characteristic isotopic peaks have to be present in one and the same spectrum. As rt_band increases, the sensitivity of the algorithm but also the likelihood of false detections increases.");
defaults_.setMinFloat("algorithm:rt_band", 0.0);
defaults_.setValue("algorithm:rt_min", 2.0, "Lower bound for the retention time [s]. (Any peptides seen for a shorter time period are not reported.)");
defaults_.setMinFloat("algorithm:rt_min", 0.0);
defaults_.setValue("algorithm:mz_tolerance", 6.0, "m/z tolerance for search of peak patterns.");
defaults_.setMinFloat("algorithm:mz_tolerance", 0.0);
defaults_.setValue("algorithm:mz_unit", "ppm", "Unit of the 'mz_tolerance' parameter.");
defaults_.setValidStrings("algorithm:mz_unit", {"Da","ppm"});
defaults_.setValue("algorithm:intensity_cutoff", 1000.0, "Lower bound for the intensity of isotopic peaks.");
defaults_.setMinFloat("algorithm:intensity_cutoff", 0.0);
defaults_.setValue("algorithm:peptide_similarity", 0.5, "Two peptides in a multiplet are expected to have the same isotopic pattern. This parameter is a lower bound on their similarity.");
defaults_.setMinFloat("algorithm:peptide_similarity", -1.0);
defaults_.setMaxFloat("algorithm:peptide_similarity", 1.0);
defaults_.setValue("algorithm:averagine_similarity", 0.4, "The isotopic pattern of a peptide should resemble the averagine model at this m/z position. This parameter is a lower bound on similarity between measured isotopic pattern and the averagine model.");
defaults_.setMinFloat("algorithm:averagine_similarity", -1.0);
defaults_.setMaxFloat("algorithm:averagine_similarity", 1.0);
defaults_.setValue("algorithm:averagine_similarity_scaling", 0.95, "Let x denote this scaling factor, and p the averagine similarity parameter. For the detection of single peptides, the averagine parameter p is replaced by p' = p + x(1-p), i.e. x = 0 -> p' = p and x = 1 -> p' = 1. (For knock_out = true, peptide doublets and singlets are detected simultaneously. For singlets, the peptide similarity filter is irreleavant. In order to compensate for this 'missing filter', the averagine parameter p is replaced by the more restrictive p' when searching for singlets.)", {"advanced"});
defaults_.setMinFloat("algorithm:averagine_similarity_scaling", 0.0);
defaults_.setMaxFloat("algorithm:averagine_similarity_scaling", 1.0);
defaults_.setValue("algorithm:missed_cleavages", 0, "Maximum number of missed cleavages due to incomplete digestion. (Only relevant if enzymatic cutting site coincides with labelling site. For example, Arg/Lys in the case of trypsin digestion and SILAC labelling.)");
defaults_.setMinInt("algorithm:missed_cleavages", 0);
defaults_.setValue("algorithm:spectrum_type", "automatic", "Type of MS1 spectra in input mzML file. 'automatic' determines the spectrum type directly from the input mzML file.", {"advanced"});
defaults_.setValidStrings("algorithm:spectrum_type", {"profile","centroid","automatic"});
defaults_.setValue("algorithm:averagine_type","peptide","The type of averagine to use, currently RNA, DNA or peptide", {"advanced"});
defaults_.setValidStrings("algorithm:averagine_type", {"peptide","RNA","DNA"});
defaults_.setValue("algorithm:knock_out", "false", "Is it likely that knock-outs are present? (Supported for doublex, triplex and quadruplex experiments only.)", {"advanced"});
defaults_.setValidStrings("algorithm:knock_out", {"true","false"});
defaults_.setSectionDescription("algorithm", "algorithmic parameters");
// parameter section: labels
// (all label mass shifts are taken from MultiplexDeltaMassesGenerator below)
defaults_.setSectionDescription("labels", "mass shifts for all possible labels");
MultiplexDeltaMassesGenerator generator;
const Param& p = generator.getParameters();
for (Param::ParamIterator it = p.begin(); it != p.end(); ++it)
{
String label_name = "labels:";
label_name += it->name;
defaults_.setValue(label_name, it->value, it->description, {"advanced"});
defaults_.setMinFloat(label_name, 0.0);
// remember each label's mass shift for later lookup
label_mass_shift_.insert(make_pair(it->name, it->value));
}
// parameter section: algorithm, get selected charge range
String charge_string = defaults_.getValue("algorithm:charge").toString();
charge_min_ = charge_string.prefix(':').toInt();
charge_max_ = charge_string.suffix(':').toInt();
if (charge_min_ > charge_max_)
{
swap(charge_min_, charge_max_);
}
// parameter section: algorithm, get isotopes per peptide range
String isotopes_per_peptide_string = defaults_.getValue("algorithm:isotopes_per_peptide").toString();
isotopes_per_peptide_min_ = isotopes_per_peptide_string.prefix(':').toInt();
isotopes_per_peptide_max_ = isotopes_per_peptide_string.suffix(':').toInt();
if (isotopes_per_peptide_min_ > isotopes_per_peptide_max_)
{
swap(isotopes_per_peptide_min_, isotopes_per_peptide_max_);
}
centroided_ = false;
}
/**
 * @brief search priority of a charge state
 *
 * Maps a charge to its search rank: 2+ 3+ 4+ 1+ 5+ 6+ ...
 * Charge states are ranked by how likely they are to occur, so the most
 * probable charge states are searched first.
 */
static size_t orderCharge(int charge)
{
  switch (charge)
  {
    case 2: return 1;
    case 3: return 2;
    case 4: return 3;
    case 1: return 4;
    default: return charge; // 5+, 6+, ... keep their natural order
  }
}
/**
 * @brief comparator of peak patterns
 *
 * Determines the order in which peak patterns are searched:
 * 1. more mass shifts first (triplets before doublets before singlets)
 * 2. smaller first non-zero mass shift first (e.g. 6 Da before 12 Da, i.e. misscleavage)
 * 3. charge rank 2+ before 3+ before 4+ before 1+ before 5+ etc. (most likely first)
 *
 * @param[in] pattern1 first peak pattern
 * @param[in] pattern2 second peak pattern
 *
 * @return true if pattern1 should be searched before pattern2
 */
static bool lessPattern(const MultiplexIsotopicPeakPattern& pattern1, const MultiplexIsotopicPeakPattern& pattern2)
{
  // primary criterion: number of mass shifts, descending
  if (pattern1.getMassShiftCount() != pattern2.getMassShiftCount())
  {
    return pattern1.getMassShiftCount() > pattern2.getMassShiftCount();
  }
  // secondary criterion: first non-zero mass shift, ascending
  // (the mass shift at index 0 is by definition always zero)
  if ((pattern1.getMassShiftCount() > 1) && (pattern1.getMassShiftAt(1) != pattern2.getMassShiftAt(1)))
  {
    return pattern1.getMassShiftAt(1) < pattern2.getMassShiftAt(1);
  }
  // tertiary criterion: charge rank, i.e. 2+ before 3+ before 4+ before 1+ before 5+ etc.
  return orderCharge(pattern1.getCharge()) < orderCharge(pattern2.getCharge());
}
std::vector<MultiplexIsotopicPeakPattern> FeatureFinderMultiplexAlgorithm::generatePeakPatterns_(int charge_min, int charge_max, int peaks_per_peptide_max, const std::vector<MultiplexDeltaMasses>& mass_pattern_list)
{
std::vector<MultiplexIsotopicPeakPattern> list;
// iterate over all charge states
for (int c = charge_max; c >= charge_min; --c)
{
// iterate over all mass shifts
for (unsigned i = 0; i < mass_pattern_list.size(); ++i)
{
MultiplexIsotopicPeakPattern pattern(c, peaks_per_peptide_max, mass_pattern_list[i], i);
list.push_back(pattern);
}
}
sort(list.begin(), list.end(), lessPattern);
return list;
}
// Correct the integrated peptide intensities of a multiplet using ratios
// determined by linear regression on interpolated chromatogram data points.
//
// @param pattern              peak pattern of the multiplet (defines the number of peptides)
// @param spline_chromatograms spline interpolations of the mass trace chromatograms,
//                             keyed by (peptide * isotopes_per_peptide_max_ + isotope)
// @param rt_peptide           intensity-weighted RT centre of each peptide, used to
//                             align the chromatograms of the different peptides
// @param intensity_peptide    in/out: integrated peptide intensities to be corrected
void FeatureFinderMultiplexAlgorithm::correctPeptideIntensities_(const MultiplexIsotopicPeakPattern& pattern, std::map<size_t, SplinePackage>& spline_chromatograms, const std::vector<double>& rt_peptide, std::vector<double>& intensity_peptide) const
{
  // determine ratios through linear regression
  // (In most labelled mass spectrometry experiments, the fold change i.e. ratio and not the individual peptide intensities
  // are of primary interest. For that reason, we determine the ratios from interpolated chromatogram data points directly,
  // and then correct the current ones.)
  std::vector<double> ratios; // light/light, medium/light, heavy/light etc.
  ratios.push_back(1.0); // ratio of the lightest peptide to itself
  // loop over peptides (peptide 0, the lightest one, is the reference)
  for (size_t peptide = 1; peptide < pattern.getMassShiftCount(); ++peptide)
  {
    std::vector<double> intensities1;
    std::vector<double> intensities2;
    // loop over isotopes
    for (size_t isotope = 0; isotope < isotopes_per_peptide_max_; ++isotope)
    {
      // find splines for the mass traces of the lightest and other peptide
      size_t idx_1 = isotope;
      size_t idx_2 = peptide * isotopes_per_peptide_max_ + isotope;
      if ((spline_chromatograms.find(idx_1) == spline_chromatograms.end()) || (spline_chromatograms.find(idx_2) == spline_chromatograms.end()))
      {
        // no spline available for at least one of the two mass traces
        continue;
      }
      std::map<size_t, SplinePackage>::iterator it1 = spline_chromatograms.find(idx_1);
      std::map<size_t, SplinePackage>::iterator it2 = spline_chromatograms.find(idx_2);
      // sample both chromatograms on a common RT raster
      // (0.7 of the finer step width oversamples slightly to not lose information)
      double rt_min = std::min(it1->second.getPosMin(), it2->second.getPosMin());
      double rt_max = std::max(it1->second.getPosMax(), it2->second.getPosMax());
      double rt_step_width = 0.7 * std::min(it1->second.getPosStepWidth(), it2->second.getPosStepWidth());
      for (double rt = rt_min; rt < rt_max; rt += rt_step_width)
      {
        double intensity1 = it1->second.eval(rt);
        double intensity2 = it2->second.eval(rt + rt_peptide[peptide] - rt_peptide[0]); // Take RT shifts between peptide into account to find corresponding intensities.
        // Use only if we land within both chromatograms i.e. non-zero intensities.
        if ((intensity1 > 0) && (intensity2 > 0))
        {
          intensities1.push_back(intensity1);
          intensities2.push_back(intensity2);
        }
      }
    }
    // We require more than five data points (size() > 5) for a reliable linear regression.
    if (intensities1.size() > 5)
    {
      OpenMS::Math::LinearRegressionWithoutIntercept linreg;
      linreg.addData(intensities1, intensities2);
      ratios.push_back(linreg.getSlope());
    }
    else
    {
      // ratio could not be determined reliably
      ratios.push_back(-1.0);
    }
  }
  // correction for doublets
  if ((pattern.getMassShiftCount() == 2) && (ratios[1] > 0))
  {
    // least-squares projection of (intensity0, intensity1) onto the line
    // intensity1 = ratio * intensity0
    double intensity0 = (intensity_peptide[0] + ratios[1] * intensity_peptide[1]) / (1 + ratios[1] * ratios[1]);
    double intensity1 = ratios[1] * intensity0;
    intensity_peptide[0] = intensity0;
    intensity_peptide[1] = intensity1;
  }
  // correction for triplets or higher multiplets
  else if ((pattern.getMassShiftCount() > 2))
  {
    for (size_t peptide = 1; peptide < pattern.getMassShiftCount(); ++peptide)
    {
      if (ratios[peptide] > 0)
      {
        // rescale relative to the (unchanged) lightest peptide
        intensity_peptide[peptide] = ratios[peptide] * intensity_peptide[0];
      }
    }
  }
}
// Determine the intensity of each peptide of a multiplet from its satellite
// peaks in centroided data. The intensity is the integrated area under the
// mass trace chromatograms; intensities that cannot be determined reliably
// are reported as -1. If possible, the intensities are afterwards corrected
// via correctPeptideIntensities_().
//
// @param pattern    peak pattern of the multiplet
// @param satellites all satellite peaks of the multiplet, keyed by
//                   (peptide * isotopes_per_peptide_max_ + isotope)
// @return one intensity per peptide (-1 if not reliably determinable)
std::vector<double> FeatureFinderMultiplexAlgorithm::determinePeptideIntensitiesCentroided_(const MultiplexIsotopicPeakPattern& pattern, const std::multimap<size_t, MultiplexSatelliteCentroided >& satellites)
{
  // determine peptide intensities and RT shift between the peptides
  // i.e. first determine the RT centre of mass for each peptide
  std::vector<double> rt_peptide;
  std::vector<double> intensity_peptide;
  std::map<size_t, SplinePackage> spline_chromatograms;
  PeakIntegrator pi;
  Param param = pi.getDefaults();
  param.setValue("integration_type","trapezoid"); // intensity_sum, simpson, trapezoid (Note that 'simpson' may lead to negative area-under-the-curve.)
  pi.setParameters(param);
  // loop over peptides
  for (size_t peptide = 0; peptide < pattern.getMassShiftCount(); ++peptide)
  {
    // coordinates of the peptide feature
    // RT is the intensity-average of all satellites peaks of all (!) mass traces
    double rt(0);
    double rt_min(0);
    double rt_max(0);
    double intensity_sum_simple(0); // for intensity-averaged rt
    double intensity_sum(0);
    // loop over isotopes i.e. mass traces of the peptide
    for (size_t isotope = 0; isotope < isotopes_per_peptide_max_; ++isotope)
    {
      // find satellites for this isotope i.e. mass trace
      size_t idx = peptide * isotopes_per_peptide_max_ + isotope;
      std::pair<std::multimap<size_t, MultiplexSatelliteCentroided >::const_iterator, std::multimap<size_t, MultiplexSatelliteCentroided >::const_iterator> satellites_isotope;
      satellites_isotope = satellites.equal_range(idx);
      MSChromatogram chromatogram;
      // loop over satellites for this isotope i.e. mass trace
      for (std::multimap<size_t, MultiplexSatelliteCentroided >::const_iterator satellite_it = satellites_isotope.first; satellite_it != satellites_isotope.second; ++satellite_it)
      {
        // find indices of the peak
        size_t rt_idx = (satellite_it->second).getRTidx();
        size_t mz_idx = (satellite_it->second).getMZidx();
        // find peak itself (satellites store indices into the centroided experiment)
        MSExperiment::ConstIterator it_rt = exp_centroid_.begin();
        std::advance(it_rt, rt_idx);
        MSSpectrum::ConstIterator it_mz = it_rt->begin();
        std::advance(it_mz, mz_idx);
        double rt_temp = it_rt->getRT();
        double intensity_temp = it_mz->getIntensity();
        // track the overall RT range; (peptide + isotope == 0) initializes on the very first satellite
        if ((peptide + isotope == 0) || (rt_temp < rt_min))
        {
          rt_min = rt_temp;
        }
        if ((peptide + isotope == 0) || (rt_temp > rt_max))
        {
          rt_max = rt_temp;
        }
        rt += rt_temp * intensity_temp;
        intensity_sum_simple += intensity_temp;
        chromatogram.push_back(ChromatogramPeak(rt_temp, intensity_temp));
      }
      chromatogram.sortByPosition();
      // construct spline interpolations for later use
      // Reliable spline interpolation only for more than 5 data points in chromatogram.
      if (chromatogram.size() > 5)
      {
        // NOTE: this local 'rt' vector shadows the outer RT accumulator of the same name.
        std::vector<double> rt;
        std::vector<double> intensity;
        for (const auto &it : chromatogram)
        {
          rt.push_back(it.getRT());
          intensity.push_back(it.getIntensity());
        }
        spline_chromatograms.insert(std::make_pair(idx, SplinePackage(rt, intensity)));
      }
      if (chromatogram.size() > 1)
      {
        // integrate the area under the mass trace chromatogram
        double rt_start = chromatogram.begin()->getPos();
        double rt_end = chromatogram.back().getPos();
        PeakIntegrator::PeakArea pa = pi.integratePeak(chromatogram, rt_start, rt_end);
        intensity_sum += pa.area;
      }
      else if (chromatogram.size() == 1)
      {
        // single data point: no area, use the intensity itself
        intensity_sum += chromatogram.begin()->getIntensity();
      }
    }
    // intensity-weighted RT centre of the peptide
    rt /= intensity_sum_simple;
    rt_peptide.push_back(rt);
    // Mark as unreliable (-1) if nothing was integrated or the peptide eluted too briefly.
    if (intensity_sum == 0 || (rt_max - rt_min < static_cast<double>(param_.getValue("algorithm:rt_min"))))
    {
      intensity_sum = -1.0;
    }
    intensity_peptide.push_back(intensity_sum);
  }
  // If any of the peptide intensities could not be determined (i.e. -1) then there is no need for further corrections.
  if (std::find(intensity_peptide.begin(), intensity_peptide.end(), -1.0) != intensity_peptide.end())
  {
    return intensity_peptide;
  }
  // If the pattern searched for peptide singlets, then there are no further corrections possible.
  if (pattern.getMassShiftCount() < 2)
  {
    return intensity_peptide;
  }
  correctPeptideIntensities_(pattern, spline_chromatograms, rt_peptide, intensity_peptide);
  return intensity_peptide;
}
// Determine the intensity of each peptide of a multiplet from its satellite
// data points in profile data. Analogous to determinePeptideIntensitiesCentroided_(),
// but satellites carry (RT, m/z, intensity) values directly instead of indices
// into a centroided experiment. Intensities that cannot be determined reliably
// are reported as -1; otherwise they are corrected via correctPeptideIntensities_().
//
// @param pattern    peak pattern of the multiplet
// @param satellites all satellite data points of the multiplet, keyed by
//                   (peptide * isotopes_per_peptide_max_ + isotope)
// @return one intensity per peptide (-1 if not reliably determinable)
std::vector<double> FeatureFinderMultiplexAlgorithm::determinePeptideIntensitiesProfile_(const MultiplexIsotopicPeakPattern& pattern, const std::multimap<size_t, MultiplexSatelliteProfile >& satellites)
{
  // determine peptide intensities and RT shift between the peptides
  // i.e. first determine the RT centre of mass for each peptide
  std::vector<double> rt_peptide;
  std::vector<double> intensity_peptide;
  std::map<size_t, SplinePackage> spline_chromatograms;
  PeakIntegrator pi;
  Param param = pi.getDefaults();
  param.setValue("integration_type","trapezoid"); // intensity_sum, simpson, trapezoid (Note that 'simpson' may lead to negative area-under-the-curve.)
  pi.setParameters(param);
  // loop over peptides
  for (size_t peptide = 0; peptide < pattern.getMassShiftCount(); ++peptide)
  {
    // coordinates of the peptide feature
    // RT is the intensity-average of all satellites peaks of all (!) mass traces
    double rt(0);
    double rt_min(0);
    double rt_max(0);
    double intensity_sum_simple(0); // for intensity-averaged rt
    double intensity_sum(0);
    // loop over isotopes i.e. mass traces of the peptide
    for (size_t isotope = 0; isotope < isotopes_per_peptide_max_; ++isotope)
    {
      // find satellites for this isotope i.e. mass trace
      size_t idx = peptide * isotopes_per_peptide_max_ + isotope;
      std::pair<std::multimap<size_t, MultiplexSatelliteProfile >::const_iterator, std::multimap<size_t, MultiplexSatelliteProfile >::const_iterator> satellites_isotope;
      satellites_isotope = satellites.equal_range(idx);
      MSChromatogram chromatogram;
      // loop over satellites for this isotope i.e. mass trace
      for (std::multimap<size_t, MultiplexSatelliteProfile >::const_iterator satellite_it = satellites_isotope.first; satellite_it != satellites_isotope.second; ++satellite_it)
      {
        double rt_temp = (satellite_it->second).getRT();
        double intensity_temp = (satellite_it->second).getIntensity();
        // track the overall RT range; (peptide + isotope == 0) initializes on the very first satellite
        if ((peptide + isotope == 0) || (rt_temp < rt_min))
        {
          rt_min = rt_temp;
        }
        if ((peptide + isotope == 0) || (rt_temp > rt_max))
        {
          rt_max = rt_temp;
        }
        rt += rt_temp * intensity_temp;
        intensity_sum_simple += intensity_temp;
        chromatogram.push_back(ChromatogramPeak(rt_temp, intensity_temp));
      }
      // Profile satellites can share RT positions; merge duplicates (median intensity).
      makePeakPositionUnique(chromatogram, IntensityAveragingMethod::MEDIAN);
      // construct spline interpolations for later use
      // Reliable spline interpolation only for more than 5 data points in chromatogram.
      if (chromatogram.size() > 5)
      {
        // NOTE: this local 'rt' vector shadows the outer RT accumulator of the same name.
        std::vector<double> rt;
        std::vector<double> intensity;
        for (const auto &it : chromatogram)
        {
          rt.push_back(it.getRT());
          intensity.push_back(it.getIntensity());
        }
        spline_chromatograms.insert(std::make_pair(idx, SplinePackage(rt, intensity)));
      }
      if (chromatogram.size() > 1)
      {
        // Positions are already sorted in makePeakPositionUnique(), i.e. sortByPosition() not necessary.
        double rt_start = chromatogram.begin()->getPos();
        double rt_end = chromatogram.back().getPos();
        PeakIntegrator::PeakArea pa = pi.integratePeak(chromatogram, rt_start, rt_end);
        intensity_sum += pa.area;
      }
      else if (chromatogram.size() == 1)
      {
        // single data point: no area, use the intensity itself
        intensity_sum += chromatogram.begin()->getIntensity();
      }
    }
    // intensity-weighted RT centre of the peptide
    rt /= intensity_sum_simple;
    rt_peptide.push_back(rt);
    // Mark as unreliable (-1) if nothing was integrated or the peptide eluted too briefly.
    if (intensity_sum == 0 || (rt_max - rt_min < static_cast<double>(param_.getValue("algorithm:rt_min"))))
    {
      intensity_sum = -1.0;
    }
    intensity_peptide.push_back(intensity_sum);
  }
  // If any of the peptide intensities could not be determined (i.e. -1) then there is no need for further corrections.
  if (std::find(intensity_peptide.begin(), intensity_peptide.end(), -1.0) != intensity_peptide.end())
  {
    return intensity_peptide;
  }
  // If the pattern searched for peptide singlets, then there are no further corrections possible.
  if (pattern.getMassShiftCount() < 2)
  {
    return intensity_peptide;
  }
  correctPeptideIntensities_(pattern, spline_chromatograms, rt_peptide, intensity_peptide);
  return intensity_peptide;
}
// Construct the final feature and consensus maps from the clustered filter
// results of centroided data. For every cluster (= peptide multiplet candidate),
// the satellite peaks of all cluster points are collected (without duplicates),
// peptide intensities are determined, and one Feature per peptide plus one
// ConsensusFeature per multiplet are appended to the member maps.
//
// @param patterns       peak patterns that were searched for
// @param filter_results filtered peaks, one entry per pattern
// @param cluster_results clusters of filtered peaks, one map per pattern
void FeatureFinderMultiplexAlgorithm::generateMapsCentroided_(const std::vector<MultiplexIsotopicPeakPattern>& patterns, const std::vector<MultiplexFilteredMSExperiment>& filter_results, std::vector<std::map<int, GridBasedCluster> >& cluster_results)
{
  // loop over peak patterns
  for (unsigned pattern = 0; pattern < patterns.size(); ++pattern)
  {
    // loop over clusters
    for (const auto& cluster_pair : cluster_results[pattern])
    {
      GridBasedCluster cluster = cluster_pair.second;
      std::vector<int> points = cluster.getPoints();
      // Construct a satellite set for the complete peptide multiplet
      // Make sure there are no duplicates, i.e. the same satellite from different filtered peaks.
      std::multimap<size_t, MultiplexSatelliteCentroided > satellites;
      // loop over points in cluster
      for (const auto& point : points)
      {
        MultiplexFilteredPeak peak = filter_results[pattern].getPeak(point);
        // loop over satellites of the peak
        for (const auto& satellite : peak.getSatellites())
        {
          // check if this satellite (i.e. these indices) are already in the set
          bool satellite_in_set = false;
          for (const auto& existing_satellite : satellites)
          {
            if ((existing_satellite.second.getRTidx() == satellite.second.getRTidx()) && (existing_satellite.second.getMZidx() == satellite.second.getMZidx()))
            {
              satellite_in_set = true;
              break;
            }
          }
          if (satellite_in_set)
          {
            // NOTE(review): 'break' abandons the remaining satellites of this peak once a
            // single duplicate is found; a 'continue' would skip only the duplicate itself.
            // Confirm this is intended.
            break;
          }
          satellites.insert(std::make_pair(satellite.first, MultiplexSatelliteCentroided(satellite.second.getRTidx(), satellite.second.getMZidx())));
        }
      }
      // determine peptide intensities
      std::vector<double> peptide_intensities = determinePeptideIntensitiesCentroided_(patterns[pattern], satellites);
      // If no reliable peptide intensity can be determined, we do not report the peptide multiplet.
      if (std::find(peptide_intensities.begin(), peptide_intensities.end(), -1.0) != peptide_intensities.end())
      {
        continue;
      }
      std::vector<Feature> features;
      ConsensusFeature consensus;
      bool abort = false; // set when one peptide of the multiplet fails the elution-time check
      // construct the feature and consensus maps
      // loop over peptides
      for (size_t peptide = 0; (peptide < patterns[pattern].getMassShiftCount() && !abort); ++peptide)
      {
        // coordinates of the peptide feature
        // RT is the intensity-average of all satellites peaks of the mono-isotopic mass trace
        // m/z is the intensity-average of all satellites peaks of the mono-isotopic mass trace
        Feature feature;
        double rt(0);
        double mz(0);
        double intensity_sum(0);
        // loop over isotopes i.e. mass traces of the peptide
        for (size_t isotope = 0; isotope < isotopes_per_peptide_max_; ++isotope)
        {
          // find satellites for this isotope i.e. mass trace
          size_t idx = peptide * isotopes_per_peptide_max_ + isotope;
          std::pair<std::multimap<size_t, MultiplexSatelliteCentroided >::const_iterator, std::multimap<size_t, MultiplexSatelliteCentroided >::const_iterator> satellites_isotope;
          satellites_isotope = satellites.equal_range(idx);
          DBoundingBox<2> mass_trace;
          // loop over satellites for this isotope i.e. mass trace
          for (std::multimap<size_t, MultiplexSatelliteCentroided >::const_iterator satellite_it = satellites_isotope.first; satellite_it != satellites_isotope.second; ++satellite_it)
          {
            // find indices of the peak
            size_t rt_idx = (satellite_it->second).getRTidx();
            size_t mz_idx = (satellite_it->second).getMZidx();
            // find peak itself
            MSExperiment::ConstIterator it_rt = exp_centroid_.begin();
            std::advance(it_rt, rt_idx);
            MSSpectrum::ConstIterator it_mz = it_rt->begin();
            std::advance(it_mz, mz_idx);
            // only the mono-isotopic trace (isotope 0) contributes to the feature centroid
            if (isotope == 0)
            {
              rt += it_rt->getRT() * it_mz->getIntensity();
              mz += it_mz->getMZ() * it_mz->getIntensity();
              intensity_sum += it_mz->getIntensity();
            }
            mass_trace.enlarge(it_rt->getRT(), it_mz->getMZ());
          }
          if ((mass_trace.width() == 0) || (mass_trace.height() == 0))
          {
            // The mass trace contains only a single point. Add a small margin around
            // the point, otherwise the mass trace is considered empty and not drawn.
            // TODO: Remove the magic number for the margin.
            mass_trace.enlarge(mass_trace.minX() - 0.01, mass_trace.minY() - 0.01);
            mass_trace.enlarge(mass_trace.maxX() + 0.01, mass_trace.maxY() + 0.01);
          }
          if (!(mass_trace.isEmpty()))
          {
            // store the bounding box of the mass trace as a rectangular convex hull
            ConvexHull2D hull;
            hull.addPoint(DPosition<2>(mass_trace.minX(), mass_trace.minY()));
            hull.addPoint(DPosition<2>(mass_trace.minX(), mass_trace.maxY()));
            hull.addPoint(DPosition<2>(mass_trace.maxX(), mass_trace.minY()));
            hull.addPoint(DPosition<2>(mass_trace.maxX(), mass_trace.maxY()));
            feature.getConvexHulls().push_back(hull);
          }
        }
        // no intensity in the mono-isotopic trace => cannot form a feature centroid
        if (intensity_sum <= 0)
        {
          continue;
        }
        rt /= intensity_sum;
        mz /= intensity_sum;
        feature.setRT(rt);
        feature.setMZ(mz);
        feature.setIntensity(peptide_intensities[peptide]);
        feature.setCharge(patterns[pattern].getCharge());
        feature.setOverallQuality(1.0);
        // Check that the feature eluted long enough.
        // DBoundingBox<2> box = feature.getConvexHull().getBoundingBox(); // convex hull of the entire peptide feature
        DBoundingBox<2> box = feature.getConvexHulls()[0].getBoundingBox(); // convex hull of the mono-isotopic mass trace
        if (box.maxX() - box.minX() < static_cast<double>(param_.getValue("algorithm:rt_min")))
        {
          // discard the entire multiplet, not just this peptide
          abort = true;
          break;
        }
        features.push_back(feature);
        if (peptide == 0)
        {
          // The first/lightest peptide acts as anchor of the peptide multiplet consensus.
          // All peptide feature handles are connected to this point.
          consensus.setRT(rt);
          consensus.setMZ(mz);
          consensus.setIntensity(peptide_intensities[peptide]);
          consensus.setCharge(patterns[pattern].getCharge());
          consensus.setQuality(1.0);
        }
        FeatureHandle feature_handle;
        feature_handle.setRT(rt);
        feature_handle.setMZ(mz);
        feature_handle.setIntensity(peptide_intensities[peptide]);
        feature_handle.setCharge(patterns[pattern].getCharge());
        feature_handle.setMapIndex(peptide);
        //feature_handle.setUniqueId(&UniqueIdInterface::setUniqueId); // TODO: Do we need to set unique ID?
        consensus.insert(feature_handle);
        consensus_map_.getColumnHeaders()[peptide].size++;
      }
      // append the multiplet only if every peptide passed the checks
      if (!abort)
      {
        consensus_map_.push_back(consensus);
        for (Feature& feat : features)
        {
          feature_map_.push_back(feat);
        }
      }
    }
  }
}
// Construct the final feature and consensus maps from the clustered filter
// results of profile data. Analogous to generateMapsCentroided_(), but
// satellites carry (RT, m/z, intensity) values directly and no duplicate
// check is performed when collecting them.
//
// @param patterns        peak patterns that were searched for
// @param filter_results  filtered peaks, one entry per pattern
// @param cluster_results clusters of filtered peaks, one map per pattern
void FeatureFinderMultiplexAlgorithm::generateMapsProfile_(const std::vector<MultiplexIsotopicPeakPattern>& patterns, const std::vector<MultiplexFilteredMSExperiment>& filter_results, const std::vector<std::map<int, GridBasedCluster> >& cluster_results)
{
  // progress logger
  unsigned progress = 0;
  startProgress(0, patterns.size(), "constructing maps");
  // loop over peak patterns
  for (unsigned pattern = 0; pattern < patterns.size(); ++pattern)
  {
    setProgress(++progress);
    // loop over clusters
    for (const auto& cluster_pair : cluster_results[pattern])
    {
      GridBasedCluster cluster = cluster_pair.second;
      std::vector<int> points = cluster.getPoints();
      // Construct a satellite set for the complete peptide multiplet
      // Make sure there are no duplicates, i.e. the same satellite from different filtered peaks.
      std::multimap<size_t, MultiplexSatelliteProfile > satellites;
      // loop over points in cluster
      for (const auto& point : points)
      {
        MultiplexFilteredPeak peak = filter_results[pattern].getPeak(point);
        // loop over satellites of the peak
        for (const auto& satellite : peak.getSatellitesProfile())
        {
          satellites.insert(std::make_pair(satellite.first, MultiplexSatelliteProfile(satellite.second.getRT(), satellite.second.getMZ(), satellite.second.getIntensity())));
        }
      }
      // determine peptide intensities
      std::vector<double> peptide_intensities = determinePeptideIntensitiesProfile_(patterns[pattern], satellites);
      // If no reliable peptide intensity can be determined for one of the peptides, we do not report the peptide multiplet.
      if (std::find(peptide_intensities.begin(), peptide_intensities.end(), -1.0) != peptide_intensities.end())
      {
        continue;
      }
      std::vector<Feature> features;
      ConsensusFeature consensus;
      bool abort = false; // set when one peptide of the multiplet fails the elution-time check
      // construct the feature and consensus maps
      // loop over peptides
      for (size_t peptide = 0; (peptide < patterns[pattern].getMassShiftCount() && !abort); ++peptide)
      {
        // coordinates of the peptide feature
        // RT is the intensity-average of all satellites peaks of the mono-isotopic mass trace
        // m/z is the intensity-average of all satellites peaks of the mono-isotopic mass trace
        Feature feature;
        double rt(0);
        double mz(0);
        double intensity_sum(0);
        // loop over isotopes i.e. mass traces of the peptide
        for (size_t isotope = 0; isotope < isotopes_per_peptide_max_; ++isotope)
        {
          // find satellites for this isotope i.e. mass trace
          size_t idx = peptide * isotopes_per_peptide_max_ + isotope;
          std::pair<std::multimap<size_t, MultiplexSatelliteProfile >::const_iterator, std::multimap<size_t, MultiplexSatelliteProfile >::const_iterator> satellites_isotope;
          satellites_isotope = satellites.equal_range(idx);
          DBoundingBox<2> mass_trace;
          // loop over satellites for this isotope i.e. mass trace
          for (std::multimap<size_t, MultiplexSatelliteProfile >::const_iterator satellite_it = satellites_isotope.first; satellite_it != satellites_isotope.second; ++satellite_it)
          {
            // only the mono-isotopic trace (isotope 0) contributes to the feature centroid
            if (isotope == 0)
            {
              // Satellites of zero intensity makes sense (borders of peaks), but mess up feature/consensus construction.
              double intensity_temp = (satellite_it->second).getIntensity() + 0.0001;
              rt += (satellite_it->second).getRT() * intensity_temp;
              mz += (satellite_it->second).getMZ() * intensity_temp;
              intensity_sum += intensity_temp;
            }
            mass_trace.enlarge((satellite_it->second).getRT(), (satellite_it->second).getMZ());
          }
          if ((mass_trace.width() == 0) || (mass_trace.height() == 0))
          {
            // The mass trace contains only a single point. Add a small margin around
            // the point, otherwise the mass trace is considered empty and not drawn.
            // TODO: Remove the magic number for the margin.
            mass_trace.enlarge(mass_trace.minX() - 0.01, mass_trace.minY() - 0.01);
            mass_trace.enlarge(mass_trace.maxX() + 0.01, mass_trace.maxY() + 0.01);
          }
          if (!(mass_trace.isEmpty()))
          {
            // store the bounding box of the mass trace as a rectangular convex hull
            ConvexHull2D hull;
            hull.addPoint(DPosition<2>(mass_trace.minX(), mass_trace.minY()));
            hull.addPoint(DPosition<2>(mass_trace.minX(), mass_trace.maxY()));
            hull.addPoint(DPosition<2>(mass_trace.maxX(), mass_trace.minY()));
            hull.addPoint(DPosition<2>(mass_trace.maxX(), mass_trace.maxY()));
            feature.getConvexHulls().push_back(hull);
          }
        }
        rt /= intensity_sum;
        mz /= intensity_sum;
        feature.setRT(rt);
        feature.setMZ(mz);
        feature.setIntensity(peptide_intensities[peptide]);
        feature.setCharge(patterns[pattern].getCharge());
        feature.setOverallQuality(1.0);
        // Check that the feature eluted long enough.
        // DBoundingBox<2> box = feature.getConvexHull().getBoundingBox(); // convex hull of the entire peptide feature
        DBoundingBox<2> box = feature.getConvexHulls()[0].getBoundingBox(); // convex hull of the mono-isotopic mass trace
        if (box.maxX() - box.minX() < static_cast<double>(param_.getValue("algorithm:rt_min")))
        {
          // discard the entire multiplet, not just this peptide
          abort = true;
          break;
        }
        features.push_back(feature);
        if (peptide == 0)
        {
          // The first/lightest peptide acts as anchor of the peptide multiplet consensus.
          // All peptide feature handles are connected to this point.
          consensus.setRT(rt);
          consensus.setMZ(mz);
          consensus.setIntensity(peptide_intensities[peptide]);
          consensus.setCharge(patterns[pattern].getCharge());
          consensus.setQuality(1.0);
        }
        FeatureHandle feature_handle;
        feature_handle.setRT(rt);
        feature_handle.setMZ(mz);
        feature_handle.setIntensity(peptide_intensities[peptide]);
        feature_handle.setCharge(patterns[pattern].getCharge());
        feature_handle.setMapIndex(peptide);
        //feature_handle.setUniqueId(&UniqueIdInterface::setUniqueId); // TODO: Do we need to set unique ID?
        consensus.insert(feature_handle);
        consensus_map_.getColumnHeaders()[peptide].size++;
      }
      // append the multiplet only if every peptide passed the checks
      if (!abort)
      {
        consensus_map_.push_back(consensus);
        for (Feature& feat : features)
        {
          feature_map_.push_back(feat);
        }
      }
    }
  }
  endProgress();
}
// Main entry point of the algorithm: reads the parameters, centroids the data
// if necessary, generates and filters the peak patterns, clusters the filter
// results and constructs the final feature and consensus maps (available
// afterwards via getFeatureMap() / getConsensusMap() / getBlacklist()).
//
// @param exp      MS data (profile or centroided); its chromatograms are
//                 cleared and its spectra are swapped into member variables
// @param progress whether progress logging is enabled
//
// @throws Exception::FileEmpty if the experiment contains no spectra
void FeatureFinderMultiplexAlgorithm::run(MSExperiment& exp, bool progress)
{
  // parameter section: algorithm, get selected charge range (format "min:max")
  String charge_string = param_.getValue("algorithm:charge").toString();
  charge_min_ = charge_string.prefix(':').toInt();
  charge_max_ = charge_string.suffix(':').toInt();
  if (charge_min_ > charge_max_)
  {
    swap(charge_min_, charge_max_);
  }
  // parameter section: algorithm, get isotopes per peptide range (format "min:max")
  String isotopes_per_peptide_string = param_.getValue("algorithm:isotopes_per_peptide").toString();
  isotopes_per_peptide_min_ = isotopes_per_peptide_string.prefix(':').toInt();
  isotopes_per_peptide_max_ = isotopes_per_peptide_string.suffix(':').toInt();
  if (isotopes_per_peptide_min_ > isotopes_per_peptide_max_)
  {
    swap(isotopes_per_peptide_min_, isotopes_per_peptide_max_);
  }
  // parameter section: labels, get all mass shifts
  label_mass_shift_["Arg6"] = param_.getValue("labels:Arg6");
  label_mass_shift_["Arg10"] = param_.getValue("labels:Arg10");
  label_mass_shift_["Lys4"] = param_.getValue("labels:Lys4");
  label_mass_shift_["Lys6"] = param_.getValue("labels:Lys6");
  label_mass_shift_["Lys8"] = param_.getValue("labels:Lys8");
  label_mass_shift_["Leu3"] = param_.getValue("labels:Leu3");
  label_mass_shift_["Dimethyl0"] = param_.getValue("labels:Dimethyl0");
  label_mass_shift_["Dimethyl4"] = param_.getValue("labels:Dimethyl4");
  label_mass_shift_["Dimethyl6"] = param_.getValue("labels:Dimethyl6");
  label_mass_shift_["Dimethyl8"] = param_.getValue("labels:Dimethyl8");
  label_mass_shift_["ICPL0"] = param_.getValue("labels:ICPL0");
  label_mass_shift_["ICPL4"] = param_.getValue("labels:ICPL4");
  label_mass_shift_["ICPL6"] = param_.getValue("labels:ICPL6");
  label_mass_shift_["ICPL10"] = param_.getValue("labels:ICPL10");
  progress_ = progress;
  // check for empty experimental data
  if (exp.getSpectra().empty())
  {
    throw OpenMS::Exception::FileEmpty(__FILE__, __LINE__, __FUNCTION__, "Error: No MS1 spectra in input file.");
  }
  // clear chromatograms (otherwise they are used to calculate optimal RT and m/z ranges)
  exp.getChromatograms().clear();
  // update m/z and RT ranges
  exp.updateRanges();
  // sort according to RT and MZ
  exp.sortSpectra();
  // determine type of spectral data (profile or centroided)
  SpectrumSettings::SpectrumType spectrum_type;
  if (param_.getValue("algorithm:spectrum_type") == "automatic")
  {
    spectrum_type = exp[0].getType(true);
    // The following means that UNKNOWN will be handled as profile.
    centroided_ = (spectrum_type == SpectrumSettings::SpectrumType::CENTROID);
  }
  else if (param_.getValue("algorithm:spectrum_type") == "centroid")
  {
    centroided_ = true;
  }
  else // "profile"
  {
    centroided_ = false;
  }
  // store experiment in member variables
  if (centroided_)
  {
    exp.swap(exp_centroid_);
    // exp_profile_ will never be used.
  }
  else
  {
    exp.swap(exp_profile_);
    // exp_centroid_ will be constructed later on.
  }
  /**
   * pick peaks (if input data are in profile mode)
   */
  std::vector<std::vector<PeakPickerHiRes::PeakBoundary> > boundaries_exp_s; // peak boundaries for spectra
  std::vector<std::vector<PeakPickerHiRes::PeakBoundary> > boundaries_exp_c; // peak boundaries for chromatograms
  if (!centroided_)
  {
    PeakPickerHiRes picker;
    Param param = picker.getParameters();
    picker.setLogType(getLogType());
    param.setValue("ms_levels", ListUtils::create<Int>("1"));
    param.setValue("signal_to_noise", 0.0); // signal-to-noise estimation switched off
    picker.setParameters(param);
    picker.pickExperiment(exp_profile_, exp_centroid_, boundaries_exp_s, boundaries_exp_c);
  }
  /**
   * generate peak patterns for subsequent filtering step
   */
  MultiplexDeltaMassesGenerator generator = MultiplexDeltaMassesGenerator(param_.getValue("algorithm:labels").toString(), param_.getValue("algorithm:missed_cleavages"), label_mass_shift_);
  if (param_.getValue("algorithm:knock_out") == "true")
  {
    // also search for incomplete multiplets (knock-outs)
    generator.generateKnockoutDeltaMasses();
  }
#ifdef DEBUG
  generator.printSamplesLabelsList(std::cout);
  generator.printDeltaMassesList(std::cout);
#endif
  std::vector<MultiplexDeltaMasses> masses = generator.getDeltaMassesList();
  std::vector<MultiplexIsotopicPeakPattern> patterns = generatePeakPatterns_(charge_min_, charge_max_, isotopes_per_peptide_max_, masses);
  // Switch off averagine_similarity_scaling if we search for single peptide features only.
  // (This scaling parameter is only relevant if we search for multiplets and (!) singlets.)
  double averagine_similarity_scaling;
  std::vector<std::vector<String> > list = generator.getSamplesLabelsList();
  if (list.size() == 1 && list[0].size() == 1 && list[0][0] == "no_label")
  {
    // search for singlets only
    averagine_similarity_scaling = 0.0;
  }
  else
  {
    // search for multiplets (and optionally singlets, if knock_out switched on)
    averagine_similarity_scaling = param_.getValue("algorithm:averagine_similarity_scaling");
  }
  if (centroided_)
  {
    // centroided data
    /**
     * filter for peak patterns
     */
    MultiplexFilteringCentroided filtering(exp_centroid_, patterns, isotopes_per_peptide_min_, isotopes_per_peptide_max_, param_.getValue("algorithm:intensity_cutoff"), param_.getValue("algorithm:rt_band"), param_.getValue("algorithm:mz_tolerance"), (param_.getValue("algorithm:mz_unit") == "ppm"), param_.getValue("algorithm:peptide_similarity"), param_.getValue("algorithm:averagine_similarity"), averagine_similarity_scaling, param_.getValue("algorithm:averagine_type").toString());
    filtering.setLogType(getLogType());
    std::vector<MultiplexFilteredMSExperiment> filter_results = filtering.filter();
    /**
     * cluster filter results
     */
    MultiplexClustering clustering(exp_centroid_, param_.getValue("algorithm:mz_tolerance"), (param_.getValue("algorithm:mz_unit") == "ppm"), param_.getValue("algorithm:rt_typical"));
    clustering.setLogType(getLogType());
    std::vector<std::map<int, GridBasedCluster> > cluster_results = clustering.cluster(filter_results);
    /**
     * construct feature and consensus maps i.e. the final results
     */
    filtering.getCentroidedExperiment().swap(exp_centroid_);
    generateMapsCentroided_(patterns, filter_results, cluster_results);
  }
  else
  {
    // profile data
    /**
     * filter for peak patterns
     */
    MultiplexFilteringProfile filtering(exp_profile_, exp_centroid_, boundaries_exp_s, patterns, isotopes_per_peptide_min_, isotopes_per_peptide_max_, param_.getValue("algorithm:intensity_cutoff"), param_.getValue("algorithm:rt_band"), param_.getValue("algorithm:mz_tolerance"), (param_.getValue("algorithm:mz_unit") == "ppm"), param_.getValue("algorithm:peptide_similarity"), param_.getValue("algorithm:averagine_similarity"), averagine_similarity_scaling, param_.getValue("algorithm:averagine_type").toString());
    filtering.setLogType(getLogType());
    std::vector<MultiplexFilteredMSExperiment> filter_results = filtering.filter();
    exp_blacklist_ = filtering.getBlacklist();
    /**
     * cluster filter results
     */
    MultiplexClustering clustering(exp_profile_, exp_centroid_, boundaries_exp_s, param_.getValue("algorithm:rt_typical"));
    clustering.setLogType(getLogType());
    std::vector<std::map<int, GridBasedCluster> > cluster_results = clustering.cluster(filter_results);
    /**
     * construct feature and consensus maps i.e. the final results
     */
    filtering.getCentroidedExperiment().swap(exp_centroid_);
    filtering.getPeakBoundaries().swap(boundaries_exp_s);
    generateMapsProfile_(patterns, filter_results, cluster_results);
  }
  // finalize consensus map
  //TODO only if sample labels are not empty
  consensus_map_.setExperimentType("labeled_MS1");
  consensus_map_.sortByPosition();
  consensus_map_.applyMemberFunction(&UniqueIdInterface::setUniqueId);
  Size i{0};
  for (auto & ch : consensus_map_.getColumnHeaders())
  {
    ch.second.setMetaValue("channel_id", i);
    ++i;
  }
  // construct sample_labels from the "algorithm:labels" parameter string,
  // e.g. "[][Lys8,Arg10]" -> {{"no_label"}, {"Lys8", "Arg10"}}
  std::vector<std::vector<String> > samples_labels;
  std::vector<String> temp_samples;
  String labels(param_.getValue("algorithm:labels").toString());
  boost::replace_all(labels, "[]", "no_label");
  boost::replace_all(labels, "()", "no_label");
  boost::replace_all(labels, "{}", "no_label");
  boost::split(temp_samples, labels, boost::is_any_of("[](){}")); // any bracket allowed to separate samples
  for (unsigned i = 0; i < temp_samples.size(); ++i)
  {
    if (!temp_samples[i].empty())
    {
      if (temp_samples[i]=="no_label")
      {
        vector<String> temp_labels;
        temp_labels.emplace_back("no_label");
        samples_labels.push_back(temp_labels);
      }
      else
      {
        vector<String> temp_labels;
        boost::split(temp_labels, temp_samples[i], boost::is_any_of(",;: ")); // various separators allowed to separate labels
        samples_labels.push_back(temp_labels);
      }
    }
  }
  // fall back to a single label-free sample
  if (samples_labels.empty())
  {
    vector<String> temp_labels;
    temp_labels.emplace_back("no_label");
    samples_labels.push_back(temp_labels);
  }
  // annotate maps
  for (unsigned i = 0; i < samples_labels.size(); ++i)
  {
    ConsensusMap::ColumnHeader& desc = consensus_map_.getColumnHeaders()[i];
    if (param_.getValue("algorithm:knock_out") == "true")
    {
      // With knock-outs present, the correct labels can only be determined during ID mapping.
      // For now, we simply store a unique identifier.
      std::stringstream stream;
      stream << "label " << i;
      desc.label = stream.str();
    }
    else
    {
      String label_string;
      for (unsigned j = 0; j < samples_labels[i].size(); ++j)
      {
        label_string.append(samples_labels[i][j]);
      }
      desc.label = label_string;
    }
  }
  // finalize feature map
  feature_map_.sortByPosition();
  feature_map_.applyMemberFunction(&UniqueIdInterface::setUniqueId);
}
// Returns the feature map constructed by run(); meaningful only after run() was called.
FeatureMap& FeatureFinderMultiplexAlgorithm::getFeatureMap()
{
  return feature_map_;
}
// Returns the consensus map constructed by run(); meaningful only after run() was called.
ConsensusMap& FeatureFinderMultiplexAlgorithm::getConsensusMap()
{
  return consensus_map_;
}
// Returns the blacklist of the filtering step; filled only when run() processed profile data.
MSExperiment& FeatureFinderMultiplexAlgorithm::getBlacklist()
{
  return exp_blacklist_;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/GaussTraceFitter.cpp | .cpp | 9,814 | 309 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg$
// $Authors: Stephan Aiche$
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/GaussTraceFitter.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <Eigen/Core>
#include <numeric> // for "accumulate"
namespace OpenMS
{
// number of parameters optimized by the fit: height, center (x0) and sigma
const Size GaussTraceFitter::NUM_PARAMS_ = 3;
/// Default constructor; model parameters are set later via fit()/setInitialParameters_().
GaussTraceFitter::GaussTraceFitter()
{
  //setName("GaussTraceFitter");
}
/// Copy constructor: copies the Gaussian model parameters and refreshes derived members.
GaussTraceFitter::GaussTraceFitter(const GaussTraceFitter& other) :
  TraceFitter(other)
{
  height_ = other.height_;
  x0_ = other.x0_;
  sigma_ = other.sigma_;
  updateMembers_();
}
/// Assignment: delegates base-class state, then copies the Gaussian parameters.
GaussTraceFitter& GaussTraceFitter::operator=(const GaussTraceFitter& source)
{
  TraceFitter::operator=(source);
  height_ = source.height_;
  x0_ = source.x0_;
  sigma_ = source.sigma_;
  updateMembers_();
  return *this;
}
// defaulted destructor: no manually managed resources
GaussTraceFitter::~GaussTraceFitter() = default;
/// Fits a Gaussian to the given mass traces: derives initial estimates for
/// height, center and sigma from the data (setInitialParameters_), then refines
/// them with Levenberg-Marquardt via TraceFitter::optimize_.
void GaussTraceFitter::fit(FeatureFinderAlgorithmPickedHelperStructs::MassTraces& traces)
{
  OPENMS_LOG_DEBUG << "Traces length: " << traces.size() << "\n";
  setInitialParameters_(traces);
  // pack the start values in optimizer order: height, x0, sigma
  std::vector<double> x_init(NUM_PARAMS_);
  x_init[0] = height_;
  x_init[1] = x0_;
  x_init[2] = sigma_;
  TraceFitter::ModelData data;
  data.traces_ptr = &traces;
  data.weighted = this->weighted_;
  GaussTraceFunctor functor(NUM_PARAMS_, &data);
  TraceFitter::optimize_(x_init, functor);
}
/// Lower RT bound of the fitted peak: 2.5 sigma left of the center.
double GaussTraceFitter::getLowerRTBound() const
{
  return x0_ - 2.5 * sigma_;
}
/// Upper RT bound of the fitted peak: 2.5 sigma right of the center.
double GaussTraceFitter::getUpperRTBound() const
{
  return x0_ + 2.5 * sigma_;
}
/// Fitted Gaussian amplitude (peak height).
double GaussTraceFitter::getHeight() const
{
  return height_;
}
/// Fitted peak center (apex RT).
double GaussTraceFitter::getCenter() const
{
  return x0_;
}
/// Full width at half maximum of the fitted Gaussian.
double GaussTraceFitter::getFWHM() const
{
  return 2.35482 * sigma_; // 2 * sqrt(2 * log(2)) * sigma
}
/// Fitted Gaussian standard deviation (in RT units).
double GaussTraceFitter::getSigma() const
{
  return sigma_;
}
/// Returns true if the fitted peak is too broad: its 5-sigma width exceeds
/// the allowed fraction (max_rt_span) of the region's RT span.
bool GaussTraceFitter::checkMaximalRTSpan(const double max_rt_span)
{
  return 5.0 * sigma_ > max_rt_span * region_rt_span_;
}
/// Returns true if the given RT range is too narrow: smaller than the allowed
/// fraction (min_rt_span) of the peak's 5-sigma width.
bool GaussTraceFitter::checkMinimalRTSpan(const std::pair<double, double>& rt_bounds, const double min_rt_span)
{
  return (rt_bounds.second - rt_bounds.first) < (min_rt_span * 5.0 * sigma_);
}
/// Squares its argument; considerably faster than std::pow(b, 2).
double pow2(double b)
{
  return b * b;
}
/// Evaluates the fitted Gaussian at the given RT.
double GaussTraceFitter::getValue(double rt) const
{
  return height_ * exp(-0.5 * pow2(rt - x0_) / pow2(sigma_));
}
/// Analytical area of the fitted Gaussian: sqrt(2*pi) * height * sigma.
double GaussTraceFitter::getArea()
{
  // area under the curve, 2.5... is approx. sqrt(2 * pi):
  return 2.506628 * height_ * sigma_;
}
/// Builds a gnuplot function definition for the fitted Gaussian of one mass
/// trace, scaled by the trace's theoretical isotope intensity, offset by
/// 'baseline' and shifted in RT by 'rt_shift'.
String GaussTraceFitter::getGnuplotFormula(const FeatureFinderAlgorithmPickedHelperStructs::MassTrace& trace, const char function_name, const double baseline, const double rt_shift)
{
  std::stringstream s;
  s << String(function_name) << "(x)= " << baseline << " + ";
  s << (trace.theoretical_int * height_) << " * exp(-0.5*(x-" << (rt_shift + x0_) << ")**2/(" << sigma_ << ")**2)";
  return String(s.str());
}
/// Unpacks the optimizer's result vector back into the model members.
void GaussTraceFitter::getOptimizedParameters_(const std::vector<double>& x_init)
{
  height_ = x_init[0];
  x0_ = x_init[1];
  // the optimizer may drive sigma negative; only its magnitude matters to the model
  sigma_ = std::fabs(x_init[2]);
}
/// Constructs the LM functor: 'dimensions' parameters and one residual per
/// peak across all mass traces.
GaussTraceFitter::GaussTraceFunctor::GaussTraceFunctor(int dimensions,
                                                       const TraceFitter::ModelData* data) :
  TraceFitter::GenericFunctor(dimensions,
                              static_cast<int>(data->traces_ptr->getPeakCount())),
  m_data(data)
{
}
/// Fills 'fvec' with the (optionally intensity-weighted) residuals between the
/// Gaussian model and the observed peak intensities, one entry per peak.
int GaussTraceFitter::GaussTraceFunctor::operator()(const double* x, double* fvec)
{
  // Create Eigen::Map views for convenient indexing
  Eigen::Map<const Eigen::VectorXd> x_map(x, m_inputs);
  Eigen::Map<Eigen::VectorXd> fvec_map(fvec, m_values);
  double height = x_map(0);
  double x0 = x_map(1);
  double sig = x_map(2);
  double c_fac = -0.5 / pow2(sig); // Gaussian exponent factor: -1/(2*sigma^2)
  Size count = 0;
  for (Size t = 0; t < m_data->traces_ptr->size(); ++t)
  {
    const FeatureFinderAlgorithmPickedHelperStructs::MassTrace& trace = (*m_data->traces_ptr)[t];
    double weight = m_data->weighted ? trace.theoretical_int : 1.0;
    for (Size i = 0; i < trace.peaks.size(); ++i)
    {
      // residual = (baseline + scaled Gaussian at peak RT) - observed intensity
      fvec_map(count) = (m_data->traces_ptr->baseline + trace.theoretical_int * height
                         * exp(c_fac * pow2(trace.peaks[i].first - x0)) - trace.peaks[i].second->getIntensity()) * weight;
      ++count;
    }
  }
  return 0;
}
// compute Jacobian matrix for the different parameters
// Column 0: d/d(height), column 1: d/d(x0), column 2: d/d(sigma).
int GaussTraceFitter::GaussTraceFunctor::df(const double* x, double* J)
{
  // Create Eigen::Map views for convenient indexing
  Eigen::Map<const Eigen::VectorXd> x_map(x, m_inputs);
  Eigen::Map<Eigen::MatrixXd> J_map(J, m_values, m_inputs);
  double height = x_map(0);
  double x0 = x_map(1);
  double sig = x_map(2);
  double sig_sq = pow2(sig);
  double inv_siq2 = 1 / sig_sq;
  double sig_3 = sig * sig_sq;
  double inv_sig3 = 1 / sig_3;
  double c_fac = -0.5 / sig_sq;
  Size count = 0;
  for (Size t = 0; t < m_data->traces_ptr->size(); ++t)
  {
    const FeatureFinderAlgorithmPickedHelperStructs::MassTrace& trace = (*m_data->traces_ptr)[t];
    double weight = m_data->weighted ? trace.theoretical_int : 1.0;
    for (Size i = 0; i < trace.peaks.size(); ++i)
    {
      double rt = trace.peaks[i].first;
      double e = exp(c_fac * pow2(rt - x0));
      J_map(count, 0) = trace.theoretical_int * e * weight;
      J_map(count, 1) = trace.theoretical_int * height * e * (rt - x0) * inv_siq2 * weight;
      // NOTE(review): the analytical d/d(sigma) of the Gaussian is
      // theoretical_int * height * e * (rt - x0)^2 / sigma^3 (factor 1.0);
      // the 0.125 scaling here deviates from that — confirm it is intentional
      // (LM can still converge with a scaled gradient, just more slowly).
      J_map(count, 2) = 0.125* trace.theoretical_int* height* e* pow2(rt - x0) * inv_sig3 * weight;
      ++count;
    }
  }
  return 0;
}
/// Derives start values for the optimizer from the raw trace data:
/// sums intensities over all traces per RT, smooths them with a moving
/// average (window 2*LEN+1), takes the smoothed maximum as height/center,
/// and estimates sigma from the RT distance between the half-maximum points.
void GaussTraceFitter::setInitialParameters_(FeatureFinderAlgorithmPickedHelperStructs::MassTraces& traces)
{
  OPENMS_LOG_DEBUG << "Setting initial params for Fitter. Number of traces: " << traces.size() << std::endl;
  // aggregate data; some peaks (where intensity is zero) can be missing!
  // mapping: RT -> total intensity over all mass traces
  std::list<std::pair<double, double> > total_intensities;
  traces.computeIntensityProfile(total_intensities);
  const Size N = total_intensities.size();
  const Size LEN = 2; // window size: 2 * LEN + 1
  std::vector<double> totals(N + 2 * LEN); // pad with zeros at ends
  Int index = LEN;
  // OPENMS_LOG_DEBUG << "Summed intensities:\n";
  for (std::list<std::pair<double, double> >::iterator it =
         total_intensities.begin(); it != total_intensities.end(); ++it)
  {
    totals[index++] = it->second;
    // OPENMS_LOG_DEBUG << it->second << std::endl;
  }
  std::vector<double> smoothed(N);
  Size max_index = 0; // index of max. smoothed intensity
  if (N <= LEN + 1) // not enough distinct x values for smoothing
  {
    // throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "UnableToFit-MovingAverage", "Too few time points for smoothing with window size " + String(2 * LEN + 1));
    // fall back to the raw (zero-padded) totals without averaging
    for (Size i = 0; i < N; ++i)
    {
      smoothed[i] = totals[i + LEN];
      if (smoothed[i] > smoothed[max_index]) max_index = i;
    }
  }
  else // compute moving average for smoothing
  {
    // OPENMS_LOG_DEBUG << "Smoothed intensities:\n";
    // running sum over the window; zero padding handles the borders
    double sum = std::accumulate(&totals[LEN], &totals[2 * LEN], 0.0);
    for (Size i = 0; i < N; ++i)
    {
      sum += totals[i + 2 * LEN];
      smoothed[i] = sum / (2 * LEN + 1);
      sum -= totals[i];
      if (smoothed[i] > smoothed[max_index]) max_index = i;
      // OPENMS_LOG_DEBUG << smoothed[i] << std::endl;
    }
  }
  height_ = smoothed[max_index] - traces.baseline;
  std::list<std::pair<double, double> >::iterator it = total_intensities.begin();
  std::advance(it, max_index);
  x0_ = it->first;
  region_rt_span_ = (total_intensities.rbegin()->first -
                     total_intensities.begin()->first);
  // find RT values where intensity is at half-maximum:
  Int left_index = static_cast<Int>(max_index);
  while ((left_index > 0) && (smoothed[left_index] > height_ * 0.5))
  {
    --left_index;
  }
  double left_height = smoothed[left_index];
  it = total_intensities.begin();
  std::advance(it, left_index);
  double left_rt = it->first;
  Int right_index = static_cast<Int>(max_index);
  while ((right_index < Int(N - 1)) && (smoothed[right_index] > height_ * 0.5))
  {
    ++right_index;
  }
  double right_height = smoothed[right_index];
  it = total_intensities.end();
  std::advance(it, right_index - Int(N));
  double right_rt = it->first;
  double delta_x = right_rt - left_rt;
  double alpha = (left_height + right_height) * 0.5 / height_; // ~0.5
  if (alpha >= 1)
  {
    sigma_ = 1.0; // degenerate case, all values are the same
  }
  else
  {
    // invert the Gaussian at the (averaged) half-maximum height to get sigma
    sigma_ = delta_x * 0.5 / sqrt(-2.0 * log(alpha));
  }
#ifndef NDEBUG
  OPENMS_LOG_DEBUG << "\nMax. idx: " << max_index
                   << "\nHeight: " << height_
                   << "\nx0: " << x0_
                   << "\nregion_rt_span: " << region_rt_span_
                   << "\nLeft half-maximum at index " << left_index << ", RT " << left_rt
                   << "\nRight half-maximum at index " << right_index << ", RT " << right_rt
                   << "\nSigma: " << sigma_ << std::endl;
#endif
}
/// Forwards to the base class; no Gauss-specific derived members to refresh.
void GaussTraceFitter::updateMembers_()
{
  TraceFitter::updateMembers_();
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/ElutionPeakDetection.cpp | .cpp | 21,173 | 634 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg$
// $Authors: Erhan Kenar, Holger Franken, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <OpenMS/FEATUREFINDER/ElutionPeakDetection.h>
#include <OpenMS/PROCESSING/SMOOTHING/SavitzkyGolayFilter.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <boost/dynamic_bitset.hpp>
#ifdef _OPENMP
#include <omp.h>
#endif
// #define DEBUG_EPD
namespace OpenMS
{
/// Constructor: registers all algorithm parameters with their defaults and
/// valid values, then copies the defaults into the active parameter set.
ElutionPeakDetection::ElutionPeakDetection() :
  DefaultParamHandler("ElutionPeakDetection"), ProgressLogger()
{
  defaults_.setValue("chrom_fwhm", 5.0, "Expected full-width-at-half-maximum of chromatographic peaks (in seconds).");
  defaults_.setValue("chrom_peak_snr", 3.0, "Minimum signal-to-noise a mass trace should have.");
  // NOTE: the algorithm will only act upon the "fixed" value, if you would
  // like to use the "auto" setting, you will have to call filterByPeakWidth
  // yourself
  defaults_.setValue("width_filtering", "fixed", "Enable filtering of unlikely peak widths. The fixed setting filters out mass traces outside the [min_fwhm, max_fwhm] interval (set parameters accordingly!). The auto setting filters with the 5 and 95% quantiles of the peak width distribution.");
  defaults_.setValidStrings("width_filtering", {"off","fixed","auto"});
  defaults_.setValue("min_fwhm", 1.0, "Minimum full-width-at-half-maximum of chromatographic peaks (in seconds). Ignored if parameter width_filtering is off or auto.", {"advanced"});
  defaults_.setValue("max_fwhm", 60.0, "Maximum full-width-at-half-maximum of chromatographic peaks (in seconds). Ignored if parameter width_filtering is off or auto.", {"advanced"});
  defaults_.setValue("masstrace_snr_filtering", "false", "Apply post-filtering by signal-to-noise ratio after smoothing.", {"advanced"});
  defaults_.setValidStrings("masstrace_snr_filtering", {"true","false"});
  defaultsToParam_();
  this->setLogType(CMD);
}
// defaulted destructor: no manually managed resources
ElutionPeakDetection::~ElutionPeakDetection() = default;
/// Estimates the noise of a mass trace as the root-mean-square error (RMSE)
/// between the raw and the smoothed intensities.
/// @param tr mass trace; must have been smoothed (otherwise 0.0 is returned)
/// @return RMSE, or 0.0 if no smoothed intensities are available
double ElutionPeakDetection::computeMassTraceNoise(const MassTrace& tr)
{
  // bind by const reference to avoid a full copy of the smoothed-intensity
  // vector (also safely extends the lifetime of a by-value return)
  const std::vector<double>& smooth_ints = tr.getSmoothedIntensities();
  double squared_sum(0.0);
  for (Size i = 0; i < smooth_ints.size(); ++i)
  {
    const double diff = tr[i].getIntensity() - smooth_ints[i];
    squared_sum += diff * diff;
  }
  double rmse(0.0);
  if (!smooth_ints.empty())
  {
    rmse = std::sqrt(squared_sum / smooth_ints.size());
  }
  return rmse;
}
/// Signal-to-noise ratio of a whole mass trace: peak area divided by the
/// noise area (RMSE times trace length).
/// NOTE(review): if the noise estimate is 0 (perfectly smooth trace), the
/// division yields inf/NaN — confirm callers tolerate this.
double ElutionPeakDetection::computeMassTraceSNR(const MassTrace& tr)
{
  double snr(0.0);
  if (tr.getSize() > 0)
  {
    double noise_area = computeMassTraceNoise(tr) * tr.getTraceLength();
    double signal_area = tr.computePeakArea();
    snr = signal_area / noise_area;
  }
  // std::cout << "snr " << snr << " ";
  return snr;
}
/// Signal-to-noise ratio at the apex: smoothed apex intensity divided by the
/// trace noise estimate; 0.0 if the noise estimate is not positive.
double ElutionPeakDetection::computeApexSNR(const MassTrace& tr)
{
  const double noise = computeMassTraceNoise(tr);
  return (noise > 0.0) ? tr.getMaxIntensity(true) / noise : 0.0;
}
/// Detects local maxima and minima in the smoothed intensities of a mass trace.
/// Maxima are seeded from the most intense points and accepted only if no
/// higher smoothed intensity lies within +/- num_neighboring_peaks; minima are
/// then searched between adjacent maxima by bisection and only kept if they
/// split the trace into two sufficiently high (>= 2x the minimum) and
/// sufficiently wide (>= min_fwhm_/2) peaks.
/// @throws Exception::InvalidValue if the trace was not smoothed beforehand
void ElutionPeakDetection::findLocalExtrema(const MassTrace& tr, const Size& num_neighboring_peaks,
                                            std::vector<Size>& chrom_maxes, std::vector<Size>& chrom_mins) const
{
  std::vector<double> smoothed_ints_vec(tr.getSmoothedIntensities());
  Size mt_length(smoothed_ints_vec.size());
  if (mt_length != tr.getSize())
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                  "MassTrace was not smoothed before! Aborting...", String(smoothed_ints_vec.size()));
  }
  // first make sure that everything is cleared
  chrom_maxes.clear();
  chrom_mins.clear();
  // Handle empty traces to avoid crash
  if (mt_length == 0)
  {
    return;
  }
  if (mt_length < 3)
  {
    // determine the index of maximum intensity
    Size max_idx = tr.findMaxByIntPeak(true);
    chrom_maxes.push_back(max_idx);
    return;
  }
  // Remember which indices we have already used
  boost::dynamic_bitset<> used_idx(mt_length);
  // Extract RTs from the chromatogram and store them into vectors for index access
  // Store indices along with smoothed_ints to keep track of the peak order
  // (multimap iterates in ascending intensity; seeds are processed low-to-high)
  std::multimap<double, Size> intensity_indices;
  for (Size idx = 0; idx < mt_length; ++idx)
  {
    intensity_indices.insert(std::make_pair(smoothed_ints_vec[idx], idx));
  }
  // Step 1: Identify maxima
  for (const auto& intensity_pair : intensity_indices)
  {
    double ref_int = intensity_pair.first;
    Size ref_idx = intensity_pair.second;
    if (!(used_idx[ref_idx]) && ref_int > 0.0)
    { // only allow unused points as seeds (potential local maximum)
      bool real_max = true;
      // Get start_idx and end_idx based on expected peak width
      Size start_idx(0);
      if (ref_idx > num_neighboring_peaks)
      {
        start_idx = ref_idx - num_neighboring_peaks;
      }
      Size end_idx = ref_idx + num_neighboring_peaks;
      if (end_idx > mt_length)
      {
        end_idx = mt_length;
      }
      // Identify putative peak between start_idx and end_idx, now check if
      // no other maxima exist within the expected boundaries (check whether
      // ref_int is higher than all smoothed intensities within the
      // boundaries).
      for (Size j = start_idx; j < end_idx; ++j)
      {
        if (j == ref_idx)
        { // skip seed
          continue;
        }
        if (used_idx[j])
        { // peak has already been collected?
          if (smoothed_ints_vec[j] > ref_int)
          { // break if higher intensity
            real_max = false;
            break;
          }
          else
          { // skip if only a low intensity peak (e.g. flanks of elution profile)
            continue;
          }
        }
        if (smoothed_ints_vec[j] > ref_int)
        {
          real_max = false;
          break;
        }
      }
      // If no other maxima exists, then add the current one to the list and
      // mark all indices as used
      if (real_max)
      {
        chrom_maxes.push_back(ref_idx);
        for (Size j = start_idx; j < end_idx; ++j)
        {
          used_idx[j] = true;
        }
      }
    }
  }
  std::sort(chrom_maxes.begin(), chrom_maxes.end());
  // Step 2: Identify minima using bisection between two maxima
  if (chrom_maxes.size() > 1)
  {
    // Keep track of two maxima
    Size left_idx(0), right_idx(1);
    while (left_idx < right_idx && right_idx < chrom_maxes.size())
    {
      // 2.1 Perform bisection between the two maxima to find potential minimum
      Size left_bound(chrom_maxes[left_idx] + 1);
      Size right_bound(chrom_maxes[right_idx] - 1);
      while ((left_bound + 1) < right_bound)
      {
        // Identify middle between two bounds
        double mid_dist((right_bound - left_bound) / 2.0);
        Size mid_element_idx(left_bound + std::floor(mid_dist));
        double mid_element_int = smoothed_ints_vec[mid_element_idx];
        // Walk to the left if the slope is positive here
        if (mid_element_int <= smoothed_ints_vec[mid_element_idx + 1])
        {
          right_bound = mid_element_idx;
        }
        // else walk to the right ...
        else
        {
          left_bound = mid_element_idx;
        }
      }
      // 2.2 Choose minimum (either left_bound or right_bound) and get minimal RT / Intensity
      Size min_rt((smoothed_ints_vec[left_bound] < smoothed_ints_vec[right_bound]) ? left_bound : right_bound);
      // clamp the minimum intensity to at least 1.0 for the ratio test below
      double min_int(1.0);
      if (smoothed_ints_vec[min_rt] > min_int)
      {
        min_int = smoothed_ints_vec[min_rt];
      }
      // 2.3 Compute distance and intensities
      double left_max_int(smoothed_ints_vec[chrom_maxes[left_idx]]);
      double right_max_int(smoothed_ints_vec[chrom_maxes[right_idx]]);
      double left_rt(tr[chrom_maxes[left_idx]].getRT());
      double mid_rt(tr[min_rt].getRT());
      double right_rt(tr[chrom_maxes[right_idx]].getRT());
      // compute the distance from the two maxima to the new minima
      double left_dist(std::fabs(mid_rt - left_rt));
      double right_dist(std::fabs(right_rt - mid_rt));
      double min_dist(min_fwhm_ / 2.0);
      // out debug info
#ifdef DEBUG_EPD
      std::cout << "findLocalExtrema: Identified potential minimum \n";
      std::cout << "  " << tr.getLabel() << ": left_idx,right_idx " << left_idx << "," << right_idx <<
        ":" << left_max_int << " min: " << min_int << " " << right_max_int <<
        " l " << left_rt << " r " << right_rt << " m " << mid_rt << '\n';
      std::cout << "  Int: min " << min_int << ", left: " << left_max_int << ", right: " << right_max_int << '\n';
      std::cout << "  Distance: min " << min_dist << ", left: " << left_dist << ", right: " << right_dist << '\n';
#endif
      // 2.4 Decide whether to split the masstrace (introduce a minimum):
      //     i) the maxima intensity should be at least 2x above the minimum for a split
      //     ii) check that splitting the trace would not create peaks smaller than min_dist
      if (left_max_int / min_int >= 2.0
         && right_max_int / min_int >= 2.0
         && left_dist >= min_dist
         && right_dist >= min_dist)
      {
#ifdef DEBUG_EPD
        std::cout << " -> add new minima " << ": left_idx,right_idx " << left_idx << "," << right_idx <<
          " l " << left_rt << " r " << right_rt << " m " << mid_rt << '\n';
#endif
        chrom_mins.push_back(min_rt);
        left_idx = right_idx;
        ++right_idx;
      }
      else
      {
        // keep one of the maxima (the one with higher intensity), replace
        // the other with the next in RT
        if (left_max_int > right_max_int)
        {
          ++right_idx;
        }
        else
        {
          left_idx = right_idx;
          ++right_idx;
        }
      }
    }
  }
  return;
}
/// Detects elution peaks in a single mass trace; resulting sub-traces are
/// appended to 'single_mtraces' (cleared first).
void ElutionPeakDetection::detectPeaks(MassTrace& mt, std::vector<MassTrace>& single_mtraces)
{
  // start with an empty output container before collecting sub-traces
  single_mtraces.clear();
  detectElutionPeaks_(mt, single_mtraces);
}
/// Detects elution peaks in all given mass traces (OpenMP-parallel over
/// traces); results are appended to 'single_mtraces' (cleared first).
void ElutionPeakDetection::detectPeaks(std::vector<MassTrace>& mt_vec, std::vector<MassTrace>& single_mtraces)
{
  // make sure that single_mtraces is empty
  single_mtraces.clear();
  this->startProgress(0, mt_vec.size(), "elution peak detection");
  Size progress(0);
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (SignedSize i = 0; i < (SignedSize) mt_vec.size(); ++i)
  {
    IF_MASTERTHREAD this->setProgress(progress);
#ifdef _OPENMP
#pragma omp atomic
#endif
    ++progress;
    // push_back to 'single_mtraces' is protected, so threading is ok
    detectElutionPeaks_(mt_vec[i], single_mtraces);
  }
  this->endProgress();
  return;
}
void ElutionPeakDetection::filterByPeakWidth(std::vector<MassTrace>& mt_vec, std::vector<MassTrace>& filt_mtraces)
{
filt_mtraces.clear();
std::multimap<double, Size> sorted_by_peakwidth;
for (Size i = 0; i < mt_vec.size(); ++i)
{
sorted_by_peakwidth.insert(std::make_pair(mt_vec[i].estimateFWHM(true), i));
}
double mapsize(sorted_by_peakwidth.size());
Size lower_quartile_idx(std::floor(mapsize * 0.05));
Size upper_quartile_idx(std::floor(mapsize * 0.95));
Size count_mt(0);
// filter out mass traces below lower quartile and above upper quartile
for (const auto& width_pair : sorted_by_peakwidth)
{
if (count_mt >= lower_quartile_idx && count_mt <= upper_quartile_idx)
{
// std::cout << "pw added " << width_pair.first << '\n';
filt_mtraces.push_back(mt_vec[width_pair.second]);
}
++count_mt;
}
std::cout << "pw low: " << filt_mtraces[0].estimateFWHM(true) << " " << " pw high: " << filt_mtraces[filt_mtraces.size() - 1].estimateFWHM(true) << '\n';
return;
}
/// Core elution-peak detection for one mass trace: smooths the trace, finds
/// local extrema, and either keeps the whole trace (single maximum) or splits
/// it at the detected minima into sub-traces; each candidate is checked
/// against the FWHM and SNR filter criteria before being appended (thread-safe)
/// to 'single_mtraces'.
void ElutionPeakDetection::detectElutionPeaks_(MassTrace& mt, std::vector<MassTrace>& single_mtraces)
{
  // *********************************************************************
  // Step 1: Smooth data
  // *********************************************************************
  // window size: expected chromatographic FWHM expressed in scans
  double scan_time(mt.getAverageMS1CycleTime());
  Size win_size = std::ceil(chrom_fwhm_ / scan_time);
  // add smoothed data (original data is still accessible)
  smoothData(mt, static_cast<Int>(win_size));
#ifdef DEBUG_EPD
  Size i = 0;
  std::cout << "*****\n";
  std::cout << " finding elution peaks in mass traces RT " << mt.getCentroidRT() << " / mz " << mt.getCentroidMZ() << '\n';
  std::cout << " used for smoothing: win_size " << win_size << " FWHM scan num " /* << mt.getFWHMScansNum() */ << '\n';
  std::cout << "*****\n";
  for (const auto& peak : mt)
  {
    // std::cout << peak.getIntensity() << " " << mt.getSmoothedIntensities()[i] << '\n';
    ++i;
  }
  std::cout << "*****\n";
#endif
  // *********************************************************************
  // Step 2: Identify local maxima and minima
  // *********************************************************************
  std::vector<Size> maxes, mins;
  findLocalExtrema(mt, win_size / 2, maxes, mins);
#ifdef DEBUG_EPD
  std::cout << "findLocalExtrema returned: maxima " << maxes.size() << " / minima " << mins.size() << '\n';
#endif
  // *********************************************************************
  // Step 3: Split mass trace according to detected peaks
  // *********************************************************************
  // if only one maximum exists: finished!
  if (maxes.size() == 1)
  {
    bool pw_ok = true;
    bool snr_ok = true;
    // *********************************************************************
    // Step 3.1: check mass trace length criteria (if fixed filter is enabled)
    // *********************************************************************
    if (pw_filtering_ == "fixed")
    {
      double act_fwhm(mt.estimateFWHM(true));
      if (act_fwhm < min_fwhm_ || act_fwhm > max_fwhm_)
      {
        pw_ok = false;
      }
    }
    // *********************************************************************
    // Step 3.2: check mass trace signal to noise filter criteria
    // *********************************************************************
    if (mt_snr_filtering_)
    {
      if (computeApexSNR(mt) < chrom_peak_snr_)
      {
        snr_ok = false;
      }
    }
    if (pw_ok && snr_ok)
    {
      mt.updateSmoothedMaxRT();
      if (pw_filtering_ != "fixed")
      { // FWHM not yet estimated above; do it now so downstream code can use it
        mt.estimateFWHM(true);
      }
#ifdef _OPENMP
#pragma omp critical (OPENMS_ElutionPeakDetection_mtraces)
#endif
      {
        single_mtraces.push_back(mt);
      }
    }
  }
  else if (maxes.empty())
  {
    return;
  }
  else // split mt to sub-traces
  {
    MassTrace::const_iterator cp_it = mt.begin();
    Size last_idx(0);
    // add last data point as last minimum (to grep the last chunk of the MT)
    mins.push_back(mt.getSize() - 1);
    for (Size min_idx = 0; min_idx < mins.size(); ++min_idx)
    {
      // *********************************************************************
      // Step 3.1: Create new mass trace (sub-trace between cp_it and split point)
      // *********************************************************************
      std::vector<PeakType> tmp_mt;
      std::vector<double> smoothed_tmp;
      while (last_idx <= mins[min_idx])
      {
        tmp_mt.push_back(*cp_it);
        smoothed_tmp.push_back(mt.getSmoothedIntensities()[last_idx]);
        ++cp_it;
        ++last_idx;
      }
      // Create new mass trace, copy smoothed intensities
      MassTrace new_mt(tmp_mt);
      new_mt.setSmoothedIntensities(smoothed_tmp);
      // copy ion mobility centroid and peak fwhm to split traces
      new_mt.setCentroidIM(mt.getCentroidIM());
      new_mt.fwhm_mz_avg = mt.fwhm_mz_avg;
      new_mt.fwhm_im_avg = mt.fwhm_im_avg;
      // check filter criteria
      bool pw_ok = true;
      bool snr_ok = true;
      // *********************************************************************
      // Step 3.2: check mass trace length criteria (if fixed filter is enabled)
      // *********************************************************************
      if (pw_filtering_ == "fixed")
      {
        double act_fwhm(new_mt.estimateFWHM(true));
        if (act_fwhm < min_fwhm_ || act_fwhm > max_fwhm_)
        {
          pw_ok = false;
        }
      }
      // *********************************************************************
      // Step 3.3: check mass trace signal to noise filter criteria
      // *********************************************************************
      if (mt_snr_filtering_)
      {
        // NOTE(review): SNR is computed on the full parent trace 'mt', not on
        // the sub-trace 'new_mt' — confirm this is intended (Step 3.2 above
        // operates on the sub-trace).
        if (computeApexSNR(mt) < chrom_peak_snr_)
        {
          snr_ok = false;
        }
      }
      if (pw_ok && snr_ok)
      {
        // set label of sub-trace
        new_mt.setLabel(mt.getLabel() + "." + String(min_idx + 1));
        new_mt.updateSmoothedMaxRT();
        new_mt.updateWeightedMeanMZ();
        new_mt.updateWeightedMZsd();
        new_mt.setQuantMethod(mt.getQuantMethod());
        if (pw_filtering_ != "fixed")
        {
          new_mt.estimateFWHM(true);
        }
#ifdef _OPENMP
#pragma omp critical (OPENMS_ElutionPeakDetection_mtraces)
#endif
        {
          single_mtraces.push_back(new_mt);
        }
      }
    }
  }
  return;
}
/// Smooths the intensities of a mass trace with a Savitzky-Golay filter
/// (order 2, frame length >= 3) and stores the result in the trace via
/// setSmoothedIntensities(); traces shorter than 3 points get their raw
/// intensities copied unchanged.
/// @param mt mass trace to smooth (modified in place)
/// @param win_size smoothing window in scans (clamped to a minimum of 3)
void ElutionPeakDetection::smoothData(MassTrace& mt, int win_size) const
{
  // alternative smoothing using SavitzkyGolay
  // looking at the unit test, this method gives better fits than lowess smoothing
  // reference paper uses lowess smoothing
  // Handle traces with fewer than 3 points - Savitzky-Golay requires minimum frame length of 3
  if (mt.getSize() < 3)
  {
    std::vector<double> smoothed;
    smoothed.reserve(mt.getSize());
    for (Size i = 0; i < mt.getSize(); ++i)
    {
      smoothed.push_back(mt[i].getIntensity());
    }
    mt.setSmoothedIntensities(smoothed);
    return;
  }
  // wrap the trace as (RT, intensity) spectrum so the SG filter can run on it
  MSSpectrum spectrum;
  for (Size i = 0; i != mt.getSize(); ++i)
  {
    spectrum.push_back(Peak1D(mt[i].getRT(), mt[i].getIntensity()));
  }
  SavitzkyGolayFilter sg;
  Param param;
  param.setValue("polynomial_order", 2);
  param.setValue("frame_length", std::max(3, win_size)); // frame length must be at least polynomial_order+1, otherwise SG will fail
  sg.setParameters(param);
  sg.filter(spectrum);
  // copy the filtered intensities back into the mass trace
  MSSpectrum::iterator iter = spectrum.begin();
  std::vector<double> smoothed_intensities;
  for (; iter != spectrum.end(); ++iter)
  {
    smoothed_intensities.push_back(iter->getIntensity());
  }
  mt.setSmoothedIntensities(smoothed_intensities);
  //alternative end
  // std::cout << "win_size elution: " << scan_time << " " << win_size << '\n';
  // if there is no previous FWHM estimation... do it now
  //    if (win_size == 0)
  //    {
  //        mt.estimateFWHM(false); // estimate FWHM
  //        win_size = mt.getFWHMScansNum();
  //    }
  // use one global window size for all mass traces to smooth
  //  std::vector<double> rts, ints;
  //
  //  for (MassTrace::const_iterator c_it = mt.begin(); c_it != mt.end(); ++c_it)
  //  {
  //      rts.push_back(c_it->getRT());
  //      ints.push_back(c_it->getIntensity());
  //  }
  //  LowessSmoothing lowess_smooth;
  //  Param lowess_params;
  //  lowess_params.setValue("window_size", win_size);
  //  lowess_smooth.setParameters(lowess_params);
  //  std::vector<double> smoothed_data;
  //  lowess_smooth.smoothData(rts, ints, smoothed_data);
  //  mt.setSmoothedIntensities(smoothed_data);
}
/// Caches the current parameter values in member variables so the hot
/// detection loops don't have to query the Param object repeatedly.
void ElutionPeakDetection::updateMembers_()
{
  chrom_fwhm_ = static_cast<double>(param_.getValue("chrom_fwhm"));
  chrom_peak_snr_ = static_cast<double>(param_.getValue("chrom_peak_snr"));
  min_fwhm_ = static_cast<double>(param_.getValue("min_fwhm"));
  max_fwhm_ = static_cast<double>(param_.getValue("max_fwhm"));
  pw_filtering_ = param_.getValue("width_filtering").toString();
  mt_snr_filtering_ = param_.getValue("masstrace_snr_filtering").toBool();
}
} //namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/FeatureFinderAlgorithmPickedHelperStructs.cpp | .cpp | 6,950 | 241 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg$
// $Authors: Stephan Aiche $
// --------------------------------------------------------------------------
#include <OpenMS/KERNEL/Peak1D.h>
#include <OpenMS/FEATUREFINDER/FeatureFinderAlgorithmPickedHelperStructs.h>
namespace OpenMS
{
/// Constructs an isotope pattern with 'size' slots; peak indices start at -1
/// (meaning "no peak assigned"), all other arrays are value-initialized.
FeatureFinderAlgorithmPickedHelperStructs::IsotopePattern::IsotopePattern(Size size) :
  peak(size, -1),
  spectrum(size),
  intensity(size),
  mz_score(size),
  theoretical_mz(size)
{
}
/// Number of isotopes in the theoretical pattern.
Size FeatureFinderAlgorithmPickedHelperStructs::TheoreticalIsotopePattern::size() const
{
  return intensity.size();
}
/// Orders seeds by ascending intensity.
bool FeatureFinderAlgorithmPickedHelperStructs::Seed::operator<(const Seed& rhs) const
{
  return intensity < rhs.intensity;
}
/// Builds the convex hull of this mass trace from the (RT, m/z) coordinates
/// of all its peaks.
ConvexHull2D FeatureFinderAlgorithmPickedHelperStructs::MassTrace::getConvexhull() const
{
  ConvexHull2D::PointArrayType hull_points(peaks.size());
  Size idx = 0;
  for (const auto& peak : peaks)
  {
    hull_points[idx][0] = peak.first;          // RT
    hull_points[idx][1] = peak.second->getMZ(); // m/z
    ++idx;
  }
  ConvexHull2D hull;
  hull.addPoints(hull_points);
  return hull;
}
/// Recomputes the most intense peak of the trace (max_rt / max_peak);
/// a no-op for traces without peaks.
void FeatureFinderAlgorithmPickedHelperStructs::MassTrace::updateMaximum()
{
  if (peaks.empty())
  {
    return;
  }
  max_rt = peaks.front().first;
  max_peak = peaks.front().second;
  for (const auto& candidate : peaks)
  {
    if (candidate.second->getIntensity() > max_peak->getIntensity())
    {
      max_rt = candidate.first;
      max_peak = candidate.second;
    }
  }
}
/// Intensity-weighted average m/z over all peaks of the trace.
/// @return weighted mean m/z, or 0.0 if the trace is empty or has only
///         zero-intensity peaks (previously this divided 0/0 and returned NaN)
double FeatureFinderAlgorithmPickedHelperStructs::MassTrace::getAvgMZ() const
{
  double weighted_sum = 0.0;
  double total_intensity = 0.0;
  for (Size i = 0; i < peaks.size(); ++i)
  {
    weighted_sum += peaks[i].second->getMZ() * peaks[i].second->getIntensity();
    total_intensity += peaks[i].second->getIntensity();
  }
  if (total_intensity <= 0.0)
  { // guard against division by zero (NaN) for degenerate traces
    return 0.0;
  }
  return weighted_sum / total_intensity;
}
/// A mass trace is considered valid if it contains at least three peaks.
bool FeatureFinderAlgorithmPickedHelperStructs::MassTrace::isValid() const
{
  return peaks.size() >= 3;
}
/// Default constructor; the index of the maximum trace starts at 0.
FeatureFinderAlgorithmPickedHelperStructs::MassTraces::MassTraces() :
  max_trace(0)
{
}
/// Total number of peaks summed over all contained mass traces.
Size FeatureFinderAlgorithmPickedHelperStructs::MassTraces::getPeakCount() const
{
  Size total = 0;
  for (const auto& trace : *this)
  {
    total += trace.peaks.size();
  }
  return total;
}
/// Checks whether the trace collection is usable: at least two traces must
/// exist and one of them must still match the seed m/z within the tolerance.
bool FeatureFinderAlgorithmPickedHelperStructs::MassTraces::isValid(double seed_mz, double trace_tolerance)
{
  // too few traces found
  if (this->size() < 2)
  {
    return false;
  }
  // the seed trace must not have been removed
  for (const auto& trace : *this)
  {
    if (std::fabs(seed_mz - trace.getAvgMZ()) <= trace_tolerance)
    {
      return true;
    }
  }
  return false;
}
/// Index of the trace with the highest theoretical isotope intensity.
/// @throws Exception::Precondition if no traces are present
Size FeatureFinderAlgorithmPickedHelperStructs::MassTraces::getTheoreticalmaxPosition() const
{
  if (this->empty())
  {
    throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "There must be at least one trace to determine the theoretical maximum trace!");
  }
  Size best_idx = 0;
  for (Size i = 1; i < this->size(); ++i)
  {
    if (this->at(i).theoretical_int > this->at(best_idx).theoretical_int)
    {
      best_idx = i;
    }
  }
  return best_idx;
}
/// Recomputes 'baseline' as the minimum raw peak intensity over all traces.
/// If there are no traces, or the traces contain no peaks at all, the
/// baseline is set to 0.0 (previously a non-empty collection without peaks
/// left 'baseline' untouched, i.e. stale).
void FeatureFinderAlgorithmPickedHelperStructs::MassTraces::updateBaseline()
{
  bool found = false;
  double min_intensity = 0.0;
  for (Size i = 0; i < this->size(); ++i)
  {
    for (Size j = 0; j < this->at(i).peaks.size(); ++j)
    {
      const double intensity = this->at(i).peaks[j].second->getIntensity();
      if (!found || intensity < min_intensity)
      {
        min_intensity = intensity;
        found = true;
      }
    }
  }
  // 0.0 when no peaks exist anywhere (also covers the empty-collection case)
  baseline = min_intensity;
}
/// Smallest and largest peak RT over all traces.
/// @return pair (min RT, max RT)
/// @throws Exception::Precondition if no traces are present
std::pair<double, double> FeatureFinderAlgorithmPickedHelperStructs::MassTraces::getRTBounds() const
{
  if (this->empty())
  {
    throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "There must be at least one trace to determine the RT boundaries!");
  }
  double min_rt = std::numeric_limits<double>::max();
  double max_rt = -std::numeric_limits<double>::max();
  for (const auto& trace : *this)
  {
    for (const auto& peak : trace.peaks)
    {
      const double rt = peak.first;
      if (rt < min_rt)
      {
        min_rt = rt;
      }
      if (rt > max_rt)
      {
        max_rt = rt;
      }
    }
  }
  return std::make_pair(min_rt, max_rt);
}
/// Builds an RT-sorted intensity profile over all traces: for each distinct
/// peak RT the intensities of all traces at that RT are summed. The result is
/// appended to 'intensity_profile' as (RT, summed intensity) pairs.
void FeatureFinderAlgorithmPickedHelperStructs::MassTraces::computeIntensityProfile(std::list<std::pair<double, double> >& intensity_profile) const
{
  if (this->empty())
  { // guard: the code below unconditionally dereferences this->begin()
    return;
  }
  // typedefs for better readability
  typedef MassTraces::const_iterator TTraceIterator;
  typedef std::list<std::pair<double, double> >::iterator TProfileIterator;
  typedef std::vector<std::pair<double, const Peak1D*> > TMassTracePeakList;
  typedef TMassTracePeakList::const_iterator TTracePeakIterator;
  TTraceIterator trace_it = this->begin();
  // we add the first trace without check, as the profile is currently empty
  for (TTracePeakIterator trace_peak_it = trace_it->peaks.begin(); trace_peak_it != trace_it->peaks.end(); ++trace_peak_it)
  {
    intensity_profile.emplace_back(trace_peak_it->first, trace_peak_it->second->getIntensity());
  }
  ++trace_it;
  // accumulate intensities over all the remaining mass traces; the profile and
  // each trace's peaks are both RT-ordered, so a single merge pass suffices
  for (; trace_it != this->end(); ++trace_it)
  {
    TProfileIterator profile_it = intensity_profile.begin();
    TTracePeakIterator trace_peak_it = trace_it->peaks.begin();
    while (trace_peak_it != trace_it->peaks.end())
    {
      // append .. if profile has already ended
      if (profile_it == intensity_profile.end())
      {
        intensity_profile.emplace_back(trace_peak_it->first, trace_peak_it->second->getIntensity());
        ++trace_peak_it;
      }
      // prepend
      else if (profile_it->first > trace_peak_it->first)
      {
        intensity_profile.insert(profile_it, std::make_pair(trace_peak_it->first, trace_peak_it->second->getIntensity()));
        ++trace_peak_it;
      }
      // proceed
      else if (profile_it->first < trace_peak_it->first)
      {
        ++profile_it;
      }
      // merge
      else if (profile_it->first == trace_peak_it->first)
      {
        profile_it->second += trace_peak_it->second->getIntensity();
        ++trace_peak_it;
        ++profile_it;
      }
    }
  }
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/LevMarqFitter1D.cpp | .cpp | 3,073 | 86 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/LevMarqFitter1D.h>
#include <unsupported/Eigen/NonLinearOptimization>
#include <Eigen/Core>
#include <fstream>
namespace OpenMS
{
/// Internal adapter to wrap GenericFunctor for Eigen's LM solver (const version).
/// Holds a non-owning reference; the wrapped functor must outlive the adapter.
class GenericFunctorConstEigenAdapter
{
public:
  /// Wrap @p functor (no ownership taken).
  /// 'explicit' prevents accidental implicit conversion from GenericFunctor.
  explicit GenericFunctorConstEigenAdapter(const LevMarqFitter1D::GenericFunctor& functor)
    : functor_(functor)
  {
  }

  /// number of model parameters (p)
  int inputs() const { return functor_.inputs(); }
  /// number of data points / residuals (N)
  int values() const { return functor_.values(); }

  /// evaluate residuals at @p x into @p fvec (delegates to the wrapped functor's raw-pointer interface)
  int operator()(const Eigen::VectorXd& x, Eigen::VectorXd& fvec) const
  {
    return functor_(x.data(), fvec.data());
  }

  /// evaluate the Jacobian at @p x into @p J (wrapped functor fills the raw buffer)
  int df(const Eigen::VectorXd& x, Eigen::MatrixXd& J) const
  {
    return functor_.df(x.data(), J.data());
  }

private:
  const LevMarqFitter1D::GenericFunctor& functor_; ///< non-owning reference to the wrapped functor
};
void LevMarqFitter1D::optimize_(std::vector<double>& x_init, GenericFunctor& functor) const
{
//TODO: this function is copy&paste from TraceFitter.h. Make a generic wrapper for
//LM optimization
int data_count = functor.values();
int num_params = functor.inputs();
// LM always expects N>=p, cause Jacobian be rectangular M x N with M>=N
if (data_count < num_params)
{
throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "UnableToFit-FinalSet", "Skipping feature, we always expects N>=p");
}
// Create Eigen vector and copy data from std::vector
Eigen::VectorXd x_eigen = Eigen::Map<Eigen::VectorXd>(x_init.data(), x_init.size());
// Create adapter to wrap our functor for Eigen's LM solver
GenericFunctorConstEigenAdapter adapter(functor);
Eigen::LevenbergMarquardt<GenericFunctorConstEigenAdapter> lmSolver(adapter);
lmSolver.parameters.maxfev = max_iteration_;
Eigen::LevenbergMarquardtSpace::Status status = lmSolver.minimize(x_eigen);
//the states are poorly documented. after checking the source, we believe that
//all states except NotStarted, Running and ImproperInputParameters are good
//termination states.
if (status <= Eigen::LevenbergMarquardtSpace::ImproperInputParameters)
{
throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "UnableToFit-FinalSet", "Could not fit the gaussian to the data: Error " + String(status));
}
// Copy results back to x_init
std::copy(x_eigen.data(), x_eigen.data() + x_eigen.size(), x_init.begin());
}
// Refresh cached parameter values after the Param object changed.
void LevMarqFitter1D::updateMembers_()
{
  // let the base class update its own cached members first
  Fitter1D::updateMembers_();
  // cache the LM iteration limit used by optimize_()
  max_iteration_ = this->param_.getValue("max_iteration");
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/MultiplexFilteredMSExperiment.cpp | .cpp | 3,933 | 140 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Lars Nilse $
// $Authors: Lars Nilse $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/KERNEL/FeatureHandle.h>
#include <OpenMS/KERNEL/ConsensusMap.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FEATUREFINDER/MultiplexSatelliteCentroided.h>
#include <OpenMS/FEATUREFINDER/MultiplexFilteredPeak.h>
#include <OpenMS/FEATUREFINDER/MultiplexFilteredMSExperiment.h>
#include <vector>
#include <algorithm>
#include <iostream>
using namespace std;
namespace OpenMS
{
// Default constructor: starts with an empty list of filter results.
MultiplexFilteredMSExperiment::MultiplexFilteredMSExperiment() = default;
// Append a peak that passed the multiplex filters to the result list.
void MultiplexFilteredMSExperiment::addPeak(const MultiplexFilteredPeak& peak)
{
  result_.push_back(peak);
}
// Return the i-th filtered peak.
// Note: returns a copy; no bounds checking is performed on i.
MultiplexFilteredPeak MultiplexFilteredMSExperiment::getPeak(size_t i) const
{
  return result_[i];
}
// m/z position of the i-th filtered peak (no bounds checking).
double MultiplexFilteredMSExperiment::getMZ(size_t i) const
{
  return result_[i].getMZ();
}
// Collect the m/z positions of all filtered peaks, in storage order.
vector<double> MultiplexFilteredMSExperiment::getMZ() const
{
  vector<double> mz;
  mz.reserve(result_.size());
  for (const MultiplexFilteredPeak& peak : result_)
  {
    mz.push_back(peak.getMZ());
  }
  return mz;
}
// RT position of the i-th filtered peak (no bounds checking).
double MultiplexFilteredMSExperiment::getRT(size_t i) const
{
  return result_[i].getRT();
}
// Collect the RT positions of all filtered peaks, in storage order.
vector<double> MultiplexFilteredMSExperiment::getRT() const
{
  vector<double> rt;
  rt.reserve(result_.size());
  for (const MultiplexFilteredPeak& peak : result_)
  {
    rt.push_back(peak.getRT());
  }
  return rt;
}
// Number of peaks that passed the multiplex filters.
size_t MultiplexFilteredMSExperiment::size() const
{
  return result_.size();
}
// Write the filter results as a consensus map to @p debug_out for visual
// inspection. Each filtered peak becomes one ConsensusFeature; its satellite
// data points are attached as FeatureHandles. Intensity/charge/quality values
// on the consensus features are dummy placeholders (1.0 / 1 / 1.0) -- this
// output is for debugging only, not for quantification.
void MultiplexFilteredMSExperiment::writeDebugOutput(const MSExperiment& exp_picked, const String& debug_out) const
{
  ConsensusMap map;
  // loop over peaks
  for (const MultiplexFilteredPeak& peak : result_)
  {
    ConsensusFeature consensus;
    consensus.setRT(peak.getRT());
    consensus.setMZ(peak.getMZ());
    consensus.setIntensity(1.0);
    consensus.setCharge(1);
    consensus.setQuality(1.0);
    std::multimap<size_t, MultiplexSatelliteCentroided > satellites = peak.getSatellites();
    int count = 0;
    // loop over satellites
    for (const auto &it_satellite : satellites)
    {
      // find indices of the peak
      size_t rt_idx = (it_satellite.second).getRTidx();
      size_t mz_idx = (it_satellite.second).getMZidx();
      // find peak itself (advance into the picked experiment by index)
      MSExperiment::ConstIterator it_rt = exp_picked.begin();
      std::advance(it_rt, rt_idx);
      MSSpectrum::ConstIterator it_mz = it_rt->begin();
      std::advance(it_mz, mz_idx);
      FeatureHandle feature_handle;
      feature_handle.setRT(it_rt->getRT());
      feature_handle.setMZ(it_mz->getMZ());
      feature_handle.setIntensity(1.0);
      // NOTE(review): the multimap key (presumably a pattern/peptide index) is
      // stored in the charge slot here, apparently to make it visible in the
      // debug output -- confirm against MultiplexFilteredPeak::getSatellites().
      feature_handle.setCharge(it_satellite.first);
      feature_handle.setMapIndex(count);
      consensus.insert(feature_handle);
      map.getColumnHeaders()[count].size++;
      // give the maps some names (irrelevant for the debug output)
      ConsensusMap::ColumnHeader& col = map.getColumnHeaders()[count];
      std::stringstream ss;
      ss << "satellite_" << count;
      col.label = ss.str();
      col.filename = "satellites";
      ++count;
    }
    map.push_back(consensus);
  }
  map.sortByPosition();
  map.applyMemberFunction(&UniqueIdInterface::setUniqueId);
  map.setExperimentType("label-free");
  FileHandler().storeConsensusFeatures(debug_out, map);
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/FeatureFinderAlgorithmMetaboIdent.cpp | .cpp | 36,585 | 915 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg, Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/FeatureFinderAlgorithmMetaboIdent.h>
#include <OpenMS/FEATUREFINDER/EGHTraceFitter.h>
#include <OpenMS/FEATUREFINDER/ElutionModelFitter.h>
#include <OpenMS/FEATUREFINDER/GaussTraceFitter.h>
#include <OpenMS/FEATUREFINDER/TraceFitter.h>
#include <OpenMS/ANALYSIS/OPENSWATH/ChromatogramExtractor.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/SimpleOpenMSSpectraAccessFactory.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/CoarseIsotopePatternGenerator.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/IsotopeDistribution.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/IONMOBILITY/IMTypes.h>
#include <OpenMS/IONMOBILITY/IMDataConverter.h>
#include <OpenMS/PROCESSING/FEATURE/FeatureOverlapFilter.h>
// C++ standard library
#include <algorithm>
#include <cstdlib>   // std::abs for integral types (calculateMZ_)
#include <fstream>
#include <numeric>
#include <random>
#include <vector>
#ifdef _OPENMP
#include <omp.h>
#endif
using namespace std;
namespace OpenMS
{
// Constructor: registers all algorithm parameters (with defaults, restrictions
// and descriptions) on the DefaultParamHandler base and syncs them into param_.
FeatureFinderAlgorithmMetaboIdent::FeatureFinderAlgorithmMetaboIdent() :
  DefaultParamHandler("FeatureFinderAlgorithmMetaboIdent")
{
  defaults_.setValue("candidates_out", "", "Optional output file: Feature candidates (before filtering and model fitting).", {"output file"});

  // chromatogram extraction ("extract:") parameters:
  defaults_.setValue("extract:mz_window", 10.0, "m/z window size for chromatogram extraction (unit: ppm if 1 or greater, else Da/Th)");
  defaults_.setMinFloat("extract:mz_window", 0.0);

  defaults_.setValue(
    "extract:rt_window",
    0.0,
    "RT window size (in sec.) for chromatogram extraction. If set, this parameter takes precedence over 'extract:rt_quantile'.",
    vector<string>{"advanced"});
  defaults_.setMinFloat("extract:rt_window", 0.0);

  defaults_.setValue(
    "extract:im_window",
    0.0,
    "Ion mobility window size for extraction. If set to 0, no IM filtering is performed.",
    vector<string>{"advanced"});
  defaults_.setMinFloat("extract:im_window", 0.0);

  defaults_.setValue("extract:n_isotopes", 2, "Number of isotopes to include in each peptide assay.");
  defaults_.setMinInt("extract:n_isotopes", 2);
  defaults_.setValue(
    "extract:isotope_pmin",
    0.0,
    "Minimum probability for an isotope to be included in the assay for a peptide. If set, this parameter takes precedence over 'extract:n_isotopes'.",
    vector<string>{"advanced"});
  defaults_.setMinFloat("extract:isotope_pmin", 0.0);
  defaults_.setMaxFloat("extract:isotope_pmin", 1.0);

  defaults_.setSectionDescription("extract", "Parameters for ion chromatogram extraction");

  // peak detection ("detect:") parameters:
  defaults_.setValue("detect:peak_width", 60.0, "Expected elution peak width in seconds, for smoothing (Gauss filter). Also determines the RT extration window, unless set explicitly via 'extract:rt_window'.");
  defaults_.setMinFloat("detect:peak_width", 0.0);
  defaults_.setValue(
    "detect:min_peak_width",
    0.2,
    "Minimum elution peak width. Absolute value in seconds if 1 or greater, else relative to 'peak_width'.",
    vector<string>{"advanced"});
  defaults_.setMinFloat("detect:min_peak_width", 0.0);

  defaults_.setValue(
    "detect:signal_to_noise",
    0.8,
    "Signal-to-noise threshold for OpenSWATH feature detection",
    vector<string>{"advanced"});
  defaults_.setMinFloat("detect:signal_to_noise", 0.1);

  defaults_.setSectionDescription("detect", "Parameters for detecting features in extracted ion chromatograms");

  // parameters for model fitting (via ElutionModelFitter):
  defaults_.setValue("model:type", "symmetric", "Type of elution model to fit to features");
  defaults_.setValidStrings("model:type", {"symmetric", "asymmetric", "none"});
  defaults_.insert("model:", ElutionModelFitter().getParameters()); // copy parameters
  defaults_.remove("model:asymmetric");

  defaults_.setSectionDescription("model", "Parameters for fitting elution models to features");

  // EMG scoring parameters (propagated to sub-algorithms in runSingleGroup_):
  defaults_.setValue("EMGScoring:max_iteration", 100, "Maximum number of iterations for EMG fitting.");
  defaults_.setMinInt("EMGScoring:max_iteration", 1);
  defaults_.setValue("EMGScoring:init_mom", "true", "Alternative initial parameters for fitting through method of moments.");
  defaults_.setValidStrings("EMGScoring:init_mom", {"true","false"});

  defaults_.setSectionDescription("EMGScoring", "Parameters for fitting exp. mod. Gaussians to mass traces.");

  defaults_.setValue("debug", 0, "Debug level for feature detection.", vector<string>{"advanced"});
  defaults_.setMinInt("debug", 0);

  // FAIMS-specific parameters (see run() for the multi-CV handling):
  defaults_.setValue("faims:merge_features", "true",
                     "For FAIMS data with multiple compensation voltages: Merge features that represent "
                     "the same analyte detected at different CVs. Features are merged if they have the same "
                     "charge and are within 5 seconds RT and 0.05 Da m/z. Intensities are summed.");
  defaults_.setValidStrings("faims:merge_features", {"true", "false"});
  defaults_.setSectionDescription("faims", "Parameters for FAIMS data processing");

  defaultsToParam_();
}
// Cache frequently used parameter values in member variables after any
// parameter change.
void FeatureFinderAlgorithmMetaboIdent::updateMembers_()
{
  // peak detection settings:
  peak_width_ = param_.getValue("detect:peak_width");
  min_peak_width_ = param_.getValue("detect:min_peak_width");
  signal_to_noise_ = param_.getValue("detect:signal_to_noise");

  // chromatogram extraction settings:
  rt_window_ = param_.getValue("extract:rt_window");
  if (rt_window_ == 0.0)
  {
    // calculate RT window based on other parameters:
    rt_window_ = 4 * peak_width_;
    OPENMS_LOG_INFO << "RT window size calculated as " << rt_window_
                    << " seconds." << endl;
  }

  mz_window_ = param_.getValue("extract:mz_window");
  mz_window_ppm_ = mz_window_ >= 1; // >= 1 means the unit is ppm, otherwise Da/Th
  im_window_ = param_.getValue("extract:im_window");

  isotope_pmin_ = param_.getValue("extract:isotope_pmin");
  if (isotope_pmin_ > 0.0)
  {
    // a probability cut-off is given: extract up to 10 isotopes and trim later
    n_isotopes_ = 10;
  }
  else
  {
    n_isotopes_ = (int)param_.getValue("extract:n_isotopes");
  }
  iso_gen_.setMaxIsotope(n_isotopes_);

  elution_model_ = (string)param_.getValue("model:type");
  // debug
  debug_level_ = param_.getValue("debug");
  candidates_out_ = (string)param_.getValue("candidates_out");
}
// Run targeted feature detection for all compounds in @p metaboIdentTable on
// the MS data previously set via setMSData(). For FAIMS data the input is
// split by compensation voltage (CV), each CV group is processed by a
// separate algorithm instance, and the per-group results (features,
// chromatograms, transformations, IDs) are combined and CV-annotated.
void FeatureFinderAlgorithmMetaboIdent::run(const vector<FeatureFinderAlgorithmMetaboIdent::FeatureFinderMetaboIdentCompound>& metaboIdentTable,
                                            FeatureMap& features,
                                            const String& spectra_file)
{
  // Check for FAIMS data
  // NOTE: ms_data_ is moved out here; for non-FAIMS input it is moved back below.
  auto faims_groups = IMDataConverter::splitByFAIMSCV(std::move(ms_data_));
  const bool has_faims = faims_groups.size() > 1 || !std::isnan(faims_groups[0].first);

  if (!has_faims)
  {
    // Non-FAIMS data: restore ms_data_ and run directly
    ms_data_ = std::move(faims_groups[0].second);
    runSingleGroup_(metaboIdentTable, features, spectra_file);
    return;
  }

  // FAIMS data: process each CV group separately
  OPENMS_LOG_INFO << "FAIMS data detected with " << faims_groups.size() << " compensation voltage(s)." << endl;

  // Clear combined outputs
  features.clear(true);
  chrom_data_.clear(true);
  library_.clear(true);
  trafo_ = TransformationDescription();
  n_shared_ = 0;

  bool first_group = true;
  for (auto& [group_cv, faims_group] : faims_groups)
  {
    OPENMS_LOG_INFO << "Processing FAIMS CV group: " << group_cv << " V ("
                    << faims_group.size() << " spectra)" << endl;

    // Create algorithm instance for this group (use same parameters)
    FeatureFinderAlgorithmMetaboIdent ff_group;
    ff_group.setParameters(this->getParameters());
    ff_group.setMSData(std::move(faims_group));

    FeatureMap features_cv;
    ff_group.runSingleGroup_(metaboIdentTable, features_cv, spectra_file);

    // Annotate features with FAIMS CV and add to combined results
    for (auto& feat : features_cv)
    {
      feat.setMetaValue(Constants::UserParam::FAIMS_CV, group_cv);
      features.push_back(feat);
    }

    // Copy ProteinIdentifications from first group
    if (first_group)
    {
      features.setProteinIdentifications(features_cv.getProteinIdentifications());
    }

    // Copy UnassignedPeptideIdentifications with FAIMS annotation
    for (auto& pep_id : features_cv.getUnassignedPeptideIdentifications())
    {
      pep_id.setMetaValue(Constants::UserParam::FAIMS_CV, group_cv);
      features.getUnassignedPeptideIdentifications().push_back(std::move(pep_id));
    }

    // Combine chromatograms
    for (auto& chrom : ff_group.getChromatograms().getChromatograms())
    {
      chrom.setMetaValue(Constants::UserParam::FAIMS_CV, group_cv);
      chrom_data_.addChromatogram(std::move(chrom));
    }

    // Combine transformations (append this group's data points to ours)
    TransformationDescription::DataPoints points = trafo_.getDataPoints();
    for (const auto& point : ff_group.getTransformations().getDataPoints())
    {
      points.push_back(point);
    }
    trafo_.setDataPoints(points);

    n_shared_ += ff_group.getNShared();
    first_group = false;
  }

  // Warn about library output for FAIMS data
  OPENMS_LOG_WARN << "Warning: Library output is not available for multi-FAIMS data. "
                  << "Each FAIMS CV group has its own assay library." << endl;

  OPENMS_LOG_INFO << "Combined " << features.size() << " features from all FAIMS CV groups." << endl;

  // Optionally merge features from different FAIMS CV groups that represent the same analyte
  if (param_.getValue("faims:merge_features").toBool())
  {
    Size before_merge = features.size();
    // RT tolerance 5 s, m/z tolerance 0.05 Da (see parameter description)
    FeatureOverlapFilter::mergeFAIMSFeatures(features, 5.0, 0.05);
    if (features.size() < before_merge)
    {
      OPENMS_LOG_INFO << "Merged FAIMS features: " << before_merge << " -> " << features.size()
                      << " (" << (before_merge - features.size()) << " features merged)" << endl;
    }
  }

  // Set primary MS run path
  features.setPrimaryMSRunPath({spectra_file});
  features.ensureUniqueId();
}
// Run the actual targeted feature detection on a single (non-FAIMS or
// single-CV) data set: build the assay library from the targets, extract ion
// chromatograms, detect peak candidates via OpenSWATH, annotate/filter the
// candidates, resolve overlaps, and optionally fit elution models.
void FeatureFinderAlgorithmMetaboIdent::runSingleGroup_(const vector<FeatureFinderAlgorithmMetaboIdent::FeatureFinderMetaboIdentCompound>& metaboIdentTable,
                                                        FeatureMap& features,
                                                        const String& spectra_file)
{
  // if proper mzML is annotated in MS data use this as reference. Otherwise, overwrite with spectra_file information.
  features.setPrimaryMSRunPath({spectra_file}, ms_data_);

  if (ms_data_.empty())
  {
    OPENMS_LOG_WARN << "Warning: No MS1 scans in:" << spectra_file << endl;
    return;
  }

  // build the internal assay library from the target table:
  for (const auto& c : metaboIdentTable)
  {
    addTargetToLibrary_(c.getName(), c.getFormula(), c.getMass(), c.getCharges(), c.getRTs(), c.getRTRanges(),
                        c.getIsotopeDistribution(), c.getIonMobilities());
  }

  // initialize algorithm classes needed later:
  Param params = feat_finder_.getParameters();
  params.setValue("stop_report_after_feature", -1); // return all features
  params.setValue("EMGScoring:max_iteration", param_.getValue("EMGScoring:max_iteration")); // propagate setting to sub algorithms
  params.setValue("EMGScoring:init_mom", param_.getValue("EMGScoring:init_mom")); // propagate setting to sub algorithms
  params.setValue("Scores:use_rt_score", "false"); // RT may not be reliable
  params.setValue("Scores:use_ionseries_scores", "false"); // since FFID only uses MS1 spectra, this is useless
  params.setValue("Scores:use_ms2_isotope_scores", "false"); // since FFID only uses MS1 spectra, this is useless
  params.setValue("Scores:use_ms1_correlation", "false"); // this would be redundant to the "MS2" correlation and since
  // precursor transition = first product transition, additionally biased
  params.setValue("Scores:use_ms1_mi", "false"); // same as above. On MS1 level we basically only care about the "MS1 fullscan" scores
  //TODO for MS1 level scoring there is an additional parameter add_up_spectra with which we can add up spectra
  // around the apex, to complete isotopic envelopes (and therefore make this score more robust).
  params.setValue("write_convex_hull", "true"); // some parts of FFMId expect convex hulls

  if ((elution_model_ != "none") || (!candidates_out_.empty()))
  {
    params.setValue("Scores:use_elution_model_score", "false"); // TODO: test if this works for requantificiation
  }
  else // no elution model
  {
    params.setValue("Scores:use_elution_model_score", "true");
  }
  // 'min_peak_width' below 1 is interpreted as a fraction of 'peak_width':
  if (min_peak_width_ < 1.0)
  {
    min_peak_width_ *= peak_width_;
  }
  params.setValue("TransitionGroupPicker:PeakPickerChromatogram:gauss_width",
                  peak_width_);
  params.setValue("TransitionGroupPicker:min_peak_width", min_peak_width_);
  // disabling the signal-to-noise threshold (setting the parameter to zero)
  // totally breaks the OpenSWATH feature detection (no features found)!
  params.setValue("TransitionGroupPicker:PeakPickerChromatogram:signal_to_noise",
                  signal_to_noise_);
  params.setValue("TransitionGroupPicker:PeakPickerChromatogram:write_sn_log_messages", "false");
  params.setValue("TransitionGroupPicker:recalculate_peaks", "true");
  params.setValue("TransitionGroupPicker:PeakPickerChromatogram:peak_width", -1.0);
  params.setValue("TransitionGroupPicker:PeakPickerChromatogram:method",
                  "corrected");
  feat_finder_.setParameters(params);
  feat_finder_.setLogType(ProgressLogger::NONE);
  feat_finder_.setStrictFlag(false);

  //-------------------------------------------------------------
  // run feature detection
  //-------------------------------------------------------------

  // Check if data has ion mobility information
  IMFormat im_format = IMTypes::determineIMFormat(ms_data_);
  bool has_IM = (im_format == IMFormat::CONCATENATED || im_format == IMFormat::MULTIPLE_SPECTRA);
  if (has_IM && im_window_ > 0.0)
  {
    OPENMS_LOG_INFO << "Ion mobility data detected. Using IM window: " << im_window_ << endl;
  }

  OPENMS_LOG_INFO << "Extracting chromatograms..." << endl;
  ChromatogramExtractor extractor;
  // extractor.setLogType(ProgressLogger::NONE);
  vector<OpenSwath::ChromatogramPtr> chrom_temp;
  vector<ChromatogramExtractor::ExtractionCoordinates> coords;
  extractor.prepare_coordinates(chrom_temp, coords, library_,
                                numeric_limits<double>::quiet_NaN(), false);

  std::shared_ptr<PeakMap> shared = std::make_shared<PeakMap>(ms_data_);
  OpenSwath::SpectrumAccessPtr spec_temp =
    SimpleOpenMSSpectraFactory::getSpectrumAccessOpenMSPtr(shared);

  // IM-aware extraction only if the data has IM and a window was requested:
  if (has_IM && im_window_ > 0.0)
  {
    extractor.extractChromatograms(spec_temp, chrom_temp, coords, mz_window_,
                                   mz_window_ppm_, im_window_, "tophat");
  }
  else
  {
    extractor.extractChromatograms(spec_temp, chrom_temp, coords, mz_window_,
                                   mz_window_ppm_, "tophat");
  }
  extractor.return_chromatogram(chrom_temp, coords, library_, (*shared)[0],
                                chrom_data_.getChromatograms(), false);

  OPENMS_LOG_DEBUG << "Extracted " << chrom_data_.getNrChromatograms()
                   << " chromatogram(s)." << endl;

  OPENMS_LOG_INFO << "Detecting chromatographic peaks..." << endl;
  Logger::LogSinkGuard log_guard(getGlobalLogInfo(), cout); // suppress status output from OpenSWATH (exception-safe)
  feat_finder_.pickExperiment(chrom_data_, features, library_,
                              TransformationDescription(), ms_data_);
  OPENMS_LOG_INFO << "Found " << features.size()
                  << " feature candidates in total." << endl;
  ms_data_.reset(); // not needed anymore, free up the memory

  // complete feature annotation:
  annotateFeatures_(features);

  // write auxiliary output:
  // features.setProteinIdentifications(proteins);
  features.ensureUniqueId();
  if (!candidates_out_.empty()) // store feature candidates
  {
    sort(features.begin(), features.end(), feature_compare_);
    FileHandler().storeFeatures(candidates_out_, features);
  }

  selectFeaturesFromCandidates_(features);
  OPENMS_LOG_INFO << features.size()
                  << " features left after selection of best candidates." << endl;

  constexpr bool CHECK_TRACES_FOR_OVERLAP = true;

  // criterion used to select the best feature among overlapping ones (lower = better)
  auto FeatureComparator = [](const Feature& left, const Feature& right)
  {
    double left_rt_delta = std::abs(double(left.getMetaValue("rt_deviation")));
    double right_rt_delta = std::abs(double(right.getMetaValue("rt_deviation")));
    size_t left_intensity = left.getIntensity();
    size_t right_intensity = right.getIntensity();
    return std::tie(left_rt_delta, right_intensity) < std::tie(right_rt_delta, left_intensity); // Note: left and right intensity are swapped because here higher is better
  };

  // callback used to transfer information from an identical overlapping feature with different annotation to the representative one
  auto FeatureOverlapCallback = [](Feature& cluster_representative, Feature& overlap)
  {
    size_t best_intensity = cluster_representative.getIntensity();
    size_t overlap_intensity = overlap.getIntensity();

    if (overlap_intensity != best_intensity) return true; // early out: features are different

    // this part will nearly never be called (e.g., only completely identical features)
    // so it is ok to perform some slow operations like querying meta values
    double best_rt_delta = std::abs(double(cluster_representative.getMetaValue("rt_deviation")));
    double overlap_rt_delta = std::abs(double(overlap.getMetaValue("rt_deviation")));

    if (overlap_rt_delta == best_rt_delta)
    {
      double best_RT = cluster_representative.getRT();
      double overlap_RT = overlap.getRT();
      double best_MZ = cluster_representative.getMZ();
      double overlap_MZ = overlap.getMZ();
      // are the features the same? (@TODO: use "Math::approximatelyEqual"?)
      if ((overlap_MZ == best_MZ) && (overlap_RT == best_RT))
      {
        // update annotations:
        // @TODO: also adjust "formula" and "expected_rt"?
        String label = cluster_representative.getMetaValue("label");
        label += "/" + String(overlap.getMetaValue("label"));
        cluster_representative.setMetaValue("label", label);
        StringList alt_refs;
        if (cluster_representative.metaValueExists("alt_PeptideRef"))
        {
          alt_refs = cluster_representative.getMetaValue("alt_PeptideRef");
        }
        alt_refs.push_back(overlap.getMetaValue("PeptideRef"));
        cluster_representative.setMetaValue("alt_PeptideRef", alt_refs);
      }
    }

    // annotate which features were removed because of overlap with the representative feature
    String ref = String(overlap.getMetaValue("PeptideRef")) + " (RT " +
      String(float(overlap.getRT())) + ")";
    StringList overlap_refs = cluster_representative.getMetaValue("overlap_removed", StringList{});
    overlap_refs.push_back(std::move(ref));
    cluster_representative.setMetaValue("overlap_removed", std::move(overlap_refs)); // TODO: implement setMetaValue that takes DataValue as r-value reference &&

    return true;
  };

  FeatureOverlapFilter::filter(features, FeatureComparator, FeatureOverlapCallback, CHECK_TRACES_FOR_OVERLAP);

  std::stable_sort(features.begin(), features.end(), feature_compare_); // sort by ref and rt

  if (features.empty())
  {
    OPENMS_LOG_INFO << "No features left after filtering." << endl;
    return;
  }

  n_shared_ = addTargetAnnotations_(features);

  if (elution_model_ != "none")
  {
    ElutionModelFitter emf;
    Param emf_params = param_.copy("model:", true);
    emf_params.remove("type");
    emf_params.setValue("asymmetric",
                        (elution_model_ == "asymmetric") ? "true" : "false");
    emf.setParameters(emf_params);
    emf.fitElutionModels(features);
  }
  else if (!candidates_out_.empty()) // hulls not needed, remove them
  {
    for (Feature& feat : features)
    {
      for (Feature& sub : feat.getSubordinates())
      {
        sub.getConvexHulls().clear();
      }
    }
  }
  extractTransformations_(features);
}
/// Calculate mass-to-charge ratio from (neutral) mass and charge.
/// Adds 'charge' proton masses (protonation/deprotonation; 'charge' may be
/// negative for negative ion mode) and divides by the absolute charge.
/// Precondition: charge != 0 (callers skip invalid charge-0 entries).
double FeatureFinderAlgorithmMetaboIdent::calculateMZ_(double mass, Int charge) const
{
  // idiom fix: use the integral std::abs overload instead of fabs(charge),
  // which silently converted the integer charge to double
  return (mass + charge * Constants::PROTON_MASS_U) / std::abs(charge);
}
/// Add a target (from the input file) to the assay library.
/// Creates one library compound per (charge, RT) combination, computes or
/// takes over the isotope distribution, validates per-RT ion mobility and RT
/// range lists, and generates the corresponding transitions. Invalid targets
/// (no mass/formula, no RT, mismatched list sizes) are logged and skipped.
void FeatureFinderAlgorithmMetaboIdent::addTargetToLibrary_(const String& name, const String& formula,
                                                            double mass, const vector<Int>& charges,
                                                            const vector<double>& rts,
                                                            vector<double> rt_ranges,
                                                            const vector<double>& iso_distrib,
                                                            const vector<double>& ion_mobilities)
{
  if ((mass <= 0) && formula.empty())
  {
    OPENMS_LOG_ERROR << "Error: No mass or sum formula given for target '"
                     << name << "' - skipping this target." << endl;
    return;
  }
  if (rts.empty())
  {
    OPENMS_LOG_ERROR << "Error: No retention time (RT) given for target '"
                     << name << "' - skipping this target." << endl;
    return;
  }
  // @TODO: detect entries with same RT and m/z ("collisions")
  TargetedExperiment::Compound target;
  target.setMetaValue("name", name);
  target.molecular_formula = formula;
  EmpiricalFormula emp_formula(formula);
  bool mass_given = (mass > 0);
  if (!mass_given)
  {
    // no explicit mass: derive the monoisotopic mass from the sum formula
    mass = emp_formula.getMonoWeight();
  }
  target.theoretical_mass = mass;
  String target_id = name + "_m" + String(float(mass));

  // get isotope distribution for target:
  IsotopeDistribution iso_dist;
  Size n_isotopes = n_isotopes_;
  if (iso_distrib.empty() || (iso_distrib[0] == 0))
  {
    if (formula.empty())
    {
      OPENMS_LOG_ERROR << "Error: No sum formula given for target '" << name
                       << "'; cannot calculate isotope distribution"
                       << " - using estimation method for peptides." << endl;
      iso_dist = iso_gen_.estimateFromPeptideWeight(mass);
    }
    else
    {
      iso_dist = emp_formula.getIsotopeDistribution(iso_gen_);
    }
  }
  else
  {
    // user-supplied isotope distribution: take over up to n_isotopes entries
    n_isotopes = min(n_isotopes, iso_distrib.size());
    IsotopeDistribution::ContainerType probs;
    probs.reserve(n_isotopes);
    for (Size i = 0; i < n_isotopes; ++i)
    {
      probs.push_back(Peak1D(i, iso_distrib[i]));
    }
    iso_dist.set(probs);
  }
  if (isotope_pmin_ > 0.0)
  {
    // drop improbable isotopes at both ends of the distribution
    iso_dist.trimLeft(isotope_pmin_);
    iso_dist.trimRight(isotope_pmin_);
  }
  iso_dist.renormalize();

  // Prepare IM values: either empty (no filtering), one value (for all), or one per RT
  // Use -1.0 as sentinel to match default drift_time_ and ChromatogramExtractor's check (>= 0.0)
  vector<double> ims = ion_mobilities;
  if (ims.empty())
  {
    ims.resize(rts.size(), -1.0); // -1 means no IM filtering (matches default drift_time_)
  }
  else if (ims.size() == 1)
  {
    ims.resize(rts.size(), ims[0]);
  }
  else if (ims.size() != rts.size())
  {
    OPENMS_LOG_ERROR << "Error: Number of IonMobility values (" << ims.size()
                     << ") does not match number of RT values (" << rts.size()
                     << ") for target '" << name << "' - skipping this target." << endl;
    return;
  }
  // Prepare RT ranges: either empty (use default), one value (for all), or one per RT
  if (rt_ranges.empty())
  {
    rt_ranges.resize(rts.size(), 0.0);
  }
  else if (rt_ranges.size() == 1)
  {
    rt_ranges.resize(rts.size(), rt_ranges[0]);
  }
  else if (rt_ranges.size() != rts.size())
  {
    OPENMS_LOG_ERROR << "Error: Number of RetentionTimeRange values (" << rt_ranges.size()
                     << ") does not match number of RT values (" << rts.size()
                     << ") for target '" << name << "' - skipping this target." << endl;
    return;
  }

  // go through different charge states:
  for (vector<Int>::const_iterator z_it = charges.begin();
       z_it != charges.end(); ++z_it)
  {
    if (*z_it == 0)
    {
      OPENMS_LOG_ERROR << "Error: Invalid charge 0 for target '" << name
                       << "' - skipping this charge." << endl;
      continue;
    }
    target.setChargeState(*z_it);
    double mz = 0.0;
    if (!mass_given) // calculate m/z from formula
    {
      emp_formula.setCharge(*z_it);
      // "EmpiricalFormula::getMonoWeight()" already includes charges:
      mz = abs(emp_formula.getMonoWeight() / *z_it);
    }
    else
    {
      mz = calculateMZ_(mass, *z_it);
    }

    // one library compound per expected RT of this charge state:
    for (Size i = 0; i < rts.size(); ++i)
    {
      target.id = target_id + "_z" + String(*z_it) + "_rt" +
        String(float(rts[i]));
      target.setMetaValue("expected_rt", rts[i]);
      target_rts_[target.id] = rts[i];

      // Store IM information if provided - set drift time for ChromatogramExtractor
      // Check >= 0.0 to match ChromatogramExtractor's IM filtering logic
      if (ims[i] >= 0.0)
      {
        target.setDriftTime(ims[i]); // Required for IM-aware chromatogram extraction
        target.setMetaValue("expected_im", ims[i]);
      }
      else
      {
        // Reset drift time to -1 (no IM filtering) - target is reused across iterations
        target.setDriftTime(-1.0);
      }

      double rt_tol = rt_ranges[i] / 2.0;
      if (rt_tol == 0)
      {
        rt_tol = rt_window_ / 2.0;
      }
      // store beginning and end of RT region:
      target.rts.clear();
      addTargetRT_(target, rts[i] - rt_tol);
      addTargetRT_(target, rts[i] + rt_tol);
      library_.addCompound(target);
      generateTransitions_(target.id, mz, *z_it, iso_dist);
    }
  }
}
/// Generate transitions for a target ion (one per isotopologue peak) and add
/// them to the assay library; the theoretical isotope probability of each
/// transition is cached in isotope_probs_ under the transition name.
void FeatureFinderAlgorithmMetaboIdent::generateTransitions_(const String& target_id, double mz, Int charge,
                                                             const IsotopeDistribution& iso_dist)
{
  Size iso_index = 0;
  for (const Peak1D& iso_peak : iso_dist)
  {
    const String annotation = "i" + String(iso_index);
    const String transition_name = target_id + "_" + annotation;

    ReactionMonitoringTransition transition;
    transition.setNativeID(transition_name);
    transition.setPrecursorMZ(mz);
    // product m/z: shift by the C13/C12 mass difference per isotope index
    // @TODO: use accurate masses from the isotope distribution here?
    transition.setProductMZ(mz + abs(Constants::C13C12_MASSDIFF_U *
                                     float(iso_index) / charge));
    transition.setLibraryIntensity(iso_peak.getIntensity());
    transition.setCompoundRef(target_id);
    library_.addTransition(transition);

    isotope_probs_[transition_name] = iso_peak.getIntensity();
    ++iso_index;
  }
}
/// Helper function to append a retention time entry (unit: seconds, type:
/// local RT) to a library compound.
void FeatureFinderAlgorithmMetaboIdent::addTargetRT_(TargetedExperiment::Compound& target, double rt)
{
  TargetedExperiment::RetentionTime compound_rt;
  compound_rt.retention_time_unit =
    TargetedExperimentHelper::RetentionTime::RTUnit::SECOND;
  compound_rt.retention_time_type =
    TargetedExperimentHelper::RetentionTime::RTType::LOCAL;
  compound_rt.setRT(rt);
  target.rts.push_back(compound_rt);
}
/// Add relevant annotations/meta values to features.
/// Transfers compound information (charge, label, sum formula, expected RT/IM)
/// from the assay library entry referenced by each feature's "PeptideRef",
/// ensures convex hulls exist, and annotates subordinate mass traces with
/// their theoretical isotope probabilities.
void FeatureFinderAlgorithmMetaboIdent::annotateFeatures_(FeatureMap& features)
{
  for (Feature& feat : features)
  {
    feat.setMZ(feat.getMetaValue("PrecursorMZ"));
    // look up the library compound this feature was detected for:
    String ref = feat.getMetaValue("PeptideRef");
    const TargetedExperiment::Compound& compound =
      library_.getCompoundByRef(ref);
    feat.setCharge(compound.getChargeState());
    ensureConvexHulls_(feat);
    // OpenSWATH's dummy peptide IDs are not meaningful here:
    feat.getPeptideIdentifications().clear();
    feat.setMetaValue("label", compound.getMetaValue("name"));
    feat.setMetaValue("sum_formula", compound.molecular_formula);
    feat.setMetaValue("expected_rt",
                      compound.getMetaValue("expected_rt"));
    // Add IM annotations if available
    if (compound.metaValueExists("expected_im"))
    {
      feat.setMetaValue("expected_im", compound.getMetaValue("expected_im"));
    }
    // annotate subordinates with theoretical isotope intensities:
    for (Feature& sub : feat.getSubordinates())
    {
      String native_id = sub.getMetaValue("native_id");
      sub.setMetaValue("isotope_probability", isotope_probs_[native_id]);
      sub.removeMetaValue("FeatureLevel"); // value "MS2" is misleading
    }
    // annotate num_mass_traces, required for SIRIUS
    feat.setMetaValue(Constants::UserParam::NUM_OF_MASSTRACES, feat.getSubordinates().size());
  }
  features.getProteinIdentifications().clear();
}
/// Create rectangular convex hulls for the mass traces of a feature, unless
/// hulls are already present. The RT extent comes from the feature's
/// "leftWidth"/"rightWidth" meta values; the m/z extent from the extraction
/// window (converted from ppm if applicable).
void FeatureFinderAlgorithmMetaboIdent::ensureConvexHulls_(Feature& feature) const
{
  if (!feature.getConvexHulls().empty()) return; // nothing to do

  // RT bounding box is the same for all mass traces:
  const double rt_min = feature.getMetaValue("leftWidth");
  const double rt_max = feature.getMetaValue("rightWidth");
  for (Feature& sub : feature.getSubordinates())
  {
    // half the m/z extraction window, possibly ppm-relative to the trace m/z:
    double half_width = mz_window_ / 2.0;
    if (mz_window_ppm_)
    {
      half_width = sub.getMZ() * half_width * 1.0e-6;
    }
    ConvexHull2D box;
    box.addPoint(DPosition<2>(rt_min, sub.getMZ() - half_width));
    box.addPoint(DPosition<2>(rt_min, sub.getMZ() + half_width));
    box.addPoint(DPosition<2>(rt_max, sub.getMZ() - half_width));
    box.addPoint(DPosition<2>(rt_max, sub.getMZ() + half_width));
    feature.getConvexHulls().push_back(box);
  }
}
/// Select the best feature for an assay from a set of candidates.
/// Candidates for the same assay ("PeptideRef") must be stored consecutively.
/// The candidate whose RT range contains (or lies closest to) the expected RT
/// wins; ties are broken by higher intensity. All other candidates are marked
/// with "FFMetId_remove" and erased at the end via 'feature_filter_'.
void FeatureFinderAlgorithmMetaboIdent::selectFeaturesFromCandidates_(FeatureMap& features)
{
  String previous_ref;
  double best_rt_dist = numeric_limits<double>::infinity();
  FeatureMap::Iterator best_it = features.begin();
  for (FeatureMap::Iterator it = features.begin(); it != features.end();
       ++it)
  {
    // features from same assay (same "PeptideRef") appear consecutively:
    String ref = it->getMetaValue("PeptideRef");
    if (ref != previous_ref) // new assay
    {
      previous_ref = ref;
      // start with the worst acceptable RT distance for this assay:
      best_rt_dist = rt_window_;
      best_it = it;
    }
    double target_rt = target_rts_[ref];
    double rt_min = it->getMetaValue("leftWidth");
    double rt_max = it->getMetaValue("rightWidth");
    double rt_dist = numeric_limits<double>::infinity();
    if ((rt_min <= target_rt) && (rt_max >= target_rt))
    { // the target RT lies inside this candidate's RT range
      if (best_rt_dist <= 0.0)
      { // a previous candidate also contained the target RT -> ambiguous
        OPENMS_LOG_WARN
          << "Warning: overlapping feature candidates for assay '" << ref
          << "'" << endl;
      }
      rt_dist = 0.0;
    }
    else if (best_rt_dist > 0.0)
    { // distance from the nearer edge of the candidate's RT range
      rt_dist = (rt_min > target_rt) ? (rt_min - target_rt) : (target_rt -
                                                               rt_max);
    }
    if ((rt_dist < best_rt_dist) ||
        ((rt_dist == best_rt_dist) && (it->getIntensity() >
                                       best_it->getIntensity())))
    {
      // new best candidate for this assay:
      best_rt_dist = rt_dist;
      // mark no-longer-best candidate for removal:
      if (best_it != it) best_it->setMetaValue("FFMetId_remove", "");
      best_it = it;
      best_it->setMetaValue("rt_deviation", target_rt - best_it->getRT());
    }
    else // this candidate is worse than a previous one
    {
      it->setMetaValue("FFMetId_remove", ""); // mark for removal
    }
  }
  // erase everything marked above ('feature_filter_' tests the meta value):
  features.erase(remove_if(features.begin(), features.end(),
                           feature_filter_), features.end());
}
/// Build a human-readable summary of a compound: name, theoretical mass,
/// charge state and expected RT.
String FeatureFinderAlgorithmMetaboIdent::prettyPrintCompound(const TargetedExperiment::Compound& compound)
{
  String out = String(compound.getMetaValue("name"));
  out += " (m=" + String(float(compound.theoretical_mass));
  out += ", z=" + String(compound.getChargeState());
  out += ", rt=" + String(float(double(compound.getMetaValue("expected_rt")))) + ")";
  return out;
}
/// Add "peptide" identifications with information about targets to features
Size FeatureFinderAlgorithmMetaboIdent::addTargetAnnotations_(FeatureMap& features)
{
Size n_shared = 0;
set<String> found_refs;
for (FeatureMap::Iterator it = features.begin(); it != features.end(); ++it)
{
found_refs.insert(it->getMetaValue("PeptideRef"));
if (it->metaValueExists("alt_PeptideRef"))
{
n_shared++;
StringList alt_refs = it->getMetaValue("alt_PeptideRef");
found_refs.insert(alt_refs.begin(), alt_refs.end());
}
}
// targets without features:
size_t n_missing = library_.getCompounds().size() - found_refs.size();
features.getUnassignedPeptideIdentifications().reserve(n_missing);
for (vector<TargetedExperiment::Compound>::const_iterator it =
library_.getCompounds().begin(); it != library_.getCompounds().end();
++it)
{
if (!found_refs.count(it->id))
{
PeptideIdentification peptide;
peptide.setIdentifier("id");
peptide.setMetaValue("label", it->getMetaValue("name"));
peptide.setMetaValue("PeptideRef", it->id);
peptide.setRT(it->getMetaValue("expected_rt"));
peptide.setMZ(calculateMZ_(it->theoretical_mass, it->getChargeState()));
// Add IM annotation if available
if (it->metaValueExists("expected_im"))
{
peptide.setMetaValue("expected_im", it->getMetaValue("expected_im"));
}
features.getUnassignedPeptideIdentifications().push_back(peptide);
}
if (features.getUnassignedPeptideIdentifications().size() >= n_missing)
{
break; // found all
}
}
if (n_missing)
{
features.getProteinIdentifications().resize(1);
features.getProteinIdentifications()[0].setIdentifier("id");
}
return n_shared; // for summary statistics
}
void FeatureFinderAlgorithmMetaboIdent::extractTransformations_(const FeatureMap& features)
{
TransformationDescription::DataPoints points;
for (const auto& f : features)
{
TransformationDescription::DataPoint point;
point.first = f.getMetaValue("expected_rt");
point.second = f.getRT();
point.note = f.getMetaValue("PeptideRef");
points.push_back(point);
}
trafo_.setDataPoints(points);
}
void FeatureFinderAlgorithmMetaboIdent::setMSData(const PeakMap& m)
{
ms_data_ = m;
vector<MSSpectrum>& specs = ms_data_.getSpectra();
// keep only MS1
specs.erase(
std::remove_if(specs.begin(), specs.end(),
[](const MSSpectrum & s) { return s.getMSLevel() != 1; }),
specs.end());
}
/// Set the MS data to work on (moving); all non-MS1 spectra are discarded.
void FeatureFinderAlgorithmMetaboIdent::setMSData(PeakMap&& m)
{
  ms_data_ = std::move(m);
  // feature detection only uses MS1 level -> drop everything else:
  auto& spectra = ms_data_.getSpectra();
  auto not_ms1 = [](const MSSpectrum& spec) { return spec.getMSLevel() != 1; };
  spectra.erase(std::remove_if(spectra.begin(), spectra.end(), not_ms1),
                spectra.end());
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/EGHTraceFitter.cpp | .cpp | 14,658 | 421 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg$
// $Authors: Stephan Aiche$
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/EGHTraceFitter.h>
#include <unsupported/Eigen/NonLinearOptimization>
#include <Eigen/Core>
#include <OpenMS/CONCEPT/LogStream.h>
#include <numeric> // for "accumulate"
namespace OpenMS
{
// from table 1 in the Lan & Jorgenson paper:
// polynomial coefficients (in phi = atan(|tau| / |sigma|)) for the area
// correction factor epsilon used in getArea()
const double EGHTraceFitter::EPSILON_COEFS_[] =
{4.0, -6.293724, 9.232834, -11.342910, 9.123978, -4.173753, 0.827797};
// number of model parameters: height H, apex RT, sigma, tau
const Size EGHTraceFitter::NUM_PARAMS_ = 4;
// Least-squares functor: 'dimensions' = number of model parameters, number
// of residuals = total number of peaks across all mass traces. 'data' must
// outlive the functor (non-owning pointer).
EGHTraceFitter::EGHTraceFunctor::EGHTraceFunctor(int dimensions,
const TraceFitter::ModelData* data) :
TraceFitter::GenericFunctor(dimensions, data->traces_ptr->getPeakCount()), m_data(data)
{
}
EGHTraceFitter::EGHTraceFunctor::~EGHTraceFunctor() = default;
// Compute the (optionally intensity-weighted) residuals between the EGH
// model with parameters x = (H, tR, sigma, tau) and the observed peak
// intensities; one residual per peak, written to 'fvec'.
int EGHTraceFitter::EGHTraceFunctor::operator()(const double* x, double* fvec)
{
  // Create Eigen::Map views for convenient indexing
  Eigen::Map<const Eigen::VectorXd> x_map(x, m_inputs);
  Eigen::Map<Eigen::VectorXd> fvec_map(fvec, m_values);
  double H = x_map(0);
  double tR = x_map(1);
  double sigma = x_map(2);
  double tau = x_map(3);
  double t_diff, t_diff2, denominator = 0.0;
  double fegh = 0.0;
  UInt count = 0;
  for (Size t = 0; t < m_data->traces_ptr->size(); ++t)
  {
    const FeatureFinderAlgorithmPickedHelperStructs::MassTrace& trace = m_data->traces_ptr->at(t);
    // weight residuals by the trace's theoretical intensity, if requested:
    double weight = m_data->weighted ? trace.theoretical_int : 1.0;
    for (Size i = 0; i < trace.peaks.size(); ++i)
    {
      double rt = trace.peaks[i].first;
      t_diff = rt - tR;
      t_diff2 = t_diff * t_diff; // -> (t - t_R)^2
      denominator = 2 * sigma * sigma + tau * t_diff; // -> 2\sigma_{g}^{2} + \tau \left(t - t_R\right)
      if (denominator > 0.0)
      {
        fegh = m_data->traces_ptr->baseline + trace.theoretical_int * H * exp(-t_diff2 / denominator);
      }
      else
      { // outside the support of the EGH function the model value is zero
        fegh = 0.0;
      }
      fvec_map(count) = (fegh - trace.peaks[i].second->getIntensity()) * weight;
      ++count;
    }
  }
  return 0;
}
// Compute the Jacobian of the residuals w.r.t. the parameters
// (H, tR, sigma, tau); one row per peak, written to 'J' (column-major via
// the Eigen::Map). All four derivative variables are assigned in both
// branches below before use.
int EGHTraceFitter::EGHTraceFunctor::df(const double* x, double* J)
{
  // Create Eigen::Map views for convenient indexing
  Eigen::Map<const Eigen::VectorXd> x_map(x, m_inputs);
  Eigen::Map<Eigen::MatrixXd> J_map(J, m_values, m_inputs);
  double H = x_map(0);
  double tR = x_map(1);
  double sigma = fabs(x_map(2)); // must be non-negative!
  double tau = x_map(3);
  double derivative_H, derivative_tR, derivative_sigma, derivative_tau = 0.0;
  double t_diff, t_diff2, exp1, denominator = 0.0;
  UInt count = 0;
  for (Size t = 0; t < m_data->traces_ptr->size(); ++t)
  {
    const FeatureFinderAlgorithmPickedHelperStructs::MassTrace& trace = m_data->traces_ptr->at(t);
    double weight = m_data->weighted ? trace.theoretical_int : 1.0;
    for (Size i = 0; i < trace.peaks.size(); ++i)
    {
      double rt = trace.peaks[i].first;
      t_diff = rt - tR;
      t_diff2 = t_diff * t_diff; // -> (t - t_R)^2
      denominator = 2 * sigma * sigma + tau * t_diff; // -> 2\sigma_{g}^{2} + \tau \left(t - t_R\right)
      if (denominator > 0)
      {
        exp1 = exp(-t_diff2 / denominator);
        // \partial H f_{egh}(t) = \exp\left( \frac{-\left(t-t_R \right)}{2\sigma_{g}^{2} + \tau \left(t - t_R\right)} \right)
        derivative_H = trace.theoretical_int * exp1;
        // \partial t_R f_{egh}(t) &=& H \exp \left( \frac{-\left(t-t_R \right)}{2\sigma_{g}^{2} + \tau \left(t - t_R\right)} \right) \left( \frac{\left( 4 \sigma_{g}^{2} + \tau \left(t-t_R \right) \right) \left(t-t_R \right)}{\left( 2\sigma_{g}^{2} + \tau \left(t - t_R\right) \right)^2} \right)
        derivative_tR = trace.theoretical_int * H * exp1 * ((4 * sigma * sigma + tau * t_diff) * t_diff) / (denominator * denominator);
        // \partial \sigma_{g}^{2} f_{egh}(t) &=& H \exp \left( \frac{-\left(t-t_R \right)^2}{2\sigma_{g}^{2} + \tau \left(t - t_R\right)} \right) \left( \frac{ 2 \left(t - t_R\right)^2}{\left( 2\sigma_{g}^{2} + \tau \left(t - t_R\right) \right)^2} \right)
        // // \partial \sigma_{g}^{2} f_{egh}(t) &=& H \exp \left( \frac{-\left(t-t_R \right)^2}{2\sigma_{g}^{2} + \tau \left(t - t_R\right)} \right) \left( \frac{ 2 \left(t - t_R\right)^2}{\left( 2\sigma_{g}^{2} + \tau \left(t - t_R\right) \right)^2} \right)
        // derivative_sigma_square = trace.theoretical_int * H * exp1 * 2 * t_diff2 / (denominator * denominator));
        // \partial \sigma_{g} f_{egh}(t) &=& H \exp \left( \frac{-\left(t-t_R \right)^2}{2\sigma_{g}^{2} + \tau \left(t - t_R\right)} \right) \left( \frac{ 4 \sigma_{g} \left(t - t_R\right)^2}{\left( 2\sigma_{g}^{2} + \tau \left(t - t_R\right) \right)^2} \right)
        derivative_sigma = trace.theoretical_int * H * exp1 * 4 * sigma * t_diff2 / (denominator * denominator);
        // \partial \tau f_{egh}(t) &=& H \exp \left( \frac{-\left(t-t_R \right)^2}{2\sigma_{g}^{2} + \tau \left(t - t_R\right)} \right) \left( \frac{ \left(t - t_R\right)^3}{\left( 2\sigma_{g}^{2} + \tau \left(t - t_R\right) \right)^2} \right)
        derivative_tau = trace.theoretical_int * H * exp1 * t_diff * t_diff2 / (denominator * denominator);
      }
      else
      { // model value is constant zero here -> all derivatives vanish
        derivative_H = 0.0;
        derivative_tR = 0.0;
        derivative_sigma = 0.0;
        derivative_tau = 0.0;
      }
      // set the jacobian matrix
      J_map(count, 0) = derivative_H * weight;
      J_map(count, 1) = derivative_tR * weight;
      J_map(count, 2) = derivative_sigma * weight;
      J_map(count, 3) = derivative_tau * weight;
      ++count;
    }
  }
  return 0;
}
EGHTraceFitter::EGHTraceFitter() = default;
// Copy constructor: copies the EGH-specific fit state, then syncs with the
// (copied) parameter object via updateMembers_().
EGHTraceFitter::EGHTraceFitter(const EGHTraceFitter& other) :
TraceFitter(other)
{
  this->height_ = other.height_;
  this->apex_rt_ = other.apex_rt_;
  this->sigma_ = other.sigma_;
  this->tau_ = other.tau_;
  this->region_rt_span_ = other.region_rt_span_;
  this->sigma_5_bound_ = other.sigma_5_bound_;
  updateMembers_();
}
// Assignment: copies base-class and EGH-specific state; safe under
// self-assignment since only plain value members are copied.
EGHTraceFitter& EGHTraceFitter::operator=(const EGHTraceFitter& source)
{
  TraceFitter::operator=(source);
  this->height_ = source.height_;
  this->apex_rt_ = source.apex_rt_;
  this->sigma_ = source.sigma_;
  this->tau_ = source.tau_;
  this->region_rt_span_ = source.region_rt_span_;
  this->sigma_5_bound_ = source.sigma_5_bound_;
  updateMembers_();
  return *this;
}
EGHTraceFitter::~EGHTraceFitter() = default;
/// Fit the EGH model to the given mass traces, starting from data-derived
/// initial estimates of (H, tR, sigma, tau).
void EGHTraceFitter::fit(FeatureFinderAlgorithmPickedHelperStructs::MassTraces& traces)
{
  setInitialParameters_(traces);
  // parameter vector in the order expected by the functor:
  std::vector<double> start{height_, apex_rt_, sigma_, tau_};
  TraceFitter::ModelData data{};
  data.traces_ptr = &traces;
  data.weighted = this->weighted_;
  EGHTraceFunctor functor(NUM_PARAMS_, &data);
  TraceFitter::optimize_(start, functor);
}
/// Lower RT bound of the fitted peak (alpha boundary set in getOptimizedParameters_)
double EGHTraceFitter::getLowerRTBound() const
{
  return sigma_5_bound_.first;
}
/// Asymmetry parameter tau of the fitted EGH model
double EGHTraceFitter::getTau() const
{
  return tau_;
}
/// Upper RT bound of the fitted peak
double EGHTraceFitter::getUpperRTBound() const
{
  return sigma_5_bound_.second;
}
/// Fitted peak height H
double EGHTraceFitter::getHeight() const
{
  return height_;
}
/// Fitted Gaussian width parameter sigma
double EGHTraceFitter::getSigma() const
{
  return sigma_;
}
/// Fitted apex retention time tR
double EGHTraceFitter::getCenter() const
{
  return apex_rt_;
}
/// True if the fitted peak is too wide relative to the RT span of the region
bool EGHTraceFitter::checkMaximalRTSpan(const double max_rt_span)
{
  return (sigma_5_bound_.second - sigma_5_bound_.first) > max_rt_span * region_rt_span_;
}
/// True if the observed RT range is too narrow relative to the fitted peak width
bool EGHTraceFitter::checkMinimalRTSpan(const std::pair<double, double>& rt_bounds, const double min_rt_span)
{
  return (rt_bounds.second - rt_bounds.first) < min_rt_span * (sigma_5_bound_.second - sigma_5_bound_.first);
}
/// Evaluate the fitted EGH function at RT position 'rt'
/// (equation 12 from the Lan & Jorgenson paper).
double EGHTraceFitter::getValue(double rt) const
{
  const double delta = rt - apex_rt_;
  const double denom = 2 * sigma_ * sigma_ + tau_ * delta;
  // outside its support the EGH function is defined as zero:
  if (denom <= 0.0) return 0.0;
  return height_ * exp(-delta * delta / denom);
}
/// Area under the fitted EGH peak (equation 21 from the Lan & Jorgenson
/// paper); the polynomial in phi approximates the correction factor epsilon.
double EGHTraceFitter::getArea()
{
  // equation 21 from Lan & Jorgenson paper:
  double abs_tau = fabs(tau_);
  double abs_sigma = fabs(sigma_);
  double phi = atan(abs_tau / abs_sigma);
  // evaluate the epsilon polynomial term by term (coefficients from table 1):
  double epsilon = EPSILON_COEFS_[0];
  double phi_pow = phi;
  for (Size i = 1; i < 7; ++i)
  {
    epsilon += phi_pow * EPSILON_COEFS_[i];
    phi_pow *= phi;
  }
  // 0.62... is approx. sqrt(pi / 8):
  return height_ * (abs_sigma * 0.6266571 + abs_tau) * epsilon;
}
/// Full width at half maximum of the fitted peak, derived from the
/// alpha = 0.5 boundaries of the EGH function.
double EGHTraceFitter::getFWHM() const
{
  const std::pair<double, double> half_max = getAlphaBoundaries_(0.5);
  return half_max.second - half_max.first;
}
/// Build a gnuplot expression of the fitted EGH function for one mass trace:
/// baseline + scaled EGH, guarded by the positivity condition on the
/// denominator (ternary), with the apex shifted by 'rt_shift'.
String EGHTraceFitter::getGnuplotFormula(const FeatureFinderAlgorithmPickedHelperStructs::MassTrace& trace, const char function_name, const double baseline, const double rt_shift)
{
  std::stringstream s;
  s << String(function_name) << "(x)= " << baseline << " + ";
  s << "("; // the overall bracket
  s << "((" << 2 * sigma_ * sigma_ << " + " << tau_ << " * (x - " << (rt_shift + apex_rt_) << " )) > 0) ? "; // condition
  s << (trace.theoretical_int * height_) << " * exp(-1 * (x - " << (rt_shift + apex_rt_) << ")**2 " <<
    "/" <<
    " ( " << 2 * sigma_ * sigma_ << " + " << tau_ << " * (x - " << (rt_shift + apex_rt_) << " )))";
  s << " : 0)";
  return String(s.str());
}
/// RT positions where the EGH function drops to the fraction 'alpha' of its
/// height; solved from equations A.2 and A.3 of the Lan & Jorgenson paper
/// (appendix A) for the boundaries A_alpha and B_alpha.
std::pair<double, double> EGHTraceFitter::getAlphaBoundaries_(const double alpha) const
{
  const double log_alpha = log(alpha);
  const double root = sqrt(((log_alpha * tau_) * (log_alpha * tau_) / 4) - 2 * log_alpha * sigma_ * sigma_);
  const double offset_a = (-1 * (log_alpha * tau_) / 2) + root;
  const double offset_b = (-1 * (log_alpha * tau_) / 2) - root;
  // smaller offset (should be < 0) gives the lower bound, larger the upper:
  return std::pair<double, double>(apex_rt_ + std::min(offset_a, offset_b),
                                   apex_rt_ + std::max(offset_a, offset_b));
}
/// Copy the optimized parameter vector (H, tR, sigma, tau) into the members
/// and recompute the RT bounds of the fitted peak.
void EGHTraceFitter::getOptimizedParameters_(const std::vector<double>& x_init)
{
  height_ = x_init[0];
  apex_rt_ = x_init[1];
  sigma_ = x_init[2];
  tau_ = x_init[3];
  // we set alpha to 0.04 which is conceptually equal to
  // 2.5 sigma for lower and upper bound
  // (for a Gaussian: exp(-2.5^2 / 2) = 0.043937)
  sigma_5_bound_ = getAlphaBoundaries_(0.043937);
}
/// Derive initial estimates for (H, tR, sigma, tau) from the data:
/// smooth the summed intensity profile with a moving average, take the
/// maximum as apex, find the half-maximum positions left/right of the apex,
/// and convert their distances A/B into tau and sigma estimates
/// (method from the Lan & Jorgenson paper).
void EGHTraceFitter::setInitialParameters_(FeatureFinderAlgorithmPickedHelperStructs::MassTraces& traces)
{
  OPENMS_LOG_DEBUG << "EGHTraceFitter->setInitialParameters(...)\n";
  OPENMS_LOG_DEBUG << "Number of traces: " << traces.size() << '\n';
  // aggregate data; some peaks (where intensity is zero) can be missing!
  // mapping: RT -> total intensity over all mass traces
  std::list<std::pair<double, double> > total_intensities;
  traces.computeIntensityProfile(total_intensities);
  // compute moving average for smoothing:
  const Size N = total_intensities.size();
  const Size LEN = 2; // window size: 2 * LEN + 1
  std::vector<double> totals(N + 2 * LEN); // pad with zeros at ends
  Int index = LEN;
  // OPENMS_LOG_DEBUG << "Summed intensities:\n";
  for (std::list<std::pair<double, double> >::iterator it =
         total_intensities.begin(); it != total_intensities.end(); ++it)
  {
    totals[index++] = it->second;
    // OPENMS_LOG_DEBUG << it->second << '\n';
  }
  std::vector<double> smoothed(N);
  Size max_index = 0; // index of max. smoothed intensity
  // OPENMS_LOG_DEBUG << "Smoothed intensities:\n";
  // running sum over the moving-average window (starts left of element 0):
  double sum = std::accumulate(&totals[LEN], &totals[2 * LEN], 0.0);
  for (Size i = 0; i < N; ++i)
  {
    sum += totals[i + 2 * LEN]; // window gains its rightmost element
    smoothed[i] = sum / (2 * LEN + 1);
    sum -= totals[i]; // window loses its leftmost element
    if (smoothed[i] > smoothed[max_index])
    {
      max_index = i;
    }
    // OPENMS_LOG_DEBUG << smoothed[i] << '\n';
  }
  OPENMS_LOG_DEBUG << "Maximum at index " << max_index << '\n';
  height_ = smoothed[max_index] - traces.baseline;
  OPENMS_LOG_DEBUG << "height: " << height_ << '\n';
  std::list<std::pair<double, double> >::iterator it = total_intensities.begin();
  std::advance(it, max_index);
  apex_rt_ = it->first;
  OPENMS_LOG_DEBUG << "apex_rt: " << apex_rt_ << '\n';
  region_rt_span_ = (total_intensities.rbegin()->first -
                     total_intensities.begin()->first);
  OPENMS_LOG_DEBUG << "region_rt_span: " << region_rt_span_ << '\n';
  // find RT values where intensity is at half-maximum:
  index = static_cast<Int>(max_index);
  while ((index > 0) && (smoothed[index] > height_ * 0.5))
  {
    --index;
  }
  double left_height = smoothed[index];
  it = total_intensities.begin();
  std::advance(it, index);
  double left_rt = it->first;
  OPENMS_LOG_DEBUG << "Left half-maximum at index " << index << ", RT " << left_rt
                   << '\n';
  index = static_cast<Int>(max_index);
  while ((index < Int(N - 1)) && (smoothed[index] > height_ * 0.5))
  {
    ++index;
  }
  double right_height = smoothed[index];
  it = total_intensities.end();
  std::advance(it, index - Int(N)); // step back from end() to position 'index'
  double right_rt = it->first;
  OPENMS_LOG_DEBUG << "Right half-maximum at index " << index << ", RT "
                   << right_rt << '\n';
  double A = apex_rt_ - left_rt;  // distance apex -> left half-max
  double B = right_rt - apex_rt_; // distance apex -> right half-max
  //OPENMS_LOG_DEBUG << "A: " << A << '\n';
  //OPENMS_LOG_DEBUG << "B: " << B << '\n';
  // compute estimates for tau / sigma based on A and B:
  double alpha = (left_height + right_height) * 0.5 / height_; // ~0.5
  double log_alpha = log(alpha);
  tau_ = -1 / log_alpha * (B - A);
  //EGH function fails when tau==0
  if (tau_ == 0)
  {
    tau_ = std::numeric_limits<double>::epsilon();
  }
  OPENMS_LOG_DEBUG << "tau: " << tau_ << '\n';
  sigma_ = sqrt(-0.5 / log_alpha * B * A);
  OPENMS_LOG_DEBUG << "sigma: " << sigma_ << '\n';
}
/// Forward parameter updates to the base class (no EGH-specific parameters).
void EGHTraceFitter::updateMembers_()
{
  TraceFitter::updateMembers_();
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/FeatureFinderIdentificationAlgorithm.cpp | .cpp | 79,846 | 1,949 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Marc Sturm $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/FeatureFinderIdentificationAlgorithm.h>
#include <OpenMS/FEATUREFINDER/FFIDAlgoExternalIDHandler.h>
#include <OpenMS/FEATUREFINDER/EGHTraceFitter.h>
#include <OpenMS/FEATUREFINDER/ElutionModelFitter.h>
#include <OpenMS/FEATUREFINDER/GaussTraceFitter.h>
#include <OpenMS/FEATUREFINDER/TraceFitter.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/UniqueIdGenerator.h>
#include <OpenMS/IONMOBILITY/IMTypes.h>
#include <OpenMS/IONMOBILITY/IMDataConverter.h>
#include <OpenMS/IONMOBILITY/FAIMSHelper.h>
#include <OpenMS/ANALYSIS/OPENSWATH/ChromatogramExtractor.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/SimpleOpenMSSpectraAccessFactory.h>
#include <OpenMS/ML/SVM/SimpleSVM.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentAlgorithmIdentification.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/CoarseIsotopePatternGenerator.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/IsotopeDistribution.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <OpenMS/PROCESSING/FEATURE/FeatureOverlapFilter.h>
#include <vector>
#include <numeric>
#include <fstream>
#include <algorithm>
#include <random>
#include <optional>
#ifdef _OPENMP
#include <omp.h>
#endif
using namespace std;
using namespace OpenMS::Internal;
namespace OpenMS
{
/// Constructor: registers all algorithm parameters (defaults, restrictions
/// and descriptions) with the DefaultParamHandler base class.
FeatureFinderIdentificationAlgorithm::FeatureFinderIdentificationAlgorithm() :
DefaultParamHandler("FeatureFinderIdentificationAlgorithm")
{
  std::vector<std::string> output_file_tags;
  output_file_tags.emplace_back("output file");
  defaults_.setValue("candidates_out", "", "Optional output file with feature candidates.", output_file_tags);
  defaults_.setValue("debug", 0, "Debug level for feature detection.", {"advanced"});
  defaults_.setMinInt("debug", 0);
  // parameters for ion chromatogram extraction ("extract:" section):
  defaults_.setValue("extract:batch_size", 5000, "Nr of peptides used in each batch of chromatogram extraction."
                                                 " Smaller values decrease memory usage but increase runtime.");
  defaults_.setMinInt("extract:batch_size", 1);
  defaults_.setValue("extract:mz_window", 10.0, "m/z window size for chromatogram extraction (unit: ppm if 1 or greater, else Da/Th)");
  defaults_.setMinFloat("extract:mz_window", 0.0);
  defaults_.setValue(
    "extract:IM_window",
    0.06,
    "Ion mobility (IM) window for chromatogram extraction in the IM dimension. "
    "Set to 0.0 to disable IM filtering (even if data contains IM information). "
    "The window is applied as +/- IM_window/2 around the median IM value of identified peptides. "
    "This parameter is automatically ignored if the input data does not contain IM information "
    "(determined via IMTypes::determineIMFormat). "
    "Currently only concatenated IM format is supported. "
    "Typical values: 0.05-0.10 for TIMS data (1/K0 units), 3-5 for FAIMS data (compensation voltage). "
    "Note: IM values are calculated per peptide/charge/RT-region, using the median of all identifications "
    "in that region for robustness. The median, min, and max IM values are propagated to output features "
    "as meta-values (IM_median, IM_min, IM_max) for quality control.");
  defaults_.setMinFloat("extract:IM_window", 0.0);
  defaults_.setValue("extract:n_isotopes", 2, "Number of isotopes to include in each peptide assay.");
  defaults_.setMinInt("extract:n_isotopes", 2);
  defaults_.setValue(
    "extract:isotope_pmin",
    0.0,
    "Minimum probability for an isotope to be included in the assay for a peptide. If set, this parameter takes precedence over 'extract:n_isotopes'.",
    {"advanced"});
  defaults_.setMinFloat("extract:isotope_pmin", 0.0);
  defaults_.setMaxFloat("extract:isotope_pmin", 1.0);
  defaults_.setValue(
    "extract:rt_quantile",
    0.95,
    "Quantile of the RT deviations between aligned internal and external IDs to use for scaling the RT extraction window",
    {"advanced"});
  defaults_.setMinFloat("extract:rt_quantile", 0.0);
  defaults_.setMaxFloat("extract:rt_quantile", 1.0);
  defaults_.setValue(
    "extract:rt_window",
    0.0,
    "RT window size (in sec.) for chromatogram extraction. If set, this parameter takes precedence over 'extract:rt_quantile'.",
    {"advanced"});
  defaults_.setMinFloat("extract:rt_window", 0.0);
  defaults_.setSectionDescription("extract", "Parameters for ion chromatogram extraction");
  // parameters for feature detection ("detect:" section):
  defaults_.setValue("detect:peak_width", 60.0, "Expected elution peak width in seconds, for smoothing (Gauss filter). Also determines the RT extration window, unless set explicitly via 'extract:rt_window'.");
  defaults_.setMinFloat("detect:peak_width", 0.0);
  defaults_.setValue(
    "detect:min_peak_width",
    0.2,
    "Minimum elution peak width. Absolute value in seconds if 1 or greater, else relative to 'peak_width'.",
    {"advanced"});
  defaults_.setMinFloat("detect:min_peak_width", 0.0);
  defaults_.setValue(
    "detect:signal_to_noise",
    0.8,
    "Signal-to-noise threshold for OpenSWATH feature detection",
    {"advanced"});
  defaults_.setMinFloat("detect:signal_to_noise", 0.1);
  defaults_.setValue("detect:mapping_tolerance", 0.0, "RT tolerance (plus/minus) for mapping peptide IDs to features. Absolute value in seconds if 1 or greater, else relative to the RT span of the feature.");
  defaults_.setMinFloat("detect:mapping_tolerance", 0.0);
  defaults_.setSectionDescription("detect", "Parameters for detecting features in extracted ion chromatograms");
  // parameters for SVM classification:
  defaults_.setValue("svm:samples", 0, "Number of observations to use for training ('0' for all)");
  defaults_.setMinInt("svm:samples", 0);
  defaults_.setValue("svm:no_selection", "false", "By default, roughly the same number of positive and negative observations, with the same intensity distribution, are selected for training. This aims to reduce biases, but also reduces the amount of training data. Set this flag to skip this procedure and consider all available observations (subject to 'svm:samples').");
  defaults_.setValidStrings("svm:no_selection", {"true","false"});
  defaults_.setValue("svm:xval_out", "", "Output file: SVM cross-validation (parameter optimization) results", output_file_tags);
  defaults_.setValidStrings("svm:xval_out", {"csv"});
  defaults_.insert("svm:", SimpleSVM().getParameters());
  defaults_.setValue("quantify_decoys", "false", "Whether decoy peptides should be quantified (true) or skipped (false).");
  defaults_.setValidStrings("quantify_decoys", {"true","false"});
  defaults_.setValue("min_psm_cutoff", "none", "Minimum score for the best PSM of a spectrum to be used as seed. Use 'none' for no cutoff.");
  defaults_.setValue("add_mass_offset_peptides", 0.0, "If for every peptide (or seed) also an offset peptide is extracted (true). Can be used to downstream to determine MBR false transfer rates. (0.0 = disabled)");
  defaults_.setMinFloat("add_mass_offset_peptides", 0.0);
  // available scores: initialPeakQuality,total_xic,peak_apices_sum,var_xcorr_coelution,var_xcorr_coelution_weighted,var_xcorr_shape,var_xcorr_shape_weighted,var_library_corr,var_library_rmsd,var_library_sangle,var_library_rootmeansquare,var_library_manhattan,var_library_dotprod,var_intensity_score,nr_peaks,sn_ratio,var_log_sn_score,var_elution_model_fit_score,xx_lda_prelim_score,var_isotope_correlation_score,var_isotope_overlap_score,var_massdev_score,var_massdev_score_weighted,var_bseries_score,var_yseries_score,var_dotprod_score,var_manhatt_score,main_var_xx_swath_prelim_score,xx_swath_prelim_score
  // exclude some redundant/uninformative scores:
  // @TODO: intensity bias introduced by "peak_apices_sum"?
  // names of scores to use as SVM features
  String score_metavalues = "peak_apices_sum,var_xcorr_coelution,var_xcorr_shape,var_library_sangle,var_intensity_score,sn_ratio,var_log_sn_score,var_elution_model_fit_score,xx_lda_prelim_score,var_ms1_isotope_correlation_score,var_ms1_isotope_overlap_score,var_massdev_score,main_var_xx_swath_prelim_score";
  defaults_.setValue(
    "svm:predictors",
    score_metavalues,
    "Names of OpenSWATH scores to use as predictors for the SVM (comma-separated list)",
    {"advanced"});
  defaults_.setValue(
    "svm:min_prob",
    0.0,
    "Minimum probability of correctness, as predicted by the SVM, required to retain a feature candidate",
    {"advanced"});
  defaults_.setMinFloat("svm:min_prob", 0.0);
  defaults_.setMaxFloat("svm:min_prob", 1.0);
  defaults_.setSectionDescription("svm", "Parameters for scoring features using a support vector machine (SVM)");
  // parameters for model fitting (via ElutionModelFitter):
  std::vector<std::string> models = {"symmetric","asymmetric","none"};
  defaults_.setValue("model:type", models[0], "Type of elution model to fit to features");
  defaults_.setValidStrings("model:type", models);
  defaults_.insert("model:", ElutionModelFitter().getParameters()); // copy parameters
  defaults_.remove("model:asymmetric");
  defaults_.setSectionDescription("model", "Parameters for fitting elution models to features");
  // parameters for EMG fitting of mass traces:
  defaults_.setValue("EMGScoring:max_iteration", 100, "Maximum number of iterations for EMG fitting.");
  defaults_.setMinInt("EMGScoring:max_iteration", 1);
  defaults_.setValue("EMGScoring:init_mom", "true", "Alternative initial parameters for fitting through method of moments.");
  defaults_.setValidStrings("EMGScoring:init_mom", {"true","false"});
  defaults_.setSectionDescription("EMGScoring", "Parameters for fitting exp. mod. Gaussians to mass traces.");
  // parameters for FAIMS data processing:
  defaults_.setValue("faims:merge_features", "true",
                     "For FAIMS data with multiple compensation voltages: Merge features that represent "
                     "the same analyte detected at different CVs. Features are merged if they have the same "
                     "charge and are within 5 seconds RT and 0.05 Da m/z. Intensities are summed.");
  defaults_.setValidStrings("faims:merge_features", {"true", "false"});
  defaults_.setSectionDescription("faims", "Parameters for FAIMS data processing");
  defaultsToParam_();
}
/// Mutable access to the (MS1-only) peak map used for feature detection
PeakMap& FeatureFinderIdentificationAlgorithm::getMSData()
{
  return ms_data_;
}
/// Read-only access to the (MS1-only) peak map used for feature detection
const PeakMap& FeatureFinderIdentificationAlgorithm::getMSData() const
{
  return ms_data_;
}
void FeatureFinderIdentificationAlgorithm::setMSData(const PeakMap& ms_data)
{
ms_data_ = ms_data;
vector<MSSpectrum>& specs = ms_data_.getSpectra();
// keep only MS1
specs.erase(
std::remove_if(specs.begin(), specs.end(),
[](const MSSpectrum & s) { return s.getMSLevel() != 1; }),
specs.end());
}
void FeatureFinderIdentificationAlgorithm::setMSData(PeakMap&& ms_data)
{
ms_data_ = std::move(ms_data);
vector<MSSpectrum>& specs = ms_data_.getSpectra();
// keep only MS1
specs.erase(
std::remove_if(specs.begin(), specs.end(),
[](const MSSpectrum & s) { return s.getMSLevel() != 1; }),
specs.end());
}
/// Mutable access to the extracted chromatograms
PeakMap& FeatureFinderIdentificationAlgorithm::getChromatograms()
{
  return chrom_data_;
}
/// Read-only access to the extracted chromatograms
const PeakMap& FeatureFinderIdentificationAlgorithm::getChromatograms() const
{
  return chrom_data_;
}
/// Mutable access to the progress logger
ProgressLogger& FeatureFinderIdentificationAlgorithm::getProgressLogger()
{
  return prog_log_;
}
/// Read-only access to the progress logger
const ProgressLogger& FeatureFinderIdentificationAlgorithm::getProgressLogger() const
{
  return prog_log_;
}
/// Mutable access to the generated assay library
TargetedExperiment& FeatureFinderIdentificationAlgorithm::getLibrary()
{
  return output_library_;
}
/// Read-only access to the generated assay library
const TargetedExperiment& FeatureFinderIdentificationAlgorithm::getLibrary() const
{
  return output_library_;
}
Size FeatureFinderIdentificationAlgorithm::addOffsetPeptides_(PeptideIdentificationList& peptides, double offset)
{
// WARNING: Superhack! Use unique ID to distinguish seeds from real IDs. Use a mod that will never occur to
// make them truly unique and not be converted to an actual modification.
const String pseudo_mod_name = String(10000);
AASequence some_seq = AASequence::fromString("XXX[" + pseudo_mod_name + "]");
PeptideIdentificationList offset_peptides;
offset_peptides.reserve(peptides.size());
Size n_added{};
for (const auto & p : peptides) // for every peptide (or seed) we add an offset peptide
{
/*
// check if already a peptide in peptide_map_ that is close in RT and MZ
// if so don't add seed
bool peptide_already_exists = false;
double offset_RT = p.getRT();
double offset_MZ = p.getMZ() + offset;
double offset_charge = p.getHits()[0].getCharge();
for (const auto & peptide : peptides)
{
double peptide_RT = peptide.getRT();
double peptide_MZ = peptide.getMZ();
// RT or MZ values of seed match in range -> peptide already exists -> don't add seed
// Consider up to 5th isotopic trace (e.g., because of seed misassignment)
double th_tolerance = mz_window_ppm_ ? mz_window_ * 1e-6 * peptide_MZ : mz_window_;
if ((fabs(offset_RT - peptide_RT) <= seed_rt_window_ / 2.0) &&
((fabs(offset_MZ - peptide_MZ) <= th_tolerance) ||
fabs(offset_MZ - (1.0/offset_charge) * Constants::C13C12_MASSDIFF_U - peptide_MZ) <= th_tolerance ||
fabs(offset_MZ - (2.0/offset_charge) * Constants::C13C12_MASSDIFF_U - peptide_MZ) <= th_tolerance ||
fabs(offset_MZ - (3.0/offset_charge) * Constants::C13C12_MASSDIFF_U - peptide_MZ) <= th_tolerance ||
fabs(offset_MZ - (4.0/offset_charge) * Constants::C13C12_MASSDIFF_U - peptide_MZ) <= th_tolerance ||
fabs(offset_MZ - (5.0/offset_charge) * Constants::C13C12_MASSDIFF_U - peptide_MZ) <= th_tolerance)
)
{
peptide_already_exists = true;
break;
}
}
// prevent decoys to be extracted at other target peptide
if (!peptide_already_exists)
{
*/
offset_peptides.emplace_back();
PeptideHit hit;
hit.setCharge(p.getHits()[0].getCharge());
hit.setSequence(some_seq);
offset_peptides.back().getHits().push_back(std::move(hit));
offset_peptides.back().setRT(p.getRT());
offset_peptides.back().setMZ(p.getMZ() + offset);
offset_peptides.back().setMetaValue("FFId_category", "internal");
offset_peptides.back().setMetaValue("OffsetPeptide", "true"); // mark as offset peptide
offset_peptides.back().setMetaValue("SeedFeatureID", String(UniqueIdGenerator::getUniqueId())); // also mark as seed so we can indicate that we have a mass without sequence
//}
}
for (auto & p : offset_peptides) // add offset peptides
{
peptides.push_back(std::move(p));
addPeptideToMap_(peptides.back(), peptide_map_);
n_added++;
}
return n_added;
}
  /**
    @brief Adds seed features as pseudo peptide IDs, skipping seeds that overlap a known ID.

    A seed is skipped if an existing peptide lies within half the seed RT window AND within the
    m/z tolerance of the seed's m/z or of one of its first five isotope positions (to tolerate
    isotope-trace misassignment). Accepted seeds are appended to @p peptides (fake "XXX[...]"
    sequence, charge from the seed) and registered in peptide_map_.

    @param peptides Input IDs; accepted seeds are appended (capacity must be reserved by the
           caller so pointers stored in peptide_map_ stay valid)
    @param seeds Candidate features from an untargeted feature finder
    @return number of seeds actually added
  */
  Size FeatureFinderIdentificationAlgorithm::addSeeds_(PeptideIdentificationList& peptides, const FeatureMap& seeds)
  {
    size_t seeds_added{};
    // WARNING: Superhack! Use unique ID to distinguish seeds from real IDs. Use a mod that will never occur to
    // make them truly unique and not be converted to an actual modification.
    const String pseudo_mod_name = String(10000);
    AASequence some_seq = AASequence::fromString("XXX[" + pseudo_mod_name + "]");
    for (const Feature& feat : seeds)
    {
      // check if already a peptide in peptide_map_ that is close in RT and MZ
      // if so don't add seed
      bool peptide_already_exists = false;
      for (const auto & peptide : peptides)
      {
        double seed_RT = feat.getRT();
        double seed_MZ = feat.getMZ();
        double seed_charge = feat.getCharge();
        double peptide_RT = peptide.getRT();
        double peptide_MZ = peptide.getMZ();
        // RT or MZ values of seed match in range -> peptide already exists -> don't add seed
        // Consider up to 5th isotopic trace (e.g., because of seed misassignment)
        double th_tolerance = mz_window_ppm_ ? mz_window_ * 1e-6 * peptide_MZ : mz_window_;
        if ((fabs(seed_RT - peptide_RT) <= seed_rt_window_ / 2.0) &&
            ((fabs(seed_MZ - peptide_MZ) <= th_tolerance) ||
              fabs(seed_MZ - (1.0/seed_charge) * Constants::C13C12_MASSDIFF_U - peptide_MZ) <= th_tolerance ||
              fabs(seed_MZ - (2.0/seed_charge) * Constants::C13C12_MASSDIFF_U - peptide_MZ) <= th_tolerance ||
              fabs(seed_MZ - (3.0/seed_charge) * Constants::C13C12_MASSDIFF_U - peptide_MZ) <= th_tolerance ||
              fabs(seed_MZ - (4.0/seed_charge) * Constants::C13C12_MASSDIFF_U - peptide_MZ) <= th_tolerance ||
              fabs(seed_MZ - (5.0/seed_charge) * Constants::C13C12_MASSDIFF_U - peptide_MZ) <= th_tolerance)
            )
        {
          peptide_already_exists = true;
          String seq = "empty";
          int chg = 0;
          if (!peptide.getHits().empty())
          {
            seq = peptide.getHits()[0].getSequence().toString();
            chg = peptide.getHits()[0].getCharge();
          }
          OPENMS_LOG_DEBUG_NOFILE << "Skipping seed from FeatureID " << String(feat.getUniqueId()) << " with CHG: " << seed_charge << "; RT: " << seed_RT << "; MZ: " << seed_MZ <<
            " due to overlap with " << seq << "/" << chg << " at MZ: " << peptide_MZ << "; RT: " << peptide_RT << endl;
          break;
        }
      }
      if (!peptide_already_exists)
      {
        // WARNING: Superhack! Store ID generated from seed in the original input peptide
        // vector to make sure that the pointers that will be added to peptide_map_
        // stay valid for the duration of the function.
        peptides.emplace_back();
        PeptideHit seed_hit;
        seed_hit.setCharge(feat.getCharge());
        seed_hit.setSequence(some_seq);
        peptides.back().getHits().push_back(std::move(seed_hit));
        peptides.back().setRT(feat.getRT());
        peptides.back().setMZ(feat.getMZ());
        peptides.back().setMetaValue("FFId_category", "internal");
        peptides.back().setMetaValue("SeedFeatureID", String(feat.getUniqueId()));
        // Copy IM meta value from feature if present (some feature finders annotate IM)
        // If not present, the seed will be extracted across the full IM range
        if (feat.metaValueExists(Constants::UserParam::IM))
        {
          peptides.back().setMetaValue(Constants::UserParam::IM, feat.getMetaValue(Constants::UserParam::IM));
        }
        addPeptideToMap_(peptides.back(), peptide_map_);
        ++seeds_added;
      }
    }
    return seeds_added;
  }
// ===== Helper functions for run() =====
void FeatureFinderIdentificationAlgorithm::validateSVMParameters_() const
{
if ((svm_n_samples_ > 0) && (svm_n_samples_ < 2 * svm_n_parts_))
{
String msg = "Sample size of " + String(svm_n_samples_) +
" (parameter 'svm:samples') is not enough for " + String(svm_n_parts_) +
"-fold cross-validation (parameter 'svm:xval').";
throw Exception::InvalidParameter(__FILE__, __LINE__,
OPENMS_PRETTY_FUNCTION, msg);
}
}
  /// Configures the internal OpenSWATH feature finder (feat_finder_) for MS1-only,
  /// ID-guided feature detection: disables scores that need MS2 or reliable RT,
  /// sets peak picking parameters from peak_width_/min_peak_width_/signal_to_noise_,
  /// and attaches ms_data_ as the MS1 map.
  /// NOTE: side effect — min_peak_width_ is rescaled in place if it is relative (< 1.0).
  void FeatureFinderIdentificationAlgorithm::initializeFeatureFinder_()
  {
    Param params = feat_finder_.getParameters();
    params.setValue("stop_report_after_feature", -1); // return all features
    params.setValue("EMGScoring:max_iteration", param_.getValue("EMGScoring:max_iteration"));
    params.setValue("EMGScoring:init_mom", param_.getValue("EMGScoring:init_mom"));
    params.setValue("Scores:use_rt_score", "false"); // RT may not be reliable
    params.setValue("Scores:use_ionseries_scores", "false"); // since FFID only uses MS1 spectra, this is useless
    params.setValue("Scores:use_ms2_isotope_scores", "false"); // since FFID only uses MS1 spectra, this is useless
    params.setValue("Scores:use_ms1_correlation", "false"); // this would be redundant to the "MS2" correlation and since
    // precursor transition = first product transition, additionally biased
    params.setValue("Scores:use_ms1_mi", "false"); // same as above. On MS1 level we basically only care about the "MS1 fullscan" scores
    //TODO for MS1 level scoring there is an additional parameter add_up_spectra with which we can add up spectra
    // around the apex, to complete isotopic envelopes (and therefore make this score more robust).
    if ((elution_model_ != "none") || (!candidates_out_.empty()))
    {
      // convex hulls are needed for elution model fitting and candidate output
      params.setValue("write_convex_hull", "true");
    }
    if (min_peak_width_ < 1.0) // relative value -> scale by the expected peak width
    {
      min_peak_width_ *= peak_width_;
    }
    params.setValue("TransitionGroupPicker:PeakPickerChromatogram:gauss_width",
                    peak_width_);
    params.setValue("TransitionGroupPicker:min_peak_width", min_peak_width_);
    // disabling the signal-to-noise threshold (setting the parameter to zero)
    // totally breaks the OpenSWATH feature detection (no features found)!
    params.setValue("TransitionGroupPicker:PeakPickerChromatogram:signal_to_noise",
                    signal_to_noise_);
    params.setValue("TransitionGroupPicker:recalculate_peaks", "true");
    params.setValue("TransitionGroupPicker:PeakPickerChromatogram:peak_width", -1.0);
    params.setValue("TransitionGroupPicker:PeakPickerChromatogram:method",
                    "corrected");
    params.setValue("TransitionGroupPicker:PeakPickerChromatogram:write_sn_log_messages", "false"); // disabled in OpenSWATH
    feat_finder_.setParameters(params);
    feat_finder_.setLogType(ProgressLogger::NONE);
    feat_finder_.setStrictFlag(false);
    // to use MS1 Swath scores:
    feat_finder_.setMS1Map(SimpleOpenMSSpectraFactory::getSpectrumAccessOpenMSPtr(std::make_shared<MSExperiment>(ms_data_)));
  }
double FeatureFinderIdentificationAlgorithm::calculateRTWindow_(double rt_uncertainty) const
{
if (rt_window_ != 0.0)
{
return rt_window_; // Already set, return it
}
// Calculate RT window based on other parameters and alignment quality:
double map_tol = mapping_tolerance_;
if (map_tol < 1.0)
{
map_tol *= (2 * peak_width_); // relative tolerance
}
double calculated_window = (rt_uncertainty + 2 * peak_width_ + map_tol) * 2;
OPENMS_LOG_INFO << "RT window size calculated as " << calculated_window << " seconds." << endl;
return calculated_window;
}
bool FeatureFinderIdentificationAlgorithm::isSeedPseudoHit_(const PeptideHit& hit)
{
return hit.getSequence().toUnmodifiedString().hasPrefix("XXX");
}
void FeatureFinderIdentificationAlgorithm::removeSeedPseudoIDs_(FeatureMap& features)
{
// Remove all hits with pseudo ids (seeds) from features
for (Feature& f : features)
{
PeptideIdentificationList& ids = f.getPeptideIdentifications();
// if we have peptide identifications assigned and all are annotated as OffsetPeptide,
// we mark the feature as also an OffsetPeptide
if (!ids.empty() && std::all_of(ids.begin(), ids.end(),
[](const PeptideIdentification& pid) { return pid.metaValueExists("OffsetPeptide"); }))
{
f.setMetaValue("OffsetPeptide", "true");
}
// remove all seed pseudo hits (PSM details)
for (auto& pid : ids)
{
std::vector<PeptideHit>& hits = pid.getHits();
auto it = remove_if(hits.begin(), hits.end(), isSeedPseudoHit_);
hits.erase(it, hits.end());
}
// remove empty PeptideIdentifications
auto it = remove_if(ids.begin(), ids.end(),
[](const PeptideIdentification& pid) { return pid.empty(); });
ids.erase(it, ids.end());
}
// clean up unassigned PeptideIdentifications
PeptideIdentificationList& ids = features.getUnassignedPeptideIdentifications();
for (auto& pid : ids)
{
std::vector<PeptideHit>& hits = pid.getHits();
auto it = remove_if(hits.begin(), hits.end(), isSeedPseudoHit_);
hits.erase(it, hits.end());
}
// remove empty PeptideIdentifications
auto it = remove_if(ids.begin(), ids.end(),
[](const PeptideIdentification& pid) { return pid.empty(); });
ids.erase(it, ids.end());
}
std::pair<double, double> FeatureFinderIdentificationAlgorithm::calculateRTBounds_(
double rt_min, double rt_max) const
{
if (mapping_tolerance_ > 0.0)
{
double abs_tol = mapping_tolerance_;
if (abs_tol < 1.0)
{
abs_tol *= (rt_max - rt_min);
}
return {rt_min - abs_tol, rt_max + abs_tol};
}
return {rt_min, rt_max};
}
// ===== End of helper functions =====
void FeatureFinderIdentificationAlgorithm::run(
PeptideIdentificationList peptides,
const vector<ProteinIdentification>& proteins,
PeptideIdentificationList peptides_ext,
vector<ProteinIdentification> proteins_ext,
FeatureMap& features,
const FeatureMap& seeds,
const String& spectra_file
)
{
// Check for FAIMS data
auto faims_groups = IMDataConverter::splitByFAIMSCV(std::move(ms_data_));
const bool has_faims = faims_groups.size() > 1 || !std::isnan(faims_groups[0].first);
if (!has_faims)
{
// Non-FAIMS data: restore ms_data_ and run directly
ms_data_ = std::move(faims_groups[0].second);
runSingleGroup_(peptides, proteins, peptides_ext, proteins_ext, features, seeds, spectra_file);
return;
}
// FAIMS data: process each CV group separately
OPENMS_LOG_INFO << "FAIMS data detected with " << faims_groups.size() << " compensation voltage(s)." << endl;
// Clear combined outputs
features.clear(true);
chrom_data_.clear(true);
output_library_.clear(true);
bool first_group = true;
for (auto& [group_cv, faims_group] : faims_groups)
{
OPENMS_LOG_INFO << "Processing FAIMS CV group: " << group_cv << " V" << endl;
// Filter peptide IDs for this FAIMS CV
PeptideIdentificationList peptides_cv = FAIMSHelper::filterPeptidesByFAIMSCV(peptides, group_cv);
PeptideIdentificationList peptides_ext_cv = FAIMSHelper::filterPeptidesByFAIMSCV(peptides_ext, group_cv);
if (peptides_cv.empty() && peptides_ext_cv.empty())
{
OPENMS_LOG_INFO << "No peptide IDs for FAIMS CV " << group_cv << " V. Skipping." << endl;
continue;
}
OPENMS_LOG_INFO << " " << peptides_cv.size() << " internal IDs, "
<< peptides_ext_cv.size() << " external IDs" << endl;
// Create algorithm instance for this group (use same parameters)
FeatureFinderIdentificationAlgorithm ffid_group;
ffid_group.getProgressLogger().setLogType(prog_log_.getLogType());
ffid_group.setParameters(this->getParameters());
ffid_group.setMSData(std::move(faims_group));
// Run feature detection
FeatureMap features_cv;
ffid_group.runSingleGroup_(peptides_cv, proteins, peptides_ext_cv, proteins_ext, features_cv, seeds, spectra_file);
// Annotate features with FAIMS CV and add to results
for (auto& feat : features_cv)
{
feat.setMetaValue(Constants::UserParam::FAIMS_CV, group_cv);
features.push_back(feat);
}
// Copy UnassignedPeptideIdentifications with FAIMS CV annotation
for (auto& pep_id : features_cv.getUnassignedPeptideIdentifications())
{
pep_id.setMetaValue(Constants::UserParam::FAIMS_CV, group_cv);
features.getUnassignedPeptideIdentifications().push_back(std::move(pep_id));
}
// Copy ProteinIdentifications only from the first group
if (first_group)
{
features.setProteinIdentifications(features_cv.getProteinIdentifications());
}
// Combine chromatograms
for (const auto& chrom : ffid_group.getChromatograms().getChromatograms())
{
MSChromatogram chrom_copy = chrom;
chrom_copy.setMetaValue(Constants::UserParam::FAIMS_CV, group_cv);
chrom_data_.addChromatogram(std::move(chrom_copy));
}
first_group = false;
}
// Warn about library output for FAIMS data
OPENMS_LOG_WARN << "Warning: Library output is not available for multi-FAIMS data. "
<< "Each FAIMS CV group has its own assay library." << endl;
OPENMS_LOG_INFO << "Combined " << features.size() << " features from all FAIMS CV groups." << endl;
// Optionally merge features from different FAIMS CV groups that represent the same analyte
if (param_.getValue("faims:merge_features").toBool())
{
Size before_merge = features.size();
FeatureOverlapFilter::mergeFAIMSFeatures(features, 5.0, 0.05);
if (features.size() < before_merge)
{
OPENMS_LOG_INFO << "Merged FAIMS features: " << before_merge << " -> " << features.size()
<< " (" << (before_merge - features.size()) << " features merged)" << endl;
}
}
// Set primary MS run path
features.setPrimaryMSRunPath({spectra_file});
features.ensureUniqueId();
}
  /**
    @brief Runs the complete ID-guided feature detection workflow for a single dataset
           (non-FAIMS run or one FAIMS CV group).

    Pipeline: validate parameters -> build the peptide map from internal IDs (plus seeds
    and optional offset peptides, and external IDs if given) -> per chunk: create an assay
    library, extract chromatograms and pick features with OpenSWATH -> annotate, sort,
    post-process (SVM/filter/elution models), attach protein/external IDs, and strip the
    seed pseudo IDs again.

    Side effects: consumes ms_data_ (reset at the end), fills output_library_, chrom_data_
    (cleared per chunk), peptide_map_, and the n_internal_peps_/n_external_peps_ counters.
  */
  void FeatureFinderIdentificationAlgorithm::runSingleGroup_(
    PeptideIdentificationList peptides,
    const vector<ProteinIdentification>& proteins,
    PeptideIdentificationList peptides_ext,
    vector<ProteinIdentification> proteins_ext,
    FeatureMap& features,
    const FeatureMap& seeds,
    const String& spectra_file
    )
  {
    // Clear output library from any previous run
    output_library_.clear(true);
    // Validate parameters
    validateSVMParameters_();
    // annotate mzML file
    features.setPrimaryMSRunPath({spectra_file}, ms_data_);
    // Check IM format
    double IM_window = param_.getValue("extract:IM_window");
    IMFormat im_format = IMTypes::determineIMFormat(ms_data_);
    bool has_IM = false;
    if (im_format == IMFormat::CONCATENATED)
    {
      has_IM = true;
    }
    else if (im_format != IMFormat::NONE) // has IM but wrong format
    {
      OPENMS_LOG_ERROR << "Wrong IM format detected. Expecting in concatenated format (float data arrays)" << std::endl;
    }
    // Initialize feature finder with appropriate parameters
    initializeFeatureFinder_();
    bool with_external_ids = !peptides_ext.empty();
    if (with_external_ids && !seeds.empty())
    {
      throw Exception::IllegalArgument(
        __FILE__,
        __LINE__,
        OPENMS_PRETTY_FUNCTION,
        "Using seeds and external ids is currently not supported.");
    }
    double rt_uncertainty(0);
    if (with_external_ids)
    {
      // Use the external ID handler to align internal and external IDs
      rt_uncertainty = external_id_handler_.alignInternalAndExternalIDs(peptides, peptides_ext, rt_quantile_);
    }
    // Calculate RT window if not already set
    rt_window_ = calculateRTWindow_(rt_uncertainty);
    //-------------------------------------------------------------
    // prepare peptide map
    //-------------------------------------------------------------
    OPENMS_LOG_INFO << "Preparing mapping of peptide data..." << endl;
    peptide_map_.clear();
    // Reserve enough space for all possible seeds
    // (pointers into 'peptides' are stored in peptide_map_, so the vector must
    // never reallocate after the first addPeptideToMap_ call)
    {
      Size max_size = peptides.size() + seeds.size();
      if (add_mass_offset_peptides_)
      {
        max_size *= 2;
      }
      peptides.reserve(max_size);
    }
    for (auto& pep : peptides)
    {
      addPeptideToMap_(pep, peptide_map_); // stores pointer to pep in map
      pep.setMetaValue("FFId_category", "internal");
    }
    // Calculate global IM statistics BEFORE adding seeds
    // This ensures we only learn from real peptide identifications with IM data
    // and don't need to check/skip seeds during calculation
    calculateGlobalIMStats_();
    // TODO make sure that only assembled traces (more than one trace -> has a charge) if FFMetabo is used
    // see FeatureFindingMetabo: defaults_.setValue("remove_single_traces", "false", "Remove unassembled traces (single traces).");
    Size seeds_added = addSeeds_(peptides, seeds);
    OPENMS_LOG_INFO << "#Seeds without RT and m/z overlap with identified peptides added: " << seeds_added << endl;
    // NOTE(review): the reserve block above tests add_mass_offset_peptides_ for
    // truthiness while this checks > 0.0 — negative offsets reserve space but are
    // never added; confirm whether negative offsets should be supported
    if (add_mass_offset_peptides_ > 0.0)
    {
      Size n_added = addOffsetPeptides_(peptides, add_mass_offset_peptides_);
      OPENMS_LOG_INFO << "#Offset peptides without RT and m/z overlap with other peptides added: " << n_added << endl;
    }
    n_internal_peps_ = peptide_map_.size();
    if (with_external_ids)
    {
      // Process and add external peptides
      for (PeptideIdentification& pep : peptides_ext)
      {
        addPeptideToMap_(pep, peptide_map_, true);
        pep.setMetaValue("FFId_category", "external");
      }
      n_external_peps_ = peptide_map_.size() - n_internal_peps_;
    }
    std::shared_ptr<PeakMap> shared = std::make_shared<PeakMap>(ms_data_);
    OpenSwath::SpectrumAccessPtr spec_temp =
      SimpleOpenMSSpectraFactory::getSpectrumAccessOpenMSPtr(shared);
    auto chunks = chunk_(peptide_map_.begin(), peptide_map_.end(), batch_size_);
    PeptideRefRTMap ref_rt_map;
    if (debug_level_ >= 668)
    {
      OPENMS_LOG_INFO << "Creating full assay library for debugging." << endl;
      // Warning: this step is pretty inefficient, since it does the whole library generation twice
      // Really use for debug only
      createAssayLibrary_(peptide_map_.begin(), peptide_map_.end(), ref_rt_map, false);
      cout << "Writing debug.traml file." << endl;
      FileHandler().storeTransitions("debug.traml", library_);
      ref_rt_map.clear();
      library_.clear(true);
    }
    //-------------------------------------------------------------
    // run feature detection
    //-------------------------------------------------------------
    //Note: progress only works in non-debug when no logs come in-between
    getProgressLogger().startProgress(0, chunks.size(), "Creating assay library and extracting chromatograms");
    Size chunk_count = 0;
    for (auto& chunk : chunks)
    {
      //TODO since ref_rt_map is only used after chunking, we could create
      // maps per chunk and merge them in the end. Would help in parallelizing as well.
      // fills library_ (TargetedExperiment)
      createAssayLibrary_(chunk.first, chunk.second, ref_rt_map);
      OPENMS_LOG_DEBUG << "#Transitions: " << library_.getTransitions().size() << endl;
      ChromatogramExtractor extractor;
      // extractor.setLogType(ProgressLogger::NONE);
      {
        vector<OpenSwath::ChromatogramPtr> chrom_temp;
        vector<ChromatogramExtractor::ExtractionCoordinates> coords;
        // take entries in library_ and put to chrom_temp and coords
        extractor.prepare_coordinates(chrom_temp, coords, library_,
                                      numeric_limits<double>::quiet_NaN(), false);
        // with IM data, restrict extraction to the IM window around each assay's drift time
        if (has_IM && IM_window > 0.0)
        {
          extractor.extractChromatograms(spec_temp, chrom_temp, coords, mz_window_,
                                         mz_window_ppm_, IM_window, "tophat");
        }
        else
        {
          extractor.extractChromatograms(spec_temp, chrom_temp, coords, mz_window_,
                                         mz_window_ppm_, "tophat");
        }
        extractor.return_chromatogram(chrom_temp, coords, library_, (*shared)[0],
                                      chrom_data_.getChromatograms(), false);
      }
      OPENMS_LOG_DEBUG << "Extracted " << chrom_data_.getNrChromatograms()
                       << " chromatogram(s)." << endl;
      OPENMS_LOG_DEBUG << "Detecting chromatographic peaks..." << endl;
      // suppress status output from OpenSWATH, unless in debug mode:
      std::optional<Logger::LogSinkGuard> log_guard; // RAII: re-inserts cout on scope exit (exception-safe)
      if (debug_level_ < 1)
      {
        log_guard.emplace(getGlobalLogInfo(), cout);
      }
      feat_finder_.pickExperiment(chrom_data_, features, library_,
                                  TransformationDescription(), ms_data_);
      chrom_data_.clear(true);
      // Accumulate library entries for output before clearing
      for (const auto& pep : library_.getPeptides())
      {
        output_library_.addPeptide(pep);
      }
      for (const auto& prot : library_.getProteins())
      {
        output_library_.addProtein(prot);
      }
      for (const auto& trans : library_.getTransitions())
      {
        output_library_.addTransition(trans);
      }
      library_.clear(true);
      // since chrom_data_ here is just a container for the chromatograms and identifications will be empty,
      // pickExperiment above will only add empty ProteinIdentification runs with colliding identifiers.
      // Usually we could sanitize the identifiers or merge the runs, but since they are empty and we add the
      // "real" proteins later -> just clear them
      features.getProteinIdentifications().clear();
      getProgressLogger().setProgress(++chunk_count);
    }
    getProgressLogger().endProgress();
    OPENMS_LOG_INFO << "Found " << features.size() << " feature candidates in total."
                    << endl;
    ms_data_.reset(); // not needed anymore, free up the memory
    // complete feature annotation:
    annotateFeatures_(features, ref_rt_map);
    // sort everything:
    sort(features.getUnassignedPeptideIdentifications().begin(),
         features.getUnassignedPeptideIdentifications().end(),
         peptide_compare_);
    sort(features.begin(), features.end(), feature_compare_);
    postProcess_(features, with_external_ids);
    statistics_(features);
    features.setProteinIdentifications(proteins);
    // add external IDs (if any):
    features.getProteinIdentifications().insert(
      features.getProteinIdentifications().end(), proteins_ext.begin(),
      proteins_ext.end());
    features.getUnassignedPeptideIdentifications().insert(
      features.getUnassignedPeptideIdentifications().end(),
      peptides_ext.begin(), peptides_ext.end());
    // remove all hits with pseudo ids (seeds)
    removeSeedPseudoIDs_(features);
    // add back ignored PSMs
    features.getUnassignedPeptideIdentifications().insert(features.getUnassignedPeptideIdentifications().end(),
                                                          std::move_iterator(unassignedIDs_.begin()),
                                                          std::move_iterator(unassignedIDs_.end()));
    features.ensureUniqueId();
  }
void FeatureFinderIdentificationAlgorithm::postProcess_(
FeatureMap & features,
bool with_external_ids)
{
// don't do SVM stuff unless we have external data to apply the model to:
if (with_external_ids)
{
external_id_handler_.classifyFeaturesWithSVM(features, param_);
}
// make sure proper unique ids get assigned to all features
features.ensureUniqueId();
// store feature candidates before filtering
if (!candidates_out_.empty())
{
FileHandler().storeFeatures(candidates_out_, features);
}
// Use ExternalIDHandler for feature filtering
if (with_external_ids)
{
external_id_handler_.filterClassifiedFeatures(features, external_id_handler_.getSVMProbsInternal().empty() ? 0.0 : double(param_.getValue("svm:min_prob")));
OPENMS_LOG_INFO << features.size() << " features left after filtering." << endl;
}
else
{
filterFeatures_(features, with_external_ids);
OPENMS_LOG_INFO << features.size() << " features left after filtering." << endl;
}
if (features.empty()) return; // elution model fit throws on empty features
// Calculate FDR if we have external IDs
if (with_external_ids)
{
external_id_handler_.calculateFDR(features);
}
//TODO MRMFeatureFinderScoring already does an ElutionModel scoring. It uses EMG fitting.
// Would be nice if we could only do the fitting once, since it is one of the bottlenecks.
// What is the intention of this post-processing here anyway? Does it filter anything?
// If so, why not filter based on the corresponding Swath/MRM score?
if (elution_model_ != "none")
{
ElutionModelFitter emf;
Param emf_params = param_.copy("model:", true);
emf_params.remove("type");
emf_params.setValue("asymmetric",
(elution_model_ == "asymmetric") ? "true" : "false");
emf.setParameters(emf_params);
emf.fitElutionModels(features);
}
else if (!candidates_out_.empty()) // hulls not needed, remove them
{
for (auto & feat : features)
{
for (auto & sub : feat.getSubordinates())
{
sub.getConvexHulls().clear();
}
}
}
}
void FeatureFinderIdentificationAlgorithm::runOnCandidates(FeatureMap & features)
{
if ((svm_n_samples_ > 0) && (svm_n_samples_ < 2 * svm_n_parts_))
{
String msg = "Sample size of " + String(svm_n_samples_) +
" (parameter 'svm:samples') is not enough for " + String(svm_n_parts_) +
"-fold cross-validation (parameter 'svm:xval').";
throw Exception::InvalidParameter(__FILE__, __LINE__,
OPENMS_PRETTY_FUNCTION, msg);
}
bool with_external_ids = (!features.empty() && features[0].metaValueExists("predicted_class"));
// extract ID information for statistics:
peptide_map_.clear();
set<AASequence> internal_seqs;
for (PeptideIdentification& pep : features.getUnassignedPeptideIdentifications())
{
const AASequence& seq = pep.getHits()[0].getSequence();
if (pep.getMetaValue("FFId_category") == "internal")
{
internal_seqs.insert(seq);
}
peptide_map_[seq];
}
for (const Feature& feat : features)
{
if (feat.getPeptideIdentifications().empty())
{
continue;
}
const PeptideIdentification& pep_id = feat.getPeptideIdentifications()[0];
const AASequence& seq = pep_id.getHits()[0].getSequence();
if (pep_id.getMetaValue("FFId_category") == "internal")
{
internal_seqs.insert(seq);
}
peptide_map_[seq];
}
n_internal_peps_ = internal_seqs.size();
n_external_peps_ = peptide_map_.size() - internal_seqs.size();
// sort everything:
sort(features.getUnassignedPeptideIdentifications().begin(),
features.getUnassignedPeptideIdentifications().end(),
peptide_compare_);
sort(features.begin(), features.end(), feature_compare_);
postProcess_(features, with_external_ids);
statistics_(features);
}
void FeatureFinderIdentificationAlgorithm::statistics_(FeatureMap const & features) const
{
// same peptide sequence may be quantified based on internal and external
// IDs if charge states differ!
set<AASequence> quantified_internal, quantified_all;
for (const auto& f : features)
{
const PeptideIdentification& pep_id = f.getPeptideIdentifications()[0];
const AASequence& seq = pep_id.getHits()[0].getSequence();
if (f.getIntensity() > 0.0)
{
quantified_all.insert(seq);
if (pep_id.getMetaValue("FFId_category") == "internal")
{
quantified_internal.insert(seq);
}
}
}
Size n_quant_external = quantified_all.size() - quantified_internal.size();
// If internal and external IDs for a peptide map to different RT regions,
// it is possible that there is a quantification from the "external" region,
// but not from the "internal" region (no matching feature) - therefore the
// number of "missing" external peptides can be negative!
Int n_missing_external = Int(n_external_peps_) - n_quant_external;
OPENMS_LOG_INFO << "\nSummary statistics (counting distinct peptides including "
"PTMs):\n"
<< peptide_map_.size() << " peptides identified ("
<< n_internal_peps_ << " internal, " << n_external_peps_
<< " additional external)\n"
<< quantified_all.size() << " peptides with features ("
<< quantified_internal.size() << " internal, "
<< n_quant_external << " external)\n"
<< peptide_map_.size() - quantified_all.size()
<< " peptides without features ("
<< n_internal_peps_ - quantified_internal.size() << " internal, "
<< n_missing_external << " external)\n" << endl;
}
  /// Computes global ion mobility statistics: min/max from the raw MS data range,
  /// median from the internal peptide IDs in peptide_map_ (falling back to the
  /// center of the data range if no ID carries an IM value). Must be called after
  /// peptide_map_ is filled with real IDs but BEFORE seeds are added (see caller).
  /// Leaves global_im_stats_ default-constructed if the data has no IM dimension.
  void FeatureFinderIdentificationAlgorithm::calculateGlobalIMStats_()
  {
    // Update ranges from MS data to get IM range from raw data
    ms_data_.updateRanges();
    // Try to get IM range from MS data (will throw if no IM data available)
    try
    {
      global_im_stats_.min = ms_data_.getMinMobility();
      global_im_stats_.max = ms_data_.getMaxMobility();
    }
    catch (const Exception::InvalidRange&)
    {
      OPENMS_LOG_DEBUG << "No IM data found in MS data." << endl;
      global_im_stats_ = IMStats(); // Empty stats
      return;
    }
    // Calculate median from peptide identifications for robust central tendency
    // (more representative of where peptides actually elute than data range center)
    std::vector<double> im_values_from_ids;
    // Collect IM values from all peptide identifications
    // Note: This is called BEFORE adding seeds, so we only see real identifications
    for (const auto& pep_entry : peptide_map_)
    {
      const ChargeMap& charge_map = pep_entry.second;
      for (const auto& charge_entry : charge_map)
      {
        const RTMap& internal_ids = charge_entry.second.first; // internal IDs only
        for (const auto& rt_pepid : internal_ids)
        {
          const PeptideIdentification& pep_id = *rt_pepid.second;
          // -1.0 default marks IDs without IM annotation
          const double im = pep_id.getMetaValue(Constants::UserParam::IM, -1.0);
          if (im >= 0.0) // Only collect valid IM values (>= 0.0 matches ChromatogramExtractor)
          {
            im_values_from_ids.push_back(im);
          }
        }
      }
    }
    // If we have ID-based IM values, use them for median calculation
    // Otherwise, use center of data range as fallback
    if (!im_values_from_ids.empty())
    {
      std::sort(im_values_from_ids.begin(), im_values_from_ids.end());
      Size n = im_values_from_ids.size();
      if (n % 2 == 0)
      {
        // even count: mean of the two middle elements
        global_im_stats_.median = (im_values_from_ids[n/2 - 1] + im_values_from_ids[n/2]) / 2.0;
      }
      else
      {
        global_im_stats_.median = im_values_from_ids[n/2];
      }
      OPENMS_LOG_INFO << "Global IM statistics: median=" << global_im_stats_.median
                      << " (from " << n << " identifications), "
                      << "min=" << global_im_stats_.min << ", "
                      << "max=" << global_im_stats_.max << " (from MS data range)" << endl;
    }
    else
    {
      // No IDs with IM annotation - use center of data range
      global_im_stats_.median = (global_im_stats_.min + global_im_stats_.max) / 2.0;
      OPENMS_LOG_INFO << "Global IM statistics: median=" << global_im_stats_.median
                      << " (center of data range), "
                      << "min=" << global_im_stats_.min << ", "
                      << "max=" << global_im_stats_.max << " (from MS data range)" << endl;
    }
  }
/**
* @brief Calculate ion mobility statistics (median, min, max) for a given RT region
*
* This function computes robust statistics for ion mobility values from peptide
* identifications within a single RT region. The statistics are used for:
* 1. Setting the drift time on peptide assays (using median)
* 2. Extracting chromatograms with appropriate IM windows
* 3. Annotating features with IM QC metrics
*
* Implementation Strategy:
* - Collects IM values from internal peptide identifications in the RT region
* - Skips individual IDs that lack IM annotation (logged as debug, summary as info)
* - Uses MEDIAN instead of mean for robustness against outliers
* - Computes min/max to characterize the IM distribution spread
* - Returns empty stats only if NO valid IM values are available
*
* Note: Seeds from untargeted feature finders may or may not have IM annotation,
* depending on the feature finder. If IM is annotated, it is used; otherwise the
* seed is extracted across the full IM range (ChromatogramExtractor disables IM
* filtering when ion_mobility < 0).
*
* Note: RT region boundaries are determined from ALL IDs (including those without IM),
* so skipping individual IDs for IM statistics does not affect RT extraction.
*
* @param[in] r RT region containing peptide identifications (per charge state)
* @return IMStats structure with median, min, and max IM values
* Returns {-1.0, -1.0, -1.0} only if no valid IM data is available
*
* @note The median is calculated using the standard definition:
* - For odd n: middle element of sorted array
* - For even n: average of two middle elements
*/
FeatureFinderIdentificationAlgorithm::IMStats
FeatureFinderIdentificationAlgorithm::getRTRegionIMStats_(const RTRegion& r)
{
  // Collect ion mobility (IM) values of all "internal" peptide IDs in the RT
  // region and derive robust statistics (median/min/max). If no ID carries an
  // IM annotation, default-constructed stats are returned.
  IMStats stats;
  std::vector<double> im_values;
  Size n_missing = 0; // IDs lacking an IM annotation (skipped)
  for (const auto& charge_entry : r.ids)
  {
    // only internal IDs (first member of the pair) are considered
    const RTMap& internal_ids = charge_entry.second.first;
    for (const auto& rt_id : internal_ids)
    {
      const PeptideIdentification& pep_id = *rt_id.second;
      const double im = pep_id.getMetaValue(Constants::UserParam::IM, -1.0);
      if (im >= 0.0) // 0.0 counts as a valid IM value
      {
        im_values.push_back(im);
      }
      else // negative value: no IM annotation - skip this ID, keep the rest
      {
        ++n_missing;
        OPENMS_LOG_DEBUG << "Identification at RT " << pep_id.getRT()
                         << " lacks IM annotation - skipping for IM statistics" << endl;
      }
    }
  }
  if (im_values.empty())
  {
    return stats; // no IM data available at all
  }
  std::sort(im_values.begin(), im_values.end());
  stats.min = im_values.front();
  stats.max = im_values.back();
  const Size n = im_values.size();
  const Size mid = n / 2;
  // median: middle element for odd n, mean of the two middle elements for even n
  stats.median = (n % 2 != 0) ? im_values[mid]
                              : (im_values[mid - 1] + im_values[mid]) / 2.0;
  if (n_missing > 0)
  {
    OPENMS_LOG_INFO << "Calculated IM statistics from " << n << " IDs with IM data "
                    << "(skipped " << n_missing << " IDs without IM annotation)" << endl;
  }
  return stats;
}
/// Builds the targeted assay library (peptides, transitions, proteins) from the
/// given range of the peptide map. Seeds (fake "XXX" sequences) and identified
/// peptides are handled in separate branches. Fills @p ref_rt_map with the
/// internal/external IDs per assay ("peptide ref") for later feature annotation.
/// @param[in] begin Start of the peptide map range to process
/// @param[in] end End of the peptide map range to process
/// @param[out] ref_rt_map Internal/external ID maps keyed by assay ID
/// @param[in] clear_IDs Passed to getRTRegions_: if true, ID references are
///            removed from the input map once sorted into RT regions
void FeatureFinderIdentificationAlgorithm::createAssayLibrary_(
  const PeptideMap::iterator& begin,
  const PeptideMap::iterator& end,
  PeptideRefRTMap& ref_rt_map,
  bool clear_IDs)
{
  std::set<String> protein_accessions;
  Size seedcount = 0; // running counter to make seed assay IDs unique
  for (auto pm_it = begin;
       pm_it != end; ++pm_it)
  {
    TargetedExperiment::Peptide peptide; // reused across iterations (see drift time resets below)
    const AASequence &seq = pm_it->first;
    // @NOTE: Technically, "TargetedExperiment::Peptide" stores the unmodified
    // sequence and the modifications separately. Unfortunately, creating the
    // modifications vector is complex and there is currently no convenient
    // conversion function (see "TargetedExperimentHelper::getAASequence" for
    // the reverse conversion). However, "Peptide" is later converted to
    // "OpenSwath::LightPeptide" anyway, and this is done via "AASequence"
    // (see "OpenSwathDataAccessHelper::convertTargetedPeptide"). So for our
    // purposes it works to just store the sequence including modifications in
    // "Peptide".

    // for now, seeds are stored in the same PeptideRefMap, all
    // under the same fake sequence key entry
    // TODO add own data structure for them
    if (seq.toUnmodifiedString().hasPrefix("XXX")) // seed
    {
      // This will force the SWATH scores to consider it like an unidentified peptide and e.g. use averagine isotopes
      peptide.sequence = "";
      // we do not have to aggregate their retention times, therefore just
      // iterate over the entries
      const ChargeMap& cm = pm_it->second;
      for (const auto& charge_rtmap : cm)
      {
        Int charge = charge_rtmap.first;
        // only go through internals for seeds (->first). External seeds are not supported
        for (const auto& rt_pep : charge_rtmap.second.first)
        {
          // since we don't know their IDs, seeds will all need a different grouplabel in SWATH
          // to not be combined
          seedcount++;
          double mz = rt_pep.second->getMZ();
          double rt = rt_pep.second->getRT();
          String uid = rt_pep.second->getMetaValue("SeedFeatureID");
          // UID should be enough, but let's add the seed count to be sure.
          String peptide_id = peptide.sequence + "[" + uid + "][" + String(seedcount) + "]/" + String(charge);
          peptide.setChargeState(charge);
          peptide.id = peptide_id;
          peptide.protein_refs = {"not_available"};
          peptide.setPeptideGroupLabel(peptide_id);
          //create an entry in the "output" ref_rt_map for internals
          RTMap &internal_ids = ref_rt_map[peptide_id].first;
          // get isotope distribution for peptide:
          //TODO Why 10? Document constant?
          Size n_isotopes = (isotope_pmin_ > 0.0) ? 10 : n_isotopes_;
          CoarseIsotopePatternGenerator generator(n_isotopes);
          // estimate isotopes from the (uncharged) peptide mass via averagine:
          IsotopeDistribution iso_dist = generator
            .estimateFromPeptideWeight(mz * charge - charge * Constants::PROTON_MASS_U);
          if (isotope_pmin_ > 0.0)
          {
            // drop low-probability isotopes at both ends, then renormalize:
            iso_dist.trimLeft(isotope_pmin_);
            iso_dist.trimRight(isotope_pmin_);
            iso_dist.renormalize();
          }
          double rt_tolerance = seed_rt_window_ / 2.0;
          // store beginning and end of RT region: here we only need one entry
          peptide.rts.clear();
          addPeptideRT_(peptide, rt - rt_tolerance);
          addPeptideRT_(peptide, rt + rt_tolerance);
          // Use IM from seed if annotated (some feature finders provide IM)
          // If not annotated, drift time stays at default (-1) -> full IM range extraction
          // Check >= 0.0 to match ChromatogramExtractor's IM filtering logic
          const double seed_im = rt_pep.second->getMetaValue(Constants::UserParam::IM, -1.0);
          if (seed_im >= 0.0)
          {
            peptide.setDriftTime(seed_im);
            // Store IM stats for annotation (use seed IM as median, with some tolerance for min/max)
            im_stats_[peptide.id] = {seed_im, seed_im, seed_im};
          }
          else
          {
            // Reset drift time to -1 (no IM filtering) - peptide object is reused
            peptide.setDriftTime(-1.0);
          }
          library_.addPeptide(peptide);
          generateTransitions_(peptide.id, mz, charge, iso_dist);
          internal_ids.emplace(rt_pep);
        }
      }
    }
    else // identified peptide (not a seed)
    {
      peptide.sequence = seq.toString();
      // keep track of protein accessions:
      set<String> current_accessions;
      const pair<RTMap, RTMap> &pair = pm_it->second.begin()->second; // internal/external pair
      const RTMap& internal_ids = pair.first;
      const RTMap& external_ids = pair.second;
      // WARNING: This assumes that at least one hit is present.
      const PeptideHit &hit = (internal_ids.empty() ?
                               external_ids.begin()->second->getHits()[0] :
                               internal_ids.begin()->second->getHits()[0]);
      current_accessions = hit.extractProteinAccessionsSet();
      protein_accessions.insert(current_accessions.begin(),
                                current_accessions.end());
      // missing protein accession would crash OpenSWATH algorithms:
      if (current_accessions.empty())
      {
        current_accessions.insert("not_available");
      }
      peptide.protein_refs = vector<String>(current_accessions.begin(),
                                            current_accessions.end());
      // get regions in RT which peptide eludes (ideally only one):
      std::vector<RTRegion> rt_regions;
      getRTRegions_(pm_it->second, rt_regions, clear_IDs);
      // note: IM values are stored in the PeptideIdentifications* for the different
      // peptides, charges, and regions

      // get isotope distribution for peptide:
      Size n_isotopes = (isotope_pmin_ > 0.0) ? 10 : n_isotopes_;
      IsotopeDistribution iso_dist =
        seq.getFormula(Residue::Full, 0).getIsotopeDistribution(CoarseIsotopePatternGenerator(n_isotopes));
      if (isotope_pmin_ > 0.0)
      {
        iso_dist.trimLeft(isotope_pmin_);
        iso_dist.trimRight(isotope_pmin_);
        iso_dist.renormalize();
      }
      // go through different charge states:
      for (ChargeMap::const_iterator cm_it = pm_it->second.begin();
           cm_it != pm_it->second.end(); ++cm_it)
      {
        Int charge = cm_it->first;
        double mz = seq.getMZ(charge);
        OPENMS_LOG_DEBUG << "\nPeptide " << peptide.sequence << "/" << charge << " (m/z: " << mz << "):" << endl;
        peptide.setChargeState(charge);
        String peptide_id = peptide.sequence + "/" + String(charge);
        // we want to detect one feature per peptide and charge state - if there
        // are multiple RT regions, group them together:
        peptide.setPeptideGroupLabel(peptide_id);
        peptide.rts.clear();
        Size counter = 0; // region counter, used to disambiguate assay IDs
        // accumulate IDs over multiple regions:
        RTMap &internal_ids = ref_rt_map[peptide_id].first;
        RTMap &external_ids = ref_rt_map[peptide_id].second;
        for (RTRegion& reg : rt_regions)
        {
          if (reg.ids.count(charge))
          {
            OPENMS_LOG_DEBUG_NOFILE << "Charge " << charge << ", Region# " << counter + 1 << " (RT: "
                                    << float(reg.start) << "-" << float(reg.end)
                                    << ", size " << float(reg.end - reg.start) << ")"
                                    << endl;
            peptide.id = peptide_id;
            if (rt_regions.size() > 1)
              peptide.id += ":" + String(++counter); // region suffix, e.g. "PEP/2:1"
            // store beginning and end of RT region:
            peptide.rts.clear();
            addPeptideRT_(peptide, reg.start);
            addPeptideRT_(peptide, reg.end);
            // determine IM statistics (median, min, max)
            // for the peptide and current charge state in the region
            // (Note: because it is the same peptide and charge state the IM should not differ that much)
            // Check >= 0.0 to match ChromatogramExtractor's IM filtering logic
            IMStats im_stats = getRTRegionIMStats_(reg);
            if (im_stats.median >= 0.0)
            {
              peptide.setDriftTime(im_stats.median); // use median (more robust than mean)
              im_stats_[peptide.id] = im_stats; // store for later annotation
            }
            else
            {
              // Reset drift time to -1 (no IM filtering) - peptide object is reused
              peptide.setDriftTime(-1.0);
            }
            library_.addPeptide(peptide);
            generateTransitions_(peptide.id, mz, charge, iso_dist);
          }
          // IDs are collected even when no assay was created for this charge/region:
          internal_ids.insert(reg.ids[charge].first.begin(),
                              reg.ids[charge].first.end());
          external_ids.insert(reg.ids[charge].second.begin(),
                              reg.ids[charge].second.end());
        }
      }
    }
  }
  // add proteins to library:
  for (String const &acc : protein_accessions)
  {
    TargetedExperiment::Protein protein;
    protein.id = acc;
    library_.addProtein(protein);
  }
}
// extract RT regions of identified peptides (from charge map)
void FeatureFinderIdentificationAlgorithm::getRTRegions_(
  ChargeMap& peptide_data,
  std::vector<RTRegion>& rt_regions,
  bool clear_IDs) const
{
  // Pool the RTs from every charge state (internal and external IDs) of this
  // peptide to get a more complete elution picture:
  std::vector<double> all_rts;
  for (auto& charge_entry : peptide_data)
  {
    for (auto& id_entry : charge_entry.second.first) // "internal" IDs
    {
      all_rts.push_back(id_entry.first);
    }
    for (auto& id_entry : charge_entry.second.second) // "external" IDs
    {
      all_rts.push_back(id_entry.first);
    }
  }
  sort(all_rts.begin(), all_rts.end());

  // Merge overlapping +/- tolerance windows around each RT into regions:
  const double tol = rt_window_ / 2.0;
  for (double rt : all_rts)
  {
    if (!rt_regions.empty() && (rt_regions.back().end >= rt - tol))
    {
      rt_regions.back().end = rt + tol; // window overlaps -> extend last region
    }
    else
    {
      rt_regions.push_back({rt - tol, rt + tol, ChargeMap()}); // start new region
    }
  }

  // Sort the peptide IDs into the regions:
  for (auto& charge_entry : peptide_data)
  {
    const int charge = charge_entry.first;
    // regions are sorted by RT, as are IDs, so a single linear pass suffices:
    auto region = rt_regions.begin();
    // "internal" IDs:
    for (auto& id_entry : charge_entry.second.first)
    {
      // advance to the region containing this RT:
      while (id_entry.first > region->end)
      {
        ++region;
      }
      region->ids[charge].first.insert(id_entry);
    }
    region = rt_regions.begin(); // restart the pass for the external IDs
    // "external" IDs:
    for (auto& id_entry : charge_entry.second.second)
    {
      while (id_entry.first > region->end)
      {
        ++region;
      }
      region->ids[charge].second.insert(id_entry);
    }
    if (clear_IDs)
    {
      // ID references no longer needed (now stored in the RT regions):
      charge_entry.second.first.clear();
      charge_entry.second.second.clear();
    }
  }
}
void FeatureFinderIdentificationAlgorithm::addPeptideRT_(TargetedExperiment::Peptide& peptide, double rt) const
{
  // Append @p rt as a normalized retention time entry to the peptide's RT list.
  peptide.rts.emplace_back();
  TargetedExperiment::RetentionTime& entry = peptide.rts.back();
  entry.setRT(rt);
  entry.retention_time_type = TargetedExperimentHelper::RetentionTime::RTType::NORMALIZED;
}
/// Generate transitions (one per isotopic trace) for a peptide ion and add them
/// to the assay library; also records each transition's isotope probability.
void FeatureFinderIdentificationAlgorithm::generateTransitions_(
  const String& peptide_id,
  double mz,
  Int charge,
  const IsotopeDistribution& iso_dist)
{
  Size iso_index = 0; // 0-based isotope index; annotations are 1-based ("i1", "i2", ...)
  for (const Peak1D& iso_peak : iso_dist)
  {
    const String annotation = "i" + String(iso_index + 1);
    const String transition_name = peptide_id + "_" + annotation;

    ReactionMonitoringTransition transition;
    transition.setNativeID(transition_name);
    transition.setPrecursorMZ(mz);
    // product m/z of the isotope trace: precursor shifted by iso_index * C13/C12 mass difference
    transition.setProductMZ(mz + Constants::C13C12_MASSDIFF_U * float(iso_index) / charge);
    transition.setLibraryIntensity(iso_peak.getIntensity());
    transition.setMetaValue("annotation", annotation);
    transition.setPeptideRef(peptide_id);
    //TODO what about transition charge? A lot of DIA scores depend on it and default to charge 1 otherwise.
    library_.addTransition(transition);
    isotope_probs_[transition_name] = iso_peak.getIntensity();
    ++iso_index;
  }
}
void FeatureFinderIdentificationAlgorithm::annotateFeaturesFinalizeAssay_(
  FeatureMap& features, map<Size, vector<PeptideIdentification*> >& feat_ids,
  RTMap& rt_internal)
{
  // Pick the one feature of the assay with the most matching IDs, mark it
  // "positive" and attach its IDs; all remaining internal IDs of the RT region
  // are stored as unassigned. Clears both input maps afterwards.
  set<PeptideIdentification*> assigned_ids;
  if (!feat_ids.empty())
  {
    // find the "best" feature (most IDs; ties broken by intensity):
    Size best_index = 0;
    Size best_count = 0;
    for (const auto& [index, ids] : feat_ids)
    {
      const Size count = ids.size();
      const bool more_ids = (count > best_count);
      const bool tie_wins = (count == best_count) &&
        (features[index].getIntensity() > features[best_index].getIntensity());
      if (more_ids || tie_wins)
      {
        best_count = count;
        best_index = index;
      }
    }
    if (best_count > 0)
    {
      // we define the (one) feature with most matching IDs as correct:
      Feature& best = features[best_index];
      const vector<PeptideIdentification*>& best_ids = feat_ids[best_index];
      best.setMetaValue("feature_class", "positive");
      best.getPeptideIdentifications().resize(best_count);
      for (Size i = 0; i < best_count; ++i)
      {
        best.getPeptideIdentifications()[i] = *(best_ids[i]);
      }
      assigned_ids.insert(best_ids.begin(), best_ids.end());
    }
  }
  // store unassigned IDs from the current RT region:
  for (const auto& [rt, id_ptr] : rt_internal)
  {
    if (assigned_ids.find(id_ptr) == assigned_ids.end())
    {
      features.getUnassignedPeptideIdentifications().push_back(*id_ptr);
    }
  }
  // clean-up:
  feat_ids.clear();
  rt_internal.clear();
}
/// Annotate identified features with m/z, charge, convex hulls, isotope
/// probabilities, IM statistics and ID-based QC meta values; classify each
/// feature ("positive"/"ambiguous"/"negative"/"unknown") and collect unassigned
/// peptide IDs. Features are assumed grouped by assay ("PeptideRef") so that
/// per-assay state can be finalized whenever the ref changes.
void FeatureFinderIdentificationAlgorithm::annotateFeatures_(FeatureMap& features, PeptideRefRTMap& ref_rt_map)
{
  String previous_ref, peptide_ref;
  RTMap transformed_internal; // internal IDs with RT-transformed keys (lazily built per assay)
  Size i = 0; // index of the current feature (parallel to the range-for below)
  map<Size, vector<PeptideIdentification*> > feat_ids; // feature index -> matching internal IDs
  for (Feature& feat : features)
  {
    feat.setMZ(feat.getMetaValue("PrecursorMZ"));
    feat.setCharge(feat.getPeptideIdentifications()[0].getHits()[0].
                   getCharge());
    ensureConvexHulls_(feat);
    // remove "fake" IDs generated by OpenSWATH (they would be removed with
    // a warning when writing output, because of missing protein
    // identification with corresponding identifier):
    feat.getPeptideIdentifications().clear();
    // annotate subordinates with theoretical isotope intensities:
    for (Feature& sub : feat.getSubordinates())
    {
      String native_id = sub.getMetaValue("native_id");
      sub.setMetaValue("isotope_probability", isotope_probs_[native_id]);
    }
    peptide_ref = feat.getMetaValue("PeptideRef");
    // Annotate feature with ion mobility statistics (if available)
    // This provides QC metrics for IM-enabled experiments:
    // - IM_median: robust central IM value used for chromatogram extraction
    // - IM_min/max: spread of IM distribution (large spread may indicate issues)
    // Note: Uses full peptide ref (with region number) as this is the key in im_stats_
    String full_peptide_ref = peptide_ref; // keep full ref with region number
    if (im_stats_.count(full_peptide_ref))
    {
      const IMStats& stats = im_stats_.at(full_peptide_ref);
      feat.setMetaValue("IM_median", stats.median);
      feat.setMetaValue("IM_min", stats.min);
      feat.setMetaValue("IM_max", stats.max);
    }
    // remove region number, if present:
    // (search for ':' only after the charge, i.e. past the last '/';
    // "+ 2" assumes the charge has at least one character after the slash)
    Size pos_slash = peptide_ref.rfind('/');
    Size pos_colon = peptide_ref.find(':', pos_slash + 2);
    peptide_ref = peptide_ref.substr(0, pos_colon);

    // assay boundary reached? finalize the previous assay's features/IDs:
    if (peptide_ref != previous_ref)
    {
      if (!previous_ref.empty())
      {
        annotateFeaturesFinalizeAssay_(
          features, feat_ids, ref_rt_map[previous_ref].first);
      }
      previous_ref = peptide_ref;
    }
    RTMap& rt_internal = ref_rt_map[peptide_ref].first;
    RTMap& rt_external = ref_rt_map[peptide_ref].second;
    if (rt_internal.empty() && rt_external.empty())
    {
      OPENMS_LOG_DEBUG << "PeptideRefs in RTMap:" << endl;
      for (const auto& rtm : ref_rt_map)
      {
        OPENMS_LOG_DEBUG << rtm.first << endl;
      }
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "RT internal and external are both empty for peptide '" + String(peptide_ref) + "' stored as '" + String(feat.getMetaValue("PeptideRef")) + "'.");
    }
    if (!rt_internal.empty()) // validate based on internal IDs
    {
      // map IDs to features (based on RT):
      double rt_min = features[i].getMetaValue("leftWidth");
      double rt_max = features[i].getMetaValue("rightWidth");
      std::tie(rt_min, rt_max) = calculateRTBounds_(rt_min, rt_max);
      RTMap::const_iterator lower = rt_internal.lower_bound(rt_min);
      RTMap::const_iterator upper = rt_internal.upper_bound(rt_max);
      int id_count = 0;
      for (; lower != upper; ++lower)
      {
        feat_ids[i].push_back(lower->second);
        ++id_count;
      }
      // "total" only includes IDs from this RT region:
      feat.setMetaValue("n_total_ids", rt_internal.size());
      feat.setMetaValue("n_matching_ids", id_count);
      if (id_count > 0) // matching IDs -> feature may be correct
      {
        feat.setMetaValue("feature_class", "ambiguous");
      }
      else // no matching IDs -> feature is wrong
      {
        feat.setMetaValue("feature_class", "negative");
      }
    }
    else // only external IDs -> no validation possible
    {
      // Set feature class to unknown
      feat.setMetaValue("n_total_ids", 0);
      feat.setMetaValue("n_matching_ids", -1);
      feat.setMetaValue("feature_class", "unknown");
      // Add a dummy peptide identification from external data
      if (!rt_external.empty())
      {
        PeptideIdentification id = *(rt_external.begin()->second);
        id.clearMetaInfo();
        id.setMetaValue("FFId_category", "implied");
        id.setRT(feat.getRT());
        id.setMZ(feat.getMZ());
        // only one peptide hit per ID - see function "addPeptideToMap_":
        PeptideHit& hit = id.getHits()[0];
        hit.clearMetaInfo();
        hit.setScore(0.0);
        feat.getPeptideIdentifications().push_back(id);
      }
    }
    // distance from feature to closest peptide ID:
    if (external_id_handler_.hasRTTransformation())
    {
      // use external IDs if available, otherwise RT-transformed internal IDs
      // (but only compute the transform if necessary, once per assay!):
      if (rt_external.empty() && (transformed_internal.empty() ||
                                  (peptide_ref != previous_ref)))
      {
        transformed_internal.clear();
        for (RTMap::const_iterator it = rt_internal.begin();
             it != rt_internal.end(); ++it)
        {
          double transformed_rt = external_id_handler_.transformRT(it->first);
          RTMap::value_type pair = make_pair(transformed_rt, it->second);
          // insertion hint "end()": transformed RTs arrive in ascending order
          transformed_internal.insert(transformed_internal.end(), pair);
        }
      }
      const RTMap& rt_ref = (rt_external.empty() ? transformed_internal :
                             rt_external);
      double rt_min = feat.getMetaValue("leftWidth");
      double rt_max = feat.getMetaValue("rightWidth");
      std::tie(rt_min, rt_max) = calculateRTBounds_(rt_min, rt_max);
      RTMap::const_iterator lower = rt_ref.lower_bound(rt_min);
      RTMap::const_iterator upper = rt_ref.upper_bound(rt_max);
      if (lower != upper) // there's at least one ID within the feature
      {
        feat.setMetaValue("rt_delta", 0.0);
      }
      else // check closest ID
      {
        // distance from the ID just before the feature start:
        double rt_delta1 = numeric_limits<double>::infinity();
        if (lower != rt_ref.begin())
        {
          rt_delta1 = fabs((--lower)->first - rt_min);
        }
        // distance from the first ID after the feature:
        // NOTE(review): this uses rt_min, although the distance to the
        // feature END (rt_max) looks intended here - confirm before changing.
        double rt_delta2 = numeric_limits<double>::infinity();
        if (upper != rt_ref.end())
        {
          rt_delta2 = fabs(upper->first - rt_min);
        }
        feat.setMetaValue("rt_delta", min(rt_delta1, rt_delta2));
      }
    }
    ++i;
  }
  // set of features from the last assay:
  annotateFeaturesFinalizeAssay_(features, feat_ids,
                                 ref_rt_map[peptide_ref].first);
  // store unassigned peptide IDs from assays that did not generate any
  // feature candidates:
  for (PeptideRefRTMap::iterator ref_it = ref_rt_map.begin();
       ref_it != ref_rt_map.end(); ++ref_it)
  {
    RTMap& rt_internal = ref_it->second.first;
    if (!rt_internal.empty()) // not cleared by '...FinalizeAssay()'
    {
      for (RTMap::const_iterator rt_it = rt_internal.begin();
           rt_it != rt_internal.end(); ++rt_it)
      {
        const PeptideIdentification& pep_id = *(rt_it->second);
        features.getUnassignedPeptideIdentifications().push_back(pep_id);
      }
    }
  }
}
void FeatureFinderIdentificationAlgorithm::ensureConvexHulls_(Feature& feature) const
{
  // If the feature has no convex hulls yet, add one rectangular hull per mass
  // trace (subordinate), spanning the feature's RT width and the m/z window.
  if (!feature.getConvexHulls().empty())
  {
    return; // hulls already present
  }
  const double rt_min = feature.getMetaValue("leftWidth");
  const double rt_max = feature.getMetaValue("rightWidth");
  for (Feature& sub : feature.getSubordinates())
  {
    double abs_mz_tol = mz_window_ / 2.0;
    if (mz_window_ppm_) // window given in ppm -> convert to absolute tolerance
    {
      abs_mz_tol = sub.getMZ() * abs_mz_tol * 1.0e-6;
    }
    const double mz_lo = sub.getMZ() - abs_mz_tol;
    const double mz_hi = sub.getMZ() + abs_mz_tol;
    ConvexHull2D hull;
    hull.addPoint(DPosition<2>(rt_min, mz_lo));
    hull.addPoint(DPosition<2>(rt_min, mz_hi));
    hull.addPoint(DPosition<2>(rt_max, mz_lo));
    hull.addPoint(DPosition<2>(rt_max, mz_hi));
    feature.getConvexHulls().push_back(hull);
  }
}
/// Sort a peptide identification into the peptide map, keyed by (sequence, charge).
/// Keeps only the best hit; decoys (if not quantified) and hits failing the PSM
/// score cutoff go to unassignedIDs_ instead. If a "CalcMass" meta value deviates
/// from the theoretical m/z, an unlocalized modification is added to the sequence.
/// @param[in,out] peptide The identification (hits are sorted and truncated to one)
/// @param[out] peptide_map Map receiving the (RT, ID pointer) entry
/// @param[in] external Whether the ID comes from an external run (stored in the
///            second RTMap of the pair instead of the first)
void FeatureFinderIdentificationAlgorithm::addPeptideToMap_(PeptideIdentification& peptide, PeptideMap& peptide_map, bool external)
{
  if (peptide.getHits().empty())
  {
    return; // nothing to add
  }
  // keep only the best-scoring hit:
  peptide.sort();
  PeptideHit& hit = peptide.getHits()[0];
  peptide.getHits().resize(1);
  // if we don't quantify decoys we don't add them to the peptide list
  if (!quantify_decoys_ && hit.isDecoy())
  {
    unassignedIDs_.push_back(peptide);
    return;
  }
  // apply optional PSM score cutoff (direction depends on score orientation):
  if (use_psm_cutoff_)
  {
    if ( (peptide.isHigherScoreBetter() && hit.getScore() < psm_score_cutoff_) ||
         (!peptide.isHigherScoreBetter() && hit.getScore() > psm_score_cutoff_) )
    {
      unassignedIDs_.push_back(peptide);
      return;
    }
  }
  Int charge = hit.getCharge();
  // precursor information
  double rt = peptide.getRT();
  double mz = peptide.getMZ();
  // meta value to forcefully overwrite m/z value with external one
  // to quantify this kind of data, we need to introduce a modification that matches the mass difference
  // we just start at the N-term and look for the first unmodified AA
  if (hit.metaValueExists("CalcMass"))
  {
    double diff_mz = (double)hit.getMetaValue("CalcMass") - hit.getSequence().getMZ(charge);
    double diff_mass = diff_mz * charge;
    if (fabs(diff_mass) > 0.01) // ignore sub-0.01 Da differences (rounding)
    {
      // FIX: the original message never closed the '(' and ran the CalcMass
      // value straight into "Assuming..."; emit "). " as separator instead.
      OPENMS_LOG_DEBUG_NOFILE << "Peptide m/z value and m/z of CalcMass meta value differ (" << hit.getSequence().getMZ(charge) << " / " << hit.getMetaValue("CalcMass")
                              << "). Assuming unspecified/unlocalized modification." << endl;
      AASequence seq = hit.getSequence(); // TODO: add ref version to PeptideHit
      // attach the mass difference to the first unmodified residue (from N-term):
      for (auto r = seq.begin(); r != seq.end(); ++r)
      {
        if (r->isModified()) continue;
        int residue_index = r - seq.begin();
        seq.setModificationByDiffMonoMass(residue_index, diff_mass);
        break;
      }
      hit.setSequence(std::move(seq));
    }
  }
  if (external)
  {
    OPENMS_LOG_DEBUG_NOFILE << "Adding peptide (external) " << hit.getSequence() << "; CHG: " << charge << "; RT: " << rt << "; MZ: " << mz << endl;
    peptide_map[hit.getSequence()][charge].second.emplace(rt, &peptide);
  }
  else
  {
    if (peptide.metaValueExists("SeedFeatureID"))
    {
      OPENMS_LOG_DEBUG_NOFILE << "Adding seed (internal) from FeatureID " << peptide.getMetaValue("SeedFeatureID") << ": " << hit.getSequence() << "; CHG: " << charge << "; RT: " << rt << "; MZ: " << mz << endl;
    }
    else
    {
      OPENMS_LOG_DEBUG_NOFILE << "Adding peptide (internal) " << hit.getSequence() << "; CHG: " << charge << "; RT: " << rt << "; MZ: " << mz << endl;
    }
    peptide_map[hit.getSequence()][charge].first.emplace(rt, &peptide); // place into multimap
  }
}
/// Reload all algorithm member variables from the current parameter object.
/// Called by the Param framework whenever parameters change.
void FeatureFinderIdentificationAlgorithm::updateMembers_()
{
  // peak detection:
  peak_width_ = param_.getValue("detect:peak_width");
  min_peak_width_ = param_.getValue("detect:min_peak_width");
  signal_to_noise_ = param_.getValue("detect:signal_to_noise");
  // chromatogram extraction:
  batch_size_ = param_.getValue("extract:batch_size");
  rt_quantile_ = param_.getValue("extract:rt_quantile");
  rt_window_ = param_.getValue("extract:rt_window");
  mz_window_ = param_.getValue("extract:mz_window");
  mz_window_ppm_ = mz_window_ >= 1; // values >= 1 are interpreted as ppm, smaller ones as Da
  isotope_pmin_ = param_.getValue("extract:isotope_pmin");
  n_isotopes_ = param_.getValue("extract:n_isotopes");
  mapping_tolerance_ = param_.getValue("detect:mapping_tolerance");
  elution_model_ = param_.getValue("model:type").toString();
  // SVM related parameters
  svm_min_prob_ = param_.getValue("svm:min_prob");
  svm_predictor_names_ = ListUtils::create<String>(param_.getValue("svm:predictors").toString());
  svm_xval_out_ = param_.getValue("svm:xval_out").toString();
  // NOTE(review): reads the same "svm:min_prob" parameter as svm_min_prob_
  // above - presumably intentional aliasing, but worth confirming.
  svm_quality_cutoff = param_.getValue("svm:min_prob");
  svm_n_parts_ = param_.getValue("svm:xval");
  svm_n_samples_ = param_.getValue("svm:samples");
  // debug
  debug_level_ = param_.getValue("debug");
  candidates_out_ = param_.getValue("candidates_out").toString();
  // quantification of decoys
  quantify_decoys_ = param_.getValue("quantify_decoys").toBool();
  // PSM score cutoff is optional ("none" disables it):
  use_psm_cutoff_ = param_.getValue("min_psm_cutoff") != "none";
  if (use_psm_cutoff_)
  {
    psm_score_cutoff_ = double(param_.getValue("min_psm_cutoff"));
  }
  add_mass_offset_peptides_ = double(param_.getValue("add_mass_offset_peptides"));
}
void FeatureFinderIdentificationAlgorithm::filterFeatures_(OpenMS::FeatureMap& features, bool classified)
{
  // Remove unwanted candidate features from the map.
  if (features.empty())
  {
    return;
  }
  if (classified)
  {
    // The classified case is handled by ExternalIDHandler::filterClassifiedFeatures
    // in the postProcess_ method - nothing to do here.
    return;
  }
  // Non-classified: drop features without ID (or with pseudo IDs from seeds)
  // using the erase-remove idiom:
  auto new_end = std::remove_if(features.begin(), features.end(),
                                feature_filter_peptides_);
  features.erase(new_end, features.end());
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/MultiplexDeltaMasses.cpp | .cpp | 2,314 | 89 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Lars Nilse $
// $Authors: Lars Nilse $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/MultiplexDeltaMasses.h>
#include <sstream>
#include <utility>
using namespace std;
namespace OpenMS
{
/// Construct a DeltaMass from a mass shift and a full label set (moved in).
MultiplexDeltaMasses::DeltaMass::DeltaMass(double dm, LabelSet ls) :
  delta_mass(dm), label_set(std::move(ls))
{
}
/// Construct a DeltaMass from a mass shift and a single label
/// (the label set then contains exactly that one label).
MultiplexDeltaMasses::DeltaMass::DeltaMass(double dm, const String& l) :
  delta_mass(dm), label_set()
{
  label_set.insert(l);
}
/// Default constructor: empty list of delta masses.
MultiplexDeltaMasses::MultiplexDeltaMasses() = default;
/// Construct from an existing vector of delta masses (copied).
MultiplexDeltaMasses::MultiplexDeltaMasses(const vector<MultiplexDeltaMasses::DeltaMass>& dm) :
  delta_masses_(dm)
{
}
/// Mutable access to the list of delta masses.
std::vector<MultiplexDeltaMasses::DeltaMass>& MultiplexDeltaMasses::getDeltaMasses()
{
  return delta_masses_;
}
/// Read-only access to the list of delta masses.
const std::vector<MultiplexDeltaMasses::DeltaMass>& MultiplexDeltaMasses::getDeltaMasses() const
{
  return delta_masses_;
}
/// Render a label set as a single space-separated string.
String MultiplexDeltaMasses::labelSetToString(const MultiplexDeltaMasses::LabelSet& ls)
{
  std::stringstream ss;
  bool first = true;
  for (const auto& label : ls)
  {
    if (!first)
    {
      ss << " "; // separator between labels
    }
    ss << label;
    first = false;
  }
  return String(ss.str());
}
bool operator<(const MultiplexDeltaMasses &dm1, const MultiplexDeltaMasses &dm2)
{
  const auto& masses1 = dm1.getDeltaMasses();
  const auto& masses2 = dm2.getDeltaMasses();
  // Different sizes: complete multiplets (more masses) sort before knock-out cases.
  if (masses1.size() != masses2.size())
  {
    return masses1.size() > masses2.size();
  }
  // Same size: compare mass shifts relative to the respective first entry.
  for (unsigned i = 0; i < masses1.size(); ++i)
  {
    double shift1 = masses1[i].delta_mass - masses1[0].delta_mass;
    double shift2 = masses2[i].delta_mass - masses2[0].delta_mass;
    if (shift1 != shift2)
    {
      // Search first for cases without miscleavages.
      return shift1 < shift2;
    }
  }
  return false; // equivalent
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/EmgModel.cpp | .cpp | 3,846 | 120 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/EmgModel.h>
#include <OpenMS/CONCEPT/Constants.h>
namespace OpenMS
{
/// Default constructor: registers the model name and all default parameters
/// (bounding box, statistics, and EMG shape parameters).
EmgModel::EmgModel() :
  InterpolationModel()
{
  setName("EmgModel");

  defaults_.setValue("bounding_box:min", 0.0f, "Lower end of bounding box enclosing the data used to fit the model.", {"advanced"});
  defaults_.setValue("bounding_box:max", 1.0f, "Upper end of bounding box enclosing the data used to fit the model.", {"advanced"});
  defaults_.setValue("statistics:mean", 0.0f, "Centroid position of the model.", {"advanced"});
  defaults_.setValue("statistics:variance", 1.0f, "The variance of the model.", {"advanced"});
  defaults_.setValue("emg:height", 100000.0f, "Height of the exponentially modified Gaussian.", {"advanced"});
  defaults_.setValue("emg:width", 5.0f, "Width of the exponentially modified Gaussian.", {"advanced"});
  defaults_.setValue("emg:symmetry", 5.0f, "Symmetry of the exponentially modified Gaussian.", {"advanced"});
  defaults_.setValue("emg:retention", 1200.0f, "Retention time of the exponentially modified Gaussian.", {"advanced"});

  defaultsToParam_(); // push defaults into param_ and trigger updateMembers_()
}
/// Copy constructor: copies the base class and the parameter set, then
/// recomputes all derived members (including the sampled data).
EmgModel::EmgModel(const EmgModel & source) :
  InterpolationModel(source)
{
  setParameters(source.getParameters());
  updateMembers_();
}
/// Destructor (no resources beyond the base class).
EmgModel::~EmgModel() = default;
/// Assignment: copies base class state and parameters, then refreshes all
/// derived members. Self-assignment is a no-op.
EmgModel & EmgModel::operator=(const EmgModel & source)
{
  if (this != &source)
  {
    InterpolationModel::operator=(source);
    setParameters(source.getParameters());
    updateMembers_();
  }
  return *this;
}
/// Sample the (simplified) EMG function over [min_, max_) in steps of
/// interpolation_step_ and store the values in the interpolation container.
void EmgModel::setSamples()
{
  LinearInterpolation::container_type & data = interpolation_.getData();
  data.clear();
  if (max_ == min_)
  {
    return; // empty bounding box -> no samples
  }
  data.reserve(UInt((max_ - min_) / interpolation_step_ + 1));
  CoordinateType pos = min_;

  // loop-invariant parts of the simplified EMG formula:
  double sqrt_2pi = sqrt(2 * Constants::PI);
  // -2.4055/sqrt(2): empirical constant of the simplified EMG approximation
  // (origin not documented here)
  double term_sq2 = (-2.4055 / sqrt(2.0));
  double part1 = (height_ * width_ / symmetry_);
  double part2 = pow(width_, 2) / (2 * pow(symmetry_, 2));
  double part3 = width_ / symmetry_;

  // note: the condition tests 'pos' from the PREVIOUS iteration, so one
  // sample at/after max_ is still appended
  for (UInt i = 0; pos < max_; ++i)
  {
    pos = min_ + i * interpolation_step_;
    double diff = pos - retention_;
    // data.push_back (Simplified EMG)
    data.push_back((part1 * sqrt_2pi * exp(part2 - (diff / symmetry_)) / (1 + exp(term_sq2 * ((diff / width_) - part3)))));
  }

  interpolation_.setScale(interpolation_step_);
  interpolation_.setOffset(min_);
}
/// Shift the model to a new offset: bounding box and mean are moved by the
/// same delta, and the changed values are written back into param_.
void EmgModel::setOffset(CoordinateType offset)
{
  // delta must be computed BEFORE the base class updates its offset:
  double diff = offset - getInterpolation().getOffset();
  min_ += diff;
  max_ += diff;
  statistics_.setMean(statistics_.mean() + diff);

  InterpolationModel::setOffset(offset);

  // keep the parameter object in sync with the shifted members:
  param_.setValue("bounding_box:min", min_);
  param_.setValue("bounding_box:max", max_);
  param_.setValue("statistics:mean", statistics_.mean());
}
/// Returns the center of the model, i.e. the mean of its statistics.
EmgModel::CoordinateType EmgModel::getCenter() const
{
  return statistics_.mean();
}
/// Re-read all model members from param_ and regenerate the sampled data.
void EmgModel::updateMembers_()
{
  InterpolationModel::updateMembers_();

  min_ = param_.getValue("bounding_box:min");
  max_ = param_.getValue("bounding_box:max");
  statistics_.setMean(param_.getValue("statistics:mean"));
  statistics_.setVariance(param_.getValue("statistics:variance"));
  height_ = param_.getValue("emg:height");
  width_ = param_.getValue("emg:width");
  symmetry_ = param_.getValue("emg:symmetry");
  retention_ = param_.getValue("emg:retention");

  setSamples(); // members changed -> resample the model
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/EmgScoring.cpp | .cpp | 370 | 13 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
namespace OpenMS
{
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/MultiplexSatelliteProfile.cpp | .cpp | 838 | 36 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Lars Nilse $
// $Authors: Lars Nilse $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/MultiplexFilteringProfile.h>
using namespace std;
namespace OpenMS
{
/// Construct a satellite data point from its RT, m/z and intensity.
MultiplexSatelliteProfile::MultiplexSatelliteProfile(float rt, double mz, float intensity) :
  rt_(rt), mz_(mz), intensity_(intensity)
{
}
/// Returns the retention time of the satellite data point.
float MultiplexSatelliteProfile::getRT() const
{
  return rt_;
}
/// Returns the m/z of the satellite data point.
double MultiplexSatelliteProfile::getMZ() const
{
  return mz_;
}
/// Returns the intensity of the satellite data point.
float MultiplexSatelliteProfile::getIntensity() const
{
  return intensity_;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/BiGaussModel.cpp | .cpp | 3,799 | 121 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/BiGaussModel.h>
namespace OpenMS
{
  // Default C'tor: registers the model parameters (bounding box, shared mean, and
  // one variance per half of the bi-gaussian) and syncs them via defaultsToParam_().
  BiGaussModel::BiGaussModel() :
    InterpolationModel(), statistics1_(), statistics2_()
  {
    setName("BiGaussModel");
    defaults_.setValue("bounding_box:min", 0.0, "Lower end of bounding box enclosing the data used to fit the model.", {"advanced"});
    defaults_.setValue("bounding_box:max", 1.0, "Upper end of bounding box enclosing the data used to fit the model.", {"advanced"});
    defaults_.setValue("statistics:mean", 0.0, "Centroid position of the model, this also separates both halves of the model.", {"advanced"});
    defaults_.setValue("statistics:variance1", 1.0, "Variance of the first gaussian, used for the lower half of the model.", {"advanced"});
    defaults_.setValue("statistics:variance2", 1.0, "Variance of the second gaussian, used for the upper half of the model.", {"advanced"});
    defaultsToParam_();
  }
  // Copy C'tor: copies base state, then re-applies the source parameters so that
  // updateMembers_() recomputes this instance's interpolation samples.
  BiGaussModel::BiGaussModel(const BiGaussModel & source) :
    InterpolationModel(source)
  {
    setParameters(source.getParameters());
    updateMembers_();
  }
  // Destructor: nothing to release beyond the members' own destructors.
  BiGaussModel::~BiGaussModel() = default;
  // Assignment: copies base state and parameters, then recomputes the samples.
  BiGaussModel & BiGaussModel::operator=(const BiGaussModel & source)
  {
    // self-assignment would needlessly recompute the interpolation samples
    if (&source == this)
    {
      return *this;
    }
    InterpolationModel::operator=(source);
    setParameters(source.getParameters());
    updateMembers_();
    return *this;
  }
  // Sample the bi-gaussian on an equidistant grid over [min_, max_]: positions below
  // the mean use statistics1_ (lower half), positions at/above it use statistics2_.
  // The samples are then normalized so the rectangular approximation of the integral
  // equals scaling_.
  void BiGaussModel::setSamples()
  {
    LinearInterpolation::container_type & data = interpolation_.getData();
    data.clear();
    if (max_ == min_)
    {
      return; // degenerate bounding box -> no samples
    }
    data.reserve(UInt((max_ - min_) / interpolation_step_ + 1));
    CoordinateType pos = min_;
    // note: pos is recomputed from i each iteration to avoid accumulating rounding error
    for (UInt i = 0; pos < max_; ++i)
    {
      pos = min_ + i * interpolation_step_;
      if (pos < statistics1_.mean())
      {
        data.push_back(statistics1_.normalDensity_sqrt2pi(pos));
      }
      else
      {
        data.push_back(statistics2_.normalDensity_sqrt2pi(pos));
      }
    }
    // scale data so that integral over distribution equals one
    // multiply sum by interpolation_step_ -> rectangular approximation of integral
    // NOTE(review): if all sampled densities are zero the accumulate sum is zero and
    // this division yields inf -- presumably cannot happen for valid variances; verify.
    IntensityType factor = scaling_ / interpolation_step_ /
                           std::accumulate(data.begin(), data.end(), IntensityType(0));
    for (auto& value : data)
    {
      value *= factor;
    }
    interpolation_.setScale(interpolation_step_);
    interpolation_.setOffset(min_);
  }
  // Pull the current parameter values into the members and re-sample the model.
  // Both halves share the same mean ("statistics:mean") but have separate variances.
  void BiGaussModel::updateMembers_()
  {
    InterpolationModel::updateMembers_();
    min_ = param_.getValue("bounding_box:min");
    max_ = param_.getValue("bounding_box:max");
    statistics1_.setMean(param_.getValue("statistics:mean"));
    statistics2_.setMean(param_.getValue("statistics:mean"));
    statistics1_.setVariance(param_.getValue("statistics:variance1"));
    statistics2_.setVariance(param_.getValue("statistics:variance2"));
    setSamples();
  }
  // Shift the whole model (bounding box and both means) by the delta between the new
  // offset and the current interpolation offset, then mirror the new values back into
  // param_ so parameters and members stay in sync.
  void BiGaussModel::setOffset(CoordinateType offset)
  {
    double diff = offset - getInterpolation().getOffset();
    min_ += diff;
    max_ += diff;
    statistics1_.setMean(statistics1_.mean() + diff);
    statistics2_.setMean(statistics2_.mean() + diff);
    InterpolationModel::setOffset(offset);
    param_.setValue("bounding_box:min", min_);
    param_.setValue("bounding_box:max", max_);
    param_.setValue("statistics:mean", statistics1_.mean());
  }
  // Returns the model centroid; statistics1_ and statistics2_ share the same mean
  // (both are set from "statistics:mean" in updateMembers_), so either would do.
  BiGaussModel::CoordinateType BiGaussModel::getCenter() const
  {
    return statistics2_.mean();
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/MultiplexFilteringCentroided.cpp | .cpp | 4,076 | 124 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Lars Nilse $
// $Authors: Lars Nilse $
// --------------------------------------------------------------------------
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/KERNEL/BaseFeature.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/IsotopeDistribution.h>
#include <OpenMS/PROCESSING/CENTROIDING/PeakPickerHiRes.h>
#include <OpenMS/FEATUREFINDER/MultiplexFilteringCentroided.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <utility>
#ifdef _OPENMP
#include <omp.h>
#endif
// #define DEBUG
using namespace std;
namespace OpenMS
{
  // Construct the centroided-data filter; all parameters are forwarded verbatim to the
  // MultiplexFiltering base (the averagine type is moved, not copied).
  MultiplexFilteringCentroided::MultiplexFilteringCentroided(const MSExperiment& exp_centroided, const std::vector<MultiplexIsotopicPeakPattern>& patterns, int isotopes_per_peptide_min, int isotopes_per_peptide_max, double intensity_cutoff, double rt_band, double mz_tolerance, bool mz_tolerance_unit, double peptide_similarity, double averagine_similarity, double averagine_similarity_scaling, String averagine_type) :
    MultiplexFiltering(exp_centroided, patterns, isotopes_per_peptide_min, isotopes_per_peptide_max, intensity_cutoff, rt_band, mz_tolerance, mz_tolerance_unit, peptide_similarity, averagine_similarity, averagine_similarity_scaling, std::move(averagine_type))
  {
  }
  // Apply each isotopic peak pattern to the (white) centroided experiment and return,
  // per pattern, the peaks that pass the position, averagine and peptide-correlation
  // filters. Peaks within one spectrum are filtered in parallel (OpenMP); additions to
  // the result and the blacklist are serialized via the critical section below.
  vector<MultiplexFilteredMSExperiment> MultiplexFilteringCentroided::filter()
  {
    // progress logger
    unsigned progress = 0;
    startProgress(0, patterns_.size() * exp_centroided_.size(), "filtering LC-MS data");
    // list of filter results for each peak pattern
    vector<MultiplexFilteredMSExperiment> filter_results;
#ifdef DEBUG
    // clock for monitoring run time performance
    unsigned int start = clock();
#endif
    // loop over all patterns
    for (unsigned pattern_idx = 0; pattern_idx < patterns_.size(); ++pattern_idx)
    {
      // current pattern
      // NOTE(review): this copies the pattern on every iteration; a const reference
      // would avoid the copy -- verify the filter_* signatures before changing.
      MultiplexIsotopicPeakPattern pattern = patterns_[pattern_idx];
      // data structure storing peaks which pass all filters for this pattern
      MultiplexFilteredMSExperiment result;
      // update white experiment
      updateWhiteMSExperiment_();
      // filter (white) experiment
      // loop over spectra
      for (const auto &it_rt : exp_centroided_white_)
      {
        // skip empty spectra
        if (it_rt.empty())
        {
          continue;
        }
        setProgress(++progress);
        double rt = it_rt.getRT();
        // spectrum index recovered via pointer arithmetic on the contiguous spectrum vector
        size_t idx_rt = &it_rt - &exp_centroided_white_[0];
        MSExperiment::ConstIterator it_rt_band_begin = exp_centroided_white_.RTBegin(rt - rt_band_/2);
        MSExperiment::ConstIterator it_rt_band_end = exp_centroided_white_.RTEnd(rt + rt_band_/2);
        // loop over m/z
        //for (MSSpectrum::ConstIterator it_mz = it_rt.begin(); it_mz != it_rt.end(); ++it_mz)
#pragma omp parallel for
        for (SignedSize s = 0; s < (SignedSize) it_rt.size(); s++)
        {
          auto& it_mz = it_rt[s];
          double mz = it_mz.getMZ();
          MultiplexFilteredPeak peak(mz, rt, exp_centroided_mapping_[idx_rt][s], idx_rt);
          // filters are ordered cheapest-first; the first failure skips the peak
          if (!(filterPeakPositions_(mz, exp_centroided_white_.begin(), it_rt_band_begin, it_rt_band_end, pattern, peak)))
          {
            continue;
          }
          if (!(filterAveragineModel_(pattern, peak)))
          {
            continue;
          }
          if (!(filterPeptideCorrelation_(pattern, peak)))
          {
            continue;
          }
          /**
           * All filters passed.
           */
#pragma omp critical
          {
            result.addPeak(peak);
            blacklistPeak_(peak, pattern_idx);
          };
        }
      }
      // add results of this pattern to list
      filter_results.push_back(result);
    }
    endProgress();
    return filter_results;
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/InterpolationModel.cpp | .cpp | 383 | 16 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
// Intentionally empty translation unit; the InterpolationModel implementation
// presumably lives in its header -- verify before removing this file from the build.
namespace OpenMS
{
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/EmgFitter1D.cpp | .cpp | 12,307 | 374 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/EmgFitter1D.h>

#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/FEATUREFINDER/EmgModel.h>
#include <OpenMS/FEATUREFINDER/InterpolationModel.h>
#include <OpenMS/MATH/StatisticFunctions.h>

#include <Eigen/Core>

#include <memory>
namespace OpenMS
{
  // Constants of the simplified EMG function (see the paper referenced below as
  // doi=10.1.1.915.3568, Eq. 9): sqrt(2*pi), the empirical EMG constant 2.4055,
  // sqrt(2), and the precomputed combined factor c = -2.4055 / sqrt(2).
  const EmgFitter1D::CoordinateType EmgFitter1D::EgmFitterFunctor::sqrt2pi = sqrt(2.0 * Constants::PI);
  const EmgFitter1D::CoordinateType EmgFitter1D::EgmFitterFunctor::emg_const = 2.4055;
  const EmgFitter1D::CoordinateType EmgFitter1D::EgmFitterFunctor::sqrt_2 = sqrt(2.0);
  const EmgFitter1D::CoordinateType EmgFitter1D::EgmFitterFunctor::c = -emg_const / sqrt_2;
  // Residual function for the LM solver: for each data point, evaluate the simplified
  // EMG model at the point's position and write (model - observed intensity) into fvec.
  // Parameter vector x = (height h, width w, symmetry s, retention time z).
  int EmgFitter1D::EgmFitterFunctor::operator()(const double* x, double* fvec) const
  {
    // Create Eigen::Map views for convenient indexing
    Eigen::Map<const Eigen::VectorXd> x_map(x, m_inputs);
    Eigen::Map<Eigen::VectorXd> fvec_map(fvec, m_values);
    Size n = m_data->n;
    EmgFitter1D::RawDataArrayType set = m_data->set;
    EmgFitter1D::CoordinateType h = x_map(0);
    EmgFitter1D::CoordinateType w = x_map(1);
    EmgFitter1D::CoordinateType s = x_map(2);
    EmgFitter1D::CoordinateType z = x_map(3);
    EmgFitter1D::CoordinateType Yi = 0.0;
    // loop-invariant parts of the EMG expression, hoisted out of the data loop
    double prefix = (h * w / s) * sqrt2pi;
    double part1 = w*w / (2 * s*s);
    double part2 = w / s;
    // iterate over all points of the signal
    for (Size i = 0; i < n; i++)
    {
      double t = set[i].getPos();
      double diff = t - z;
      // Simplified EMG (doi=10.1.1.915.3568) Equation 9
      Yi = prefix * exp(part1 - (diff / s)) / (1 + exp(c * ((diff / w) - part2)));
      fvec_map(i) = Yi - set[i].getIntensity();
    }
    return 0;
  }
  // compute Jacobian matrix for the different parameters
  // Analytic partial derivatives of the simplified EMG with respect to
  // h (height), w (width), s (symmetry) and z (retention time); one row per data point.
  int EmgFitter1D::EgmFitterFunctor::df(const double* x, double* J) const
  {
    // Create Eigen::Map views for convenient indexing
    Eigen::Map<const Eigen::VectorXd> x_map(x, m_inputs);
    Eigen::Map<Eigen::MatrixXd> J_map(J, m_values, m_inputs);
    Size n = m_data->n;
    EmgFitter1D::RawDataArrayType set = m_data->set;
    EmgFitter1D::CoordinateType h = x_map(0);
    EmgFitter1D::CoordinateType w = x_map(1);
    EmgFitter1D::CoordinateType w2 = w*w;
    EmgFitter1D::CoordinateType s = x_map(2);
    EmgFitter1D::CoordinateType s2 = s*s;
    EmgFitter1D::CoordinateType s3 = s2 * s;
    EmgFitter1D::CoordinateType z = x_map(3);
    EmgFitter1D::CoordinateType diff, exp1, exp2, exp3 = 0.0;
    EmgFitter1D::CoordinateType derivative_height, derivative_width, derivative_symmetry, derivative_retention = 0.0;
    // iterate over all points of the signal
    for (Size i = 0; i < n; i++)
    {
      EmgFitter1D::CoordinateType t = set[i].getPos();
      diff = t - z;
      // shared sub-expressions of the EMG and its derivatives
      exp1 = exp((w2 / (2 * s2)) - (diff / s));
      exp3 = exp((-emg_const / sqrt_2) * ((diff / w) - w / s));
      exp2 = 1 + exp3;
      // f'(h)
      derivative_height = w / s * sqrt2pi * exp1 / exp2;
      // f'(w)
      derivative_width = h / s * sqrt2pi * exp1 / exp2 + (h * w2) / s3 * sqrt2pi * exp1 / exp2 + (emg_const * h * w) / s * sqrt2pi * exp1 * (-diff / w2 - 1 / s) * exp3 / ((exp2 * exp2) * sqrt_2);
      // f'(s)
      derivative_symmetry = -h * w / s2 * sqrt2pi * exp1 / exp2 + h * w / s * sqrt2pi * (-(w * w) / s3 + diff / s2) * exp1 / exp2 + (emg_const * h * w2) / s3 * sqrt2pi * exp1 * exp3 / ((exp2 * exp2) * sqrt_2);
      // f'(z)
      derivative_retention = h * w / s2 * sqrt2pi * exp1 / exp2 - (emg_const * h) / s * sqrt2pi * exp1 * exp3 / ((exp2 * exp2) * sqrt_2);
      // set the jacobian matrix
      J_map(i, 0) = derivative_height;
      J_map(i, 1) = derivative_width;
      J_map(i, 2) = derivative_symmetry;
      J_map(i, 3) = derivative_retention;
    }
    return 0;
  }
  // Default C'tor: registers fitter-specific parameters (method-of-moments
  // initialization switch and the model variance) and syncs them into param_.
  EmgFitter1D::EmgFitter1D() :
    LevMarqFitter1D()
  {
    setName("EmgFitter1D");
    defaults_.setValue("init_mom", "false", "Initialize parameters using method of moments estimators.", {"advanced"});
    defaults_.setValidStrings("init_mom", {"true","false"});
    defaults_.setValue("statistics:variance", 1.0, "Variance of the model.", {"advanced"});
    defaultsToParam_();
  }
  // Copy C'tor: copies base state, then re-applies the source parameters so that
  // updateMembers_() refreshes the derived members of this instance.
  EmgFitter1D::EmgFitter1D(const EmgFitter1D& source) :
    LevMarqFitter1D(source)
  {
    setParameters(source.getParameters());
    updateMembers_();
  }
  // Destructor: nothing to release beyond the members' own destructors.
  EmgFitter1D::~EmgFitter1D() = default;
  // Assignment: copies base state and parameters, then refreshes derived members.
  EmgFitter1D& EmgFitter1D::operator=(const EmgFitter1D& source)
  {
    // self-assignment would needlessly re-run updateMembers_()
    if (&source == this)
    {
      return *this;
    }
    LevMarqFitter1D::operator=(source);
    setParameters(source.getParameters());
    updateMembers_();
    return *this;
  }
EmgFitter1D::QualityType EmgFitter1D::fit1d(const RawDataArrayType& set, std::unique_ptr<InterpolationModel>& model)
{
// Calculate bounding box
CoordinateType min_bb = set[0].getPos(), max_bb = set[0].getPos();
for (Size pos = 1; pos < set.size(); ++pos)
{
CoordinateType tmp = set[pos].getPos();
if (min_bb > tmp)
{
min_bb = tmp;
}
if (max_bb < tmp)
{
max_bb = tmp;
}
}
// Enlarge the bounding box by a few multiples of the standard deviation
const CoordinateType stdev = sqrt(statistics_.variance()) * tolerance_stdev_box_;
min_bb -= stdev;
max_bb += stdev;
// Set advanced parameters for residual_ und jacobian_ method
EmgFitter1D::Data d;
d.n = set.size();
d.set = set;
// Compute start parameters
setInitialParameters_(set);
// Optimize parameter with Levenberg-Marquardt algorithm
std::vector<double> x_init(4);
x_init[0] = height_;
x_init[1] = width_;
x_init[2] = symmetry_;
x_init[3] = retention_;
if (!symmetric_)
{
EgmFitterFunctor functor(4, &d);
optimize_(x_init, functor);
}
// Set optimized parameters
height_ = x_init[0];
width_ = x_init[1];
symmetry_ = x_init[2];
retention_ = x_init[3];
// build model
model = std::unique_ptr<InterpolationModel>(new EmgModel());
model->setInterpolationStep(interpolation_step_);
Param tmp;
tmp.setValue("bounding_box:min", min_bb);
tmp.setValue("bounding_box:max", max_bb);
tmp.setValue("statistics:variance", statistics_.variance());
tmp.setValue("statistics:mean", statistics_.mean());
tmp.setValue("emg:height", height_);
tmp.setValue("emg:width", width_);
tmp.setValue("emg:symmetry", symmetry_);
tmp.setValue("emg:retention", retention_);
model->setParameters(tmp);
// calculate pearson correlation
std::vector<float> real_data;
real_data.reserve(set.size());
std::vector<float> model_data;
model_data.reserve(set.size());
for (Size i = 0; i < set.size(); ++i)
{
real_data.push_back(set[i].getIntensity());
model_data.push_back(model->getIntensity(DPosition<1>(set[i].getPosition())));
}
QualityType correlation = Math::pearsonCorrelationCoefficient(real_data.begin(), real_data.end(), model_data.begin(), model_data.end());
if (std::isnan(correlation))
{
correlation = -1.0;
}
return correlation;
}
void EmgFitter1D::setInitialParametersMOM_(const RawDataArrayType& set)
{
std::vector<CoordinateType> weighted;
weighted.reserve(set.size());
CoordinateType weighted_sum = 0.;
CoordinateType weight_sum = 0.;
for (Size s = 0 ; s < set.size() ; ++s)
{
weighted_sum += set[s].getPos() * set[s].getIntensity();
weight_sum += set[s].getIntensity();
}
CoordinateType weighted_mean = weighted_sum / weight_sum;
int weighted_median_idx = 0;
double sum = weight_sum - set[0].getIntensity(); // sum is the total weight of all `x[i] > x[k]`
while (sum > weight_sum/2.)
{
++weighted_median_idx;
sum -= set[weighted_median_idx].getIntensity();
}
CoordinateType weighted_median = set[weighted_median_idx].getPos();
CoordinateType weighted_sd = 0.;
for (Size s = 0 ; s < set.size() ; ++s)
{
weighted_sd += std::pow(weighted_mean - set[s].getPos(), 2) * set[s].getIntensity();
}
weighted_sd /= weight_sum;
weighted_sd = std::sqrt(weighted_sd);
CoordinateType weighted_skew = std::fabs(weighted_mean - weighted_median) / weighted_sd;
CoordinateType max_peak_width = fabs(set[set.size() - 1].getPos() - set[weighted_median_idx].getPos()); // cannot be wider than this
// calculate the height of the peak
height_ = set[weighted_median_idx].getIntensity();
// calculate retention time
retention_ = weighted_mean - weighted_sd * std::pow(weighted_skew / 2., 1./3.);
// default is an asymmetric peak
symmetric_ = false;
// calculate the symmetry (fronted peak: s<1 , tailed peak: s>1)
symmetry_ = weighted_sd * std::pow(weighted_skew / 2., 1./3.);
// check the symmetry
if (std::isinf(symmetry_) || std::isnan(symmetry_))
{
symmetric_ = true;
symmetry_ = 10.0;
}
// optimize the symmetry
// The computations can lead to an overflow error at very low values of symmetry (s~0).
// For s~5 the parameter can be approximated by the Levenberg-Marquardt algorithms.
// (the other parameters are much greater than one)
if (symmetry_ < 1)
{
symmetry_ += 5;
}
// Need to ensure that we do not go beyond the maximal width of the peak
symmetry_ = std::min(symmetry_, max_peak_width);
// calculate the width of the peak
// rt-values with intensity zero are not allowed for calculation of the width
// normally: width_ = fabs( set[set.size() - 1].getPos() - set[0].getPos() );
// but its better for the emg function to proceed from narrow peaks
width_ = symmetry_;
//MOM estimator would be the following, but it is too large for the test
//width_ = weighted_sd * std::sqrt(std::pow(1. - (weighted_skew / 2.), 2./3.));
}
  // Estimate start values for the LM fit. Delegates to the method-of-moments
  // estimator when "init_mom" is enabled; otherwise uses the classic heuristic
  // based on the intensity-weighted median of the data points.
  void EmgFitter1D::setInitialParameters_(const RawDataArrayType& set)
  {
    if (param_.getValue("init_mom").toBool())
    {
      setInitialParametersMOM_(set);
      return;
    }
    // sum over all intensities
    CoordinateType sum = 0.0;
    for (Size i = 0; i < set.size(); ++i)
    {
      sum += set[i].getIntensity();
    }
    // calculate the median: last index where the cumulative intensity is <= half the total
    Size median = 0;
    float count = 0.0;
    for (Size i = 0; i < set.size(); ++i)
    {
      count += set[i].getIntensity();
      if (count <= sum / 2)
      {
        median = i;
      }
    }
    double max_peak_width = fabs(set[set.size() - 1].getPos() - set[median].getPos()); // cannot be wider than this
    // calculate the height of the peak
    height_ = set[median].getIntensity();
    // calculate retention time
    retention_ = set[median].getPos();
    // default is an asymmetric peak
    symmetric_ = false;
    // calculate the symmetry (fronted peak: s<1 , tailed peak: s>1)
    symmetry_ = fabs(set[set.size() - 1].getPos() - set[median].getPos()) / fabs(set[median].getPos() - set[0].getPos());
    // check the symmetry
    if (std::isinf(symmetry_) || std::isnan(symmetry_))
    {
      symmetric_ = true;
      symmetry_ = 10.0;
    }
    // optimize the symmetry
    // The computations can lead to an overflow error at very low values of symmetry (s~0).
    // For s~5 the parameter can be approximated by the Levenberg-Marquardt algorithms.
    // (the other parameters are much greater than one)
    if (symmetry_ < 1)
    {
      symmetry_ += 5;
    }
    // Need to ensure that we do not go beyond the maximal width of the peak
    symmetry_ = std::min(symmetry_, max_peak_width);
    // calculate the width of the peak
    // rt-values with intensity zero are not allowed for calculation of the width
    // normally: width_ = fabs( set[set.size() - 1].getPos() - set[0].getPos() );
    // but its better for the emg function to proceed from narrow peaks
    width_ = symmetry_;
  }
  // Pull the current parameter values into the members (here: model variance only).
  void EmgFitter1D::updateMembers_()
  {
    LevMarqFitter1D::updateMembers_();
    statistics_.setVariance(param_.getValue("statistics:variance"));
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/MultiplexFilteredPeak.cpp | .cpp | 2,776 | 101 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Lars Nilse $
// $Authors: Lars Nilse $
// --------------------------------------------------------------------------
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/FEATUREFINDER/MultiplexSatelliteCentroided.h>
#include <OpenMS/FEATUREFINDER/MultiplexSatelliteProfile.h>
#include <OpenMS/FEATUREFINDER/MultiplexFilteredPeak.h>
#include <vector>
#include <algorithm>
#include <iostream>
using namespace std;
namespace OpenMS
{
  // Construct a filtered peak from its m/z, RT and the indices of the peak within
  // its spectrum (mz_idx) and of the spectrum within the experiment (rt_idx).
  MultiplexFilteredPeak::MultiplexFilteredPeak(double mz, float rt, size_t mz_idx, size_t rt_idx) :
    mz_(mz), rt_(rt), mz_idx_(mz_idx), rt_idx_(rt_idx)
  {
  }
  // @return m/z of the peak
  double MultiplexFilteredPeak::getMZ() const
  {
    return mz_;
  }
  // @return retention time of the peak
  float MultiplexFilteredPeak::getRT() const
  {
    return rt_;
  }
  // @return index of the peak within its spectrum
  size_t MultiplexFilteredPeak::getMZidx() const
  {
    return mz_idx_;
  }
  // @return index of the spectrum within the experiment
  size_t MultiplexFilteredPeak::getRTidx() const
  {
    return rt_idx_;
  }
void MultiplexFilteredPeak::addSatellite(size_t rt_idx, size_t mz_idx, size_t pattern_idx)
{
satellites_.insert(std::make_pair(pattern_idx, MultiplexSatelliteCentroided(rt_idx, mz_idx)));
}
void MultiplexFilteredPeak::addSatellite(const MultiplexSatelliteCentroided& satellite, size_t pattern_idx)
{
satellites_.insert(std::make_pair(pattern_idx, satellite));
}
void MultiplexFilteredPeak::addSatelliteProfile(float rt, double mz, float intensity, size_t pattern_idx)
{
satellites_profile_.insert(std::make_pair(pattern_idx, MultiplexSatelliteProfile(rt, mz, intensity)));
}
void MultiplexFilteredPeak::addSatelliteProfile(const MultiplexSatelliteProfile& satellite, size_t pattern_idx)
{
satellites_profile_.insert(std::make_pair(pattern_idx, satellite));
}
bool MultiplexFilteredPeak::checkSatellite(size_t rt_idx, size_t mz_idx) const
{
for (const auto &satellite_it : satellites_)
{
if (((satellite_it.second).getRTidx() == rt_idx) && ((satellite_it.second).getMZidx() == mz_idx))
{
return true;
}
}
return false;
}
  // @return all centroided satellites, keyed by pattern index (read-only view)
  const std::multimap<size_t, MultiplexSatelliteCentroided >& MultiplexFilteredPeak::getSatellites() const
  {
    return satellites_;
  }
  // @return all profile-mode satellites, keyed by pattern index (read-only view)
  const std::multimap<size_t, MultiplexSatelliteProfile >& MultiplexFilteredPeak::getSatellitesProfile() const
  {
    return satellites_profile_;
  }
  // @return number of centroided satellites
  size_t MultiplexFilteredPeak::size() const
  {
    return satellites_.size();
  }
  // @return number of profile-mode satellites
  size_t MultiplexFilteredPeak::sizeProfile() const
  {
    return satellites_profile_.size();
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/TraceFitter.cpp | .cpp | 4,449 | 138 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg$
// $Authors: Stephan Aiche, Marc Sturm $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/TraceFitter.h>
#include <unsupported/Eigen/NonLinearOptimization>
#include <Eigen/Core>
namespace OpenMS
{
  /// Internal adapter to wrap GenericFunctor for Eigen's LM solver.
  /// Eigen::LevenbergMarquardt expects a functor exposing inputs()/values(),
  /// operator()(x, fvec) for the residuals and df(x, J) for the Jacobian working on
  /// Eigen types; this adapter forwards those calls to the wrapped GenericFunctor,
  /// which works on raw double pointers. It stores a non-owning reference, so the
  /// wrapped functor must outlive the adapter.
  class GenericFunctorEigenAdapter
  {
public:
    // NOTE(review): intentionally implicit? consider marking explicit.
    GenericFunctorEigenAdapter(TraceFitter::GenericFunctor& functor)
      : functor_(functor)
    {
    }
    int inputs() const { return functor_.inputs(); }
    int values() const { return functor_.values(); }
    // residual evaluation: forwards raw data pointers of the Eigen vectors
    int operator()(const Eigen::VectorXd& x, Eigen::VectorXd& fvec)
    {
      return functor_(x.data(), fvec.data());
    }
    // Jacobian evaluation: forwards raw data pointers of the Eigen matrix
    int df(const Eigen::VectorXd& x, Eigen::MatrixXd& J)
    {
      return functor_.df(x.data(), J.data());
    }
private:
    TraceFitter::GenericFunctor& functor_;
  };
  // @return number of model parameters (dimensions of the optimization problem)
  int TraceFitter::GenericFunctor::inputs() const
  {
    return m_inputs;
  }
  // @return number of data points (residuals) in the optimization problem
  int TraceFitter::GenericFunctor::values() const
  {
    return m_values;
  }
  // @param dimensions       number of model parameters
  // @param num_data_points  number of residuals/data points
  TraceFitter::GenericFunctor::GenericFunctor(int dimensions, int num_data_points) :
    m_inputs(dimensions), m_values(num_data_points)
  {
  }
  // Destructor: nothing to release.
  TraceFitter::GenericFunctor::~GenericFunctor() = default;
  // Default C'tor: registers the LM iteration limit and the trace-weighting switch.
  TraceFitter::TraceFitter() :
    DefaultParamHandler("TraceFitter")
  {
    defaults_.setValue("max_iteration", 500, "Maximum number of iterations used by the Levenberg-Marquardt algorithm.", {"advanced"});
    defaults_.setValue("weighted", "false", "Weight mass traces according to their theoretical intensities.", {"advanced"});
    defaults_.setValidStrings("weighted", {"true","false"});
    defaultsToParam_();
  }
  // Copy C'tor: copies parameter state and cached members, then refreshes derived state.
  TraceFitter::TraceFitter(const TraceFitter& source) :
    DefaultParamHandler(source),
    max_iterations_(source.max_iterations_),
    weighted_(source.weighted_)
  {
    updateMembers_();
  }
TraceFitter& TraceFitter::operator=(const TraceFitter& source)
{
DefaultParamHandler::operator=(source);
max_iterations_ = source.max_iterations_;
weighted_ = source.weighted_;
updateMembers_();
return *this;
}
  // Destructor: nothing to release.
  TraceFitter::~TraceFitter() = default;
  // Theoretical intensity of mass trace @p trace at its k-th peak position:
  // the trace's theoretical isotope intensity scaled by the fitted model value at that RT.
  double TraceFitter::computeTheoretical(const FeatureFinderAlgorithmPickedHelperStructs::MassTrace& trace, Size k) const
  {
    double rt = trace.peaks[k].first;
    return trace.theoretical_int * getValue(rt);
  }
  // Cache the current parameter values in the members.
  void TraceFitter::updateMembers_()
  {
    max_iterations_ = this->param_.getValue("max_iteration");
    weighted_ = this->param_.getValue("weighted") == "true";
  }
  // Run Eigen's Levenberg-Marquardt solver on @p functor, starting from (and writing
  // the optimized parameters back into) @p x_init, then forward the result to
  // getOptimizedParameters_(). Throws Exception::UnableToFit when the problem is
  // under-determined or the solver terminates in a bad state.
  void TraceFitter::optimize_(std::vector<double>& x_init, GenericFunctor& functor)
  {
    //TODO: this function is copy&paste from LevMarqFitter1d.h. Make a generic wrapper for
    //LM optimization
    int data_count = functor.values();
    int num_params = functor.inputs();
    // LM always expects N>=p, because the Jacobian is rectangular M x N with M>=N
    if (data_count < num_params)
    {
      throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "UnableToFit-FinalSet", "Skipping feature, we always expect N>=p");
    }
    // Create Eigen vector and copy data from std::vector
    Eigen::VectorXd x_eigen = Eigen::Map<Eigen::VectorXd>(x_init.data(), x_init.size());
    // Create adapter to wrap our functor for Eigen's LM solver
    GenericFunctorEigenAdapter adapter(functor);
    Eigen::LevenbergMarquardt<GenericFunctorEigenAdapter> lmSolver(adapter);
    lmSolver.parameters.maxfev = max_iterations_;
    Eigen::LevenbergMarquardtSpace::Status status = lmSolver.minimize(x_eigen);
    //the states are poorly documented. after checking the source, we believe that
    //all states except NotStarted, Running and ImproperInputParameters are good
    //termination states.
    if (status <= Eigen::LevenbergMarquardtSpace::ImproperInputParameters)
    {
      throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "UnableToFit-FinalSet", "Could not fit the gaussian to the data: Error " + String(status));
    }
    // Copy results back to x_init
    std::copy(x_eigen.data(), x_eigen.data() + x_eigen.size(), x_init.begin());
    getOptimizedParameters_(x_init);
  }
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/Biosaur2Algorithm.cpp | .cpp | 105,759 | 3,259 | // Copyright (c) 2002-present, OpenMS Inc.
// SPDX-License-Identifier: BSD-3-Clause
#include <OpenMS/FEATUREFINDER/Biosaur2Algorithm.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/IsotopeDistribution.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/DATASTRUCTURES/ConvexHull2D.h>
#include <OpenMS/DATASTRUCTURES/DBoundingBox.h>
#include <OpenMS/IONMOBILITY/FAIMSHelper.h>
#include <OpenMS/IONMOBILITY/IMDataConverter.h>
#include <OpenMS/IONMOBILITY/IMTypes.h>
#include <OpenMS/KERNEL/Feature.h>
#include <OpenMS/KERNEL/SpectrumHelper.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/PROCESSING/CENTROIDING/PeakPickerHiRes.h>
#include <OpenMS/PROCESSING/FEATURE/FeatureOverlapFilter.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <OpenMS/SYSTEM/StopWatch.h>
#include <algorithm>
#include <cmath>
#include <fstream>
#include <map>
#include <numeric>
#include <set>
#include <limits>
using namespace std;
namespace OpenMS
{
  // Default C'tor: registers all algorithm parameters (with validation bounds and
  // value lists) and caches them in the members via defaultsToParam_() + updateMembers_().
  Biosaur2Algorithm::Biosaur2Algorithm() :
    DefaultParamHandler("Biosaur2Algorithm")
  {
    defaults_.setValue("mini", 1.0, "Minimum intensity threshold");
    defaults_.setMinFloat("mini", 0.0);
    defaults_.setValue("minmz", 350.0, "Minimum m/z value");
    defaults_.setMinFloat("minmz", 0.0);
    defaults_.setValue("maxmz", 1500.0, "Maximum m/z value");
    defaults_.setMinFloat("maxmz", 0.0);
    defaults_.setValue("htol", 8.0, "Mass accuracy in ppm for combining peaks into hills");
    defaults_.setMinFloat("htol", 0.0);
    defaults_.setValue("itol", 8.0, "Mass accuracy in ppm for isotopic patterns");
    defaults_.setMinFloat("itol", 0.0);
    defaults_.setValue("hvf", 1.3, "Hill valley factor for splitting hills");
    defaults_.setMinFloat("hvf", 1.0);
    defaults_.setValue("ivf", 5.0, "Isotope valley factor for splitting isotope patterns");
    defaults_.setMinFloat("ivf", 1.0);
    defaults_.setValue("minlh", 2, "Minimum number of scans for a hill");
    defaults_.setMinInt("minlh", 1);
    defaults_.setValue("pasefmini", 100.0, "Minimum combined intensity for PASEF/TIMS clusters after m/z–ion-mobility centroiding.");
    defaults_.setMinFloat("pasefmini", 0.0);
    defaults_.setValue("pasefminlh", 1, "Minimum number of raw points per PASEF/TIMS cluster during centroiding.");
    defaults_.setMinInt("pasefminlh", 1);
    defaults_.setValue("cmin", 1, "Minimum charge state");
    defaults_.setMinInt("cmin", 1);
    defaults_.setValue("cmax", 6, "Maximum charge state");
    defaults_.setMinInt("cmax", 1);
    defaults_.setValue("iuse", 0, "Number of isotopes for intensity calculation (0=mono only, -1=all, 1=mono+first, etc.)");
    defaults_.setMinInt("iuse", -1);
    defaults_.setValue("nm", "false", "Negative mode (affects neutral mass calculation)");
    defaults_.setValidStrings("nm", {"true", "false"});
    defaults_.setValue("tof", "false", "Enable TOF-specific intensity filtering");
    defaults_.setValidStrings("tof", {"true", "false"});
    defaults_.setValue("profile", "false", "Enable profile mode processing (centroid spectra using PeakPickerHiRes)");
    defaults_.setValidStrings("profile", {"true", "false"});
    defaults_.setValue("paseftol", 0.05, "Ion mobility accuracy (in the same units as the ion-mobility array) for linking peaks into hills and grouping isotopes (0 = disable IM-based gating).");
    defaults_.setMinFloat("paseftol", 0.0);
    defaults_.setValue("use_hill_calib", "false", "Enable automatic hill mass tolerance calibration");
    defaults_.setValidStrings("use_hill_calib", {"true", "false"});
    defaults_.setValue("ignore_iso_calib", "false", "Disable automatic isotope mass error calibration");
    defaults_.setValidStrings("ignore_iso_calib", {"true", "false"});
    defaults_.setValue("hrttol", 10.0, "Maximum allowed RT difference (in seconds) between monoisotopic hill apex and isotope hill apex when assembling isotope patterns (0 disables RT gating).");
    defaults_.setMinFloat("hrttol", 0.0);
    defaults_.setValue("convex_hulls", "bounding_box",
                       "Representation of feature convex hulls in the output FeatureMap. "
                       "'bounding_box' stores a single RT–m/z bounding box per feature "
                       "(smaller featureXML, no per-trace detail), "
                       "whereas 'mass_traces' stores one convex hull per contributing hill using all mass-trace points "
                       "(larger featureXML, preserves detailed trace shape).");
    defaults_.setValidStrings("convex_hulls", {"mass_traces", "bounding_box"});
    defaults_.setValue("faims_merge_features", "true",
                       "For FAIMS data with multiple compensation voltages: Merge features representing the same analyte "
                       "detected at different CV values into a single feature. Only features with DIFFERENT FAIMS CV values "
                       "are merged (same CV = different analytes). Has no effect on non-FAIMS data.");
    defaults_.setValidStrings("faims_merge_features", {"true", "false"});
    defaultsToParam_();
    updateMembers_();
  }
  // Cache all parameter values in the members and log the effective configuration
  // at debug level.
  void Biosaur2Algorithm::updateMembers_()
  {
    mini_ = param_.getValue("mini");
    minmz_ = param_.getValue("minmz");
    maxmz_ = param_.getValue("maxmz");
    htol_ = param_.getValue("htol");
    itol_ = param_.getValue("itol");
    hvf_ = param_.getValue("hvf");
    ivf_ = param_.getValue("ivf");
    minlh_ = static_cast<Size>(param_.getValue("minlh"));
    cmin_ = param_.getValue("cmin");
    cmax_ = param_.getValue("cmax");
    pasefmini_ = param_.getValue("pasefmini");
    pasefminlh_ = static_cast<Size>(param_.getValue("pasefminlh"));
    iuse_ = param_.getValue("iuse");
    negative_mode_ = param_.getValue("nm").toBool();
    tof_mode_ = param_.getValue("tof").toBool();
    profile_mode_ = param_.getValue("profile").toBool();
    use_hill_calib_ = param_.getValue("use_hill_calib").toBool();
    ignore_iso_calib_ = param_.getValue("ignore_iso_calib").toBool();
    paseftol_ = param_.getValue("paseftol");
    hrttol_ = param_.getValue("hrttol");
    convex_hull_mode_ = param_.getValue("convex_hulls").toString();
    faims_merge_features_ = param_.getValue("faims_merge_features").toBool();
    OPENMS_LOG_DEBUG << "Biosaur2Algorithm parameters after updateMembers_: "
                     << "mini=" << mini_
                     << ", minmz=" << minmz_
                     << ", maxmz=" << maxmz_
                     << ", htol=" << htol_
                     << ", itol=" << itol_
                     << ", hvf=" << hvf_
                     << ", ivf=" << ivf_
                     << ", minlh=" << minlh_
                     << ", cmin=" << cmin_
                     << ", cmax=" << cmax_
                     << ", pasefmini=" << pasefmini_
                     << ", pasefminlh=" << pasefminlh_
                     << ", iuse=" << iuse_
                     << ", negative_mode=" << negative_mode_
                     << ", tof_mode=" << tof_mode_
                     << ", profile_mode=" << profile_mode_
                     << ", use_hill_calib=" << use_hill_calib_
                     << ", ignore_iso_calib=" << ignore_iso_calib_
                     << ", paseftol=" << paseftol_
                     << ", hrttol=" << hrttol_
                     << ", convex_hulls=" << convex_hull_mode_
                     << ", faims_merge_features=" << faims_merge_features_
                     << endl;
  }
  // Copy the input experiment into the algorithm (run() later mutates this copy).
  void Biosaur2Algorithm::setMSData(const MSExperiment& ms_data)
  {
    ms_data_ = ms_data;
  }
  // Move the input experiment into the algorithm (avoids a copy for rvalue callers).
  void Biosaur2Algorithm::setMSData(MSExperiment&& ms_data)
  {
    ms_data_ = std::move(ms_data);
  }
  // @return mutable reference to the stored experiment
  MSExperiment& Biosaur2Algorithm::getMSData()
  {
    return ms_data_;
  }
// Read-only access to the stored experiment.
const MSExperiment& Biosaur2Algorithm::getMSData() const
{
  return ms_data_;
}
void Biosaur2Algorithm::run(FeatureMap& feature_map)
{
  // Convenience overload: callers that only need the FeatureMap can discard
  // the intermediate hills and peptide features produced by the full run().
  vector<Hill> discarded_hills;
  vector<PeptideFeature> discarded_features;
  run(feature_map, discarded_hills, discarded_features);
}
// Main entry point: detects LC-MS(-IM) features in the stored experiment.
// Output: 'feature_map' (OpenMS features), plus the raw 'hills' and
// 'peptide_features' intermediates for callers that want them.
// Consumes ms_data_ (it is moved into the FAIMS group splitter).
void Biosaur2Algorithm::run(FeatureMap& feature_map,
                            vector<Hill>& hills,
                            vector<PeptideFeature>& peptide_features)
{
  feature_map.clear(true);
  hills.clear();
  peptide_features.clear();
  // Filter to keep only MS1 spectra
  ms_data_.getSpectra().erase(
    remove_if(ms_data_.begin(), ms_data_.end(),
              [](const MSSpectrum& s) { return s.getMSLevel() != 1; }),
    ms_data_.end());
  if (profile_mode_)
  {
    centroidProfileSpectra_(ms_data_);
  }
  if (tof_mode_)
  {
    processTOF_(ms_data_);
  }
  OPENMS_LOG_INFO << "Loaded " << ms_data_.size() << " MS1 spectra" << endl;
  if (ms_data_.empty())
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No MS1 spectra found in input experiment!", "");
  }
  // Build FAIMS-aware processing groups (one per CV or a single non-FAIMS group).
  // 'groups' is non-empty here because ms_data_ was verified to be non-empty.
  std::vector<std::pair<double, MSExperiment>> groups =
    IMDataConverter::splitByFAIMSCV(std::move(ms_data_));
  const Size n_groups = groups.size();
  vector<vector<Hill>> hills_per_group(n_groups);
  vector<vector<PeptideFeature>> features_per_group(n_groups);
  vector<FeatureMap> fmap_per_group(n_groups);
  const double original_paseftol = paseftol_;
  // Parallelize processing across FAIMS groups. Each group is handled
  // independently using its own local hills/features containers and writes
  // only to its own slot of the per-group result vectors.
  // NOTE(review): paseftol_ is restored after the loop, which suggests the
  // processFAIMSGroup_ call tree may modify this member — confirm that this
  // is safe under OpenMP.
#pragma omp parallel for schedule(dynamic)
  for (int i = 0; i < static_cast<int>(n_groups); ++i)
  {
    auto& group_pair = groups[static_cast<Size>(i)];
    double cv = group_pair.first;
    MSExperiment& group_exp = group_pair.second;
    vector<Hill> local_hills;
    vector<PeptideFeature> local_features;
    processFAIMSGroup_(cv, group_exp, original_paseftol, local_hills, local_features);
    FeatureMap local_map;
    if (!local_hills.empty() && !local_features.empty())
    {
      local_map = convertToFeatureMap_(local_features, local_hills);
    }
    hills_per_group[static_cast<Size>(i)] = std::move(local_hills);
    features_per_group[static_cast<Size>(i)] = std::move(local_features);
    fmap_per_group[static_cast<Size>(i)] = std::move(local_map);
  }
  // Restore original paseftol_ value for subsequent calls.
  paseftol_ = original_paseftol;
  // Concatenate the per-group results. Hills and features can be large, so
  // reserve once and move elements instead of copying them.
  vector<Hill> all_hills;
  vector<PeptideFeature> all_features;
  FeatureMap combined_feature_map;
  Size total_hills = 0;
  Size total_features = 0;
  for (Size i = 0; i < n_groups; ++i)
  {
    total_hills += hills_per_group[i].size();
    total_features += features_per_group[i].size();
  }
  all_hills.reserve(total_hills);
  all_features.reserve(total_features);
  for (Size i = 0; i < n_groups; ++i)
  {
    for (Hill& h : hills_per_group[i])
    {
      all_hills.push_back(std::move(h));
    }
    for (PeptideFeature& pf : features_per_group[i])
    {
      all_features.push_back(std::move(pf));
    }
    for (Feature& f : fmap_per_group[i])
    {
      combined_feature_map.push_back(std::move(f));
    }
  }
  // Check if we have FAIMS data (multiple CV groups, or a single group with
  // a real compensation voltage).
  const bool has_faims = n_groups > 1 || !std::isnan(groups[0].first);
  // Optionally merge features representing the same analyte at different FAIMS CV values
  if (has_faims && faims_merge_features_)
  {
    Size before_merge = combined_feature_map.size();
    FeatureOverlapFilter::mergeFAIMSFeatures(combined_feature_map, 5.0, 0.05);
    if (combined_feature_map.size() < before_merge)
    {
      OPENMS_LOG_INFO << "FAIMS feature merge: " << before_merge << " -> " << combined_feature_map.size()
                      << " features (merged " << (before_merge - combined_feature_map.size()) << ")" << endl;
    }
  }
  hills = std::move(all_hills);
  peptide_features = std::move(all_features);
  feature_map = std::move(combined_feature_map);
  feature_map.applyMemberFunction(&UniqueIdInterface::ensureUniqueId);
  feature_map.ensureUniqueId();
  feature_map.getProteinIdentifications().resize(1);
}
double Biosaur2Algorithm::calculatePPM_(double mz1, double mz2) const
{
  // Guard against a (near-)zero reference m/z before delegating to Math::getPPM.
  const bool reference_is_zero = fabs(mz2) < 1e-10;
  return reference_is_zero ? 0.0 : Math::getPPM(mz1, mz2);
}
double Biosaur2Algorithm::calculateMedian_(const vector<double>& values) const
{
if (values.empty()) return 0.0;
vector<double> sorted = values;
return Math::median(sorted.begin(), sorted.end(), false);
}
bool Biosaur2Algorithm::shouldThrowForMissingIM_(const MSSpectrum& /* spectrum */) const
{
  // Mirror the Python reference implementation: missing per-peak ion-mobility
  // arrays merely disable IM-based gating instead of being a hard error, so
  // this hook always reports "do not throw".
  return false;
}
double Biosaur2Algorithm::cosineCorrelation1D_(const vector<double>& v1,
const vector<double>& v2) const
{
Size n = min(v1.size(), v2.size());
if (n == 0) return 0.0;
double dot = 0.0;
double n1 = 0.0;
double n2 = 0.0;
#pragma omp simd reduction(+:dot,n1,n2)
for (Size i = 0; i < n; ++i)
{
dot += v1[i] * v2[i];
n1 += v1[i] * v1[i];
n2 += v2[i] * v2[i];
}
if (n1 == 0.0 || n2 == 0.0) return 0.0;
return dot / (sqrt(n1) * sqrt(n2));
}
// Finds the longest experimental isotope-envelope prefix matching the
// theoretical (averagine) pattern. Starting from the full experimental
// envelope, the prefix is shortened one isotope at a time until both hold:
//  - the theoretical prefix explains >= 50% of the total theoretical
//    intensity, and
//  - the cosine correlation between theoretical and experimental prefixes
//    reaches 'thresh'.
// Returns (best correlation, number of experimental isotopes used); the
// fallback (0.0, 1) signals that no acceptable prefix was found.
pair<double, Size> Biosaur2Algorithm::checkingCosCorrelationForCarbon_(
  const vector<double>& theor_full,
  const vector<double>& exp_full,
  double thresh) const
{
  // Degenerate envelopes cannot be correlated.
  if (exp_full.size() <= 1 || theor_full.empty())
  {
    return make_pair(0.0, Size(1));
  }
  double theor_total_sum = accumulate(theor_full.begin(), theor_full.end(), 0.0);
  if (theor_total_sum <= 0.0)
  {
    return make_pair(0.0, Size(1));
  }
  double best_cor = 0.0;
  Size best_pos = 1;
  int pos = static_cast<int>(exp_full.size());
  while (pos != 1)
  {
    // The theoretical prefix is capped at the available theoretical length.
    Size suit_len = min(static_cast<Size>(pos), theor_full.size());
    vector<double> theor(theor_full.begin(), theor_full.begin() + suit_len);
    vector<double> exp(exp_full.begin(), exp_full.begin() + pos);
    double theor_partial_sum = accumulate(theor.begin(), theor.end(), 0.0);
    // Fraction of the total theoretical intensity covered by this prefix.
    double averagine_explained =
      (theor_total_sum > 0.0) ? (theor_partial_sum / theor_total_sum) : 0.0;
    double cor = cosineCorrelation1D_(theor, exp);
    if (averagine_explained >= 0.5 && cor >= thresh)
    {
      // First acceptable prefix (the longest one) wins; stop searching.
      if (cor > best_cor)
      {
        best_cor = cor;
        best_pos = static_cast<Size>(pos);
      }
      break;
    }
    --pos;
  }
  return make_pair(best_cor, best_pos);
}
vector<double> Biosaur2Algorithm::meanFilter_(const vector<double>& data, Size window) const
{
  // Zero-padded moving average: 'window' is the kernel half-width, i.e. the
  // kernel length is 2 * window + 1, mirroring the Python meanfilt with
  // NumPy's 'same' convolution (implicit zeros outside the signal).
  vector<double> smoothed(data.size(), 0.0);
  if (data.empty())
  {
    return smoothed;
  }
  const long n = static_cast<long>(data.size());
  const long half = static_cast<long>(window);
  const double kernel_len = static_cast<double>(2 * window + 1);
  for (long center = 0; center < n; ++center)
  {
    // Clamp the kernel support to the valid index range; positions outside
    // contribute zero, so only in-range samples are summed (ascending order,
    // matching the original summation order exactly).
    const long first = std::max<long>(0, center - half);
    const long last = std::min<long>(n - 1, center + half);
    double acc = 0.0;
    for (long j = first; j <= last; ++j)
    {
      acc += data[static_cast<Size>(j)];
    }
    // Normalization always uses the full kernel length (zero padding).
    smoothed[static_cast<Size>(center)] = acc / kernel_len;
  }
  return smoothed;
}
// Estimates a systematic mass-error offset and its spread from a set of
// mass differences by histogramming them and computing the count-weighted
// mean and standard deviation of the histogram.
// Returns (mean, sigma). The sentinel (0.0, 10.0) signals "calibration not
// possible": too few values, too few bins, too little data, or a degenerate
// sigma. If the mean falls outside the histogram range, the estimate is
// retried once with a coarser fixed bin width of 0.25.
pair<double, double> Biosaur2Algorithm::calibrateMass_(const vector<double>& mass_errors, double bin_width) const
{
  if (mass_errors.empty())
  {
    return make_pair(0.0, 10.0);
  }
  double min_error = *min_element(mass_errors.begin(), mass_errors.end());
  double max_error = *max_element(mass_errors.begin(), mass_errors.end());
  // Histogram range [-mass_left, mass_right]; note mass_left is the negated
  // minimum, mirroring the reference implementation.
  double mass_left = -min_error;
  double mass_right = max_error;
  int n_bins = static_cast<int>((mass_left + mass_right) / bin_width);
  // Too few bins -> distribution too narrow to fit; return the sentinel.
  if (n_bins < 5)
  {
    return make_pair(0.0, 10.0);
  }
  vector<double> bin_centers;
  vector<int> bin_counts(n_bins, 0);
  for (int i = 0; i < n_bins; ++i)
  {
    bin_centers.push_back(-mass_left + (i + 0.5) * bin_width);
  }
  // Fill the histogram; out-of-range errors are ignored.
  for (double error : mass_errors)
  {
    int bin = static_cast<int>((error + mass_left) / bin_width);
    if (bin >= 0 && bin < n_bins)
    {
      bin_counts[bin]++;
    }
  }
  // Count-weighted first and second moments of the bin centers.
  double sum_x = 0.0, sum_x2 = 0.0, sum_w = 0.0;
  for (size_t i = 0; i < bin_centers.size(); ++i)
  {
    double x = bin_centers[i];
    double w = bin_counts[i];
    sum_x += w * x;
    sum_x2 += w * x * x;
    sum_w += w;
  }
  // Require a minimum number of histogrammed values for a stable estimate.
  if (sum_w < 10)
  {
    return make_pair(0.0, 10.0);
  }
  double mean = sum_x / sum_w;
  double variance = (sum_x2 / sum_w) - (mean * mean);
  // Floor the variance to avoid a zero/negative sigma from rounding.
  double sigma = sqrt(max(variance, 0.01));
  // Mean outside the observed range -> binning too fine; retry coarser.
  if (fabs(mean) >= max(mass_left, mass_right))
  {
    return calibrateMass_(mass_errors, 0.25);
  }
  if (isinf(sigma) || isnan(sigma))
  {
    return make_pair(0.0, 10.0);
  }
  return make_pair(mean, sigma);
}
// Computes the theoretical isotope intensity envelope for a given neutral
// mass using a carbon-only averagine binomial model, scaled so that the
// monoisotopic peak equals 'apex_intensity'.
// Returns (10-element envelope, index of the most intense theoretical
// isotope, floored at 4).
pair<vector<double>, Size> Biosaur2Algorithm::computeAveragine_(double neutral_mass,
                                                                double apex_intensity) const
{
  // Averagine-based theoretical isotope intensities (C-only binomial model),
  // using the same neutral-mass binning (100 Da bins) and binomial
  // parameters as the reference Cython/SciPy implementation.
  constexpr int averagine_mass_bin_step = 100;
  constexpr int averagine_max_mass_bin_index = 199; // 0..199 => 0..19900 Da
  struct AveragineData
  {
    vector<vector<double>> table; // per mass bin: normalized 10-isotope envelope
    vector<Size> max_pos;         // per mass bin: index of the envelope maximum (>= 4)
  };
  // Thread-safe (since C++11) initialization of the averagine lookup data:
  static const AveragineData data = []()
  {
    AveragineData d;
    // Averagine composition constants: average residue mass and carbon count,
    // plus the natural 13C abundance used as the binomial success probability.
    const double averagine_mass = 111.1254;
    const double averagine_C = 4.9384;
    const double p = 0.0107;
    d.table.assign(averagine_max_mass_bin_index + 1, vector<double>(10, 0.0));
    d.max_pos.assign(averagine_max_mass_bin_index + 1, 0);
    for (int bin_idx = 0; bin_idx <= averagine_max_mass_bin_index; ++bin_idx)
    {
      double neutral_mass_bin = static_cast<double>(bin_idx * averagine_mass_bin_step);
      // Estimated number of carbon atoms for this mass bin (at least 1).
      int n_C = static_cast<int>(round(neutral_mass_bin / averagine_mass * averagine_C));
      n_C = max(n_C, 1);
      boost::math::binomial_distribution<double> dist(n_C, p);
      double sum_prob = 0.0;
      for (Size k = 0; k < 10; ++k)
      {
        // P(k heavy carbons); zero beyond the number of carbons.
        double prob = (static_cast<int>(k) <= n_C) ?
          boost::math::pdf(dist, static_cast<unsigned>(k)) : 0.0;
        d.table[bin_idx][k] = prob;
        sum_prob += prob;
      }
      if (sum_prob <= 0.0) sum_prob = 1.0;
      // Normalize the envelope to sum to 1.
      for (Size k = 0; k < 10; ++k)
      {
        d.table[bin_idx][k] /= sum_prob;
      }
      Size max_pos = distance(d.table[bin_idx].begin(),
                              max_element(d.table[bin_idx].begin(),
                                          d.table[bin_idx].end()));
      // Floor the apex position at 4, matching the reference implementation.
      if (max_pos < 4) max_pos = 4;
      d.max_pos[bin_idx] = max_pos;
    }
    return d;
  }();
  // Convenience references
  const vector<vector<double>>& averagine_table = data.table;
  const vector<Size>& averagine_max_pos = data.max_pos;
  // Map neutral mass onto the same 100 Da grid as the Python implementation,
  // clamping to the table bounds.
  int bin_idx = static_cast<int>(floor(neutral_mass / static_cast<double>(averagine_mass_bin_step)));
  if (bin_idx < 0)
  {
    bin_idx = 0;
  }
  if (bin_idx > averagine_max_mass_bin_index)
  {
    bin_idx = averagine_max_mass_bin_index;
  }
  vector<double> theor(10, 0.0);
  const vector<double>& probs = averagine_table[bin_idx];
  // Scale normalized probabilities to the mono apex intensity,
  // preserving the same relative shape as in the Python code.
  double mono_prob = probs[0];
  if (mono_prob <= 0.0)
  {
    mono_prob = 1.0;
  }
  for (Size k = 0; k < 10; ++k)
  {
    theor[k] = apex_intensity * probs[k] / mono_prob;
  }
  Size max_pos = averagine_max_pos[bin_idx];
  return make_pair(theor, max_pos);
}
double Biosaur2Algorithm::computeHillMzStep_(const MSExperiment& exp,
double htol_ppm,
double min_intensity,
double min_mz,
double max_mz) const
{
double max_mz_value = 0.0;
for (Size scan_idx = 0; scan_idx < exp.size(); ++scan_idx)
{
const MSSpectrum& spectrum = exp[scan_idx];
for (Size peak_idx = 0; peak_idx < spectrum.size(); ++peak_idx)
{
const Peak1D& peak = spectrum[peak_idx];
double mz = peak.getMZ();
double intensity = peak.getIntensity();
if (intensity < min_intensity || mz < min_mz || mz > max_mz)
{
continue;
}
if (mz > max_mz_value)
{
max_mz_value = mz;
}
}
}
if (max_mz_value <= 0.0 || htol_ppm <= 0.0)
{
return 0.0;
}
return htol_ppm * 1e-6 * max_mz_value;
}
// TOF-specific noise filtering: learns per-(50 Da m/z bin) intensity
// thresholds from the log10 intensity distribution of the first spectra
// (mean + 2 sigma), optionally tightens them to mean + 3 sigma per bin, and
// then removes all peaks at or below the threshold of their bin.
// Mirrors the two-pass 'process_tof' of the Python reference implementation.
void Biosaur2Algorithm::processTOF_(MSExperiment& exp) const
{
  OPENMS_LOG_INFO << "Applying TOF-specific intensity filtering..." << endl;
  StopWatch stage_timer;
  stage_timer.start();
  // Report if the input data contains ion-mobility information, similar to
  // other stages that branch on IM availability.
  bool any_im_array = false;
  for (const auto& spec : exp)
  {
    const IMFormat im_format = IMTypes::determineIMFormat(spec);
    if (im_format != IMFormat::NONE)
    {
      any_im_array = true;
      break;
    }
  }
  if (any_im_array)
  {
    OPENMS_LOG_INFO << "TOF filtering: input data contains ion-mobility arrays." << endl;
  }
  const double mz_bin_size = 50.0;
  map<int, vector<double>> intensity_bins;
  // Phase 1: learn initial per-bin thresholds (similar to Python process_tof first pass, using +2 sigma)
  // Only the first (up to) 25 spectra are used as the training sample.
  Size sample_size = min(Size(25), exp.size());
  for (Size i = 0; i < sample_size; ++i)
  {
    for (Size j = 0; j < exp[i].size(); ++j)
    {
      double mz = exp[i][j].getMZ();
      double intensity = exp[i][j].getIntensity();
      // Note: unlike the original C++ code, we do not restrict the training range
      // to [minmz_, maxmz_] in order to match the Python implementation more closely.
      int bin = static_cast<int>(mz / mz_bin_size);
      if (intensity <= 0.0)
      {
        continue;
      }
      double log_intensity = log10(intensity);
      if (!std::isfinite(log_intensity))
      {
        continue;
      }
      intensity_bins[bin].push_back(log_intensity);
    }
  }
  map<int, double> bin_thresholds;
  for (auto& bin_pair : intensity_bins)
  {
    vector<double> finite_intensities;
    finite_intensities.reserve(bin_pair.second.size());
    for (double value : bin_pair.second)
    {
      if (std::isfinite(value))
      {
        finite_intensities.push_back(value);
      }
    }
    // Bins with fewer than 150 samples are considered unreliable; they keep
    // the default fallback threshold applied in the filtering pass below.
    if (finite_intensities.size() >= 150)
    {
      double sum = accumulate(finite_intensities.begin(), finite_intensities.end(), 0.0);
      double mean = sum / finite_intensities.size();
      double sq_sum = 0.0;
      for (double val : finite_intensities)
      {
        sq_sum += (val - mean) * (val - mean);
      }
      double std_dev = sqrt(sq_sum / finite_intensities.size());
      if (!std::isfinite(mean) || !std::isfinite(std_dev))
      {
        continue;
      }
      // Initial thresholds use mean + 2 * std in log space, mirroring the first Python pass
      bin_thresholds[bin_pair.first] = pow(10.0, mean + 2.0 * std_dev);
    }
  }
  // Phase 2: optional refinement of thresholds using a stricter +3 sigma criterion,
  // conceptually similar to the Python while-cnt<=50 refinement.
  // NOTE(review): refined thresholds are overwritten per spectrum, so the last
  // qualifying spectrum determines the final value for each bin.
  Size refine_limit = min(Size(50), exp.size());
  for (Size i = 0; i < refine_limit; ++i)
  {
    // collect log10 intensities per bin for this spectrum
    map<int, vector<double>> spectrum_bins;
    for (Size j = 0; j < exp[i].size(); ++j)
    {
      double mz = exp[i][j].getMZ();
      double intensity = exp[i][j].getIntensity();
      int bin = static_cast<int>(mz / mz_bin_size);
      if (intensity <= 0.0)
      {
        continue;
      }
      double log_intensity = log10(intensity);
      if (!std::isfinite(log_intensity))
      {
        continue;
      }
      spectrum_bins[bin].push_back(log_intensity);
    }
    // For bins with enough points in this spectrum, refine thresholds with mean + 3 * std
    for (auto& bin_pair : spectrum_bins)
    {
      const vector<double>& values = bin_pair.second;
      if (values.size() >= 150)
      {
        double sum = accumulate(values.begin(), values.end(), 0.0);
        double mean = sum / values.size();
        double sq_sum = 0.0;
        for (double val : values)
        {
          sq_sum += (val - mean) * (val - mean);
        }
        double std_dev = sqrt(sq_sum / values.size());
        if (!std::isfinite(mean) || !std::isfinite(std_dev))
        {
          continue;
        }
        bin_thresholds[bin_pair.first] = pow(10.0, mean + 3.0 * std_dev);
      }
    }
  }
  // Filtering pass: drop all peaks at or below their bin's threshold.
  Size total_peaks_before = 0;
  Size total_peaks_after = 0;
  for (auto& spectrum : exp)
  {
    total_peaks_before += spectrum.size();
    // Collect indices of peaks to keep, then use MSSpectrum::select()
    // so that peaks and all associated data arrays are filtered
    // consistently.
    vector<Size> keep_indices;
    keep_indices.reserve(spectrum.size());
    for (Size i = 0; i < spectrum.size(); ++i)
    {
      double mz = spectrum[i].getMZ();
      double intensity = spectrum[i].getIntensity();
      int bin = static_cast<int>(mz / mz_bin_size);
      // Fallback threshold of 150.0 for bins without a learned value.
      double threshold = 150.0;
      auto it = bin_thresholds.find(bin);
      if (it != bin_thresholds.end())
      {
        threshold = it->second;
      }
      // Use strict '>' comparison (Python keeps intensities > threshold, not >=)
      if (intensity > threshold)
      {
        keep_indices.push_back(i);
      }
    }
    spectrum.select(keep_indices);
    total_peaks_after += spectrum.size();
  }
  stage_timer.stop();
  OPENMS_LOG_INFO << "TOF filtering: " << total_peaks_before
                  << " peaks -> " << total_peaks_after
                  << " peaks in " << stage_timer.toString() << endl;
}
// Converts profile spectra in 'exp' to centroided spectra using
// PeakPickerHiRes (S/N filtering disabled). Spectra already marked as
// centroided are passed through unchanged. RT, MS level and (non-negative)
// drift time of picked spectra are preserved.
void Biosaur2Algorithm::centroidProfileSpectra_(MSExperiment& exp) const
{
  OPENMS_LOG_INFO << "Centroiding profile spectra using PeakPickerHiRes..." << endl;
  PeakPickerHiRes picker;
  Param picker_param = picker.getParameters();
  // Disable signal-to-noise filtering: keep all picked peaks.
  picker_param.setValue("signal_to_noise", 0.0);
  picker.setParameters(picker_param);
  MSExperiment centroided_exp;
  Size total_peaks_before = 0;
  Size total_peaks_after = 0;
  for (Size i = 0; i < exp.size(); ++i)
  {
    total_peaks_before += exp[i].size();
    MSSpectrum centroided_spectrum;
    if (exp[i].getType() == SpectrumSettings::SpectrumType::CENTROID)
    {
      // Already centroided: take it over as-is.
      centroided_spectrum = exp[i];
    }
    else
    {
      picker.pick(exp[i], centroided_spectrum);
      // Carry over the per-spectrum metadata the picker does not set itself.
      centroided_spectrum.setRT(exp[i].getRT());
      centroided_spectrum.setMSLevel(exp[i].getMSLevel());
      centroided_spectrum.setType(SpectrumSettings::SpectrumType::CENTROID);
      if (exp[i].getDriftTime() >= 0)
      {
        centroided_spectrum.setDriftTime(exp[i].getDriftTime());
      }
    }
    // Count before moving; the local spectrum is not used afterwards, so
    // move it into the output experiment instead of deep-copying it.
    total_peaks_after += centroided_spectrum.size();
    centroided_exp.addSpectrum(std::move(centroided_spectrum));
  }
  exp = std::move(centroided_exp);
  OPENMS_LOG_INFO << "Centroiding: " << total_peaks_before
                  << " profile points -> " << total_peaks_after << " centroided peaks" << endl;
}
// PASEF/TIMS pre-centroiding: within each MS1 spectrum that carries a
// per-peak (concatenated) ion-mobility array, peaks that are close in both
// m/z (within htol_ ppm) and ion mobility (within pasef_tolerance of the
// spectrum's IM range) are greedily merged into intensity-weighted
// centroids. Mirrors 'centroid_pasef_scan' of the Python reference.
// Spectra without usable per-peak IM data are left unchanged.
void Biosaur2Algorithm::centroidPASEFData_(MSExperiment& exp, double mz_step, double pasef_tolerance) const
{
  // Nothing to do without a valid binning step or tolerance.
  if (mz_step <= 0.0 || pasef_tolerance <= 0.0)
  {
    return;
  }
  const double hill_mz_accuracy = htol_;
  const double ion_mobility_accuracy = pasef_tolerance;
  Size total_peaks_before = 0;
  Size total_peaks_after = 0;
  // Processes a single spectrum in place; factored as a lambda so the
  // peak counters can be shared across all spectra.
  auto centroid_one_spectrum = [&](MSSpectrum& spectrum)
  {
    total_peaks_before += spectrum.size();
    // Determine ion-mobility format and use the concatenated IM array if available,
    // mirroring the logic in processFAIMSGroup_ and IMTypes::determineIMFormat().
    const IMFormat im_format = IMTypes::determineIMFormat(spectrum);
    if (im_format != IMFormat::CONCATENATED)
    {
      // Either no IM data or only per-spectrum drift time; leave spectrum unchanged.
      return;
    }
    const auto& fda = spectrum.getFloatDataArrays();
    const auto im_data = spectrum.getIMData();
    const Size im_index = im_data.first;
    if (im_index >= fda.size())
    {
      // Inconsistent IM data; leave spectrum unchanged.
      return;
    }
    const auto& im_array = fda[im_index];
    if (im_array.size() != spectrum.size())
    {
      // No usable per-peak ion-mobility data for this spectrum; leave it as-is.
      return;
    }
    const Size n_peaks = spectrum.size();
    if (n_peaks == 0)
    {
      return;
    }
    // Collect m/z, intensity and IM of all peaks passing the basic
    // intensity/m/z-range filters.
    vector<double> mz_ar;
    vector<double> intensity_ar;
    vector<double> im_ar;
    mz_ar.reserve(n_peaks);
    intensity_ar.reserve(n_peaks);
    im_ar.reserve(n_peaks);
    for (Size i = 0; i < n_peaks; ++i)
    {
      double mz = spectrum[i].getMZ();
      double intensity = spectrum[i].getIntensity();
      if (intensity < mini_ || mz < minmz_ || mz > maxmz_)
      {
        continue;
      }
      double im = im_array[i];
      mz_ar.push_back(mz);
      intensity_ar.push_back(intensity);
      im_ar.push_back(im);
    }
    // All peaks filtered out: clear the peak list (keep spectrum metadata).
    if (mz_ar.empty())
    {
      spectrum.clear(false);
      return;
    }
    // The IM bin width scales with the maximum IM value of this spectrum.
    auto it_max_im = max_element(im_ar.begin(), im_ar.end());
    if (it_max_im == im_ar.end() || *it_max_im <= 0.0)
    {
      return;
    }
    const double ion_mobility_step = (*it_max_im) * ion_mobility_accuracy;
    if (ion_mobility_step <= 0.0)
    {
      return;
    }
    // Coarse integer bins in m/z and IM space for fast neighbor checks.
    const Size n = mz_ar.size();
    vector<int> mz_fast(n);
    vector<int> im_fast(n);
    for (Size i = 0; i < n; ++i)
    {
      mz_fast[i] = static_cast<int>(mz_ar[i] / mz_step);
      im_fast[i] = static_cast<int>(im_ar[i] / ion_mobility_step);
    }
    // Sort by coarse m/z bin index, mirroring centroid_pasef_scan.
    vector<Size> order(n);
    iota(order.begin(), order.end(), 0);
    sort(order.begin(), order.end(),
         [&](Size a, Size b) { return mz_fast[a] < mz_fast[b]; });
    vector<double> mz_sorted(n);
    vector<double> intensity_sorted(n);
    vector<double> im_sorted(n);
    vector<int> mz_fast_sorted(n);
    vector<int> im_fast_sorted(n);
    for (Size pos = 0; pos < n; ++pos)
    {
      Size idx = order[pos];
      mz_sorted[pos] = mz_ar[idx];
      intensity_sorted[pos] = intensity_ar[idx];
      im_sorted[pos] = im_ar[idx];
      mz_fast_sorted[pos] = mz_fast[idx];
      im_fast_sorted[pos] = im_fast[idx];
    }
    // Greedy clustering: starting from each unconsumed peak, chain together
    // subsequent peaks that are within the m/z ppm tolerance and within one
    // IM bin (and the absolute IM tolerance). 'banned' marks consumed peaks.
    vector<bool> banned(n, false);
    vector<double> mz_new;
    vector<double> intensity_new;
    vector<double> im_new;
    Size peak_idx = 0;
    const Size max_peak_idx = n;
    while (peak_idx < max_peak_idx)
    {
      vector<Size> tmp;
      if (!banned[peak_idx])
      {
        const double mass_accuracy_cur = mz_sorted[peak_idx] * 1e-6 * hill_mz_accuracy;
        const int mz_val_int = mz_fast_sorted[peak_idx];
        const int im_val_int = im_fast_sorted[peak_idx];
        tmp.push_back(peak_idx);
        Size peak_idx_2 = peak_idx + 1;
        while (peak_idx_2 < max_peak_idx)
        {
          if (!banned[peak_idx_2])
          {
            const int mz_val_int_2 = mz_fast_sorted[peak_idx_2];
            // Peaks are sorted by m/z bin: once more than one bin away,
            // no further match is possible.
            if (mz_val_int_2 - mz_val_int > 1)
            {
              break;
            }
            if (fabs(mz_sorted[peak_idx] - mz_sorted[peak_idx_2]) <= mass_accuracy_cur)
            {
              const int im_val_int_2 = im_fast_sorted[peak_idx_2];
              if (abs(im_val_int - im_val_int_2) <= 1)
              {
                if (fabs(im_sorted[peak_idx] - im_sorted[peak_idx_2]) <= ion_mobility_accuracy)
                {
                  // Chain the peak and advance the cluster anchor to it,
                  // mirroring the reference implementation.
                  tmp.push_back(peak_idx_2);
                  peak_idx = peak_idx_2;
                }
              }
            }
          }
          ++peak_idx_2;
        }
      }
      // Emit a centroid only if the cluster is long enough and intense enough.
      const Size l_new = tmp.size();
      if (l_new >= pasefminlh_)
      {
        if (l_new == 1)
        {
          // Singleton cluster: keep the peak as-is if above the PASEF
          // intensity threshold.
          const double i_val_new = intensity_sorted[peak_idx];
          if (i_val_new >= pasefmini_)
          {
            mz_new.push_back(mz_sorted[peak_idx]);
            intensity_new.push_back(i_val_new);
            im_new.push_back(im_sorted[peak_idx]);
            banned[peak_idx] = true;
          }
        }
        else
        {
          // Multi-peak cluster: sum intensities and compute
          // intensity-weighted m/z and IM centroids.
          double i_val_new = 0.0;
          vector<double> all_mz;
          vector<double> all_im;
          all_mz.reserve(l_new);
          all_im.reserve(l_new);
          for (Size idx : tmp)
          {
            const double intensity = intensity_sorted[idx];
            i_val_new += intensity;
            all_mz.push_back(mz_sorted[idx]);
            all_im.push_back(im_sorted[idx]);
          }
          if (i_val_new >= pasefmini_)
          {
            double mz_weighted_sum = 0.0;
            double im_weighted_sum = 0.0;
            for (Size k = 0; k < l_new; ++k)
            {
              mz_weighted_sum += all_mz[k] * intensity_sorted[tmp[k]];
              im_weighted_sum += all_im[k] * intensity_sorted[tmp[k]];
            }
            const double mz_val_new = mz_weighted_sum / i_val_new;
            const double im_val_new = im_weighted_sum / i_val_new;
            mz_new.push_back(mz_val_new);
            intensity_new.push_back(i_val_new);
            im_new.push_back(im_val_new);
            for (Size idx : tmp)
            {
              banned[idx] = true;
            }
          }
        }
      }
      ++peak_idx;
    }
    // Replace spectrum peaks and ion-mobility array with centroided values.
    // Note: any other float data arrays of the spectrum are dropped here.
    spectrum.clear(false);
    for (Size i = 0; i < mz_new.size(); ++i)
    {
      Peak1D peak;
      peak.setMZ(mz_new[i]);
      peak.setIntensity(intensity_new[i]);
      spectrum.push_back(peak);
    }
    MSSpectrum::FloatDataArrays new_fda;
    MSSpectrum::FloatDataArray im_array_out;
    im_array_out.setName(Constants::UserParam::ION_MOBILITY);
    im_array_out.assign(im_new.begin(), im_new.end());
    new_fda.push_back(im_array_out);
    spectrum.setFloatDataArrays(new_fda);
    total_peaks_after += spectrum.size();
  };
  // Only MS1 spectra are centroided; others are skipped entirely.
  for (auto& spectrum : exp)
  {
    if (spectrum.getMSLevel() != 1)
    {
      continue;
    }
    centroid_one_spectrum(spectrum);
  }
  OPENMS_LOG_INFO << "PASEF centroiding: " << total_peaks_before
                  << " peaks -> " << total_peaks_after << " centroided clusters" << endl;
}
// Runs the full per-group feature detection pipeline on one FAIMS CV group
// (or the single non-FAIMS group, faims_cv == NaN):
//   optional PASEF centroiding -> optional hill-tolerance calibration ->
//   hill detection -> hill preprocessing -> hill splitting ->
//   isotope-pattern detection.
// Results are returned via 'hills_out' and 'features_out'.
void Biosaur2Algorithm::processFAIMSGroup_(double faims_cv,
                                           MSExperiment& group_exp,
                                           double original_paseftol,
                                           vector<Hill>& hills_out,
                                           vector<PeptideFeature>& features_out)
{
  if (group_exp.empty())
  {
    return;
  }
  // Determine ion-mobility array availability for this group using IMTypes::determineIMFormat.
  bool any_im_array = false;
  bool any_missing_im = false;
  Size im_spectra_with_data = 0;
  for (const auto& spec : group_exp)
  {
    const auto im_format = IMTypes::determineIMFormat(spec);
    if (im_format != IMFormat::NONE)
    {
      any_im_array = true;
      ++im_spectra_with_data;
    }
    else
    {
      any_missing_im = true;
    }
  }
  if (any_im_array)
  {
    OPENMS_LOG_INFO << "Group (FAIMS CV=" << faims_cv << ") has "
                    << im_spectra_with_data << " / " << group_exp.size()
                    << " spectra with ion-mobility data." << endl;
  }
  // Compute mz_step for PASEF centroiding, mirroring Python's use of
  // hill mass accuracy and the maximum m/z in the group.
  double mz_step = 0.0;
  double max_mz_value = 0.0;
  for (const auto& spectrum : group_exp)
  {
    for (Size peak_idx = 0; peak_idx < spectrum.size(); ++peak_idx)
    {
      const Peak1D& peak = spectrum[peak_idx];
      double mz = peak.getMZ();
      double intensity = peak.getIntensity();
      // Only peaks passing the basic filters contribute to the maximum.
      if (intensity < mini_ || mz < minmz_ || mz > maxmz_)
      {
        continue;
      }
      if (mz > max_mz_value)
      {
        max_mz_value = mz;
      }
    }
  }
  if (max_mz_value > 0.0 && htol_ > 0.0)
  {
    mz_step = htol_ * 1e-6 * max_mz_value;
  }
  // Decide whether to use IM-based centroiding and gating for this group:
  // only when every spectrum has IM data and a positive paseftol was given.
  const bool use_im_group = (any_im_array && !any_missing_im && original_paseftol > 0.0);
  if (use_im_group && mz_step > 0.0)
  {
    OPENMS_LOG_INFO << "Applying PASEF/TIMS centroiding for group (FAIMS CV="
                    << faims_cv << ") with paseftol=" << original_paseftol << endl;
    StopWatch stage_timer;
    stage_timer.start();
    centroidPASEFData_(group_exp, mz_step, original_paseftol);
    stage_timer.stop();
    OPENMS_LOG_INFO << "PASEF centroiding for group (FAIMS CV=" << faims_cv
                    << ") took " << stage_timer.toString() << endl;
  }
  else
  {
    // Mirror Python behavior: if ion mobility is not consistently available
    // (or absent entirely), disable IM-based gating.
    if (original_paseftol > 0.0 && any_im_array && any_missing_im)
    {
      OPENMS_LOG_WARN << "Disabling ion-mobility gating for group (FAIMS CV="
                      << faims_cv
                      << ") due to missing/partial IM arrays; proceeding in 1D m/z space."
                      << endl;
    }
  }
  // Hill mass calibration per group, analogous to Python's per-FAIMS pass.
  double calibrated_htol = htol_;
  if (use_hill_calib_)
  {
    OPENMS_LOG_INFO << "Performing hill mass tolerance calibration for group (FAIMS CV="
                    << faims_cv << ")..." << endl;
    StopWatch stage_timer;
    stage_timer.start();
    vector<double> mass_diffs;
    // Calibrate on (up to) 1000 spectra taken from the middle of the run.
    Size sample_size = min(group_exp.size(), Size(1000));
    Size start_idx = (group_exp.size() > 1000) ? (group_exp.size() / 2 - 500) : 0;
    MSExperiment calib_exp;
    for (Size i = start_idx; i < start_idx + sample_size && i < group_exp.size(); ++i)
    {
      calib_exp.addSpectrum(group_exp[i]);
    }
    // Hill detection is run only to harvest the observed mass differences;
    // the hills themselves are discarded.
    vector<Hill> calib_hills = detectHills_(calib_exp, htol_, mini_, minmz_, maxmz_, /*use_im*/ false, &mass_diffs);
    (void)calib_hills;
    if (!mass_diffs.empty())
    {
      auto calib = calibrateMass_(mass_diffs);
      double calibrated_sigma = calib.second;
      // Tighten the tolerance to 5 sigma of the fitted error distribution,
      // but never loosen it beyond the user-specified htol_.
      calibrated_htol = min(htol_, 5.0 * calibrated_sigma);
      OPENMS_LOG_INFO << "Automatically optimized htol parameter for group (FAIMS CV="
                      << faims_cv << "): " << calibrated_htol
                      << " ppm (was " << htol_ << " ppm)" << endl;
    }
    stage_timer.stop();
    OPENMS_LOG_INFO << "Hill calibration for group (FAIMS CV=" << faims_cv
                    << ") took " << stage_timer.toString() << endl;
  }
  // Hill detection and processing for this group.
  StopWatch stage_timer;
  stage_timer.start();
  vector<Hill> group_hills = detectHills_(group_exp, calibrated_htol, mini_, minmz_, maxmz_, use_im_group);
  stage_timer.stop();
  OPENMS_LOG_INFO << "Hill detection for group (FAIMS CV=" << faims_cv
                  << ") found " << group_hills.size()
                  << " hills in " << stage_timer.toString() << endl;
  stage_timer.reset();
  stage_timer.start();
  group_hills = processHills_(group_hills, minlh_);
  stage_timer.stop();
  OPENMS_LOG_INFO << "Hill preprocessing for group (FAIMS CV=" << faims_cv
                  << ") kept " << group_hills.size()
                  << " hills in " << stage_timer.toString() << endl;
  stage_timer.reset();
  stage_timer.start();
  group_hills = splitHills_(group_hills, hvf_, minlh_);
  stage_timer.stop();
  OPENMS_LOG_INFO << "Hill splitting for group (FAIMS CV=" << faims_cv
                  << ") produced " << group_hills.size()
                  << " hills in " << stage_timer.toString() << endl;
  bool enable_isotope_calib = !ignore_iso_calib_;
  stage_timer.reset();
  stage_timer.start();
  vector<PeptideFeature> group_features =
    detectIsotopePatterns_(group_hills, itol_, cmin_, cmax_, negative_mode_, ivf_, iuse_, enable_isotope_calib, use_im_group);
  stage_timer.stop();
  // For non-FAIMS data (no compensation voltages detected), clear the
  // per-feature drift time so downstream exports do not annotate
  // features with a FAIMS CV meta value.
  if (std::isnan(faims_cv))
  {
    for (auto& pf : group_features)
    {
      pf.drift_time = IMTypes::DRIFTTIME_NOT_SET;
    }
  }
  OPENMS_LOG_INFO << "Isotope pattern detection for group (FAIMS CV=" << faims_cv
                  << ") produced " << group_features.size()
                  << " features in " << stage_timer.toString() << endl;
  hills_out = std::move(group_hills);
  features_out = std::move(group_features);
}
void Biosaur2Algorithm::linkScanToHills_(const MSSpectrum& spectrum,
Size scan_idx,
double htol_ppm,
double min_intensity,
double min_mz,
double max_mz,
double mz_step,
bool use_im_global,
vector<Hill>& hills,
Size& hill_idx_counter,
vector<Size>& prev_peak_to_hill,
const MSSpectrum*& prev_spectrum_ptr,
map<int, vector<int>>& prev_fast_dict,
vector<int>& prev_im_bins,
vector<double>* hill_mass_diffs) const
{
double rt = spectrum.getRT();
// Collect indices of peaks passing basic filters.
vector<int> valid_indices;
valid_indices.reserve(spectrum.size());
for (Size peak_idx = 0; peak_idx < spectrum.size(); ++peak_idx)
{
const Peak1D& peak = spectrum[peak_idx];
double mz = peak.getMZ();
double intensity = peak.getIntensity();
if (intensity < min_intensity || mz < min_mz || mz > max_mz)
{
continue;
}
valid_indices.push_back(static_cast<int>(peak_idx));
}
const Size len_mz = valid_indices.size();
if (len_mz == 0)
{
// Reset linking if the current scan has no usable peaks.
prev_fast_dict.clear();
prev_peak_to_hill.clear();
prev_spectrum_ptr = nullptr;
return;
}
// Build intensity, m/z and optional ion-mobility-bin arrays for valid peaks.
vector<double> intensities(len_mz);
vector<double> mzs(len_mz);
vector<int> im_bin_per_peak(spectrum.size(), 0);
// Use the standardized IM format/helper logic: only build per-peak IM bins
// when we have concatenated ion-mobility data.
const IMFormat im_format = IMTypes::determineIMFormat(spectrum);
const bool use_im_current = use_im_global && (im_format == IMFormat::CONCATENATED);
const MSSpectrum::FloatDataArray* im_array_ptr = nullptr;
if (use_im_current)
{
// getIMData() may throw if IM metadata is inconsistent; let that propagate.
auto [im_index, im_unit] = spectrum.getIMData();
const auto& fda = spectrum.getFloatDataArrays();
if (im_index >= fda.size())
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Ion mobility array index out of range.",
String(im_index));
}
im_array_ptr = &fda[im_index];
}
for (Size i = 0; i < len_mz; ++i)
{
const Size peak_idx = static_cast<Size>(valid_indices[i]);
intensities[i] = spectrum[peak_idx].getIntensity();
mzs[i] = spectrum[peak_idx].getMZ();
if (use_im_current)
{
if (peak_idx >= im_array_ptr->size())
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Ion mobility array shorter than peak list.",
String(peak_idx));
}
const double im = (*im_array_ptr)[peak_idx];
int im_bin = (paseftol_ > 0.0) ? static_cast<int>(im / paseftol_) : 0;
im_bin_per_peak[peak_idx] = im_bin;
}
}
// Sort peaks in descending intensity (reference implementation behavior).
vector<int> order(len_mz);
iota(order.begin(), order.end(), 0);
sort(order.begin(), order.end(),
[&](int a, int b) { return intensities[a] > intensities[b]; });
vector<int> basic_id_sorted(len_mz);
vector<int> fast_array(len_mz);
for (Size pos = 0; pos < len_mz; ++pos)
{
const int local_idx = order[pos];
const int orig_idx = valid_indices[static_cast<Size>(local_idx)];
basic_id_sorted[pos] = orig_idx;
int fm = 0;
if (mz_step > 0.0)
{
fm = static_cast<int>(mzs[static_cast<Size>(local_idx)] / mz_step);
}
fast_array[pos] = fm;
}
// Build fast lookup structure in m/z space for the current scan.
map<int, vector<int>> fast_dict;
for (Size pos = 0; pos < len_mz; ++pos)
{
fast_dict[fast_array[pos]].push_back(basic_id_sorted[pos]);
}
// Hill assignments for peaks in the current scan (indexed by peak index).
vector<Size> curr_peak_to_hill(spectrum.size(),
numeric_limits<Size>::max());
set<int> banned_prev_idx_set;
auto append_peak_to_hill = [&](Size hill_id, int peak_idx)
{
Hill& hill = hills[hill_id];
const Size p_idx = static_cast<Size>(peak_idx);
hill.scan_indices.push_back(scan_idx);
hill.peak_indices.push_back(p_idx);
const Peak1D& peak = spectrum[p_idx];
const double mz = peak.getMZ();
const double intensity = peak.getIntensity();
hill.mz_values.push_back(mz);
hill.intensities.push_back(intensity);
hill.rt_values.push_back(rt);
const double drift_time = spectrum.getDriftTime();
hill.drift_times.push_back(drift_time);
double ion_mobility = -1.0;
const IMFormat im_format_local = IMTypes::determineIMFormat(spectrum);
if (im_format_local == IMFormat::CONCATENATED)
{
auto [im_index_local, im_unit_local] = spectrum.getIMData(); // may throw
const auto& fda_local = spectrum.getFloatDataArrays();
if (im_index_local >= fda_local.size())
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Ion mobility array index out of range.",
String(im_index_local));
}
const auto& im_array_local = fda_local[im_index_local];
if (p_idx >= im_array_local.size())
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Ion mobility array shorter than peak list.",
String(p_idx));
}
ion_mobility = im_array_local[p_idx];
}
else if (drift_time >= 0)
{
if (shouldThrowForMissingIM_(spectrum))
{
throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Ion mobility array missing although drift times are present.");
}
}
hill.ion_mobilities.push_back(ion_mobility);
hill.length = hill.scan_indices.size();
hill.rt_end = rt;
if (hill.length == 1)
{
hill.rt_start = rt;
hill.rt_apex = rt;
hill.intensity_apex = intensity;
}
else if (intensity > hill.intensity_apex)
{
hill.intensity_apex = intensity;
hill.rt_apex = rt;
}
hill.intensity_sum += intensity;
curr_peak_to_hill[p_idx] = hill_id;
};
for (Size pos = 0; pos < len_mz; ++pos)
{
const int idx = basic_id_sorted[pos];
const int fm = fast_array[pos];
const int fi = use_im_current ? im_bin_per_peak[static_cast<Size>(idx)] : 0;
// Collect candidate previous-scan peaks from neighboring m/z bins.
bool flag1 = prev_fast_dict.find(fm) != prev_fast_dict.end();
bool flag2 = prev_fast_dict.find(fm - 1) != prev_fast_dict.end();
bool flag3 = prev_fast_dict.find(fm + 1) != prev_fast_dict.end();
Size assigned_hill = numeric_limits<Size>::max();
if (flag1 || flag2 || flag3)
{
vector<int> all_idx;
if (flag1)
{
const auto& v = prev_fast_dict[fm];
all_idx.insert(all_idx.end(), v.begin(), v.end());
if (flag2)
{
const auto& v2 = prev_fast_dict[fm - 1];
all_idx.insert(all_idx.end(), v2.begin(), v2.end());
}
if (flag3)
{
const auto& v3 = prev_fast_dict[fm + 1];
all_idx.insert(all_idx.end(), v3.begin(), v3.end());
}
}
else if (flag2)
{
const auto& v = prev_fast_dict[fm - 1];
all_idx.insert(all_idx.end(), v.begin(), v.end());
if (flag3)
{
const auto& v3 = prev_fast_dict[fm + 1];
all_idx.insert(all_idx.end(), v3.begin(), v3.end());
}
}
else if (flag3)
{
const auto& v = prev_fast_dict[fm + 1];
all_idx.insert(all_idx.end(), v.begin(), v.end());
}
// Optional ion-mobility gating for previous-scan candidates.
if (use_im_current && !prev_im_bins.empty())
{
vector<int> filtered;
filtered.reserve(all_idx.size());
for (int idx_prev : all_idx)
{
if (idx_prev < 0 || static_cast<Size>(idx_prev) >= prev_im_bins.size())
{
continue;
}
int prev_bin = prev_im_bins[static_cast<Size>(idx_prev)];
if (abs(prev_bin - fi) <= 1)
{
filtered.push_back(idx_prev);
}
}
all_idx.swap(filtered);
}
double best_intensity = 0.0;
int best_idx_prev = -1;
double best_mass_diff_with_sign = 0.0;
const double mz_cur = spectrum[static_cast<Size>(idx)].getMZ();
if (prev_spectrum_ptr != nullptr)
{
const MSSpectrum& prev_spectrum = *prev_spectrum_ptr;
for (int idx_prev : all_idx)
{
if (idx_prev < 0 || static_cast<Size>(idx_prev) >= prev_spectrum.size())
{
continue;
}
if (banned_prev_idx_set.find(idx_prev) != banned_prev_idx_set.end())
{
continue;
}
const Peak1D& prev_peak = prev_spectrum[static_cast<Size>(idx_prev)];
const double cur_intensity = prev_peak.getIntensity();
const double mz_prev = prev_peak.getMZ();
const double cur_mass_diff_with_sign =
(mz_cur - mz_prev) / mz_cur * 1e6;
const double cur_mass_diff = fabs(cur_mass_diff_with_sign);
if (cur_mass_diff <= htol_ppm && cur_intensity >= best_intensity)
{
best_intensity = cur_intensity;
best_idx_prev = idx_prev;
best_mass_diff_with_sign = cur_mass_diff_with_sign;
}
}
}
if (best_idx_prev >= 0)
{
banned_prev_idx_set.insert(best_idx_prev);
if (hill_mass_diffs != nullptr)
{
hill_mass_diffs->push_back(best_mass_diff_with_sign);
}
if (!prev_peak_to_hill.empty() &&
static_cast<Size>(best_idx_prev) < prev_peak_to_hill.size())
{
assigned_hill = prev_peak_to_hill[static_cast<Size>(best_idx_prev)];
}
}
}
// Attach to existing hill if a suitable previous peak was found.
if (assigned_hill != numeric_limits<Size>::max() &&
assigned_hill < hills.size())
{
append_peak_to_hill(assigned_hill, idx);
}
else
{
// Start a new hill for this peak.
Hill hill;
hill.hill_idx = hill_idx_counter++;
hills.push_back(hill);
append_peak_to_hill(hill.hill_idx, idx);
}
}
// Prepare state for linking from this scan to the next.
prev_fast_dict = std::move(fast_dict);
prev_peak_to_hill.assign(spectrum.size(), numeric_limits<Size>::max());
prev_im_bins.assign(spectrum.size(), 0);
for (Size i = 0; i < spectrum.size(); ++i)
{
prev_peak_to_hill[i] = curr_peak_to_hill[i];
prev_im_bins[i] = im_bin_per_peak[i];
}
prev_spectrum_ptr = &spectrum;
}
/// Detects hills (mass traces) over all scans of the experiment.
///
/// Iterates the scans in order and links peaks of each scan to hills started
/// in earlier scans via linkScanToHills_(), carrying the linking state
/// (previous-scan peak->hill map, fast m/z dictionary, IM bins, previous
/// spectrum) from one iteration to the next.
///
/// @param hills_mass_diffs Optional output: signed ppm differences of linked
///        peak pairs; cleared before use when non-null.
/// @return All hills detected across the experiment.
vector<Biosaur2Algorithm::Hill> Biosaur2Algorithm::detectHills_(const MSExperiment& exp,
                                                                double htol_ppm,
                                                                double min_intensity,
                                                                double min_mz,
                                                                double max_mz,
                                                                bool use_im,
                                                                vector<double>* hill_mass_diffs) const
{
  vector<Hill> detected_hills;
  Size next_hill_id = 0;
  // m/z bin width for the fast scan-to-scan lookup,
  // mirroring the Python reference implementation.
  const double mz_bin_width = computeHillMzStep_(exp, htol_ppm, min_intensity, min_mz, max_mz);
  if (hill_mass_diffs != nullptr)
  {
    hill_mass_diffs->clear();
  }
  // Linking state carried from one scan to the next.
  vector<Size> peaks_to_hills_prev;
  map<int, vector<int>> fast_dict_prev;
  vector<int> im_bins_prev;
  const MSSpectrum* last_spectrum = nullptr;
  for (Size scan = 0; scan < exp.size(); ++scan)
  {
    linkScanToHills_(exp[scan],
                     scan,
                     htol_ppm,
                     min_intensity,
                     min_mz,
                     max_mz,
                     mz_bin_width,
                     use_im,
                     detected_hills,
                     next_hill_id,
                     peaks_to_hills_prev,
                     last_spectrum,
                     fast_dict_prev,
                     im_bins_prev,
                     hill_mass_diffs);
  }
  return detected_hills;
}
/// Post-processes detected hills: drops hills shorter than @p min_length and
/// recomputes per-hill summary values.
///
/// The drift-time median is computed via calculateMedian_(). The hill center
/// m/z and the ion mobility are recomputed as intensity-weighted means
/// (mirroring the reference Biosaur2 implementation); when the weighted mean
/// cannot be formed (size mismatch, empty data, or zero total intensity for
/// the IM case), the ion mobility falls back to the plain median.
///
/// @param hills      Input hills.
/// @param min_length Minimal number of scans a hill must span to be kept.
/// @return The surviving hills with updated summary fields.
vector<Biosaur2Algorithm::Hill> Biosaur2Algorithm::processHills_(const vector<Hill>& hills, Size min_length) const
{
  // Intensity-weighted mean helper; returns false when the total weight
  // is not positive (in that case 'out' is left untouched).
  auto weightedMean = [](const vector<double>& values,
                         const vector<double>& weights,
                         double& out) -> bool
  {
    double numerator = 0.0;
    double total_weight = 0.0;
    for (Size i = 0; i < values.size(); ++i)
    {
      numerator += values[i] * weights[i];
      total_weight += weights[i];
    }
    if (total_weight <= 0.0)
    {
      return false;
    }
    out = numerator / total_weight;
    return true;
  };
  vector<Hill> kept;
  for (const Hill& candidate : hills)
  {
    if (candidate.length < min_length)
    {
      continue; // too short: discard
    }
    Hill result = candidate;
    result.drift_time_median = calculateMedian_(candidate.drift_times);
    // Weighted-mean m/z only when the arrays are consistent; otherwise the
    // previously stored value is kept unchanged.
    if (!candidate.mz_values.empty() &&
        candidate.mz_values.size() == candidate.intensities.size())
    {
      double mz_mean = 0.0;
      if (weightedMean(candidate.mz_values, candidate.intensities, mz_mean))
      {
        result.mz_weighted_mean = mz_mean;
      }
    }
    // Weighted-mean ion mobility with median fallback.
    if (!candidate.ion_mobilities.empty() &&
        candidate.ion_mobilities.size() == candidate.intensities.size())
    {
      double im_mean = 0.0;
      result.ion_mobility_median =
          weightedMean(candidate.ion_mobilities, candidate.intensities, im_mean)
              ? im_mean
              : calculateMedian_(candidate.ion_mobilities);
    }
    else
    {
      result.ion_mobility_median = calculateMedian_(candidate.ion_mobilities);
    }
    kept.push_back(result);
  }
  return kept;
}
/// Splits hills at intensity valleys, mirroring Biosaur2's split_peaks.
///
/// For each hill of length >= 2*min_length the intensity profile is smoothed
/// with a mean filter (window 3) and scanned for valleys: positions whose
/// left and right maxima both exceed the valley intensity by at least a
/// factor of @p hvf. Confirmed valleys become segment boundaries; segments
/// shorter than @p min_length are dropped. The first surviving segment keeps
/// the original hill index, later segments receive fresh unique indices.
///
/// @param hills      Hills to (potentially) split.
/// @param hvf        Hill valley factor (required max/valley intensity ratio).
/// @param min_length Minimal segment length in scans.
/// @return Hills after splitting.
/// @throws Exception::InvalidValue if a split segment's meta data arrays end
///         up inconsistent (defensive internal check).
vector<Biosaur2Algorithm::Hill> Biosaur2Algorithm::splitHills_(const vector<Hill>& hills, double hvf, Size min_length) const
{
  vector<Hill> split_hills;
  // Determine the next free hill index so that new split segments
  // receive unique IDs, similar to the Python implementation where
  // newly created segments get fresh hill indices.
  Size next_hill_idx = 0;
  for (const auto& h : hills)
  {
    next_hill_idx = max(next_hill_idx, h.hill_idx + 1);
  }
  for (const auto& hill : hills)
  {
    // Only attempt splitting for sufficiently long hills, mirroring the
    // reference implementation (length >= 2 * min_length).
    if (hill.length < 2 * min_length)
    {
      split_hills.push_back(hill);
      continue;
    }
    const Size hill_len = hill.length;
    // Smooth the intensity profile before valley detection.
    vector<double> smoothed = meanFilter_(hill.intensities, 3);
    // First pass: identify candidate valley indices (min_idx_list) and
    // corresponding recheck positions, closely following split_peaks.
    vector<Size> min_idx_list;
    vector<Size> recheck_positions;
    const int min_len = static_cast<int>(min_length);
    const int c_len = static_cast<int>(hill_len) - min_len;
    int idx = min_len - 1;
    Size l_idx = 0;       // left boundary of the left-maximum search window
    double min_val = 0.0; // quality (l_r * r_r) of the last accepted valley
    while (idx <= c_len)
    {
      // Once far enough past the last accepted valley, restrict the
      // left-maximum search window to start at that valley.
      if (!min_idx_list.empty() &&
          static_cast<Size>(idx) >= min_idx_list.back() + min_length)
      {
        l_idx = min_idx_list.back();
      }
      // Left and right maxima around the candidate valley.
      double valley_intensity = smoothed[static_cast<Size>(idx)];
      if (valley_intensity <= 0.0)
      {
        ++idx;
        continue;
      }
      double left_max = 0.0;
      for (Size j = l_idx; j < static_cast<Size>(idx); ++j)
      {
        left_max = max(left_max, smoothed[j]);
      }
      if (left_max == 0.0)
      {
        ++idx;
        continue;
      }
      double l_r = left_max / valley_intensity;
      if (l_r >= hvf)
      {
        double right_max = 0.0;
        for (Size j = static_cast<Size>(idx) + 1; j < hill_len; ++j)
        {
          right_max = max(right_max, smoothed[j]);
        }
        if (right_max > 0.0)
        {
          double r_r = right_max / valley_intensity;
          if (r_r >= hvf)
          {
            // Valley quality and split position: shift the boundary by one
            // towards the lower flank when the left flank is higher.
            double mult_val = l_r * r_r;
            int include_factor = (l_r > r_r) ? 1 : 0;
            int candidate_pos_int = idx + include_factor;
            if (min_len <= candidate_pos_int && candidate_pos_int <= c_len)
            {
              Size candidate_pos = static_cast<Size>(candidate_pos_int);
              if (min_idx_list.empty() ||
                  candidate_pos >= min_idx_list.back() + min_length)
              {
                min_idx_list.push_back(candidate_pos);
                recheck_positions.push_back(static_cast<Size>(idx));
                min_val = mult_val;
              }
              else if (mult_val > min_val)
              {
                // Candidate too close to the previous valley:
                // keep the better (deeper) of the two.
                min_idx_list.back() = candidate_pos;
                recheck_positions.back() = static_cast<Size>(idx);
                min_val = mult_val;
              }
            }
          }
        }
      }
      ++idx;
    }
    // Second pass: recheck right-hand maxima for each candidate valley.
    vector<Size> final_splits;
    if (!min_idx_list.empty())
    {
      for (Size k = 0; k < min_idx_list.size(); ++k)
      {
        Size min_idx = min_idx_list[k];
        Size recheck_idx = recheck_positions[k];
        // The right-hand window ends at the next valley (or the hill end).
        Size end_idx = (k + 1 < min_idx_list.size()) ? min_idx_list[k + 1]
                                                     : hill_len;
        if (recheck_idx + 1 >= end_idx || recheck_idx >= hill_len)
        {
          continue;
        }
        double recheck_val = smoothed[recheck_idx];
        if (recheck_val <= 0.0)
        {
          continue;
        }
        double right_max = 0.0;
        for (Size j = recheck_idx + 1; j < end_idx; ++j)
        {
          right_max = max(right_max, smoothed[j]);
        }
        if (right_max / recheck_val >= hvf)
        {
          final_splits.push_back(min_idx);
        }
      }
    }
    if (final_splits.empty())
    {
      // No valleys detected: keep original hill unchanged.
      split_hills.push_back(hill);
      continue;
    }
    // Construct segments defined by split positions.
    vector<Size> boundaries;
    boundaries.push_back(0);
    for (Size pos : final_splits)
    {
      boundaries.push_back(pos);
    }
    boundaries.push_back(hill_len);
    for (Size s = 0; s + 1 < boundaries.size(); ++s)
    {
      Size seg_start = boundaries[s];
      Size seg_end = boundaries[s + 1];
      if (seg_end <= seg_start) continue;
      Size seg_len = seg_end - seg_start;
      if (seg_len < min_length) continue;
      // Copy the hill and re-fill the per-scan arrays for this segment only.
      Hill new_hill = hill;
      new_hill.scan_indices.clear();
      new_hill.peak_indices.clear();
      new_hill.mz_values.clear();
      new_hill.intensities.clear();
      new_hill.rt_values.clear();
      new_hill.drift_times.clear();
      new_hill.ion_mobilities.clear();
      for (Size k = seg_start; k < seg_end; ++k)
      {
        new_hill.scan_indices.push_back(hill.scan_indices.at(k));
        new_hill.peak_indices.push_back(hill.peak_indices.at(k));
        new_hill.mz_values.push_back(hill.mz_values.at(k));
        new_hill.intensities.push_back(hill.intensities.at(k));
        new_hill.rt_values.push_back(hill.rt_values.at(k));
        if (k < hill.drift_times.size())
        {
          new_hill.drift_times.push_back(hill.drift_times.at(k));
        }
        if (k < hill.ion_mobilities.size())
        {
          new_hill.ion_mobilities.push_back(hill.ion_mobilities.at(k));
        }
      }
      if (new_hill.drift_times.size() != new_hill.scan_indices.size() ||
          new_hill.ion_mobilities.size() != new_hill.scan_indices.size())
      {
        throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                      "Split hill meta data arrays are inconsistent.",
                                      String(new_hill.scan_indices.size()));
      }
      new_hill.length = new_hill.scan_indices.size();
      new_hill.rt_start = new_hill.rt_values.front();
      new_hill.rt_end = new_hill.rt_values.back();
      // Recompute hill center m/z for the split segment using
      // an intensity-weighted mean to mirror processHills_ and
      // the reference Biosaur2 implementation.
      double weighted_mz_sum = 0.0;
      double intensity_sum = 0.0;
      for (Size idx = 0; idx < new_hill.mz_values.size(); ++idx)
      {
        const double intensity = new_hill.intensities[idx];
        weighted_mz_sum += new_hill.mz_values[idx] * intensity;
        intensity_sum += intensity;
      }
      if (intensity_sum > 0.0)
      {
        new_hill.mz_weighted_mean = weighted_mz_sum / intensity_sum;
      }
      new_hill.intensity_sum = intensity_sum;
      // Apex = most intense point of the segment.
      auto max_it = max_element(new_hill.intensities.begin(), new_hill.intensities.end());
      Size apex_idx = distance(new_hill.intensities.begin(), max_it);
      new_hill.intensity_apex = *max_it;
      new_hill.rt_apex = new_hill.rt_values[apex_idx];
      // Assign hill indices: keep the original ID for the first
      // segment and assign fresh IDs to subsequent segments,
      // mirroring the Python split_peaks behavior.
      if (s == 0)
      {
        new_hill.hill_idx = hill.hill_idx;
      }
      else
      {
        new_hill.hill_idx = next_hill_idx++;
      }
      split_hills.push_back(new_hill);
    }
  }
  return split_hills;
}
/// Checks whether an isotope intensity series contains a valley and, if so,
/// returns the truncation position.
///
/// The apex intensities of the isotope hills are smoothed (mean filter,
/// window 3); starting past the maximum (and no earlier than position 4),
/// the first position whose smoothed intensity times @p ivf is still below
/// a later maximum is reported as the split position.
///
/// @param isotopes Isotope candidates in isotope order.
/// @param hills    Hills referenced by the candidates via hill_idx.
/// @param ivf      Isotope valley factor.
/// @return Number of isotopes to keep (== isotopes.size()/smoothed size when
///         no valley is found).
Size Biosaur2Algorithm::checkIsotopeValleySplit_(const vector<IsotopeCandidate>& isotopes,
                                                 const vector<Hill>& hills,
                                                 double ivf) const
{
  // 0 or 1 isotope cannot contain a valley.
  if (isotopes.size() <= 1)
  {
    return isotopes.size();
  }
  // Collect apex intensities of the referenced hills in isotope order.
  vector<double> isotope_intensities;
  isotope_intensities.reserve(isotopes.size());
  for (const auto& iso : isotopes)
  {
    auto hill_it = find_if(hills.begin(), hills.end(),
                           [&iso](const Hill& h) { return h.hill_idx == iso.hill_idx; });
    // BUGFIX: the previous code dereferenced find_if's result unconditionally,
    // which is undefined behavior when a stale hill_idx has no matching hill.
    // Treat a missing hill as zero intensity to keep positions aligned.
    isotope_intensities.push_back(hill_it != hills.end() ? hill_it->intensity_apex : 0.0);
  }
  vector<double> smoothed = meanFilter_(isotope_intensities, 3);
  Size max_pos = distance(smoothed.begin(), max_element(smoothed.begin(), smoothed.end()));
  // Never report a split before position 4 or at/before the apex.
  Size min_check_pos = max(Size(4), max_pos + 1);
  // Note: 'i + 1 < smoothed.size()' is equivalent to the former
  // 'i < smoothed.size() - 1' but cannot underflow for empty input.
  for (Size i = min_check_pos; i + 1 < smoothed.size(); ++i)
  {
    double local_min = smoothed[i];
    double right_max = *max_element(smoothed.begin() + i + 1, smoothed.end());
    if (local_min * ivf < right_max)
    {
      return i; // valley found: keep only the first i isotopes
    }
  }
  return smoothed.size();
}
/// Performs an initial isotope mass-error calibration from raw hills.
///
/// For every hill, tentative isotope partners (isotope numbers 1-9) are
/// searched at the expected C13 spacings for charges max_charge..min_charge;
/// their signed ppm mass errors are collected per isotope number. Isotope
/// numbers 1-3 with at least 1000 observations are calibrated directly via
/// calibrateMass_(); numbers 4-9 without enough data are extrapolated from
/// the two preceding isotopes (linear shift, multiplicative sigma).
///
/// @note The inner candidate loop breaks as soon as a hill's m/z exceeds the
///       upper tolerance bound, which assumes @p hills are sorted by
///       ascending mz_weighted_mean — TODO confirm at the call site.
///
/// @param hills                Hills to derive calibration statistics from.
/// @param itol_ppm             Isotope mass tolerance in ppm (also default sigma).
/// @param min_charge           Minimal charge state to consider.
/// @param max_charge           Maximal charge state to consider.
/// @param enable_isotope_calib If false, returns defaults (shift 0, sigma itol_ppm).
/// @return Map isotope number (1..9) -> (ppm shift, ppm sigma).
map<int, pair<double, double>> Biosaur2Algorithm::performInitialIsotopeCalibration_(const vector<Hill>& hills,
                                                                                   double itol_ppm,
                                                                                   int min_charge,
                                                                                   int max_charge,
                                                                                   bool enable_isotope_calib) const
{
  // Defaults: no shift, sigma = user tolerance.
  map<int, pair<double, double>> isotope_calib_map;
  for (int ic = 1; ic <= 9; ++ic)
  {
    isotope_calib_map[ic] = make_pair(0.0, itol_ppm);
  }
  if (!enable_isotope_calib || hills.empty())
  {
    return isotope_calib_map;
  }
  OPENMS_LOG_INFO << "Performing isotope calibration..." << endl;
  const double ISOTOPE_MASSDIFF = Constants::C13C12_MASSDIFF_U;
  // Collected signed ppm errors per isotope number.
  map<int, vector<double>> isotope_errors;
  for (int ic = 1; ic <= 9; ++ic)
  {
    isotope_errors[ic] = vector<double>();
  }
  for (Size i = 0; i < hills.size(); ++i)
  {
    const Hill& mono_hill = hills[i];
    double mono_mz = mono_hill.mz_weighted_mean;
    // Try charges from high to low; stop after the first charge for which
    // the first isotope was matched (found_first).
    for (int charge = max_charge; charge >= min_charge; --charge)
    {
      double mz_spacing = ISOTOPE_MASSDIFF / static_cast<double>(charge);
      bool found_first = false;
      for (int iso_num = 1; iso_num <= 9; ++iso_num)
      {
        double expected_mz = mono_mz + iso_num * mz_spacing;
        double mz_tolerance = expected_mz * itol_ppm * 1e-6;
        // Among hills within tolerance, pick the one with the highest apex.
        Size best_j = numeric_limits<Size>::max();
        double best_intensity = -1.0;
        for (Size j = i + 1; j < hills.size(); ++j)
        {
          if (hills[j].mz_weighted_mean > expected_mz + mz_tolerance)
          {
            break; // past the window (relies on ascending m/z order)
          }
          if (hills[j].mz_weighted_mean < expected_mz - mz_tolerance)
          {
            continue;
          }
          double diff = fabs(hills[j].mz_weighted_mean - expected_mz);
          if (diff <= mz_tolerance)
          {
            if (hills[j].intensity_apex > best_intensity)
            {
              best_intensity = hills[j].intensity_apex;
              best_j = j;
            }
          }
        }
        if (best_j != numeric_limits<Size>::max())
        {
          // Only hills spanning >= 3 scans contribute to the statistics.
          if (mono_hill.length >= 3)
          {
            double mass_diff_ppm = calculatePPM_(hills[best_j].mz_weighted_mean, expected_mz);
            isotope_errors[iso_num].push_back(mass_diff_ppm);
            if (iso_num == 1)
            {
              found_first = true;
            }
          }
        }
      }
      if (found_first)
      {
        break;
      }
    }
  }
  // Direct calibration for isotopes 1-3 (with logging).
  for (int ic = 1; ic <= 3; ++ic)
  {
    if (isotope_errors[ic].size() >= 1000)
    {
      auto calib = calibrateMass_(isotope_errors[ic]);
      isotope_calib_map[ic] = calib;
      OPENMS_LOG_INFO << "Isotope " << ic << " calibration: shift="
                      << calib.first << " ppm, sigma=" << calib.second << " ppm" << endl;
    }
  }
  // Isotopes 4-9: direct calibration if enough data, otherwise extrapolate
  // from the two preceding isotopes.
  for (int ic = 4; ic <= 9; ++ic)
  {
    if (isotope_errors[ic].size() >= 1000)
    {
      isotope_calib_map[ic] = calibrateMass_(isotope_errors[ic]);
    }
    else if (ic > 1 && isotope_calib_map.find(ic - 1) != isotope_calib_map.end())
    {
      auto prev = isotope_calib_map[ic - 1];
      auto prev2 = isotope_calib_map.find(ic - 2) != isotope_calib_map.end() ?
                   isotope_calib_map[ic - 2] : make_pair(0.0, itol_ppm);
      double shift_delta = prev.first - prev2.first;
      double sigma_ratio = prev.second / max(prev2.second, 0.1);
      isotope_calib_map[ic] = make_pair(prev.first + shift_delta, prev.second * sigma_ratio);
    }
  }
  OPENMS_LOG_INFO << "Isotope 1 calibration: shift=" << isotope_calib_map[1].first
                  << " ppm, sigma=" << isotope_calib_map[1].second << " ppm" << endl;
  return isotope_calib_map;
}
/// Builds the fast m/z lookup structure used during isotope grouping.
///
/// Each hill with a non-empty scan list is registered in its m/z bin and the
/// two neighboring bins (mz_bin-1, mz_bin, mz_bin+1), analogous to the
/// Python hills_mz_median_fast_dict, so a single-bin query finds all hills
/// within one bin width. Optionally computes an ion-mobility bin per hill.
///
/// @param hills         Input hills (uses mz_weighted_mean and scan ranges).
/// @param use_im        Whether to compute ion-mobility bins.
/// @param[out] hills_mz_fast Map m/z bin -> hill entries (cleared first).
/// @param[out] hill_im_bins  Per-hill IM bin; -1 means "no valid IM bin".
/// @return The m/z bin width (htol_ ppm at the maximal hill m/z; 0 if none).
double Biosaur2Algorithm::buildFastMzLookup_(const vector<Hill>& hills,
                                             bool use_im,
                                             map<int, vector<FastHillEntry>>& hills_mz_fast,
                                             vector<int>& hill_im_bins) const
{
  hills_mz_fast.clear();
  // Initialize IM bins to -1 to indicate "no valid ion-mobility bin".
  // This allows bin 0 to be treated as a valid IM bin (very low IM),
  // matching the Python implementation where bin 0 is not special.
  hill_im_bins.assign(hills.size(), -1);
  double max_mz_value = 0.0;
  for (const auto& h : hills)
  {
    max_mz_value = max(max_mz_value, h.mz_weighted_mean);
  }
  const double mz_step = (max_mz_value > 0.0) ? (htol_ * 1e-6 * max_mz_value) : 0.0;
  for (Size idx = 0; idx < hills.size(); ++idx)
  {
    const Hill& h = hills[idx];
    if (h.scan_indices.empty()) continue;
    int mz_bin = (mz_step > 0.0) ? static_cast<int>(h.mz_weighted_mean / mz_step) : 0;
    Size first_scan = h.scan_indices.front();
    Size last_scan = h.scan_indices.back();
    // Register each hill in the central bin and its two neighbors
    // (mz_bin-1, mz_bin, mz_bin+1), analogous to the Python
    // hills_mz_median_fast_dict population.
    hills_mz_fast[mz_bin - 1].push_back(FastHillEntry{idx, first_scan, last_scan});
    hills_mz_fast[mz_bin].push_back(FastHillEntry{idx, first_scan, last_scan});
    hills_mz_fast[mz_bin + 1].push_back(FastHillEntry{idx, first_scan, last_scan});
    if (use_im && h.ion_mobility_median >= 0.0)
    {
      // BUGFIX: guard against paseftol_ == 0 to avoid division by zero
      // (and UB in the int cast); this matches the per-scan IM binning in
      // linkScanToHills_, which uses the same '(paseftol_ > 0.0) ? ... : 0'
      // pattern. All hills then share bin 0, i.e. IM gating has no effect.
      hill_im_bins[idx] = (paseftol_ > 0.0)
                          ? static_cast<int>(h.ion_mobility_median / paseftol_)
                          : 0;
    }
  }
  return mz_step;
}
/// Generates isotope-pattern candidates for every hill (analogous to
/// Biosaur2's get_initial_isotopes).
///
/// For each potential monoisotopic hill and each charge (high to low),
/// isotope partners (isotope numbers 1-9) are collected from the fast m/z
/// lookup at the expected C13 spacings, gated by scan overlap, optional
/// ion-mobility bins, mass tolerance, and RT-profile cosine correlation
/// (>= 0.6). All combinations of per-isotope candidates are then scored
/// against an averagine model via checkingCosCorrelationForCarbon_();
/// accepted combinations become PatternCandidates, and lower charges listed
/// in charge_ban_map are subsequently required to explain at least as many
/// isotopes.
///
/// @param hills             Input hills.
/// @param itol_ppm          Isotope mass tolerance (ppm).
/// @param min_charge        Minimal charge state to test.
/// @param max_charge        Maximal charge state to test.
/// @param ivf               Isotope valley factor for the intensity series.
/// @param mz_step           Bin width of the fast m/z lookup.
/// @param hills_mz_fast     Fast m/z lookup (from buildFastMzLookup_).
/// @param hill_idx_to_index Map hill_idx -> position in @p hills.
/// @param hill_im_bins      Per-hill ion-mobility bins (-1 = none).
/// @param use_im            Whether IM gating is applied.
/// @return All accepted isotope pattern candidates.
vector<Biosaur2Algorithm::PatternCandidate> Biosaur2Algorithm::generateIsotopeCandidates_(
  const vector<Hill>& hills,
  double itol_ppm,
  int min_charge,
  int max_charge,
  double ivf,
  double mz_step,
  const map<int, vector<FastHillEntry>>& hills_mz_fast,
  const map<Size, Size>& hill_idx_to_index,
  const vector<int>& hill_im_bins,
  bool use_im) const
{
  vector<PatternCandidate> ready;
  if (hills.empty())
  {
    return ready;
  }
  const double ISOTOPE_MASSDIFF = Constants::C13C12_MASSDIFF_U;
  // Helper to check if two scan index lists share at least one scan.
  // The merge-style walk relies on both lists being in ascending scan order
  // (hills are built scan by scan, so appends are ordered).
  auto hasScanOverlap = [](const vector<Size>& a, const vector<Size>& b) -> bool
  {
    Size i = 0, j = 0;
    while (i < a.size() && j < b.size())
    {
      if (a[i] == b[j]) return true;
      if (a[i] < b[j]) ++i;
      else ++j;
    }
    return false;
  };
  // Charge ordering: high to low.
  vector<int> charges;
  for (int c = min_charge; c <= max_charge; ++c) charges.push_back(c);
  reverse(charges.begin(), charges.end());
  // Charge ban map as in the Python reference: once a pattern is accepted at
  // a charge, the listed (lower) charges must explain at least as many
  // isotopes to be accepted for the same monoisotopic hill.
  map<int, vector<int>> charge_ban_map = {
    {8, {1, 2, 4}},
    {7, {1}},
    {6, {1, 2, 3}},
    {5, {1}},
    {4, {1, 2}},
    {3, {1}},
    {2, {1}},
    {1, {1}}
  };
  // Initial isotope candidate generation (analogous to get_initial_isotopes).
  for (Size i = 0; i < hills.size(); ++i)
  {
    const Hill& mono_hill = hills[i];
    if (mono_hill.scan_indices.empty()) continue;
    double mono_mz = mono_hill.mz_weighted_mean;
    double mz_tol = itol_ppm * 1e-6 * mono_mz;
    Size hill_scans_1_number = mono_hill.length;
    const vector<Size>& scans1 = mono_hill.scan_indices;
    Size hill_scans_1_list_first = scans1.front();
    Size hill_scans_1_list_last = scans1.back();
    // Per-charge minimum isotope counts imposed by previously accepted
    // patterns (see charge_ban_map).
    map<int, Size> banned_charges;
    for (int charge : charges)
    {
      // candidates[k] holds all acceptable partners for isotope number k+1.
      vector<vector<IsotopeCandidate>> candidates;
      for (int iso_num = 1; iso_num <= 9; ++iso_num)
      {
        vector<IsotopeCandidate> tmp_candidates;
        double expected_mz = mono_mz + ISOTOPE_MASSDIFF * iso_num / static_cast<double>(charge);
        // A single-bin lookup suffices: buildFastMzLookup_ registered each
        // hill in its own bin and both neighbors.
        int m_to_check_fast = (mz_step > 0.0) ? static_cast<int>(expected_mz / mz_step) : 0;
        auto it_bin = hills_mz_fast.find(m_to_check_fast);
        if (it_bin != hills_mz_fast.end())
        {
          for (const auto& entry : it_bin->second)
          {
            Size idx2 = entry.hill_index;
            if (idx2 == i) continue;
            const Hill& hill2 = hills[idx2];
            if (hill2.scan_indices.empty()) continue;
            // Quick scan-range overlap pre-check before the exact one.
            Size hill_scans_2_list_first = entry.first_scan;
            Size hill_scans_2_list_last = entry.last_scan;
            if (hill_scans_1_list_last < hill_scans_2_list_first ||
                hill_scans_2_list_last < hill_scans_1_list_first)
            {
              continue;
            }
            // Optional ion mobility gating for isotope hills.
            // Apply IM-bin gating whenever both hills have a valid bin
            // (>= 0). This treats bin 0 as a normal bin and only skips
            // gating when no IM was available (bin = -1), mirroring the
            // Python reference behaviour.
            if (use_im && hill_im_bins.size() > i && hill_im_bins.size() > idx2 &&
                hill_im_bins[i] >= 0 && hill_im_bins[idx2] >= 0)
            {
              if (abs(hill_im_bins[i] - hill_im_bins[idx2]) > 1)
              {
                continue;
              }
            }
            double mass_diff_abs = hill2.mz_weighted_mean - expected_mz;
            if (fabs(mass_diff_abs) > mz_tol)
            {
              continue;
            }
            if (!hasScanOverlap(scans1, hill2.scan_indices))
            {
              continue;
            }
            // RT-profile similarity gate.
            double cos_cor_RT = cosineCorrelation_(mono_hill.intensities, mono_hill.scan_indices,
                                                   hill2.intensities, hill2.scan_indices);
            if (cos_cor_RT < 0.6)
            {
              continue;
            }
            IsotopeCandidate cand;
            cand.hill_idx = hill2.hill_idx;
            cand.isotope_number = iso_num;
            cand.mass_diff_ppm = mass_diff_abs * 1e6 / expected_mz;
            cand.cos_corr = cos_cor_RT;
            tmp_candidates.push_back(cand);
          }
        }
        if (!tmp_candidates.empty())
        {
          candidates.push_back(tmp_candidates);
        }
        // Stop at the first missing isotope order: the series must be
        // contiguous (candidates.size() == number of matched orders).
        if (candidates.size() < static_cast<Size>(iso_num))
        {
          break;
        }
      }
      // Minimal number of isotope orders required for this charge
      // (raised by previously accepted patterns via charge_ban_map).
      Size min_required = 1;
      auto it_ban = banned_charges.find(charge);
      if (it_ban != banned_charges.end())
      {
        min_required = it_ban->second;
      }
      if (candidates.size() >= min_required)
      {
        double hill_intensity_apex_1 = mono_hill.intensity_apex;
        auto averagine = computeAveragine_(mono_mz * charge, hill_intensity_apex_1);
        vector<double> all_theoretical_int = averagine.first;
        Size max_pos = averagine.second;
        // Enumerate all combinations of isotope candidates across orders.
        if (!candidates.empty())
        {
          // 'indices' is an odometer over candidates[k] (one pick per order).
          vector<Size> indices(candidates.size(), 0);
          bool done = false;
          while (!done)
          {
            vector<IsotopeCandidate> picked_isotopes;
            picked_isotopes.reserve(candidates.size());
            for (Size k = 0; k < candidates.size(); ++k)
            {
              picked_isotopes.push_back(candidates[k][indices[k]]);
            }
            // Build the experimental intensity series (mono first); past the
            // averagine apex, track the running local minimum and stop the
            // series when an intensity rises again by a factor >= ivf.
            vector<double> all_exp_intensity;
            all_exp_intensity.reserve(picked_isotopes.size() + 1);
            all_exp_intensity.push_back(hill_intensity_apex_1);
            double local_minimum = 0.0;
            Size local_minimum_pos = 0;
            Size i_local_isotope = 1;
            for (const auto& iso_cand : picked_isotopes)
            {
              // Find apex intensity of isotope hill (fast lookup by hill_idx).
              double hill_intensity_apex_2 = 0.0;
              auto it_idx = hill_idx_to_index.find(iso_cand.hill_idx);
              if (it_idx != hill_idx_to_index.end())
              {
                hill_intensity_apex_2 = hills[it_idx->second].intensity_apex;
              }
              if (i_local_isotope > max_pos)
              {
                if (i_local_isotope == max_pos + 1 || hill_intensity_apex_2 < local_minimum)
                {
                  local_minimum = hill_intensity_apex_2;
                  local_minimum_pos = i_local_isotope;
                }
                if (hill_intensity_apex_2 >= ivf * local_minimum)
                {
                  if (local_minimum_pos + 1 < all_exp_intensity.size())
                  {
                    all_exp_intensity.resize(local_minimum_pos + 1);
                  }
                  break;
                }
              }
              all_exp_intensity.push_back(hill_intensity_apex_2);
              ++i_local_isotope;
            }
            // Compute cosine correlation and optimal truncation in isotope-intensity space.
            auto cc = checkingCosCorrelationForCarbon_(all_theoretical_int, all_exp_intensity, 0.6);
            double cos_corr_iso = cc.first;
            Size best_pos = cc.second;
            if (cos_corr_iso > 0.0 && best_pos > 1)
            {
              Size n_iso_used = best_pos - 1;
              n_iso_used = min(n_iso_used, picked_isotopes.size());
              PatternCandidate pc;
              pc.mono_index = i;
              pc.mono_mz = mono_mz;
              pc.charge = charge;
              pc.cos_cor_isotopes = cos_corr_iso;
              pc.n_scans = hill_scans_1_number;
              pc.isotopes.assign(picked_isotopes.begin(),
                                 picked_isotopes.begin() + n_iso_used);
              if (!pc.isotopes.empty())
              {
                ready.push_back(pc);
                // Require related lower charges to explain at least as many
                // isotopes for this monoisotopic hill.
                for (int ch_v : charge_ban_map[charge])
                {
                  auto& ref = banned_charges[ch_v];
                  ref = max(ref, n_iso_used);
                }
              }
            }
            // Next combination (advance the odometer).
            Size pos_idx = 0;
            while (pos_idx < indices.size())
            {
              ++indices[pos_idx];
              if (indices[pos_idx] < candidates[pos_idx].size())
              {
                break;
              }
              indices[pos_idx] = 0;
              ++pos_idx;
            }
            if (pos_idx == indices.size())
            {
              done = true;
            }
          }
        }
      }
    }
  }
  return ready;
}
/// Filters isotope candidates of each pattern by RT-apex proximity to the
/// monoisotopic hill.
///
/// When hrttol_ <= 0 (filter disabled) or there is nothing to filter, the
/// input is returned unchanged. Otherwise each pattern keeps only the
/// isotopes whose hill RT apex lies within hrttol_ seconds of the
/// monoisotopic hill's apex; patterns left without isotopes are dropped.
///
/// @param candidates        Patterns to filter.
/// @param hills             Hills referenced by the patterns.
/// @param hill_idx_to_index Map hill_idx -> position in @p hills.
/// @return Filtered patterns.
vector<Biosaur2Algorithm::PatternCandidate> Biosaur2Algorithm::applyRtFiltering_(
  const vector<PatternCandidate>& candidates,
  const vector<Hill>& hills,
  const map<Size, Size>& hill_idx_to_index) const
{
  // Filter disabled or nothing to do: pass the input through unchanged.
  if (hrttol_ <= 0.0 || candidates.empty())
  {
    return candidates;
  }
  vector<PatternCandidate> result;
  result.reserve(candidates.size());
  for (const PatternCandidate& original : candidates)
  {
    const double mono_apex_rt = hills[original.mono_index].rt_apex;
    vector<IsotopeCandidate> surviving;
    surviving.reserve(original.isotopes.size());
    for (const IsotopeCandidate& iso : original.isotopes)
    {
      const auto lookup = hill_idx_to_index.find(iso.hill_idx);
      if (lookup == hill_idx_to_index.end())
      {
        continue; // unknown hill index: drop this isotope
      }
      const double iso_apex_rt = hills[lookup->second].rt_apex;
      if (fabs(iso_apex_rt - mono_apex_rt) <= hrttol_)
      {
        surviving.push_back(iso);
      }
    }
    if (surviving.empty())
    {
      continue; // pattern lost all isotopes: drop it entirely
    }
    PatternCandidate filtered = original;
    filtered.isotopes = std::move(surviving);
    result.push_back(std::move(filtered));
  }
  OPENMS_LOG_INFO << "After RT apex filter (hrttol=" << hrttol_
                  << " s), " << result.size()
                  << " potential isotope patterns remain." << endl;
  return result;
}
/// Refines the isotope mass-error calibration from the accepted pattern
/// candidates (second calibration pass).
///
/// Collects signed ppm errors per isotope number (1-9) from patterns that
/// span at least 3 scans. Isotope numbers with at least 1000 observations
/// are calibrated via calibrateMass_(); numbers 4-9 without enough data are
/// extrapolated from the two preceding isotopes (linear shift,
/// multiplicative sigma).
///
/// @param candidates           Accepted isotope pattern candidates.
/// @param itol_ppm             Isotope mass tolerance in ppm (default sigma).
/// @param enable_isotope_calib If false, defaults are returned immediately.
/// @return Map isotope number (1..9) -> (ppm shift, ppm sigma).
map<int, pair<double, double>> Biosaur2Algorithm::refineIsotopeCalibration_(
  const vector<PatternCandidate>& candidates,
  double itol_ppm,
  bool enable_isotope_calib) const
{
  // Defaults: no shift, sigma = user tolerance, for isotopes 1..9.
  map<int, pair<double, double>> isotope_calib_map_ready;
  for (int ic = 1; ic <= 9; ++ic)
  {
    isotope_calib_map_ready[ic] = make_pair(0.0, itol_ppm);
  }
  // PERF: return the defaults before collecting any error statistics when
  // calibration is disabled (the previous version gathered all errors first
  // and then discarded them).
  if (!enable_isotope_calib)
  {
    return isotope_calib_map_ready;
  }
  // Collect signed ppm errors per isotope number; only patterns spanning
  // at least 3 scans contribute.
  map<int, vector<double>> isotope_errors_ready;
  for (int ic = 1; ic <= 9; ++ic) isotope_errors_ready[ic] = vector<double>();
  for (const auto& pc : candidates)
  {
    if (pc.n_scans < 3) continue;
    for (Size idx = 0; idx < pc.isotopes.size(); ++idx)
    {
      int iso_num = static_cast<int>(pc.isotopes[idx].isotope_number);
      if (iso_num >= 1 && iso_num <= 9)
      {
        isotope_errors_ready[iso_num].push_back(pc.isotopes[idx].mass_diff_ppm);
      }
    }
  }
  // Isotopes 1-3: calibrate directly when enough observations are available.
  for (int ic = 1; ic <= 3; ++ic)
  {
    if (isotope_errors_ready[ic].size() >= 1000)
    {
      isotope_calib_map_ready[ic] = calibrateMass_(isotope_errors_ready[ic]);
    }
  }
  // Isotopes 4-9: calibrate directly if possible; otherwise extrapolate from
  // the two preceding isotopes. (The former 'ic > 1' guard was dead code:
  // ic starts at 4, and entries ic-1 / ic-2 always exist in the map.)
  for (int ic = 4; ic <= 9; ++ic)
  {
    if (isotope_errors_ready[ic].size() >= 1000)
    {
      isotope_calib_map_ready[ic] = calibrateMass_(isotope_errors_ready[ic]);
    }
    else
    {
      const pair<double, double> prev = isotope_calib_map_ready[ic - 1];
      const pair<double, double> prev2 = isotope_calib_map_ready[ic - 2];
      const double shift_delta = prev.first - prev2.first;
      const double sigma_ratio = prev.second / max(prev2.second, 0.1);
      isotope_calib_map_ready[ic] = make_pair(prev.first + shift_delta, prev.second * sigma_ratio);
    }
  }
  return isotope_calib_map_ready;
}
/// Filters each pattern's isotopes against the refined calibration and
/// re-scores the pattern in isotope-intensity space.
///
/// With calibration enabled, an isotope is kept only while its ppm error
/// lies within 5 sigma of the calibrated shift for its isotope number; the
/// first failing isotope truncates the series (later isotopes are dropped
/// too). The surviving series is then re-correlated against the averagine
/// model via checkingCosCorrelationForCarbon_() and truncated to the best
/// position; patterns failing the correlation are discarded.
///
/// @param candidates               Patterns to filter.
/// @param hills                    Hills referenced by the patterns.
/// @param hill_idx_to_index        Map hill_idx -> position in @p hills.
/// @param isotope_calib_map_ready  Isotope number -> (ppm shift, ppm sigma).
/// @param enable_isotope_calib     Whether the 5-sigma gating is applied.
/// @return Patterns that survive gating and re-correlation.
vector<Biosaur2Algorithm::PatternCandidate> Biosaur2Algorithm::filterByCalibration_(
  const vector<PatternCandidate>& candidates,
  const vector<Hill>& hills,
  const map<Size, Size>& hill_idx_to_index,
  const map<int, pair<double, double>>& isotope_calib_map_ready,
  bool enable_isotope_calib) const
{
  vector<PatternCandidate> filtered_ready;
  filtered_ready.reserve(candidates.size());
  // 'pc' is a deliberate copy: it is modified (isotopes truncated) below.
  for (auto pc : candidates)
  {
    if (enable_isotope_calib)
    {
      // Keep the leading isotopes within 5 sigma of the calibrated shift;
      // stop at the first violation (series must stay contiguous).
      vector<IsotopeCandidate> tmp;
      for (const auto& cand : pc.isotopes)
      {
        int iso_num = static_cast<int>(cand.isotope_number);
        auto calib_it = isotope_calib_map_ready.find(iso_num);
        if (calib_it == isotope_calib_map_ready.end())
        {
          continue;
        }
        const pair<double, double>& calib = calib_it->second;
        if (fabs(cand.mass_diff_ppm - calib.first) <= 5.0 * calib.second)
        {
          tmp.push_back(cand);
        }
        else
        {
          break;
        }
      }
      pc.isotopes.swap(tmp);
    }
    if (pc.isotopes.empty())
    {
      continue;
    }
    // Recompute cosine correlation in isotope-intensity space and
    // potentially truncate the series, analogous to the second pass
    // in process_features_iteration.
    const Hill& mono_hill = hills[pc.mono_index];
    double mono_mz = mono_hill.mz_weighted_mean;
    double hill_intensity_apex_1 = mono_hill.intensity_apex;
    auto averagine = computeAveragine_(mono_mz * pc.charge, hill_intensity_apex_1);
    vector<double> all_theoretical_int = averagine.first;
    // Experimental series: monoisotopic apex first, then each isotope's apex
    // (0.0 when the hill index cannot be resolved).
    vector<double> all_exp_intensity;
    all_exp_intensity.reserve(pc.isotopes.size() + 1);
    all_exp_intensity.push_back(hill_intensity_apex_1);
    for (const auto& iso_cand : pc.isotopes)
    {
      double hill_intensity_apex_2 = 0.0;
      auto it_idx = hill_idx_to_index.find(iso_cand.hill_idx);
      if (it_idx != hill_idx_to_index.end())
      {
        hill_intensity_apex_2 = hills[it_idx->second].intensity_apex;
      }
      all_exp_intensity.push_back(hill_intensity_apex_2);
    }
    auto cc = checkingCosCorrelationForCarbon_(all_theoretical_int, all_exp_intensity, 0.6);
    double cos_corr_iso = cc.first;
    Size best_pos = cc.second;
    if (cos_corr_iso <= 0.0 || best_pos <= 1)
    {
      continue;
    }
    // best_pos counts series entries including the monoisotope, so
    // best_pos - 1 isotopes remain after truncation.
    Size n_iso_used = best_pos - 1;
    n_iso_used = min(n_iso_used, pc.isotopes.size());
    pc.isotopes.resize(n_iso_used);
    pc.cos_cor_isotopes = cos_corr_iso;
    if (!pc.isotopes.empty())
    {
      filtered_ready.push_back(pc);
    }
  }
  return filtered_ready;
}
vector<Biosaur2Algorithm::PeptideFeature> Biosaur2Algorithm::selectNonOverlappingPatterns_(
const vector<PatternCandidate>& filtered_ready,
const vector<Hill>& hills,
bool negative_mode,
int iuse,
double itol_ppm) const
{
vector<PeptideFeature> features;
if (filtered_ready.empty())
{
return features;
}
// Final greedy selection of non-overlapping patterns (analogous to ready_final).
vector<PatternCandidate> sorted = filtered_ready;
sort(sorted.begin(), sorted.end(),
[](const PatternCandidate& a, const PatternCandidate& b)
{
if (a.isotopes.size() != b.isotopes.size())
{
return a.isotopes.size() > b.isotopes.size();
}
return a.cos_cor_isotopes > b.cos_cor_isotopes;
});
// Build a lookup from hill_idx to Hill pointer for fast access to
// apex intensities during the truncation / re-correlation step.
map<Size, const Hill*> hill_lookup_for_iso;
for (const auto& h : hills)
{
hill_lookup_for_iso[h.hill_idx] = &h;
}
set<Size> occupied_hills;
for (const auto& pc_in : sorted)
{
PatternCandidate pc = pc_in; // local copy that we may truncate
const Hill& mono_hill = hills[pc.mono_index];
const double mono_mz_center = mono_hill.mz_weighted_mean;
// Skip patterns whose monoisotopic hill is already used.
if (occupied_hills.find(mono_hill.hill_idx) != occupied_hills.end())
{
continue;
}
bool iso_conflict = false;
for (const auto& iso : pc.isotopes)
{
if (occupied_hills.find(iso.hill_idx) != occupied_hills.end())
{
iso_conflict = true;
break;
}
}
if (iso_conflict)
{
// Try to keep only leading isotopes whose hills are not yet used,
// mirroring the Python ready_final truncation behavior.
vector<IsotopeCandidate> tmp_iso;
for (const auto& iso : pc.isotopes)
{
if (occupied_hills.find(iso.hill_idx) == occupied_hills.end())
{
tmp_iso.push_back(iso);
}
else
{
break;
}
}
if (tmp_iso.empty()) continue;
// Recompute cosine correlation in isotope-intensity space for the
// truncated pattern and potentially truncate further based on the
// averagine-explained / correlation criterion, analogous to
// checking_cos_correlation_for_carbon in the Python code.
double mono_mz = mono_hill.mz_weighted_mean;
double hill_intensity_apex_1 = mono_hill.intensity_apex;
auto averagine = computeAveragine_(mono_mz * pc.charge, hill_intensity_apex_1);
const vector<double>& theor_full = averagine.first;
Size len = min(static_cast<Size>(tmp_iso.size()) + 1, theor_full.size());
if (len <= 1) continue;
vector<double> theor(len);
vector<double> exp(len);
for (Size k = 0; k < len; ++k)
{
theor[k] = theor_full[k];
}
exp[0] = hill_intensity_apex_1;
for (Size k = 1; k < len; ++k)
{
Size iso_index = k - 1;
auto it_h = hill_lookup_for_iso.find(tmp_iso[iso_index].hill_idx);
if (it_h != hill_lookup_for_iso.end())
{
exp[k] = it_h->second->intensity_apex;
}
else
{
exp[k] = 0.0;
}
}
auto cc = checkingCosCorrelationForCarbon_(theor, exp, 0.6);
double cos_corr_iso = cc.first;
Size best_pos = cc.second;
if (cos_corr_iso <= 0.0 || best_pos <= 1)
{
continue;
}
Size n_iso_used = min(best_pos - 1, tmp_iso.size());
tmp_iso.resize(n_iso_used);
pc.isotopes = tmp_iso;
pc.cos_cor_isotopes = cos_corr_iso;
}
if (pc.isotopes.empty())
{
continue;
}
// Debug sanity checks for the final isotope pattern (optional).
for (const auto& iso_cand : pc.isotopes)
{
auto it_h = hill_lookup_for_iso.find(iso_cand.hill_idx);
if (it_h == hill_lookup_for_iso.end())
{
continue;
}
const Hill* iso_hill_ptr = it_h->second;
debugCheckIsotopeConsistency_("detectIsotopePatterns_",
mono_mz_center,
mono_hill.rt_apex,
mono_hill.hill_idx,
pc.charge,
itol_ppm,
*iso_hill_ptr,
iso_cand.isotope_number);
}
// At this point, either there was no conflict or we have a
// truncated pattern that still passes the cosine check.
PeptideFeature feature;
feature.mz = pc.mono_mz;
feature.rt_start = mono_hill.rt_start;
feature.rt_end = mono_hill.rt_end;
feature.rt_apex = mono_hill.rt_apex;
feature.intensity_apex = mono_hill.intensity_apex;
feature.intensity_sum = mono_hill.intensity_sum;
if (iuse != 0)
{
int isotopes_to_add = (iuse == -1) ? static_cast<int>(pc.isotopes.size())
: min(static_cast<int>(pc.isotopes.size()), iuse);
for (int iso_idx = 0; iso_idx < isotopes_to_add; ++iso_idx)
{
auto it_h = hill_lookup_for_iso.find(pc.isotopes[static_cast<Size>(iso_idx)].hill_idx);
if (it_h != hill_lookup_for_iso.end())
{
const Hill* h = it_h->second;
feature.intensity_apex += h->intensity_apex;
feature.intensity_sum += h->intensity_sum;
}
}
}
feature.charge = pc.charge;
feature.n_isotopes = pc.isotopes.size() + 1;
feature.n_scans = mono_hill.length;
feature.isotopes = pc.isotopes;
feature.mono_hill_idx = mono_hill.hill_idx;
feature.drift_time = mono_hill.drift_time_median;
feature.ion_mobility = mono_hill.ion_mobility_median;
double proton_mass = Constants::PROTON_MASS_U;
if (negative_mode)
{
feature.mass_calib = pc.mono_mz * pc.charge + proton_mass * pc.charge;
}
else
{
feature.mass_calib = pc.mono_mz * pc.charge - proton_mass * pc.charge;
}
features.push_back(feature);
occupied_hills.insert(mono_hill.hill_idx);
for (const auto& iso : pc.isotopes)
{
occupied_hills.insert(iso.hill_idx);
}
}
return features;
}
/// Detects isotope patterns among the given hills and assembles them into
/// peptide features.
///
/// Pipeline: sort hills by m/z -> (diagnostic) initial isotope calibration ->
/// fast m/z lookup construction -> candidate generation -> optional RT gating
/// -> isotope mass-error calibration -> calibrated filtering -> greedy
/// selection of non-overlapping patterns.
///
/// @param hills Input hills; sorted in place by weighted-mean m/z.
/// @param itol_ppm Isotope m/z tolerance in ppm.
/// @param min_charge / max_charge Charge-state range to consider.
/// @param negative_mode If true, mass calibration adds (instead of subtracts) proton masses.
/// @param ivf Isotope validity factor forwarded to candidate generation.
/// @param iuse Number of isotope hills whose intensities are summed into the feature (-1 = all, 0 = none).
/// @param enable_isotope_calib Whether isotope mass calibration is performed.
/// @param use_im Whether ion-mobility bins participate in the fast lookup.
/// @return Features with complete isotope patterns.
vector<Biosaur2Algorithm::PeptideFeature> Biosaur2Algorithm::detectIsotopePatterns_(vector<Hill>& hills,
                                                                                    double itol_ppm,
                                                                                    int min_charge,
                                                                                    int max_charge,
                                                                                    bool negative_mode,
                                                                                    double ivf,
                                                                                    int iuse,
                                                                                    bool enable_isotope_calib,
                                                                                    bool use_im) const
{
  vector<PeptideFeature> features;
  OPENMS_LOG_INFO << "Detecting isotope patterns..." << endl;
  // Sort by m/z so the fast lookup and candidate generation can assume order.
  sort(hills.begin(), hills.end(),
       [](const Hill& a, const Hill& b) { return a.mz_weighted_mean < b.mz_weighted_mean; });
  // Map from stable hill_idx to current position in the sorted vector.
  map<Size, Size> hill_idx_to_index;
  for (Size idx = 0; idx < hills.size(); ++idx)
  {
    hill_idx_to_index[hills[idx].hill_idx] = idx;
  }
  // Initial isotope calibration (diagnostic only, mimics original behaviour).
  performInitialIsotopeCalibration_(hills,
                                    itol_ppm,
                                    min_charge,
                                    max_charge,
                                    enable_isotope_calib);
  // Build fast m/z lookup and optional ion-mobility bins.
  map<int, vector<FastHillEntry>> hills_mz_fast;
  vector<int> hill_im_bins;
  double mz_step = buildFastMzLookup_(hills, use_im, hills_mz_fast, hill_im_bins);
  // Initial isotope candidate generation.
  vector<PatternCandidate> ready = generateIsotopeCandidates_(hills,
                                                              itol_ppm,
                                                              min_charge,
                                                              max_charge,
                                                              ivf,
                                                              mz_step,
                                                              hills_mz_fast,
                                                              hill_idx_to_index,
                                                              hill_im_bins,
                                                              use_im);
  // Optional RT-apex gating before isotope mass calibration.
  vector<PatternCandidate> rt_ready = applyRtFiltering_(ready, hills, hill_idx_to_index);
  // Isotope mass error calibration based on the (optionally RT-filtered) candidates.
  map<int, pair<double, double>> isotope_calib_map_ready =
    refineIsotopeCalibration_(rt_ready, itol_ppm, enable_isotope_calib);
  // Apply calibrated isotope mass filters and re-check isotope-intensity cosine.
  vector<PatternCandidate> filtered_ready =
    filterByCalibration_(rt_ready, hills, hill_idx_to_index, isotope_calib_map_ready, enable_isotope_calib);
  // Final greedy selection of non-overlapping patterns and feature assembly.
  features = selectNonOverlappingPatterns_(filtered_ready, hills, negative_mode, iuse, itol_ppm);
  OPENMS_LOG_INFO << "Detected " << features.size() << " features with isotope patterns" << endl;
  return features;
}
// Converts detected peptide features into an OpenMS FeatureMap, attaching
// per-hill (or bounding-box) convex hulls, isotope consistency diagnostics,
// and ion-mobility annotations.
FeatureMap Biosaur2Algorithm::convertToFeatureMap_(const vector<PeptideFeature>& features,
const vector<Hill>& hills) const
{
FeatureMap feature_map;
// "mass_traces" mode emits one hull per contributing hill; any other mode
// collapses the pattern into a single rectangular bounding-box hull.
const bool use_mass_trace_hulls = (convex_hull_mode_ == "mass_traces");
// Build a lookup from hill index to Hill pointer so we can reconstruct
// per-isotope convex hulls for each peptide feature.
map<Size, const Hill*> hill_lookup;
for (const auto& h : hills)
{
hill_lookup[h.hill_idx] = &h;
}
for (const auto& f : features)
{
// Resolve the monoisotopic hill; may be missing if indices got out of sync.
const Hill* mono_hill_ptr = nullptr;
auto mono_it = hill_lookup.find(f.mono_hill_idx);
if (mono_it != hill_lookup.end())
{
mono_hill_ptr = mono_it->second;
}
// Re-run the isotope sanity diagnostics on the final assignments.
if (mono_hill_ptr != nullptr && !f.isotopes.empty())
{
for (const auto& iso : f.isotopes)
{
auto iso_it = hill_lookup.find(iso.hill_idx);
if (iso_it == hill_lookup.end())
{
continue;
}
const Hill* iso_hill_ptr = iso_it->second;
debugCheckIsotopeConsistency_("convertToFeatureMap_",
mono_hill_ptr->mz_weighted_mean,
mono_hill_ptr->rt_apex,
mono_hill_ptr->hill_idx,
f.charge,
itol_,
*iso_hill_ptr,
iso.isotope_number);
}
}
Feature feature;
feature.setMZ(f.mz);
feature.setRT(f.rt_apex);
feature.setIntensity(f.intensity_apex);
feature.setCharge(f.charge);
// Quality is (ab)used to store the isotope count, mirroring other finders.
feature.setOverallQuality(f.n_isotopes);
// Collect the monoisotopic hill and all isotope hills contributing to
// this feature and build one convex hull per hill, analogous to how
// FeatureFinderCentroided uses mass-trace hulls.
set<Size> pattern_hill_ids;
pattern_hill_ids.insert(f.mono_hill_idx);
for (const auto& iso : f.isotopes)
{
pattern_hill_ids.insert(iso.hill_idx);
}
DBoundingBox<2> feature_box;
bool have_box_points = false;
// Track the ion-mobility range over all contributing hills (negative
// IM values are treated as "not set" and skipped).
double im_min = numeric_limits<double>::max();
double im_max = numeric_limits<double>::lowest();
bool have_im_values = false;
for (Size hill_id : pattern_hill_ids)
{
auto it = hill_lookup.find(hill_id);
if (it == hill_lookup.end())
{
continue;
}
const Hill& hill = *(it->second);
if (hill.rt_values.empty() || hill.mz_values.empty())
{
continue;
}
// Update ion mobility range for this feature based on contributing hills
if (!hill.ion_mobilities.empty())
{
for (double im_value : hill.ion_mobilities)
{
if (im_value < 0.0) continue;
have_im_values = true;
im_min = std::min(im_min, im_value);
im_max = std::max(im_max, im_value);
}
}
const Size n_pts = min(hill.rt_values.size(), hill.mz_values.size());
if (n_pts == 0)
{
continue;
}
if (use_mass_trace_hulls)
{
// One hull per hill: all (RT, m/z) points of the trace.
ConvexHull2D::PointArrayType hull_points(n_pts);
for (Size i = 0; i < n_pts; ++i)
{
hull_points[i][0] = hill.rt_values[i];
hull_points[i][1] = hill.mz_values[i];
}
ConvexHull2D hull;
hull.addPoints(hull_points);
feature.getConvexHulls().push_back(hull);
}
else
{
// Bounding-box mode: grow a single RT/mz box over all points.
for (Size i = 0; i < n_pts; ++i)
{
const double rt = hill.rt_values[i];
const double mz = hill.mz_values[i];
if (!have_box_points)
{
DBoundingBox<2>::PositionType p(rt, mz);
feature_box = DBoundingBox<2>(p, p);
have_box_points = true;
}
else
{
feature_box.enlarge(rt, mz);
}
}
}
}
if (!use_mass_trace_hulls && have_box_points)
{
// Materialize the bounding box as a four-corner hull.
ConvexHull2D::PointArrayType hull_points(4);
hull_points[0][0] = feature_box.minX();
hull_points[0][1] = feature_box.minY();
hull_points[1][0] = feature_box.maxX();
hull_points[1][1] = feature_box.minY();
hull_points[2][0] = feature_box.minX();
hull_points[2][1] = feature_box.maxY();
hull_points[3][0] = feature_box.maxX();
hull_points[3][1] = feature_box.maxY();
ConvexHull2D hull;
hull.addPoints(hull_points);
feature.getConvexHulls().push_back(hull);
}
// Fallback: if something went wrong while resolving hills, keep the
// previous simple two-point hull to avoid features without any hull.
if (feature.getConvexHulls().empty())
{
ConvexHull2D::PointArrayType hull_points(2);
hull_points[0][0] = f.rt_start;
hull_points[0][1] = f.mz;
hull_points[1][0] = f.rt_end;
hull_points[1][1] = f.mz;
ConvexHull2D hull;
hull.addPoints(hull_points);
feature.getConvexHulls().push_back(hull);
}
feature.setMetaValue("mass_calib", f.mass_calib);
feature.setMetaValue("n_isotopes", f.n_isotopes);
feature.setMetaValue("n_scans", f.n_scans);
feature.setMetaValue("intensity_sum", f.intensity_sum);
if (f.drift_time != IMTypes::DRIFTTIME_NOT_SET)
{
feature.setMetaValue("FAIMS_CV", f.drift_time);
}
// Annotate real ion-mobility data with median and range
if (f.ion_mobility >= 0.0 && have_im_values)
{
feature.setMetaValue(Constants::UserParam::IM, f.ion_mobility);
feature.setMetaValue("IM_min", im_min);
feature.setMetaValue("IM_max", im_max);
}
feature.ensureUniqueId();
feature_map.push_back(feature);
}
feature_map.applyMemberFunction(&UniqueIdInterface::ensureUniqueId);
feature_map.ensureUniqueId();
feature_map.getProteinIdentifications().resize(1);
return feature_map;
}
// Emits diagnostic warnings when an isotope hill assigned to a monoisotopic
// hill violates the expected RT-apex proximity or the expected m/z spacing.
// Does not modify any data; purely a logging aid.
void Biosaur2Algorithm::debugCheckIsotopeConsistency_(const char* stage_label,
double mono_mz_center,
double mono_rt_apex,
Size mono_hill_idx,
int charge,
double itol_ppm,
const Hill& iso_hill,
Size isotope_number) const
{
// RT-apex sanity check.
// Only meaningful when an RT tolerance was configured; the small epsilon
// absorbs floating-point noise at the threshold.
if (hrttol_ > 0.0)
{
const double rt_delta = fabs(iso_hill.rt_apex - mono_rt_apex);
if (rt_delta > hrttol_ + 1e-6)
{
OPENMS_LOG_WARN << "Biosaur2 isotope debug (" << stage_label << "): "
<< "mono m/z " << mono_mz_center
<< " (charge " << charge
<< ", mono hill_idx=" << mono_hill_idx
<< ") uses isotope hill_idx=" << iso_hill.hill_idx
<< " with RT apex delta " << rt_delta
<< " s > hrttol=" << hrttol_
<< ". This indicates an inconsistent isotope assignment."
<< endl;
}
}
// m/z sanity check.
// Expected isotope position: mono + n * (C13-C12 mass diff) / charge.
if (charge > 0)
{
const double ISOTOPE_MASSDIFF = Constants::C13C12_MASSDIFF_U;
const double expected_mz = mono_mz_center +
static_cast<double>(isotope_number) *
ISOTOPE_MASSDIFF /
static_cast<double>(charge);
const double observed_mz = iso_hill.mz_weighted_mean;
const double diff_ppm = Math::getPPM(observed_mz, expected_mz);
// Warn only on gross deviations: at least 80 ppm or 10x the user tolerance,
// whichever is larger, to keep the log free of borderline cases.
const double mz_ppm_threshold = std::max(80.0, 10.0 * itol_ppm);
if (fabs(diff_ppm) > mz_ppm_threshold)
{
OPENMS_LOG_WARN << "Biosaur2 isotope debug (" << stage_label << "): "
<< "mono m/z " << mono_mz_center
<< " (charge " << charge
<< ", mono hill_idx=" << mono_hill_idx
<< ") uses isotope #" << isotope_number
<< " (hill_idx=" << iso_hill.hill_idx
<< ") at m/z=" << observed_mz
<< " which is " << diff_ppm
<< " ppm away from expected " << expected_mz
<< " (itol=" << itol_ppm << " ppm)."
<< endl;
}
}
}
double Biosaur2Algorithm::cosineCorrelation_(const vector<double>& intensities1,
const vector<Size>& scans1,
const vector<double>& intensities2,
const vector<Size>& scans2) const
{
map<Size, double> map1, map2;
for (Size i = 0; i < scans1.size(); ++i)
{
map1[scans1[i]] = intensities1[i];
}
for (Size i = 0; i < scans2.size(); ++i)
{
map2[scans2[i]] = intensities2[i];
}
double dot_product = 0.0;
double norm1 = 0.0;
double norm2 = 0.0;
for (const auto& p1 : map1)
{
Size scan = p1.first;
double i1 = p1.second;
auto it = map2.find(scan);
if (it != map2.end())
{
dot_product += i1 * it->second;
}
norm1 += i1 * i1;
}
for (const auto& p2 : map2)
{
norm2 += p2.second * p2.second;
}
if (norm1 == 0.0 || norm2 == 0.0)
{
return 0.0;
}
return dot_product / (sqrt(norm1) * sqrt(norm2));
}
// Writes the detected peptide features to a tab-separated file compatible
// with the original Biosaur2 output layout (one row per feature).
// @throws Exception::UnableToCreateFile if the file cannot be opened.
void Biosaur2Algorithm::writeTSV(const vector<PeptideFeature>& features, const String& filename) const
{
ofstream out(filename);
if (!out)
{
throw Exception::UnableToCreateFile(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, filename);
}
// Header line; column order must match the per-row output below.
out << "massCalib\trtApex\tintensityApex\tintensitySum\tcharge\t"
<< "nIsotopes\tnScans\tmz\trtStart\trtEnd\tFAIMS\tIM" << endl;
for (const auto& f : features)
{
out << f.mass_calib << "\t"
<< f.rt_apex << "\t"
<< f.intensity_apex << "\t"
<< f.intensity_sum << "\t"
<< f.charge << "\t"
<< f.n_isotopes << "\t"
<< f.n_scans << "\t"
<< f.mz << "\t"
<< f.rt_start << "\t"
<< f.rt_end << "\t"
<< f.drift_time << "\t"
<< f.ion_mobility << endl;
}
OPENMS_LOG_INFO << "Wrote " << features.size() << " features to TSV file: " << filename << endl;
}
// Writes all hills to a tab-separated file, one row per hill. The IM_min /
// IM_max columns contain the observed ion-mobility range, or -1/-1 when the
// hill carries no valid (non-negative) ion-mobility values.
// @throws Exception::UnableToCreateFile if the file cannot be opened.
void Biosaur2Algorithm::writeHills(const vector<Hill>& hills, const String& filename) const
{
  ofstream out(filename);
  if (!out)
  {
    throw Exception::UnableToCreateFile(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, filename);
  }
  out << "hill_idx\tmz\trtStart\trtEnd\trtApex\tintensityApex\tintensitySum\tnScans\tIM_min\tIM_max" << endl;
  for (const auto& hill : hills)
  {
    // Scan the hill's ion mobilities for the min/max of all valid entries;
    // negative values mean "not set" and are skipped.
    double im_min = -1.0;
    double im_max = -1.0;
    bool first_valid = true;
    for (double im_value : hill.ion_mobilities)
    {
      if (im_value < 0.0) continue;
      if (first_valid)
      {
        im_min = im_value;
        im_max = im_value;
        first_valid = false;
      }
      else
      {
        im_min = std::min(im_min, im_value);
        im_max = std::max(im_max, im_value);
      }
    }
    out << hill.hill_idx << "\t"
        << hill.mz_weighted_mean << "\t"
        << hill.rt_start << "\t"
        << hill.rt_end << "\t"
        << hill.rt_apex << "\t"
        << hill.intensity_apex << "\t"
        << hill.intensity_sum << "\t"
        << hill.length << "\t"
        << im_min << "\t"
        << im_max << endl;
  }
  OPENMS_LOG_INFO << "Wrote " << hills.size() << " hills to: " << filename << endl;
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/BiGaussFitter1D.cpp | .cpp | 3,123 | 102 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/BiGaussFitter1D.h>
#include <OpenMS/FEATUREFINDER/InterpolationModel.h>
#include <OpenMS/FEATUREFINDER/BiGaussModel.h>
namespace OpenMS
{
// Default constructor: registers the model-specific default parameters
// (common ones like the interpolation step come from the Fitter1D base).
BiGaussFitter1D::BiGaussFitter1D() :
MaxLikeliFitter1D()
{
setName("BiGaussFitter1D");
defaults_.setValue("statistics:variance1", 1.0, "Variance of the first gaussian, used for the lower half of the model.", {"advanced"});
defaults_.setValue("statistics:variance2", 1.0, "Variance of the second gaussian, used for the upper half of the model.", {"advanced"});
// Copy the registered defaults into param_ and trigger updateMembers_().
defaultsToParam_();
}
// Copy constructor: the base copy brings over the parameters; the cached
// member variables are then refreshed from them.
BiGaussFitter1D::BiGaussFitter1D(const BiGaussFitter1D& source) :
MaxLikeliFitter1D(source)
{
updateMembers_();
}
// Defaulted destructor; no resources are owned directly by this class.
BiGaussFitter1D::~BiGaussFitter1D() = default;
// Copy assignment: guards against self-assignment, delegates the parameter
// copy to the base class, then refreshes the cached members.
BiGaussFitter1D& BiGaussFitter1D::operator=(const BiGaussFitter1D& source)
{
  if (this != &source)
  {
    MaxLikeliFitter1D::operator=(source);
    updateMembers_();
  }
  return *this;
}
// Fits a bi-Gaussian model to the given raw data: computes the positional
// bounding box, enlarges it by tolerance_stdev_box_ standard deviations on
// each side (lower/upper variance respectively), builds the model, and fits
// its offset. Returns the fit quality, or -1 if the quality is NaN.
BiGaussFitter1D::QualityType BiGaussFitter1D::fit1d(const RawDataArrayType& set, std::unique_ptr<InterpolationModel>& model)
{
  // Determine the positional bounding box of the input data.
  CoordinateType min_bb = set[0].getPos();
  CoordinateType max_bb = min_bb;
  for (const auto& peak : set)
  {
    const CoordinateType pos = peak.getPos();
    if (pos < min_bb)
    {
      min_bb = pos;
    }
    else if (pos > max_bb)
    {
      max_bb = pos;
    }
  }
  // Enlarge the bounding box by a few multiples of the standard deviation;
  // the lower side uses the first gaussian's spread, the upper the second's.
  const CoordinateType stdev1 = sqrt(statistics1_.variance()) * tolerance_stdev_box_;
  const CoordinateType stdev2 = sqrt(statistics2_.variance()) * tolerance_stdev_box_;
  min_bb -= stdev1;
  max_bb += stdev2;
  // Construct and configure the bi-Gaussian model.
  model = std::unique_ptr<BiGaussModel>(new BiGaussModel());
  model->setInterpolationStep(interpolation_step_);
  Param settings;
  settings.setValue("bounding_box:min", min_bb);
  settings.setValue("bounding_box:max", max_bb);
  settings.setValue("statistics:mean", statistics1_.mean());
  settings.setValue("statistics:variance1", statistics1_.variance());
  settings.setValue("statistics:variance2", statistics2_.variance());
  model->setParameters(settings);
  // Fit the model offset; a NaN quality is mapped to -1 (worst).
  const QualityType quality = fitOffset_(model, set, stdev1, stdev2, interpolation_step_);
  return std::isnan(quality) ? QualityType(-1.0) : quality;
}
// Synchronizes cached statistics with the current parameters. Both halves of
// the bi-Gaussian share the same mean but carry separate variances.
void BiGaussFitter1D::updateMembers_()
{
MaxLikeliFitter1D::updateMembers_();
statistics1_.setMean(param_.getValue("statistics:mean"));
statistics1_.setVariance(param_.getValue("statistics:variance1"));
statistics2_.setMean(param_.getValue("statistics:mean"));
statistics2_.setVariance(param_.getValue("statistics:variance2"));
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/GaussFitter1D.cpp | .cpp | 2,697 | 99 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/GaussFitter1D.h>
#include <OpenMS/FEATUREFINDER/InterpolationModel.h>
#include <OpenMS/FEATUREFINDER/GaussModel.h>
namespace OpenMS
{
// Default constructor: registers the Gaussian model's default parameters
// (variance and mean) on top of the Fitter1D base defaults.
GaussFitter1D::GaussFitter1D() :
MaxLikeliFitter1D()
{
setName("GaussFitter1D");
defaults_.setValue("statistics:variance", 1.0, "Variance of the model.", {"advanced"});
defaults_.setValue("statistics:mean", 1.0, "Mean value of the model.", {"advanced"});
// Copy the registered defaults into param_ and trigger updateMembers_().
defaultsToParam_();
}
// Copy constructor: the base copy brings over the parameters; cached members
// are then refreshed from them.
GaussFitter1D::GaussFitter1D(const GaussFitter1D& source) :
MaxLikeliFitter1D(source)
{
updateMembers_();
}
// Defaulted destructor; no resources are owned directly by this class.
GaussFitter1D::~GaussFitter1D() = default;
// Copy assignment: guards against self-assignment, delegates the parameter
// copy to the base class, then refreshes the cached members.
GaussFitter1D& GaussFitter1D::operator=(const GaussFitter1D& source)
{
  if (this != &source)
  {
    MaxLikeliFitter1D::operator=(source);
    updateMembers_();
  }
  return *this;
}
// Fits a Gaussian model to the given raw data: computes the positional
// bounding box, enlarges it symmetrically by tolerance_stdev_box_ standard
// deviations, builds the model, and fits its offset. Returns the fit
// quality, or -1 if the quality is NaN.
GaussFitter1D::QualityType GaussFitter1D::fit1d(const RawDataArrayType& set, std::unique_ptr<InterpolationModel>& model)
{
  // Determine the positional bounding box of the input data.
  CoordinateType min_bb = set[0].getPos();
  CoordinateType max_bb = min_bb;
  for (const auto& peak : set)
  {
    const CoordinateType pos = peak.getPos();
    if (pos < min_bb)
    {
      min_bb = pos;
    }
    else if (pos > max_bb)
    {
      max_bb = pos;
    }
  }
  // Enlarge the bounding box by a few multiples of the standard deviation.
  const CoordinateType stdev = sqrt(statistics_.variance()) * tolerance_stdev_box_;
  min_bb -= stdev;
  max_bb += stdev;
  // Construct and configure the Gaussian model.
  model = std::unique_ptr<InterpolationModel>(new GaussModel());
  model->setInterpolationStep(interpolation_step_);
  Param settings;
  settings.setValue("bounding_box:min", min_bb);
  settings.setValue("bounding_box:max", max_bb);
  settings.setValue("statistics:mean", statistics_.mean());
  settings.setValue("statistics:variance", statistics_.variance());
  model->setParameters(settings);
  // Fit the model offset; a NaN quality is mapped to -1 (worst).
  const QualityType quality = fitOffset_(model, set, stdev, stdev, interpolation_step_);
  return std::isnan(quality) ? QualityType(-1.0) : quality;
}
// Synchronizes the cached statistics with the current parameters.
void GaussFitter1D::updateMembers_()
{
MaxLikeliFitter1D::updateMembers_();
statistics_.setMean(param_.getValue("statistics:mean"));
statistics_.setVariance(param_.getValue("statistics:variance"));
}
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/Fitter1D.cpp | .cpp | 2,312 | 67 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/Fitter1D.h>
// include derived classes here
#include <OpenMS/FEATUREFINDER/GaussFitter1D.h>
#include <OpenMS/FEATUREFINDER/BiGaussFitter1D.h>
#include <OpenMS/FEATUREFINDER/IsotopeFitter1D.h>
#include <OpenMS/FEATUREFINDER/ExtendedIsotopeFitter1D.h>
#include <OpenMS/FEATUREFINDER/EmgFitter1D.h>
namespace OpenMS
{
// Default constructor: registers the parameters shared by all 1D fitters
// (interpolation step, data statistics, bounding-box tolerance).
Fitter1D::Fitter1D() :
DefaultParamHandler("Fitter1D")
{
defaults_.setValue("interpolation_step", 0.2, "Sampling rate for the interpolation of the model function.", {"advanced"});
defaults_.setValue("statistics:mean", 1.0, "Centroid position of the model.", {"advanced"});
defaults_.setValue("statistics:variance", 1.0, "The variance of the model.", {"advanced"});
defaults_.setValue("tolerance_stdev_bounding_box", 3.0, "Bounding box has range [minimim of data, maximum of data] enlarged by tolerance_stdev_bounding_box times the standard deviation of the data.", {"advanced"});
// Copy the registered defaults into param_ and trigger updateMembers_().
defaultsToParam_();
}
// Copy constructor.
// NOTE(review): the base copy already duplicates the parameters; the extra
// setParameters(source.getParameters()) looks redundant but is harmless —
// confirm before simplifying.
Fitter1D::Fitter1D(const Fitter1D& source) :
DefaultParamHandler(source)
{
setParameters(source.getParameters());
updateMembers_();
}
// Defaulted destructor; no resources are owned directly by this class.
Fitter1D::~Fitter1D() = default;
// Copy assignment: guards against self-assignment, copies base state and
// parameters, then refreshes the cached members.
Fitter1D& Fitter1D::operator=(const Fitter1D& source)
{
  if (this != &source)
  {
    DefaultParamHandler::operator=(source);
    setParameters(source.getParameters());
    updateMembers_();
  }
  return *this;
}
// Synchronizes the cached member variables with the current parameters.
void Fitter1D::updateMembers_()
{
tolerance_stdev_box_ = param_.getValue("tolerance_stdev_bounding_box");
interpolation_step_ = param_.getValue("interpolation_step");
statistics_.setMean(param_.getValue("statistics:mean"));
statistics_.setVariance(param_.getValue("statistics:variance"));
}
// Base-class stub: concrete fitters must override this.
// @throws Exception::NotImplemented always.
Fitter1D::QualityType Fitter1D::fit1d(const RawDataArrayType& /* range */, std::unique_ptr<InterpolationModel>& /* model */)
{
throw Exception::NotImplemented(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION);
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/FFIDAlgoExternalIDHandler.cpp | .cpp | 24,882 | 683 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/FFIDAlgoExternalIDHandler.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentAlgorithmIdentification.h>
#include <OpenMS/ML/SVM/SimpleSVM.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/CONCEPT/Types.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <algorithm>
#include <random>
namespace OpenMS
{
namespace Internal
{
// Default constructor: zero-initializes all counters; the SVM settings get
// their real values later via initSVMParameters_().
FFIDAlgoExternalIDHandler::FFIDAlgoExternalIDHandler() :
n_external_peptides_(0),
n_external_features_(0),
svm_n_parts_(3),
svm_n_samples_(0),
svm_min_prob_(0.0),
n_internal_features_(0)
{
}
// Caches all SVM-related settings from the algorithm's Param object into
// member variables, so later classification steps don't need the Param.
void FFIDAlgoExternalIDHandler::initSVMParameters_(const Param& param)
{
svm_min_prob_ = param.getValue("svm:min_prob");
svm_n_parts_ = param.getValue("svm:xval");
svm_n_samples_ = param.getValue("svm:samples");
svm_xval_out_ = param.getValue("svm:xval_out").toString();
// The quality cutoff starts at the minimum probability threshold.
svm_quality_cutoff = svm_min_prob_;
svm_predictor_names_ = ListUtils::create<String>(param.getValue("svm:predictors").toString());
debug_level_ = param.getValue("debug");
}
// Resets all cached external-ID state so the handler can be reused for a
// fresh run: counters, peptide map, probability sets, and RT transformation.
void FFIDAlgoExternalIDHandler::reset()
{
  n_external_peptides_ = 0;
  n_external_features_ = 0;
  n_internal_features_ = 0;
  external_peptide_map_.clear();
  svm_probs_external_.clear();
  svm_probs_internal_.clear();
  rt_transformation_ = TransformationDescription();
}
// Registers one external peptide ID in the internal lookup map, keyed by
// sequence -> charge -> RT. Reduces the ID to its single best hit in place.
void FFIDAlgoExternalIDHandler::addExternalPeptide(PeptideIdentification& peptide)
{
if (peptide.getHits().empty())
{
return;
}
// Keep only the best hit (sort() orders hits by score, best first).
peptide.sort();
PeptideHit& hit = peptide.getHits()[0];
// Shrinking the vector to one element does not reallocate, so the
// reference 'hit' taken above remains valid.
peptide.getHits().resize(1);
Int charge = hit.getCharge();
double rt = peptide.getRT();
double mz = peptide.getMZ();
// Store a pointer to the (caller-owned) ID; multimap allows multiple RTs
// for the same sequence/charge combination.
external_peptide_map_[hit.getSequence()][charge].emplace(rt, &peptide);
OPENMS_LOG_DEBUG_NOFILE << "Adding peptide (external) " << hit.getSequence()
<< "; CHG: " << charge << "; RT: " << rt
<< "; MZ: " << mz << std::endl;
}
// Registers all external peptide IDs and tags each one with the
// "FFId_category" = "external" meta value.
void FFIDAlgoExternalIDHandler::processExternalPeptides(PeptideIdentificationList& peptides_ext)
{
for (PeptideIdentification& pep : peptides_ext)
{
addExternalPeptide(pep);
pep.setMetaValue("FFId_category", "external");
}
// NOTE(review): this counts distinct peptide sequences in the map, not the
// number of input IDs — confirm this is the intended statistic.
n_external_peptides_ = external_peptide_map_.size();
}
double FFIDAlgoExternalIDHandler::alignInternalAndExternalIDs(
const PeptideIdentificationList& peptides_internal,
const PeptideIdentificationList& peptides_external,
double rt_quantile)
{
// Reset the handler state
reset();
// Align internal and external IDs to estimate RT shifts:
MapAlignmentAlgorithmIdentification aligner;
aligner.setReference(peptides_external); // go from internal to external scale
std::vector<PeptideIdentificationList> aligner_peptides(1, peptides_internal);
std::vector<TransformationDescription> aligner_trafos;
OPENMS_LOG_INFO << "Realigning internal and external IDs...";
aligner.align(aligner_peptides, aligner_trafos);
rt_transformation_ = aligner_trafos[0];
std::vector<double> aligned_diffs;
rt_transformation_.getDeviations(aligned_diffs);
// Calculate RT uncertainty based on quantile
std::sort(aligned_diffs.begin(), aligned_diffs.end());
Size index = std::clamp(Size(rt_quantile * aligned_diffs.size()),
Size(0), aligned_diffs.size() - 1);
double rt_uncertainty = aligned_diffs[index];
try
{
aligner_trafos[0].fitModel("lowess");
rt_transformation_ = aligner_trafos[0];
}
catch (Exception::BaseException& e)
{
OPENMS_LOG_ERROR << "Error: Failed to align RTs of internal/external peptides. "
<< "RT information will not be considered in the SVM classification. "
<< "The original error message was:\n" << e.what() << std::endl;
}
return rt_uncertainty;
}
// Maps an internal-scale RT to the external scale via the fitted transformation.
double FFIDAlgoExternalIDHandler::transformRT(double rt) const
{
return rt_transformation_.apply(rt);
}
// Returns true if an RT transformation with at least one data point exists.
bool FFIDAlgoExternalIDHandler::hasRTTransformation() const
{
return !rt_transformation_.getDataPoints().empty();
}
// Read-only access to the internal->external RT transformation.
const TransformationDescription& FFIDAlgoExternalIDHandler::getRTTransformation() const
{
return rt_transformation_;
}
// Adds one external peptide ID to a combined internal/external map, where
// each sequence/charge entry holds a pair of RT multimaps (internal first,
// external second). Reduces the ID to its single best hit in place.
void FFIDAlgoExternalIDHandler::addExternalPeptideToMap_(PeptideIdentification& peptide,
std::map<AASequence,
std::map<Int, std::pair<std::multimap<double, PeptideIdentification*>,
std::multimap<double, PeptideIdentification*>>>>& peptide_map)
{
if (peptide.getHits().empty()) return;
// Keep only the best hit (sort() orders hits by score, best first).
peptide.sort();
PeptideHit& hit = peptide.getHits()[0];
// Shrinking to one element keeps the reference 'hit' valid.
peptide.getHits().resize(1);
Int charge = hit.getCharge();
double rt = peptide.getRT();
// Add to the external map (second in the pair)
peptide_map[hit.getSequence()][charge].second.emplace(rt, &peptide);
}
// Copies all external RT->ID entries for the given sequence and charge into
// rt_map. Returns false (leaving rt_map untouched) when the sequence or the
// charge state is unknown.
bool FFIDAlgoExternalIDHandler::fillExternalRTMap_(const AASequence& sequence, Int charge,
                                                   std::multimap<double, PeptideIdentification*>& rt_map)
{
  const auto seq_pos = external_peptide_map_.find(sequence);
  if (seq_pos == external_peptide_map_.end())
  {
    return false; // sequence never seen among the external IDs
  }
  const auto charge_pos = seq_pos->second.find(charge);
  if (charge_pos == seq_pos->second.end())
  {
    return false; // sequence known, but not with this charge state
  }
  rt_map.insert(charge_pos->second.begin(), charge_pos->second.end());
  return true;
}
// Marks a feature as externally supported but unclassified: no internal IDs,
// a sentinel match count of -1, and class "unknown".
void FFIDAlgoExternalIDHandler::annotateFeatureWithExternalIDs_(Feature& feature)
{
feature.setMetaValue("n_total_ids", 0);
feature.setMetaValue("n_matching_ids", -1);
feature.setMetaValue("feature_class", "unknown");
}
// Attaches a copy of an external ID to the feature as an "implied" ID: the
// copy is stripped of meta data, re-anchored to the feature's RT/MZ, and its
// single hit gets a neutral score of 0.
void FFIDAlgoExternalIDHandler::addDummyPeptideID_(Feature& feature, const PeptideIdentification* ext_id)
{
if (!ext_id) return;
PeptideIdentification id = *ext_id;
id.clearMetaInfo();
id.setMetaValue("FFId_category", "implied");
id.setRT(feature.getRT());
id.setMZ(feature.getMZ());
// Only one peptide hit per ID - see function "addPeptideToMap_":
PeptideHit& hit = id.getHits()[0];
hit.clearMetaInfo();
hit.setScore(0.0);
feature.getPeptideIdentifications().push_back(id);
}
// Records the SVM probability of an external feature and, if it passes the
// quality cutoff, stores it as the feature's overall quality and counts the
// feature as accepted.
void FFIDAlgoExternalIDHandler::handleExternalFeature_(Feature& feature, double prob_positive, double quality_cutoff)
{
svm_probs_external_.insert(prob_positive);
if (prob_positive >= quality_cutoff)
{
feature.setOverallQuality(prob_positive);
++n_external_features_;
}
}
// Rescales internal-feature q-values to account for the external features:
// for each probability cutoff (walked from highest to lowest), the fraction
// of external features above that cutoff scales the q-value relative to the
// internal feature count.
void FFIDAlgoExternalIDHandler::adjustFDRForExternalFeatures_(std::vector<double>& fdr_probs,
std::vector<double>& fdr_qvalues,
Size n_internal_features)
{
// Walk the external probabilities from highest to lowest in lock-step with
// the (sorted) cutoff list; 'external_count' accumulates monotonically.
std::multiset<double>::reverse_iterator ext_it = svm_probs_external_.rbegin();
Size external_count = 0;
// Signed index because the loop counts down past zero to terminate.
for (Int i = fdr_probs.size() - 1; i >= 0; --i)
{
double cutoff = fdr_probs[i];
while ((ext_it != svm_probs_external_.rend()) && (*ext_it >= cutoff))
{
++external_count;
++ext_it;
}
fdr_qvalues[i] = (fdr_qvalues[i] * external_count) /
(external_count + n_internal_features);
}
}
// Verifies that both classes provide at least one observation per
// cross-validation fold; throws Exception::MissingInformation otherwise.
// @param note Optional context appended to the error message.
void FFIDAlgoExternalIDHandler::checkNumObservations_(Size n_pos, Size n_neg, const String& note) const
{
  // Both checks produce the same message apart from the class name.
  const auto fail = [&](const String& which)
  {
    String msg = "Not enough " + which + " observations for " +
                 String(svm_n_parts_) + "-fold cross-validation" + note + ".";
    throw Exception::MissingInformation(__FILE__, __LINE__,
                                        OPENMS_PRETTY_FUNCTION, msg);
  };
  if (n_pos < svm_n_parts_)
  {
    fail("positive");
  }
  if (n_neg < svm_n_parts_)
  {
    fail("negative");
  }
}
// Draws an intensity-balanced training sample from the observations.
// 'valid_obs' is keyed by intensity and maps to (observation index, label);
// selected observations are written into 'training_labels' (index -> 0/1).
// @throws Exception::MissingInformation if too few observations remain.
void FFIDAlgoExternalIDHandler::getUnbiasedSample_(const std::multimap<double, std::pair<Size, bool> >& valid_obs,
std::map<Size, double>& training_labels)
{
// Create an unbiased training sample:
// - same number of pos./neg. observations (approx.),
// - same intensity distribution of pos./neg. observations.
// We use a sliding window over the set of observations, ordered by
// intensity. At each step, we examine the proportion of both pos./neg.
// observations in the window and select the middle element with according
// probability. (We use an even window size, to cover the ideal case where
// the two classes are balanced.)
const Size window_size = 8;
const Size half_win_size = window_size / 2;
if (valid_obs.size() < half_win_size + 1)
{
String msg = "Not enough observations for intensity-bias filtering.";
throw Exception::MissingInformation(__FILE__, __LINE__,
OPENMS_PRETTY_FUNCTION, msg);
}
// NOTE(review): srand/rand is a weak, globally-seeded RNG; consider
// <random> or Math::RandomShuffler for reproducible sampling.
srand(time(nullptr)); // seed random number generator
Size n_obs[2] = {0, 0}; // counters for neg./pos. observations
Size counts[2] = {0, 0}; // pos./neg. counts in current window
// iterators to begin, middle and past-the-end of sliding window:
std::multimap<double, std::pair<Size, bool> >::const_iterator begin, middle, end;
begin = middle = end = valid_obs.begin();
// initialize ("middle" is at beginning of sequence, so no full window):
for (Size i = 0; i <= half_win_size; ++i, ++end)
{
++counts[end->second.second]; // increase counter for pos./neg. obs.
}
// "i" is the index of one of the two middle values of the sliding window:
// - in the left half of the sequence, "i" is left-middle,
// - in the right half of the sequence, "i" is right-middle.
// The counts are updated as "i" and the sliding window move to the right.
for (Size i = 0; i < valid_obs.size(); ++i, ++middle)
{
// if count for either class is zero, we don't select anything:
if ((counts[0] > 0) && (counts[1] > 0))
{
// probability thresholds for neg./pos. observations:
// (the rarer class in the window is selected with higher probability)
double thresholds[2] = {counts[1] / float(counts[0]),
counts[0] / float(counts[1])};
// check middle values:
double rnd = rand() / double(RAND_MAX); // random num. in range 0-1
if (rnd < thresholds[middle->second.second])
{
training_labels[middle->second.first] = Int(middle->second.second);
++n_obs[middle->second.second];
}
}
// update sliding window and class counts;
// when we reach the middle of the sequence, we keep the window in place
// for one step, to change from "left-middle" to "right-middle":
if (i != valid_obs.size() / 2)
{
// only move "begin" when "middle" has advanced far enough:
if (i > half_win_size)
{
--counts[begin->second.second];
++begin;
}
// don't increment "end" beyond the defined range:
if (end != valid_obs.end())
{
++counts[end->second.second];
++end;
}
}
}
// Fail early if the balanced sample is too small for cross-validation.
checkNumObservations_(n_obs[1], n_obs[0], " after bias filtering");
}
// Reduces 'training_labels' to a random subset of size svm_n_samples_,
// guaranteeing that at least svm_n_parts_ positive and svm_n_parts_ negative
// observations survive (required for cross-validation).
void FFIDAlgoExternalIDHandler::getRandomSample_(std::map<Size, double>& training_labels)
{
// Pick a random subset of size "svm_n_samples_" for training: Shuffle the whole
// sequence, then select the first "svm_n_samples_" elements.
std::vector<Size> selection;
selection.reserve(training_labels.size());
for (auto it = training_labels.begin(); it != training_labels.end(); ++it)
{
selection.push_back(it->first);
}
Math::RandomShuffler shuffler;
shuffler.portable_random_shuffle(selection.begin(), selection.end());
// However, ensure that at least "svm_n_parts_" pos./neg. observations are
// included (for cross-validation) - there must be enough, otherwise
// "checkNumObservations" would have thrown an error. To this end, move
// "svm_n_parts_" pos. observations to the beginning of sequence, followed by
// "svm_n_parts_" neg. observations (pos. first - see reason below):
Size n_obs[2] = {0, 0}; // counters for neg./pos. observations
for (Int label = 1; label >= 0; --label)
{
// For label 0, start scanning after the positives already moved to the
// front (n_obs[1] of them), so they are not swapped away again.
for (Size i = n_obs[1]; i < selection.size(); ++i)
{
Size obs_index = selection[i];
if (training_labels[obs_index] == label)
{
std::swap(selection[i], selection[n_obs[label]]);
++n_obs[label];
}
if (n_obs[label] == svm_n_parts_)
{
break;
}
}
}
// Keep only the first svm_n_samples_ entries (which now include the
// guaranteed pos./neg. observations at the front).
selection.resize(svm_n_samples_);
// copy the selected subset back:
std::map<Size, double> temp;
for (std::vector<Size>::iterator it = selection.begin(); it != selection.end();
++it)
{
temp[*it] = training_labels[*it];
}
training_labels.swap(temp);
}
void FFIDAlgoExternalIDHandler::classifyFeaturesWithSVM(FeatureMap& features, const Param& param)
{
  // Train an SVM on features with known class ("positive"/"negative") and
  // predict class probabilities for all features in the map; the predicted
  // probability of being "positive" becomes each feature's overall quality.
  // Initialize SVM parameters in the external ID handler
  initSVMParameters_(param);
  if (features.empty())
  {
    return;
  }
  if (features[0].metaValueExists("rt_delta")) // include RT feature
  {
    // add "rt_delta" as a predictor unless it is already configured:
    if (std::find(svm_predictor_names_.begin(), svm_predictor_names_.end(), "rt_delta") == svm_predictor_names_.end())
    {
      svm_predictor_names_.push_back("rt_delta");
    }
  }
  // values for all features per predictor (this way around to simplify scaling
  // of predictors):
  SimpleSVM::PredictorMap predictors;
  for (const String& pred : svm_predictor_names_)
  {
    predictors[pred].reserve(features.size());
    for (Feature& feat : features)
    {
      if (!feat.metaValueExists(pred))
      {
        // a single missing value invalidates the whole predictor column:
        OPENMS_LOG_ERROR << "Meta value '" << pred << "' missing for feature '"
                         << feat.getUniqueId() << "'" << std::endl;
        predictors.erase(pred);
        break;
      }
      predictors[pred].push_back(feat.getMetaValue(pred));
    }
  }
  // get labels for SVM:
  std::map<Size, double> training_labels;
  bool no_selection = param.getValue("svm:no_selection") == "true";
  // mapping (for bias correction): intensity -> (index, positive?)
  std::multimap<double, std::pair<Size, bool> > valid_obs;
  Size n_obs[2] = {0, 0}; // counters for neg./pos. observations
  for (Size feat_index = 0; feat_index < features.size(); ++feat_index)
  {
    String feature_class = features[feat_index].getMetaValue("feature_class");
    int label = -1; // stays -1 for "unknown"/"ambiguous" -> not used for training
    if (feature_class == "positive")
    {
      label = 1;
    }
    else if (feature_class == "negative")
    {
      label = 0;
    }
    if (label != -1)
    {
      ++n_obs[label];
      if (!no_selection)
      {
        // defer the decision to the intensity-bias filtering below:
        double intensity = features[feat_index].getIntensity();
        valid_obs.insert(std::make_pair(intensity, std::make_pair(feat_index,
                                                                  bool(label))));
      }
      else
      {
        training_labels[feat_index] = (double)label;
      }
    }
  }
  checkNumObservations_(n_obs[1], n_obs[0]);
  if (!no_selection)
  {
    getUnbiasedSample_(valid_obs, training_labels);
  }
  if (svm_n_samples_ > 0) // limited number of samples for training
  {
    if (training_labels.size() < svm_n_samples_)
    {
      OPENMS_LOG_WARN << "Warning: There are only " << training_labels.size()
                      << " valid observations for training." << std::endl;
    }
    else if (training_labels.size() > svm_n_samples_)
    {
      getRandomSample_(training_labels);
    }
  }
  SimpleSVM svm;
  // set (only) the relevant parameters:
  Param svm_params = svm.getParameters();
  Logger::LogStream no_log; // suppress warnings about additional parameters
  svm_params.update(param.copy("svm:", true), false, no_log);
  svm.setParameters(svm_params);
  svm.setup(predictors, training_labels);
  if (!svm_xval_out_.empty())
  {
    svm.writeXvalResults(svm_xval_out_);
  }
  // for a linear kernel the feature weights are interpretable -> log them:
  if ((debug_level_ > 0) && svm_params.getValue("kernel") == "linear")
  {
    std::map<String, double> feature_weights;
    svm.getFeatureWeights(feature_weights);
    OPENMS_LOG_DEBUG << "SVM feature weights:" << std::endl;
    for (std::map<String, double>::iterator it = feature_weights.begin();
         it != feature_weights.end(); ++it)
    {
      OPENMS_LOG_DEBUG << "- " << it->first << ": " << it->second << std::endl;
    }
  }
  std::vector<SimpleSVM::Prediction> predictions;
  svm.predict(predictions);
  OPENMS_POSTCONDITION(predictions.size() == features.size(),
                       "SVM predictions for all features expected");
  for (Size i = 0; i < features.size(); ++i)
  {
    features[i].setMetaValue("predicted_class", predictions[i].outcome);
    // probability of class "positive" (index 1):
    double prob_positive = predictions[i].probabilities[1];
    features[i].setMetaValue("predicted_probability", prob_positive);
    // @TODO: store previous (OpenSWATH) overall quality in a meta value?
    features[i].setOverallQuality(prob_positive);
  }
}
void FFIDAlgoExternalIDHandler::finalizeAssayFeatures_(Feature& best_feature, double best_quality, double quality_cutoff)
{
  // Book-keeping for the best feature of one assay, depending on its class:
  // internal features update the true/false positive statistics; "unknown"
  // (external-only) features are kept if their quality passes the cut-off.
  const String cls = best_feature.getMetaValue("feature_class");
  if (cls == "positive")
  { // true positive prediction
    ++svm_probs_internal_[best_quality].first;
    return;
  }
  if ((cls == "negative") || (cls == "ambiguous"))
  { // false positive prediction; "ambiguous" is counted strictly as negative
    ++svm_probs_internal_[best_quality].second;
    return;
  }
  if (cls == "unknown")
  { // external ID only: record the quality; keep the feature if good enough
    svm_probs_external_.insert(best_quality);
    if (best_quality >= quality_cutoff)
    {
      best_feature.setOverallQuality(best_quality);
      ++n_external_features_;
    }
  }
}
void FFIDAlgoExternalIDHandler::filterClassifiedFeatures(FeatureMap& features, double quality_cutoff)
{
  // Prune the feature map based on the SVM classification results.
  if (features.empty())
  {
    return;
  }
  // Remove features with class "negative" or "ambiguous", keep "positive".
  // For class "unknown", for every assay (meta value "PeptideRef"), keep
  // the feature with highest "predicted_probability" (= overall quality),
  // subject to the "svm:min_prob" threshold.
  // We mark features for removal by setting their overall quality to zero.
  n_internal_features_ = 0;
  n_external_features_ = 0;
  FeatureMap::Iterator best_it = features.begin();
  double best_quality = 0.0;
  String previous_ref;
  for (FeatureMap::Iterator it = features.begin(); it != features.end(); ++it)
  {
    // features from same assay (same "PeptideRef") appear consecutively;
    // if this is a new assay, finalize the previous one:
    String peptide_ref = it->getMetaValue("PeptideRef");
    // remove region number, if present:
    // NOTE(review): if no '/' is present, rfind returns npos and
    // "pos_slash + 2" wraps around (unsigned) to 1, so the ':' search starts
    // at index 1 - presumably intended for the "PeptideRef" format; verify.
    Size pos_slash = peptide_ref.rfind('/');
    Size pos_colon = peptide_ref.find(':', pos_slash + 2);
    peptide_ref = peptide_ref.substr(0, pos_colon);
    if (peptide_ref != previous_ref)
    {
      if (!previous_ref.empty())
      {
        finalizeAssayFeatures_(*best_it, best_quality, quality_cutoff);
        best_quality = 0.0;
      }
      previous_ref = peptide_ref;
    }
    // update qualities:
    if ((it->getOverallQuality() > best_quality) ||
        // break ties by intensity:
        ((it->getOverallQuality() == best_quality) &&
         (it->getIntensity() > best_it->getIntensity())))
    {
      best_it = it;
      best_quality = it->getOverallQuality();
    }
    if (it->getMetaValue("feature_class") == "positive")
    {
      n_internal_features_++;
    }
    else
    {
      it->setOverallQuality(0.0); // gets overwritten for "best" candidate
    }
  }
  // set of features from the last assay:
  finalizeAssayFeatures_(*best_it, best_quality, quality_cutoff);
  // physically remove everything still marked with quality zero:
  features.erase(std::remove_if(features.begin(), features.end(),
                                [](const Feature& f) {
                                  return f.getOverallQuality() == 0.0;
                                }),
                 features.end());
}
void FFIDAlgoExternalIDHandler::calculateFDR(FeatureMap& features)
{
  // Estimate false discovery rates and q-values from the classification
  // statistics of "internal" (identified) features, store the FDR curves as
  // meta values on the feature map, and assign a q-value to every feature.
  if (getSVMProbsInternal().empty()) return;
  // cumulate the true/false positive counts, in decreasing probability order:
  Size n_false = 0, n_true = 0;
  for (std::map<double, std::pair<Size, Size> >::reverse_iterator prob_it =
         svm_probs_internal_.rbegin(); prob_it != svm_probs_internal_.rend();
       ++prob_it)
  {
    n_true += prob_it->second.first;
    n_false += prob_it->second.second;
    // overwrite per-probability counts with cumulative counts in-place:
    prob_it->second.first = n_true;
    prob_it->second.second = n_false;
  }
  // print FDR for features that made the cut-off:
  std::map<double, std::pair<Size, Size> >::iterator prob_it =
    svm_probs_internal_.lower_bound(svm_min_prob_);
  if (prob_it != svm_probs_internal_.end())
  {
    // FDR = false positives / all positives at this probability cut-off:
    float fdr = float(prob_it->second.second) / (prob_it->second.first +
                                                 prob_it->second.second);
    OPENMS_LOG_INFO << "Estimated FDR of features detected based on 'external' IDs: "
                    << fdr * 100.0 << "%" << std::endl;
    // rescale: only "external" features are actually affected by the FDR:
    fdr = (fdr * n_external_features_) / (n_external_features_ +
                                          n_internal_features_);
    OPENMS_LOG_INFO << "Estimated FDR of all detected features: " << fdr * 100.0
                    << "%" << std::endl;
  }
  // calculate q-values:
  std::vector<double> qvalues;
  qvalues.reserve(svm_probs_internal_.size());
  double min_fdr = 1.0;
  // iterating in increasing probability order, the q-value is the minimum
  // FDR over all cut-offs at or below the current probability:
  for (prob_it = svm_probs_internal_.begin();
       prob_it != svm_probs_internal_.end(); ++prob_it)
  {
    double fdr = double(prob_it->second.second) / (prob_it->second.first +
                                                   prob_it->second.second);
    if (fdr < min_fdr)
    {
      min_fdr = fdr;
    }
    qvalues.push_back(min_fdr);
  }
  // record only probabilities where q-value changes:
  std::vector<double> fdr_probs, fdr_qvalues;
  std::vector<double>::iterator qv_it = qvalues.begin();
  double previous_qvalue = -1.0; // sentinel: q-values are always >= 0
  for (prob_it = svm_probs_internal_.begin();
       prob_it != svm_probs_internal_.end(); ++prob_it, ++qv_it)
  {
    if (*qv_it != previous_qvalue)
    {
      fdr_probs.push_back(prob_it->first);
      fdr_qvalues.push_back(*qv_it);
      previous_qvalue = *qv_it;
    }
  }
  features.setMetaValue("FDR_probabilities", fdr_probs);
  features.setMetaValue("FDR_qvalues_raw", fdr_qvalues);
  // FDRs are estimated from "internal" features, but apply only to "external"
  // ones. "Internal" features are considered "correct" by definition.
  // We need to adjust the q-values to take this into account:
  adjustFDRForExternalFeatures_(fdr_probs, fdr_qvalues, n_internal_features_);
  features.setMetaValue("FDR_qvalues_corrected", fdr_qvalues);
  // @TODO: should we use "1 - qvalue" as overall quality for features?
  // assign q-values to features:
  for (Feature& feat : features)
  {
    if (feat.getMetaValue("feature_class") == "positive")
    {
      feat.setMetaValue("q-value", 0.0);
    }
    else
    {
      double prob = feat.getOverallQuality();
      // find the highest FDR prob. that is less-or-equal to the feature prob.:
      std::vector<double>::iterator pos = std::upper_bound(fdr_probs.begin(),
                                                           fdr_probs.end(), prob);
      if (pos != fdr_probs.begin())
      {
        --pos; // step back from "first greater" to "last not greater"
      }
      Size dist = std::distance(fdr_probs.begin(), pos);
      feat.setMetaValue("q-value", fdr_qvalues[dist]);
    }
  }
}
// Read-only access to the map: SVM probability -> (cumulative true positive,
// cumulative false positive) counts from "internal" (identified) features.
const std::map<double, std::pair<Size, Size> >& FFIDAlgoExternalIDHandler::getSVMProbsInternal() const
{
  return svm_probs_internal_;
}
} // namespace Internal
} // namespace OpenMS | C++ |
// ---- src/openms/source/FEATUREFINDER/ExtendedIsotopeModel.cpp ----
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/ExtendedIsotopeModel.h>
#include <OpenMS/MATH/STATISTICS/BasicStatistics.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/IsotopeDistribution.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/CoarseIsotopePatternGenerator.h>
#include <OpenMS/CHEMISTRY/EmpiricalFormula.h>
namespace OpenMS
{
// Default constructor: registers all model parameters (averagine composition,
// isotope pattern settings, charge, monoisotopic m/z) with their defaults.
ExtendedIsotopeModel::ExtendedIsotopeModel() :
  InterpolationModel(),
  charge_(0),
  monoisotopic_mz_(0.0)
{
  setName("ExtendedIsotopeModel");
  // averagine model: average atomic composition per Dalton of peptide mass
  defaults_.setValue("averagines:C", 0.04443989f, "Number of C atoms per Dalton of mass.", {"advanced"});
  defaults_.setValue("averagines:H", 0.06981572f, "Number of H atoms per Dalton of mass.", {"advanced"});
  defaults_.setValue("averagines:N", 0.01221773f, "Number of N atoms per Dalton of mass.", {"advanced"});
  defaults_.setValue("averagines:O", 0.01329399f, "Number of O atoms per Dalton of mass.", {"advanced"});
  defaults_.setValue("averagines:S", 0.00037525f, "Number of S atoms per Dalton of mass.", {"advanced"});
  // isotope pattern shape parameters
  defaults_.setValue("isotope:trim_right_cutoff", 0.001, "Cutoff in averagine distribution, trailing isotopes below this relative intensity are not considered.", {"advanced"});
  defaults_.setValue("isotope:maximum", 100, "Maximum isotopic rank to be considered.", {"advanced"});
  defaults_.setValue("isotope:distance", 1.000495, "Distance between consecutive isotopic peaks.", {"advanced"});
  defaults_.setValue("isotope:stdev", 0.1, "Standard deviation of gaussian applied to the averagine isotopic pattern to simulate the inaccuracy of the mass spectrometer.", {"advanced"});
  defaults_.setValue("charge", 1, "Charge state of the model.", {"advanced"});
  defaults_.setValue("isotope:monoisotopic_mz", 1.0, "Monoisotopic m/z of the model.", {"advanced"});
  // copy defaults into the active parameter set and update cached members
  defaultsToParam_();
}
// Copy constructor: state is transferred via the parameter object;
// updateMembers_() then rebuilds the cached members and re-samples the model.
ExtendedIsotopeModel::ExtendedIsotopeModel(const ExtendedIsotopeModel & source) :
  InterpolationModel(source)
{
  setParameters(source.getParameters());
  updateMembers_();
}
// Defaulted destructor: no resources beyond base-class members to release.
ExtendedIsotopeModel::~ExtendedIsotopeModel() = default;
// Assignment operator: mirrors the copy constructor - copy the base class,
// transfer the state via the parameter object, then rebuild cached members.
ExtendedIsotopeModel & ExtendedIsotopeModel::operator=(const ExtendedIsotopeModel & source)
{
  if (this != &source) // guard against self-assignment
  {
    InterpolationModel::operator=(source);
    setParameters(source.getParameters());
    updateMembers_();
  }
  return *this;
}
// Build the sampled model intensities: an averagine isotope pattern at the
// configured charge/mass, convolved with a Gaussian to model instrument
// peak width, stored in the interpolation table.
void ExtendedIsotopeModel::setSamples()
{
  // MAGIC alert, num stdev for smooth table for normal distribution
  CoordinateType normal_widening_num_stdev = 4.;
  // Actual width for values in the smooth table for normal distribution
  CoordinateType normal_widening_width = isotope_stdev_ * normal_widening_num_stdev;
  typedef std::vector<double> ContainerType;
  ContainerType isotopes_exact;
  // neutral-ish mass from m/z and charge (no proton-mass correction here)
  CoordinateType mass = monoisotopic_mz_ * charge_;
  // averagine approximation: rounded atom counts per element for this mass
  Int C_num = Int(0.5 + mass * averagine_[C]);
  Int N_num = Int(0.5 + mass * averagine_[N]);
  Int O_num = Int(0.5 + mass * averagine_[O]);
  Int H_num = Int(0.5 + mass * averagine_[H]);
  Int S_num = Int(0.5 + mass * averagine_[S]);
  // assemble a sum formula string, skipping elements with zero atoms
  String form("");
  if (C_num)
  {
    form.append("C").append(String(C_num));
  }
  if (H_num)
  {
    form.append("H").append(String(H_num));
  }
  if (N_num)
  {
    form.append("N").append(String(N_num));
  }
  if (O_num)
  {
    form.append("O").append(String(O_num));
  }
  if (S_num)
  {
    form.append("S").append(String(S_num));
  }
  EmpiricalFormula formula(form);
  IsotopeDistribution isotope_distribution = formula.getIsotopeDistribution(CoarseIsotopePatternGenerator(max_isotope_));
  isotope_distribution.trimRight(trim_right_cutoff_);
  isotope_distribution.renormalize();
  // compute the average mass (-offset)
  for (const Peak1D& peak : isotope_distribution)
  {
    isotopes_exact.push_back(peak.getIntensity());
  }
  // "stretch" the averagine isotope distribution
  Size isotopes_exact_size = isotopes_exact.size();
  // NOTE(review): the resize target does not divide by charge_ (the index
  // computation below does), so the buffer is at least large enough - verify.
  isotopes_exact.resize(Size((isotopes_exact_size - 1)
                             * isotope_distance_ / interpolation_step_ + 1.6)); // round up a bit more
  // move each isotope intensity to its sampled position (spacing scaled by
  // isotope distance, sampling step and charge); iterate backwards so
  // entries are not overwritten before being moved
  for (Size i = isotopes_exact_size - 1; i; --i)
  {
    // we don't need to move the 0-th entry
    isotopes_exact[Size(CoordinateType(i) *
                        isotope_distance_ / interpolation_step_ / charge_ + 0.5)]
      = isotopes_exact[i];
    isotopes_exact[i] = 0;
  }
  // compute the normal distribution (to be added for widening the averagine isotope distribution)
  Math::BasicStatistics<> normal_widening_model;
  normal_widening_model.setSum(1);
  normal_widening_model.setMean(0);
  normal_widening_model.setVariance(isotope_stdev_ * isotope_stdev_);
  // fill a container with CoordinateType points
  ContainerType normal_widening_coordinate;
  for (double coord = -normal_widening_width;
       coord <= normal_widening_width;
       coord += interpolation_step_
       )
  {
    normal_widening_coordinate.push_back(coord);
  }
  // compute normal approximation at these CoordinateType points
  ContainerType normal_widening;
  normal_widening_model.normalApproximation(normal_widening, normal_widening_coordinate);
  // fill linear interpolation
  const ContainerType & left = isotopes_exact;
  const ContainerType & right = normal_widening;
  ContainerType & result = interpolation_.getData();
  result.clear();
  // result length: full convolution length, capped by the expected extent
  Int rMax = std::min(Int(left.size() + right.size() - 1), Int(2 * normal_widening_width / interpolation_step_ * max_isotope_ + 1));
  result.resize(rMax, 0);
  // discrete convolution of the stretched isotope sticks with the Gaussian;
  // we loop backwards because then the small products tend to come first
  // (for better numerics)
  for (SignedSize i = left.size() - 1; i >= 0; --i)
  {
    if (left[i] == 0)
    {
      continue; // skip empty positions of the sparse stick spectrum
    }
    for (SignedSize j = std::min<SignedSize>(rMax - i, right.size()) - 1; j >= 0; --j)
    {
      result[i + j] += left[i] * right[j];
    }
  }
  // set interpolation
  interpolation_.setMapping(interpolation_step_, normal_widening_width / interpolation_step_, monoisotopic_mz_);
  // scale data so that integral over distribution equals one
  // multiply sum by interpolation_step_ -> rectangular approximation of integral
  IntensityType factor = scaling_ / interpolation_step_ /
                         std::accumulate(result.begin(), result.end(), IntensityType(0));
  for (auto& value : result)
  {
    value *= factor;
  }
}
// Shift the model to a new interpolation offset. The monoisotopic m/z moves
// by the same delta; it must be computed *before* the base class updates the
// stored offset.
void ExtendedIsotopeModel::setOffset(CoordinateType offset)
{
  double diff = offset - getInterpolation().getOffset();
  monoisotopic_mz_ += diff;
  InterpolationModel::setOffset(offset);
  // keep the parameter object in sync with the shifted member:
  param_.setValue("isotope:monoisotopic_mz", monoisotopic_mz_);
}
// Current offset of the underlying interpolation table.
ExtendedIsotopeModel::CoordinateType ExtendedIsotopeModel::getOffset()
{
  return getInterpolation().getOffset();
}
// Charge state the model was configured with (parameter "charge").
UInt ExtendedIsotopeModel::getCharge() const
{
  return charge_;
}
// Model center: the monoisotopic m/z (not an intensity-weighted centroid).
ExtendedIsotopeModel::CoordinateType ExtendedIsotopeModel::getCenter() const
{
  return monoisotopic_mz_;
}
// Pull the current parameter values into the member cache, then rebuild the
// sampled model.
void ExtendedIsotopeModel::updateMembers_()
{
  InterpolationModel::updateMembers_();
  // charge state and model position:
  charge_ = param_.getValue("charge");
  monoisotopic_mz_ = param_.getValue("isotope:monoisotopic_mz");
  // isotope pattern settings:
  isotope_distance_ = param_.getValue("isotope:distance");
  isotope_stdev_ = param_.getValue("isotope:stdev");
  max_isotope_ = param_.getValue("isotope:maximum");
  trim_right_cutoff_ = param_.getValue("isotope:trim_right_cutoff");
  // averagine composition (atoms per Dalton):
  averagine_[C] = param_.getValue("averagines:C");
  averagine_[H] = param_.getValue("averagines:H");
  averagine_[N] = param_.getValue("averagines:N");
  averagine_[O] = param_.getValue("averagines:O");
  averagine_[S] = param_.getValue("averagines:S");
  // re-sample the distribution with the new settings (must come last):
  setSamples();
}
}
| C++ |
// ---- src/openms/source/FEATUREFINDER/MassTraceDetection.cpp ----
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Erhan Kenar, Holger Franken, Mohammed Alhigaylan $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/MassTraceDetection.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <boost/dynamic_bitset.hpp>
#include <OpenMS/KERNEL/SpectrumHelper.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CONCEPT/Exception.h>
namespace OpenMS
{
// Default constructor: registers all algorithm parameters with defaults and
// enables command-line progress logging.
MassTraceDetection::MassTraceDetection() :
  DefaultParamHandler("MassTraceDetection"), ProgressLogger()
{
  defaults_.setValue("mass_error_ppm", 20.0, "Allowed mass deviation (in ppm).");
  defaults_.setValue("noise_threshold_int", 10.0, "Intensity threshold below which peaks are removed as noise.");
  defaults_.setValue("chrom_peak_snr", 3.0, "Minimum intensity above noise_threshold_int (signal-to-noise) a peak should have to be considered an apex.");
  defaults_.setValue("ion_mobility_tolerance", 0.01, "Allowed ion mobility deviation (in 1/k0).");
  defaults_.setValue("reestimate_mt_sd", "true", "Enables dynamic re-estimation of m/z variance during mass trace collection stage.");
  defaults_.setValidStrings("reestimate_mt_sd", {"true","false"});
  defaults_.setValue("quant_method", String(MassTrace::names_of_quantmethod[0]), "Method of quantification for mass traces. For LC data 'area' is recommended, 'median' for direct injection data. 'max_height' simply uses the most intense peak in the trace.");
  defaults_.setValidStrings("quant_method", std::vector<std::string>(MassTrace::names_of_quantmethod, MassTrace::names_of_quantmethod +(int)MassTrace::SIZE_OF_MT_QUANTMETHOD));
  // advanced parameters
  defaults_.setValue("trace_termination_criterion", "outlier", "Termination criterion for the extension of mass traces. In 'outlier' mode, trace extension cancels if a predefined number of consecutive outliers are found (see trace_termination_outliers parameter). In 'sample_rate' mode, trace extension in both directions stops if ratio of found peaks versus visited spectra falls below the 'min_sample_rate' threshold.", {"advanced"});
  defaults_.setValidStrings("trace_termination_criterion", {"outlier","sample_rate"});
  defaults_.setValue("trace_termination_outliers", 5, "Mass trace extension in one direction cancels if this number of consecutive spectra with no detectable peaks is reached.", {"advanced"});
  defaults_.setValue("min_sample_rate", 0.5, "Minimum fraction of scans along the mass trace that must contain a peak.", {"advanced"});
  defaults_.setValue("min_trace_length", 5.0, "Minimum expected length of a mass trace (in seconds).", {"advanced"});
  defaults_.setValue("max_trace_length", -1.0, "Maximum expected length of a mass trace (in seconds). Set to a negative value to disable maximal length check during mass trace detection.", {"advanced"});
  // copy defaults into the active parameter set and update cached members
  defaultsToParam_();
  this->setLogType(CMD);
}
// Defaulted destructor: no manually managed resources.
MassTraceDetection::~MassTraceDetection() = default;
// Lightweight record of a potential chromatographic apex: its intensity and
// its position (spectrum index, peak index within that spectrum).
MassTraceDetection::Apex::Apex(double intensity, Size scan_idx, Size peak_idx):
  intensity(intensity),
  scan_idx(scan_idx),
  peak_idx(peak_idx)
{}
// Incrementally update an intensity-weighted mean without re-summing all
// observations: "prev_counter" accumulates sum(w_i * x_i), "prev_denom"
// accumulates sum(w_i). Both are updated multiplicatively, and the centroid
// is scaled by the ratio of the two update factors.
void MassTraceDetection::updateIterativeWeightedMean_(const double& added_value,
                                                      const double& added_intensity,
                                                      double& centroid_value,
                                                      double& prev_counter,
                                                      double& prev_denom)
{
  const double numerator_factor = 1.0 + (added_intensity * added_value) / prev_counter;
  const double denominator_factor = 1.0 + (added_intensity) / prev_denom;
  centroid_value *= (numerator_factor / denominator_factor);
  prev_counter *= numerator_factor;   // now equals old counter + w*x
  prev_denom *= denominator_factor;   // now equals old denom + w
}
// detect presence of ion mobility data and locate all relevant meta array indices.
void MassTraceDetection::getIMIndices_(
  const PeakMap& spectra,
  int& fwhm_meta_idx, bool& has_fwhm_mz,
  int& im_idx, bool& has_centroid_im,
  int& im_fwhm_idx, bool& has_fwhm_im
) const
{
  // The indices are determined from the first spectrum that carries float
  // meta data arrays; consistency across all spectra is validated below.
  for (const auto& spec : spectra)
  {
    const auto& fda = spec.getFloatDataArrays();
    if (!fda.empty())
    {
      auto it_fwhm = getDataArrayByName(fda, Constants::UserParam::FWHM_MZ_ppm);
      auto it_im = getDataArrayByName(fda, Constants::UserParam::ION_MOBILITY);
      auto it_imf = getDataArrayByName(fda, Constants::UserParam::FWHM_IM);
      if (it_fwhm != fda.end())
      {
        fwhm_meta_idx = std::distance(fda.begin(), it_fwhm);
        has_fwhm_mz = true;
      }
      if (it_im != fda.end())
      {
        im_idx = std::distance(fda.begin(), it_im);
        has_centroid_im = true;
      }
      if (it_imf != fda.end())
      {
        im_fwhm_idx = std::distance(fda.begin(), it_imf);
        has_fwhm_im = true;
      }
      break; // only the first spectrum with arrays defines the indices
    }
  }
  // validate that all meta arrays are consistently present or absent across all spectra
  auto validate_meta_array = [&](const String& name, int idx) {
    if (idx == -1) return; // array type not detected at all -> nothing to check
    Size valid_count = 0;
    for (const auto& spec : spectra)
    {
      const auto& fda = spec.getFloatDataArrays();
      if (!fda.empty() && idx < (int)fda.size() && fda[idx].getName() == name)
      {
        // per-peak meta data must line up 1:1 with the peaks:
        if (fda[idx].size() != spec.size())
        {
          throw OpenMS::Exception::InvalidSize(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, spec.size(), "FloatDataArray size does not match spectrum size");
        }
        ++valid_count;
      }
    }
    if (valid_count > 0 && valid_count != spectra.size())
    {
      throw OpenMS::Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                            name + " meta arrays must be consistently present or absent across all MS spectra ["
                                            + String(valid_count) + "/" + String(spectra.size()) + "].");
    }
  };
  validate_meta_array(Constants::UserParam::FWHM_MZ_ppm, fwhm_meta_idx);
  validate_meta_array(Constants::UserParam::ION_MOBILITY, im_idx);
  validate_meta_array(Constants::UserParam::FWHM_IM, im_fwhm_idx);
}
// Convenience overload: materialize the RT/m-z area given by the iterator
// range into a temporary PeakMap and run trace detection on it.
void MassTraceDetection::run(PeakMap::ConstAreaIterator& begin,
                             PeakMap::ConstAreaIterator& end,
                             std::vector<MassTrace>& found_masstraces)
{
  PeakMap map;
  MSSpectrum current_spectrum;
  if (begin == end)
  {
    return;
  }
  for (; begin != end; ++begin)
  {
    // AreaIterator points on novel spectrum?
    if (begin.getRT() != current_spectrum.getRT())
    {
      // save new spectrum in map
      // (relies on a freshly constructed MSSpectrum reporting RT == -1, so
      // the initial empty spectrum is not added - TODO confirm default RT)
      if (current_spectrum.getRT() != -1)
      {
        map.addSpectrum(current_spectrum);
      }
      current_spectrum.clear(false); // drop peaks, keep meta data
      current_spectrum.setRT(begin.getRT());
    }
    current_spectrum.push_back(*begin);
  }
  // flush the last collected spectrum:
  map.addSpectrum(current_spectrum);
  run(map, found_masstraces);
}
// Update a weighted standard deviation estimate of trace m/z values with one
// new peak. The two variance contributions - the previous estimate and the
// new peak's squared m/z deviation - are combined in log space for numerical
// robustness; the update is skipped if it would collapse the estimate to
// (near) zero, e.g. when the new m/z equals the current mean.
void updateWeightedSDEstimateRobust(const PeakType& p, const double& mean_t1, double& sd_t, double& last_weights_sum)
{
  const double log_prev_contrib = std::log(last_weights_sum) + 2 * std::log(sd_t);
  const double log_new_contrib = std::log(p.getIntensity()) + 2 * std::log(std::abs(p.getMZ() - mean_t1));
  const double combined = std::sqrt(std::exp(log_prev_contrib) + std::exp(log_new_contrib));
  const double updated_weights_sum = last_weights_sum + p.getIntensity();
  const double sd_candidate = combined / std::sqrt(updated_weights_sum);
  if (sd_candidate > std::numeric_limits<double>::epsilon())
  {
    sd_t = sd_candidate;
  }
  last_weights_sum = updated_weights_sum;
}
// Main entry point: detect mass traces in an MS1 peak map.
// Two stages: (1) collect potential chromatographic apices (peaks well above
// the noise level), (2) grow traces around them in order of decreasing apex
// intensity (delegated to run_()).
void MassTraceDetection::run(const PeakMap& input_exp, std::vector<MassTrace>& found_masstraces, const Size max_traces)
{
  // make sure the output vector is empty
  found_masstraces.clear();
  // gather all peaks that are potential chromatographic peak apices
  // - use work_exp for actual work (remove peaks below noise threshold)
  // - store potential apices in chrom_apices
  PeakMap work_exp;
  std::vector<Apex> chrom_apices;
  Size total_peak_count(0);
  // spec_offsets: cumulative peak counts, used to address peaks in a flat
  // bitset (see run_())
  std::vector<Size> spec_offsets;
  spec_offsets.push_back(0);
  Size spectra_count(0);
  // *********************************************************** //
  //  Step 1: Detecting potential chromatographic apices
  // *********************************************************** //
  for (const MSSpectrum& it : input_exp)
  {
    // check if this is a MS1 survey scan
    if (it.getMSLevel() != 1)
    {
      continue;
    }
    std::vector<Size> indices_passing;
    for (Size peak_idx = 0; peak_idx < it.size(); ++peak_idx)
    {
      double tmp_peak_int((it)[peak_idx].getIntensity());
      if (tmp_peak_int > noise_threshold_int_)
      {
        // Assume that noise_threshold_int_ contains the noise level of the
        // data and we want to be chrom_peak_snr times above the noise level
        // --> add this peak as possible chromatographic apex
        if (tmp_peak_int > chrom_peak_snr_ * noise_threshold_int_)
        {
          // note: peak index refers to the *filtered* spectrum
          chrom_apices.emplace_back(tmp_peak_int, spectra_count, indices_passing.size());
        }
        indices_passing.push_back(peak_idx);
        ++total_peak_count;
      }
    }
    // keep only above-noise peaks in the working copy of the spectrum
    PeakMap::SpectrumType tmp_spec(it);
    tmp_spec.select(indices_passing);
    work_exp.addSpectrum(tmp_spec);
    spec_offsets.push_back(spec_offsets.back() + tmp_spec.size());
    ++spectra_count;
  }
  if (spectra_count < 3)
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                  "Input map consists of too few MS1 spectra (less than 3!). Aborting...", String(spectra_count));
  }
  // discard last spectrum's offset
  spec_offsets.pop_back();
  // sort ascending by intensity; run_() iterates in reverse (most intense
  // apices first); stable sort keeps ties in scan order
  std::stable_sort(chrom_apices.begin(), chrom_apices.end(),
                   [](const Apex & a,
                      const Apex & b) -> bool
                   {
                     return a.intensity < b.intensity;
                   });
  // *********************************************************************
  // Step 2: start extending mass traces beginning with the apex peak (go
  // through all peaks in order of decreasing intensity)
  // *********************************************************************
  run_(chrom_apices, total_peak_count, work_exp, spec_offsets, found_masstraces, max_traces);
  return;
} // end of MassTraceDetection::run
// Locate the best candidate peak in "spectrum" for extending a trace whose
// current centroid is at "centroid_mz" (m/z tolerance: +/- 3 * ftl_sd) and,
// for ion mobility data, at "centroid_im" (+/- ion_mobility_tolerance_).
// Returns a default-constructed (found == false) candidate if nothing fits.
// Note: in the non-IM branch the nearest peak is returned without a bounds
// check - callers are expected to validate via isPeakAcceptable_().
MassTraceDetection::PeakCandidate MassTraceDetection::findBestPeak_(
  const MSSpectrum& spectrum,
  double centroid_mz,
  double ftl_sd,
  double centroid_im) const
{
  PeakCandidate candidate;
  if (spectrum.empty())
  {
    return candidate; // no peaks -> candidate.found stays false
  }
  double right_bound = centroid_mz + 3 * ftl_sd;
  double left_bound = centroid_mz - 3 * ftl_sd;
  if (!has_centroid_im_)
  {
    // Standard LC-MS data: find peak closest to target m/z
    candidate.idx = spectrum.findNearest(centroid_mz);
    candidate.mz = spectrum[candidate.idx].getMZ();
    candidate.intensity = spectrum[candidate.idx].getIntensity();
    candidate.found = true;
  }
  else
  {
    // LC-IMS-MS data: find best peak considering both m/z and ion mobility
    double right_bound_im = centroid_im + ion_mobility_tolerance_;
    double left_bound_im = centroid_im - ion_mobility_tolerance_;
    // restrict the scan to the m/z tolerance window:
    auto left_bound_it = spectrum.MZBegin(left_bound);
    Size idx_left = left_bound_it - spectrum.begin();
    auto right_bound_it = spectrum.MZEnd(right_bound);
    Size idx_right = right_bound_it - spectrum.begin();
    // Search within m/z window for peak with closest ion mobility match
    for (Size i = idx_left; i < idx_right; ++i)
    {
      double im_value = spectrum.getFloatDataArrays()[ion_mobility_idx_][i];
      if (im_value >= left_bound_im && im_value <= right_bound_im)
      {
        // among IM-compatible peaks, prefer the one closest in m/z:
        if (!candidate.found || std::abs(spectrum[i].getMZ() - centroid_mz) < std::abs(candidate.mz - centroid_mz))
        {
          candidate.idx = i;
          candidate.mz = spectrum[i].getMZ();
          candidate.intensity = spectrum[i].getIntensity();
          candidate.im = im_value;
          candidate.found = true;
        }
      }
    }
  }
  return candidate;
}
// Decide whether a candidate peak may extend the current mass trace: it must
// exist, lie inside the m/z window (+/- 3 standard deviations around the
// trace centroid), for ion mobility data also inside the IM tolerance
// window, and must not already belong to another trace.
bool MassTraceDetection::isPeakAcceptable_(
  const PeakCandidate& candidate,
  double centroid_mz,
  double ftl_sd,
  double centroid_im,
  Size spectrum_idx,
  const std::vector<Size>& spec_offsets,
  const boost::dynamic_bitset<>& peak_visited) const
{
  if (!candidate.found)
  {
    return false;
  }
  // m/z tolerance window check:
  const double upper_mz = centroid_mz + 3 * ftl_sd;
  const double lower_mz = centroid_mz - 3 * ftl_sd;
  if ((candidate.mz > upper_mz) || (candidate.mz < lower_mz))
  {
    return false;
  }
  // ion mobility tolerance window check (IM data only):
  if (has_centroid_im_)
  {
    const double upper_im = centroid_im + ion_mobility_tolerance_;
    const double lower_im = centroid_im - ion_mobility_tolerance_;
    if (candidate.im < lower_im || candidate.im > upper_im)
    {
      return false;
    }
  }
  // reject peaks already consumed by a previously assembled trace:
  return !peak_visited[spec_offsets[spectrum_idx] + candidate.idx];
}
// Incorporate an accepted candidate peak into the growing mass trace: append
// it on the correct side, update the weighted m/z (and IM) centroids, record
// FWHM meta data, and optionally re-estimate the m/z tolerance.
void MassTraceDetection::processPeak_(
  const PeakCandidate& candidate,
  const MSSpectrum& spectrum,
  std::list<PeakType>& current_trace,
  std::vector<std::pair<Size, Size>>& gathered_idx,
  std::vector<double>& fwhms_mz,
  std::vector<double>& fwhms_im,
  double& centroid_mz,
  double& centroid_im,
  double& prev_counter,
  double& prev_denom,
  double& prev_counter_im,
  double& prev_denom_im,
  double& ftl_sd,
  double& intensity_so_far,
  Size spectrum_idx,
  bool is_upward_extension)
{
  Peak2D next_peak;
  next_peak.setRT(spectrum.getRT());
  next_peak.setMZ(candidate.mz);
  next_peak.setIntensity(candidate.intensity);
  // Add peak to the growing mass trace
  if (is_upward_extension)
  {
    current_trace.push_back(next_peak);  // extending towards higher RT
  }
  else
  {
    current_trace.push_front(next_peak); // extending towards lower RT
  }
  // Update trace centroid m/z with intensity-weighted average
  updateIterativeWeightedMean_(candidate.mz, candidate.intensity, centroid_mz, prev_counter, prev_denom);
  // remember (spectrum, peak) so the peak can be marked as visited later
  gathered_idx.emplace_back(spectrum_idx, candidate.idx);
  // Update ion mobility centroid if available
  if (has_centroid_im_)
  {
    updateIterativeWeightedMean_(candidate.im, candidate.intensity, centroid_im, prev_counter_im, prev_denom_im);
  }
  // Collect FWHM metadata for trace quality assessment
  if (has_fwhm_mz_)
  {
    fwhms_mz.push_back(spectrum.getFloatDataArrays()[fwhm_meta_idx_][candidate.idx]);
  }
  if (has_fwhm_im_)
  {
    fwhms_im.push_back(spectrum.getFloatDataArrays()[im_fwhm_idx_][candidate.idx]);
  }
  // Dynamically adjust m/z tolerance based on observed variance
  if (reestimate_mt_sd_)
  {
    updateWeightedSDEstimateRobust(next_peak, centroid_mz, ftl_sd, intensity_so_far);
  }
}
bool MassTraceDetection::isTraceValid_(
const std::list<PeakType>& trace,
Size total_scans_visited,
Size consecutive_missed_down,
Size consecutive_missed_up) const
{
double rt_range = std::fabs(trace.rbegin()->getRT() - trace.begin()->getRT());
// Check length criteria
if (rt_range < min_trace_length_)
{
return false;
}
if (max_trace_length_ >= 0.0 && rt_range > max_trace_length_)
{
return false;
}
// Check quality (sample rate)
Size adjusted_scans = total_scans_visited - consecutive_missed_down - consecutive_missed_up;
double mt_quality = static_cast<double>(trace.size()) / static_cast<double>(adjusted_scans);
return mt_quality >= min_sample_rate_;
}
/// Core mass trace extension routine: starting from each unvisited apex,
/// grows a trace downward and upward along RT, validates it, and appends
/// accepted traces to @p found_masstraces.
///
/// @param chrom_apices     candidate apex peaks; iterated with crbegin(), so
///                         presumably sorted ascending by intensity (most
///                         intense processed first) -- confirm with caller
/// @param total_peak_count total number of peaks in @p work_exp (sizes the visited bitset)
/// @param work_exp         the (centroided) input peak map
/// @param spec_offsets     cumulative per-spectrum peak counts; a peak is
///                         addressed as spec_offsets[scan] + peak_idx in the bitset
/// @param found_masstraces output container for accepted traces
/// @param max_traces       if > 0, stop once this many traces were found
void MassTraceDetection::run_(const std::vector<Apex>& chrom_apices,
                              const Size total_peak_count,
                              const PeakMap& work_exp,
                              const std::vector<Size>& spec_offsets,
                              std::vector<MassTrace>& found_masstraces,
                              const Size max_traces)
{
  // one bit per peak of the whole experiment; set once a peak was consumed by an accepted trace
  boost::dynamic_bitset<> peak_visited(total_peak_count);
  Size trace_number(1);
  // Detect ion mobility and FWHM metadata arrays in the dataset
  getIMIndices_(work_exp,
                fwhm_meta_idx_, has_fwhm_mz_,
                ion_mobility_idx_, has_centroid_im_,
                im_fwhm_idx_, has_fwhm_im_);
  this->startProgress(0, total_peak_count, "mass trace detection");
  Size peaks_detected(0);
  for (auto m_it = chrom_apices.crbegin(); m_it != chrom_apices.crend(); ++m_it)
  {
    Size apex_scan_idx(m_it->scan_idx);
    Size apex_peak_idx(m_it->peak_idx);
    // skip apices that already belong to a previously accepted trace
    if (peak_visited[spec_offsets[apex_scan_idx] + apex_peak_idx]) { continue; }
    Peak2D apex_peak;
    apex_peak.setRT(work_exp[apex_scan_idx].getRT());
    apex_peak.setMZ(work_exp[apex_scan_idx][apex_peak_idx].getMZ());
    apex_peak.setIntensity(work_exp[apex_scan_idx][apex_peak_idx].getIntensity());
    std::list<PeakType> current_trace;
    current_trace.push_back(apex_peak);
    std::vector<double> fwhms_mz; // peak-FWHM meta values of collected peaks
    std::vector<double> fwhms_im; // peak-FWHM ion mobility peak FWHM of collected peaks
    // Initialization for the iterative version of weighted m/z mean calculation
    // NOTE(review): prev_counter/prev_denom are pre-seeded with the apex
    // contribution and then the apex is fed to updateIterativeWeightedMean_
    // once more, so the apex effectively enters the running mean twice
    // (centroid value is unaffected here since it is the only point) --
    // looks like deliberate extra weight on the apex, but confirm.
    double centroid_mz(apex_peak.getMZ());
    double prev_counter(apex_peak.getIntensity() * apex_peak.getMZ());
    double prev_denom(apex_peak.getIntensity());
    updateIterativeWeightedMean_(apex_peak.getMZ(), apex_peak.getIntensity(), centroid_mz, prev_counter, prev_denom);
    // Initialization for the iterative version of weighted ion mobility mean calculation
    double centroid_im(-1);
    double prev_counter_im(-1);
    double prev_denom_im(-1);
    if (has_centroid_im_)
    {
      centroid_im = work_exp[apex_scan_idx].getFloatDataArrays()[ion_mobility_idx_][apex_peak_idx];
      prev_counter_im = apex_peak.getIntensity() * centroid_im;
      prev_denom_im = apex_peak.getIntensity();
      updateIterativeWeightedMean_(work_exp[apex_scan_idx].getFloatDataArrays()[ion_mobility_idx_][apex_peak_idx],
                                   apex_peak.getIntensity(), centroid_im, prev_counter_im, prev_denom_im);
    }
    // remember (scan, peak) of every collected peak so the bitset can be
    // updated only if the trace is eventually accepted
    std::vector<std::pair<Size, Size>> gathered_idx;
    gathered_idx.emplace_back(apex_scan_idx, apex_peak_idx);
    if (has_fwhm_mz_) { fwhms_mz.push_back(work_exp[apex_scan_idx].getFloatDataArrays()[fwhm_meta_idx_][apex_peak_idx]); }
    if (has_fwhm_im_) { fwhms_im.push_back(work_exp[apex_scan_idx].getFloatDataArrays()[im_fwhm_idx_][apex_peak_idx]); }
    // bookkeeping for the bidirectional extension (hit/miss counters, activity flags)
    TraceExtensionState down_state, up_state;
    Size trace_down_idx(apex_scan_idx);
    Size trace_up_idx(apex_scan_idx);
    // initial m/z tolerance (standard deviation) derived from the ppm setting
    double ftl_sd((centroid_mz / 1e6) * mass_error_ppm_);
    double intensity_so_far(apex_peak.getIntensity());
    // extend in both RT directions until both sides hit a boundary or are terminated
    while ((trace_down_idx > 0 && down_state.active) || (trace_up_idx < work_exp.size() - 1 && up_state.active))
    {
      // *********************************************************** //
      // Step 2.1 MOVE DOWN in RT dim
      // *********************************************************** //
      if (trace_down_idx > 0 && down_state.active)
      {
        const MSSpectrum& spec_trace_down = work_exp[trace_down_idx - 1];
        // Only process spectra that contain peaks
        if (!spec_trace_down.empty())
        {
          PeakCandidate candidate = findBestPeak_(spec_trace_down, centroid_mz, ftl_sd, centroid_im);
          if (isPeakAcceptable_(candidate, centroid_mz, ftl_sd, centroid_im, trace_down_idx - 1, spec_offsets, peak_visited))
          {
            processPeak_(candidate, spec_trace_down, current_trace, gathered_idx,
                         fwhms_mz, fwhms_im, centroid_mz, centroid_im,
                         prev_counter, prev_denom, prev_counter_im, prev_denom_im,
                         ftl_sd, intensity_so_far, trace_down_idx - 1, false);
            ++down_state.hitting_peak_count;
            down_state.consecutive_missed = 0;
          }
          else
          {
            ++down_state.consecutive_missed;
          }
        }
        // Empty spectra don't affect termination counters
        // Move to next spectrum regardless of whether peak was found
        --trace_down_idx;
        ++down_state.scan_counter;
        // Apply termination criteria based on user configuration
        if (trace_termination_criterion_ == OUTLIER)
        {
          // too many consecutive misses: stop extending downward
          if (down_state.consecutive_missed > trace_termination_outliers_)
          {
            down_state.active = false;
          }
        }
        else if (trace_termination_criterion_ == SAMPLE_RATE)
        {
          // overall hit rate (both directions) dropped below the threshold
          Size min_scans_to_consider = 5;
          Size total_hits = down_state.hitting_peak_count + up_state.hitting_peak_count + 1; // +1 for apex
          Size total_scans = down_state.scan_counter + up_state.scan_counter + 1;
          double current_sample_rate = static_cast<double>(total_hits) / static_cast<double>(total_scans);
          if (down_state.scan_counter > min_scans_to_consider && current_sample_rate < min_sample_rate_)
          {
            down_state.active = false;
          }
        }
      }
      // *********************************************************** //
      // Step 2.2 MOVE UP in RT dim
      // *********************************************************** //
      if (trace_up_idx < work_exp.size() - 1 && up_state.active)
      {
        const MSSpectrum& spec_trace_up = work_exp[trace_up_idx + 1];
        // Only process spectra that contain peaks
        if (!spec_trace_up.empty())
        {
          PeakCandidate candidate = findBestPeak_(spec_trace_up, centroid_mz, ftl_sd, centroid_im);
          if (isPeakAcceptable_(candidate, centroid_mz, ftl_sd, centroid_im, trace_up_idx + 1, spec_offsets, peak_visited))
          {
            processPeak_(candidate, spec_trace_up, current_trace, gathered_idx,
                         fwhms_mz, fwhms_im, centroid_mz, centroid_im,
                         prev_counter, prev_denom, prev_counter_im, prev_denom_im,
                         ftl_sd, intensity_so_far, trace_up_idx + 1, true);
            ++up_state.hitting_peak_count;
            up_state.consecutive_missed = 0;
          }
          else
          {
            ++up_state.consecutive_missed;
          }
        }
        // Empty spectra don't affect termination counters
        // Move to next spectrum regardless of whether peak was found
        ++trace_up_idx;
        ++up_state.scan_counter;
        // Apply termination criteria based on user configuration
        if (trace_termination_criterion_ == OUTLIER)
        {
          // too many consecutive misses: stop extending upward
          if (up_state.consecutive_missed > trace_termination_outliers_)
          {
            up_state.active = false;
          }
        }
        else if (trace_termination_criterion_ == SAMPLE_RATE)
        {
          // overall hit rate (both directions) dropped below the threshold
          Size min_scans_to_consider = 5;
          Size total_hits = down_state.hitting_peak_count + up_state.hitting_peak_count + 1; // +1 for apex
          Size total_scans = down_state.scan_counter + up_state.scan_counter + 1;
          double current_sample_rate = static_cast<double>(total_hits) / static_cast<double>(total_scans);
          if (up_state.scan_counter > min_scans_to_consider && current_sample_rate < min_sample_rate_)
          {
            up_state.active = false;
          }
        }
      }
    }
    // *********************************************************** //
    // Step 2.3 check if minimum length and quality of mass trace criteria are met
    // *********************************************************** //
    Size total_scans = down_state.scan_counter + up_state.scan_counter + 1;
    if (isTraceValid_(current_trace, total_scans, down_state.consecutive_missed, up_state.consecutive_missed))
    {
      // mark all peaks as visited
      for (Size i = 0; i < gathered_idx.size(); ++i)
      {
        peak_visited[spec_offsets[gathered_idx[i].first] + gathered_idx[i].second] = true;
      }
      // create new MassTrace object and store collected peaks from list current_trace
      MassTrace new_trace(current_trace);
      new_trace.updateWeightedMeanRT();
      new_trace.updateWeightedMeanMZ();
      // FWHM summaries use the median (robust against outlier peaks)
      if (!fwhms_mz.empty()) { new_trace.fwhm_mz_avg = Math::median(fwhms_mz.begin(), fwhms_mz.end()); }
      if (!fwhms_im.empty()) { new_trace.fwhm_im_avg = Math::median(fwhms_im.begin(), fwhms_im.end()); }
      if (has_centroid_im_) { new_trace.setCentroidIM(centroid_im); }
      new_trace.setQuantMethod(quant_method_);
      new_trace.updateWeightedMZsd();
      new_trace.setLabel("T" + String(trace_number));
      ++trace_number;
      found_masstraces.push_back(new_trace);
      peaks_detected += new_trace.getSize();
      this->setProgress(peaks_detected);
      // check if we already reached the (optional) maximum number of traces
      if (max_traces > 0 && found_masstraces.size() == max_traces) { break; }
    }
  }
  this->endProgress();
}
void MassTraceDetection::updateMembers_()
{
mass_error_ppm_ = (double)param_.getValue("mass_error_ppm");
noise_threshold_int_ = (double)param_.getValue("noise_threshold_int");
chrom_peak_snr_ = (double)param_.getValue("chrom_peak_snr");
ion_mobility_tolerance_ = (double)param_.getValue("ion_mobility_tolerance");
quant_method_ = MassTrace::getQuantMethod((String)param_.getValue("quant_method").toString());
String criterion_str = (String)param_.getValue("trace_termination_criterion").toString();
if (criterion_str == "outlier")
{
trace_termination_criterion_ = OUTLIER;
}
else if (criterion_str == "sample_rate")
{
trace_termination_criterion_ = SAMPLE_RATE;
}
else
{
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Invalid trace_termination_criterion: " + criterion_str);
}
trace_termination_outliers_ = (Size)param_.getValue("trace_termination_outliers");
min_sample_rate_ = (double)param_.getValue("min_sample_rate");
min_trace_length_ = (double)param_.getValue("min_trace_length");
max_trace_length_ = (double)param_.getValue("max_trace_length");
reestimate_mt_sd_ = param_.getValue("reestimate_mt_sd").toBool();
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/GaussModel.cpp | .cpp | 3,179 | 107 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/GaussModel.h>
namespace OpenMS
{
/// Default constructor: registers the model's parameters (bounding box and
/// Gaussian statistics) with their defaults and syncs the members.
GaussModel::GaussModel() :
  InterpolationModel(),
  statistics_()
{
  setName("GaussModel");
  // bounding box of the data region the model was fitted to
  defaults_.setValue("bounding_box:min", 0.0, "Lower end of bounding box enclosing the data used to fit the model.", {"advanced"});
  defaults_.setValue("bounding_box:max", 1.0, "Upper end of bounding box enclosing the data used to fit the model.", {"advanced"});
  // parameters of the Gaussian itself
  defaults_.setValue("statistics:mean", 0.0, "Centroid position of the model (Gaussian).", {"advanced"});
  defaults_.setValue("statistics:variance", 1.0, "The variance of the Gaussian.", {"advanced"});
  // copy defaults into param_ and trigger updateMembers_()
  defaultsToParam_();
}
/// Copy constructor: copies the base, then re-applies the source's parameters
/// so updateMembers_() rebuilds statistics_ and the sample table.
GaussModel::GaussModel(const GaussModel & source) :
  InterpolationModel(source)
{
  setParameters(source.getParameters());
  updateMembers_();
}
GaussModel::~GaussModel() = default;
/// Assignment operator: self-assignment safe; parameters are copied and the
/// derived members rebuilt via updateMembers_().
GaussModel & GaussModel::operator=(const GaussModel & source)
{
  if (&source == this)
  {
    return *this;
  }
  setParameters(source.getParameters());
  InterpolationModel::operator=(source);
  updateMembers_();
  return *this;
}
void GaussModel::setSamples()
{
LinearInterpolation::container_type & data = interpolation_.getData();
data.clear();
if (max_ == min_)
{
return;
}
data.reserve(UInt((max_ - min_) / interpolation_step_ + 1));
CoordinateType pos = min_;
for (UInt i = 0; pos < max_; ++i)
{
pos = min_ + i * interpolation_step_;
data.push_back(statistics_.normalDensity_sqrt2pi(pos));
}
// scale data so that integral over distribution equals one
// multiply sum by interpolation_step_ -> rectangular approximation of integral
IntensityType factor = scaling_ / interpolation_step_ /
std::accumulate(data.begin(), data.end(), IntensityType(0));
for (auto& value : data)
value *= factor;
interpolation_.setScale(interpolation_step_);
interpolation_.setOffset(min_);
}
/// Pulls the current parameter values into the members and re-samples the
/// interpolation table to match.
void GaussModel::updateMembers_()
{
  InterpolationModel::updateMembers_();
  min_ = param_.getValue("bounding_box:min");
  max_ = param_.getValue("bounding_box:max");
  statistics_.setMean(param_.getValue("statistics:mean"));
  statistics_.setVariance(param_.getValue("statistics:variance"));
  // rebuild the sampled representation with the new settings
  setSamples();
}
/// Translates the model to a new interpolation offset: bounding box and mean
/// are shifted by the same amount, and the parameter object is kept in sync.
void GaussModel::setOffset(CoordinateType offset)
{
  const double shift = offset - getInterpolation().getOffset();
  // shift members first ...
  min_ += shift;
  max_ += shift;
  statistics_.setMean(statistics_.mean() + shift);
  InterpolationModel::setOffset(offset);
  // ... then mirror them into param_ so getParameters() stays consistent
  param_.setValue("statistics:mean", statistics_.mean());
  param_.setValue("bounding_box:min", min_);
  param_.setValue("bounding_box:max", max_);
}
/// Returns the center of the model, i.e. the mean of the Gaussian.
GaussModel::CoordinateType GaussModel::getCenter() const
{
  return statistics_.mean();
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/IsotopeModel.cpp | .cpp | 9,636 | 265 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Clemens Groepl, Chris Bielow $
// --------------------------------------------------------------------------
#include <boost/math/distributions/cauchy.hpp>
#include <OpenMS/FEATUREFINDER/IsotopeModel.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/CoarseIsotopePatternGenerator.h>
#include <OpenMS/MATH/STATISTICS/BasicStatistics.h>
#include <OpenMS/CHEMISTRY/EmpiricalFormula.h>
namespace OpenMS
{
/// Default constructor: registers averagine composition factors, isotope
/// pattern settings and peak-shape parameters, then syncs the members.
IsotopeModel::IsotopeModel() :
  InterpolationModel(),
  charge_(0),
  monoisotopic_mz_(0.0)
{
  setName("IsotopeModel");
  // averagine model: expected atom counts per Dalton of peptide mass
  defaults_.setValue("averagines:C", 0.04443989f, "Number of C atoms per Dalton of mass.", {"advanced"});
  defaults_.setValue("averagines:H", 0.06981572f, "Number of H atoms per Dalton of mass.", {"advanced"});
  defaults_.setValue("averagines:N", 0.01221773f, "Number of N atoms per Dalton of mass.", {"advanced"});
  defaults_.setValue("averagines:O", 0.01329399f, "Number of O atoms per Dalton of mass.", {"advanced"});
  defaults_.setValue("averagines:S", 0.00037525f, "Number of S atoms per Dalton of mass.", {"advanced"});
  // isotope pattern truncation and spacing
  defaults_.setValue("isotope:trim_right_cutoff", 0.001, "Cutoff in averagine distribution, trailing isotopes below this relative intensity are not considered.", {"advanced"});
  defaults_.setValue("isotope:maximum", 100, "Maximum isotopic rank to be considered.", {"advanced"});
  defaults_.setValue("isotope:distance", 1.000495, "Distance between consecutive isotopic peaks.", {"advanced"});
  // peak shape applied around each isotope position
  defaults_.setValue("isotope:mode:mode", "Gaussian", "Peak Shape used around each isotope peak.", {"advanced"});
  defaults_.setValidStrings("isotope:mode:mode", {"Gaussian","Lorentzian"});
  defaults_.setValue("isotope:mode:LorentzFWHM", 0.3, "Full width of the Lorentzian (Cauchy) function applied to the averagine isotopic pattern to simulate the inaccuracy of the mass spectrometer.", {"advanced"});
  defaults_.setValue("isotope:mode:GaussianSD", 0.1, "Standard deviation of Gaussian applied to the averagine isotopic pattern to simulate the inaccuracy of the mass spectrometer.", {"advanced"});
  defaults_.setValue("charge", 1, "Charge state of the model.", {"advanced"});
  defaults_.setValue("statistics:mean", 0.0, "Centroid m/z (as opposed to monoisotopic m/z).", {"advanced"});
  // copy defaults into param_ and trigger updateMembers_()
  defaultsToParam_();
}
/// Copy constructor: copies the base, then re-applies the source's parameters
/// so updateMembers_() rebuilds the derived members.
IsotopeModel::IsotopeModel(const IsotopeModel & source) :
  InterpolationModel(source)
{
  setParameters(source.getParameters());
  updateMembers_();
}
IsotopeModel::~IsotopeModel() = default;
/// Assignment operator: self-assignment safe; parameters are copied and the
/// derived members rebuilt via updateMembers_().
IsotopeModel & IsotopeModel::operator=(const IsotopeModel & source)
{
  if (&source == this)
  {
    return *this;
  }
  InterpolationModel::operator=(source);
  setParameters(source.getParameters());
  updateMembers_();
  return *this;
}
/// Builds an averagine-style empirical formula for the (uncharged) mass
/// represented by this model: mean_ * charge_ Daltons, converted to rounded
/// atom counts via the averagine composition factors.
/// @return empirical formula string composed of C, H, N, O, S counts
EmpiricalFormula IsotopeModel::getFormula()
{
  CoordinateType mass = mean_ * charge_;
  // rounded atom counts, listed in the order they appear in the formula
  const Int atom_counts[] = {
    Int(0.5 + mass * averagine_[C]),
    Int(0.5 + mass * averagine_[H]),
    Int(0.5 + mass * averagine_[N]),
    Int(0.5 + mass * averagine_[O]),
    Int(0.5 + mass * averagine_[S])
  };
  const char* atom_symbols[] = {"C", "H", "N", "O", "S"};
  String form;
  for (int i = 0; i < 5; ++i)
  {
    // elements with a zero count are omitted entirely
    if (atom_counts[i])
    {
      form.append(atom_symbols[i]).append(String(atom_counts[i]));
    }
  }
  return EmpiricalFormula(form);
}
/// Read-only access to the isotope distribution last computed by setSamples().
const IsotopeDistribution & IsotopeModel::getIsotopeDistribution() const
{
  return isotope_distribution_;
}
/// Computes the sampled model from an empirical formula: the coarse isotope
/// distribution is stretched onto the interpolation grid, convolved with a
/// Gaussian or Lorentzian peak shape, normalized to integral scaling_, and
/// stored in the interpolation table. Also derives monoisotopic_mz_ from the
/// distribution's intensity-weighted mean.
/// @param formula the (neutral) empirical formula whose isotope pattern is sampled
void IsotopeModel::setSamples(const EmpiricalFormula & formula)
{
  typedef std::vector<double> ContainerType;
  ContainerType isotopes_exact;
  // coarse isotope pattern, truncated on the right and renormalized to sum 1
  isotope_distribution_ = formula.getIsotopeDistribution(CoarseIsotopePatternGenerator(max_isotope_, true));
  isotope_distribution_.trimRight(trim_right_cutoff_);
  isotope_distribution_.renormalize();
  // compute the average mass (-offset)
  CoordinateType isotopes_mean = 0;
  {
    Int cnt = 0;
    for (Peak1D& peak : isotope_distribution_)
    {
      isotopes_exact.push_back(peak.getIntensity());
      // weighted by isotope index; converted to m/z units below
      isotopes_mean += peak.getIntensity() * cnt;
      ++cnt;
    }
    isotopes_mean *= isotope_distance_ / charge_;
  }
  // (Need not divide by sum of probabilities, which is 1.)
  ///
  // "stretch" the averagine isotope distribution (so we can add datapoints between isotope peaks)
  ///
  size_t isotopes_exact_size = isotopes_exact.size();
  isotopes_exact.resize(size_t((isotopes_exact_size - 1) * isotope_distance_ / interpolation_step_ + 1.6)); // round up a bit more
  // move entries from their dense positions to grid positions, back-to-front
  // so no entry is overwritten before it is moved
  for (Size i = isotopes_exact_size - 1; i; --i)
  {
    // we don't need to move the 0-th entry
    isotopes_exact[size_t(CoordinateType(i) * isotope_distance_ / interpolation_step_ / charge_ + 0.5)]
      = isotopes_exact[i];
    isotopes_exact[i] = 0;
  }
  ////
  // compute the Gaussian/Cauchy distribution (to be added for widening the averagine isotope distribution)
  ////
  ContainerType peak_shape_values_y;
  // fill a container with CoordinateType points (x values)
  CoordinateType peak_width = 0.0;
  if (param_.getValue("isotope:mode:mode") == "Gaussian")
  {
    // Actual width for values in the smooth table for normal distribution
    peak_width = isotope_stdev_ * 4.0; // MAGIC alert, num stdev for smooth table for normal distribution
    ContainerType peak_shape_values_x;
    for (double coord = -peak_width; coord <= peak_width;
         coord += interpolation_step_)
    {
      peak_shape_values_x.push_back(coord);
    }
    // compute normal approximation at these CoordinateType points (y values)
    Math::BasicStatistics<> normal_widening_model;
    normal_widening_model.setSum(1);
    normal_widening_model.setMean(0);
    normal_widening_model.setVariance(isotope_stdev_ * isotope_stdev_);
    normal_widening_model.normalApproximation(peak_shape_values_y, peak_shape_values_x);
  }
  else if (param_.getValue("isotope:mode:mode") == "Lorentzian")
  {
    peak_width = isotope_lorentz_fwhm_ * 8.0; // MAGIC alert: Lorentzian has infinite support, but we need to stop sampling at some point: 8*FWHM
    for (double coord = -peak_width; coord <= peak_width;
         coord += interpolation_step_)
    {
      boost::math::cauchy_distribution<double> cauchy(0., isotope_lorentz_fwhm_ / 2.0);
      double x = boost::math::pdf(cauchy, coord);
      peak_shape_values_y.push_back(x); //cauchy is using HWHM not FWHM
    }
  }
  ///
  // fold the Gaussian/Lorentzian at each averagine peak, i.e. fill linear interpolation
  ///
  // discrete convolution of the stretched pattern ('left') with the peak
  // shape ('right'), truncated to r_max samples
  const ContainerType & left = isotopes_exact;
  const ContainerType & right = peak_shape_values_y;
  ContainerType & result = interpolation_.getData();
  result.clear();
  SignedSize r_max = std::min(SignedSize(left.size() + right.size() - 1),
                              SignedSize(2 * peak_width / interpolation_step_ * max_isotope_ + 1));
  result.resize(r_max, 0);
  // we loop backwards because then the small products tend to come first
  // (for better numerics)
  for (SignedSize i = left.size() - 1; i >= 0; --i)
  {
    if (left[i] == 0)
    {
      continue;
    }
    for (SignedSize j = std::min(r_max - i, SignedSize(right.size())) - 1; j >= 0; --j)
    {
      result[i + j] += left[i] * right[j];
    }
  }
  // monoisotopic position = centroid minus the distribution's mean offset
  monoisotopic_mz_ = mean_ - isotopes_mean;
  interpolation_.setMapping(interpolation_step_, peak_width / interpolation_step_, monoisotopic_mz_);
  // scale data so that integral over distribution equals one
  // multiply sum by interpolation_step_ -> rectangular approximation of integral
  IntensityType factor = scaling_ / (interpolation_step_ * std::accumulate(result.begin(), result.end(), IntensityType(0)));
  for (auto& value : result)
  {
    value *= factor;
  }
}
/// Translates the model along m/z: the centroid and the monoisotopic position
/// move by the same amount as the interpolation offset; param_ is kept in sync.
void IsotopeModel::setOffset(CoordinateType offset)
{
  const double shift = offset - getInterpolation().getOffset();
  mean_ += shift;
  monoisotopic_mz_ += shift;
  InterpolationModel::setOffset(offset);
  // mirror the shifted centroid into the parameter object
  param_.setValue("statistics:mean", mean_);
}
/// Returns the current offset of the underlying interpolation.
IsotopeModel::CoordinateType IsotopeModel::getOffset()
{
  return getInterpolation().getOffset();
}
/// Returns the configured charge state.
UInt IsotopeModel::getCharge() const
{
  return charge_;
}
/// Returns the model center: the monoisotopic m/z (not the centroid mean_).
IsotopeModel::CoordinateType IsotopeModel::getCenter() const
{
  return monoisotopic_mz_;
}
/// Pulls the current parameter values into the members. Unlike GaussModel,
/// this does NOT re-sample the table here; setSamples(formula) must be called
/// explicitly with a formula.
void IsotopeModel::updateMembers_()
{
  InterpolationModel::updateMembers_();
  charge_ = param_.getValue("charge");
  isotope_stdev_ = param_.getValue("isotope:mode:GaussianSD");
  isotope_lorentz_fwhm_ = param_.getValue("isotope:mode:LorentzFWHM");
  mean_ = param_.getValue("statistics:mean");
  max_isotope_ = param_.getValue("isotope:maximum");
  trim_right_cutoff_ = param_.getValue("isotope:trim_right_cutoff");
  isotope_distance_ = param_.getValue("isotope:distance");
  // averagine composition factors (atoms per Dalton)
  averagine_[C] = param_.getValue("averagines:C");
  averagine_[H] = param_.getValue("averagines:H");
  averagine_[N] = param_.getValue("averagines:N");
  averagine_[O] = param_.getValue("averagines:O");
  averagine_[S] = param_.getValue("averagines:S");
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/PeakWidthEstimator.cpp | .cpp | 2,332 | 76 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Lars Nilse $
// $Authors: Lars Nilse $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/PeakWidthEstimator.h>
namespace OpenMS
{
/// Collects (m/z, peak width) pairs from all picked spectra and fits a
/// B-spline (width as a function of m/z) through them.
/// @param exp_picked  centroided experiment
/// @param boundaries  per-spectrum peak boundaries, parallel to exp_picked
/// @throws Exception::UnableToFit if no peaks are available or the spline fit fails
PeakWidthEstimator::PeakWidthEstimator(const PeakMap & exp_picked, const std::vector<std::vector<PeakPickerHiRes::PeakBoundary> > & boundaries)
{
  std::vector<double> peaks_mz;
  std::vector<double> peaks_width;
  // walk spectra and their boundary lists in lock-step; stop at the shorter one
  PeakMap::ConstIterator it_rt;
  std::vector<std::vector<PeakPickerHiRes::PeakBoundary> >::const_iterator it_rt_boundaries;
  for (it_rt = exp_picked.begin(), it_rt_boundaries = boundaries.begin();
       it_rt < exp_picked.end() && it_rt_boundaries < boundaries.end();
       ++it_rt, ++it_rt_boundaries)
  {
    MSSpectrum::ConstIterator it_mz;
    std::vector<PeakPickerHiRes::PeakBoundary>::const_iterator it_mz_boundary;
    for (it_mz = it_rt->begin(), it_mz_boundary = it_rt_boundaries->begin();
         it_mz < it_rt->end() && it_mz_boundary < it_rt_boundaries->end();
         ++it_mz, ++it_mz_boundary)
    {
      peaks_mz.push_back(it_mz->getMZ());
      // width = extent of the picked peak in m/z
      peaks_width.push_back((*it_mz_boundary).mz_max - (*it_mz_boundary).mz_min);
    }
  }
  // guard: front()/back() below are undefined on an empty vector
  if (peaks_mz.empty())
  {
    throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Unable to fit B-spline to data.", "No peaks available for peak width estimation.");
  }
  // NOTE(review): assumes the first/last collected m/z bracket the data range;
  // strictly true only within a single sorted spectrum -- confirm intent.
  mz_min_ = peaks_mz.front();
  mz_max_ = peaks_mz.back();
  bspline_ = new BSpline2d(peaks_mz, peaks_width, std::min(500.0, (mz_max_ - mz_min_)/2), BSpline2d::BC_ZERO_SECOND, 1);
  if (!(*bspline_).ok())
  {
    // the destructor does not run when a constructor throws, so the spline
    // must be released manually here to avoid a memory leak
    delete bspline_;
    bspline_ = nullptr;
    throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Unable to fit B-spline to data.", "");
  }
}
/// Destructor: releases the heap-allocated B-spline (bspline_ is an owning raw pointer).
PeakWidthEstimator::~PeakWidthEstimator()
{
  delete bspline_;
}
/// Evaluates the fitted peak-width spline at the given m/z. Positions outside
/// the fitted range are clamped to the nearest boundary instead of
/// extrapolating.
/// @throws Exception::InvalidValue if the spline yields a negative width
double PeakWidthEstimator::getPeakWidth(double mz)
{
  // clamp the query into the supported m/z range
  double query = mz;
  if (query < mz_min_)
  {
    query = mz_min_;
  }
  else if (query > mz_max_)
  {
    query = mz_max_;
  }
  const double width = bspline_->eval(query);
  // a negative width is physically meaningless -> report as invalid
  if (width < 0)
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Estimated peak width is negative.", "");
  }
  return width;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/BaseModel.cpp | .cpp | 473 | 15 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/BaseModel.h>
#include <OpenMS/FEATUREFINDER/BaseModel_impl.h>
namespace OpenMS
{
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/MultiplexSatelliteCentroided.cpp | .cpp | 761 | 31 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Lars Nilse $
// $Authors: Lars Nilse $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/MultiplexSatelliteCentroided.h>
using namespace std;
namespace OpenMS
{
/// Constructs a centroided satellite from its RT (spectrum) and m/z (peak) indices.
MultiplexSatelliteCentroided::MultiplexSatelliteCentroided(size_t rt_idx, size_t mz_idx) :
  rt_idx_(rt_idx), mz_idx_(mz_idx)
{
}
/// Returns the stored m/z index.
size_t MultiplexSatelliteCentroided::getMZidx() const
{
  return mz_idx_;
}
/// Returns the stored RT index.
size_t MultiplexSatelliteCentroided::getRTidx() const
{
  return rt_idx_;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/IsotopeFitter1D.cpp | .cpp | 4,002 | 127 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/IsotopeFitter1D.h>
#include <OpenMS/FEATUREFINDER/IsotopeModel.h>
#include <OpenMS/FEATUREFINDER/GaussModel.h>
#include <OpenMS/CHEMISTRY/AASequence.h>
namespace OpenMS
{
/// Default constructor: registers the fitter's parameters (statistics, charge,
/// isotope settings, interpolation step) and syncs the members.
IsotopeFitter1D::IsotopeFitter1D() :
  MaxLikeliFitter1D()
{
  setName("IsotopeFitter1D");
  defaults_.setValue("statistics:variance", 1.0, "Variance of the model.", {"advanced"});
  defaults_.setValue("charge", 1, "Charge state of the model.", {"advanced"});
  defaults_.setValue("isotope:stdev", 1.0, "Standard deviation of gaussian applied to the averagine isotopic pattern to simulate the inaccuracy of the mass spectrometer.", {"advanced"});
  defaults_.setValue("isotope:maximum", 100, "Maximum isotopic rank to be considered.", {"advanced"});
  defaults_.setValue("interpolation_step", 0.1, "Sampling rate for the interpolation of the model function.", {"advanced"});
  // copy defaults into param_ and trigger updateMembers_()
  defaultsToParam_();
}
/// Copy constructor.
/// NOTE(review): unlike the model classes in this area (e.g. GaussModel),
/// there is no setParameters(source.getParameters()) here -- presumably the
/// base copy constructor already copies the parameter set; confirm.
IsotopeFitter1D::IsotopeFitter1D(const IsotopeFitter1D& source) :
  MaxLikeliFitter1D(source)
{
  updateMembers_();
}
IsotopeFitter1D::~IsotopeFitter1D() = default;
/// Assignment operator: self-assignment safe; members are rebuilt from the
/// (base-copied) parameters via updateMembers_().
IsotopeFitter1D& IsotopeFitter1D::operator=(const IsotopeFitter1D& source)
{
  if (&source == this)
  {
    return *this;
  }
  MaxLikeliFitter1D::operator=(source);
  updateMembers_();
  return *this;
}
/// Fits a 1D isotope model (or a plain Gaussian if charge_ == 0) to the given
/// data points and optimizes the model offset.
/// @param set    data points to fit (assumed non-empty: set[0] is read unconditionally)
/// @param model  output: the constructed and offset-fitted model
/// @return fit quality (Pearson correlation from fitOffset_); -1.0 if it was NaN
IsotopeFitter1D::QualityType IsotopeFitter1D::fit1d(const RawDataArrayType& set, std::unique_ptr<InterpolationModel>& model)
{
  // Calculate bounding box
  CoordinateType min_bb = set[0].getPos(), max_bb = set[0].getPos();
  for (UInt pos = 1; pos < set.size(); ++pos)
  {
    CoordinateType tmp = set[pos].getPos();
    if (min_bb > tmp)
    {
      min_bb = tmp;
    }
    if (max_bb < tmp)
    {
      max_bb = tmp;
    }
  }
  // Enlarge the bounding box by a few multiples of the standard deviation
  const CoordinateType stdev = sqrt(statistics_.variance()) * tolerance_stdev_box_;
  min_bb -= stdev;
  max_bb += stdev;
  // build model
  if (charge_ == 0)
  {
    // uncharged: fall back to a simple Gaussian over the bounding box
    model = std::unique_ptr<InterpolationModel>(new GaussModel());
    model->setInterpolationStep(interpolation_step_);
    Param tmp;
    tmp.setValue("bounding_box:min", min_bb);
    tmp.setValue("bounding_box:max", max_bb);
    tmp.setValue("statistics:variance", statistics_.variance());
    tmp.setValue("statistics:mean", statistics_.mean());
    model->setParameters(tmp);
  }
  else
  {
    // charged: full isotope pattern model
    model = std::unique_ptr<InterpolationModel>(new IsotopeModel());
    // NOTE(review): copies an "isotope_model:" parameter subtree; whether this
    // fitter's own parameters contain such a prefix is not visible here -- confirm.
    Param iso_param = this->param_.copy("isotope_model:", true);
    iso_param.removeAll("stdev");
    model->setParameters(iso_param);
    model->setInterpolationStep(interpolation_step_);
    Param tmp;
    tmp.setValue("statistics:mean", statistics_.mean());
    tmp.setValue("charge", static_cast<Int>(charge_));
    tmp.setValue("isotope:mode:GaussianSD", isotope_stdev_);
    tmp.setValue("isotope:maximum", max_isotope_);
    model->setParameters(tmp);
    // sample the model from the averagine formula for the configured mean/charge
    (dynamic_cast<IsotopeModel*>(model.get()))->setSamples((dynamic_cast<IsotopeModel*>(model.get()))->getFormula());
  }
  // fit offset
  QualityType quality;
  quality = fitOffset_(model, set, stdev, stdev, interpolation_step_);
  // NaN correlation (e.g. constant data) is mapped to the worst quality
  if (std::isnan(quality))
  {
    quality = -1.0;
  }
  return quality;
}
/// Pulls the current parameter values into the cached members.
void IsotopeFitter1D::updateMembers_()
{
  MaxLikeliFitter1D::updateMembers_();
  statistics_.setVariance(param_.getValue("statistics:variance"));
  charge_ = param_.getValue("charge");
  isotope_stdev_ = param_.getValue("isotope:stdev");
  max_isotope_ = param_.getValue("isotope:maximum");
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/MaxLikeliFitter1D.cpp | .cpp | 2,613 | 77 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/MaxLikeliFitter1D.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/FEATUREFINDER/InterpolationModel.h>
namespace OpenMS
{
/// Forwards to the base class; this fitter adds no members of its own to sync.
void MaxLikeliFitter1D::updateMembers_()
{
  Fitter1D::updateMembers_();
}
/// Optimizes the model's offset by grid search: tries offsets in
/// [supportMin - stdev1, supportMin + stdev2] in steps of offset_step and
/// keeps the offset with the highest Pearson correlation between model
/// intensities and the observed data. The model is left at the best offset.
/// @param model        model to shift (modified in place)
/// @param set          observed data points
/// @param stdev1       search extent below the support start
/// @param stdev2       search extent above the support start
/// @param offset_step  grid spacing of the offset search
/// @return best Pearson correlation found
MaxLikeliFitter1D::QualityType MaxLikeliFitter1D::fitOffset_(std::unique_ptr<InterpolationModel>& model,
                                                             const RawDataArrayType & set,
                                                             const CoordinateType stdev1,
                                                             const CoordinateType stdev2,
                                                             const CoordinateType offset_step) const
{
  // NOTE(review): both bounds are anchored at supportMin() (not supportMax()),
  // i.e. the window brackets the support start -- presumably intentional since
  // setOffset() shifts the whole support; confirm.
  const CoordinateType offset_min = model->getInterpolation().supportMin() - stdev1;
  const CoordinateType offset_max = model->getInterpolation().supportMin() + stdev2;
  CoordinateType offset;
  QualityType correlation;
  //test model with default offset
  std::vector<float> real_data;
  real_data.reserve(set.size());
  std::vector<float> model_data;
  model_data.reserve(set.size());
  for (Size i = 0; i < set.size(); ++i)
  {
    real_data.push_back(set[i].getIntensity());
    model_data.push_back(model->getIntensity(DPosition<1>(set[i].getPosition())));
  }
  // baseline: correlation at the model's current offset
  CoordinateType max_offset = model->getInterpolation().getOffset();
  QualityType max_correlation = Math::pearsonCorrelationCoefficient(real_data.begin(), real_data.end(), model_data.begin(), model_data.end());
  //test different offsets
  for (offset = offset_min; offset <= offset_max; offset += offset_step)
  {
    // set offset
    model->setOffset(offset);
    // get samples
    model_data.clear();
    for (Size i = 0; i < set.size(); ++i)
    {
      model_data.push_back(model->getIntensity(DPosition<1>(set[i].getPosition())));
    }
    correlation = Math::pearsonCorrelationCoefficient(real_data.begin(), real_data.end(), model_data.begin(), model_data.end());
    // keep the best-scoring offset seen so far
    if (correlation > max_correlation)
    {
      max_correlation = correlation;
      max_offset = offset;
    }
  }
  // restore/apply the winning offset before returning
  model->setOffset(max_offset);
  return max_correlation;
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/FeatureFinderAlgorithmPicked.cpp | .cpp | 86,316 | 2,202 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Marc Sturm, Tom Lukas Lankenau $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/FeatureFinderAlgorithmPicked.h>
#include <OpenMS/FEATUREFINDER/EGHTraceFitter.h>
#include <OpenMS/FEATUREFINDER/GaussTraceFitter.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FORMAT/TextFile.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <OpenMS/CHEMISTRY/Element.h>
#include <OpenMS/CHEMISTRY/ElementDB.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/IsotopeDistribution.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/CoarseIsotopePatternGenerator.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <QtCore/QDir>
#include <boost/math/special_functions/fpclassify.hpp> // isnan
#ifdef _OPENMP
#include <omp.h>
#endif
namespace OpenMS
{
/// Default constructor.
/// Registers every algorithm parameter (default value, numeric restrictions,
/// valid strings and user-facing documentation) in defaults_ and finally
/// copies them into param_ via defaultsToParam_().
FeatureFinderAlgorithmPicked::FeatureFinderAlgorithmPicked() :
  DefaultParamHandler("FeatureFinderAlgorithmPicked")
{
  //debugging
  defaults_.setValue("write_debug", "false", "When debug mode is activated, several files with intermediate results are written to the folder 'debug' (do not use in parallel mode).");
  defaults_.setValidStrings("write_debug", {"true","false"});
  //intensity
  defaults_.setValue("intensity:bins", 10, "Number of bins per dimension (RT and m/z). The higher this value, the more local the intensity significance score is.\nThis parameter should be decreased, if the algorithm is used on small regions of a map.");
  defaults_.setMinInt("intensity:bins", 1);
  defaults_.setSectionDescription("intensity", "Settings for the calculation of a score indicating if a peak's intensity is significant in the local environment (between 0 and 1)");
  //mass trace search parameters
  // fixed typo in help text: "smaller than that 1/charge_high" -> "smaller than 1/charge_high"
  defaults_.setValue("mass_trace:mz_tolerance", 0.03, "Tolerated m/z deviation of peaks belonging to the same mass trace.\nIt should be larger than the m/z resolution of the instrument.\nThis value must be smaller than 1/charge_high!");
  defaults_.setMinFloat("mass_trace:mz_tolerance", 0.0);
  defaults_.setValue("mass_trace:min_spectra", 10, "Number of spectra that have to show a similar peak mass in a mass trace.");
  defaults_.setMinInt("mass_trace:min_spectra", 1);
  defaults_.setValue("mass_trace:max_missing", 1, "Number of consecutive spectra where a high mass deviation or missing peak is acceptable.\nThis parameter should be well below 'min_spectra'!");
  defaults_.setMinInt("mass_trace:max_missing", 0);
  defaults_.setValue("mass_trace:slope_bound", 0.1, "The maximum slope of mass trace intensities when extending from the highest peak.\nThis parameter is important to separate overlapping elution peaks.\nIt should be increased if feature elution profiles fluctuate a lot.");
  defaults_.setMinFloat("mass_trace:slope_bound", 0.0);
  defaults_.setSectionDescription("mass_trace", "Settings for the calculation of a score indicating if a peak is part of a mass trace (between 0 and 1).");
  //Isotopic pattern search parameters
  defaults_.setValue("isotopic_pattern:charge_low", 1, "Lowest charge to search for.");
  defaults_.setMinInt("isotopic_pattern:charge_low", 1);
  defaults_.setValue("isotopic_pattern:charge_high", 4, "Highest charge to search for.");
  defaults_.setMinInt("isotopic_pattern:charge_high", 1);
  // fixed typo in help text: "smaller than that 1/charge_high" -> "smaller than 1/charge_high"
  defaults_.setValue("isotopic_pattern:mz_tolerance", 0.03, "Tolerated m/z deviation from the theoretical isotopic pattern.\nIt should be larger than the m/z resolution of the instrument.\nThis value must be smaller than 1/charge_high!");
  defaults_.setMinFloat("isotopic_pattern:mz_tolerance", 0.0);
  defaults_.setValue("isotopic_pattern:intensity_percentage", 10.0, "Isotopic peaks that contribute more than this percentage to the overall isotope pattern intensity must be present.", {"advanced"});
  defaults_.setMinFloat("isotopic_pattern:intensity_percentage", 0.0);
  defaults_.setMaxFloat("isotopic_pattern:intensity_percentage", 100.0);
  defaults_.setValue("isotopic_pattern:intensity_percentage_optional", 0.1, "Isotopic peaks that contribute more than this percentage to the overall isotope pattern intensity can be missing.", {"advanced"});
  defaults_.setMinFloat("isotopic_pattern:intensity_percentage_optional", 0.0);
  defaults_.setMaxFloat("isotopic_pattern:intensity_percentage_optional", 100.0);
  defaults_.setValue("isotopic_pattern:optional_fit_improvement", 2.0, "Minimal percental improvement of isotope fit to allow leaving out an optional peak.", {"advanced"});
  defaults_.setMinFloat("isotopic_pattern:optional_fit_improvement", 0.0);
  defaults_.setMaxFloat("isotopic_pattern:optional_fit_improvement", 100.0);
  defaults_.setValue("isotopic_pattern:mass_window_width", 25.0, "Window width in Dalton for precalculation of estimated isotope distributions.", {"advanced"});
  defaults_.setMinFloat("isotopic_pattern:mass_window_width", 1.0);
  defaults_.setMaxFloat("isotopic_pattern:mass_window_width", 200.0);
  defaults_.setValue("isotopic_pattern:abundance_12C", 98.93, "Rel. abundance of the light carbon. Modify if labeled.", {"advanced"});
  defaults_.setMinFloat("isotopic_pattern:abundance_12C", 0.0);
  defaults_.setMaxFloat("isotopic_pattern:abundance_12C", 100.0);
  defaults_.setValue("isotopic_pattern:abundance_14N", 99.632, "Rel. abundance of the light nitrogen. Modify if labeled.", {"advanced"});
  defaults_.setMinFloat("isotopic_pattern:abundance_14N", 0.0);
  defaults_.setMaxFloat("isotopic_pattern:abundance_14N", 100.0);
  defaults_.setSectionDescription("isotopic_pattern", "Settings for the calculation of a score indicating if a peak is part of a isotopic pattern (between 0 and 1).");
  //Seed settings
  // fixed typos in help text: "averagene" -> "averagine", "an gaussian" -> "a gaussian"
  defaults_.setValue("seed:min_score", 0.8, "Minimum seed score a peak has to reach to be used as seed.\nThe seed score is the geometric mean of intensity score, mass trace score and isotope pattern score.\nIf your features show a large deviation from the averagine isotope distribution or from a gaussian elution profile, lower this score.");
  defaults_.setMinFloat("seed:min_score", 0.0);
  defaults_.setMaxFloat("seed:min_score", 1.0);
  defaults_.setSectionDescription("seed", "Settings that determine which peaks are considered a seed");
  //Fitting settings
  defaults_.setValue("fit:max_iterations", 500, "Maximum number of iterations of the fit.", {"advanced"});
  defaults_.setMinInt("fit:max_iterations", 1);
  defaults_.setSectionDescription("fit", "Settings for the model fitting");
  //Feature settings
  defaults_.setValue("feature:min_score", 0.7, "Feature score threshold for a feature to be reported.\nThe feature score is the geometric mean of the average relative deviation and the correlation between the model and the observed peaks.");
  defaults_.setMinFloat("feature:min_score", 0.0);
  defaults_.setMaxFloat("feature:min_score", 1.0);
  defaults_.setValue("feature:min_isotope_fit", 0.8, "Minimum isotope fit of the feature before model fitting.", {"advanced"});
  defaults_.setMinFloat("feature:min_isotope_fit", 0.0);
  defaults_.setMaxFloat("feature:min_isotope_fit", 1.0);
  defaults_.setValue("feature:min_trace_score", 0.5, "Trace score threshold.\nTraces below this threshold are removed after the model fitting.\nThis parameter is important for features that overlap in m/z dimension.", {"advanced"});
  defaults_.setMinFloat("feature:min_trace_score", 0.0);
  defaults_.setMaxFloat("feature:min_trace_score", 1.0);
  defaults_.setValue("feature:min_rt_span", 0.333, "Minimum RT span in relation to extended area that has to remain after model fitting.", {"advanced"});
  defaults_.setMinFloat("feature:min_rt_span", 0.0);
  defaults_.setMaxFloat("feature:min_rt_span", 1.0);
  defaults_.setValue("feature:max_rt_span", 2.5, "Maximum RT span in relation to extended area that the model is allowed to have.", {"advanced"});
  defaults_.setMinFloat("feature:max_rt_span", 0.5);
  defaults_.setValue("feature:rt_shape", "symmetric", "Choose model used for RT profile fitting. If set to symmetric a gauss shape is used, in case of asymmetric an EGH shape is used.", {"advanced"});
  defaults_.setValidStrings("feature:rt_shape", {"symmetric","asymmetric"});
  defaults_.setValue("feature:max_intersection", 0.35, "Maximum allowed intersection of features.", {"advanced"});
  defaults_.setMinFloat("feature:max_intersection", 0.0);
  defaults_.setMaxFloat("feature:max_intersection", 1.0);
  defaults_.setValue("feature:reported_mz", "monoisotopic", "The mass type that is reported for features.\n'maximum' returns the m/z value of the highest mass trace.\n'average' returns the intensity-weighted average m/z value of all contained peaks.\n'monoisotopic' returns the monoisotopic m/z value derived from the fitted isotope model.");
  defaults_.setValidStrings("feature:reported_mz", {"maximum","average","monoisotopic"});
  defaults_.setSectionDescription("feature", "Settings for the features (intensity, quality assessment, ...)");
  //user-specified seed settings
  defaults_.setValue("user-seed:rt_tolerance", 5.0, "Allowed RT deviation of seeds from the user-specified seed position.");
  defaults_.setMinFloat("user-seed:rt_tolerance", 0.0);
  defaults_.setValue("user-seed:mz_tolerance", 1.1, "Allowed m/z deviation of seeds from the user-specified seed position.");
  defaults_.setMinFloat("user-seed:mz_tolerance", 0.0);
  defaults_.setValue("user-seed:min_score", 0.5, "Overwrites 'seed:min_score' for user-specified seeds. The cutoff is typically a bit lower in this case.");
  defaults_.setMinFloat("user-seed:min_score", 0.0);
  defaults_.setMaxFloat("user-seed:min_score", 1.0);
  defaults_.setSectionDescription("user-seed", "Settings for user-specified seeds.");
  //advanced/debugging settings
  // NOTE(review): description below is truncated ("used when .") in the original
  // source -- the intended condition is missing; TODO: complete the sentence.
  defaults_.setValue("advanced:pseudo_rt_shift", 500.0, "Pseudo RT shift used when .", {"advanced"});
  defaults_.setMinFloat("advanced:pseudo_rt_shift", 1.0);
  // copy defaults_ into the active parameter set param_
  this->defaultsToParam_();
}
void FeatureFinderAlgorithmPicked::setSeeds(const FeatureMap& seeds)
{
seeds_ = seeds;
}
/// Sets the input peak map and the output feature container.
/// The experiment is copied, because run() annotates it with per-peak
/// float data arrays (scores); the feature map is only referenced, so
/// it must outlive this object while run() is executing.
void FeatureFinderAlgorithmPicked::setData(const MSExperiment& map, FeatureMap& features)
{
  features_ = &features;
  map_ = map;
}
void FeatureFinderAlgorithmPicked::run()
{
//-------------------------------------------------------------------------
// General initialization
//---------------------------------------------------------------------------
//quality estimation
double min_feature_score = param_.getValue("feature:min_score");
// charges to look at
SignedSize charge_low = (Int)param_.getValue("isotopic_pattern:charge_low");
SignedSize charge_high = (Int)param_.getValue("isotopic_pattern:charge_high");
//fitting settings
UInt max_iterations = param_.getValue("fit:max_iterations");
Size max_isotopes = 20;
//check if non-natural isotopic abundances are set. If so modify
double abundance_12C = param_.getValue("isotopic_pattern:abundance_12C");
double abundance_14N = param_.getValue("isotopic_pattern:abundance_14N");
const Element* carbon_const = ElementDB::getInstance()->getElement("Carbon");
Element* carbon = const_cast<Element*>(carbon_const);
if (param_.getValue("isotopic_pattern:abundance_12C") != defaults_.getValue("isotopic_pattern:abundance_12C"))
{
max_isotopes += 1000; // Why?
IsotopeDistribution isotopes;
isotopes.insert(12, abundance_12C / 100.0);
isotopes.insert(13, 1.0 - (abundance_12C / 100.0));
carbon->setIsotopeDistribution(isotopes);
}
const Element* nitrogen_const = ElementDB::getInstance()->getElement("Nitrogen");
Element* nitrogen = const_cast<Element*>(nitrogen_const);
if (param_.getValue("isotopic_pattern:abundance_14N") != defaults_.getValue("isotopic_pattern:abundance_14N"))
{
max_isotopes += 1000; // Why?
IsotopeDistribution isotopes;
isotopes.insert(14, abundance_14N / 100.0);
isotopes.insert(15, 1.0 - (abundance_14N / 100.0));
nitrogen->setIsotopeDistribution(isotopes);
}
// initialize trace fitter parameters here to avoid
// https://github.com/OpenMS/OpenMS/issues/147
Param trace_fitter_params;
trace_fitter_params.setValue("max_iteration", max_iterations);
//flag for user-specified seed mode
bool user_seeds = (!seeds_.empty());
if (user_seeds)
{
seeds_.sortByMZ();
}
double user_rt_tol = param_.getValue("user-seed:rt_tolerance");
double user_mz_tol = param_.getValue("user-seed:mz_tolerance");
double user_seed_score = param_.getValue("user-seed:min_score");
//reserve space for calculated scores
UInt charge_count = charge_high - charge_low + 1;
for (auto& s : map_)
{
Size scan_size = s.size();
s.getFloatDataArrays().resize(3 + 2 * charge_count);
s.getFloatDataArrays()[0].setName("trace_score");
s.getFloatDataArrays()[0].assign(scan_size, 0.0);
s.getFloatDataArrays()[1].setName("intensity_score");
s.getFloatDataArrays()[1].assign(scan_size, 0.0);
s.getFloatDataArrays()[2].setName("local_max");
s.getFloatDataArrays()[2].assign(scan_size, 0.0);
//create isotope pattern score arrays
UInt charge = charge_low;
for (Size i = 3; i < 3 + charge_count; ++i)
{
s.getFloatDataArrays()[i].setName(String("pattern_score_") + charge);
s.getFloatDataArrays()[i].assign(scan_size, 0.0);
++charge;
}
//create overall score arrays
charge = charge_low;
for (Size i = 3 + charge_count; i < 3 + 2 * charge_count; ++i)
{
s.getFloatDataArrays()[i].setName(String("overall_score_") + charge);
s.getFloatDataArrays()[i].assign(scan_size, 0.0);
++charge;
}
}
debug_ = param_.getValue("write_debug").toBool();
//clean up / create folders for debug information
if (debug_)
{
QDir dir(".");
dir.mkpath("debug/features");
log_.open("debug/log.txt");
}
//---------------------------------------------------------------------------
// Step 1:
// Precalculate intensity scores for peaks
//---------------------------------------------------------------------------
if (debug_) log_ << "Precalculating intensity thresholds ...\n";
//new scope to make local variables disappear
{
startProgress(0, intensity_bins_ * intensity_bins_, "Precalculating intensity scores");
double rt_start = map_.spectrumRanges().byMSLevel(1).getMinRT();
double mz_start = map_.spectrumRanges().byMSLevel(1).getMinMZ();
intensity_rt_step_ = (map_.spectrumRanges().byMSLevel(1).getMaxRT() - rt_start) / (double)intensity_bins_;
intensity_mz_step_ = (map_.spectrumRanges().byMSLevel(1).getMaxMZ() - mz_start) / (double)intensity_bins_;
intensity_thresholds_.resize(intensity_bins_);
for (Size rt = 0; rt < intensity_bins_; ++rt)
{
intensity_thresholds_[rt].resize(intensity_bins_);
double min_rt = rt_start + rt * intensity_rt_step_;
double max_rt = rt_start + (rt + 1) * intensity_rt_step_;
std::vector<double> tmp;
for (Size mz = 0; mz < intensity_bins_; ++mz)
{
setProgress(rt * intensity_bins_ + mz);
double min_mz = mz_start + mz * intensity_mz_step_;
double max_mz = mz_start + (mz + 1) * intensity_mz_step_;
//std::cout << "rt range: " << min_rt << " - " << max_rt << '\n';
//std::cout << "mz range: " << min_mz << " - " << max_mz << '\n';
tmp.clear();
for (MapType::ConstAreaIterator it = map_.areaBeginConst(min_rt, max_rt, min_mz, max_mz); it != map_.areaEndConst(); ++it)
{
tmp.push_back(it->getIntensity());
}
//init vector
intensity_thresholds_[rt][mz].assign(21, 0.0);
//store quantiles (20)
if (!tmp.empty())
{
std::sort(tmp.begin(), tmp.end());
for (Size i = 0; i < 21; ++i)
{
Size index = (Size)std::floor(0.05 * i * (tmp.size() - 1));
intensity_thresholds_[rt][mz][i] = tmp[index];
}
}
}
}
//store intensity score in PeakInfo
for (Size s = 0; s < map_.size(); ++s)
{
for (Size p = 0; p < map_[s].size(); ++p)
{
map_[s].getFloatDataArrays()[1][p] = intensityScore_(s, p);
}
}
endProgress();
}
//---------------------------------------------------------------------------
// Step 2:
// Precalculate mass trace scores and local trace maximum for each peak
//---------------------------------------------------------------------------
//new scope to make local variables disappear
{
Size end_iteration = map_.size() - std::min((Size)min_spectra_, map_.size());
startProgress(min_spectra_, end_iteration, "Precalculating mass trace scores");
// skip first and last scans since we cannot extend the mass traces there
for (Size s = min_spectra_; s < end_iteration; ++s)
{
setProgress(s);
SpectrumType& spectrum = map_[s];
//iterate over all peaks of the scan
for (Size p = 0; p < spectrum.size(); ++p)
{
double trace_score = 0.0;
double pos = spectrum[p].getMZ();
float intensity = spectrum[p].getIntensity();
//if(debug_) log_ << '\n' << "Peak: " << pos << '\n';
bool is_max_peak = true; // checking the maximum intensity peaks -> use them later as feature seeds.
for (Size i = 1; i <= min_spectra_; ++i)
{
SpectrumType& next_spectrum = map_[s + i];
if (!next_spectrum.empty()) // There are peaks in the spectrum
{
Size spec_index = next_spectrum.findNearest(pos);
double position_score = positionScore_(pos, next_spectrum[spec_index].getMZ(), trace_tolerance_);
if (position_score > 0 && next_spectrum[spec_index].getIntensity() > intensity) is_max_peak = false;
trace_score += position_score;
}
}
for (Size i = 1; i <= min_spectra_; ++i)
{
SpectrumType& next_spectrum = map_[s - i];
if (!next_spectrum.empty()) // There are peaks in the spectrum
{
Size spec_index = next_spectrum.findNearest(pos);
double position_score = positionScore_(pos, next_spectrum[spec_index].getMZ(), trace_tolerance_);
if (position_score > 0 && next_spectrum[spec_index].getIntensity() > intensity)
{
is_max_peak = false;
}
trace_score += position_score;
}
}
//Calculate a consensus score out of the scores calculated before
trace_score /= 2 * min_spectra_;
//store final score for later use
spectrum.getFloatDataArrays()[0][p] = trace_score;
spectrum.getFloatDataArrays()[2][p] = is_max_peak;
}
}
endProgress();
}
//---------------------------------------------------------------------------
// Step 2.5:
// Precalculate isotope distributions for interesting mass ranges
//---------------------------------------------------------------------------
//new scope to make local variables disappear
{
double max_mass = map_.spectrumRanges().byMSLevel(1).getMaxMZ() * charge_high;
Size num_isotopes = std::ceil(max_mass / mass_window_width_) + 1;
startProgress(0, num_isotopes, "Precalculating isotope distributions");
//reserve enough space
isotope_distributions_.resize(num_isotopes);
//calculate distribution if necessary
for (Size index = 0; index < num_isotopes; ++index)
{
//if(debug_) log_ << "Calculating iso dist for mass: " << 0.5*mass_window_width_ + index * mass_window_width_ << '\n';
CoarseIsotopePatternGenerator solver(max_isotopes);
auto d = solver.estimateFromPeptideWeight(0.5 * mass_window_width_ + index * mass_window_width_);
//trim left and right. And store the number of isotopes on the left, to reconstruct the monoisotopic peak
Size size_before = d.size();
d.trimLeft(intensity_percentage_optional_);
isotope_distributions_[index].trimmed_left = size_before - d.size();
d.trimRight(intensity_percentage_optional_);
for (auto& peak : d)
{
isotope_distributions_[index].intensity.push_back(peak.getIntensity());
//if(debug_) log_ << " - " << it->second << '\n';
}
//determine the number of optional peaks at the beginning/end
Size begin = 0;
Size end = 0;
bool is_begin = true;
bool is_end = false;
for (double i : isotope_distributions_[index].intensity)
{
if (i < intensity_percentage_)
{
if (!is_end && !is_begin)
{
is_end = true;
}
if (is_begin)
{
++begin;
}
else if (is_end)
{
++end;
}
}
else if (is_begin)
{
is_begin = false;
}
}
isotope_distributions_[index].optional_begin = begin;
isotope_distributions_[index].optional_end = end;
//scale the distribution to a maximum of 1
double max = 0.0;
for (double i : isotope_distributions_[index].intensity)
{
if (i > max)
{
max = i;
}
}
isotope_distributions_[index].max = max;
for (double& i : isotope_distributions_[index].intensity)
{
i /= max;
}
//if(debug_) log_ << " - optional begin/end:" << begin << " / " << end << '\n';
}
endProgress();
}
//-------------------------------------------------------------------------
// Step 3:
// Charge loop (create seeds and features for each charge separately)
//-------------------------------------------------------------------------
Int plot_nr_global = -1; // counter for the number of plots (debug info)
Int feature_nr_global = 0; // counter for the number of features (debug info)
for (SignedSize c = charge_low; c <= charge_high; ++c)
{
UInt meta_index_isotope = 3 + c - charge_low;
UInt meta_index_overall = 3 + charge_count + c - charge_low;
Size feature_candidates = 0;
std::vector<Seed> seeds;
//-----------------------------------------------------------
// Step 3.1: Precalculate IsotopePattern score
//-----------------------------------------------------------
startProgress(0, map_.size(), String("Calculating isotope pattern scores for charge ") + String(c));
for (Size s = 0; s < map_.size(); ++s)
{
setProgress(s);
const SpectrumType& spectrum = map_[s];
for (Size p = 0; p < spectrum.size(); ++p)
{
double mz = spectrum[p].getMZ();
//get isotope distribution for this mass
const TheoreticalIsotopePattern& isotopes = getIsotopeDistribution_(mz * c);
//determine highest peak in isotope distribution
Size max_isotope = std::max_element(isotopes.intensity.begin(), isotopes.intensity.end()) - isotopes.intensity.begin();
//Look up expected isotopic peaks (in the current spectrum or adjacent spectra)
Size peak_index = spectrum.findNearest(mz - ((double)(isotopes.size() + 1) / c));
IsotopePattern pattern(isotopes.size());
for (Size i = 0; i < isotopes.size(); ++i)
{
double isotope_pos = mz + ((double)i - max_isotope) / c;
findIsotope_(isotope_pos, s, pattern, i, peak_index);
}
double pattern_score = isotopeScore_(isotopes, pattern, true);
//update pattern scores of all contained peaks (if necessary)
if (pattern_score > 0.0)
{
for (Size i = 0; i < pattern.peak.size(); ++i)
{
if (pattern.peak[i] >= 0 && pattern_score > map_[pattern.spectrum[i]].getFloatDataArrays()[meta_index_isotope][pattern.peak[i]])
{
map_[pattern.spectrum[i]].getFloatDataArrays()[meta_index_isotope][pattern.peak[i]] = pattern_score;
}
}
}
}
}
endProgress();
//-----------------------------------------------------------
// Step 3.2:
// Find seeds for this charge
//-----------------------------------------------------------
Size end_of_iteration = map_.size() - std::min((Size)min_spectra_, map_.size());
startProgress(min_spectra_, end_of_iteration, String("Finding seeds for charge ") + String(c));
double min_seed_score = param_.getValue("seed:min_score");
//do nothing for the first few and last few spectra as the scans required to search for traces are missing
for (Size s = min_spectra_; s < end_of_iteration; ++s)
{
setProgress(s);
//iterate over peaks
for (Size p = 0; p < map_[s].size(); ++p)
{
FloatDataArrays& meta = map_[s].getFloatDataArrays();
double overall_score = std::pow(meta[0][p] * meta[1][p] * meta[meta_index_isotope][p], 1.0f / 3.0f);
meta[meta_index_overall][p] = overall_score;
//add seed to vector if certain conditions are fulfilled
if (meta[2][p] != 0.0) // local maximum of mass trace is prerequisite for all features
{
//automatic seeds: overall score greater than the min seed score
if (!user_seeds && overall_score >= min_seed_score)
{
Seed seed;
seed.spectrum = s;
seed.peak = p;
seed.intensity = map_[s][p].getIntensity();
seeds.push_back(seed);
}
//user-specified seeds: overall score greater than USER min seed score
else if (user_seeds && overall_score >= user_seed_score)
{
//only consider seeds, if they are near a user-specified seed
Feature tmp;
tmp.setMZ(map_[s][p].getMZ() - user_mz_tol);
for (FeatureMap::const_iterator it = std::lower_bound(seeds_.begin(), seeds_.end(), tmp, Feature::MZLess()); it < seeds_.end(); ++it)
{
if (it->getMZ() > map_[s][p].getMZ() + user_mz_tol)
{
break;
}
if (fabs(it->getMZ() - map_[s][p].getMZ()) < user_mz_tol && fabs(it->getRT() - map_[s].getRT()) < user_rt_tol)
{
Seed seed;
seed.spectrum = s;
seed.peak = p;
seed.intensity = map_[s][p].getIntensity();
seeds.push_back(seed);
break;
}
}
}
}
}
}
//sort seeds according to intensity
std::sort(seeds.rbegin(), seeds.rend());
//create and store seeds map and selected peak map
if (debug_)
{
//seeds
FeatureMap seed_map;
seed_map.reserve(seeds.size());
for (auto& seed : seeds)
{
Size spectrum = seed.spectrum;
Size peak = seed.peak;
const FloatDataArrays& meta = map_[spectrum].getFloatDataArrays();
Feature tmp;
tmp.setIntensity(seed.intensity);
tmp.setOverallQuality(meta[meta_index_overall][peak]);
tmp.setRT(map_[spectrum].getRT());
tmp.setMZ(map_[spectrum][peak].getMZ());
tmp.setMetaValue("intensity_score", meta[1][peak]);
tmp.setMetaValue("pattern_score", meta[meta_index_isotope][peak]);
tmp.setMetaValue("trace_score", meta[0][peak]);
seed_map.push_back(tmp);
}
FileHandler().storeFeatures(String("debug/seeds_") + String(c) + ".featureXML", seed_map);
}
endProgress();
std::cout << "Found " << seeds.size() << " seeds for charge " << c << ".\n";
//------------------------------------------------------------------
// Step 3.3:
// Extension of seeds
//------------------------------------------------------------------
// We do not want to store features whose seeds lie within other
// features with higher intensity. We thus store this information in
// the map seeds_in_features which contains for each seed i a vector
// of other seeds that are contained in the corresponding feature i.
//
// The features are stored in an temporary feature map until it is
// decided whether they are contained within a seed of higher
// intensity.
std::map<Size, std::vector<Size>> seeds_in_features;
typedef std::map<Size, Feature> FeatureMapType;
FeatureMapType tmp_feature_map;
int gl_progress = 0;
startProgress(0, seeds.size(), String("Extending seeds for charge ") + String(c));
#pragma omp parallel for
for (SignedSize i = 0; i < (SignedSize)seeds.size(); ++i)
{
//------------------------------------------------------------------
// Step 3.3.1:
// Extend all mass traces
//------------------------------------------------------------------
const SpectrumType& spectrum = map_[seeds[i].spectrum];
const PeakType& peak = spectrum[seeds[i].peak];
IF_MASTERTHREAD
{
setProgress(gl_progress++);
if (debug_)
{
log_ << '\n' << "Seed " << i << ":\n";
//If the intensity is zero this seed is already uses in another feature
log_ << " - Int: " << peak.getIntensity() << '\n';
log_ << " - RT: " << spectrum.getRT() << '\n';
log_ << " - MZ: " << peak.getMZ() << '\n';
}
}
//----------------------------------------------------------------
//Find best fitting isotope pattern for this charge (using averagine)
IsotopePattern best_pattern(0);
double isotope_fit_quality = findBestIsotopeFit_(seeds[i], c, best_pattern);
if (isotope_fit_quality < min_isotope_fit_)
{
abort_(seeds[i], "Could not find good enough isotope pattern containing the seed");
continue;
}
//extend the convex hull in RT dimension (starting from the trace peaks)
MassTraces traces;
traces.reserve(best_pattern.peak.size());
extendMassTraces_(best_pattern, traces, meta_index_overall);
//check if the traces are still valid
double seed_mz = map_[seeds[i].spectrum][seeds[i].peak].getMZ();
if (!traces.isValid(seed_mz, trace_tolerance_))
{
abort_(seeds[i], "Could not extend seed");
continue;
}
//------------------------------------------------------------------
// Step 3.3.2:
// Gauss/EGH fit (first fit to find the feature boundaries)
//------------------------------------------------------------------
Int plot_nr = -1;
#pragma omp critical(FeatureFinderAlgorithmPicked_PLOTNR)
{
plot_nr = ++plot_nr_global;
}
//------------------------------------------------------------------
//TODO try fit with baseline term once more
//baseline estimate
traces.updateBaseline();
traces.baseline = 0.75 * traces.baseline;
traces[traces.max_trace].updateMaximum();
//choose fitter
double egh_tau = 0.0;
std::shared_ptr<TraceFitter> fitter = chooseTraceFitter_(egh_tau);
fitter->setParameters(trace_fitter_params);
fitter->fit(traces);
#if 0
TraceFitter<PeakType>* alt_fitter = new GaussTraceFitter<PeakType>();
Param alt_p;
alt_p.setValue("max_iteration", max_iterations);
alt_fitter->setParameters(alt_p);
alt_fitter->fit(traces);
OPENMS_LOG_DEBUG << "EGH: " << fitter->getCenter() << " " << fitter->getHeight() << '\n';
OPENMS_LOG_DEBUG << "GAUSS: " << alt_fitter->getCenter() << " " << alt_fitter->getHeight() << '\n';
#endif
// what should come out
// left "sigma"
// right "sigma"
// x0 .. "center" position of RT fit
// height .. "height" of RT fit
//------------------------------------------------------------------
//------------------------------------------------------------------
// Step 3.3.3:
// Crop feature according to RT fit (2.5*sigma) and remove badly fitting traces
//------------------------------------------------------------------
MassTraces new_traces;
cropFeature_(fitter, traces, new_traces);
//------------------------------------------------------------------
// Step 3.3.4:
// Check if feature is ok
//------------------------------------------------------------------
String error_msg = "";
double fit_score = 0.0;
double correlation = 0.0;
double final_score = 0.0;
int number_of_datapoints = 0;
bool feature_ok = checkFeatureQuality_(fitter, new_traces, seed_mz, min_feature_score, error_msg, fit_score, correlation, final_score);
{
//write debug output of feature
if (debug_)
{
#pragma omp critical(FeatureFinderAlgorithmPicked_DEBUG)
writeFeatureDebugInfo_(fitter, traces, new_traces, feature_ok, error_msg, final_score, plot_nr, peak);
}
}
//validity output
if (!feature_ok)
{
abort_(seeds[i], error_msg);
continue;
}
traces = new_traces;
//------------------------------------------------------------------
// Step 3.3.5:
// Feature creation
//------------------------------------------------------------------
Feature f;
//set label
f.setMetaValue(3, plot_nr);
f.setCharge(c);
f.setOverallQuality(final_score);
f.setMetaValue("score_fit", fit_score);
f.setMetaValue("score_correlation", correlation);
f.setRT(fitter->getCenter());
f.setWidth(fitter->getFWHM());
// metavalue num_of_datapoints
for (size_t t = 0; t < traces.size(); ++t)
{
number_of_datapoints += traces[t].peaks.size();
}
f.setMetaValue(Constants::UserParam::NUM_OF_DATAPOINTS, number_of_datapoints);
//Extract some of the model parameters.
if (egh_tau != 0.0)
{
egh_tau = (std::dynamic_pointer_cast<EGHTraceFitter>(fitter))->getTau();
f.setMetaValue("EGH_tau", egh_tau);
f.setMetaValue("EGH_height", (std::dynamic_pointer_cast<EGHTraceFitter>(fitter))->getHeight());
f.setMetaValue("EGH_sigma", (std::dynamic_pointer_cast<EGHTraceFitter>(fitter))->getSigma());
}
//Calculate the mass of the feature: maximum, average, monoisotopic
if (reported_mz_ == "maximum")
{
f.setMZ(traces[traces.getTheoreticalmaxPosition()].getAvgMZ());
}
else if (reported_mz_ == "average")
{
double total_intensity = 0.0;
double average_mz = 0.0;
for (Size t = 0; t < traces.size(); ++t)
{
for (auto& p : traces[t].peaks)
{
average_mz += p.second->getMZ() * p.second->getIntensity();
total_intensity += p.second->getIntensity();
}
}
average_mz /= total_intensity;
f.setMZ(average_mz);
}
else if (reported_mz_ == "monoisotopic")
{
double mono_mz = traces[traces.getTheoreticalmaxPosition()].getAvgMZ();
mono_mz -= (Constants::PROTON_MASS_U / c) * (traces.getTheoreticalmaxPosition() + best_pattern.theoretical_pattern.trimmed_left);
f.setMZ(mono_mz);
}
// Calculate intensity based on model only
// - the model does not include the baseline, so we ignore it here
// - as we scaled the isotope distribution to
f.setIntensity(fitter->getArea() / getIsotopeDistribution_(f.getMZ()).max);
//add convex hulls of mass traces
for (Size j = 0; j < traces.size(); ++j)
{
f.getConvexHulls().push_back(traces[j].getConvexhull());
}
#pragma omp critical(FeatureFinderAlgorithmPicked_TMPFEATUREMAP)
{
tmp_feature_map[i] = f;
}
//----------------------------------------------------------------
//Remember all seeds that lie inside the convex hull of the new feature
DBoundingBox<2> bb = f.getConvexHull().getBoundingBox();
for (Size j = i + 1; j < seeds.size(); ++j)
{
double rt = map_[seeds[j].spectrum].getRT();
double mz = map_[seeds[j].spectrum][seeds[j].peak].getMZ();
if (bb.encloses(rt, mz) && f.encloses(rt, mz))
{
#pragma omp critical(FeatureFinderAlgorithmPicked_SEEDSINFEATURES)
{
seeds_in_features[i].push_back(j);
}
}
}
} //end of OPENMP over seeds
// Here we have to evaluate which seeds are already contained in
// features of seeds with higher intensities. Only if the seed is not
// used in any feature with higher intensity, we can add it to the
// features_ list.
std::vector<Size> seeds_contained;
for (auto& f : tmp_feature_map)
{
Size seed_nr = f.first;
bool is_used = false;
for (Size i : seeds_contained)
{
if (seed_nr == i)
{
is_used = true;
break;
}
}
if (!is_used)
{
++feature_candidates;
//re-set label
f.second.setMetaValue(3, feature_nr_global);
++feature_nr_global;
features_->push_back(f.second);
std::vector<Size> curr_seed = seeds_in_features[seed_nr];
for (Size k : curr_seed)
{
seeds_contained.push_back(k);
}
}
}
IF_MASTERTHREAD endProgress();
std::cout << "Found " << feature_candidates << " feature candidates for charge " << c << ".\n";
}
// END OPENMP
//------------------------------------------------------------------
//Step 4:
//Resolve contradicting and overlapping features
//------------------------------------------------------------------
startProgress(0, features_->size() * features_->size(), "Resolving overlapping features");
if (debug_) log_ << "Resolving intersecting features (" << features_->size() << " candidates)\n";
//sort features according to m/z in order to speed up the resolution
features_->sortByMZ();
//precalculate BBs and maximum mz span
std::vector<DBoundingBox<2> > bbs(features_->size());
double max_mz_span = 0.0;
for (Size i = 0; i < features_->size(); ++i)
{
bbs[i] = (*features_)[i].getConvexHull().getBoundingBox();
if (bbs[i].height() > max_mz_span)
{
max_mz_span = bbs[i].height();
}
}
Size removed(0);
//intersect
for (Size i = 0; i < features_->size(); ++i)
{
Feature& f1((*features_)[i]);
for (Size j = i + 1; j < features_->size(); ++j)
{
setProgress(i * features_->size() + j);
Feature& f2((*features_)[j]);
//features that are more than 2 times the maximum m/z span apart do not overlap => abort
if (f2.getMZ() - f1.getMZ() > 2.0 * max_mz_span)
{
break;
}
//do nothing if one of the features is already removed
if (f1.getIntensity() == 0.0 || f2.getIntensity() == 0.0)
{
continue;
}
//do nothing if the overall convex hulls do not overlap
if (!bbs[i].intersects(bbs[j]))
{
continue;
}
//act depending on the intersection
double intersection = intersection_(f1, f2);
if (intersection >= max_feature_intersection_)
{
++removed;
if (debug_)
{
log_ << " - Intersection (" << (i + 1) << "/" << (j + 1) << "): " << intersection << '\n';
}
if (f1.getCharge() == f2.getCharge())
{
if (f1.getIntensity() * f1.getOverallQuality() > f2.getIntensity() * f2.getOverallQuality())
{
if (debug_)
{
log_ << " - same charge -> removing duplicate " << (j + 1) << '\n';
}
f1.getSubordinates().push_back(f2);
f2.setIntensity(0.0);
}
else
{
if (debug_)
{
log_ << " - same charge -> removing duplicate " << (i + 1) << '\n';
}
f2.getSubordinates().push_back(f1);
f1.setIntensity(0.0);
}
}
else if (f2.getCharge() % f1.getCharge() == 0)
{
if (debug_)
{
log_ << " - different charge (one is the multiple of the other) -> removing lower charge " << (i + 1) << '\n';
}
f2.getSubordinates().push_back(f1);
f1.setIntensity(0.0);
}
else if (f1.getCharge() % f2.getCharge() == 0)
{
if (debug_)
{
log_ << " - different charge (one is the multiple of the other) -> removing lower charge " << (i + 1) << '\n';
}
f1.getSubordinates().push_back(f2);
f2.setIntensity(0.0);
}
else
{
if (f1.getOverallQuality() > f2.getOverallQuality())
{
if (debug_)
{
log_ << " - different charge -> removing lower score " << (j + 1) << '\n';
}
f1.getSubordinates().push_back(f2);
f2.setIntensity(0.0);
}
else
{
if (debug_)
{
log_ << " - different charge -> removing lower score " << (i + 1) << '\n';
}
f2.getSubordinates().push_back(f1);
f1.setIntensity(0.0);
}
}
}
}
}
OPENMS_LOG_INFO << "Removed " << removed << " overlapping features.\n";
// finally remove features with intensity 0
FeatureMap tmp;
tmp.reserve(features_->size());
for (Size i = 0; i < features_->size(); ++i)
{
if (features_->operator[](i).getIntensity() != 0.0)
{
tmp.push_back(features_->operator[](i));
}
}
tmp.swapFeaturesOnly(*features_);
// sort features by intensity
features_->sortByIntensity(true);
endProgress();
// Abort reasons
OPENMS_LOG_INFO << '\n';
OPENMS_LOG_INFO << "Info: reasons for not finalizing a feature during its construction:\n";
for (const auto& reason : aborts_)
{
OPENMS_LOG_INFO << " - " << reason.first << ": " << reason.second << " times\n";
}
OPENMS_LOG_INFO << "\n" << features_->size() << " features found.\n";
if (debug_)
{
//store map of abort reasons for failed seeds
FeatureMap abort_map;
abort_map.reserve(abort_reasons_.size());
Size counter = 0;
for (std::map<Seed, String>::iterator it2 = abort_reasons_.begin(); it2 != abort_reasons_.end(); ++it2, ++counter)
{
Feature f;
f.setRT(map_[it2->first.spectrum].getRT());
f.setMZ(map_[it2->first.spectrum][it2->first.peak].getMZ());
f.setIntensity(map_[it2->first.spectrum][it2->first.peak].getIntensity());
f.setMetaValue("label", it2->second);
f.setUniqueId(counter); // ID = index
abort_map.push_back(f);
}
abort_map.setUniqueId();
FileHandler().storeFeatures("debug/abort_reasons.featureXML", abort_map);
//store input map with calculated scores (without overall score)
for (auto& s : map_)
{
s.getFloatDataArrays().erase(s.getFloatDataArrays().begin() + 2);
}
FileHandler().storeExperiment("debug/input.mzML", map_, {FileTypes::MZML});
}
}
/// Convenience entry point: validates the input map, runs the actual feature finding
/// (via setParameters/setData/setSeeds + run()) and annotates each resulting feature
/// with the index and native ID of the spectrum at its RT apex.
/// @param input_map MS1 peak map (may be re-sorted in place if unsorted)
/// @param features output feature map (cleared if the input is empty)
/// @param param algorithm parameters
/// @param seeds user-supplied seed features
/// @throws Exception::IllegalArgument on empty ranges, non-MS1 levels, or negative m/z
void FeatureFinderAlgorithmPicked::run(PeakMap& input_map, FeatureMap& features, const Param& param, const FeatureMap& seeds)
{
  // Nothing to do if there is no data
  if (input_map.empty())
  {
    features.clear(true);
    return;
  }
  // check input
  {
    // We need updated ranges => check number of peaks
    if (input_map.getSize() == 0)
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "FeatureFinder needs updated ranges on input map. Aborting.");
    }
    // We need MS1 data only => check levels
    if (input_map.getMSLevels().size() != 1 || input_map.getMSLevels()[0] != 1)
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "FeatureFinder can only operate on MS level 1 data. Please do not use MS/MS data. Aborting.");
    }
    //Check if the peaks are sorted according to m/z
    if (!input_map.isSorted(true))
    {
      OPENMS_LOG_WARN << "Input map is not sorted by RT and m/z! This is done now, before applying the algorithm!\n";
      input_map.sortSpectra(true);
      input_map.sortChromatograms(true);
    }
    // reject negative m/z values; checking the first peak of each spectrum
    // suffices because spectra are sorted by m/z at this point
    for (Size s = 0; s < input_map.size(); ++s)
    {
      if (input_map[s].empty())
      {
        continue; // empty spectra are allowed, just skip them
      }
      if (input_map[s][0].getMZ() < 0)
      {
        throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "FeatureFinder can only operate on spectra that contain peaks with positive m/z values. Filter the data accordingly beforehand! Aborting.");
      }
    }
  }
  // do the work
  setParameters(param);
  setData(input_map, features);
  setSeeds(seeds);
  run();
  //report RT apex spectrum index and native ID for each feature
  for (Size i = 0; i < features.size(); ++i)
  {
    //index of the first spectrum with RT >= feature RT
    Size spectrum_index = input_map.RTBegin(features[i].getRT()) - input_map.begin();
    features[i].setMetaValue("spectrum_index", spectrum_index);
    //native id (RTBegin() can return end() for features at/after the last RT)
    if (spectrum_index < input_map.size())
    {
      String native_id = input_map[spectrum_index].getNativeID();
      features[i].setMetaValue("spectrum_native_id", native_id);
    }
    else
    {
      /// @todo that happens sometimes using IsotopeWaveletFeatureFinder (Rene, Marc, Andreas, Clemens)
      std::cerr << "FeatureFinderAlgorithm_impl, line=" << __LINE__ << "; FixMe this cannot be, but happens" << std::endl;
    }
  }
}
/// Copies all algorithm parameters from 'param_' into member variables.
/// Called by the Param machinery whenever parameters change.
void FeatureFinderAlgorithmPicked::updateMembers_()
{
  // m/z tolerances: 'pattern_tolerance_' for isotope-pattern matching,
  // 'trace_tolerance_' for mass-trace extension
  pattern_tolerance_ = param_.getValue("mass_trace:mz_tolerance");
  trace_tolerance_ = param_.getValue("isotopic_pattern:mz_tolerance");
  // 'min_spectra' is a full RT window; only half of it (floored) is kept,
  // since traces are extended in each RT direction separately
  min_spectra_ = (UInt) std::floor((double)param_.getValue("mass_trace:min_spectra") * 0.5);
  max_missing_trace_peaks_ = param_.getValue("mass_trace:max_missing");
  slope_bound_ = param_.getValue("mass_trace:slope_bound");
  // percentage parameters are given in [0,100] and stored as fractions in [0,1]
  intensity_percentage_ = (double)param_.getValue("isotopic_pattern:intensity_percentage") / 100.0;
  intensity_percentage_optional_ = (double)param_.getValue("isotopic_pattern:intensity_percentage_optional") / 100.0;
  optional_fit_improvement_ = (double)param_.getValue("isotopic_pattern:optional_fit_improvement") / 100.0;
  mass_window_width_ = param_.getValue("isotopic_pattern:mass_window_width");
  intensity_bins_ = param_.getValue("intensity:bins");
  // feature-level quality thresholds
  min_isotope_fit_ = param_.getValue("feature:min_isotope_fit");
  min_trace_score_ = param_.getValue("feature:min_trace_score");
  min_rt_span_ = param_.getValue("feature:min_rt_span");
  max_rt_span_ = param_.getValue("feature:max_rt_span");
  max_feature_intersection_ = param_.getValue("feature:max_intersection");
  reported_mz_ = param_.getValue("feature:reported_mz").toString();
}
/// Records that feature construction was aborted for 'seed': increments the
/// per-reason counter and, in debug mode, logs the reason and remembers it
/// per seed (used later to write the abort-reason debug feature map).
void FeatureFinderAlgorithmPicked::abort_(const Seed& seed, const String& reason)
{
  const bool verbose = debug_;
  if (verbose)
  {
    log_ << "Abort: " << reason << '\n';
  }
  // the counter is maintained in non-debug mode as well (reported at the end of the run)
  ++aborts_[reason];
  if (verbose)
  {
    abort_reasons_[seed] = reason;
  }
}
/// Computes the RT overlap of two features as a fraction of the smaller feature's
/// summed mass-trace RT span. Returns a value in [0,1]; 0 if either feature has no
/// (or only zero-width) mass-trace hulls.
/// @param f1 first feature
/// @param f2 second feature
double FeatureFinderAlgorithmPicked::intersection_(const Feature& f1, const Feature& f2) const
{
  //calculate the RT range sum of feature 1
  double s1 = 0.0;
  const std::vector<ConvexHull2D>& hulls1 = f1.getConvexHulls();
  for (const auto& i : hulls1)
  {
    s1 += i.getBoundingBox().width();
  }
  //calculate the RT range sum of feature 2
  double s2 = 0.0;
  const std::vector<ConvexHull2D>& hulls2 = f2.getConvexHulls();
  for (const auto& j : hulls2)
  {
    s2 += j.getBoundingBox().width();
  }
  //calculate overlap: sum of RT overlaps of all pairwise intersecting trace bounding boxes
  double overlap = 0.0;
  for (const auto& i : hulls1)
  {
    DBoundingBox<2> bb1 = i.getBoundingBox();
    for (const auto& j : hulls2)
    {
      DBoundingBox<2> bb2 = j.getBoundingBox();
      if (bb1.intersects(bb2))
      {
        // standard 1D interval overlap in the RT dimension (dimension 0);
        // non-negative here because the boxes intersect in both dimensions.
        // This replaces the previous four-branch case distinction (contained /
        // containing / left-overlap / right-overlap), which computed the same value.
        overlap += std::min(bb1.maxPosition()[0], bb2.maxPosition()[0])
                   - std::max(bb1.minPosition()[0], bb2.minPosition()[0]);
      }
    }
  }
  // guard against division by zero: a feature without hulls (or with zero-width
  // traces) previously produced NaN here, which silently compared as "no overlap"
  const double min_span = std::min(s1, s2);
  if (min_span <= 0.0)
  {
    return 0.0;
  }
  return overlap / min_span;
}
/// Returns the precalculated theoretical isotope pattern for the mass-window bin
/// that 'mass' falls into (bins of width 'mass_window_width_').
/// @param mass (neutral) mass to look up
/// @throws Exception::InvalidValue if the bin was not precalculated
const FeatureFinderAlgorithmPickedHelperStructs::TheoreticalIsotopePattern& FeatureFinderAlgorithmPicked::getIsotopeDistribution_(double mass) const
{
  //calculate index in the vector
  Size index = (Size) std::floor(mass / mass_window_width_);
  if (index >= isotope_distributions_.size())
  {
    // the old message reported "Maximum allowed index is size()", which was off by
    // one; report the number of precalculated bins instead (valid indices: [0, size))
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "IsotopeDistribution not precalculated. Number of precalculated distributions is " + String(isotope_distributions_.size()), String(index));
  }
  //Return distribution
  return isotope_distributions_[index];
}
/// Tries all placements of the theoretical isotope pattern (for the given charge)
/// around the seed peak 'center' and keeps the best-scoring one in 'best_pattern'.
/// @param center seed peak (spectrum/peak indices into map_)
/// @param charge assumed charge state
/// @param best_pattern output: pattern with the highest score (only written if a fit scores > 0)
/// @return the best isotope-fit score (0.0 if no valid placement contained the seed)
double FeatureFinderAlgorithmPicked::findBestIsotopeFit_(const Seed& center, UInt charge, IsotopePattern& best_pattern) const
{
  if (debug_)
  {
    log_ << "Testing isotope patterns for charge " << charge << ": \n";
  }
  const SpectrumType& spectrum = map_[center.spectrum];
  // theoretical pattern is looked up by approximate neutral mass (m/z * charge)
  const TheoreticalIsotopePattern& isotopes = getIsotopeDistribution_(spectrum[center.peak].getMZ() * charge);
  if (debug_)
  {
    log_ << " - Seed: " << center.peak << " (mz:" << spectrum[center.peak].getMZ() << ")\n";
  }
  //Find m/z boundaries of search space (linear search as this is local and we have the center already)
  // window: one pattern length (plus one isotope) converted to m/z via the charge
  double mass_window = (double)(isotopes.size() + 1) / (double)charge;
  if (debug_)
  {
    log_ << " - Mass window: " << mass_window << '\n';
  }
  // search end: last peak within +mass_window of the seed
  Size end = center.peak;
  while (end < spectrum.size() &&
         spectrum[end].getMZ() < spectrum[center.peak].getMZ() + mass_window)
  {
    ++end;
  }
  --end;
  //search begin: first peak within -mass_window of the seed (signed index for the downward scan)
  SignedSize begin = center.peak;
  while (begin >= 0 &&
         spectrum[begin].getMZ() > spectrum[center.peak].getMZ() - mass_window)
  {
    --begin;
  }
  ++begin;
  if (debug_)
  {
    log_ << " - Begin: " << begin << " (mz:" << spectrum[begin].getMZ() << ")\n";
  }
  if (debug_)
  {
    log_ << " - End: " << end << " (mz:" << spectrum[end].getMZ() << ")\n";
  }
  //fit isotope distribution to peaks: try each peak in [begin, end] as the pattern's first isotope
  double max_score = 0.0;
  for (Size start = begin; start <= end; ++start)
  {
    //find isotope peaks for the current start peak
    Size peak_index = start; // running hint for findIsotope_'s nearest-peak search
    IsotopePattern pattern(isotopes.size());
    if (debug_)
    {
      log_ << " - Fitting at " << start << " (mz:" << spectrum[start].getMZ() << ")\n";
    }
    for (Size iso = 0; iso < isotopes.size(); ++iso)
    {
      // expected m/z of isotope 'iso' (isotope spacing ~1 Da / charge)
      double pos = spectrum[start].getMZ() + iso / (double)charge;
      findIsotope_(pos, center.spectrum, pattern, iso, peak_index);
    }
    //check if the seed is contained, otherwise abort
    bool seed_contained = false;
    for (Size iso = 0; iso < pattern.peak.size(); ++iso)
    {
      if (pattern.peak[iso] == (Int)center.peak && pattern.spectrum[iso] == center.spectrum)
      {
        seed_contained = true;
        break;
      }
    }
    if (!seed_contained)
    {
      if (debug_)
      {
        log_ << " - aborting: seed is not contained!\n";
      }
      continue;
    }
    // score the fit; isotopeScore_ may mark optional isotopes as removed (peak index -2)
    double score = isotopeScore_(isotopes, pattern, false);
    //check if the seed is still contained (it may have been removed as an optional isotope)
    seed_contained = false;
    for (Size iso = 0; iso < pattern.peak.size(); ++iso)
    {
      if (pattern.peak[iso] == (Int)center.peak &&
          pattern.spectrum[iso] == center.spectrum)
      {
        seed_contained = true;
        break;
      }
    }
    if (!seed_contained)
    {
      if (debug_)
      {
        log_ << " - aborting: seed was removed during isotope fit!\n";
      }
      continue;
    }
    if (debug_)
    {
      log_ << " - final score: " << score << '\n';
    }
    if (score > max_score)
    {
      max_score = score;
      best_pattern = pattern;
    }
  }
  if (debug_)
  {
    log_ << " - best score : " << max_score << '\n';
  }
  best_pattern.theoretical_pattern = isotopes;
  return max_score;
}
/// Extends all mass traces of an isotope 'pattern' in the RT dimension and collects
/// them in 'traces'. The most intense trace is extended first; its RT extent then
/// bounds the extension of all other traces.
/// @param pattern isotope pattern (peak index -1 = missing, -2 = removed during fit)
/// @param traces output container (cleared if a trace in the middle of the pattern fails)
/// @param meta_index_overall index of the float data array holding the overall peak score
void FeatureFinderAlgorithmPicked::extendMassTraces_(const IsotopePattern& pattern, MassTraces& traces, Size meta_index_overall) const
{
  //find index of the trace with the maximum intensity
  double max_int = 0.0;
  Size max_trace_index = 0;
  for (Size p = 0; p < pattern.peak.size(); ++p)
  {
    if (pattern.peak[p] < 0)
    {
      continue; //skip missing (-1) and removed (-2) traces
    }
    if (map_[pattern.spectrum[p]][pattern.peak[p]].getIntensity() > max_int)
    {
      max_int = map_[pattern.spectrum[p]][pattern.peak[p]].getIntensity();
      max_trace_index = p;
    }
  }
  //extend the maximum intensity trace to determine the boundaries in RT dimension
  Size start_index = pattern.spectrum[max_trace_index];
  const PeakType* start_peak = &(map_[pattern.spectrum[max_trace_index]][pattern.peak[max_trace_index]]);
  double start_mz = start_peak->getMZ();
  double start_rt = map_[start_index].getRT();
  if (debug_)
  {
    log_ << " - Trace " << max_trace_index << " (maximum intensity)\n";
  }
  if (debug_)
  {
    log_ << " - extending from: " << map_[start_index].getRT() << " / " << start_mz << " (int: " << start_peak->getIntensity() << ")\n";
  }
  //initialize the trace and extend in both RT directions (no boundaries for the max trace)
  MassTrace max_trace;
  max_trace.peaks.emplace_back(start_rt, start_peak);
  extendMassTrace_(max_trace, start_index, start_mz, false, meta_index_overall);
  extendMassTrace_(max_trace, start_index, start_mz, true, meta_index_overall);
  double rt_max = max_trace.peaks.back().first;
  double rt_min = max_trace.peaks.begin()->first;
  if (debug_)
  {
    log_ << " - rt bounds: " << rt_min << "-" << rt_max << '\n';
  }
  //Abort if too few peaks were found.
  //NOTE: written as 'size + max_missing < 2 * min_spectra' to avoid unsigned underflow;
  //the previous form '2 * min_spectra_ - max_missing_trace_peaks_' wrapped around when
  //'max_missing' exceeded '2 * min_spectra', making the test always fail.
  if (!max_trace.isValid() || max_trace.peaks.size() + max_missing_trace_peaks_ < 2 * min_spectra_)
  {
    if (debug_)
    {
      log_ << " - could not extend trace with maximum intensity => abort\n";
    }
    return;
  }
  for (Size p = 0; p < pattern.peak.size(); ++p)
  {
    if (debug_)
    {
      log_ << " - Trace " << p << '\n';
    }
    if (p == max_trace_index)
    {
      if (debug_)
      {
        log_ << " - previously extended maximum trace\n";
      }
      traces.push_back(std::move(max_trace));
      traces.back().theoretical_int = pattern.theoretical_pattern.intensity[p];
      traces.max_trace = traces.size() - 1;
      continue;
    }
    Seed starting_peak;
    starting_peak.spectrum = pattern.spectrum[p];
    starting_peak.peak = pattern.peak[p];
    if (pattern.peak[p] == -2)
    {
      if (debug_)
      {
        log_ << " - removed during isotope fit\n";
      }
      continue;
    }
    else if (pattern.peak[p] == -1)
    {
      if (debug_)
      {
        log_ << " - missing\n";
      }
      continue;
    }
    starting_peak.intensity = map_[starting_peak.spectrum][starting_peak.peak].getIntensity();
    if (debug_) log_ << " - trace seed: " << map_[starting_peak.spectrum].getRT() << " / " << map_[starting_peak.spectrum][starting_peak.peak].getMZ() << " (int: " << map_[starting_peak.spectrum][starting_peak.peak].getIntensity() << ")\n";
    //search for nearby maximum of the mass trace as the extension assumes that it starts at the maximum.
    //FIX: the previous 'std::max((Size)0, starting_peak.spectrum - min_spectra_)' was a no-op,
    //because the unsigned subtraction wrapped around for seeds near the start of the map,
    //producing a huge 'begin' and silently skipping this search.
    Size begin = (starting_peak.spectrum >= min_spectra_) ? (starting_peak.spectrum - min_spectra_) : 0;
    Size end = std::min(starting_peak.spectrum + min_spectra_, (Size)map_.size());
    double mz = map_[starting_peak.spectrum][starting_peak.peak].getMZ();
    double inte = map_[starting_peak.spectrum][starting_peak.peak].getIntensity();
    for (Size spectrum_index = begin; spectrum_index < end; ++spectrum_index)
    {
      //find better seeds (no-empty scan/low mz diff/higher intensity)
      SignedSize peak_index = -1;
      if (!map_[spectrum_index].empty())
      {
        peak_index = map_[spectrum_index].findNearest(map_[starting_peak.spectrum][starting_peak.peak].getMZ());
      }
      if (peak_index < 0 ||
          map_[spectrum_index][peak_index].getIntensity() <= inte ||
          std::fabs(mz - map_[spectrum_index][peak_index].getMZ()) >= pattern_tolerance_
          )
      {
        continue;
      }
      starting_peak.spectrum = spectrum_index;
      starting_peak.peak = peak_index;
      inte = map_[spectrum_index][peak_index].getIntensity();
    }
    if (debug_)
    {
      log_ << " - extending from: " << map_[starting_peak.spectrum].getRT() << " / " << map_[starting_peak.spectrum][starting_peak.peak].getMZ() << " (int: " << map_[starting_peak.spectrum][starting_peak.peak].getIntensity() << ")\n";
    }
    //------------------------------------------------------------------
    //Extend seed to a mass trace (bounded by the RT extent of the maximum trace)
    MassTrace trace;
    const PeakType* seed = &(map_[starting_peak.spectrum][starting_peak.peak]);
    //initialize trace with seed data and extend
    trace.peaks.emplace_back(map_[starting_peak.spectrum].getRT(), seed);
    extendMassTrace_(trace, starting_peak.spectrum, seed->getMZ(), false, meta_index_overall, rt_min, rt_max);
    extendMassTrace_(trace, starting_peak.spectrum, seed->getMZ(), true, meta_index_overall, rt_min, rt_max);
    //check if enough peaks were found
    if (!trace.isValid())
    {
      if (debug_)
      {
        log_ << " - could not extend trace \n";
      }
      //Missing traces in the middle of a pattern are not acceptable => fix this.
      //NOTE(review): this comparison relies on 'traces.max_trace', which is only set
      //once the maximum trace has been pushed (p == max_trace_index) — confirm the
      //intended behavior for failing traces before that point.
      if (p < traces.max_trace)
      {
        traces.clear(); //remove earlier traces
        continue;
      }
      else if (p > traces.max_trace)
      {
        break; //no more traces are possible
      }
    }
    traces.push_back(trace);
    traces.back().theoretical_int = pattern.theoretical_pattern.intensity[p];
  }
}
/// Extends a mass trace from its current last peak in one RT direction, adding peaks
/// near 'mz' until too many consecutive peaks are missing, the intensity rises again
/// (average positive slope above threshold), or the optional RT boundaries are hit.
/// @param trace trace to extend in place (must contain at least one peak)
/// @param spectrum_index spectrum of the trace's current end (extension starts next to it)
/// @param mz target m/z of the trace
/// @param increase_rt true = extend towards higher RT, false = towards lower RT
/// @param meta_index_overall index of the float data array holding the overall peak score
/// @param min_rt / max_rt optional hard RT boundaries; active only if min_rt != max_rt
void FeatureFinderAlgorithmPicked::extendMassTrace_(MassTrace& trace, SignedSize spectrum_index, double mz, bool increase_rt, Size meta_index_overall, double min_rt, double max_rt) const
{
  //Reverse peaks if we run the method for the second time (to keep them in chronological order)
  if (increase_rt)
  {
    ++spectrum_index;
    std::reverse(trace.peaks.begin(), trace.peaks.end());
  }
  else
  {
    --spectrum_index;
  }
  //check if boundaries are set (the min_rt == max_rt default means "unbounded")
  bool boundaries = false;
  if (max_rt != min_rt)
  {
    boundaries = true;
  }
  //Relax slope threshold if there is a hard boundary for the extension (factor 2 when bounded)
  double current_slope_bound = (1.0 + (double)boundaries) * slope_bound_;
  // sliding window size for the average-slope check
  // NOTE(review): assumes min_spectra_ >= 1; 'delta_count - 1' would wrap for 0 — confirm parameter bounds
  Size delta_count = min_spectra_;
  // pre-filled with zeros so the window is full after the first real delta is appended
  std::vector<double> deltas(delta_count - 1, 0);
  double last_observed_intensity = trace.peaks.back().second->getIntensity();
  UInt missing_peaks = 0; // consecutive misses; reset whenever a peak is found
  Size peaks_before_extension = trace.peaks.size();
  String abort_reason = "";
  while ((!increase_rt && spectrum_index >= 0) || (increase_rt && spectrum_index < (SignedSize)map_.size()))
  {
    if (boundaries &&
        ((!increase_rt && map_[spectrum_index].getRT() < min_rt) ||
         (increase_rt && map_[spectrum_index].getRT() > max_rt))
        )
    {
      abort_reason = "Hit upper/lower boundary";
      break;
    }
    SignedSize peak_index = -1;
    if (!map_[spectrum_index].empty())
    {
      peak_index = map_[spectrum_index].findNearest(mz);
    }
    // check if the peak is "missing"
    if (
      peak_index < 0 // no peak found
      || map_[spectrum_index].getFloatDataArrays()[meta_index_overall][peak_index] < 0.01 // overall score is to low
      || positionScore_(mz, map_[spectrum_index][peak_index].getMZ(), trace_tolerance_) == 0.0 // deviation of mz is too big
      )
    {
      ++missing_peaks;
      if (missing_peaks > max_missing_trace_peaks_)
      {
        abort_reason = "too many peaks missing";
        break;
      }
    }
    else
    {
      missing_peaks = 0;
      //add found peak to trace
      trace.peaks.emplace_back(map_[spectrum_index].getRT(), &(map_[spectrum_index][peak_index]));
      //update deltas and intensities (relative intensity change vs. previous found peak)
      deltas.push_back((map_[spectrum_index][peak_index].getIntensity() - last_observed_intensity) / last_observed_intensity);
      last_observed_intensity = map_[spectrum_index][peak_index].getIntensity();
      //Abort if the average delta is too big (as intensity increases then)
      double average_delta = std::accumulate(deltas.end() - delta_count, deltas.end(), 0.0) / (double)delta_count;
      if (average_delta > current_slope_bound)
      {
        abort_reason = String("Average delta above threshold: ") + average_delta + "/" + current_slope_bound;
        //remove last peaks as we extended too far (at most the window size, and never
        //peaks that existed before this extension started)
        Size remove = std::min((Size)(trace.peaks.size() - peaks_before_extension), delta_count - 1);
        trace.peaks.erase(trace.peaks.end() - remove, trace.peaks.end());
        break;
      }
    }
    //increase/decrease scan index
    if (increase_rt)
    {
      ++spectrum_index;
    }
    else
    {
      --spectrum_index;
    }
  }
  if (debug_)
  {
    log_ << " - Added " << (trace.peaks.size() - peaks_before_extension) << " peaks (abort: " << abort_reason << ")\n";
  }
}
/// Returns the index of the peak nearest to 'pos', scanning forward from 'start'.
/// Stops as soon as the distance stops decreasing (valid because peaks are sorted
/// by m/z, so the distance is unimodal when scanning in one direction).
Size FeatureFinderAlgorithmPicked::nearest_(double pos, const MSSpectrum& spec, Size start) const
{
  Size best = start;
  double best_dist = std::fabs(pos - spec[best].getMZ());
  for (Size i = start + 1; i < spec.size(); ++i)
  {
    const double dist = std::fabs(pos - spec[i].getMZ());
    if (dist >= best_dist)
    {
      break; // distances increase from here on
    }
    best_dist = dist;
    best = i;
  }
  return best;
}
/// Looks for an isotope peak at m/z 'pos' in the center spectrum and its two RT
/// neighbors, and stores the averaged result in 'pattern' at 'pattern_index'.
/// Sets pattern.peak[pattern_index] to -1 if no matching peak is found at all.
/// @param pos expected m/z of the isotope peak
/// @param spectrum_index center spectrum (assumed non-empty; nearest_ indexes into it directly)
/// @param pattern output pattern (peak/spectrum/mz_score/intensity entries are written)
/// @param pattern_index slot in 'pattern' to fill
/// @param peak_index in/out search hint for the center spectrum (updated to the nearest peak)
void FeatureFinderAlgorithmPicked::findIsotope_(double pos, Size spectrum_index, IsotopePattern& pattern, Size pattern_index, Size& peak_index) const
{
  if (debug_)
  {
    log_ << " - Isotope " << pattern_index << ": ";
  }
  double intensity = 0.0;
  double pos_score = 0.0;
  UInt matches = 0; // number of spectra (center/before/after) contributing a match
  //search in the center spectrum
  const SpectrumType& spectrum = map_[spectrum_index];
  peak_index = nearest_(pos, spectrum, peak_index);
  double this_mz_score = positionScore_(pos, spectrum[peak_index].getMZ(), pattern_tolerance_);
  pattern.theoretical_mz[pattern_index] = pos;
  if (this_mz_score != 0.0)
  {
    if (debug_)
    {
      log_ << String::number(spectrum[peak_index].getIntensity(), 1) << " ";
    }
    pattern.peak[pattern_index] = peak_index;
    pattern.spectrum[pattern_index] = spectrum_index;
    intensity += spectrum[peak_index].getIntensity();
    pos_score += this_mz_score;
    ++matches;
  }
  //previous spectrum (only used as the pattern entry if the center spectrum had no match)
  if (spectrum_index != 0 && !map_[spectrum_index - 1].empty())
  {
    const SpectrumType& spectrum_before = map_[spectrum_index - 1];
    Size index_before = spectrum_before.findNearest(pos);
    double mz_score = positionScore_(pos, spectrum_before[index_before].getMZ(), pattern_tolerance_);
    if (mz_score != 0.0)
    {
      if (debug_) log_ << String::number(spectrum_before[index_before].getIntensity(), 1) << "b ";
      intensity += spectrum_before[index_before].getIntensity();
      pos_score += mz_score;
      ++matches;
      if (pattern.peak[pattern_index] == -1)
      {
        pattern.peak[pattern_index] = index_before;
        pattern.spectrum[pattern_index] = spectrum_index - 1;
      }
    }
  }
  //next spectrum (same fallback rule as above)
  if (spectrum_index != map_.size() - 1 && !map_[spectrum_index + 1].empty())
  {
    const SpectrumType& spectrum_after = map_[spectrum_index + 1];
    Size index_after = spectrum_after.findNearest(pos);
    double mz_score = positionScore_(pos, spectrum_after[index_after].getMZ(), pattern_tolerance_);
    if (mz_score != 0.0)
    {
      if (debug_) log_ << String::number(spectrum_after[index_after].getIntensity(), 1) << "a ";
      intensity += spectrum_after[index_after].getIntensity();
      pos_score += mz_score;
      ++matches;
      if (pattern.peak[pattern_index] == -1)
      {
        pattern.peak[pattern_index] = index_after;
        pattern.spectrum[pattern_index] = spectrum_index + 1;
      }
    }
  }
  //no isotope found in any of the three spectra => mark as missing
  if (matches == 0)
  {
    if (debug_)
    {
      log_ << " missing\n";
    }
    pattern.peak[pattern_index] = -1;
    pattern.mz_score[pattern_index] = 0.0;
    pattern.intensity[pattern_index] = 0.0;
  }
  else
  {
    if (debug_)
    {
      log_ << "=> " << intensity / matches << '\n';
    }
    // average m/z score and intensity over all contributing spectra
    pattern.mz_score[pattern_index] = pos_score / matches;
    pattern.intensity[pattern_index] = intensity / matches;
  }
}
/// Scores the m/z agreement of two positions in [0,1]: close to 1.0 within half the
/// allowed deviation, falling off linearly to 0 at the full allowed deviation.
double FeatureFinderAlgorithmPicked::positionScore_(double pos1, double pos2, double allowed_deviation) const
{
  const double diff = std::fabs(pos1 - pos2);
  const double half = 0.5 * allowed_deviation;
  if (diff <= half)
  {
    // inner zone: scores in [0.9, 1.0]
    return 0.9 + 0.1 * (half - diff) / half;
  }
  if (diff <= allowed_deviation)
  {
    // outer zone: scores in [0.0, 0.9]
    return 0.9 * (allowed_deviation - diff) / half;
  }
  return 0.0;
}
/// Calculates a score between 0 and 1 for the correlation between theoretical and
/// found isotope pattern. Optional isotopes at the ends of the pattern may be dropped
/// if that improves the fit; dropped entries are marked in 'pattern' with peak index -2.
/// @param isotopes theoretical pattern (defines which leading/trailing isotopes are optional)
/// @param pattern observed pattern; modified in place (dropped optional isotopes marked)
/// @param consider_mz_distances if true, the score is additionally weighted by the mean m/z score
double FeatureFinderAlgorithmPicked::isotopeScore_(const TheoreticalIsotopePattern& isotopes, IsotopePattern& pattern, bool consider_mz_distances) const
{
  if (debug_) log_ << " - fitting " << pattern.intensity.size() << " peaks\n";
  //Abort if a core peak is missing (core = everything between the optional ends)
  for (Size iso = 0 + isotopes.optional_begin; iso < pattern.peak.size() - isotopes.optional_end; ++iso)
  {
    if (pattern.peak[iso] == -1)
    {
      if (debug_)
      {
        log_ << " - aborting: core peak is missing\n";
      }
      return 0.0;
    }
  }
  //Find best isotope fit
  // - try to leave out optional isotope peaks to improve the fit
  // - do not allow gaps inside the pattern
  double best_int_score = 0.01; //Not 0 as this would result in problems when checking for the percental improvement
  // minimum number of leading isotopes that MUST be dropped (everything before a missing optional peak)
  Size best_begin = 0;
  for (Size i = isotopes.optional_begin; i > 0; --i)
  {
    if (pattern.peak[i - 1] == -1)
    {
      best_begin = i;
      break;
    }
  }
  // minimum number of trailing isotopes that MUST be dropped (same rule at the other end)
  Size best_end = 0;
  for (Size i = isotopes.optional_end; i > 0; --i)
  {
    if (pattern.peak[pattern.peak.size() - i] == -1)
    {
      best_end = i;
      break;
    }
  }
  if (debug_)
  {
    log_ << " - best_begin/end: " << best_begin << "/" << best_end << '\n';
  }
  // try all allowed combinations of dropped leading (b) and trailing (e) optional isotopes
  for (Size b = best_begin; b <= isotopes.optional_begin; ++b)
  {
    for (Size e = best_end; e <= isotopes.optional_end; ++e)
    {
      //Make sure we have more than 2 peaks (unless in the first loop iteration, there we allow two points)
      if (isotopes.size() - b - e > 2 || (b == best_begin &&
                                          e == best_end &&
                                          isotopes.size() - b - e > 1))
      {
        double int_score = Math::pearsonCorrelationCoefficient(isotopes.intensity.begin() + b, isotopes.intensity.end() - e, pattern.intensity.begin() + b, pattern.intensity.end() - e);
        if (std::isnan(int_score))
        {
          int_score = 0.0; // constant input makes Pearson undefined; treat as no correlation
        }
        if (isotopes.size() - b - e == 2 && int_score > min_isotope_fit_)
        {
          int_score = min_isotope_fit_; //special case for the first loop iteration (otherwise the score is 1)
        }
        if (debug_)
        {
          log_ << " - fit (" << b << "/" << e << "): " << int_score;
        }
        // accept only if the improvement exceeds the configured relative threshold
        if (int_score / best_int_score >= 1.0 + optional_fit_improvement_)
        {
          if (debug_)
          {
            log_ << " - new best fit ";
          }
          best_int_score = int_score;
          best_begin = b;
          best_end = e;
        }
        if (debug_)
        {
          log_ << '\n';
        }
      }
    }
  }
  //if the best fit is empty, abort
  if (pattern.mz_score.size() - best_begin - best_end == 0)
  {
    return 0.0;
  }
  //remove left out peaks from the beginning (mark with -2 = removed during fit)
  for (Size i = 0; i < best_begin; ++i)
  {
    pattern.peak[i] = -2;
    pattern.intensity[i] = 0.0;
    pattern.mz_score[i] = 0.0;
  }
  //remove left out peaks from the end
  for (Size i = 0; i < best_end; ++i)
  {
    pattern.peak[isotopes.size() - 1 - i] = -2;
    pattern.intensity[isotopes.size() - 1 - i] = 0.0;
    pattern.mz_score[isotopes.size() - 1 - i] = 0.0;
  }
  //calculate m/z score (if required): weight by mean positional score of the kept isotopes
  if (consider_mz_distances)
  {
    best_int_score *= std::accumulate(pattern.mz_score.begin() + best_begin, pattern.mz_score.end() - best_end, 0.0) / (pattern.mz_score.size() - best_begin - best_end);
  }
  //return final score
  OPENMS_POSTCONDITION(best_int_score >= 0.0, (String("Internal error: Isotope score (") + best_int_score + ") should be >=0.0").c_str())
  OPENMS_POSTCONDITION(best_int_score <= 1.0, (String("Internal error: Isotope score (") + best_int_score + ") should be <=1.0").c_str())
  return best_int_score;
}
/// Intensity score of a single peak: bilinear interpolation of the per-bin intensity
/// scores of the four (rt, mz) bins surrounding the peak, weighted by proximity to
/// each bin center. Returns a value in [0, ~1].
/// @param spectrum spectrum index of the peak in map_
/// @param peak peak index within that spectrum
double FeatureFinderAlgorithmPicked::intensityScore_(Size spectrum, Size peak) const
{
  // calculate (half) bin numbers: the RT/mz ranges are divided into 2*intensity_bins_
  // half-bins to decide which two neighboring full bins surround the peak
  double intensity = map_[spectrum][peak].getIntensity();
  double rt = map_[spectrum].getRT();
  double mz = map_[spectrum][peak].getMZ();
  double rt_min = map_.spectrumRanges().byMSLevel(1).getMinRT();
  double mz_min = map_.spectrumRanges().byMSLevel(1).getMinMZ();
  UInt rt_bin = std::min(2 * intensity_bins_ - 1, (UInt) std::floor((rt - rt_min) / intensity_rt_step_ * 2.0));
  UInt mz_bin = std::min(2 * intensity_bins_ - 1, (UInt) std::floor((mz - mz_min) / intensity_mz_step_ * 2.0));
  // determine mz bins: ml/mh are the two full bins to interpolate between
  // (equal at the outermost half-bins where there is no neighbor)
  UInt ml, mh;
  if (mz_bin == 0 || mz_bin == 2 * intensity_bins_ - 1)
  {
    ml = mz_bin / 2;
    mh = mz_bin / 2;
  }
  else if (Math::isOdd(mz_bin))
  {
    // odd half-bin: peak is in the upper half of bin mz_bin/2 => neighbor is the next bin
    ml = mz_bin / 2;
    mh = mz_bin / 2 + 1;
  }
  else
  {
    // even half-bin: peak is in the lower half => neighbor is the previous bin
    ml = mz_bin / 2 - 1;
    mh = mz_bin / 2;
  }
  // determine rt bins (same scheme as for m/z)
  UInt rl, rh;
  if (rt_bin == 0 || rt_bin == 2 * intensity_bins_ - 1)
  {
    rl = rt_bin / 2;
    rh = rt_bin / 2;
  }
  else if (Math::isOdd(rt_bin))
  {
    rl = rt_bin / 2;
    rh = rt_bin / 2 + 1;
  }
  else
  {
    rl = rt_bin / 2 - 1;
    rh = rt_bin / 2;
  }
  // calculate distances to surrounding bin centers (normalized to [0,1])
  double drl = std::fabs(rt_min + (0.5 + rl) * intensity_rt_step_ - rt) / intensity_rt_step_;
  double drh = std::fabs(rt_min + (0.5 + rh) * intensity_rt_step_ - rt) / intensity_rt_step_;
  double dml = std::fabs(mz_min + (0.5 + ml) * intensity_mz_step_ - mz) / intensity_mz_step_;
  double dmh = std::fabs(mz_min + (0.5 + mh) * intensity_mz_step_ - mz) / intensity_mz_step_;
  // Calculate weights for the intensity scores based on the distances to the
  // bin center(the nearer to better)
  double d1 = std::sqrt(std::pow(1.0 - drl, 2) + std::pow(1.0 - dml, 2));
  double d2 = std::sqrt(std::pow(1.0 - drh, 2) + std::pow(1.0 - dml, 2));
  double d3 = std::sqrt(std::pow(1.0 - drl, 2) + std::pow(1.0 - dmh, 2));
  double d4 = std::sqrt(std::pow(1.0 - drh, 2) + std::pow(1.0 - dmh, 2));
  double d_sum = d1 + d2 + d3 + d4;
  // Final score .. intensityScore in the surrounding bins, weighted by the distance of the
  // bin center to the peak
  double final = intensityScore_(rl, ml, intensity) * (d1 / d_sum)
                 + intensityScore_(rh, ml, intensity) * (d2 / d_sum)
                 + intensityScore_(rl, mh, intensity) * (d3 / d_sum)
                 + intensityScore_(rh, mh, intensity) * (d4 / d_sum);
  OPENMS_POSTCONDITION(final >= 0.0, (String("Internal error: Intensity score (") + final + ") should be >=0.0").c_str())
  OPENMS_POSTCONDITION(final <= 1.0001, (String("Internal error: Intensity score (") + final + ") should be <=1.0").c_str())
  return final;
}
/// Creates the RT trace fitter selected by the 'feature:rt_shape' parameter:
/// an EGH fitter for "asymmetric" peak shapes (tau is reset to -1.0 in that case),
/// otherwise a Gaussian fitter for symmetric shapes.
std::unique_ptr<TraceFitter> FeatureFinderAlgorithmPicked::chooseTraceFitter_(double& tau)
{
  // choose fitter based on the configured RT peak shape
  const bool asymmetric = (param_.getValue("feature:rt_shape") == "asymmetric");
  if (asymmetric)
  {
    OPENMS_LOG_DEBUG << "use asymmetric rt peak shape\n";
    tau = -1.0;
    return std::make_unique<EGHTraceFitter>();
  }
  // symmetric shape (the only other valid value of 'feature:rt_shape')
  OPENMS_LOG_DEBUG << "use symmetric rt peak shape\n";
  return std::make_unique<GaussTraceFitter>();
}
/// Intensity score within one (rt, mz) bin: the intensity is located among the bin's
/// precalculated vigintiles (20-quantiles) and linearly interpolated inside the
/// enclosing vigintile interval. Returns a value in [0,1].
double FeatureFinderAlgorithmPicked::intensityScore_(Size rt_bin, Size mz_bin, double intensity) const
{
  const std::vector<double>& quantiles20 = intensity_thresholds_[rt_bin][mz_bin];
  // first vigintile that is >= intensity
  const auto upper = std::lower_bound(quantiles20.begin(), quantiles20.end(), intensity);
  if (upper == quantiles20.end())
  {
    // larger than the largest vigintile => maximal score
    return 1.0;
  }
  // fractional position inside the enclosing vigintile interval (each interval is worth 0.05)
  double within;
  if (upper == quantiles20.begin())
  {
    within = 0.05 * intensity / *upper;
  }
  else
  {
    // (intensity - vigintile_low) / (vigintile_high - vigintile_low)
    within = 0.05 * (intensity - *(upper - 1)) / (*upper - *(upper - 1));
  }
  // add 0.05 for each full vigintile below the interval
  // score = 1/20 * [ index(vigintile_low) + (intensity-vigintile_low) / (vigintile_high - vigintile_low) ]
  const double score = within + 0.05 * ((upper - quantiles20.begin()) - 1.0);
  // clamp to [0,1] to fix numerical problems
  return std::min(1.0, std::max(0.0, score));
}
void FeatureFinderAlgorithmPicked::cropFeature_(const std::shared_ptr<TraceFitter>& fitter,
const MassTraces& traces,
MassTraces& new_traces)
{
double low_bound = fitter->getLowerRTBound();
double high_bound = fitter->getUpperRTBound();
if (debug_)
{
log_ << " => RT bounds: " << low_bound << " - " << high_bound << '\n';
}
std::vector<double> v_theo, v_real;
for (Size t = 0; t < traces.size(); ++t)
{
const MassTrace& trace = traces[t];
if (debug_)
{
log_ << " - Trace " << t << ": (" << trace.theoretical_int << ")\n";
}
MassTrace new_trace;
//compute average relative deviation and correlation
double deviation = 0.0;
v_theo.clear();
v_real.clear();
for (Size k = 0; k < trace.peaks.size(); ++k)
{
//consider peaks when inside RT bounds only
if (trace.peaks[k].first >= low_bound && trace.peaks[k].first <= high_bound)
{
new_trace.peaks.push_back(trace.peaks[k]);
double theo = traces.baseline + fitter->computeTheoretical(trace, k);
v_theo.push_back(theo);
double real = trace.peaks[k].second->getIntensity();
v_real.push_back(real);
deviation += std::fabs(real - theo) / theo;
}
}
double fit_score = 0.0;
double correlation = 0.0;
double final_score = 0.0;
if (!new_trace.peaks.empty())
{
fit_score = deviation / new_trace.peaks.size();
correlation = std::max(0.0, Math::pearsonCorrelationCoefficient(v_theo.begin(), v_theo.end(), v_real.begin(), v_real.end()));
final_score = std::sqrt(correlation * std::max(0.0, 1.0 - fit_score));
}
if (debug_)
{
log_ << " - peaks: " << new_trace.peaks.size() << " / " << trace.peaks.size() << " - relative deviation: " << fit_score << " - correlation: " << correlation << " - final score: " << correlation << '\n';
}
//remove badly fitting traces
if (!new_trace.isValid() || final_score < min_trace_score_)
{
if (t < traces.max_trace)
{
new_traces = MassTraces();
if (debug_)
{
log_ << " - removed this and previous traces due to bad fit\n";
}
new_traces.clear(); //remove earlier traces
continue;
}
else if (t == traces.max_trace)
{
new_traces = MassTraces();
if (debug_)
{
log_ << " - aborting (max trace was removed)\n";
}
break;
}
else if (t > traces.max_trace)
{
if (debug_)
{
log_ << " - removed due to bad fit => omitting the rest\n";
}
break; //no more traces are possible
}
}
//add new trace
else
{
new_trace.theoretical_int = trace.theoretical_int;
new_traces.push_back(new_trace);
if (t == traces.max_trace)
{
new_traces.max_trace = new_traces.size() - 1;
}
}
}
new_traces.baseline = traces.baseline;
}
  bool FeatureFinderAlgorithmPicked::checkFeatureQuality_(const std::shared_ptr<TraceFitter>& fitter,
                                                          MassTraces& feature_traces,
                                                          const double& seed_mz, const double& min_feature_score,
                                                          String& error_msg, double& fit_score, double& correlation, double& final_score)
  {
    // Validates a fitted feature. Returns false (and sets <error_msg>) as soon
    // as any check fails; on the quality check the output parameters
    // <fit_score>, <correlation> and <final_score> are filled in.
    //check if the sigma fit was ok (if it is larger than 'max_rt_span')
    // 5.0 * sigma > max_rt_span_ * region_rt_span
    if (fitter->checkMaximalRTSpan(max_rt_span_))
    {
      error_msg = "Invalid fit: Fitted model is bigger than 'max_rt_span'";
      return false;
    }
    //check if the feature is valid
    if (!feature_traces.isValid(seed_mz, trace_tolerance_))
    {
      error_msg = "Invalid feature after fit - too few traces or peaks left";
      return false;
    }
    //check if x0 is inside feature bounds
    {
      std::pair<double, double> rt_bounds = feature_traces.getRTBounds();
      if (fitter->getCenter() < rt_bounds.first || fitter->getCenter() > rt_bounds.second)
      {
        error_msg = "Invalid fit: Center outside of feature bounds";
        return false;
      }
    }
    //check if the remaining traces fill out at least 'min_rt_span' of the RT span
    {
      std::pair<double, double> rt_bounds = feature_traces.getRTBounds();
      if (fitter->checkMinimalRTSpan(rt_bounds, min_rt_span_))
      {
        error_msg = "Invalid fit: Less than 'min_rt_span' left after fit";
        return false;
      }
    }
    //check if feature quality is high enough (average relative deviation and correlation of the whole feature)
    {
      // collect theoretical vs. observed intensities over all peaks of all traces
      std::vector<double> v_theo, v_real;
      double deviation = 0.0;
      for (Size t = 0; t < feature_traces.size(); ++t)
      {
        MassTrace& trace = feature_traces[t];
        for (Size k = 0; k < trace.peaks.size(); ++k)
        {
          // was double theo = new_traces.baseline + trace.theoretical_int * height * exp(-0.5 * pow(trace.peaks[k].first - x0, 2) / pow(sigma, 2) );
          double theo = feature_traces.baseline + fitter->computeTheoretical(trace, k);
          v_theo.push_back(theo);
          double real = trace.peaks[k].second->getIntensity();
          v_real.push_back(real);
          deviation += std::fabs(real - theo) / theo;
        }
      }
      // NOTE(review): fit_score is 1 - (average relative deviation), clamped
      // at 0, so the "relative deviation" debug label below actually shows
      // this derived score, not the raw deviation.
      fit_score = std::max(0.0, 1.0 - (deviation / feature_traces.getPeakCount()));
      correlation = std::max(0.0, Math::pearsonCorrelationCoefficient(v_theo.begin(), v_theo.end(), v_real.begin(), v_real.end()));
      // geometric mean of the two quality measures
      final_score = std::sqrt(correlation * fit_score);
      //quality output
      if (debug_)
      {
        log_ << "Quality estimation:\n";
        log_ << " - relative deviation: " << fit_score << '\n';
        log_ << " - correlation: " << correlation << '\n';
        log_ << " => final score: " << final_score << '\n';
      }
      if (final_score < min_feature_score)
      {
        error_msg = "Feature quality too low after fit";
        return false;
      }
    }
    return true;
  }
  void FeatureFinderAlgorithmPicked::writeFeatureDebugInfo_(const std::shared_ptr<TraceFitter>& fitter,
                                                            const MassTraces& traces,
                                                            const MassTraces& new_traces,
                                                            bool feature_ok, const String& error_msg, const double final_score, const Int plot_nr, const PeakType& peak,
                                                            const String& path)
  {
    // Writes gnuplot debug files for a single feature fit:
    //  - <path><plot_nr>.dta          : raw trace data before the fit
    //  - <path><plot_nr>_cropped.dta  : trace data after cropping (only if peaks remain)
    //  - <path><plot_nr>.plot         : gnuplot script including the fitted model functions
    // Mass traces are plotted side-by-side, each shifted in pseudo-RT by
    // the 'debug:pseudo_rt_shift' parameter times the trace index.
    double pseudo_rt_shift = param_.getValue("debug:pseudo_rt_shift");
    String script;
    {
      TextFile tf;
      //gnuplot script
      script = String("plot \"") + path + plot_nr + ".dta\" title 'before fit (RT: " + String::number(fitter->getCenter(), 2) + " m/z: " + String::number(peak.getMZ(), 4) + ")' with points 1";
      //feature before fit
      for (Size k = 0; k < traces.size(); ++k)
      {
        for (Size j = 0; j < traces[k].peaks.size(); ++j)
        {
          tf.addLine(String(pseudo_rt_shift * k + traces[k].peaks[j].first) + "\t" + traces[k].peaks[j].second->getIntensity());
        }
      }
      tf.store(path + plot_nr + ".dta");
    }
    {
      //fitted feature
      if (new_traces.getPeakCount() != 0)
      {
        TextFile tf_new_trace;
        for (Size k = 0; k < new_traces.size(); ++k)
        {
          for (Size j = 0; j < new_traces[k].peaks.size(); ++j)
          {
            tf_new_trace.addLine(String(pseudo_rt_shift * k + new_traces[k].peaks[j].first) + "\t" + new_traces[k].peaks[j].second->getIntensity());
          }
        }
        tf_new_trace.store(path + plot_nr + "_cropped.dta");
        script = script + ", \"" + path + plot_nr + "_cropped.dta\" title 'feature ";
        if (!feature_ok)
        {
          script = script + " - " + error_msg;
        }
        else
        {
          // feature number in the title refers to the feature about to be appended
          script = script + (features_->size() + 1) + " (score: " + String::number(final_score, 3) + ")";
        }
        script = script + "' with points 3";
      }
    }
    {
      //fitted functions
      TextFile tf_fitted_func;
      for (Size k = 0; k < traces.size(); ++k)
      {
        // one gnuplot function per trace, named 'f', 'g', 'h', ...
        // NOTE(review): for more than ~20 traces this walks past 'z' into other
        // ASCII characters; acceptable for debug-only output, but worth knowing.
        char fun = 'f';
        fun += (char)k;
        tf_fitted_func.addLine(fitter->getGnuplotFormula(traces[k], fun, traces.baseline, pseudo_rt_shift * k));
        //tf.push_back(String(fun)+"(x)= " + traces.baseline + " + " + fitter->getGnuplotFormula(traces[k], pseudo_rt_shift * k));
        script = script + ", " + fun + "(x) title 'Trace " + k + " (m/z: " + String::number(traces[k].getAvgMZ(), 4) + ")'";
      }
      //output
      tf_fitted_func.addLine("set xlabel \"pseudo RT (mass traces side-by-side)\"");
      tf_fitted_func.addLine("set ylabel \"intensity\"");
      tf_fitted_func.addLine("set samples 1000");
      tf_fitted_func.addLine(script);
      tf_fitted_func.addLine("pause -1");
      tf_fitted_func.store(path + plot_nr + ".plot");
    }
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/MultiplexIsotopicPeakPattern.cpp | .cpp | 2,031 | 75 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Lars Nilse $
// $Authors: Lars Nilse $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/FEATUREFINDER/MultiplexDeltaMasses.h>
#include <OpenMS/FEATUREFINDER/MultiplexIsotopicPeakPattern.h>
#include <utility>
using namespace std;
namespace OpenMS
{
MultiplexIsotopicPeakPattern::MultiplexIsotopicPeakPattern(int c, int ppp, MultiplexDeltaMasses ms, int msi) :
charge_(c), peaks_per_peptide_(ppp), mass_shifts_(std::move(ms)), mass_shift_index_(msi)
{
// generate m/z shifts
for (unsigned i = 0; i < mass_shifts_.getDeltaMasses().size(); ++i)
{
for (int j = 0; j < peaks_per_peptide_; ++j)
{
const std::vector<MultiplexDeltaMasses::DeltaMass>& delta_masses = mass_shifts_.getDeltaMasses();
mz_shifts_.push_back((delta_masses[i].delta_mass + j * Constants::C13C12_MASSDIFF_U) / charge_);
}
}
}
  // Returns the charge state of this pattern.
  int MultiplexIsotopicPeakPattern::getCharge() const
  {
    return charge_;
  }
  // Returns the number of isotopic peaks considered per peptide.
  int MultiplexIsotopicPeakPattern::getPeaksPerPeptide() const
  {
    return peaks_per_peptide_;
  }
  // Returns the set of mass shifts.
  // NOTE(review): returns by value, i.e. copies the MultiplexDeltaMasses
  // (return type is fixed by the header); read-only callers could use
  // getMassShiftAt()/getMassShiftCount() instead.
  MultiplexDeltaMasses MultiplexIsotopicPeakPattern::getMassShifts() const
  {
    return mass_shifts_;
  }
  // Returns the index of this mass-shift set within the full list of patterns.
  int MultiplexIsotopicPeakPattern::getMassShiftIndex() const
  {
    return mass_shift_index_;
  }
  // Returns the number of mass shifts, i.e. the number of peptides in the pattern.
  unsigned MultiplexIsotopicPeakPattern::getMassShiftCount() const
  {
    return mass_shifts_.getDeltaMasses().size();
  }
  // Returns the delta mass at index i (no bounds checking).
  double MultiplexIsotopicPeakPattern::getMassShiftAt(size_t i) const
  {
    return mass_shifts_.getDeltaMasses()[i].delta_mass;
  }
  // Returns the m/z shift at index i (no bounds checking).
  double MultiplexIsotopicPeakPattern::getMZShiftAt(size_t i) const
  {
    return mz_shifts_[i];
  }
  // Returns the total number of m/z shifts
  // (= number of delta masses * peaks per peptide, see constructor).
  unsigned MultiplexIsotopicPeakPattern::getMZShiftCount() const
  {
    return mz_shifts_.size();
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/MultiplexFilteringProfile.cpp | .cpp | 17,318 | 428 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Lars Nilse $
// $Authors: Lars Nilse $
// --------------------------------------------------------------------------
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/KERNEL/BaseFeature.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/IsotopeDistribution.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/CoarseIsotopePatternGenerator.h>
#include <OpenMS/PROCESSING/CENTROIDING/PeakPickerHiRes.h>
#include <OpenMS/FEATUREFINDER/MultiplexFilteringProfile.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <sstream>
#include <utility>
//#define DEBUG_FFMULTIPLEX
using namespace std;
namespace OpenMS
{
MultiplexFilteringProfile::MultiplexFilteringProfile(MSExperiment& exp_profile, const MSExperiment& exp_centroided, const std::vector<std::vector<PeakPickerHiRes::PeakBoundary> >& boundaries, const std::vector<MultiplexIsotopicPeakPattern>& patterns, int isotopes_per_peptide_min, int isotopes_per_peptide_max, double intensity_cutoff, double rt_band, double mz_tolerance, bool mz_tolerance_unit, double peptide_similarity, double averagine_similarity, double averagine_similarity_scaling, String averagine_type) :
MultiplexFiltering(exp_centroided, patterns, isotopes_per_peptide_min, isotopes_per_peptide_max, intensity_cutoff, rt_band, mz_tolerance, mz_tolerance_unit, peptide_similarity, averagine_similarity, averagine_similarity_scaling, std::move(averagine_type))
{
// initialise peak boundaries
// In the MultiplexFiltering() constructor we initialise the centroided experiment exp_centroided_.
// (We run a simple intensity filter. Peaks below the intensity cutoff can be discarded right from the start.)
// Now we still need to discard boundaries of low intensity peaks, in order to preserve the one-to-one mapping between peaks and boundaries.
boundaries_.reserve(boundaries.size());
// loop over spectra and boundaries
for (const auto &it_rt : exp_centroided)
{
size_t idx_rt = &it_rt - &exp_centroided[0];
// new boundaries of a single spectrum
std::vector<PeakPickerHiRes::PeakBoundary> boundaries_temp;
// loop over m/z peaks and boundaries
for (const auto &it_mz : it_rt)
{
size_t idx_mz = &it_mz - &it_rt[0];
if (it_mz.getIntensity() > intensity_cutoff_)
{
boundaries_temp.push_back(boundaries[idx_rt][idx_mz]);
// Check consistency of peaks and their peak boundaries, i.e. check that the peak lies in the boundary interval.
if (boundaries[idx_rt][idx_mz].mz_min > it_mz.getMZ() || it_mz.getMZ() > boundaries[idx_rt][idx_mz].mz_max)
{
throw Exception::InvalidRange(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION);
}
}
}
boundaries_.push_back(boundaries_temp);
}
if (exp_profile.size() != exp_centroided.size())
{
stringstream stream;
stream << "Profile and centroided data do not contain same number of spectra. (";
stream << exp_profile.size();
stream << "!=";
stream << exp_centroided.size();
stream << ")";
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, stream.str());
}
if (exp_centroided.size() != boundaries.size())
{
stringstream stream;
stream << "Centroided data and the corresponding list of peak boundaries do not contain same number of spectra. (";
stream << exp_centroided.size();
stream << "!=";
stream << boundaries.size();
stream << ")";
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, stream.str());
}
// spline interpolate the profile data
for (const auto& spectrum : exp_profile)
{
exp_spline_profile_.emplace_back(spectrum);
}
// TODO: Constructing the navigators here instead in the beginning of the filter() method results in segmentation faults. Why?
}
  // Runs every m/z shift pattern over the (white) centroided data, validates
  // candidate peaks against the spline-interpolated profile data, and returns
  // one MultiplexFilteredMSExperiment (all peaks passing all filters) per pattern.
  vector<MultiplexFilteredMSExperiment> MultiplexFilteringProfile::filter()
  {
    // progress logger
    unsigned progress = 0;
    startProgress(0, patterns_.size() * exp_spline_profile_.size(), "filtering LC-MS data");
    // list of filter results for each peak pattern
    std::vector<MultiplexFilteredMSExperiment> filter_results;
#ifdef DEBUG_FFMULTIPLEX
    // clock for monitoring run time performance
    unsigned int start = clock();
#endif
    // construct navigators for all spline spectra
    std::vector<SplineInterpolatedPeaks::Navigator> navigators;
    for (SplineInterpolatedPeaks& spl : exp_spline_profile_)
    {
      SplineInterpolatedPeaks::Navigator nav = spl.getNavigator();
      navigators.push_back(nav);
    }
    // loop over all patterns
    for (unsigned pattern_idx = 0; pattern_idx < patterns_.size(); ++pattern_idx)
    {
      // current pattern
      MultiplexIsotopicPeakPattern pattern = patterns_[pattern_idx];
      // data structure storing peaks which pass all filters
      MultiplexFilteredMSExperiment result;
      // update white experiment
      // (peaks blacklisted by earlier patterns are excluded from this pass)
      updateWhiteMSExperiment_();
      // loop over spectra
      // loop simultaneously over RT in the spline interpolated profile and (white) centroided experiment (including peak boundaries)
      for (const auto &it_rt : exp_centroided_white_)
      {
        // retention time
        double rt = it_rt.getRT();
        // spectral index in exp_centroided_white_, boundaries_ and exp_spline_profile_
        // (derived by pointer difference; all three containers run in parallel)
        size_t idx_rt = &it_rt - &exp_centroided_white_[0];
        // skip empty spectra
        if (it_rt.empty() || boundaries_[idx_rt].empty() || exp_spline_profile_[idx_rt].size() == 0)
        {
          continue;
        }
        setProgress(++progress);
        MSExperiment::ConstIterator it_rt_picked_band_begin = exp_centroided_white_.RTBegin(rt - rt_band_/2);
        MSExperiment::ConstIterator it_rt_picked_band_end = exp_centroided_white_.RTEnd(rt + rt_band_/2);
        // loop over mz
        // peaks of the current spectrum are filtered in parallel; writes to
        // shared state happen only inside the critical section further below
#pragma omp parallel for
        for (SignedSize s = 0; s < (SignedSize) it_rt.size(); s++)
        {
          double mz = it_rt[s].getMZ();
          MultiplexFilteredPeak peak(mz, rt, exp_centroided_mapping_[idx_rt][s], idx_rt);
          if (!(filterPeakPositions_(mz, exp_centroided_white_.begin(), it_rt_picked_band_begin, it_rt_picked_band_end, pattern, peak)))
          {
            continue;
          }
          size_t mz_idx = exp_centroided_mapping_[idx_rt][s];
          double peak_min = boundaries_[idx_rt][mz_idx].mz_min;
          double peak_max = boundaries_[idx_rt][mz_idx].mz_max;
          //double rt_peak = peak.getRT();
          double mz_peak = peak.getMZ();
          std::multimap<size_t, MultiplexSatelliteCentroided > satellites = peak.getSatellites();
          // Arrangement of peaks looks promising. Now scan through the spline fitted profile data around the peak i.e. from peak boundary to peak boundary.
          for (double mz_profile = peak_min; mz_profile < peak_max; mz_profile = navigators[idx_rt].getNextPos(mz_profile))
          {
            // determine m/z shift relative to the centroided peak at which the profile data will be sampled
            double mz_shift = mz_profile - mz_peak;
            std::multimap<size_t, MultiplexSatelliteProfile > satellites_profile;
            // construct the set of spline-interpolated satellites for this specific mz_profile
            for (const auto &satellite_it : satellites)
            {
              // find indices of the peak
              size_t rt_idx = (satellite_it.second).getRTidx();
              size_t mz_idx = (satellite_it.second).getMZidx();
              // find peak itself
              // NOTE(review): this it_rt shadows the outer spectrum loop variable
              MSExperiment::ConstIterator it_rt = exp_centroided_.begin();
              std::advance(it_rt, rt_idx);
              MSSpectrum::ConstIterator it_mz = it_rt->begin();
              std::advance(it_mz, mz_idx);
              double rt_satellite = it_rt->getRT();
              double mz_satellite = it_mz->getMZ();
              // determine m/z and corresponding intensity
              // NOTE(review): this mz shadows the outer loop's mz
              double mz = mz_satellite + mz_shift;
              double intensity = navigators[rt_idx].eval(mz);
              satellites_profile.insert(std::make_pair(satellite_it.first, MultiplexSatelliteProfile(rt_satellite, mz, intensity)));
            }
            if (!(filterAveragineModel_(pattern, peak, satellites_profile)))
            {
              continue;
            }
            if (!(filterPeptideCorrelation_(pattern, satellites_profile)))
            {
              continue;
            }
            /**
             * All filters passed.
             */
            // add the satellite data points to the peak
            for (const auto &it : satellites_profile)
            {
              peak.addSatelliteProfile(it.second, it.first);
            }
          }
          // If some satellite data points passed all filters, we can add the peak to the filter result.
          if (peak.sizeProfile() > 0)
          {
#pragma omp critical
            {
              result.addPeak(peak);
              blacklistPeak_(peak, pattern_idx);
            }; // (stray ';' is a harmless empty statement)
          }
        }
      }
#ifdef DEBUG_FFMULTIPLEX
      // write filtered peaks to debug output
      std::stringstream debug_out;
      debug_out << "filter_result_" << pattern_idx << ".consensusXML";
      result.writeDebugOutput(exp_centroided_, debug_out.str());
#endif
      // add results of this pattern to list
      filter_results.push_back(result);
    }
#ifdef DEBUG_FFMULTIPLEX
    // clock for monitoring run time performance
    OPENMS_LOG_INFO << "\nThe filtering step of the algorithm took " << (float)(clock()-start)/CLOCKS_PER_SEC << " seconds.\n\n";
#endif
    endProgress();
    return filter_results;
  }
  // Returns the (intensity-filtered) peak boundaries, one vector per spectrum.
  // Non-const reference: callers may modify the boundaries in place.
  std::vector<std::vector<PeakPickerHiRes::PeakBoundary> >& MultiplexFilteringProfile::getPeakBoundaries()
  {
    return boundaries_;
  }
bool MultiplexFilteringProfile::filterAveragineModel_(const MultiplexIsotopicPeakPattern& pattern, const MultiplexFilteredPeak& peak, const std::multimap<size_t, MultiplexSatelliteProfile >& satellites_profile) const
{
// construct averagine distribution
// Note that the peptide(s) are very close in mass. We therefore calculate the averagine distribution only once (for the lightest peptide).
double mass = peak.getMZ() * pattern.getCharge();
CoarseIsotopePatternGenerator solver(isotopes_per_peptide_max_);
IsotopeDistribution distribution;
if (averagine_type_ == "peptide")
{
distribution = solver.estimateFromPeptideWeight(mass);
}
else if (averagine_type_ == "RNA")
{
distribution = solver.estimateFromRNAWeight(mass);
}
else if (averagine_type_ == "DNA")
{
distribution = solver.estimateFromDNAWeight(mass);
}
else
{
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Invalid averagine type.");
}
// loop over peptides
for (size_t peptide = 0; peptide < pattern.getMassShiftCount(); ++peptide)
{
// intensities for the Pearson and Spearman rank correlations
std::vector<double> intensities_model;
std::vector<double> intensities_data;
// loop over isotopes i.e. mass traces of the peptide
for (size_t isotope = 0; isotope < isotopes_per_peptide_max_; ++isotope)
{
size_t idx = peptide * isotopes_per_peptide_max_ + isotope;
std::pair<std::multimap<size_t, MultiplexSatelliteProfile >::const_iterator, std::multimap<size_t, MultiplexSatelliteProfile >::const_iterator> satellites;
satellites = satellites_profile.equal_range(idx);
int count = 0;
double sum_intensities = 0;
// loop over satellites in mass trace
for (std::multimap<size_t, MultiplexSatelliteProfile >::const_iterator it = satellites.first; it != satellites.second; ++it)
{
++count;
sum_intensities += (it->second).getIntensity();
}
if (count > 0)
{
intensities_model.push_back(distribution[isotope].getIntensity());
intensities_data.push_back(sum_intensities/count);
}
}
// Use a more restrictive averagine similarity when we are searching for peptide singlets.
double similarity;
if (pattern.getMassShiftCount() == 1)
{
// We are detecting peptide singlets.
similarity = averagine_similarity_ + averagine_similarity_scaling_*(1 - averagine_similarity_);
}
else
{
// We are detecting peptide doublets or triplets or ...
similarity = averagine_similarity_;
}
// Calculate Pearson and Spearman rank correlations
if ((intensities_model.size() < isotopes_per_peptide_min_) || (intensities_data.size() < isotopes_per_peptide_min_))
{
throw Exception::InvalidSize(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, 0, "intensity vectors too small for correlation calculation");
}
double correlation_Pearson = OpenMS::Math::pearsonCorrelationCoefficient(intensities_model.begin(), intensities_model.end(), intensities_data.begin(), intensities_data.end());
double correlation_Spearman = OpenMS::Math::rankCorrelationCoefficient(intensities_model.begin(), intensities_model.end(), intensities_data.begin(), intensities_data.end());
if ((correlation_Pearson < similarity) || (correlation_Spearman < similarity))
{
return false;
}
}
return true;
}
  // Checks that the intensity profiles of every pair of peptides in the
  // pattern correlate (Pearson AND Spearman) at or above <peptide_similarity_>.
  // Always passes for singlet patterns.
  bool MultiplexFilteringProfile::filterPeptideCorrelation_(const MultiplexIsotopicPeakPattern& pattern,
                                                            const std::multimap<size_t, MultiplexSatelliteProfile >& satellites_profile) const
  {
    if (pattern.getMassShiftCount() < 2)
    {
      // filter irrelevant for singlet feature detection
      return true;
    }
    // We will calculate the correlations between all possible peptide combinations.
    // For example (light, medium), (light, heavy) and (medium, heavy) in the case of triplets.
    // If one of the correlations is below the <peptide_similarity_> limit, the filter fails.
    // loop over the first peptide
    for (size_t peptide_1 = 0; peptide_1 < pattern.getMassShiftCount() - 1; ++peptide_1)
    {
      // loop over the second peptide
      for (size_t peptide_2 = peptide_1 + 1; peptide_2 < pattern.getMassShiftCount(); ++peptide_2)
      {
        std::vector<double> intensities_1;
        std::vector<double> intensities_2;
        // loop over isotopes i.e. mass traces of both peptides
        for (size_t isotope = 0; isotope < isotopes_per_peptide_max_; ++isotope)
        {
          size_t idx_1 = peptide_1 * isotopes_per_peptide_max_ + isotope;
          size_t idx_2 = peptide_2 * isotopes_per_peptide_max_ + isotope;
          auto satellites_1 = satellites_profile.equal_range(idx_1);
          auto satellites_2 = satellites_profile.equal_range(idx_2);
          // pair up satellites of the two traces that share the same RT
          // loop over satellites in mass trace 1
          for (auto satellite_it_1 = satellites_1.first; satellite_it_1 != satellites_1.second; ++satellite_it_1) //OMS_CODING_TEST_EXCLUDE
          {
            double rt_1 = (satellite_it_1->second).getRT();
            // loop over satellites in mass trace 2
            for (auto satellite_it_2 = satellites_2.first; satellite_it_2 != satellites_2.second; ++satellite_it_2) //OMS_CODING_TEST_EXCLUDE
            {
              double rt_2 = (satellite_it_2->second).getRT();
              // NOTE(review): exact double comparison — presumably both RTs stem
              // from the same spectrum grid so equality is well-defined; confirm.
              if (rt_1 == rt_2)
              {
                intensities_1.push_back((satellite_it_1->second).getIntensity());
                intensities_2.push_back((satellite_it_2->second).getIntensity());
              }
            }
          }
        }
        // It is well possible that no corresponding satellite peaks exist, in which case the filter fails.
        if ((intensities_1.empty()) || (intensities_2.empty()))
        {
          return false;
        }
        // calculate correlation between peak intensities in peptides 1 and 2
        double correlation_Pearson = OpenMS::Math::pearsonCorrelationCoefficient(intensities_1.begin(), intensities_1.end(), intensities_2.begin(), intensities_2.end());
        double correlation_Spearman = OpenMS::Math::rankCorrelationCoefficient(intensities_1.begin(), intensities_1.end(), intensities_2.begin(), intensities_2.end());
        if ((correlation_Pearson < peptide_similarity_) || (correlation_Spearman < peptide_similarity_))
        //if (correlation_Pearson < peptide_similarity_)
        {
          return false;
        }
      }
    }
    return true;
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/MultiplexFiltering.cpp | .cpp | 24,025 | 586 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Lars Nilse $
// $Authors: Lars Nilse $
// --------------------------------------------------------------------------
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/KERNEL/BaseFeature.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/IsotopeDistribution.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/CoarseIsotopePatternGenerator.h>
#include <OpenMS/PROCESSING/CENTROIDING/PeakPickerHiRes.h>
#include <OpenMS/FEATUREFINDER/MultiplexFiltering.h>
#include <OpenMS/FEATUREFINDER/MultiplexIsotopicPeakPattern.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <utility>
using namespace std;
namespace OpenMS
{
MultiplexFiltering::MultiplexFiltering(const MSExperiment& exp_centroided, const std::vector<MultiplexIsotopicPeakPattern>& patterns,
int isotopes_per_peptide_min, int isotopes_per_peptide_max, double intensity_cutoff, double rt_band,
double mz_tolerance, bool mz_tolerance_unit, double peptide_similarity, double averagine_similarity,
double averagine_similarity_scaling, String averagine_type) :
patterns_(patterns), isotopes_per_peptide_min_(isotopes_per_peptide_min), isotopes_per_peptide_max_(isotopes_per_peptide_max),
intensity_cutoff_(intensity_cutoff), rt_band_(rt_band), mz_tolerance_(mz_tolerance), mz_tolerance_unit_in_ppm_(mz_tolerance_unit),
peptide_similarity_(peptide_similarity), averagine_similarity_(averagine_similarity),
averagine_similarity_scaling_(averagine_similarity_scaling), averagine_type_(std::move(averagine_type))
{
// initialise experiment exp_centroided_
// Any peaks below the intensity cutoff cannot be relevant. They are therefore removed resulting in reduced memory footprint and runtime.
exp_centroided_.reserve(exp_centroided.getNrSpectra());
// loop over spectra
for (const auto &it_rt : exp_centroided)
{
MSSpectrum spectrum;
spectrum.setRT(it_rt.getRT());
// loop over m/z
for (const auto &it_mz : it_rt)
{
if (it_mz.getIntensity() > intensity_cutoff_)
{
spectrum.push_back(it_mz);
}
}
exp_centroided_.addSpectrum(std::move(spectrum));
}
exp_centroided_.updateRanges();
exp_centroided_.sortSpectra();
// initialise blacklist <blacklist_>
blacklist_.reserve(exp_centroided_.getNrSpectra());
// loop over spectra
for (const auto &it_rt : exp_centroided_)
{
std::vector<int> blacklist_spectrum(it_rt.size(), -1);
blacklist_.push_back(blacklist_spectrum);
}
}
  // Returns the intensity-filtered centroided experiment (see constructor).
  // Non-const reference: callers may modify it in place.
  MSExperiment& MultiplexFiltering::getCentroidedExperiment()
  {
    return exp_centroided_;
  }
void MultiplexFiltering::updateWhiteMSExperiment_()
{
// reset both the white MS experiment and the corresponding mapping to the complete i.e. original MS experiment
exp_centroided_white_.clear(true);
exp_centroided_mapping_.clear();
// loop over spectra
for (const auto &it_rt : exp_centroided_)
{
MSSpectrum spectrum_picked_white;
spectrum_picked_white.setRT(it_rt.getRT());
std::map<int, int> mapping_spectrum;
int count = 0;
// loop over m/z
for (const auto &it_mz : it_rt)
{
if (blacklist_[&it_rt - &exp_centroided_[0]][&it_mz - &it_rt[0]] == -1)
{
spectrum_picked_white.push_back(it_mz);
mapping_spectrum[count] = &it_mz - &it_rt[0];
++count;
}
}
exp_centroided_white_.addSpectrum(spectrum_picked_white);
exp_centroided_mapping_.push_back(mapping_spectrum);
}
exp_centroided_white_.updateRanges();
}
int MultiplexFiltering::checkForSignificantPeak_(double mz, double mz_tolerance, MSExperiment::ConstIterator& it_rt, double intensity_first_peak) const
{
// Check that there is a peak.
int mz_idx = it_rt->findNearest(mz, mz_tolerance);
if (mz_idx != -1)
{
// Any peak with an intensity greater than <threshold>*<intensity_first_peak> is significant.
double threshold = 0.3;
MSSpectrum::ConstIterator it_mz = it_rt->begin();
std::advance(it_mz, mz_idx);
double intensity = it_mz->getIntensity();
// Check that the peak is significant.
if (intensity > threshold * intensity_first_peak)
{
// There is a high-intensity peak at the position mz.
return mz_idx;
}
}
return -1;
}
  // Checks whether the m/z shift pattern <pattern> is present around the
  // candidate peak <peak> (at m/z <mz>), collecting the supporting satellite
  // peaks into <peak> along the way. Also rejects candidates with a significant
  // "zeroth" peak left of the mono-isotopic one, and candidates whose charge
  // state was likely mistaken (e.g. a 2+ pattern that is really 4+/6+).
  // @param it_rt_begin       begin of the (white) experiment, used to convert iterators to indices
  // @param it_rt_band_begin  first spectrum of the RT band around the candidate
  // @param it_rt_band_end    end of the RT band around the candidate
  // @return true if the pattern is supported and no counter-evidence was found
  bool MultiplexFiltering::filterPeakPositions_(double mz, const MSExperiment::ConstIterator& it_rt_begin, const MSExperiment::ConstIterator& it_rt_band_begin, const MSExperiment::ConstIterator& it_rt_band_end, const MultiplexIsotopicPeakPattern& pattern, MultiplexFilteredPeak& peak) const
  {
    // check if peak position is blacklisted
    // i.e. -1 = white or 0 = mono-isotopic peak of the lightest (or only) peptide are ok.
    if (blacklist_[peak.getRTidx()][peak.getMZidx()] > 0)
    {
      return false;
    }
    // determine absolute m/z tolerance in Th
    double mz_tolerance;
    if (mz_tolerance_unit_in_ppm_)
    {
      // m/z tolerance in ppm
      // Note that the absolute tolerance varies minimally within an m/z pattern.
      // Hence we calculate it only once here.
      mz_tolerance = mz * mz_tolerance_ * 1e-6;
    }
    else
    {
      // m/z tolerance in Th
      mz_tolerance = mz_tolerance_;
    }
    // The mass traces of the peptide(s) form a m/z shift pattern. Starting with the mono-isotopic mass trace of each peptide,
    // how long is the series of m/z shifts until the first expected mass trace is missing? We want to see
    // at least isotopes_per_peptide_min_ of these m/z shifts in each peptide. Note that we need to demand subsequent(!) mass traces
    // to be present. Otherwise it would be easy to mistake say a 2+ peptide for a 4+ peptide.
    size_t length = 0;
    bool interrupted = false;
    // loop over isotopes i.e. mass traces within the peptide
    for (size_t isotope = 0; isotope < isotopes_per_peptide_max_; ++isotope)
    {
      bool found_in_all_peptides = true;
      // loop over peptides
      for (size_t peptide = 0; peptide < pattern.getMassShiftCount(); ++peptide)
      {
        // calculate m/z shift index in pattern
        size_t mz_shift_idx = peptide * isotopes_per_peptide_max_ + isotope;
        double mz_shift = pattern.getMZShiftAt(mz_shift_idx);
        bool found = false;
        // loop over spectra in RT band
        for (MSExperiment::ConstIterator it_rt = it_rt_band_begin; it_rt < it_rt_band_end; ++it_rt)
        {
          int i = it_rt->findNearest(mz + mz_shift, mz_tolerance);
          if (i != -1)
          {
            // Note that as primary peaks, satellite peaks are also restricted by the blacklist.
            // The peak can either be pure white i.e. untouched, or have been seen earlier as part of the same mass trace.
            size_t rt_idx = it_rt - it_rt_begin;
            size_t mz_idx = exp_centroided_mapping_.at(it_rt - it_rt_begin).at(i);
            // Check that the peak has not been blacklisted and is not already in the satellite set.
            if (((blacklist_[rt_idx][mz_idx] == -1) || (blacklist_[rt_idx][mz_idx] == static_cast<int>(mz_shift_idx))) && (!(peak.checkSatellite(rt_idx, mz_idx))))
            {
              found = true;
              peak.addSatellite(rt_idx, mz_idx, mz_shift_idx);
            }
          }
        }
        if (!found)
        {
          found_in_all_peptides = false;
        }
      }
      // count only the uninterrupted run of isotopes present in all peptides
      if (found_in_all_peptides && (!interrupted))
      {
        ++length;
      }
      else
      {
        interrupted = true;
        if (length < isotopes_per_peptide_min_)
        {
          return false;
        }
      }
    }
    // Check that there is no significant peak (aka zeroth peak) to the left of the mono-isotopic peak (aka first peak).
    // Further check that there is no mistaken charge state identity. For example, check that a 2+ pattern isn't really a 4+ or 6+ pattern.
    // Let's use the double m/z tolerance when checking for these peaks.
    // loop over peptides
    for (size_t peptide = 0; peptide < pattern.getMassShiftCount(); ++peptide)
    {
      MSExperiment::ConstIterator it_rt = it_rt_begin;
      std::advance(it_rt, peak.getRTidx());
      // Check that there is a first i.e. mono-isotopic peak for this peptide.
      double mz_first_peak = peak.getMZ() + pattern.getMZShiftAt(peptide * isotopes_per_peptide_max_);
      int mz_idx_first_peak = it_rt->findNearest(mz_first_peak, mz_tolerance);
      if (mz_idx_first_peak != -1)
      {
        MSSpectrum::ConstIterator it_mz_first_peak = it_rt->begin();
        std::advance(it_mz_first_peak, mz_idx_first_peak);
        double intensity_first_peak = it_mz_first_peak->getIntensity();
        // NOTE(review): this local mz shadows the function parameter mz
        double mz;
        // Check if there is a zeroth peak.
        // (one isotope spacing to the left of the mono-isotopic peak)
        mz = peak.getMZ() + 2 * pattern.getMZShiftAt(peptide * isotopes_per_peptide_max_) - pattern.getMZShiftAt(peptide * isotopes_per_peptide_max_ + 1);
        int mz_idx = checkForSignificantPeak_(mz, 2 * mz_tolerance, it_rt, intensity_first_peak);
        if (mz_idx != -1)
        {
          // So there is a significant peak to the left. This is only a problem, if this peak is not part of the pattern which we currently detect.
          if (!(peak.checkSatellite(peak.getRTidx(), mz_idx)))
          {
            return false;
          }
        }
        // Check mistaken charge state identities
        // We are searching the patterns in the order of the most common occurrence (and not decreasing charge state).
        // That can lead to mistaken charge state identities. Here we check that this is not the case.
        if (pattern.getCharge() == 2)
        {
          // Is the 2+ pattern really a 4+ pattern?
          // (a peak halfway between the first two isotopes would indicate half the spacing, i.e. double the charge)
          mz = peak.getMZ() + pattern.getMZShiftAt(peptide * isotopes_per_peptide_max_)/2 + pattern.getMZShiftAt(peptide * isotopes_per_peptide_max_ + 1)/2;
          if (checkForSignificantPeak_(mz, 2 * mz_tolerance, it_rt, intensity_first_peak) != -1)
          {
            return false;
          }
          // Is the 2+ pattern really a 6+ pattern?
          mz = peak.getMZ() + pattern.getMZShiftAt(peptide * isotopes_per_peptide_max_)*2/3 + pattern.getMZShiftAt(peptide * isotopes_per_peptide_max_ + 1)/3;
          if (checkForSignificantPeak_(mz, 2 * mz_tolerance, it_rt, intensity_first_peak) != -1)
          {
            return false;
          }
        }
        if (pattern.getCharge() == 3)
        {
          // Is the 3+ pattern really a 6+ pattern?
          mz = peak.getMZ() + pattern.getMZShiftAt(peptide * isotopes_per_peptide_max_)/2 + pattern.getMZShiftAt(peptide * isotopes_per_peptide_max_ + 1)/2;
          if (checkForSignificantPeak_(mz, 2 * mz_tolerance, it_rt, intensity_first_peak) != -1)
          {
            return false;
          }
        }
        if (pattern.getCharge() == 1)
        {
          for (int c = 2; c < 7; ++c)
          {
            // Is the 1+ pattern really a c+ pattern?
            // (In theory, any charge state c >= 2+ could be mistaken as a 1+. For the sake of run time performance, we only check up to 7+.
            // If we see in any dataset significant number of mistakes for c >= 8+, we will modify this part of the code.)
            mz = peak.getMZ() + pattern.getMZShiftAt(peptide * isotopes_per_peptide_max_)*(c-1)/c + pattern.getMZShiftAt(peptide * isotopes_per_peptide_max_ + 1)/c;
            if (checkForSignificantPeak_(mz, 2 * mz_tolerance, it_rt, intensity_first_peak) != -1)
            {
              return false;
            }
          }
        }
      }
    }
    // Automatically length >= isotopes_per_peptide_min_
    return true;
  }
  // Blacklists all centroided peaks lying on the mass traces of the detected peak <peak>,
  // so that subsequently searched (lower-priority) patterns cannot re-use them.
  // <pattern_idx> selects the pattern in patterns_ whose m/z shifts define the trace positions.
  void MultiplexFiltering::blacklistPeak_(const MultiplexFilteredPeak& peak, unsigned pattern_idx)
  {
    // determine absolute m/z tolerance in Th
    double mz_tolerance;
    if (mz_tolerance_unit_in_ppm_)
    {
      // m/z tolerance in ppm
      // Note that the absolute tolerance varies minimally within an m/z pattern.
      // Hence we calculate it only once here.
      mz_tolerance = peak.getMZ() * mz_tolerance_ * 1e-6;
    }
    else
    {
      // m/z tolerance in Th
      mz_tolerance = mz_tolerance_;
    }
    // Determine the RT boundaries for each of the mass traces.
    std::multimap<size_t, MultiplexSatelliteCentroided > satellites = peak.getSatellites();
    // <rt_boundaries> is a map from the mass trace index to the spectrum indices for beginning and end of the mass trace.
    std::map<size_t, std::pair<size_t, size_t> > rt_boundaries;
    // loop over satellites
    for (const auto &it : satellites)
    {
      size_t idx_masstrace = it.first; // mass trace index i.e. the index within the peptide multiplet pattern
      if (rt_boundaries.find(idx_masstrace) == rt_boundaries.end())
      {
        // That's the first satellite within this mass trace.
        rt_boundaries[idx_masstrace] = std::make_pair((it.second).getRTidx(), (it.second).getRTidx());
      }
      else
      {
        // We have seen a satellite of this mass trace before.
        size_t idx_min = std::min((it.second).getRTidx(), rt_boundaries[idx_masstrace].first);
        size_t idx_max = std::max((it.second).getRTidx(), rt_boundaries[idx_masstrace].second);
        rt_boundaries[idx_masstrace] = std::make_pair(idx_min, idx_max);
      }
    }
    // Blacklist all peaks along the mass traces
    // loop over mass traces (i.e. the mass trace boundaries)
    for (const auto &it : rt_boundaries)
    {
      // expected m/z position of this mass trace, relative to the detected peak
      double mz = peak.getMZ() + patterns_[pattern_idx].getMZShiftAt(it.first);
      // Extend the RT boundary by rt_band_ earlier
      // NOTE(review): the code extends by 2 * rt_band_ although the comment says rt_band_ — confirm which is intended.
      MSExperiment::ConstIterator it_rt_begin = exp_centroided_.begin() + (it.second).first;
      it_rt_begin = exp_centroided_.RTBegin(it_rt_begin->getRT() - 2 * rt_band_);
      // Extend the RT boundary by rt_band_ later
      MSExperiment::ConstIterator it_rt_end = exp_centroided_.begin() + (it.second).second;
      it_rt_end = exp_centroided_.RTBegin(it_rt_end->getRT() + 2 * rt_band_);
      // prepare for loop
      if (it_rt_end != exp_centroided_.end())
      {
        ++it_rt_end;
      }
      // loop over RT along the mass trace
      for (MSExperiment::ConstIterator it_rt = it_rt_begin; it_rt < it_rt_end; ++it_rt)
      {
        // nearest centroided peak within tolerance (-1 if none)
        int idx_mz = it_rt->findNearest(mz, mz_tolerance);
        if (idx_mz != -1)
        {
          // blacklist entries: -1 = white, any isotope pattern index (it.first) = black
          blacklist_[it_rt - exp_centroided_.begin()][idx_mz] = it.first;
        }
      }
    }
  }
  // Exports the blacklist as an MSExperiment: for each spectrum, only the peaks
  // whose blacklist entry is not -1 (i.e. peaks already claimed by a pattern) are copied.
  MSExperiment MultiplexFiltering::getBlacklist()
  {
    MSExperiment exp_blacklist;
    // loop over spectra
    for (const auto &it_rt : exp_centroided_)
    {
      MSSpectrum spectrum_black;
      spectrum_black.setRT(it_rt.getRT());
      // loop over m/z
      for (const auto &it_mz : it_rt)
      {
        // transfer all peaks which are not white (i.e. not -1)
        // Note: the pointer differences recover the spectrum/peak indices; this
        // relies on spectra and peaks being stored contiguously.
        if (blacklist_[&it_rt - &exp_centroided_[0]][&it_mz - &it_rt[0]] != -1)
        {
          spectrum_black.push_back(it_mz);
        }
      }
      exp_blacklist.addSpectrum(spectrum_black);
    }
    exp_blacklist.updateRanges();
    return exp_blacklist;
  }
bool MultiplexFiltering::filterAveragineModel_(const MultiplexIsotopicPeakPattern& pattern, const MultiplexFilteredPeak& peak) const
{
// construct averagine distribution
double mass = peak.getMZ() * pattern.getCharge();
CoarseIsotopePatternGenerator solver(isotopes_per_peptide_max_);
IsotopeDistribution distribution;
if (averagine_type_ == "peptide")
{
distribution = solver.estimateFromPeptideWeight(mass);
}
else if (averagine_type_ == "RNA")
{
distribution = solver.estimateFromRNAWeight(mass);
}
else if (averagine_type_ == "DNA")
{
distribution = solver.estimateFromDNAWeight(mass);
}
else
{
throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Invalid averagine type.");
}
// loop over peptides
for (size_t peptide = 0; peptide < pattern.getMassShiftCount(); ++peptide)
{
// intensities for the Pearson and Spearman rank correlations
std::vector<double> intensities_model;
std::vector<double> intensities_data;
// loop over isotopes i.e. mass traces of the peptide
for (size_t isotope = 0; isotope < isotopes_per_peptide_max_; ++isotope)
{
size_t idx = peptide * isotopes_per_peptide_max_ + isotope;
std::pair<std::multimap<size_t, MultiplexSatelliteCentroided >::const_iterator, std::multimap<size_t, MultiplexSatelliteCentroided >::const_iterator> satellites;
satellites = peak.getSatellites().equal_range(idx);
int count = 0;
double sum_intensities = 0;
// loop over satellites in mass trace
for (std::multimap<size_t, MultiplexSatelliteCentroided >::const_iterator satellite_it = satellites.first; satellite_it != satellites.second; ++satellite_it)
{
// find indices of the peak
size_t rt_idx = (satellite_it->second).getRTidx();
size_t mz_idx = (satellite_it->second).getMZidx();
// find peak itself
MSExperiment::ConstIterator it_rt = exp_centroided_.begin();
std::advance(it_rt, rt_idx);
MSSpectrum::ConstIterator it_mz = it_rt->begin();
std::advance(it_mz, mz_idx);
++count;
sum_intensities += it_mz->getIntensity();
}
if (count > 0)
{
//intensities_model.push_back(distribution.getContainer()[isotope].second);
intensities_model.push_back(distribution[isotope].getIntensity());
intensities_data.push_back(sum_intensities/count);
}
}
// Use a more restrictive averagine similarity when we are searching for peptide singlets.
double similarity;
if (pattern.getMassShiftCount() == 1)
{
// We are detecting peptide singlets.
similarity = averagine_similarity_ + averagine_similarity_scaling_*(1 - averagine_similarity_);
}
else
{
// We are detecting peptide doublets or triplets or ...
similarity = averagine_similarity_;
}
// Calculate Pearson and Spearman rank correlations
if ((intensities_model.size() < isotopes_per_peptide_min_) || (intensities_data.size() < isotopes_per_peptide_min_))
{
throw Exception::InvalidSize(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, 0, "intensity vectors too small for correlation calculation");
}
double correlation_Pearson = OpenMS::Math::pearsonCorrelationCoefficient(intensities_model.begin(), intensities_model.end(), intensities_data.begin(), intensities_data.end());
double correlation_Spearman = OpenMS::Math::rankCorrelationCoefficient(intensities_model.begin(), intensities_model.end(), intensities_data.begin(), intensities_data.end());
if ((correlation_Pearson < similarity) || (correlation_Spearman < similarity))
{
return false;
}
}
return true;
}
  // Checks that the intensity profiles of all peptide partners in a multiplet correlate.
  // For every pair of peptides, peak intensities of corresponding mass traces at identical
  // RT positions are collected; both Pearson and Spearman rank correlations must reach
  // peptide_similarity_, otherwise the filter fails. Trivially passes for singlets.
  bool MultiplexFiltering::filterPeptideCorrelation_(const MultiplexIsotopicPeakPattern& pattern, const MultiplexFilteredPeak& peak) const
  {
    if (pattern.getMassShiftCount() < 2)
    {
      // filter irrelevant for singlet feature detection
      return true;
    }
    // We will calculate the correlations between all possible peptide combinations.
    // For example (light, medium), (light, heavy) and (medium, heavy) in the case of triplets.
    // If one of the correlations is below the <peptide_similarity_> limit, the filter fails.
    // loop over the first peptide
    for (size_t peptide_1 = 0; peptide_1 < pattern.getMassShiftCount() - 1; ++peptide_1)
    {
      // loop over the second peptide
      for (size_t peptide_2 = peptide_1 + 1; peptide_2 < pattern.getMassShiftCount(); ++peptide_2)
      {
        std::vector<double> intensities_1;
        std::vector<double> intensities_2;
        // loop over isotopes i.e. mass traces of both peptides
        for (size_t isotope = 0; isotope < isotopes_per_peptide_max_; ++isotope)
        {
          // indices of the corresponding mass traces in the two peptides
          size_t idx_1 = peptide_1 * isotopes_per_peptide_max_ + isotope;
          size_t idx_2 = peptide_2 * isotopes_per_peptide_max_ + isotope;
          std::pair<std::multimap<size_t, MultiplexSatelliteCentroided >::const_iterator, std::multimap<size_t, MultiplexSatelliteCentroided >::const_iterator> satellites_1;
          std::pair<std::multimap<size_t, MultiplexSatelliteCentroided >::const_iterator, std::multimap<size_t, MultiplexSatelliteCentroided >::const_iterator> satellites_2;
          satellites_1 = peak.getSatellites().equal_range(idx_1);
          satellites_2 = peak.getSatellites().equal_range(idx_2);
          // loop over satellites in mass trace 1
          for (std::multimap<size_t, MultiplexSatelliteCentroided >::const_iterator satellite_it_1 = satellites_1.first; satellite_it_1 != satellites_1.second; ++satellite_it_1)
          {
            size_t rt_idx_1 = (satellite_it_1->second).getRTidx();
            // loop over satellites in mass trace 2
            for (std::multimap<size_t, MultiplexSatelliteCentroided >::const_iterator satellite_it_2 = satellites_2.first; satellite_it_2 != satellites_2.second; ++satellite_it_2)
            {
              size_t rt_idx_2 = (satellite_it_2->second).getRTidx();
              // only pair up satellites from the same spectrum (same RT)
              if (rt_idx_1 == rt_idx_2)
              {
                size_t mz_idx_1 = (satellite_it_1->second).getMZidx();
                size_t mz_idx_2 = (satellite_it_2->second).getMZidx();
                // find peak itself
                MSExperiment::ConstIterator it_rt_1 = exp_centroided_.begin();
                MSExperiment::ConstIterator it_rt_2 = exp_centroided_.begin();
                std::advance(it_rt_1, rt_idx_1);
                std::advance(it_rt_2, rt_idx_2);
                MSSpectrum::ConstIterator it_mz_1 = it_rt_1->begin();
                MSSpectrum::ConstIterator it_mz_2 = it_rt_2->begin();
                std::advance(it_mz_1, mz_idx_1);
                std::advance(it_mz_2, mz_idx_2);
                intensities_1.push_back(it_mz_1->getIntensity());
                intensities_2.push_back(it_mz_2->getIntensity());
              }
            }
          }
        }
        // It is well possible that no corresponding satellite peaks exist, in which case the filter fails.
        if ((intensities_1.empty()) || (intensities_2.empty()))
        {
          return false;
        }
        // calculate correlation between peak intensities in peptides 1 and 2
        double correlation_Pearson = OpenMS::Math::pearsonCorrelationCoefficient(intensities_1.begin(), intensities_1.end(), intensities_2.begin(), intensities_2.end());
        double correlation_Spearman = OpenMS::Math::rankCorrelationCoefficient(intensities_1.begin(), intensities_1.end(), intensities_2.begin(), intensities_2.end());
        if ((correlation_Pearson < peptide_similarity_) || (correlation_Spearman < peptide_similarity_))
        //if (correlation_Pearson < peptide_similarity_)
        {
          return false;
        }
      }
    }
    return true;
  }
}
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/ElutionModelFitter.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/TransformationModelLinear.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/FEATUREFINDER/EGHTraceFitter.h>
#include <OpenMS/FEATUREFINDER/GaussTraceFitter.h>
using namespace OpenMS;
using namespace std;
// Constructor: registers all algorithm parameters (with defaults, restrictions
// and documentation) and copies the defaults into the active parameter set.
ElutionModelFitter::ElutionModelFitter():
  DefaultParamHandler("ElutionModelFitter")
{
  // common value lists for string parameters:
  std::vector<std::string> truefalse = {"true","false"};
  std::vector<std::string> advanced = {"advanced"};
  defaults_.setValue("asymmetric", "false", "Fit an asymmetric (exponential-Gaussian hybrid) model? By default a symmetric (Gaussian) model is used.");
  defaults_.setValidStrings("asymmetric", truefalse);
  defaults_.setValue("add_zeros", 0.2, "Add zero-intensity points outside the feature range to constrain the model fit. This parameter sets the weight given to these points during model fitting; '0' to disable.", advanced);
  defaults_.setMinFloat("add_zeros", 0.0);
  defaults_.setValue("unweighted_fit", "false", "Suppress weighting of mass traces according to theoretical intensities when fitting elution models", advanced);
  defaults_.setValidStrings("unweighted_fit", truefalse);
  defaults_.setValue("no_imputation", "false", "If fitting the elution model fails for a feature, set its intensity to zero instead of imputing a value from the initial intensity estimate", advanced);
  defaults_.setValidStrings("no_imputation", truefalse);
  defaults_.setValue("each_trace", "false", "Fit elution model to each individual mass trace", advanced);
  defaults_.setValidStrings("each_trace", truefalse);
  defaults_.setValue("check:min_area", 1.0, "Lower bound for the area under the curve of a valid elution model", advanced);
  defaults_.setMinFloat("check:min_area", 0.0);
  defaults_.setValue("check:boundaries", 0.5, "Time points corresponding to this fraction of the elution model height have to be within the data region used for model fitting", advanced);
  defaults_.setMinFloat("check:boundaries", 0.0);
  defaults_.setMaxFloat("check:boundaries", 1.0);
  defaults_.setValue("check:width", 10.0, "Upper limit for acceptable widths of elution models (Gaussian or EGH), expressed in terms of modified (median-based) z-scores. '0' to disable. Not applied to individual mass traces (parameter 'each_trace').", advanced);
  defaults_.setMinFloat("check:width", 0.0);
  defaults_.setValue("check:asymmetry", 10.0, "Upper limit for acceptable asymmetry of elution models (EGH only), expressed in terms of modified (median-based) z-scores. '0' to disable. Not applied to individual mass traces (parameter 'each_trace').", advanced);
  defaults_.setMinFloat("check:asymmetry", 0.0);
  defaults_.setSectionDescription("check", "Parameters for checking the validity of elution models (and rejecting them if necessary)");
  defaultsToParam_();
}
// Defaulted destructor, defined out-of-line in the .cpp.
ElutionModelFitter::~ElutionModelFitter() = default;
// Computes a goodness-of-fit measure for a fitted elution model: the mean
// relative deviation between model prediction and observed trace intensities,
// restricted to the RT overlap of the model fit and the data of the first
// trace, and normalized by the summed theoretical trace intensities.
// Assumes the model value is positive within [rt_start, rt_end] (no guard
// against division by zero) and that traces[0] is non-empty.
double ElutionModelFitter::calculateFitQuality_(const TraceFitter* fitter,
                                                const MassTraces& traces)
{
  double mre = 0.0;
  double total_weights = 0.0;
  // RT range where both the model and the data (of the first trace) are defined:
  double rt_start = max(fitter->getLowerRTBound(), traces[0].peaks[0].first);
  double rt_end = min(fitter->getUpperRTBound(),
                      traces[0].peaks.back().first);
  for (MassTraces::const_iterator tr_it = traces.begin();
       tr_it != traces.end(); ++tr_it)
  {
    for (vector<pair<double, const Peak1D*> >::const_iterator p_it =
           tr_it->peaks.begin(); p_it != tr_it->peaks.end(); ++p_it)
    {
      double rt = p_it->first;
      if ((rt >= rt_start) && (rt <= rt_end))
      {
        // model prediction scaled by theoretical trace intensity vs. observation:
        double model_value = fitter->getValue(rt);
        double diff = fabs(model_value * tr_it->theoretical_int -
                           p_it->second->getIntensity());
        mre += diff / model_value;
        total_weights += tr_it->theoretical_int;
      }
    }
  }
  return mre / total_weights;
}
// Fits the elution model to the given mass traces, records the model parameters
// as meta values on the feature, and stores a validity status in "model_status":
// "0 (valid)" or a numbered failure reason (invalid area, center/sides out of
// the [region_start, region_end] RT region).
// @param fitter           pre-configured trace fitter (EGH if <asymmetric>, Gaussian otherwise)
// @param traces           mass traces to fit
// @param feature          feature annotated with model meta values
// @param region_start/end RT region used to validate the model position
// @param area_limit       minimum acceptable model area
// @param check_boundaries max. allowed fraction of model height at the region borders
void ElutionModelFitter::fitAndValidateModel_(
  TraceFitter* fitter, MassTraces& traces, Feature& feature,
  double region_start, double region_end, bool asymmetric,
  double area_limit, double check_boundaries)
{
  bool fit_success = true;
  try
  {
    fitter->fit(traces);
  }
  catch (Exception::UnableToFit& except)
  {
    // fitting failure is not fatal: record it and mark the model accordingly
    OPENMS_LOG_ERROR << "Error fitting model to feature '"
                     << feature.getUniqueId() << "': " << except.getName()
                     << " - " << except.what() << endl;
    fit_success = false;
  }
  // record model parameters:
  double center = fitter->getCenter(), height = fitter->getHeight();
  feature.setMetaValue("model_height", height);
  feature.setMetaValue("model_FWHM", fitter->getFWHM());
  feature.setMetaValue("model_center", center);
  feature.setMetaValue("model_lower", fitter->getLowerRTBound());
  feature.setMetaValue("model_upper", fitter->getUpperRTBound());
  if (asymmetric)
  {
    // downcast is safe: <asymmetric> implies the caller passed an EGH fitter
    EGHTraceFitter* egh = static_cast<EGHTraceFitter*>(fitter);
    double sigma = egh->getSigma();
    double tau = egh->getTau();
    feature.setMetaValue("model_EGH_tau", tau);
    feature.setMetaValue("model_EGH_sigma", sigma);
    // see implementation of "EGHTraceFitter::getArea":
    double width = sigma * 0.6266571 + abs(tau);
    feature.setMetaValue("model_width", width);
    double asymmetry = abs(tau) / sigma;
    feature.setMetaValue("model_asymmetry", asymmetry);
  }
  else
  {
    // downcast is safe: !<asymmetric> implies a Gaussian fitter
    GaussTraceFitter* gauss = static_cast<GaussTraceFitter*>(fitter);
    double sigma = gauss->getSigma();
    feature.setMetaValue("model_Gauss_sigma", sigma);
    feature.setMetaValue("model_width", sigma); // yes, this is redundant
  }
  // goodness of fit:
  double mre = -1.0; // mean relative error
  if (fit_success)
  {
    mre = calculateFitQuality_(fitter, traces);
  }
  feature.setMetaValue("model_error", mre);
  // check model validity:
  double area = fitter->getArea();
  feature.setMetaValue("model_area", area);
  if ((area != area) || (area <= area_limit)) // x != x: test for NaN
  {
    feature.setMetaValue("model_status", "1 (invalid area)");
  }
  else if ((center <= region_start) || (center >= region_end))
  {
    feature.setMetaValue("model_status", "2 (center out of bounds)");
  }
  else if (fitter->getValue(region_start) > check_boundaries * height)
  {
    feature.setMetaValue("model_status", "3 (left side out of bounds)");
  }
  else if (fitter->getValue(region_end) > check_boundaries * height)
  {
    feature.setMetaValue("model_status", "4 (right side out of bounds)");
  }
  else
  {
    feature.setMetaValue("model_status", "0 (valid)");
  }
}
// Fits an elution model (symmetric Gaussian or asymmetric EGH) to every feature,
// using the mass traces stored in its subordinate features. Afterwards invalid
// models are flagged, width/asymmetry outliers are rejected via modified z-scores,
// and feature intensities are replaced by model areas (imputed from the raw
// intensities where fitting failed, unless "no_imputation" is set).
// @throws Exception::MissingInformation if features, subordinates or hulls are missing
void ElutionModelFitter::fitElutionModels(FeatureMap& features)
{
  if (features.empty())
  {
    throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No features provided.");
  }
  // extract algorithm parameters:
  bool asymmetric = param_.getValue("asymmetric").toBool();
  double add_zeros = param_.getValue("add_zeros");
  bool weighted = !param_.getValue("unweighted_fit").toBool();
  bool impute = !param_.getValue("no_imputation").toBool();
  bool each_trace = param_.getValue("each_trace").toBool();
  double check_boundaries = param_.getValue("check:boundaries");
  double area_limit = param_.getValue("check:min_area");
  double width_limit = param_.getValue("check:width");
  double asym_limit = (asymmetric ?
                       double(param_.getValue("check:asymmetry")) : 0.0);
  TraceFitter* fitter;
  if (asymmetric)
  {
    fitter = new EGHTraceFitter();
  }
  else
  {
    fitter = new GaussTraceFitter();
  }
  if (weighted)
  {
    Param params = fitter->getDefaults();
    params.setValue("weighted", "true");
    fitter->setParameters(params);
  }
  // collect peaks that constitute mass traces:
  //TODO make progress logger?
  OPENMS_LOG_DEBUG << "Fitting elution models to features:" << endl;
  for (Feature& feat : features)
  {
    // RT region of the feature, used to constrain and validate the fit:
    double region_start = double(feat.getMetaValue("leftWidth"));
    double region_end = double(feat.getMetaValue("rightWidth"));
    if (feat.getSubordinates().empty())
    {
      throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No subordinate features for mass traces available.");
    }
    const Feature& sub = feat.getSubordinates()[0];
    if (sub.getConvexHulls().empty())
    {
      throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No hull points for mass trace in subordinate feature available.");
    }
    vector<Peak1D> peaks;
    // reserve space once, to avoid copying and invalidating pointers:
    Size points_per_hull = sub.getConvexHulls()[0].getHullPoints().size();
    peaks.reserve(feat.getSubordinates().size() * points_per_hull +
                  (add_zeros > 0.0)); // don't forget additional zero point
    MassTraces traces;
    traces.max_trace = 0;
    // need a mass trace for every transition, plus maybe one for add. zeros:
    traces.reserve(feat.getSubordinates().size() + (add_zeros > 0.0));
    for (Feature& sub : feat.getSubordinates())
    {
      MassTrace trace;
      trace.peaks.reserve(points_per_hull);
      const ConvexHull2D& hull = sub.getConvexHulls()[0];
      for (ConvexHull2D::PointArrayTypeConstIterator point_it =
             hull.getHullPoints().begin(); point_it !=
             hull.getHullPoints().end(); ++point_it)
      {
        double intensity = point_it->getY();
        if (intensity > 0.0) // only use non-zero intensities for fitting
        {
          Peak1D peak;
          peak.setMZ(sub.getMZ());
          peak.setIntensity(intensity);
          peaks.push_back(peak);
          trace.peaks.emplace_back(point_it->getX(), &peaks.back());
        }
      }
      trace.updateMaximum();
      if (trace.peaks.empty())
      {
        continue; // no non-zero intensities in this trace - nothing to fit
      }
      if (each_trace)
      {
        // fit a separate model to this individual mass trace:
        MassTraces temp;
        trace.theoretical_int = 1.0;
        temp.push_back(trace);
        temp.max_trace = 0;
        fitAndValidateModel_(fitter, temp, sub, region_start, region_end,
                             asymmetric, area_limit, check_boundaries);
      }
      trace.theoretical_int = sub.getMetaValue("isotope_probability");
      traces.push_back(trace);
    }
    // find the trace with maximal intensity:
    Size max_trace = 0;
    double max_intensity = 0;
    for (Size i = 0; i < traces.size(); ++i)
    {
      if (traces[i].max_peak->getIntensity() > max_intensity)
      {
        max_trace = i;
        max_intensity = traces[i].max_peak->getIntensity();
      }
    }
    traces.max_trace = max_trace;
    traces.baseline = 0.0;
    if (add_zeros > 0.0)
    {
      // add a zero-intensity trace with two anchor points *outside* the feature
      // RT region to pull the model down to zero at the borders:
      MassTrace trace;
      trace.peaks.reserve(2);
      trace.theoretical_int = add_zeros;
      trace.max_rt = 0.0;
      Peak1D peak;
      peak.setMZ(feat.getSubordinates()[0].getMZ());
      peak.setIntensity(0.0);
      peaks.push_back(peak);
      // BUGFIX: offset must be positive, i.e. "region_end - region_start";
      // the previous sign placed the zero points *inside* the feature region.
      double offset = 0.2 * (region_end - region_start);
      trace.peaks.emplace_back(region_start - offset, &peaks.back());
      trace.peaks.emplace_back(region_end + offset, &peaks.back());
      traces.push_back(trace);
    }
    // fit the model:
    fitAndValidateModel_(fitter, traces, feat, region_start, region_end,
                         asymmetric, area_limit, check_boundaries);
  }
  delete fitter;
  // check if fit worked for at least one feature
  bool has_valid_models{false};
  for (Feature& feature : features)
  {
    if (feature.getMetaValue("model_status") == "0 (valid)")
    {
      has_valid_models = true;
      break;
    }
  }
  // no valid feature e.g. because of empty file or blank? return empty features. (subsequent steps assume valid features)
  if (!has_valid_models) { features.clear(); return; }
  // find outliers in model parameters:
  if (width_limit > 0)
  {
    // collect widths of all valid models:
    vector<double> widths;
    for (Feature& feature : features)
    {
      if (feature.getMetaValue("model_status") == "0 (valid)")
      {
        widths.push_back(feature.getMetaValue("model_width"));
      }
    }
    double median_width = Math::median(widths.begin(), widths.end());
    vector<double> abs_diffs(widths.size());
    for (Size i = 0; i < widths.size(); ++i)
    {
      abs_diffs[i] = fabs(widths[i] - median_width);
    }
    // median absolute deviation (constant factor to approximate std. dev.):
    double mad_width = 1.4826 * Math::median(abs_diffs.begin(),
                                             abs_diffs.end());
    for (Feature& feature : features)
    {
      if (feature.getMetaValue("model_status") == "0 (valid)")
      {
        double width = feature.getMetaValue("model_width");
        double z_width = (width - median_width) / mad_width; // mod. z-score
        if (z_width > width_limit)
        {
          feature.setMetaValue("model_status", "5 (width too large)");
        }
        else if (z_width < -width_limit)
        {
          feature.setMetaValue("model_status", "6 (width too small)");
        }
      }
    }
  }
  if (asym_limit > 0)
  {
    // collect asymmetries of all valid models (EGH only):
    vector<double> asyms;
    for (Feature& feature : features)
    {
      if (feature.getMetaValue("model_status") == "0 (valid)")
      {
        asyms.push_back(feature.getMetaValue("model_asymmetry"));
      }
    }
    double median_asym = Math::median(asyms.begin(), asyms.end());
    vector<double> abs_diffs(asyms.size());
    for (Size i = 0; i < asyms.size(); ++i)
    {
      abs_diffs[i] = fabs(asyms[i] - median_asym);
    }
    // median absolute deviation (constant factor to approximate std. dev.):
    double mad_asym = 1.4826 * Math::median(abs_diffs.begin(),
                                            abs_diffs.end());
    for (Feature& feature : features)
    {
      if (feature.getMetaValue("model_status") == "0 (valid)")
      {
        double asym = feature.getMetaValue("model_asymmetry");
        double z_asym = (asym - median_asym) / mad_asym; // mod. z-score
        if (z_asym > asym_limit)
        {
          feature.setMetaValue("model_status", "7 (asymmetry too high)");
        }
        else if (z_asym < -asym_limit) // probably shouldn't happen in practice
        {
          feature.setMetaValue("model_status", "8 (asymmetry too low)");
        }
      }
    }
  }
  // impute approximate results for failed model fits (basically bring the
  // OpenSWATH intensity estimates to the same scale as the model-based ones):
  TransformationModel::DataPoints quant_values;
  vector<FeatureMap::Iterator> failed_models;
  Size model_successes = 0, model_failures = 0;
  for (FeatureMap::Iterator feat_it = features.begin();
       feat_it != features.end(); ++feat_it)
  {
    feat_it->setMetaValue("raw_intensity", feat_it->getIntensity());
    if (String(feat_it->getMetaValue("model_status"))[0] != '0')
    {
      if (impute) failed_models.push_back(feat_it);
      else feat_it->setIntensity(0.0);
      model_failures++;
    }
    else
    {
      double area = feat_it->getMetaValue("model_area");
      if (impute)
      { // apply log-transform to weigh down high outliers:
        double raw_intensity = feat_it->getIntensity();
        OPENMS_LOG_DEBUG << "Successful model: x = " << raw_intensity << ", y = "
                         << area << "; log(x) = " << log(raw_intensity)
                         << ", log(y) = " << log(area) << endl;
        quant_values.push_back(make_pair(log(raw_intensity), log(area)));
      }
      feat_it->setIntensity(area);
      model_successes++;
    }
  }
  OPENMS_LOG_INFO << "Model fitting: " << model_successes << " successes, "
                  << model_failures << " failures" << endl;
  if (impute) // impute results for cases where the model fit failed
  {
    // linear model on log-scale maps raw intensities to model areas:
    TransformationModelLinear lm(quant_values, Param());
    double slope, intercept;
    String x_weight, y_weight;
    double x_datum_min, x_datum_max, y_datum_min, y_datum_max;
    lm.getParameters(slope, intercept, x_weight, y_weight, x_datum_min, x_datum_max, y_datum_min, y_datum_max);
    OPENMS_LOG_INFO << "Imputing model failures with a linear model based on log(rawIntensities). Slope: " << slope << ", Intercept: " << intercept << endl;
    for (vector<FeatureMap::Iterator>::iterator it = failed_models.begin();
         it != failed_models.end(); ++it)
    {
      double area = exp(lm.evaluate(log((*it)->getIntensity())));
      (*it)->setIntensity(area);
    }
  }
}
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Erhan Kenar, Holger Franken $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/FeatureFindingMetabo.h>
#include <OpenMS/ANALYSIS/OPENSWATH/OpenSwathHelper.h>
#include <OpenMS/CHEMISTRY/ISOTOPEDISTRIBUTION/CoarseIsotopePatternGenerator.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/UniqueIdGenerator.h>
#include <OpenMS/SYSTEM/File.h>
#include <fstream>
#include <boost/dynamic_bitset.hpp>
#include "svm.h"
// #define FFM_DEBUG
namespace OpenMS
{
void FeatureHypothesis::addMassTrace(const MassTrace& mt_ptr)
{
iso_pattern_.push_back(&mt_ptr);
}
  // Intensity of the monoisotopic (first) mass trace of the hypothesis.
  // Throws Exception::InvalidValue if the hypothesis contains no traces.
  // NOTE(review): the default argument appears in the out-of-line definition —
  // confirm it is not repeated in the header declaration (that would be ill-formed).
  double FeatureHypothesis::getMonoisotopicFeatureIntensity(bool smoothed = false) const
  {
    if (iso_pattern_.empty())
    {
      throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                    "FeatureHypothesis is empty, no traces contained!", String(iso_pattern_.size()));
    }
    return iso_pattern_[0]->getIntensity(smoothed);
  }
double FeatureHypothesis::getSummedFeatureIntensity(bool smoothed = false) const
{
double int_sum(0.0);
for (Size i = 0; i < iso_pattern_.size(); ++i)
{
int_sum += iso_pattern_[i]->getIntensity(smoothed);
}
return int_sum;
}
double FeatureHypothesis::getMaxIntensity(bool smoothed) const
{
double int_max(0.0);
for (Size i = 0; i < iso_pattern_.size(); ++i)
{
const double height = iso_pattern_[i]->getMaxIntensity(smoothed);
if (int_max < height)
{
int_max = height;
}
}
return int_max;
}
Size FeatureHypothesis::getNumFeatPoints() const
{
Size num_points(0);
for (Size mt_idx = 0; mt_idx < iso_pattern_.size(); ++mt_idx)
{
num_points += iso_pattern_[mt_idx]->getSize();
}
return num_points;
}
std::vector<ConvexHull2D> FeatureHypothesis::getConvexHulls() const
{
std::vector<ConvexHull2D> tmp_hulls;
for (Size mt_idx = 0; mt_idx < iso_pattern_.size(); ++mt_idx)
{
ConvexHull2D::PointArrayType hull_points(iso_pattern_[mt_idx]->getSize());
Size i = 0;
for (MassTrace::const_iterator l_it = iso_pattern_[mt_idx]->begin(); l_it != iso_pattern_[mt_idx]->end(); ++l_it)
{
hull_points[i][0] = (*l_it).getRT();
hull_points[i][1] = (*l_it).getMZ();
++i;
}
ConvexHull2D hull;
hull.addPoints(hull_points);
tmp_hulls.push_back(hull);
}
return tmp_hulls;
}
  // Builds one chromatogram per mass trace of the hypothesis; the precursor is set
  // to the monoisotopic trace's centroid m/z, the hypothesis charge, and <feature_id>.
  // NOTE(review): iso_pattern_[0] is accessed without an empty-check — confirm callers
  // guarantee a non-empty hypothesis.
  std::vector< OpenMS::MSChromatogram > FeatureHypothesis::getChromatograms(UInt64 feature_id) const
  {
    double mz = iso_pattern_[0]->getCentroidMZ();
    Precursor prec;
    prec.setMZ(mz);
    prec.setCharge(charge_);
    prec.setMetaValue("peptide_sequence", String(feature_id));
    std::vector< OpenMS::MSChromatogram > tmp_chromatograms;
    for (Size mt_idx = 0; mt_idx < iso_pattern_.size(); ++mt_idx)
    {
      OpenMS::MSChromatogram chromatogram;
      // transfer all RT/intensity pairs of the trace into the chromatogram
      for (MassTrace::const_iterator l_it = iso_pattern_[mt_idx]->begin(); l_it != iso_pattern_[mt_idx]->end(); ++l_it)
      {
        ChromatogramPeak peak;
        peak.setRT((*l_it).getRT());
        peak.setIntensity((*l_it).getIntensity());
        chromatogram.push_back(peak);
      }
      // native ID / name encode feature id and trace index
      chromatogram.setNativeID(String(feature_id) + "_" + String(mt_idx));
      chromatogram.setName(String(feature_id) + "_" + String(mt_idx));
      chromatogram.setChromatogramType(ChromatogramSettings::ChromatogramType::BASEPEAK_CHROMATOGRAM);
      chromatogram.setPrecursor(prec);
      chromatogram.sortByPosition();
      tmp_chromatograms.push_back(chromatogram);
    }
    return tmp_chromatograms;
  }
  // Single label for the hypothesis: all mass trace labels joined with '_'.
  OpenMS::String FeatureHypothesis::getLabel() const
  {
    return ListUtils::concatenate(getLabels(), "_");
  }
  // Number of mass traces in the isotope pattern.
  Size FeatureHypothesis::getSize() const
  {
    return iso_pattern_.size();
  }
std::vector<String> FeatureHypothesis::getLabels() const
{
std::vector<String> tmp_labels;
for (Size i = 0; i < iso_pattern_.size(); ++i)
{
tmp_labels.push_back(iso_pattern_[i]->getLabel());
}
return tmp_labels;
}
  // Sets the overall score of this feature hypothesis.
  void FeatureHypothesis::setScore( const double& score )
  {
    feat_score_ = score;
  }
  // Charge state assumed for this hypothesis.
  OpenMS::SignedSize FeatureHypothesis::getCharge() const
  {
    return charge_;
  }
  // Sets the charge state of this hypothesis.
  void FeatureHypothesis::setCharge( const SignedSize& ch )
  {
    charge_ = ch;
  }
std::vector<double> FeatureHypothesis::getAllIntensities( bool smoothed /*= false*/ ) const
{
std::vector<double> tmp;
for (Size i = 0; i < iso_pattern_.size(); ++i)
{
tmp.push_back(iso_pattern_[i]->getIntensity(smoothed));
}
return tmp;
}
// TODO: e.g. check
std::vector<double> FeatureHypothesis::getAllCentroidMZ() const
{
std::vector<double> tmp;
for (Size i = 0; i < iso_pattern_.size(); ++i)
{
tmp.push_back(iso_pattern_[i]->getCentroidMZ());
}
return tmp;
}
std::vector<double> FeatureHypothesis::getAllCentroidRT() const
{
std::vector<double> tmp;
for (Size i = 0; i < iso_pattern_.size(); ++i)
{
tmp.push_back(iso_pattern_[i]->getCentroidRT());
}
return tmp;
}
std::vector<double> FeatureHypothesis::getIsotopeDistances() const
{
std::vector<double> tmp;
for (Size i = 1; i < iso_pattern_.size(); ++i)
{
tmp.push_back(iso_pattern_[i]->getCentroidMZ() - iso_pattern_[i-1]->getCentroidMZ());
}
return tmp;
}
  // Centroid m/z of the monoisotopic (first) mass trace.
  // Throws Exception::InvalidValue if the hypothesis contains no traces.
  double FeatureHypothesis::getCentroidMZ() const
  {
    if (iso_pattern_.empty())
    {
      throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                    "FeatureHypothesis is empty, no centroid MZ!", String(iso_pattern_.size()));
    }
    return iso_pattern_[0]->getCentroidMZ();
  }
  // Centroid RT of the monoisotopic (first) mass trace.
  // Throws Exception::InvalidValue if the hypothesis contains no traces.
  double FeatureHypothesis::getCentroidRT() const
  {
    if (iso_pattern_.empty())
    {
      throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                    "FeatureHypothesis is empty, no centroid RT!", String(iso_pattern_.size()));
    }
    return iso_pattern_[0]->getCentroidRT();
  }
double FeatureHypothesis::getFWHM() const
{
if (iso_pattern_.empty())
{
return 0.0;
}
return iso_pattern_[0]->getFWHM();
}
/// Overall (accumulated) score of this feature hypothesis.
double FeatureHypothesis::getScore() const
{
return feat_score_;
}
/// Register all user parameters with their defaults and allowed values;
/// the actual member variables are filled in updateMembers_().
FeatureFindingMetabo::FeatureFindingMetabo() :
DefaultParamHandler("FeatureFindingMetabo"), ProgressLogger()
{
// search window for assembling co-eluting isotope traces
defaults_.setValue("local_rt_range", 10.0, "RT range where to look for coeluting mass traces", {"advanced"}); // 5.0
defaults_.setValue("local_mz_range", 6.5, "MZ range where to look for isotopic mass traces", {"advanced"}); // 6.5
// charge states tested when generating isotope-pattern hypotheses
defaults_.setValue("charge_lower_bound", 1, "Lowest charge state to consider"); // 1
defaults_.setValue("charge_upper_bound", 3, "Highest charge state to consider"); // 3
defaults_.setValue("chrom_fwhm", 5.0, "Expected chromatographic peak width (in seconds)."); // 5.0
// reporting options
defaults_.setValue("report_summed_ints", "false", "Set to true for a feature intensity summed up over all traces rather than using monoisotopic trace intensity alone.", {"advanced"});
defaults_.setValidStrings("report_summed_ints", {"false","true"});
defaults_.setValue("enable_RT_filtering", "true", "Require sufficient overlap in RT while assembling mass traces. Disable for direct injection data..");
defaults_.setValidStrings("enable_RT_filtering", {"false","true"});
// isotope-pattern scoring/filtering model (SVM for metabolites, averagine for peptides)
defaults_.setValue("isotope_filtering_model", "metabolites (5% RMS)", "Remove/score candidate assemblies based on isotope intensities. SVM isotope models for metabolites were trained with either 2% or 5% RMS error. For peptides, an averagine cosine scoring is used. Select the appropriate noise model according to the quality of measurement or MS device.");
defaults_.setValidStrings("isotope_filtering_model", {"metabolites (2% RMS)","metabolites (5% RMS)","peptides","none"});
defaults_.setValue("mz_scoring_13C", "false", "Use the 13C isotope peak position (~1.003355 Da) as the expected shift in m/z for isotope mass traces (highly recommended for lipidomics!). Disable for general metabolites (as described in Kenar et al. 2014, MCP.).");
defaults_.setValidStrings("mz_scoring_13C", {"false","true"});
// smoothing options: 'use' affects scoring/assembly, 'report' only the output intensities
defaults_.setValue("use_smoothed_intensities", "true", "Use LOWESS intensities instead of raw intensities.", {"advanced"});
defaults_.setValidStrings("use_smoothed_intensities", {"false","true"});
defaults_.setValue("report_smoothed_intensities", "true", "Report smoothed intensities (only if use_smoothed_intensities is true).", {"advanced"});
defaults_.setValidStrings("report_smoothed_intensities", {"false","true"});
defaults_.setValue("report_convex_hulls", "false", "Augment each reported feature with the convex hull of the underlying mass traces (increases featureXML file size considerably).");
defaults_.setValidStrings("report_convex_hulls", {"false","true"});
defaults_.setValue("report_chromatograms", "false", "Adds Chromatogram for each reported feature (Output in mzml).");
defaults_.setValidStrings("report_chromatograms", {"false","true"});
defaults_.setValue("remove_single_traces", "false", "Remove unassembled traces (single traces).");
defaults_.setValidStrings("remove_single_traces", {"false","true"});
// element-based isotope-window scoring (alternative to mz_scoring_13C)
defaults_.setValue("mz_scoring_by_elements", "false", "Use the m/z range of the assumed elements to detect isotope peaks. A expected m/z range is computed from the isotopes of the assumed elements. If enabled, this ignores 'mz_scoring_13C'");
defaults_.setValidStrings("mz_scoring_by_elements", {"false","true"});
defaults_.setValue("elements", "CHNOPS", "Elements assumes to be present in the sample (this influences isotope detection).");
defaultsToParam_();
this->setLogType(CMD);
}
/// Release the libsvm model, if one was loaded via loadIsotopeModel_().
FeatureFindingMetabo::~FeatureFindingMetabo()
{
if (isotope_filt_svm_ != nullptr)
{
svm_free_and_destroy_model(&isotope_filt_svm_);
}
}
void FeatureFindingMetabo::updateMembers_()
{
local_rt_range_ = (double)param_.getValue("local_rt_range");
local_mz_range_ = (double)param_.getValue("local_mz_range");
chrom_fwhm_ = (double)param_.getValue("chrom_fwhm");
charge_lower_bound_ = (Size)param_.getValue("charge_lower_bound");
charge_upper_bound_ = (Size)param_.getValue("charge_upper_bound");
report_summed_ints_ = param_.getValue("report_summed_ints").toBool();
enable_RT_filtering_ = param_.getValue("enable_RT_filtering").toBool();
isotope_filtering_model_ = param_.getValue("isotope_filtering_model").toString();
use_smoothed_intensities_ = param_.getValue("use_smoothed_intensities").toBool();
bool use_smoothed = param_.getValue("use_smoothed_intensities").toBool();
bool report_smoothed = param_.getValue("report_smoothed_intensities").toBool();
if (report_smoothed && !use_smoothed) {
OPENMS_LOG_WARN << "Warning: 'report_smoothed_intensities' is set to true, but 'use_smoothed_intensities' is false. Ignoring 'report_smoothed_intensities'.\n";
report_smoothed = false;
}
use_smoothed_intensities_ = use_smoothed;
report_smoothed_intensities_ = report_smoothed;
use_mz_scoring_C13_ = param_.getValue("mz_scoring_13C").toBool();
report_convex_hulls_ = param_.getValue("report_convex_hulls").toBool();
report_chromatograms_ = param_.getValue("report_chromatograms").toBool();
remove_single_traces_ = param_.getValue("remove_single_traces").toBool();
use_mz_scoring_by_element_range_ = param_.getValue("mz_scoring_by_elements").toBool();
std::string elements_list_ = param_.getValue("elements");
elements_ = elementsFromString_(elements_list_);
}
std::vector<const Element*> FeatureFindingMetabo::elementsFromString_(const std::string& elements_string) const
{
std::vector<const Element*> elements;
for (const auto& element_with_amount : EmpiricalFormula(elements_string))
{
elements.push_back(element_with_amount.first);
}
return elements;
}
double FeatureFindingMetabo::computeAveragineSimScore_(const std::vector<double>& hypo_ints, const double& mol_weight) const
{
CoarseIsotopePatternGenerator solver(hypo_ints.size());
auto isodist = solver.estimateFromPeptideWeight(mol_weight);
// isodist.renormalize();
IsotopeDistribution::ContainerType averagine_dist = isodist.getContainer();
double max_int(0.0), theo_max_int(0.0);
for (Size i = 0; i < hypo_ints.size(); ++i)
{
if (hypo_ints[i] > max_int)
{
max_int = hypo_ints[i];
}
if (averagine_dist[i].getIntensity() > theo_max_int)
{
theo_max_int = averagine_dist[i].getIntensity();
}
}
// compute normalized intensities
std::vector<double> averagine_ratios, hypo_isos;
for (Size i = 0; i < hypo_ints.size(); ++i)
{
averagine_ratios.push_back(averagine_dist[i].getIntensity() / theo_max_int);
hypo_isos.push_back(hypo_ints[i] / max_int);
}
double iso_score = computeCosineSim_(averagine_ratios, hypo_isos);
return iso_score;
}
int FeatureFindingMetabo::isLegalIsotopePattern_(const FeatureHypothesis& feat_hypo) const
{
if (feat_hypo.getSize() == 1)
{
return -1;
}
if (svm_feat_centers_.empty() || svm_feat_scales_.empty())
{
throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Isotope filtering invoked, but no model loaded. Internal error. Please report this!");
}
std::vector<double> all_ints = feat_hypo.getAllIntensities(use_smoothed_intensities_);
double mono_int(all_ints[0]); // monoisotopic intensity
const Size FEAT_NUM(4);
svm_node* nodes = new svm_node[FEAT_NUM + 1];
double act_mass(feat_hypo.getCentroidMZ() * feat_hypo.getCharge());
// isotope model currently restricted to formulas up to 1000 Da
if (act_mass > 1000.0)
{
act_mass = 1000.0;
}
nodes[0].index = 1;
nodes[0].value = (act_mass - svm_feat_centers_[0]) / svm_feat_scales_[0];
// Iterate, start with first isotopic trace (skip monoisotopic)
Size i = 2;
Size feat_size(feat_hypo.getSize());
if (feat_size > FEAT_NUM)
{
feat_size = FEAT_NUM;
}
for (; i - 1 < feat_size; ++i)
{
nodes[i - 1].index = static_cast<Int>(i);
// compute ratio of trace to monoisotopic intensity
double ratio((all_ints[i - 1] / mono_int));
double tmp_val((ratio - svm_feat_centers_[i - 1]) / svm_feat_scales_[i - 1]);
nodes[i - 1].value = tmp_val;
}
for (; i < FEAT_NUM + 1; ++i)
{
nodes[i - 1].index = static_cast<Int>(i);
nodes[i - 1].value = (-svm_feat_centers_[i - 1]) / svm_feat_scales_[i - 1];
}
nodes[FEAT_NUM].index = -1;
nodes[FEAT_NUM].value = 0;
// debug output
// std::cout << "isocheck for " << feat_hypo.getLabel() << " " << feat_hypo.getSize() << '\n';
// for (Size i = 0; i < FEAT_NUM + 1; ++i)
// {
// std::cout << "idx: " << nodes[i].index << " val: " << nodes[i].value << '\n';
// }
// Use SVM model to predict the category in which the current trace group
// belongs ...
double predict = svm_predict(isotope_filt_svm_, nodes);
// std::cout << "predict: " << predict << '\n';
delete[] nodes;
return (predict == 2.0) ? 1 : 0;
}
/// Load the SVM isotope-intensity model '<model_name>.svm' plus its feature
/// scaling data '<model_name>.scale' from the CHEMISTRY share directory into
/// isotope_filt_svm_ / svm_feat_centers_ / svm_feat_scales_.
/// @throws Exception::ParseError if the model cannot be loaded or the scale
///         file is malformed
/// @throws Exception::FileNotReadable if the scale file cannot be opened
/// Fix vs. previous version: a scale-file line with an odd number of values
/// dereferenced an end istream_iterator (undefined behavior); values are now
/// read in checked (center, scale) pairs.
void FeatureFindingMetabo::loadIsotopeModel_(const String& model_name)
{
  String search_name("CHEMISTRY/" + model_name);

  std::string model_filename = File::find(search_name + ".svm");
  std::string scale_filename = File::find(search_name + ".scale");

  // replace a previously loaded model without leaking it
  if (isotope_filt_svm_ != nullptr)
  {
    svm_free_and_destroy_model(&isotope_filt_svm_);
  }
  isotope_filt_svm_ = svm_load_model(model_filename.c_str());
  if (isotope_filt_svm_ == nullptr)
  {
    throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                "Loading " + model_filename + " failed", model_filename);
  }

  std::ifstream ifs(scale_filename.c_str());
  if (!ifs)
  {
    throw Exception::FileNotReadable(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, scale_filename);
  }

  svm_feat_centers_.clear();
  svm_feat_scales_.clear();

  // each line holds whitespace-separated (center, scale) value pairs
  std::string line;
  while (getline(ifs, line))
  {
    std::istringstream str_buf(line);
    double center, scale;
    while (str_buf >> center)
    {
      if (!(str_buf >> scale))
      {
        throw Exception::ParseError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                    "Odd number of values (expected center/scale pairs) in " + scale_filename, line);
      }
      svm_feat_centers_.push_back(center);
      svm_feat_scales_.push_back(scale);
    }
  }

  // sanity check (guaranteed by the pairwise reading above)
  if (svm_feat_centers_.size() != svm_feat_scales_.size())
  {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                  "Numbers of centers and scales from file " + scale_filename + " are different!",
                                  String(svm_feat_centers_.size()) + " and " + String(svm_feat_scales_.size()));
  }
}
/// Score how well the m/z distance between two mass traces matches the
/// expectation for isotope position 'iso_pos' at the given charge.
/// Delegates to either the element-range or the expected-mean scoring scheme.
double FeatureFindingMetabo::scoreMZ_(const MassTrace& tr1, const MassTrace& tr2, Size iso_pos, Size charge, Range isotope_window) const
{
  // observed m/z distance between the two trace centroids
  const double diff_mz = std::fabs(tr2.getCentroidMZ() - tr1.getCentroidMZ());

  // combined centroid variance of both traces (sigma1^2 + sigma2^2)
  const double sigma1 = tr1.getCentroidSD();
  const double sigma2 = tr2.getCentroidSD();
  const double mt_variances = std::exp(2 * std::log(sigma1)) + std::exp(2 * std::log(sigma2));

  if (use_mz_scoring_by_element_range_)
  {
    return scoreMZByExpectedRange_(charge, diff_mz, mt_variances, isotope_window);
  }
  return scoreMZByExpectedMean_(iso_pos, charge, diff_mz, mt_variances);
}
/// Gaussian score of the observed m/z spacing against the expected mean
/// spacing (mu) for isotope index 'iso_pos' at the given charge.
/// Returns exp(-z^2/2) inside a 3-sigma window, 0.0 outside.
double FeatureFindingMetabo::scoreMZByExpectedMean_(Size iso_pos, Size charge, const double diff_mz, double mt_variances) const
{
  double mu;
  if (use_mz_scoring_C13_)
  { // this reflects some data better (at least all Orbitrap)
    mu = (Constants::C13C12_MASSDIFF_U * iso_pos) / charge; // using '1.0033548378'
  }
  else
  { // original implementation from Kenar et al.
    mu = (1.000857 * iso_pos + 0.001091) / charge;
  }
  // expected std. dev. of the spacing (identical in both models above)
  const double sd = (0.0016633 * iso_pos - 0.0004751) / charge;

  const double sigma_mult(3.0);
  // standard deviation including the estimated isotope deviation
  const double score_sigma(std::sqrt(std::exp(2 * std::log(sd)) + mt_variances));

  double mz_score(0.0);
  if ((diff_mz < mu + sigma_mult * score_sigma) && (diff_mz > mu - sigma_mult * score_sigma))
  {
    const double z((diff_mz - mu) / score_sigma);
    mz_score = std::exp(-0.5 * z * z);
  }
  return mz_score;
}
/// Score the observed m/z spacing against the theoretical isotope m/z window
/// derived from the assumed elements (approach based on SIRIUS).
/// 1.0 inside the window, Gaussian falloff within 3 sigma around it, else 0.0.
double FeatureFindingMetabo::scoreMZByExpectedRange_(Size charge, const double diff_mz, double mt_variances, Range isotope_window) const
{
  const double sigma_mult(3.0);
  // standard deviation of the m/z distance between the 2 mass traces
  const double mt_sigma(std::sqrt(mt_variances));
  const double max_allowed_deviation = mt_sigma * sigma_mult;

  // window boundaries converted from mass to m/z space
  const double lbound = isotope_window.left_boundary / charge;
  const double rbound = isotope_window.right_boundary / charge;

  if ((diff_mz < rbound) && (diff_mz > lbound))
  { // isotope mass trace lies in the expected range: perfect score
    return 1.0;
  }
  if ((diff_mz < rbound + max_allowed_deviation) && (diff_mz > lbound - max_allowed_deviation))
  { // near the window: score only the deviation the element ranges cannot explain
    const double z = (diff_mz < lbound) ? (lbound - diff_mz) / mt_sigma
                                        : (diff_mz - rbound) / mt_sigma;
    return std::exp(-0.5 * z * z);
  }
  return 0.0; // too far from the expected window
}
double FeatureFindingMetabo::scoreRT_(const MassTrace& tr1, const MassTrace& tr2) const
{
// return success if this filter is disabled
if (!enable_RT_filtering_) return 1.0;
// continue to check overlap and cosine similarity
// ...
std::map<double, std::vector<double> > coinciding_rts;
std::pair<Size, Size> tr1_fwhm_idx(tr1.getFWHMborders());
std::pair<Size, Size> tr2_fwhm_idx(tr2.getFWHMborders());
// std::cout << tr1_fwhm_idx.first << " " << tr1_fwhm_idx.second << '\n';
// std::cout << tr2_fwhm_idx.first << " " << tr2_fwhm_idx.second << '\n';
// Size tr1_fwhm_size(tr1_fwhm_idx.second - tr1_fwhm_idx.first);
// Size tr2_fwhm_size(tr2_fwhm_idx.second - tr2_fwhm_idx.first);
// double max_length = (tr1_fwhm_size > tr2_fwhm_size) ? tr1_fwhm_size : tr2_fwhm_size;
double tr1_length(tr1.getFWHM());
double tr2_length(tr2.getFWHM());
double max_length = (tr1_length > tr2_length) ? tr1_length : tr2_length;
// std::cout << "tr1 " << tr1_length << " tr2 " << tr2_length << '\n';
// Extract peak shape between FWHM borders for both peaks
for (Size i = tr1_fwhm_idx.first; i <= tr1_fwhm_idx.second; ++i)
{
coinciding_rts[tr1[i].getRT()].push_back(tr1[i].getIntensity());
}
for (Size i = tr2_fwhm_idx.first; i <= tr2_fwhm_idx.second; ++i)
{
coinciding_rts[tr2[i].getRT()].push_back(tr2[i].getIntensity());
}
// Look at peaks at the same RT
// TODO: this only works if both traces are sampled with equal rate at the same RT
std::vector<double> x, y, overlap_rts;
for (std::map<double, std::vector<double> >::const_iterator m_it = coinciding_rts.begin(); m_it != coinciding_rts.end(); ++m_it)
{
if (m_it->second.size() == 2)
{
x.push_back(m_it->second[0]);
y.push_back(m_it->second[1]);
overlap_rts.push_back(m_it->first);
}
}
// if (x.size() < std::floor(0.8*max_length))
// {
// return 0.0;
// }
// double rt_range(0.0)
// if (coinciding_rts.size() > 0)
// {
// rt_range = std::fabs(coinciding_rts.rbegin()->first - coinciding_rts.begin()->first);
// }
double overlap(0.0);
if (!overlap_rts.empty())
{
double start_rt(*(overlap_rts.begin())), end_rt(*(overlap_rts.rbegin()));
overlap = std::fabs(end_rt - start_rt);
}
double proportion(overlap / max_length);
if (proportion < 0.7)
{
return 0.0;
}
return computeCosineSim_(x, y);
}
/// Compute the theoretical mass window [left, right] around the nominal
/// isotope offset 'peakOffset' from the isotope deviations of all elements in
/// 'alphabet' (approach based on SIRIUS).
/// @throws std::invalid_argument if peakOffset < 1
/// Fix vs. previous version: the scaling factor 'peakOffset / i' was an
/// integer division and truncated (e.g. peakOffset=3, i=2 gave 1 instead of
/// 1.5); it is now computed in floating point. Additionally, i < 1 is skipped
/// to rule out a division by zero.
Range FeatureFindingMetabo::getTheoreticIsotopicMassWindow_(const std::vector<Element const *>& alphabet, int peakOffset) const
{
  if (peakOffset < 1)
  {
    throw std::invalid_argument("Expect a peak offset of at least 1");
  }

  // smallest/largest deviation from a whole number of nominal mass units,
  // extrapolated to 'peakOffset' isotope steps, over all elements' isotopes
  double minmz = std::numeric_limits<double>::infinity();
  double maxmz = -std::numeric_limits<double>::infinity();

  for (const Element* e : alphabet)
  {
    IsotopeDistribution iso = e->getIsotopeDistribution();
    for (unsigned int k = 1; k < iso.size(); ++k)
    {
      const double mz_mono = iso[0].getMZ();
      const double mz_iso = iso[k].getMZ();
      // nominal (integer) mass difference between isotope k and the monoisotope
      const int i = (int)round(mz_iso) - (int)round(mz_mono);
      if (i > peakOffset) break;
      if (i < 1) continue; // guard against division by zero below

      // deviation of the exact mass difference from the nominal difference,
      // scaled linearly from i isotope steps to peakOffset steps
      const double diff = (mz_iso - mz_mono - i) * (static_cast<double>(peakOffset) / i);
      minmz = std::min(minmz, diff);
      maxmz = std::max(maxmz, diff);
    }
  }

  Range range = Range();
  range.left_boundary = peakOffset + minmz;
  range.right_boundary = peakOffset + maxmz;
  return range;
}
double FeatureFindingMetabo::computeCosineSim_(const std::vector<double>& x, const std::vector<double>& y) const
{
if (x.size() != y.size())
{
return 0.0;
}
double mixed_sum(0.0);
double x_squared_sum(0.0);
double y_squared_sum(0.0);
for (Size i = 0; i < x.size(); ++i)
{
mixed_sum += x[i] * y[i];
x_squared_sum += x[i] * x[i];
y_squared_sum += y[i] * y[i];
}
double denom(std::sqrt(x_squared_sum) * std::sqrt(y_squared_sum));
return (denom > 0.0) ? mixed_sum / denom : 0.0;
}
/// Generate feature hypotheses for one set of co-eluting mass trace candidates.
/// candidates[0] is the reference (potential monoisotopic) trace; the remaining
/// entries are nearby traces (sorted by m/z). A single-trace hypothesis is
/// always emitted; then, for each charge state, isotope traces are greedily
/// appended (best-scoring candidate per isotope position) and every
/// intermediate assembly is stored as its own hypothesis.
/// output_hypotheses is shared between OpenMP threads, hence the critical
/// sections around push_back.
void FeatureFindingMetabo::findLocalFeatures_(const std::vector<const MassTrace*>& candidates, const double total_intensity, std::vector<FeatureHypothesis>& output_hypotheses) const
{
// single Mass trace hypothesis
FeatureHypothesis tmp_hypo;
tmp_hypo.addMassTrace(*candidates[0]);
tmp_hypo.setScore((candidates[0]->getIntensity(use_smoothed_intensities_)) / total_intensity);
#ifdef _OPENMP
#pragma omp critical (OPENMS_FFMetabo_output_hypos)
#endif
{
// pushing back to shared vector needs to be synchronized
output_hypotheses.push_back(tmp_hypo);
}
// try to assemble an isotope pattern for every charge state in the user range
for (Size charge = charge_lower_bound_; charge <= charge_upper_bound_; ++charge)
{
FeatureHypothesis fh_tmp;
fh_tmp.addMassTrace(*candidates[0]);
fh_tmp.setScore((candidates[0]->getIntensity(use_smoothed_intensities_)) / total_intensity);
// double mono_iso_rt(candidates[0]->getCentroidRT());
// double mono_iso_mz(candidates[0]->getCentroidMZ());
// double mono_iso_int(candidates[0]->computePeakArea());
// index of the trace accepted for the previous isotope position; later
// isotope traces are only searched at higher m/z (candidates are sorted)
Size last_iso_idx(0);
// number of isotope positions that fit into the local m/z window at this charge
Size iso_pos_max(static_cast<Size>(std::floor(charge * local_mz_range_)));
for (Size iso_pos = 1; iso_pos <= iso_pos_max; ++iso_pos)
{
//estimate expected m/z window for iso_pos
Range isotope_window = getTheoreticIsotopicMassWindow_(elements_, iso_pos);
// Find mass trace that best agrees with current hypothesis of charge
// and isotopic position
double best_so_far(0.0);
Size best_idx(0);
for (Size mt_idx = last_iso_idx + 1; mt_idx < candidates.size(); ++mt_idx)
{
// double tmp_iso_rt(candidates[mt_idx]->getCentroidRT());
// double tmp_iso_mz(candidates[mt_idx]->getCentroidMZ());
// double tmp_iso_int(candidates[mt_idx]->computePeakArea());
#ifdef FFM_DEBUG
std::cout << "scoring " << candidates[0]->getLabel() << " " << candidates[0]->getCentroidMZ() <<
" with " << candidates[mt_idx]->getLabel() << " " << candidates[mt_idx]->getCentroidMZ() << '\n';
#endif
// Score current mass trace candidates against hypothesis
double rt_score(scoreRT_(*candidates[0], *candidates[mt_idx]));
double mz_score(scoreMZ_(*candidates[0], *candidates[mt_idx], iso_pos, charge, isotope_window));
// disable intensity scoring for now...
double int_score(1.0);
// double int_score((candidates[0]->getIntensity(use_smoothed_intensities_))/total_weight + (candidates[mt_idx]->getIntensity(use_smoothed_intensities_))/total_weight);
if (isotope_filtering_model_ == "peptides")
{
// for peptides: score the extended pattern against an averagine model
std::vector<double> tmp_ints(fh_tmp.getAllIntensities());
tmp_ints.push_back(candidates[mt_idx]->getIntensity(use_smoothed_intensities_));
int_score = computeAveragineSimScore_(tmp_ints, candidates[mt_idx]->getCentroidMZ() * charge);
}
#ifdef FFM_DEBUG
std::cout << fh_tmp.getLabel() << "_" << candidates[mt_idx]->getLabel() <<
"\t" << "ch: " << charge << " isopos: " << iso_pos << " rt: " <<
rt_score << "mz: " << mz_score << "int: " << int_score << '\n';
#endif
// combined score is the product of the partial scores (computed in log
// space); any zero partial score vetoes the pair
double total_pair_score(0.0);
if (rt_score > 0.0 && mz_score > 0.0 && int_score > 0.0)
{
total_pair_score = std::exp(std::log(rt_score) + log(mz_score) + log(int_score));
}
if (total_pair_score > best_so_far)
{
best_so_far = total_pair_score;
best_idx = mt_idx;
}
} // end mt_idx
// Store mass trace that best agrees with current hypothesis of charge
// and isotopic position
if (best_so_far > 0.0)
{
fh_tmp.addMassTrace(*candidates[best_idx]);
// intensity-weighted contribution of the newly added trace
double weighted_score(((candidates[best_idx]->getIntensity(use_smoothed_intensities_)) * best_so_far) / total_intensity);
fh_tmp.setScore(fh_tmp.getScore() + weighted_score);
fh_tmp.setCharge(charge);
last_iso_idx = best_idx;
#ifdef _OPENMP
#pragma omp critical (OPENMS_FFMetabo_output_hypos)
#endif
{
// pushing back to shared vector needs to be synchronized
output_hypotheses.push_back(fh_tmp);
}
}
else
{
// no acceptable trace for this isotope position -> pattern ends here
break;
}
} // end for iso_pos
#ifdef FFM_DEBUG
std::cout << "best found for ch " << charge << ":" << fh_tmp.getLabel() << " score: " << fh_tmp.getScore() << '\n';
#endif
} // end for charge
} // end of findLocalFeatures_(...)
/// Assemble mass traces into features.
/// Pipeline: (1) load the SVM isotope model if requested, (2) in parallel,
/// gather for each trace its local RT/m-z neighborhood and generate scored
/// feature hypotheses, (3) greedily accept hypotheses from highest to lowest
/// score, skipping any that reuse an already-claimed trace, and convert the
/// accepted ones into Features (plus optional chromatograms).
/// Note: input_mtraces is re-sorted by centroid m/z as a side effect.
void FeatureFindingMetabo::run(std::vector<MassTrace>& input_mtraces, FeatureMap& output_featmap, std::vector<std::vector< OpenMS::MSChromatogram > >& output_chromatograms)
{
// element-range m/z scoring and SVM isotope filtering are mutually exclusive
if (use_mz_scoring_by_element_range_ && isotope_filtering_model_ != "none")
{
OPENMS_LOG_WARN << "Isotope filtering is not supported, when using the mz scoring by elements.\n"
<< "The parameter isotope_filtering_model will be set to 'none'."
<< '\n';
isotope_filtering_model_ = "none";
}
output_featmap.clear();
output_chromatograms.clear();
if (input_mtraces.empty())
{
return;
}
// mass traces must be sorted by their centroid MZ
std::sort(input_mtraces.begin(), input_mtraces.end(), CmpMassTraceByMZ());
this->startProgress(0, input_mtraces.size(), "assembling mass traces to features");
// *********************************************************** //
// Step 1 initialize SVM model for isotope ratio filtering
// *********************************************************** //
if (isotope_filtering_model_ == "metabolites (2% RMS)")
{
OPENMS_LOG_INFO << "Loading metabolite isotope model with 2% RMS error\n";
loadIsotopeModel_("MetaboliteIsoModelNoised2");
}
else if (isotope_filtering_model_ == "metabolites (5% RMS)")
{
OPENMS_LOG_INFO << "Loading metabolite isotope model with 5% RMS error\n";
loadIsotopeModel_("MetaboliteIsoModelNoised5");
}
// total intensity over all traces, used to normalize hypothesis scores
double total_intensity(0.0);
for (Size i = 0; i < input_mtraces.size(); ++i)
{
total_intensity += input_mtraces[i].getIntensity(use_smoothed_intensities_);
}
// *********************************************************** //
// Step 2 Iterate through all mass traces to find likely matches
// and generate isotopic / charge hypotheses
// *********************************************************** //
std::vector<FeatureHypothesis> feat_hypos;
Size progress(0);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (SignedSize i = 0; i < (SignedSize)input_mtraces.size(); ++i)
{
IF_MASTERTHREAD this->setProgress(progress);
#ifdef _OPENMP
#pragma omp atomic
#endif
++progress;
// gather all traces in the local RT/m-z neighborhood of trace i
std::vector<const MassTrace*> local_traces;
double ref_trace_mz(input_mtraces[i].getCentroidMZ());
double ref_trace_rt(input_mtraces[i].getCentroidRT());
local_traces.push_back(&input_mtraces[i]);
for (Size ext_idx = i + 1; ext_idx < input_mtraces.size(); ++ext_idx)
{
// traces are sorted by m/z, so we can break when we leave the allowed window
double diff_mz = std::fabs(input_mtraces[ext_idx].getCentroidMZ() - ref_trace_mz);
if (diff_mz > local_mz_range_)
{
break;
}
double diff_rt = std::fabs(input_mtraces[ext_idx].getCentroidRT() - ref_trace_rt);
if (diff_rt <= local_rt_range_)
{
// std::cout << " accepted!\n";
local_traces.push_back(&input_mtraces[ext_idx]);
}
}
findLocalFeatures_(local_traces, total_intensity, feat_hypos);
}
this->endProgress();
// sort feature candidates by their score
std::sort(feat_hypos.begin(), feat_hypos.end(), CmpHypothesesByScore());
#ifdef FFM_DEBUG
std::cout << "size of hypotheses: " << feat_hypos.size() << '\n';
// output all hypotheses:
for (Size hypo_idx = 0; hypo_idx < feat_hypos.size(); ++ hypo_idx)
{
bool legal = isLegalIsotopePattern_(feat_hypos[hypo_idx]) > 0;
std::cout << feat_hypos[hypo_idx].getLabel() << " ch: " << feat_hypos[hypo_idx].getCharge() <<
" score: " << feat_hypos[hypo_idx].getScore() << " legal: " << legal << '\n';
}
#endif
// *********************************************************** //
// Step 3 Iterate through all hypotheses, starting with the highest
// scoring one. Accept them if they do not contain traces that have
// already been used by a higher scoring hypothesis.
// *********************************************************** //
// maps trace label -> already claimed by an accepted hypothesis
std::map<String, bool> trace_excl_map;
for (Size hypo_idx = 0; hypo_idx < feat_hypos.size(); ++hypo_idx)
{
// std::cout << "score now: " << feat_hypos[hypo_idx].getScore() << '\n';
std::vector<String> labels(feat_hypos[hypo_idx].getLabels());
bool trace_coll = false; // trace collision?
for (Size lab_idx = 0; lab_idx < labels.size(); ++lab_idx)
{
if (trace_excl_map.find(labels[lab_idx]) != trace_excl_map.end())
{
trace_coll = true;
break;
}
}
#ifdef FFM_DEBUG
if (feat_hypos[hypo_idx].getSize() > 1)
{
std::cout << "check for collision: " << trace_coll << " " <<
feat_hypos[hypo_idx].getLabel() << " " << isLegalIsotopePattern_(feat_hypos[hypo_idx]) <<
" " << feat_hypos[hypo_idx].getScore() << '\n';
}
#endif
// Skip hypotheses that contain a mass trace that has already been used
if (trace_coll)
{
continue;
}
// Check whether the trace passes the intensity filter (metabolites
// only). This is based on a pre-trained SVM model of isotopic
// intensities.
int pass_isotope_filter = -1; // -1 == 'did not test'; 0 = no pass; 1 = pass
if (isotope_filtering_model_ != "none" && isotope_filtering_model_ != "peptides")
{
pass_isotope_filter = isLegalIsotopePattern_(feat_hypos[hypo_idx]);
}
// std::cout << "\nlegal iso? " << feat_hypos[hypo_idx].getLabel() << " score: " << feat_hypos[hypo_idx].getScore() << " " << result << '\n';
if (pass_isotope_filter == 0) // not passing filter
{
continue;
}
// filter out single traces if option is set
// (charge 0 means the hypothesis never got a second trace assigned)
if (remove_single_traces_ && feat_hypos[hypo_idx].getCharge() == 0)
{
continue;
}
//
// Now accept hypothesis
//
Feature f;
f.setRT(feat_hypos[hypo_idx].getCentroidRT());
f.setMZ(feat_hypos[hypo_idx].getCentroidMZ());
if (report_summed_ints_)
{
f.setIntensity(feat_hypos[hypo_idx].getSummedFeatureIntensity(report_smoothed_intensities_));
}
else
{
f.setIntensity(feat_hypos[hypo_idx].getMonoisotopicFeatureIntensity(report_smoothed_intensities_));
}
f.setWidth(feat_hypos[hypo_idx].getFWHM());
f.setCharge(feat_hypos[hypo_idx].getCharge());
f.setMetaValue(3, feat_hypos[hypo_idx].getLabel());
f.setMetaValue("max_height", feat_hypos[hypo_idx].getMaxIntensity(report_smoothed_intensities_));
// store isotope intensities
std::vector<double> all_ints(feat_hypos[hypo_idx].getAllIntensities(report_smoothed_intensities_));
f.setMetaValue(Constants::UserParam::NUM_OF_MASSTRACES, all_ints.size());
if (report_convex_hulls_) f.setConvexHulls(feat_hypos[hypo_idx].getConvexHulls());
f.setOverallQuality(feat_hypos[hypo_idx].getScore());
f.setMetaValue("masstrace_intensity", all_ints);
f.setMetaValue("masstrace_centroid_rt", feat_hypos[hypo_idx].getAllCentroidRT());
f.setMetaValue("masstrace_centroid_mz", feat_hypos[hypo_idx].getAllCentroidMZ());;
f.setMetaValue("isotope_distances", feat_hypos[hypo_idx].getIsotopeDistances());
f.setMetaValue("legal_isotope_pattern", pass_isotope_filter);
f.applyMemberFunction(&UniqueIdInterface::setUniqueId);
output_featmap.push_back(f);
if (report_chromatograms_ && f.getIntensity() != 0)
{
output_chromatograms.push_back(feat_hypos[hypo_idx].getChromatograms(f.getUniqueId()));
}
// add used traces to exclusion map
for (Size lab_idx = 0; lab_idx < labels.size(); ++lab_idx)
{
trace_excl_map[labels[lab_idx]] = true;
}
}
output_featmap.setUniqueId(UniqueIdGenerator::getUniqueId());
output_featmap.sortByMZ();
} // end of FeatureFindingMetabo::run
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/MultiplexClustering.cpp | .cpp | 6,504 | 191 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Lars Nilse $
// $Authors: Lars Nilse $
// --------------------------------------------------------------------------
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/KERNEL/ConsensusMap.h>
#include <OpenMS/PROCESSING/CENTROIDING/PeakPickerHiRes.h>
#include <OpenMS/FEATUREFINDER/MultiplexFiltering.h>
#include <OpenMS/FEATUREFINDER/MultiplexClustering.h>
#include <OpenMS/FEATUREFINDER/PeakWidthEstimator.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/ML/CLUSTERING/GridBasedClustering.h>
#include <QtCore/QDir>
using namespace std;
namespace OpenMS
{
/// Construct the clustering grid from profile + centroided data.
/// Grid cell widths: in m/z, a fraction of the locally estimated peak width
/// (from the profile data and peak boundaries); in RT, the typical retention
/// time span of a peak. Also derives rt_scaling_, which maps RT distances
/// into m/z units for the clustering distance measure.
MultiplexClustering::MultiplexClustering(const MSExperiment& exp_profile, const MSExperiment& exp_picked, const std::vector<std::vector<PeakPickerHiRes::PeakBoundary> >& boundaries, double rt_typical) :
rt_typical_(rt_typical)
{
if (exp_picked.size() != boundaries.size())
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Centroided data and the corresponding list of peak boundaries do not contain same number of spectra.");
}
// ranges of the experiment
double mz_min = exp_profile.spectrumRanges().getMinMZ();
double mz_max = exp_profile.spectrumRanges().getMaxMZ();
double rt_min = exp_profile.spectrumRanges().getMinRT();
double rt_max = exp_profile.spectrumRanges().getMaxRT();
// extend the grid by a small absolute margin
double mz_margin = 1e-2;
double rt_margin = 1e-2;
mz_min -= mz_margin;
mz_max += mz_margin;
rt_min -= rt_margin;
rt_max += rt_margin;
// generate grid spacing
PeakWidthEstimator estimator(exp_picked, boundaries);
// We assume that the jitter of the peak centres are less than <scaling> times the peak width.
// This factor ensures that two neighbouring peaks at the same RT cannot be in the same cluster.
double scaling = 0.4;
// m/z grid lines spaced by a fraction of the (m/z-dependent) peak width
for (double mz = mz_min; mz < mz_max; mz = mz + scaling * estimator.getPeakWidth(mz))
{
grid_spacing_mz_.push_back(mz);
}
grid_spacing_mz_.push_back(mz_max);
// RT grid lines spaced by the typical peak RT span
for (double rt = rt_min; rt < rt_max; rt = rt + rt_typical)
{
grid_spacing_rt_.push_back(rt);
}
grid_spacing_rt_.push_back(rt_max);
// determine RT scaling
// rt_scaling_ = (peak width at the median observed m/z) / rt_typical
// NOTE(review): assumes at least one centroided peak exists (mz non-empty) —
// an empty picked experiment would index into an empty vector; confirm with callers.
std::vector<double> mz;
for (const MSSpectrum& spec : exp_picked)
{
MSSpectrum::ConstIterator it_mz;
for (it_mz = spec.begin(); it_mz != spec.end(); ++it_mz)
{
mz.push_back(it_mz->getMZ());
}
}
std::sort(mz.begin(), mz.end());
rt_scaling_ = estimator.getPeakWidth(mz[(int) mz.size() / 2]) / rt_typical_;
}
/// Construct the clustering grid from centroided data and an explicit m/z
/// tolerance instead of an estimated peak width.
/// @param mz_tolerance_unit true = tolerance is in ppm (multiplicative grid),
///        false = tolerance is in Th (additive grid)
/// Also derives rt_scaling_, which maps RT distances into m/z units for the
/// clustering distance measure.
MultiplexClustering::MultiplexClustering(const MSExperiment& exp, double mz_tolerance, bool mz_tolerance_unit, double rt_typical) :
rt_typical_(rt_typical)
{
// ranges of the experiment (MS1 level only)
double mz_min = exp.spectrumRanges().byMSLevel(1).getMinMZ();
double mz_max = exp.spectrumRanges().byMSLevel(1).getMaxMZ();
double rt_min = exp.spectrumRanges().byMSLevel(1).getMinRT();
double rt_max = exp.spectrumRanges().byMSLevel(1).getMaxRT();
// sanity check against uninitialized (default/extreme) range values
if (!RangeMZ(0.0, 1.0e12).containsMZ({mz_min, mz_max}) ||
!RangeRT(-1.0e12, 1.0e12).containsRT({rt_min, rt_max}) )
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "MinMZ,MaxMZ,MinRT,MaxRT values outside of sensible value ranges. Are they uninitialized? (" + String(mz_min) + "/" + String(mz_max) + "/" + String(rt_min) + "/" + String(rt_max));
}
// extend the grid by a small absolute margin
double mz_margin = 1e-2;
double rt_margin = 1e-2;
mz_min -= mz_margin;
mz_max += mz_margin;
rt_min -= rt_margin;
rt_max += rt_margin;
// generate grid spacing
// We assume that the jitter of the peak centres are less than <scaling> times the user specified m/z tolerance.
double scaling = 1.0;
if (mz_tolerance_unit)
{
// ppm tolerance: grid lines grow multiplicatively with m/z
for (double mz = mz_min; mz < mz_max; mz = mz * (1 + scaling * mz_tolerance/1000000))
{
grid_spacing_mz_.push_back(mz);
}
}
else
{
// absolute (Th) tolerance: equidistant grid lines
for (double mz = mz_min; mz < mz_max; mz = mz + scaling * mz_tolerance)
{
grid_spacing_mz_.push_back(mz);
}
}
grid_spacing_mz_.push_back(mz_max);
// RT grid lines spaced by the typical peak RT span
for (double rt = rt_min; rt < rt_max; rt = rt + rt_typical)
{
grid_spacing_rt_.push_back(rt);
}
grid_spacing_rt_.push_back(rt_max);
// determine RT scaling
// rt_scaling_ = (m/z tolerance at the median observed m/z) / rt_typical
// NOTE(review): assumes the experiment contains at least one peak (mz non-empty) —
// otherwise the median lookup indexes an empty vector; confirm with callers.
std::vector<double> mz;
for (const MSSpectrum& spec : exp)
{
MSSpectrum::ConstIterator it_mz;
for (it_mz = spec.begin(); it_mz != spec.end(); ++it_mz)
{
mz.push_back(it_mz->getMZ());
}
}
std::sort(mz.begin(), mz.end());
if (mz_tolerance_unit)
{
rt_scaling_ = (mz[(int) mz.size() / 2] * mz_tolerance/1000000) / rt_typical_;
}
else
{
rt_scaling_ = mz_tolerance / rt_typical_;
}
}
std::vector<std::map<int, GridBasedCluster> > MultiplexClustering::cluster(const std::vector<MultiplexFilteredMSExperiment>& filter_results)
{
// progress logger
unsigned progress = 0;
startProgress(0, filter_results.size(), "clustering filtered LC-MS data");
std::vector<std::map<int, GridBasedCluster> > cluster_results;
// loop over patterns i.e. cluster each of the corresponding filter results
for (unsigned i = 0; i < filter_results.size(); ++i)
{
setProgress(++progress);
GridBasedClustering<MultiplexDistance> clustering(MultiplexDistance(rt_scaling_), filter_results[i].getMZ(), filter_results[i].getRT(), grid_spacing_mz_, grid_spacing_rt_);
clustering.cluster();
//clustering.extendClustersY();
cluster_results.push_back(clustering.getResults());
}
endProgress();
return cluster_results;
}
  // Construct a distance measure with the given RT compression factor.
  MultiplexClustering::MultiplexDistance::MultiplexDistance(double rt_scaling)
  : rt_scaling_(rt_scaling)
  {
  }
  // Default constructor: RT and m/z are weighted equally (scaling factor 1).
  MultiplexClustering::MultiplexDistance::MultiplexDistance()
  : rt_scaling_(1)
  {
  }
double MultiplexClustering::MultiplexDistance::operator()(const Point& p1, const Point& p2) const
{
return sqrt((p1.getX() - p2.getX())*(p1.getX() - p2.getX()) + rt_scaling_ * rt_scaling_ * (p1.getY() - p2.getY())*(p1.getY() - p2.getY()));
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/ModelDescription.cpp | .cpp | 512 | 17 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/ModelDescription.h>
namespace OpenMS
{
  // Global instances of the 1D and 2D model descriptions.
  // NOTE(review): presumably these exist to anchor the template instantiations
  // in this translation unit -- confirm against ModelDescription.h.
  ModelDescription<1> default_modeldescription_1;
  ModelDescription<2> default_modeldescription_2;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/MultiplexDeltaMassesGenerator.cpp | .cpp | 25,937 | 608 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Lars Nilse $
// $Authors: Lars Nilse $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/MultiplexDeltaMasses.h>
#include <OpenMS/FEATUREFINDER/MultiplexDeltaMassesGenerator.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/replace.hpp>
#include <boost/algorithm/string/split.hpp>
#include <iostream>
#include <ostream>
#include <sstream>
#include <utility>
using namespace std;
namespace OpenMS
{
  // Bundle the naming variants and mass shift of one isotopic label.
  // @param sn short name, e.g. "Arg10"
  // @param ln long name, e.g. "Label:13C(6)15N(4)"
  // @param d human-readable description
  // @param dm mass shift in Da
  MultiplexDeltaMassesGenerator::Label::Label(String sn, String ln, String d, double dm) :
    short_name(std::move(sn)),
    long_name(std::move(ln)),
    description(std::move(d)),
    delta_mass(dm)
  {
  }
MultiplexDeltaMassesGenerator::MultiplexDeltaMassesGenerator() :
DefaultParamHandler("labels"),
labels_(),
labels_list_(),
samples_labels_(),
missed_cleavages_(),
label_delta_mass_()
{
// fill label master list
fillLabelMasterList_();
// set user parameters
for (const MultiplexDeltaMassesGenerator::Label& multi : label_master_list_)
{
defaults_.setValue(multi.short_name, multi.delta_mass, multi.description);
defaults_.setMinFloat(multi.short_name, 0);
}
defaultsToParam_();
}
  // Main constructor: parses the <labels> string (e.g. "[][Lys8,Arg10]"),
  // determines the labelling scheme (SILAC, Leu, Dimethyl, ICPL, numeric or
  // label-free) and generates the list of theoretical mass shift patterns
  // (delta_masses_list_) for up to <missed_cleavages> missed cleavages.
  // @param labels samples/labels string; brackets separate samples, commas etc. separate labels
  // @param missed_cleavages maximum number of missed cleavages
  // @param label_delta_mass map from label name to its mass shift in Da
  MultiplexDeltaMassesGenerator::MultiplexDeltaMassesGenerator(String labels, int missed_cleavages, std::map<String,double> label_delta_mass) :
    DefaultParamHandler("labels"),
    labels_(std::move(labels)),
    labels_list_(),
    samples_labels_(),
    missed_cleavages_(missed_cleavages),
    label_delta_mass_(std::move(label_delta_mass))
  {
    // fill label master list
    fillLabelMasterList_();
    // generate short/long label mappings
    for (const MultiplexDeltaMassesGenerator::Label& multi : label_master_list_)
    {
      label_short_long_.insert(make_pair(multi.short_name, multi.long_name));
      label_long_short_.insert(make_pair(multi.long_name, multi.short_name));
    }
    // split the labels_ string
    // (empty bracket pairs denote an unlabelled sample)
    String temp_labels_string(labels_);
    std::vector<String> temp_samples;
    boost::replace_all(temp_labels_string, "[]", "no_label");
    boost::replace_all(temp_labels_string, "()", "no_label");
    boost::replace_all(temp_labels_string, "{}", "no_label");
    boost::split(temp_samples, temp_labels_string, boost::is_any_of("[](){}")); // any bracket allowed to separate samples
    for (String::size_type i = 0; i < temp_samples.size(); ++i)
    {
      if (!temp_samples[i].empty())
      {
        if (temp_samples[i]=="no_label")
        {
          vector<String> temp_labels;
          temp_labels.emplace_back("no_label");
          samples_labels_.push_back(temp_labels);
        }
        else
        {
          vector<String> temp_labels;
          boost::split(temp_labels, temp_samples[i], boost::is_any_of(",;: ")); // various separators allowed to separate labels
          samples_labels_.push_back(temp_labels);
        }
      }
    }
    // fall back to a single unlabelled sample if nothing was parsed
    if (samples_labels_.empty())
    {
      vector<String> temp_labels;
      temp_labels.emplace_back("no_label");
      samples_labels_.push_back(temp_labels);
    }
    // What kind of labelling do we have?
    // SILAC, Leu, Dimethyl, ICPL, numeric labelling or no labelling ??
    bool no_label = (samples_labels_.size() == 1) &&
                    (samples_labels_[0].size() == 1) &&
                    samples_labels_[0][0] == "no_label";
    bool labelling_SILAC = ((labels_.find("Arg") != std::string::npos) || (labels_.find("Lys") != std::string::npos));
    bool labelling_Leu = (labels_.find("Leu") != std::string::npos);
    bool labelling_Dimethyl = (labels_.find("Dimethyl") != std::string::npos);
    bool labelling_ICPL = (labels_.find("ICPL") != std::string::npos);
    bool labelling_numeric = false;
    if (!(no_label || labelling_SILAC || labelling_Leu || labelling_Dimethyl || labelling_ICPL))
    {
      bool all_numeric = true;
      // Check whether each label string represents a double. If yes, use these doubles as mass shifts.
      for (size_t i = 0; i < samples_labels_.size(); i++)
      {
        for (size_t j = 0; j < samples_labels_[i].size(); j++)
        {
          try
          {
            double mass_shift = std::stod(samples_labels_[i][j]);
            // For numeric mass shifts, long and short label names as well as the numerical mass shift are trivial.
            // For example, long label name ("3.1415"), short label name ("3.1415") and numerical mass shift (3.1415).
            label_delta_mass_.insert(make_pair(samples_labels_[i][j], mass_shift));
            label_short_long_.insert(make_pair(samples_labels_[i][j], samples_labels_[i][j]));
            label_long_short_.insert(make_pair(samples_labels_[i][j], samples_labels_[i][j]));
          }
          catch(...)
          {
            OPENMS_LOG_WARN << "Unrecognized non-numeric label found. Assuming label-free." << std::endl;
            all_numeric = false;
            break;
          }
        }
      }
      labelling_numeric = all_numeric;
    }
    bool labelling_none = labels_.empty() || (labels_ == "[]") || (labels_ == "()") || (labels_ == "{}");
    // exactly one labelling scheme must apply
    bool SILAC = (labelling_SILAC && !labelling_Leu && !labelling_Dimethyl && !labelling_ICPL && !labelling_numeric && !labelling_none);
    bool Leu = (!labelling_SILAC && labelling_Leu && !labelling_Dimethyl && !labelling_ICPL && !labelling_numeric && !labelling_none);
    bool Dimethyl = (!labelling_SILAC && !labelling_Leu && labelling_Dimethyl && !labelling_ICPL && !labelling_numeric && !labelling_none);
    bool ICPL = (!labelling_SILAC && !labelling_Leu && !labelling_Dimethyl && labelling_ICPL && !labelling_numeric && !labelling_none);
    bool numeric = (!labelling_SILAC && !labelling_Leu && !labelling_Dimethyl && !labelling_ICPL && labelling_numeric && !labelling_none);
    bool none = (!labelling_SILAC && !labelling_Leu && !labelling_Dimethyl && !labelling_ICPL && !labelling_numeric && labelling_none);
    if (!(SILAC || Leu || Dimethyl || ICPL || numeric || none))
    {
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Unknown labelling. Neither SILAC, Leu, Dimethyl nor ICPL.");
    }
    // Check if the labels are included in advanced section "labels"
    // unless the labelling is numeric.
    if (!numeric)
    {
      String all_labels = "Arg6 Arg10 Lys4 Lys6 Lys8 Leu3 Dimethyl0 Dimethyl4 Dimethyl6 Dimethyl8 ICPL0 ICPL4 ICPL6 ICPL10 no_label";
      for (std::vector<std::vector<String> >::size_type i = 0; i < samples_labels_.size(); i++)
      {
        for (std::vector<String>::size_type j = 0; j < samples_labels_[i].size(); ++j)
        {
          if (all_labels.find(samples_labels_[i][j]) == std::string::npos)
          {
            std::stringstream stream;
            stream << "The label " << samples_labels_[i][j] << " is unknown.";
            throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, stream.str());
          }
        }
      }
    }
    // generate mass pattern list
    if (SILAC)
    {
      // SILAC
      // We assume the first sample to be unlabelled. Even if the "[]" for the first sample in the label string has not been specified.
      // Enumerate all feasible combinations of labelled Arg and Lys residues per peptide.
      for (unsigned ArgPerPeptide = 0; ArgPerPeptide <= (unsigned) missed_cleavages_ + 1; ArgPerPeptide++)
      {
        for (unsigned LysPerPeptide = 0; LysPerPeptide <= (unsigned) missed_cleavages_ + 1; LysPerPeptide++)
        {
          if (ArgPerPeptide + LysPerPeptide <= (unsigned) missed_cleavages_ + 1)
          {
            MultiplexDeltaMasses delta_masses_temp; // single mass shift pattern
            delta_masses_temp.getDeltaMasses().emplace_back(0, "no_label");
            for (unsigned i = 0; i < samples_labels_.size(); i++)
            {
              double mass_shift = 0;
              MultiplexDeltaMasses::LabelSet label_set;
              // Considering the case of an amino acid (e.g. LysPerPeptide != 0) for which no label is present (e.g. Lys4There && Lys6There && Lys8There == false) makes no sense. Therefore each amino acid will have to give its "Go Ahead" before the shift is calculated.
              bool goAhead_Lys = false;
              bool goAhead_Arg = false;
              for (unsigned j = 0; j < samples_labels_[i].size(); ++j)
              {
                bool Arg6There = (samples_labels_[i][j].find("Arg6") != std::string::npos); // Is Arg6 in the SILAC label?
                bool Arg10There = (samples_labels_[i][j].find("Arg10") != std::string::npos);
                bool Lys4There = (samples_labels_[i][j].find("Lys4") != std::string::npos);
                bool Lys6There = (samples_labels_[i][j].find("Lys6") != std::string::npos);
                bool Lys8There = (samples_labels_[i][j].find("Lys8") != std::string::npos);
                // construct label set
                // (the bool * count trick adds the label once per labelled residue, or not at all)
                for (unsigned k = 1; k < Arg6There * (ArgPerPeptide + 1); ++k)
                {
                  label_set.insert("Arg6");
                }
                for (unsigned k = 1; k < Arg10There * (ArgPerPeptide + 1); ++k)
                {
                  label_set.insert("Arg10");
                }
                for (unsigned k = 1; k < Lys4There * (LysPerPeptide + 1); ++k)
                {
                  label_set.insert("Lys4");
                }
                for (unsigned k = 1; k < Lys6There * (LysPerPeptide + 1); ++k)
                {
                  label_set.insert("Lys6");
                }
                for (unsigned k = 1; k < Lys8There * (LysPerPeptide + 1); ++k)
                {
                  label_set.insert("Lys8");
                }
                mass_shift = mass_shift + ArgPerPeptide * (Arg6There * label_delta_mass_["Arg6"] + Arg10There * label_delta_mass_["Arg10"]) + LysPerPeptide * (Lys4There * label_delta_mass_["Lys4"] + Lys6There * label_delta_mass_["Lys6"] + Lys8There * label_delta_mass_["Lys8"]);
                goAhead_Arg = goAhead_Arg || !(ArgPerPeptide != 0 && !Arg6There && !Arg10There);
                goAhead_Lys = goAhead_Lys || !(LysPerPeptide != 0 && !Lys4There && !Lys6There && !Lys8There);
              }
              if (goAhead_Arg && goAhead_Lys && (mass_shift != 0))
              {
                delta_masses_temp.getDeltaMasses().emplace_back(mass_shift, label_set);
              }
            }
            // keep only patterns with at least one non-zero shift besides "no_label"
            if (delta_masses_temp.getDeltaMasses().size() > 1)
            {
              delta_masses_list_.push_back(delta_masses_temp);
            }
          }
        }
      }
    }
    else if (Leu)
    {
      // Leu
      // We assume each sample to be labelled only once. Hence, we only consider samples_labels_[...][0] below.
      // Unlike in classical SILAC where two labels with two different specificities are available, in Leu labelling
      // there is only one specificity Leu. Hence, [Lys8,Arg10] but [Leu3].
      for (unsigned mc = 0; mc <= (unsigned) missed_cleavages_; ++mc)
      {
        MultiplexDeltaMasses delta_masses_temp; // single mass shift pattern
        delta_masses_temp.getDeltaMasses().emplace_back(0, "no_label");
        double mass_shift = (mc + 1) * (label_delta_mass_[samples_labels_[1][0]] - label_delta_mass_[samples_labels_[0][0]]);
        MultiplexDeltaMasses::LabelSet label_set;
        // construct label set
        for (unsigned k = 1; k < (mc + 2); ++k)
        {
          label_set.insert(samples_labels_[1][0]);
        }
        delta_masses_temp.getDeltaMasses().emplace_back(mass_shift, label_set);
        delta_masses_list_.push_back(delta_masses_temp);
      }
    }
    else if (Dimethyl || ICPL)
    {
      // Dimethyl or ICPL
      // We assume each sample to be labelled only once. Hence, we only consider samples_labels_[...][0] below.
      // Unlike in classical SILAC where two labels with two different specificities are available, in Dimethyl labelling
      // there is only one specificity (Lys and N-term). Two labels [Lys8,Arg10] in a SILAC sample are fine, since
      // both have different specificities. But two labels [Dimethyl4,Dimethyl8] make no sense, since both have the same
      // specificity. With only one specificity in Dimethyl labelling available, each sample can have only one label.
      for (unsigned mc = 0; mc <= (unsigned) missed_cleavages_; ++mc)
      {
        MultiplexDeltaMasses delta_masses_temp; // single mass shift pattern
        for (unsigned i = 0; i < samples_labels_.size(); i++)
        {
          // mass shift relative to the first (reference) sample
          double mass_shift = (mc + 1) * (label_delta_mass_[samples_labels_[i][0]] - label_delta_mass_[samples_labels_[0][0]]);
          MultiplexDeltaMasses::LabelSet label_set;
          // construct label set
          for (unsigned k = 1; k < (mc + 2); ++k)
          {
            label_set.insert(samples_labels_[i][0]);
          }
          delta_masses_temp.getDeltaMasses().emplace_back(mass_shift, label_set);
        }
        delta_masses_list_.push_back(delta_masses_temp);
      }
    }
    else if (numeric)
    {
      // user-supplied numeric mass shifts, handled analogously to Dimethyl/ICPL
      for (unsigned mc = 0; mc <= (unsigned) missed_cleavages_; ++mc)
      {
        MultiplexDeltaMasses delta_masses_temp; // single mass shift pattern
        for (unsigned i = 0; i < samples_labels_.size(); i++)
        {
          double mass_shift = (mc + 1) * (label_delta_mass_[samples_labels_[i][0]] - label_delta_mass_[samples_labels_[0][0]]);
          MultiplexDeltaMasses::LabelSet label_set;
          for (unsigned k = 1; k < (mc + 2); ++k)
          {
            label_set.insert(samples_labels_[i][0]);
          }
          delta_masses_temp.getDeltaMasses().emplace_back(mass_shift, label_set);
        }
        delta_masses_list_.push_back(delta_masses_temp);
      }
    }
    else
    {
      // none (singlet detection)
      MultiplexDeltaMasses delta_masses_temp;
      delta_masses_temp.getDeltaMasses().emplace_back(0, "no_label");
      delta_masses_list_.push_back(delta_masses_temp);
    }
    // sort mass patterns
    // (from small mass shifts to larger ones, i.e. few miscleavages = simple explanation first)
    std::sort(delta_masses_list_.begin(), delta_masses_list_.end());
    // generate flat list of all occurring isotopic labels
    for (unsigned i = 0; i < samples_labels_.size(); ++i)
    {
      for (unsigned j = 0; j < samples_labels_[i].size(); ++j)
      {
        if (samples_labels_[i][j] != "no_label")
        {
          labels_list_.push_back(samples_labels_[i][j]);
        }
      }
    }
  }
  // Extends delta_masses_list_ with all "knock-out" sub-patterns: for every
  // n-plet pattern, the (n-1)-plets, ..., doublets obtained by leaving out
  // one or more channels, plus a generic singlet. Supports n <= 4.
  // @throws Exception::InvalidSize if the pattern list is empty
  // @throws Exception::IllegalArgument for singlets (nothing to knock out) or n > 4
  void MultiplexDeltaMassesGenerator::generateKnockoutDeltaMasses()
  {
    if (delta_masses_list_.empty())
    {
      // Even in the case of a singlet search, there should be one mass shift (zero mass shift) in the list.
      throw OpenMS::Exception::InvalidSize(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, 0, "delta_masses_list_ must not be empty");
    }
    unsigned n = delta_masses_list_[0].getDeltaMasses().size(); // n=1 for singlets, n=2 for doublets, n=3 for triplets, n=4 for quadruplets
    unsigned m = delta_masses_list_.size(); // number of mass shift patterns before extension of the list
    if (n == 1)
    {
      throw OpenMS::Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Knock-outs for singlet detection not relevant.");
    }
    else if (n == 2)
    {
      // add singlets
      MultiplexDeltaMasses dm;
      dm.getDeltaMasses().emplace_back(0,"any_label_set"); // There are two singlets with different label sets. But only a single singlet with "any_label_set" is added.
      delta_masses_list_.push_back(dm);
    }
    else if (n == 3)
    {
      for (unsigned i = 0; i < m; ++i)
      {
        // add doublets (all 3 ways to pick 2 of 3 channels)
        MultiplexDeltaMasses doublet1;
        doublet1.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[0]);
        doublet1.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[1]);
        delta_masses_list_.push_back(doublet1);
        MultiplexDeltaMasses doublet2;
        doublet2.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[1]);
        doublet2.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[2]);
        delta_masses_list_.push_back(doublet2);
        MultiplexDeltaMasses doublet3;
        doublet3.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[0]);
        doublet3.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[2]);
        delta_masses_list_.push_back(doublet3);
      }
      // add singlets
      MultiplexDeltaMasses dm;
      dm.getDeltaMasses().emplace_back(0, "any_label_set"); // There are three singlets with different label sets. But only a single singlet with "any_label_set" is added.
      delta_masses_list_.push_back(dm);
    }
    else if (n == 4)
    {
      for (unsigned i = 0; i < m; ++i)
      {
        // add triplets (pick 3 of 4 channels)
        MultiplexDeltaMasses triplet1;
        triplet1.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[1]);
        triplet1.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[2]);
        triplet1.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[3]);
        delta_masses_list_.push_back(triplet1);
        MultiplexDeltaMasses triplet2;
        triplet2.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[0]);
        triplet2.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[2]);
        triplet2.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[3]);
        delta_masses_list_.push_back(triplet2);
        // Knockout combination previously forgotten. Will be un-commented in final FFM/MultiplexResolver version.
        /*MultiplexDeltaMasses triplet3;
        triplet3.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[0]);
        triplet3.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[1]);
        triplet3.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[3]);
        delta_masses_list_.push_back(triplet3);*/
        MultiplexDeltaMasses triplet4;
        triplet4.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[0]);
        triplet4.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[1]);
        triplet4.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[2]);
        delta_masses_list_.push_back(triplet4);
        // add doublets (all 6 ways to pick 2 of 4 channels)
        MultiplexDeltaMasses doublet1;
        doublet1.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[0]);
        doublet1.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[1]);
        delta_masses_list_.push_back(doublet1);
        MultiplexDeltaMasses doublet2;
        doublet2.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[0]);
        doublet2.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[2]);
        delta_masses_list_.push_back(doublet2);
        MultiplexDeltaMasses doublet3;
        doublet3.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[0]);
        doublet3.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[3]);
        delta_masses_list_.push_back(doublet3);
        MultiplexDeltaMasses doublet4;
        doublet4.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[1]);
        doublet4.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[2]);
        delta_masses_list_.push_back(doublet4);
        MultiplexDeltaMasses doublet5;
        doublet5.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[1]);
        doublet5.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[3]);
        delta_masses_list_.push_back(doublet5);
        MultiplexDeltaMasses doublet6;
        doublet6.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[2]);
        doublet6.getDeltaMasses().push_back(delta_masses_list_[i].getDeltaMasses()[3]);
        delta_masses_list_.push_back(doublet6);
      }
      // add singlets
      MultiplexDeltaMasses dm;
      dm.getDeltaMasses().emplace_back(0,"any_label_set");
      delta_masses_list_.push_back(dm);
    }
    else if (n > 4)
    {
      throw OpenMS::Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Knock-outs for multiplex experiments with more than 4 samples not supported.");
    }
    // sort mass patterns
    // (from small mass shifts to larger ones, i.e. few miscleavages = simple explanation first)
    std::sort(delta_masses_list_.begin(), delta_masses_list_.end());
  }
void MultiplexDeltaMassesGenerator::printSamplesLabelsList(std::ostream &stream) const
{
stream << "\n";
for (unsigned i = 0; i < samples_labels_.size(); ++i)
{
stream << "sample " << (i + 1) << ": ";
for (unsigned j = 0; j < samples_labels_[i].size(); ++j)
{
stream << samples_labels_[i][j] << " ";
}
stream << "\n";
}
}
void MultiplexDeltaMassesGenerator::printDeltaMassesList(std::ostream &stream) const
{
stream << "\n";
for (unsigned i = 0; i < delta_masses_list_.size(); ++i)
{
stream << "mass shift " << (i + 1) << ": ";
for (unsigned j = 0; j < delta_masses_list_[i].getDeltaMasses().size(); ++j)
{
double mass_shift = delta_masses_list_[i].getDeltaMasses()[j].delta_mass;
MultiplexDeltaMasses::LabelSet label_set = delta_masses_list_[i].getDeltaMasses()[j].label_set;
stream << mass_shift << " (";
bool first = true;
for (const auto& label : label_set)
{
if (!first)
{
stream << ",";
}
stream << label;
first = false;
}
stream << ") ";
}
stream << "\n";
}
stream << "\n";
}
  // Returns a copy of all mass shift patterns.
  std::vector<MultiplexDeltaMasses> MultiplexDeltaMassesGenerator::getDeltaMassesList()
  {
    return delta_masses_list_;
  }
  // Returns a const reference to all mass shift patterns (no copy).
  const std::vector<MultiplexDeltaMasses>& MultiplexDeltaMassesGenerator::getDeltaMassesList() const
  {
    return delta_masses_list_;
  }
  // Returns a copy of the per-sample label lists.
  std::vector<std::vector<String> > MultiplexDeltaMassesGenerator::getSamplesLabelsList()
  {
    return samples_labels_;
  }
  // Returns a const reference to the per-sample label lists (no copy).
  const std::vector<std::vector<String> >& MultiplexDeltaMassesGenerator::getSamplesLabelsList() const
  {
    return samples_labels_;
  }
  // Maps a long label name (e.g. "Label:13C(6)") to its short form (e.g. "Lys6").
  // NOTE(review): std::map::operator[] default-inserts an empty String for an
  // unknown key -- confirm callers only pass known label names.
  String MultiplexDeltaMassesGenerator::getLabelShort(const String& label)
  {
    return label_long_short_[label];
  }
  // Maps a short label name to its long form (same operator[] caveat as above).
  String MultiplexDeltaMassesGenerator::getLabelLong(const String& label)
  {
    return label_short_long_[label];
  }
MultiplexDeltaMasses::LabelSet MultiplexDeltaMassesGenerator::extractLabelSet(const AASequence& sequence)
{
String s(sequence.toString());
MultiplexDeltaMasses::LabelSet label_set;
// loop over all labels that might occur
for (std::vector<String>::size_type i = 0; i < labels_list_.size(); ++i)
{
String label("(" + getLabelLong(labels_list_[i]) + ")");
String::size_type length_label = label.size();
// check if label occurs in peptide sequence
if (s.hasSubstring(label))
{
String::size_type length_before = s.size();
s.substitute(label, "");
String::size_type length_after = s.size();
String::size_type multiple = (length_before - length_after)/length_label;
// add as many labels to the set as occurred in the peptide sequence
for (String::size_type j = 0; j < multiple; ++j)
{
label_set.insert(labels_list_[i]);
}
}
}
// add no_label if nothing was found
// (either way if no_label is in the theoretical list or not)
if (label_set.empty())
{
label_set.insert("no_label");
}
return label_set;
}
  // Populates label_master_list_ with every supported isotopic label:
  // short name, long (PSI-MS) name, description incl. composition and unimod
  // accession, and the mass shift in Da.
  void MultiplexDeltaMassesGenerator::fillLabelMasterList_()
  {
    label_master_list_.emplace_back("Arg6", "Label:13C(6)", "Label:13C(6) | C(-6) 13C(6) | unimod #188", 6.0201290268);
    label_master_list_.emplace_back("Arg10", "Label:13C(6)15N(4)", "Label:13C(6)15N(4) | C(-6) 13C(6) N(-4) 15N(4) | unimod #267", 10.008268600);
    label_master_list_.emplace_back("Lys4", "Label:2H(4)", "Label:2H(4) | H(-4) 2H(4) | unimod #481", 4.0251069836);
    label_master_list_.emplace_back("Lys6", "Label:13C(6)", "Label:13C(6) | C(-6) 13C(6) | unimod #188", 6.0201290268);
    label_master_list_.emplace_back("Lys8", "Label:13C(6)15N(2)", "Label:13C(6)15N(2) | C(-6) 13C(6) N(-2) 15N(2) | unimod #259", 8.0141988132);
    label_master_list_.emplace_back("Leu3", "Label:2H(3)", "Label:2H(3) | H(-3) 2H(3) | unimod #262", 3.018830);
    label_master_list_.emplace_back("Dimethyl0", "Dimethyl", "Dimethyl | H(4) C(2) | unimod #36", 28.031300);
    label_master_list_.emplace_back("Dimethyl4", "Dimethyl:2H(4)", "Dimethyl:2H(4) | 2H(4) C(2) | unimod #199", 32.056407);
    label_master_list_.emplace_back("Dimethyl6", "Dimethyl:2H(4)13C(2)", "Dimethyl:2H(4)13C(2) | 2H(4) 13C(2) | unimod #510", 34.063117);
    label_master_list_.emplace_back("Dimethyl8", "Dimethyl:2H(6)13C(2)", "Dimethyl:2H(6)13C(2) | H(-2) 2H(6) 13C(2) | unimod #330", 36.075670);
    label_master_list_.emplace_back("ICPL0", "ICPL", "ICPL | H(3) C(6) N O | unimod #365", 105.021464);
    label_master_list_.emplace_back("ICPL4", "ICPL:2H(4)", "ICPL:2H(4) | H(-1) 2H(4) C(6) N O | unimod #687", 109.046571);
    label_master_list_.emplace_back("ICPL6", "ICPL:13C(6)", "ICPL:13C(6) | H(3) 13C(6) N O | unimod #364", 111.041593);
    label_master_list_.emplace_back("ICPL10", "ICPL:13C(6)2H(4)", "ICPL:13C(6)2H(4) | H(-1) 2H(4) 13C(6) N O | unimod #866", 115.066700);
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/ExtendedIsotopeFitter1D.cpp | .cpp | 4,693 | 140 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/ExtendedIsotopeFitter1D.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/FEATUREFINDER/InterpolationModel.h>
#include <OpenMS/FEATUREFINDER/GaussModel.h>
#include <OpenMS/FEATUREFINDER/ExtendedIsotopeModel.h>
namespace OpenMS
{
  // Default constructor: registers the fitter's advanced parameters with
  // their default values.
  ExtendedIsotopeFitter1D::ExtendedIsotopeFitter1D() :
    MaxLikeliFitter1D()
  {
    setName("ExtendedIsotopeFitter1D");
    defaults_.setValue("statistics:variance", 1.0, "Variance of the model.", {"advanced"});
    defaults_.setValue("charge", 1, "Charge state of the model.", {"advanced"});
    defaults_.setValue("isotope:stdev", 0.1, "Standard deviation of gaussian applied to the averagine isotopic pattern to simulate the inaccuracy of the mass spectrometer.", {"advanced"});
    defaults_.setValue("isotope:monoisotopic_mz", 1.0, "Monoisotopic m/z of the model.", {"advanced"});
    defaults_.setValue("isotope:maximum", 100, "Maximum isotopic rank to be considered.", {"advanced"});
    defaults_.setValue("interpolation_step", 0.2, "Sampling rate for the interpolation of the model function.", {"advanced"});
    defaultsToParam_();
  }
  // Copy constructor: copies the base class state, then re-derives the cached
  // member variables from the copied parameters.
  ExtendedIsotopeFitter1D::ExtendedIsotopeFitter1D(const ExtendedIsotopeFitter1D& source) :
    MaxLikeliFitter1D(source)
  {
    updateMembers_();
  }
  // Destructor: nothing to release beyond the base class.
  ExtendedIsotopeFitter1D::~ExtendedIsotopeFitter1D() = default;
ExtendedIsotopeFitter1D& ExtendedIsotopeFitter1D::operator=(const ExtendedIsotopeFitter1D& source)
{
if (&source == this)
{
return *this;
}
MaxLikeliFitter1D::operator=(source);
updateMembers_();
return *this;
}
  // Fits a 1D model to the given raw data points and reports the fit quality.
  // For charge 0 a Gauss model over the data's bounding box is used, otherwise
  // an extended isotope model parameterized by the cached members.
  // @param set raw data points (position/intensity pairs)
  // @param model output: the fitted interpolation model
  // @return Pearson correlation between data and model (-1.0 if undefined)
  ExtendedIsotopeFitter1D::QualityType ExtendedIsotopeFitter1D::fit1d(const RawDataArrayType& set, std::unique_ptr<InterpolationModel>& model)
  {
    // build model
    if (charge_ == 0)
    {
      // Calculate bounding box
      CoordinateType min_bb = set[0].getPos(), max_bb = set[0].getPos();
      for (UInt pos = 1; pos < set.size(); ++pos)
      {
        CoordinateType tmp = set[pos].getPos();
        if (min_bb > tmp)
        {
          min_bb = tmp;
        }
        if (max_bb < tmp)
        {
          max_bb = tmp;
        }
      }
      // Enlarge the bounding box by a few multiples of the standard deviation
      const CoordinateType stdev = sqrt(statistics_.variance()) * tolerance_stdev_box_;
      min_bb -= stdev;
      max_bb += stdev;
      model = std::unique_ptr<InterpolationModel>(new GaussModel());
      model->setInterpolationStep(interpolation_step_);
      Param tmp;
      tmp.setValue("bounding_box:min", min_bb);
      tmp.setValue("bounding_box:max", max_bb);
      tmp.setValue("statistics:variance", statistics_.variance());
      tmp.setValue("statistics:mean", statistics_.mean());
      model->setParameters(tmp);
    }
    else
    {
      model = std::unique_ptr<InterpolationModel>(new ExtendedIsotopeModel());
      // NOTE(review): the prefix "isotope_model:" does not match the
      // "isotope:" parameters registered in the constructor, so this copy may
      // be empty -- confirm this is intended.
      Param iso_param = this->param_.copy("isotope_model:", true);
      iso_param.removeAll("stdev");
      model->setParameters(iso_param);
      model->setInterpolationStep(interpolation_step_);
      Param tmp;
      tmp.setValue("isotope:monoisotopic_mz", monoisotopic_mz_);
      tmp.setValue("charge", static_cast<Int>(charge_));
      tmp.setValue("isotope:stdev", isotope_stdev_);
      tmp.setValue("isotope:maximum", max_isotope_);
      model->setParameters(tmp);
    }
    // calculate pearson correlation
    std::vector<float> real_data;
    real_data.reserve(set.size());
    std::vector<float> model_data;
    model_data.reserve(set.size());
    for (Size i = 0; i < set.size(); ++i)
    {
      real_data.push_back(set[i].getIntensity());
      model_data.push_back(model->getIntensity(DPosition<1>(set[i].getPosition())));
    }
    QualityType correlation = Math::pearsonCorrelationCoefficient(real_data.begin(), real_data.end(), model_data.begin(), model_data.end());
    // a NaN correlation (e.g. zero variance) counts as worst possible quality
    if (std::isnan(correlation))
    {
      correlation = -1.0;
    }
    return correlation;
  }
  // Pull the current parameter values into the cached member variables.
  void ExtendedIsotopeFitter1D::updateMembers_()
  {
    MaxLikeliFitter1D::updateMembers_();
    statistics_.setVariance(param_.getValue("statistics:variance"));
    charge_ = param_.getValue("charge");
    isotope_stdev_ = param_.getValue("isotope:stdev");
    monoisotopic_mz_ = param_.getValue("isotope:monoisotopic_mz");
    max_isotope_ = param_.getValue("isotope:maximum");
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/FEATUREFINDER/SeedListGenerator.cpp | .cpp | 3,998 | 118 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/FEATUREFINDER/SeedListGenerator.h>
#include <map>
using namespace std;
namespace OpenMS
{
  // Default constructor: compiler-generated; no state to initialize here.
  SeedListGenerator::SeedListGenerator() = default;
void SeedListGenerator::generateSeedList(const PeakMap& experiment,
SeedList& seeds)
{
seeds.clear();
for (PeakMap::ConstIterator exp_it = experiment.begin();
exp_it != experiment.end(); ++exp_it)
{
if (exp_it->getMSLevel() == 2) // MS2 spectrum -> look for precursor
{
PeakMap::ConstIterator prec_it =
experiment.getPrecursorSpectrum(exp_it);
const vector<Precursor>& precursors = exp_it->getPrecursors();
DPosition<2> point(prec_it->getRT(), precursors[0].getMZ());
seeds.push_back(point);
}
}
}
void SeedListGenerator::generateSeedList(PeptideIdentificationList&
peptides, SeedList& seeds,
bool use_peptide_mass)
{
seeds.clear();
for (PeptideIdentification& pep : peptides)
{
double mz;
if (!pep.getHits().empty() && use_peptide_mass)
{
pep.sort();
const PeptideHit& hit = pep.getHits().front();
Int charge = hit.getCharge();
mz = hit.getSequence().getMZ(charge);
}
else
{
mz = pep.getMZ();
}
DPosition<2> point(pep.getRT(), mz);
seeds.push_back(point);
}
}
void SeedListGenerator::generateSeedLists(const ConsensusMap& consensus,
std::map<UInt64, SeedList>& seed_lists)
{
seed_lists.clear();
// iterate over all consensus features...
for (ConsensusMap::ConstIterator cons_it = consensus.begin();
cons_it != consensus.end(); ++cons_it)
{
DPosition<2> point(cons_it->getRT(), cons_it->getMZ());
// for each sub-map in the consensus map, add a seed at the position of
// this consensus feature:
for (ConsensusMap::ColumnHeaders::const_iterator file_it =
consensus.getColumnHeaders().begin(); file_it !=
consensus.getColumnHeaders().end(); ++file_it)
seed_lists[file_it->first].push_back(point);
// for each feature contained in the consensus feature, remove the seed of
// the corresponding map:
for (ConsensusFeature::HandleSetType::const_iterator feat_it =
cons_it->getFeatures().begin(); feat_it !=
cons_it->getFeatures().end(); ++feat_it)
{
seed_lists[feat_it->getMapIndex()].pop_back();
}
// this leaves seeds for maps where no feature was found near the
// consensus position
}
}
void SeedListGenerator::convertSeedList(const SeedList& seeds,
FeatureMap& features)
{
features.clear(true); // "true" should really be a default value here...
Size counter = 0;
for (SeedList::const_iterator seed_it = seeds.begin();
seed_it != seeds.end(); ++seed_it, ++counter)
{
Feature feature;
feature.setRT(seed_it->getX());
feature.setMZ(seed_it->getY());
feature.setUniqueId(counter);
features.push_back(feature);
}
// // assign unique ids:
// features.applyMemberFunction(&UniqueIdInterface::setUniqueId);
}
void SeedListGenerator::convertSeedList(const FeatureMap& features,
SeedList& seeds)
{
seeds.clear();
for (const Feature& feat : features)
{
DPosition<2> point(feat.getRT(), feat.getMZ());
seeds.push_back(point);
}
}
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/APPLICATIONS/ParameterInformation.cpp | .cpp | 1,974 | 71 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Marc Sturm, Clemens Groepl $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/ParameterInformation.h>
namespace OpenMS
{
/// Detailed constructor: captures all metadata of a single command-line parameter.
/// @param n parameter name (as used on the command line)
/// @param t parameter type (string, int, flag, ...)
/// @param arg argument placeholder shown in the help text (e.g. "<file>")
/// @param def default value used when the parameter is omitted
/// @param desc human-readable description for the help text
/// @param req whether the user must provide this parameter
/// @param adv whether the parameter is 'advanced' (hidden in the default help)
/// @param tag_values tags attached to the parameter (e.g. file-type restrictions)
ParameterInformation::ParameterInformation(const String& n, ParameterTypes t, const String& arg, const ParamValue& def, const String& desc, bool req, bool adv, const StringList& tag_values) :
  name(n),
  type(t),
  default_value(def),
  description(desc),
  argument(arg),
  required(req),
  advanced(adv),
  tags(tag_values),
  valid_strings(),
  // numeric restrictions default to the widest (symmetric) range: -max .. +max
  min_int(-std::numeric_limits<Int>::max()),
  max_int(std::numeric_limits<Int>::max()),
  min_float(-std::numeric_limits<double>::max()),
  max_float(std::numeric_limits<double>::max())
{
}
/// Default constructor: an unnamed, typeless parameter that is 'required' by
/// default, with unrestricted numeric ranges.
ParameterInformation::ParameterInformation() :
  name(),
  type(NONE),
  default_value(),
  description(),
  argument(),
  required(true),
  advanced(false),
  tags(),
  valid_strings(),
  // numeric restrictions default to the widest (symmetric) range: -max .. +max
  min_int(-std::numeric_limits<Int>::max()),
  max_int(std::numeric_limits<Int>::max()),
  min_float(-std::numeric_limits<double>::max()),
  max_float(std::numeric_limits<double>::max())
{
}
/// Assignment operator: member-wise copy of all parameter metadata.
/// Self-assignment is detected and treated as a no-op.
ParameterInformation& ParameterInformation::operator=(const ParameterInformation& rhs)
{
  if (this != &rhs)
  {
    name = rhs.name;
    type = rhs.type;
    default_value = rhs.default_value;
    description = rhs.description;
    argument = rhs.argument;
    required = rhs.required;
    advanced = rhs.advanced;
    tags = rhs.tags;
    valid_strings = rhs.valid_strings;
    min_int = rhs.min_int;
    max_int = rhs.max_int;
    min_float = rhs.min_float;
    max_float = rhs.max_float;
  }
  return *this;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/APPLICATIONS/ConsoleUtils.cpp | .cpp | 6,871 | 198 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/ConsoleUtils.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/DATASTRUCTURES/StringListUtils.h>
#ifdef OPENMS_WINDOWSPLATFORM
#include <windows.h> // for GetConsoleScreenBufferInfo()
#undef min
#undef max
#endif
#include <cstdlib>
#include <cstdio>
namespace OpenMS
{
/// Constructor: determines the console width once and caches it in console_width_.
ConsoleUtils::ConsoleUtils()
{
  // initialize the console width
  readConsoleSize_();
}
/// Access the process-wide singleton; created (and console width probed) on first use.
const ConsoleUtils& ConsoleUtils::getInstance()
{
  static ConsoleUtils cu; // Meyers singleton: initialization is thread-safe since C++11
  return cu;
}
/// Determine the width (columns) of the attached console and cache it in console_width_.
/// Strategy: try the COLUMNS environment variable first, then a platform-specific
/// query (Win32 console API on Windows, the external 'stty size' command elsewhere).
/// If the width cannot be determined or is implausibly small (< 10), the width is
/// set to INT_MAX, which effectively disables output re-shaping.
/// @return the usable console width in characters (one column is reserved for '\n')
int ConsoleUtils::readConsoleSize_()
{
  // avoid calling this function more than once
  static bool been_here = false;
  if (been_here)
  {
    return console_width_;
  }
  been_here = true;
  // determine column width of current console
  try
  {
    console_width_ = -1; // sentinel: "unknown"
    char* p_env = getenv("COLUMNS");
    if (p_env)
    {
      console_width_ = String(p_env).toInt();
    }
    else
    {
      OPENMS_LOG_DEBUG << "output shaping: COLUMNS env does not exist!" << std::endl;
#ifdef OPENMS_WINDOWSPLATFORM
      // query the Win32 console screen buffer for its width
      HANDLE hOut;
      CONSOLE_SCREEN_BUFFER_INFO SBInfo;
      hOut = GetStdHandle(STD_OUTPUT_HANDLE);
      GetConsoleScreenBufferInfo(hOut, &SBInfo);
      console_width_ = SBInfo.dwSize.X;
#else // Linux / MacOS
      // try "stty size" command
      // don't use QProcess, since stty will not work there
      FILE* fp = popen("stty size", "r");
      if (fp != nullptr)
      {
        char buff[100];
        if (fgets(buff, sizeof(buff), fp) != nullptr)
        {
          // expected output: "<rows> <columns>"
          String output(buff);
          StringList components;
          output.split(' ', components);
          if (components.size() == 2)
            console_width_ = components[1].toInt();
        }
        else
        {
          // TODO: throw ?
          OPENMS_LOG_DEBUG << "Could not read 100 characters from file." << std::endl;
        }
        pclose(fp); //moved pclose outside of fgets condition - cant move it out of not nullpointer condition because pclose(null pointer is undefined behaviour)
      }
      else
      {
        OPENMS_LOG_DEBUG << "output shaping: stty size command failed." << std::endl;
      }
#endif
    }
    --console_width_; // to add the \n at the end of each line without forcing another line break on windows
  }
  catch (...)
  {
    // best effort: any failure (e.g. String::toInt on garbage) leaves console_width_ at -1
  }
  // if console_width_ is still -1 or too small, we do not use command line reshaping
  if (console_width_ < 10)
  {
    OPENMS_LOG_DEBUG << "Console width could not be determined or is smaller than 10. Not using output shaping!" << std::endl;
    console_width_ = std::numeric_limits<int>::max();
  }
  return console_width_;
}
/// Break @p input into console-width lines and join them with '\n'.
/// See breakString_() for the meaning of the parameters.
String ConsoleUtils::breakString(const String& input, const Size indentation, const Size max_lines, const Size first_line_prefill)
{
  const StringList broken = getInstance().breakString_(input, indentation, max_lines, first_line_prefill);
  return ListUtils::concatenate(broken, '\n');
}
/// Same as breakString(), but returns the individual lines instead of a joined string.
StringList ConsoleUtils::breakStringList(const String& input, const Size indentation, const Size max_lines, const Size first_line_prefill)
{
  const ConsoleUtils& instance = getInstance();
  return instance.breakString_(input, indentation, max_lines, first_line_prefill);
}
/// Core line-breaking routine: splits @p input into lines no wider than the
/// cached console width, honoring embedded '\n' and avoiding ugly short word splits.
/// @param input text to re-shape (may contain '\n')
/// @param indentation number of leading spaces for every line except the first
/// @param max_lines maximum number of lines returned; excess middle lines are
///        replaced by an "..." line while the last line is kept
/// @param first_line_prefill number of characters already printed on the current
///        console line (the first line is shortened accordingly)
/// @return the broken lines (no trailing '\n' characters; implicit per entry)
StringList ConsoleUtils::breakString_(const OpenMS::String& input, const Size indentation, const Size max_lines, Size first_line_prefill) const
{
  StringList result;
  if (input.empty())
  {
    return result;
  }
  // NOTE(review): Size is unsigned, so if indentation >= console_width_ this
  // subtraction wraps around and the '< 1' check below cannot catch it --
  // presumably callers keep indentation well below the console width; confirm.
  Size short_line_len = console_width_ - indentation;
  if (short_line_len < 1)
  {
    //std::cerr << "INTERNAL ERROR: cannot split lines into empty strings! see breakString_()";
    result.push_back(input);
    return result;
  }
  if ((int)first_line_prefill > console_width_)
  { // first line is already longer than console width. Assume console did an automatic linebreak.
    first_line_prefill = first_line_prefill % console_width_;
  }
  String line; /// our current line as extracted from @p input
  for (Size i = 0; i < input.size(); /* i+=? computed below */)
  {
    // first line has full length (minus the prefilled chars)
    const Size remaining_line_chars = result.empty() ? console_width_ - first_line_prefill : short_line_len;
    // the first line does not need indentation
    const Size prefix_size_current_line = result.empty() ? 0 : indentation;
    line = input.substr(i, remaining_line_chars);
    // how many chars to advance in 'input'
    Size advance_size = line.size();
    // break by internal '\n' as well
    if (auto pos = line.find('\n', 0); pos != String::npos)
    {
      line = line.substr(0, pos); // do NOT include the '\n' (it is implicitly represented by adding a new string to 'result')
      advance_size = line.size() + 1; // skip the '\n' in the input
    }
    // check if we are using the full length and split a word at the same time
    // --> cut a little earlier in that case for nicer looks
    // e.g. "test this br" + '\n' + "eak" would become "test this " + '\n' + "break"
    if (line.size() == remaining_line_chars && short_line_len > 8 && line.rfind(' ') != String::npos)
    {
      String last_word = line.suffix(' ');
      if (last_word.length() < 4)
      { // shorten by last word (will move to the next line)
        line = line.prefix(line.size() - last_word.length());
        advance_size -= last_word.size(); // + actual chars
      }
    }
    i += advance_size;
    String s_intend = String(prefix_size_current_line, ' ');
    String r = s_intend + line;
    result.push_back(r); //(r.fillRight(' ', (UInt) line_len));
  }
  if (input.back() == '\n')
  { // last char input was a linebreak (which would put the cursor at column 0 in the next line)
    // --> but we want indentation!
    result.push_back(String(indentation, ' '));
  }
  // NOTE(review): the erase below computes 'max_lines - 2' in unsigned
  // arithmetic -- this assumes max_lines >= 2; verify against callers.
  if (result.size() > max_lines) // remove lines from end if we get too many (but leave the last one)...
  {
    String last = result.back();
    result.erase(result.begin() + max_lines - 2, result.end());
    result.push_back((String(indentation, ' ') + String("..."))); //.fillRight(' ',(UInt) line_len));
    result.push_back(last);
  }
  // remove last " " from last line to prevent automatic line break
  //if (result.size()>0 && result[result.size()-1].hasSuffix(" ")) result[result.size()-1] = result[result.size()-1].substr(0,result[result.size()-1].size()-1);
  return result;
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/APPLICATIONS/SearchEngineBase.cpp | .cpp | 6,363 | 155 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/SearchEngineBase.h>
#include <OpenMS/ANALYSIS/ID/PeptideIndexing.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FORMAT/MzMLFile.h>
#include <OpenMS/SYSTEM/File.h>
using namespace std;
namespace OpenMS
{
/// Constructor: forwards all tool metadata unchanged to the TOPPBase base class.
/// @param tool_name name of the search engine tool
/// @param tool_description one-line description shown in the tool's help
/// @param official whether this is an official OpenMS tool
/// @param citations citations printed by the tool
/// @param toolhandler_test whether to verify the tool is registered in the ToolHandler
SearchEngineBase::SearchEngineBase(const String& tool_name, const String& tool_description, bool official, const std::vector<Citation>& citations, bool toolhandler_test) :
  TOPPBase(tool_name, tool_description, official, citations, toolhandler_test)
{
}
/// Destructor (defaulted; no resources beyond the base class).
SearchEngineBase::~SearchEngineBase() = default;
/// Return the value of the '-in' option after validating that the file contains
/// centroided spectra of the requested MS level (search engines require peaks,
/// not profile data).
/// @param ms_level the MS level whose spectra are checked (usually 2)
/// @return the input file name
/// @throws Exception::FileEmpty if an mzML file contains no spectra of @p ms_level
/// @throws Exception::IllegalArgument if profile (or no centroided) data is found
///         and '-force' is not set
String SearchEngineBase::getRawfileName(int ms_level) const
{
  String inputfile_name = getStringOption_("in");
  FileHandler fh;
  auto type = fh.getType(inputfile_name);
  switch (type)
  {
    case FileTypes::MZML:
    {
      MzMLFile mzml;
      mzml.getOptions().setMSLevels({ ms_level }); // only query MS2 (or whatever ms_level is)
      const auto& centroid_info = mzml.getCentroidInfo(inputfile_name);
      const auto lvl_info = centroid_info.find(ms_level);
      if (lvl_info == centroid_info.end())
      {
        throw Exception::FileEmpty(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Error: No MS" + String(ms_level) + " spectra in input file.");
      }
      if (lvl_info->second.count_profile > 0)
      { // profile data present: fatal unless the user insists via '-force'
        if (getFlag_("force"))
        {
          OPENMS_LOG_WARN << "Warning: Profile data found, but centroid MS spectra required. "
                             "Since '-force' flag is in effect, we will continue, but results are likely bogus." << std::endl;
        }
        else
        {
          throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
            "Error: Profile data provided but centroided MS" + String(ms_level) + " spectra required. To enforce processing (unwise!) of the data enable the -force flag (results will be bogus!).");
        }
      }
      if (lvl_info->second.count_centroided == 0)
      { // nothing usable at all at this MS level
        if (getFlag_("force"))
        {
          OPENMS_LOG_WARN << "Warning: No centroided MS" + String(ms_level) + " were found, but are required. "
                             "Since '-force' flag is in effect, we will continue, but results might be bogus." << std::endl;
        }
        else
        {
          throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
            "Error: No centroided MS" + String(ms_level) + " spectra were found, but are required. To enforce processing of the data enable the -force flag (results will likely be bogus!).");
        }
      }
      // do not check for UNKNOWN, since it does not really tell much (UNKNOWN can only occur if meta data is missing and our peak type estimation fails (which only happens for (almost) empty spectra))
      break; // FIX: was missing, causing an (inconsequential but warning-prone) fallthrough into the MGF case
    }
    case FileTypes::MGF:
      // no warning required. MGF files should be centroided by definition
      break;
    default:
      OPENMS_LOG_WARN << "Warning: make sure that MS" << ms_level << " spectra in '" << inputfile_name << "' are centroided. Otherwise the results may be undefined!" << std::endl; // FIX: terminate the log line
  }
  return inputfile_name;
}
/// Resolve the protein database filename.
/// @param db explicit database name; if empty, the 'database' tool option is used
/// @return a readable database path (searched via File::findDatabase if needed)
String SearchEngineBase::getDBFilename(const String& db) const
{
  String filename = db.empty() ? getStringOption_("database") : db;
  if (!File::readable(filename))
  {
    // not directly readable: search the configured OpenMS database locations
    filename = File::findDatabase(filename);
  }
  return filename;
}
/// Re-map peptides to proteins using PeptideIndexing, if the 'reindex' option is "true".
/// @param protein_identifications protein IDs to update (in/out)
/// @param peptide_identifications peptide IDs to update (in/out)
/// @return EXECUTION_OK on success (or when reindexing is disabled), otherwise a
///         TOPP exit code derived from the indexer's result
SearchEngineBase::ExitCodes SearchEngineBase::reindex_(std::vector<ProteinIdentification>& protein_identifications,
                                                       PeptideIdentificationList& peptide_identifications) const
{
  if (getStringOption_("reindex") != "true")
  {
    return EXECUTION_OK; // reindexing disabled by the user
  }
  PeptideIndexing indexer;
  // overlay the tool's 'PeptideIndexing:' parameter subtree onto the indexer defaults
  Param subtree = getParam_().copy("PeptideIndexing:", true);
  Param indexer_params = indexer.getParameters();
  indexer_params.update(subtree, false, false, false, false, getGlobalLogDebug()); // suppress param. update message
  indexer.setParameters(indexer_params);
  indexer.setLogType(this->log_type_);
  FASTAContainer<TFI_File> proteins(getDBFilename());
  const PeptideIndexing::ExitCodes indexer_exit = indexer.run(proteins, protein_identifications, peptide_identifications);
  // translate the indexer's exit code into a TOPP exit code
  switch (indexer_exit)
  {
    case PeptideIndexing::ExitCodes::EXECUTION_OK:
    case PeptideIndexing::ExitCodes::PEPTIDE_IDS_EMPTY: // empty IDs are not fatal here
      return EXECUTION_OK;
    case PeptideIndexing::ExitCodes::DATABASE_EMPTY:
      return INPUT_FILE_EMPTY;
    case PeptideIndexing::ExitCodes::UNEXPECTED_RESULT:
      return UNEXPECTED_RESULT;
    default:
      return UNKNOWN_ERROR;
  }
}
/// Register the 'reindex' switch and the (nested) PeptideIndexing parameters for
/// this search engine tool.
/// @param peptide_indexing_parameter indexer defaults, customized by the tool (by value on purpose)
void SearchEngineBase::registerPeptideIndexingParameter_(Param peptide_indexing_parameter)
{
  // simple on/off switch exposed at tool level
  registerStringOption_("reindex", "<choice>", "true", "Recalculate peptide to protein association using OpenMS. Annotates target-decoy information.", false);
  setValidStrings_("reindex", { "true", "false" });
  peptide_indexing_parameter.setValue("missing_decoy_action", "warn");
  // mark rarely-needed indexer settings as 'advanced' so they stay hidden by default
  const StringList advanced_keys = {"decoy_string", "decoy_string_position", "missing_decoy_action", "enzyme:name", "enzyme:specificity",
                                    "write_protein_sequence", "write_protein_description", "keep_unreferenced_proteins", "unmatched_action",
                                    "aaa_max", "mismatches_max", "IL_equivalent"};
  for (const String& key : advanced_keys)
  {
    peptide_indexing_parameter.addTag(key, "advanced");
  }
  // nest everything under 'PeptideIndexing:' to avoid clobbering duplicate keys
  // shared between the tool and the indexer
  Param combined;
  combined.insert("PeptideIndexing:", peptide_indexing_parameter);
  registerFullParam_(combined);
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/APPLICATIONS/ToolHandler.cpp | .cpp | 22,642 | 412 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/ToolHandler.h>
#include <OpenMS/FORMAT/ToolDescriptionFile.h>
#include <OpenMS/SYSTEM/File.h>
#include <QStringList>
#include <QtCore/QDir>
namespace OpenMS
{
/// Build the registry of all TOPP tools, keyed by tool name, each with its
/// documentation category. Internal tools (from *.ttd files) are merged in; the
/// external-tool aggregate ('GenericWrapper') is added on request.
/// @param includeGenericWrapper also load and add the external tools wrapper (expensive)
/// @return map from tool name to its description
/// @throws Exception::InvalidValue if an internal *.ttd tool clashes with a built-in name
ToolListType ToolHandler::getTOPPToolList(const bool includeGenericWrapper)
{
  ToolListType tools_map;
  // Note: don't use special characters like slashes in category names (leads to subcategories in KNIME)
  const auto cat_calibration = "Mass Correction and Calibration";
  const auto cat_centroiding = "Spectrum processing: Centroiding";
  const auto cat_crosslinking = "Cross-Linking";
  const auto cat_dev = "[for Developers]";
  const auto cat_file_converter = "File Converter";
  const auto cat_file_filter_extract_merge = "File Filtering, Extraction and Merging";
  const auto cat_ID_MTX = "Metabolite Identification";
  const auto cat_ID_proc = "Identification Processing";
  const auto cat_ID_search = "Identification of Proteins and Peptides (SearchEngines)";
  const auto cat_linking = "Feature Linking";
  const auto cat_map_align = "Map Alignment";
  const auto cat_misc = "Misc";
  const auto cat_QC = "Quality Control";
  const auto cat_quant = "Quantitation";
  const auto cat_rna = "RNA";
  const auto cat_signal_proc_misc = "Spectrum processing: Misc";
  const auto cat_signal_proc_smooth_normalize = "Spectrum Processing: Peak Smoothing and Normalization";
  const auto cat_targeted = "Targeted Experiments and OpenSWATH";
  const auto cat_topdown = "Top-Down";
  // STOP and read!
  // 1) add your tool in alphabetical order!
  // 2) if you add/change categories, also mirror your changes in doc/doxygen/public/TOPP.doxygen
  tools_map["AccurateMassSearch"] = Internal::ToolDescription("AccurateMassSearch", cat_ID_MTX);
  tools_map["AssayGeneratorMetabo"] = Internal::ToolDescription("AssayGeneratorMetabo", cat_targeted);
  tools_map["AssayGeneratorMetaboSirius"] = Internal::ToolDescription("AssayGeneratorMetaboSirius", cat_targeted);
  tools_map["BaselineFilter"] = Internal::ToolDescription("BaselineFilter", cat_signal_proc_smooth_normalize);
  tools_map["FeatureFinderLFQ"] = Internal::ToolDescription("FeatureFinderLFQ", cat_quant);
  tools_map["ClusterMassTraces"] = Internal::ToolDescription("ClusterMassTraces", cat_misc);
  tools_map["ClusterMassTracesByPrecursor"] = Internal::ToolDescription("ClusterMassTracesByPrecursor", cat_targeted);
  tools_map["CometAdapter"] = Internal::ToolDescription("CometAdapter", cat_ID_search);
  tools_map["ConsensusID"] = Internal::ToolDescription("ConsensusID", cat_ID_proc);
  tools_map["ConsensusMapNormalizer"] = Internal::ToolDescription("ConsensusMapNormalizer", cat_quant);
  tools_map["CVInspector"] = Internal::ToolDescription("CVInspector", cat_dev);
  tools_map["DatabaseFilter"] = Internal::ToolDescription("DatabaseFilter", cat_file_filter_extract_merge);
  tools_map["DatabaseSuitability"] = Internal::ToolDescription("DatabaseSuitability", cat_QC);
  tools_map["Decharger"] = Internal::ToolDescription("Decharger", cat_quant);
  tools_map["DecoyDatabase"] = Internal::ToolDescription("DecoyDatabase", cat_file_filter_extract_merge);
  tools_map["DeMeanderize"] = Internal::ToolDescription("DeMeanderize", cat_misc);
  tools_map["Digestor"] = Internal::ToolDescription("Digestor", cat_ID_proc);
  tools_map["DigestorMotif"] = Internal::ToolDescription("DigestorMotif", cat_ID_proc);
  tools_map["DTAExtractor"] = Internal::ToolDescription("DTAExtractor", cat_file_filter_extract_merge);
  tools_map["EICExtractor"] = Internal::ToolDescription("EICExtractor", cat_quant);
  tools_map["Epifany"] = Internal::ToolDescription("Epifany", cat_ID_proc);
  tools_map["ExecutePipeline"] = Internal::ToolDescription("ExecutePipeline", cat_misc);
  tools_map["ExternalCalibration"] = Internal::ToolDescription("ExternalCalibration", cat_calibration);
  tools_map["FalseDiscoveryRate"] = Internal::ToolDescription("FalseDiscoveryRate", cat_ID_proc);
  tools_map["FeatureFinderCentroided"] = Internal::ToolDescription("FeatureFinderCentroided", cat_quant);
  tools_map["FeatureFinderIdentification"] = Internal::ToolDescription("FeatureFinderIdentification", cat_quant);
  tools_map["FeatureFinderMetabo"] = Internal::ToolDescription("FeatureFinderMetabo", cat_quant);
  tools_map["FeatureFinderMetaboIdent"] = Internal::ToolDescription("FeatureFinderMetaboIdent", cat_quant);
  tools_map["FeatureFinderMultiplex"] = Internal::ToolDescription("FeatureFinderMultiplex", cat_quant);
  tools_map["FeatureLinkerLabeled"] = Internal::ToolDescription("FeatureLinkerLabeled", cat_linking);
  tools_map["FeatureLinkerUnlabeled"] = Internal::ToolDescription("FeatureLinkerUnlabeled", cat_linking);
  tools_map["FeatureLinkerUnlabeledKD"] = Internal::ToolDescription("FeatureLinkerUnlabeledKD", cat_linking);
  tools_map["FeatureLinkerUnlabeledQT"] = Internal::ToolDescription("FeatureLinkerUnlabeledQT", cat_linking);
  tools_map["FileConverter"] = Internal::ToolDescription("FileConverter", cat_file_converter);
  tools_map["FileFilter"] = Internal::ToolDescription("FileFilter", cat_file_filter_extract_merge);
  tools_map["FileInfo"] = Internal::ToolDescription("FileInfo", cat_file_filter_extract_merge);
  tools_map["FileMerger"] = Internal::ToolDescription("FileMerger", cat_file_filter_extract_merge);
  tools_map["FLASHDeconv"] = Internal::ToolDescription("FLASHDeconv", cat_topdown);
  tools_map["FuzzyDiff"] = Internal::ToolDescription("FuzzyDiff", cat_dev);
  // tools_map["GenericWrapper"] = ... (place any extra handling here)
  tools_map["GNPSExport"] = Internal::ToolDescription("GNPSExport", cat_file_converter);
  tools_map["HighResPrecursorMassCorrector"] = Internal::ToolDescription("HighResPrecursorMassCorrector", cat_calibration);
  tools_map["IDConflictResolver"] = Internal::ToolDescription("IDConflictResolver", cat_ID_proc);
  tools_map["IDDecoyProbability"] = Internal::ToolDescription("IDDecoyProbability", cat_ID_proc);
  tools_map["IDExtractor"] = Internal::ToolDescription("IDExtractor", cat_ID_proc);
  tools_map["IDFileConverter"] = Internal::ToolDescription("IDFileConverter", cat_file_converter);
  tools_map["IDFilter"] = Internal::ToolDescription("IDFilter", cat_file_filter_extract_merge);
  tools_map["IDMapper"] = Internal::ToolDescription("IDMapper", cat_ID_proc);
  tools_map["IDMerger"] = Internal::ToolDescription("IDMerger", cat_file_filter_extract_merge);
  tools_map["IDPosteriorErrorProbability"] = Internal::ToolDescription("IDPosteriorErrorProbability", cat_ID_proc);
  tools_map["IDRipper"] = Internal::ToolDescription("IDRipper", cat_file_filter_extract_merge);
  tools_map["IDRTCalibration"] = Internal::ToolDescription("IDRTCalibration", cat_calibration);
  tools_map["IDScoreSwitcher"] = Internal::ToolDescription("IDScoreSwitcher", cat_ID_proc);
  tools_map["IDSplitter"] = Internal::ToolDescription("IDSplitter", cat_file_filter_extract_merge);
  tools_map["ImageCreator"] = Internal::ToolDescription("ImageCreator", cat_misc);
  tools_map["INIUpdater"] = Internal::ToolDescription("INIUpdater", cat_misc);
  tools_map["InternalCalibration"] = Internal::ToolDescription("InternalCalibration", cat_calibration);
  tools_map["IonMobilityBinning"] = Internal::ToolDescription("IonMobilityBinning", cat_file_filter_extract_merge);
  tools_map["IsobaricAnalyzer"] = Internal::ToolDescription("IsobaricAnalyzer", cat_quant);
  tools_map["IsobaricWorkflow"] = Internal::ToolDescription("IsobaricWorkflow", cat_quant);
  tools_map["JSONExporter"] = Internal::ToolDescription("JSONExporter", cat_dev);
  tools_map["LuciphorAdapter"] = Internal::ToolDescription("LuciphorAdapter", cat_ID_search);
  tools_map["MapAlignerIdentification"] = Internal::ToolDescription("MapAlignerIdentification", cat_map_align);
  tools_map["MapAlignerPoseClustering"] = Internal::ToolDescription("MapAlignerPoseClustering", cat_map_align);
  tools_map["MapAlignerTreeGuided"] = Internal::ToolDescription("MapAlignerTreeGuided", cat_map_align);
  tools_map["MapNormalizer"] = Internal::ToolDescription("MapNormalizer", cat_signal_proc_smooth_normalize);
  tools_map["MapRTTransformer"] = Internal::ToolDescription("MapRTTransformer", cat_map_align);
  tools_map["MapStatistics"] = Internal::ToolDescription("MapStatistics", cat_file_filter_extract_merge);
  tools_map["MaRaClusterAdapter"] = Internal::ToolDescription("MaRaClusterAdapter", cat_signal_proc_misc);
  tools_map["MascotAdapterOnline"] = Internal::ToolDescription("MascotAdapterOnline", cat_ID_search);
  tools_map["MassCalculator"] = Internal::ToolDescription("MassCalculator", cat_misc);
  tools_map["MassTraceExtractor"] = Internal::ToolDescription("MassTraceExtractor", cat_quant);
  tools_map["MetaboliteAdductDecharger"] = Internal::ToolDescription("MetaboliteAdductDecharger", cat_quant);
  tools_map["MetaboliteSpectralMatcher"] = Internal::ToolDescription("MetaboliteSpectralMatcher", cat_ID_MTX);
  tools_map["MetaProSIP"] = Internal::ToolDescription("MetaProSIP", cat_quant);
  tools_map["MRMMapper"] = Internal::ToolDescription("MRMMapper", cat_targeted);
  tools_map["MRMPairFinder"] = Internal::ToolDescription("MRMPairFinder", cat_targeted);
  tools_map["MRMTransitionGroupPicker"] = Internal::ToolDescription("MRMTransitionGroupPicker", cat_targeted);
  tools_map["MSFraggerAdapter"] = Internal::ToolDescription("MSFraggerAdapter", cat_ID_search);
  tools_map["MSGFPlusAdapter"] = Internal::ToolDescription("MSGFPlusAdapter", cat_ID_search);
  tools_map["MSstatsConverter"] = Internal::ToolDescription("MSstatsConverter", cat_file_converter);
  tools_map["MultiplexResolver"] = Internal::ToolDescription("MultiplexResolver", cat_quant);
  tools_map["MzMLSplitter"] = Internal::ToolDescription("MzMLSplitter", cat_file_filter_extract_merge);
  tools_map["MzTabExporter"] = Internal::ToolDescription("MzTabExporter", cat_file_converter);
  tools_map["NoiseFilterGaussian"] = Internal::ToolDescription("NoiseFilterGaussian", cat_signal_proc_smooth_normalize);
  tools_map["NoiseFilterSGolay"] = Internal::ToolDescription("NoiseFilterSGolay", cat_signal_proc_smooth_normalize);
  tools_map["NovorAdapter"] = Internal::ToolDescription("NovorAdapter", cat_ID_search);
  tools_map["NucleicAcidSearchEngine"] = Internal::ToolDescription("NucleicAcidSearchEngine", cat_rna);
  tools_map["OpenMSDatabasesInfo"] = Internal::ToolDescription("OpenMSDatabasesInfo", cat_dev);
  tools_map["OpenMSInfo"] = Internal::ToolDescription("OpenMSInfo", cat_misc);
  tools_map["OpenNuXL"] = Internal::ToolDescription("OpenNuXL", cat_crosslinking);
  tools_map["OpenPepXL"] = Internal::ToolDescription("OpenPepXL", cat_crosslinking);
  tools_map["OpenSwathAnalyzer"] = Internal::ToolDescription("OpenSwathAnalyzer", cat_targeted);
  tools_map["OpenSwathAssayGenerator"] = Internal::ToolDescription("OpenSwathAssayGenerator", cat_targeted);
  tools_map["OpenSwathChromatogramExtractor"] = Internal::ToolDescription("OpenSwathChromatogramExtractor", cat_targeted);
  tools_map["OpenSwathConfidenceScoring"] = Internal::ToolDescription("OpenSwathConfidenceScoring", cat_targeted);
  tools_map["OpenSwathDecoyGenerator"] = Internal::ToolDescription("OpenSwathDecoyGenerator", cat_targeted);
  tools_map["OpenSwathDIAPreScoring"] = Internal::ToolDescription("OpenSwathDIAPreScoring", cat_targeted);
  tools_map["OpenSwathFeatureXMLToTSV"] = Internal::ToolDescription("OpenSwathFeatureXMLToTSV", cat_targeted);
  tools_map["OpenSwathFileSplitter"] = Internal::ToolDescription("OpenSwathFileSplitter", cat_targeted);
  tools_map["OpenSwathMzMLFileCacher"] = Internal::ToolDescription("OpenSwathMzMLFileCacher", cat_targeted);
  tools_map["OpenSwathRewriteToFeatureXML"] = Internal::ToolDescription("OpenSwathRewriteToFeatureXML", cat_targeted);
  tools_map["OpenSwathRTNormalizer"] = Internal::ToolDescription("OpenSwathRTNormalizer", cat_targeted);
  tools_map["OpenSwathWorkflow"] = Internal::ToolDescription("OpenSwathWorkflow", cat_targeted);
  tools_map["PeakPickerHiRes"] = Internal::ToolDescription("PeakPickerHiRes", cat_centroiding);
  tools_map["PeakPickerIM"] = Internal::ToolDescription("PeakPickerIM", cat_centroiding);
  tools_map["PeakPickerIterative"] = Internal::ToolDescription("PeakPickerIterative", cat_centroiding);
  tools_map["PeptideIndexer"] = Internal::ToolDescription("PeptideIndexer", cat_ID_proc);
  tools_map["PeptideDataBaseSearchFI"] = Internal::ToolDescription("PeptideDataBaseSearchFI", cat_ID_search);
  tools_map["PercolatorAdapter"] = Internal::ToolDescription("PercolatorAdapter", cat_ID_proc);
  tools_map["PhosphoScoring"] = Internal::ToolDescription("PhosphoScoring", cat_ID_proc);
  tools_map["ProteinInference"] = Internal::ToolDescription("ProteinInference", cat_ID_proc);
  tools_map["ProteinQuantifier"] = Internal::ToolDescription("ProteinQuantifier", cat_quant);
  tools_map["ProteomicsLFQ"] = Internal::ToolDescription("ProteomicsLFQ", cat_quant);
  tools_map["PSMFeatureExtractor"] = Internal::ToolDescription("PSMFeatureExtractor", cat_ID_proc);
#ifdef WITH_PARQUET
  tools_map["QuantmsIOConverter"] = Internal::ToolDescription("QuantmsIOConverter", cat_file_converter);
#endif
  tools_map["QCCalculator"] = Internal::ToolDescription("QCCalculator", cat_QC);
  tools_map["QCEmbedder"] = Internal::ToolDescription("QCEmbedder", cat_QC);
  tools_map["QCExporter"] = Internal::ToolDescription("QCExporter", cat_QC);
  tools_map["QCExtractor"] = Internal::ToolDescription("QCExtractor", cat_QC);
  tools_map["QCImporter"] = Internal::ToolDescription("QCImporter", cat_QC);
  tools_map["QCMerger"] = Internal::ToolDescription("QCMerger", cat_QC);
  tools_map["QCShrinker"] = Internal::ToolDescription("QCShrinker", cat_QC); // FIX: was registered with the wrong tool name ("QCExporter")
  tools_map["QualityControl"] = Internal::ToolDescription("QualityControl", cat_QC);
  tools_map["Resampler"] = Internal::ToolDescription("Resampler", cat_signal_proc_misc);
  tools_map["RNADigestor"] = Internal::ToolDescription("RNADigestor", cat_rna);
  tools_map["RNAMassCalculator"] = Internal::ToolDescription("RNAMassCalculator", cat_rna);
  tools_map["RNPxlXICFilter"] = Internal::ToolDescription("RNPxlXICFilter", cat_crosslinking);
  tools_map["SageAdapter"] = Internal::ToolDescription("SageAdapter", cat_ID_search);
  tools_map["SeedListGenerator"] = Internal::ToolDescription("SeedListGenerator", cat_quant);
  tools_map["SemanticValidator"] = Internal::ToolDescription("SemanticValidator", cat_dev);
  tools_map["SequenceCoverageCalculator"] = Internal::ToolDescription("SequenceCoverageCalculator", cat_ID_proc);
  tools_map["SimpleSearchEngine"] = Internal::ToolDescription("SimpleSearchEngine", cat_ID_search);
  tools_map["SiriusExport"] = Internal::ToolDescription("SiriusExport", cat_ID_MTX);
  tools_map["SpectraFilterNLargest"] = Internal::ToolDescription("SpectraFilterNLargest", cat_signal_proc_smooth_normalize);
  tools_map["SpectraFilterNormalizer"] = Internal::ToolDescription("SpectraFilterNormalizer", cat_signal_proc_smooth_normalize);
  tools_map["SpectraFilterThresholdMower"] = Internal::ToolDescription("SpectraFilterThresholdMower", cat_signal_proc_smooth_normalize);
  tools_map["SpectraFilterWindowMower"] = Internal::ToolDescription("SpectraFilterWindowMower", cat_signal_proc_smooth_normalize);
  tools_map["SpectraMerger"] = Internal::ToolDescription("SpectraMerger", cat_signal_proc_misc);
  tools_map["SpectraSTSearchAdapter"] = Internal::ToolDescription("SpectraSTSearchAdapter", cat_ID_search);
  tools_map["StaticModification"] = Internal::ToolDescription("StaticModification", cat_ID_proc);
  tools_map["TargetedFileConverter"] = Internal::ToolDescription("TargetedFileConverter", cat_file_converter);
  tools_map["TextExporter"] = Internal::ToolDescription("TextExporter", cat_file_converter);
  tools_map["TICCalculator"] = Internal::ToolDescription("TICCalculator", cat_misc);
  tools_map["TriqlerConverter"] = Internal::ToolDescription("TriqlerConverter", cat_file_converter);
  tools_map["XFDR"] = Internal::ToolDescription("XFDR", cat_crosslinking);
  tools_map["XMLValidator"] = Internal::ToolDescription("XMLValidator", cat_dev);
  // STOP! insert your tool in alphabetical order for easier maintenance (tools requiring the GUI lib should be added below **in addition**)
  // ATTENTION: tools requiring the GUI lib
#ifndef WITH_GUI
  StringList GUI_tools = {
    "ExecutePipeline",
    "ImageCreator",
    "INIUpdater",
    "Resampler",
  };
  for (const auto& tool : GUI_tools) {
    tools_map.erase(tool);
  }
#endif
  // INTERNAL tools
  // this operation is expensive, as we need to parse configuration files (*.ttd)
  for (const Internal::ToolDescription& td : getInternalTools_())
  {
    if (tools_map.find(td.name) == tools_map.end())
    {
      tools_map[td.name] = td;
    }
    else
    {
      // FIX: the message previously lacked the closing quote around the tool name
      throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Duplicate tool name error: Trying to add internal tool '" + td.name + "'", td.name);
    }
  }
  // EXTERNAL tools
  // this operation is expensive, as we need to parse configuration files (*.ttd)
  if (includeGenericWrapper)
  {
    tools_map["GenericWrapper"] = getExternalTools_();
  }
  return tools_map;
}
/// Return the list of 'types' registered for a given tool.
/// @param toolname name of the tool to look up
/// @return the tool's type list
/// @throws Exception::InvalidValue if the tool is unknown
StringList ToolHandler::getTypes(const String& toolname)
{
  // 'GenericWrapper' is only part of the list when external tools are included
  const bool include_generic_wrapper = (toolname == "GenericWrapper");
  ToolListType tools = getTOPPToolList(include_generic_wrapper);
  // single lookup instead of find() + operator[] (also removes an unused local)
  auto it = tools.find(toolname);
  if (it != tools.end())
  {
    return it->second.types;
  }
  throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Requested tool '" + toolname + "' does not exist!", toolname);
}
/// Lazily load the internal tool descriptions from *.ttd configuration files.
/// The parsed list is cached in tools_internal_; parsing happens only on the first call.
/// @return a copy of the cached internal tool descriptions
std::vector<Internal::ToolDescription> ToolHandler::getInternalTools_()
{
  if (!tools_internal_loaded_)
  {
    loadInternalToolConfig_();
    // flag is set only after a successful load, so a throwing load can be retried
    tools_internal_loaded_ = true;
  }
  return tools_internal_;
}
/// Directory holding the *.ttd descriptions of external tools.
String ToolHandler::getExternalToolsPath()
{
  String path = File::getOpenMSDataPath();
  path += "/TOOLS/EXTERNAL";
  return path;
}
/// Directory holding the *.ttd descriptions of internal tools.
String ToolHandler::getInternalToolsPath()
{
  String path = File::getOpenMSDataPath();
  path += "/TOOLS/INTERNAL";
  return path;
}
/// Lazily load and cache the aggregated description of all external tools.
/// @return a copy of the cached 'GenericWrapper' aggregate description
Internal::ToolDescription ToolHandler::getExternalTools_()
{
  if (!tools_external_loaded_)
  {
    loadExternalToolConfig_();
    // flag is set only after a successful load, so a throwing load can be retried
    tools_external_loaded_ = true;
  }
  return tools_external_;
}
void ToolHandler::loadExternalToolConfig_()
{
QStringList files = getExternalToolConfigFiles_();
for (int i = 0; i < files.size(); ++i)
{
ToolDescriptionFile tdf;
std::vector<Internal::ToolDescription> tools;
tdf.load(String(files[i]), tools);
// add every tool from file to list
for (Size i_t = 0; i_t < tools.size(); ++i_t)
{
if (i == 0 && i_t == 0)
{
tools_external_ = tools[i_t]; // init
}
else
{
tools_external_.append(tools[i_t]); // append
}
}
}
tools_external_.name = "GenericWrapper";
tools_external_.category = "EXTERNAL";
}
/// Parses all internal *.ttd files and appends their tool descriptions to
/// the static 'tools_internal_' list, tagging each with category 'INTERNAL'.
void ToolHandler::loadInternalToolConfig_()
{
  QStringList files = getInternalToolConfigFiles_();
  for (int i = 0; i < files.size(); ++i)
  {
    ToolDescriptionFile tdf;
    std::vector<Internal::ToolDescription> tools;
    tdf.load(String(files[i]), tools);
    // add every tool from file to list
    for (Size i_t = 0; i_t < tools.size(); ++i_t)
    {
      tools_internal_.push_back(tools[i_t]);
      // BUG FIX: the category must be set on the tool just appended to the
      // internal list -- the original code mutated the unrelated external
      // 'GenericWrapper' aggregate (tools_external_) instead.
      tools_internal_.back().category = "INTERNAL";
    }
  }
}
/// Collects the absolute paths of all external *.ttd configuration files from
/// the default path, the OS-specific subdirectory and (optionally) $OPENMS_TTD_PATH.
QStringList ToolHandler::getExternalToolConfigFiles_()
{
  // assemble the directories to search
  QStringList dirs;
  dirs << getExternalToolsPath().toQString();
#ifdef OPENMS_WINDOWSPLATFORM
  dirs << (getExternalToolsPath() + "/WINDOWS").toQString();
#else
  dirs << (getExternalToolsPath() + "/LINUX").toQString();
#endif
  // additional environment override
  const char* env_path = getenv("OPENMS_TTD_PATH");
  if (env_path != nullptr)
  {
    dirs << String(env_path).toQString();
  }
  // gather all *.ttd files (as absolute paths) from each directory
  QStringList all_files;
  for (const QString& d : dirs)
  {
    QDir dir(d, "*.ttd");
    const QStringList entries = dir.entryList();
    for (const QString& entry : entries)
    {
      all_files << dir.absolutePath() + QDir::separator() + entry;
    }
  }
  return all_files;
}
/// Collects the absolute paths of all internal *.ttd configuration files from
/// the default path, the OS-specific subdirectory and (optionally) $OPENMS_TTD_INTERNAL_PATH.
QStringList ToolHandler::getInternalToolConfigFiles_()
{
  // assemble the directories to search
  QStringList dirs;
  dirs << getInternalToolsPath().toQString();
#ifdef OPENMS_WINDOWSPLATFORM
  dirs << (getInternalToolsPath() + "/WINDOWS").toQString();
#else
  dirs << (getInternalToolsPath() + "/LINUX").toQString();
#endif
  // additional environment override
  const char* env_path = getenv("OPENMS_TTD_INTERNAL_PATH");
  if (env_path != nullptr)
  {
    dirs << String(env_path).toQString();
  }
  // gather all *.ttd files (as absolute paths) from each directory
  QStringList all_files;
  for (const QString& d : dirs)
  {
    QDir dir(d, "*.ttd");
    const QStringList entries = dir.entryList();
    for (const QString& entry : entries)
    {
      all_files << dir.absolutePath() + QDir::separator() + entry;
    }
  }
  return all_files;
}
/// Returns the category of the given tool, or an empty string if unknown.
String ToolHandler::getCategory(const String& toolname)
{
  // include GenericWrapper so external tools get a category, too
  ToolListType tools = getTOPPToolList(true);
  // single lookup instead of find() followed by a second lookup via operator[]
  auto it = tools.find(toolname);
  return (it != tools.end()) ? it->second.category : String();
}
// static member definitions
// cached aggregate of all external tool descriptions (the 'GenericWrapper' tool)
Internal::ToolDescription ToolHandler::tools_external_ = Internal::ToolDescription();
// cached list of internal tool descriptions (parsed from *.ttd files)
std::vector<Internal::ToolDescription> ToolHandler::tools_internal_;
// lazy-init flags: set once the corresponding *.ttd files were parsed
bool ToolHandler::tools_external_loaded_ = false;
bool ToolHandler::tools_internal_loaded_ = false;
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/APPLICATIONS/OpenSwathBase.cpp | .cpp | 15,108 | 336 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest, Justin Sing$
// $Authors: Hannes Roest, Justin Sing$
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/OpenSwathBase.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FORMAT/SwathFile.h>
#include <OpenMS/ANALYSIS/OPENSWATH/SwathWindowLoader.h>
#include <OpenMS/ANALYSIS/OPENSWATH/TransitionTSVFile.h>
#include <OpenMS/ANALYSIS/OPENSWATH/TransitionPQPFile.h>
#include <OpenMS/ANALYSIS/OPENSWATH/OpenSwathWorkflow.h>
#include <OpenMS/FORMAT/DATAACCESS/MSDataWritingConsumer.h>
#include <OpenMS/FORMAT/DATAACCESS/MSDataSqlConsumer.h>
#include <algorithm>
#include <iostream>
#include <utility>
using namespace std;
namespace OpenMS
{
// Base class for the OpenSwath TOPP tools: forwards the tool metadata
// (name, description, 'official' flag, citations) to the generic TOPPBase.
TOPPOpenSwathBase::TOPPOpenSwathBase(String name, String description, bool official, const std::vector<Citation>& citations) :
  TOPPBase(name, description, official, citations)
{
}

TOPPOpenSwathBase::~TOPPOpenSwathBase() = default;
// Private
void TOPPOpenSwathBase::loadSwathFiles_(const StringList& file_list,
const bool split_file,
const String& tmp,
const String& readoptions,
std::shared_ptr<ExperimentalSettings > & exp_meta,
std::vector< OpenSwath::SwathMap > & swath_maps,
Interfaces::IMSDataConsumer* plugin_consumer)
{
SwathFile swath_file;
swath_file.setLogType(log_type_);
if (split_file || file_list.size() > 1)
{
// TODO cannot use data reduction here any more ...
swath_maps = swath_file.loadSplit(file_list, tmp, exp_meta, readoptions);
}
else
{
FileTypes::Type in_file_type = FileHandler::getTypeByFileName(file_list[0]);
if (in_file_type == FileTypes::MZML)
{
swath_maps = swath_file.loadMzML(file_list[0], tmp, exp_meta, readoptions, plugin_consumer);
}
else if (in_file_type == FileTypes::MZXML)
{
swath_maps = swath_file.loadMzXML(file_list[0], tmp, exp_meta, readoptions);
}
else if (in_file_type == FileTypes::SQMASS)
{
swath_maps = swath_file.loadSqMass(file_list[0], exp_meta);
}
else
{
throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Input file needs to have ending mzML or mzXML");
}
}
}
// Protected
/// Loads SWATH raw data into 'swath_maps' and validates the window layout.
/// Window boundaries may be overridden via 'swath_windows_file'. Adjacent
/// non-MS1 windows are then checked for gaps (check skipped for PRM data,
/// where gaps are expected) and for overlaps (check skipped for PASEF data,
/// where overlaps are expected). On a violation an error is logged and false
/// is returned, unless 'force' is set. Returns true on success.
bool TOPPOpenSwathBase::loadSwathFiles(const StringList& file_list,
                                       std::shared_ptr<ExperimentalSettings >& exp_meta,
                                       std::vector< OpenSwath::SwathMap >& swath_maps,
                                       const bool split_file,
                                       const String& tmp,
                                       const String& readoptions,
                                       const String& swath_windows_file,
                                       const double min_upper_edge_dist,
                                       const bool force,
                                       const bool sort_swath_maps,
                                       const bool prm,
                                       const bool pasef,
                                       Interfaces::IMSDataConsumer* plugin_consumer)
{
  // (i) Load files
  loadSwathFiles_(file_list, split_file, tmp, readoptions, exp_meta, swath_maps, plugin_consumer);

  // (ii) Allow the user to specify the SWATH windows
  if (!swath_windows_file.empty())
  {
    SwathWindowLoader::annotateSwathMapsFromFile(swath_windows_file, swath_maps, sort_swath_maps, force);
  }

  for (Size i = 0; i < swath_maps.size(); i++)
  {
    OPENMS_LOG_DEBUG << "Found swath map " << i
                     << " with lower " << swath_maps[i].lower
                     << " and upper " << swath_maps[i].upper
                     << " and im Lower bounds of " << swath_maps[i].imLower
                     << " and im Upper bounds of " << swath_maps[i].imUpper
                     << " and " << swath_maps[i].sptr->getNrSpectra()
                     << " spectra." << std::endl;
  }

  // (iii) Sanity check: there should be no overlap between the windows:
  // collect the (lower, upper) m/z bounds of all fragment-ion windows
  std::vector<std::pair<double, double>> sw_windows;
  for (Size i = 0; i < swath_maps.size(); i++)
  {
    if (!swath_maps[i].ms1)
    {
      sw_windows.push_back(std::make_pair(swath_maps[i].lower, swath_maps[i].upper));
    }
  }
  // sort by lower bound (first entry in pair)
  std::sort(sw_windows.begin(), sw_windows.end());

  // compare each window's effective upper edge with the next window's lower edge
  for (Size i = 1; i < sw_windows.size(); i++)
  {
    double lower_map_end = sw_windows[i-1].second - min_upper_edge_dist;
    double upper_map_start = sw_windows[i].first;
    OPENMS_LOG_DEBUG << "Extraction will go up to " << lower_map_end << " and continue at " << upper_map_start << std::endl;

    if (prm) {continue;} // skip next step as expect them to overlap and have gaps...

    // gap check (tolerance of 0.01 m/z)
    if (upper_map_start - lower_map_end > 0.01)
    {
      OPENMS_LOG_WARN << "Extraction will have a gap between " << lower_map_end << " and " << upper_map_start << std::endl;
      if (!force)
      {
        OPENMS_LOG_ERROR << "Extraction windows have a gap. Will abort (override with -force)" << std::endl;
        return false;
      }
    }

    if (pasef) {continue;} // skip this step, expect there to be overlap ...

    // overlap check (tolerance of 0.01 m/z)
    if (lower_map_end - upper_map_start > 0.01)
    {
      OPENMS_LOG_WARN << "Extraction will overlap between " << lower_map_end << " and " << upper_map_start << "!\n"
                      << "This will lead to multiple extraction of the transitions in the overlapping region "
                      << "which will lead to duplicated output. It is very unlikely that you want this." << "\n"
                      << "Please fix this by providing an appropriate extraction file with -swath_windows_file" << "\n"
                      << "Did you mean to set the -pasef Flag?" << std::endl;
      if (!force)
      {
        OPENMS_LOG_ERROR << "Extraction windows overlap. Will abort (override with -force)" << std::endl;
        return false;
      }
    }
  }
  return true;
}
/// Creates the consumer that will receive the extracted chromatograms and
/// stores it in '*chromatogramConsumer'. Depending on 'out_chrom' this is
///  - an MSDataSqlConsumer for *.sqmass output (lossy compression),
///  - a numpress-compressed mzML writer for any other non-empty filename, or
///  - a no-op consumer if 'out_chrom' is empty (data is discarded).
/// The caller takes ownership of the allocated consumer.
void TOPPOpenSwathBase::prepareChromOutput(Interfaces::IMSDataConsumer ** chromatogramConsumer,
                                           const std::shared_ptr<ExperimentalSettings>& exp_meta,
                                           const OpenSwath::LightTargetedExperiment& transition_exp,
                                           const String& out_chrom,
                                           const UInt64 run_id)
{
  if (!out_chrom.empty())
  {
    String tmp = out_chrom;
    if (tmp.toLower().hasSuffix(".sqmass"))
    {
      bool full_meta = false; // can lead to very large files in memory
      bool lossy_compression = true;
      *chromatogramConsumer = new MSDataSqlConsumer(out_chrom, run_id, 500, full_meta, lossy_compression);
    }
    else
    {
      PlainMSDataWritingConsumer * chromConsumer = new PlainMSDataWritingConsumer(out_chrom);
      // one chromatogram is expected per transition
      int expected_chromatograms = transition_exp.transitions.size();
      chromConsumer->setExpectedSize(0, expected_chromatograms);
      chromConsumer->setExperimentalSettings(*exp_meta);
      chromConsumer->getOptions().setWriteIndex(true); // ensure that we write the index
      chromConsumer->addDataProcessing(getProcessingInfo_(DataProcessing::SMOOTHING));

      // prepare data structures for lossy compression
      // (numpress: 'linear' for the time axis, 'slof' for intensities)
      MSNumpressCoder::NumpressConfig npconfig_mz;
      MSNumpressCoder::NumpressConfig npconfig_int;
      npconfig_mz.estimate_fixed_point = true; // critical
      npconfig_int.estimate_fixed_point = true; // critical
      npconfig_mz.numpressErrorTolerance = -1.0; // skip check, faster
      npconfig_int.numpressErrorTolerance = -1.0; // skip check, faster
      npconfig_mz.setCompression("linear");
      npconfig_int.setCompression("slof");
      npconfig_mz.linear_fp_mass_acc = 0.05; // set the desired RT accuracy in seconds

      chromConsumer->getOptions().setNumpressConfigurationMassTime(npconfig_mz);
      chromConsumer->getOptions().setNumpressConfigurationIntensity(npconfig_int);
      chromConsumer->getOptions().setCompression(true);

      *chromatogramConsumer = chromConsumer;
    }
  }
  else
  {
    // no output requested -> swallow all data
    *chromatogramConsumer = new NoopMSDataWritingConsumer("");
  }
}
/// Loads a transition list from TraML, PQP or TSV into a LightTargetedExperiment.
/// 'tsv_reader_param' configures the TSV reader (ignored for the other formats).
/// @throws Exception::IllegalArgument for any other file type.
OpenSwath::LightTargetedExperiment TOPPOpenSwathBase::loadTransitionList(const FileTypes::Type& tr_type,
                                                                         const String& tr_file,
                                                                         const Param& tsv_reader_param)
{
  OpenSwath::LightTargetedExperiment transitions;
  ProgressLogger logger;
  logger.setLogType(log_type_);

  switch (tr_type)
  {
    case FileTypes::TRAML:
    {
      logger.startProgress(0, 1, "Load TraML file");
      TargetedExperiment targeted_exp;
      FileHandler().loadTransitions(tr_file, targeted_exp, {FileTypes::TRAML});
      // convert the heavy-weight TargetedExperiment into the light representation
      OpenSwathDataAccessHelper::convertTargetedExp(targeted_exp, transitions);
      logger.endProgress();
      break;
    }
    case FileTypes::PQP:
    {
      logger.startProgress(0, 1, "Load PQP file");
      TransitionPQPFile().convertPQPToTargetedExperiment(tr_file.c_str(), transitions);
      logger.endProgress();
      break;
    }
    case FileTypes::TSV:
    {
      logger.startProgress(0, 1, "Load TSV file");
      TransitionTSVFile tsv_reader;
      tsv_reader.setParameters(tsv_reader_param);
      tsv_reader.convertTSVToTargetedExperiment(tr_file.c_str(), tr_type, transitions);
      logger.endProgress();
      break;
    }
    default:
    {
      OPENMS_LOG_ERROR << "Provide valid TraML, TSV or PQP transition file." << std::endl;
      throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Need to provide valid input file.");
    }
  }
  return transitions;
}
/// Determines the retention-time transformation and (optionally) automatic
/// m/z / ion-mobility extraction windows.
/// If 'trafo_in' is given, the transformation is read from that file and
/// fitted with the configured model; no m/z/IM calibration is performed.
/// Otherwise, if iRT transitions are provided, a full RT normalization run
/// is executed (extraction + scoring), which also estimates the MS1/MS2
/// m/z and IM windows. With neither input, an identity-like default
/// TransformationDescription is returned.
/// @throws Exception::IllegalArgument if 'pasef' is set but an iRT transition
///         lacks a precursor ion-mobility value.
TOPPOpenSwathBase::CalibrationResult TOPPOpenSwathBase::performCalibration(String trafo_in,
                                                                           const OpenSwath::LightTargetedExperiment& irt_transitions,
                                                                           std::vector<OpenSwath::SwathMap>& swath_maps,
                                                                           double min_rsq,
                                                                           double min_coverage,
                                                                           const Param& feature_finder_param,
                                                                           const ChromExtractParams& cp_irt,
                                                                           const Param& irt_detection_param,
                                                                           const Param& calibration_param,
                                                                           Size debug_level,
                                                                           bool pasef,
                                                                           bool load_into_memory,
                                                                           const String& irt_trafo_out,
                                                                           const String& irt_mzml_out)
{
  TransformationDescription trafo_rtnorm;
  // estimated extraction windows; -1 / 0 act as "not estimated" defaults
  double auto_mz_w = 0;
  double auto_im_w = -1;
  double auto_ms1_mz_w = 0;
  double auto_ms1_im_w = -1;

  if (! trafo_in.empty())
  {
    // get read RT normalization file
    FileHandler().loadTransformations(trafo_in, trafo_rtnorm, false, {FileTypes::TRANSFORMATIONXML});

    // assemble the model parameters from the tool's 'model:' section plus
    // the lowess/b-spline settings of the iRT detection parameters
    Param model_params = getParam_().copy("model:", true);
    model_params.setValue("symmetric_regression", "false");
    model_params.setValue("span", irt_detection_param.getValue("lowess:span"));
    model_params.setValue("auto_span", irt_detection_param.getValue("lowess:auto_span"));
    model_params.setValue("auto_span_min", irt_detection_param.getValue("lowess:auto_span_min"));
    model_params.setValue("auto_span_max", irt_detection_param.getValue("lowess:auto_span_max"));
    model_params.setValue("auto_span_grid", irt_detection_param.getValue("lowess:auto_span_grid"));
    model_params.setValue("num_nodes", irt_detection_param.getValue("b_spline:num_nodes"));
    String model_type = irt_detection_param.getValue("alignmentMethod").toString();
    trafo_rtnorm.fitModel(model_type, model_params);

    // We don't calibrate for mz and IM if a user supplies a transformation function
    // TODO: Should we deprecate and remove the option of providing a transformation function?
    // Not sure how often this is used in practice. @singjc, 2025-08-01
    auto_mz_w = -1.0;
    auto_im_w = -1.0;
    auto_ms1_mz_w = -1.0;
    auto_ms1_im_w = -1.0;
  }
  else if (! irt_transitions.getTransitions().empty())
  {
    // Loading iRT file
    std::cout << "Will load iRT transitions and try to find iRT peptides" << std::endl;

    // If pasef flag is set, validate that IM is present
    if (pasef)
    {
      const auto& transitions = irt_transitions.getTransitions();
      for (Size k = 0; k < (Size)transitions.size(); k++)
      {
        // precursor_im == -1 marks a missing ion-mobility value
        if (transitions[k].precursor_im == -1)
        {
          throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                           "Error: iRT Transition " + transitions[k].getNativeID()
                                           + " does not have a valid IM value, this must be set to use the -pasef flag");
        }
      }
    }

    // perform extraction
    OpenSwathCalibrationWorkflow wf;
    wf.setLogType(log_type_);
    TransformationDescription im_trafo;
    trafo_rtnorm = wf.performRTNormalization(irt_transitions, swath_maps, im_trafo, min_rsq, min_coverage, feature_finder_param, cp_irt,
                                             irt_detection_param, calibration_param, irt_mzml_out, debug_level, pasef, load_into_memory);

    // Retrieve estimated mz and IM extraction windows
    auto_mz_w = wf.getEstimatedMzWindow();
    auto_im_w = wf.getEstimatedImWindow();

    // Retrieve estimate MS1 mz and IM extraction windows
    auto_ms1_mz_w = wf.getEstimatedMs1MzWindow();
    auto_ms1_im_w = wf.getEstimatedMs1ImWindow();

    // optionally persist the RT transformation for later runs
    if (! irt_trafo_out.empty()) { FileHandler().storeTransformations(irt_trafo_out, trafo_rtnorm, {FileTypes::TRANSFORMATIONXML}); }
  }

  // bundle everything into the result struct
  CalibrationResult out;
  out.rt_trafo = std::move(trafo_rtnorm);
  out.ms2_mz_window_ppm = auto_mz_w;
  out.ms2_im_window = auto_im_w;
  out.ms1_mz_window_ppm = auto_ms1_mz_w;
  out.ms1_im_window = auto_ms1_im_w;
  return out;
}
} // end NS OpenMS | C++ |
3D | OpenMS/OpenMS | src/openms/source/APPLICATIONS/INIUpdater.cpp | .cpp | 3,791 | 103 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/INIUpdater.h>

#include <OpenMS/APPLICATIONS/ToolHandler.h>

#include <QtCore/QString>

#include <algorithm>
namespace OpenMS
{
using namespace Internal;
typedef ToolDescriptionInternal TDE;
// Constructor: eagerly populates the static old-name -> new-name tool mapping.
INIUpdater::INIUpdater()
{
  getNameMapping(); // init map_
}
/// Extracts the names of all tools stored in a TOPP INI.
/// A tool entry is identified by a top-level '<tool>:version' parameter,
/// i.e. a full parameter name that ends in ':version' and contains exactly
/// one ':' overall.
StringList INIUpdater::getToolNamesFromINI(const Param & ini) const
{
  StringList tool_names;
  for (Param::ParamIterator it = ini.begin(); it != ini.end(); ++it)
  {
    String name = it.getName();
    // cheap suffix test first; count ':' directly on the string instead of
    // round-tripping through QString for every parameter
    if (name.hasSuffix(":version") && std::count(name.begin(), name.end(), ':') == 1)
    {
      tool_names.push_back(name.prefix(':'));
    }
  }
  return tool_names;
}
/// Returns the mapping from old (tool name, type) pairs to their successor
/// tools, building it once on first call.
const ToolMapping & INIUpdater::getNameMapping()
{
  if (map_.empty())
  {
    // registers one mapping: old (name, type) -> new tool (which has no type)
    auto add = [](const String& old_name, const String& old_type, const String& new_name)
    {
      map_[TDE(old_name, ListUtils::create<String>(old_type))] = TDE(new_name, ListUtils::create<String>(""));
    };

    add("FeatureFinder", "centroided", "FeatureFinderCentroided");
    add("FeatureFinder", "isotope_wavelet", "FeatureFinderIsotopeWavelet");
    add("FeatureFinder", "mrm", "FeatureFinderMRM");

    add("FeatureLinker", "labeled", "FeatureLinkerLabeled");
    add("FeatureLinker", "unlabeled", "FeatureLinkerUnlabeled");
    add("FeatureLinker", "unlabeled_qt", "FeatureLinkerUnlabeledQT");

    add("NoiseFilter", "gaussian", "NoiseFilterGaussian");
    add("NoiseFilter", "sgolay", "NoiseFilterSGolay");

    add("MapAligner", "apply_given_trafo", "MapRTTransformer");
    add("MapAligner", "identification", "MapAlignerIdentification");
    add("MapAligner", "pose_clustering", "MapAlignerPoseClustering");

    // SpectraFilter ...
    add("PeakPicker", "wavelet", "PeakPickerWavelet");
    add("PeakPicker", "high_res", "PeakPickerHiRes");
  }
  return map_;
}
/// Resolves the current name of a (possibly renamed) tool.
/// Lookup order: (old name, type) pair; old name without type; finally the
/// registered TOPP tool list (name unchanged). Returns true on success and
/// stores the result in 'new_name'; otherwise returns false ('new_name' empty).
bool INIUpdater::getNewToolName(const String & old_name, const String & tools_type, String & new_name)
{
  new_name = "";
  // try with type (as some new tools for one type might have the exact same name as old ones with several types);
  // keep the iterator to avoid a second lookup via operator[]
  TDE old_withtype(old_name, ListUtils::create<String>(tools_type));
  auto it = map_.find(old_withtype);
  if (it != map_.end())
  {
    new_name = it->second.name;
    return true;
  }

  // try without type
  TDE old_notype(old_name, StringList());
  it = map_.find(old_notype);
  if (it != map_.end())
  {
    new_name = it->second.name;
    return true;
  }

  // default to ToolHandler: the name is already a current TOPP tool
  const auto& topp = ToolHandler::getTOPPToolList(true);
  if (topp.find(old_name) != topp.end())
  {
    new_name = old_name;
    return true;
  }

  return false;
}
// static storage for the old-name -> new-name tool mapping (filled lazily by getNameMapping())
ToolMapping INIUpdater::map_;
} // namespace
| C++ |
3D | OpenMS/OpenMS | src/openms/source/APPLICATIONS/TOPPBase.cpp | .cpp | 98,786 | 2,618 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Marc Sturm, Clemens Groepl, Johannes Junker, Stephan Aiche, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/APPLICATIONS/ConsoleUtils.h>
#include <OpenMS/APPLICATIONS/ParameterInformation.h>
#include <OpenMS/APPLICATIONS/ToolHandler.h>
#include <OpenMS/CONCEPT/Colorizer.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/UniqueIdGenerator.h>
#include <OpenMS/CONCEPT/VersionInfo.h>
#include <OpenMS/DATASTRUCTURES/Date.h>
#include <OpenMS/DATASTRUCTURES/Param.h>
#include <OpenMS/DATASTRUCTURES/ListUtilsIO.h>
#include <OpenMS/DATASTRUCTURES/StringListUtils.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/FORMAT/FileTypes.h>
#include <OpenMS/FORMAT/IndentedStream.h>
#include <OpenMS/FORMAT/ParamCTDFile.h>
#include <OpenMS/FORMAT/ParamCWLFile.h>
#include <OpenMS/FORMAT/ParamJSONFile.h>
#include <OpenMS/FORMAT/ParamXMLFile.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/KERNEL/ConsensusMap.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/SYSTEM/ExternalProcess.h>
#include <OpenMS/SYSTEM/File.h>
#include <OpenMS/SYSTEM/StopWatch.h>
#include <OpenMS/SYSTEM/SysInfo.h>
#include <OpenMS/SYSTEM/UpdateCheck.h>
#include <QtCore/QDir>
#include <iostream>
// OpenMP support
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef OPENMS_WINDOWSPLATFORM
#undef min
#undef max
#endif
#include <cmath>
using namespace std;
namespace OpenMS
{
using namespace Exception;
// location of the per-user TOPP configuration file
String TOPPBase::topp_ini_file_ = String(QDir::homePath()) + "/.TOPP.ini";

// citation of the OpenMS framework itself
const Citation TOPPBase::cite_openms
  = {"Pfeuffer, J., Bielow, C., Wein, S. et al.", "OpenMS 3 enables reproducible analysis of large-scale mass spectrometry data",
     "Nat Methods (2024)", "10.1038/s41592-024-02197-7"};
// Limits the number of OpenMP threads this tool may use.
// A no-op when OpenMS was built without OpenMP support (the parameter name is
// only visible under _OPENMP to avoid an unused-parameter warning otherwise).
void TOPPBase::setMaxNumberOfThreads(int
#ifdef _OPENMP
                                     num_threads // to avoid the unused warning we enable this
                                                 // argument only if openmp is available
#endif
)
{
#ifdef _OPENMP
  omp_set_num_threads(num_threads);
#endif
}
/// Prefix of this tool instance's section in an INI file, e.g. "FileFilter:1:".
String TOPPBase::getToolPrefix() const
{
  const String sep = ":";
  return tool_name_ + sep + instance_number_ + sep;
}
// Constructs the tool base: records the tool metadata, assembles the verbose
// version string and (unless disabled for unit tests) verifies that a tool
// flagged as 'official' is actually registered in the ToolHandler.
TOPPBase::TOPPBase(const String& tool_name, const String& tool_description, bool official, const std::vector<Citation>& citations, bool toolhandler_test) :
  tool_name_(tool_name),
  tool_description_(tool_description),
  instance_number_(-1), // real instance number is assigned later in main()
  official_(official),
  citations_(citations),
  toolhandler_test_(toolhandler_test),
  log_type_(ProgressLogger::NONE),
  test_mode_(false),
  debug_level_(-1)
{
  version_ = VersionInfo::getVersion();
  verboseVersion_ = version_ + " " + VersionInfo::getTime();
  // if the revision info is meaningful, show it as well
  if (!VersionInfo::getRevision().empty() && VersionInfo::getRevision() != "exported")
  {
    verboseVersion_ += String(", Revision: ") + VersionInfo::getRevision() + "";
  }
  // can be disabled to allow unit tests
  if (toolhandler_test_)
  {
    // check if tool is in official tools list (GenericWrapper is exempt)
    if (official_ && tool_name_ != "GenericWrapper" && !ToolHandler::getTOPPToolList().count(tool_name_))
    {
      throw Exception::InvalidValue(__FILE__,
                                    __LINE__,
                                    OPENMS_PRETTY_FUNCTION,
                                    String("If '" + tool_name_ + "' is an official TOPP tool, add it to the tools list in ToolHandler. If it is not, set the 'official' flag of the TOPPBase constructor to false."),
                                    tool_name_);
    }
  }
}
// Destructor: removes the tool's log file if it exists but nothing was
// ever written to it (avoids leaving empty files behind).
TOPPBase::~TOPPBase()
{
  // delete log file if empty
  const std::string& topplog = getParam_("log").toString();
  if (!topplog.empty() && File::empty(topplog))
  {
    File::remove(topplog);
  }
}
TOPPBase::ExitCodes TOPPBase::main(int argc, const char** argv)
{
//----------------------------------------------------------
//parse command line
//----------------------------------------------------------
// register values from derived TOPP tool
registerOptionsAndFlags_();
addEmptyLine_();
// common section for all tools
if (ToolHandler::getTOPPToolList().count(tool_name_))
addText_("Common TOPP options:");
else
addText_("Common UTIL options:");
registerStringOption_("ini", "<file>", "", "Use the given TOPP INI file", false);
registerStringOption_("log", "<file>", "", "Name of log file (created only when specified)", false, true);
registerIntOption_("instance", "<n>", 1, "Instance number for the TOPP INI file", false, true);
registerIntOption_("debug", "<n>", 0, "Sets the debug level", false, true);
registerIntOption_("threads", "<n>", 1, "Sets the number of threads allowed to be used by the TOPP tool", false);
registerStringOption_("write_ini", "<file>", "", "Writes the default configuration file", false);
registerStringOption_("write_ctd", "<out_dir>", "", "Writes the common tool description file(s) (Toolname(s).ctd) to <out_dir>", false, true);
registerStringOption_("write_nested_cwl", "<out_dir>", "", "Writes the Common Workflow Language file(s) (Toolname(s).cwl) to <out_dir>", false, true);
registerStringOption_("write_cwl", "<out_dir>", "", "Writes the Common Workflow Language file(s) (Toolname(s).cwl) to <out_dir>, but enforce a flat parameter hierarchy", false, true);
registerStringOption_("write_nested_json", "<out_dir>", "", "Writes the default configuration file", false, true);
registerStringOption_("write_json", "<out_dir>", "", "Writes the default configuration file, but compatible to the flat hierarchy", false, true);
registerFlag_("no_progress", "Disables progress logging to command line", true);
registerFlag_("force", "Overrides tool-specific checks", true);
registerFlag_("test", "Enables the test mode (needed for internal use only)", true);
registerFlag_("-help", "Shows options");
registerFlag_("-helphelp", "Shows all options (including advanced)", false);
// parse command line parameters:
try
{
param_cmdline_ = parseCommandLine_(argc, argv);
}
catch (Exception::BaseException& e)
{
writeLogError_("Invalid parameter values (" + String(e.getName()) + "): " + String(e.what()) + ". Aborting!");
printUsage_();
return ILLEGAL_PARAMETERS;
}
// for now command line is all we have, final assembly will follow below
param_ = param_cmdline_;
// assign instance number
*const_cast<int*>(&instance_number_) = getParamAsInt_("instance", 1);
writeDebug_(String("Instance: ") + String(instance_number_), 1);
// assign ini location
*const_cast<String*>(&ini_location_) = this->getToolPrefix();
writeDebug_(String("Ini_location: ") + getIniLocation_(), 1);
// set debug level
debug_level_ = getParamAsInt_("debug", 0);
writeDebug_(String("Debug level: ") + String(debug_level_), 1);
// print command line to console
StringList args;
for (int i = 0; i < argc; ++i)
{
if (String(argv[i]).has(' '))
{
args.push_back(String(argv[i]).quote()); // surround with quotes if argument contains a space
}
else
{
args.push_back(argv[i]);
}
}
writeDebug_(String(" >> ") + ListUtils::concatenate(args, " "), 1);
// test if no options were given
if (argc == 1)
{
printUsage_();
writeLogError_("No options given. Aborting!");
return ILLEGAL_PARAMETERS;
}
// '--help' given
if (param_cmdline_.exists("-help") || param_cmdline_.exists("-helphelp"))
{
printUsage_();
return EXECUTION_OK;
}
// test if unknown options were given
if (param_cmdline_.exists("unknown"))
{
writeLogError_(String("Unknown option(s) '") + getParamAsString_("unknown") + "' given. Aborting!");
printUsage_();
return ILLEGAL_PARAMETERS;
}
// test if unknown text argument were given (we do not use them)
if (param_cmdline_.exists("misc"))
{
writeLogError_(String("Trailing text argument(s) '") + getParamAsString_("misc") + "' given. Aborting!");
printUsage_();
return ILLEGAL_PARAMETERS;
}
ExitCodes result;
try
{
//-------------------------------------------------------------
// store configuration or tool description files
//-------------------------------------------------------------
// '-write_ini' given
if (param_cmdline_.exists("write_ini"))
{
String write_ini_file = param_cmdline_.getValue("write_ini").toString();
outputFileWritable_(write_ini_file, "write_ini");
Param default_params = getDefaultParameters_();
// check if augmentation with -ini param is needed
ParamValue in_ini;
if (param_cmdline_.exists("ini"))
{
in_ini = param_cmdline_.getValue("ini");
Param ini_params;
const std::string in_ini_path = in_ini.toString();
if (FileHandler::getTypeByFileName(in_ini_path) == FileTypes::Type::JSON)
{
// The JSON file doesn't carry any information about the parameter tree structure.
// We hand an additional parameter object with the default values, so we have information
// about the tree when parsing the JSON file.
ini_params = getDefaultParameters_();
if (!ParamJSONFile::load(in_ini_path, ini_params))
{
return ILLEGAL_PARAMETERS;
}
} else {
ParamXMLFile().load(in_ini_path, ini_params);
}
// check if ini parameters are applicable to this tool
checkIfIniParametersAreApplicable_(ini_params);
// update default params with outdated params given in -ini and be verbose
default_params.update(ini_params, false);
}
ParamXMLFile paramFile{};
paramFile.store(write_ini_file, default_params);
return EXECUTION_OK;
}
// '-write_ctd' given
if (param_cmdline_.exists("write_ctd"))
{
ParamCTDFile paramFile{};
writeToolDescription_(paramFile, "write_ctd", ".ctd");
return EXECUTION_OK;
}
// '-write_cwl' given
if (param_cmdline_.exists("write_nested_cwl"))
{
ParamCWLFile paramFile{};
writeToolDescription_(paramFile, "write_nested_cwl", ".cwl");
return EXECUTION_OK;
}
// '-write_flat_cwl' given
if (param_cmdline_.exists("write_cwl"))
{
ParamCWLFile paramFile{};
paramFile.flatHierarchy = true;
writeToolDescription_(paramFile, "write_cwl", ".cwl");
return EXECUTION_OK;
}
// '-write_json' given
if (param_cmdline_.exists("write_nested_json"))
{
ParamJSONFile paramFile{};
writeToolDescription_(paramFile, "write_nested_json", ".json");
return EXECUTION_OK;
}
// '-write_flat_json' given
if (param_cmdline_.exists("write_json"))
{
ParamJSONFile paramFile{};
paramFile.flatHierarchy = true;
writeToolDescription_(paramFile, "write_json", ".json");
return EXECUTION_OK;
}
//-------------------------------------------------------------
// load INI file
//-------------------------------------------------------------
{
String value_ini;
if (param_cmdline_.exists("ini"))
{
value_ini = param_cmdline_.getValue("ini").toString();
writeDebug_("INI file: " + value_ini, 1);
writeDebug_("INI location: " + getIniLocation_(), 1);
if (FileHandler::getTypeByFileName(value_ini) == FileTypes::Type::JSON)
{
writeDebug_("Assuming INI is a cwl file", 1);
// The JSON file doesn't carry any information about the parameter tree structure.
// We prepopulate the param object with the default values, so we have information
// about the tree when parsing the JSON file.
param_inifile_ = getDefaultParameters_();
if (!ParamJSONFile::load(value_ini, param_inifile_))
{
return ILLEGAL_PARAMETERS;
}
} else {
ParamXMLFile().load(value_ini, param_inifile_);
}
checkIfIniParametersAreApplicable_(param_inifile_);
// dissect loaded INI parameters
param_instance_ = param_inifile_.copy(getIniLocation_(), true);
writeDebug_("Parameters from instance section:", param_instance_, 2);
param_common_tool_ = param_inifile_.copy("common:" + tool_name_ + ":", true);
writeDebug_("Parameters from common section with tool name:", param_common_tool_, 2);
param_common_ = param_inifile_.copy("common:", true);
writeDebug_("Parameters from common section without tool name:", param_common_, 2);
// set type on command line if given in .ini file
if (param_inifile_.exists(getIniLocation_() + "type") && !param_cmdline_.exists("type"))
param_cmdline_.setValue("type", param_inifile_.getValue(getIniLocation_() + "type"));
}
// construct the set of final parameters as they will be available in the main_ method
Param finalParam;
// 1. the CMD parameters
writeDebug_("Initialize final param with cmd line:", param_cmdline_, 2);
finalParam = param_cmdline_;
// 2. the instance values from the ini-file
writeDebug_("Merging instance section into param:", param_instance_, 2);
finalParam.merge(param_instance_);
// 3. the tools data from the common section
writeDebug_("Merging common section with tool name into param:", param_common_tool_, 2);
finalParam.merge(param_common_tool_);
// 4. everything else from the common section
writeDebug_("Merging common section without tool name into param:", param_common_, 2);
finalParam.merge(param_common_);
finalParam.remove("ini"); // not contained in default params; remove to avoid "unknown param" in update()
// finally: augment default values with INI/CLI values
// note the copy(getIniLocation_(),..) as we want the param tree without instance
// information
param_ = this->getDefaultParameters_().copy(getIniLocation_(), true);
if (!param_.update(finalParam, false, false, true, true, getGlobalLogWarn()))
{
OPENMS_LOG_ERROR << "Parameters passed to '" << this->tool_name_ << "' are invalid. To prevent usage of wrong defaults, please update/fix the parameters!" << std::endl;
return ILLEGAL_PARAMETERS;
}
if (finalParam.exists("type"))
{
param_.setValue("type", finalParam.getValue("type"));
}
// check if all parameters are registered and have the correct type
checkParam_(param_instance_, value_ini, getIniLocation_());
checkParam_(param_common_tool_, value_ini, "common:" + tool_name_ + "::");
checkParam_(param_common_, value_ini, "common:");
// check if the version of the parameters file matches the version of this tool
// the parameters and values are all ok, but there might be more valid values now or new parameters which are currently not visible in the outdated INI
String file_version = "";
if (param_inifile_.exists(tool_name_ + ":version"))
{
file_version = param_inifile_.getValue(tool_name_ + ":version").toString();
if (file_version != version_)
{
writeLogInfo_(String("Warning: Parameters file version (") + file_version + ") does not match the version of this tool (" + version_ + ").\n"
"Your current parameters are still valid, but there might be new valid values or even new parameters. Upgrading the INI might be useful.");
}
}
}
// 'test' flag is set
if (getFlag_("test"))
{
test_mode_ = true;
// initialize the random generator as early as possible!
UniqueIdGenerator::setSeed(19991231235959);
}
// enable / disable collection of usage statistics by build variable
#ifdef ENABLE_UPDATE_CHECK
// disable collection of usage statistics if environment variable is present
char* disable_usage = getenv("OPENMS_DISABLE_UPDATE_CHECK");
// only perform check if variable is not set or explicitly enabled by setting it to "OFF"
if (!test_mode_ && (disable_usage == nullptr || strcmp(disable_usage, "OFF") == 0))
{
UpdateCheck::run(tool_name_, version_, debug_level_);
}
#endif
//-------------------------------------------------------------
// debug level
//-------------------------------------------------------------
debug_level_ = getParamAsInt_("debug", 0);
writeDebug_(String("Debug level (after ini file): ") + String(debug_level_), 1);
if (debug_level_ > 0) getGlobalLogDebug().insert(cout); // allows to use OPENMS_LOG_DEBUG << "something" << std::endl;
//-------------------------------------------------------------
//progress logging
//-------------------------------------------------------------
if (!getFlag_("no_progress"))
{
log_type_ = ProgressLogger::CMD;
}
//----------------------------------------------------------
//threads
//----------------------------------------------------------
TOPPBase::setMaxNumberOfThreads(getParamAsInt_("threads", 1));
//----------------------------------------------------------
//main
//----------------------------------------------------------
StopWatch sw;
sw.start();
result = main_(argc, argv);
sw.stop();
// useful for benchmarking and for execution on clusters with schedulers
String mem_usage;
{
size_t mem_virtual(0);
SysInfo::getProcessPeakMemoryConsumption(mem_virtual);
if (mem_virtual != 0) mem_usage = String("; Peak Memory Usage: ") + (mem_virtual / 1024) + " MB";
}
OPENMS_LOG_INFO << this->tool_name_ << " took " << sw.toString() << mem_usage << "." << std::endl;
} // end try{}
//----------------------------------------------------------
//error handling
//----------------------------------------------------------
// Errors caused by the user
catch (UnableToCreateFile& e)
{
writeLogError_(String("Error: Unable to write file (") + e.what() + ")");
writeDebug_(String("Error occurred in line ") + e.getLine() + " of file " + e.getFile() + " (in function: " + e.getFunction() + ")!", 1);
return CANNOT_WRITE_OUTPUT_FILE;
}
catch (FileNotFound& e)
{
writeLogError_(String("Error: File not found (") + e.what() + ")");
writeDebug_(String("Error occurred in line ") + e.getLine() + " of file " + e.getFile() + " (in function: " + e.getFunction() + ") !", 1);
return INPUT_FILE_NOT_FOUND;
}
catch (ExternalExecutableNotFound& e)
{
writeLogError_(String("Error: Executable not found (") + e.what() + ")");
writeDebug_(String("Error occurred in line ") + e.getLine() + " of file " + e.getFile() + " (in function: " + e.getFunction() + ") !", 1);
return EXTERNAL_PROGRAM_NOTFOUND;
}
catch (FileNotReadable& e)
{
writeLogError_(String("Error: File not readable (") + e.what() + ")");
writeDebug_(String("Error occurred in line ") + e.getLine() + " of file " + e.getFile() + " (in function: " + e.getFunction() + ") !", 1);
return INPUT_FILE_NOT_READABLE;
}
catch (FileEmpty& e)
{
writeLogError_(String("Error: File empty (") + e.what() + ")");
writeDebug_(String("Error occurred in line ") + e.getLine() + " of file " + e.getFile() + " (in function: " + e.getFunction() + ") !", 1);
return INPUT_FILE_EMPTY;
}
catch (ParseError& e)
{
writeLogError_(String("Error: Unable to read file (") + e.what() + ")");
writeDebug_(String("Error occurred in line ") + e.getLine() + " of file " + e.getFile() + " (in function: " + e.getFunction() + ") !", 1);
return INPUT_FILE_CORRUPT;
}
catch (RequiredParameterNotGiven& e)
{
String what = e.what();
if (!what.hasPrefix("'"))
what = "'" + what + "'";
writeLogError_(String("Error: The required parameter ") + what + " was not given or is empty!");
writeDebug_(String("Error occurred in line ") + e.getLine() + " of file " + e.getFile() + " (in function: " + e.getFunction() + ") !", 1);
return MISSING_PARAMETERS;
}
catch (InvalidParameter& e)
{
writeLogError_(String("Invalid parameter: ") + e.what());
writeDebug_(String("Error occurred in line ") + e.getLine() + " of file " + e.getFile() + " (in function: " + e.getFunction() + ") !", 1);
return ILLEGAL_PARAMETERS;
}
// Internal errors because of wrong use of this class
catch (UnregisteredParameter& e)
{
writeLogError_(String("Internal error: Request for unregistered parameter '") + e.what() + "'");
writeDebug_(String("Error occurred in line ") + e.getLine() + " of file " + e.getFile() + " (in function: " + e.getFunction() + ") !", 1);
return INTERNAL_ERROR;
}
catch (WrongParameterType& e)
{
writeLogError_(String("Internal error: Request for parameter with wrong type '") + e.what() + "'");
writeDebug_(String("Error occurred in line ") + e.getLine() + " of file " + e.getFile() + " (in function: " + e.getFunction() + ") !", 1);
return INTERNAL_ERROR;
}
// All other errors
catch (BaseException& e)
{
writeLogError_(String("Error: Unexpected internal error (") + e.what() + ")");
writeDebug_(String("Error occurred in line ") + e.getLine() + " of file " + e.getFile() + " (in function: " + e.getFunction() + ") !", 1);
return UNKNOWN_ERROR;
}
log_.close();
return result;
}
void TOPPBase::printUsage_()
{
  // Writes the tool's command line help to stderr: a header (tool name,
  // description, documentation URL, version, citations), the usage line,
  // every visible registered parameter with its default value and
  // restrictions, and finally the list of algorithm subsections.
  // With '--helphelp', advanced parameters and all subsection parameters
  // are included as well.

  // show advanced options?
  // NOTE(review): the flag is looked up as "-helphelp" (leading dash) —
  // presumably that is the internal registration name; confirm against the
  // command line parsing code.
  bool verbose = getFlag_("-helphelp");
  String docurl = getDocumentationURL();
  IndentedStream is(cerr, 0, 10);
  // common output: header with name, docs link, version and citations
  is << "\n"
     << invert(tool_name_) << " -- " << tool_description_ << "\n"
     << bright("Full documentation: ") << underline(docurl) // the space is needed, otherwise the remaining line will be underlined on Windows..
     << "\n"
     << bright("Version: ") << verboseVersion_ << "\n"
     << bright("To cite OpenMS:\n") << " + " << is.indent(3) << cite_openms.toString()
     << is.indent(0) << "\n";
  if (!citations_.empty())
  {
    // tool-specific citations (in addition to the general OpenMS citation)
    is << bright() << "To cite " << tool_name_ << ':' << bright().undo() << is.indent(0) << "\n";
    for (const Citation& c : citations_)
      is << " + " << is.indent(3) << c.toString() << is.indent(0) << "\n";
  }
  is << is.indent(0) << "\n";
  is << invert("Usage:") << "\n" // line break needs to be separate, to avoid colored trailing whitespaces
     << " " << bright(tool_name_) << " <options>" << "\n"
     << "\n";
  // print warning regarding not shown parameters
  if (!subsections_.empty() && !verbose)
  {
    is << "This tool has algorithm parameters that are not shown here! Please check the ini file for a detailed description or use the --helphelp option\n\n";
  }
  if (verbose)
  {
    // add all subsection parameters to the command line
    try
    {
      Param p = getSubsectionDefaults_();
      registerFullParam_(p);
    }
    catch (BaseException& /*e*/)
    {
      // best effort: if subsection defaults cannot be computed, show only
      // the regular parameters
      writeDebug_("Failed to add subsection parameters", 1);
    }
  }
  is << bright("Options") << " (" << green("mandatory options marked with '*'") << "):\n";
  // determine max length of parameters (including argument) for indentation
  UInt max_size = 0;
  for (const auto& par : parameters_)
  {
    if (!par.advanced || verbose)
    {
      // '+ par.required' reserves one extra column for the trailing '*'
      max_size = max((UInt)max_size, (UInt)(par.name.size() + par.argument.size() + par.required));
    }
  }
  // offset of the descriptions (columns before the description text starts)
  UInt offset = 6 + max_size;
  // keep track of the current subsection we are in, to display the subsection help when a new section starts
  String current_TOPP_subsection("");
  // PRINT parameters && description, restrictions and default
  for (vector<ParameterInformation>::const_iterator it = parameters_.begin(); it != parameters_.end(); ++it)
  {
    if (it->advanced && !verbose)
    {
      continue;
    }
    // new subsection?
    String subsection = getSubsection_(it->name);
    if (!subsection.empty() && current_TOPP_subsection != subsection)
    {
      current_TOPP_subsection = subsection;
      map<String, String>::const_iterator subsec_it = subsections_TOPP_.find(current_TOPP_subsection);
      if (subsec_it == subsections_TOPP_.end())
      {
        throw ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "'" + current_TOPP_subsection + "' (TOPP subsection not registered)");
      }
      is << "\n"; // print newline for new subsection
      String subsection_description = subsec_it->second;
      if (subsection_description.length() == 0)
      {
        // fall back to the subsection name if no description was registered
        subsection_description = current_TOPP_subsection;
      }
      is << subsection_description << ":\n"; // print subsection description
    }
    else if (subsection.empty() && !current_TOPP_subsection.empty()) // subsection ended and normal parameters start again
    {
      current_TOPP_subsection = "";
      is << "\n"; // print newline to separate ending subsection
    }
    // NAME + ARGUMENT, e.g. " -in <file>*"
    String str_tmp = " -";
    str_tmp += it->name + " " + it->argument;
    if (it->required)
      str_tmp += '*';
    if (it->type == ParameterInformation::NEWLINE)
      str_tmp = "";
    // OFFSET: pad so all descriptions start at the same column
    str_tmp.fillRight(' ', offset);
    if (it->type == ParameterInformation::TEXT)
      str_tmp = "";
    // DESCRIPTION
    String desc_tmp = String(it->description).firstToUpper();
    // DEFAULT (only for value-typed parameters; empty or "[]" defaults are suppressed)
    StringList addons;
    switch (it->type)
    {
      case ParameterInformation::STRING:
      case ParameterInformation::DOUBLE:
      case ParameterInformation::INT:
      case ParameterInformation::STRINGLIST:
      case ParameterInformation::INTLIST:
      case ParameterInformation::DOUBLELIST:
      {
        String tmp_s = ((String)it->default_value.toString()).substitute(", ", " ");
        if (!tmp_s.empty() && tmp_s != "[]")
        {
          addons.push_back(String("default: '") + tmp_s + "'");
        }
      }
      break;
      default:
        break;
    }
    // RESTRICTIONS (valid strings/formats, or numeric min/max bounds)
    StringList restrictions;
    switch (it->type)
    {
      case ParameterInformation::STRING:
      case ParameterInformation::INPUT_FILE:
      case ParameterInformation::OUTPUT_FILE:
      case ParameterInformation::OUTPUT_PREFIX:
      case ParameterInformation::OUTPUT_DIR:
      case ParameterInformation::STRINGLIST:
      case ParameterInformation::INPUT_FILE_LIST:
      case ParameterInformation::OUTPUT_FILE_LIST:
        if (!it->valid_strings.empty())
        {
          StringList copy = it->valid_strings;
          for (auto& str : copy)
          {
            str.quote('\'');
          }
          String add = "";
          // file-typed parameters advertise their restrictions as "valid formats"
          if (it->type == ParameterInformation::INPUT_FILE
              || it->type == ParameterInformation::OUTPUT_FILE
              || it->type == ParameterInformation::OUTPUT_PREFIX
              || it->type == ParameterInformation::OUTPUT_DIR
              || it->type == ParameterInformation::INPUT_FILE_LIST
              || it->type == ParameterInformation::OUTPUT_FILE_LIST)
            add = " formats";
          restrictions.push_back("valid" + add + ": " + ListUtils::concatenate(copy, ", ")); // concatenate restrictions by comma
        }
        break;
      case ParameterInformation::INT:
      case ParameterInformation::INTLIST:
        // sentinel +/- numeric_limits::max() means "no bound set"
        if (it->min_int != -std::numeric_limits<Int>::max())
        {
          restrictions.push_back(String("min: '") + it->min_int + "'");
        }
        if (it->max_int != std::numeric_limits<Int>::max())
        {
          restrictions.push_back(String("max: '") + it->max_int + "'");
        }
        break;
      case ParameterInformation::DOUBLE:
      case ParameterInformation::DOUBLELIST:
        if (it->min_float != -std::numeric_limits<double>::max())
        {
          restrictions.push_back(String("min: '") + it->min_float + "'");
        }
        if (it->max_float != std::numeric_limits<double>::max())
        {
          restrictions.push_back(String("max: '") + it->max_float + "'");
        }
        break;
      default:
        break;
    }
    string addon_concat;
    // render the DEFAULT addon
    if (!addons.empty())
    {
      addon_concat = String(" (") + ListUtils::concatenate(addons, " ") + ")";
    }
    string restrict_concat;
    // render the RESTRICTIONS addon
    if (!restrictions.empty())
    {
      restrict_concat = String(" (") + ListUtils::concatenate(restrictions, " ") + ")";
    }
    if (it->type == ParameterInformation::TEXT)
    {
      is << str_tmp << desc_tmp; // no indentation for text
    }
    else
    {
      is << is.indent(offset);
      if (it->required)
        is << green(str_tmp); // mandatory parameters are highlighted
      else
        is << str_tmp;
      is << desc_tmp << cyan(addon_concat) << magenta(restrict_concat);
      is << is.indent(0);
    }
    is << "\n";
  }
  // SUBSECTION's at the end (only listed by name when not in verbose mode)
  if (!subsections_.empty() && !verbose)
  {
    // determine indentation of description
    UInt indent = 0;
    for (map<String, String>::const_iterator it = subsections_.begin(); it != subsections_.end(); ++it)
    {
      indent = max((UInt)it->first.size(), indent);
    }
    indent += 6;
    // output
    is << "\n"
       << "The following configuration subsections are valid:\n";
    for (map<String, String>::const_iterator it = subsections_.begin(); it != subsections_.end(); ++it)
    {
      String tmp = String(" - ") + it->first;
      tmp.fillRight(' ', indent);
      is << ConsoleUtils::breakString(tmp + it->second, indent, 10);
      is << "\n";
    }
    is << "\n"
       << "You can write an example INI file using the '-write_ini' option.\n"
       << "Documentation of subsection parameters can be found in the doxygen documentation or the INIFileEditor.\n"
       << "For more information, please consult the online documentation for this tool:\n"
       << " - " << underline(docurl) << "\n";
  }
  is << endl;
}
ParameterInformation TOPPBase::paramEntryToParameterInformation_(const Param::ParamEntry& entry, const String& argument, const String& full_name) const
{
  // Converts one Param entry into the ParameterInformation representation
  // used for command line registration. File roles and list types are
  // deduced from the entry's tags and value type.
  const String display_name = full_name.empty() ? entry.name : full_name;
  const bool is_advanced = entry.tags.count("advanced");

  // Special case: a string parameter that defaults to "false" and is
  // restricted to exactly {"true", "false"} is rendered as a boolean flag.
  if ((entry.value.valueType() == ParamValue::STRING_VALUE) &&
      (entry.value == "false") &&
      (entry.valid_strings.size() == 2) &&
      (entry.valid_strings[0] == "true") &&
      (entry.valid_strings[1] == "false"))
  {
    return ParameterInformation(display_name, ParameterInformation::FLAG, "", "", entry.description, false, is_advanced);
  }

  const bool tag_input = entry.tags.count(TAG_INPUT_FILE);
  const bool tag_output = entry.tags.count(TAG_OUTPUT_FILE);
  const bool tag_prefix = entry.tags.count(TAG_OUTPUT_PREFIX);
  const bool tag_dir = entry.tags.count(TAG_OUTPUT_DIR);
  // the file-role tags are mutually exclusive (at most one may be present)
  assert(tag_input + tag_output + tag_prefix + tag_dir <= 1);

  // deduce the ParameterInformation type from value type + file-role tags
  enum ParameterInformation::ParameterTypes deduced = ParameterInformation::NONE;
  switch (entry.value.valueType())
  {
    case ParamValue::STRING_VALUE:
      if (tag_input) { deduced = ParameterInformation::INPUT_FILE; }
      else if (tag_output) { deduced = ParameterInformation::OUTPUT_FILE; }
      else if (tag_prefix) { deduced = ParameterInformation::OUTPUT_PREFIX; }
      else if (tag_dir) { deduced = ParameterInformation::OUTPUT_DIR; }
      else { deduced = ParameterInformation::STRING; }
      break;
    case ParamValue::STRING_LIST:
      if (tag_input) { deduced = ParameterInformation::INPUT_FILE_LIST; }
      else if (tag_output) { deduced = ParameterInformation::OUTPUT_FILE_LIST; }
      else { deduced = ParameterInformation::STRINGLIST; }
      break;
    case ParamValue::INT_VALUE:
      deduced = ParameterInformation::INT;
      break;
    case ParamValue::DOUBLE_VALUE:
      deduced = ParameterInformation::DOUBLE;
      break;
    case ParamValue::INT_LIST:
      deduced = ParameterInformation::INTLIST;
      break;
    case ParamValue::DOUBLE_LIST:
      deduced = ParameterInformation::DOUBLELIST;
      break;
    case ParamValue::EMPTY_VALUE:
      deduced = ParameterInformation::NONE;
      break;
  }

  bool required = entry.tags.count("required");
  ParameterInformation info(display_name, deduced, argument, entry.value, entry.description, required, is_advanced);
  info.valid_strings = ListUtils::toStringList<std::string>(entry.valid_strings);
  // the "unset" sentinel defaults coincide between ParamEntry and
  // ParameterInformation, so the bounds can be copied verbatim
  info.min_int = entry.min_int;
  info.max_int = entry.max_int;
  info.min_float = entry.min_float;
  info.max_float = entry.max_float;
  return info;
}
String TOPPBase::getParamArgument_(const Param::ParamEntry& entry) const
{
  // Derives the placeholder text shown after an option name in the usage
  // output (e.g. "-opt <text>") from the entry's value type.
  switch (entry.value.valueType())
  {
    case ParamValue::STRING_VALUE:
      // restricted strings become a choice, free strings plain text
      return entry.valid_strings.empty() ? "<text>" : "<choice>";
    case ParamValue::INT_VALUE:
      return "<number>";
    case ParamValue::DOUBLE_VALUE:
      return "<value>";
    case ParamValue::STRING_LIST:
      return "<list>";
    case ParamValue::INT_LIST:
      return "<numbers>";
    case ParamValue::DOUBLE_LIST:
      return "<values>";
    case ParamValue::EMPTY_VALUE:
      return "";
  }
  return ""; // unreachable: all enum values handled above
}
std::vector<ParameterInformation> TOPPBase::paramToParameterInformation_(const Param& param) const
{
std::vector<ParameterInformation> parameter_information;
for (Param::ParamIterator it = param.begin(); it != param.end(); ++it)
{
String full_name = it.getName();
// make up a value for "argument":
String argument = getParamArgument_(*it);
// transform to ParameterInformation and register
parameter_information.push_back(paramEntryToParameterInformation_(*it, argument, full_name));
}
return parameter_information;
}
void TOPPBase::registerParamSubsectionsAsTOPPSubsections_(const Param& param)
{
for (Param::ParamIterator it = param.begin(); it != param.end(); ++it)
{
String full_name = it.getName();
String subsection = getSubsection_(full_name);
if (!subsection.empty() && (subsections_TOPP_.count(subsection) == 0))
{
subsections_TOPP_[subsection] = param.getSectionDescription(subsection);
}
}
}
void TOPPBase::registerFullParam_(const Param& param)
{
// register subsections
registerParamSubsectionsAsTOPPSubsections_(param);
// add the actual parameters
std::vector<ParameterInformation> parameter_information = paramToParameterInformation_(param);
parameters_.insert(parameters_.end(), parameter_information.begin(), parameter_information.end());
}
void TOPPBase::registerStringOption_(const String& name, const String& argument, const String& default_value, const String& description, bool required, bool advanced)
{
  // Registers a plain string option. A required option must not carry a
  // non-empty default (otherwise it could never be detected as missing).
  if (!default_value.empty() && required)
  {
    throw InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Registering a required StringOption param (" + name + ") with a non-empty default is forbidden!", default_value);
  }
  parameters_.emplace_back(name, ParameterInformation::STRING, argument, default_value, description, required, advanced);
}
ParameterInformation& TOPPBase::getParameterByName_(const String& name)
{
  // Returns a mutable reference to the registered parameter called 'name'.
  // @throws UnregisteredParameter if no parameter with this name exists
  for (ParameterInformation& info : parameters_)
  {
    if (info.name == name)
    {
      return info;
    }
  }
  throw UnregisteredParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
}
void TOPPBase::setValidStrings_(const String& name, const std::string vstrings[], int count)
{
  // Convenience overload: wraps the C-style array into a vector and forwards
  // to the vector overload.
  setValidStrings_(name, std::vector<String>(vstrings, vstrings + count));
}
void TOPPBase::setValidStrings_(const String& name, const std::vector<String>& strings)
{
//check for commas
for (Size i = 0; i < strings.size(); ++i)
{
if (strings[i].has(','))
{
throw InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Comma characters in Param string restrictions are not allowed!");
}
}
// get the matching parameter
ParameterInformation& p = getParameterByName_(name);
//check if the type matches
if (p.type != ParameterInformation::STRING && p.type != ParameterInformation::STRINGLIST)
{
throw ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
}
const StringList& valids = strings;
StringList defaults;
if (p.type == ParameterInformation::STRING)
defaults.push_back(String(p.default_value.toString()));
else
defaults = ListUtils::toStringList<std::string>(p.default_value);
for (Size j = 0; j < defaults.size(); ++j) // allow the empty string even if not in restrictions
{
if (!defaults[j].empty() && !ListUtils::contains(valids, defaults[j]))
{
throw InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "TO THE DEVELOPER: The TOPP tool option '" + name + "' with default value " + std::string(p.default_value) + " does not meet restrictions!");
}
}
p.valid_strings = strings;
}
void TOPPBase::setValidFormats_(const String& name, const std::vector<String>& formats, const bool force_OpenMS_format)
{
// check if formats are known
if (force_OpenMS_format)
{
for (const auto& f : formats)
{
if (f != "fid")
{
auto ft = FileHandler::getTypeByFileName(String(".") + f);
if (ft == FileTypes::UNKNOWN)
{
throw InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "The file format '" + f + "' is invalid!");
}
}
}
}
ParameterInformation& p = getParameterByName_(name);
// check if the type matches
if (p.type != ParameterInformation::INPUT_FILE
&& p.type != ParameterInformation::OUTPUT_FILE
&& p.type != ParameterInformation::INPUT_FILE_LIST
&& p.type != ParameterInformation::OUTPUT_FILE_LIST
&& p.type != ParameterInformation::OUTPUT_PREFIX)
// && p.type != ParameterInformation::OUTPUT_DIR ) // output dir is not a file format, hence does not support restricting the format
{
throw Exception::WrongParameterType(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
}
if (!p.valid_strings.empty())
{
throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Internal error: Valid formats are already set for '" + name + "'. Please check for typos!");
}
p.valid_strings = formats;
}
void TOPPBase::setMinInt_(const String& name, Int min)
{
  // Sets the lower bound for an INT/INTLIST parameter.
  // @throws ElementNotFound if the parameter is not of integer type
  // @throws InvalidParameter if a default value violates the new bound
  ParameterInformation& p = getParameterByName_(name);
  // check if the type matches
  if (p.type != ParameterInformation::INT && p.type != ParameterInformation::INTLIST)
  {
    throw ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
  }
  // the registered default value(s) must already satisfy the new bound
  IntList defaults;
  if (p.type == ParameterInformation::INT)
    defaults.push_back(Int(p.default_value));
  else
    defaults = p.default_value;
  for (Size j = 0; j < defaults.size(); ++j)
  {
    if (defaults[j] < min)
    {
      // fixed typo 'TOPPS' -> 'TOPP' (consistent with setValidStrings_)
      throw InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "TO THE DEVELOPER: The TOPP tool option '" + name + "' with default value " + std::string(p.default_value) + " does not meet restrictions!");
    }
  }
  p.min_int = min;
}
void TOPPBase::setMaxInt_(const String& name, Int max)
{
  // Sets the upper bound for an INT/INTLIST parameter.
  // @throws ElementNotFound if the parameter is not of integer type
  // @throws InvalidParameter if a default value violates the new bound
  ParameterInformation& p = getParameterByName_(name);
  // check if the type matches
  if (p.type != ParameterInformation::INT && p.type != ParameterInformation::INTLIST)
  {
    throw ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
  }
  // the registered default value(s) must already satisfy the new bound
  IntList defaults;
  if (p.type == ParameterInformation::INT)
    defaults.push_back(Int(p.default_value));
  else
    defaults = p.default_value;
  for (Size j = 0; j < defaults.size(); ++j)
  {
    if (defaults[j] > max)
    {
      // fixed typo 'TOPPS' -> 'TOPP' (consistent with setValidStrings_)
      throw InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "TO THE DEVELOPER: The TOPP tool option '" + name + "' with default value " + std::string(p.default_value) + " does not meet restrictions!");
    }
  }
  p.max_int = max;
}
void TOPPBase::setMinFloat_(const String& name, double min)
{
  // Sets the lower bound for a DOUBLE/DOUBLELIST parameter.
  // @throws ElementNotFound if the parameter is not of floating point type
  // @throws InvalidParameter if a default value violates the new bound
  ParameterInformation& p = getParameterByName_(name);
  // check if the type matches
  if (p.type != ParameterInformation::DOUBLE && p.type != ParameterInformation::DOUBLELIST)
  {
    throw ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
  }
  // the registered default value(s) must already satisfy the new bound
  DoubleList defaults;
  if (p.type == ParameterInformation::DOUBLE)
    defaults.push_back(double(p.default_value));
  else
    defaults = p.default_value;
  for (Size j = 0; j < defaults.size(); ++j)
  {
    if (defaults[j] < min)
    {
      // fixed typo 'TOPPS' -> 'TOPP' (consistent with setValidStrings_)
      throw InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "TO THE DEVELOPER: The TOPP tool option '" + name + "' with default value " + std::string(p.default_value) + " does not meet restrictions!");
    }
  }
  p.min_float = min;
}
void TOPPBase::setMaxFloat_(const String& name, double max)
{
  // Sets the upper bound for a DOUBLE/DOUBLELIST parameter.
  // @throws ElementNotFound if the parameter is not of floating point type
  // @throws InvalidParameter if a default value violates the new bound
  ParameterInformation& p = getParameterByName_(name);
  // check if the type matches
  if (p.type != ParameterInformation::DOUBLE && p.type != ParameterInformation::DOUBLELIST)
  {
    throw ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
  }
  // the registered default value(s) must already satisfy the new bound
  DoubleList defaults;
  if (p.type == ParameterInformation::DOUBLE)
    defaults.push_back(double(p.default_value));
  else
    defaults = p.default_value;
  for (Size j = 0; j < defaults.size(); ++j)
  {
    if (defaults[j] > max)
    {
      // fixed typo 'TOPPS' -> 'TOPP' (consistent with setValidStrings_)
      throw InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "TO THE DEVELOPER: The TOPP tool option '" + name + "' with default value " + std::string(p.default_value) + " does not meet restrictions!");
    }
  }
  p.max_float = max;
}
void TOPPBase::registerInputFile_(const String& name, const String& argument, const String& default_value, const String& description, bool required, bool advanced, const StringList& tags)
{
  // Registers an input file parameter. The tags 'skipexists' and
  // 'is_executable' are mutually exclusive.
  const bool skip_exists = ListUtils::contains(tags, "skipexists");
  const bool is_executable = ListUtils::contains(tags, "is_executable");
  if (skip_exists && is_executable)
  {
    throw Exception::WrongParameterType(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "'skipexists' and 'is_executable' cannot be combined");
  }
  // a required input file must not carry a default, unless one of the
  // special tags is present
  if (required && !default_value.empty() && !skip_exists && !is_executable)
  {
    throw InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Registering a required InputFile param (" + name + ") with a non-empty default is forbidden!", default_value);
  }
  parameters_.emplace_back(name, ParameterInformation::INPUT_FILE, argument, default_value, description, required, advanced, tags);
}
void TOPPBase::registerOutputFile_(const String& name, const String& argument, const String& default_value, const String& description, bool required, bool advanced)
{
  // Registers an output file parameter; required parameters must not have a
  // non-empty default (otherwise they could never be detected as missing).
  if (!default_value.empty() && required)
  {
    throw InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Registering a required OutputFile param (" + name + ") with a non-empty default is forbidden!", default_value);
  }
  parameters_.emplace_back(name, ParameterInformation::OUTPUT_FILE, argument, default_value, description, required, advanced);
}
void TOPPBase::registerOutputPrefix_(const String& name, const String& argument, const String& default_value, const String& description, bool required, bool advanced)
{
  // Registers an output prefix parameter; required parameters must not have
  // a non-empty default (otherwise they could never be detected as missing).
  if (!default_value.empty() && required)
  {
    throw InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Registering a required OutputPrefix param (" + name + ") with a non-empty default is forbidden!", default_value);
  }
  parameters_.emplace_back(name, ParameterInformation::OUTPUT_PREFIX, argument, default_value, description, required, advanced);
}
void TOPPBase::registerOutputDir_(const String& name, const String& argument, const String& default_value, const String& description, bool required, bool advanced)
{
  // Registers an output directory parameter; required parameters must not
  // have a non-empty default (otherwise they could never be detected as missing).
  if (!default_value.empty() && required)
  {
    throw InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Registering a required OutputDir param (" + name + ") with a non-empty default is forbidden!", default_value);
  }
  parameters_.emplace_back(name, ParameterInformation::OUTPUT_DIR, argument, default_value, description, required, advanced);
}
void TOPPBase::registerDoubleOption_(const String& name, const String& argument, double default_value, const String& description, bool required, bool advanced)
{
if (required)
{
throw InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Registering a double param (" + name + ") as 'required' is forbidden (there is no value to indicate it is missing)!", String(default_value));
}
parameters_.emplace_back(name, ParameterInformation::DOUBLE, argument, default_value, description, required, advanced);
}
void TOPPBase::registerIntOption_(const String& name, const String& argument, Int default_value, const String& description, bool required, bool advanced)
{
if (required)
{
throw InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Registering an Int param (" + name + ") as 'required' is forbidden (there is no value to indicate it is missing)!", String(default_value));
}
parameters_.emplace_back(name, ParameterInformation::INT, argument, default_value, description, required, advanced);
}
void TOPPBase::registerOutputFileList_(const String& name, const String& argument, const StringList& default_value, const String& description, bool required, bool advanced)
{
  // Registers a list of output files; required parameters must not carry a
  // non-empty default (otherwise they could never be detected as missing).
  if (!default_value.empty() && required)
  {
    throw InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Registering a required OutputFileList param (" + name + ") with a non-empty default is forbidden!", ListUtils::concatenate(default_value, ","));
  }
  parameters_.emplace_back(name, ParameterInformation::OUTPUT_FILE_LIST, argument, ListUtils::create<std::string>(default_value), description, required, advanced);
}
void TOPPBase::registerInputFileList_(const String& name, const String& argument, const StringList& default_value, const String& description, bool required, bool advanced, const StringList& tags)
{
  // Registers a list of input files. The tags 'skipexists' and
  // 'is_executable' are mutually exclusive.
  const bool skip_exists = ListUtils::contains(tags, "skipexists");
  const bool is_executable = ListUtils::contains(tags, "is_executable");
  if (skip_exists && is_executable)
  {
    throw Exception::WrongParameterType(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "'skipexists' and 'is_executable' cannot be combined");
  }
  // a required input file list must not carry a default, unless one of the
  // special tags is present
  if (required && !default_value.empty() && !skip_exists && !is_executable)
  {
    throw InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Registering a required InputFileList param (" + name + ") with a non-empty default is forbidden!", ListUtils::concatenate(default_value, ","));
  }
  parameters_.emplace_back(name, ParameterInformation::INPUT_FILE_LIST, argument, ListUtils::create<std::string>(default_value), description, required, advanced, tags);
}
void TOPPBase::registerStringList_(const String& name, const String& argument, const StringList& default_value, const String& description, bool required, bool advanced)
{
  // Registers a list of strings; required parameters must not carry a
  // non-empty default (otherwise they could never be detected as missing).
  if (!default_value.empty() && required)
  {
    throw InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Registering a required StringList param (" + name + ") with a non-empty default is forbidden!", ListUtils::concatenate(default_value, ","));
  }
  parameters_.emplace_back(name, ParameterInformation::STRINGLIST, argument, ListUtils::create<std::string>(default_value), description, required, advanced);
}
void TOPPBase::registerIntList_(const String& name, const String& argument, const IntList& default_value, const String& description, bool required, bool advanced)
{
  // Registers a list of integers; required parameters must not carry a
  // non-empty default (otherwise they could never be detected as missing).
  if (required && !default_value.empty())
  {
    // build the textual representation of the default only when actually
    // throwing (previously it was built on every call)
    stringstream ss;
    ss << default_value;
    throw InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Registering a required IntList param (" + name + ") with a non-empty default is forbidden!", String(ss.str()));
  }
  parameters_.emplace_back(name, ParameterInformation::INTLIST, argument, default_value, description, required, advanced);
}
void TOPPBase::registerDoubleList_(const String& name, const String& argument, const DoubleList& default_value, const String& description, bool required, bool advanced)
{
  // Registers a list of doubles; required parameters must not carry a
  // non-empty default (otherwise they could never be detected as missing).
  if (required && !default_value.empty())
  {
    // build the textual representation of the default only when actually
    // throwing (previously it was built on every call)
    stringstream ss;
    ss << default_value;
    throw InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Registering a required DoubleList param (" + name + ") with a non-empty default is forbidden!", String(ss.str()));
  }
  parameters_.emplace_back(name, ParameterInformation::DOUBLELIST, argument, default_value, description, required, advanced);
}
void TOPPBase::registerFlag_(const String& name, const String& description, bool advanced)
{
parameters_.emplace_back(name, ParameterInformation::FLAG, "", "", description, false, advanced);
}
void TOPPBase::addEmptyLine_()
{
parameters_.emplace_back("", ParameterInformation::NEWLINE, "", "", "", false, false);
}
void TOPPBase::addText_(const String& text)
{
parameters_.emplace_back("", ParameterInformation::TEXT, "", "", text, false, false);
}
const ParameterInformation& TOPPBase::findEntry_(const String& name) const
{
  // Returns the registered parameter called 'name' (read-only access).
  // @throws UnregisteredParameter if the name was never registered
  for (const ParameterInformation& info : parameters_)
  {
    if (info.name == name)
    {
      return info;
    }
  }
  throw UnregisteredParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
}
String TOPPBase::getStringOption_(const String& name) const
{
  // Returns the value of a string-like option (STRING, INPUT_FILE,
  // OUTPUT_FILE or OUTPUT_PREFIX; OUTPUT_DIR has its own accessor).
  // @throws WrongParameterType if the parameter has another type
  // @throws RequiredParameterNotGiven if a required parameter is empty/unset
  const ParameterInformation& p = findEntry_(name);
  if (p.type != ParameterInformation::STRING
      && p.type != ParameterInformation::INPUT_FILE
      && p.type != ParameterInformation::OUTPUT_FILE
      && p.type != ParameterInformation::OUTPUT_PREFIX)
  {
    throw WrongParameterType(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
  }
  if (p.required)
  {
    // look the value up only once (it was previously queried twice)
    const auto& value = getParam_(name);
    if (value.isEmpty() || value == "")
    {
      String message = "'" + name + "'";
      if (!p.valid_strings.empty())
      {
        message += " [valid: " + ListUtils::concatenate(p.valid_strings, ", ") + "]";
      }
      throw RequiredParameterNotGiven(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, message);
    }
  }
  String tmp = getParamAsString_(name, p.default_value.toString());
  writeDebug_(String("Value of string option '") + name + "': " + tmp, 1);
  // if required or set by user, do some validity checks
  if (p.required || !tmp.empty())
  {
    fileParamValidityCheck_(tmp, name, p);
  }
  return tmp;
}
/// Returns the value of an OUTPUT_DIR option and creates the directory if it does not exist.
/// @throws WrongParameterType if the registered type is not OUTPUT_DIR
/// @throws RequiredParameterNotGiven if the parameter is required but missing/empty
String TOPPBase::getOutputDirOption(const String& name) const
{
  const ParameterInformation& p = findEntry_(name);
  if (p.type != ParameterInformation::OUTPUT_DIR)
  {
    throw WrongParameterType(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
  }
  const ParamValue& value = getParam_(name); // fetch once (was fetched twice in the condition below)
  if (p.required && (value.isEmpty() || value == ""))
  {
    String message = "'" + name + "'";
    if (! p.valid_strings.empty()) { message += " [valid: " + ListUtils::concatenate(p.valid_strings, ", ") + "]"; }
    throw RequiredParameterNotGiven(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, message);
  }
  String tmp = getParamAsString_(name, p.default_value.toString());
  writeDebug_(String("Value of string(outdir) option '") + name + "': " + tmp, 1);
  // create directory if it does not exist
  File::makeDir(tmp);
  return tmp;
}
double TOPPBase::getDoubleOption_(const String& name) const
{
const ParameterInformation& p = findEntry_(name);
if (p.type != ParameterInformation::DOUBLE)
{
throw WrongParameterType(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
}
if (p.required && getParam_(name).isEmpty())
{
throw RequiredParameterNotGiven(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
}
double tmp = getParamAsDouble_(name, (double)p.default_value);
if (p.required && std::isnan(tmp))
{
throw RequiredParameterNotGiven(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
}
writeDebug_(String("Value of double option '") + name + "': " + String(tmp), 1);
//check if in valid range
if (p.required || (!getParam_(name).isEmpty() && tmp != (double)p.default_value))
{
if (tmp < p.min_float || tmp > p.max_float)
{
throw InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("Invalid value '") + tmp + "' for float parameter '" + name + "' given. Out of valid range: '" + p.min_float + "'-'" + p.max_float + "'.");
}
}
return tmp;
}
/// Returns the value of an INT option, enforcing the registered min/max range.
/// @throws WrongParameterType / RequiredParameterNotGiven / InvalidParameter (out of range)
Int TOPPBase::getIntOption_(const String& name) const
{
  const ParameterInformation& p = findEntry_(name);
  if (p.type != ParameterInformation::INT)
  {
    throw WrongParameterType(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
  }
  const ParamValue& value = getParam_(name); // single lookup (was queried twice)
  if (p.required && value.isEmpty())
  {
    throw RequiredParameterNotGiven(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
  }
  Int tmp = getParamAsInt_(name, (Int)p.default_value);
  // not checking if NAN here (as done with double, as NAN is not supported for Int)
  writeDebug_(String("Value of int option '") + name + "': " + String(tmp), 1);
  // range restrictions only apply if required, or if the user changed the default
  if (p.required || (!value.isEmpty() && tmp != (Int)p.default_value))
  {
    if (tmp < p.min_int || tmp > p.max_int)
    {
      throw InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("Invalid value '") + tmp + "' for integer parameter '" + name + "' given. Out of valid range: '" + p.min_int + "'-'" + p.max_int + "'.");
    }
  }
  return tmp;
}
void TOPPBase::fileParamValidityCheck_(const StringList& param_value, const String& param_name, const ParameterInformation& p) const
{
// check if all input files are readable
if (p.type == ParameterInformation::INPUT_FILE_LIST)
{
for (String t : param_value)
{
if (!ListUtils::contains(p.tags, "skipexists")) inputFileReadable_(t, param_name);
// check restrictions
if (p.valid_strings.empty()) continue;
// determine file type as string
FileTypes::Type f_type = FileHandler::getType(t);
// unknown ending is 'ok'
if (f_type == FileTypes::UNKNOWN)
{
writeLogWarn_("Warning: Could not determine format of input file '" + t + "'!");
}
else if (!ListUtils::contains(p.valid_strings, FileTypes::typeToName(f_type).toUpper(), ListUtils::CASE::INSENSITIVE))
{
throw InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
String("Input file '" + t + "' has invalid format '") +
FileTypes::typeToName(f_type) +
"'. Valid formats are: '" + ListUtils::concatenate(p.valid_strings, "','") +
"'.");
}
}
}
}
/// Validates a single file-like parameter value:
/// - INPUT_FILE: optionally resolves an executable on PATH (tag 'is_executable',
///   which may rewrite @p param_value to an absolute path), then checks readability
///   unless tagged 'skipexists'
/// - OUTPUT_FILE / OUTPUT_PREFIX: checks writability
/// Afterwards, registered restrictions (valid strings / file formats) are enforced.
/// @param param_value the value to check; may be updated in place (executable resolution)
/// @param param_name  parameter name, used for error messages only
/// @param p           the registered parameter meta data (type, tags, valid strings)
/// @throws ExternalExecutableNotFound / InvalidParameter on failed checks
void TOPPBase::fileParamValidityCheck_(String& param_value, const String& param_name, const ParameterInformation& p) const
{
  // check if files are readable/writable
  if (p.type == ParameterInformation::INPUT_FILE)
  {
    if (ListUtils::contains(p.tags, "is_executable"))
    { // will update to absolute path
      if (File::findExecutable(param_value))
      {
        writeDebug_("Input file resolved to '" + param_value + "'", 2);
      }
      else
      {
        // not found on PATH: tell the user how to fix it before throwing
        writeLogWarn_("Input file '" + param_value + "' could not be found (by searching on PATH). "
                      "Either provide a full filepath via the '-" +
                      param_name + "' option or fix your PATH environment !" +
                      (p.required ? "" : " Since this file is not strictly required, you might also pass the empty string \"\" as "
                                         "argument to prevent its usage (this might limit the usability of the tool)."));
        throw ExternalExecutableNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, param_value);
      }
    }
    if (!ListUtils::contains(p.tags, "skipexists")) inputFileReadable_(param_value, param_name);
  }
  else if (p.type == ParameterInformation::OUTPUT_FILE)
  {
    outputFileWritable_(param_value, param_name);
  }
  else if (p.type == ParameterInformation::OUTPUT_PREFIX)
  {
    outputFileWritable_(param_value + "_0", param_name); // only test one file
  }
  // check restrictions
  if (p.valid_strings.empty()) return;
  switch (p.type)
  {
    // plain strings must match one of the registered valid values exactly
    case ParameterInformation::STRING:
      if (!ListUtils::contains(p.valid_strings, param_value))
      {
        throw InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                               String("Invalid value '") + param_value + "' for string parameter '" + param_name + "' given. Valid strings are: '" +
                               ListUtils::concatenate(p.valid_strings, "', '") + "'.");
      }
      break;
    case ParameterInformation::INPUT_FILE:
    {
      // determine file type as string
      FileTypes::Type f_type = FileHandler::getType(param_value);
      // unknown ending is 'ok'
      if (f_type == FileTypes::UNKNOWN)
      {
        writeLogWarn_("Warning: Could not determine format of input file '" + param_value + "'!");
      }
      else if (!ListUtils::contains(p.valid_strings, FileTypes::typeToName(f_type).toUpper(), ListUtils::CASE::INSENSITIVE))
      {
        throw InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                               String("Input file '" + param_value + "' has invalid format '") +
                               FileTypes::typeToName(f_type) +
                               "'. Valid formats are: '" + ListUtils::concatenate(p.valid_strings, "','") +
                               "'.");
      }
      break;
    }
    case ParameterInformation::OUTPUT_FILE:
    {
      // determine file type as string
      FileTypes::Type f_type = FileHandler::getTypeByFileName(param_value);
      // Wrong ending, unknown is is ok.
      if (f_type != FileTypes::UNKNOWN
        && !ListUtils::contains(p.valid_strings, FileTypes::typeToName(f_type).toUpper(), ListUtils::CASE::INSENSITIVE))
      {
        throw InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                               String("Invalid output file extension for file '") + param_value + "'. Valid file extensions are: '" +
                               ListUtils::concatenate(p.valid_strings, "','") + "'.");
      }
      break;
    }
    case ParameterInformation::OUTPUT_PREFIX: /* no file extension check for out prefixes */
      break;
    default: /*nothing */
      break;
  }
}
/// Returns the value of a string-list-like option (STRINGLIST / INPUT_FILE_LIST / OUTPUT_FILE_LIST).
/// @throws WrongParameterType / RequiredParameterNotGiven
StringList TOPPBase::getStringList_(const String& name) const
{
  const ParameterInformation& p = findEntry_(name);
  if (p.type != ParameterInformation::STRINGLIST
     && p.type != ParameterInformation::INPUT_FILE_LIST
     && p.type != ParameterInformation::OUTPUT_FILE_LIST)
  {
    throw WrongParameterType(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
  }
  const ParamValue& value = getParam_(name); // single lookup (was queried twice)
  if (p.required && value.isEmpty())
  {
    throw RequiredParameterNotGiven(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
  }
  // convert the registered default only once (was converted twice below)
  const StringList defaults = ListUtils::toStringList<std::string>(p.default_value);
  StringList tmp_list = getParamAsStringList_(name, defaults);
  if (p.required && tmp_list.empty())
  {
    throw RequiredParameterNotGiven(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
  }
  for (String& tmp : tmp_list)
  {
    writeDebug_(String("Value of string option '") + name + "': " + tmp, 1);
  }
  // if required or set by user, do some validity checks
  if (p.required || (!value.isEmpty() && tmp_list != defaults))
  {
    fileParamValidityCheck_(tmp_list, name, p);
  }
  return tmp_list;
}
/// Returns the value of a DOUBLELIST option, range-checking every element.
/// @throws WrongParameterType / RequiredParameterNotGiven / InvalidParameter (element out of range)
DoubleList TOPPBase::getDoubleList_(const String& name) const
{
  const ParameterInformation& p = findEntry_(name);
  if (p.type != ParameterInformation::DOUBLELIST)
  {
    throw WrongParameterType(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
  }
  const ParamValue& value = getParam_(name); // single lookup (was queried twice)
  if (p.required && value.isEmpty())
  {
    throw RequiredParameterNotGiven(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
  }
  DoubleList tmp_list = getParamAsDoubleList_(name, p.default_value);
  if (p.required && tmp_list.empty())
  {
    throw RequiredParameterNotGiven(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
  }
  // loop-invariant: range checks only apply if required, or if the user changed the default
  const bool check_range = p.required || (!value.isEmpty() && tmp_list != p.default_value);
  for (const double tmp : tmp_list) // range-for instead of iterator arithmetic
  {
    // fixed copy-paste debug message (said 'string option' for a double list)
    writeDebug_(String("Value of double option '") + name + "': " + String(tmp), 1);
    if (check_range && (tmp < p.min_float || tmp > p.max_float))
    {
      throw InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("Invalid value '") + tmp + "' for float parameter '" + name + "' given. Out of valid range: '" + p.min_float + "'-'" + p.max_float + "'.");
    }
  }
  return tmp_list;
}
/// Returns the value of an INTLIST option, range-checking every element.
/// @throws WrongParameterType / RequiredParameterNotGiven / InvalidParameter (element out of range)
IntList TOPPBase::getIntList_(const String& name) const
{
  const ParameterInformation& p = findEntry_(name);
  if (p.type != ParameterInformation::INTLIST)
  {
    throw WrongParameterType(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
  }
  const ParamValue& value = getParam_(name); // single lookup (was queried twice)
  if (p.required && value.isEmpty())
  {
    throw RequiredParameterNotGiven(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
  }
  IntList tmp_list = getParamAsIntList_(name, p.default_value);
  if (p.required && tmp_list.empty())
  {
    throw RequiredParameterNotGiven(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
  }
  // loop-invariant: range checks only apply if required, or if the user changed the default
  const bool check_range = p.required || (!value.isEmpty() && tmp_list != p.default_value);
  for (const Int tmp : tmp_list)
  {
    // fixed copy-paste debug message (said 'string option' for an int list)
    writeDebug_(String("Value of int option '") + name + "': " + String(tmp), 1);
    if (check_range && (tmp < p.min_int || tmp > p.max_int))
    {
      throw InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String("Invalid value '") + tmp + "' for integer parameter '" + name + "' given. Out of valid range: '" + p.min_int + "'-'" + p.max_int + "'.");
    }
  }
  return tmp_list;
}
bool TOPPBase::getFlag_(const String& name) const
{
const ParameterInformation& p = findEntry_(name);
if (p.type != ParameterInformation::FLAG)
{
throw WrongParameterType(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, name);
}
bool tmp = getParamAsBool_(name);
writeDebug_(String("Value of string option '") + name + "': " + String(tmp), 1);
return tmp;
}
/// Writes an INFO message to the log stream and (timestamped) to the tool's log file.
void TOPPBase::writeLogInfo_(const String& text) const
{
  OPENMS_LOG_INFO << text << endl;
  enableLogging_(); // lazily opens the log file on first use
  const std::string stamp = QDateTime::currentDateTime().toString("yyyy-MM-dd hh:mm:ss").toStdString();
  log_ << stamp << ' ' << getIniLocation_() << ": " << text << endl;
}
/// Writes a WARN message to the log stream and (timestamped) to the tool's log file.
void TOPPBase::writeLogWarn_(const String& text) const
{
  OPENMS_LOG_WARN << text << endl;
  enableLogging_(); // lazily opens the log file on first use
  const std::string stamp = QDateTime::currentDateTime().toString("yyyy-MM-dd hh:mm:ss").toStdString();
  log_ << stamp << ' ' << getIniLocation_() << ": " << text << endl;
}
/// Writes an ERROR message to the log stream and (timestamped) to the tool's log file.
void TOPPBase::writeLogError_(const String& text) const
{
  OPENMS_LOG_ERROR << text << endl;
  enableLogging_(); // lazily opens the log file on first use
  const std::string stamp = QDateTime::currentDateTime().toString("yyyy-MM-dd hh:mm:ss").toStdString();
  log_ << stamp << ' ' << getIniLocation_() << ": " << text << endl;
}
/// Writes a debug message if the current debug level is at least @p min_level.
void TOPPBase::writeDebug_(const String& text, UInt min_level) const
{
  if (debug_level_ < (Int)min_level)
  {
    return; // below the requested verbosity: stay silent
  }
  OPENMS_LOG_DEBUG << text << endl;
  enableLogging_();
  const std::string stamp = QDateTime::currentDateTime().toString("yyyy-MM-dd hh:mm:ss").toStdString();
  log_ << stamp << ' ' << getIniLocation_() << ": " << text << endl;
}
/// Writes a debug message plus a full parameter dump, framed by separator lines,
/// if the current debug level is at least @p min_level.
void TOPPBase::writeDebug_(const String& text, const Param& param, UInt min_level) const
{
  if (debug_level_ < (Int)min_level)
  {
    return; // below the requested verbosity: stay silent
  }
  const char* sep = " - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ";
  const std::string stamp = QDateTime::currentDateTime().toString("yyyy-MM-dd hh:mm:ss").toStdString();
  OPENMS_LOG_DEBUG << sep << endl
                   << stamp << ' ' << getIniLocation_() << " " << text << endl
                   << param
                   << sep << endl;
  enableLogging_();
  log_ << sep << endl
       << stamp << ' ' << getIniLocation_() << " " << text << endl
       << param
       << sep << endl;
}
/// Convenience overload: runs an external process without exposing the captured
/// stdout/stderr to the caller (they are still collected for error reporting).
TOPPBase::ExitCodes TOPPBase::runExternalProcess_(const QString& executable, const QStringList& arguments, const QString& workdir, const std::map<QString, QString>& env) const
{
  String captured_out;
  String captured_err;
  return runExternalProcess_(executable, arguments, captured_out, captured_err, workdir, env);
}
/// Runs an external process and maps its outcome to a TOPP exit code.
/// @param proc_stdout / proc_stderr cleared, then filled with the captured streams
/// @return EXECUTION_OK, EXTERNAL_PROGRAM_ERROR (crash / non-zero exit) or
///         EXTERNAL_PROGRAM_NOTFOUND (failed to start)
TOPPBase::ExitCodes TOPPBase::runExternalProcess_(const QString& executable, const QStringList& arguments, String& proc_stdout, String& proc_stderr, const QString& workdir, const std::map<QString, QString>& env) const
{
  proc_stdout.clear();
  proc_stderr.clear();
  // callbacks append each chunk as it arrives; at debug level >= 4 chunks are echoed immediately
  auto on_stdout = [&](const String& chunk) { proc_stdout += chunk; if (debug_level_ >= 4) OPENMS_LOG_INFO << chunk; };
  auto on_stderr = [&](const String& chunk) { proc_stderr += chunk; if (debug_level_ >= 4) OPENMS_LOG_INFO << chunk; };
  ExternalProcess ep(on_stdout, on_stderr);
  const auto& state = ep.run(executable, arguments, workdir, true, ExternalProcess::IO_MODE::READ_WRITE, env); // does automatic escaping etc.
  if (debug_level_ < 4 && state != ExternalProcess::RETURNSTATE::SUCCESS)
  { // failure: the streams were not echoed in the callbacks, so dump them now
    writeLogError_("Standard output: " + proc_stdout);
    writeLogError_("Standard error: " + proc_stderr);
  }
  switch (state)
  {
    case ExternalProcess::RETURNSTATE::SUCCESS:
      return EXECUTION_OK;
    case ExternalProcess::RETURNSTATE::NONZERO_EXIT:
    case ExternalProcess::RETURNSTATE::CRASH:
      return EXTERNAL_PROGRAM_ERROR;
    case ExternalProcess::RETURNSTATE::FAILED_TO_START:
      return EXTERNAL_PROGRAM_NOTFOUND;
    default:
      throw Exception::InternalToolError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Unknown return state of external process.");
  }
}
/// Returns the raw parameter value as a String, or @p default_value if unset.
String TOPPBase::getParamAsString_(const String& key, const String& default_value) const
{
  const ParamValue& value = getParam_(key);
  if (value.isEmpty())
  {
    return default_value;
  }
  return value.toString();
}
/// Returns the raw parameter value as an Int, or @p default_value if unset.
/// @throws WrongParameterType if the stored value is not an integer
Int TOPPBase::getParamAsInt_(const String& key, Int default_value) const
{
  const ParamValue& value = getParam_(key);
  if (value.isEmpty())
  {
    return default_value;
  }
  if (value.valueType() != ParamValue::INT_VALUE)
  {
    throw WrongParameterType(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, key);
  }
  return (Int)value;
}
double TOPPBase::getParamAsDouble_(const String& key, double default_value) const
{
const ParamValue& tmp = getParam_(key);
if (!tmp.isEmpty())
{
if (tmp.valueType() == ParamValue::DOUBLE_VALUE)
{
return (double)tmp;
}
throw WrongParameterType(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, key);
}
else
{
return default_value;
}
}
/// Returns the raw parameter value as a StringList, or @p default_value if unset.
StringList TOPPBase::getParamAsStringList_(const String& key, const StringList& default_value) const
{
  const ParamValue& value = getParam_(key);
  if (value.isEmpty())
  {
    return default_value;
  }
  return ListUtils::toStringList<std::string>(value);
}
/// Returns the raw parameter value as an IntList, or @p default_value if unset.
/// @throws WrongParameterType if the stored value is not an integer list
IntList TOPPBase::getParamAsIntList_(const String& key, const IntList& default_value) const
{
  const ParamValue& value = getParam_(key);
  if (value.isEmpty())
  {
    return default_value;
  }
  if (value.valueType() != ParamValue::INT_LIST)
  {
    throw WrongParameterType(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, key);
  }
  return value;
}
/// Returns the raw parameter value as a DoubleList, or @p default_value if unset.
/// @throws WrongParameterType if the stored value is not a double list
DoubleList TOPPBase::getParamAsDoubleList_(const String& key, const DoubleList& default_value) const
{
  const ParamValue& value = getParam_(key);
  if (value.isEmpty())
  {
    return default_value;
  }
  if (value.valueType() != ParamValue::DOUBLE_LIST)
  {
    throw WrongParameterType(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, key);
  }
  return value;
}
/// Converts a flag-style parameter to bool: unset -> false, 'true'/'false' strings
/// -> the corresponding bool; anything else is rejected.
/// @throws InvalidParameter for values other than (empty|'true'|'false')
bool TOPPBase::getParamAsBool_(const String& key) const
{
  ParamValue tmp = getParam_(key);
  switch (tmp.valueType())
  {
    case ParamValue::EMPTY_VALUE:
      return false; // unset flag defaults to 'off'
    case ParamValue::STRING_VALUE:
      if ((std::string)tmp == "true")
      {
        return true;
      }
      if ((std::string)tmp == "false")
      {
        return false;
      }
      break; // any other string falls through to the error below
    default:
      break;
  }
  throw InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, std::string("Invalid value '") + (std::string)tmp + "' for flag parameter '" + key + "'. Valid values are 'true' and 'false' only.");
}
/// Returns the stored value for @p key, or ParamValue::EMPTY if the key is unknown.
ParamValue const& TOPPBase::getParam_(const String& key) const
{
  if (!param_.exists(key))
  {
    // unknown key: report at debug level and fall back to the EMPTY sentinel
    writeDebug_(String("Parameter '") + key + String("' not found."), 1);
    return ParamValue::EMPTY;
  }
  return param_.getValue(key);
}
/// Grants read access to the tool's complete parameter set.
Param const& TOPPBase::getParam_() const
{
  return param_;
}
/// Returns everything before the last ':' of @p name (the subsection prefix),
/// or an empty string if @p name contains no ':'.
String TOPPBase::getSubsection_(const String& name) const
{
  const size_t pos = name.find_last_of(':');
  if (pos == std::string::npos)
  {
    return ""; // no delimiter -> top-level parameter, no subsection
  }
  return name.substr(0, pos);
}
/// Lazily opens the tool's log file (append mode) if a 'log' destination is
/// configured; a no-op when the file is already open or no destination is set.
void TOPPBase::enableLogging_() const
{
  if (log_.is_open())
  {
    return; // already logging
  }
  if (!param_.exists("log"))
  {
    return; // no log destination configured
  }
  const std::string log_destination = param_.getValue("log");
  if (log_destination.empty())
  {
    return;
  }
  log_.open(log_destination, ofstream::out | ofstream::app);
  if (debug_level_ >= 1)
  {
    cout << "Writing to '" << log_destination << '\'' << "\n";
    log_ << QDateTime::currentDateTime().toString("yyyy-MM-dd hh:mm:ss").toStdString() << ' ' << getIniLocation_() << ": " << "Writing to '" << log_destination << '\'' << "\n";
  }
}
/// Validates @p param against the tool's registered parameters. Problems are
/// reported as warnings only (never thrown): unknown subsections, unknown
/// parameters, and values whose stored type differs from the registered type.
/// @param param    the parameter set to check (e.g. loaded from an INI file)
/// @param filename the file the parameters came from (used only in warning text)
/// @param location prefix describing where in the file the params live (used only in warning text)
void TOPPBase::checkParam_(const Param& param, const String& filename, const String& location) const
{
  //cout << endl << "--"<< location<< "--" << endl << param << endl << endl;
  for (Param::ParamIterator it = param.begin(); it != param.end(); ++it)
  {
    // subsections (do not check content, but warn if not registered)
    String subsection = getSubsection_(it.getName());
    if (!subsection.empty() && subsections_TOPP_.count(subsection) == 0) // not found in TOPP subsections
    {
      // for multi-level subsections, check only the first level:
      if (subsections_.count(subsection.substr(0, subsection.find(':'))) == 0) // not found in normal subsections
      {
        // 'common::<tool_name>' is implicitly valid and must not trigger a warning
        if (!(location == "common::" && subsection == tool_name_))
        {
          writeLogWarn_("Warning: Unknown subsection '" + subsection + "' in '" + filename + "' (location '" + location + "')!");
        }
      }
      continue;
    }
    // normal parameter: check its value type
    // if no such parameter is registered an exception is thrown
    try
    {
      //check type
      switch (findEntry_(it.getName()).type)
      {
      // all string-like types (including flags, which store 'true'/'false') map to STRING_VALUE
      case ParameterInformation::STRING:
      case ParameterInformation::INPUT_FILE:
      case ParameterInformation::OUTPUT_FILE:
      case ParameterInformation::OUTPUT_PREFIX:
      case ParameterInformation::FLAG:
        if (it->value.valueType() != ParamValue::STRING_VALUE)
        {
          writeLogWarn_("Warning: Wrong parameter type of '" + location + it.getName() + "' in '" + filename + "'. Type should be 'string'!");
        }
        break;
      case ParameterInformation::DOUBLE:
        if (it->value.valueType() != ParamValue::DOUBLE_VALUE)
        {
          writeLogWarn_("Warning: Wrong parameter type of '" + location + it.getName() + "' in '" + filename + "'. Type should be 'double'!");
        }
        break;
      case ParameterInformation::INT:
        if (it->value.valueType() != ParamValue::INT_VALUE)
        {
          writeLogWarn_("Warning: Wrong parameter type of '" + location + it.getName() + "' in '" + filename + "'. Type should be 'int'!");
        }
        break;
      case ParameterInformation::STRINGLIST:
      case ParameterInformation::INPUT_FILE_LIST:
      case ParameterInformation::OUTPUT_FILE_LIST:
        if (it->value.valueType() != ParamValue::STRING_LIST)
        {
          writeLogWarn_("Warning: Wrong parameter type of '" + location + it.getName() + "' in '" + filename + "'. Type should be 'string list'!");
        }
        break;
      case ParameterInformation::INTLIST:
        if (it->value.valueType() != ParamValue::INT_LIST)
        {
          writeLogWarn_("Warning: Wrong parameter type of '" + location + it.getName() + "' in '" + filename + "'. Type should be 'int list'!");
        }
        break;
      case ParameterInformation::DOUBLELIST:
        if (it->value.valueType() != ParamValue::DOUBLE_LIST)
        {
          writeLogWarn_("Warning: Wrong parameter type of '" + location + it.getName() + "' in '" + filename + "'. Type should be 'double list'!");
        }
        break;
      default:
        break;
      }
    }
    catch (UnregisteredParameter&)
    {
      // parameter is not registered for this tool: warn, but keep going
      writeLogWarn_("Warning: Unknown parameter '" + location + it.getName() + "' in '" + filename + "'!");
    }
  }
}
/// Warns if a user-supplied INI file contains no section for this tool
/// (in which case the tool defaults will be used).
void TOPPBase::checkIfIniParametersAreApplicable_(const Param& ini_params)
{
  const Param tool_params = ini_params.copy(getIniLocation_());
  if (!tool_params.empty())
  {
    return; // the tool's section is present; nothing to complain about
  }
  // the ini file does not contain a section for our tool -> warn the user
  writeLogWarn_(String("Warning: The provided INI file does not contain any parameters specific for this tool (expected in '") + getIniLocation_() +
                "'). Please check your .ini file. The default parameters for this tool will be applied.");
}
/// Verifies that an input file exists, is readable and is not an empty regular file.
/// @throws FileNotFound / FileNotReadable / FileEmpty (an error is logged first)
void TOPPBase::inputFileReadable_(const String& filename, const String& param_name) const
{
  writeDebug_("Checking input file '" + filename + "'", 2);
  // compose the error prefix once; it is logged before every throw below
  const String message = param_name.empty()
                           ? String("Cannot read input file!\n")
                           : "Cannot read input file given from parameter '-" + param_name + "'!\n";
  if (!File::exists(filename))
  {
    OPENMS_LOG_ERROR << message;
    throw FileNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, filename);
  }
  if (!File::readable(filename))
  {
    OPENMS_LOG_ERROR << message;
    throw FileNotReadable(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, filename);
  }
  // empty regular files are rejected; directories are exempt from the size check
  if (!File::isDirectory(filename) && File::empty(filename))
  {
    OPENMS_LOG_ERROR << message;
    throw FileEmpty(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, filename);
  }
}
/// Verifies that an output file location is writable.
/// @throws UnableToCreateFile (an error is logged first)
void TOPPBase::outputFileWritable_(const String& filename, const String& param_name) const
{
  writeDebug_("Checking output file '" + filename + "'", 2);
  const String message = param_name.empty()
                           ? String("Cannot write output file!\n")
                           : "Cannot write output file given from parameter '-" + param_name + "'!\n";
  if (!File::writable(filename))
  {
    OPENMS_LOG_ERROR << message;
    throw UnableToCreateFile(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, filename);
  }
}
/// Registers an algorithm subsection (defaults supplied via getSubsectionDefaults_(name)).
void TOPPBase::registerSubsection_(const String& name, const String& description)
{
  subsections_.insert(std::make_pair(name, description));
  subsections_[name] = description; // overwrite to keep 'last registration wins' semantics
}
/// Registers a TOPP-level subsection (command-line parameters containing ':').
void TOPPBase::registerTOPPSubsection_(const String& name, const String& description)
{
  subsections_TOPP_.insert(std::make_pair(name, description));
  subsections_TOPP_[name] = description; // overwrite to keep 'last registration wins' semantics
}
bool TOPPBase::parseRange_(const String& text, double& low, double& high) const
{
bool any_set = false;
try
{
String tmp = text.prefix(':');
if (!tmp.empty())
{
low = tmp.toDouble();
any_set = true;
}
tmp = text.suffix(':');
if (!tmp.empty())
{
high = tmp.toDouble();
any_set = true;
}
}
catch (Exception::ConversionError&)
{
throw Exception::ConversionError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Could not convert string '" + text +
"' to a range of floating point values");
}
return any_set;
}
bool TOPPBase::parseRange_(const String& text, Int& low, Int& high) const
{
bool any_set = false;
try
{
String tmp = text.prefix(':');
if (!tmp.empty())
{
low = tmp.toInt();
any_set = true;
}
tmp = text.suffix(':');
if (!tmp.empty())
{
high = tmp.toInt();
any_set = true;
}
}
catch (Exception::ConversionError&)
{
throw Exception::ConversionError(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
"Could not convert string '" + text +
"' to a range of integer values");
}
return any_set;
}
/// Default implementation: tools that register subsections must override this
/// to supply the subsection's default parameters.
Param TOPPBase::getSubsectionDefaults_(const String& /*section*/) const
{
  throw NotImplemented(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION);
}
/// Builds the complete default Param tree for this tool: converts every
/// registered ParameterInformation into a Param entry (with tags, restrictions
/// and min/max bounds), adds TOPP subsection descriptions, tool version/type,
/// subsection defaults, and finally overlays per-user defaults from
/// '~/<tool>.ini'. Internal housekeeping params (ini, help, instance, write_*)
/// are deliberately excluded.
Param TOPPBase::getDefaultParameters_() const
{
  Param tmp;
  String loc = this->getToolPrefix();
  //parameters
  for (vector<ParameterInformation>::const_iterator it = parameters_.begin(); it != parameters_.end(); ++it)
  {
    if (std::unordered_set<std::string>{"ini", "-help", "-helphelp", "instance", "write_ini", "write_ctd", "write_cwl", "write_nested_cwl", "write_json", "write_nested_json"}.count(it->name) > 0) // do not store these params in ini file
    {
      continue;
    }
    String name = loc + it->name;
    // translate ParameterInformation properties into Param tags
    std::vector<std::string> tags;
    if (it->advanced)
    {
      tags.emplace_back("advanced");
    }
    if (it->required)
    {
      tags.emplace_back("required");
    }
    if (it->type == ParameterInformation::INPUT_FILE || it->type == ParameterInformation::INPUT_FILE_LIST)
    {
      tags.emplace_back(TAG_INPUT_FILE);
    }
    if (it->type == ParameterInformation::INPUT_FILE && std::find(it->tags.begin(), it->tags.end(), "is_executable") != it->tags.end())
    {
      tags.emplace_back("is_executable");
    }
    if (it->type == ParameterInformation::OUTPUT_FILE || it->type == ParameterInformation::OUTPUT_FILE_LIST) { tags.emplace_back(TAG_OUTPUT_FILE); }
    if (it->type == ParameterInformation::OUTPUT_PREFIX) { tags.emplace_back(TAG_OUTPUT_PREFIX); }
    if (it->type == ParameterInformation::OUTPUT_DIR) { tags.emplace_back(TAG_OUTPUT_DIR); }
    // per-type conversion: value, restrictions (valid strings / numeric bounds)
    switch (it->type)
    {
    case ParameterInformation::STRING:
      tmp.setValue(name, (String)it->default_value.toString(), it->description, tags);
      if (!it->valid_strings.empty())
      {
        tmp.setValidStrings(name, ListUtils::create<std::string>(it->valid_strings));
      }
      break;
    case ParameterInformation::INPUT_FILE:
    case ParameterInformation::OUTPUT_FILE:
    case ParameterInformation::OUTPUT_PREFIX:
    case ParameterInformation::OUTPUT_DIR:
      tmp.setValue(name, (String)it->default_value.toString(), it->description, tags);
      if (!it->valid_strings.empty())
      {
        // file-typed params express their restrictions as glob patterns ('*.<ext>')
        StringList vss_tmp = it->valid_strings;
        for (auto& vs : vss_tmp)
        {
          vs = "*." + vs;
        }
        tmp.setValidStrings(name, ListUtils::create<std::string>(vss_tmp));
      }
      break;
    case ParameterInformation::DOUBLE:
      tmp.setValue(name, it->default_value, it->description, tags);
      tmp.setMinFloat(name, it->min_float);
      tmp.setMaxFloat(name, it->max_float);
      break;
    case ParameterInformation::INT:
      tmp.setValue(name, (Int)it->default_value, it->description, tags);
      tmp.setMinInt(name, it->min_int);
      tmp.setMaxInt(name, it->max_int);
      break;
    case ParameterInformation::FLAG:
      // flags are stored as 'false' strings restricted to true/false
      tmp.setValue(name, "false", it->description, tags);
      tmp.setValidStrings(name, {"true","false"});
      break;
    case ParameterInformation::INPUT_FILE_LIST:
    case ParameterInformation::OUTPUT_FILE_LIST:
      tmp.setValue(name, it->default_value, it->description, tags);
      if (!it->valid_strings.empty())
      {
        std::vector<std::string> vss = ListUtils::create<std::string>(it->valid_strings);
        std::transform(vss.begin(), vss.end(), vss.begin(), [](const std::string& s) {return "*." + s;});
        tmp.setValidStrings(name, vss);
      }
      break;
    case ParameterInformation::STRINGLIST:
      tmp.setValue(name, it->default_value, it->description, tags);
      if (!it->valid_strings.empty())
      {
        tmp.setValidStrings(name, ListUtils::create<std::string>(it->valid_strings));
      }
      break;
    case ParameterInformation::INTLIST:
      tmp.setValue(name, it->default_value, it->description, tags);
      // only set bounds that were actually restricted by the tool
      if (it->min_int != -std::numeric_limits<Int>::max())
      {
        tmp.setMinInt(name, it->min_int);
      }
      if (it->max_int != std::numeric_limits<Int>::max())
      {
        tmp.setMaxInt(name, it->max_int);
      }
      break;
    case ParameterInformation::DOUBLELIST:
      tmp.setValue(name, it->default_value, it->description, tags);
      // only set bounds that were actually restricted by the tool
      if (it->min_float != -std::numeric_limits<double>::max())
      {
        tmp.setMinFloat(name, it->min_float);
      }
      if (it->max_float != std::numeric_limits<double>::max())
      {
        tmp.setMaxFloat(name, it->max_float);
      }
      break;
    default:
      break;
    }
  }
  //subsections intrinsic to TOPP tool (i.e. a command line param with a ':')
  for (map<String, String>::const_iterator it = subsections_TOPP_.begin(); it != subsections_TOPP_.end(); ++it)
  {
    tmp.setSectionDescription(loc + it->first, it->second);
  }
  // set tool version
  tmp.setValue(tool_name_ + ":version", version_, "Version of the tool that generated this parameters file.", {"advanced"});
  // Descriptions
  tmp.setSectionDescription(tool_name_, tool_description_);
  tmp.setSectionDescription(tool_name_ + ":" + String(instance_number_), String("Instance '") + String(instance_number_) + "' section for '" + tool_name_ + "'");
  // add type (as default type is "", but .ini file should have it)
  if (param_cmdline_.exists("type"))
    tmp.setValue(loc + "type", param_cmdline_.getValue("type"));
  // Subsections
  Param sub_sections = getSubsectionDefaults_();
  if (!sub_sections.empty())
  {
    tmp.insert(loc, sub_sections);
  }
  // 2nd stage, use TOPP tool defaults from home (if existing)
  Param tool_user_defaults(getToolUserDefaults_(tool_name_));
  tmp.update(tool_user_defaults);
  // 3rd stage, use OpenMS.ini from library to override settings
  // -> currently disabled as we cannot write back those values to the params
  return tmp;
}
/// Collects the defaults of all registered algorithm subsections into one Param,
/// attaching each subsection's description; empty subsections are skipped.
Param TOPPBase::getSubsectionDefaults_() const
{
  Param collected;
  for (const auto& sub : subsections_)
  {
    Param defaults = getSubsectionDefaults_(sub.first);
    if (defaults.empty())
    {
      continue; // nothing to add for this subsection
    }
    collected.insert(sub.first + ":", defaults);
    collected.setSectionDescription(sub.first, sub.second);
  }
  return collected;
}
/// Loads per-user tool defaults from '<user dir>/<tool_name>.ini' if such a
/// file exists and is readable; returns an empty Param otherwise.
Param TOPPBase::getToolUserDefaults_(const String& tool_name) const
{
  Param defaults;
  const String ini_name(File::getUserDirectory() + "/" + tool_name + ".ini");
  if (File::readable(ini_name))
  {
    ParamXMLFile paramFile;
    paramFile.load(ini_name, defaults);
  }
  return defaults;
}
/// Returns the name of this tool.
const String& TOPPBase::toolName_() const
{
  return tool_name_;
}
/// Convenience overload: builds DataProcessing meta data for a single action.
DataProcessing TOPPBase::getProcessingInfo_(DataProcessing::ProcessingAction action) const
{
  // delegate to the set-based overload with a one-element set
  return getProcessingInfo_(std::set<DataProcessing::ProcessingAction>{action});
}
/// Builds DataProcessing meta data for the given actions. In test mode, version,
/// completion time and parameters are fixed so test output stays reproducible;
/// otherwise the real version, current time and the full parameter set are recorded.
DataProcessing TOPPBase::getProcessingInfo_(const std::set<DataProcessing::ProcessingAction>& actions) const
{
  DataProcessing dp;
  dp.setProcessingActions(actions);
  dp.getSoftware().setName(tool_name_);
  if (test_mode_)
  {
    // deterministic placeholders for reproducible test output
    dp.getSoftware().setVersion("version_string");
    DateTime date_time;
    date_time.set("1999-12-31 23:59:59");
    dp.setCompletionTime(date_time);
    dp.setMetaValue("parameter: mode", "test_mode");
  }
  else
  {
    dp.getSoftware().setVersion(version_);
    dp.setCompletionTime(DateTime::now());
    // record every parameter of this run as meta data
    const Param& param = getParam_();
    for (Param::ParamIterator it = param.begin(); it != param.end(); ++it)
    {
      dp.setMetaValue(String("parameter: " + it.getName()), it->value);
    }
  }
  return dp;
}
/// Appends data-processing meta data to a consensus map. In test mode, the
/// column-header filenames are reduced to basenames for machine-independent output.
void TOPPBase::addDataProcessing_(ConsensusMap& map, const DataProcessing& dp) const
{
  map.getDataProcessing().push_back(dp);
  //remove absolute map paths
  if (test_mode_)
  {
    // iterate over the existing headers instead of indexing 0..size()-1:
    // ColumnHeaders is a map, so operator[] would default-insert entries
    // whenever the keys are not the contiguous range 0..size()-1
    for (auto& header : map.getColumnHeaders())
    {
      header.second.filename = File::basename(header.second.filename);
    }
  }
}
/// Appends data-processing meta data to a feature map.
void TOPPBase::addDataProcessing_(FeatureMap& map, const DataProcessing& dp) const
{
  map.getDataProcessing().push_back(dp);
}
/// Data processing setter for peak maps: every spectrum and every chromatogram
/// receives a pointer to one shared copy of the meta data.
void TOPPBase::addDataProcessing_(PeakMap& map, const DataProcessing& dp) const
{
  auto dp_ = std::make_shared<DataProcessing>(dp);
  for (Size i = 0; i < map.size(); ++i)
  {
    map[i].getDataProcessing().push_back(dp_);
  }
  for (Size i = 0; i < map.getNrChromatograms(); ++i)
  {
    map.getChromatogram(i).getDataProcessing().push_back(dp_);
  }
}
/// Returns the doxygen documentation URL for this tool: the versioned 'release'
/// page for release builds, the 'nightly' page otherwise.
String TOPPBase::getDocumentationURL() const
{
  const VersionInfo::VersionDetails ver = VersionInfo::getVersionStruct();
  const String tool_prefix = "TOPP_";
  // pre_release_identifier is only empty if the GIT_BRANCH inferred or set during
  // CMake config was release/* or master
  // see https://github.com/OpenMS/OpenMS/blob/develop/CMakeLists.txt#L122
  if (!ver.pre_release_identifier.empty())
  {
    return String("http://www.openms.de/doxygen/nightly/html/") + tool_prefix + tool_name_ + ".html";
  }
  String release_version = String(ver.version_major) + "." + String(ver.version_minor) + "." + String(ver.version_patch);
  return String("http://www.openms.de/doxygen/release/") + release_version + "/html/"+ tool_prefix + tool_name_ + ".html";
}
template <typename Writer>
void TOPPBase::writeToolDescription_(Writer& writer, std::string write_type, std::string fileExtension)
{
//store ini-file content in ini_file_str
QString out_dir_str = String(param_cmdline_.getValue(write_type).toString()).toQString();
if (out_dir_str == "")
{
out_dir_str = QDir::currentPath();
}
StringList type_list = ToolHandler::getTypes(tool_name_);
if (type_list.empty())
type_list.push_back(""); // no type for most tools (except GenericWrapper)
for (Size i = 0; i < type_list.size(); ++i)
{
// check file is writable
QString write_file = out_dir_str + QDir::separator() + tool_name_.toQString() + type_list[i].toQString() + fileExtension.c_str();
outputFileWritable_(write_file, write_type);
// set type on command line, so that getDefaultParameters_() does not fail (as it calls getSubSectionDefaults() of tool)
if (!type_list[i].empty())
param_cmdline_.setValue("type", type_list[i]);
Param default_params = getDefaultParameters_();
// add type to ini file
if (!type_list[i].empty())
default_params.setValue(this->ini_location_ + "type", type_list[i]);
std::stringstream ss;
// fill program category and docurl
std::string docurl = getDocumentationURL();
std::string category;
if (official_)
{ // we can only get the docurl/category from registered/official tools
category = ToolHandler::getCategory(tool_name_);
}
// collect citation information
std::vector<std::string> citation_dois;
citation_dois.reserve(citations_.size() + 1);
citation_dois.push_back(cite_openms.doi);
for (auto& citation : citations_)
{
citation_dois.push_back(citation.doi);
}
// fill tool information
ToolInfo toolInfo{};
toolInfo.version_ = version_;
toolInfo.name_ = tool_name_;
toolInfo.docurl_ = docurl;
toolInfo.category_ = category;
toolInfo.description_ = tool_description_;
toolInfo.citations_ = citation_dois;
// this will write the actual data to disk
writer.store(write_file.toStdString(), default_params, toolInfo);
}
}
  /**
    @brief Parse the raw command line into a Param object.

    Arguments are processed in REVERSE order: values following an option are
    pushed onto a queue first, and when the option itself is reached the queue
    contents become its value(s). Values not consumed by any option end up in
    the @p misc list; unrecognized options end up in the @p unknown list.
  */
  Param TOPPBase::parseCommandLine_(const int argc, const char** argv, const String& misc, const String& unknown)
  {
    Param cmd_params;
    // current state:
    // 'parameters_' contains all commandline params which were registered using 'registerOptionsAndFlags_()' + the common ones (-write_ini etc)
    // .. they are empty/default at this point
    // We now fetch the (so-far unknown) subsection parameters (since they can be addressed on command line as well)
    // special case of GenericWrapper: since we need the subSectionDefaults before pushing the cmd arguments in there
    // but the 'type' is empty currently,
    // we extract and set it beforehand
    StringList sl_args = StringList(argv, argv + argc);
    StringList::iterator it_type = std::find(sl_args.begin(), sl_args.end(), "-type");
    if (it_type != sl_args.end())
    { // found it
      ++it_type; // advance to next argument -- this should be the value of -type
      if (it_type != sl_args.end()) param_.setValue("type", *it_type);
    }
    // prepare map of parameters: maps "-name" -> registered parameter info
    typedef map<String, vector<ParameterInformation>::const_iterator> ParamMap;
    ParamMap param_map;
    for (vector<ParameterInformation>::const_iterator it = parameters_.begin(); it != parameters_.end(); ++it)
    {
      param_map["-" + it->name] = it;
    }
    vector<ParameterInformation> subsection_param;
    try
    {
      // the parameters from the subsections
      subsection_param = paramToParameterInformation_(getSubsectionDefaults_());
      for (vector<ParameterInformation>::const_iterator it = subsection_param.begin(); it != subsection_param.end(); ++it)
      {
        param_map["-" + it->name] = it;
      }
    }
    catch (BaseException& e)
    { // this only happens for GenericWrapper, if 'type' is not given or invalid (then we do not have subsection params) -- enough to issue a warning
      writeLogWarn_(String("Warning: Unable to fetch subsection parameters! Addressing subsection parameters will not work for this tool (did you forget to specify '-type'?)."));
      writeDebug_(String("Error occurred in line ") + e.getLine() + " of file " + e.getFile() + " (in function: " + e.getFunction() + ")!", 1);
    }
    // list to store "misc"/"unknown" items:
    map<std::string, std::vector<std::string> > misc_unknown;
    list<String> queue; // queue for arguments
    // we parse the arguments in reverse order, so that we have arguments already when we encounter the option that uses them!
    for (int i = argc - 1; i > 0; --i)
    {
      String arg = argv[i];
      // options start with "-" or "--" followed by a letter:
      bool is_option = (arg.size() >= 2) && (arg[0] == '-') && (isalpha(arg[1]) || ((arg[1] == '-') && (arg.size() >= 3) && isalpha(arg[2])));
      if (is_option) // process content of the queue
      {
        ParamMap::iterator pos = param_map.find(arg);
        if (pos != param_map.end()) // parameter is defined
        {
          ParamValue value;
          if (pos->second->type == ParameterInformation::FLAG) // flag
          {
            value = "true";
          }
          else // option with argument(s)
          {
            // scalar types consume the queue front; list types consume the whole queue
            switch (pos->second->type)
            {
            case ParameterInformation::STRING:
            case ParameterInformation::INPUT_FILE:
            case ParameterInformation::OUTPUT_FILE:
            case ParameterInformation::OUTPUT_PREFIX:
            case ParameterInformation::OUTPUT_DIR:
              if (queue.empty())
                value = std::string();
              else
                value = queue.front();
              break;
            case ParameterInformation::INT:
              if (!queue.empty())
                value = queue.front().toInt();
              break;
            case ParameterInformation::DOUBLE:
              if (!queue.empty())
                value = queue.front().toDouble();
              break;
            case ParameterInformation::INPUT_FILE_LIST:
            case ParameterInformation::OUTPUT_FILE_LIST:
            case ParameterInformation::STRINGLIST:
            {
              vector<std::string> arg_list(queue.begin(), queue.end());
              value = arg_list;
              queue.clear();
              break;
            }
            case ParameterInformation::INTLIST:
            {
              IntList arg_list;
              for (list<String>::iterator it = queue.begin(); it != queue.end(); ++it)
              {
                arg_list.push_back(it->toInt());
              }
              value = arg_list;
              queue.clear();
              break;
            }
            case ParameterInformation::DOUBLELIST:
            {
              DoubleList arg_list;
              for (list<String>::iterator it = queue.begin(); it != queue.end(); ++it)
              {
                arg_list.push_back(it->toDouble());
              }
              value = arg_list;
              queue.clear();
              break;
            }
            default:
              break;
            }
            if (!queue.empty())
              queue.pop_front(); // argument was already used
          }
          OPENMS_LOG_DEBUG << "Command line: setting parameter value: '" << pos->second->name << "' to '" << value << "'" << std::endl;
          cmd_params.setValue(pos->second->name, value);
        }
        else // unknown argument -> append to "unknown" list
        {
          misc_unknown[unknown].push_back(arg);
        }
        // rest of the queue is just text -> insert into "misc" list:
        std::vector<std::string>& misc_list = misc_unknown[misc];
        misc_list.insert(misc_list.begin(), queue.begin(), queue.end());
        queue.clear();
      }
      else // more arguments
      {
        queue.push_front(arg); // order in the queue is not reversed!
      }
    }
    // remaining items in the queue are leading text arguments:
    std::vector<std::string>& misc_list = misc_unknown[misc];
    misc_list.insert(misc_list.begin(), queue.begin(), queue.end());
    // store "misc"/"unknown" items, if there were any:
    for (map<std::string, std::vector<std::string> >::iterator it = misc_unknown.begin();
         it != misc_unknown.end(); ++it)
    {
      if (it->second.empty())
        continue;
      // merge with any value already present under the same key
      if (!cmd_params.exists(it->first))
      {
        cmd_params.setValue(it->first, it->second);
      }
      else
      {
        std::vector<std::string> new_value = cmd_params.getValue(it->first);
        new_value.insert(new_value.end(), it->second.begin(), it->second.end());
        cmd_params.setValue(it->first, new_value);
      }
    }
    return cmd_params;
  }
} // namespace OpenMS
| C++ |
3D | OpenMS/OpenMS | src/openms/source/PROCESSING/FEATURE/FeatureOverlapFilter.cpp | .cpp | 18,749 | 527 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Julianus Pfeuffer $
// $Authors: Julianus Pfeuffer $
// --------------------------------------------------------------------------
#include <OpenMS/PROCESSING/FEATURE/FeatureOverlapFilter.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <Quadtree.h>
#include <Box.h>
#include <unordered_set>
#include <cmath>
namespace OpenMS
{
  /// Boundaries for a mass trace in a feature
  struct MassTraceBounds
  {
    Size sub_index; ///< index of the subordinate (mass trace) within its feature
    double rt_min, rt_max, mz_min, mz_max; ///< RT/m-z bounding box of the trace
  };
  /// Boundaries for all mass traces per feature, keyed by the feature's unique ID
  using FeatureBoundsMap = std::map<UInt64, std::vector<MassTraceBounds>>;
  /// Get bounding boxes for all mass traces in all features of a feature map.
  /// Features whose traces are all degenerate (empty hulls / no non-zero
  /// intensity) may end up WITHOUT an entry in the returned map.
  FeatureBoundsMap getFeatureBounds(const FeatureMap& features)
  {
    FeatureBoundsMap feature_bounds;
    for (const auto& feat : features)
    {
      // one bounds entry per subordinate feature (= mass trace)
      for (Size i = 0; i < feat.getSubordinates().size(); ++i)
      {
        MassTraceBounds mtb;
        mtb.sub_index = i;
        // m/z range comes from the i-th convex hull of the parent feature
        // (assumes hull points are ordered so front/back give min/max m/z -- TODO confirm)
        const ConvexHull2D::PointArrayType& points =
          feat.getConvexHulls()[i].getHullPoints();
        mtb.mz_min = points.front().getY();
        mtb.mz_max = points.back().getY();
        const Feature& sub = feat.getSubordinates()[i];
        // convex hulls should be written out by "MRMFeatureFinderScoring" (see
        // parameter "write_convex_hull"):
        if (sub.getConvexHulls().empty())
        {
          String error = "convex hulls for mass traces missing";
          throw Exception::MissingInformation(__FILE__, __LINE__,
                                              OPENMS_PRETTY_FUNCTION, error);
        }
        const ConvexHull2D& hull = sub.getConvexHulls()[0];
        // find beginning of mass trace (non-zero intensity):
        if (hull.getHullPoints().empty())
        {
          continue; // degenerate hull -> no bounds recorded for this trace
        }
        // fallback: last point's RT, in case no point has non-zero intensity
        double rt_min = hull.getHullPoints().back().getX();
        for (auto p_it = hull.getHullPoints().begin(); p_it != hull.getHullPoints().end(); ++p_it)
        {
          if (p_it->getY() > 0)
          {
            rt_min = p_it->getX();
            break;
          }
        }
        // find end of mass trace (non-zero intensity), scanning backwards:
        double rt_max = hull.getHullPoints().front().getX();
        for (auto p_it =
               hull.getHullPoints().rbegin(); p_it !=
               hull.getHullPoints().rend(); ++p_it)
        {
          // passed the trace start found above -> nothing with intensity left
          if (p_it->getX() < rt_min)
          {
            break;
          }
          if (p_it->getY() > 0)
          {
            rt_max = p_it->getX();
            break;
          }
        }
        if (rt_min > rt_max)
        {
          continue; // no peak -> skip
        }
        mtb.rt_min = rt_min;
        mtb.rt_max = rt_max;
        feature_bounds[feat.getUniqueId()].push_back(std::move(mtb));
      }
    }
    return feature_bounds;
  }
/// Check if two sets of mass trace boundaries overlap
bool hasOverlappingBounds(const std::vector<MassTraceBounds>& mtb1, const std::vector<MassTraceBounds>& mtb2)
{
for (const MassTraceBounds& mt1 : mtb1)
{
for (const MassTraceBounds& mt2 : mtb2)
{
if (!((mt1.rt_max < mt2.rt_min) ||
(mt1.rt_min > mt2.rt_max) ||
(mt1.mz_max < mt2.mz_min) ||
(mt1.mz_min > mt2.mz_max)))
{
return true;
}
}
}
return false;
}
bool tracesOverlap(const Feature& a, const Feature& b, const FeatureBoundsMap& feature_bounds)
{
auto fbm_it1 = feature_bounds.find(a.getUniqueId());
auto fbm_it2 = feature_bounds.find(b.getUniqueId());
return hasOverlappingBounds(fbm_it1->second, fbm_it2->second);
}
void FeatureOverlapFilter::filter(FeatureMap& fmap,
std::function<bool(const Feature&, const Feature&)> FeatureComparator,
std::function<bool(Feature&, Feature&)> FeatureOverlapCallback,
bool check_overlap_at_trace_level)
{
// Delegate to the new overload with appropriate mode
FeatureOverlapMode mode = check_overlap_at_trace_level ? FeatureOverlapMode::TRACE_LEVEL : FeatureOverlapMode::CONVEX_HULL;
CentroidTolerances tolerances; // Use default values
filter(fmap, FeatureComparator, FeatureOverlapCallback, mode, tolerances);
}
  /**
    @brief Detect overlapping features and remove the losers.

    Features are sorted best-first with @p FeatureComparator, indexed in a
    quadtree, and each surviving feature queries its neighbors. Depending on
    @p mode the candidate overlap is confirmed via centroid distance
    (CENTROID_BASED), mass-trace intersection (TRACE_LEVEL) or accepted as-is
    (CONVEX_HULL). @p FeatureOverlapCallback decides whether the overlapping
    (worse) feature is actually removed and may transfer data to the keeper.
  */
  void FeatureOverlapFilter::filter(FeatureMap& fmap,
                                    std::function<bool(const Feature&, const Feature&)> FeatureComparator,
                                    std::function<bool(Feature&, Feature&)> FeatureOverlapCallback,
                                    FeatureOverlapMode mode,
                                    const CentroidTolerances& tolerances)
  {
    fmap.updateRanges();
    // Sort all features according to the comparator. After the sort, the "smallest" == best feature will be the first entry we will start processing with...
    std::stable_sort(fmap.begin(), fmap.end(), FeatureComparator);
    // Define getBox function based on mode
    std::function<quadtree::Box<float>(const Feature*)> getBox;
    if (mode == FeatureOverlapMode::CENTROID_BASED)
    {
      // For centroid-based mode, create tolerance boxes around centroids
      getBox = [&tolerances](const Feature* f)
      {
        float rt = f->getRT();
        float mz = f->getMZ();
        // box is centered on the centroid, extending +/- tolerance in each dim
        return quadtree::Box<float>(
          mz - tolerances.mz_tolerance,
          rt - tolerances.rt_tolerance,
          2 * tolerances.mz_tolerance,
          2 * tolerances.rt_tolerance
        );
      };
    }
    else
    {
      // For convex hull/trace modes, use full convex hull bounding boxes
      getBox = [](const Feature* f)
      {
        // note: quadtree boxes are (left, top, width, height) with m/z as x and RT as y
        const auto& bb = f->getConvexHull().getBoundingBox();
        return quadtree::Box<float>(bb.minY(), bb.minX(), bb.maxY()-bb.minY(), bb.maxX()-bb.minX());
      };
    }
    float minMZ = fmap.getMinMZ();
    float maxMZ = fmap.getMaxMZ();
    float minRT = fmap.getMinRT();
    float maxRT = fmap.getMaxRT();
    // Expand boundaries for centroid mode to accommodate tolerance boxes
    if (mode == FeatureOverlapMode::CENTROID_BASED)
    {
      minMZ -= tolerances.mz_tolerance;
      maxMZ += tolerances.mz_tolerance;
      minRT -= tolerances.rt_tolerance;
      maxRT += tolerances.rt_tolerance;
    }
    // Build quadtree with all features (+/- 1 margin so edge boxes fit inside)
    quadtree::Box<float> fullExp(minMZ-1, minRT-1, maxMZ-minMZ+2, maxRT-minRT+2);
    auto quadtree = quadtree::Quadtree<Feature*, decltype(getBox)>(fullExp, getBox);
    for (auto& f : fmap)
    {
      quadtree.add(&f);
    }
    // If we check for overlapping traces we need a faster lookup structure
    FeatureBoundsMap fbm;
    if (mode == FeatureOverlapMode::TRACE_LEVEL)
    {
      fbm = getFeatureBounds(fmap);
    }
    std::unordered_set<Size> removed_uids;
    // iterate best-first: already-removed features do not get to remove others
    for (auto& f : fmap)
    {
      if (removed_uids.count(f.getUniqueId()) == 0)
      {
        for (auto& overlap : quadtree.query(getBox(&f)))
        {
          if ((overlap != &f))
          {
            bool is_true_overlap = true;
            if (mode == FeatureOverlapMode::CENTROID_BASED)
            {
              // Check charge requirement
              if (tolerances.require_same_charge && f.getCharge() != overlap->getCharge())
              {
                is_true_overlap = false;
              }
              // Check FAIMS CV requirement
              else if (tolerances.require_same_im)
              {
                bool f_has_im = f.metaValueExists(Constants::UserParam::FAIMS_CV);
                bool overlap_has_im = overlap->metaValueExists(Constants::UserParam::FAIMS_CV);
                if (f_has_im != overlap_has_im)
                {
                  // One has FAIMS CV, the other doesn't - not same group
                  is_true_overlap = false;
                }
                else if (f_has_im && overlap_has_im)
                {
                  // Both have FAIMS CV - must match
                  double f_cv = f.getMetaValue(Constants::UserParam::FAIMS_CV);
                  double overlap_cv = overlap->getMetaValue(Constants::UserParam::FAIMS_CV);
                  if (f_cv != overlap_cv)
                  {
                    is_true_overlap = false;
                  }
                }
                // else: both don't have FAIMS CV - same group, continue to distance check
                if (is_true_overlap)
                {
                  // Check exact centroid distances within tolerance
                  double rt_diff = std::abs(f.getRT() - overlap->getRT());
                  double mz_diff = std::abs(f.getMZ() - overlap->getMZ());
                  is_true_overlap = (rt_diff <= tolerances.rt_tolerance && mz_diff <= tolerances.mz_tolerance);
                }
              }
              else
              {
                // Check exact centroid distances within tolerance
                double rt_diff = std::abs(f.getRT() - overlap->getRT());
                double mz_diff = std::abs(f.getMZ() - overlap->getMZ());
                is_true_overlap = (rt_diff <= tolerances.rt_tolerance && mz_diff <= tolerances.mz_tolerance);
              }
            }
            else if (mode == FeatureOverlapMode::TRACE_LEVEL)
            {
              is_true_overlap = tracesOverlap(f, *overlap, fbm);
            }
            // For CONVEX_HULL mode, is_true_overlap remains true (quadtree query already handles overlap)
            if (is_true_overlap)
            {
              // callback allows to e.g., transfer information from the to-be-removed feature to the representative feature
              // if the callback returns false, overlap will not be removed (at least not because of an overlap with f)
              if (FeatureOverlapCallback(f, *overlap))
              {
                removed_uids.insert(overlap->getUniqueId());
              }
            }
          }
        }
      }
    }
    // finally erase all features whose unique ID was marked for removal
    const auto filtered = [&removed_uids](const Feature& f)
    {
      return removed_uids.count(f.getUniqueId()) == 1;
    };
    fmap.erase(std::remove_if(fmap.begin(), fmap.end(), filtered), fmap.end());
  }
  /**
    @brief Create an overlap callback that merges feature @p f into
    @p best_in_cluster.

    Intensities are combined according to @p intensity_mode (SUM by default,
    or MAX). If @p write_meta_values is set, the merged centroid RT/mz
    positions and FAIMS CV values are accumulated in meta values on the keeper
    ("merged_centroid_rts", "merged_centroid_mzs", "merged_centroid_IMs",
    "FAIMS_merge_count"); the keeper's own FAIMS_CV meta value is moved into
    the merged list on first merge.

    @return Callback that always returns true (the overlapping feature is removed)
  */
  std::function<bool(Feature&, Feature&)> FeatureOverlapFilter::createFAIMSMergeCallback(
    MergeIntensityMode intensity_mode,
    bool write_meta_values)
  {
    return [intensity_mode, write_meta_values](Feature& best_in_cluster, Feature& f) -> bool
    {
      double best_intensity = best_in_cluster.getIntensity();
      double f_intensity = f.getIntensity();
      if (write_meta_values)
      {
        // Collect centroid RT positions (seed the list with the keeper's RT on first merge)
        std::vector<double> merged_rts;
        if (best_in_cluster.metaValueExists("merged_centroid_rts"))
        {
          merged_rts = best_in_cluster.getMetaValue("merged_centroid_rts");
        }
        else
        {
          merged_rts.push_back(best_in_cluster.getRT());
        }
        merged_rts.push_back(f.getRT());
        best_in_cluster.setMetaValue("merged_centroid_rts", merged_rts);
        // Collect centroid m/z positions
        std::vector<double> merged_mzs;
        if (best_in_cluster.metaValueExists("merged_centroid_mzs"))
        {
          merged_mzs = best_in_cluster.getMetaValue("merged_centroid_mzs");
        }
        else
        {
          merged_mzs.push_back(best_in_cluster.getMZ());
        }
        merged_mzs.push_back(f.getMZ());
        best_in_cluster.setMetaValue("merged_centroid_mzs", merged_mzs);
        // Collect FAIMS CV values (only if present on features)
        std::vector<double> merged_ims;
        if (best_in_cluster.metaValueExists("merged_centroid_IMs"))
        {
          merged_ims = best_in_cluster.getMetaValue("merged_centroid_IMs");
        }
        else if (best_in_cluster.metaValueExists(Constants::UserParam::FAIMS_CV))
        {
          // first merge: move the keeper's own CV into the merged list
          merged_ims.push_back(best_in_cluster.getMetaValue(Constants::UserParam::FAIMS_CV));
          best_in_cluster.removeMetaValue(Constants::UserParam::FAIMS_CV);
        }
        if (f.metaValueExists(Constants::UserParam::FAIMS_CV))
        {
          merged_ims.push_back(f.getMetaValue(Constants::UserParam::FAIMS_CV));
        }
        if (!merged_ims.empty())
        {
          best_in_cluster.setMetaValue("merged_centroid_IMs", merged_ims);
          best_in_cluster.setMetaValue("FAIMS_merge_count", static_cast<int>(merged_ims.size()));
        }
      }
      // Combine intensities according to mode
      double new_intensity = best_intensity + f_intensity; // default: SUM
      if (intensity_mode == MergeIntensityMode::MAX)
      {
        new_intensity = std::max(best_intensity, f_intensity);
      }
      best_in_cluster.setIntensity(new_intensity);
      return true; // Remove the overlapping feature
    };
  }
void FeatureOverlapFilter::mergeOverlappingFeatures(FeatureMap& feature_map,
double max_rt_diff,
double max_mz_diff,
bool require_same_charge,
bool require_same_im,
MergeIntensityMode intensity_mode,
bool write_meta_values)
{
CentroidTolerances tolerances;
tolerances.rt_tolerance = max_rt_diff;
tolerances.mz_tolerance = max_mz_diff;
tolerances.require_same_charge = require_same_charge;
tolerances.require_same_im = require_same_im;
// Use intensity-based comparator (higher intensity = better = "smaller" in sort order)
auto intensity_comparator = [](const Feature& left, const Feature& right)
{
return left.getIntensity() > right.getIntensity();
};
filter(feature_map,
intensity_comparator,
createFAIMSMergeCallback(intensity_mode, write_meta_values),
FeatureOverlapMode::CENTROID_BASED,
tolerances);
}
  /**
    @brief Merge features measured at DIFFERENT FAIMS compensation voltages.

    Features carrying a FAIMS_CV meta value are separated from the rest,
    merged (same charge, centroid within the given RT/mz tolerances, different
    CV), and then recombined with the untouched non-FAIMS features. Features
    with the SAME CV are deliberately never merged. A no-op if no feature has
    a FAIMS_CV meta value.
  */
  void FeatureOverlapFilter::mergeFAIMSFeatures(FeatureMap& feature_map,
                                                double max_rt_diff,
                                                double max_mz_diff)
  {
    // Check if any features have FAIMS_CV - if not, nothing to do
    bool has_faims_features = false;
    for (const auto& f : feature_map)
    {
      if (f.metaValueExists(Constants::UserParam::FAIMS_CV))
      {
        has_faims_features = true;
        break;
      }
    }
    if (!has_faims_features)
    {
      return; // No FAIMS features, nothing to merge
    }
    // Separate features into FAIMS and non-FAIMS groups
    FeatureMap faims_features;
    FeatureMap non_faims_features;
    for (auto& f : feature_map)
    {
      if (f.metaValueExists(Constants::UserParam::FAIMS_CV))
      {
        faims_features.push_back(std::move(f));
      }
      else
      {
        non_faims_features.push_back(std::move(f));
      }
    }
    // Only merge the FAIMS features if we have more than one
    if (faims_features.size() > 1)
    {
      CentroidTolerances tolerances;
      tolerances.rt_tolerance = max_rt_diff;
      tolerances.mz_tolerance = max_mz_diff;
      tolerances.require_same_charge = true;
      tolerances.require_same_im = false; // We handle IM check in callback
      // Custom callback that only merges features with DIFFERENT FAIMS CV values
      auto merge_callback = [](Feature& best_in_cluster, Feature& f) -> bool
      {
        // After a merge, FAIMS_CV is removed and replaced with merged_centroid_IMs.
        // If the best feature was already merged, skip (can't merge again without CV).
        if (!best_in_cluster.metaValueExists(Constants::UserParam::FAIMS_CV) ||
            !f.metaValueExists(Constants::UserParam::FAIMS_CV))
        {
          return false;
        }
        // Only merge if FAIMS CVs are DIFFERENT
        // (same CV features should not be merged - they are different analytes)
        double best_cv = best_in_cluster.getMetaValue(Constants::UserParam::FAIMS_CV);
        double f_cv = f.getMetaValue(Constants::UserParam::FAIMS_CV);
        if (best_cv == f_cv)
        {
          return false; // Don't merge features with same CV
        }
        // Merge features with different CVs - sum intensities
        double best_intensity = best_in_cluster.getIntensity();
        double f_intensity = f.getIntensity();
        // Collect centroid RT positions (seed with the keeper's RT on first merge)
        std::vector<double> merged_rts;
        if (best_in_cluster.metaValueExists("merged_centroid_rts"))
        {
          merged_rts = best_in_cluster.getMetaValue("merged_centroid_rts");
        }
        else
        {
          merged_rts.push_back(best_in_cluster.getRT());
        }
        merged_rts.push_back(f.getRT());
        best_in_cluster.setMetaValue("merged_centroid_rts", merged_rts);
        // Collect centroid m/z positions
        std::vector<double> merged_mzs;
        if (best_in_cluster.metaValueExists("merged_centroid_mzs"))
        {
          merged_mzs = best_in_cluster.getMetaValue("merged_centroid_mzs");
        }
        else
        {
          merged_mzs.push_back(best_in_cluster.getMZ());
        }
        merged_mzs.push_back(f.getMZ());
        best_in_cluster.setMetaValue("merged_centroid_mzs", merged_mzs);
        // Collect FAIMS CV values; the keeper's own CV meta value is consumed here
        std::vector<double> merged_ims;
        if (best_in_cluster.metaValueExists("merged_centroid_IMs"))
        {
          merged_ims = best_in_cluster.getMetaValue("merged_centroid_IMs");
        }
        else
        {
          merged_ims.push_back(best_cv);
          best_in_cluster.removeMetaValue(Constants::UserParam::FAIMS_CV);
        }
        merged_ims.push_back(f_cv);
        best_in_cluster.setMetaValue("merged_centroid_IMs", merged_ims);
        best_in_cluster.setMetaValue("FAIMS_merge_count", static_cast<int>(merged_ims.size()));
        // Sum intensities
        best_in_cluster.setIntensity(best_intensity + f_intensity);
        return true; // Remove the merged feature
      };
      // Use intensity-based comparator (most intense feature survives)
      auto intensity_comparator = [](const Feature& left, const Feature& right)
      {
        return left.getIntensity() > right.getIntensity();
      };
      filter(faims_features,
             intensity_comparator,
             merge_callback,
             FeatureOverlapMode::CENTROID_BASED,
             tolerances);
    }
    // Combine back: merged FAIMS features + untouched non-FAIMS features
    feature_map.clear();
    for (auto& f : faims_features)
    {
      feature_map.push_back(std::move(f));
    }
    for (auto& f : non_faims_features)
    {
      feature_map.push_back(std::move(f));
    }
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/PROCESSING/CALIBRATION/PrecursorCorrection.cpp | .cpp | 17,196 | 430 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
//
#include <OpenMS/PROCESSING/CALIBRATION/PrecursorCorrection.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/METADATA/Precursor.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <iomanip>
#include <fstream>
using namespace std;
using namespace OpenMS;
namespace OpenMS
{
const std::string PrecursorCorrection::csv_header = "RT,uncorrectedMZ,correctedMZ,deltaMZ";
  /**
    @brief Collect all precursors of all spectra in @p exp.

    'precursors' and 'precursors_rt' are parallel vectors (one entry per
    precursor); each precursor is paired with the RT of its fragment spectrum.
    NOTE(review): 'precursor_scan_index' receives one entry per spectrum that
    has precursors (not one per precursor), so it is NOT parallel to the other
    two outputs when a spectrum carries multiple precursors -- verify callers.
  */
  void PrecursorCorrection::getPrecursors(const MSExperiment & exp,
                                          vector<Precursor> & precursors,
                                          vector<double> & precursors_rt,
                                          vector<Size> & precursor_scan_index)
  {
    for (Size i = 0; i != exp.size(); ++i)
    {
      const vector<Precursor>& pcs = exp[i].getPrecursors();
      if (pcs.empty()) { continue; } // e.g. MS1 spectra have no precursors
      // every precursor of this spectrum shares the spectrum's RT
      vector<double> pcs_rt(pcs.size(), exp[i].getRT());
      copy(pcs.begin(), pcs.end(), back_inserter(precursors));
      copy(pcs_rt.begin(), pcs_rt.end(), back_inserter(precursors_rt));
      precursor_scan_index.push_back(i);
    }
  }
void PrecursorCorrection::writeHist(const String& out_csv,
const vector<double> & delta_mzs,
const vector<double> & mzs,
const vector<double> & rts)
{
//cout << "writing data" << endl;
ofstream csv_file(out_csv.c_str());
csv_file << setprecision(9);
// header
csv_file << ListUtils::concatenate(ListUtils::create<String>(PrecursorCorrection::csv_header), "\t") << "\n";
// entries
for (vector<double>::const_iterator it = delta_mzs.begin(); it != delta_mzs.end(); ++it)
{
UInt index = it - delta_mzs.begin();
csv_file << rts[index] << "\t" << mzs[index] << "\t" << mzs[index] + *it << "\t" << *it << "\n";
}
csv_file.close();
}
  /**
    @brief Correct each precursor m/z to the nearest peak in its parent MS1 spectrum.

    For every precursor whose nearest MS1 peak lies within @p mz_tolerance
    (interpreted in ppm if @p ppm is true, else in Th), the precursor m/z is
    replaced by the peak position. The applied shifts are appended to
    @p delta_mzs / @p mzs / @p rts (parallel vectors).

    @return Indices of the MS2 spectra whose precursor was corrected
  */
  set<Size> PrecursorCorrection::correctToNearestMS1Peak(MSExperiment & exp,
                                                         double mz_tolerance,
                                                         bool ppm,
                                                         vector<double> & delta_mzs,
                                                         vector<double> & mzs,
                                                         vector<double> & rts)
  {
    set<Size> corrected_precursors;
    // load experiment and extract precursors
    vector<Precursor> precursors; // precursor
    vector<double> precursors_rt; // RT of precursor MS2 spectrum
    vector<Size> precursor_scan_index;
    getPrecursors(exp, precursors, precursors_rt, precursor_scan_index);
    for (Size i = 0; i != precursors_rt.size(); ++i)
    {
      // get precursor rt
      double rt = precursors_rt[i];
      // get precursor MZ
      double mz = precursors[i].getMZ();
      //cout << rt << " " << mz << endl;
      // get precursor spectrum (epsilon guards against floating-point RT jitter)
      MSExperiment::ConstIterator rt_it = exp.RTBegin(rt - 1e-8);
      // store index of MS2 spectrum
      UInt precursor_spectrum_idx = rt_it - exp.begin();
      // get parent (MS1) of precursor spectrum
      rt_it = exp.getPrecursorSpectrum(rt_it);
      if (rt_it == exp.end()
       || rt_it->getMSLevel() != 1)
      {
        OPENMS_LOG_WARN << "Warning: no MS1 spectrum for this precursor" << endl;
        continue;
      }
      //cout << rt_it->getRT() << " " << rt_it->size() << endl;
      // find peak (index) closest to expected position
      Size nearest_peak_idx = rt_it->findNearest(mz);
      // get actual position of closest peak
      double nearest_peak_mz = (*rt_it)[nearest_peak_idx].getMZ();
      // calculate error between expected and actual position
      double nearestPeakError = ppm ? abs(nearest_peak_mz - mz)/mz * 1e6 : abs(nearest_peak_mz - mz);
      // check if error is small enough
      if (nearestPeakError < mz_tolerance)
      {
        // sanity check: do we really have the same precursor in the original and the picked spectrum
        // NOTE(review): only precursor [0] is compared and later overwritten;
        // spectra with multiple precursors are not fully handled -- confirm.
        if (fabs(exp[precursor_spectrum_idx].getPrecursors()[0].getMZ() - mz) > 0.0001)
        {
          OPENMS_LOG_WARN << "Error: index is referencing different precursors in original and picked spectrum." << endl;
        }
        // cout << mz << " -> " << nearest_peak_mz << endl;
        double delta_mz = nearest_peak_mz - mz;
        delta_mzs.push_back(delta_mz);
        mzs.push_back(mz);
        rts.push_back(rt);
        // correct entries
        Precursor corrected_prec = precursors[i];
        corrected_prec.setMZ(nearest_peak_mz);
        exp[precursor_spectrum_idx].getPrecursors()[0] = corrected_prec;
        corrected_precursors.insert(precursor_spectrum_idx);
      }
    }
    return corrected_precursors;
  }
  //Selection of the peak with the highest intensity as corrected precursor mass in a given mass range (e.g. precursor mass +/- 0.2 Da)
  /**
    @brief Correct each precursor to the highest-intensity MS1 peak within a
    tolerance window around the reported precursor m/z.

    In contrast to correctToNearestMS1Peak(), the precursor intensity is also
    updated to the intensity of the selected peak. Precursors without any MS1
    peak in the window are left unchanged (counted and reported once at the end).

    @return Indices of the MS2 spectra whose precursor was corrected
  */
  set<Size> PrecursorCorrection::correctToHighestIntensityMS1Peak(MSExperiment & exp,
                                                                  double mz_tolerance,
                                                                  bool ppm,
                                                                  vector<double> & delta_mzs,
                                                                  vector<double> & mzs,
                                                                  vector<double> & rts)
  {
    set<Size> corrected_precursors;
    // load experiment and extract precursors
    vector<Precursor> precursors; // precursor
    vector<double> precursors_rt; // RT of precursor MS2 spectrum
    vector<Size> precursor_scan_index;
    getPrecursors(exp, precursors, precursors_rt, precursor_scan_index);
    int count_error_highest_intenstiy = 0;
    for (Size i = 0; i != precursors_rt.size(); ++i)
    {
      double rt = precursors_rt[i]; // get precursor rt
      double mz = precursors[i].getMZ(); // get precursor MZ
      // retrieves iterator of the MS2 fragment spectrum
      MSExperiment::ConstIterator rt_it = exp.RTBegin(rt - 1e-8);
      // store index of MS2 spectrum
      UInt precursor_spectrum_idx = rt_it - exp.begin();
      // get parent (MS1) of precursor spectrum
      rt_it = exp.getPrecursorSpectrum(rt_it);
      if (rt_it == exp.end()
       || rt_it->getMSLevel() != 1)
      {
        OPENMS_LOG_WARN << "Warning: no MS1 spectrum for this precursor" << endl;
        continue;
      }
      // get tolerance window and index of highest peak
      std::pair<double,double> tolerance_window = Math::getTolWindow(mz, mz_tolerance, ppm);
      int highest_peak_idx = rt_it->findHighestInWindow(mz, mz-tolerance_window.first, tolerance_window.second-mz);
      // no MS1 precursor peak in +- tolerance window found
      if (highest_peak_idx == -1)
      {
        count_error_highest_intenstiy += 1;
        continue;
      }
      // get actual position and intensity of highest intensity peak
      double highest_peak_mz = (*rt_it)[highest_peak_idx].getMZ();
      double highest_peak_int = (*rt_it)[highest_peak_idx].getIntensity();
      // cout << mz << " -> " << nearest_peak_mz << endl;
      double delta_mz = highest_peak_mz - mz;
      delta_mzs.push_back(delta_mz);
      mzs.push_back(mz);
      rts.push_back(rt);
      // correct entries (m/z AND intensity are taken from the selected peak)
      Precursor corrected_prec = precursors[i];
      corrected_prec.setMZ(highest_peak_mz);
      corrected_prec.setIntensity(highest_peak_int);
      exp[precursor_spectrum_idx].getPrecursors()[0] = corrected_prec;
      corrected_precursors.insert(precursor_spectrum_idx);
    }
    if (count_error_highest_intenstiy != 0)
    {
      OPENMS_LOG_INFO << "Correction to the highest intensity peak failed "
                      << count_error_highest_intenstiy
                      << " times because of missing peaks in the MS1. No changes were applied in these cases."
                      << std::endl;
    }
    return corrected_precursors;
  }
set<Size> PrecursorCorrection::correctToNearestFeature(const FeatureMap& features,
MSExperiment & exp,
double rt_tolerance_s,
double mz_tolerance,
bool ppm,
bool believe_charge,
bool keep_original,
bool all_matching_features,
int max_trace,
int debug_level)
{
set<Size> corrected_precursors;
// for each precursor/MS2 find all features that are in the given tolerance window (bounding box + rt tolerances)
// if believe_charge is set, only add features that match the precursor charge
map<Size, set<Size> > scan_idx_to_feature_idx;
size_t overlap_checks(0);
for (Size scan = 0; scan != exp.size(); ++scan)
{
// skip non-tandem mass spectra
if (exp[scan].getMSLevel() != 2 || exp[scan].getPrecursors().empty()) continue;
// extract precursor / MS2 information
const double pc_mz = exp[scan].getPrecursors()[0].getMZ();
const double rt = exp[scan].getRT();
const int pc_charge = exp[scan].getPrecursors()[0].getCharge();
for (Size f = 0; f != features.size(); ++f)
{
// feature is incompatible if believe_charge is set and charges don't match
if (believe_charge && features[f].getCharge() != pc_charge)
{
continue;
}
// check if precursor/MS2 position overlap with feature
if (overlaps_(features[f], rt, pc_mz, rt_tolerance_s))
{
scan_idx_to_feature_idx[scan].insert(f);
}
++overlap_checks;
}
}
if (debug_level > 0)
{
OPENMS_LOG_INFO << "Total number of overlap checks: " << overlap_checks << endl;
OPENMS_LOG_INFO << "Number of precursors with overlapping features: " << scan_idx_to_feature_idx.size() << endl;
}
// filter sets to retain compatible features:
// if precursor_mz = feature_mz + n * feature_charge (+/- mz_tolerance) a feature is compatible, others are removed from the set
for (map<Size, set<Size> >::iterator it = scan_idx_to_feature_idx.begin(); it != scan_idx_to_feature_idx.end(); ++it)
{
const Size scan = it->first;
const double pc_mz = exp[scan].getPrecursors()[0].getMZ();
const double mz_tolerance_da = ppm ? pc_mz * mz_tolerance * 1e-6 : mz_tolerance;
// Note: This is the "delete while iterating" pattern so mind the pre- and postincrement
for (set<Size>::iterator sit = it->second.begin(); sit != it->second.end(); )
{
if (!compatible_(features[*sit], pc_mz, mz_tolerance_da, max_trace))
{
it->second.erase(sit++);
}
else
{
++sit;
}
}
}
// remove entries with no compatible features (empty sets).
// Note: This is the "delete while iterating" pattern so mind the pre- and postincrement
for (map<Size, set<Size> >::iterator it = scan_idx_to_feature_idx.begin(); it != scan_idx_to_feature_idx.end(); )
{
if (it->second.empty())
{
scan_idx_to_feature_idx.erase(it++);
}
else
{
++it;
}
}
if (debug_level > 0)
{
OPENMS_LOG_INFO << "Number of precursors with compatible features: " << scan_idx_to_feature_idx.size() << endl;
}
if (!all_matching_features)
{
// keep only nearest features in set
for (map<Size, set<Size> >::iterator it = scan_idx_to_feature_idx.begin(); it != scan_idx_to_feature_idx.end(); ++it)
{
const Size scan = it->first;
const double pc_rt = exp[scan].getRT();
double min_distance = 1e16;
set<Size>::iterator best_feature = it->second.begin();
// determine nearest/best feature
for (set<Size>::iterator sit = it->second.begin(); sit != it->second.end(); ++sit)
{
const double current_distance = fabs(pc_rt - features[*sit].getRT());
if (current_distance < min_distance)
{
min_distance = current_distance;
best_feature = sit;
}
}
// delete all except the nearest/best feature
// Note: This is the "delete while iterating" pattern so mind the pre- and postincrement
for (set<Size>::iterator sit = it->second.begin(); sit != it->second.end(); )
{
if (sit != best_feature)
{
it->second.erase(sit++);
}
else
{
++sit;
}
}
}
}
// depending on all_matching_features option, only the nearest or all features are contained in the sets
// depending on options: move/copy corrected precursor and tandem spectrum
if (keep_original)
{
// duplicate spectra for each feature in set and adapt precursor_mz and precursor_charge to feature_mz and feature_charge
for (map<Size, set<Size> >::iterator it = scan_idx_to_feature_idx.begin(); it != scan_idx_to_feature_idx.end(); ++it)
{
const Size scan = it->first;
MSSpectrum spectrum = exp[scan];
corrected_precursors.insert(scan);
for (set<Size>::iterator f_it = it->second.begin(); f_it != it->second.end(); ++f_it)
{
spectrum.getPrecursors()[0].setMZ(features[*f_it].getMZ());
spectrum.getPrecursors()[0].setCharge(features[*f_it].getCharge());
exp.addSpectrum(spectrum);
}
}
}
else
{
// set precursor_mz and _charge to the feature_mz and _charge
for (map<Size, set<Size> >::iterator it = scan_idx_to_feature_idx.begin(); it != scan_idx_to_feature_idx.end(); ++it)
{
const Size scan = it->first;
exp[scan].getPrecursors()[0].setMZ(features[*it->second.begin()].getMZ());
exp[scan].getPrecursors()[0].setCharge(features[*it->second.begin()].getCharge());
corrected_precursors.insert(scan);
}
}
return corrected_precursors;
}
bool PrecursorCorrection::overlaps_(const Feature& feature,
const double rt,
const double pc_mz,
const double rt_tolerance)
{
if (feature.getConvexHulls().empty())
{
OPENMS_LOG_WARN << "HighResPrecursorMassCorrector warning: at least one feature has no convex hull - omitting feature for matching" << std::endl;
}
// get bounding box and extend by retention time tolerance
DBoundingBox<2> box = feature.getConvexHull().getBoundingBox();
DPosition<2> extend_rt(rt_tolerance, 0.01);
box.setMin(box.minPosition() - extend_rt);
box.setMax(box.maxPosition() + extend_rt);
DPosition<2> pc_pos(rt, pc_mz);
if (box.encloses(pc_pos))
{
return true;
}
else
{
return false;
}
}
bool PrecursorCorrection::compatible_(const Feature& feature,
double pc_mz,
double mz_tolerance,
Size max_trace_number,
int debug_level)
{
const int f_charge = feature.getCharge();
const double f_mz = feature.getMZ();
double trace = Math::round((pc_mz - f_mz) / (Constants::C13C12_MASSDIFF_U / f_charge)); // isotopic trace number at precursor mz
double mass_error = fabs(pc_mz - (f_mz + trace * (Constants::C13C12_MASSDIFF_U / f_charge)));
if (mass_error < mz_tolerance && (trace < max_trace_number + 0.01))
{
if (debug_level > 1)
{
OPENMS_LOG_INFO << "trace: " << (int)(trace + 0.5) << " feature_rt:" << feature.getRT() << " feature_mz:" << feature.getMZ() << " precursor_mz:" << pc_mz << endl;
}
return true;
}
else
{
return false;
}
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/PROCESSING/CALIBRATION/MZTrafoModel.cpp | .cpp | 12,035 | 391 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/PROCESSING/CALIBRATION/MZTrafoModel.h>
#include <OpenMS/ML/REGRESSION/LinearRegression.h>
#include <OpenMS/ML/REGRESSION/QuadraticRegression.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/ML/RANSAC/RANSACModelQuadratic.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <OpenMS/CONCEPT/LogStream.h>
namespace OpenMS
{
MZTrafoModel::MZTrafoModel()
: coeff_(),
use_ppm_(true),
rt_(std::numeric_limits<double>::quiet_NaN())
{
}
  // Construct an untrained model.
  // @param ppm_model If true, calibrant errors are interpreted as ppm; otherwise as absolute mass differences [Th].
  MZTrafoModel::MZTrafoModel(bool ppm_model)
  : coeff_(),                                     // empty: model is untrained until train() succeeds
    use_ppm_(ppm_model),
    rt_(std::numeric_limits<double>::quiet_NaN()) // RT is assigned during train()
  {
  }
  // Printable names for MODELTYPE; array order must match the enum order.
  const std::string MZTrafoModel::names_of_modeltype[] = {"linear", "linear_weighted", "quadratic", "quadratic_weighted", "size_of_modeltype"};
  // RANSAC parameters shared by all models; unset until setRANSACParams() is called.
  Math::RANSACParam* MZTrafoModel::ransac_params_ = nullptr;
  // Time-based default seed; call setRANSACSeed() for reproducible runs.
  int MZTrafoModel::ransac_seed_ = time(nullptr);
  double MZTrafoModel::limit_offset_ = std::numeric_limits<double>::max(); // no limit by default
  double MZTrafoModel::limit_scale_ = std::numeric_limits<double>::max(); // no limit by default
  double MZTrafoModel::limit_power_ = std::numeric_limits<double>::max(); // no limit by default
MZTrafoModel::MODELTYPE MZTrafoModel::nameToEnum(const std::string& name)
{
const std::string* qb = names_of_modeltype;
const std::string* qe = qb + static_cast<int>(MODELTYPE::SIZE_OF_MODELTYPE);
const std::string* qm = std::find(qb, qe, name);
return static_cast<MODELTYPE>(std::distance(qb, qm));
}
  // Maps a MODELTYPE back to its printable name.
  // Note: direct table lookup — caller must pass a valid enum value (no bounds check).
  const std::string& MZTrafoModel::enumToName(MZTrafoModel::MODELTYPE mt)
  {
    return names_of_modeltype[static_cast<int>(mt)];
  }
  // Stores a copy of the RANSAC parameters used by all subsequent train() calls.
  // Replaces any previously set parameters (delete on nullptr is a no-op).
  void MZTrafoModel::setRANSACParams(const Math::RANSACParam& p)
  {
    delete ransac_params_;
    ransac_params_ = new Math::RANSACParam(p);
  }
  // Sets a fixed RANSAC seed for reproducible training results.
  void MZTrafoModel::setRANSACSeed(int seed)
  {
    ransac_seed_ = seed;
  }
  // Sets the maximum allowed magnitude for each model coefficient
  // (checked by isValidModel()). Limits are symmetric around zero,
  // hence only the absolute value is stored.
  void MZTrafoModel::setCoefficientLimits(double offset, double scale, double power)
  {
    limit_offset_ = fabs(offset);
    limit_scale_ = fabs(scale);
    limit_power_ = fabs(power);
  }
bool MZTrafoModel::isValidModel( const MZTrafoModel& trafo )
{
if (trafo.coeff_.empty()) return false;
// go through coefficients and see if they are too extreme
if (limit_offset_ < fabs(trafo.coeff_[0]))
{
return false;
}
if (limit_scale_ < fabs(trafo.coeff_[1]))
{
return false;
}
if (limit_power_ < fabs(trafo.coeff_[2]))
{
return false;
}
return (true);
}
  // Returns true if train() has successfully populated the coefficients.
  bool MZTrafoModel::isTrained() const
  {
    return !coeff_.empty();
  }
  // Returns the RT position of this model (NaN until train() was called).
  double MZTrafoModel::getRT() const
  {
    return rt_;
  }
double MZTrafoModel::predict( double mz ) const
{
// mz = a + b * mz + c * mz^2
double predict =
coeff_[0] +
coeff_[1] * mz +
coeff_[2] * mz * mz;
if (use_ppm_) // the above prediction is the ppm error
{ // ... so we convert to actual mass diff
predict = Math::ppmToMass(-predict, mz) + mz;
}
else
{
predict = (-predict) + mz;
}
return predict;
}
bool MZTrafoModel::train( const CalibrationData& cd, MODELTYPE md, bool use_RANSAC, double rt_left /*= -std::numeric_limits<double>::max()*/, double rt_right /*= std::numeric_limits<double>::max() */ )
{
std::vector<double> obs_mz;
std::vector<double> theo_mz;
std::vector<double> weights;
const CalibrationData* p_cd;
CalibrationData cdm;
Size i, ie; // CalibrationData's start-to-end interval
if (cd.getNrOfGroups() > 0) // we have lock mass traces
{ // this is extra work, since we need to collect peak groups and compute the median
cdm = cd.median(rt_left, rt_right);
p_cd = &cdm;
i = 0;
ie = cdm.size();
}
else
{
i = std::distance(cd.begin(), lower_bound(cd.begin(), cd.end(), rt_left, RichPeak2D::RTLess()));
ie = std::distance(cd.begin(), upper_bound(cd.begin(), cd.end(), rt_right, RichPeak2D::RTLess()));
p_cd = &cd;
}
for (Size j = i; j != ie; ++j)
{
obs_mz.push_back(p_cd->getError(j)); // could be ppm or [Th], depending on cd::use_ppm_
theo_mz.push_back(p_cd->getRefMZ(j));
weights.push_back(p_cd->getWeight(j));
}
this->rt_ = (rt_left + rt_right) / 2;
return (train(obs_mz, theo_mz, weights, md, use_RANSAC));
}
bool MZTrafoModel::train( std::vector<double> obs_mz, std::vector<double> theo_mz, std::vector<double> weights, MODELTYPE md, bool use_RANSAC )
{
coeff_.clear();
if (obs_mz.empty())
{
//OPENMS_LOG_ERROR << "Input to calibration model is empty!" << std::endl;
return false;
}
if (use_RANSAC)
{
if (ransac_params_ == nullptr)
{
throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "TrafoModel::train(): no RANSAC parameters were set before calling train(). Internal error!");
}
if (!(md == MODELTYPE::LINEAR || md == MODELTYPE::QUADRATIC))
{
OPENMS_LOG_ERROR << "RANSAC is implemented for LINEAR and QUADRATIC models only! Please disable RANSAC or choose the LINEAR or QUADRATIC model." << std::endl;
throw Exception::NotImplemented(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION);
}
}
try
{
if (md == MODELTYPE::LINEAR)
{
if (obs_mz.size() < 2)
{
return false;
}
if (use_RANSAC &&
(obs_mz.size() > ransac_params_->n)) // with fewer points, RANSAC will fail
{
std::vector<std::pair<double, double> > r, pairs;
for (Size i = 0; i < obs_mz.size(); ++i)
{
pairs.emplace_back(theo_mz[i], obs_mz[i]);
}
r = Math::RANSAC<Math::RansacModelLinear>(ransac_seed_).ransac(pairs, *ransac_params_);
if (r.size() < 2)
{
return false; // RANSAC failed
}
obs_mz.clear();
theo_mz.clear();
for (Size i = 0; i < r.size(); ++i)
{
theo_mz.push_back(r[i].first);
obs_mz.push_back(r[i].second);
}
}
double confidence_interval_P(0.0);
Math::LinearRegression lr;
lr.computeRegression(confidence_interval_P, theo_mz.begin(), theo_mz.end(), obs_mz.begin(), false);
coeff_.push_back(lr.getIntercept());
coeff_.push_back(lr.getSlope());
coeff_.push_back(0.0);
}
else if (md == MODELTYPE::LINEAR_WEIGHTED)
{
if (obs_mz.size() < 2)
{
return false;
}
double confidence_interval_P(0.0);
Math::LinearRegression lr;
lr.computeRegressionWeighted(confidence_interval_P, theo_mz.begin(), theo_mz.end(), obs_mz.begin(), weights.begin(), false);
coeff_.push_back(lr.getIntercept());
coeff_.push_back(lr.getSlope());
coeff_.push_back(0.0);
}
else if (md == MODELTYPE::QUADRATIC)
{
if (obs_mz.size() < 3)
{
return false;
}
if (use_RANSAC &&
(obs_mz.size() > ransac_params_->n)) // with fewer points, RANSAC will fail
{
std::vector<std::pair<double, double> > r, pairs;
for (Size i = 0; i < obs_mz.size(); ++i)
{
pairs.emplace_back(theo_mz[i], obs_mz[i]);
}
r = Math::RANSAC<Math::RansacModelQuadratic>(ransac_seed_).ransac(pairs, *ransac_params_);
obs_mz.clear();
theo_mz.clear();
for (Size i = 0; i < r.size(); ++i)
{
theo_mz.push_back(r[i].first);
obs_mz.push_back(r[i].second);
}
}
// Quadratic fit
Math::QuadraticRegression qr;
qr.computeRegression(theo_mz.begin(), theo_mz.end(), obs_mz.begin());
coeff_.push_back(qr.getA());
coeff_.push_back(qr.getB());
coeff_.push_back(qr.getC());
}
else if (md == MODELTYPE::QUADRATIC_WEIGHTED)
{
if (obs_mz.size() < 3)
{
return false;
}
// Quadratic fit (weighted)
Math::QuadraticRegression qr;
qr.computeRegressionWeighted(theo_mz.begin(), theo_mz.end(), obs_mz.begin(), weights.begin());
coeff_.push_back(qr.getA());
coeff_.push_back(qr.getB());
coeff_.push_back(qr.getC());
}
#ifdef DEBUG_TRAFOMODEL
printf("# mz regression parameters: Y = %3.10f + %3.10f X + %3.10f X^2\n",
coeff_[0],
coeff_[1],
coeff_[2]);
// print results
std::cout << "Calibration details:\n\n";
std::cout << "m/z(theo) m/z(obs) ppm(before) | ppm(after)\n";
std::vector<double> st_ppm_before, st_ppm_after;
for (Size i = 0; i < obs_mz.size(); i++)
{
if (use_ppm_)
{
st_ppm_before.push_back(obs_mz[i]);
}
else
{
st_ppm_before.push_back(Math::getPPM(theo_mz[i], obs_mz[i]));
}
double obs_mz_v = obs_mz[i];
if (use_ppm_)
{
obs_mz_v = Math::ppmToMass(obs_mz_v, theo_mz[i]) + theo_mz[i];
}
st_ppm_after.push_back(Math::getPPM(theo_mz[i], predict(obs_mz_v))); // predict() is ppm-aware itself
printf("%4.5f %4.5f %2.1f | %2.1f\n", theo_mz[i], obs_mz_v, st_ppm_before.back(), st_ppm_after.back());
}
// use median and MAD to ignore outliers
double m = Math::median(st_ppm_before.begin(), st_ppm_before.end());
std::cout << "ppm before: median = " << m << " MAD = " << Math::MAD(st_ppm_before.begin(), st_ppm_before.end(), m) << "\n";
m = Math::median(st_ppm_after.begin(), st_ppm_after.end());
std::cout << "ppm after : median = " << m << " MAD = " << Math::MAD(st_ppm_after.begin(), st_ppm_after.end(), m) << "\n";
#endif
return true;
}
catch (Exception::BaseException& /*e*/)
{
//OPENMS_LOG_ERROR << "Exception during model fitting: " << e.what() << std::endl;
return false;
}
}
Size MZTrafoModel::findNearest( const std::vector<MZTrafoModel>& tms, double rt )
{
// no peak => no search
if (tms.empty())
{
throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "There must be at least one model to determine the nearest model!");
}
// search for position for inserting
std::vector<MZTrafoModel>::const_iterator it = lower_bound(tms.begin(), tms.end(), rt, MZTrafoModel::RTLess());
// border cases
if (it == tms.begin())
{
return 0;
}
if (it == tms.end())
{
return tms.size() - 1;
}
// the model before or the current model are closest
std::vector<MZTrafoModel>::const_iterator it2 = it;
--it2;
if (std::fabs(it->rt_ - rt) < std::fabs(it2->rt_ - rt))
{
return Size(it - tms.begin());
}
else
{
return Size(it2 - tms.begin());
}
}
  // Copies the coefficients from another model (RT and ppm flag are untouched).
  void MZTrafoModel::setCoefficients( const MZTrafoModel& rhs )
  {
    coeff_ = rhs.coeff_;
  }
void MZTrafoModel::setCoefficients( double intercept, double slope, double power )
{
coeff_.clear();
coeff_.push_back(intercept);
coeff_.push_back(slope);
coeff_.push_back(power);
}
OpenMS::String MZTrafoModel::toString() const
{
String s;
if (coeff_.empty())
{
s = "nan, nan, nan";
}
else
{
s = ListUtils::concatenate(coeff_, ", ");
}
return s;
}
  // Retrieves the trained coefficients via out-parameters.
  // @throws Exception::Precondition if the model has not been trained yet.
  void MZTrafoModel::getCoefficients( double& intercept, double& slope, double& power )
  {
    if (!isTrained())
    {
      throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Model is not trained yet.");
    }
    intercept = coeff_[0];
    slope = coeff_[1];
    power = coeff_[2];
  }
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/PROCESSING/CALIBRATION/InternalCalibration.cpp | .cpp | 21,030 | 554 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/PROCESSING/CALIBRATION/InternalCalibration.h>
#include <OpenMS/FORMAT/SVOutStream.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/MATH/MathFunctions.h>
#include <OpenMS/MATH/StatisticFunctions.h>
#include <OpenMS/SYSTEM/File.h>
#include <OpenMS/SYSTEM/RWrapper.h>
#include <QtCore/QStringList>
#include <cstdio>
#include <memory>
namespace OpenMS
{
  // Default constructor; only initializes the progress logger base class.
  InternalCalibration::InternalCalibration()
    : ProgressLogger()
  {
  }
void InternalCalibration::applyTransformation(std::vector<Precursor>& pcs, const MZTrafoModel& trafo)
{
// calibrate the precursor mass
if (!pcs.empty())
{
for (Size i = 0; i < pcs.size(); ++i)
{
pcs[i].setMetaValue("mz_raw", pcs[i].getMZ());
pcs[i].setMZ(trafo.predict(pcs[i].getMZ()));
}
}
}
void InternalCalibration::applyTransformation_(PeakMap::SpectrumType& spec, const MZTrafoModel& trafo)
{
typedef PeakMap::SpectrumType::Iterator SpecIt;
// calibrate the spectrum itself
for (SpecIt it = spec.begin(); it != spec.end(); ++it)
{
it->setMZ(trafo.predict(it->getMZ()));
}
}
  // Applies the model to a single spectrum: calibrates the peaks if the
  // spectrum's MS level is targeted, and calibrates the precursors if the
  // level they originate from is targeted.
  void InternalCalibration::applyTransformation(PeakMap::SpectrumType& spec, const IntList& target_mslvl, const MZTrafoModel& trafo)
  {
    // calibrate the peaks?
    if (ListUtils::contains(target_mslvl, spec.getMSLevel()))
    {
      applyTransformation_(spec, trafo);
    }
    // apply PC correction (only if target is MS1, and current spec is MS2; or target is MS2 and cs is MS3,...)
    if (ListUtils::contains(target_mslvl, spec.getMSLevel() - 1))
    {
      applyTransformation(spec.getPrecursors(), trafo);
    }
  }
void InternalCalibration::applyTransformation(PeakMap& exp, const IntList& target_mslvl, const MZTrafoModel& trafo)
{
for (PeakMap::Iterator it = exp.begin(); it != exp.end(); ++it)
{
applyTransformation(*it, target_mslvl, trafo);
}
}
  // Searches the given lock masses in all spectra of 'exp' (within tol_ppm) and
  // stores the hits as calibration points; failed candidates are recorded in
  // 'failed_lock_masses' (weight 0 = not found, 1 = not monoisotopic, 2 = no +1
  // isotope). Returns the number of calibration points collected.
  Size InternalCalibration::fillCalibrants(const PeakMap& exp,
                                           const std::vector<InternalCalibration::LockMass>& ref_masses,
                                           double tol_ppm,
                                           bool lock_require_mono,
                                           bool lock_require_iso,
                                           CalibrationData& failed_lock_masses,
                                           bool verbose /*= true*/)
  {
    cal_data_.clear();
    //
    // find lock masses in data and build calibrant table
    //
    std::map<Size, Size> stats_cal_per_spectrum; // #locks found -> #spectra with that count
    typedef PeakMap::ConstIterator ExpCIt;
    for (ExpCIt it = exp.begin(); it != exp.end(); ++it)
    {
      // empty spectrum
      if (it->empty()) {
        ++stats_cal_per_spectrum[0];
        continue;
      }
      Size cnt_cd = cal_data_.size(); // points before this spectrum (for the stats below)
      // iterate over calibrants
      for (std::vector<InternalCalibration::LockMass>::const_iterator itl = ref_masses.begin(); itl != ref_masses.end(); ++itl)
      {
        // calibrant meant for this MS level?
        if (it->getMSLevel() != itl->ms_level) continue;
        Size s = it->findNearest(itl->mz);
        const double mz_obs = (*it)[s].getMZ();
        if (Math::getPPMAbs(mz_obs, itl->mz) > tol_ppm)
        { // nearest peak is too far away: record as 'not found' (weight 0)
          failed_lock_masses.insertCalibrationPoint(it->getRT(), itl->mz, 0.0, itl->mz, 0.0, std::distance(ref_masses.begin(), itl));
        }
        else
        {
          if (lock_require_mono)
          {
            // check if its the monoisotopic .. discard otherwise
            const double mz_iso_left = mz_obs - (Constants::C13C12_MASSDIFF_U / itl->charge);
            Size s_left = it->findNearest(mz_iso_left);
            if (Math::getPPMAbs(mz_iso_left, (*it)[s_left].getMZ()) < 0.5) // intra-scan ppm should be very good!
            { // peak nearby lock mass was not the monoisotopic
              if (verbose)
              {
                OPENMS_LOG_INFO << "peak at [RT, m/z] " << it->getRT() << ", " << (*it)[s].getMZ() << " is NOT monoisotopic. Skipping it!\n";
              }
              failed_lock_masses.insertCalibrationPoint(it->getRT(), itl->mz, 1.0, itl->mz, 0.0, std::distance(ref_masses.begin(), itl));
              continue;
            }
          }
          if (lock_require_iso)
          {
            // require it to have a +1 isotope?!
            const double mz_iso_right = mz_obs + Constants::C13C12_MASSDIFF_U / itl->charge;
            Size s_right = it->findNearest(mz_iso_right);
            if (!(Math::getPPMAbs(mz_iso_right, (*it)[s_right].getMZ()) < 0.5)) // intra-scan ppm should be very good!
            { // peak has no +1iso.. weird
              if (verbose)
              {
                OPENMS_LOG_INFO << "peak at [RT, m/z] " << it->getRT() << ", " << (*it)[s].getMZ() << " has no +1 isotope (ppm to closest: " << Math::getPPM(mz_iso_right, (*it)[s_right].getMZ()) << ")... Skipping it!\n";
              }
              failed_lock_masses.insertCalibrationPoint(it->getRT(), itl->mz, 2.0, itl->mz, 0.0, std::distance(ref_masses.begin(), itl));
              continue;
            }
          }
          // accept the peak as a calibration point (log-intensity as weight)
          cal_data_.insertCalibrationPoint(it->getRT(), mz_obs, (*it)[s].getIntensity(), itl->mz, std::log((*it)[s].getIntensity()), std::distance(ref_masses.begin(), itl));
        }
      }
      // how many locks found in this spectrum?!
      ++stats_cal_per_spectrum[cal_data_.size()-cnt_cd];
    }
    OPENMS_LOG_INFO << "Lock masses found across viable spectra:\n";
    for (std::map<Size, Size>::const_iterator its = stats_cal_per_spectrum.begin(); its != stats_cal_per_spectrum.end(); ++its)
    {
      OPENMS_LOG_INFO << "  " << its->first << " [of " << ref_masses.size() << "] lock masses: " << its->second << "x\n";
    }
    OPENMS_LOG_INFO << '\n';
    // sort CalData by RT
    cal_data_.sortByRT();
    return cal_data_.size();
  }
  // Collects calibration points from identified features (and the map's
  // unassigned peptide IDs): a feature is usable if its first peptide ID
  // agrees with the feature m/z within tol_ppm. Returns the number of points.
  Size InternalCalibration::fillCalibrants(const FeatureMap& fm, double tol_ppm)
  {
    cal_data_.clear();
    CalibrantStats_ stats(tol_ppm);
    stats.cnt_total = fm.size() + fm.getUnassignedPeptideIdentifications().size();
    for (const auto& f : fm)
    {
      const PeptideIdentificationList& ids = f.getPeptideIdentifications();
      double mz_ref;
      if (ids.empty())
      { // feature without identification cannot provide a reference mass
        continue;
      }
      // NOTE(review): isDecalibrated_ dereferences the first hit of ids[0];
      // presumably assigned IDs always carry >= 1 hit -- verify upstream.
      if (isDecalibrated_(ids[0], f.getMZ(), tol_ppm, stats, mz_ref))
      {
        continue;
      }
      // accept: observed feature m/z vs. theoretical peptide m/z (log-intensity weight)
      cal_data_.insertCalibrationPoint(f.getRT(), f.getMZ(), f.getIntensity(), mz_ref, log(f.getIntensity()));
    }
    // unassigned peptide IDs
    fillIDs_(fm.getUnassignedPeptideIdentifications(), tol_ppm, stats);
    OPENMS_LOG_INFO << "Found " << cal_data_.size() << " calibrants (incl. unassigned) in FeatureMap.\n";
    stats.print();
    // sort CalData by RT
    cal_data_.sortByRT();
    return cal_data_.size();
  }
  // Converts a single peptide ID into a calibration point if it has hits,
  // an m/z, an RT, and is not decalibrated beyond tol_ppm; otherwise the
  // corresponding failure counter in 'stats' is incremented.
  void InternalCalibration::fillID_(const PeptideIdentification& pep_id, const double tol_ppm, CalibrantStats_& stats)
  {
    if (pep_id.empty())
    { // no peptide hits
      ++stats.cnt_empty;
      return;
    }
    if (!pep_id.hasMZ())
    { // no observed m/z to calibrate against
      ++stats.cnt_nomz;
      return;
    }
    if (!pep_id.hasRT())
    { // calibration points are RT-sorted, so RT is required
      ++stats.cnt_nort;
      return;
    }
    double mz_ref;
    if (isDecalibrated_(pep_id, pep_id.getMZ(), tol_ppm, stats, mz_ref))
    {
      return;
    }
    // accept with unit intensity/weight (IDs carry no intensity information)
    cal_data_.insertCalibrationPoint(pep_id.getRT(), pep_id.getMZ(), 1.0, mz_ref, 1.0);
  }
void InternalCalibration::fillIDs_( const PeptideIdentificationList& pep_ids, const double tol_ppm, CalibrantStats_& stats)
{
for (const auto& id : pep_ids)
{
fillID_(id, tol_ppm, stats);
}
}
  // Returns true (and counts it) if the observed m/z deviates from the
  // theoretical m/z of the best peptide hit by more than tol_ppm; such IDs are
  // likely isotope misassignments and must not be used as calibration points.
  // On return, mz_ref holds the theoretical m/z of the best hit.
  bool InternalCalibration::isDecalibrated_(const PeptideIdentification& pep_id, const double mz_obs, const double tol_ppm, CalibrantStats_& stats, double& mz_ref)
  {
    // work on a copy, since sorting mutates and pep_id is const
    PeptideIdentification pid = pep_id;
    pid.sort();
    // NOTE(review): assumes at least one hit is present -- callers must ensure this
    int q = pid.getHits()[0].getCharge();
    mz_ref = pid.getHits()[0].getSequence().getMZ(q);
    // Only use ID if precursor m/z and theoretical mass don't deviate too much.
    // as they may occur due to isotopic peak misassignments
    double delta = Math::getPPMAbs(mz_obs, mz_ref);
    if (tol_ppm < delta)
    {
      // report the first 10 offenders individually, then only a summary line
      if (stats.cnt_decal < 10)
      {
        OPENMS_LOG_INFO << "Peptide " << pid.getHits()[0].getSequence().toString() << " is " << delta << " (>" << tol_ppm << ") ppm away from theoretical mass and is omitted as calibration point.\n";
      }
      else if (stats.cnt_decal == 10)
      {
        OPENMS_LOG_INFO << "More than 10 peptides are at least " << tol_ppm << " ppm away from theoretical mass and are omitted as calibration point.";
      }
      ++stats.cnt_decal;
      return true;
    }
    return false;
  }
  // Collects calibration points from a plain list of peptide IDs.
  // Returns the number of calibration points found.
  Size InternalCalibration::fillCalibrants(const PeptideIdentificationList& pep_ids, double tol_ppm)
  {
    cal_data_.clear();
    CalibrantStats_ stats(tol_ppm);
    stats.cnt_total = pep_ids.size();
    fillIDs_(pep_ids, tol_ppm, stats);
    OPENMS_LOG_INFO << "Found " << cal_data_.size() << " calibrants in peptide IDs.\n";
    stats.print();
    // sort CalData by RT (models are trained on RT windows)
    cal_data_.sortByRT();
    return cal_data_.size();
  }
  // Read-only access to the calibration points collected by fillCalibrants().
  const CalibrationData& InternalCalibration::getCalibrationPoints() const
  {
    return cal_data_;
  }
bool InternalCalibration::calibrate(PeakMap& exp,
const IntList& target_mslvl,
MZTrafoModel::MODELTYPE model_type,
double rt_chunk,
bool use_RANSAC,
double post_ppm_median,
double post_ppm_MAD,
const String& file_models,
const String& file_models_plot,
const String& file_residuals,
const String& file_residuals_plot,
const String& rscript_executable_)
{
QString rscript_executable = rscript_executable_.toQString();
// ensure sorting; required for finding RT ranges and lock masses
if (!exp.isSorted(true))
{
exp.sortSpectra(true);
}
startProgress(0, exp.size(), "Applying calibration to data");
std::vector<MZTrafoModel> tms; // each spectrum gets its own model (params are cheap to store)
std::map<Size, Size> invalid_models; // indices from tms[] -> exp[]; where model creation failed (e..g, not enough calibration points)
bool hasValidModels(false); // was at least one model valid?
bool global_model = (rt_chunk < 0);
if (global_model)
{ // build one global modal
OPENMS_LOG_INFO << "Building a global model...\n";
tms.emplace_back();
tms[0].train(cal_data_, model_type, use_RANSAC);
if (MZTrafoModel::isValidModel(tms[0]))
{
applyTransformation(exp, target_mslvl, tms[0]);
hasValidModels = true;
}
}
else
{ // one model per spectrum (not all might be needed, if certain MS levels are excluded from calibration)
tms.reserve(exp.size());
// go through spectra and calibrate
Size i(0), i_mslvl(0);
for (PeakMap::Iterator it = exp.begin(); it != exp.end(); ++it, ++i)
{
setProgress(i);
// skip this MS level?
if (!(ListUtils::contains(target_mslvl, it->getMSLevel()) || // scan m/z needs correction
ListUtils::contains(target_mslvl, it->getMSLevel() - 1))) // precursor m/z needs correction
{
continue;
}
//
// build model
//
tms.emplace_back();
tms.back().train(cal_data_, model_type, use_RANSAC, it->getRT() - rt_chunk, it->getRT() + rt_chunk);
if (!MZTrafoModel::isValidModel(tms.back())) // model not trained or coefficients are too extreme
{
invalid_models[i_mslvl] = i;
}
else
{
applyTransformation(*it, target_mslvl, tms.back());
}
++i_mslvl;
} // MSExp::iter
//////////////////////////////////////////////////////////////////////////
// CHECK Models -- use neighbors if needed
//////////////////////////////////////////////////////////////////////////
hasValidModels = (std::find_if(tms.begin(), tms.end(), MZTrafoModel::isValidModel) != tms.end());
// did we build any model at all?
if (hasValidModels && !invalid_models.empty())
{
// 2nd attempt to calibrate spectra using neighboring models
// (will not be entered for global model since could_not_cal is empty)
OPENMS_LOG_INFO << "\nCalibration failed on " << invalid_models.size() << "/" << tms.size() << " [" << invalid_models.size() * 100 / tms.size() << " %] spectra. "
<< "Using the closest successful model on these.\n";
std::vector<MZTrafoModel> tms_new = tms; // will contain corrected models (this wastes a bit of memory)
for (std::map<Size, Size>::const_iterator it = invalid_models.begin(); it != invalid_models.end(); ++it)
{
Size p = it->first;
// find model closest valid model to p'th model
std::vector<MZTrafoModel>::iterator it_center_r = tms.begin() + p; // points to 'p'
std::vector<MZTrafoModel>::iterator it_right = std::find_if(it_center_r, tms.end(), MZTrafoModel::isValidModel);
std::vector<MZTrafoModel>::reverse_iterator it_center_l = tms.rbegin() + (tms.size() - p - 1); // points to 'p'
std::vector<MZTrafoModel>::reverse_iterator it_left = std::find_if(it_center_l, tms.rend(), MZTrafoModel::isValidModel);
Size dist_right(0), dist_left(0);
if (it_right != tms.end())
{
dist_right = std::distance(it_center_r, it_right);
}
if (it_left != tms.rend())
{
dist_left = std::distance(it_center_l, it_left);
}
Size model_index;
if (((dist_left <= dist_right) || dist_right == 0) && dist_left != 0) // left is closer in #spectra, i.e. time; or is the only valid direction
{
model_index = p - dist_left;
}
else
{
model_index = p + dist_right;
}
applyTransformation(exp[it->second], target_mslvl, tms[model_index]);
tms_new[p].setCoefficients(tms[model_index]); // overwrite invalid model
}
tms_new.swap(tms);
// consistency check: all models must be valid at this point
for (Size j = 0; j < tms.size(); ++j)
{
if (!MZTrafoModel::isValidModel(tms[j]))
{
throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "InternalCalibration::calibrate(): Internal error. Not all models are valid!", String(j));
}
}
}
}
endProgress();
// check if Rscript is available
if (!file_models_plot.empty() || !file_residuals_plot.empty())
{
if (!RWrapper::findR(rscript_executable, true))
{
OPENMS_LOG_ERROR << "The R interpreter is required to create PNG plot files. To avoid the error, either do not request 'quality_control:*_plot' (not recommended) or fix your R installation.\n";
return false;
}
}
//
// write the model parameters to file and/or plot them
//
if (!file_models.empty() || !file_models_plot.empty())
{
String out_table = File::getTemporaryFile(file_models);
{ // we need this scope, to ensure that SVOutStream writes its cache, before we call RWrapper!
SVOutStream sv(out_table, ", ", ", ", String::NONE);
sv << "# model parameters (for all successfully trained models)" << nl
<< "RT" << "A (offset)" << "B (slope)" << "C (power)" << "source" << nl;
for (Size i = 0; i < tms.size(); ++i)
{
sv << tms[i].getRT() << tms[i].toString();
if (!MZTrafoModel::isValidModel(tms[i]))
{
sv << "invalid"; // this only happens if ALL models are invalid (since otherwise they would use 'neighbour')
}
else if (invalid_models.count(i) > 0)
{
sv << "neighbor";
}
else
{
sv << "local";
}
sv << nl;
}
}
// plot it
if (!file_models_plot.empty())
{
if (!RWrapper::runScript("InternalCalibration_Models.R", QStringList() << out_table.toQString() << file_models_plot.toQString(), rscript_executable))
{
OPENMS_LOG_ERROR << "R script failed. To avoid the error, either disable the creation of 'quality_control:models_plot' (not recommended) or fix your R installation.\n";
return false;
}
}
}
//
// plot the residual error (after calibration)
// go through Calibration data points
//
SVOutStream* sv = nullptr;
String out_table_residuals;
if (!file_residuals.empty() || !file_residuals_plot.empty())
{
out_table_residuals = File::getTemporaryFile(file_residuals);
sv = new SVOutStream(out_table_residuals, ", ", ", ", String::NONE);
}
std::vector<double> vec_ppm_before, vec_ppm_after;
vec_ppm_before.reserve(cal_data_.size());
vec_ppm_after.reserve(cal_data_.size());
if (sv != nullptr)
{
*sv << "# residual error after calibration" << nl
<< "RT" << "intensity" << "mz ref" << "mz before" << "mz after" << "ppm before" << "ppm after" << nl;
}
Size ii(0);
for (CalibrationData::const_iterator itc = cal_data_.begin(); itc != cal_data_.end(); ++itc, ++ii)
{
double rt = itc->getRT();
// find closest model in RT
Size idx = (global_model ? 0 : MZTrafoModel::findNearest(tms, rt));
double mz_corrected = std::numeric_limits<double>::quiet_NaN();
if (MZTrafoModel::isValidModel(tms[idx]))
{
mz_corrected = tms[idx].predict(itc->getMZ());
}
double mz_ref = cal_data_.getRefMZ(ii);
double ppm_before = Math::getPPM(itc->getMZ(), mz_ref);
double ppm_after = Math::getPPM(mz_corrected, mz_ref);
vec_ppm_before.push_back(ppm_before);
vec_ppm_after.push_back(ppm_after);
if (sv != nullptr)
{
*sv << rt
<< itc->getIntensity()
<< mz_ref
<< itc->getMZ();
sv->writeValueOrNan(mz_corrected)
<< ppm_before;
sv->writeValueOrNan(ppm_after)
<< nl;
}
}
delete sv;
// plot it
if (!file_residuals_plot.empty())
{
if (!RWrapper::runScript("InternalCalibration_Residuals.R", QStringList() << out_table_residuals.toQString() << file_residuals_plot.toQString(), rscript_executable))
{
OPENMS_LOG_ERROR << "R script failed. To avoid the error, either disable the creation of 'quality_control:residuals_plot' (not recommended) or fix your R installation.\n";
return false;
}
}
if (!hasValidModels)
{ // QC tables are done; quit
OPENMS_LOG_ERROR << "Error: Could not build a single local calibration model! Check your calibrants and/or extend the search window!\n";
if (use_RANSAC)
{
OPENMS_LOG_ERROR << " Since you are using RANSAC, check the parameters as well and test different setups.\n";
}
return false;
}
// use median and MAD to ignore outliers
double median_ppm_before = Math::median(vec_ppm_before.begin(), vec_ppm_before.end());
double MAD_ppm_before = Math::MAD(vec_ppm_before.begin(), vec_ppm_before.end(), median_ppm_before);
OPENMS_LOG_INFO << "\n-----\n" <<
"ppm stats before calibration: median = " << median_ppm_before << " MAD = " << MAD_ppm_before << "\n";
double median_ppm_after = Math::median(vec_ppm_after.begin(), vec_ppm_after.end());
double MAD_ppm_after = Math::MAD(vec_ppm_after.begin(), vec_ppm_after.end(), median_ppm_after);
OPENMS_LOG_INFO << "ppm stats after calibration: median = " << median_ppm_after << " MAD = " << MAD_ppm_after << "\n";
// check desired limits
if (post_ppm_median < fabs(median_ppm_after))
{
OPENMS_LOG_INFO << "Post calibration median threshold (" << post_ppm_median << " ppm) not reached (median = |" << median_ppm_after << "| ppm). Failed to calibrate!\n";
return false;
}
if (post_ppm_MAD < fabs(MAD_ppm_after))
{
OPENMS_LOG_INFO << "Post calibration MAD threshold (" << post_ppm_MAD << " ppm) not reached (MAD = |" << MAD_ppm_after << "| ppm). Failed to calibrate!\n";
return false;
}
return true; // success
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/PROCESSING/FILTERING/NLargest.cpp | .cpp | 1,505 | 74 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Mathias Walzer $
// $Authors: $
// --------------------------------------------------------------------------
//
#include <OpenMS/PROCESSING/FILTERING/NLargest.h>
using namespace std;
namespace OpenMS
{
  // Default constructor: registers the 'n' parameter with its default value.
  NLargest::NLargest() :
    DefaultParamHandler("NLargest")
  {
    init_();
  }
  // Convenience constructor: keep the 'n' largest peaks.
  // @param n number of peaks to keep per spectrum
  NLargest::NLargest(UInt n) :
    DefaultParamHandler("NLargest")
  {
    init_();
    // after initialising with the default value, use the provided n
    param_.setValue("n", n);
    updateMembers_(); // refresh peakcount_ from the new parameter value
  }
void NLargest::init_()
{
defaults_.setValue("n", 200, "The number of peaks to keep");
defaultsToParam_();
}
  // Defaulted: no resources owned beyond those of DefaultParamHandler.
  NLargest::~NLargest() = default;
  // Copy constructor: the base class copies the parameter state;
  // updateMembers_() then re-syncs the cached peak count from it.
  NLargest::NLargest(const NLargest & source) :
    DefaultParamHandler(source)
  {
    updateMembers_();
  }
  // Assignment: delegate parameter copying to the base class and
  // re-sync cached members; guarded against self-assignment.
  NLargest & NLargest::operator=(const NLargest & source)
  {
    if (this != &source)
    {
      DefaultParamHandler::operator=(source);
      updateMembers_();
    }
    return *this;
  }
  // Keep only the N most intense peaks of a single spectrum
  // (thin wrapper around filterSpectrum()).
  void NLargest::filterPeakSpectrum(PeakSpectrum & spectrum)
  {
    filterSpectrum(spectrum);
  }
void NLargest::filterPeakMap(PeakMap & exp)
{
for (auto& spectrum : exp)
{
filterSpectrum(spectrum);
}
}
void NLargest::updateMembers_()
{
peakcount_ = (UInt)param_.getValue("n");
}
}
| C++ |
3D | OpenMS/OpenMS | src/openms/source/PROCESSING/FILTERING/WindowMower.cpp | .cpp | 1,934 | 73 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Mathias Walzer $
// $Authors: $
// --------------------------------------------------------------------------
//
#include <OpenMS/PROCESSING/FILTERING/WindowMower.h>
using namespace std;
namespace OpenMS
{
WindowMower::WindowMower() :
DefaultParamHandler("WindowMower")
{
defaults_.setValue("windowsize", 50.0, "The size of the sliding window along the m/z axis.");
defaults_.setValue("peakcount", 2, "The number of peaks that should be kept.");
defaults_.setValue("movetype", "slide", "Whether sliding window (one peak steps) or jumping window (window size steps) should be used.");
defaults_.setValidStrings("movetype", {"slide","jump"});
defaultsToParam_();
}
  // Defaulted: no resources owned beyond those of DefaultParamHandler.
  WindowMower::~WindowMower() = default;
  // Copy constructor: the base class copies the full parameter state.
  // WindowMower reads its parameters on demand (see filterPeakSpectrum),
  // so no cached members need re-syncing here.
  WindowMower::WindowMower(const WindowMower & source) :
    DefaultParamHandler(source)
  {
  }
  // Assignment: delegate parameter copying to the base class;
  // guarded against self-assignment. No cached members to update.
  WindowMower & WindowMower::operator=(const WindowMower & source)
  {
    if (this != &source)
    {
      DefaultParamHandler::operator=(source);
    }
    return *this;
  }
void WindowMower::filterPeakSpectrum(PeakSpectrum & spectrum)
{
bool sliding = param_.getValue("movetype").toString() == "slide" ? true : false;
if (sliding)
{
filterPeakSpectrumForTopNInSlidingWindow(spectrum);
}
else
{
filterPeakSpectrumForTopNInJumpingWindow(spectrum);
}
}
void WindowMower::filterPeakMap(PeakMap & exp)
{
bool sliding = param_.getValue("movetype").toString() == "slide" ? true : false;
for (auto& spectrum : exp)
{
if (sliding)
{
filterPeakSpectrumForTopNInSlidingWindow(spectrum);
}
else
{
filterPeakSpectrumForTopNInJumpingWindow(spectrum);
}
}
}
}
| C++ |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.