keyword stringclasses 7 values | repo_name stringlengths 8 98 | file_path stringlengths 4 244 | file_extension stringclasses 29 values | file_size int64 0 84.1M | line_count int64 0 1.6M | content stringlengths 1 84.1M ⌀ | language stringclasses 14 values |
|---|---|---|---|---|---|---|---|
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/CVMappingRule_test.cpp | .cpp | 8,848 | 321 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Andreas Bertsch $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/test_config.h>
///////////////////////////
#include <OpenMS/DATASTRUCTURES/CVMappingRule.h>
///////////////////////////
#include <OpenMS/DATASTRUCTURES/CVMappingTerm.h>
using namespace OpenMS;
using namespace std;
// Class test for CVMappingRule: exercises construction, copy semantics,
// comparison operators, and all getter/setter pairs of the rule's fields
// (identifier, element path, requirement level, combinations logic, scope
// path, CV terms). Sections after the destructor test share the heap
// instance created below via `ptr`.
START_TEST(CVMappingRule, "$Id$")
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
CVMappingRule* ptr = nullptr;
CVMappingRule* nullPointer = nullptr;
START_SECTION(CVMappingRule())
{
// Default construction must yield a non-null instance.
ptr = new CVMappingRule();
TEST_NOT_EQUAL(ptr, nullPointer)
}
END_SECTION
START_SECTION(virtual ~CVMappingRule())
{
delete ptr;
}
END_SECTION
// Fresh instance reused by the setter/getter sections further down.
ptr = new CVMappingRule();
START_SECTION((CVMappingRule(const CVMappingRule &rhs)))
{
// Each field is set on `cvmr`, then a temporary copy is checked to carry it.
CVMappingRule cvmr;
cvmr.setIdentifier("my_test_identifier");
TEST_STRING_EQUAL(CVMappingRule(cvmr).getIdentifier(), "my_test_identifier")
cvmr.setElementPath("my_test_elementpath");
TEST_STRING_EQUAL(CVMappingRule(cvmr).getElementPath(), "my_test_elementpath")
cvmr.setRequirementLevel(CVMappingRule::MUST);
TEST_EQUAL(CVMappingRule(cvmr).getRequirementLevel(), CVMappingRule::MUST);
cvmr.setRequirementLevel(CVMappingRule::SHOULD);
TEST_EQUAL(CVMappingRule(cvmr).getRequirementLevel(), CVMappingRule::SHOULD);
cvmr.setCombinationsLogic(CVMappingRule::AND);
TEST_EQUAL(CVMappingRule(cvmr).getCombinationsLogic(), CVMappingRule::AND);
cvmr.setCombinationsLogic(CVMappingRule::XOR);
TEST_EQUAL(CVMappingRule(cvmr).getCombinationsLogic(), CVMappingRule::XOR);
cvmr.setScopePath("my_test_scopepath");
TEST_STRING_EQUAL(CVMappingRule(cvmr).getScopePath(), "my_test_scopepath")
CVMappingTerm term1, term2;
term1.setAccession("BLA:1");
term2.setAccession("BLA:2");
vector<CVMappingTerm> terms;
terms.push_back(term1);
terms.push_back(term2);
cvmr.setCVTerms(terms);
TEST_EQUAL(CVMappingRule(cvmr).getCVTerms() == terms, true)
}
END_SECTION
START_SECTION((CVMappingRule& operator=(const CVMappingRule &rhs)))
{
// Same field-by-field scheme as the copy-constructor section, but via
// assignment into `cvmr_copy`.
CVMappingRule cvmr, cvmr_copy;
cvmr.setIdentifier("my_test_identifier");
cvmr_copy = cvmr;
TEST_STRING_EQUAL(cvmr_copy.getIdentifier(), "my_test_identifier")
cvmr.setElementPath("my_test_elementpath");
cvmr_copy = cvmr;
TEST_STRING_EQUAL(cvmr_copy.getElementPath(), "my_test_elementpath")
cvmr.setRequirementLevel(CVMappingRule::MUST);
cvmr_copy = cvmr;
TEST_EQUAL(cvmr_copy.getRequirementLevel(), CVMappingRule::MUST);
cvmr.setRequirementLevel(CVMappingRule::SHOULD);
cvmr_copy = cvmr;
TEST_EQUAL(cvmr_copy.getRequirementLevel(), CVMappingRule::SHOULD);
cvmr.setCombinationsLogic(CVMappingRule::AND);
cvmr_copy = cvmr;
TEST_EQUAL(cvmr_copy.getCombinationsLogic(), CVMappingRule::AND);
cvmr.setCombinationsLogic(CVMappingRule::XOR);
cvmr_copy = cvmr;
TEST_EQUAL(cvmr_copy.getCombinationsLogic(), CVMappingRule::XOR);
cvmr.setScopePath("my_test_scopepath");
cvmr_copy = cvmr;
TEST_STRING_EQUAL(cvmr_copy.getScopePath(), "my_test_scopepath")
CVMappingTerm term1, term2;
term1.setAccession("BLA:1");
term2.setAccession("BLA:2");
vector<CVMappingTerm> terms;
terms.push_back(term1);
terms.push_back(term2);
cvmr.setCVTerms(terms);
cvmr_copy = cvmr;
TEST_EQUAL(cvmr_copy.getCVTerms() == terms, true)
}
END_SECTION
START_SECTION((bool operator != (const CVMappingRule& rhs) const))
{
// After changing each field the objects must compare unequal; after
// re-assignment they must compare equal again (operator!= returns false).
CVMappingRule cvmr, cvmr_copy;
cvmr.setIdentifier("my_test_identifier");
TEST_FALSE(cvmr == cvmr_copy)
cvmr_copy = cvmr;
TEST_EQUAL(cvmr != cvmr_copy, false)
cvmr.setElementPath("my_test_elementpath");
TEST_FALSE(cvmr == cvmr_copy)
cvmr_copy = cvmr;
TEST_EQUAL(cvmr != cvmr_copy, false)
cvmr.setRequirementLevel(CVMappingRule::MUST); // default
TEST_EQUAL(cvmr != cvmr_copy, false)
cvmr_copy = cvmr;
TEST_EQUAL(cvmr != cvmr_copy, false)
cvmr.setRequirementLevel(CVMappingRule::SHOULD);
TEST_FALSE(cvmr == cvmr_copy)
cvmr_copy = cvmr;
TEST_EQUAL(cvmr != cvmr_copy, false)
cvmr.setCombinationsLogic(CVMappingRule::AND);
TEST_FALSE(cvmr == cvmr_copy)
cvmr_copy = cvmr;
TEST_EQUAL(cvmr != cvmr_copy, false)
cvmr.setCombinationsLogic(CVMappingRule::XOR);
TEST_FALSE(cvmr == cvmr_copy)
cvmr_copy = cvmr;
TEST_EQUAL(cvmr != cvmr_copy, false)
cvmr.setScopePath("my_test_scopepath");
TEST_FALSE(cvmr == cvmr_copy)
cvmr_copy = cvmr;
TEST_EQUAL(cvmr != cvmr_copy, false)
CVMappingTerm term1, term2;
term1.setAccession("BLA:1");
term2.setAccession("BLA:2");
vector<CVMappingTerm> terms;
terms.push_back(term1);
terms.push_back(term2);
cvmr.setCVTerms(terms);
TEST_FALSE(cvmr == cvmr_copy)
cvmr_copy = cvmr;
TEST_EQUAL(cvmr != cvmr_copy, false)
}
END_SECTION
START_SECTION((bool operator == (const CVMappingRule& rhs) const))
{
// Mirror of the operator!= section, asserting through operator== directly.
CVMappingRule cvmr, cvmr_copy;
cvmr.setIdentifier("my_test_identifier");
TEST_EQUAL(cvmr == cvmr_copy, false)
cvmr_copy = cvmr;
TEST_TRUE(cvmr == cvmr_copy)
cvmr.setElementPath("my_test_elementpath");
TEST_EQUAL(cvmr == cvmr_copy, false)
cvmr_copy = cvmr;
TEST_TRUE(cvmr == cvmr_copy)
cvmr.setRequirementLevel(CVMappingRule::MUST); // default
TEST_TRUE(cvmr == cvmr_copy)
cvmr_copy = cvmr;
TEST_TRUE(cvmr == cvmr_copy)
cvmr.setRequirementLevel(CVMappingRule::SHOULD);
TEST_EQUAL(cvmr == cvmr_copy, false)
cvmr_copy = cvmr;
TEST_TRUE(cvmr == cvmr_copy)
cvmr.setCombinationsLogic(CVMappingRule::AND);
TEST_EQUAL(cvmr == cvmr_copy, false)
cvmr_copy = cvmr;
TEST_TRUE(cvmr == cvmr_copy)
cvmr.setCombinationsLogic(CVMappingRule::XOR);
TEST_EQUAL(cvmr == cvmr_copy, false)
cvmr_copy = cvmr;
TEST_TRUE(cvmr == cvmr_copy)
cvmr.setScopePath("my_test_scopepath");
TEST_EQUAL(cvmr == cvmr_copy, false)
cvmr_copy = cvmr;
TEST_TRUE(cvmr == cvmr_copy)
CVMappingTerm term1, term2;
term1.setAccession("BLA:1");
term2.setAccession("BLA:2");
vector<CVMappingTerm> terms;
terms.push_back(term1);
terms.push_back(term2);
cvmr.setCVTerms(terms);
TEST_EQUAL(cvmr == cvmr_copy, false)
cvmr_copy = cvmr;
TEST_TRUE(cvmr == cvmr_copy)
}
END_SECTION
// Setter/getter sections below operate on the shared `ptr` instance;
// the trivial getters are covered by their setter sections (NOT_TESTABLE).
START_SECTION((void setIdentifier(const String &identifier)))
{
ptr->setIdentifier("my_test_identifier");
TEST_STRING_EQUAL(ptr->getIdentifier(), "my_test_identifier")
}
END_SECTION
START_SECTION((const String& getIdentifier() const ))
{
NOT_TESTABLE
}
END_SECTION
START_SECTION((void setElementPath(const String &element_path)))
{
ptr->setElementPath("my_test_elementpath");
TEST_STRING_EQUAL(ptr->getElementPath(), "my_test_elementpath")
}
END_SECTION
START_SECTION((const String& getElementPath() const ))
{
NOT_TESTABLE
}
END_SECTION
START_SECTION((void setRequirementLevel(RequirementLevel level)))
{
ptr->setRequirementLevel(CVMappingRule::MUST);
TEST_EQUAL(ptr->getRequirementLevel(), CVMappingRule::MUST)
ptr->setRequirementLevel(CVMappingRule::MAY);
TEST_EQUAL(ptr->getRequirementLevel(), CVMappingRule::MAY)
}
END_SECTION
START_SECTION((RequirementLevel getRequirementLevel() const ))
{
NOT_TESTABLE
}
END_SECTION
START_SECTION((void setCombinationsLogic(CombinationsLogic combinations_logic)))
{
ptr->setCombinationsLogic(CVMappingRule::AND);
TEST_EQUAL(ptr->getCombinationsLogic(), CVMappingRule::AND)
ptr->setCombinationsLogic(CVMappingRule::XOR);
TEST_EQUAL(ptr->getCombinationsLogic(), CVMappingRule::XOR)
}
END_SECTION
START_SECTION((CombinationsLogic getCombinationsLogic() const ))
{
NOT_TESTABLE
}
END_SECTION
START_SECTION((void setScopePath(const String &path)))
{
ptr->setScopePath("my_test_scopepath");
TEST_STRING_EQUAL(ptr->getScopePath(), "my_test_scopepath")
}
END_SECTION
START_SECTION((const String& getScopePath() const ))
{
NOT_TESTABLE
}
END_SECTION
START_SECTION((void setCVTerms(const std::vector< CVMappingTerm > &cv_terms)))
{
CVMappingTerm cv_term1, cv_term2;
cv_term1.setAccession("BLA:1");
cv_term2.setAccession("BLA:2");
vector<CVMappingTerm> terms;
terms.push_back(cv_term1);
terms.push_back(cv_term2);
ptr->setCVTerms(terms);
TEST_EQUAL(ptr->getCVTerms() == terms, true)
}
END_SECTION
START_SECTION((const std::vector<CVMappingTerm>& getCVTerms() const ))
{
NOT_TESTABLE
}
END_SECTION
START_SECTION((void addCVTerm(const CVMappingTerm &cv_terms)))
{
// Relies on the two terms installed by the setCVTerms section above.
TEST_EQUAL(ptr->getCVTerms().size(), 2)
CVMappingTerm cv_term;
cv_term.setAccession("BLA:3");
ptr->addCVTerm(cv_term);
TEST_EQUAL(ptr->getCVTerms().size(), 3)
}
END_SECTION
delete ptr;
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/FeatureFinderIdentificationAlgorithm_test.cpp | .cpp | 1,229 | 45 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Timo Sachsenberg $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
///////////////////////////
#include <OpenMS/FEATUREFINDER/FeatureFinderIdentificationAlgorithm.h>
///////////////////////////
using namespace OpenMS;
using namespace std;
// Minimal class test for FeatureFinderIdentificationAlgorithm: only default
// construction and destruction are covered here.
START_TEST(FeatureFinderIdentificationAlgorithm, "$Id$")
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
// Use nullptr instead of the integer literal 0, consistent with the other
// OpenMS class tests (e.g. CVMappingRule_test, ConsensusIDAlgorithmBest_test).
FeatureFinderIdentificationAlgorithm* ptr = nullptr;
FeatureFinderIdentificationAlgorithm* null_ptr = nullptr;
START_SECTION(FeatureFinderIdentificationAlgorithm())
{
// Default construction must yield a non-null instance.
ptr = new FeatureFinderIdentificationAlgorithm();
TEST_NOT_EQUAL(ptr, null_ptr)
}
END_SECTION
START_SECTION(~FeatureFinderIdentificationAlgorithm())
{
// Destruction of the default-constructed instance must not crash or leak.
delete ptr;
}
END_SECTION
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/MassDecomposer_test.cpp | .cpp | 1,801 | 67 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Stephan Aiche $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/test_config.h>
///////////////////////////
#include <OpenMS/CHEMISTRY/MASSDECOMPOSITION/IMS/MassDecomposer.h>
///////////////////////////
using namespace OpenMS;
using namespace ims;
using namespace std;
// Class test for ims::MassDecomposer. The class is an abstract interface
// (all members pure virtual), so every section is documentation-only and
// marked NOT_TESTABLE; concrete behavior is tested in the implementing
// classes' own test files.
START_TEST(MassDecomposer, "$Id$")
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
START_SECTION((virtual ~MassDecomposer()))
{
// MassDecomposer is an abstract base class, without any implementation
NOT_TESTABLE
}
END_SECTION
START_SECTION((virtual bool exist(value_type mass)=0))
{
// MassDecomposer is an abstract base class, without any implementation
NOT_TESTABLE
}
END_SECTION
START_SECTION((virtual decomposition_type getDecomposition(value_type mass)=0))
{
// MassDecomposer is an abstract base class, without any implementation
NOT_TESTABLE
}
END_SECTION
START_SECTION((virtual decompositions_type getAllDecompositions(value_type mass)=0))
{
// MassDecomposer is an abstract base class, without any implementation
NOT_TESTABLE
}
END_SECTION
START_SECTION((virtual decomposition_value_type getNumberOfDecompositions(value_type mass)=0))
{
// MassDecomposer is an abstract base class, without any implementation
NOT_TESTABLE
}
END_SECTION
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/ConsensusIDAlgorithmBest_test.cpp | .cpp | 5,877 | 184 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Marc Sturm, Andreas Bertsch, Sven Nahnsen, Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/test_config.h>
///////////////////////////
#include <OpenMS/ANALYSIS/ID/ConsensusIDAlgorithmBest.h>
using namespace OpenMS;
using namespace std;
///////////////////////////
// Class test for ConsensusIDAlgorithmBest: builds three peptide ID runs with
// overlapping hits (lower score = better, PEP score type) and checks that
// apply() merges them into one ranked consensus list keeping each peptide's
// best score.
START_TEST(ConsensusIDAlgorithmBest, "$Id$")
/////////////////////////////////////////////////////////////
ConsensusIDAlgorithm* ptr = nullptr;
ConsensusIDAlgorithm* null_pointer = nullptr;
START_SECTION(ConsensusIDAlgorithmBest())
{
ptr = new ConsensusIDAlgorithmBest();
TEST_NOT_EQUAL(ptr, null_pointer);
}
END_SECTION
START_SECTION(~ConsensusIDAlgorithmBest())
{
delete(ptr);
}
END_SECTION
// create 3 ID runs:
PeptideIdentification temp;
temp.setScoreType("Posterior Error Probability");
temp.setHigherScoreBetter(false);
PeptideIdentificationList ids(3, temp);
vector<PeptideHit> hits;
// the first ID has 5 hits
hits.resize(5);
hits[0].setSequence(AASequence::fromString("A"));
hits[0].setScore(0.1);
hits[1].setSequence(AASequence::fromString("B"));
hits[1].setScore(0.2);
hits[2].setSequence(AASequence::fromString("C"));
hits[2].setScore(0.3);
hits[3].setSequence(AASequence::fromString("D"));
hits[3].setScore(0.4);
hits[4].setSequence(AASequence::fromString("E"));
hits[4].setScore(0.5);
ids[0].setHits(hits);
// the second ID has 3 hits
hits.resize(3);
hits[0].setSequence(AASequence::fromString("C"));
hits[0].setScore(0.2);
hits[1].setSequence(AASequence::fromString("A"));
hits[1].setScore(0.4);
hits[2].setSequence(AASequence::fromString("B"));
hits[2].setScore(0.6);
ids[1].setHits(hits);
// the third ID has 10 hits
hits.resize(10);
hits[0].setSequence(AASequence::fromString("F"));
hits[0].setScore(0.0);
hits[1].setSequence(AASequence::fromString("C"));
hits[1].setScore(0.1);
hits[2].setSequence(AASequence::fromString("G"));
hits[2].setScore(0.2);
hits[3].setSequence(AASequence::fromString("D"));
hits[3].setScore(0.3);
hits[4].setSequence(AASequence::fromString("B"));
hits[4].setScore(0.4);
hits[5].setSequence(AASequence::fromString("E"));
hits[5].setScore(0.5);
hits[6].setSequence(AASequence::fromString("H"));
hits[6].setScore(0.6);
hits[7].setSequence(AASequence::fromString("I"));
hits[7].setScore(0.7);
hits[8].setSequence(AASequence::fromString("J"));
hits[8].setScore(0.8);
hits[9].setSequence(AASequence::fromString("K"));
hits[9].setScore(0.9);
ids[2].setHits(hits);
START_SECTION(void apply(PeptideIdentificationList& ids))
{
TOLERANCE_ABSOLUTE(0.01)
ConsensusIDAlgorithmBest consensus;
// define parameters:
Param param;
param.setValue("filter:considered_hits", 0);
consensus.setParameters(param);
// apply:
PeptideIdentificationList f = ids;
map<String,String> empty;
consensus.apply(f, empty);
// all three runs collapse into a single identification with the union of
// the 11 distinct peptides, each carrying its best (lowest) score:
TEST_EQUAL(f.size(), 1);
hits = f[0].getHits();
TEST_EQUAL(hits.size(), 11);
TEST_EQUAL(hits[0].getSequence(), AASequence::fromString("F"));
TEST_REAL_SIMILAR(hits[0].getScore(), 0.0);
// hits with the same score get assigned the same rank:
TEST_EQUAL(hits[1].getSequence(), AASequence::fromString("A"));
TEST_REAL_SIMILAR(hits[1].getScore(), 0.1);
TEST_EQUAL(hits[2].getSequence(), AASequence::fromString("C"));
TEST_REAL_SIMILAR(hits[2].getScore(), 0.1);
TEST_EQUAL(hits[3].getSequence(), AASequence::fromString("B"));
TEST_REAL_SIMILAR(hits[3].getScore(), 0.2);
TEST_EQUAL(hits[4].getSequence(), AASequence::fromString("G"));
TEST_REAL_SIMILAR(hits[4].getScore(), 0.2);
TEST_EQUAL(hits[5].getSequence(), AASequence::fromString("D"));
TEST_REAL_SIMILAR(hits[5].getScore(), 0.3);
TEST_EQUAL(hits[6].getSequence(), AASequence::fromString("E"));
TEST_REAL_SIMILAR(hits[6].getScore(), 0.5);
TEST_EQUAL(hits[7].getSequence(), AASequence::fromString("H"));
TEST_REAL_SIMILAR(hits[7].getScore(), 0.6);
TEST_EQUAL(hits[8].getSequence(), AASequence::fromString("I"));
TEST_REAL_SIMILAR(hits[8].getScore(), 0.7);
TEST_EQUAL(hits[9].getSequence(), AASequence::fromString("J"));
TEST_REAL_SIMILAR(hits[9].getScore(), 0.8);
TEST_EQUAL(hits[10].getSequence(), AASequence::fromString("K"));
TEST_REAL_SIMILAR(hits[10].getScore(), 0.9);
// mixed score orientations across runs must be rejected:
ids[2].setHigherScoreBetter(true);
TEST_EXCEPTION(Exception::InvalidValue, consensus.apply(ids, empty));
}
END_SECTION
START_SECTION([EXTRA] void apply(PeptideIdentificationList& ids))
{
// test edge cases for consensus support calculation (issue #2020):
ConsensusIDAlgorithmBest consensus;
PeptideIdentificationList id(1);
id[0].getHits().resize(2);
id[0].getHits()[0].setSequence(AASequence::fromString("PEPTIDE"));
id[0].getHits()[1] = id[0].getHits()[0]; // duplicated peptide hit
map<String,String> empty;
consensus.apply(id, empty, 2); // two runs, but one produced no hits
TEST_EQUAL(id.size(), 1);
TEST_EQUAL(id[0].getHits().size(), 1);
TEST_EQUAL(id[0].getHits()[0].getSequence().toString(), "PEPTIDE");
TEST_REAL_SIMILAR(id[0].getHits()[0].getMetaValue("consensus_support"), 1.0);
// change parameter:
Param param;
param.setValue("filter:count_empty", "true");
consensus.setParameters(param);
id[0].getHits().push_back(id[0].getHits()[0]);
consensus.apply(id, empty, 2);
// with empty runs counted, support for a hit seen in only one of two runs
// drops to 0:
TEST_EQUAL(id.size(), 1);
TEST_EQUAL(id[0].getHits().size(), 1);
TEST_EQUAL(id[0].getHits()[0].getSequence().toString(), "PEPTIDE");
TEST_REAL_SIMILAR(id[0].getHits()[0].getMetaValue("consensus_support"), 0.0);
}
END_SECTION
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/XQuestScores_test.cpp | .cpp | 10,952 | 156 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Eugen Netz $
// $Authors: Eugen Netz $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/test_config.h>
///////////////////////////
#include <OpenMS/ANALYSIS/XLMS/XQuestScores.h>
#include <OpenMS/CHEMISTRY/TheoreticalSpectrumGeneratorXLMS.h>
#include <OpenMS/ANALYSIS/XLMS/OPXLSpectrumProcessingAlgorithms.h>
using namespace OpenMS;
// Class test for the static XQuest cross-link scoring functions. Three
// theoretical linear-ion spectra are generated from overlapping peptides,
// two peak alignments are computed between them, and the various scores
// (preScore, match-odds, log-occupancy, weighted TIC, matched current,
// cross-correlation) are checked against golden values.
START_TEST(XQuestScores, "$Id$")
TheoreticalSpectrumGeneratorXLMS specGen;
Param param = specGen.getParameters();
param.setValue("add_isotopes", "false");
param.setValue("add_metainfo", "true");
param.setValue("add_first_prefix_ion", "false");
specGen.setParameters(param);
PeakSpectrum theo_spec_1, theo_spec_2, theo_spec_3;
AASequence peptide1 = AASequence::fromString("PEPTIDEPEPTIDEPEPTIDE");
AASequence peptide2 = AASequence::fromString("PEPTIDEEDITPEPTIDE");
AASequence peptide3 = AASequence::fromString("EDITPEPTIDE");
specGen.getLinearIonSpectrum(theo_spec_1, peptide1, 3, true, 2);
specGen.getLinearIonSpectrum(theo_spec_2, peptide2, 3, true, 2);
specGen.getLinearIonSpectrum(theo_spec_3, peptide3, 5, true, 2);
// specGen.getLinearIonSpectrum(theo_spec_4, peptide3, 2, true);
// alignment1: spec_1 vs spec_2 (similar peptides, many shared peaks);
// alignment2: spec_1 vs spec_3 (shorter peptide, fewer shared peaks).
std::vector <std::pair <Size, Size> > alignment1;
std::vector <std::pair <Size, Size> > alignment2;
DataArrays::FloatDataArray dummy_array1;
DataArrays::FloatDataArray dummy_array2;
OPXLSpectrumProcessingAlgorithms::getSpectrumAlignmentFastCharge(alignment1, 20, true, theo_spec_1, theo_spec_2, theo_spec_1.getIntegerDataArrays()[0], theo_spec_2.getIntegerDataArrays()[0], dummy_array1);
OPXLSpectrumProcessingAlgorithms::getSpectrumAlignmentFastCharge(alignment2, 20, true, theo_spec_1, theo_spec_3, theo_spec_1.getIntegerDataArrays()[0], theo_spec_3.getIntegerDataArrays()[0], dummy_array2);
START_SECTION(static float preScore(Size matched_alpha, Size ions_alpha, Size matched_beta, Size ions_beta))
TEST_REAL_SIMILAR(XQuestScores::preScore(1, 1, 1, 1), 1.0)
TEST_REAL_SIMILAR(XQuestScores::preScore(2, 4, 3, 6), 0.5)
TEST_REAL_SIMILAR(XQuestScores::preScore(3, 2, 9, 6), 1.5) // more matched peaks, than theoretical peaks. practically impossible
TEST_REAL_SIMILAR(XQuestScores::preScore(0, 5, 0, 5), 0.0)
TEST_REAL_SIMILAR(XQuestScores::preScore(0, 5, 3, 5), 0.10954)
TEST_REAL_SIMILAR(XQuestScores::preScore(2, 5, 0, 5), 0.08944)
TEST_REAL_SIMILAR(XQuestScores::preScore(0, 50, 0, 50), 0.0)
TEST_REAL_SIMILAR(XQuestScores::preScore(0, 50, 3, 50), 0.01095)
TEST_REAL_SIMILAR(XQuestScores::preScore(2, 50, 0, 50), 0.00894)
TEST_REAL_SIMILAR(XQuestScores::preScore(5, 50, 0, 50), 0.01414)
TEST_REAL_SIMILAR(XQuestScores::preScore(45, 50, 0, 50), 0.04242)
TEST_REAL_SIMILAR(XQuestScores::preScore(2, 50, 3, 50), 0.04898)
TEST_REAL_SIMILAR(XQuestScores::preScore(1, 50, 1, 50), 0.02)
TEST_REAL_SIMILAR(XQuestScores::preScore(2, 50, 2, 50), 0.04)
TEST_REAL_SIMILAR(XQuestScores::preScore(45, 50, 5, 50), 0.3)
TEST_REAL_SIMILAR(XQuestScores::preScore(25, 50, 25, 50), 0.5)
END_SECTION
START_SECTION(static float preScore(Size matched_alpha, Size ions_alpha))
// single-peptide overload: simple matched/total ratio
TEST_REAL_SIMILAR(XQuestScores::preScore(1, 1), 1.0)
TEST_REAL_SIMILAR(XQuestScores::preScore(2, 1), 2.0)
TEST_REAL_SIMILAR(XQuestScores::preScore(0, 2), 0.0)
TEST_REAL_SIMILAR(XQuestScores::preScore(0, 50), 0.0)
TEST_REAL_SIMILAR(XQuestScores::preScore(1, 50), 0.02)
TEST_REAL_SIMILAR(XQuestScores::preScore(3, 50), 0.06)
TEST_REAL_SIMILAR(XQuestScores::preScore(9, 18), 0.5)
END_SECTION
START_SECTION(static double matchOddsScore(const PeakSpectrum& theoretical_spec, const Size matched_size, double fragment_mass_tolerance, bool fragment_mass_tolerance_unit_ppm, bool is_xlink_spectrum = false, Size n_charges = 1))
// sanity checks on the fixture before testing the score itself:
TEST_EQUAL(theo_spec_1.size(), 46)
TEST_EQUAL(alignment1.size(), 28)
TEST_EQUAL(alignment2.size(), 10)
TEST_REAL_SIMILAR(theo_spec_1.back().getMZ() - theo_spec_1[0].getMZ(), 1903.33405)
TEST_REAL_SIMILAR(std::log(theo_spec_1.back().getMZ()) - std::log(theo_spec_1[0].getMZ()), 3.99930)
TEST_REAL_SIMILAR(XQuestScores::matchOddsScore(theo_spec_1, alignment1.size(), 0.1, false), 106.63674);
TEST_REAL_SIMILAR(XQuestScores::matchOddsScore(theo_spec_2, alignment1.size(), 0.1, false), 111.87796);
TEST_REAL_SIMILAR(XQuestScores::matchOddsScore(theo_spec_1, alignment2.size(), 0.1, false), 28.07671);
TEST_REAL_SIMILAR(XQuestScores::matchOddsScore(theo_spec_3, alignment2.size(), 0.1, false), 24.22081);
TEST_REAL_SIMILAR(XQuestScores::matchOddsScore(theo_spec_1, alignment1.size(), 0.2, false, true, 2), 106.63373);
TEST_REAL_SIMILAR(XQuestScores::matchOddsScore(theo_spec_2, alignment1.size(), 0.2, false, true, 2), 111.87432);
TEST_REAL_SIMILAR(XQuestScores::matchOddsScore(theo_spec_3, alignment2.size(), 0.2, false), 17.11504);
TEST_REAL_SIMILAR(XQuestScores::matchOddsScore(theo_spec_1, alignment1.size(), 10, true), 187.24386);
TEST_REAL_SIMILAR(XQuestScores::matchOddsScore(theo_spec_2, alignment1.size(), 10, true), 198.42811);
TEST_REAL_SIMILAR(XQuestScores::matchOddsScore(theo_spec_1, alignment2.size(), 10, true), 58.41773);
TEST_REAL_SIMILAR(XQuestScores::matchOddsScore(theo_spec_3, alignment2.size(), 10, true), 63.85680);
TEST_REAL_SIMILAR(XQuestScores::matchOddsScore(theo_spec_1, alignment1.size(), 20, true, true, 2), 187.24367);
TEST_REAL_SIMILAR(XQuestScores::matchOddsScore(theo_spec_3, alignment2.size(), 20, true), 56.24576);
END_SECTION
START_SECTION(static double logOccupancyProb(const PeakSpectrum& theoretical_spec, const Size matched_size, double fragment_mass_tolerance, bool fragment_mass_tolerance_unit_ppm))
TEST_REAL_SIMILAR(XQuestScores::logOccupancyProb(theo_spec_1, alignment1.size(), 0.1, false), 126.59011);
TEST_REAL_SIMILAR(XQuestScores::logOccupancyProb(theo_spec_3, alignment2.size(), 0.1, false), 31.58523);
TEST_REAL_SIMILAR(XQuestScores::logOccupancyProb(theo_spec_1, alignment2.size(), 0.1, false), 35.52062);
TEST_REAL_SIMILAR(XQuestScores::logOccupancyProb(theo_spec_1, alignment1.size(), 0.2, false), 106.63674);
TEST_REAL_SIMILAR(XQuestScores::logOccupancyProb(theo_spec_1, alignment2.size(), 0.2, false), 28.07671);
TEST_REAL_SIMILAR(XQuestScores::logOccupancyProb(theo_spec_3, alignment2.size(), 0.2, false), 24.22081);
TEST_REAL_SIMILAR(XQuestScores::logOccupancyProb(theo_spec_1, alignment1.size(), 10, true), 214.75707);
TEST_REAL_SIMILAR(XQuestScores::logOccupancyProb(theo_spec_1, alignment2.size(), 10, true), 68.84436);
TEST_REAL_SIMILAR(XQuestScores::logOccupancyProb(theo_spec_3, alignment2.size(), 10, true), 73.47408);
TEST_REAL_SIMILAR(XQuestScores::logOccupancyProb(theo_spec_1, alignment1.size(), 20, true), 194.66285);
TEST_REAL_SIMILAR(XQuestScores::logOccupancyProb(theo_spec_1, alignment2.size(), 20, true), 61.22836);
TEST_REAL_SIMILAR(XQuestScores::logOccupancyProb(theo_spec_3, alignment2.size(), 20, true), 65.85512);
TEST_REAL_SIMILAR(XQuestScores::logOccupancyProb(theo_spec_1, alignment1.size(), 200, true), 128.01463);
TEST_REAL_SIMILAR(XQuestScores::logOccupancyProb(theo_spec_1, alignment2.size(), 200, true), 36.05495);
TEST_REAL_SIMILAR(XQuestScores::logOccupancyProb(theo_spec_3, alignment2.size(), 200, true), 40.62847);
END_SECTION
START_SECTION(static double weightedTICScoreXQuest(Size alpha_size, Size beta_size, double intsum_alpha, double intsum_beta, double total_current, bool type_is_cross_link))
TEST_REAL_SIMILAR(XQuestScores::weightedTICScoreXQuest(20, 10, 500.0, 500.0, 1500.0, true), 0.13636)
TEST_REAL_SIMILAR(XQuestScores::weightedTICScoreXQuest(20, 10, 1000.0, 500.0, 1500.0, true), 0.18181)
TEST_REAL_SIMILAR(XQuestScores::weightedTICScoreXQuest(20, 10, 500.0, 1000.0, 1500.0, true), 0.22727)
TEST_REAL_SIMILAR(XQuestScores::weightedTICScoreXQuest(20, 10, 1450.0, 50.0, 1500.0, true), 0.14090)
TEST_REAL_SIMILAR(XQuestScores::weightedTICScoreXQuest(20, 10, 50.0, 1450.0, 1500.0, true), 0.26818)
TEST_REAL_SIMILAR(XQuestScores::weightedTICScoreXQuest(20, 0, 500.0, 0.0, 1500.0, false), 0.08333)
END_SECTION
START_SECTION(static double weightedTICScore(Size alpha_size, Size beta_size, double intsum_alpha, double intsum_beta, double total_current, bool type_is_cross_link))
TEST_REAL_SIMILAR(XQuestScores::weightedTICScore(20, 10, 500.0, 500.0, 1500.0, true), 0.5)
TEST_REAL_SIMILAR(XQuestScores::weightedTICScore(20, 10, 1000.0, 500.0, 1500.0, true), 0.66666)
TEST_REAL_SIMILAR(XQuestScores::weightedTICScore(20, 10, 500.0, 1000.0, 1500.0, true), 0.83333)
TEST_REAL_SIMILAR(XQuestScores::weightedTICScore(20, 10, 1450.0, 50.0, 1500.0, true), 0.51666)
TEST_REAL_SIMILAR(XQuestScores::weightedTICScore(20, 10, 50.0, 1450.0, 1500.0, true), 0.98333)
TEST_REAL_SIMILAR(XQuestScores::weightedTICScore(20, 0, 500.0, 0.0, 1500.0, false), 0.33333)
END_SECTION
START_SECTION(static double matchedCurrentChain(const std::vector< std::pair< Size, Size > >& matched_spec_linear, const std::vector< std::pair< Size, Size > >& matched_spec_xlinks, const PeakSpectrum& spectrum_linear_peaks, const PeakSpectrum& spectrum_xlink_peaks))
TEST_REAL_SIMILAR(XQuestScores::matchedCurrentChain(alignment1, alignment2, theo_spec_2, theo_spec_3), 38.0)
END_SECTION
START_SECTION(static double totalMatchedCurrent(const std::vector< std::pair< Size, Size > >& matched_spec_linear_alpha, const std::vector< std::pair< Size, Size > >& matched_spec_linear_beta, const std::vector< std::pair< Size, Size > >& matched_spec_xlinks_alpha, const std::vector< std::pair< Size, Size > >& matched_spec_xlinks_beta, const PeakSpectrum& spectrum_linear_peaks, const PeakSpectrum& spectrum_xlink_peaks))
TEST_REAL_SIMILAR(XQuestScores::totalMatchedCurrent(alignment1, alignment1, alignment2, alignment2, theo_spec_2, theo_spec_3), 38.0)
END_SECTION
START_SECTION(static std::vector< double > xCorrelation(const PeakSpectrum & spec1, const PeakSpectrum & spec2, Int maxshift, double tolerance))
// five shift positions (maxshift = 2): the zero-shift correlation (index 2)
// is pinned; off-center shifts only get loose bounds.
std::vector <double> xcorr_scores = XQuestScores::xCorrelation(theo_spec_1, theo_spec_2, 2, 0.2);
TEST_EQUAL(xcorr_scores [0] < 0.5, true)
TEST_EQUAL(xcorr_scores [1] < 0, true)
TEST_REAL_SIMILAR(xcorr_scores [2], 0.65121)
TEST_EQUAL(xcorr_scores [3] < 0, true)
TEST_EQUAL(xcorr_scores [4] < 0.5, true)
END_SECTION
START_SECTION(static double XQuestScores::xCorrelationPrescore(const PeakSpectrum & spec1, const PeakSpectrum & spec2, double tolerance))
double xcorr_fast = XQuestScores::xCorrelationPrescore(theo_spec_1, theo_spec_2, 0.2);
TEST_REAL_SIMILAR(xcorr_fast, 0.7)
END_SECTION
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/MobilityPeak1D_test.cpp | .cpp | 11,755 | 371 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
///////////////////////////
#include <OpenMS/KERNEL/MobilityPeak1D.h>
#include <unordered_set>
#include <unordered_map>
///////////////////////////
START_TEST(MobilityPeak1D, "$Id$")
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
using namespace OpenMS;
MobilityPeak1D* d10_ptr = nullptr;
MobilityPeak1D* d10_nullPointer = nullptr;
static_assert(std::is_trivially_destructible<MobilityPeak1D> {});
//static_assert(std::is_trivially_default_constructible<MobilityPeak1D> {});
static_assert(std::is_trivially_copy_constructible<MobilityPeak1D> {});
static_assert(std::is_trivially_copy_assignable<MobilityPeak1D> {});
static_assert(std::is_trivially_move_constructible<MobilityPeak1D> {});
static_assert(std::is_nothrow_move_constructible<MobilityPeak1D> {});
static_assert(std::is_trivially_move_assignable<MobilityPeak1D> {});
START_SECTION((MobilityPeak1D()))
d10_ptr = new MobilityPeak1D;
TEST_NOT_EQUAL(d10_ptr, d10_nullPointer)
END_SECTION
START_SECTION((~MobilityPeak1D()))
delete d10_ptr;
END_SECTION
START_SECTION((IntensityType getIntensity() const))
TEST_REAL_SIMILAR(MobilityPeak1D().getIntensity(), 0.0)
END_SECTION
START_SECTION((PositionType const& getPosition() const))
TEST_REAL_SIMILAR(MobilityPeak1D().getPosition()[0], 0.0)
END_SECTION
START_SECTION((CoordinateType getMobility() const))
TEST_REAL_SIMILAR(MobilityPeak1D().getMobility(), 0.0)
END_SECTION
START_SECTION((CoordinateType getPos() const))
TEST_REAL_SIMILAR(MobilityPeak1D().getPos(), 0.0)
END_SECTION
START_SECTION((void setIntensity(IntensityType intensity)))
MobilityPeak1D p;
p.setIntensity(17.8f);
TEST_REAL_SIMILAR(p.getIntensity(), 17.8)
END_SECTION
START_SECTION((void setPosition(PositionType const& position)))
MobilityPeak1D::PositionType pos;
pos[0] = 1.0;
MobilityPeak1D p;
p.setPosition(pos);
TEST_REAL_SIMILAR(p.getPosition()[0], 1.0)
END_SECTION
START_SECTION((PositionType & getPosition()))
MobilityPeak1D::PositionType pos;
pos[0] = 1.0;
MobilityPeak1D p;
p.getPosition() = pos;
TEST_REAL_SIMILAR(p.getPosition()[0], 1.0)
END_SECTION
START_SECTION((void setMobility(CoordinateType mb)))
MobilityPeak1D p;
p.setMobility(5.0);
TEST_REAL_SIMILAR(p.getMobility(), 5.0)
END_SECTION
START_SECTION((void setPos(CoordinateTypepos)))
MobilityPeak1D p;
p.setPos(5.0);
TEST_REAL_SIMILAR(p.getPos(), 5.0)
END_SECTION
START_SECTION((MobilityPeak1D(const MobilityPeak1D& p)))
MobilityPeak1D::PositionType pos;
pos[0] = 21.21;
MobilityPeak1D p;
p.setIntensity(123.456f);
p.setPosition(pos);
MobilityPeak1D::PositionType pos2;
MobilityPeak1D::IntensityType i2;
MobilityPeak1D copy_of_p(p);
i2 = copy_of_p.getIntensity();
pos2 = copy_of_p.getPosition();
TEST_REAL_SIMILAR(i2, 123.456)
TEST_REAL_SIMILAR(pos2[0], 21.21)
END_SECTION
START_SECTION((MobilityPeak1D & operator=(const MobilityPeak1D& rhs)))
MobilityPeak1D::PositionType pos;
pos[0] = 21.21;
MobilityPeak1D p;
p.setIntensity(123.456f);
p.setPosition(pos);
MobilityPeak1D::PositionType pos2;
MobilityPeak1D::IntensityType i2;
MobilityPeak1D copy_of_p;
copy_of_p = p;
i2 = copy_of_p.getIntensity();
pos2 = copy_of_p.getPosition();
TEST_REAL_SIMILAR(i2, 123.456)
TEST_REAL_SIMILAR(pos2[0], 21.21)
END_SECTION
START_SECTION((bool operator==(const MobilityPeak1D& rhs) const))
MobilityPeak1D p1;
MobilityPeak1D p2(p1);
TEST_TRUE(p1 == p2)
p1.setIntensity(5.0f);
TEST_FALSE(p1 == p2)
p2.setIntensity(5.0f);
TEST_TRUE(p1 == p2)
p1.getPosition()[0] = 5;
TEST_FALSE(p1 == p2)
p2.getPosition()[0] = 5;
TEST_TRUE(p1 == p2)
END_SECTION
START_SECTION((bool operator!=(const MobilityPeak1D& rhs) const))
MobilityPeak1D p1;
MobilityPeak1D p2(p1);
TEST_FALSE(p1 != p2)
p1.setIntensity(5.0f);
TEST_TRUE(p1 != p2)
p2.setIntensity(5.0f);
TEST_FALSE(p1 != p2)
p1.getPosition()[0] = 5;
TEST_TRUE(p1 != p2)
p2.getPosition()[0] = 5;
TEST_FALSE(p1 != p2)
END_SECTION
/////////////////////////////////////////////////////////////
// Nested stuff
/////////////////////////////////////////////////////////////
// Shared fixtures for the comparator tests below:
// p1 < p2 in both intensity and mobility.
MobilityPeak1D p1;
p1.setIntensity(10.0);
p1.setMobility(10.0);
MobilityPeak1D p2;
p2.setIntensity(12.0);
p2.setMobility(12.0);
// IntensityLess
START_SECTION(([MobilityPeak1D::IntensityLess] bool operator()(MobilityPeak1D const& left, MobilityPeak1D const& right) const))
// sorting by intensity must order peaks ascending regardless of insertion order
std::vector<MobilityPeak1D> v;
MobilityPeak1D p;
p.setIntensity(2.5f);
v.push_back(p);
p.setIntensity(3.5f);
v.push_back(p);
p.setIntensity(1.5f);
v.push_back(p);
std::sort(v.begin(), v.end(), MobilityPeak1D::IntensityLess());
TEST_REAL_SIMILAR(v[0].getIntensity(), 1.5)
TEST_REAL_SIMILAR(v[1].getIntensity(), 2.5)
TEST_REAL_SIMILAR(v[2].getIntensity(), 3.5)
// shuffle (p still holds intensity 1.5) and sort again
v[0] = v[2];
v[2] = p;
std::sort(v.begin(), v.end(), MobilityPeak1D::IntensityLess());
TEST_REAL_SIMILAR(v[0].getIntensity(), 1.5)
TEST_REAL_SIMILAR(v[1].getIntensity(), 2.5)
TEST_REAL_SIMILAR(v[2].getIntensity(), 3.5)
// some more
TEST_TRUE(MobilityPeak1D::IntensityLess()(p1, p2))
TEST_FALSE(MobilityPeak1D::IntensityLess()(p2, p1))
TEST_FALSE(MobilityPeak1D::IntensityLess()(p2, p2))
END_SECTION
// mixed peak/scalar overloads of the comparator
START_SECTION(([MobilityPeak1D::IntensityLess] bool operator()(MobilityPeak1D const& left, IntensityType right) const))
TEST_TRUE(MobilityPeak1D::IntensityLess()(p1, p2.getIntensity()))
TEST_FALSE(MobilityPeak1D::IntensityLess()(p2, p1.getIntensity()))
TEST_FALSE(MobilityPeak1D::IntensityLess()(p2, p2.getIntensity()))
END_SECTION
START_SECTION(([MobilityPeak1D::IntensityLess] bool operator()(IntensityType left, MobilityPeak1D const& right) const))
TEST_TRUE(MobilityPeak1D::IntensityLess()(p1.getIntensity(), p2))
TEST_FALSE(MobilityPeak1D::IntensityLess()(p2.getIntensity(), p1))
TEST_FALSE(MobilityPeak1D::IntensityLess()(p2.getIntensity(), p2))
END_SECTION
START_SECTION(([MobilityPeak1D::IntensityLess] bool operator()(IntensityType left, IntensityType right) const))
TEST_TRUE(MobilityPeak1D::IntensityLess()(p1.getIntensity(), p2.getIntensity()))
TEST_FALSE(MobilityPeak1D::IntensityLess()(p2.getIntensity(), p1.getIntensity()))
TEST_FALSE(MobilityPeak1D::IntensityLess()(p2.getIntensity(), p2.getIntensity()))
END_SECTION
// MobilityLess
START_SECTION(([MobilityPeak1D::MobilityLess] bool operator()(const MobilityPeak1D& left, const MobilityPeak1D& right) const))
// sorting by mobility (= position[0]) must order peaks ascending
std::vector<MobilityPeak1D> v;
MobilityPeak1D p;
p.setMobility(3.0);
v.push_back(p);
p.setMobility(2.0);
v.push_back(p);
p.setMobility(1.0);
v.push_back(p);
std::sort(v.begin(), v.end(), MobilityPeak1D::MobilityLess());
TEST_REAL_SIMILAR(v[0].getPosition()[0], 1.0)
TEST_REAL_SIMILAR(v[1].getPosition()[0], 2.0)
TEST_REAL_SIMILAR(v[2].getPosition()[0], 3.0)
// strict weak ordering: irreflexive, asymmetric
// (TEST_TRUE/TEST_FALSE for consistency with the IntensityLess sections)
TEST_TRUE(MobilityPeak1D::MobilityLess()(p1, p2))
TEST_FALSE(MobilityPeak1D::MobilityLess()(p2, p1))
TEST_FALSE(MobilityPeak1D::MobilityLess()(p2, p2))
END_SECTION
// mixed peak/scalar overloads of the comparator
START_SECTION(([MobilityPeak1D::MobilityLess] bool operator()(MobilityPeak1D const& left, CoordinateType right) const))
TEST_TRUE(MobilityPeak1D::MobilityLess()(p1, p2.getMobility()))
TEST_FALSE(MobilityPeak1D::MobilityLess()(p2, p1.getMobility()))
TEST_FALSE(MobilityPeak1D::MobilityLess()(p2, p2.getMobility()))
END_SECTION
START_SECTION(([MobilityPeak1D::MobilityLess] bool operator()(CoordinateType left, MobilityPeak1D const& right) const))
TEST_TRUE(MobilityPeak1D::MobilityLess()(p1.getMobility(), p2))
TEST_FALSE(MobilityPeak1D::MobilityLess()(p2.getMobility(), p1))
TEST_FALSE(MobilityPeak1D::MobilityLess()(p2.getMobility(), p2))
END_SECTION
START_SECTION(([MobilityPeak1D::MobilityLess] bool operator()(CoordinateType left, CoordinateType right) const))
TEST_TRUE(MobilityPeak1D::MobilityLess()(p1.getMobility(), p2.getMobility()))
TEST_FALSE(MobilityPeak1D::MobilityLess()(p2.getMobility(), p1.getMobility()))
TEST_FALSE(MobilityPeak1D::MobilityLess()(p2.getMobility(), p2.getMobility()))
END_SECTION
// PositionLess
START_SECTION(([MobilityPeak1D::PositionLess] bool operator()(const MobilityPeak1D& left, const MobilityPeak1D& right) const))
// sorting by position must order peaks ascending
std::vector<MobilityPeak1D> v;
MobilityPeak1D p;
p.getPosition()[0] = 3.0;
v.push_back(p);
p.getPosition()[0] = 2.0;
v.push_back(p);
p.getPosition()[0] = 1.0;
v.push_back(p);
std::sort(v.begin(), v.end(), MobilityPeak1D::PositionLess());
TEST_REAL_SIMILAR(v[0].getPosition()[0], 1.0)
TEST_REAL_SIMILAR(v[1].getPosition()[0], 2.0)
TEST_REAL_SIMILAR(v[2].getPosition()[0], 3.0)
// strict weak ordering: irreflexive, asymmetric
// (TEST_TRUE/TEST_FALSE for consistency with the IntensityLess sections)
TEST_TRUE(MobilityPeak1D::PositionLess()(p1, p2))
TEST_FALSE(MobilityPeak1D::PositionLess()(p2, p1))
TEST_FALSE(MobilityPeak1D::PositionLess()(p2, p2))
END_SECTION
// mixed peak/PositionType overloads of the comparator
START_SECTION(([MobilityPeak1D::PositionLess] bool operator()(const MobilityPeak1D& left, const PositionType& right) const))
TEST_TRUE(MobilityPeak1D::PositionLess()(p1, p2.getPosition()))
TEST_FALSE(MobilityPeak1D::PositionLess()(p2, p1.getPosition()))
TEST_FALSE(MobilityPeak1D::PositionLess()(p2, p2.getPosition()))
END_SECTION
START_SECTION(([MobilityPeak1D::PositionLess] bool operator()(const PositionType& left, const MobilityPeak1D& right) const))
TEST_TRUE(MobilityPeak1D::PositionLess()(p1.getPosition(), p2))
TEST_FALSE(MobilityPeak1D::PositionLess()(p2.getPosition(), p1))
TEST_FALSE(MobilityPeak1D::PositionLess()(p2.getPosition(), p2))
END_SECTION
START_SECTION(([MobilityPeak1D::PositionLess] bool operator()(const PositionType& left, const PositionType& right) const))
TEST_TRUE(MobilityPeak1D::PositionLess()(p1.getPosition(), p2.getPosition()))
TEST_FALSE(MobilityPeak1D::PositionLess()(p2.getPosition(), p1.getPosition()))
TEST_FALSE(MobilityPeak1D::PositionLess()(p2.getPosition(), p2.getPosition()))
END_SECTION
/////////////////////////////////////////////////////////////
// Hash function tests
/////////////////////////////////////////////////////////////
START_SECTION(([EXTRA] std::hash<MobilityPeak1D>))
{
// Test that equal peaks have equal hashes
MobilityPeak1D mp1, mp2;
mp1.setMobility(1.5);
mp1.setIntensity(1000.0f);
mp2.setMobility(1.5);
mp2.setIntensity(1000.0f);
std::hash<MobilityPeak1D> hasher;
TEST_EQUAL(hasher(mp1), hasher(mp2))
// Test that hash changes when values change
// (not guaranteed in general for hashes, but expected for these inputs)
MobilityPeak1D mp3;
mp3.setMobility(2.5);
mp3.setIntensity(1000.0f);
TEST_NOT_EQUAL(hasher(mp1), hasher(mp3))
// Test use in unordered_set
std::unordered_set<MobilityPeak1D> peak_set;
peak_set.insert(mp1);
TEST_EQUAL(peak_set.size(), 1)
peak_set.insert(mp2); // same as mp1
TEST_EQUAL(peak_set.size(), 1) // should not increase
peak_set.insert(mp3);
TEST_EQUAL(peak_set.size(), 2)
// Test use in unordered_map
std::unordered_map<MobilityPeak1D, int> peak_map;
peak_map[mp1] = 42;
TEST_EQUAL(peak_map[mp1], 42)
TEST_EQUAL(peak_map[mp2], 42) // mp2 == mp1, should get same value
peak_map[mp3] = 99;
TEST_EQUAL(peak_map[mp3], 99)
TEST_EQUAL(peak_map.size(), 2)
}
END_SECTION
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/TextFile_test.cpp | .cpp | 5,515 | 162 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Marc Sturm, Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/test_config.h>
///////////////////////////
#include <OpenMS/FORMAT/TextFile.h>
#include <iostream>
#include <vector>
using namespace OpenMS;
using namespace std;
///////////////////////////
// fixed copy-paste: this file tests TextFile, not String
START_TEST(TextFile, "$Id$")
/////////////////////////////////////////////////////////////
TextFile* ptr = nullptr;
TextFile* nullPointer = nullptr;
START_SECTION((TextFile()))
ptr = new TextFile();
TEST_NOT_EQUAL(ptr, nullPointer)
END_SECTION
START_SECTION((TextFile(const String& filename, bool trim_lines = false, Int first_n = -1, bool skip_empty_lines = false) ))
// just some basic stuff, since the C'Tor calls load() directly
TextFile file(OPENMS_GET_TEST_DATA_PATH("TextFile_test_infile.txt"));
TextFile::ConstIterator file_it = file.begin();
TEST_EQUAL(String(*file_it).trim() == "first_line", true)
file_it += 3;
TEST_EQUAL(String(*file_it).trim() == "middle_line", true)
file_it += 7;
TEST_EQUAL(String(*file_it).trim() == "last_line", true)
TEST_EQUAL((file.end() - file.begin()), 11)
// an empty file must yield an empty TextFile
TextFile file2(OPENMS_GET_TEST_DATA_PATH("TextFile_test_empty_infile.txt"));
TEST_EQUAL((file2.end() - file2.begin()), 0)
END_SECTION
START_SECTION((~TextFile()))
delete ptr;
END_SECTION
START_SECTION((void load(const String& filename, bool trim_lines = false, Int first_n = -1, bool skip_empty_lines = false) ))
// missing file must raise FileNotFound
TextFile file;
TEST_EXCEPTION(Exception::FileNotFound, file.load("FileDoesNotExist.txt"))
// default options: all 11 lines are kept verbatim
file.load(OPENMS_GET_TEST_DATA_PATH("TextFile_test_infile.txt"));
TEST_EQUAL((file.end() - file.begin()), 11)
TextFile::ConstIterator file_it = file.begin();
TEST_EQUAL(String(*file_it).trim() == "first_line", true)
file_it += 3;
TEST_EQUAL(String(*file_it).trim() == "middle_line", true)
file_it += 7;
TEST_EQUAL(String(*file_it).trim() == "last_line", true)
//trimmed
file.load(OPENMS_GET_TEST_DATA_PATH("TextFile_test_infile.txt"),true);
TEST_EQUAL((file.end() - file.begin()), 11)
file_it = file.begin();
TEST_EQUAL(String(*file_it).trim() == "first_line", true)
file_it += 3;
TEST_EQUAL(String(*file_it).trim() == "middle_line", true)
file_it += 2;
TEST_EQUAL(String(*file_it).trim() == "space_line", true)
++file_it;
TEST_EQUAL(String(*file_it).trim() == "tab_line", true)
++file_it;
TEST_EQUAL(String(*file_it).trim() == "back_space_line", true)
++file_it;
TEST_EQUAL(String(*file_it).trim() == "back_tab_line", true)
file_it += 2;
TEST_EQUAL(String(*file_it).trim() == "last_line", true)
//only first few
// first_n = 1 keeps only the first line
file.load(OPENMS_GET_TEST_DATA_PATH("TextFile_test_infile.txt"),true,1);
TEST_EQUAL((file.end() - file.begin()), 1)
file_it = file.begin();
TEST_EQUAL(String(*file_it).trim() == "first_line", true)
// first_n = 3: empty lines count towards the limit when not skipped
file.load(OPENMS_GET_TEST_DATA_PATH("TextFile_test_infile.txt"),true,3);
TEST_EQUAL((file.end() - file.begin()), 3)
file_it = file.begin();
TEST_EQUAL(String(*file_it).trim() == "first_line", true)
++file_it;
TEST_EQUAL(String(*file_it).trim().empty(), true)
++file_it;
TEST_EQUAL(String(*file_it).trim().empty(), true)
file.load(OPENMS_GET_TEST_DATA_PATH("TextFile_test_infile.txt"),true,4);
TEST_EQUAL((file.end() - file.begin()), 4)
file_it = file.begin();
TEST_EQUAL(String(*file_it).trim() == "first_line", true)
++file_it;
TEST_EQUAL(String(*file_it).trim().empty(), true)
++file_it;
TEST_EQUAL(String(*file_it).trim().empty(), true)
++file_it;
TEST_EQUAL(String(*file_it).trim() == "middle_line", true)
// skip_empty_lines = true drops the 4 blank lines
file.load(OPENMS_GET_TEST_DATA_PATH("TextFile_test_infile.txt"),true, -1, true);
TEST_EQUAL((file.end() - file.begin()), 7)
file_it = file.begin();
TEST_EQUAL(String(*file_it).trim() == "first_line", true)
++file_it;
TEST_EQUAL(String(*file_it).trim() == "middle_line", true)
++file_it;
TEST_EQUAL(String(*file_it).trim() == "space_line", true)
file_it += 4;
TEST_EQUAL(String(*file_it).trim() == "last_line", true)
// first_n combined with skip_empty_lines: limit applies to kept lines
file.load(OPENMS_GET_TEST_DATA_PATH("TextFile_test_infile.txt"),true, 4, true);
TEST_EQUAL((file.end() - file.begin()), 4)
file_it = file.begin();
TEST_EQUAL(String(*file_it).trim() == "first_line", true)
++file_it;
TEST_EQUAL(String(*file_it).trim() == "middle_line", true)
++file_it;
TEST_EQUAL(String(*file_it).trim() == "space_line", true)
++file_it;
TEST_EQUAL(String(*file_it).trim() == "tab_line", true)
END_SECTION
START_SECTION((void store(const String& filename) ))
TextFile file;
// unwritable path must raise UnableToCreateFile
TEST_EXCEPTION(Exception::UnableToCreateFile, file.store("/does/not/exist/FileDoesNotExist.txt"))
// mixed line endings (none, LF, CRLF) must round-trip through store/load
file.addLine("line1");
file.addLine("line2\n");
file.addLine("line3\r\n");
String filename;
NEW_TMP_FILE(filename);
file.store(filename);
file.load(filename);
// validate loaded content
TextFile::ConstIterator file_it = file.begin();
TEST_EQUAL(String(*file_it).trim() == "line1",true);
++file_it;
TEST_EQUAL(String(*file_it).trim() == "line2",true);
++file_it;
TEST_EQUAL(String(*file_it).trim() == "line3",true);
END_SECTION
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/BayesianProteinInferenceAlgorithm_test.cpp | .cpp | 10,788 | 225 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Julianus Pfeuffer $
// $Authors: Julianus Pfeuffer $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/ANALYSIS/ID/BayesianProteinInferenceAlgorithm.h>
#include <OpenMS/FORMAT/IdXMLFile.h>
#include <OpenMS/test_config.h>
using namespace OpenMS;
using namespace std;
START_TEST(BayesianProteinInferenceAlgorithm, "$Id$")
// smoke test: inference on merged IDs must run without throwing
START_SECTION(BayesianProteinInferenceAlgorithm on Protein Peptide ID)
{
vector<ProteinIdentification> prots;
PeptideIdentificationList peps;
IdXMLFile idf;
idf.load(OPENMS_GET_TEST_DATA_PATH("newMergerTest_out.idXML"),prots,peps);
BayesianProteinInferenceAlgorithm bpia;
bpia.inferPosteriorProbabilities(prots,peps,false);
}
END_SECTION
TOLERANCE_ABSOLUTE(0.002)
TOLERANCE_RELATIVE(1.002)
// default model parameters; PSM probabilities must stay untouched
START_SECTION(BayesianProteinInferenceAlgorithm test)
{
vector<ProteinIdentification> prots;
PeptideIdentificationList peps;
IdXMLFile idf;
idf.load(OPENMS_GET_TEST_DATA_PATH("BayesianProteinInference_test.idXML"),prots,peps);
BayesianProteinInferenceAlgorithm bpia;
Param p = bpia.getParameters();
p.setValue("update_PSM_probabilities", "false");
bpia.setParameters(p);
bpia.inferPosteriorProbabilities(prots,peps,false);
TEST_EQUAL(peps.size(), 9)
TEST_EQUAL(peps[0].getHits()[0].getScore(), 0.6)
TEST_REAL_SIMILAR(prots[0].getHits()[0].getScore(), 0.624641)
TEST_REAL_SIMILAR(prots[0].getHits()[1].getScore(), 0.648346)
}
END_SECTION
// custom emission/prior parameters; PSM probabilities get updated
START_SECTION(BayesianProteinInferenceAlgorithm test2)
{
vector<ProteinIdentification> prots;
PeptideIdentificationList peps;
IdXMLFile idf;
idf.load(OPENMS_GET_TEST_DATA_PATH("BayesianProteinInference_test.idXML"),prots,peps);
BayesianProteinInferenceAlgorithm bpia;
Param p = bpia.getParameters();
p.setValue("model_parameters:pep_emission", 0.9);
p.setValue("model_parameters:prot_prior", 0.3);
p.setValue("model_parameters:pep_spurious_emission", 0.1);
p.setValue("model_parameters:pep_prior", 0.3);
bpia.setParameters(p);
bpia.inferPosteriorProbabilities(prots,peps,false);
TEST_EQUAL(peps.size(), 9)
TEST_REAL_SIMILAR(peps[0].getHits()[0].getScore(), 0.827132)
TEST_REAL_SIMILAR(prots[0].getHits()[0].getScore(), 0.755653)
TEST_REAL_SIMILAR(prots[0].getHits()[1].getScore(), 0.580705)
}
END_SECTION
// psm_probability_cutoff filters one PSM below 0.61 (9 -> 8 peptides)
START_SECTION(BayesianProteinInferenceAlgorithm test2 filter)
{
vector<ProteinIdentification> prots;
PeptideIdentificationList peps;
IdXMLFile idf;
idf.load(OPENMS_GET_TEST_DATA_PATH("BayesianProteinInference_test.idXML"),prots,peps);
BayesianProteinInferenceAlgorithm bpia;
Param p = bpia.getParameters();
p.setValue("model_parameters:pep_emission", 0.9);
p.setValue("model_parameters:prot_prior", 0.3);
p.setValue("model_parameters:pep_spurious_emission", 0.1);
p.setValue("model_parameters:pep_prior", 0.3);
p.setValue("psm_probability_cutoff",0.61);
//TODO setParams needs to update the filter function or we need to make a member.
//p.setValue("model_parameters:regularize","true");
bpia.setParameters(p);
bpia.inferPosteriorProbabilities(prots,peps,false);
TEST_EQUAL(peps.size(), 8)
TEST_REAL_SIMILAR(peps[0].getHits()[0].getScore(), 0.77821544)
TEST_REAL_SIMILAR(prots[0].getHits()[0].getScore(), 0.787325)
TEST_REAL_SIMILAR(prots[0].getHits()[1].getScore(), 0.609742)
}
END_SECTION
// regularized model (sum-product inference) lowers protein posteriors
START_SECTION(BayesianProteinInferenceAlgorithm test2 regularize)
{
vector<ProteinIdentification> prots;
PeptideIdentificationList peps;
IdXMLFile idf;
idf.load(OPENMS_GET_TEST_DATA_PATH("BayesianProteinInference_test.idXML"),prots,peps);
BayesianProteinInferenceAlgorithm bpia;
Param p = bpia.getParameters();
p.setValue("model_parameters:pep_emission", 0.9);
p.setValue("model_parameters:prot_prior", 0.3);
p.setValue("model_parameters:pep_spurious_emission", 0.1);
p.setValue("model_parameters:pep_prior", 0.3);
//p.setValue("loopy_belief_propagation:p_norm_inference", -1.)
p.setValue("model_parameters:regularize","true");
bpia.setParameters(p);
bpia.inferPosteriorProbabilities(prots,peps,false);
TEST_EQUAL(peps.size(), 9)
TEST_REAL_SIMILAR(peps[0].getHits()[0].getScore(), 0.779291)
TEST_REAL_SIMILAR(prots[0].getHits()[0].getScore(), 0.684165)
TEST_REAL_SIMILAR(prots[0].getHits()[1].getScore(), 0.458033)
}
END_SECTION
// regularized model with max-product inference (p_norm_inference = -1)
START_SECTION(BayesianProteinInferenceAlgorithm test2 regularize max-product)
{
vector<ProteinIdentification> prots;
PeptideIdentificationList peps;
IdXMLFile idf;
idf.load(OPENMS_GET_TEST_DATA_PATH("BayesianProteinInference_test.idXML"),prots,peps);
BayesianProteinInferenceAlgorithm bpia;
Param p = bpia.getParameters();
p.setValue("model_parameters:pep_emission", 0.9);
p.setValue("model_parameters:prot_prior", 0.3);
p.setValue("model_parameters:pep_spurious_emission", 0.1);
p.setValue("model_parameters:pep_prior", 0.3);
p.setValue("loopy_belief_propagation:p_norm_inference", -1.);
p.setValue("model_parameters:regularize","true");
bpia.setParameters(p);
bpia.inferPosteriorProbabilities(prots,peps,false);
TEST_EQUAL(peps.size(), 9)
TEST_REAL_SIMILAR(peps[0].getHits()[0].getScore(), 0.83848989)
TEST_REAL_SIMILAR(prots[0].getHits()[0].getScore(), 0.784666)
TEST_REAL_SIMILAR(prots[0].getHits()[1].getScore(), 0.548296)
}
END_SECTION
// max-product inference without regularization
START_SECTION(BayesianProteinInferenceAlgorithm test2 max-product)
{
vector<ProteinIdentification> prots;
PeptideIdentificationList peps;
IdXMLFile idf;
idf.load(OPENMS_GET_TEST_DATA_PATH("BayesianProteinInference_test.idXML"),prots,peps);
BayesianProteinInferenceAlgorithm bpia;
Param p = bpia.getParameters();
p.setValue("model_parameters:pep_emission", 0.9);
p.setValue("model_parameters:prot_prior", 0.3);
p.setValue("model_parameters:pep_spurious_emission", 0.1);
p.setValue("model_parameters:pep_prior", 0.3);
p.setValue("loopy_belief_propagation:p_norm_inference", -1.);
//p.setValue("model_parameters:regularize","true");
bpia.setParameters(p);
bpia.inferPosteriorProbabilities(prots,peps,false);
TEST_EQUAL(peps.size(), 9)
TEST_REAL_SIMILAR(peps[0].getHits()[0].getScore(), 0.9117111)
TEST_REAL_SIMILAR(prots[0].getHits()[0].getScore(), 0.879245)
TEST_REAL_SIMILAR(prots[0].getHits()[1].getScore(), 0.708133)
}
END_SECTION
// small tree-shaped graph: exact inference expected (no loops, no dampening)
START_SECTION(BayesianProteinInferenceAlgorithm test2 super-easy)
{
vector<ProteinIdentification> prots;
PeptideIdentificationList peps;
IdXMLFile idf;
idf.load(OPENMS_GET_TEST_DATA_PATH("BayesianProteinInference_2_test.idXML"),prots,peps);
BayesianProteinInferenceAlgorithm bpia;
Param p = bpia.getParameters();
p.setValue("model_parameters:pep_emission", 0.7);
p.setValue("model_parameters:prot_prior", 0.5);
p.setValue("model_parameters:pep_spurious_emission", 0.0);
p.setValue("model_parameters:pep_prior", 0.5);
p.setValue("loopy_belief_propagation:dampening_lambda", 0.0);
p.setValue("loopy_belief_propagation:p_norm_inference", 1.);
//p.setValue("model_parameters:regularize","true");
bpia.setParameters(p);
bpia.inferPosteriorProbabilities(prots,peps,false);
TEST_EQUAL(peps.size(), 3)
TEST_REAL_SIMILAR(peps[0].getHits()[0].getScore(), 0.843211)
TEST_REAL_SIMILAR(peps[1].getHits()[0].getScore(), 0.944383)
TEST_REAL_SIMILAR(peps[2].getHits()[0].getScore(), 0.701081)
std::cout << prots[0].getHits()[0].getAccession() << std::endl;
TEST_REAL_SIMILAR(prots[0].getHits()[0].getScore(), 0.883060)
std::cout << prots[0].getHits()[1].getAccession() << std::endl;
TEST_REAL_SIMILAR(prots[0].getHits()[1].getScore(), 0.519786)
std::cout << prots[0].getHits()[2].getAccession() << std::endl;
TEST_REAL_SIMILAR(prots[0].getHits()[2].getScore(), 0.775994)
}
END_SECTION
// graph with a small loop: loopy BP approximation; shared peptides give
// the two interchangeable proteins identical posteriors
START_SECTION(BayesianProteinInferenceAlgorithm test2 mini-loop)
{
vector<ProteinIdentification> prots;
PeptideIdentificationList peps;
IdXMLFile idf;
idf.load(OPENMS_GET_TEST_DATA_PATH("BayesianProteinInference_3_test.idXML"),prots,peps);
BayesianProteinInferenceAlgorithm bpia;
Param p = bpia.getParameters();
p.setValue("model_parameters:pep_emission", 0.7);
p.setValue("model_parameters:prot_prior", 0.5);
p.setValue("model_parameters:pep_spurious_emission", 0.0);
p.setValue("model_parameters:pep_prior", 0.5);
p.setValue("loopy_belief_propagation:dampening_lambda", 0.0);
p.setValue("loopy_belief_propagation:p_norm_inference", 1.);
//p.setValue("model_parameters:regularize","true");
bpia.setParameters(p);
bpia.inferPosteriorProbabilities(prots,peps,false);
TEST_EQUAL(peps.size(), 3)
TEST_REAL_SIMILAR(peps[0].getHits()[0].getScore(), 0.934571)
TEST_REAL_SIMILAR(peps[1].getHits()[0].getScore(), 0.944383)
TEST_REAL_SIMILAR(peps[2].getHits()[0].getScore(), 0.701081)
std::cout << prots[0].getHits()[0].getAccession() << std::endl;
TEST_REAL_SIMILAR(prots[0].getHits()[0].getScore(), 0.675421)
std::cout << prots[0].getHits()[1].getAccession() << std::endl;
TEST_REAL_SIMILAR(prots[0].getHits()[1].getScore(), 0.675421)
std::cout << prots[0].getHits()[2].getAccession() << std::endl;
TEST_REAL_SIMILAR(prots[0].getHits()[2].getScore(), 0.775994)
}
END_SECTION
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/ConversionHelper_test.cpp | .cpp | 3,666 | 140 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/test_config.h>
#include <OpenMS/KERNEL/StandardTypes.h>
///////////////////////////
#include <OpenMS/KERNEL/ConversionHelper.h>
///////////////////////////
using namespace OpenMS;
using namespace std;
// fixed copy-paste: this file tests the MapConversion helpers, not ConsensusMap
START_TEST(MapConversion, "$Id$")
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
START_SECTION((template < typename FeatureT > static void convert(UInt64 const input_map_index, FeatureMap< FeatureT > const &input_map, ConsensusMap &output_map, Size n=-1)))
{
// build a FeatureMap of 3 features with distinct RT/MZ/unique ids
FeatureMap fm;
Feature f;
for ( UInt i = 0; i < 3; ++i )
{
f.setRT(i*77.7);
f.setMZ(i+100.35);
f.setUniqueId(i*33+17);
fm.push_back(f);
}
ConsensusMap cm;
MapConversion::convert(33,fm,cm);
// each feature becomes a singleton consensus feature carrying map index 33
TEST_EQUAL(cm.size(),3);
TEST_EQUAL(cm.getColumnHeaders()[33].size,3);
for ( UInt i = 0; i < 3; ++i )
{
TEST_EQUAL(cm[i].size(),1);
TEST_EQUAL(cm[i].begin()->getMapIndex(),33);
TEST_EQUAL(cm[i].begin()->getUniqueId(),i*33+17);
TEST_REAL_SIMILAR(cm[i].begin()->getRT(),i*77.7);
TEST_REAL_SIMILAR(cm[i].begin()->getMZ(),i+100.35);
}
// n=2 limits the converted features, but the column header keeps the full size
cm.clear();
MapConversion::convert(33,fm,cm,2);
TEST_EQUAL(cm.size(),2);
TEST_EQUAL(cm.getColumnHeaders()[33].size,3);
}
END_SECTION
/////
// Prepare data
// PeakMap with 3 spectra of 4 peaks each; RT = 0/5/10,
// m/z and intensity encode (m, i) so individual peaks are identifiable
PeakMap mse;
{
MSSpectrum mss;
Peak1D p;
for ( UInt m = 0; m < 3; ++m )
{
mss.clear(true);
for ( UInt i = 0; i < 4; ++i )
{
p.setMZ( 10* m + i + 100.35);
p.setIntensity( 900 + 7*m + 5*i );
mss.push_back(p);
}
mse.addSpectrum(mss);
mse.getSpectra().back().setRT(m*5);
}
}
START_SECTION((static void convert(UInt64 const input_map_index, PeakMap & input_map, ConsensusMap& output_map, Size n = -1)))
{
// keep only the 8 most intense peaks (of 12 total)
ConsensusMap cm;
MapConversion::convert(33,mse,cm,8);
TEST_EQUAL(cm.size(),8);
for ( UInt i = 0; i < cm.size(); ++i)
{
STATUS("\n" << i << ": " << cm[i] );
}
// weakest kept peak: m=2, i=1 -> intensity 900 + 14 - 2 ... = 912
TEST_EQUAL(cm.back().getIntensity(),912);
}
END_SECTION
/////
// fixture for the back-conversion test below
ConsensusMap cm;
MapConversion::convert(33,mse,cm,8);
START_SECTION((template < typename FeatureT > static void convert(ConsensusMap const &input_map, const bool keep_uids, FeatureMap< FeatureT > &output_map)))
{
// keep_uids = true: unique ids and ID data are carried over unchanged
FeatureMap out_fm;
MapConversion::convert(cm, true, out_fm);
TEST_EQUAL(cm.getUniqueId(), out_fm.getUniqueId());
TEST_EQUAL(cm.getProteinIdentifications().size(), out_fm.getProteinIdentifications().size());
TEST_EQUAL(cm.getUnassignedPeptideIdentifications().size(), out_fm.getUnassignedPeptideIdentifications().size());
TEST_EQUAL(cm.size(), out_fm.size());
for (Size i = 0; i < cm.size(); ++i)
{
TEST_EQUAL(cm[i], out_fm[i]);
}
// keep_uids = false: positions/intensities preserved, but new unique ids assigned
out_fm.clear();
MapConversion::convert(cm, false, out_fm);
TEST_NOT_EQUAL(cm.getUniqueId(), out_fm.getUniqueId());
for (Size i = 0; i < cm.size(); ++i)
{
TEST_REAL_SIMILAR(cm[i].getRT(), out_fm[i].getRT());
TEST_REAL_SIMILAR(cm[i].getMZ(), out_fm[i].getMZ());
TEST_REAL_SIMILAR(cm[i].getIntensity(), out_fm[i].getIntensity());
TEST_NOT_EQUAL(cm[i].getUniqueId(), out_fm[i].getUniqueId());
}
}
END_SECTION
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/RibonucleotideDB_test.cpp | .cpp | 3,676 | 110 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hendrik Weisser $
// --------------------------------------------------------------------------
//
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/test_config.h>
///////////////////////////
#include <OpenMS/CHEMISTRY/RibonucleotideDB.h>
using namespace OpenMS;
using namespace std;
///////////////////////////
START_TEST(RibonucleotideDB, "$Id$")
/////////////////////////////////////////////////////////////
RibonucleotideDB* ptr = nullptr;
RibonucleotideDB* null = nullptr;
START_SECTION(RibonucleotideDB* getInstance())
{
// singleton accessor must return a valid instance
ptr = RibonucleotideDB::getInstance();
TEST_NOT_EQUAL(ptr, null);
}
END_SECTION
START_SECTION(virtual ~RibonucleotideDB())
NOT_TESTABLE
END_SECTION
// fixed malformed documented signature (stray "void" in the parameter list)
START_SECTION(void readFromJSON_(const std::string& path))
// Reading from the JSON gets tested as part of the constructor above.
// We check the contents below in begin() and getRibonucleotide
NOT_TESTABLE
END_SECTION
// fixed malformed documented signature (stray "void" in the parameter list)
START_SECTION(void readFromFile_(const std::string& path))
// Reading from the TSV gets tested as part of the constructor above.
// We check the contents below in getRibonucleotide and getRibonucleotideAlternatives
NOT_TESTABLE
END_SECTION
START_SECTION(ConstIterator begin())
{
// Loading of the JSON and TSV files gets tested during the
// construction of the singleton; here we only check the first entry.
RibonucleotideDB::ConstIterator it = ptr->begin();
TEST_STRING_EQUAL((*it)->getCode(), "io6A");
}
END_SECTION
START_SECTION(ConstIterator end())
{
// a non-empty database: end() must differ from begin()
RibonucleotideDB::ConstIterator it = ptr->end();
TEST_EQUAL(it != ptr->begin(), true);
}
END_SECTION
START_SECTION((const Ribonucleotide& getRibonucleotide(const String& code)))
{
// This entry loads from the Modomics.json
const Ribonucleotide * ribo = ptr->getRibonucleotide("Am");
TEST_STRING_EQUAL(ribo->getCode(), "Am");
TEST_STRING_EQUAL(ribo->getName(), "2'-O-methyladenosine");
// This loads from Custom_RNA_modifications.tsv
const Ribonucleotide * customRibo = ptr->getRibonucleotide("msU?");
TEST_STRING_EQUAL(customRibo->getCode(), "msU?");
// unknown code must raise ElementNotFound
TEST_EXCEPTION(Exception::ElementNotFound,
ptr->getRibonucleotide("bla"));
}
END_SECTION
START_SECTION( (pair<RibonucleotideDB::ConstRibonucleotidePtr, RibonucleotideDB::ConstRibonucleotidePtr> RibonucleotideDB::getRibonucleotideAlternatives(const std::string& code)))
{
// This also tests that loading from the TSV went well
const pair<RibonucleotideDB::ConstRibonucleotidePtr, RibonucleotideDB::ConstRibonucleotidePtr> alts = ptr->getRibonucleotideAlternatives("msU?");
TEST_STRING_EQUAL(alts.first->getCode(), "m5s2U");
TEST_STRING_EQUAL(alts.second->getCode(), "s2Um");
}
END_SECTION
START_SECTION((const Ribonucleotide& getRibonucleotidePrefix(const String& seq)))
{
// longest-prefix match: "m1Am" wins over "m1A" for "m1AmCGU"
const Ribonucleotide* ribo = ptr->getRibonucleotidePrefix("m1AmCGU");
TEST_STRING_EQUAL(ribo->getCode(), "m1Am");
// no known code as prefix must raise ElementNotFound
TEST_EXCEPTION(Exception::ElementNotFound,
ptr->getRibonucleotidePrefix("blam1A"));
}
END_SECTION
START_SECTION(EmpiricalFormula getBaselossFormula())
{
// deoxyribose backbone (DNA) vs. 2'-O-methylated ribose (RNA)
const Ribonucleotide* dna = ptr->getRibonucleotide("dT");
TEST_EQUAL(EmpiricalFormula("C5H10O4") == dna->getBaselossFormula(), true);
const Ribonucleotide* rnam = ptr->getRibonucleotide("Um");
TEST_EQUAL(EmpiricalFormula("C6H12O5") == rnam->getBaselossFormula(), true);
}
END_SECTION
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/SpectraSTSimilarityScore_test.cpp | .cpp | 8,533 | 287 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/test_config.h>
///////////////////////////
#include <iostream>
#include <OpenMS/COMPARISON/SpectraSTSimilarityScore.h>
#include <OpenMS/FORMAT/MSPFile.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <OpenMS/DATASTRUCTURES/String.h>
/// typedef for the index into the sparse vector
#include <Eigen/Sparse>
using SparseVectorIteratorType = Eigen::SparseVector<float>::InnerIterator;
///////////////////////////
START_TEST(SpectraSTSimilarityScore, "$Id$")
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
using namespace OpenMS;
using namespace std;
SpectraSTSimilarityScore* ptr = nullptr;
SpectraSTSimilarityScore* nullPointer = nullptr;
START_SECTION(SpectraSTSimilarityScore())
ptr = new SpectraSTSimilarityScore();
TEST_NOT_EQUAL(ptr, nullPointer)
END_SECTION
START_SECTION(~SpectraSTSimilarityScore())
delete ptr;
END_SECTION
TOLERANCE_ABSOLUTE(0.01)
// fresh instance shared by the remaining sections
ptr = new SpectraSTSimilarityScore();
START_SECTION(SpectraSTSimilarityScore(const SpectraSTSimilarityScore& source))
// copy must carry over name and parameters
SpectraSTSimilarityScore copy(*ptr);
TEST_EQUAL(copy.getName(), ptr->getName());
TEST_EQUAL(copy.getParameters(), ptr->getParameters());
END_SECTION
START_SECTION(SpectraSTSimilarityScore& operator = (const SpectraSTSimilarityScore& source))
// assignment must carry over name and parameters
SpectraSTSimilarityScore copy;
copy = *ptr;
TEST_EQUAL(copy.getName(), ptr->getName());
TEST_EQUAL(copy.getParameters(), ptr->getParameters());
END_SECTION
START_SECTION(double operator () (const PeakSpectrum& spec) const)
// self-similarity of a spectrum must be 1
PeakMap exp;
PeakSpectrum s1;
MSPFile msp;
PeptideIdentificationList ids;
const String filename(OPENMS_GET_TEST_DATA_PATH("SpectraSTSimilarityScore_1.msp"));
msp.load(filename, ids, exp);
for(Size k = 0; k < exp[0].size(); ++k)
{
Peak1D peak;
peak.setIntensity(exp[0][k].getIntensity());
peak.setMZ(exp[0][k].getMZ());
peak.setPosition(exp[0][k].getPosition());
s1.push_back(peak);
}
double score = (*ptr)(s1);
TEST_REAL_SIMILAR(score, 1);
END_SECTION
START_SECTION(double operator () (const PeakSpectrum& spec1, const PeakSpectrum& spec2) const)
// spectra 0 and 1 are identical (score 1); spectrum 2 shares no peaks (score 0)
PeakSpectrum s1, s2, s3;
PeakMap exp;
MSPFile msp;
PeptideIdentificationList ids;
const String filename(OPENMS_GET_TEST_DATA_PATH("SpectraSTSimilarityScore_1.msp"));
msp.load(filename, ids, exp);
for(Size k = 0; k < exp[0].size(); ++k)
{
Peak1D peak;
peak.setIntensity(exp[0][k].getIntensity());
peak.setMZ(exp[0][k].getMZ());
peak.setPosition(exp[0][k].getPosition());
s1.push_back(peak);
}
for(Size k = 0; k < exp[1].size(); ++k)
{
Peak1D peak;
peak.setIntensity(exp[1][k].getIntensity());
peak.setMZ(exp[1][k].getMZ());
peak.setPosition(exp[1][k].getPosition());
s2.push_back(peak);
}
TOLERANCE_ABSOLUTE(0.01)
double score = (*ptr)(s1, s2);
TEST_REAL_SIMILAR(score, 1)
for(Size k = 0; k < exp[2].size(); ++k)
{
Peak1D peak;
peak.setIntensity(exp[2][k].getIntensity());
peak.setMZ(exp[2][k].getMZ());
peak.setPosition(exp[2][k].getPosition());
s3.push_back(peak);
}
score = (*ptr)(s1, s3);
TEST_REAL_SIMILAR(score, 0)
END_SECTION
// Score binned spectra: a spectrum vs. a near-identical one gives ~1, vs. a dissimilar one ~0.
START_SECTION((double operator()(const BinnedSpectrum &bin1, const BinnedSpectrum &bin2) const))
PeakSpectrum s1, s2, s3;
PeakMap exp;
MSPFile msp;
PeptideIdentificationList ids;
const String filename(OPENMS_GET_TEST_DATA_PATH("SpectraSTSimilarityScore_1.msp"));
msp.load(filename, ids, exp);
for(Size k = 0; k < exp[0].size(); ++k)
{
Peak1D peak;
peak.setIntensity(exp[0][k].getIntensity());
peak.setMZ(exp[0][k].getMZ());
peak.setPosition(exp[0][k].getPosition());
s1.push_back(peak);
}
for(Size k = 0; k < exp[1].size(); ++k)
{
Peak1D peak;
peak.setIntensity(exp[1][k].getIntensity());
// BUGFIX: was exp[0][k].getMZ() (copy&paste from the s1 loop). That mixed two spectra and
// read out of bounds whenever exp[1] has more peaks than exp[0]. Behavior-neutral here,
// because setPosition() below overwrote the m/z with the correct value anyway.
peak.setMZ(exp[1][k].getMZ());
peak.setPosition(exp[1][k].getPosition());
s2.push_back(peak);
}
TOLERANCE_ABSOLUTE(0.01)
double score = (*ptr)(ptr->transform(s1), ptr->transform(s2));
TEST_REAL_SIMILAR(score, 1)
for(Size k = 0; k < exp[2].size(); ++k)
{
Peak1D peak;
peak.setIntensity(exp[2][k].getIntensity());
peak.setMZ(exp[2][k].getMZ());
peak.setPosition(exp[2][k].getPosition());
s3.push_back(peak);
}
score = (*ptr)(ptr->transform(s1), ptr->transform(s3));
TEST_REAL_SIMILAR(score, 0)
END_SECTION
// Exercise peak filtering: intensity threshold, m/z cutoff, min/max peak count limits.
START_SECTION(bool preprocess(PeakSpectrum &spec, float remove_peak_intensity_threshold=2.01, UInt cut_peaks_below=1000, Size min_peak_number=5, Size max_peak_number=150))
PeakSpectrum s1, s2, s3;
PeakMap exp;
MSPFile msp;
PeptideIdentificationList ids;
const String filename(OPENMS_GET_TEST_DATA_PATH("SpectraSTSimilarityScore_1.msp"));
msp.load(filename, ids, exp);
for(Size k = 0; k < exp[0].size(); ++k)
{
Peak1D peak;
peak.setIntensity(exp[0][k].getIntensity());
peak.setMZ(exp[0][k].getMZ());
peak.setPosition(exp[0][k].getPosition());
s1.push_back(peak);
}
for(Size k = 0; k < exp[1].size(); ++k)
{
Peak1D peak;
peak.setIntensity(exp[1][k].getIntensity());
// BUGFIX: was exp[0][k].getMZ() (copy&paste from the s1 loop) — wrong spectrum and a latent
// out-of-bounds read if exp[1] is longer than exp[0]. setPosition() below overwrote the
// value with the correct exp[1] m/z, so the observable test behavior is unchanged.
peak.setMZ(exp[1][k].getMZ());
peak.setPosition(exp[1][k].getPosition());
s2.push_back(peak);
}
for(Size k = 0; k < exp[2].size(); ++k)
{
Peak1D peak;
peak.setIntensity(exp[2][k].getIntensity());
peak.setMZ(exp[2][k].getMZ());
peak.setPosition(exp[2][k].getPosition());
s3.push_back(peak);
}
TOLERANCE_ABSOLUTE(0.01)
ptr->preprocess(s1,2,10000);
TEST_EQUAL(s1.size(),6)
//min_peaks
TEST_EQUAL(ptr->preprocess(s2,2,1000,12),false)
//max_peaks
ptr->preprocess(s3,1,10000,5,8);
TEST_EQUAL(s3.size(),8)
END_SECTION
START_SECTION(double delta_D(double top_hit, double runner_up))
SpectraSTSimilarityScore spectrast;
// a zero top hit must raise DivisionByZero rather than return garbage
TEST_EXCEPTION( Exception::DivisionByZero, spectrast.delta_D(0,5))
// expected values: (5-4)/5 = 0.2 and (25-1)/25 = 0.96
TEST_REAL_SIMILAR(spectrast.delta_D(5,4),0.2)
TEST_REAL_SIMILAR(spectrast.delta_D(25,1),0.96)
END_SECTION
START_SECTION((double compute_F(double dot_product, double delta_D, double dot_bias)))
//pretty straightforward function
NOT_TESTABLE
END_SECTION
START_SECTION(double dot_bias(const BinnedSpectrum &bin1, const BinnedSpectrum &bin2, double dot_product=-1) const)
PeakSpectrum s1,s2;
Peak1D peak;
peak.setIntensity(1);
peak.setMZ(1);
s1.push_back(peak);
peak.setIntensity(0);
peak.setMZ(2);
s1.push_back(peak);
peak.setIntensity(2);
peak.setMZ(3);
s1.push_back(peak);
peak.setIntensity(3);
peak.setMZ(4);
s1.push_back(peak);
peak.setIntensity(0);
peak.setMZ(1);
s2.push_back(peak);
peak.setIntensity(4);
peak.setMZ(2);
s2.push_back(peak);
peak.setIntensity(5);
peak.setMZ(3);
s2.push_back(peak);
peak.setIntensity(6);
peak.setMZ(4);
s2.push_back(peak);
peak.setIntensity(0);
peak.setMZ(5);
s2.push_back(peak);
BinnedSpectrum bin1(s1, 1, false, 1, BinnedSpectrum::DEFAULT_BIN_OFFSET_LOWRES);
BinnedSpectrum bin2(s2, 1, false, 1, BinnedSpectrum::DEFAULT_BIN_OFFSET_LOWRES);
TEST_REAL_SIMILAR(ptr->dot_bias(bin1, bin2, 1), 98.585);
TEST_REAL_SIMILAR(ptr->dot_bias(bin2, bin1, 1), 98.585);
END_SECTION
// Check that transform() bins and normalizes the peaks; the expected bin values were
// computed from the four input peaks below.
START_SECTION(BinnedSpectrum transform(const PeakSpectrum& spec))
PeakSpectrum s1;
Peak1D peak;
peak.setIntensity(1);
peak.setMZ(0.5);
s1.push_back(peak);
peak.setIntensity(0);
peak.setMZ(1.5);
s1.push_back(peak);
peak.setIntensity(2);
peak.setMZ(2.5);
s1.push_back(peak);
peak.setIntensity(3);
peak.setMZ(3.5);
s1.push_back(peak);
BinnedSpectrum bin = ptr->transform(s1);
// walk the sparse bin vector and compare each stored (normalized) intensity
SparseVectorIteratorType iter(*bin.getBins());
TEST_REAL_SIMILAR(iter.value(), 0.1205);
++iter;
TEST_REAL_SIMILAR(iter.value(), 0.3614);
++iter;
TEST_REAL_SIMILAR(iter.value(), 0.602);
++iter;
TEST_REAL_SIMILAR(iter.value(), 0.602);
// last section that uses the shared fixture — release it here
delete ptr;
END_SECTION
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/SysInfo_test.cpp | .cpp | 2,153 | 67 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/test_config.h>
///////////////////////////
#include <OpenMS/SYSTEM/SysInfo.h>
#include <OpenMS/FORMAT/MzMLFile.h>
#include <iostream>
///////////////////////////
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/KERNEL/MSExperiment.h>
using namespace OpenMS;
START_TEST(SysInfo, "$Id$")
START_SECTION(std::string bytesToHumanReadable(UInt64 bytes))
{
// each shift multiplies by 1024^k, i.e. 2048*1024^k bytes == 2 of the next-larger unit
TEST_EQUAL(bytesToHumanReadable( 2ull << 00), "2 byte")
TEST_EQUAL(bytesToHumanReadable(2048ull << 00), "2 KiB")
TEST_EQUAL(bytesToHumanReadable(2048ull << 10), "2 MiB")
TEST_EQUAL(bytesToHumanReadable(2048ull << 20), "2 GiB")
TEST_EQUAL(bytesToHumanReadable(2048ull << 30), "2 TiB")
TEST_EQUAL(bytesToHumanReadable(2048ull << 40), "2 PiB")
}
END_SECTION
START_SECTION(static bool getProcessMemoryConsumption(size_t& mem_virtual))
{
size_t first, after, final;
TEST_EQUAL(SysInfo::getProcessMemoryConsumption(first), true);
std::cout << "Memory consumed initally: " << first << " KB" << std::endl;
{
PeakMap exp;
MzMLFile().load(OPENMS_GET_TEST_DATA_PATH("MzMLFile_5_long.mzML"), exp);
TEST_EQUAL(SysInfo::getProcessMemoryConsumption(after), true);
std::cout << "Memory consumed after reading 20 MB mzML : " << after << " KB" << std::endl;
TEST_EQUAL(after - first > 10000, true)
}
TEST_EQUAL(SysInfo::getProcessMemoryConsumption(final), true);
std::cout << "Memory consumed after release of MSExperiment: " << final << " KB" << std::endl;
// just for fun. There is probably no guarantee that we get the whole mem back by the memory manager
// (and indeed, it does not work on all OS's; e.g. on Linux, the page tables will remain in RAM, unless mem pressure is high)
//TEST_EQUAL(after > final, true)
}
END_SECTION
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/KroenikFile_test.cpp | .cpp | 2,420 | 84 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/test_config.h>
///////////////////////////
#include <OpenMS/FORMAT/KroenikFile.h>
///////////////////////////
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
using namespace OpenMS;
using namespace std;
START_TEST(KroenikFile, "$Id$")
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
KroenikFile* ptr = nullptr;
KroenikFile* null_ptr = nullptr;
START_SECTION(KroenikFile())
{
ptr = new KroenikFile();
TEST_NOT_EQUAL(ptr, null_ptr)
}
END_SECTION
START_SECTION(virtual ~KroenikFile())
{
delete ptr;
}
END_SECTION
// Load a Kroenik feature file and verify the parsed feature attributes; malformed and
// missing files must raise ParseError / FileNotFound respectively.
START_SECTION((template < typename FeatureMapType > void load(const String &filename, FeatureMapType &feature_map)))
{
KroenikFile f;
FeatureMap fm;
f.load(OPENMS_GET_TEST_DATA_PATH("KroenikFile_test_1.krf"), fm);
TEST_EQUAL(fm.size(),3)
ABORT_IF(fm.size()!=3)
// use TEST_REAL_SIMILAR for the floating-point RT values (consistent with the m/z checks);
// exact TEST_EQUAL on parsed doubles is brittle
TEST_REAL_SIMILAR(fm[0].getRT(), 63.2)
TEST_REAL_SIMILAR(fm[0].getMZ(), 1002.11)
TEST_EQUAL(fm[0].getIntensity(), 999999)
TEST_EQUAL(fm[0].getCharge(), 1)
TEST_EQUAL(String(fm[0].getMetaValue("AveragineModifications")), String("Carbamido"))
TEST_REAL_SIMILAR(fm[1].getRT(), 62.2)
TEST_REAL_SIMILAR(fm[1].getMZ(), 252.057 )
TEST_EQUAL(fm[1].getIntensity(), 9999)
TEST_EQUAL(fm[1].getCharge(), 2)
TEST_EQUAL(String(fm[1].getMetaValue("AveragineModifications")), String("Carbamido2"))
TEST_EXCEPTION(Exception::ParseError, f.load(OPENMS_GET_TEST_DATA_PATH("KroenikFile_test_2.krf"), fm));
TEST_EXCEPTION(Exception::FileNotFound, f.load(OPENMS_GET_TEST_DATA_PATH("KroenikFile_test_2_doesnotexist.edta"), fm));
}
END_SECTION
START_SECTION((template < typename SpectrumType > void store(const String &filename, const SpectrumType &spectrum) const ))
{
KroenikFile f;
MSSpectrum spec;
TEST_EXCEPTION(Exception::NotImplemented, f.store("bla", spec))
}
END_SECTION
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/PeakGroup_test.cpp | .cpp | 10,575 | 393 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Jihyung Kim$
// $Authors: Jihyung Kim$
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
///////////////////////////
#include <OpenMS/ANALYSIS/TOPDOWN/FLASHHelperClasses.h>
#include <OpenMS/ANALYSIS/TOPDOWN/PeakGroup.h>
#include <unordered_set>
///////////////////////////
using namespace OpenMS;
using namespace std;
typedef FLASHHelperClasses::LogMzPeak LogMzPeak;
/// Test helper: build a raw Peak1D from (mz, intensity) and wrap it into a LogMzPeak
/// carrying the given absolute charge and isotope index.
LogMzPeak fillPeak(double mz, float it, int cs, int iso_idx)
{
  Peak1D raw_peak;
  raw_peak.setMZ(mz);
  raw_peak.setIntensity(it);

  LogMzPeak log_peak(raw_peak, true);
  log_peak.abs_charge = cs;
  log_peak.isotopeIndex = iso_idx;
  return log_peak;
}
START_TEST(PeakGroup, "$Id$")
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
PeakGroup* ptr = 0;
PeakGroup* null_ptr = 0;
START_SECTION(PeakGroup())
{
ptr = new PeakGroup();
TEST_NOT_EQUAL(ptr, null_ptr)
}
END_SECTION
START_SECTION(~PeakGroup())
{
delete ptr;
}
END_SECTION
/// test data
PeakGroup sample_pg(1, 2, true);
sample_pg.setScanNumber(3);
LogMzPeak tmp_peak0 = fillPeak(1125.5118055019082, 443505.625, 2, 0);
sample_pg.push_back(tmp_peak0);
LogMzPeak tmp_peak1 = fillPeak(1126.0134829208082, 11212854, 2, 1);
sample_pg.push_back(tmp_peak1);
LogMzPeak tmp_peak2 = fillPeak(1126.515160339708, 1214510.5, 2, 2);
sample_pg.push_back(tmp_peak2);
LogMzPeak tmp_peak3 = fillPeak(1127.0168377586081, 7506.6767578125, 2, 3);
sample_pg.push_back(tmp_peak3);
sample_pg.updateMonoMassAndIsotopeIntensities(1e-5);
/// detailed constructor test
/// detailed constructor test: charge range and polarity must match an equally-constructed group
START_SECTION((PeakGroup(const int min_abs_charge, const int max_abs_charge, const bool is_positive)))
{
  PeakGroup tmp_pg(1, 2, true);
  TEST_EQUAL(std::get<0>(tmp_pg.getAbsChargeRange()), std::get<0>(sample_pg.getAbsChargeRange()));
  TEST_EQUAL(std::get<1>(tmp_pg.getAbsChargeRange()), std::get<1>(sample_pg.getAbsChargeRange()));
  // BUGFIX: previously compared tmp_pg.isPositive() against itself (always true);
  // compare against the reference group like the charge-range checks above
  TEST_EQUAL(tmp_pg.isPositive(), sample_pg.isPositive());
}
END_SECTION
/// copy constructor test
START_SECTION((PeakGroup(const PeakGroup &)))
{
PeakGroup copy_pg(sample_pg);
TEST_EQUAL(std::get<0>(sample_pg.getAbsChargeRange()), std::get<0>(copy_pg.getAbsChargeRange()));
TEST_EQUAL(sample_pg.size(), copy_pg.size());
TEST_REAL_SIMILAR(sample_pg[0].intensity, copy_pg[0].intensity);
TEST_REAL_SIMILAR(sample_pg[1].mz, copy_pg[1].mz);
}
END_SECTION
/// assignment constructor test
START_SECTION((PeakGroup& operator=(const PeakGroup &t)))
{
PeakGroup tmp_pg = sample_pg;
TEST_EQUAL(std::get<0>(sample_pg.getAbsChargeRange()), std::get<0>(tmp_pg.getAbsChargeRange()));
TEST_EQUAL(sample_pg.size(), tmp_pg.size());
TEST_REAL_SIMILAR(sample_pg[0].intensity, tmp_pg[0].intensity);
TEST_REAL_SIMILAR(sample_pg[1].mz, tmp_pg[1].mz);
}
END_SECTION
/////////////////////////////////////////////////////////////
// accessor method tests
/////////////////////////////////////////////////////////////
START_SECTION((std::tuple<double, double> getMzRange(int abs_charge) const))
{
std::tuple<double, double> temp_range = sample_pg.getMzRange(2);
TEST_REAL_SIMILAR(std::get<0>(temp_range), 1125.5118055019082);
TEST_REAL_SIMILAR(std::get<1>(temp_range), 1127.0168377586081);
}
END_SECTION
START_SECTION((bool isPositive() const))
{
bool test_positive = sample_pg.isPositive();
TEST_EQUAL(test_positive, true);
}
END_SECTION
START_SECTION((int getScanNumber() const))
{
int test_scan_num = sample_pg.getScanNumber();
TEST_EQUAL(test_scan_num, 3);
}
END_SECTION
START_SECTION((void setScanNumber(const int scan_number)))
{
sample_pg.setScanNumber(5);
int test_scan_num = sample_pg.getScanNumber();
TEST_EQUAL(test_scan_num, 5);
}
END_SECTION
/// not testable : setChargePower_, setChargeSignalPower_ - no getter for private variable
START_SECTION((void setChargeIsotopeCosine(const int abs_charge, const float cos)))
{
sample_pg.setChargeIsotopeCosine(2, 0.4);
TEST_REAL_SIMILAR(sample_pg.getChargeIsotopeCosine(2), 0.4);
}
END_SECTION
START_SECTION((float getChargeIsotopeCosine(const int abs_charge) const))
{
TEST_REAL_SIMILAR(sample_pg.getChargeIsotopeCosine(0), .0);
TEST_REAL_SIMILAR(sample_pg.getChargeIsotopeCosine(2), 0.4);
}
END_SECTION
START_SECTION((float getChargeIntensity(const int abs_charge) const))
{
TEST_REAL_SIMILAR(sample_pg.getChargeIntensity(2), .0);
}
END_SECTION
START_SECTION((void setRepAbsCharge(const int max_qscore_charge)))
{
sample_pg.setRepAbsCharge(2);
int temp_abs = sample_pg.getRepAbsCharge();
TEST_EQUAL(temp_abs, 2);
}
END_SECTION
START_SECTION((std::tuple<double, double> getRepMzRange() const))
{
std::tuple<double, double> tmp_range = sample_pg.getRepMzRange();
TEST_REAL_SIMILAR(std::get<0>(tmp_range), 1125.5118055019082);
TEST_REAL_SIMILAR(std::get<1>(tmp_range), 1127.0168377586081);
}
END_SECTION
START_SECTION((std::tuple<int, int> getAbsChargeRange() const))
{
std::tuple<int, int> test_cs_range = sample_pg.getAbsChargeRange();
TEST_EQUAL(std::get<0>(test_cs_range), 1);
TEST_EQUAL(std::get<1>(test_cs_range), 2);
}
END_SECTION
START_SECTION((void setAbsChargeRange(const int min_abs_charge, const int max_abs_charge)))
{
PeakGroup sample_pg2(4, 9, true); // for operator test
std::tuple<int, int> test_cs_range = sample_pg2.getAbsChargeRange();
TEST_EQUAL(std::get<0>(test_cs_range), 4);
TEST_EQUAL(std::get<1>(test_cs_range), 9);
}
END_SECTION
START_SECTION((void setIsotopeCosine(const float cos)))
{
sample_pg.setIsotopeCosine(0.3);
double temp_iso = sample_pg.getIsotopeCosine();
TEST_REAL_SIMILAR(temp_iso, 0.3);
}
END_SECTION
START_SECTION((float getIsotopeCosine() const))
{
double temp_iso = sample_pg.getIsotopeCosine();
TEST_REAL_SIMILAR(temp_iso, 0.3);
}
END_SECTION
START_SECTION((int getRepAbsCharge() const))
{
int temp_abs = sample_pg.getRepAbsCharge();
TEST_EQUAL(temp_abs, 2);
}
END_SECTION
START_SECTION((void setQscore(const float qscore)))
{
sample_pg.setQscore(0.1);
double temp_score = sample_pg.getQscore();
TEST_REAL_SIMILAR(temp_score, 0.1);
}
END_SECTION
START_SECTION((float getQscore() const))
{
double temp_score = sample_pg.getQscore();
TEST_REAL_SIMILAR(temp_score, 0.1);
}
END_SECTION
START_SECTION((void setChargeScore(const float charge_score)))
{
sample_pg.setChargeScore(0.2);
double temp_score = sample_pg.getChargeScore();
TEST_REAL_SIMILAR(temp_score, 0.2);
}
END_SECTION
START_SECTION((float getChargeScore() const))
{
double temp_score = sample_pg.getChargeScore();
TEST_REAL_SIMILAR(temp_score, 0.2);
}
END_SECTION
START_SECTION((void setAvgPPMError(const float error)))
{
sample_pg.setAvgPPMError(0.2);
double temp_score = sample_pg.getAvgPPMError();
TEST_REAL_SIMILAR(temp_score, 0.2);
}
END_SECTION
START_SECTION((float getAvgPPMError() const))
{
double temp_score = sample_pg.getAvgPPMError();
TEST_REAL_SIMILAR(temp_score, 0.2);
}
END_SECTION
START_SECTION((void setSNR(const float snr)))
{
sample_pg.setSNR(0.2);
double temp_score = sample_pg.getSNR();
TEST_REAL_SIMILAR(temp_score, 0.2);
}
END_SECTION
START_SECTION((float getSNR() const))
{
double temp_score = sample_pg.getSNR();
TEST_REAL_SIMILAR(temp_score, 0.2);
}
END_SECTION
START_SECTION((void setChargeSNR(const int abs_charge, const float c_snr)))
{
sample_pg.setChargeSNR(2, 0.2);
TEST_REAL_SIMILAR(sample_pg.getChargeSNR(2), 0.2);
}
END_SECTION
START_SECTION((float getChargeSNR(const int abs_charge) const))
{
TEST_REAL_SIMILAR(sample_pg.getChargeSNR(0), .0);
TEST_REAL_SIMILAR(sample_pg.getChargeSNR(2), 0.2);
}
END_SECTION
START_SECTION((double getMonoMass() const))
{
double tmp_mass = sample_pg.getMonoMass();
TEST_REAL_SIMILAR(tmp_mass, 2249.0101019557173);
}
END_SECTION
START_SECTION((double getIntensity() const))
{
double tmp_inty = sample_pg.getIntensity();
TEST_REAL_SIMILAR(tmp_inty, 12878380.801757813)
}
END_SECTION
PeakGroup sample_pg2(sample_pg);
LogMzPeak tmp_peak4 = fillPeak(1127.5185151766082, 2504.3433, 2, 4);
sample_pg2.push_back(tmp_peak4);
sample_pg2.updateMonoMassAndIsotopeIntensities(1e-5);
// After appending a 5th isotope peak, mono mass and total intensity must be re-derived.
// BUGFIX: section name was garbled ("updateMonom assAndIsotopeIntensities").
START_SECTION((void updateMonoMassAndIsotopeIntensities()))
{
  double temp_mass = sample_pg2.getMonoMass();
  double temp_inty = sample_pg2.getIntensity();
  TEST_REAL_SIMILAR(temp_mass, 2249.0101025181098);
  TEST_REAL_SIMILAR(temp_inty, 12880881);
}
END_SECTION
/// operator constructor test
START_SECTION((bool operator<(const PeakGroup &a) const))
{
// between two masses with different monoisotopic masses
bool is_pg2_bigger = sample_pg < sample_pg2;
TEST_EQUAL(is_pg2_bigger, true);
}
END_SECTION
START_SECTION((bool operator>(const PeakGroup &a) const))
{
// between two masses with different monoisotopic masses
bool is_pg2_bigger = sample_pg2 > sample_pg;
TEST_EQUAL(is_pg2_bigger, true);
}
END_SECTION
START_SECTION((bool operator==(const PeakGroup &a) const))
{
PeakGroup sample_pg4(sample_pg);
bool are_two_pgs_same = sample_pg == sample_pg4;
TEST_EQUAL(are_two_pgs_same, true);
}
END_SECTION
// Verify std::hash<PeakGroup>: equal objects hash equally, hashing is deterministic,
// distinct objects (with high probability) hash differently, and PeakGroup works as
// an unordered_set key.
START_SECTION(([EXTRA] std::hash<PeakGroup>))
{
  std::hash<PeakGroup> hasher;
  // Test that equal PeakGroups have equal hashes
  PeakGroup pg_copy(sample_pg);
  TEST_EQUAL(hasher(sample_pg), hasher(pg_copy));
  // Test that the hash is consistent (same object hashes the same)
  std::size_t hash1 = hasher(sample_pg);
  std::size_t hash2 = hasher(sample_pg);
  TEST_EQUAL(hash1, hash2);
  // Test that different PeakGroups have different hashes (with high probability)
  // sample_pg2 has different mass/intensity than sample_pg
  std::size_t hash_pg1 = hasher(sample_pg);
  std::size_t hash_pg2 = hasher(sample_pg2);
  TEST_NOT_EQUAL(hash_pg1, hash_pg2);
  // Test usability in unordered_set
  std::unordered_set<PeakGroup> pg_set;
  pg_set.insert(sample_pg);
  pg_set.insert(sample_pg2);
  TEST_EQUAL(pg_set.size(), 2);
  // Inserting equal element should not change size
  pg_set.insert(pg_copy);
  TEST_EQUAL(pg_set.size(), 2);
}
END_SECTION
/// TODOs
/// - updateIsotopeCosineAndQscore, recruitAllPeaksInSpectrum, isSignalMZ, setTargeted, getIsotopeIntensities
/// - isTargeted, getTargetDecoyType, setTargetDecoyType, getQvalue, setQvalue, getQvalueWithChargeDecoyOnly, setQvalueWithChargeDecoyOnly
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/ProteaseDB_test.cpp | .cpp | 4,149 | 132 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Xiao Liang $
// $Authors: Xiao Liang, Chris Bielow $
// --------------------------------------------------------------------------
//
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/test_config.h>
///////////////////////////
#include <OpenMS/CHEMISTRY/ProteaseDB.h>
#include <OpenMS/CHEMISTRY/DigestionEnzymeProtein.h>
#include <OpenMS/CHEMISTRY/EmpiricalFormula.h>
using namespace OpenMS;
using namespace std;
///////////////////////////
START_TEST(ProteaseDB, "$Id$")
/////////////////////////////////////////////////////////////
ProteaseDB* ptr = nullptr;
ProteaseDB* nullPointer = nullptr;
String RKP("(?<=[RX])(?!P)");
START_SECTION(ProteaseDB* getInstance())
ptr = ProteaseDB::getInstance();
TEST_NOT_EQUAL(ptr, nullPointer)
END_SECTION
START_SECTION(virtual ~ProteaseDB())
NOT_TESTABLE
END_SECTION
START_SECTION((bool hasEnzyme(const String &name) const))
TEST_EQUAL(ptr->hasEnzyme("Try"), false)
TEST_EQUAL(ptr->hasEnzyme("Trypsin"), true)
END_SECTION
START_SECTION((const DigestionEnzymeProtein* getEnzyme(const String &name) const))
TEST_EQUAL(ptr->getEnzyme("Trypsin")->getName(), "Trypsin")
// test the synonyms
TEST_EQUAL(ptr->getEnzyme("Clostripain")->getName(), "Arg-C")
TEST_EXCEPTION(Exception::ElementNotFound, ptr->getEnzyme("DOESNOTEXIST"))
END_SECTION
START_SECTION((bool hasRegEx(const String& cleavage_regex) const))
TEST_EQUAL(ptr->hasRegEx("(?<=[P])(?!P)"), false)
TEST_EQUAL(ptr->hasRegEx(RKP), true)
END_SECTION
START_SECTION((const DigestionEnzymeProtein* getEnzymeByRegEx(const String& cleavage_regex) const))
TEST_EQUAL(ptr->getEnzymeByRegEx(RKP)->getName(), "Arg-C")
END_SECTION
START_SECTION(bool hasEnzyme(const DigestionEnzymeProtein* enzyme) const)
TEST_EQUAL(ptr->hasEnzyme(ptr->getEnzyme("Trypsin")), true)
DigestionEnzymeProtein myNewEnzyme("bla", "blubb");
TEST_EQUAL(ptr->hasEnzyme(&myNewEnzyme), false);
END_SECTION
// Iterate over all registered enzymes and check that a reasonable number exists.
START_SECTION(ConstEnzymeIterator beginEnzyme() const)
// use auto: the section header documents a ConstEnzymeIterator return, but the old code
// declared ProteaseDB::EnzymeIterator — auto always matches the actual accessor type
auto it = ptr->beginEnzyme();
Size count(0);
while (it != ptr->endEnzyme())
{
++it;
++count;
}
TEST_EQUAL(count >= 10, true)
END_SECTION
START_SECTION(ConstEnzymeIterator endEnzyme() const)
NOT_TESTABLE // tested above
END_SECTION
START_SECTION((void getAllNames(std::vector<String>& all_names) const))
vector<String> names;
ptr->getAllNames(names);
TEST_EQUAL(find(names.begin(), names.end(), "Trypsin") != names.end(), true)
TEST_EQUAL(find(names.begin(), names.end(), "Tryptryp") != names.end(), false)
Size old_size=names.size();
ptr->getAllNames(names);
TEST_EQUAL(names.size(), old_size)
END_SECTION
START_SECTION((void getAllXTandemNames(std::vector<String>& all_names) const))
vector<String> names;
ptr->getAllXTandemNames(names);
TEST_EQUAL(find(names.begin(), names.end(), "Trypsin") != names.end(), true)
TEST_EQUAL(find(names.begin(), names.end(), "no cleavage") != names.end(), true)
Size old_size=names.size();
ptr->getAllXTandemNames(names);
TEST_EQUAL(names.size(), old_size)
END_SECTION
START_SECTION((void getAllOMSSANames(std::vector<String>& all_names) const))
vector<String> names;
ptr->getAllOMSSANames(names);
TEST_EQUAL(find(names.begin(), names.end(), "Trypsin") != names.end(), true)
TEST_EQUAL(find(names.begin(), names.end(), "leukocyte elastase") != names.end(), false)
Size old_size=names.size();
ptr->getAllOMSSANames(names);
TEST_EQUAL(names.size(), old_size)
END_SECTION
// Concurrent access to the singleton: every thread must see a fully-initialized instance
// that knows "Trypsin".
START_SECTION([EXTRA] multithreaded example)
{
  int nr_iterations (1e2), test (0);
#pragma omp parallel for reduction (+: test)
  for (int k = 1; k < nr_iterations + 1; k++)
  {
    auto p = ProteaseDB::getInstance();
    int tmp (0);
    // BUGFIX: was `if (p->hasEnzyme("Trypsin"), true)` — the comma operator discarded the
    // lookup result and made the condition unconditionally true, so the test could not fail
    if (p->hasEnzyme("Trypsin"))
    {
      tmp++;
    }
    test += tmp;
  }
  TEST_EQUAL(test, nr_iterations)
}
END_SECTION
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/ConsensusMapMergerAlgorithm_test.cpp | .cpp | 3,569 | 78 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Julianus Pfeuffer $
// $Authors: Julianus Pfeuffer $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/ANALYSIS/ID/ConsensusMapMergerAlgorithm.h>
#include <OpenMS/test_config.h>
#include <OpenMS/FORMAT/ConsensusXMLFile.h>
using namespace OpenMS;
using namespace std;
START_TEST(ConsensusMapMergerAlgorithm, "$Id$")
START_SECTION(mergeAllIDRuns)
{
ConsensusXMLFile cf;
ConsensusMap cmap;
cf.load(OPENMS_GET_TEST_DATA_PATH("BSA.consensusXML"), cmap);
ConsensusMapMergerAlgorithm cmerge;
cmerge.mergeAllIDRuns(cmap);
TEST_EQUAL(cmap.getProteinIdentifications().size(), 1)
}
END_SECTION
START_SECTION(mergeProteinsAcrossFractionsAndReplicates (no Design))
{
ConsensusXMLFile cf;
ConsensusMap cmap;
cf.load(OPENMS_GET_TEST_DATA_PATH("BSA.consensusXML"), cmap);
ConsensusMapMergerAlgorithm cmerge;
ExperimentalDesign ed = ExperimentalDesign::fromConsensusMap(cmap);
cmerge.mergeProteinsAcrossFractionsAndReplicates(cmap, ed);
//without a special experimental design on sample level, runs are treated like replicates
// or fractions and all are merged
TEST_EQUAL(cmap.getProteinIdentifications().size(), 1)
StringList toFill; cmap.getProteinIdentifications()[0].getPrimaryMSRunPath(toFill);
TEST_EQUAL(toFill.size(), 6)
}
END_SECTION
START_SECTION(mergeProteinsAcrossFractionsAndReplicates)
{
ConsensusXMLFile cf;
ConsensusMap cmap;
cf.load(OPENMS_GET_TEST_DATA_PATH("BSA.consensusXML"), cmap);
ConsensusMapMergerAlgorithm cmerge;
ExperimentalDesign ed = ExperimentalDesign::fromConsensusMap(cmap);
ExperimentalDesign::SampleSection ss{
{{"1","C1"},{"2","C2"},{"3","C3"}},
{{1,0},{2,1},{3,2}},
{{"Sample",0},{"Condition",1}}
};
ed.setSampleSection(ss);
cmerge.mergeProteinsAcrossFractionsAndReplicates(cmap, ed);
TEST_EQUAL(cmap.getProteinIdentifications().size(), 3)
StringList toFill; cmap.getProteinIdentifications()[0].getPrimaryMSRunPath(toFill);
TEST_EQUAL(toFill.size(), 2)
TEST_EQUAL(toFill[0], "/Users/pfeuffer/git/OpenMS-inference-src/share/OpenMS/examples/FRACTIONS/BSA1_F1.mzML")
TEST_EQUAL(toFill[1], "/Users/pfeuffer/git/OpenMS-inference-src/share/OpenMS/examples/FRACTIONS/BSA1_F2.mzML")
toFill.clear(); cmap.getProteinIdentifications()[1].getPrimaryMSRunPath(toFill);
TEST_EQUAL(toFill.size(), 2)
TEST_EQUAL(toFill[0], "/Users/pfeuffer/git/OpenMS-inference-src/share/OpenMS/examples/FRACTIONS/BSA2_F1.mzML")
TEST_EQUAL(toFill[1], "/Users/pfeuffer/git/OpenMS-inference-src/share/OpenMS/examples/FRACTIONS/BSA2_F2.mzML")
toFill.clear(); cmap.getProteinIdentifications()[2].getPrimaryMSRunPath(toFill);
TEST_EQUAL(toFill.size(), 2)
TEST_EQUAL(toFill[0], "/Users/pfeuffer/git/OpenMS-inference-src/share/OpenMS/examples/FRACTIONS/BSA3_F1.mzML")
TEST_EQUAL(toFill[1], "/Users/pfeuffer/git/OpenMS-inference-src/share/OpenMS/examples/FRACTIONS/BSA3_F2.mzML")
TEST_EQUAL(cmap.getProteinIdentifications().size(), 3)
}
END_SECTION
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/MSSpectrum_test.cpp | .cpp | 48,747 | 1,565 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg$
// $Authors: Marc Sturm $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/test_config.h>
///////////////////////////
#include <OpenMS/KERNEL/MSSpectrum.h>
///////////////////////////
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/IONMOBILITY/IMDataConverter.h>
#include <sstream>
using namespace OpenMS;
using namespace std;
static_assert(OpenMS::Test::fulfills_rule_of_5<MSSpectrum>(), "Must fulfill rule of 5");
static_assert(OpenMS::Test::fulfills_rule_of_6<MSSpectrum>(), "Must fulfill rule of 6");
static_assert(OpenMS::Test::fulfills_fast_vector<MSSpectrum>(), "Must have fast vector semantics");
static_assert(std::is_nothrow_move_constructible_v<MSSpectrum>, "Must have nothrow move constructible");
/// A spec with RT, m/z, intensity, and meta data arrays, marked as an IM spectrum (i.e. spec.containsIMData() == true)
MSSpectrum getPrefilledSpec()
{
  MSSpectrum ds;
  // template arrays: one value per peak, reused for float/string/int meta data arrays
  MSSpectrum::FloatDataArray float_array {56.0, 201.0, 31, 31, 31, 37, 29, 34, 60, 29};
  MSSpectrum::StringDataArray string_array {"56", "201", "31", "31", "31", "37", "29", "34", "60", "29"};
  MSSpectrum::IntegerDataArray int_array {56, 201, 31, 31, 31, 37, 29, 34, 60, 29};
  std::vector<double> mzs {423.269, 420.130, 419.113, 418.232, 416.293, 415.287, 414.301, 413.800, 412.824, 412.321};
  std::vector<double> intensities {56, 201, 31, 31, 31, 37, 29, 34, 60, 29};
  // fill the ten peaks (note: m/z values are intentionally in descending order)
  for (Size i = 0; i < mzs.size(); ++i)
  {
    ds.emplace_back(mzs[i], intensities[i]);
  }
  ds.getFloatDataArrays() = std::vector<MSSpectrum::FloatDataArray>(3, float_array);
  ds.getFloatDataArrays()[0].setName("f1");
  ds.getFloatDataArrays()[1].setName("f2");
  ds.getFloatDataArrays()[2].setName("f3");
  // tagging "f2" with an IM unit turns the spectrum into an ion-mobility spectrum
  IMDataConverter::setIMUnit(ds.getFloatDataArrays()[1], DriftTimeUnit::MILLISECOND);
  TEST_TRUE(ds.containsIMData())
  ds.getStringDataArrays() = std::vector<MSSpectrum::StringDataArray>(2, string_array);
  ds.getStringDataArrays()[0].setName("s1");
  ds.getStringDataArrays()[1].setName("s2");
  ds.getIntegerDataArrays() = std::vector<MSSpectrum::IntegerDataArray>(2, int_array);
  ds.getIntegerDataArrays()[0].setName("i1");
  ds.setRT(5.0);
  return ds;
}
START_TEST(MSSpectrum, "$Id$")
/////////////////////////////////////////////////////////////
// Dummy peak data
Peak1D p1;
p1.setIntensity(1.0f);
p1.setMZ(2.0);
Peak1D p2;
p2.setIntensity(2.0f);
p2.setMZ(10.0);
Peak1D p3;
p3.setIntensity(3.0f);
p3.setMZ(30.0);
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
MSSpectrum* ptr = nullptr;
MSSpectrum* nullPointer = nullptr;
START_SECTION((MSSpectrum()))
{
ptr = new MSSpectrum();
TEST_NOT_EQUAL(ptr, nullPointer)
}
END_SECTION
START_SECTION((~MSSpectrum()))
{
delete ptr;
}
END_SECTION
START_SECTION(([EXTRA] MSSpectrum()))
{
MSSpectrum tmp;
Peak1D peak;
peak.getPosition()[0] = 47.11;
tmp.push_back(peak);
TEST_EQUAL(tmp.size(),1);
TEST_REAL_SIMILAR(tmp[0].getMZ(), 47.11);
}
END_SECTION
START_SECTION((MSSpectrum(const std::initializer_list<Peak1D>& init)))
{
MSSpectrum tmp {{47.11, 2}, {500.0, 3}};
TEST_EQUAL(tmp.size(), 2);
TEST_REAL_SIMILAR(tmp[0].getMZ(), 47.11);
TEST_REAL_SIMILAR(tmp[1].getMZ(), 500.0);
TEST_REAL_SIMILAR(tmp[0].getIntensity(), 2)
TEST_REAL_SIMILAR(tmp[1].getIntensity(), 3)
}
END_SECTION
/////////////////////////////////////////////////////////////
// Member accessors
// Default MS level is 1.
START_SECTION((UInt getMSLevel() const))
{
MSSpectrum spec;
TEST_EQUAL(spec.getMSLevel(), 1)
}
END_SECTION
START_SECTION((void setMSLevel(UInt ms_level)))
{
MSSpectrum spec;
spec.setMSLevel(17);
TEST_EQUAL(spec.getMSLevel(), 17)
}
END_SECTION
START_SECTION((const String& getName() const))
{
MSSpectrum s;
TEST_STRING_EQUAL(s.getName(), "")
}
END_SECTION
START_SECTION((void setName(const String &name)))
{
MSSpectrum s;
s.setName("bla");
TEST_STRING_EQUAL(s.getName(),"bla")
}
END_SECTION
// RT and drift time default to -1.0 (unset).
START_SECTION((double getRT() const ))
{
MSSpectrum s;
TEST_REAL_SIMILAR(s.getRT(), -1.0)
}
END_SECTION
START_SECTION((void setRT(double rt)))
{
MSSpectrum s;
s.setRT(0.451);
TEST_REAL_SIMILAR(s.getRT(), 0.451)
}
END_SECTION
START_SECTION((double getDriftTime() const ))
{
MSSpectrum s;
TEST_REAL_SIMILAR(s.getDriftTime(), -1.0)
}
END_SECTION
START_SECTION((void setDriftTime(double dt)))
{
MSSpectrum s;
s.setDriftTime(0.451);
TEST_REAL_SIMILAR(s.getDriftTime(), 0.451)
}
END_SECTION
// Drift time unit defaults to NONE; string form is "<NONE>".
START_SECTION((double getDriftTimeUnit() const ))
{
MSSpectrum s;
TEST_EQUAL(s.getDriftTimeUnit() == DriftTimeUnit::NONE, true);
}
END_SECTION
START_SECTION((double getDriftTimeUnitAsString() const))
{
MSSpectrum s;
TEST_EQUAL(s.getDriftTimeUnitAsString(), "<NONE>");
}
END_SECTION
START_SECTION((void setDriftTimeUnit(double dt)))
{
MSSpectrum s;
s.setDriftTimeUnit(DriftTimeUnit::MILLISECOND);
TEST_EQUAL(s.getDriftTimeUnit() == DriftTimeUnit::MILLISECOND, true);
TEST_EQUAL(s.getDriftTimeUnitAsString(), "ms");
}
END_SECTION
// Meta-data arrays (float/string/integer) start out empty and are
// resizable through the non-const accessors.
START_SECTION((const FloatDataArrays& getFloatDataArrays() const))
{
MSSpectrum s;
TEST_EQUAL(s.getFloatDataArrays().size(),0)
}
END_SECTION
START_SECTION((FloatDataArrays& getFloatDataArrays()))
{
MSSpectrum s;
s.getFloatDataArrays().resize(2);
TEST_EQUAL(s.getFloatDataArrays().size(),2)
}
END_SECTION
START_SECTION((const StringDataArrays& getStringDataArrays() const))
{
MSSpectrum s;
TEST_EQUAL(s.getStringDataArrays().size(),0)
}
END_SECTION
START_SECTION((StringDataArrays& getStringDataArrays()))
{
MSSpectrum s;
s.getStringDataArrays().resize(2);
TEST_EQUAL(s.getStringDataArrays().size(),2)
}
END_SECTION
START_SECTION((const IntegerDataArrays& getIntegerDataArrays() const))
{
MSSpectrum s;
TEST_EQUAL(s.getIntegerDataArrays().size(),0)
}
END_SECTION
START_SECTION((IntegerDataArrays& getIntegerDataArrays()))
{
MSSpectrum s;
s.getIntegerDataArrays().resize(2);
TEST_EQUAL(s.getIntegerDataArrays().size(),2)
}
END_SECTION
// select() must reorder/subset peaks AND all attached meta-data arrays in lockstep.
START_SECTION((MSSpectrum& select(const std::vector<Size>& indices)))
{
MSSpectrum s;
s.push_back(p1);
s.push_back(p2);
s.push_back(p3);
s.push_back(p3);
s.push_back(p2);
MSSpectrum::IntegerDataArray aia{1, 2, 3, 4, 5};
MSSpectrum::FloatDataArray afa{1.0, 2.0, 3.0, 4.0, 5.0};
MSSpectrum::StringDataArray asa{"1", "2", "3", "4", "5"};
s.getFloatDataArrays().push_back(afa);
s.getIntegerDataArrays().push_back(aia);
s.getStringDataArrays().push_back(asa);
s.getFloatDataArrays().push_back(afa);
s.getIntegerDataArrays().push_back(aia);
s.getStringDataArrays().push_back(asa);
TEST_REAL_SIMILAR(s[0].getIntensity(), 1.0)
TEST_REAL_SIMILAR(s[4].getIntensity(), 2.0)
TEST_EQUAL(s.getFloatDataArrays().size(), 2)
TEST_EQUAL(s.getFloatDataArrays()[0].size(), 5)
TEST_EQUAL(s.getIntegerDataArrays().size(), 2)
TEST_EQUAL(s.getIntegerDataArrays()[0].size(), 5)
TEST_EQUAL(s.getStringDataArrays().size(), 2)
TEST_EQUAL(s.getStringDataArrays()[0].size(), 5)
// re-order
// Permutation of all 5 indices: sizes stay the same, order changes.
MSSpectrum s2 = s;
Size order[] = {4, 2, 3, 1, 0};
s2.select(std::vector<Size>(&order[0], &order[5]));
TEST_REAL_SIMILAR(s2[0].getIntensity(), 2.0)
TEST_REAL_SIMILAR(s2[4].getIntensity(), 1.0)
TEST_EQUAL(s2.getFloatDataArrays().size(), 2)
TEST_EQUAL(s2.getFloatDataArrays()[0].size(), 5)
TEST_EQUAL(s2.getIntegerDataArrays().size(), 2)
TEST_EQUAL(s2.getIntegerDataArrays()[0].size(), 5)
TEST_EQUAL(s2.getStringDataArrays().size(), 2)
TEST_EQUAL(s2.getStringDataArrays()[0].size(), 5)
TEST_REAL_SIMILAR(s2.getFloatDataArrays()[0][1], 3.0)
TEST_EQUAL(s2.getIntegerDataArrays()[0][1], 3)
TEST_EQUAL(s2.getStringDataArrays()[0][1], "3")
// subset
// Selecting only 3 indices must shrink peaks and meta arrays alike.
s2 = s;
Size subset[] = {4, 2, 3};
// --> new values in Meta arrays are:
// 5, 3, 4
s2.select(std::vector<Size>(&subset[0], &subset[3]));
TEST_REAL_SIMILAR(s2[0].getIntensity(), 2.0)
TEST_REAL_SIMILAR(s2[1].getIntensity(), 3.0)
TEST_REAL_SIMILAR(s2[2].getIntensity(), 3.0)
TEST_EQUAL(s2.getFloatDataArrays().size(), 2)
TEST_EQUAL(s2.getFloatDataArrays()[0].size(), 3)
TEST_EQUAL(s2.getIntegerDataArrays().size(), 2)
TEST_EQUAL(s2.getIntegerDataArrays()[0].size(), 3)
TEST_EQUAL(s2.getStringDataArrays().size(), 2)
TEST_EQUAL(s2.getStringDataArrays()[0].size(), 3)
TEST_REAL_SIMILAR(s2.getFloatDataArrays()[0][1], 3.0)
TEST_EQUAL(s2.getIntegerDataArrays()[0][1], 3)
TEST_EQUAL(s2.getStringDataArrays()[0][1], "3")
}
END_SECTION
/////////////////////////////////////////////////////////////
// RangeManager
START_SECTION((virtual void updateRanges()))
{
// getPrefilledSpec() is defined earlier in this file; it carries an
// IM float data array, so mobility ranges are expected to be filled too.
MSSpectrum s = getPrefilledSpec();
for (int i = 0; i < 2; ++i) // second time to check the initialization
{
s.updateRanges();
TEST_REAL_SIMILAR(s.getMinIntensity(), 29)
TEST_REAL_SIMILAR(s.getMaxIntensity(), 201)
TEST_REAL_SIMILAR(s.getMinMZ(), 412.321)
TEST_REAL_SIMILAR(s.getMaxMZ(), 423.269)
TEST_REAL_SIMILAR(s.getMinMobility(), 29)
TEST_REAL_SIMILAR(s.getMaxMobility(), 201)
}
//test with only one peak
s = MSSpectrum{};
s.push_back(p1);
s.updateRanges();
TEST_REAL_SIMILAR(s.getMaxIntensity(), 1)
TEST_REAL_SIMILAR(s.getMinIntensity(), 1)
TEST_REAL_SIMILAR(s.getMaxMZ(), 2)
TEST_REAL_SIMILAR(s.getMinMZ(), 2)
// no IM data array present -> mobility range must stay empty
TEST_TRUE(s.RangeMobility::isEmpty())
}
END_SECTION
/////////////////////////////////////////////////////////////
// Copy constructor, move constructor, assignment operator, move assignment operator, equality
// Copy construction must preserve all spectrum settings, meta values and peaks.
START_SECTION((MSSpectrum(const MSSpectrum& source)))
{
MSSpectrum tmp;
tmp.getInstrumentSettings().getScanWindows().resize(1);
tmp.setMetaValue("label",5.0);
tmp.setMSLevel(17);
tmp.setRT(7.0);
tmp.setDriftTime(8.0);
tmp.setDriftTimeUnit(DriftTimeUnit::MILLISECOND);
tmp.setName("bla");
//peaks
MSSpectrum::PeakType peak;
peak.getPosition()[0] = 47.11;
tmp.push_back(peak);
MSSpectrum tmp2(tmp);
TEST_EQUAL(tmp2.getInstrumentSettings().getScanWindows().size(),1);
TEST_REAL_SIMILAR(tmp2.getMetaValue("label"), 5.0)
TEST_EQUAL(tmp2.getMSLevel(), 17)
TEST_REAL_SIMILAR(tmp2.getRT(), 7.0)
TEST_REAL_SIMILAR(tmp2.getDriftTime(), 8.0)
TEST_EQUAL(tmp2.getDriftTimeUnit() == DriftTimeUnit::MILLISECOND, true);
TEST_EQUAL(tmp2.getName(),"bla")
//peaks
TEST_EQUAL(tmp2.size(),1);
TEST_REAL_SIMILAR(tmp2[0].getPosition()[0],47.11);
}
END_SECTION
START_SECTION((MSSpectrum(const MSSpectrum&& source)))
{
// Ensure that MSSpectrum has a no-except move constructor (otherwise
// std::vector is inefficient and will copy instead of move).
TEST_EQUAL(noexcept(MSSpectrum(std::declval<MSSpectrum&&>())), true)
MSSpectrum tmp;
tmp.setRT(9.0);
tmp.setDriftTime(5.0);
tmp.setDriftTimeUnit(DriftTimeUnit::VSSC);
tmp.setMSLevel(18);
tmp.setName("bla2");
tmp.setMetaValue("label2",5.0);
tmp.getInstrumentSettings().getScanWindows().resize(2);
//peaks
MSSpectrum::PeakType peak;
peak.getPosition()[0] = 47.11;
tmp.push_back(peak);
peak.getPosition()[0] = 48.11;
tmp.push_back(peak);
//copy tmp so we can move one of them
MSSpectrum orig = tmp;
MSSpectrum tmp2(std::move(tmp));
TEST_EQUAL(tmp2, orig); // should be equal to the original
TEST_EQUAL(tmp2.getInstrumentSettings().getScanWindows().size(),2);
TEST_REAL_SIMILAR(tmp2.getMetaValue("label2"), 5.0)
TEST_EQUAL(tmp2.getMSLevel(), 18)
TEST_REAL_SIMILAR(tmp2.getRT(), 9.0)
TEST_REAL_SIMILAR(tmp2.getDriftTime(), 5.0)
TEST_EQUAL(tmp2.getDriftTimeUnit() == DriftTimeUnit::VSSC, true);
TEST_EQUAL(tmp2.getName(),"bla2")
TEST_EQUAL(tmp2.size(),2);
TEST_REAL_SIMILAR(tmp2[0].getPosition()[0],47.11);
TEST_REAL_SIMILAR(tmp2[1].getPosition()[0],48.11);
// test move
// The moved-from object is intentionally inspected: peaks and meta
// values are expected to have been pilfered.
TEST_EQUAL(tmp.size(),0);
TEST_EQUAL(tmp.metaValueExists("label2"), false);
}
END_SECTION
// Copy assignment: all state is copied; assigning an empty spectrum resets everything.
START_SECTION((MSSpectrum& operator= (const MSSpectrum& source)))
{
MSSpectrum tmp;
tmp.getInstrumentSettings().getScanWindows().resize(1);
tmp.setMetaValue("label",5.0);
tmp.setMSLevel(17);
tmp.setRT(7.0);
tmp.setDriftTime(8.0);
tmp.setDriftTimeUnit(DriftTimeUnit::MILLISECOND);
tmp.setName("bla");
//peaks
MSSpectrum::PeakType peak;
peak.getPosition()[0] = 47.11;
tmp.push_back(peak);
//normal assignment
MSSpectrum tmp2;
tmp2 = tmp;
TEST_EQUAL(tmp2.getInstrumentSettings().getScanWindows().size(), 1);
TEST_REAL_SIMILAR(tmp2.getMetaValue("label"), 5.0)
TEST_EQUAL(tmp2.getMSLevel(), 17)
TEST_REAL_SIMILAR(tmp2.getRT(), 7.0)
TEST_REAL_SIMILAR(tmp2.getDriftTime(), 8.0)
TEST_EQUAL(tmp2.getDriftTimeUnit() == DriftTimeUnit::MILLISECOND, true);
TEST_EQUAL(tmp2.getName(), "bla")
TEST_EQUAL(tmp2.size(), 1);
TEST_REAL_SIMILAR(tmp2[0].getPosition()[0], 47.11);
//Assignment of empty object
//normal assignment
tmp2 = MSSpectrum();
TEST_EQUAL(tmp2.getInstrumentSettings().getScanWindows().size(), 0);
TEST_EQUAL(tmp2.metaValueExists("label"), false)
TEST_EQUAL(tmp2.getMSLevel(),1)
TEST_REAL_SIMILAR(tmp2.getRT(), -1.0)
TEST_REAL_SIMILAR(tmp2.getDriftTime(), -1.0)
TEST_EQUAL(tmp2.getDriftTimeUnit() == DriftTimeUnit::NONE, true);
TEST_EQUAL(tmp2.getName(), "")
TEST_EQUAL(tmp2.size(), 0);
}
END_SECTION
// Move assignment: target takes over all state; the moved-from object is emptied.
START_SECTION((MSSpectrum& operator= (const MSSpectrum&& source)))
{
MSSpectrum tmp {{47.11, 0}, {48.11, 0}};
tmp.setRT(9.0);
tmp.setDriftTime(5.0);
tmp.setDriftTimeUnit(DriftTimeUnit::VSSC);
tmp.setMSLevel(18);
tmp.setName("bla2");
tmp.setMetaValue("label2",5.0);
tmp.getInstrumentSettings().getScanWindows().resize(2);
//copy tmp so we can move one of them
MSSpectrum orig = tmp;
//move assignment
MSSpectrum tmp2;
tmp2 = std::move(tmp);
TEST_EQUAL(tmp2, orig); // should be equal to the original
TEST_EQUAL(tmp2.getInstrumentSettings().getScanWindows().size(),2);
TEST_REAL_SIMILAR(tmp2.getMetaValue("label2"), 5.0)
TEST_EQUAL(tmp2.getMSLevel(), 18)
TEST_REAL_SIMILAR(tmp2.getRT(), 9.0)
TEST_REAL_SIMILAR(tmp2.getDriftTime(), 5.0)
TEST_EQUAL(tmp2.getDriftTimeUnit() == DriftTimeUnit::VSSC, true);
TEST_EQUAL(tmp2.getName(),"bla2")
TEST_EQUAL(tmp2.size(),2);
TEST_REAL_SIMILAR(tmp2[0].getPosition()[0],47.11);
TEST_REAL_SIMILAR(tmp2[1].getPosition()[0],48.11);
// test move
TEST_EQUAL(tmp.size(),0);
TEST_EQUAL(tmp.metaValueExists("label2"), false);
//Assignment of empty object
//normal assignment
#ifndef OPENMS_WINDOWSPLATFORM
#pragma clang diagnostic push
// Ignore -Wpessimizing-move, because we want to test the move assignment operator.
#pragma clang diagnostic ignored "-Wpessimizing-move"
#endif
tmp2 = std::move(MSSpectrum());
#ifndef OPENMS_WINDOWSPLATFORM
#pragma clang diagnostic pop
#endif
TEST_EQUAL(tmp2.getInstrumentSettings().getScanWindows().size(),0);
TEST_FALSE(tmp2.metaValueExists("label"))
TEST_EQUAL(tmp2.getMSLevel(),1)
TEST_REAL_SIMILAR(tmp2.getRT(), -1.0)
TEST_REAL_SIMILAR(tmp2.getDriftTime(), -1.0)
TEST_EQUAL(tmp2.getDriftTimeUnit() == DriftTimeUnit::NONE, true);
TEST_EQUAL(tmp2.getName(),"")
TEST_EQUAL(tmp2.size(),0);
}
END_SECTION
// operator==: each individually mutated member must break equality;
// the name is explicitly NOT part of the comparison.
START_SECTION((bool operator== (const MSSpectrum& rhs) const))
{
MSSpectrum edit, empty;
TEST_TRUE(edit==empty);
edit = empty;
edit.getInstrumentSettings().getScanWindows().resize(1);
TEST_FALSE(edit==empty);
edit = empty;
edit.resize(1);
TEST_FALSE(edit == empty);
edit = empty;
edit.setMetaValue("label",String("bla"));
TEST_FALSE(empty == edit);
edit = empty;
edit.setDriftTime(5);
TEST_FALSE(empty == edit);
edit = empty;
edit.setDriftTimeUnit(DriftTimeUnit::MILLISECOND);
TEST_FALSE(empty == edit);
edit = empty;
edit.setRT(5);
TEST_FALSE(empty == edit);
edit = empty;
edit.setMSLevel(5);
TEST_FALSE(empty == edit);
edit = empty;
edit.getFloatDataArrays().resize(5);
TEST_FALSE(empty == edit);
edit = empty;
edit.getStringDataArrays().resize(5);
TEST_FALSE(empty == edit);
edit = empty;
edit.getIntegerDataArrays().resize(5);
TEST_FALSE(empty == edit);
//name is not checked => no change
edit = empty;
edit.setName("bla");
TEST_TRUE(empty == edit);
// clear(false) keeps ranges untouched; the spectrum must still compare
// equal to a pristine empty one afterwards.
edit = empty;
edit.push_back(p1);
edit.push_back(p2);
edit.updateRanges();
edit.clear(false);
TEST_TRUE(empty == edit);
}
END_SECTION
// operator!=: mirror of the operator== section — each mutated member must
// make the spectra compare unequal; the name is not part of the comparison.
START_SECTION((bool operator!= (const MSSpectrum& rhs) const))
{
MSSpectrum edit, empty;
TEST_FALSE(edit != empty);
edit = empty;
edit.getInstrumentSettings().getScanWindows().resize(1);
TEST_TRUE(edit != empty);
edit = empty;
edit.resize(1);
TEST_TRUE(edit != empty);
edit = empty;
edit.setMetaValue("label",String("bla"));
TEST_TRUE(edit != empty);
edit = empty;
edit.setDriftTime(5);
TEST_TRUE(edit != empty);
edit = empty;
edit.setDriftTimeUnit(DriftTimeUnit::MILLISECOND);
TEST_TRUE(edit != empty);
edit = empty;
edit.setRT(5);
TEST_TRUE(edit != empty);
edit = empty;
edit.setMSLevel(5);
TEST_TRUE(edit != empty);
edit = empty;
edit.getFloatDataArrays().resize(5);
TEST_TRUE(edit != empty);
edit = empty;
edit.getIntegerDataArrays().resize(5);
TEST_TRUE(edit != empty);
edit = empty;
edit.getStringDataArrays().resize(5);
TEST_TRUE(edit != empty);
//name is not checked => no change
edit = empty;
edit.setName("bla");
TEST_FALSE(edit != empty);
edit = empty;
edit.push_back(p1);
edit.push_back(p2);
edit.updateRanges();
edit.clear(false);
// fixed: this section tests operator!=, so assert via operator!=
// (was TEST_TRUE(edit == empty), which exercised operator== instead)
TEST_FALSE(edit != empty);
}
END_SECTION
/////////////////////////////////////////////////////////////
// Sorting
// sortByIntensity() must sort peaks ascending by intensity and apply the
// same permutation to every meta-data array.
START_SECTION((void sortByIntensity(bool reverse=false)))
{
MSSpectrum ds;
Peak1D p;
// The meta arrays mirror the m/z values so the permutation can be verified.
MSSpectrum::FloatDataArray float_array { 420.13f, 412.824f, 423.269f, 415.287f, 413.8f, 419.113f, 416.293f, 418.232f, 414.301f, 412.321f };
MSSpectrum::StringDataArray string_array {"420.13", "412.82", "423.27", "415.29", "413.80", "419.11", "416.29", "418.23", "414.30", "412.32"};
MSSpectrum::IntegerDataArray int_array {420, 412, 423, 415, 413, 419, 416, 418, 414, 412};
std::vector<double> mzs {420.130, 412.824, 423.269, 415.287, 413.800, 419.113, 416.293, 418.232, 414.301, 412.321};
std::vector<double> intensities {201, 60, 56, 37, 34, 31, 31, 31, 29, 29};
for (Size i = 0; i < mzs.size(); ++i)
{
p.setIntensity(intensities[i]); p.setMZ(mzs[i]);
ds.push_back(p);
}
// First pass: no meta arrays attached.
ds.sortByIntensity();
std::vector<double> intensities_copy(intensities);
std::sort(intensities_copy.begin(), intensities_copy.end());
MSSpectrum::iterator it_ds = ds.begin();
ABORT_IF(ds.size() != intensities_copy.size())
for(std::vector<double>::iterator it = intensities_copy.begin(); it != intensities_copy.end(); ++it)
{
TEST_EQUAL(it_ds->getIntensity(), *it);
++it_ds;
}
// Second pass: with float/string/integer meta arrays attached.
ds.clear(true);
for (Size i = 0; i < mzs.size(); ++i)
{
p.setIntensity(intensities[i]); p.setMZ(mzs[i]);
ds.push_back(p);
}
ds.getFloatDataArrays() = std::vector<MSSpectrum::FloatDataArray>(3,float_array);
ds.getFloatDataArrays()[0].setName("f1");
ds.getFloatDataArrays()[1].setName("f2");
ds.getFloatDataArrays()[2].setName("f3");
ds.getStringDataArrays() = std::vector<MSSpectrum::StringDataArray>(2, string_array);
ds.getStringDataArrays()[0].setName("s1");
ds.getStringDataArrays()[1].setName("s2");
ds.getIntegerDataArrays() = std::vector<MSSpectrum::IntegerDataArray>(1, int_array);
ds.getIntegerDataArrays()[0].setName("i1");
ds.sortByIntensity();
// Array names must survive the sort.
TEST_STRING_EQUAL(ds.getFloatDataArrays()[0].getName(),"f1")
TEST_STRING_EQUAL(ds.getFloatDataArrays()[1].getName(),"f2")
TEST_STRING_EQUAL(ds.getFloatDataArrays()[2].getName(),"f3")
TEST_STRING_EQUAL(ds.getStringDataArrays()[0].getName(),"s1")
TEST_STRING_EQUAL(ds.getStringDataArrays()[1].getName(),"s2")
TEST_STRING_EQUAL(ds.getIntegerDataArrays()[0].getName(),"i1")
MSSpectrum::iterator it1 = ds.begin();
MSSpectrum::FloatDataArray::iterator it2 = ds.getFloatDataArrays()[1].begin();
MSSpectrum::StringDataArray::iterator it3 = ds.getStringDataArrays()[0].begin();
MSSpectrum::IntegerDataArray::iterator it4 = ds.getIntegerDataArrays()[0].begin();
TOLERANCE_ABSOLUTE(0.0001)
for (std::vector<double>::iterator it = intensities_copy.begin(); it != intensities_copy.end(); ++it)
{
if (it1 != ds.end() && it2 != ds.getFloatDataArrays()[1].end() && it3 != ds.getStringDataArrays()[0].end() && it4 != ds.getIntegerDataArrays()[0].end())
{
//metadataarray values == mz values
TEST_REAL_SIMILAR(it1->getIntensity(), *it);
TEST_REAL_SIMILAR(*it2 , it1->getMZ());
TEST_STRING_EQUAL(*it3 , String::number(it1->getMZ(),2));
TEST_EQUAL(*it4 , (Int)floor(it1->getMZ()));
++it1;
++it2;
++it3;
++it4;
}
else
{
TEST_EQUAL(true,false)
}
}
}
END_SECTION
// sortByPosition() must sort peaks ascending by m/z and apply the same
// permutation to every meta-data array. Input m/z values are descending,
// so the result is the exact reverse of the input order.
START_SECTION((void sortByPosition()))
{
MSSpectrum ds;
// Meta arrays mirror the intensity values so the permutation can be verified.
MSSpectrum::FloatDataArray float_array {56.0, 201.0, 31, 31, 31, 37, 29, 34, 60, 29};
MSSpectrum::StringDataArray string_array {"56", "201", "31", "31", "31", "37", "29", "34", "60", "29"};
MSSpectrum::IntegerDataArray int_array {56, 201, 31, 31, 31, 37, 29, 34, 60, 29};
std::vector<double> mzs {423.269, 420.130, 419.113, 418.232, 416.293, 415.287, 414.301, 413.800, 412.824, 412.321};
std::vector<double> intensities {56, 201, 31, 31, 31, 37, 29, 34, 60, 29};
for (Size i = 0; i < mzs.size(); ++i)
{
ds.emplace_back(mzs[i], intensities[i]);
}
// First pass: no meta arrays attached.
ds.sortByPosition();
MSSpectrum::iterator it = ds.begin();
for (std::vector<double>::reverse_iterator rit = intensities.rbegin(); rit != intensities.rend(); ++rit)
{
if(it == ds.end())
{
TEST_EQUAL(true,false)
}
TEST_EQUAL(it->getIntensity(), *rit);
++it;
}
// Second pass: with float/string/integer meta arrays attached.
ds.clear(true);
for (Size i = 0; i < mzs.size(); ++i)
{
ds.emplace_back(mzs[i], intensities[i]);
}
ds.getFloatDataArrays() = std::vector<MSSpectrum::FloatDataArray>(3,float_array);
ds.getFloatDataArrays()[0].setName("f1");
ds.getFloatDataArrays()[1].setName("f2");
ds.getFloatDataArrays()[2].setName("f3");
ds.getStringDataArrays() = std::vector<MSSpectrum::StringDataArray>(2, string_array);
ds.getStringDataArrays()[0].setName("s1");
ds.getStringDataArrays()[1].setName("s2");
ds.getIntegerDataArrays() = std::vector<MSSpectrum::IntegerDataArray>(2, int_array);
ds.getIntegerDataArrays()[0].setName("i1");
ds.sortByPosition();
// Array names must survive the sort.
TEST_STRING_EQUAL(ds.getFloatDataArrays()[0].getName(),"f1")
TEST_STRING_EQUAL(ds.getFloatDataArrays()[1].getName(),"f2")
TEST_STRING_EQUAL(ds.getFloatDataArrays()[2].getName(),"f3")
TEST_STRING_EQUAL(ds.getStringDataArrays()[0].getName(),"s1")
TEST_STRING_EQUAL(ds.getStringDataArrays()[1].getName(),"s2")
TEST_STRING_EQUAL(ds.getIntegerDataArrays()[0].getName(),"i1")
Size size = intensities.size();
ABORT_IF(ds.size() != size);
ABORT_IF(ds.getFloatDataArrays()[1].size() != size);
ABORT_IF(ds.getStringDataArrays()[0].size() != size);
ABORT_IF(ds.getIntegerDataArrays()[0].size() != size);
MSSpectrum::iterator it1 = ds.begin();
MSSpectrum::FloatDataArray::iterator it2 = ds.getFloatDataArrays()[1].begin();
MSSpectrum::StringDataArray::iterator it3 = ds.getStringDataArrays()[0].begin();
MSSpectrum::IntegerDataArray::iterator it4 = ds.getIntegerDataArrays()[0].begin();
for (std::vector<double>::reverse_iterator rit = intensities.rbegin(); rit != intensities.rend(); ++rit)
{
//metadataarray values == intensity values
TEST_REAL_SIMILAR(it1->getIntensity(), *rit);
TEST_REAL_SIMILAR(*it2 , *rit);
TEST_STRING_EQUAL(*it3 , String::number(*rit,0));
TEST_EQUAL(*it4 , (Int)floor(*rit));
++it1;
++it2;
++it3;
++it4;
}
}
END_SECTION
// sortByIonMobility() must sort peaks by the values of the IM float data
// array (index 1 in the prefilled spectrum).
START_SECTION(void sortByIonMobility())
{
auto ds = getPrefilledSpec();
TEST_FALSE(ds.isSortedByIM())
ds.sortByIonMobility();
TEST_TRUE(ds.isSortedByIM())
auto [idx, unit] = ds.getIMData();
TEST_EQUAL(idx, 1)
const auto& im = ds.getFloatDataArrays()[idx];
TEST_TRUE(std::is_sorted(im.begin(), im.end()))
}
END_SECTION
START_SECTION(void isSortedByIM() const)
{
NOT_TESTABLE // tested above
}
END_SECTION
// sortByPositionPresorted(): merge-sorts pre-sorted chunks of peaks.
// A new chunk starts whenever the m/z drops below the previously added value.
START_SECTION((void sortByPositionPresorted()))
{
MSSpectrum ds;
// Meta arrays mirror the intensity values so the permutation can be verified.
MSSpectrum::FloatDataArray float_array {19, 20, 23, 15, 16, 18, 13, 14, 12, 12};
MSSpectrum::StringDataArray string_array {"19", "20", "23", "15", "16", "18", "13", "14", "12", "12"};
MSSpectrum::IntegerDataArray int_array {19, 20, 23, 15, 16, 18, 13, 14, 12, 12};
std::vector<double> mzs {419.113, 420.130, 423.269, 415.287, 416.293, 418.232, 413.800, 414.301, 412.824, 412.321};
std::vector<double> intensities {19, 20, 23, 15, 16, 18, 13, 14, 12, 12};
MSSpectrum::Chunks chunks(ds);
double last_added = 0;
for (Size i = 0; i < mzs.size(); ++i)
{
if (mzs[i] < last_added) chunks.add(true);
last_added = mzs[i];
ds.emplace_back(mzs[i], intensities[i]);
}
chunks.add(true); // Add the last chunk
ds.getFloatDataArrays() = std::vector<MSSpectrum::FloatDataArray>(3,float_array);
ds.getFloatDataArrays()[0].setName("f1");
ds.getFloatDataArrays()[1].setName("f2");
ds.getFloatDataArrays()[2].setName("f3");
ds.getStringDataArrays() = std::vector<MSSpectrum::StringDataArray>(2, string_array);
ds.getStringDataArrays()[0].setName("s1");
ds.getStringDataArrays()[1].setName("s2");
ds.getIntegerDataArrays() = std::vector<MSSpectrum::IntegerDataArray>(2, int_array);
ds.getIntegerDataArrays()[0].setName("i1");
ds.sortByPositionPresorted(chunks.getChunks());
// Array names must survive the sort.
TEST_STRING_EQUAL(ds.getFloatDataArrays()[0].getName(),"f1")
TEST_STRING_EQUAL(ds.getFloatDataArrays()[1].getName(),"f2")
TEST_STRING_EQUAL(ds.getFloatDataArrays()[2].getName(),"f3")
TEST_STRING_EQUAL(ds.getStringDataArrays()[0].getName(),"s1")
TEST_STRING_EQUAL(ds.getStringDataArrays()[1].getName(),"s2")
TEST_STRING_EQUAL(ds.getIntegerDataArrays()[0].getName(),"i1")
Size size = intensities.size();
ABORT_IF(ds.size() != size);
ABORT_IF(ds.getFloatDataArrays()[1].size() != size);
ABORT_IF(ds.getStringDataArrays()[0].size() != size);
ABORT_IF(ds.getIntegerDataArrays()[0].size() != size);
MSSpectrum::iterator it1 = ds.begin();
MSSpectrum::FloatDataArray::iterator it2 = ds.getFloatDataArrays()[1].begin();
MSSpectrum::StringDataArray::iterator it3 = ds.getStringDataArrays()[0].begin();
MSSpectrum::IntegerDataArray::iterator it4 = ds.getIntegerDataArrays()[0].begin();
// Intensities happen to be sorted the same way as m/z here, so sorting
// the expected intensities gives the reference order.
std::sort(intensities.begin(), intensities.end());
for (std::vector<double>::iterator it = intensities.begin(); it != intensities.end(); ++it)
{
//metadataarray values == intensity values
TEST_REAL_SIMILAR(it1->getIntensity(), *it);
TEST_REAL_SIMILAR(*it2 , *it);
TEST_STRING_EQUAL(*it3 , String::number(*it,0));
TEST_EQUAL(*it4 , (Int)floor(*it));
++it1; ++it2; ++it3; ++it4;
}
}
END_SECTION
// isSorted(): true for ascending m/z, false after reversing the peaks.
START_SECTION(bool isSorted() const)
{
//make test dataset
MSSpectrum spec {{1000.0, 3}, {1001, 5}, {1002, 1}};
TEST_EQUAL(spec.isSorted(),true)
reverse(spec.begin(), spec.end());
TEST_EQUAL(spec.isSorted(),false)
}
END_SECTION
// isSorted(Predicate): index-based comparator variant; must agree with the
// parameterless isSorted() when the predicate compares m/z.
START_SECTION(template<class Predicate>
bool isSorted(const Predicate& lamdba) const)
{
MSSpectrum ds;
MSSpectrum::FloatDataArray float_array {56, 201, 31, 31, 31, 37, 29, 34, 60, 29};
MSSpectrum::StringDataArray string_array {"56", "201", "31", "31", "31", "37", "29", "34", "60", "29"};
MSSpectrum::IntegerDataArray int_array {56, 201, 31, 31, 31, 37, 29, 34, 60, 29};
std::vector<double> mzs{423.269, 420.130, 419.113, 418.232, 416.293, 415.287, 414.301, 413.800, 412.824, 412.321};
std::vector<double> intensities{56, 201, 31, 31, 31, 37, 29, 34, 60, 29};
for (Size i = 0; i < mzs.size(); ++i)
{
ds.emplace_back(mzs[i], intensities[i]);
}
ds.getFloatDataArrays() = std::vector<MSSpectrum::FloatDataArray>(3, float_array);
ds.getFloatDataArrays()[0].setName("f1");
ds.getStringDataArrays() = std::vector<MSSpectrum::StringDataArray>(2, string_array);
ds.getStringDataArrays()[0].setName("s1");
ds.getStringDataArrays()[1].setName("s2");
ds.getIntegerDataArrays() = std::vector<MSSpectrum::IntegerDataArray>(1, int_array);
ds.getIntegerDataArrays()[0].setName("i1");
ds.sortByPosition();
// more expensive than isSorted(), but just to make sure
TEST_EQUAL(ds.isSorted([&ds](Size a, Size b) {return ds[a].getMZ() < ds[b].getMZ();}), true)
TEST_EQUAL(ds.isSorted(), true) // call other method. Should give the same result
ds.sortByIntensity();
TEST_EQUAL(ds.isSorted([&ds](Size a, Size b) { return ds[a].getIntensity() < ds[b].getIntensity(); }), true)
TEST_EQUAL(ds.isSorted([&ds](Size a, Size b) { return ds[a].getMZ() < ds[b].getMZ(); }), false)
TEST_EQUAL(ds.isSorted(), false)// call other method. Should give the same result
// sort by metadata array; float data is identical to intensities here, so we can easily check
auto float_sort_func = [&ds](Size a, Size b) {
return ds.getFloatDataArrays()[0][a] < ds.getFloatDataArrays()[0][b];
};
ds.sortByPosition();// make sure the order is wrong before calling .sort(...)
ds.sort(float_sort_func);
TEST_EQUAL(ds[0].getIntensity(), 29)
TEST_EQUAL(ds.isSorted(), false) // not sorted by m/z
TEST_EQUAL(ds.isSorted([&ds](Size a, Size b) { return ds[a].getIntensity() < ds[b].getIntensity(); }), true)
}
END_SECTION
START_SECTION(template<class Predicate> void sort(const Predicate& lambda))
{
// tested above
NOT_TESTABLE
}
END_SECTION
/////////////////////////////////////////////////////////////
// Finding peaks or peak ranges
// Shared const fixture for the MZ*/Pos* sections: six peaks at m/z 1..6
// (already sorted by m/z).
const MSSpectrum spec_find{{1.0, 29.0f}, {2.0, 60.0f}, {3.0, 34.0f}, {4.0, 29.0f}, {5.0, 37.0f}, {6.0, 31.0f}};
// MZEnd(mz): first peak strictly greater than mz (upper bound).
START_SECTION((Iterator MZEnd(CoordinateType mz)))
{
MSSpectrum::Iterator it;
auto tmp = spec_find;
it = tmp.MZEnd(4.5);
TEST_EQUAL(it->getPosition()[0],5.0)
it = tmp.MZEnd(5.0);
TEST_EQUAL(it->getPosition()[0],6.0)
it = tmp.MZEnd(5.5);
TEST_EQUAL(it->getPosition()[0],6.0)
}
END_SECTION
// MZBegin(mz): first peak not less than mz (lower bound).
START_SECTION((Iterator MZBegin(CoordinateType mz)))
{
MSSpectrum::Iterator it;
auto tmp = spec_find;
it = tmp.MZBegin(4.5);
TEST_EQUAL(it->getPosition()[0],5.0)
it = tmp.MZBegin(5.0);
TEST_EQUAL(it->getPosition()[0],5.0)
it = tmp.MZBegin(5.5);
TEST_EQUAL(it->getPosition()[0],6.0)
}
END_SECTION
// Range-restricted MZBegin: lower bound within [begin, end).
START_SECTION((Iterator MZBegin(Iterator begin, CoordinateType mz, Iterator end)))
{
MSSpectrum::Iterator it;
auto tmp = spec_find;
it = tmp.MZBegin(tmp.begin(), 4.5, tmp.end());
TEST_EQUAL(it->getPosition()[0],5.0)
// fixed: this was an exact duplicate of the 4.5 query; probe another boundary
it = tmp.MZBegin(tmp.begin(), 5.5, tmp.end());
TEST_EQUAL(it->getPosition()[0],6.0)
// an empty search range [begin, begin) returns begin
it = tmp.MZBegin(tmp.begin(), 4.5, tmp.begin());
TEST_EQUAL(it->getPosition()[0],tmp.begin()->getPosition()[0])
}
END_SECTION
// Const overload of range-restricted MZBegin.
// fixed: the original used MSSpectrum::Iterator on a non-const copy and
// therefore never exercised the const overload; call on the const
// 'spec_find' object (same peak content) instead.
START_SECTION((ConstIterator MZBegin(ConstIterator begin, CoordinateType mz, ConstIterator end) const))
{
MSSpectrum::ConstIterator it;
it = spec_find.MZBegin(spec_find.begin(), 4.5, spec_find.end());
TEST_EQUAL(it->getPosition()[0],5.0)
it = spec_find.MZBegin(spec_find.begin(), 5.5, spec_find.end());
TEST_EQUAL(it->getPosition()[0],6.0)
// an empty search range [begin, begin) returns begin
it = spec_find.MZBegin(spec_find.begin(), 4.5, spec_find.begin());
TEST_EQUAL(it->getPosition()[0],spec_find.begin()->getPosition()[0])
}
END_SECTION
// Range-restricted MZEnd: upper bound within [begin, end).
START_SECTION((Iterator MZEnd(Iterator begin, CoordinateType mz, Iterator end)))
{
MSSpectrum::Iterator it;
auto tmp = spec_find;
it = tmp.MZEnd(tmp.begin(), 4.5, tmp.end());
TEST_EQUAL(it->getPosition()[0],5.0)
it = tmp.MZEnd(tmp.begin(), 5, tmp.end());
TEST_EQUAL(it->getPosition()[0],6.0)
// an empty search range [begin, begin) returns begin
it = tmp.MZEnd(tmp.begin(), 4.5, tmp.begin());
TEST_EQUAL(it->getPosition()[0],tmp.begin()->getPosition()[0])
}
END_SECTION
// Const overloads, exercised on the const 'spec_find' object.
START_SECTION((ConstIterator MZEnd(ConstIterator begin, CoordinateType mz, ConstIterator end) const))
{
MSSpectrum::ConstIterator it;
it = spec_find.MZEnd(spec_find.begin(), 4.5, spec_find.end());
TEST_EQUAL(it->getPosition()[0],5.0)
it = spec_find.MZEnd(spec_find.begin(), 5, spec_find.end());
TEST_EQUAL(it->getPosition()[0],6.0)
// an empty search range [begin, begin) returns begin
it = spec_find.MZEnd(spec_find.begin(), 4.5, spec_find.begin());
TEST_EQUAL(it->getPosition()[0], spec_find.begin()->getPosition()[0])
}
END_SECTION
START_SECTION((ConstIterator MZEnd(CoordinateType mz) const))
{
MSSpectrum::ConstIterator it;
it = spec_find.MZEnd(4.5);
TEST_EQUAL(it->getPosition()[0],5.0)
it = spec_find.MZEnd(5.0);
TEST_EQUAL(it->getPosition()[0],6.0)
it = spec_find.MZEnd(5.5);
TEST_EQUAL(it->getPosition()[0],6.0)
}
END_SECTION
START_SECTION((ConstIterator MZBegin(CoordinateType mz) const))
{
MSSpectrum::ConstIterator it;
it = spec_find.MZBegin(4.5);
TEST_EQUAL(it->getPosition()[0],5.0)
it = spec_find.MZBegin(5.0);
TEST_EQUAL(it->getPosition()[0],5.0)
it = spec_find.MZBegin(5.5);
TEST_EQUAL(it->getPosition()[0],6.0)
}
END_SECTION
// Mutable copy of the fixture, shared by the Pos* sections below.
auto tmp = spec_find;
// PosBegin(mz): generic-position lower bound (same semantics as MZBegin).
START_SECTION((Iterator PosBegin(CoordinateType mz)))
{
MSSpectrum::Iterator it;
it = tmp.PosBegin(4.5);
TEST_EQUAL(it->getPos(), 5.0)
it = tmp.PosBegin(5.0);
TEST_EQUAL(it->getPos(), 5.0)
it = tmp.PosBegin(5.5);
TEST_EQUAL(it->getPos(), 6.0)
}
END_SECTION
// Range-restricted PosBegin.
START_SECTION((Iterator PosBegin(Iterator begin, CoordinateType mz, Iterator end)))
{
MSSpectrum::Iterator it;
it = tmp.PosBegin(tmp.begin(), 4.5, tmp.end());
TEST_EQUAL(it->getPos(), 5.0)
it = tmp.PosBegin(tmp.begin(), 5.5, tmp.end());
TEST_EQUAL(it->getPos(), 6.0)
// an empty search range [begin, begin) returns begin
it = tmp.PosBegin(tmp.begin(), 4.5, tmp.begin());
TEST_EQUAL(it->getPos(), tmp.begin()->getPos())
// a query beyond the last peak returns end(); compare via the last element
it = tmp.PosBegin(tmp.begin(), 8.0, tmp.end());
TEST_EQUAL((it-1)->getPos(), (tmp.end()-1)->getPos())
}
END_SECTION
// Const overload of PosBegin(mz).
// fixed: the original called the non-const 'tmp' and therefore never
// exercised the const overload; call on the const 'spec_find' object
// (identical peak content) instead.
START_SECTION((ConstIterator PosBegin(CoordinateType mz) const ))
{
MSSpectrum::ConstIterator it;
it = spec_find.PosBegin(4.5);
TEST_EQUAL(it->getPos(), 5.0)
it = spec_find.PosBegin(5.0);
TEST_EQUAL(it->getPos(), 5.0)
it = spec_find.PosBegin(5.5);
TEST_EQUAL(it->getPos(), 6.0)
}
END_SECTION
// Const overload of range-restricted PosBegin.
// fixed: use the const 'spec_find' object so const begin()/end() iterators
// are passed and the const overload is actually exercised.
START_SECTION((ConstIterator PosBegin(ConstIterator begin, CoordinateType mz, ConstIterator end) const ))
{
MSSpectrum::ConstIterator it;
it = spec_find.PosBegin(spec_find.begin(), 3.5, spec_find.end());
TEST_EQUAL(it->getPos(), 4.0)
it = spec_find.PosBegin(spec_find.begin(), 4.5, spec_find.end());
TEST_EQUAL(it->getPos(), 5.0)
// an empty search range [begin, begin) returns begin
it = spec_find.PosBegin(spec_find.begin(), 4.5, spec_find.begin());
TEST_EQUAL(it->getPos(), spec_find.begin()->getPos())
// a query beyond the last peak returns end(); compare via the last element
it = spec_find.PosBegin(spec_find.begin(), 8.0, spec_find.end());
TEST_EQUAL((it-1)->getPos(), (spec_find.end()-1)->getPos())
}
END_SECTION
// PosEnd(mz): generic-position upper bound (same semantics as MZEnd).
START_SECTION((Iterator PosEnd(CoordinateType mz)))
{
MSSpectrum::Iterator it;
it = tmp.PosEnd(4.5);
TEST_EQUAL(it->getPos(), 5.0)
it = tmp.PosEnd(5.0);
TEST_EQUAL(it->getPos(), 6.0)
it = tmp.PosEnd(5.5);
TEST_EQUAL(it->getPos(), 6.0)
}
END_SECTION
// Range-restricted PosEnd.
START_SECTION((Iterator PosEnd(Iterator begin, CoordinateType mz, Iterator end)))
{
MSSpectrum::Iterator it;
it = tmp.PosEnd(tmp.begin(), 3.5, tmp.end());
TEST_EQUAL(it->getPos(), 4.0)
it = tmp.PosEnd(tmp.begin(), 4.0, tmp.end());
TEST_EQUAL(it->getPos(), 5.0)
// an empty search range [begin, begin) returns begin
it = tmp.PosEnd(tmp.begin(), 4.5, tmp.begin());
TEST_EQUAL(it->getPos(), tmp.begin()->getPos())
// fixed: was tmp.PosBegin (copy-paste from the PosBegin section);
// a query beyond the last peak must return end()
it = tmp.PosEnd(tmp.begin(), 8.0, tmp.end());
TEST_EQUAL((it-1)->getPos(), (tmp.end()-1)->getPos())
}
END_SECTION
// Const overload of PosEnd(mz).
// fixed: call on the const 'spec_find' object so the const overload is
// actually exercised (the original used the non-const 'tmp').
START_SECTION((ConstIterator PosEnd(CoordinateType mz) const ))
{
MSSpectrum::ConstIterator it;
it = spec_find.PosEnd(4.5);
TEST_EQUAL(it->getPos(), 5.0)
it = spec_find.PosEnd(5.0);
TEST_EQUAL(it->getPos(), 6.0)
it = spec_find.PosEnd(5.5);
TEST_EQUAL(it->getPos(), 6.0)
}
END_SECTION
// Const overload of range-restricted PosEnd.
// fixed: use the const 'spec_find' object (const overload was never
// exercised) and call PosEnd instead of PosBegin in the last check
// (copy-paste from the PosBegin section).
START_SECTION((ConstIterator PosEnd(ConstIterator begin, CoordinateType mz, ConstIterator end) const ))
{
MSSpectrum::ConstIterator it;
it = spec_find.PosEnd(spec_find.begin(), 4.5, spec_find.end());
TEST_EQUAL(it->getPos(), 5.0)
it = spec_find.PosEnd(spec_find.begin(), 5.0, spec_find.end());
TEST_EQUAL(it->getPos(), 6.0)
// an empty search range [begin, begin) returns begin
it = spec_find.PosEnd(spec_find.begin(), 4.5, spec_find.begin());
TEST_EQUAL(it->getPos(), spec_find.begin()->getPos())
// a query beyond the last peak returns end(); compare via the last element
it = spec_find.PosEnd(spec_find.begin(), 8.0, spec_find.end());
TEST_EQUAL((it-1)->getPos(), (spec_find.end()-1)->getPos())
}
END_SECTION
// Ion-mobility meta data: the prefilled spectrum carries an IM float data
// array at index 1 with unit MILLISECOND (set via IMDataConverter::setIMUnit).
START_SECTION(bool containsIMData() const)
{
auto ds = getPrefilledSpec();
TEST_TRUE(ds.containsIMData())
}
END_SECTION
START_SECTION((std::pair<Size,DriftTimeUnit> getIMData() const))
{
auto ds = getPrefilledSpec();
auto [im_data_index, unit] = ds.getIMData();
TEST_EQUAL(im_data_index, 1)
TEST_TRUE(unit == DriftTimeUnit::MILLISECOND)
}
END_SECTION
// Shared const fixture for the findNearest sections: 21 peaks (indices
// 0..20), already sorted ascending by m/z.
const MSSpectrum spec_test {
{412.321, 29.0f},
{412.824, 60.0f},
{413.8, 34.0f},
{414.301, 29.0f},
{415.287, 37.0f},
{416.293, 31.0f},
{418.232, 31.0f},
{419.113, 31.0f},
{420.13, 201.0f},
{423.269, 56.0f},
{426.292, 34.0f},
{427.28, 82.0f},
{428.322, 87.0f},
{430.269, 30.0f},
{431.246, 29.0f},
{432.289, 42.0f},
{436.161, 32.0f},
{437.219, 54.0f},
{439.186, 40.0f},
{440.27, 40},
{441.224, 23.0f}};
// findNearest(mz): index of the closest peak; clamps to the first/last
// index for queries outside the mass range. Precondition: non-empty spectrum.
START_SECTION((Size findNearest(CoordinateType mz) const))
{
MSSpectrum tmp = spec_test;
//test outside mass range
TEST_EQUAL(tmp.findNearest(400.0),0);
TEST_EQUAL(tmp.findNearest(500.0),20);
//test mass range borders
TEST_EQUAL(tmp.findNearest(412.4),0);
TEST_EQUAL(tmp.findNearest(441.224),20);
//test inside scan
TEST_EQUAL(tmp.findNearest(426.29),10);
TEST_EQUAL(tmp.findNearest(426.3),10);
TEST_EQUAL(tmp.findNearest(427.2),11);
TEST_EQUAL(tmp.findNearest(427.3),11);
//empty spectrum
TEST_PRECONDITION_VIOLATED(tmp2.findNearest(427.3));
MSSpectrum tmp2;
TEST_PRECONDITION_VIOLATED(tmp2.findNearest(427.3));
}
END_SECTION
// findNearest(mz, tolerance): index of the closest peak within the symmetric
// tolerance window, or -1 if no peak is within it (also for an empty spectrum).
START_SECTION((Size findNearest(CoordinateType mz, CoordinateType tolerance) const))
{
//test outside mass range
TEST_EQUAL(spec_test.findNearest(400.0, 1.0), -1);
TEST_EQUAL(spec_test.findNearest(500.0, 1.0), -1);
//test mass range borders
TEST_EQUAL(spec_test.findNearest(412.4, 0.01), -1);
TEST_EQUAL(spec_test.findNearest(412.4, 0.1), 0);
TEST_EQUAL(spec_test.findNearest(441.3, 0.01),-1);
TEST_EQUAL(spec_test.findNearest(441.3, 0.1), 20);
//test inside scan
TEST_EQUAL(spec_test.findNearest(426.29, 0.1), 10);
TEST_EQUAL(spec_test.findNearest(426.3, 0.1), 10);
TEST_EQUAL(spec_test.findNearest(427.2, 0.1), 11);
TEST_EQUAL(spec_test.findNearest(427.3, 0.1), 11);
TEST_EQUAL(spec_test.findNearest(427.3, 0.001), -1);
//empty spectrum
MSSpectrum spec_test2;
// fixed: was the three-argument overload findNearest(427.3, 1.0, 1.0)
// (copy-paste from the next section); this section tests the two-argument one
TEST_EQUAL(spec_test2.findNearest(427.3, 1.0), -1);
}
END_SECTION
START_SECTION((Size findNearest(CoordinateType mz, CoordinateType left_tolerance, CoordinateType right_tolerance) const))
{
// Asymmetric search window [mz - left_tolerance, mz + right_tolerance];
// returns -1 when no peak falls inside it.
//test outside mass range
TEST_EQUAL(spec_test.findNearest(400.0, 1.0, 1.0), -1);
TEST_EQUAL(spec_test.findNearest(500.0, 1.0, 1.0), -1);
//test mass range borders
TEST_EQUAL(spec_test.findNearest(412.4, 0.01, 0.01), -1);
TEST_EQUAL(spec_test.findNearest(412.4, 0.1, 0.1), 0);
TEST_EQUAL(spec_test.findNearest(441.3, 0.01, 0.01),-1);
TEST_EQUAL(spec_test.findNearest(441.3, 0.1, 0.1), 20);
//test inside scan
TEST_EQUAL(spec_test.findNearest(426.29, 0.1, 0.1), 10);
TEST_EQUAL(spec_test.findNearest(426.3, 0.1, 0.1), 10);
TEST_EQUAL(spec_test.findNearest(427.2, 0.1, 0.1), 11);
TEST_EQUAL(spec_test.findNearest(427.3, 0.1, 0.1), 11);
TEST_EQUAL(spec_test.findNearest(427.3, 0.001, 0.001), -1);
// asymmetric windows: only one side wide enough to reach a peak
TEST_EQUAL(spec_test.findNearest(427.3, 0.1, 0.001), 11);
TEST_EQUAL(spec_test.findNearest(427.3, 0.001, 1.01), -1);
TEST_EQUAL(spec_test.findNearest(427.3, 0.001, 1.1), 12);
//empty spectrum
MSSpectrum spec_test2;
TEST_EQUAL(spec_test2.findNearest(427.3, 1.0, 1.0), -1);
}
END_SECTION
START_SECTION((Size findHighestInWindow(CoordinateType mz, CoordinateType tolerance_left, CoordinateType tolerance_righ) const))
{
// Like findNearest(mz, left, right), but returns the index of the MOST
// INTENSE peak in the window rather than the closest one.
//test outside mass range
TEST_EQUAL(spec_test.findHighestInWindow(400.0, 1.0, 1.0), -1);
TEST_EQUAL(spec_test.findHighestInWindow(500.0, 1.0, 1.0), -1);
//test mass range borders
TEST_EQUAL(spec_test.findHighestInWindow(412.4, 0.01, 0.01), -1);
TEST_EQUAL(spec_test.findHighestInWindow(412.4, 0.1, 0.1), 0);
TEST_EQUAL(spec_test.findHighestInWindow(441.3, 0.01, 0.01),-1);
TEST_EQUAL(spec_test.findHighestInWindow(441.3, 0.1, 0.1), 20);
//test inside scan
TEST_EQUAL(spec_test.findHighestInWindow(426.29, 0.1, 0.1), 10);
TEST_EQUAL(spec_test.findHighestInWindow(426.3, 0.1, 0.1), 10);
TEST_EQUAL(spec_test.findHighestInWindow(427.2, 0.1, 0.1), 11);
TEST_EQUAL(spec_test.findHighestInWindow(427.3, 0.1, 0.1), 11);
TEST_EQUAL(spec_test.findHighestInWindow(427.3, 0.001, 0.001), -1);
TEST_EQUAL(spec_test.findHighestInWindow(427.3, 0.1, 0.001), 11);
TEST_EQUAL(spec_test.findHighestInWindow(427.3, 0.001, 1.01), -1);
TEST_EQUAL(spec_test.findHighestInWindow(427.3, 0.001, 1.1), 12);
// wide windows: the base peak (index 8, intensity 201) wins over closer peaks
TEST_EQUAL(spec_test.findHighestInWindow(427.3, 9.0, 4.0), 8);
TEST_EQUAL(spec_test.findHighestInWindow(430.25, 1.9, 1.01), 13);
//empty spectrum
TEST_EQUAL(MSSpectrum().findHighestInWindow(427.3, 1.0, 1.0), -1);
}
END_SECTION
START_SECTION( SpectrumSettings::SpectrumType MSSpectrum::getType(const bool query_data) const)
{
// getType resolves the spectrum type in order of preference:
// 1) explicitly set type, 2) data-processing actions, 3) (optionally) data inspection.
// test empty spectrum
MSSpectrum edit;
TEST_EQUAL(edit.getType(false), SpectrumSettings::SpectrumType::UNKNOWN);
TEST_EQUAL(edit.getType(true), SpectrumSettings::SpectrumType::UNKNOWN);
// easiest: type is explicitly given
edit.setType(SpectrumSettings::SpectrumType::PROFILE);
TEST_EQUAL(edit.getType(false), SpectrumSettings::SpectrumType::PROFILE);
TEST_EQUAL(edit.getType(true), SpectrumSettings::SpectrumType::PROFILE);
// second easiest: type is given in data processing
DataProcessing dp;
dp.setProcessingActions( { DataProcessing::PEAK_PICKING } );
std::shared_ptr< DataProcessing > dp_(new DataProcessing(dp));
edit.getDataProcessing().push_back(dp_);
// still profile, since DP is only checked when type is unknown
TEST_EQUAL(edit.getType(false), SpectrumSettings::SpectrumType::PROFILE);
TEST_EQUAL(edit.getType(true), SpectrumSettings::SpectrumType::PROFILE);
// resetting to UNKNOWN lets the PEAK_PICKING action imply CENTROID
edit.setType(SpectrumSettings::SpectrumType::UNKNOWN);
TEST_EQUAL(edit.getType(false), SpectrumSettings::SpectrumType::CENTROID);
TEST_EQUAL(edit.getType(true), SpectrumSettings::SpectrumType::CENTROID);
// third case: estimation from data
edit.getDataProcessing().clear();
// too few points
edit.push_back( { 100.0, 1.0 } );
edit.push_back( { 200.0, 1.0 } );
edit.push_back( { 300.0, 1.0 } );
edit.push_back( { 400.0, 1.0 } );
TEST_EQUAL(edit.getType(false), SpectrumSettings::SpectrumType::UNKNOWN);
TEST_EQUAL(edit.getType(true), SpectrumSettings::SpectrumType::UNKNOWN);
// with enough points, data inspection (query_data == true) can estimate the type
edit.push_back( { 500.0, 1.0 } );
edit.push_back( { 600.0, 1.0 } );
TEST_EQUAL(edit.getType(false), SpectrumSettings::SpectrumType::UNKNOWN); // data is not inspected
TEST_EQUAL(edit.getType(true), SpectrumSettings::SpectrumType::CENTROID);
}
END_SECTION
START_SECTION(ConstIterator getBasePeak() const)
{
// Base peak = most intense peak; index 8 (intensity 201) in the fixture.
const auto it = spec_test.getBasePeak();
TEST_REAL_SIMILAR(it->getIntensity(), 201.0)
TEST_EQUAL(std::distance(spec_test.begin(), it), 8);
// empty spectrum: no base peak, end() is returned
MSSpectrum empty;
TEST_EQUAL(empty.getBasePeak() == empty.end(), true);
}
END_SECTION
START_SECTION(Iterator getBasePeak())
{
// Non-const overload: the returned iterator must allow mutation.
MSSpectrum test = spec_test;
auto it = test.getBasePeak();
// no-op write through the iterator proves it is a mutable Iterator
it->setIntensity(it->getIntensity() + 0.0);
TEST_REAL_SIMILAR(it->getIntensity(), 201.0)
TEST_EQUAL(std::distance(test.begin(), it), 8);
}
END_SECTION
START_SECTION(PeakType::IntensityType calculateTIC() const)
{
// TIC = sum of all peak intensities; 0 for an empty spectrum.
auto r = spec_test.calculateTIC();
TEST_REAL_SIMILAR(r, 1032.0)
TEST_EQUAL(MSSpectrum().calculateTIC(), 0.0);
}
END_SECTION
START_SECTION(void setIMFormat(IMFormat imf))
{
// test invalid format validation
MSSpectrum spec;
TEST_EXCEPTION(Exception::InvalidValue, spec.setIMFormat(IMFormat::MIXED)); // this should trigger the validation check because a single spectrum can't be mixed
}
END_SECTION
START_SECTION(void clear(bool clear_meta_data))
{
// clear(false) removes peak data only; clear(true) also resets all metadata
// so the spectrum compares equal to a default-constructed one.
MSSpectrum edit;
edit.getInstrumentSettings().getScanWindows().resize(1);
edit.resize(1);
edit.setMetaValue("label",String("bla"));
edit.setRT(5);
edit.setDriftTime(6);
edit.setDriftTimeUnit(DriftTimeUnit::MILLISECOND);
edit.setMSLevel(5);
edit.getFloatDataArrays().resize(5);
edit.getIntegerDataArrays().resize(5);
edit.getStringDataArrays().resize(5);
edit.clear(false);
TEST_EQUAL(edit.size(),0)
// metadata survives clear(false), so equality with a fresh spectrum fails
TEST_EQUAL(edit == MSSpectrum(),false)
TEST_EQUAL(edit.empty(),true)
edit.clear(true);
TEST_EQUAL(edit.empty(),true)
TEST_EQUAL(edit == MSSpectrum(),true)
}
END_SECTION
START_SECTION(([MSSpectrum::RTLess] bool operator()(const MSSpectrum &a, const MSSpectrum &b) const))
{
// RTLess orders spectra strictly by retention time (ascending).
vector< MSSpectrum> v;
MSSpectrum sp1;
sp1.setRT(3.0f);
v.push_back(sp1);
MSSpectrum sp2;
sp2.setRT(2.0f);
v.push_back(sp2);
MSSpectrum sp3;
sp3.setRT(1.0f);
v.push_back(sp3);
std::sort(v.begin(),v.end(), MSSpectrum::RTLess());
TEST_REAL_SIMILAR(v[0].getRT(), 1.0);
TEST_REAL_SIMILAR(v[1].getRT(), 2.0);
TEST_REAL_SIMILAR(v[2].getRT(), 3.0);
/// direct comparator checks, including irreflexivity (strict ordering)
MSSpectrum s1;
s1.setRT(0.451);
MSSpectrum s2;
s2.setRT(0.5);
TEST_EQUAL(MSSpectrum::RTLess()(s1,s2), true);
TEST_EQUAL(MSSpectrum::RTLess()(s2,s1), false);
TEST_EQUAL(MSSpectrum::RTLess()(s2,s2), false);
}
END_SECTION
START_SECTION((std::pair<DriftTimeUnit, std::vector<float>> maybeGetIMData() const))
{
// maybeGetIMData: returns {unit, values} when a float data array named
// Constants::UserParam::ION_MOBILITY exists, otherwise {NONE, empty}.
// Test successful retrieval of ion mobility data
MSSpectrum spec;
// Create a float data array with ion mobility data
DataArrays::FloatDataArray im_array;
im_array.setName(Constants::UserParam::ION_MOBILITY);
im_array.resize(3);
im_array[0] = 1.0f;
im_array[1] = 2.0f;
im_array[2] = 3.0f;
// the "unit" meta value determines the reported DriftTimeUnit
im_array.setMetaValue("unit", "millisecond");
// Add the array to spectrum's float data arrays
std::vector<DataArrays::FloatDataArray> fda;
fda.push_back(im_array);
spec.setFloatDataArrays(fda);
// Test successful case
std::pair<DriftTimeUnit, std::vector<float>> result = spec.maybeGetIMData();
TEST_TRUE(result.first == DriftTimeUnit::MILLISECOND)
TEST_EQUAL(result.second.size(), 3)
TEST_REAL_SIMILAR(result.second[0], 1.0)
TEST_REAL_SIMILAR(result.second[1], 2.0)
TEST_REAL_SIMILAR(result.second[2], 3.0)
// Test case with missing ion mobility data
MSSpectrum spec_no_im;
result = spec_no_im.maybeGetIMData();
TEST_TRUE(result.first == DriftTimeUnit::NONE)
TEST_EQUAL(result.second.empty(), true)
// Test case with empty float arrays
MSSpectrum spec_empty;
spec_empty.getFloatDataArrays().clear();
result = spec_empty.maybeGetIMData();
TEST_TRUE(result.first == DriftTimeUnit::NONE)
TEST_EQUAL(result.second.empty(), true)
// Test case with wrong array name
MSSpectrum spec_wrong_name;
DataArrays::FloatDataArray wrong_array;
wrong_array.setName("Wrong Name");
wrong_array.resize(2);
wrong_array[0] = 4.0f;
wrong_array[1] = 5.0f;
fda.clear();
fda.push_back(wrong_array);
spec_wrong_name.setFloatDataArrays(fda);
result = spec_wrong_name.maybeGetIMData();
TEST_TRUE(result.first == DriftTimeUnit::NONE)
TEST_EQUAL(result.second.empty(), true)
}
END_SECTION
START_SECTION(([EXTRA] std::ostream& operator << (std::ostream& os, const MSSpectrum& spec)))
{
// Pins the exact textual dump format of a spectrum: framing markers,
// nested SpectrumSettings block, then one "POS: ... INT: ..." line per peak.
MSSpectrum spec
{ {412.321, 29.0f}, //0
{412.824, 60.0f}, //1
{413.8, 34.0f}, //2
{414.301, 29.0f}, //3
{415.287, 37.0f}, //4
{416.293, 31.0f}, //5
{418.232, 31.0f}, //6
{419.113, 31.0f}, //7
{420.13, 201.0f}, //8
{423.269, 56.0f}, //9
{426.292, 34.0f} //10
};
// metadata is set but deliberately NOT expected in the dump below
spec.getInstrumentSettings().getScanWindows().resize(1);
spec.setMetaValue("label", 5.0);
spec.setMSLevel(17);
spec.setRT(7.0);
spec.setName("bla");
ostringstream test_stream;
test_stream << spec;
TEST_EQUAL(test_stream.str(), "-- MSSPECTRUM BEGIN --\n"
"-- SPECTRUMSETTINGS BEGIN --\n"
"-- SPECTRUMSETTINGS END --\n"
"POS: 412.321 INT: 29\n"
"POS: 412.824 INT: 60\n"
"POS: 413.8 INT: 34\n"
"POS: 414.301 INT: 29\n"
"POS: 415.287 INT: 37\n"
"POS: 416.293 INT: 31\n"
"POS: 418.232 INT: 31\n"
"POS: 419.113 INT: 31\n"
"POS: 420.13 INT: 201\n"
"POS: 423.269 INT: 56\n"
"POS: 426.292 INT: 34\n"
"-- MSSPECTRUM END --\n")
}
END_SECTION
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/QTClusterFinder_test.cpp | .cpp | 6,152 | 229 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hendrik Weisser $
// $Authors: Hendrik Weisser $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/test_config.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/KERNEL/ConsensusMap.h>
///////////////////////////
#include <OpenMS/ANALYSIS/MAPMATCHING/QTClusterFinder.h>
///////////////////////////
#include <OpenMS/KERNEL/Feature.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/METADATA/PeptideHit.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
using namespace OpenMS;
using namespace std;
START_TEST(QTClusterFinder, "$Id$")
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
QTClusterFinder* ptr = nullptr;
QTClusterFinder* nullPointer = nullptr;
// construction/destruction smoke tests
START_SECTION((QTClusterFinder()))
{
ptr = new QTClusterFinder();
TEST_NOT_EQUAL(ptr, nullPointer)
}
END_SECTION
START_SECTION((virtual ~QTClusterFinder()))
delete ptr;
END_SECTION
START_SECTION((void run(const std::vector<FeatureMap >& input_maps, ConsensusMap& result_map)))
{
// End-to-end clustering test: build two small feature maps, run the QT
// cluster finder, and verify the consensus grouping. Then repeat with
// identification-aware matching (simple case), and finally with a third
// map (complex case) where a closer feature must lose to one whose
// peptide annotation matches.
vector<FeatureMap > input(2);
// map 0: two features far apart, annotated "AAA" and "CCC"
Feature feat1;
Feature feat2;
DPosition<2> pos1(0,0);
DPosition<2> pos2(100,200);
feat1.setPosition(pos1);
feat1.setUniqueId(0);
feat2.setPosition(pos2);
feat2.setUniqueId(1);
PeptideHit hit;
hit.setSequence(AASequence::fromString("AAA"));
feat1.getPeptideIdentifications().resize(1);
feat1.getPeptideIdentifications()[0].insertHit(hit);
hit.setSequence(AASequence::fromString("CCC"));
feat2.getPeptideIdentifications().resize(1);
feat2.getPeptideIdentifications()[0].insertHit(hit);
input[0].push_back(feat1);
input[0].push_back(feat2);
// map 1: feat3/feat4 near feat1, feat5 near feat2
Feature feat3;
Feature feat4;
Feature feat5;
DPosition<2> pos3(4,0.04);
DPosition<2> pos4(5,0.05);
DPosition<2> pos5(104,200.04);
feat3.setPosition(pos3);
feat3.setUniqueId(0);
feat4.setPosition(pos4);
feat4.setUniqueId(1);
feat5.setPosition(pos5);
feat5.setUniqueId(2);
hit.setSequence(AASequence::fromString("DDD"));
feat3.getPeptideIdentifications().resize(1);
feat3.getPeptideIdentifications()[0].insertHit(hit);
hit.setSequence(AASequence::fromString("AAA"));
feat4.getPeptideIdentifications().resize(1);
feat4.getPeptideIdentifications()[0].insertHit(hit);
// no peptide ID for "feat5"
input[1].push_back(feat3);
input[1].push_back(feat4);
input[1].push_back(feat5);
input[0].updateRanges();
input[1].updateRanges();
QTClusterFinder finder;
Param param = finder.getDefaults();
// tolerances wide enough for feat1/feat3 and feat2/feat5 to pair up
param.setValue("distance_RT:max_difference", 5.1);
param.setValue("distance_MZ:max_difference", 0.1);
param.setValue("nr_partitions", 1);
finder.setParameters(param);
ConsensusMap result;
finder.run(input, result);
TEST_EQUAL(result.size(), 3);
ABORT_IF(result.size() != 3);
ConsensusFeature::HandleSetType group1 = result[0].getFeatures();
ConsensusFeature::HandleSetType group2 = result[1].getFeatures();
ConsensusFeature::HandleSetType group3 = result[2].getFeatures();
FeatureHandle ind1(0, feat1);
FeatureHandle ind2(0, feat2);
FeatureHandle ind3(1, feat3);
FeatureHandle ind4(1, feat4);
FeatureHandle ind5(1, feat5);
ConsensusFeature::HandleSetType::const_iterator it;
// don't know why the order is this way, but it shouldn't matter...
it = group1.begin();
STATUS(*it);
STATUS(ind2);
TEST_EQUAL(*(it) == ind2, true);
++it;
STATUS(*it);
STATUS(ind5);
TEST_EQUAL(*(it) == ind5, true);
it = group2.begin();
STATUS(*it);
STATUS(ind1);
TEST_EQUAL(*(it) == ind1, true);
++it;
STATUS(*it);
STATUS(ind3);
TEST_EQUAL(*(it) == ind3, true);
it = group3.begin();
STATUS(*it);
STATUS(ind4);
TEST_EQUAL(*(it) == ind4, true);
// test annotation-specific matching (simple case):
// with IDs enabled, feat4 ("AAA") pairs with feat1 instead of feat3 ("DDD")
param.setValue("use_identifications", "true");
finder.setParameters(param);
finder.run(input, result);
TEST_EQUAL(result.size(), 3);
ABORT_IF(result.size() != 3);
group1 = result[0].getFeatures();
group2 = result[1].getFeatures();
group3 = result[2].getFeatures();
it = group1.begin();
STATUS(*it);
STATUS(ind2);
TEST_EQUAL(*(it) == ind2, true);
++it;
STATUS(*it);
STATUS(ind5);
TEST_EQUAL(*(it) == ind5, true);
it = group2.begin();
STATUS(*it);
STATUS(ind1);
TEST_EQUAL(*(it) == ind1, true);
++it;
STATUS(*it);
STATUS(ind4); // fix: was STATUS(ind3) -- debug output did not match the handle tested below
TEST_EQUAL(*(it) == ind4, true);
it = group3.begin();
STATUS(*it);
STATUS(ind3); // fix: was STATUS(ind4) -- debug output did not match the handle tested below
TEST_EQUAL(*(it) == ind3, true);
// test annotation-specific matching (complex case):
input.resize(3);
// map 2: feat6 is closest to the feat2/feat5 cluster but annotated "EEE";
// feat7 is farther but annotated "CCC" (matching feat2)
Feature feat6;
Feature feat7;
DPosition<2> pos6(104,200.04);
DPosition<2> pos7(108,200.08);
feat6.setPosition(pos6);
feat6.setUniqueId(0);
feat7.setPosition(pos7);
feat7.setUniqueId(1);
hit.setSequence(AASequence::fromString("EEE"));
feat6.getPeptideIdentifications().resize(1);
feat6.getPeptideIdentifications()[0].insertHit(hit);
hit.setSequence(AASequence::fromString("CCC"));
feat7.getPeptideIdentifications().resize(1);
feat7.getPeptideIdentifications()[0].insertHit(hit);
input[2].push_back(feat6);
input[2].push_back(feat7);
input[2].updateRanges();
finder.run(input, result);
TEST_EQUAL(result.size(), 4);
ABORT_IF(result.size() != 4);
FeatureHandle ind7(2, feat7);
group1 = result[0].getFeatures();
it = group1.begin();
STATUS(*it);
STATUS(ind2);
TEST_EQUAL(*(it) == ind2, true);
++it;
STATUS(*it);
STATUS(ind5);
TEST_EQUAL(*(it) == ind5, true);
++it;
STATUS(*it);
// "ind6" is closer, but its annotation doesn't match
STATUS(ind7);
TEST_EQUAL(*(it) == ind7, true);
}
END_SECTION
START_SECTION((void run(const std::vector<ConsensusMap>& input_maps, ConsensusMap& result_map)))
{
NOT_TESTABLE; // same as "run" for feature maps (tested above)
}
END_SECTION
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/FLASHDeconvSpectrumFile_test.cpp | .cpp | 11,780 | 334 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Kyowon Jeong $
// $Authors: Kyowon Jeong $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/test_config.h>
///////////////////////////
#include <OpenMS/FORMAT/FLASHDeconvSpectrumFile.h>
#include <OpenMS/ANALYSIS/TOPDOWN/DeconvolvedSpectrum.h>
#include <OpenMS/ANALYSIS/TOPDOWN/PeakGroup.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <sstream>
///////////////////////////
using namespace OpenMS;
using namespace std;
/////////////////////////////////////////////////////////////
// Builds a synthetic PeakGroup for testing: fixed quality metrics and one
// LogMzPeak per charge state in [min_charge, max_charge].
/////////////////////////////////////////////////////////////
PeakGroup createTestPeakGroup(double mono_mass, int min_charge, int max_charge, bool is_positive = true)
{
PeakGroup group(min_charge, max_charge, is_positive);
group.setMonoisotopicMass(mono_mass);
group.setIsotopeCosine(0.95f);
group.setChargeScore(0.9f);
group.setSNR(50.0f);
group.setQscore(0.85);
group.setQscore2D(0.80);
group.setRepAbsCharge((min_charge + max_charge) / 2);
group.setScanNumber(100);
group.setTargetDecoyType(PeakGroup::TargetDecoyType::target);
// adduct mass offset: protonation in positive mode, deprotonation otherwise
const double adduct = is_positive ? Constants::PROTON_MASS_U : -Constants::PROTON_MASS_U;
// one synthetic peak per charge state, added in ascending charge order
for (int charge = min_charge; charge <= max_charge; ++charge)
{
FLASHHelperClasses::LogMzPeak peak;
peak.mz = mono_mass / charge + adduct;
peak.intensity = 1000.0f * charge;
peak.abs_charge = charge;
peak.is_positive = is_positive;
peak.isotopeIndex = 0;
peak.mass = mono_mass;
group.push_back(peak);
}
return group;
}
/////////////////////////////////////////////////////////////
// Helper function to create a test DeconvolvedSpectrum with an original
// 3-peak spectrum and three PeakGroups; for ms_level > 1 it also attaches
// precursor information (HCD, charge 10) and a precursor peak group.
/////////////////////////////////////////////////////////////
DeconvolvedSpectrum createTestDeconvolvedSpectrum(int scan_number, uint ms_level, double rt)
{
DeconvolvedSpectrum dspec(scan_number);
// Create original spectrum
MSSpectrum spec;
spec.setRT(rt);
spec.setMSLevel(ms_level);
// Add some peaks to original spectrum
Peak1D p1, p2, p3;
p1.setMZ(500.0);
p1.setIntensity(10000.0f);
p2.setMZ(600.0);
p2.setIntensity(15000.0f);
p3.setMZ(700.0);
p3.setIntensity(8000.0f);
spec.push_back(p1);
spec.push_back(p2);
spec.push_back(p3);
dspec.setOriginalSpectrum(spec);
// Add at least 3 peak groups (required by topFD_min_peak_count_ = 3)
dspec.push_back(createTestPeakGroup(10000.0, 5, 15));
dspec.push_back(createTestPeakGroup(15000.0, 8, 20));
dspec.push_back(createTestPeakGroup(20000.0, 10, 25));
// For MS2, set precursor information
if (ms_level > 1)
{
Precursor prec;
prec.setMZ(800.0);
prec.setIntensity(50000.0f);
prec.setCharge(10);
dspec.setPrecursor(prec);
// the precursor scan is assumed to be the immediately preceding scan
dspec.setPrecursorScanNumber(scan_number - 1);
dspec.setActivationMethod(Precursor::ActivationMethod::HCD);
// Set precursor peak group
PeakGroup precPg = createTestPeakGroup(8000.0, 8, 12);
precPg.setFeatureIndex(1);
dspec.setPrecursorPeakGroup(precPg);
}
return dspec;
}
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
START_TEST(FLASHDeconvSpectrumFile, "$Id$")
/////////////////////////////////////////////////////////////
// Test writeDeconvolvedMassesHeader
/////////////////////////////////////////////////////////////
START_SECTION(static void writeDeconvolvedMassesHeader(std::ostream& os, uint ms_level, bool detail, bool report_decoy))
{
// Verifies which column groups appear in the TSV header depending on
// MS level (precursor columns), 'detail' (per-peak columns) and
// 'report_decoy' (target/decoy + q-value columns).
// Test MS1 header without detail, without decoy
{
ostringstream oss;
FLASHDeconvSpectrumFile::writeDeconvolvedMassesHeader(oss, 1, false, false);
String header = oss.str();
TEST_EQUAL(header.hasSubstring("Index"), true)
TEST_EQUAL(header.hasSubstring("FileName"), true)
TEST_EQUAL(header.hasSubstring("ScanNum"), true)
TEST_EQUAL(header.hasSubstring("RetentionTime"), true)
TEST_EQUAL(header.hasSubstring("MonoisotopicMass"), true)
TEST_EQUAL(header.hasSubstring("AverageMass"), true)
TEST_EQUAL(header.hasSubstring("SumIntensity"), true)
TEST_EQUAL(header.hasSubstring("IsotopeCosine"), true)
TEST_EQUAL(header.hasSubstring("Qscore"), true)
// Should NOT contain precursor info for MS1
TEST_EQUAL(header.hasSubstring("PrecursorScanNum"), false)
// Should NOT contain detail columns
TEST_EQUAL(header.hasSubstring("PeakMZs"), false)
// Should NOT contain decoy column
TEST_EQUAL(header.hasSubstring("TargetDecoyType"), false)
}
// Test MS1 header with detail
{
ostringstream oss;
FLASHDeconvSpectrumFile::writeDeconvolvedMassesHeader(oss, 1, true, false);
String header = oss.str();
// Should contain detail columns
TEST_EQUAL(header.hasSubstring("PeakMZs"), true)
TEST_EQUAL(header.hasSubstring("PeakIntensities"), true)
TEST_EQUAL(header.hasSubstring("PeakCharges"), true)
TEST_EQUAL(header.hasSubstring("PerChargeIntensity"), true)
TEST_EQUAL(header.hasSubstring("PerIsotopeIntensity"), true)
}
// Test MS1 header with decoy reporting
{
ostringstream oss;
FLASHDeconvSpectrumFile::writeDeconvolvedMassesHeader(oss, 1, false, true);
String header = oss.str();
TEST_EQUAL(header.hasSubstring("TargetDecoyType"), true)
TEST_EQUAL(header.hasSubstring("Qvalue"), true)
}
// Test MS2 header (should contain precursor info)
{
ostringstream oss;
FLASHDeconvSpectrumFile::writeDeconvolvedMassesHeader(oss, 2, false, false);
String header = oss.str();
TEST_EQUAL(header.hasSubstring("PrecursorScanNum"), true)
TEST_EQUAL(header.hasSubstring("PrecursorMz"), true)
TEST_EQUAL(header.hasSubstring("PrecursorCharge"), true)
TEST_EQUAL(header.hasSubstring("PrecursorMonoisotopicMass"), true)
}
// Test MS2 header with detail and decoy
{
ostringstream oss;
FLASHDeconvSpectrumFile::writeDeconvolvedMassesHeader(oss, 2, true, true);
String header = oss.str();
TEST_EQUAL(header.hasSubstring("PrecursorScanNum"), true)
TEST_EQUAL(header.hasSubstring("PeakMZs"), true)
TEST_EQUAL(header.hasSubstring("TargetDecoyType"), true)
TEST_EQUAL(header.hasSubstring("PrecursorQvalue"), true)
}
}
END_SECTION
/////////////////////////////////////////////////////////////
// Test writeTopFDHeader
/////////////////////////////////////////////////////////////
START_SECTION(static void writeTopFDHeader(std::ostream& os, const Param& param))
{
// The msalign header must echo the tool banner and every parameter
// name/value pair from the given Param object.
ostringstream oss;
Param params;
params.setValue("test_param", 42);
params.setValue("another_param", "value");
FLASHDeconvSpectrumFile::writeTopFDHeader(oss, params);
String header = oss.str();
TEST_EQUAL(header.hasSubstring("#FLASHDeconv generated msalign file"), true)
TEST_EQUAL(header.hasSubstring("Parameters"), true)
TEST_EQUAL(header.hasSubstring("test_param"), true)
TEST_EQUAL(header.hasSubstring("42"), true)
TEST_EQUAL(header.hasSubstring("another_param"), true)
TEST_EQUAL(header.hasSubstring("value"), true)
}
END_SECTION
/////////////////////////////////////////////////////////////
// Test writeIsobaricQuantification
/////////////////////////////////////////////////////////////
START_SECTION(static void writeIsobaricQuantification(std::ostream& os, std::vector<DeconvolvedSpectrum>& deconvolved_spectra))
{
// The TSV header is written unconditionally; spectra without isobaric
// quantities contribute no data rows (channel_count == 0).
// Test with empty spectra - should produce header only (channel_count == 0)
{
ostringstream oss;
std::vector<DeconvolvedSpectrum> empty_spectra;
FLASHDeconvSpectrumFile::writeIsobaricQuantification(oss, empty_spectra);
String output = oss.str();
// Header is always written, even when no spectra have isobaric quantities
TEST_EQUAL(output.hasPrefix("Scan\tPrecursorScan\tPrecursorMZ\t"), true)
}
// Test with MS2 spectra without isobaric quantities - should produce header only
{
ostringstream oss;
std::vector<DeconvolvedSpectrum> spectra;
spectra.push_back(createTestDeconvolvedSpectrum(100, 2, 600.0));
spectra.push_back(createTestDeconvolvedSpectrum(102, 2, 610.0));
FLASHDeconvSpectrumFile::writeIsobaricQuantification(oss, spectra);
String output = oss.str();
// Header is always written, even when channel_count == 0 (no isobaric quantities present)
TEST_EQUAL(output.hasPrefix("Scan\tPrecursorScan\tPrecursorMZ\t"), true)
}
}
END_SECTION
/////////////////////////////////////////////////////////////
// Test writeMzML
/////////////////////////////////////////////////////////////
START_SECTION(static void writeMzML(const MSExperiment& map, std::vector<DeconvolvedSpectrum>& deconvolved_spectra, const String& deconvolved_mzML_file, const String& annotated_mzML_file, int mzml_charge, DoubleList tols))
{
// Only the early-return path is covered here: empty output file names
// must be a no-op. Full file writing is not exercised in this test.
// Test with empty file names (should return early without error)
{
MSExperiment exp;
std::vector<DeconvolvedSpectrum> empty_spectra;
DoubleList tols = {10.0, 10.0};
// Should not throw when both file names are empty
FLASHDeconvSpectrumFile::writeMzML(exp, empty_spectra, "", "", 0, tols);
TEST_EQUAL(true, true) // If we get here, no exception was thrown
}
}
END_SECTION
/////////////////////////////////////////////////////////////
// Test writeTopFD with synthesized data
/////////////////////////////////////////////////////////////
START_SECTION(static void writeTopFD(DeconvolvedSpectrum& dspec, std::ostream& os, const String& filename, const double qval_threshold, const uint min_ms_level, bool randomize_precursor_mass, bool randomize_fragment_mass))
{
// Checks msalign output framing and key/value lines for MS1 and MS2, plus
// the suppression of output for spectra below the minimum peak count.
// Test with MS1 spectrum
{
ostringstream oss;
DeconvolvedSpectrum dspec = createTestDeconvolvedSpectrum(100, 1, 600.0);
FLASHDeconvSpectrumFile::writeTopFD(dspec, oss, "test.mzML", 1.0, 1, false, false);
String output = oss.str();
// Check msalign format markers
TEST_EQUAL(output.hasSubstring("BEGIN IONS"), true)
TEST_EQUAL(output.hasSubstring("END IONS"), true)
TEST_EQUAL(output.hasSubstring("FILE_NAME=test.mzML"), true)
TEST_EQUAL(output.hasSubstring("SCANS=100"), true)
TEST_EQUAL(output.hasSubstring("LEVEL=1"), true)
// MS1 should not have precursor info
TEST_EQUAL(output.hasSubstring("PRECURSOR_MZ"), false)
}
// Test with MS2 spectrum
{
ostringstream oss;
DeconvolvedSpectrum dspec = createTestDeconvolvedSpectrum(101, 2, 605.0);
FLASHDeconvSpectrumFile::writeTopFD(dspec, oss, "test.mzML", 1.0, 1, false, false);
String output = oss.str();
// Check msalign format for MS2
TEST_EQUAL(output.hasSubstring("BEGIN IONS"), true)
TEST_EQUAL(output.hasSubstring("END IONS"), true)
TEST_EQUAL(output.hasSubstring("LEVEL=2"), true)
// MS2 should have precursor info
TEST_EQUAL(output.hasSubstring("PRECURSOR_MZ"), true)
TEST_EQUAL(output.hasSubstring("PRECURSOR_CHARGE"), true)
TEST_EQUAL(output.hasSubstring("PRECURSOR_MASS"), true)
TEST_EQUAL(output.hasSubstring("MS_ONE_SCAN"), true)
TEST_EQUAL(output.hasSubstring("ACTIVATION"), true)
}
// Test empty spectrum (should produce no output due to min peak count)
{
ostringstream oss;
DeconvolvedSpectrum empty_dspec(50);
MSSpectrum spec;
spec.setRT(300.0);
spec.setMSLevel(1);
empty_dspec.setOriginalSpectrum(spec);
FLASHDeconvSpectrumFile::writeTopFD(empty_dspec, oss, "test.mzML", 1.0, 1, false, false);
// Empty spectrum should not produce output (below min peak count)
TEST_EQUAL(oss.str().empty(), true)
}
}
END_SECTION
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/SwathQC_test.cpp | .cpp | 5,138 | 192 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/test_config.h>
///////////////////////////
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/SimpleOpenMSSpectraAccessFactory.h>
///////////////////////////
#include <OpenMS/ANALYSIS/OPENSWATH/SwathQC.h>
#include <OpenMS/CONCEPT/FuzzyStringComparator.h>
#include <OpenMS/FORMAT/MzMLFile.h>
#include <OpenMS/FORMAT/TextFile.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/SYSTEM/File.h>
using namespace OpenMS;
using namespace std;
using namespace OpenSwath;
// Test shim: re-exposes the protected static SwathQC::isSubsampledSpectrum_
// so the sections below can call it directly.
class SwathQCTest : public SwathQC
{
public:
static bool isSubsampledSpectrum_(const size_t total_spec_count, const size_t subsample_count, const size_t idx)
{
return SwathQC::isSubsampledSpectrum_(total_spec_count, subsample_count, idx);
}
};
START_TEST(SwathQC, "$Id$")
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
SwathQC* nullPointer = nullptr;
SwathQC* ptr = nullptr;
// construction/destruction smoke tests
START_SECTION(SwathQC())
{
ptr = new SwathQC(10, 0.04);
TEST_NOT_EQUAL(ptr, nullPointer)
}
END_SECTION
START_SECTION(~SwathQC())
{
delete ptr;
}
END_SECTION
// Shared fixture: load a centroided test file and wrap it as a single
// MS1 SWATH map for the charge-distribution tests below.
std::shared_ptr<MSExperiment> exp(new MSExperiment);
MzMLFile().load(OPENMS_GET_TEST_DATA_PATH("PeakPickerHiRes_orbitrap_sn1_out.mzML"), *exp);
OpenSwath::SpectrumAccessPtr sptr = SimpleOpenMSSpectraFactory::getSpectrumAccessOpenMSPtr(exp);
std::vector< OpenSwath::SwathMap > swath_maps(1);
swath_maps.back().sptr = sptr;
swath_maps.back().ms1 = true;
START_SECTION((static ChargeDistribution getChargeDistribution(const std::vector<SwathMap>& swath_maps, const size_t nr_samples, const double mz_tol)))
{
// Expected charge histogram for the fixture file (charge -> count).
auto cd = SwathQC::getChargeDistribution(swath_maps, 10, 0.04);
SwathQC::ChargeDistribution cde = { {1,17}, {2,4}, {5,1}, {6,2}, {8,2}, {9,1}, {10,5} };
TEST_EQUAL(cd.size(), cde.size());
// on mismatch, dump both distributions to ease debugging
if (cd != cde)
{
std::cout << "Expected:\n";
for (auto& c : cde)
{
std::cout << c.first << " " << c.second << "\n";
}
std::cout << "Got:\n";
for (auto& c : cd)
{
std::cout << c.first << " " << c.second << "\n";
}
}
TEST_TRUE(cd == cde)
}
END_SECTION
START_SECTION((static bool isSubsampledSpectrum_(const size_t total_spec_count, const size_t subsample_count, const size_t idx)))
{
// Checks the even-subsampling predicate: which of 'total_spec_count'
// spectra are selected when 'subsample_count' samples are requested.
TEST_EQUAL(SwathQCTest::isSubsampledSpectrum_(0, 100, 4), true); // always true (unknown number of total spectra)
TEST_EQUAL(SwathQCTest::isSubsampledSpectrum_(10, 100, 4), true); // always true (not enough samples)
TEST_EQUAL(SwathQCTest::isSubsampledSpectrum_(10, 4, 10), false); // always false (index beyond # of total spectra)
TEST_EQUAL(SwathQCTest::isSubsampledSpectrum_(10, 4, 11), false); // always false (index beyond # of total spectra)
// expected selection pattern for 4 of 10 spectra
int r[] = {1, 0, 0, 1, 0, 1, 0, 0, 1, 0};
int c = 10;
for (int i = 0; i < c; ++i)
{
//std::cout << i << ": " << SwathQCTest::isSubsampledSpectrum_(c, 4, i) << "\n";
TEST_EQUAL(SwathQCTest::isSubsampledSpectrum_(c, 4, i), r[i]);
}
// sample none
c = 10;
for (int i = 0; i < c; ++i)
{
//std::cout << i << ": " << SwathQCTest::isSubsampledSpectrum_(c, 0, i) << "\n";
TEST_EQUAL(SwathQCTest::isSubsampledSpectrum_(c, 0, i), false);
}
// sample all
c = 4;
for (int i = 0; i < c; ++i)
{
//std::cout << i << ": " << SwathQCTest::isSubsampledSpectrum_(c, c, i) << "\n";
TEST_EQUAL(SwathQCTest::isSubsampledSpectrum_(c, c, i), true);
}
// sample 2 of 5
c = 5;
int r5[] = {1,0,0,1,0};
for (int i = 0; i < c; ++i)
{
//std::cout << i << ": " << SwathQCTest::isSubsampledSpectrum_(5, 2, i) << "\n";
TEST_EQUAL(SwathQCTest::isSubsampledSpectrum_(c, 2, i), r5[i]);
}
}
END_SECTION
START_SECTION((static void storeJSON(const OpenMS::String& filename)))
{
// Feeds all MS1 spectra through the QC processing functor, stores the
// resulting charge distribution as JSON, and fuzzy-compares it to the
// expected document.
SwathQC qc(10, 0.04);
int count{};
for (auto& s : *exp)
{
if (s.getMSLevel()==1) ++count;
}
qc.setNrMS1Spectra(count);
auto f = qc.getSpectraProcessingFunc();
for (auto& s : *exp)
{
if (s.getMSLevel()==1) f(s);
}
// getChargeDistribution(swath_maps, 10, 0.04);
String tmp_json = File::getTemporaryFile();
qc.storeJSON(tmp_json);
// write the expected JSON to a second temporary file for comparison
String tmp_expected = File::getTemporaryFile();
TextFile tf;
tf.addLine(R"({
"ChargeDistributionMS1": [
[
1,
17
],
[
2,
4
],
[
5,
1
],
[
6,
2
],
[
8,
2
],
[
9,
1
],
[
10,
5
]
]
})");
tf.store(tmp_expected);
TEST_EQUAL(FuzzyStringComparator().compareFiles(tmp_json, tmp_expected), true);
}
END_SECTION
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
END_TEST
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
///////////////////////////
#include <OpenMS/FORMAT/SwathFile.h>
#include <OpenMS/SYSTEM/File.h>
///////////////////////////
#include <OpenMS/FORMAT/MzMLFile.h>
#include <OpenMS/OPENSWATHALGO/DATAACCESS/SwathMap.h>
#include <OpenMS/METADATA/Precursor.h>
#include <OpenMS/KERNEL/MSExperiment.h>
using namespace OpenMS;
// Comparator for std::sort: MS1 maps order before all SWATH (MS2) maps,
// SWATH maps are ordered by the lower bound of their isolation window.
//
// Fix: the previous version returned 'true' whenever left.ms1 was set, so for
// two MS1 maps both comp(a, b) and comp(b, a) were true -- a violation of the
// strict weak ordering required by std::sort (undefined behavior). Guarding
// the first branch with '!right.ms1' restores a valid ordering and keeps the
// result unchanged for inputs with at most one MS1 map (as used in this test).
bool sortSwathMaps(const OpenSwath::SwathMap& left, const OpenSwath::SwathMap& right)
{
  // true if left is smaller
  if (left.ms1 && !right.ms1) return true;
  if (right.ms1) return false;
  return left.lower < right.lower;
}
// Write a small synthetic SWATH mzML file: one MS1 spectrum followed by
// 'nr_swathes' MS2 spectra whose consecutive 25 Th isolation windows start
// at 400 Th. Every spectrum carries a single diagnostic peak so the loader
// tests can identify each map unambiguously.
void storeSwathFile(String filename, int nr_swathes=32)
{
  PeakMap exp;

  // single MS1 survey spectrum with one marker peak (m/z 101, intensity 201)
  MSSpectrum survey;
  survey.setMSLevel(1);
  Peak1D survey_peak;
  survey_peak.setMZ(101);
  survey_peak.setIntensity(201);
  survey.push_back(survey_peak);
  exp.addSpectrum(survey);

  // one MS2 spectrum per window; window k covers [400 + 25*k, 425 + 25*k]
  for (int k = 0; k < nr_swathes; ++k)
  {
    MSSpectrum frag;
    frag.setMSLevel(2);

    Precursor window;
    window.setIsolationWindowLowerOffset(12.5);
    window.setIsolationWindowUpperOffset(12.5);
    window.setMZ(400 + k*25 + 12.5);
    frag.setPrecursors(std::vector<Precursor>(1, window));

    // marker peak encodes the window index in m/z and intensity
    Peak1D marker;
    marker.setMZ(101 + k);
    marker.setIntensity(201 + k);
    frag.push_back(marker);

    exp.addSpectrum(frag);
  }

  MzMLFile().store(filename, exp);
}
void storeSplitSwathFile(std::vector<String> filenames)
{
{
PeakMap exp;
MSSpectrum s;
s.setMSLevel(1);
Peak1D p; p.setMZ(101); p.setIntensity(201);
s.push_back(p);
exp.addSpectrum(s);
MzMLFile().store(filenames[0], exp);
}
for (Size i = 0; i< filenames.size() -1; i++)
{
PeakMap exp;
MSSpectrum s;
s.setMSLevel(2);
std::vector<Precursor> prec(1);
prec[0].setIsolationWindowLowerOffset(12.5);
prec[0].setIsolationWindowUpperOffset(12.5);
prec[0].setMZ(400 + i*25 + 12.5);
s.setPrecursors(prec);
Peak1D p; p.setMZ(101 + i); p.setIntensity(201 + i);
s.push_back(p);
exp.addSpectrum(s);
MzMLFile().store(filenames[i+1], exp);
}
}
START_TEST(SwathFile, "$Id$")
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
// Pointers shared by the constructor/destructor sections below
SwathFile* swath_file_ptr = nullptr;
SwathFile* swath_file_nullPointer = nullptr;

START_SECTION((SwathFile()))
  // default construction must yield a valid (non-null) object
  swath_file_ptr = new SwathFile;
  TEST_NOT_EQUAL(swath_file_ptr, swath_file_nullPointer)
END_SECTION

START_SECTION(([EXTRA]virtual ~SwathFile()))
  // destruction of a default-constructed object must not crash
  delete swath_file_ptr;
END_SECTION
// fast
START_SECTION(std::vector< OpenSwath::SwathMap > loadMzML(String file, String tmp, std::shared_ptr<ExperimentalSettings>& exp_meta, String readoptions="normal") )
{
  // write a synthetic file (1 MS1 + 6 SWATH spectra), load it back with the
  // default "normal" read option and verify each resulting swath map
  Size nr_swathes = 6;
  storeSwathFile("swathFile_1.tmp", nr_swathes);
  std::shared_ptr<ExperimentalSettings> meta = std::shared_ptr<ExperimentalSettings>(new ExperimentalSettings());
  std::vector< OpenSwath::SwathMap > maps = SwathFile().loadMzML("swathFile_1.tmp", File::getTempDirectory() + "/", meta);
  TEST_EQUAL(maps.size(), nr_swathes+1)  // one map per window plus the MS1 map
  TEST_EQUAL(maps[0].ms1, true)          // MS1 map is first
  for (Size i = 0; i< nr_swathes; i++)
  {
    // each SWATH map holds exactly the one diagnostic peak written by
    // storeSwathFile, and the 25 Th window bounds derived from the precursor
    TEST_EQUAL(maps[i+1].ms1, false)
    TEST_EQUAL(maps[i+1].sptr->getNrSpectra(), 1)
    TEST_EQUAL(maps[i+1].sptr->getSpectrumById(0)->getMZArray()->data.size(), 1)
    TEST_REAL_SIMILAR(maps[i+1].sptr->getSpectrumById(0)->getMZArray()->data[0], 101.0+i)
    TEST_REAL_SIMILAR(maps[i+1].sptr->getSpectrumById(0)->getIntensityArray()->data[0], 201.0+i)
    TEST_REAL_SIMILAR(maps[i+1].lower, 400+i*25.0)
    TEST_REAL_SIMILAR(maps[i+1].upper, 425+i*25.0)
  }
}
END_SECTION
// medium (2x slower than normal mzML)
START_SECTION([EXTRA]std::vector< OpenSwath::SwathMap > loadMzML(String file, String tmp, std::shared_ptr<ExperimentalSettings>& exp_meta, String readoptions="cache") )
{
  // same scenario as the section above, but exercising the "cache" read
  // option (data cached to the tmp directory instead of held in memory)
  Size nr_swathes = 2;
  storeSwathFile("swathFile_1.tmp", nr_swathes);
  std::shared_ptr<ExperimentalSettings> meta = std::shared_ptr<ExperimentalSettings>(new ExperimentalSettings());
  std::vector< OpenSwath::SwathMap > maps = SwathFile().loadMzML("swathFile_1.tmp", File::getTempDirectory() + "/", meta, "cache");
  TEST_EQUAL(maps.size(), nr_swathes+1)  // one map per window plus the MS1 map
  TEST_EQUAL(maps[0].ms1, true)
  for (Size i = 0; i< nr_swathes; i++)
  {
    // cached access must return exactly the same data as the "normal" path
    TEST_EQUAL(maps[i+1].ms1, false)
    TEST_EQUAL(maps[i+1].sptr->getNrSpectra(), 1)
    TEST_EQUAL(maps[i+1].sptr->getSpectrumById(0)->getMZArray()->data.size(), 1)
    TEST_REAL_SIMILAR(maps[i+1].sptr->getSpectrumById(0)->getMZArray()->data[0], 101.0+i)
    TEST_REAL_SIMILAR(maps[i+1].sptr->getSpectrumById(0)->getIntensityArray()->data[0], 201.0+i)
    TEST_REAL_SIMILAR(maps[i+1].lower, 400+i*25.0)
    TEST_REAL_SIMILAR(maps[i+1].upper, 425+i*25.0)
  }
}
END_SECTION
// medium (2x slower than normal mzML)
START_SECTION(std::vector< OpenSwath::SwathMap > loadSplit(StringList file_list, String tmp, std::shared_ptr<ExperimentalSettings>& exp_meta, String readoptions="normal"))
{
  // one file per swath window (plus a separate MS1 file), loaded via loadSplit
  std::vector<String> swath_filenames;
  Size nr_swathes = 3;
  swath_filenames.push_back("swathFile_2_ms1.tmp");
  for (Size i = 0; i < nr_swathes; i++)
  {
    swath_filenames.push_back( String("swathFile_2_sw" ) + String(i) + ".tmp");
  }
  storeSplitSwathFile(swath_filenames);
  std::shared_ptr<ExperimentalSettings> meta = std::shared_ptr<ExperimentalSettings>(new ExperimentalSettings());
  std::vector< OpenSwath::SwathMap > maps = SwathFile().loadSplit(swath_filenames, File::getTempDirectory() + "/", meta);
  // ensure they are sorted ...
  // (loadSplit gives no ordering guarantee across files, so sort explicitly)
  std::sort(maps.begin(), maps.end(), sortSwathMaps);
  TEST_EQUAL(maps.size(), nr_swathes + 1)
  TEST_EQUAL(maps[0].ms1, true)
  for (Size i = 0; i< maps.size() -1; i++)
  {
    // each per-file map must contain exactly the peak written for that window
    TEST_EQUAL(maps[i+1].ms1, false)
    TEST_EQUAL(maps[i+1].sptr->getNrSpectra(), 1)
    TEST_EQUAL(maps[i+1].sptr->getSpectrumById(0)->getMZArray()->data.size(), 1)
    TEST_REAL_SIMILAR(maps[i+1].sptr->getSpectrumById(0)->getMZArray()->data[0], 101.0+i)
    TEST_REAL_SIMILAR(maps[i+1].sptr->getSpectrumById(0)->getIntensityArray()->data[0], 201.0+i)
    TEST_REAL_SIMILAR(maps[i+1].lower, 400+i*25.0)
    TEST_REAL_SIMILAR(maps[i+1].upper, 425+i*25.0)
  }
}
END_SECTION
// slow (7x slower than normal mzML)
START_SECTION([EXTRA]std::vector< OpenSwath::SwathMap > loadSplit(StringList file_list, String tmp, std::shared_ptr<ExperimentalSettings>& exp_meta, String readoptions="cache"))
{
  // same as the loadSplit section above, but with the "cache" read option
  std::vector<String> swath_filenames;
  Size nr_swathes = 2;
  swath_filenames.push_back("swathFile_3_ms1.tmp");
  for (Size i = 0; i < nr_swathes; i++)
  {
    swath_filenames.push_back( String("swathFile_3_sw" ) + String(i) + ".tmp");
  }
  storeSplitSwathFile(swath_filenames);
  std::shared_ptr<ExperimentalSettings> meta = std::shared_ptr<ExperimentalSettings>(new ExperimentalSettings());
  std::vector< OpenSwath::SwathMap > maps = SwathFile().loadSplit(swath_filenames, File::getTempDirectory() + "/", meta, "cache");
  // ensure they are sorted ...
  std::sort(maps.begin(), maps.end(), sortSwathMaps);
  TEST_EQUAL(maps.size(), nr_swathes + 1)
  TEST_EQUAL(maps[0].ms1, true)
  for (Size i = 0; i< maps.size() -1; i++)
  {
    // cached access must return exactly the same data as the "normal" path
    TEST_EQUAL(maps[i+1].ms1, false)
    TEST_EQUAL(maps[i+1].sptr->getNrSpectra(), 1)
    TEST_EQUAL(maps[i+1].sptr->getSpectrumById(0)->getMZArray()->data.size(), 1)
    TEST_REAL_SIMILAR(maps[i+1].sptr->getSpectrumById(0)->getMZArray()->data[0], 101.0+i)
    TEST_REAL_SIMILAR(maps[i+1].sptr->getSpectrumById(0)->getIntensityArray()->data[0], 201.0+i)
    TEST_REAL_SIMILAR(maps[i+1].lower, 400+i*25.0)
    TEST_REAL_SIMILAR(maps[i+1].upper, 425+i*25.0)
  }
}
END_SECTION
START_SECTION((std::vector< OpenSwath::SwathMap > loadMzXML(String file, String tmp, std::shared_ptr<ExperimentalSettings>& exp_meta, String readoptions="normal") ) )
{
  // placeholder section: the mzXML code path cannot be exercised here
  NOT_TESTABLE // mzXML is not supported
}
END_SECTION
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
END_TEST
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg$
// $Authors: Timo Sachsenberg$
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/test_config.h>
#include <OpenMS/FORMAT/DTAFile.h>
///////////////////////////
#include <OpenMS/ANALYSIS/ID/PScore.h>
///////////////////////////
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/CHEMISTRY/TheoreticalSpectrumGenerator.h>
using namespace OpenMS;
using namespace std;
START_TEST(PScore, "$Id$")
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
// Pointers shared by the constructor/destructor sections below
PScore* ptr = nullptr;
PScore* null_ptr = nullptr;

START_SECTION(PScore())
{
  // default construction must yield a valid (non-null) object
  ptr = new PScore();
  TEST_NOT_EQUAL(ptr, null_ptr)
}
END_SECTION

START_SECTION(~PScore())
{
  // destruction must not crash
  delete ptr;
}
END_SECTION
START_SECTION((static std::vector<Size> calculateIntensityRankInMZWindow(const std::vector< double > &mz, const std::vector< double > &intensities, double mz_window)))
{
  // Build a spectrum whose m/z and intensity both increase linearly (0..99),
  // so the intensity rank of every peak inside a window is predictable.
  std::vector<double> mz_values(100);
  std::vector<double> int_values(100);
  for (Size idx = 0; idx < 100; ++idx)
  {
    mz_values[idx] = idx;
    int_values[idx] = idx;
  }

  // window of 9.9: all but the last 4 peaks are expected to have rank 4
  std::vector<Size> ranks = PScore::calculateIntensityRankInMZWindow(mz_values, int_values, 9.9);
  TEST_EQUAL(ranks.size(), mz_values.size());
  for (Size idx = 0; idx != ranks.size() - 4; ++idx)
  {
    TEST_EQUAL(ranks[idx], 4);
  }

  // slightly larger window of 10.1: expected rank rises to 5
  ranks = PScore::calculateIntensityRankInMZWindow(mz_values, int_values, 10.1);
  TEST_EQUAL(ranks.size(), mz_values.size());
  for (Size idx = 0; idx != ranks.size() - 5; ++idx)
  {
    TEST_EQUAL(ranks[idx], 5);
  }
}
END_SECTION
START_SECTION((static std::vector<std::vector<Size> > calculateRankMap(const PeakMap &peak_map, double mz_window=100)))
{
  // Convenience function. Calculations tested via calculateIntensityRankInMZWindow
}
END_SECTION

START_SECTION((static std::map<Size, PeakSpectrum> calculatePeakLevelSpectra(const PeakSpectrum &spec, const std::vector< Size > &ranks, Size min_level=2, Size max_level=10)))
{
  // load a real test spectrum from disk and compute per-peak intensity ranks
  DTAFile dta_file;
  PeakSpectrum spec;
  dta_file.load(OPENMS_GET_TEST_DATA_PATH("PScore_test.dta"), spec);
  vector<double> mz, intensities;
  for (Size i = 0; i != spec.size(); ++i)
  {
    mz.push_back(spec[i].getMZ());
    intensities.push_back(spec[i].getIntensity());
  }
  std::vector<Size> ranks = PScore::calculateIntensityRankInMZWindow(mz, intensities, 100.0);
  TEST_EQUAL(ranks.size(), spec.size())
  // requesting levels 0..1 must yield exactly two peak-level spectra
  std::map<Size, PeakSpectrum > pls = PScore::calculatePeakLevelSpectra(spec, ranks, 0, 1);
  TEST_EQUAL(pls.size(), 2)
  // top intensity peaks in +- 50 Th neighborhood
  TEST_REAL_SIMILAR(pls[0][0].getMZ(), 169.65);
  TEST_REAL_SIMILAR(pls[0][1].getMZ(), 231.51);
  TEST_REAL_SIMILAR(pls[0][2].getMZ(), 362.22);
  TEST_REAL_SIMILAR(pls[0][3].getMZ(), 508.47);
  TEST_REAL_SIMILAR(pls[0][4].getMZ(), 579.61);
  TEST_REAL_SIMILAR(pls[0][5].getMZ(), 629.66);
  TEST_REAL_SIMILAR(pls[0][6].getMZ(), 712.18);
  // top two intensity peaks in +- 50 Th neighborhood
  TEST_REAL_SIMILAR(pls[1][0].getMZ(), 149.93);
  TEST_REAL_SIMILAR(pls[1][1].getMZ(), 169.65);
  TEST_REAL_SIMILAR(pls[1][2].getMZ(), 231.51);
  TEST_REAL_SIMILAR(pls[1][3].getMZ(), 263.88);
  TEST_REAL_SIMILAR(pls[1][4].getMZ(), 318.38);
  TEST_REAL_SIMILAR(pls[1][5].getMZ(), 362.22);
  TEST_REAL_SIMILAR(pls[1][6].getMZ(), 389.84);
  TEST_REAL_SIMILAR(pls[1][7].getMZ(), 489.86);
  TEST_REAL_SIMILAR(pls[1][8].getMZ(), 508.47);
  TEST_REAL_SIMILAR(pls[1][9].getMZ(), 562.72);
  TEST_REAL_SIMILAR(pls[1][10].getMZ(), 579.61);
  TEST_REAL_SIMILAR(pls[1][11].getMZ(), 629.66);
  TEST_REAL_SIMILAR(pls[1][12].getMZ(), 712.18);
}
END_SECTION
START_SECTION((static double computePScore(double fragment_mass_tolerance, bool fragment_mass_tolerance_unit_ppm, const std::map< Size, PeakSpectrum > &peak_level_spectra, const std::vector< PeakSpectrum > &theo_spectra, double mz_window=100.0)))
{
  // Convenience overload for multiple theoretical spectra; the actual
  // calculations are tested via the single-spectrum computePScore overload below.
}
END_SECTION

START_SECTION((static double computePScore(double fragment_mass_tolerance, bool fragment_mass_tolerance_unit_ppm, const std::map< Size, PeakSpectrum > &peak_level_spectra, const PeakSpectrum &theo_spectrum, double mz_window=100.0)))
{
  // Part 1: score a measured spectrum against a "theoretical" spectrum that is
  // an exact copy of itself -- every peak matches, so the score depends only
  // on the requested peak levels.
  DTAFile dta_file;
  PeakSpectrum spec;
  dta_file.load(OPENMS_GET_TEST_DATA_PATH("PScore_test.dta"), spec);
  vector<double> mz, intensities;
  for (Size i = 0; i != spec.size(); ++i)
  {
    mz.push_back(spec[i].getMZ());
    intensities.push_back(spec[i].getIntensity());
  }
  PeakSpectrum theo_spec;
  for (Size i = 0; i != spec.size(); ++i)
  {
    Peak1D p;
    p.setMZ(spec[i].getMZ());
    p.setIntensity(spec[i].getIntensity());
    theo_spec.push_back(p);
  }
  std::vector<Size> ranks = PScore::calculateIntensityRankInMZWindow(mz, intensities, 100.0);
  std::map<Size, PeakSpectrum > pls = PScore::calculatePeakLevelSpectra(spec, ranks, 0, 0);
  double pscore_all_match_top_1 = PScore::computePScore(0.1, true, pls, theo_spec);
  pls = PScore::calculatePeakLevelSpectra(spec, ranks, 0, 1);
  double pscore_all_match_top_2 = PScore::computePScore(0.1, true, pls, theo_spec);
  // reference values for self-match at peak levels 0 and 0..1
  TEST_REAL_SIMILAR(pscore_all_match_top_1, 83.867454)
  TEST_REAL_SIMILAR(pscore_all_match_top_2, 154.682242)
  // Part 2: generate a genuine theoretical spectrum for a peptide and score
  // it against itself (again a perfect match by construction)
  AASequence peptide = AASequence::fromString("IFSQVGK");
  TheoreticalSpectrumGenerator tg;
  Param param(tg.getParameters());
  param.setValue("add_first_prefix_ion", "true");
  tg.setParameters(param);
  spec.clear(true);
  tg.getSpectrum(spec, peptide, 1, 1);
  TEST_EQUAL(spec.size(), 12)
  mz.clear();
  intensities.clear();
  for (Size i = 0; i != spec.size(); ++i)
  {
    mz.push_back(spec[i].getMZ());
    intensities.push_back(spec[i].getIntensity());
  }
  ranks = PScore::calculateIntensityRankInMZWindow(mz, intensities, 100.0);
  pls = PScore::calculatePeakLevelSpectra(spec, ranks, 0, 0);
  double all_match = PScore::computePScore(0.1, true, pls, spec);
  TEST_REAL_SIMILAR(all_match, 240)
}
END_SECTION
// The three correction-term helpers below are deliberately left untested;
// the sections exist only so the interface is registered with the test suite.
START_SECTION((static double massCorrectionTerm(double mass)))
{
  // Not tested
}
END_SECTION

START_SECTION((static double cleavageCorrectionTerm(Size cleavages, bool consecutive_cleavage)))
{
  // Not tested
}
END_SECTION

START_SECTION((static double modificationCorrectionTerm(Size modifications)))
{
  // Not tested
}
END_SECTION
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
END_TEST
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: Marc Sturm, Clemens Groepl $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/test_config.h>
///////////////////////////
#include <OpenMS/DATASTRUCTURES/Param.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <OpenMS/APPLICATIONS/TOPPBase.h> // for "ParameterInformation"
#include <type_traits>
#include <iterator>
///////////////////////////
using namespace OpenMS;
using namespace std;
#ifdef _MSC_VER // disable optimization in VS only for this test (as its size triggers 'heap-overflow' during compile otherwise)
#pragma warning (disable: 4748) // disable warning that occurs when switching optimzation off (as /GS is still enabled)
#pragma optimize( "", off )
#endif
START_TEST(Param, "$Id$")
//////////////////// Param::ParamEntry /////////////////////////////
////////////////////////////////////////////////////////////////////
// Pointers shared by the ParamEntry constructor/destructor sections below
Param::ParamEntry* pe_ptr = nullptr;
Param::ParamEntry* pe_nullPointer = nullptr;

START_SECTION(([Param::ParamEntry] ParamEntry()))
  pe_ptr = new Param::ParamEntry();
  TEST_NOT_EQUAL(pe_ptr,pe_nullPointer)
END_SECTION

START_SECTION(([Param::ParamEntry] ~ParamEntry()))
  delete pe_ptr;
END_SECTION

START_SECTION(([Param::ParamEntry] ParamEntry(const std::string &n, const DataValue &v, const std::string &d, const std::vector<std::string> &t=std::vector<std::string>())))
  // ctor argument order: name, value, description, tags
  Param::ParamEntry pe("n","v","d",{"advanced"});
  TEST_EQUAL(pe.name,"n")
  TEST_EQUAL(pe.description,"d")
  TEST_EQUAL(pe.value,"v")
  TEST_EQUAL(pe.tags.count("advanced")==1,true)
  // without explicit tags the tag set must be empty
  pe = Param::ParamEntry("n1","v1","d1");
  TEST_EQUAL(pe.name,"n1")
  TEST_EQUAL(pe.description,"d1")
  TEST_EQUAL(pe.value,"v1")
  TEST_EQUAL(pe.tags.count("advanced")==1,false)
END_SECTION
START_SECTION(([Param::ParamEntry] bool isValid(std::string& message) const))
  Param p;
  std::string m;
  // integer restrictions: value must lie within [min, max] once they are set
  p.setValue("int",5);
  TEST_EQUAL(p.getEntry("int").isValid(m),true);
  p.setMinInt("int",5);
  TEST_EQUAL(p.getEntry("int").isValid(m),true);
  p.setMaxInt("int",8);
  TEST_EQUAL(p.getEntry("int").isValid(m),true);
  p.setValue("int",10);
  TEST_EQUAL(p.getEntry("int").isValid(m),false);
  // float restrictions: analogous bounds check
  p.setValue("float",5.1);
  TEST_EQUAL(p.getEntry("float").isValid(m),true);
  p.setMinFloat("float",5.1);
  TEST_EQUAL(p.getEntry("float").isValid(m),true);
  p.setMaxFloat("float",8.1);
  TEST_EQUAL(p.getEntry("float").isValid(m),true);
  p.setValue("float",10.1);
  TEST_EQUAL(p.getEntry("float").isValid(m),false);
  // NOTE(review): the following eight lines repeat the float block above
  // verbatim -- most likely an accidental duplication, harmless for the test
  p.setValue("float",5.1);
  TEST_EQUAL(p.getEntry("float").isValid(m),true);
  p.setMinFloat("float",5.1);
  TEST_EQUAL(p.getEntry("float").isValid(m),true);
  p.setMaxFloat("float",8.1);
  TEST_EQUAL(p.getEntry("float").isValid(m),true);
  p.setValue("float",10.1);
  TEST_EQUAL(p.getEntry("float").isValid(m),false);
  // string restrictions: value must be one of the registered valid strings
  vector<std::string> strings;
  strings.push_back("bla");
  strings.push_back("bluff");
  p.setValue("string","bli");
  TEST_EQUAL(p.getEntry("string").isValid(m),true);
  p.setValidStrings("string",strings);
  TEST_EQUAL(p.getEntry("string").isValid(m),false);
  p.setValue("string_2","bla");
  TEST_EQUAL(p.getEntry("string_2").isValid(m),true);
  p.setValidStrings("string_2",strings);
  TEST_EQUAL(p.getEntry("string_2").isValid(m),true);
END_SECTION
START_SECTION(([Param::ParamEntry] bool operator==(const ParamEntry& rhs) const))
  // NOTE(review): ctor order is (name, value, description), so here the value
  // is "d" and the description is "v" -- irrelevant for the equality tests.
  Param::ParamEntry n1("n","d","v",{"advanced"});
  Param::ParamEntry n2("n","d","v",{"advanced"});
  TEST_EQUAL(n1==n2,true)
  // equality is sensitive to name and value ...
  n2.name = "name";
  TEST_EQUAL(n1==n2,false)
  n2 = n1;
  n2.value = "bla";
  TEST_EQUAL(n1==n2,false)
  // ... but ignores description and tags
  n2 = n1;
  n2.description = "bla";
  TEST_EQUAL(n1==n2,true)
  n2.tags.clear();
  TEST_EQUAL(n1==n2,true)
END_SECTION
////////////////// Param::ParamNode ////////////////////////////////
////////////////////////////////////////////////////////////////////
// Pointers shared by the ParamNode constructor/destructor sections below
Param::ParamNode* pn_ptr = nullptr;
Param::ParamNode* pn_nullPointer = nullptr;

START_SECTION(([Param::ParamNode] ParamNode()))
  pn_ptr = new Param::ParamNode();
  TEST_NOT_EQUAL(pn_ptr,pn_nullPointer)
END_SECTION

START_SECTION(([Param::ParamNode] ~ParamNode()))
  delete pn_ptr;
END_SECTION

START_SECTION(([Param::ParamNode] ParamNode(const std::string& n, const std::string& d)))
  // ctor argument order: name, description
  Param::ParamNode n("n","d");
  TEST_EQUAL(n.name,"n")
  TEST_EQUAL(n.description,"d")
  n = Param::ParamNode("n1","d1");
  TEST_EQUAL(n.name,"n1")
  TEST_EQUAL(n.description,"d1")
END_SECTION

START_SECTION(([Param::ParamNode] bool operator==(const ParamNode& rhs) const))
  Param::ParamNode n1("n","d");
  Param::ParamNode n2("n","d");
  TEST_EQUAL(n1==n2,true)
  // name participates in equality, description does not
  n2.name = "name";
  TEST_EQUAL(n1==n2,false)
  n2 = n1;
  n2.description = "bla";
  TEST_EQUAL(n1==n2,true)
  // differing child counts make nodes unequal
  n2 = n1;
  n2.nodes.resize(5);
  TEST_EQUAL(n1==n2,false)
  n2 = n1;
  n2.entries.resize(5);
  TEST_EQUAL(n1==n2,false)
  // equal children in different order still compare equal
  n2 = n1;
  n2.entries.push_back(Param::ParamEntry("a","x",""));
  n2.entries.push_back(Param::ParamEntry("b","y",""));
  n1.entries.push_back(Param::ParamEntry("b","y",""));
  n1.entries.push_back(Param::ParamEntry("a","x",""));
  TEST_EQUAL(n1==n2,true)
  n2.nodes.push_back(Param::ParamNode("a","x"));
  n2.nodes.push_back(Param::ParamNode("b","y"));
  n1.nodes.push_back(Param::ParamNode("b","y"));
  n1.nodes.push_back(Param::ParamNode("a","x"));
  TEST_EQUAL(n1==n2,true)
END_SECTION

START_SECTION(([Param::ParamNode] std::string suffix(const std::string &key) const ))
  // suffix() returns the part of a ':'-separated key after the last colon
  Param::ParamNode node;
  TEST_EQUAL(node.suffix(""),"")
  TEST_EQUAL(node.suffix("A"),"A")
  TEST_EQUAL(node.suffix("A:A"),"A")
  TEST_EQUAL(node.suffix("A:AB"),"AB")
  TEST_EQUAL(node.suffix("AB:A"),"A")
  TEST_EQUAL(node.suffix(":A"),"A")
END_SECTION
//Dummy Tree:
// A
//  |-B(1)
//  |-C
//  |  |-D(2)
//  |  |-E(3)
//  |-B
//     |-G(4)
// NOTE(review): the (1)..(4) labels in the diagram suggest distinct values,
// but the code below assigns value 1 to every entry; the structural tests
// that follow only inspect names/positions, never values.
Param::ParamNode pn,n;
Param::ParamEntry e;
pn.name="A";
e.name="B"; e.value=1; pn.entries.push_back(e);
n.name="C"; pn.nodes.push_back(n);
e.name="D"; e.value=1; pn.nodes[0].entries.push_back(e);
e.name="E"; e.value=1; pn.nodes[0].entries.push_back(e);
n.name="B"; pn.nodes.push_back(n);
e.name="G"; e.value=1; pn.nodes[1].entries.push_back(e);

START_SECTION(([Param::ParamNode] Size size() const ))
  // size() counts entries recursively: B + (D,E) + G = 4
  TEST_EQUAL(pn.size(),4)
  TEST_EQUAL(pn.nodes[0].size(),2)
  TEST_EQUAL(pn.nodes[1].size(),1)
END_SECTION
START_SECTION(([Param::ParamNode] EntryIterator findEntry(const std::string& name)))
  // findEntry searches only the node's direct entries (non-recursive):
  // of the tree above only "B" is a direct entry of the root
  TEST_EQUAL(pn.findEntry("A")==pn.entries.end(),true)
  TEST_EQUAL(pn.findEntry("B")!=pn.entries.end(),true)
  TEST_EQUAL(pn.findEntry("C")==pn.entries.end(),true)
  TEST_EQUAL(pn.findEntry("D")==pn.entries.end(),true)
  TEST_EQUAL(pn.findEntry("E")==pn.entries.end(),true)
  TEST_EQUAL(pn.findEntry("F")==pn.entries.end(),true)
  TEST_EQUAL(pn.findEntry("G")==pn.entries.end(),true)
  TEST_EQUAL(pn.findEntry("H")==pn.entries.end(),true)
END_SECTION

START_SECTION(([Param::ParamNode] NodeIterator findNode(const std::string& name)))
  // findNode searches only the node's direct child nodes ("B" and "C")
  TEST_EQUAL(pn.findNode("A")==pn.nodes.end(),true)
  TEST_EQUAL(pn.findNode("B")!=pn.nodes.end(),true)
  TEST_EQUAL(pn.findNode("C")!=pn.nodes.end(),true)
  TEST_EQUAL(pn.findNode("D")==pn.nodes.end(),true)
  TEST_EQUAL(pn.findNode("E")==pn.nodes.end(),true)
  TEST_EQUAL(pn.findNode("F")==pn.nodes.end(),true)
  TEST_EQUAL(pn.findNode("G")==pn.nodes.end(),true)
  TEST_EQUAL(pn.findNode("H")==pn.nodes.end(),true)
END_SECTION

START_SECTION(([Param::ParamNode] ParamNode* findParentOf(const std::string &name)))
  // findParentOf resolves ':'-separated paths and returns the node that
  // directly contains the addressed entry/node, or nullptr if unresolvable
  TEST_EQUAL(pn.findParentOf("A"),pn_nullPointer)
  TEST_EQUAL(pn.findParentOf("B"),&pn)
  TEST_EQUAL(pn.findParentOf("C"),&pn)
  TEST_EQUAL(pn.findParentOf("C:D"),&(pn.nodes[0]))
  TEST_EQUAL(pn.findParentOf("C:E"),&(pn.nodes[0]))
  TEST_EQUAL(pn.findParentOf("F"),pn_nullPointer)
  TEST_EQUAL(pn.findParentOf("B:G"),&(pn.nodes[1]))
  TEST_EQUAL(pn.findParentOf("X"),pn_nullPointer)
  TEST_EQUAL(pn.findParentOf("H:X"),pn_nullPointer)
  TEST_EQUAL(pn.findParentOf("H:C:X"),pn_nullPointer)
  TEST_EQUAL(pn.findParentOf("H:C:"),pn_nullPointer)
END_SECTION

START_SECTION(([Param::ParamNode] ParamEntry* findEntryRecursive(const std::string& name)))
  // findEntryRecursive resolves full ':'-paths down to the entry itself
  TEST_EQUAL(pn.findEntryRecursive("A"),pe_nullPointer)
  TEST_EQUAL(pn.findEntryRecursive("B"),&(pn.entries[0]))
  TEST_EQUAL(pn.findEntryRecursive("C"),pe_nullPointer)
  TEST_EQUAL(pn.findEntryRecursive("C:D"),&(pn.nodes[0].entries[0]))
  TEST_EQUAL(pn.findEntryRecursive("C:E"),&(pn.nodes[0].entries[1]))
  TEST_EQUAL(pn.findEntryRecursive("F"),pe_nullPointer)
  TEST_EQUAL(pn.findEntryRecursive("B:G"),&(pn.nodes[1].entries[0]))
  TEST_EQUAL(pn.findEntryRecursive("X"),pe_nullPointer)
  TEST_EQUAL(pn.findEntryRecursive("H:X"),pe_nullPointer)
  TEST_EQUAL(pn.findEntryRecursive("H:C:X"),pe_nullPointer)
  TEST_EQUAL(pn.findEntryRecursive("H:C:"),pe_nullPointer)
END_SECTION
//Dummy Tree (state after the size()/find*() sections above):
// A
//  |-B(1)
//  |-C
//  |  |-D(2)
//  |  |-E(3)
//  |-B
//  |  |-G(4)
//  |-F
//     |-H(5)
START_SECTION(([Param::ParamNode] void insert(const ParamNode& node, const std::string& prefix = "")))
  // inserting an unnamed node under a prefix creates the missing path nodes
  Param::ParamNode node("","");
  node.entries.push_back(Param::ParamEntry("H",5,"",{"advanced"}));
  pn.insert(node,"F");
  TEST_NOT_EQUAL(pn.findEntryRecursive("F:H"),pe_nullPointer)
  pn.insert(node,"F:Z");
  TEST_NOT_EQUAL(pn.findEntryRecursive("F:Z:H"),pe_nullPointer)
  // a trailing ':' yields an intermediate node with an empty name
  pn.insert(node,"F:Z:");
  TEST_NOT_EQUAL(pn.findEntryRecursive("F:Z::H"),pe_nullPointer)
  pn.insert(node,"FD:ZD:D");
  TEST_NOT_EQUAL(pn.findEntryRecursive("FD:ZD:D:H"),pe_nullPointer)
  // for a named node, the prefix is concatenated with the node name
  node.name = "W";
  pn.insert(node);
  TEST_NOT_EQUAL(pn.findEntryRecursive("W:H"),pe_nullPointer)
  pn.insert(node,"Q");
  TEST_NOT_EQUAL(pn.findEntryRecursive("QW:H"),pe_nullPointer)
END_SECTION

START_SECTION(([Param::ParamNode] void insert(const ParamEntry& entry, const std::string& prefix = "")))
  // ctor args are (name, value, description): value "" and description "5"
  Param::ParamEntry entry("H","","5",{"advanced"});
  pn.insert(entry);
  TEST_NOT_EQUAL(pn.findEntryRecursive("H"),pe_nullPointer)
  // the prefix is concatenated with the entry name without a ':' separator
  pn.insert(entry,"F");
  TEST_NOT_EQUAL(pn.findEntryRecursive("FH"),pe_nullPointer)
  pn.insert(entry,"G:");
  TEST_NOT_EQUAL(pn.findEntryRecursive("G:H"),pe_nullPointer)
  pn.insert(entry,"FD:ZD:D");
  TEST_NOT_EQUAL(pn.findEntryRecursive("FD:ZD:DH"),pe_nullPointer)
END_SECTION
////////////////// Param::ParamIterator ////////////////////////////
////////////////////////////////////////////////////////////////////
// Pointers shared by the ParamIterator constructor/destructor sections below
Param::ParamIterator* pi_ptr = nullptr;
Param::ParamIterator* pi_nullPointer = nullptr;

START_SECTION(([Param::ParamIterator] ParamIterator()))
  pi_ptr = new Param::ParamIterator();
  TEST_NOT_EQUAL(pi_ptr,pi_nullPointer)
END_SECTION

START_SECTION(([Param::ParamIterator] ~ParamIterator()))
  delete(pi_ptr);
END_SECTION

START_SECTION(([Param::ParamIterator] ParamIterator(const Param::ParamNode& root)))
  // constructing from a (here: empty) node must succeed
  Param::ParamNode node;
  pi_ptr = new Param::ParamIterator(node);
  TEST_NOT_EQUAL(pi_ptr,pi_nullPointer)
  delete pi_ptr;
END_SECTION

START_SECTION(([Param::ParamIterator] const Param::ParamEntry& operator*()))
  // dereferencing yields the first entry of the root node
  Param::ParamNode node;
  node.entries.push_back(Param::ParamEntry("name","value","description",{"advanced"}));
  Param::ParamIterator it(node);
  TEST_EQUAL((*it).name,"name")
  TEST_EQUAL((*it).value,"value");
  TEST_EQUAL((*it).description,"description")
  TEST_EQUAL((*it).tags.count("advanced")==1,true)
END_SECTION

START_SECTION(([Param::ParamIterator] const Param::ParamEntry* operator->()))
  // arrow access must agree with operator*
  Param::ParamNode node;
  node.entries.push_back(Param::ParamEntry("name","value","description",{"advanced"}));
  Param::ParamIterator it(node);
  TEST_EQUAL(it->name,"name");
  TEST_EQUAL(it->value,"value");
  TEST_EQUAL(it->description,"description");
  TEST_EQUAL(it->tags.count("advanced")==1,true);
END_SECTION
// Fixture used by the iteration/traversal sections below.
//complicated subtree
// Root
//  |-A=1
//  |-R
//  |  |-S
//  |  |  |-B=2
//  |  |  |-C=3
//  |  |
//  |  |-U (empty)
//  |
//  |-T
//     |-D=4
Param::ParamNode root, r, s, t, u;
root.name="root";
r.name="r";
s.name="s";
t.name="t";
root.entries.push_back(Param::ParamEntry("A","1",""));
s.entries.push_back(Param::ParamEntry("B","2",""));
s.description="s_desc";
s.entries.push_back(Param::ParamEntry("C","3",""));
t.entries.push_back(Param::ParamEntry("D","4",""));
// note: children are copied in, so later changes to s/t/u don't affect root
r.nodes.push_back(s);
u.description="empty";
r.nodes.push_back(u);
root.nodes.push_back(r);
root.nodes.push_back(t);
START_SECTION(([Param::ParamIterator] ParamIterator& operator++()))
  Param::ParamNode node;
  node.entries.push_back(Param::ParamEntry("name","value","description",{"advanced"}));
  node.entries.push_back(Param::ParamEntry("name2","value2","description2"));
  node.entries.push_back(Param::ParamEntry("name3","value3","description3",{"advanced"}));
  //linear list
  // pre-increment walks the flat entry list of a single node
  Param::ParamIterator it(node);
  ++it;
  TEST_EQUAL(it->name,"name2");
  TEST_EQUAL(it->value,"value2");
  TEST_EQUAL(it->description,"description2");
  TEST_EQUAL(it->tags.count("advanced")==1,false);
  ++it;
  TEST_EQUAL(it->name,"name3");
  TEST_EQUAL(it->value,"value3");
  TEST_EQUAL(it->description,"description3");
  TEST_EQUAL(it->tags.count("advanced")==1,true);
  ++it;
  //subtree
  // a copy of the node is pushed as its own child, giving the same three
  // entries again one level deeper (renamed name4..name6)
  node.name = "root";
  node.nodes.push_back(node);
  node.nodes[0].name = "tree";
  node.nodes[0].entries[0].name = "name4";
  node.nodes[0].entries[1].name = "name5";
  node.nodes[0].entries[2].name = "name6";
  it = Param::ParamIterator(node);
  TEST_EQUAL(it->name,"name");
  TEST_EQUAL(it->value,"value");
  TEST_EQUAL(it->description,"description");
  TEST_EQUAL(it->tags.count("advanced")==1,true);
  ++it;
  TEST_EQUAL(it->name,"name2");
  TEST_EQUAL(it->value,"value2");
  TEST_EQUAL(it->description,"description2");
  TEST_EQUAL(it->tags.count("advanced")==1,false);
  ++it;
  TEST_EQUAL(it->name,"name3");
  TEST_EQUAL(it->value,"value3");
  TEST_EQUAL(it->description,"description3");
  TEST_EQUAL(it->tags.count("advanced")==1,true);
  ++it;
  TEST_EQUAL(it->name,"name4");
  TEST_EQUAL(it->value,"value");
  TEST_EQUAL(it->description,"description");
  TEST_EQUAL(it->tags.count("advanced")==1,true);
  ++it;
  TEST_EQUAL(it->name,"name5");
  TEST_EQUAL(it->value,"value2");
  TEST_EQUAL(it->description,"description2");
  TEST_EQUAL(it->tags.count("advanced")==1,false);
  ++it;
  TEST_EQUAL(it->name,"name6");
  TEST_EQUAL(it->value,"value3");
  TEST_EQUAL(it->description,"description3");
  TEST_EQUAL(it->tags.count("advanced")==1,true);
  ++it;
  //complicated subtree
  // depth-first traversal of the file-level fixture: A, r:s:B, r:s:C, t:D
  Param::ParamIterator it2(root);
  TEST_EQUAL(it2->name,"A");
  TEST_EQUAL(it2->value,"1");
  ++it2;
  TEST_EQUAL(it2->name,"B");
  TEST_EQUAL(it2->value,"2");
  ++it2;
  TEST_EQUAL(it2->name,"C");
  TEST_EQUAL(it2->value,"3");
  ++it2;
  TEST_EQUAL(it2->name,"D");
  TEST_EQUAL(it2->value,"4");
  ++it2;
END_SECTION
START_SECTION(([Param::ParamIterator] ParamIterator operator++(int)))
  Param::ParamNode node;
  node.entries.push_back(Param::ParamEntry("name","value","description",{"advanced"}));
  node.entries.push_back(Param::ParamEntry("name2","value2","description2"));
  node.entries.push_back(Param::ParamEntry("name3","value3","description3",{"advanced"}));
  //linear list
  // post-increment must return the pre-increment state
  Param::ParamIterator it(node), it2(node);
  it2 = it++;
  TEST_EQUAL(it->name,"name2");
  TEST_EQUAL(it->value,"value2");
  TEST_EQUAL(it->description,"description2");
  TEST_EQUAL(it->tags.count("advanced")==1,false);
  TEST_EQUAL(it2->name,"name");
  TEST_EQUAL(it2->value,"value");
  TEST_EQUAL(it2->description,"description");
  TEST_EQUAL(it2->tags.count("advanced")==1,true);
END_SECTION

START_SECTION(([Param::ParamIterator] std::string getName() const))
  // getName returns the fully qualified ':'-separated path of the entry
  Param::ParamIterator it(root);
  TEST_EQUAL(it.getName(),"A");
  ++it;
  TEST_EQUAL(it.getName(),"r:s:B");
  ++it;
  TEST_EQUAL(it.getName(),"r:s:C");
  ++it;
  TEST_EQUAL(it.getName(),"t:D");
  ++it;
END_SECTION
START_SECTION(([Param::ParamIterator] bool operator==(const ParamIterator& rhs) const))
  // two iterators over the same tree compare equal exactly when they point at
  // the same position; the fixture has 4 entries, so 4 increments reach end()
  Param::ParamIterator begin(root), begin2(root), end;
  TEST_EQUAL(begin==end, false)
  TEST_TRUE(begin == begin)
  TEST_TRUE(begin == begin2)
  TEST_TRUE(end == end)
  ++begin;
  TEST_EQUAL(begin==begin2, false)
  TEST_EQUAL(begin==end, false)
  TEST_EQUAL(begin2==end, false)
  ++begin2;
  TEST_TRUE(begin == begin2)
  TEST_EQUAL(begin==end, false)
  TEST_EQUAL(begin2==end, false)
  ++begin;
  TEST_EQUAL(begin==begin2, false)
  TEST_EQUAL(begin==end, false)
  TEST_EQUAL(begin2==end, false)
  ++begin2;
  TEST_TRUE(begin == begin2)
  TEST_EQUAL(begin==end, false)
  TEST_EQUAL(begin2==end, false)
  ++begin;
  TEST_EQUAL(begin==begin2, false)
  TEST_EQUAL(begin==end, false)
  TEST_EQUAL(begin2==end, false)
  ++begin2;
  TEST_TRUE(begin == begin2)
  TEST_EQUAL(begin==end, false)
  TEST_EQUAL(begin2==end, false)
  ++begin;
  TEST_EQUAL(begin==begin2, false)
  TEST_TRUE(begin == end)
  TEST_EQUAL(begin2==end, false)
  ++begin2;
  TEST_TRUE(begin == begin2)
  TEST_TRUE(begin == end)
  TEST_TRUE(begin2 == end)
END_SECTION

START_SECTION(([Param::ParamIterator] bool operator!=(const ParamIterator& rhs) const))
  // NOTE(review): this section only exercises operator== (TEST_EQUAL with
  // ==); operator!= itself is never invoked here.
  Param::ParamIterator begin(root), begin2(root), end;
  TEST_EQUAL(begin==end, false)
  TEST_EQUAL(begin2==end, false)
  TEST_TRUE(begin == begin2)
  TEST_TRUE(begin == begin)
  TEST_TRUE(begin2 == begin2)
  TEST_TRUE(end == end)
END_SECTION
START_SECTION(([Param::ParamIterator] const std::vector< TraceInfo>& getTrace() const))
  //Recap:
  //complicated subtree
  // Root
  //  |-A=1
  //  |-R
  //  |  |-S
  //  |  |  |-B=2
  //  |  |  |-C=3
  //  |  |
  //  |  |-U (empty)
  //  |
  //  |-T
  //     |-D=4
  // getTrace records which nodes were opened/closed by the last increment
  //A
  Param::ParamIterator it(root);
  TEST_EQUAL(it.getTrace().size(),0);
  ++it;
  //r:s:B -- descending into r and s opens both
  TEST_EQUAL(it.getTrace().size(),2);
  TEST_EQUAL(it.getTrace()[0].name,"r");
  TEST_EQUAL(it.getTrace()[0].opened,true);
  TEST_EQUAL(it.getTrace()[1].name,"s");
  TEST_EQUAL(it.getTrace()[1].opened,true);
  TEST_EQUAL(it.getTrace()[1].description,"s_desc");
  ++it;
  //r:s:C -- same node, nothing opened or closed
  TEST_EQUAL(it.getTrace().size(),0);
  ++it;
  //t:D -- s and r are closed (the empty u is skipped), t is opened
  TEST_EQUAL(it.getTrace().size(),3);
  TEST_EQUAL(it.getTrace()[0].name,"s");
  TEST_EQUAL(it.getTrace()[0].opened,false);
  TEST_EQUAL(it.getTrace()[1].name,"r");
  TEST_EQUAL(it.getTrace()[1].opened,false);
  TEST_EQUAL(it.getTrace()[2].name,"t");
  TEST_EQUAL(it.getTrace()[2].opened,true);
  ++it;
  //end() -- leaving the tree closes t
  TEST_EQUAL(it.getTrace().size(),1);
  TEST_EQUAL(it.getTrace()[0].name,"t");
  TEST_EQUAL(it.getTrace()[0].opened,false);
END_SECTION
START_SECTION(([Param::ParamIterator] iterator type traits))
// Test that the iterator has the required type traits for C++ standard library compatibility
using iterator_type = Param::ParamIterator;
// Check iterator_category
TEST_EQUAL((std::is_same<typename iterator_type::iterator_category, std::forward_iterator_tag>::value), true);
// Check value_type
TEST_EQUAL((std::is_same<typename iterator_type::value_type, Param::ParamEntry>::value), true);
// Check difference_type
TEST_EQUAL((std::is_same<typename iterator_type::difference_type, std::ptrdiff_t>::value), true);
// Check pointer
TEST_EQUAL((std::is_same<typename iterator_type::pointer, const Param::ParamEntry*>::value), true);
// Check reference
TEST_EQUAL((std::is_same<typename iterator_type::reference, const Param::ParamEntry&>::value), true);
// Test that std::iterator_traits can extract our type definitions
using traits = std::iterator_traits<iterator_type>;
TEST_EQUAL((std::is_same<traits::iterator_category, std::forward_iterator_tag>::value), true);
TEST_EQUAL((std::is_same<traits::value_type, Param::ParamEntry>::value), true);
TEST_EQUAL((std::is_same<traits::difference_type, std::ptrdiff_t>::value), true);
TEST_EQUAL((std::is_same<traits::pointer, const Param::ParamEntry*>::value), true);
TEST_EQUAL((std::is_same<traits::reference, const Param::ParamEntry&>::value), true);
END_SECTION
///////////////////////// Param ///////////////////////////////
///////////////////////////////////////////////////////////////
Param* d10_ptr = nullptr;
Param* d10_nullPointer = nullptr;
START_SECTION((Param()))
// default construction yields a usable (non-null) object
d10_ptr = new Param();
TEST_NOT_EQUAL(d10_ptr, d10_nullPointer)
END_SECTION
START_SECTION((~Param()))
delete d10_ptr;
END_SECTION
START_SECTION((bool exists(const std::string& key) const))
// exists() is false for any key (including nested ones) on an empty Param
Param p;
TEST_EQUAL(p.exists(""), false)
TEST_EQUAL(p.exists("key"), false)
TEST_EQUAL(p.exists("key:value"), false)
END_SECTION
START_SECTION((bool hasSection(const std::string& key) const))
// hasSection() accepts section keys with and without a trailing ':'
Param p;
p.addSection("test", "");
p.addSection("test:test", "");
TEST_EQUAL(p.hasSection("test"), true)
TEST_EQUAL(p.hasSection("test:test"), true)
TEST_EQUAL(p.hasSection("test:"), true)
TEST_EQUAL(p.hasSection("test:test:"), true)
TEST_EQUAL(p.hasSection("sectionThatDoesNotExist"), false)
TEST_EQUAL(p.hasSection("AnotherSection:"), false)
TEST_EQUAL(p.hasSection("Section:WithinSection"), false)
TEST_EQUAL(p.hasSection("Section:WithinSection:"), false)
END_SECTION
START_SECTION((const DataValue& getValue(const std::string &key) const))
// querying a missing key throws ElementNotFound
Param p;
TEST_EXCEPTION(Exception::ElementNotFound, p.getValue(""))
TEST_EXCEPTION(Exception::ElementNotFound, p.getValue("key"))
TEST_EXCEPTION(Exception::ElementNotFound, p.getValue("key:value"))
END_SECTION
START_SECTION((const std::string& getSectionDescription(const std::string& key) const))
// unknown sections yield an empty description (no exception)
Param p;
TEST_EQUAL(p.getSectionDescription(""),"")
TEST_EQUAL(p.getSectionDescription("key"),"")
TEST_EQUAL(p.getSectionDescription("key:value"),"")
END_SECTION
START_SECTION((const std::string& getDescription(const std::string &key) const))
// unlike getSectionDescription(), getDescription() throws for missing entries
Param p;
TEST_EXCEPTION(Exception::ElementNotFound, p.getDescription(""))
TEST_EXCEPTION(Exception::ElementNotFound, p.getDescription("key"))
TEST_EXCEPTION(Exception::ElementNotFound, p.getDescription("key:value"))
END_SECTION
START_SECTION((const ParamEntry& getEntry(const std::string &key) const))
// getEntry() throws for missing entries as well
Param p;
TEST_EXCEPTION(Exception::ElementNotFound, p.getEntry(""))
TEST_EXCEPTION(Exception::ElementNotFound, p.getEntry("key"))
TEST_EXCEPTION(Exception::ElementNotFound, p.getEntry("key:value"))
END_SECTION
// NOTE(review): 'std::stringList' in the documented signature below looks like a
// typo — verify against Param.h (likely std::vector<std::string>).
START_SECTION((void setValue(const std::string &key, const DataValue& value, const std::string &description="", const std::stringList &tags=std::stringList())))
Param p;
// plain set: value stored, description empty, no tags
p.setValue("key","value");
TEST_EQUAL(p.exists("key"), true)
TEST_EQUAL(p.getValue("key"), "value")
TEST_EQUAL(p.getDescription("key"), "")
TEST_EQUAL(p.hasTag("key","advanced"), false)
// re-set with description and tag: all three updated
p.setValue("key","value","description",{"advanced"});
TEST_EQUAL(p.exists("key"), true)
TEST_EQUAL(p.getValue("key"), "value")
TEST_EQUAL(p.getDescription("key"), "description")
TEST_EQUAL(p.hasTag("key","advanced"), true)
// setting a nested key must not disturb the existing top-level entry
p.setValue("key:key","value2","description2");
TEST_EQUAL(p.exists("key"), true)
TEST_EQUAL(p.getValue("key"), "value")
TEST_EQUAL(p.getDescription("key"), "description")
TEST_EQUAL(p.hasTag("key","advanced"), true)
TEST_EQUAL(p.exists("key:key"), true)
TEST_EQUAL(p.getValue("key:key"), "value2")
TEST_EQUAL(p.getDescription("key:key"), "description2")
TEST_EQUAL(p.hasTag("key:key","advanced"), false)
END_SECTION
START_SECTION((std::vector<std::string> getTags(const std::string& key) const))
// missing key throws; a freshly set entry has no tags
Param p;
TEST_EXCEPTION(Exception::ElementNotFound, p.getTags("key"))
p.setValue("key","value");
TEST_EQUAL(p.getTags("key").size(),0)
END_SECTION
START_SECTION((void addTag(const std::string& key, const std::string& tag)))
Param p;
TEST_EXCEPTION(Exception::ElementNotFound, p.addTag("key","bla"))
std::vector<std::string> error_list;
error_list.push_back("a,b");
TEST_EXCEPTION(Exception::ElementNotFound, p.addTags("key",error_list))
p.setValue("key","value");
TEST_EQUAL(p.getTags("key").size(),0)
// adding the same tag twice is idempotent
p.addTag("key","advanced");
TEST_EQUAL(p.getTags("key").size(),1)
p.addTag("key","advanced");
TEST_EQUAL(p.getTags("key").size(),1)
p.addTag("key","advanced2");
TEST_EQUAL(p.getTags("key").size(),2)
END_SECTION
START_SECTION((bool hasTag(const std::string& key, const std::string& tag) const))
// missing key throws; otherwise reflects exactly the tags added so far
Param p;
TEST_EXCEPTION(Exception::ElementNotFound, p.hasTag("key","bla"))
p.setValue("key","value");
TEST_EQUAL(p.hasTag("key","advanced"),false)
TEST_EQUAL(p.hasTag("key","advanced2"),false)
p.addTag("key","advanced");
TEST_EQUAL(p.hasTag("key","advanced"),true)
TEST_EQUAL(p.hasTag("key","advanced2"),false)
p.addTag("key","advanced2");
TEST_EQUAL(p.hasTag("key","advanced"),true)
TEST_EQUAL(p.hasTag("key","advanced2"),true)
END_SECTION
START_SECTION((void addTags(const std::string& key, const std::vector<std::string>& tags)))
// missing key throws even for an empty tag list
Param p;
TEST_EXCEPTION(Exception::ElementNotFound, p.addTags("key",std::vector<std::string>()))
std::vector<std::string> error_list;
error_list.push_back("a,b");
TEST_EXCEPTION(Exception::ElementNotFound, p.addTags("key",error_list))
p.setValue("key","value");
TEST_EQUAL(p.hasTag("key","advanced"),false)
TEST_EQUAL(p.hasTag("key","advanced2"),false)
p.addTags("key",{"advanced","advanced2"});
TEST_EQUAL(p.hasTag("key","advanced"),true)
TEST_EQUAL(p.hasTag("key","advanced2"),true)
END_SECTION
START_SECTION((void clearTags(const std::string& key)))
// clearTags() removes all tags of an entry; missing key throws
Param p;
TEST_EXCEPTION(Exception::ElementNotFound, p.clearTags("key"))
p.setValue("key","value");
p.addTag("key","advanced");
TEST_EQUAL(p.getTags("key").size(),1)
p.clearTags("key");
TEST_EQUAL(p.getTags("key").size(),0)
END_SECTION
START_SECTION((bool empty() const))
// empty() flips to false after setting either a flat or nested value
Param p;
TEST_EQUAL(p.empty(), true)
p.setValue("key",17.4f);
TEST_EQUAL(p.empty(), false)
Param p2;
TEST_EQUAL(p2.empty(), true)
p2.setValue("a:key",17.4f);
TEST_EQUAL(p2.empty(), false)
END_SECTION
START_SECTION((void clear()))
// clear() empties the Param regardless of nesting depth
Param p;
p.setValue("key",17.4,"keydesc");
p.clear();
TEST_EQUAL(p.empty(), true)
Param p2;
p2.setValue("a:b:key",17.4,"keydesc");
p2.clear();
TEST_EQUAL(p2.empty(), true)
END_SECTION
START_SECTION((Size size() const))
// size() counts leaf entries only; overwriting an existing key does not grow it
Param p;
TEST_EQUAL(p.size(), 0)
p.setValue("key",17.4f);
TEST_EQUAL(p.size(), 1)
p.setValue("key",17.4f);
TEST_EQUAL(p.size(), 1)
p.setValue("key:a",17.5f);
TEST_EQUAL(p.size(), 2)
p.setValue("key:a",18.5f);
TEST_EQUAL(p.size(), 2)
p.setValue("key:b",18.5f);
TEST_EQUAL(p.size(), 3)
p.setValue("b",18.5f);
TEST_EQUAL(p.size(), 4)
END_SECTION
START_SECTION((void setSectionDescription(const std::string &key, const std::string &description)))
// descriptions can be attached to top-level and nested sections independently
Param p;
p.setValue("test:test",47.1);
p.setValue("test2:test",47.1);
p.setValue("test:test2:test",47.1);
p.setValue("test:test:test",47.1);
p.setSectionDescription("test","a");
p.setSectionDescription("test2","b");
p.setSectionDescription("test:test","c");
p.setSectionDescription("test:test2","d");
TEST_EQUAL(p.getSectionDescription("test"), "a")
TEST_EQUAL(p.getSectionDescription("test2"), "b")
TEST_EQUAL(p.getSectionDescription("test:test"), "c")
TEST_EQUAL(p.getSectionDescription("test:test2"), "d")
END_SECTION
START_SECTION([EXTRA](friend std::ostream& operator << (std::ostream& os, const Param& param)))
// stream output format: "key" -> "value" [(description)], nesting joined with '|'
Param p;
p.setValue("key", 17.5);
stringstream ss;
ss << p;
TEST_EQUAL(ss.str(), "\"key\" -> \"17.5\"\n")
ss.str("");
p.setValue("key", 17.5, "thiskey");
ss<<p;
TEST_EQUAL(ss.str(), "\"key\" -> \"17.5\" (thiskey)\n")
ss.str("");
p.clear();
p.setValue("tree:key", 17.5);
ss<<p;
TEST_EQUAL(ss.str(), "\"tree|key\" -> \"17.5\"\n")
END_SECTION
START_SECTION((void insert(const std::string& prefix, const Param &param)))
// insert() prepends the given prefix verbatim (no ':' is added automatically)
Param p;
p.setValue("a",17,"intdesc");
p.setValue("n1:b",17.4f,"floatdesc");
p.setValue("n1:c","test,test,test","stringdesc");
p.setValue("n2:d",17.5f);
p.setSectionDescription("n1","sectiondesc");
Param p2;
// prefix without ':' is glued directly onto the key names
p2.insert("prefix",p);
TEST_EQUAL(p2.size(),4)
TEST_EQUAL(Int(p2.getValue("prefixa")), 17)
TEST_STRING_EQUAL(p2.getDescription("prefixa"), "intdesc")
TEST_REAL_SIMILAR(float(p2.getValue("prefixn1:b")), 17.4)
TEST_STRING_EQUAL(p2.getDescription("prefixn1:b"), "floatdesc")
TEST_EQUAL(p2.getValue("prefixn1:c"), "test,test,test")
TEST_STRING_EQUAL(p2.getDescription("prefixn1:c"), "stringdesc")
TEST_REAL_SIMILAR(float(p2.getValue("prefixn2:d")), 17.5)
TEST_STRING_EQUAL(p2.getDescription("prefixn2:d"), "")
TEST_EQUAL(p2.getSectionDescription("prefixn1"),"sectiondesc")
// empty prefix inserts at the top level
p2.insert("",p);
TEST_EQUAL(p2.size(),8)
TEST_EQUAL(Int(p2.getValue("a")), 17)
TEST_STRING_EQUAL(p2.getDescription("a"), "intdesc")
TEST_REAL_SIMILAR(float(p2.getValue("n1:b")), 17.4)
TEST_STRING_EQUAL(p2.getDescription("n1:b"), "floatdesc")
TEST_EQUAL(p2.getValue("n1:c"), "test,test,test")
TEST_STRING_EQUAL(p2.getDescription("n1:c"), "stringdesc")
TEST_REAL_SIMILAR(float(p2.getValue("n2:d")), 17.5)
TEST_STRING_EQUAL(p2.getDescription("n2:d"), "")
TEST_EQUAL(p2.getSectionDescription("n1"),"sectiondesc")
// prefix with trailing ':' creates a proper subsection
p2.insert("n3:",p);
TEST_EQUAL(p2.size(),12)
TEST_EQUAL(Int(p2.getValue("n3:a")), 17)
TEST_STRING_EQUAL(p2.getDescription("n3:a"), "intdesc")
TEST_REAL_SIMILAR(float(p2.getValue("n3:n1:b")), 17.4)
TEST_STRING_EQUAL(p2.getDescription("n3:n1:b"), "floatdesc")
TEST_EQUAL(p2.getValue("n3:n1:c"), "test,test,test")
TEST_STRING_EQUAL(p2.getDescription("n3:n1:c"), "stringdesc")
TEST_REAL_SIMILAR(float(p2.getValue("n3:n2:d")), 17.5)
TEST_STRING_EQUAL(p2.getDescription("n3:n2:d"), "")
TEST_EQUAL(p2.getSectionDescription("n3:n1"),"sectiondesc")
// inserting existing keys overwrites the values without growing the Param
p.clear();
p.setValue("a",18,"intdesc");
p.setValue("n1:b",17.7f,"floatdesc");
p.setValue("n1:c","test,test,test,test","stringdesc");
p.setValue("n2:d",17.8f);
p2.insert("",p);
TEST_EQUAL(p2.size(),12)
TEST_EQUAL(Int(p2.getValue("a")), 18)
TEST_REAL_SIMILAR(float(p2.getValue("n1:b")), 17.7)
TEST_EQUAL(p2.getValue("n1:c"), "test,test,test,test")
TEST_REAL_SIMILAR(float(p2.getValue("n2:d")), 17.8)
END_SECTION
// shared fixture for the copy-constructor / assignment / copy() / remove() sections below
Param p_src;
p_src.setValue("test:float",17.4f,"floatdesc");
p_src.setValue("test:string","test,test,test","stringdesc");
p_src.setValue("test:int",17,"intdesc");
p_src.setValue("test2:float",17.5f);
p_src.setValue("test2:string","test2");
p_src.setValue("test2:int",18);
p_src.setSectionDescription("test","sectiondesc");
p_src.addTags("test:float", {"a", "b", "c"});
START_SECTION((Param(const Param& rhs)))
// the copy carries values, descriptions, section descriptions and tags
Param p2(p_src);
TEST_REAL_SIMILAR(float(p2.getValue("test:float")), 17.4)
TEST_STRING_EQUAL(p_src.getDescription("test:float"), "floatdesc")
TEST_EQUAL(p2.getValue("test:string"), "test,test,test")
TEST_STRING_EQUAL(p_src.getDescription("test:string"), "stringdesc")
TEST_EQUAL(Int(p2.getValue("test:int")), 17)
TEST_STRING_EQUAL(p_src.getDescription("test:int"), "intdesc")
TEST_REAL_SIMILAR(float(p2.getValue("test2:float")), 17.5)
TEST_STRING_EQUAL(p2.getDescription("test2:float"), "")
TEST_EQUAL(p2.getValue("test2:string"), "test2")
TEST_STRING_EQUAL(p2.getDescription("test2:string"), "")
TEST_EQUAL(Int(p2.getValue("test2:int")), 18)
TEST_STRING_EQUAL(p2.getDescription("test2:int"), "")
TEST_EQUAL(p2.getSectionDescription("test"),"sectiondesc")
TEST_EQUAL(p2.getTags("test:float").size(), 3)
TEST_EQUAL(p2.getTags("test:float") == ListUtils::create<std::string>("a,b,c"), true)
END_SECTION
START_SECTION((Param& operator = (const Param& rhs)))
// assignment copies the same state as the copy constructor above
Param p2;
p2=p_src;
TEST_REAL_SIMILAR(float(p2.getValue("test:float")), 17.4)
TEST_STRING_EQUAL(p_src.getDescription("test:float"), "floatdesc")
TEST_EQUAL(p2.getValue("test:string"), "test,test,test")
TEST_STRING_EQUAL(p_src.getDescription("test:string"), "stringdesc")
TEST_EQUAL(Int(p2.getValue("test:int")), 17)
TEST_STRING_EQUAL(p2.getDescription("test:int"), "intdesc")
TEST_REAL_SIMILAR(float(p2.getValue("test2:float")), 17.5)
TEST_STRING_EQUAL(p2.getDescription("test2:float"), "")
TEST_EQUAL(p2.getValue("test2:string"), "test2")
TEST_STRING_EQUAL(p2.getDescription("test2:string"), "")
TEST_EQUAL(Int(p2.getValue("test2:int")), 18)
TEST_STRING_EQUAL(p2.getDescription("test2:int"), "")
TEST_EQUAL(p2.getSectionDescription("test"),"sectiondesc")
TEST_EQUAL(p2.getTags("test:float").size(), 3)
TEST_EQUAL(p2.getTags("test:float") == ListUtils::create<std::string>("a,b,c"), true)
END_SECTION
START_SECTION((Param copy(const std::string &prefix, bool remove_prefix=false) const))
Param p2;
// unknown prefix yields an empty copy
p2 = p_src.copy("notthere:");
TEST_EQUAL((p2.empty()),true)
// prefix with trailing ':' copies only that section, keys keep the prefix
p2 = p_src.copy("test:");
TEST_REAL_SIMILAR(float(p2.getValue("test:float")), 17.4)
TEST_STRING_EQUAL(p2.getDescription("test:float"), "floatdesc")
TEST_EQUAL(p2.getValue("test:string"), "test,test,test")
TEST_STRING_EQUAL(p2.getDescription("test:int"), "intdesc")
TEST_EQUAL(Int(p2.getValue("test:int")), 17)
TEST_STRING_EQUAL(p2.getDescription("test:string"), "stringdesc")
TEST_EXCEPTION(Exception::ElementNotFound, p2.getValue("test2:float"))
// remove_prefix=true strips the prefix from the copied keys
p2 = p_src.copy("test:",true);
TEST_REAL_SIMILAR(float(p2.getValue("float")), 17.4)
TEST_STRING_EQUAL(p2.getDescription("float"), "floatdesc")
TEST_EQUAL(p2.getValue("string"), "test,test,test")
TEST_STRING_EQUAL(p2.getDescription("string"), "stringdesc")
// prefix without ':' matches by string prefix, so "test" also matches "test2"
p2 = p_src.copy("test");
TEST_REAL_SIMILAR(float(p2.getValue("test:float")), 17.4)
TEST_STRING_EQUAL(p2.getDescription("test:float"), "floatdesc")
TEST_EQUAL(p2.getValue("test:string"), "test,test,test")
TEST_STRING_EQUAL(p2.getDescription("test:string"), "stringdesc")
TEST_EQUAL(Int(p2.getValue("test:int")), 17)
TEST_STRING_EQUAL(p2.getDescription("test:int"), "intdesc")
TEST_REAL_SIMILAR(float(p2.getValue("test2:float")), 17.5)
TEST_STRING_EQUAL(p2.getDescription("test2:float"), "")
TEST_EQUAL(p2.getValue("test2:string"), "test2")
TEST_STRING_EQUAL(p2.getDescription("test2:string"), "")
TEST_EQUAL(Int(p2.getValue("test2:int")), 18)
TEST_STRING_EQUAL(p2.getDescription("test2:int"), "")
TEST_EQUAL(p2.getSectionDescription("test"),"sectiondesc")
END_SECTION
START_SECTION((void remove(const std::string& key)))
// remove() deletes exact leaf keys only (partial/section names are no-ops)
Param p2(p_src);
p2.setValue("test:string2","test,test");
TEST_EQUAL(p2.size(),7)
p2.remove("test");
TEST_EQUAL(p2.size(),7)
p2.remove("test2");
TEST_EQUAL(p2.size(),7)
p2.remove("test:strin");
TEST_EQUAL(p2.size(),7)
p2.remove("test:string");
TEST_EQUAL(p2.size(),6)
p2.remove("test:string2");
TEST_EQUAL(p2.size(),5)
p2.remove("test:float");
TEST_EQUAL(p2.size(),4)
p2.remove("test:int");
TEST_EQUAL(p2.size(),3)
// test deletion of nodes (when using a trailing ':')
p2 = p_src;
p2.setValue("test:string2","an entry");
p2.setValue("test:string2:e1","subnode with entries");
p2.setValue("test:string2:sn2","subsubnode with entries");
p2.setValue("test:string2:sn2:e1","subsubnode with entries");
p2.setValue("test:string2:sn2:e2","subsubnode with entries");
Param p3 = p2;
TEST_EQUAL(p2.size(),11)
std::cout << "p2 is " << p2 << "\n";
p2.remove("test:"); // test subtree removal
TEST_EQUAL(p2.size(),3)
p3.remove("test:string2:sn2:e2:"); // nothing should happen
TEST_EQUAL(p3.size(),11)
p3.remove("test:string2:sn2:e1"); // delete one, the parent node is still populated
TEST_EQUAL(p3.size(),10)
p3.remove("test:string2:sn2:e2"); // delete last entry in subnode sn2
TEST_EQUAL(p3.size(),9)
END_SECTION
START_SECTION((void removeAll(const std::string& prefix)))
// removeAll() deletes every entry whose key starts with the given prefix
Param p2(p_src);
p2.removeAll("test:float");
TEST_EXCEPTION(Exception::ElementNotFound, p2.getValue("test:float"))
TEST_EQUAL(p2.getValue("test:string"), "test,test,test")
TEST_EQUAL(Int(p2.getValue("test:int")), 17)
TEST_REAL_SIMILAR(float(p2.getValue("test2:float")), 17.5)
TEST_EQUAL(p2.getValue("test2:string"), "test2")
TEST_EQUAL(Int(p2.getValue("test2:int")), 18)
TEST_EQUAL(p2.getSectionDescription("test"),"sectiondesc")
// "test:" removes only the 'test' section, leaving 'test2' untouched
p2.removeAll("test:");
TEST_EXCEPTION(Exception::ElementNotFound, p2.getValue("test:string"))
TEST_EXCEPTION(Exception::ElementNotFound, p2.getValue("test:int"))
TEST_REAL_SIMILAR(float(p2.getValue("test2:float")), 17.5)
TEST_EQUAL(p2.getValue("test2:string"), "test2")
TEST_EQUAL(Int(p2.getValue("test2:int")), 18)
// plain "test" is a string prefix, so it also matches "test2" -> empty
p2.removeAll("test");
TEST_EQUAL(p2.empty(),true)
cout << p2;
END_SECTION
START_SECTION((bool operator == (const Param& rhs) const))
// differences in value, extra entries, or missing entries break equality
Param p2(p_src);
TEST_TRUE(p_src == p2)
p2.setValue("test:float",17.5f);
TEST_EQUAL(p_src==p2, false)
p2 = p_src;
p2.setValue("test:float3",17.4f);
TEST_EQUAL(p_src==p2, false)
p2 = p_src;
p2.removeAll("test:float");
TEST_EQUAL(p_src==p2, false)
//it should be independent of entry order
Param p3,p4;
p3.setValue("1",1);
p3.setValue("2",2);
p4.setValue("2",2);
p4.setValue("1",1);
TEST_TRUE(p3 == p4)
//it should be independent of node order
Param p5,p6;
p5.setValue("1:1",1);
p5.setValue("2:1",1);
p6.setValue("2:1",1);
p6.setValue("1:1",1);
TEST_TRUE(p5 == p6)
END_SECTION
START_SECTION((void setDefaults(const Param& defaults, const std::string& prefix="", bool showMessage=false)))
// setDefaults() adds missing entries but never overwrites existing values
Param defaults;
defaults.setValue("float",1.0f,"float");
defaults.setValue("float2",2.0f,"float2");
defaults.setValue("string","default string1","string");
defaults.setValue("string2","default string2","string2");
defaults.setValue("PATH:onlyfordescription",45.2);
defaults.setValue("stringlist",std::vector<std::string>{"a","b","c"},"stringlist");
defaults.setValue("stringlist2",std::vector<std::string>{"d","e","f"},"stringlist2");
defaults.setValue("intlist",ListUtils::create<Int>("1,2,3"),"intlist");
defaults.setValue("intlist2",ListUtils::create<Int>("11,22,33"),"intlist2");
defaults.setValue("doublelist",ListUtils::create<double>("1.2,2.3"),"doublelist");
defaults.setValue("doublelist2",ListUtils::create<double>("11.22,22.33"),"doublelist2");
defaults.setSectionDescription("PATH","PATHdesc");
Param p2;
p2.setValue("PATH:float",-1.0f,"PATH:float");
p2.setValue("PATH:string","some string","PATH:string");
p2.setValue("float",-2.0f,"float");
p2.setValue("string","other string","string");
p2.setValue("PATH:stringlist",std::vector<std::string>{"d","a","v","i","d"},"PATH:stringlist");
p2.setValue("stringlist",std::vector<std::string>{"r","o","c","k","s"},"stringlist");
p2.setValue("PATH:intlist2",ListUtils::create<Int>("14,9"),"PATH:intlist2");
p2.setValue("intlist", ListUtils::create<Int>("16,9"),"intlist");
p2.setValue("PATH:doublelist2",ListUtils::create<double>("6.66,6.16"),"PATH:doublelist2");
p2.setValue("doublelist",ListUtils::create<double>("1.2,5.55"),"doublelist");
TEST_EQUAL(p2.size(),10);
// no prefix: existing values kept, the 6 missing defaults are added
p2.setDefaults(defaults);
TEST_EQUAL(p2.size(),16);
TEST_REAL_SIMILAR(float(p2.getValue("float")),-2.0);
TEST_STRING_EQUAL(p2.getDescription("float"),"float");
TEST_REAL_SIMILAR(float(p2.getValue("float2")),2.0);
TEST_STRING_EQUAL(p2.getDescription("float2"),"float2");
TEST_EQUAL(string(p2.getValue("string")),"other string");
TEST_STRING_EQUAL(p2.getDescription("string"),"string");
TEST_EQUAL(string(p2.getValue("string2")),"default string2");
TEST_STRING_EQUAL(p2.getDescription("string2"),"string2");
TEST_STRING_EQUAL(p2.getSectionDescription("PATH"),"PATHdesc");
TEST_EQUAL(p2.getValue("stringlist") == ListUtils::create<std::string>("r,o,c,k,s"), true)
TEST_EQUAL(p2.getValue("intlist") == ListUtils::create<Int>("16,9"), true)
TEST_EQUAL(p2.getValue("doublelist") == ListUtils::create<double>("1.2,5.55"), true)
TEST_EQUAL(p2.getValue("stringlist2") == ListUtils::create<std::string>("d,e,f"), true)
TEST_EQUAL(p2.getValue("intlist2") == ListUtils::create<Int>("11,22,33"), true)
TEST_EQUAL(p2.getValue("doublelist2") == ListUtils::create<double>("11.22,22.33"), true)
// with prefix "PATH": defaults are applied inside the PATH section
p2.setDefaults(defaults,"PATH");
TEST_EQUAL(p2.size(),22);
TEST_REAL_SIMILAR(float(p2.getValue("PATH:float")),-1.0);
TEST_STRING_EQUAL(p2.getDescription("PATH:float"),"PATH:float");
TEST_REAL_SIMILAR(float(p2.getValue("PATH:float2")),2.0);
TEST_STRING_EQUAL(p2.getDescription("PATH:float2"),"float2");
TEST_EQUAL(string(p2.getValue("PATH:string")),"some string");
TEST_STRING_EQUAL(p2.getDescription("PATH:string"),"PATH:string");
TEST_EQUAL(string(p2.getValue("PATH:string2")),"default string2");
TEST_STRING_EQUAL(p2.getDescription("PATH:string2"),"string2");
TEST_STRING_EQUAL(p2.getSectionDescription("PATH"),"PATHdesc");
TEST_STRING_EQUAL(p2.getSectionDescription("PATH:PATH"),"PATHdesc");
TEST_EQUAL(p2.getValue("PATH:stringlist") == ListUtils::create<std::string>("d,a,v,i,d"), true)
TEST_EQUAL(p2.getValue("PATH:intlist") == ListUtils::create<Int>("1,2,3"), true)
TEST_EQUAL(p2.getValue("PATH:doublelist") == ListUtils::create<double>("1.2,2.3"), true)
END_SECTION
// Fixture argv arrays for the parseCommandLine() sections below.
const char* a1 ="executable";
const char* a2 ="-a";
const char* a3 ="av";
const char* a4 ="-b";
const char* a5 ="bv";
const char* a6 ="-c";
const char* a7 ="cv";
const char* a8 ="rv1";
const char* a9 ="rv2";
const char* a10="-1.0";
const char* command_line[9]; // "executable -a av -b bv -c cv rv1 rv2"
command_line[0] = a1;
command_line[1] = a2;
command_line[2] = a3;
command_line[3] = a4;
command_line[4] = a5;
command_line[5] = a6;
command_line[6] = a7;
command_line[7] = a8;
command_line[8] = a9;
const char* command_line2[6]; // "executable -a av -b -c cv"
command_line2[0] = a1;
command_line2[1] = a2;
command_line2[2] = a3;
command_line2[3] = a4;
command_line2[4] = a6;
command_line2[5] = a7;
const char* command_line3[6]; // "executable -a -b -c cv rv1"
command_line3[0] = a1;
command_line3[1] = a2;
command_line3[2] = a4;
command_line3[3] = a6;
command_line3[4] = a7;
command_line3[5] = a8;
const char* command_line4[10]; // "executable -a -1.0 -b bv -c cv rv1 rv2 -1.0"
command_line4[0] = a1;
command_line4[1] = a2;
command_line4[2] = a10;
command_line4[3] = a4;
command_line4[4] = a5;
command_line4[5] = a6;
command_line4[6] = a7;
command_line4[7] = a8;
command_line4[8] = a9;
command_line4[9] = a10;
START_SECTION((void parseCommandLine(const int argc, const char **argv, const std::string& prefix="")))
// options become "<prefix>:<flag>" entries; positional args collect under "misc"
Param p2,p3;
p2.parseCommandLine(9,command_line,"test4");
p3.setValue("test4:-a","av");
p3.setValue("test4:-b","bv");
p3.setValue("test4:-c","cv");
p3.setValue("test4:misc",std::vector<std::string>{"rv1","rv2"});
TEST_EQUAL(p2==p3,true)
// a flag followed by another flag gets an empty value ("-b" here)
Param p20,p30;
p20.parseCommandLine(6,command_line2);
p30.setValue("-a","av");
p30.setValue("-b","");
p30.setValue("-c","cv");
TEST_EQUAL(p20==p30,true)
// a value that looks like a negative number ("-1.0") is still treated as a value
Param p200,p300;
p200.parseCommandLine(10,command_line4,"test4");
p300.setValue("test4:-a","-1.0");
p300.setValue("test4:-b","bv");
p300.setValue("test4:-c","cv");
p300.setValue("test4:misc",std::vector<std::string>{"rv1","rv2","-1.0"});
TEST_EQUAL(p200==p300,true)
END_SECTION
// Fixture argv for multi-value option parsing.
const char* m1 ="mult";
const char* m2 ="-d";
const char* m3 ="1.333";
const char* m4 ="2.23";
const char* m5 ="3";
const char* m6 ="-e";
const char* m7 ="4";
const char* m8 ="-f";
const char* m9 ="-g";
const char* command_line_mult[9]; // "mult -d 1.333 2.23 3 -e 4 -f -g"
command_line_mult[0] = m1;
command_line_mult[1] = m2;
command_line_mult[2] = m3;
command_line_mult[3] = m4;
command_line_mult[4] = m5;
command_line_mult[5] = m6;
command_line_mult[6] = m7;
command_line_mult[7] = m8;
command_line_mult[8] = m9;
START_SECTION((void parseCommandLine(const int argc, const char **argv, const std::map< std::string, std::string > &options_with_one_argument, const std::map< std::string, std::string > &options_without_argument, const std::map< std::string, std::string > &options_with_multiple_argument, const std::string &misc="misc", const std::string &unknown="unknown")))
// Map-based overload: each recognized flag is stored under the mapped name;
// leftovers go to the 'misc' key, unrecognized flags to the 'unknown' key.
std::map<std::string,std::string> with_one,without,with_multiple;
with_one["-a"]="a";
with_one["-b"]="b";
with_one["-c"]="c";
with_multiple["-d"] = "d";
with_multiple["-e"] = "e";
with_multiple["-f"] = "f";
with_multiple["-g"] = "g";
Param p2,p3;
// "-1.0" after "-a" must be consumed as its value, not as a flag
p2.parseCommandLine(10,command_line4,with_one,without,with_multiple,"misc_","unknown_");
p3.setValue("a","-1.0");
p3.setValue("b","bv");
p3.setValue("c","cv");
p3.setValue("misc_",std::vector<std::string>{"rv1","rv2","-1.0"});
TEST_EQUAL(p2==p3,true)
Param p4,p5;
p4.parseCommandLine(9,command_line,with_one,without,with_multiple,"misc_","unknown_");
p5.setValue("a","av");
p5.setValue("b","bv");
p5.setValue("c","cv");
p5.setValue("misc_",std::vector<std::string>{"rv1","rv2"});
TEST_EQUAL(p4==p5,true)
// reclassify: only -a takes a value, -b is a plain switch, -c becomes unknown
with_one.clear();
with_one["-a"]="a";
without["-b"]="b";
Param p40,p50;
p40.parseCommandLine(9,command_line,with_one,without,with_multiple,"misc__","unknown__");
p50.setValue("a","av");
p50.setValue("b","true");
p50.setValue("misc__",std::vector<std::string>{"bv","cv","rv1","rv2"});
p50.setValue("unknown__",std::vector<std::string>{"-c"});
TEST_EQUAL(p40==p50,true)
//"executable -a av -b -c cv"
Param p400,p500;
p400.parseCommandLine(6,command_line2,with_one,without,with_multiple,"misc__","unknown__");
p500.setValue("a","av");
p500.setValue("b","true");
p500.setValue("misc__",std::vector<std::string>{"cv"});
p500.setValue("unknown__",std::vector<std::string>{"-c"});
TEST_EQUAL(p400==p500,true)
//"executable -a -b -c cv rv1"
// -a immediately followed by another flag gets an empty value
Param p4000,p5000;
p4000.parseCommandLine(6,command_line3,with_one,without,with_multiple,"misc__","unknown__");
p5000.setValue("a","");
p5000.setValue("b","true");
p5000.setValue("misc__",std::vector<std::string>{"cv","rv1"});
p5000.setValue("unknown__",std::vector<std::string>{"-c"});
TEST_EQUAL(p4000==p5000,true)
// list options:
// multi-argument flags collect all following values until the next flag;
// a multi flag with no values yields an empty list
// (fixed: the unknown-bucket name was misspelled "unkown__" here)
Param p6,p7;
p6.parseCommandLine(9,command_line_mult,with_one,without,with_multiple,"misc__","unknown__");
p7.setValue("d",std::vector<std::string>{"1.333","2.23","3"});
p7.setValue("e",std::vector<std::string>{"4"});
p7.setValue("f",std::vector<std::string>());
p7.setValue("g",std::vector<std::string>());
TEST_EQUAL(p6,p7);
// truncated argv (argc=4): only "mult -d 1.333 2.23" is parsed
Param p8,p9;
p9.parseCommandLine(4,command_line_mult,with_one,without,with_multiple,"misc__","unknown__");
p8.setValue("d", std::vector<std::string>{"1.333","2.23"});
TEST_EQUAL(p9,p8);
END_SECTION
START_SECTION((void update(const Param& old_version, const bool add_unknown, Logger::LogStream& stream)))
// update() carries user-changed values from an old Param into new defaults,
// keeping reserved entries ('version', '<n>:type') at their new values.
Param common;
common.setValue("float",1.0f,"float");
common.setValue("float2",2.0f,"float2");
common.setValue("string","default string1","string");
common.setValue("string2","default string2","string2");
common.setValue("PATH:onlyfordescription",45.2);
common.setValue("stringlist",std::vector<std::string>{"a","b","c"},"stringlist");
common.setValue("stringlist2",std::vector<std::string>{"d","e","f"},"stringlist2");
common.setValue("intlist",ListUtils::create<Int>("1,2,3"),"intlist");
// copy and alter
Param old = common;
//old.setValue("recently_removed_float",1.1f,"float"); // should not make it into new param
old.setValue("old_type","a string","string");
old.setValue("some:version","1.2","old version");
old.setValue("some:1:type","unlabeled","type");
old.setValue("some:type","unlabeled","type");
old.setValue("stringlist2",std::vector<std::string>{"d","e","f","altered"},"stringlist2"); // change some values, we expect them to show up after update()
old.setValue("intlist",ListUtils::create<Int>("3"),"intlist");
Param defaults = common;
defaults.setValue("old_type",3,"old_type has evolved from string to int"); // as type has changed, this value should be kept
defaults.setValue("some:version","1.9","new version"); // this value should be kept (due to its reserved name)
defaults.setValue("some:1:type","information","type"); // this value should be kept (due to its reserved name at depth 2)
defaults.setValue("some:type","information","type"); // this value should NOT be kept (wrong depth)
defaults.setValue("new_value",3,"new param not present in old");
Param expected = defaults;
expected.setValue("stringlist2",std::vector<std::string>{"d","e","f","altered"},"stringlist2"); // change some values, we expect them to show up after update()
expected.setValue("intlist",ListUtils::create<Int>("3"),"intlist");
expected.setValue("some:type","unlabeled","type");
// update()
defaults.update(old);
TEST_EQUAL(defaults,expected);
END_SECTION
START_SECTION((void merge(const Param& toMerge)))
{
Param original;
original.setValue("a", 2.0f, "a value");
original.setMinFloat("a", 0.0f);
original.setValue("b", "value", "b value");
Param toMerge;
toMerge.setValue("b", "value", "a value");
toMerge.setValue("section:a", "a-value", "section:a");
toMerge.setSectionDescription("section", "section description");
toMerge.setValue("section:b", "b-value", "section:b");
Param expected;
expected.setValue("a", 2.0f, "a value");
expected.setMinFloat("a", 0.0f);
expected.setValue("b", "value", "b value");
expected.setValue("section:a", "a-value", "section:a");
expected.setValue("section:b", "b-value", "section:b");
expected.setSectionDescription("section", "section description");
original.merge(toMerge);
TEST_EQUAL(original, expected)
TEST_EQUAL(original.getSectionDescription("section"),expected.getSectionDescription("section"))
Param p1;
p1.setValue("in", "in-value", "in-description");
p1.setValue("out", "out-value", "out-description");
p1.setValue("reference:index", "reference:index value", "reference:index description");
p1.setSectionDescription("reference", "reference description");
p1.setValue("algorithm:sub_param", "algorithm:sub_param value", "algorithm:sub_param description");
Param p2;
p2.setValue("reference:index", "reference:index value", "reference:index description");
p2.setSectionDescription("reference", "reference description");
p2.setValue("algorithm:sub_param", "algorithm:sub_param value", "algorithm:sub_param description");
p2.setValue("algorithm:superimposer:mz_pair_max_distance", "algorithm:superimposer:mz_pair_max_distance value", "algorithm:superimposer:mz_pair_max_distance description");
p2.setSectionDescription("algorithm", "algorithm description");
p2.setSectionDescription("algorithm:superimposer", "algorithm:superimposer description");
Param expected_2;
expected_2.setValue("in", "in-value", "in-description");
expected_2.setValue("out", "out-value", "out-description");
expected_2.setValue("algorithm:sub_param", "algorithm:sub_param value", "algorithm:sub_param description");
expected_2.setValue("reference:index", "reference:index value", "reference:index description");
expected_2.setSectionDescription("reference", "reference description");
expected_2.setValue("algorithm:superimposer:mz_pair_max_distance", "algorithm:superimposer:mz_pair_max_distance value", "algorithm:superimposer:mz_pair_max_distance description");
expected_2.setSectionDescription("algorithm", "algorithm description");
expected_2.setSectionDescription("algorithm:superimposer", "algorithm:superimposer description");
p1.merge(p2);
TEST_EQUAL(p1, expected_2)
TEST_EQUAL(p1.getSectionDescription("algorithm"),expected_2.getSectionDescription("algorithm"))
TEST_EQUAL(p1.getSectionDescription("algorithm:superimposer"),expected_2.getSectionDescription("algorithm:superimposer"))
TEST_EQUAL(p1.getSectionDescription("reference"),expected_2.getSectionDescription("reference"))
}
END_SECTION
START_SECTION((ParamIterator findFirst(const std::string &leaf) const ))
{
Param p;
p.setValue("a:b:leaf", "leaf_val1", "leaf 1");
p.setValue("b:a:leaf", "leaf_val2", "leaf 2");
p.setValue("a:c:leaf", "leaf_val3", "leaf 3");
p.setValue("a:c:another-leaf", "leaf_val4", "leaf 3");
Param::ParamIterator pI = p.findFirst("leaf");
TEST_EQUAL(pI.getName(), "a:b:leaf")
p.remove("a:b:leaf");
pI = p.findFirst("leaf");
TEST_EQUAL(pI.getName(), "a:c:leaf")
p.remove("a:c:leaf");
pI = p.findFirst("leaf");
TEST_EQUAL(pI.getName(), "b:a:leaf")
p.remove("b:a:leaf");
pI = p.findFirst("leaf");
TEST_EQUAL(pI == p.end(), true)
}
END_SECTION
START_SECTION((ParamIterator findNext(const std::string &leaf, const ParamIterator &start_leaf) const))
{
Param p;
p.setValue("a:b:leaf", "leaf_val1", "leaf 1");
p.setValue("b:a:leaf", "leaf_val2", "leaf 2");
p.setValue("a:c:leaf", "leaf_val3", "leaf 3");
p.setValue("a:c:another-leaf", "leaf_val4", "leaf 3");
Param::ParamIterator pI = p.findFirst("leaf");
TEST_EQUAL(pI.getName(), "a:b:leaf")
pI = p.findNext("leaf", pI);
TEST_EQUAL(pI.getName(), "a:c:leaf")
pI = p.findNext("leaf", pI);
TEST_EQUAL(pI.getName(), "b:a:leaf")
pI = p.findNext("leaf", pI);
TEST_EQUAL(pI == p.end(), true)
}
END_SECTION
START_SECTION((ParamIterator begin() const))
NOT_TESTABLE;
END_SECTION
START_SECTION((ParamIterator end() const))
Param p;
p.setValue("a",5);
p.setValue("b:a",6);
p.setValue("b:b",7);
p.setValue("c",8);
Param::ParamIterator it = p.begin();
TEST_EQUAL(it->name, "a")
TEST_EQUAL(it.getName(), "a")
TEST_EQUAL((UInt)it->value, 5)
++it;
TEST_EQUAL(it->name, "c")
TEST_EQUAL(it.getName(), "c")
TEST_EQUAL((UInt)it->value, 8)
++it;
TEST_EQUAL(it->name, "a")
TEST_EQUAL(it.getName(), "b:a")
TEST_EQUAL((UInt)it->value, 6)
++it;
TEST_EQUAL(it->name, "b")
TEST_EQUAL(it.getName(), "b:b")
TEST_EQUAL((UInt)it->value, 7)
++it;
TEST_EQUAL(it==p.end(),true)
END_SECTION
START_SECTION((void setValidStrings(const std::string &key, const std::vector< std::string > &strings)))
vector<std::string> strings;
strings.push_back("bla");
Param d;
d.setValue("ok","string");
d.setValue("dummy",5);
d.setValidStrings("ok",strings);
TEST_EQUAL(d.getEntry("ok").valid_strings==strings, true);
TEST_EXCEPTION(Exception::ElementNotFound, d.setValidStrings("dummy",strings))
strings.push_back("sdf,sdfd");
TEST_EXCEPTION(Exception::InvalidParameter, d.setValidStrings("ok",strings))
END_SECTION
START_SECTION((void setMinInt(const std::string &key, Int min)))
Param d;
d.setValue("ok",4);
d.setValue("dummy",5.5);
d.setMinInt("ok",4);
TEST_EQUAL(d.getEntry("ok").min_int,4);
TEST_EXCEPTION(Exception::ElementNotFound, d.setMinInt("dummy",4))
END_SECTION
START_SECTION((void setMaxInt(const std::string &key, Int max)))
Param d;
d.setValue("ok",4);
d.setValue("dummy",5.5);
d.setMaxInt("ok",4);
TEST_EQUAL(d.getEntry("ok").max_int,4);
TEST_EXCEPTION(Exception::ElementNotFound, d.setMaxInt("dummy",4))
END_SECTION
START_SECTION((void setMinFloat(const std::string &key, double min)))
Param d;
d.setValue("ok",4.5);
d.setValue("dummy",4);
d.setMinFloat("ok",4.0);
TEST_REAL_SIMILAR(d.getEntry("ok").min_float,4.0);
TEST_EXCEPTION(Exception::ElementNotFound, d.setMinFloat("dummy",4.5))
END_SECTION
START_SECTION((void setMaxFloat(const std::string &key, double max)))
Param d;
d.setValue("ok",4.5);
d.setValue("dummy",4);
d.setMaxFloat("ok",4.0);
TEST_REAL_SIMILAR(d.getEntry("ok").max_float,4.0);
TEST_EXCEPTION(Exception::ElementNotFound, d.setMaxFloat("dummy",4.5))
END_SECTION
// warnings for unknown parameters
// keep outside the scope of a single test to avoid destruction, leaving
// the log stream in an undefined state
ostringstream os;
// checkDefaults sends its warnings to OPENMS_LOG_WARN so we register our own
// listener here to check the output.
// Configure the thread-local warn stream
getThreadLocalLogWarn().remove(cout);
getThreadLocalLogWarn().insert(os);
START_SECTION((void checkDefaults(const std::string &name, const Param &defaults, const std::string& prefix="") const))
Param p,d;
p.setValue("string",std::string("bla"),"string");
p.setValue("int",5,"int");
p.setValue("double",47.11,"double");
p.checkDefaults("Test",d,"");
TEST_EQUAL(os.str().empty(),false)
d.setValue("int",5,"int");
d.setValue("double",47.11,"double");
os.str("");
os.clear();
p.checkDefaults("Test",d,"");
TEST_EQUAL(os.str().empty(),false)
p.clear();
p.setValue("pref:string",std::string("bla"),"pref:string");
p.setValue("pref:int",5,"pref:int");
p.setValue("pref:double",47.11,"pref:double");
os.str("");
os.clear();
p.checkDefaults("Test",d,"pref");
TEST_EQUAL(os.str().empty(),false)
os.str("");
os.clear();
p.checkDefaults("Test2",d,"pref:");
TEST_EQUAL(os.str().empty(),false)
//check string restrictions
vector<std::string> s_rest = {"a","b","c"};
d.setValue("stringv","bla","desc");
d.setValidStrings("stringv", s_rest);
p.clear();
p.setValue("stringv","a");
p.checkDefaults("Param_test",d,"");
p.setValue("stringv","d");
TEST_EXCEPTION(Exception::InvalidParameter,p.checkDefaults("Param_test",d,""))
//check int restrictions
d.setValue("intv",4,"desc");
d.setMinInt("intv",-4);
p.clear();
p.setValue("intv",-4);
p.checkDefaults("Param_test",d,"");
p.setValue("intv",700);
p.checkDefaults("Param_test",d,"");
p.setValue("intv",-5);
TEST_EXCEPTION(Exception::InvalidParameter,p.checkDefaults("Param_test",d,""))
d.setValue("intv2",4,"desc");
d.setMaxInt("intv2",4);
p.clear();
p.setValue("intv2",4);
p.checkDefaults("Param_test",d,"");
p.setValue("intv2",-700);
p.checkDefaults("Param_test",d,"");
p.setValue("intv2",5);
TEST_EXCEPTION(Exception::InvalidParameter,p.checkDefaults("Param_test",d,""))
//check double restrictions
d.setValue("doublev",4.0,"desc");
d.setMinFloat("doublev",-4.0);
p.clear();
p.setValue("doublev",-4.0);
p.checkDefaults("Param_test",d,"");
p.setValue("doublev",0.0);
p.checkDefaults("Param_test",d,"");
p.setValue("doublev",7.0);
p.checkDefaults("Param_test",d,"");
p.setValue("doublev",-4.1);
TEST_EXCEPTION(Exception::InvalidParameter,p.checkDefaults("Param_test",d,""))
d.setValue("doublev2",4.0,"desc");
d.setMaxFloat("doublev2",4.0);
p.clear();
p.setValue("doublev2",4.0);
p.checkDefaults("Param_test",d,"");
p.setValue("doublev2",-700.0);
p.checkDefaults("Param_test",d,"");
p.setValue("doublev2",4.1);
TEST_EXCEPTION(Exception::InvalidParameter,p.checkDefaults("Param_test",d,""))
//check list restrictions
vector<std::string> s_rest1 = {"a","b","c"};
d.setValue("stringlist",std::vector<std::string>{"aaa","abc","cab"},"desc");
d.setValidStrings("stringlist", s_rest);
p.clear();
p.setValue("stringlist",std::vector<std::string>{"a","c"});
p.checkDefaults("Param_test",d,"");
p.setValue("stringlist",std::vector<std::string>{"aa","dd","cc"});
TEST_EXCEPTION(Exception::InvalidParameter,p.checkDefaults("Param_test",d,""))
//wrong type
p.clear();
p.setValue("doublev",4);
TEST_EXCEPTION(Exception::InvalidParameter,p.checkDefaults("Param_test",d,""))
p.clear();
p.setValue("intv","bla");
TEST_EXCEPTION(Exception::InvalidParameter,p.checkDefaults("Param_test",d,""))
p.clear();
p.setValue("stringv",4.5);
TEST_EXCEPTION(Exception::InvalidParameter,p.checkDefaults("Param_test",d,""))
END_SECTION
START_SECTION((void update(const Param& old_version, const bool add_unknown = false)))
{
NOT_TESTABLE // covered by the full three-argument update() test in this file
}
END_SECTION
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/SpecArrayFile_test.cpp | .cpp | 2,389 | 84 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/test_config.h>
///////////////////////////
#include <OpenMS/FORMAT/SpecArrayFile.h>
///////////////////////////
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
using namespace OpenMS;
using namespace std;
START_TEST(SpecArrayFile, "$Id$")
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
// Shared across the ctor/dtor sections below: ptr is allocated in the
// constructor test and released in the destructor test.
SpecArrayFile* ptr = nullptr;
SpecArrayFile* null_ptr = nullptr;
START_SECTION(SpecArrayFile())
{
ptr = new SpecArrayFile();
TEST_NOT_EQUAL(ptr, null_ptr)
}
END_SECTION
START_SECTION(virtual ~SpecArrayFile())
{
delete ptr;
}
END_SECTION
START_SECTION((template < typename FeatureMapType > void load(const String &filename, FeatureMapType &feature_map)))
{
// Loads a SpecArray .peplist file into a FeatureMap and checks both features;
// malformed input must raise ParseError, a missing file FileNotFound.
SpecArrayFile f;
FeatureMap fm;
f.load(OPENMS_GET_TEST_DATA_PATH("SpecArrayFile_test_1.peplist"), fm);
TEST_EQUAL(fm.size(),2)
ABORT_IF(fm.size()!=2)
// RT, m/z, intensity and the s/n meta value are floating point: compare with
// TEST_REAL_SIMILAR (tolerance-based) instead of exact TEST_EQUAL, which is
// brittle for values like 60.1*60 that are not exactly representable.
TEST_REAL_SIMILAR(fm[0].getRT(), 60.1*60)
TEST_REAL_SIMILAR(fm[0].getMZ(), 500.1)
TEST_REAL_SIMILAR(fm[0].getIntensity(), 4343534)
TEST_EQUAL(fm[0].getCharge(), 5)
TEST_REAL_SIMILAR(double(fm[0].getMetaValue("s/n")), 3.2)
TEST_REAL_SIMILAR(fm[1].getRT(), 40.1*60)
TEST_REAL_SIMILAR(fm[1].getMZ(), 700.1 )
TEST_REAL_SIMILAR(fm[1].getIntensity(), 222432)
TEST_EQUAL(fm[1].getCharge(), 3)
TEST_REAL_SIMILAR(double(fm[1].getMetaValue("s/n")), 2.2)
TEST_EXCEPTION(Exception::ParseError, f.load(OPENMS_GET_TEST_DATA_PATH("SpecArrayFile_test_2.peplist"), fm));
TEST_EXCEPTION(Exception::FileNotFound, f.load(OPENMS_GET_TEST_DATA_PATH("SpecArrayFile_test_2_doesnotexist.peplist"), fm));
}
END_SECTION
START_SECTION((template < typename SpectrumType > void store(const String &filename, const SpectrumType &spectrum) const ))
{
// store() is not implemented for this format; it must throw NotImplemented.
SpecArrayFile f;
MSSpectrum spec;
TEST_EXCEPTION(Exception::NotImplemented, f.store("bla", spec))
}
END_SECTION
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/FlagSet_test.cpp | .cpp | 8,154 | 341 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/test_config.h>
///////////////////////////
#include <OpenMS/DATASTRUCTURES/FlagSet.h>
#include <iostream>
//////////////////////////
using namespace OpenMS;
// Well-formed test enum: values 0..2, mapped by FlagSet to bits 1, 2, 4.
enum Enum_test
{
e_red,
e_green,
e_blue
};
// Deliberately invalid enum values to trigger FlagSet's precondition checks.
enum class Enum_broken
{
eb_red = -1, // too small
eb_green,
eb_blue = 64 // too large
};
using FST = FlagSet<Enum_test>;
using FSB = FlagSet<Enum_broken>;
// Print a FlagSet as its underlying bit mask, so test macros can report
// expected/actual values on failure.
std::ostream& operator<<(std::ostream& str, FST s)
{
str << s.value();
return str;
}
START_TEST(FlagSet, "$Id$")
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////
//start Section
/////////////////////////////////////////////////////////////////
// ptr is allocated in the constructor test and freed in the destructor test.
FlagSet<Enum_test>* ptr = nullptr;
FlagSet<Enum_test>* nulpt = nullptr;
START_SECTION(FlagSet())
{
ptr = new FlagSet<Enum_test>();
TEST_NOT_EQUAL(ptr, nulpt)
}
END_SECTION
START_SECTION(~FlagSet())
delete ptr;
END_SECTION
START_SECTION(explicit FlagSet(const ENUM& en))
{
// e_red (enum value 0) maps to bit 0, i.e. value() == 1
TEST_NOT_EQUAL(FST(e_red), FST())
TEST_EQUAL(FST(e_red).value(), 1)
TEST_PRECONDITION_VIOLATED(FSB{Enum_broken::eb_red}) // negative value
TEST_PRECONDITION_VIOLATED(FSB{Enum_broken::eb_blue}) // too large for uint64
}
END_SECTION
START_SECTION(FlagSet(const FlagSet& stat))
TEST_EQUAL(FST(FST(e_green)).value(), 2)
END_SECTION
START_SECTION(FlagSet& operator=(const FlagSet & stat))
FST gg(e_green);
FST target = gg;
TEST_EQUAL(target, gg)
END_SECTION
START_SECTION(~FlagSet())
NOT_TESTABLE
END_SECTION
/// Equality
START_SECTION(bool operator==(const FlagSet & stat) const)
{
FST gg(e_green);
FST target = gg;
TEST_TRUE(target == gg)
TEST_NOT_EQUAL(gg, FST())
}
END_SECTION
/// bitwise AND
START_SECTION(FlagSet operator&(const ENUM & en) const)
{
// AND with a flag that is not set yields the empty set
FST gg(e_green);
FST empty = gg & e_red;
FST just_green = gg & e_green;
TEST_EQUAL(empty, FST())
TEST_EQUAL(just_green, gg)
}
END_SECTION
/// bitwise AND
START_SECTION(FlagSet operator&(const FlagSet & rhs) const)
{
FST gg(e_green);
FST rr(e_red);
FST empty = gg & rr;
FST just_green = gg & gg;
TEST_EQUAL(empty, FST())
TEST_EQUAL(just_green, gg)
}
END_SECTION
START_SECTION(FlagSet& operator&=(const ENUM & en))
{
FST gg(e_green);
FST empty = gg;
empty &= e_red;
FST just_green = gg;
just_green &= e_green;
TEST_EQUAL(empty, FST())
TEST_EQUAL(just_green, gg)
}
END_SECTION
START_SECTION(FlagSet& operator&=(const FlagSet & rhs))
{
FST gg(e_green);
FST rr(e_red);
FST empty = gg;
empty &= rr;
FST just_green = gg;
just_green &= gg;
TEST_EQUAL(empty, FST())
TEST_EQUAL(just_green, gg)
}
END_SECTION
// OR and '+' sections: red|green == bits 1|2 == 3; OR-ing an already-set
// flag is a no-op.
START_SECTION(FlagSet operator|(const ENUM & en) const)
{
FST gg(e_green);
FST green_or_red = gg | e_red;
FST green_or_green = gg | e_green;
TEST_EQUAL(green_or_red.value(), 3)
TEST_EQUAL(green_or_green, gg)
}
END_SECTION
START_SECTION(FlagSet operator|(const FlagSet & rhs) const)
{
FST gg;
FST empty_or_red = gg | e_red;
FST red_or_green = empty_or_red | e_green;
TEST_EQUAL(empty_or_red, FST(e_red))
TEST_EQUAL(red_or_green.value(), 3)
}
END_SECTION
START_SECTION(FlagSet& operator|=(const ENUM & en))
{
FST gg(e_green);
FST green_or_red = gg;
green_or_red |= e_red;
FST green_or_green = gg;
green_or_green |= e_green;
TEST_EQUAL(green_or_red.value(), 3)
TEST_EQUAL(green_or_green, gg)
}
END_SECTION
START_SECTION(FlagSet& operator|=(const FlagSet & rhs))
{
FST gg;
FST empty_or_red = gg;
empty_or_red |= e_red;
FST red_or_green = empty_or_red;
red_or_green |= e_green;
TEST_EQUAL(empty_or_red, FST(e_red))
TEST_EQUAL(red_or_green.value(), 3)
}
END_SECTION
START_SECTION(FlagSet operator+(const ENUM & en) const)
{
FST gg(e_green);
FST green_or_red = gg + e_red;
FST green_or_green = gg + e_green;
TEST_EQUAL(green_or_red.value(), 3)
TEST_EQUAL(green_or_green, gg)
}
END_SECTION
START_SECTION(FlagSet operator+(const FlagSet & en) const)
{
FST gg;
FST empty_or_red = gg + e_red;
FST red_or_green = empty_or_red + e_green;
TEST_EQUAL(empty_or_red, FST(e_red))
TEST_EQUAL(red_or_green.value(), 3)
}
END_SECTION
START_SECTION(FlagSet& operator+=(const ENUM & rhs))
{
FST gg(e_green);
FST green_or_red = gg;
green_or_red += e_red;
FST green_or_green = gg;
green_or_green += e_green;
TEST_EQUAL(green_or_red.value(), 3)
TEST_EQUAL(green_or_green, gg)
}
END_SECTION
START_SECTION(FlagSet& operator+=(const FlagSet & rhs))
{
FST gg;
FST empty_or_red = gg;
empty_or_red += e_red;
FST red_or_green = empty_or_red;
red_or_green += e_green;
TEST_EQUAL(empty_or_red, FST(e_red))
TEST_EQUAL(red_or_green.value(), 3)
}
END_SECTION
// '-' sections: removing a flag that is not set is a no-op (also on the
// empty set); removing a set flag clears exactly that bit.
START_SECTION(FlagSet operator-(const FlagSet & rhs))
{
FST gg;
FST empty = gg - FST(e_red);
TEST_EQUAL(FST(), empty)
FST red_or_green = (FST(e_red) + e_green);
FST red_or_green_no_blue = red_or_green - FST(e_blue);
TEST_EQUAL(red_or_green, red_or_green_no_blue)
FST red_only = red_or_green - e_green;
TEST_EQUAL(red_only, FST(e_red))
}
END_SECTION
START_SECTION(FlagSet& operator-=(const FlagSet & rhs))
{
FST gg;
FST empty = gg;
empty -= FST(e_red);
TEST_EQUAL(FST(), empty)
FST red_or_green = (FST(e_red) + e_green);
FST red_or_green_no_blue = red_or_green;
red_or_green_no_blue -= FST(e_blue);
TEST_EQUAL(red_or_green, red_or_green_no_blue)
FST red_only = red_or_green;
red_only -= FST(e_green);
TEST_EQUAL(red_only, FST(e_red))
}
END_SECTION
START_SECTION(FlagSet operator-(const ENUM & rhs))
{
FST gg;
FST empty = gg - e_red;
TEST_EQUAL(FST(), empty)
FST red_or_green = (FST(e_red) + e_green);
FST red_or_green_no_blue = red_or_green - e_blue;
TEST_EQUAL(red_or_green, red_or_green_no_blue)
FST red_only = red_or_green - e_green;
TEST_EQUAL(red_only, FST(e_red))
}
END_SECTION
START_SECTION(FlagSet& operator-=(const ENUM & rhs))
{
FST gg;
FST empty = gg;
empty -= e_red;
TEST_EQUAL(FST(), empty)
FST red_or_green = (FST(e_red) + e_green);
FST red_or_green_no_blue = red_or_green;
red_or_green_no_blue -= e_blue;
TEST_EQUAL(red_or_green, red_or_green_no_blue)
FST red_only = red_or_green;
red_only -= e_green;
TEST_EQUAL(red_only, FST(e_red))
}
END_SECTION
// isSuperSetOf(): every set is a superset of the empty set (and of itself);
// a proper subset is never a superset of its superset.
START_SECTION(bool isSuperSetOf(const FlagSet & required) const)
{
FST gg;
FST empty = gg - e_red;
TEST_EQUAL(gg.isSuperSetOf(empty), true)
TEST_EQUAL(empty.isSuperSetOf(gg), true)
FST red_or_green = (FST(e_red) + e_green);
FST red_or_green_or_blue = red_or_green + e_blue;
TEST_EQUAL(red_or_green_or_blue.isSuperSetOf(red_or_green), true)
TEST_EQUAL(red_or_green_or_blue.isSuperSetOf(red_or_green_or_blue), true)
TEST_EQUAL(red_or_green_or_blue.isSuperSetOf(empty), true)
TEST_EQUAL(red_or_green.isSuperSetOf(red_or_green_or_blue), false)
TEST_EQUAL(empty.isSuperSetOf(red_or_green_or_blue), false)
TEST_EQUAL(FST(e_red).isSuperSetOf(red_or_green_or_blue), false)
}
END_SECTION
START_SECTION(bool isSuperSetOf(const ENUM & required) const)
{
FST empty;
TEST_EQUAL(empty.isSuperSetOf(e_red), false)
FST red_or_green = (FST(e_red) + e_green);
FST red_or_green_or_blue = red_or_green + e_blue;
TEST_EQUAL(red_or_green_or_blue.isSuperSetOf(e_red), true)
TEST_EQUAL(red_or_green_or_blue.isSuperSetOf(e_blue), true)
TEST_EQUAL(red_or_green_or_blue.isSuperSetOf(e_green), true)
}
END_SECTION
START_SECTION(bool empty() const)
FST empty;
TEST_EQUAL(empty.empty(), true)
FST red(e_red);
TEST_EQUAL(red.empty(), false)
FST red_or_green = (FST(e_red) + e_green);
TEST_EQUAL(red_or_green.empty(), false)
END_SECTION
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/TransitionTSVFile_test.cpp | .cpp | 12,391 | 362 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/test_config.h>
#include <OpenMS/FORMAT/TraMLFile.h>
#include <OpenMS/FORMAT/FileTypes.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/DataAccessHelper.h>
#include <boost/assign/std/vector.hpp>
///////////////////////////
#include <OpenMS/ANALYSIS/OPENSWATH/TransitionTSVFile.h>
#include <OpenMS/ANALYSIS/OPENSWATH/TransitionPQPFile.h>
///////////////////////////
#include <fstream>
#include <sstream>
#include <algorithm>
using namespace OpenMS;
using namespace std;
// Helper function to read TSV file into a vector of lines (sorted for comparison)
// Read a TSV file, discard its header line, and return all remaining
// non-empty data lines sorted lexicographically, so two files can be
// compared independently of row order.
std::vector<std::string> readAndSortTSV(const std::string& filename)
{
std::ifstream input(filename);
std::string row;
std::getline(input, row); // drop the header row
std::vector<std::string> data;
while (std::getline(input, row))
{
if (row.empty()) continue; // ignore blank lines
data.push_back(row);
}
std::sort(data.begin(), data.end());
return data;
}
// Helper function to parse a TSV line into fields
// Split a TSV line into its tab-separated fields.
// A plain std::getline loop silently drops a trailing empty field
// ("a\tb\t" -> {"a","b"}), which made the guarded field comparisons above
// silently skip an empty last column. This version keeps it: the field
// count always equals the number of tabs + 1. An empty line still yields
// no fields (same as before).
std::vector<std::string> parseTSVLine(const std::string& line)
{
std::vector<std::string> fields;
if (line.empty())
{
return fields;
}
std::string::size_type start = 0;
while (true)
{
const std::string::size_type tab = line.find('\t', start);
if (tab == std::string::npos)
{
fields.push_back(line.substr(start)); // last (possibly empty) field
break;
}
fields.push_back(line.substr(start, tab - start));
start = tab + 1;
}
return fields;
}
// Helper to normalize NA/empty for comparison
// Normalize a TSV field for comparison: treat an empty field and the
// literal "NA" as the same missing-value marker.
std::string normalizeField(const std::string& field)
{
const bool is_missing = field.empty() || field == "NA";
return is_missing ? std::string("NA") : field;
}
START_TEST(TransitionTSVFile, "$Id$")
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
// ptr is allocated in the constructor test and freed in the destructor test.
TransitionTSVFile* ptr = nullptr;
TransitionTSVFile* nullPointer = nullptr;
START_SECTION(TransitionTSVFile())
{
ptr = new TransitionTSVFile();
TEST_NOT_EQUAL(ptr, nullPointer)
}
END_SECTION
START_SECTION(~TransitionTSVFile())
{
delete ptr;
}
END_SECTION
START_SECTION( void convertTargetedExperimentToTSV(const char * filename, OpenMS::TargetedExperiment & targeted_exp))
{
// Test roundtrip: TraML -> TargetedExperiment -> TSV
std::string input_file = OPENMS_GET_TEST_DATA_PATH("MRMAssay_detectingTransistionCompound_input.TraML");
std::string output_file;
NEW_TMP_FILE(output_file);
TraMLFile traml;
TargetedExperiment targeted_exp;
traml.load(input_file, targeted_exp);
TransitionTSVFile tsv_writer;
tsv_writer.convertTargetedExperimentToTSV(output_file.c_str(), targeted_exp);
// Verify output file exists and has content
std::ifstream ifs(output_file);
TEST_EQUAL(ifs.good(), true)
std::string line;
int line_count = 0;
while (std::getline(ifs, line)) { line_count++; }
TEST_EQUAL(line_count > 1, true) // Header + at least one data line
}
END_SECTION
START_SECTION( void convertTSVToTargetedExperiment(const char * filename, OpenMS::TargetedExperiment & targeted_exp))
{
// Load from TraML and write to TSV, then read TSV back
std::string input_file = OPENMS_GET_TEST_DATA_PATH("MRMAssay_detectingTransistionCompound_input.TraML");
std::string tsv_file;
NEW_TMP_FILE(tsv_file);
TraMLFile traml;
TargetedExperiment targeted_exp_in;
traml.load(input_file, targeted_exp_in);
TransitionTSVFile tsv_handler;
tsv_handler.convertTargetedExperimentToTSV(tsv_file.c_str(), targeted_exp_in);
// Now read the TSV back
TargetedExperiment targeted_exp_out;
tsv_handler.convertTSVToTargetedExperiment(tsv_file.c_str(), FileTypes::TSV, targeted_exp_out);
// Verify we loaded some transitions
TEST_EQUAL(targeted_exp_out.getTransitions().size() > 0, true)
// Note: Peptides may not be loaded if the TraML only contains compounds
TEST_EQUAL(targeted_exp_out.getCompounds().size() > 0 || targeted_exp_out.getPeptides().size() > 0, true)
}
END_SECTION
START_SECTION( void validateTargetedExperiment(OpenMS::TargetedExperiment & targeted_exp))
{
NOT_TESTABLE
}
END_SECTION
START_SECTION([EXTRA] Light path TSV roundtrip)
{
// Test roundtrip: TraML -> Heavy -> TSV -> Light -> TSV
std::string input_file = OPENMS_GET_TEST_DATA_PATH("MRMAssay_detectingTransistionCompound_input.TraML");
std::string tsv_file, output_file;
NEW_TMP_FILE(tsv_file);
NEW_TMP_FILE(output_file);
// Load TraML to heavy, convert to TSV
TraMLFile traml;
TargetedExperiment targeted_exp;
traml.load(input_file, targeted_exp);
TransitionTSVFile tsv_handler;
tsv_handler.convertTargetedExperimentToTSV(tsv_file.c_str(), targeted_exp);
// Read TSV into LightTargetedExperiment
OpenSwath::LightTargetedExperiment light_exp;
tsv_handler.convertTSVToTargetedExperiment(tsv_file.c_str(), FileTypes::TSV, light_exp);
// Verify we loaded some data
TEST_EQUAL(light_exp.transitions.size() > 0, true)
TEST_EQUAL(light_exp.compounds.size() > 0, true)
// Write back to TSV
tsv_handler.convertLightTargetedExperimentToTSV(output_file.c_str(), light_exp);
// Verify output file exists and has content
std::ifstream ifs(output_file);
TEST_EQUAL(ifs.good(), true)
std::string line;
int line_count = 0;
while (std::getline(ifs, line)) { line_count++; }
TEST_EQUAL(line_count > 1, true)
}
END_SECTION
START_SECTION([EXTRA] Light vs Heavy path equivalence via PQP roundtrip)
{
// Test that light and heavy paths produce equivalent output when both go through PQP
// This tests: TraML -> Heavy -> PQP -> Heavy -> TSV vs TraML -> Light -> PQP -> Light -> TSV
std::string input_file = OPENMS_GET_TEST_DATA_PATH("MRMAssay_detectingTransistionCompound_input.TraML");
std::string heavy_pqp, light_pqp, heavy_tsv, light_tsv;
NEW_TMP_FILE(heavy_pqp);
NEW_TMP_FILE(light_pqp);
NEW_TMP_FILE(heavy_tsv);
NEW_TMP_FILE(light_tsv);
TransitionTSVFile tsv_file;
TransitionPQPFile pqp_file;
TraMLFile traml;
// Load TraML
TargetedExperiment targeted_exp;
traml.load(input_file, targeted_exp);
// Heavy path: TargetedExperiment -> PQP -> TargetedExperiment -> TSV
{
TargetedExperiment heavy_exp_out;
pqp_file.convertTargetedExperimentToPQP(heavy_pqp.c_str(), targeted_exp);
pqp_file.convertPQPToTargetedExperiment(heavy_pqp.c_str(), heavy_exp_out);
tsv_file.convertTargetedExperimentToTSV(heavy_tsv.c_str(), heavy_exp_out);
}
// Light path: TargetedExperiment -> LightTargetedExperiment -> PQP -> LightTargetedExperiment -> TSV
{
OpenSwath::LightTargetedExperiment light_exp_in, light_exp_out;
OpenSwathDataAccessHelper::convertTargetedExp(targeted_exp, light_exp_in);
pqp_file.convertLightTargetedExperimentToPQP(light_pqp.c_str(), light_exp_in);
pqp_file.convertPQPToTargetedExperiment(light_pqp.c_str(), light_exp_out);
tsv_file.convertLightTargetedExperimentToTSV(light_tsv.c_str(), light_exp_out);
}
// Compare outputs (sorted for order-independent comparison)
std::vector<std::string> heavy_lines = readAndSortTSV(heavy_tsv);
std::vector<std::string> light_lines = readAndSortTSV(light_tsv);
// Should have same number of transitions
TEST_EQUAL(heavy_lines.size(), light_lines.size())
// Compare field by field for each line
// Fields that should match after PQP roundtrip (indices based on TSV header):
// 0: PrecursorMz, 1: ProductMz, 2: PrecursorCharge, 3: ProductCharge
// 4: LibraryIntensity, 5: NormalizedRetentionTime, 6: PeptideSequence
// 7: ModifiedPeptideSequence, 8: PeptideGroupLabel
// 22: TransitionGroupId, 23: TransitionId
// 24: Decoy, 25: DetectingTransition, 26: IdentifyingTransition, 27: QuantifyingTransition
//
// Note: Fields like FragmentType (17) may differ between empty and "NA"
// due to different handling - we normalize these for comparison
std::vector<size_t> numeric_fields = {0, 1, 4, 5};
std::vector<size_t> string_fields = {2, 3, 6, 7, 8, 22, 23, 24, 25, 26, 27};
for (size_t i = 0; i < std::min(heavy_lines.size(), light_lines.size()); ++i)
{
std::vector<std::string> heavy_fields = parseTSVLine(heavy_lines[i]);
std::vector<std::string> light_fields = parseTSVLine(light_lines[i]);
// Compare numeric fields (parsed via stod, compared with tolerance)
for (size_t field_idx : numeric_fields)
{
if (field_idx < heavy_fields.size() && field_idx < light_fields.size())
{
double heavy_val = std::stod(heavy_fields[field_idx]);
double light_val = std::stod(light_fields[field_idx]);
TEST_REAL_SIMILAR(heavy_val, light_val)
}
}
// Compare string fields (normalize empty/"NA" values)
for (size_t field_idx : string_fields)
{
if (field_idx < heavy_fields.size() && field_idx < light_fields.size())
{
std::string heavy_norm = normalizeField(heavy_fields[field_idx]);
std::string light_norm = normalizeField(light_fields[field_idx]);
TEST_STRING_EQUAL(heavy_norm, light_norm)
}
}
}
}
END_SECTION
START_SECTION([EXTRA] Light path preserves transition flags)
{
// Test that detecting/quantifying/identifying flags are preserved in light path
std::string input_file = OPENMS_GET_TEST_DATA_PATH("MRMAssay_detectingTransistionCompound_input.TraML");
std::string pqp_file_path;
NEW_TMP_FILE(pqp_file_path);
TraMLFile traml;
TransitionPQPFile pqp_file;
TargetedExperiment targeted_exp;
OpenSwath::LightTargetedExperiment light_exp_in, light_exp_out;
// Load TraML and convert to light
traml.load(input_file, targeted_exp);
OpenSwathDataAccessHelper::convertTargetedExp(targeted_exp, light_exp_in);
// Store original flag values
std::vector<bool> original_detecting, original_quantifying, original_identifying, original_decoy;
for (const auto& tr : light_exp_in.transitions)
{
original_detecting.push_back(tr.isDetectingTransition());
original_quantifying.push_back(tr.isQuantifyingTransition());
original_identifying.push_back(tr.isIdentifyingTransition());
original_decoy.push_back(tr.getDecoy());
}
// Write to PQP and read back
pqp_file.convertLightTargetedExperimentToPQP(pqp_file_path.c_str(), light_exp_in);
pqp_file.convertPQPToTargetedExperiment(pqp_file_path.c_str(), light_exp_out);
// Verify flags are preserved (same count, same per-transition values)
TEST_EQUAL(light_exp_out.transitions.size(), original_detecting.size())
for (size_t i = 0; i < light_exp_out.transitions.size(); ++i)
{
TEST_EQUAL(light_exp_out.transitions[i].isDetectingTransition(), original_detecting[i])
TEST_EQUAL(light_exp_out.transitions[i].isQuantifyingTransition(), original_quantifying[i])
TEST_EQUAL(light_exp_out.transitions[i].isIdentifyingTransition(), original_identifying[i])
TEST_EQUAL(light_exp_out.transitions[i].getDecoy(), original_decoy[i])
}
}
END_SECTION
START_SECTION([EXTRA] Light path preserves fragment annotation)
{
// Test that fragment type/number/charge are preserved in light path
std::string input_file = OPENMS_GET_TEST_DATA_PATH("MRMAssay_detectingTransistionCompound_input.TraML");
std::string pqp_file_path;
NEW_TMP_FILE(pqp_file_path);
TraMLFile traml;
TransitionPQPFile pqp_file;
TargetedExperiment targeted_exp;
OpenSwath::LightTargetedExperiment light_exp_in, light_exp_out;
// Load TraML and convert to light
traml.load(input_file, targeted_exp);
OpenSwathDataAccessHelper::convertTargetedExp(targeted_exp, light_exp_in);
// Store original fragment info
std::vector<std::string> original_annotations;
for (const auto& tr : light_exp_in.transitions)
{
original_annotations.push_back(tr.getAnnotation());
}
// Write to PQP and read back
pqp_file.convertLightTargetedExperimentToPQP(pqp_file_path.c_str(), light_exp_in);
pqp_file.convertPQPToTargetedExperiment(pqp_file_path.c_str(), light_exp_out);
// Verify annotations are preserved
TEST_EQUAL(light_exp_out.transitions.size(), original_annotations.size())
for (size_t i = 0; i < light_exp_out.transitions.size(); ++i)
{
TEST_STRING_EQUAL(light_exp_out.transitions[i].getAnnotation(), original_annotations[i])
}
}
END_SECTION
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/source/IDBoostGraph_test.cpp | .cpp | 9,656 | 206 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Julianus Pfeuffer $
// $Authors: Julianus Pfeuffer $
// --------------------------------------------------------------------------
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/ANALYSIS/ID/IDBoostGraph.h>
#include <OpenMS/PROCESSING/ID/IDFilter.h>
#include <OpenMS/FORMAT/IdXMLFile.h>
#include <OpenMS/test_config.h>
using namespace OpenMS;
using namespace std;
using Internal::IDBoostGraph;
// Helper shared by the "Resolution" section below: runs peptide-centric protein
// group resolution on the first protein run via IDBoostGraph, then rebuilds,
// filters and sorts the indistinguishable protein groups.
// Both arguments are modified in place.
// NOTE(review): ctor flags (1, false, false) mirror the test sections below;
// see the IDBoostGraph constructor for their exact semantics.
static void runIBGResolve(vector<ProteinIdentification>& inferred_protein_ids,
PeptideIdentificationList& inferred_peptide_ids)
{
IDBoostGraph ibg(inferred_protein_ids[0], inferred_peptide_ids, 1, false, false);
ibg.computeConnectedComponents();
ibg.clusterIndistProteinsAndPeptides(); //TODO check in resolve or do it there if not done yet!
//Note: the above does not add singleton groups to graph
ibg.resolveGraphPeptideCentric(true);
// Drop the pre-existing groupings before re-annotating from the resolved graph.
inferred_protein_ids[0].getIndistinguishableProteins().clear();
inferred_protein_ids[0].getProteinGroups().clear();
ibg.annotateIndistProteins(true); // this does not really add singletons since they are not in the graph
// Remove proteins that lost all peptide evidence during resolution, then make
// the group lists consistent with the remaining hits.
IDFilter::removeUnreferencedProteins(inferred_protein_ids[0], inferred_peptide_ids);
IDFilter::updateProteinGroups(inferred_protein_ids[0].getIndistinguishableProteins(),inferred_protein_ids[0].getHits());
inferred_protein_ids[0].fillIndistinguishableGroupsWithSingletons();
// Sort groups for a deterministic output order (needed for file comparison).
auto & ipg = inferred_protein_ids[0].getIndistinguishableProteins();
std::sort(std::begin(ipg), std::end(ipg));
}
START_TEST(IDBoostGraph, "$Id$")
START_SECTION(IDBoostGraph only best PSMs)
{
vector<ProteinIdentification> prots;
PeptideIdentificationList peps;
IdXMLFile idf;
idf.load(OPENMS_GET_TEST_DATA_PATH("newMergerTest_out.idXML"),prots,peps);
// Third ctor arg = 1: only the best PSM per identification enters the graph.
IDBoostGraph idb{prots[0], peps, 1, false, false};
TEST_EQUAL(idb.getNrConnectedComponents(), 0)
// 6 proteins (1 unmatched and omitted since we build the graph psm-centric) plus 4 peptides (top per psm).
TEST_EQUAL(boost::num_vertices(idb.getComponent(0)), 9)
idb.computeConnectedComponents();
TEST_EQUAL(idb.getNrConnectedComponents(), 3)
TEST_EQUAL(boost::num_vertices(idb.getComponent(0)), 3)
TEST_EQUAL(boost::num_vertices(idb.getComponent(1)), 4)
TEST_EQUAL(boost::num_vertices(idb.getComponent(2)), 2)
// Extended clustering throws MissingInformation here — presumably because run
// info (4th ctor arg) is false; compare the "with runinfo" section below.
TEST_EXCEPTION(Exception::MissingInformation, idb.clusterIndistProteinsAndPeptidesAndExtendGraph());
idb.clusterIndistProteinsAndPeptides();
TEST_EQUAL(idb.getNrConnectedComponents(), 3)
// Only cc 0 and 1 have indist prot group
TEST_EQUAL(boost::num_vertices(idb.getComponent(0)), 4)
TEST_EQUAL(boost::num_vertices(idb.getComponent(1)), 5)
TEST_EQUAL(boost::num_vertices(idb.getComponent(2)), 2)
}
END_SECTION
/* TODO test graph-based resolution
START_SECTION(IDBoostGraph graph-based group resolution)
{
vector<ProteinIdentification> prots;
PeptideIdentificationList peps;
IdXMLFile idf;
idf.load(OPENMS_GET_TEST_DATA_PATH("newMergerTest_out.idXML"),prots,peps);
IDBoostGraph idb{prots[0], peps, 1, false};
TEST_EQUAL(idb.getNrConnectedComponents(), 0)
TEST_EQUAL(boost::num_vertices(idb.getComponent(0)), 9)
idb.computeConnectedComponents();
TEST_EQUAL(idb.getNrConnectedComponents(), 3)
// The next lines do not sum up to 9 because protein PH2 is unmatched
// If you want to avoid that, filter unmatched proteins first!
TEST_EQUAL(boost::num_vertices(idb.getComponent(0)), 3)
TEST_EQUAL(boost::num_vertices(idb.getComponent(1)), 4)
TEST_EQUAL(boost::num_vertices(idb.getComponent(2)), 2)
TEST_EXCEPTION(Exception::MissingInformation, idb.clusterIndistProteinsAndPeptidesAndExtendGraph());
idb.clusterIndistProteinsAndPeptides();
TEST_EQUAL(idb.getNrConnectedComponents(), 3)
// Only cc 0 and 1 have indist prot group
TEST_EQUAL(boost::num_vertices(idb.getComponent(0)), 4)
TEST_EQUAL(boost::num_vertices(idb.getComponent(1)), 5)
TEST_EQUAL(boost::num_vertices(idb.getComponent(2)), 2)
}
END_SECTION
*/
START_SECTION(IDBoostGraph all PSMs)
{
vector<ProteinIdentification> prots;
PeptideIdentificationList peps;
IdXMLFile idf;
idf.load(OPENMS_GET_TEST_DATA_PATH("newMergerTest_out.idXML"),prots,peps);
// Same input as above, but third ctor arg = 0: all PSMs are used (14 vertices
// instead of 9 before component computation).
IDBoostGraph idb{prots[0], peps, 0, false, false};
TEST_EQUAL(idb.getNrConnectedComponents(), 0)
TEST_EQUAL(boost::num_vertices(idb.getComponent(0)), 14)
idb.computeConnectedComponents();
// Now it is 5 ccs because there is an unmatched peptide and a new PSM that only matches to
// previously uncovered protein PH2.
TEST_EQUAL(idb.getNrConnectedComponents(), 5)
TEST_EQUAL(boost::num_vertices(idb.getComponent(0)), 4)
TEST_EQUAL(boost::num_vertices(idb.getComponent(1)), 2)
TEST_EQUAL(boost::num_vertices(idb.getComponent(2)), 5)
TEST_EQUAL(boost::num_vertices(idb.getComponent(3)), 1)
TEST_EQUAL(boost::num_vertices(idb.getComponent(4)), 2)
}
END_SECTION
START_SECTION(IDBoostGraph only best PSMs with runinfo)
{
vector<ProteinIdentification> prots;
PeptideIdentificationList peps;
IdXMLFile idf;
idf.load(OPENMS_GET_TEST_DATA_PATH("IDBoostGraph_test_in.idXML"),prots,peps);
// Fourth ctor arg is true here (unlike the sections above), so
// clusterIndistProteinsAndPeptidesAndExtendGraph() below does not throw
// MissingInformation.
IDBoostGraph idb{prots[0], peps, 1, true, false};
TEST_EQUAL(idb.getNrConnectedComponents(), 0)
TEST_EQUAL(boost::num_vertices(idb.getComponent(0)), 8)
idb.computeConnectedComponents();
TEST_EQUAL(idb.getNrConnectedComponents(), 2)
TEST_EQUAL(boost::num_vertices(idb.getComponent(0)), 6)
TEST_EQUAL(boost::num_vertices(idb.getComponent(1)), 2)
idb.clusterIndistProteinsAndPeptidesAndExtendGraph();
TEST_EQUAL(idb.getNrConnectedComponents(), 2)
// Only cc 0 and 1 have indist prot group
//TODO we could reduce the number of nodes by removing ones without evidence
TEST_EQUAL(boost::num_vertices(idb.getComponent(0)), 25)
TEST_EQUAL(boost::num_vertices(idb.getComponent(1)), 11)
}
END_SECTION
START_SECTION(IDBoostGraph graph-based group resolution)
{
vector<ProteinIdentification> prots;
PeptideIdentificationList peps;
IdXMLFile idf;
idf.load(OPENMS_GET_TEST_DATA_PATH("newMergerTest_out.idXML"),prots,peps);
IDBoostGraph idb{prots[0], peps, 1, false, false};
TEST_EQUAL(idb.getNrConnectedComponents(), 0)
TEST_EQUAL(boost::num_vertices(idb.getComponent(0)), 9)
idb.computeConnectedComponents();
TEST_EQUAL(idb.getNrConnectedComponents(), 3)
TEST_EQUAL(boost::num_vertices(idb.getComponent(0)), 3)
TEST_EQUAL(boost::num_vertices(idb.getComponent(1)), 4)
TEST_EQUAL(boost::num_vertices(idb.getComponent(2)), 2)
TEST_EXCEPTION(Exception::MissingInformation, idb.clusterIndistProteinsAndPeptidesAndExtendGraph());
idb.clusterIndistProteinsAndPeptides();
// Only cc 0 and 1 have indist prot group
// Edge counts before resolution, to contrast with the counts after.
TEST_EQUAL(boost::num_edges(idb.getComponent(0)), 3)
TEST_EQUAL(boost::num_edges(idb.getComponent(1)), 4)
TEST_EQUAL(boost::num_edges(idb.getComponent(2)), 1)
idb.resolveGraphPeptideCentric();
// Resolution removes edges (shared-peptide assignments) but keeps components
// and vertices intact.
TEST_EQUAL(idb.getNrConnectedComponents(), 3)
// Only cc 0 and 1 have indist prot group
TEST_EQUAL(boost::num_edges(idb.getComponent(0)), 3)
// There is one shared peptide in the second component whose edge will be resolved
TEST_EQUAL(boost::num_edges(idb.getComponent(1)), 3)
TEST_EQUAL(boost::num_edges(idb.getComponent(2)), 1)
TEST_EQUAL(boost::num_vertices(idb.getComponent(0)), 4)
TEST_EQUAL(boost::num_vertices(idb.getComponent(1)), 5)
TEST_EQUAL(boost::num_vertices(idb.getComponent(2)), 2)
}
END_SECTION
START_SECTION(Resolution)
{
// End-to-end check of runIBGResolve (defined above) against two reference
// idXML outputs: resolve, store, and compare files for similarity.
// TODO problem is that there is no way to build the graph using existing groups.
// therefore resolution on the graph will redo groups and assign new scores.
// Therefore we need slightly different test files.
vector<ProteinIdentification> prots;
PeptideIdentificationList peps;
IdXMLFile idf;
idf.load(OPENMS_GET_TEST_DATA_PATH("PeptideProteinResolution_in.idXML"), prots, peps);
runIBGResolve(prots, peps);
std::string tmp_filename;
NEW_TMP_FILE(tmp_filename);
IdXMLFile().store(tmp_filename, prots, peps);
TEST_FILE_SIMILAR(OPENMS_GET_TEST_DATA_PATH("PeptideProteinResolution_out_ibg.idXML"), tmp_filename);
// Second input file, same procedure.
prots.clear();
peps.clear();
tmp_filename.clear();
NEW_TMP_FILE(tmp_filename);
idf.load(OPENMS_GET_TEST_DATA_PATH("PeptideProteinResolution_in2.idXML"), prots, peps);
runIBGResolve(prots, peps);
IdXMLFile().store(tmp_filename, prots, peps);
TEST_FILE_SIMILAR(OPENMS_GET_TEST_DATA_PATH("PeptideProteinResolution_out2_ibg.idXML"), tmp_filename);
}
END_SECTION
START_SECTION(IDBoostGraph on consensusXML TODO)
{
// TODO(review): intentionally empty placeholder — consensusXML support is not
// tested yet (see section title).
}
END_SECTION
END_TEST
| C++ |
3D | OpenMS/OpenMS | src/tests/class_tests/openms/data/header_file.h | .h | 384 | 15 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Timo Sachsenberg $
// $Authors: $
// --------------------------------------------------------------------------
#pragma once
// Deliberately empty dummy class: this header exists only as test data
// (e.g. for coding-convention / file-handling tests); it carries no behavior.
class irrelevant_class
{
};
| Unknown |
3D | OpenMS/OpenMS | src/tests/coding/cpplint.py | .py | 262,075 | 6,902 | #!/usr/bin/env python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It does also not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import glob
import itertools
import math # for log
import os
import re
import sre_compile
import string
import sys
import sysconfig
import unicodedata
import xml.etree.ElementTree
# if empty, use defaults
_valid_extensions = set([])
__VERSION__ = '1.5.4'
try:
xrange # Python 2
except NameError:
# -- pylint: disable=redefined-builtin
xrange = range # Python 3
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=emacs|eclipse|vs7|junit|sed|gsed]
[--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
[--repository=path]
[--linelength=digits] [--headers=x,y,...]
[--recursive]
[--exclude=path]
[--extensions=hpp,cpp,...]
[--includeorder=default|standardcfirst]
[--quiet]
[--version]
<file> [file] ...
Style checker for C/C++ source files.
This is a fork of the Google style checker with minor extensions.
The style guidelines this tries to follow are those in
https://google.github.io/styleguide/cppguide.html
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Default linted extensions are %s.
Other file types will be ignored.
Change the extensions with the --extensions flag.
Flags:
output=emacs|eclipse|vs7|junit|sed|gsed
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Further support exists for
eclipse (eclipse), and JUnit (junit). XML parsers such as those used
in Jenkins and Bamboo may also be used.
The sed format outputs sed commands that should fix some of the errors.
Note that this requires gnu sed. If that is installed as gsed on your
system (common e.g. on macOS with homebrew) you can use the gsed output
format. Sed commands are written to stdout, not stderr, so you should be
able to pipe output straight to a shell to run the fixes.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
Errors with lower verbosity levels have lower confidence and are more
likely to be false positives.
quiet
Don't print anything if no errors are found.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
repository=path
The top level directory of the repository, used to derive the header
guard CPP variable. By default, this is determined by searching for a
path that contains .git, .hg, or .svn. When this flag is specified, the
given path is used instead. This option allows the header guard CPP
variable to remain consistent even if members of a team have different
repository root directories (such as when checking out a subdirectory
with SVN). In addition, users of non-mainstream version control systems
can use this flag to ensure readable header guard CPP variables.
Examples:
Assuming that Alice checks out ProjectName and Bob checks out
ProjectName/trunk and trunk contains src/chrome/ui/browser.h, then
with no --repository flag, the header guard CPP variable will be:
Alice => TRUNK_SRC_CHROME_BROWSER_UI_BROWSER_H_
Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
If Alice uses the --repository=trunk flag and Bob omits the flag or
uses --repository=. then the header guard CPP variable will be:
Alice => SRC_CHROME_BROWSER_UI_BROWSER_H_
Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
root=subdir
The root directory used for deriving header guard CPP variable.
This directory is relative to the top level directory of the repository
which by default is determined by searching for a directory that contains
.git, .hg, or .svn but can also be controlled with the --repository flag.
If the specified directory does not exist, this flag is ignored.
Examples:
Assuming that src is the top level directory of the repository (and
cwd=top/src), the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
--root=.. => SRC_CHROME_BROWSER_UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
recursive
Search for files to lint recursively. Each directory given in the list
of files to be linted is replaced by all files that descend from that
directory. Files with extensions not in the valid extensions list are
excluded.
exclude=path
Exclude the given path from the list of files to be linted. Relative
paths are evaluated relative to the current directory and shell globbing
is performed. This flag can be provided multiple times to exclude
multiple files.
Examples:
--exclude=one.cc
--exclude=src/*.cc
--exclude=src/*.cc --exclude=test/*.cc
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=%s
includeorder=default|standardcfirst
For the build/include_order rule, the default is to blindly assume angle
bracket includes with file extension are c-system-headers (default),
even knowing this will have false classifications.
The default is established at google.
standardcfirst means to instead use an allow-list of known c headers and
treat all others as separate group of "other system headers". The C headers
included are those of the C-standard lib and closely related ones.
headers=x,y,...
The header extensions that cpplint will treat as .h in checks. Values are
automatically added to --extensions list.
(by default, only files with extensions %s will be assumed to be headers)
Examples:
--headers=%s
--headers=hpp,hxx
--headers=hpp
cpplint.py supports per-directory configurations specified in CPPLINT.cfg
files. CPPLINT.cfg file can contain a number of key=value pairs.
Currently the following options are supported:
set noparent
filter=+filter1,-filter2,...
exclude_files=regex
linelength=80
root=subdir
headers=x,y,...
"set noparent" option prevents cpplint from traversing directory tree
upwards looking for more .cfg files in parent directories. This option
is usually placed in the top-level project directory.
The "filter" option is similar in function to --filter flag. It specifies
message filters in addition to the |_DEFAULT_FILTERS| and those specified
through --filter command-line flag.
"exclude_files" allows to specify a regular expression to be matched against
a file name. If the expression matches, the file is skipped and not run
through the linter.
"linelength" allows to specify the allowed line length for the project.
The "root" option is similar in function to the --root flag (see example
above). Paths are relative to the directory of the CPPLINT.cfg.
The "headers" option is similar in function to the --headers flag
(see example above).
CPPLINT.cfg has an effect on files in the same directory and all
sub-directories, unless overridden by a nested configuration file.
Example file:
filter=-build/include_order,+build/include_alpha
exclude_files=.*\\.cc
The above example disables build/include_order warning and enables
build/include_alpha as well as excludes all .cc from being
processed by linter, in the current directory (where the .cfg
file is located) and all sub-directories.
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/c++11',
'build/c++14',
'build/c++tr1',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_subdir',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces_headers',
'build/namespaces_literals',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/inheritance',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/strings',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/indentation_namespace',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/vlog',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/empty_conditional_body',
'whitespace/empty_if_body',
'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo',
]
# keywords to use with --outputs which generate stdout for machine processing
_MACHINE_OUTPUTS = [
'junit',
'sed',
'gsed'
]
# These error categories are no longer enforced by cpplint, but for backwards-
# compatibility they may still appear in NOLINT comments.
_LEGACY_ERROR_CATEGORIES = [
'readability/streams',
'readability/function',
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = ['-build/include_alpha']
# The default list of categories suppressed for C (not C++) files.
_DEFAULT_C_SUPPRESSED_CATEGORIES = [
'readability/casting',
]
# The default list of categories suppressed for Linux Kernel files.
_DEFAULT_KERNEL_SUPPRESSED_CATEGORIES = [
'whitespace/tab',
]
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# C++ headers
_CPP_HEADERS = frozenset([
# Legacy
'algobase.h',
'algo.h',
'alloc.h',
'builtinbuf.h',
'bvector.h',
'complex.h',
'defalloc.h',
'deque.h',
'editbuf.h',
'fstream.h',
'function.h',
'hash_map',
'hash_map.h',
'hash_set',
'hash_set.h',
'hashtable.h',
'heap.h',
'indstream.h',
'iomanip.h',
'iostream.h',
'istream.h',
'iterator.h',
'list.h',
'map.h',
'multimap.h',
'multiset.h',
'ostream.h',
'pair.h',
'parsestream.h',
'pfstream.h',
'procbuf.h',
'pthread_alloc',
'pthread_alloc.h',
'rope',
'rope.h',
'ropeimpl.h',
'set.h',
'slist',
'slist.h',
'stack.h',
'stdiostream.h',
'stl_alloc.h',
'stl_relops.h',
'streambuf.h',
'stream.h',
'strfile.h',
'strstream.h',
'tempbuf.h',
'tree.h',
'type_traits.h',
'vector.h',
# 17.6.1.2 C++ library headers
'algorithm',
'array',
'atomic',
'bitset',
'chrono',
'codecvt',
'complex',
'condition_variable',
'deque',
'exception',
'forward_list',
'fstream',
'functional',
'future',
'initializer_list',
'iomanip',
'ios',
'iosfwd',
'iostream',
'istream',
'iterator',
'limits',
'list',
'locale',
'map',
'memory',
'mutex',
'new',
'numeric',
'ostream',
'queue',
'random',
'ratio',
'regex',
'scoped_allocator',
'set',
'sstream',
'stack',
'stdexcept',
'streambuf',
'string',
'strstream',
'system_error',
'thread',
'tuple',
'typeindex',
'typeinfo',
'type_traits',
'unordered_map',
'unordered_set',
'utility',
'valarray',
'vector',
# 17.6.1.2 C++14 headers
'shared_mutex',
# 17.6.1.2 C++17 headers
'any',
'charconv',
'codecvt',
'execution',
'filesystem',
'memory_resource',
'optional',
'string_view',
'variant',
# 17.6.1.2 C++ headers for C library facilities
'cassert',
'ccomplex',
'cctype',
'cerrno',
'cfenv',
'cfloat',
'cinttypes',
'ciso646',
'climits',
'clocale',
'cmath',
'csetjmp',
'csignal',
'cstdalign',
'cstdarg',
'cstdbool',
'cstddef',
'cstdint',
'cstdio',
'cstdlib',
'cstring',
'ctgmath',
'ctime',
'cuchar',
'cwchar',
'cwctype',
])
# C headers
_C_HEADERS = frozenset([
# System C headers
'assert.h',
'complex.h',
'ctype.h',
'errno.h',
'fenv.h',
'float.h',
'inttypes.h',
'iso646.h',
'limits.h',
'locale.h',
'math.h',
'setjmp.h',
'signal.h',
'stdalign.h',
'stdarg.h',
'stdatomic.h',
'stdbool.h',
'stddef.h',
'stdint.h',
'stdio.h',
'stdlib.h',
'stdnoreturn.h',
'string.h',
'tgmath.h',
'threads.h',
'time.h',
'uchar.h',
'wchar.h',
'wctype.h',
# additional POSIX C headers
'aio.h',
'arpa/inet.h',
'cpio.h',
'dirent.h',
'dlfcn.h',
'fcntl.h',
'fmtmsg.h',
'fnmatch.h',
'ftw.h',
'glob.h',
'grp.h',
'iconv.h',
'langinfo.h',
'libgen.h',
'monetary.h',
'mqueue.h',
'ndbm.h',
'net/if.h',
'netdb.h',
'netinet/in.h',
'netinet/tcp.h',
'nl_types.h',
'poll.h',
'pthread.h',
'pwd.h',
'regex.h',
'sched.h',
'search.h',
'semaphore.h',
'setjmp.h',
'signal.h',
'spawn.h',
'strings.h',
'stropts.h',
'syslog.h',
'tar.h',
'termios.h',
'trace.h',
'ulimit.h',
'unistd.h',
'utime.h',
'utmpx.h',
'wordexp.h',
# additional GNUlib headers
'a.out.h',
'aliases.h',
'alloca.h',
'ar.h',
'argp.h',
'argz.h',
'byteswap.h',
'crypt.h',
'endian.h',
'envz.h',
'err.h',
'error.h',
'execinfo.h',
'fpu_control.h',
'fstab.h',
'fts.h',
'getopt.h',
'gshadow.h',
'ieee754.h',
'ifaddrs.h',
'libintl.h',
'mcheck.h',
'mntent.h',
'obstack.h',
'paths.h',
'printf.h',
'pty.h',
'resolv.h',
'shadow.h',
'sysexits.h',
'ttyent.h',
# Additional linux glibc headers
'dlfcn.h',
'elf.h',
'features.h',
'gconv.h',
'gnu-versions.h',
'lastlog.h',
'libio.h',
'link.h',
'malloc.h',
'memory.h',
'netash/ash.h',
'netatalk/at.h',
'netax25/ax25.h',
'neteconet/ec.h',
'netipx/ipx.h',
'netiucv/iucv.h',
'netpacket/packet.h',
'netrom/netrom.h',
'netrose/rose.h',
'nfs/nfs.h',
'nl_types.h',
'nss.h',
're_comp.h',
'regexp.h',
'sched.h',
'sgtty.h',
'stab.h',
'stdc-predef.h',
'stdio_ext.h',
'syscall.h',
'termio.h',
'thread_db.h',
'ucontext.h',
'ustat.h',
'utmp.h',
'values.h',
'wait.h',
'xlocale.h',
# Hardware specific headers
'arm_neon.h',
'emmintrin.h',
'xmmintin.h',
])
# Folders of C libraries so commonly used in C++,
# that they have parity with standard C libraries.
C_STANDARD_HEADER_FOLDERS = frozenset([
# standard C library
"sys",
# glibc for linux
"arpa",
"asm-generic",
"bits",
"gnu",
"net",
"netinet",
"protocols",
"rpc",
"rpcsvc",
"scsi",
# linux kernel header
"drm",
"linux",
"misc",
"mtd",
"rdma",
"sound",
"video",
"xen",
])
# Type names
_TYPES = re.compile(
r'^(?:'
# [dcl.type.simple]
r'(char(16_t|32_t)?)|wchar_t|'
r'bool|short|int|long|signed|unsigned|float|double|'
# [support.types]
r'(ptrdiff_t|size_t|max_align_t|nullptr_t)|'
# [cstdint.syn]
r'(u?int(_fast|_least)?(8|16|32|64)_t)|'
r'(u?int(max|ptr)_t)|'
r')$')
# These headers are excluded from [build/include] and [build/include_order]
# checks:
# - Anything not following google file name conventions (containing an
# uppercase character, such as Python.h or nsStringAPI.h, for example).
# - Lua headers.
_THIRD_PARTY_HEADERS_PATTERN = re.compile(
r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
# Pattern for matching FileInfo.BaseName() against test file name
_test_suffixes = ['_test', '_regtest', '_unittest']
_TEST_FILE_SUFFIX = '(' + '|'.join(_test_suffixes) + r')$'
# Pattern that matches only complete whitespace, possibly across multiple lines.
_EMPTY_CONDITIONAL_BODY_PATTERN = re.compile(r'^\s*$', re.DOTALL)
# Assertion macros. These are defined in base/logging.h and
# testing/base/public/gunit.h.
_CHECK_MACROS = [
    'DCHECK', 'CHECK',
    'EXPECT_TRUE', 'ASSERT_TRUE',
    'EXPECT_FALSE', 'ASSERT_FALSE',
]

# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE.
# Maps a macro name to a dict from comparison operator to the dedicated
# comparison macro, e.g. _CHECK_REPLACEMENT['CHECK']['=='] == 'CHECK_EQ'.
_CHECK_REPLACEMENT = {macro_name: {} for macro_name in _CHECK_MACROS}

for cmp_op, cmp_suffix in [('==', 'EQ'), ('!=', 'NE'),
                           ('>=', 'GE'), ('>', 'GT'),
                           ('<=', 'LE'), ('<', 'LT')]:
  _CHECK_REPLACEMENT['DCHECK'][cmp_op] = 'DCHECK_%s' % cmp_suffix
  _CHECK_REPLACEMENT['CHECK'][cmp_op] = 'CHECK_%s' % cmp_suffix
  _CHECK_REPLACEMENT['EXPECT_TRUE'][cmp_op] = 'EXPECT_%s' % cmp_suffix
  _CHECK_REPLACEMENT['ASSERT_TRUE'][cmp_op] = 'ASSERT_%s' % cmp_suffix

# For the *_FALSE variants the comparison must be logically inverted
# (EXPECT_FALSE(a == b) is equivalent to EXPECT_NE(a, b)).
for cmp_op, inv_suffix in [('==', 'NE'), ('!=', 'EQ'),
                           ('>=', 'LT'), ('>', 'LE'),
                           ('<=', 'GT'), ('<', 'GE')]:
  _CHECK_REPLACEMENT['EXPECT_FALSE'][cmp_op] = 'EXPECT_%s' % inv_suffix
  _CHECK_REPLACEMENT['ASSERT_FALSE'][cmp_op] = 'ASSERT_%s' % inv_suffix
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_OTHER_SYS_HEADER = 3
_LIKELY_MY_HEADER = 4
_POSSIBLE_MY_HEADER = 5
_OTHER_HEADER = 6
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
# Match strings that indicate we're working on a C (not C++) file.
_SEARCH_C_FILE = re.compile(r'\b(?:LINT_C_FILE|'
r'vim?:\s*.*(\s*|:)filetype=c(\s*|:|$))')
# Match string that indicates we're working on a Linux Kernel file.
_SEARCH_KERNEL_FILE = re.compile(r'\b(?:LINT_KERNEL_FILE)')
# Commands for sed to fix the problem
_SED_FIXUPS = {
'Remove spaces around =': r's/ = /=/',
'Remove spaces around !=': r's/ != /!=/',
'Remove space before ( in if (': r's/if (/if(/',
'Remove space before ( in for (': r's/for (/for(/',
'Remove space before ( in while (': r's/while (/while(/',
'Remove space before ( in switch (': r's/switch (/switch(/',
'Should have a space between // and comment': r's/\/\//\/\/ /',
'Missing space before {': r's/\([^ ]\){/\1 {/',
'Tab found, replace by spaces': r's/\t/ /g',
'Line ends in whitespace. Consider deleting these extra spaces.': r's/\s*$//',
'You don\'t need a ; after a }': r's/};/}/',
'Missing space after ,': r's/,\([^ ]\)/, \1/g',
}
_regexp_compile_cache = {}
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None
_root_debug = False
# The top level repository directory. If set, _root is calculated relative to
# this directory instead of the directory containing version control artifacts.
# This is set by the --repository flag.
_repository = None
# Files to exclude from linting. This is set by the --exclude flag.
_excludes = None
# Whether to suppress all PrintInfo messages, UNRELATED to the --quiet flag
_quiet = False
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
# This allows to use different include order rule than default
_include_order = "default"
try:
unicode
except NameError:
# -- pylint: disable=redefined-builtin
basestring = unicode = str
try:
long
except NameError:
# -- pylint: disable=redefined-builtin
long = int
if sys.version_info < (3,):
# -- pylint: disable=no-member
# BINARY_TYPE = str
itervalues = dict.itervalues
iteritems = dict.iteritems
else:
# BINARY_TYPE = bytes
itervalues = dict.values
iteritems = dict.items
def unicode_escape_decode(x):
  """Decodes unicode escapes on Python 2; returns the input unchanged on 3."""
  # On Python 3 source strings are already text, so no decoding is needed.
  if sys.version_info >= (3,):
    return x
  return codecs.unicode_escape_decode(x)[0]
# Treat all headers starting with 'h' equally: .h, .hpp, .hxx etc.
# This is set by --headers flag.
_hpp_headers = set([])

# {str, bool}: a map from error categories to booleans which indicate if the
# category should be suppressed for every line.
_global_error_suppressions = {}
def ProcessHppHeadersOption(val):
  """Parses the --headers flag value into the _hpp_headers set.

  Args:
    val: str, comma-separated list of header extensions (without dots).
  """
  global _hpp_headers
  try:
    _hpp_headers = {ext.strip() for ext in val.split(',')}
  except ValueError:
    # NOTE(review): str.split never raises ValueError, so this handler looks
    # unreachable for string input -- confirm the intended failure mode.
    PrintUsage('Header extensions must be comma separated list.')
def ProcessIncludeOrderOption(val):
  """Sets the module's include-order policy from the --includeorder flag.

  Args:
    val: str or None, the flag value; one of None, "default", or
        "standardcfirst". Anything else reports a usage error.
  """
  if val is None or val == "default":
    pass
  elif val == "standardcfirst":
    global _include_order
    _include_order = val
  else:
    # Interpolate the offending value: previously the '%' operator was
    # missing, so the literal text '%s' appeared in the usage message.
    PrintUsage('Invalid includeorder value %s. Expected default|standardcfirst'
               % val)
def IsHeaderExtension(file_extension):
  """True if the extension (without dot) is treated as a C/C++ header."""
  header_extensions = GetHeaderExtensions()
  return file_extension in header_extensions
def GetHeaderExtensions():
  """Returns the set of extensions treated as headers.

  Precedence: the --headers flag wins; otherwise the 'h'-containing subset of
  --extensions; otherwise a built-in default set.
  """
  if _hpp_headers:
    return _hpp_headers
  if _valid_extensions:
    return set(ext for ext in _valid_extensions if 'h' in ext)
  return {'h', 'hh', 'hpp', 'hxx', 'h++', 'cuh'}
# The allowed extensions for file names
# This is set by --extensions flag
def GetAllExtensions():
  """Returns the union of header extensions and lintable source extensions."""
  default_sources = {'c', 'cc', 'cpp', 'cxx', 'c++', 'cu'}
  return GetHeaderExtensions().union(_valid_extensions or default_sources)
def ProcessExtensionsOption(val):
  """Parses the --extensions flag value into the _valid_extensions set.

  Args:
    val: str, comma-separated list of file extensions (without dots).
  """
  global _valid_extensions
  try:
    extensions = [ext.strip() for ext in val.split(',')]
    _valid_extensions = set(extensions)
  except ValueError:
    # NOTE(review): str.split and this comprehension do not raise ValueError,
    # so this handler appears unreachable for string input -- verify intent.
    PrintUsage('Extensions should be a comma-separated list of values;'
               'for example: extensions=hpp,cpp\n'
               'This could not be parsed: "%s"' % (val,))
def GetNonHeaderExtensions():
  """Returns extensions linted as source files (all extensions minus headers)."""
  all_extensions = GetAllExtensions()
  return all_extensions.difference(GetHeaderExtensions())
def ParseNolintSuppressions(filename, raw_line, linenum, error):
  """Updates the global list of line error-suppressions.

  Parses any NOLINT comments on the current line, updating the global
  error_suppressions store. Reports an error if the NOLINT comment
  was malformed.

  Args:
    filename: str, the name of the input file.
    raw_line: str, the line of input text, with comments.
    linenum: int, the number of the current line.
    error: function, an error handler.
  """
  matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
  if not matched:
    return
  # NOLINTNEXTLINE suppresses the line after the comment, not the line itself.
  suppressed_line = linenum + 1 if matched.group(1) else linenum
  category = matched.group(2)
  if category in (None, '(*)'):  # => "suppress all"
    _error_suppressions.setdefault(None, set()).add(suppressed_line)
    return
  # Strip the surrounding parentheses captured by the regex.
  if category.startswith('(') and category.endswith(')'):
    category = category[1:-1]
  if category in _ERROR_CATEGORIES:
    _error_suppressions.setdefault(category, set()).add(suppressed_line)
  elif category not in _LEGACY_ERROR_CATEGORIES:
    error(filename, linenum, 'readability/nolint', 5,
          'Unknown NOLINT error category: %s' % category)
def ProcessGlobalSuppresions(lines):
  """Updates the list of global error suppressions.

  Parses any lint directives in the file that have global effect.

  Args:
    lines: An array of strings, each representing a line of the file, with the
           last element being empty if the file is terminated with a newline.
  """
  # Each (marker regex, suppressed categories) pair enables a whole family of
  # suppressions when its marker appears anywhere in the file.
  directives = (
      (_SEARCH_C_FILE, _DEFAULT_C_SUPPRESSED_CATEGORIES),
      (_SEARCH_KERNEL_FILE, _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES),
  )
  for line in lines:
    for pattern, categories in directives:
      if pattern.search(line):
        for category in categories:
          _global_error_suppressions[category] = True
def ResetNolintSuppressions():
  """Resets the set of NOLINT suppressions to empty."""
  for suppressions in (_error_suppressions, _global_error_suppressions):
    suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
  """Returns true if the specified error category is suppressed on this line.

  Consults the global error_suppressions map populated by
  ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions.

  Args:
    category: str, the category of the error.
    linenum: int, the current line number.

  Returns:
    bool, True iff the error should be suppressed due to a NOLINT comment or
    global suppression.
  """
  # A file-wide suppression for the category wins outright.
  if _global_error_suppressions.get(category, False):
    return True
  # Otherwise check line-specific suppressions: the category's own set, then
  # the None key which holds "suppress everything" lines.
  if linenum in _error_suppressions.get(category, set()):
    return True
  return linenum in _error_suppressions.get(None, set())
def Match(pattern, s):
  """Matches the string with the pattern, caching the compiled regexp."""
  # The regexp compilation caching is inlined in both Match and Search for
  # performance reasons; factoring it out into a separate function turns out
  # to be noticeably expensive.
  # Use the public re.compile instead of the internal sre_compile module,
  # which is deprecated since Python 3.11; behavior is identical.
  if pattern not in _regexp_compile_cache:
    _regexp_compile_cache[pattern] = re.compile(pattern)
  return _regexp_compile_cache[pattern].match(s)
def ReplaceAll(pattern, rep, s):
  """Replaces instances of pattern in a string with a replacement.

  The compiled regex is kept in a cache shared by Match and Search.

  Args:
    pattern: regex pattern
    rep: replacement text
    s: search string

  Returns:
    string with replacements made (or original string if no replacements)
  """
  # re.compile replaces the deprecated internal sre_compile module
  # (deprecated since Python 3.11); behavior is identical.
  if pattern not in _regexp_compile_cache:
    _regexp_compile_cache[pattern] = re.compile(pattern)
  return _regexp_compile_cache[pattern].sub(rep, s)
def Search(pattern, s):
  """Searches the string for the pattern, caching the compiled regexp."""
  # re.compile replaces the deprecated internal sre_compile module
  # (deprecated since Python 3.11); behavior is identical.
  if pattern not in _regexp_compile_cache:
    _regexp_compile_cache[pattern] = re.compile(pattern)
  return _regexp_compile_cache[pattern].search(s)
def _IsSourceExtension(s):
  """File extension (excluding dot) matches a source file extension."""
  source_extensions = GetNonHeaderExtensions()
  return s in source_extensions
class _IncludeState(object):
  """Tracks line numbers for includes, and the order in which includes appear.

  include_list contains list of lists of (header, line number) pairs.
  It's a lists of lists rather than just one flat list to make it
  easier to update across preprocessor boundaries.

  Call CheckNextIncludeOrder() once for each header in the file, passing
  in the type constants defined above. Calls in an illegal order will
  raise an _IncludeError with an appropriate error message.
  """
  # self._section will move monotonically through this set. If it ever
  # needs to move backwards, CheckNextIncludeOrder will raise an error.
  _INITIAL_SECTION = 0
  _MY_H_SECTION = 1
  _C_SECTION = 2
  _CPP_SECTION = 3
  _OTHER_SYS_SECTION = 4
  _OTHER_H_SECTION = 5

  # Human-readable names for the module-level header-type constants, used in
  # "Found X after Y" error messages.
  _TYPE_NAMES = {
    _C_SYS_HEADER: 'C system header',
    _CPP_SYS_HEADER: 'C++ system header',
    _OTHER_SYS_HEADER: 'other system header',
    _LIKELY_MY_HEADER: 'header this file implements',
    _POSSIBLE_MY_HEADER: 'header this file may implement',
    _OTHER_HEADER: 'other header',
  }
  # Human-readable names for the section constants above.
  _SECTION_NAMES = {
    _INITIAL_SECTION: "... nothing. (This can't be an error.)",
    _MY_H_SECTION: 'a header this file implements',
    _C_SECTION: 'C system header',
    _CPP_SECTION: 'C++ system header',
    _OTHER_SYS_SECTION: 'other system header',
    _OTHER_H_SECTION: 'other header',
  }

  def __init__(self):
    self.include_list = [[]]
    self._section = None
    self._last_header = None
    self.ResetSection('')

  def FindHeader(self, header):
    """Check if a header has already been included.

    Args:
      header: header to check.

    Returns:
      Line number of previous occurrence, or -1 if the header has not
      been seen before.
    """
    for section_list in self.include_list:
      for f in section_list:
        if f[0] == header:
          return f[1]
    return -1

  def ResetSection(self, directive):
    """Reset section checking for preprocessor directive.

    Args:
      directive: preprocessor directive (e.g. "if", "else").
    """
    # The name of the current section.
    self._section = self._INITIAL_SECTION
    # The path of last found header.
    self._last_header = ''

    # Update list of includes. Note that we never pop from the
    # include list.
    if directive in ('if', 'ifdef', 'ifndef'):
      self.include_list.append([])
    elif directive in ('else', 'elif'):
      self.include_list[-1] = []

  def SetLastHeader(self, header_path):
    # Record the most recent header so IsInAlphabeticalOrder can compare
    # the next include against it.
    self._last_header = header_path

  def CanonicalizeAlphabeticalOrder(self, header_path):
    """Returns a path canonicalized for alphabetical comparison.

    - replaces "-" with "_" so they both cmp the same.
    - removes '-inl' since we don't require them to be after the main header.
    - lowercase everything, just in case.

    Args:
      header_path: Path to be canonicalized.

    Returns:
      Canonicalized path.
    """
    return header_path.replace('-inl.h', '.h').replace('-', '_').lower()

  def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
    """Check if a header is in alphabetical order with the previous header.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      header_path: Canonicalized header to be checked.

    Returns:
      Returns true if the header is in alphabetical order.
    """
    # If previous section is different from current section, _last_header will
    # be reset to empty string, so it's always less than current header.
    #
    # If previous line was a blank line, assume that the headers are
    # intentionally sorted the way they are.
    if (self._last_header > header_path and
        Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])):
      return False
    return True

  def CheckNextIncludeOrder(self, header_type):
    """Returns a non-empty error message if the next header is out of order.

    This function also updates the internal state to be ready to check
    the next include.

    Args:
      header_type: One of the _XXX_HEADER constants defined above.

    Returns:
      The empty string if the header is in the right order, or an
      error message describing what's wrong.
    """
    # Built eagerly; only returned when the section would move backwards.
    error_message = ('Found %s after %s' %
                     (self._TYPE_NAMES[header_type],
                      self._SECTION_NAMES[self._section]))

    last_section = self._section

    if header_type == _C_SYS_HEADER:
      if self._section <= self._C_SECTION:
        self._section = self._C_SECTION
      else:
        self._last_header = ''
        return error_message
    elif header_type == _CPP_SYS_HEADER:
      if self._section <= self._CPP_SECTION:
        self._section = self._CPP_SECTION
      else:
        self._last_header = ''
        return error_message
    elif header_type == _OTHER_SYS_HEADER:
      if self._section <= self._OTHER_SYS_SECTION:
        self._section = self._OTHER_SYS_SECTION
      else:
        self._last_header = ''
        return error_message
    elif header_type == _LIKELY_MY_HEADER:
      if self._section <= self._MY_H_SECTION:
        self._section = self._MY_H_SECTION
      else:
        self._section = self._OTHER_H_SECTION
    elif header_type == _POSSIBLE_MY_HEADER:
      if self._section <= self._MY_H_SECTION:
        self._section = self._MY_H_SECTION
      else:
        # This will always be the fallback because we're not sure
        # enough that the header is associated with this file.
        self._section = self._OTHER_H_SECTION
    else:
      assert header_type == _OTHER_HEADER
      self._section = self._OTHER_H_SECTION

    # Crossing into a new section resets the alphabetical-order baseline.
    if last_section != self._section:
      self._last_header = ''

    return ''
class _CppLintState(object):
  """Maintains module-wide state: verbosity, filters, counters, and output."""

  def __init__(self):
    self.verbose_level = 1  # global setting.
    self.error_count = 0    # global count of reported errors
    # filters to apply when emitting error messages
    self.filters = _DEFAULT_FILTERS[:]
    # backup of filter list. Used to restore the state after each file.
    self._filters_backup = self.filters[:]
    self.counting = 'total'  # In what way are we counting errors?
    self.errors_by_category = {}  # string to int dict storing error counts
    self.quiet = False  # Suppress non-error messages?

    # output format:
    # "emacs" - format that emacs can parse (default)
    # "eclipse" - format that eclipse can parse
    # "vs7" - format that Microsoft Visual Studio 7 can parse
    # "junit" - format that Jenkins, Bamboo, etc can parse
    # "sed" - returns a gnu sed command to fix the problem
    # "gsed" - like sed, but names the command gsed, e.g. for macOS homebrew users
    self.output_format = 'emacs'

    # For JUnit output, save errors and failures until the end so that they
    # can be written into the XML
    self._junit_errors = []
    self._junit_failures = []

  def SetOutputFormat(self, output_format):
    """Sets the output format for errors."""
    self.output_format = output_format

  def SetQuiet(self, quiet):
    """Sets the module's quiet settings, and returns the previous setting."""
    last_quiet = self.quiet
    self.quiet = quiet
    return last_quiet

  def SetVerboseLevel(self, level):
    """Sets the module's verbosity, and returns the previous setting."""
    last_verbose_level = self.verbose_level
    self.verbose_level = level
    return last_verbose_level

  def SetCountingStyle(self, counting_style):
    """Sets the module's counting options."""
    self.counting = counting_style

  def SetFilters(self, filters):
    """Sets the error-message filters.

    These filters are applied when deciding whether to emit a given
    error message.

    Args:
      filters: A string of comma-separated filters (eg "+whitespace/indent").
               Each filter should start with + or -; else we die.

    Raises:
      ValueError: The comma-separated filters did not all start with '+' or '-'.
                  E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
    """
    # Default filters always have less priority than the flag ones.
    self.filters = _DEFAULT_FILTERS[:]
    self.AddFilters(filters)

  def AddFilters(self, filters):
    """ Adds more filters to the existing list of error-message filters. """
    for filt in filters.split(','):
      clean_filt = filt.strip()
      if clean_filt:
        self.filters.append(clean_filt)
    # Validate the whole accumulated list, not just the newly added filters.
    for filt in self.filters:
      if not (filt.startswith('+') or filt.startswith('-')):
        raise ValueError('Every filter in --filters must start with + or -'
                         ' (%s does not)' % filt)

  def BackupFilters(self):
    """ Saves the current filter list to backup storage."""
    self._filters_backup = self.filters[:]

  def RestoreFilters(self):
    """ Restores filters previously backed up."""
    self.filters = self._filters_backup[:]

  def ResetErrorCounts(self):
    """Sets the module's error statistic back to zero."""
    self.error_count = 0
    self.errors_by_category = {}

  def IncrementErrorCount(self, category):
    """Bumps the module's error statistic."""
    self.error_count += 1
    if self.counting in ('toplevel', 'detailed'):
      # 'toplevel' buckets by category prefix (e.g. 'whitespace');
      # 'detailed' keeps the full category (e.g. 'whitespace/indent').
      if self.counting != 'detailed':
        category = category.split('/')[0]
      if category not in self.errors_by_category:
        self.errors_by_category[category] = 0
      self.errors_by_category[category] += 1

  def PrintErrorCounts(self):
    """Print a summary of errors by category, and the total."""
    for category, count in sorted(iteritems(self.errors_by_category)):
      self.PrintInfo('Category \'%s\' errors found: %d\n' %
                     (category, count))
    if self.error_count > 0:
      self.PrintInfo('Total errors found: %d\n' % self.error_count)

  def PrintInfo(self, message):
    # _quiet does not represent --quiet flag.
    # Hide infos from stdout to keep stdout pure for machine consumption
    if not _quiet and self.output_format not in _MACHINE_OUTPUTS:
      sys.stdout.write(message)

  def PrintError(self, message):
    # JUnit output collects errors for the final XML; others go to stderr.
    if self.output_format == 'junit':
      self._junit_errors.append(message)
    else:
      sys.stderr.write(message)

  def AddJUnitFailure(self, filename, linenum, message, category, confidence):
    self._junit_failures.append((filename, linenum, message, category,
                                 confidence))

  def FormatJUnitXML(self):
    # Renders the collected errors/failures as a single JUnit <testsuite>.
    num_errors = len(self._junit_errors)
    num_failures = len(self._junit_failures)

    testsuite = xml.etree.ElementTree.Element('testsuite')
    testsuite.attrib['errors'] = str(num_errors)
    testsuite.attrib['failures'] = str(num_failures)
    testsuite.attrib['name'] = 'cpplint'

    if num_errors == 0 and num_failures == 0:
      # JUnit consumers expect at least one testcase; emit a passing one.
      testsuite.attrib['tests'] = str(1)
      xml.etree.ElementTree.SubElement(testsuite, 'testcase', name='passed')
    else:
      testsuite.attrib['tests'] = str(num_errors + num_failures)
      if num_errors > 0:
        testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
        testcase.attrib['name'] = 'errors'
        error = xml.etree.ElementTree.SubElement(testcase, 'error')
        error.text = '\n'.join(self._junit_errors)
      if num_failures > 0:
        # Group failures by file
        failed_file_order = []
        failures_by_file = {}
        for failure in self._junit_failures:
          failed_file = failure[0]
          if failed_file not in failed_file_order:
            failed_file_order.append(failed_file)
            failures_by_file[failed_file] = []
          failures_by_file[failed_file].append(failure)
        # Create a testcase for each file
        for failed_file in failed_file_order:
          failures = failures_by_file[failed_file]
          testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
          testcase.attrib['name'] = failed_file
          failure = xml.etree.ElementTree.SubElement(testcase, 'failure')
          template = '{0}: {1} [{2}] [{3}]'
          texts = [template.format(f[1], f[2], f[3], f[4]) for f in failures]
          failure.text = '\n'.join(texts)

    xml_decl = '<?xml version="1.0" encoding="UTF-8" ?>\n'
    return xml_decl + xml.etree.ElementTree.tostring(testsuite, 'utf-8').decode('utf-8')
# Module-wide singleton holding lint state (verbosity, filters, counters).
_cpplint_state = _CppLintState()
# Thin module-level wrappers around the _cpplint_state singleton, so callers
# need not reference the state object directly.

def _OutputFormat():
  """Gets the module's output format."""
  return _cpplint_state.output_format


def _SetOutputFormat(output_format):
  """Sets the module's output format."""
  _cpplint_state.SetOutputFormat(output_format)


def _Quiet():
  """Returns the module's quiet setting."""
  return _cpplint_state.quiet


def _SetQuiet(quiet):
  """Set the module's quiet status, and return previous setting."""
  return _cpplint_state.SetQuiet(quiet)


def _VerboseLevel():
  """Returns the module's verbosity setting."""
  return _cpplint_state.verbose_level


def _SetVerboseLevel(level):
  """Sets the module's verbosity, and returns the previous setting."""
  return _cpplint_state.SetVerboseLevel(level)


def _SetCountingStyle(level):
  """Sets the module's counting options."""
  _cpplint_state.SetCountingStyle(level)


def _Filters():
  """Returns the module's list of output filters, as a list."""
  return _cpplint_state.filters


def _SetFilters(filters):
  """Sets the module's error-message filters.

  These filters are applied when deciding whether to emit a given
  error message.

  Args:
    filters: A string of comma-separated filters (eg "whitespace/indent").
             Each filter should start with + or -; else we die.
  """
  _cpplint_state.SetFilters(filters)


def _AddFilters(filters):
  """Adds more filter overrides.

  Unlike _SetFilters, this function does not reset the current list of filters
  available.

  Args:
    filters: A string of comma-separated filters (eg "whitespace/indent").
             Each filter should start with + or -; else we die.
  """
  _cpplint_state.AddFilters(filters)


def _BackupFilters():
  """ Saves the current filter list to backup storage."""
  _cpplint_state.BackupFilters()


def _RestoreFilters():
  """ Restores filters previously backed up."""
  _cpplint_state.RestoreFilters()
class _FunctionState(object):
  """Tracks current function name and the number of lines in its body."""

  _NORMAL_TRIGGER = 250  # for --v=0, 500 for --v=1, etc.
  _TEST_TRIGGER = 400    # about 50% more than _NORMAL_TRIGGER.

  def __init__(self):
    self.in_a_function = False
    self.lines_in_function = 0
    self.current_function = ''

  def Begin(self, function_name):
    """Start analyzing function body.

    Args:
      function_name: The name of the function being tracked.
    """
    self.in_a_function = True
    self.lines_in_function = 0
    self.current_function = function_name

  def Count(self):
    """Count line in current function body."""
    if self.in_a_function:
      self.lines_in_function += 1

  def Check(self, error, filename, linenum):
    """Report if too many lines in function body.

    Args:
      error: The function to call with any errors found.
      filename: The name of the current file.
      linenum: The number of the line to check.
    """
    if not self.in_a_function:
      return

    # Functions whose names look like tests get the larger trigger.
    if Match(r'T(EST|est)', self.current_function):
      base_trigger = self._TEST_TRIGGER
    else:
      base_trigger = self._NORMAL_TRIGGER
    # Each verbosity level doubles the allowed function length.
    trigger = base_trigger * 2**_VerboseLevel()

    if self.lines_in_function > trigger:
      error_level = int(math.log(self.lines_in_function / base_trigger, 2))
      # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
      if error_level > 5:
        error_level = 5
      error(filename, linenum, 'readability/fn_size', error_level,
            'Small and focused functions are preferred:'
            ' %s has %d non-comment lines'
            ' (error triggered by exceeding %d lines).' % (
                self.current_function, self.lines_in_function, trigger))

  def End(self):
    """Stop analyzing function body."""
    self.in_a_function = False
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
pass
class FileInfo(object):
  """Provides utility functions for filenames.

  FileInfo provides easy access to the components of a file's path
  relative to the project root.
  """

  def __init__(self, filename):
    self._filename = filename

  def FullName(self):
    """Make Windows paths like Unix."""
    return os.path.abspath(self._filename).replace('\\', '/')

  def RepositoryName(self):
    r"""FullName after removing the local path to the repository.

    If we have a real absolute path name here we can try to do something smart:
    detecting the root of the checkout and truncating /path/to/checkout from
    the name so that we get header guards that don't include things like
    "C:\\Documents and Settings\\..." or "/home/username/..." in them and thus
    people on different computers who have checked the source out to different
    locations won't see bogus errors.
    """
    fullname = self.FullName()

    if os.path.exists(fullname):
      project_dir = os.path.dirname(fullname)

      # If the user specified a repository path, it exists, and the file is
      # contained in it, use the specified repository path
      if _repository:
        repo = FileInfo(_repository).FullName()
        root_dir = project_dir
        while os.path.exists(root_dir):
          # allow case insensitive compare on Windows
          if os.path.normcase(root_dir) == os.path.normcase(repo):
            return os.path.relpath(fullname, root_dir).replace('\\', '/')
          one_up_dir = os.path.dirname(root_dir)
          # Stop at the filesystem root, where dirname() is a fixed point.
          if one_up_dir == root_dir:
            break
          root_dir = one_up_dir

      if os.path.exists(os.path.join(project_dir, ".svn")):
        # If there's a .svn file in the current directory, we recursively look
        # up the directory tree for the top of the SVN checkout
        root_dir = project_dir
        one_up_dir = os.path.dirname(root_dir)
        while os.path.exists(os.path.join(one_up_dir, ".svn")):
          root_dir = os.path.dirname(root_dir)
          one_up_dir = os.path.dirname(one_up_dir)

        prefix = os.path.commonprefix([root_dir, project_dir])
        return fullname[len(prefix) + 1:]

      # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
      # searching up from the current path.
      root_dir = current_dir = os.path.dirname(fullname)
      while current_dir != os.path.dirname(current_dir):
        if (os.path.exists(os.path.join(current_dir, ".git")) or
            os.path.exists(os.path.join(current_dir, ".hg")) or
            os.path.exists(os.path.join(current_dir, ".svn"))):
          # Keep the highest directory containing VCS metadata.
          root_dir = current_dir
        current_dir = os.path.dirname(current_dir)

      if (os.path.exists(os.path.join(root_dir, ".git")) or
          os.path.exists(os.path.join(root_dir, ".hg")) or
          os.path.exists(os.path.join(root_dir, ".svn"))):
        prefix = os.path.commonprefix([root_dir, project_dir])
        return fullname[len(prefix) + 1:]

    # Don't know what to do; header guard warnings may be wrong...
    return fullname

  def Split(self):
    """Splits the file into the directory, basename, and extension.

    For 'chrome/browser/browser.cc', Split() would
    return ('chrome/browser', 'browser', '.cc')

    Returns:
      A tuple of (directory, basename, extension).
    """
    googlename = self.RepositoryName()
    project, rest = os.path.split(googlename)
    return (project,) + os.path.splitext(rest)

  def BaseName(self):
    """File base name - text after the final slash, before the final period."""
    return self.Split()[1]

  def Extension(self):
    """File extension - text following the final period, includes that period."""
    return self.Split()[2]

  def NoExtension(self):
    """File has no source file extension."""
    return '/'.join(self.Split()[0:2])

  def IsSource(self):
    """File has a source file extension."""
    return _IsSourceExtension(self.Extension()[1:])
def _ShouldPrintError(category, confidence, linenum):
  """If confidence >= verbose, category passes filter and is not suppressed."""
  # There are three ways we might decide not to print an error message:
  # a "NOLINT(category)" comment appears in the source,
  # the verbosity level isn't high enough, or the filters filter it out.
  if IsErrorSuppressedByNolint(category, linenum):
    return False

  if confidence < _cpplint_state.verbose_level:
    return False

  # The last filter whose prefix matches the category wins:
  # '-' suppresses, '+' re-enables.
  is_filtered = False
  for one_filter in _Filters():
    sign, rule = one_filter[:1], one_filter[1:]
    if sign == '-':
      if category.startswith(rule):
        is_filtered = True
    elif sign == '+':
      if category.startswith(rule):
        is_filtered = False
    else:
      assert False  # should have been checked for in SetFilter.
  return not is_filtered
def Error(filename, linenum, category, confidence, message):
  """Logs the fact we've found a lint error.

  We log where the error was found, and also our confidence in the error,
  that is, how certain we are this is a legitimate style regression, and
  not a misidentification or a use that's sometimes justified.

  False positives can be suppressed by the use of
  "cpplint(category)" comments on the offending line. These are
  parsed into _error_suppressions.

  Args:
    filename: The name of the file containing the error.
    linenum: The number of the line containing the error.
    category: A string used to describe the "category" this bug
      falls under: "whitespace", say, or "runtime". Categories
      may have a hierarchy separated by slashes: "whitespace/indent".
    confidence: A number from 1-5 representing a confidence score for
      the error, with 5 meaning that we are certain of the problem,
      and 1 meaning that it could be a legitimate construct.
    message: The error message.
  """
  if _ShouldPrintError(category, confidence, linenum):
    _cpplint_state.IncrementErrorCount(category)
    if _cpplint_state.output_format == 'vs7':
      _cpplint_state.PrintError('%s(%s): error cpplint: [%s] %s [%d]\n' % (
          filename, linenum, category, message, confidence))
    elif _cpplint_state.output_format == 'eclipse':
      sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
          filename, linenum, message, category, confidence))
    elif _cpplint_state.output_format == 'junit':
      _cpplint_state.AddJUnitFailure(filename, linenum, message, category,
                                     confidence)
    elif _cpplint_state.output_format in ['sed', 'gsed']:
      # Emit a runnable sed command when a mechanical fix is known for this
      # exact message; otherwise emit a commented diagnostic on stderr.
      if message in _SED_FIXUPS:
        sys.stdout.write(_cpplint_state.output_format + " -i '%s%s' %s # %s [%s] [%d]\n" % (
            linenum, _SED_FIXUPS[message], filename, message, category, confidence))
      else:
        sys.stderr.write('# %s:%s: "%s" [%s] [%d]\n' % (
            filename, linenum, message, category, confidence))
    else:
      final_message = '%s:%s: %s [%s] [%d]\n' % (
          filename, linenum, message, category, confidence)
      sys.stderr.write(final_message)
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
    r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Match a single C style comment on the same line.
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# Matches multi-line C style comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of space removals tools so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
    r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
    _RE_PATTERN_C_COMMENTS + r'\s+|' +
    r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
    _RE_PATTERN_C_COMMENTS + r')')
def IsCppString(line):
  """Does line terminate so, that the next symbol is in string constant.

  This function does not consider single-line nor multi-line comments.

  Args:
    line: is a partial line of code starting from the 0..n.

  Returns:
    True, if next character appended to 'line' is inside a
    string constant.
  """
  # Neutralize escaped backslashes first so \\" is not mistaken for \".
  sanitized = line.replace('\\\\', 'XX')
  # Quotes that actually open/close a string: all double quotes, minus
  # escaped ones, minus character literals like '"'.
  effective_quotes = (sanitized.count('"')
                      - sanitized.count('\\"')
                      - sanitized.count("'\"'"))
  return effective_quotes % 2 == 1
def CleanseRawStrings(raw_lines):
  """Removes C++11 raw strings from lines.

  Before:
    static const char kData[] = R"(
        multi-line string
        )";

  After:
    static const char kData[] = ""
        (replaced by blank line)
        "";

  Args:
    raw_lines: list of raw lines.

  Returns:
    list of lines with C++11 raw strings replaced by empty strings.
  """
  # 'delimiter' is non-None while we are inside a multi-line raw string; it
  # holds the exact closing token, e.g. ')foo"'.
  delimiter = None
  lines_without_raw_strings = []
  for line in raw_lines:
    if delimiter:
      # Inside a raw string, look for the end
      end = line.find(delimiter)
      if end >= 0:
        # Found the end of the string, match leading space for this
        # line and resume copying the original lines, and also insert
        # a "" on the last line.
        leading_space = Match(r'^(\s*)\S', line)
        line = leading_space.group(1) + '""' + line[end + len(delimiter):]
        delimiter = None
      else:
        # Haven't found the end yet, append a blank line.
        line = '""'

    # Look for beginning of a raw string, and replace them with
    # empty strings. This is done in a loop to handle multiple raw
    # strings on the same line.
    while delimiter is None:
      # Look for beginning of a raw string.
      # See 2.14.15 [lex.string] for syntax.
      #
      # Once we have matched a raw string, we check the prefix of the
      # line to make sure that the line is not part of a single line
      # comment. It's done this way because we remove raw strings
      # before removing comments as opposed to removing comments
      # before removing raw strings. This is because there are some
      # cpplint checks that requires the comments to be preserved, but
      # we don't want to check comments that are inside raw strings.
      matched = Match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
      if (matched and
          not Match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//',
                    matched.group(1))):
        delimiter = ')' + matched.group(2) + '"'
        end = matched.group(3).find(delimiter)
        if end >= 0:
          # Raw string ended on same line
          line = (matched.group(1) + '""' +
                  matched.group(3)[end + len(delimiter):])
          delimiter = None
        else:
          # Start of a multi-line raw string
          line = matched.group(1) + '""'
      else:
        break

    lines_without_raw_strings.append(line)

  # TODO(unknown): if delimiter is not None here, we might want to
  # emit a warning for unterminated string.
  return lines_without_raw_strings
def FindNextMultiLineCommentStart(lines, lineix):
  """Find the beginning marker for a multiline comment."""
  for ix in range(lineix, len(lines)):
    stripped = lines[ix].strip()
    # Only report a '/*' whose comment continues past this line; one that
    # also closes on the same line ('/* ... */') is not a multi-line start.
    if stripped.startswith('/*') and stripped.find('*/', 2) < 0:
      return ix
  return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
  """We are inside a comment, find the end marker."""
  for ix in range(lineix, len(lines)):
    if lines[ix].strip().endswith('*/'):
      return ix
  return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
  """Clears a range of lines for multi-line comments."""
  # Having // <empty> comments makes the lines non-empty, so we will not get
  # unnecessary blank line warnings later in the code.
  lines[begin:end] = ['/**/'] * (end - begin)
def RemoveMultiLineComments(filename, lines, error):
  """Removes multiline (c-style) comments from lines."""
  lineix = 0
  while lineix < len(lines):
    begin = FindNextMultiLineCommentStart(lines, lineix)
    if begin >= len(lines):
      # No more multi-line comments in the file.
      return
    end = FindNextMultiLineCommentEnd(lines, begin)
    if end >= len(lines):
      error(filename, begin + 1, 'readability/multiline_comment', 5,
            'Could not find end of multi-line comment')
      return
    # Blank out the comment body (inclusive of the closing line).
    RemoveMultiLineCommentsFromRange(lines, begin, end + 1)
    lineix = end + 1
def CleanseComments(line):
  """Removes //-comments and single-line C-style /* */ comments.

  Args:
    line: A line of C++ source.

  Returns:
    The line with single-line comments removed.
  """
  slash_slash_pos = line.find('//')
  # Trim a '//' comment unless the marker sits inside a string literal.
  if slash_slash_pos != -1 and not IsCppString(line[:slash_slash_pos]):
    line = line[:slash_slash_pos].rstrip()
  # get rid of /* ... */
  return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
"""Holds 4 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments.
2) lines member contains lines without comments.
3) raw_lines member contains all the lines without processing.
4) lines_without_raw_strings member is same as raw_lines, but with C++11 raw
strings removed.
All these members are of <type 'list'>, and of the same length.
"""
  def __init__(self, lines):
    self.elided = []
    self.lines = []
    self.raw_lines = lines
    self.num_lines = len(lines)
    # Raw strings are stripped before comment cleansing so that comment
    # markers inside raw string literals are not misinterpreted.
    self.lines_without_raw_strings = CleanseRawStrings(lines)
    for linenum in range(len(self.lines_without_raw_strings)):
      self.lines.append(CleanseComments(
          self.lines_without_raw_strings[linenum]))
      # 'elided' additionally collapses string/char literals to ""/''.
      elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
      self.elided.append(CleanseComments(elided))
def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines
@staticmethod
def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if _RE_PATTERN_INCLUDE.match(elided):
return elided
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
# Replace quoted strings and digit separators. Both single quotes
# and double quotes are processed in the same loop, otherwise
# nested quotes wouldn't work.
collapsed = ''
while True:
# Find the first quote character
match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
if not match:
collapsed += elided
break
head, quote, tail = match.groups()
if quote == '"':
# Collapse double quoted strings
second_quote = tail.find('"')
if second_quote >= 0:
collapsed += head + '""'
elided = tail[second_quote + 1:]
else:
# Unmatched double quote, don't bother processing the rest
# of the line since this is probably a multiline string.
collapsed += elided
break
else:
# Found single quote, check nearby text to eliminate digit separators.
#
# There is no special handling for floating point here, because
# the integer/fractional/exponent parts would all be parsed
# correctly as long as there are digits on both sides of the
# separator. So we are fine as long as we don't see something
# like "0.'3" (gcc 4.9.0 will not allow this literal).
if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
collapsed += head + match_literal.group(1).replace("'", '')
elided = match_literal.group(2)
else:
second_quote = tail.find('\'')
if second_quote >= 0:
collapsed += head + "''"
elided = tail[second_quote + 1:]
else:
# Unmatched single quote
collapsed += elided
break
return collapsed
def FindEndOfExpressionInLine(line, startpos, stack):
  """Find the position just after the end of current parenthesized expression.
  Args:
    line: a CleansedLines line.
    startpos: start searching at this position.
    stack: nesting stack at startpos.
  Returns:
    On finding matching end: (index just after matching end, None)
    On finding an unclosed expression: (-1, None)
    Otherwise: (-1, new stack at end of this line)
  """
  # Scan character by character, maintaining a stack of open brackets.
  # '<' is ambiguous (template list vs. operator), so '<' entries on the
  # stack are tentative and may be discarded when later context shows
  # they were operators.
  for i in xrange(startpos, len(line)):
    char = line[i]
    if char in '([{':
      # Found start of parenthesized expression, push to expression stack
      stack.append(char)
    elif char == '<':
      # Found potential start of template argument list
      if i > 0 and line[i - 1] == '<':
        # Left shift operator
        if stack and stack[-1] == '<':
          stack.pop()
          if not stack:
            return (-1, None)
      elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
        # operator<, don't add to stack
        continue
      else:
        # Tentative start of template argument list
        stack.append('<')
    elif char in ')]}':
      # Found end of parenthesized expression.
      #
      # If we are currently expecting a matching '>', the pending '<'
      # must have been an operator. Remove them from expression stack.
      while stack and stack[-1] == '<':
        stack.pop()
      if not stack:
        return (-1, None)
      if ((stack[-1] == '(' and char == ')') or
          (stack[-1] == '[' and char == ']') or
          (stack[-1] == '{' and char == '}')):
        stack.pop()
        if not stack:
          # This closer matched the outermost opener: success.
          return (i + 1, None)
      else:
        # Mismatched parentheses
        return (-1, None)
    elif char == '>':
      # Found potential end of template argument list.
      # Ignore "->" and operator functions
      if (i > 0 and
          (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
        continue
      # Pop the stack if there is a matching '<'. Otherwise, ignore
      # this '>' since it must be an operator.
      if stack:
        if stack[-1] == '<':
          stack.pop()
          if not stack:
            return (i + 1, None)
    elif char == ';':
      # Found something that look like end of statements. If we are currently
      # expecting a '>', the matching '<' must have been an operator, since
      # template argument list should not contain statements.
      while stack and stack[-1] == '<':
        stack.pop()
      if not stack:
        return (-1, None)
  # Did not find end of expression or unbalanced parentheses on this line
  return (-1, stack)
def CloseExpression(clean_lines, linenum, pos):
  """If input points to ( or { or [ or <, finds the position that closes it.

  Given that lines[linenum][pos] is an opening '(' / '{' / '[' / '<',
  locates the matching closer, possibly on a later line.

  TODO(unknown): cpplint spends a fair bit of time matching parentheses.
  Ideally we would want to index all opening and closing parentheses once
  and have CloseExpression be just a simple lookup, but due to preprocessor
  tricks, this is not so easy.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.

  Returns:
    A tuple (line, linenum, pos) pointer *past* the closing brace, or
    (line, len(lines), -1) if we never find a close. Note we ignore
    strings and comments when matching; and the line we return is the
    'cleansed' line at linenum.
  """
  line = clean_lines.elided[linenum]
  # Bail out if we are not on an opening punctuation, or if this '<' is
  # actually the start of '<<' or '<=' (operators, not brackets).
  if line[pos] not in '({[<' or Match(r'<[<=]', line[pos:]):
    return (line, clean_lines.NumLines(), -1)
  # Try to close the expression on the starting line first.
  end_pos, stack = FindEndOfExpressionInLine(line, pos, [])
  if end_pos > -1:
    return (line, linenum, end_pos)
  # Keep scanning subsequent lines, carrying the nesting stack forward.
  while stack and linenum < clean_lines.NumLines() - 1:
    linenum += 1
    line = clean_lines.elided[linenum]
    end_pos, stack = FindEndOfExpressionInLine(line, 0, stack)
    if end_pos > -1:
      return (line, linenum, end_pos)
  # Ran out of lines without closing the expression; give up.
  return (line, clean_lines.NumLines(), -1)
def FindStartOfExpressionInLine(line, endpos, stack):
  """Find position at the matching start of current expression.
  This is almost the reverse of FindEndOfExpressionInLine, but note
  that the input position and returned position differs by 1.
  Args:
    line: a CleansedLines line.
    endpos: start searching at this position.
    stack: nesting stack at endpos.
  Returns:
    On finding matching start: (index at matching start, None)
    On finding an unclosed expression: (-1, None)
    Otherwise: (-1, new stack at beginning of this line)
  """
  # Scan right-to-left, pushing closers and popping when the matching
  # opener is found.  '>' entries are tentative, since '>' may be an
  # operator rather than a template-list terminator.
  i = endpos
  while i >= 0:
    char = line[i]
    if char in ')]}':
      # Found end of expression, push to expression stack
      stack.append(char)
    elif char == '>':
      # Found potential end of template argument list.
      #
      # Ignore it if it's a "->" or ">=" or "operator>"
      if (i > 0 and
          (line[i - 1] == '-' or
           Match(r'\s>=\s', line[i - 1:]) or
           Search(r'\boperator\s*$', line[0:i]))):
        i -= 1
      else:
        stack.append('>')
    elif char == '<':
      # Found potential start of template argument list
      if i > 0 and line[i - 1] == '<':
        # Left shift operator
        i -= 1
      else:
        # If there is a matching '>', we can pop the expression stack.
        # Otherwise, ignore this '<' since it must be an operator.
        if stack and stack[-1] == '>':
          stack.pop()
          if not stack:
            return (i, None)
    elif char in '([{':
      # Found start of expression.
      #
      # If there are any unmatched '>' on the stack, they must be
      # operators. Remove those.
      while stack and stack[-1] == '>':
        stack.pop()
      if not stack:
        return (-1, None)
      if ((char == '(' and stack[-1] == ')') or
          (char == '[' and stack[-1] == ']') or
          (char == '{' and stack[-1] == '}')):
        stack.pop()
        if not stack:
          # This opener matched the outermost closer: success.
          return (i, None)
      else:
        # Mismatched parentheses
        return (-1, None)
    elif char == ';':
      # Found something that look like end of statements. If we are currently
      # expecting a '<', the matching '>' must have been an operator, since
      # template argument list should not contain statements.
      while stack and stack[-1] == '>':
        stack.pop()
      if not stack:
        return (-1, None)
    i -= 1
  return (-1, stack)
def ReverseCloseExpression(clean_lines, linenum, pos):
  """If input points to ) or } or ] or >, finds the position that opens it.

  Given that lines[linenum][pos] is a closing ')' / '}' / ']' / '>',
  locates the matching opener, possibly on an earlier line.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.

  Returns:
    A tuple (line, linenum, pos) pointer *at* the opening brace, or
    (line, 0, -1) if we never find the matching opening brace. Note
    we ignore strings and comments when matching; and the line we
    return is the 'cleansed' line at linenum.
  """
  line = clean_lines.elided[linenum]
  # Not positioned on a closing punctuation: nothing to match.
  if line[pos] not in ')}]>':
    return (line, 0, -1)
  # Try to open the expression on the current line first.
  start_pos, stack = FindStartOfExpressionInLine(line, pos, [])
  if start_pos > -1:
    return (line, linenum, start_pos)
  # Keep scanning earlier lines, carrying the nesting stack backward.
  while stack and linenum > 0:
    linenum -= 1
    line = clean_lines.elided[linenum]
    start_pos, stack = FindStartOfExpressionInLine(line, len(line) - 1, stack)
    if start_pos > -1:
      return (line, linenum, start_pos)
  # Reached the beginning of the file without finding the opener; give up.
  return (line, 0, -1)
def CheckForCopyright(filename, lines, error):
  """Logs an error if no Copyright message appears at the top of the file."""
  # A copyright notice should appear within the first 10 real lines;
  # lines[0] is a placeholder inserted by the caller, so start at 1.
  found = False
  for linenum in xrange(1, min(len(lines), 11)):
    if re.search(r'Copyright', lines[linenum], re.I):
      found = True
      break
  if not found:
    error(filename, 0, 'legal/copyright', 5,
          'No copyright message found. '
          'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetIndentLevel(line):
  """Return the number of leading spaces in line.

  Args:
    line: A string to check.

  Returns:
    An integer count of leading spaces, possibly zero.
  """
  # Blank/whitespace-only lines have no non-space character and count as 0.
  match = Match(r'^( *)\S', line)
  return len(match.group(1)) if match else 0
def PathSplitToList(path):
  """Returns the path split into a list by the separator.

  Args:
    path: An absolute or relative path (e.g. '/a/b/c/' or '../a')

  Returns:
    A list of path components (e.g. ['a', 'b', 'c]).
  """
  components = []
  while True:
    head, tail = os.path.split(path)
    if head == path:
      # os.path.split no longer shrinks the head: we hit the root of an
      # absolute path.
      components.append(head)
      break
    if tail == path:
      # Nothing left to split off: first component of a relative path.
      components.append(tail)
      break
    # Collect the last component and continue with the remainder.
    components.append(tail)
    path = head
  # Components were gathered right-to-left; restore original order.
  components.reverse()
  return components
def GetHeaderGuardCPPVariable(filename):
  """Returns the CPP variable that should be used as a header guard.
  Args:
    filename: The name of a C++ header file.
  Returns:
    The CPP variable that should be used as a header guard in the
    named file.
  """
  # Restores original filename in case that cpplint is invoked from Emacs's
  # flymake.
  filename = re.sub(r'_flymake\.h$', '.h', filename)
  filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
  # Replace 'c++' with 'cpp'.
  filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')
  fileinfo = FileInfo(filename)
  file_path_from_root = fileinfo.RepositoryName()
  # Nested helper: applies the --root flag (module global _root) to strip or
  # prepend path components before the guard name is derived.
  def FixupPathFromRoot():
    if _root_debug:
      sys.stderr.write("\n_root fixup, _root = '%s', repository name = '%s'\n"
          % (_root, fileinfo.RepositoryName()))
    # Process the file path with the --root flag if it was set.
    if not _root:
      if _root_debug:
        sys.stderr.write("_root unspecified\n")
      return file_path_from_root
    def StripListPrefix(lst, prefix):
      # f(['x', 'y'], ['w, z']) -> None  (not a valid prefix)
      if lst[:len(prefix)] != prefix:
        return None
      # f(['a, 'b', 'c', 'd'], ['a', 'b']) -> ['c', 'd']
      return lst[(len(prefix)):]
    # root behavior:
    #   --root=subdir , lstrips subdir from the header guard
    maybe_path = StripListPrefix(PathSplitToList(file_path_from_root),
                                 PathSplitToList(_root))
    if _root_debug:
      sys.stderr.write(("_root lstrip (maybe_path=%s, file_path_from_root=%s," +
          " _root=%s)\n") % (maybe_path, file_path_from_root, _root))
    if maybe_path:
      return os.path.join(*maybe_path)
    # --root=.. , will prepend the outer directory to the header guard
    full_path = fileinfo.FullName()
    # adapt slashes for windows
    root_abspath = os.path.abspath(_root).replace('\\', '/')
    maybe_path = StripListPrefix(PathSplitToList(full_path),
                                 PathSplitToList(root_abspath))
    if _root_debug:
      sys.stderr.write(("_root prepend (maybe_path=%s, full_path=%s, " +
          "root_abspath=%s)\n") % (maybe_path, full_path, root_abspath))
    if maybe_path:
      return os.path.join(*maybe_path)
    if _root_debug:
      sys.stderr.write("_root ignore, returning %s\n" % (file_path_from_root))
    # --root=FAKE_DIR is ignored
    return file_path_from_root
  file_path_from_root = FixupPathFromRoot()
  # Guard name: path with non-alphanumerics mapped to '_', uppercased, plus a
  # trailing underscore (e.g. foo/bar.h -> FOO_BAR_H_).
  return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
def CheckForHeaderGuard(filename, clean_lines, error):
  """Checks that the file contains a header guard.
  Logs an error if no #ifndef header guard is present.  For other
  headers, checks that the full pathname is used.
  Args:
    filename: The name of the C++ header file.
    clean_lines: A CleansedLines instance containing the file.
    error: The function to call with any errors found.
  """
  # Don't check for header guards if there are error suppression
  # comments somewhere in this file.
  #
  # Because this is silencing a warning for a nonexistent line, we
  # only support the very specific NOLINT(build/header_guard) syntax,
  # and not the general NOLINT or NOLINT(*) syntax.
  raw_lines = clean_lines.lines_without_raw_strings
  for i in raw_lines:
    if Search(r'//\s*NOLINT\(build/header_guard\)', i):
      return
  # Allow pragma once instead of header guards
  for i in raw_lines:
    if Search(r'^\s*#pragma\s+once', i):
      return
  cppvar = GetHeaderGuardCPPVariable(filename)
  ifndef = ''
  ifndef_linenum = 0
  define = ''
  endif = ''
  endif_linenum = 0
  for linenum, line in enumerate(raw_lines):
    linesplit = line.split()
    if len(linesplit) >= 2:
      # find the first occurrence of #ifndef and #define, save arg
      if not ifndef and linesplit[0] == '#ifndef':
        # set ifndef to the header guard presented on the #ifndef line.
        ifndef = linesplit[1]
        ifndef_linenum = linenum
      if not define and linesplit[0] == '#define':
        define = linesplit[1]
    # find the last occurrence of #endif, save entire line
    if line.startswith('#endif'):
      endif = line
      endif_linenum = linenum
  # A valid guard needs matching #ifndef/#define of the same symbol.
  if not ifndef or not define or ifndef != define:
    error(filename, 0, 'build/header_guard', 5,
          'No #ifndef header guard found, suggested CPP variable is: %s' %
          cppvar)
    return
  # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
  # for backward compatibility.
  if ifndef != cppvar:
    error_level = 0
    if ifndef != cppvar + '_':
      error_level = 5
    ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
                            error)
    error(filename, ifndef_linenum, 'build/header_guard', error_level,
          '#ifndef header guard has wrong style, please use: %s' % cppvar)
  # Check for "//" comments on endif line.
  ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
                          error)
  match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
  if match:
    if match.group(1) == '_':
      # Issue low severity warning for deprecated double trailing underscore
      error(filename, endif_linenum, 'build/header_guard', 0,
            '#endif line should be "#endif  // %s"' % cppvar)
    return
  # Didn't find the corresponding "//" comment.  If this file does not
  # contain any "//" comments at all, it could be that the compiler
  # only wants "/**/" comments, look for those instead.
  no_single_line_comments = True
  for i in xrange(1, len(raw_lines) - 1):
    line = raw_lines[i]
    if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
      no_single_line_comments = False
      break
  if no_single_line_comments:
    match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
    if match:
      if match.group(1) == '_':
        # Low severity warning for double trailing underscore
        error(filename, endif_linenum, 'build/header_guard', 0,
              '#endif line should be "#endif  /* %s */"' % cppvar)
      return
  # Didn't find anything
  error(filename, endif_linenum, 'build/header_guard', 5,
        '#endif line should be "#endif  // %s"' % cppvar)
def CheckHeaderFileIncluded(filename, include_state, error):
  """Logs an error if a source file does not include its header."""
  # Do not check test files
  fileinfo = FileInfo(filename)
  if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()):
    return
  # Try each recognized header extension (.h, .hpp, ...) that actually
  # exists next to this source file.
  for ext in GetHeaderExtensions():
    basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
    headerfile = basefilename + '.' + ext
    if not os.path.exists(headerfile):
      continue
    headername = FileInfo(headerfile).RepositoryName()
    first_include = None
    include_uses_unix_dir_aliases = False
    # include_state.include_list holds (include_text, linenum) pairs grouped
    # by include section; scan them all for the matching header.
    for section_list in include_state.include_list:
      for f in section_list:
        include_text = f[0]
        if "./" in include_text:
          include_uses_unix_dir_aliases = True
        # Substring match in either direction accounts for relative vs.
        # repository-rooted include paths.
        if headername in include_text or include_text in headername:
          return
        if not first_include:
          first_include = f[1]
    message = '%s should include its header file %s' % (fileinfo.RepositoryName(), headername)
    if include_uses_unix_dir_aliases:
      message += ". Relative paths like . and .. are not allowed."
    error(filename, first_include, 'build/include', 5, message)
def CheckForBadCharacters(filename, lines, error):
  """Logs an error for each line containing bad characters.

  Two kinds of bad characters:

  1. Unicode replacement characters: These indicate that either the file
  contained invalid UTF-8 (likely) or Unicode replacement characters (which
  it shouldn't).  Note that it's possible for this to throw off line
  numbering if the invalid UTF-8 occurred adjacent to a newline.

  2. NUL bytes.  These are problematic for some tools.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  # U+FFFD marks bytes that failed to decode as UTF-8.
  replacement_char = unicode_escape_decode('\ufffd')
  for linenum, line in enumerate(lines):
    if replacement_char in line:
      error(filename, linenum, 'readability/utf8', 5,
            'Line contains invalid UTF-8 (or Unicode replacement character).')
    if '\0' in line:
      error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
def CheckForNewlineAtEOF(filename, lines, error):
  """Logs an error if there is no newline char at the end of the file.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  # The caller built 'lines' by appending two newlines to the file contents
  # and splitting on '\n', so a file that ends with a newline leaves the
  # last-but-two element empty.  A short list or a non-empty lines[-2]
  # therefore means the final newline is missing.
  missing_final_newline = len(lines) < 3 or lines[-2]
  if missing_final_newline:
    error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
          'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
  """Logs an error if we see /* ... */ or "..." that extend past one line.

  /* ... */ comments are legit inside macros, for one line.
  Otherwise, we prefer // comments, so it's ok to warn about the
  other.  Likewise, it's ok for strings to extend across multiple
  lines, as long as a line continuation character (backslash)
  terminates each line.  Although not currently prohibited by the C++
  style guide, it's ugly and unnecessary.  We don't do well with either
  in this lint program, so we warn about both.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Escaped backslashes are harmless, and the second (escaped) slash would
  # otherwise break the \" detection below, so drop them all up front.
  line = line.replace('\\\\', '')
  # More comment openers than closers means a /* ... */ spills onto the
  # next line.
  if line.count('/*') > line.count('*/'):
    error(filename, linenum, 'readability/multiline_comment', 5,
          'Complex multi-line /*...*/-style comment found. '
          'Lint may give bogus warnings. '
          'Consider replacing these with //-style comments, '
          'with #if 0...#endif, '
          'or with more clearly structured multi-line comments.')
  # An odd number of unescaped double quotes means a string literal is left
  # open at the end of the line.
  if (line.count('"') - line.count('\\"')) % 2:
    error(filename, linenum, 'readability/multiline_string', 5,
          'Multi-line string ("...") found. This lint script doesn\'t '
          'do well with such strings, and may give bogus warnings. '
          'Use C++11 raw strings or concatenation instead.')
# (non-threadsafe name, thread-safe alternative, validation pattern)
#
# The validation pattern is used to eliminate false positives such as:
#  _rand();  // false positive due to substring match.
#  ->rand();  // some member function rand().
#  ACMRandom rand(seed);  // some variable named rand.
#  ISAACRandom rand();  // another variable named rand.
#
# Basically we require the return value of these functions to be used
# in some expression context on the same line by matching on some
# operator before the function name.  This eliminates constructors and
# member function calls.
# An operator (or '>' with surrounding space) must precede the call for it
# to count as an expression-context use.
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
# Each entry: (unsafe POSIX function, reentrant _r alternative, regex that
# confirms a real call in expression context).
_THREADING_LIST = (
    ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
    ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
    ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
    ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
    ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
    ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
    ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
    ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
    ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
    ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
    ('strtok(', 'strtok_r(',
     _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
    ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
    )
def CheckPosixThreading(filename, clean_lines, linenum, error):
  """Checks for calls to thread-unsafe functions.

  Much code has been originally written without consideration of
  multi-threading.  Also, engineers are relying on their old experience;
  they have learned posix before threading extensions were added.  These
  tests guide the engineers to use thread-safe functions (when using
  posix directly).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  for unsafe_func, reentrant_func, validation_pattern in _THREADING_LIST:
    # The validation pattern requires expression context, which filters out
    # constructors and member functions with the same name.
    if Search(validation_pattern, line):
      error(filename, linenum, 'runtime/threadsafe_fn', 2,
            'Consider using ' + reentrant_func +
            '...) instead of ' + unsafe_func +
            '...) for improved thread safety.')
def CheckVlogArguments(filename, clean_lines, linenum, error):
  """Checks that VLOG() is only used for defining a logging level.

  For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
  VLOG(FATAL) are not.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  elided_line = clean_lines.elided[linenum]
  # Symbolic severities belong to LOG(); VLOG() takes a numeric level only.
  if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', elided_line):
    error(filename, linenum, 'runtime/vlog', 5,
          'VLOG() should be used with numeric verbosity level. '
          'Use LOG() if you want symbolic severity levels.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
    r'^\s*\*\w+(\+\+|--);')


def CheckInvalidIncrement(filename, clean_lines, linenum, error):
  """Checks for invalid increment *count++.

  For example the following function:
    void increment_counter(int* count) {
      *count++;
    }
  is invalid, because it effectively does count++, moving pointer, and should
  be replaced with ++*count, (*count)++ or *count += 1.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # The regex only fires on a bare "*name++;" / "*name--;" statement.
  if _RE_PATTERN_INVALID_INCREMENT.match(clean_lines.elided[linenum]):
    error(filename, linenum, 'runtime/invalid_increment', 5,
          'Changing pointer instead of value (or unused value of operator*).')
def IsMacroDefinition(clean_lines, linenum):
  """Returns True if this line starts or continues a #define."""
  # A line is part of a macro definition if it begins one with #define...
  if Search(r'^#define', clean_lines[linenum]):
    return True
  # ...or if the previous line ended with a continuation backslash.
  if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]):
    return True
  return False
def IsForwardClassDeclaration(clean_lines, linenum):
  """Returns a match if this line looks like a forward class declaration."""
  # Matches e.g. "class Foo;", optionally preceded by "template...".
  line = clean_lines[linenum]
  return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', line)
class _BlockInfo(object):
  """Stores information about a generic block of code."""

  def __init__(self, linenum, seen_open_brace):
    # Line on which this block begins.
    self.starting_linenum = linenum
    # Whether the "{" opening the block has already been consumed.
    self.seen_open_brace = seen_open_brace
    # Count of currently-unbalanced "(" inside this block.
    self.open_parentheses = 0
    # Inline assembly state; _NO_ASM unless asm was detected.
    self.inline_asm = _NO_ASM
    # Generic blocks are exempt from namespace indentation checks.
    self.check_namespace_indentation = False

  def CheckBegin(self, filename, clean_lines, linenum, error):
    """Run checks that applies to text up to the opening brace.

    This is mostly for checking the text after the class identifier
    and the "{", usually where the base class is specified.  For other
    blocks, there isn't much to check, so we always pass.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    # Nothing to verify for a generic block.
    pass

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Run checks that applies to text after the closing brace.

    This is mostly used for checking end of namespace comments.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    # Nothing to verify for a generic block.
    pass

  def IsBlockInfo(self):
    """Returns true if this block is a _BlockInfo.

    This is convenient for verifying that an object is an instance of
    a _BlockInfo, but not an instance of any of the derived classes.

    Returns:
      True for this class, false for derived classes.
    """
    return self.__class__ == _BlockInfo
class _ExternCInfo(_BlockInfo):
  """Stores information about an 'extern "C"' block."""

  def __init__(self, linenum):
    # extern "C" blocks never wait for their brace: seen_open_brace=True.
    _BlockInfo.__init__(self, linenum, True)
class _ClassInfo(_BlockInfo):
  """Stores information about a class."""
  def __init__(self, name, class_or_struct, clean_lines, linenum):
    _BlockInfo.__init__(self, linenum, False)
    self.name = name
    self.is_derived = False
    self.check_namespace_indentation = True
    # Default access differs between struct (public) and class (private).
    if class_or_struct == 'struct':
      self.access = 'public'
      self.is_struct = True
    else:
      self.access = 'private'
      self.is_struct = False
    # Remember initial indentation level for this class.  Using raw_lines here
    # instead of elided to account for leading comments.
    self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
    # Try to find the end of the class.  This will be confused by things like:
    #   class A {
    #   } *x = { ...
    #
    # But it's still good enough for CheckSectionSpacing.
    self.last_line = 0
    depth = 0
    for i in range(linenum, clean_lines.NumLines()):
      line = clean_lines.elided[i]
      depth += line.count('{') - line.count('}')
      if not depth:
        self.last_line = i
        break
  def CheckBegin(self, filename, clean_lines, linenum, error):
    # Look for a bare ':' (a base-class list, not '::' scope resolution).
    if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
      self.is_derived = True
  def CheckEnd(self, filename, clean_lines, linenum, error):
    # If there is a DISALLOW macro, it should appear near the end of
    # the class.
    seen_last_thing_in_class = False
    # Scan upward from the closing brace toward the class start.
    for i in xrange(linenum - 1, self.starting_linenum, -1):
      match = Search(
          r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' +
          self.name + r'\)',
          clean_lines.elided[i])
      if match:
        # Non-blank lines below the macro mean it was not last in the class.
        if seen_last_thing_in_class:
          error(filename, i, 'readability/constructors', 3,
                match.group(1) + ' should be the last thing in the class')
        break
      if not Match(r'^\s*$', clean_lines.elided[i]):
        seen_last_thing_in_class = True
    # Check that closing brace is aligned with beginning of the class.
    # Only do this if the closing brace is indented by only whitespaces.
    # This means we will not check single-line class definitions.
    indent = Match(r'^( *)\}', clean_lines.elided[linenum])
    if indent and len(indent.group(1)) != self.class_indent:
      if self.is_struct:
        parent = 'struct ' + self.name
      else:
        parent = 'class ' + self.name
      error(filename, linenum, 'whitespace/indent', 3,
            'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
  """Stores information about a namespace."""
  def __init__(self, name, linenum):
    _BlockInfo.__init__(self, linenum, False)
    # Anonymous namespaces are stored with an empty name.
    self.name = name or ''
    self.check_namespace_indentation = True
  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Check end of namespace comments."""
    line = clean_lines.raw_lines[linenum]
    # Check how many lines is enclosed in this namespace.  Don't issue
    # warning for missing namespace comments if there aren't enough
    # lines.  However, do apply checks if there is already an end of
    # namespace comment and it's incorrect.
    #
    # TODO(unknown): We always want to check end of namespace comments
    # if a namespace is large, but sometimes we also want to apply the
    # check if a short namespace contained nontrivial things (something
    # other than forward declarations).  There is currently no logic on
    # deciding what these nontrivial things are, so this check is
    # triggered by namespace size only, which works most of the time.
    if (linenum - self.starting_linenum < 10
        and not Match(r'^\s*};*\s*(//|/\*).*\bnamespace\b', line)):
      return
    # Look for matching comment at end of namespace.
    #
    # Note that we accept C style "/* */" comments for terminating
    # namespaces, so that code that terminate namespaces inside
    # preprocessor macros can be cpplint clean.
    #
    # We also accept stuff like "// end of namespace <name>." with the
    # period at the end.
    #
    # Besides these, we don't accept anything else, otherwise we might
    # get false negatives when existing comment is a substring of the
    # expected namespace.
    if self.name:
      # Named namespace
      if not Match((r'^\s*};*\s*(//|/\*).*\bnamespace\s+' +
                    re.escape(self.name) + r'[\*/\.\\\s]*$'),
                   line):
        error(filename, linenum, 'readability/namespace', 5,
              'Namespace should be terminated with "// namespace %s"' %
              self.name)
    else:
      # Anonymous namespace
      if not Match(r'^\s*};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
        # If "// namespace anonymous" or "// anonymous namespace (more text)",
        # mention "// anonymous namespace" as an acceptable form
        if Match(r'^\s*}.*\b(namespace anonymous|anonymous namespace)\b', line):
          error(filename, linenum, 'readability/namespace', 5,
                'Anonymous namespace should be terminated with "// namespace"'
                ' or "// anonymous namespace"')
        else:
          error(filename, linenum, 'readability/namespace', 5,
                'Anonymous namespace should be terminated with "// namespace"')
class _PreprocessorInfo(object):
"""Stores checkpoints of nesting stacks when #if/#else is seen."""
def __init__(self, stack_before_if):
# The entire nesting stack before #if
self.stack_before_if = stack_before_if
# The entire nesting stack up to #else
self.stack_before_else = []
# Whether we have already seen #else or #elif
self.seen_else = False
class NestingState(object):
  """Holds states related to parsing braces.

  Tracks the stack of nested C++ scopes (namespaces, classes/structs and
  other brace-delimited blocks) as lines are fed in through Update(), plus
  a parallel stack of preprocessor checkpoints (_PreprocessorInfo) so that
  #if/#else blocks do not corrupt brace matching.
  """
  def __init__(self):
    # Stack for tracking all braces.  An object is pushed whenever we
    # see a "{", and popped when we see a "}".  Only 3 types of
    # objects are possible:
    # - _ClassInfo: a class or struct.
    # - _NamespaceInfo: a namespace.
    # - _BlockInfo: some other type of block.
    self.stack = []
    # Top of the previous stack before each Update().
    #
    # Because the nesting_stack is updated at the end of each line, we
    # had to do some convoluted checks to find out what is the current
    # scope at the beginning of the line.  This check is simplified by
    # saving the previous top of nesting stack.
    #
    # We could save the full stack, but we only need the top.  Copying
    # the full nesting stack would slow down cpplint by ~10%.
    self.previous_stack_top = []
    # Stack of _PreprocessorInfo objects, one per currently-open #if block.
    self.pp_stack = []
  def SeenOpenBrace(self):
    """Check if we have seen the opening brace for the innermost block.

    Returns:
      True if we have seen the opening brace, False if the innermost
      block is still expecting an opening brace.
    """
    # An empty stack counts as "seen": there is no pending declaration head.
    return (not self.stack) or self.stack[-1].seen_open_brace
  def InNamespaceBody(self):
    """Check if we are currently one level inside a namespace body.

    Returns:
      True if top of the stack is a namespace block, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
  def InExternC(self):
    """Check if we are currently one level inside an 'extern "C"' block.

    Returns:
      True if top of the stack is an extern block, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _ExternCInfo)
  def InClassDeclaration(self):
    """Check if we are currently one level inside a class or struct declaration.

    Returns:
      True if top of the stack is a class/struct, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _ClassInfo)
  def InAsmBlock(self):
    """Check if we are currently one level inside an inline ASM block.

    Returns:
      True if the top of the stack is a block containing inline ASM.
    """
    return self.stack and self.stack[-1].inline_asm != _NO_ASM
  def InTemplateArgumentList(self, clean_lines, linenum, pos):
    """Check if current position is inside template argument list.

    Scans forward from (linenum, pos) looking for a token that
    disambiguates "class Foo" used as a declaration from "class Foo"
    used inside a template parameter list.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      pos: position just after the suspected template argument.
    Returns:
      True if (linenum, pos) is inside template arguments.
    """
    while linenum < clean_lines.NumLines():
      # Find the earliest character that might indicate a template argument
      line = clean_lines.elided[linenum]
      match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
      if not match:
        # No interesting token on the rest of this line; keep scanning.
        linenum += 1
        pos = 0
        continue
      token = match.group(1)
      pos += len(match.group(0))
      # These things do not look like template argument list:
      #   class Suspect {
      #   class Suspect x; }
      if token in ('{', '}', ';'): return False
      # These things look like template argument list:
      #   template <class Suspect>
      #   template <class Suspect = default_value>
      #   template <class Suspect[]>
      #   template <class Suspect...>
      if token in ('>', '=', '[', ']', '.'): return True
      # Check if token is an unmatched '<'.
      # If not, move on to the next character.
      if token != '<':
        pos += 1
        if pos >= len(line):
          linenum += 1
          pos = 0
        continue
      # We can't be sure if we just find a single '<', and need to
      # find the matching '>'.
      (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
      if end_pos < 0:
        # Not sure if template argument list or syntax error in file
        return False
      # Skip past the '<...>' group and keep scanning after it.
      linenum = end_line
      pos = end_pos
    return False
  def UpdatePreprocessor(self, line):
    """Update preprocessor stack.

    We need to handle preprocessors due to classes like this:
      #ifdef SWIG
      struct ResultDetailsPageElementExtensionPoint {
      #else
      struct ResultDetailsPageElementExtensionPoint : public Extension {
      #endif

    We make the following assumptions (good enough for most files):
    - Preprocessor condition evaluates to true from #if up to first
      #else/#elif/#endif.
    - Preprocessor condition evaluates to false from #else/#elif up
      to #endif.  We still perform lint checks on these lines, but
      these do not affect nesting stack.

    Args:
      line: current line to check.
    """
    if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
      # Beginning of #if block, save the nesting stack here.  The saved
      # stack will allow us to restore the parsing state in the #else case.
      self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
    elif Match(r'^\s*#\s*(else|elif)\b', line):
      # Beginning of #else block
      if self.pp_stack:
        if not self.pp_stack[-1].seen_else:
          # This is the first #else or #elif block.  Remember the
          # whole nesting stack up to this point.  This is what we
          # keep after the #endif.
          self.pp_stack[-1].seen_else = True
          self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
        # Restore the stack to how it was before the #if
        self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
      else:
        # TODO(unknown): unexpected #else, issue warning?
        pass
    elif Match(r'^\s*#\s*endif\b', line):
      # End of #if or #else blocks.
      if self.pp_stack:
        # If we saw an #else, we will need to restore the nesting
        # stack to its former state before the #else, otherwise we
        # will just continue from where we left off.
        if self.pp_stack[-1].seen_else:
          # Here we can just use a shallow copy since we are the last
          # reference to it.
          self.stack = self.pp_stack[-1].stack_before_else
        # Drop the corresponding #if
        self.pp_stack.pop()
      else:
        # TODO(unknown): unexpected #endif, issue warning?
        pass
  # TODO(unknown): Update() is too long, but we will refactor later.
  def Update(self, filename, clean_lines, linenum, error):
    """Update nesting state with current line.

    This is the per-line driver: it maintains pp_stack, tracks inline-asm
    state, consumes namespace and class declarations, updates class access
    levels, and pushes/pops the stack for every brace/semicolon seen.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]
    # Remember top of the previous nesting stack.
    #
    # The stack is always pushed/popped and not modified in place, so
    # we can just do a shallow copy instead of copy.deepcopy.  Using
    # deepcopy would slow down cpplint by ~28%.
    if self.stack:
      self.previous_stack_top = self.stack[-1]
    else:
      self.previous_stack_top = None
    # Update pp_stack
    self.UpdatePreprocessor(line)
    # Count parentheses.  This is to avoid adding struct arguments to
    # the nesting stack.
    if self.stack:
      inner_block = self.stack[-1]
      depth_change = line.count('(') - line.count(')')
      inner_block.open_parentheses += depth_change
      # Also check if we are starting or ending an inline assembly block.
      if inner_block.inline_asm in (_NO_ASM, _END_ASM):
        if (depth_change != 0 and
            inner_block.open_parentheses == 1 and
            _MATCH_ASM.match(line)):
          # Enter assembly block
          inner_block.inline_asm = _INSIDE_ASM
        else:
          # Not entering assembly block.  If previous line was _END_ASM,
          # we will now shift to _NO_ASM state.
          inner_block.inline_asm = _NO_ASM
      elif (inner_block.inline_asm == _INSIDE_ASM and
            inner_block.open_parentheses == 0):
        # Exit assembly block
        inner_block.inline_asm = _END_ASM
    # Consume namespace declaration at the beginning of the line.  Do
    # this in a loop so that we catch same line declarations like this:
    #   namespace proto2 { namespace bridge { class MessageSet; } }
    while True:
      # Match start of namespace.  The "\b\s*" below catches namespace
      # declarations even if it weren't followed by a whitespace, this
      # is so that we don't confuse our namespace checker.  The
      # missing spaces will be flagged by CheckSpacing.
      namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
      if not namespace_decl_match:
        break
      new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
      self.stack.append(new_namespace)
      line = namespace_decl_match.group(2)
      if line.find('{') != -1:
        new_namespace.seen_open_brace = True
        line = line[line.find('{') + 1:]
    # Look for a class declaration in whatever is left of the line
    # after parsing namespaces.  The regexp accounts for decorated classes
    # such as in:
    #   class LOCKABLE API Object {
    #   };
    class_decl_match = Match(
        r'^(\s*(?:template\s*<[\w\s<>,:=]*>\s*)?'
        r'(class|struct)\s+(?:[a-zA-Z0-9_]+\s+)*(\w+(?:::\w+)*))'
        r'(.*)$', line)
    if (class_decl_match and
        (not self.stack or self.stack[-1].open_parentheses == 0)):
      # We do not want to accept classes that are actually template arguments:
      #   template <class Ignore1,
      #             class Ignore2 = Default<Args>,
      #             template <Args> class Ignore3>
      #   void Function() {};
      #
      # To avoid template argument cases, we scan forward and look for
      # an unmatched '>'.  If we see one, assume we are inside a
      # template argument list.
      end_declaration = len(class_decl_match.group(1))
      if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
        self.stack.append(_ClassInfo(
            class_decl_match.group(3), class_decl_match.group(2),
            clean_lines, linenum))
      line = class_decl_match.group(4)
    # If we have not yet seen the opening brace for the innermost block,
    # run checks here.
    if not self.SeenOpenBrace():
      self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
    # Update access control if we are inside a class/struct
    if self.stack and isinstance(self.stack[-1], _ClassInfo):
      classinfo = self.stack[-1]
      access_match = Match(
          r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
          r':(?:[^:]|$)',
          line)
      if access_match:
        classinfo.access = access_match.group(2)
        # Check that access keywords are indented +1 space.  Skip this
        # check if the keywords are not preceded by whitespaces.
        indent = access_match.group(1)
        if (len(indent) != classinfo.class_indent + 1 and
            Match(r'^\s*$', indent)):
          if classinfo.is_struct:
            parent = 'struct ' + classinfo.name
          else:
            parent = 'class ' + classinfo.name
          slots = ''
          if access_match.group(3):
            slots = access_match.group(3)
          error(filename, linenum, 'whitespace/indent', 3,
                '%s%s: should be indented +1 space inside %s' % (
                    access_match.group(2), slots, parent))
    # Consume braces or semicolons from what's left of the line
    while True:
      # Match first brace, semicolon, or closed parenthesis.
      matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
      if not matched:
        break
      token = matched.group(1)
      if token == '{':
        # If namespace or class hasn't seen a opening brace yet, mark
        # namespace/class head as complete.  Push a new block onto the
        # stack otherwise.
        if not self.SeenOpenBrace():
          self.stack[-1].seen_open_brace = True
        elif Match(r'^extern\s*"[^"]*"\s*\{', line):
          self.stack.append(_ExternCInfo(linenum))
        else:
          self.stack.append(_BlockInfo(linenum, True))
          if _MATCH_ASM.match(line):
            self.stack[-1].inline_asm = _BLOCK_ASM
      elif token == ';' or token == ')':
        # If we haven't seen an opening brace yet, but we already saw
        # a semicolon, this is probably a forward declaration.  Pop
        # the stack for these.
        #
        # Similarly, if we haven't seen an opening brace yet, but we
        # already saw a closing parenthesis, then these are probably
        # function arguments with extra "class" or "struct" keywords.
        # Also pop these stack for these.
        if not self.SeenOpenBrace():
          self.stack.pop()
      else:  # token == '}'
        # Perform end of block checks and pop the stack.
        if self.stack:
          self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
          self.stack.pop()
      line = matched.group(2)
  def InnermostClass(self):
    """Get class info on the top of the stack.

    Returns:
      A _ClassInfo object if we are inside a class, or None otherwise.
    """
    # Walk the stack from innermost to outermost, skipping plain blocks
    # and namespaces.
    for i in range(len(self.stack), 0, -1):
      classinfo = self.stack[i - 1]
      if isinstance(classinfo, _ClassInfo):
        return classinfo
    return None
  def CheckCompletedBlocks(self, filename, error):
    """Checks that all classes and namespaces have been completely parsed.

    Call this when all lines in a file have been processed.

    Args:
      filename: The name of the current file.
      error: The function to call with any errors found.
    """
    # Note: This test can result in false positives if #ifdef constructs
    # get in the way of brace matching. See the testBuildClass test in
    # cpplint_unittest.py for an example of this.
    for obj in self.stack:
      if isinstance(obj, _ClassInfo):
        error(filename, obj.starting_linenum, 'build/class', 5,
              'Failed to find complete declaration of class %s' %
              obj.name)
      elif isinstance(obj, _NamespaceInfo):
        error(filename, obj.starting_linenum, 'build/namespaces', 5,
              'Failed to find complete declaration of namespace %s' %
              obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
                                  nesting_state, error):
  r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.

  Complain about several constructs which gcc-2 accepts, but which are
  not standard C++.  Warning about these in lint is one way to ease the
  transition to new compilers.
  - put storage class first (e.g. "static const" instead of "const static").
  - "%lld" instead of %qd" in printf-type functions.
  - "%1$d" is non-standard in printf-type functions.
  - "\%" is an undefined character escape sequence.
  - text after #endif is not allowed.
  - invalid inner-style forward declaration.
  - >? and <? operators, and their >?= and <?= cousins.

  Additionally, check for constructor/destructor style violations and reference
  members, as it is very convenient to do so while checking for
  gcc-2 compliance.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
  """
  # Remove comments from the line, but leave in strings for now.
  # (the printf-format checks below need to look inside string literals)
  line = clean_lines.lines[linenum]
  if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
    error(filename, linenum, 'runtime/printf_format', 3,
          '%q in format strings is deprecated.  Use %ll instead.')
  if Search(r'printf\s*\(.*".*%\d+\$', line):
    error(filename, linenum, 'runtime/printf_format', 2,
          '%N$ formats are unconventional.  Try rewriting to avoid them.')
  # Remove escaped backslashes before looking for undefined escapes.
  line = line.replace('\\\\', '')
  if Search(r'("|\').*\\(%|\[|\(|{)', line):
    error(filename, linenum, 'build/printf_format', 3,
          '%, [, (, and { are undefined character escapes.  Unescape them.')
  # For the rest, work with both comments and strings removed.
  line = clean_lines.elided[linenum]
  if Search(r'\b(const|volatile|void|char|short|int|long'
            r'|float|double|signed|unsigned'
            r'|schar|u?int8|u?int16|u?int32|u?int64)'
            r'\s+(register|static|extern|typedef)\b',
            line):
    error(filename, linenum, 'build/storage_class', 5,
          'Storage-class specifier (static, extern, typedef, etc) should be '
          'at the beginning of the declaration.')
  if Match(r'\s*#\s*endif\s*[^/\s]+', line):
    error(filename, linenum, 'build/endif_comment', 5,
          'Uncommented text after #endif is non-standard.  Use a comment.')
  if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
    error(filename, linenum, 'build/forward_decl', 5,
          'Inner-style forward declarations are invalid.  Remove this line.')
  if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
            line):
    error(filename, linenum, 'build/deprecated', 3,
          '>? and <? (max and min) operators are non-standard and deprecated.')
  if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
    # TODO(unknown): Could it be expanded safely to arbitrary references,
    # without triggering too many false positives? The first
    # attempt triggered 5 warnings for mostly benign code in the regtest, hence
    # the restriction.
    # Here's the original regexp, for the reference:
    # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
    # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
    error(filename, linenum, 'runtime/member_string_references', 2,
          'const string& members are dangerous. It is much better to use '
          'alternatives, such as pointers or simple constants.')
  # Everything else in this function operates on class declarations.
  # Return early if the top of the nesting stack is not a class, or if
  # the class head is not completed yet.
  classinfo = nesting_state.InnermostClass()
  if not classinfo or not classinfo.seen_open_brace:
    return
  # The class may have been declared with namespace or classname qualifiers.
  # The constructor and destructor will not have those qualifiers.
  base_classname = classinfo.name.split('::')[-1]
  # Look for single-argument constructors that aren't marked explicit.
  # Technically a valid construct, but against style.
  explicit_constructor_match = Match(
      r'\s+(?:(?:inline|constexpr)\s+)*(explicit\s+)?'
      r'(?:(?:inline|constexpr)\s+)*%s\s*'
      r'\(((?:[^()]|\([^()]*\))*)\)'
      % re.escape(base_classname),
      line)
  if explicit_constructor_match:
    is_marked_explicit = explicit_constructor_match.group(1)
    if not explicit_constructor_match.group(2):
      constructor_args = []
    else:
      constructor_args = explicit_constructor_match.group(2).split(',')
    # collapse arguments so that commas in template parameter lists and function
    # argument parameter lists don't split arguments in two
    i = 0
    while i < len(constructor_args):
      constructor_arg = constructor_args[i]
      # Keep merging following fragments until angle brackets and parens
      # inside this argument are balanced again.
      while (constructor_arg.count('<') > constructor_arg.count('>') or
             constructor_arg.count('(') > constructor_arg.count(')')):
        constructor_arg += ',' + constructor_args[i + 1]
        del constructor_args[i + 1]
      constructor_args[i] = constructor_arg
      i += 1
    variadic_args = [arg for arg in constructor_args if '&&...' in arg]
    defaulted_args = [arg for arg in constructor_args if '=' in arg]
    noarg_constructor = (not constructor_args or  # empty arg list
                         # 'void' arg specifier
                         (len(constructor_args) == 1 and
                          constructor_args[0].strip() == 'void'))
    onearg_constructor = ((len(constructor_args) == 1 and  # exactly one arg
                           not noarg_constructor) or
                          # all but at most one arg defaulted
                          (len(constructor_args) >= 1 and
                           not noarg_constructor and
                           len(defaulted_args) >= len(constructor_args) - 1) or
                          # variadic arguments with zero or one argument
                          (len(constructor_args) <= 2 and
                           len(variadic_args) >= 1))
    initializer_list_constructor = bool(
        onearg_constructor and
        Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
    copy_constructor = bool(
        onearg_constructor and
        Match(r'((const\s+(volatile\s+)?)?|(volatile\s+(const\s+)?))?'
              r'%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
              % re.escape(base_classname), constructor_args[0].strip()))
    if (not is_marked_explicit and
        onearg_constructor and
        not initializer_list_constructor and
        not copy_constructor):
      if defaulted_args or variadic_args:
        error(filename, linenum, 'runtime/explicit', 5,
              'Constructors callable with one argument '
              'should be marked explicit.')
      else:
        error(filename, linenum, 'runtime/explicit', 5,
              'Single-parameter constructors should be marked explicit.')
    elif is_marked_explicit and not onearg_constructor:
      if noarg_constructor:
        error(filename, linenum, 'runtime/explicit', 5,
              'Zero-parameter constructors should not be marked explicit.')
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
  """Checks for the correctness of various spacing around function calls.

  Flags extra space after '(' in a call, extra space before '(' in a call,
  and extra space before ')'.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Since function calls often occur inside if/for/while/switch
  # expressions - which have their own, more liberal conventions - we
  # first see if we should be looking inside such an expression for a
  # function call, to which we can apply more strict standards.
  fncall = line    # if there's no control flow construct, look at whole line
  for pattern in (r'\bif\s*\((.*)\)\s*{',
                  r'\bfor\s*\((.*)\)\s*{',
                  r'\bwhile\s*\((.*)\)\s*[{;]',
                  r'\bswitch\s*\((.*)\)\s*{'):
    match = Search(pattern, line)
    if match:
      fncall = match.group(1)    # look inside the parens for function calls
      break
  # Except in if/for/while/switch, there should never be space
  # immediately inside parens (eg "f( 3, 4 )").  We make an exception
  # for nested parens ( (a+b) + c ).  Likewise, there should never be
  # a space before a ( when it's a function argument.  I assume it's a
  # function argument when the char before the whitespace is legal in
  # a function name (alnum + _) and we're not starting a macro. Also ignore
  # pointers and references to arrays and functions coz they're too tricky:
  # we use a very simple way to recognize these:
  # " (something)(maybe-something)" or
  # " (something)(maybe-something," or
  # " (something)[something]"
  # Note that we assume the contents of [] to be short enough that
  # they'll never need to wrap.
  if (  # Ignore control structures.
      not Search(r'\b(if|elif|for|while|switch|return|new|delete|catch|sizeof)\b',
                 fncall) and
      # Ignore pointers/references to functions.
      not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
      # Ignore pointers/references to arrays.
      not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
    if Search(r'\w\s*\(\s(?!\s*\\$)', fncall):      # a ( used for a fn call
      error(filename, linenum, 'whitespace/parens', 4,
            'Extra space after ( in function call')
    elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
      error(filename, linenum, 'whitespace/parens', 2,
            'Extra space after (')
    if (Search(r'\w\s+\(', fncall) and
        not Search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and
        not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
        not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
        not Search(r'\bcase\s+\(', fncall)):
      # TODO(unknown): Space after an operator function seem to be a common
      # error, silence those for now by restricting them to highest verbosity.
      if Search(r'\boperator_*\b', line):
        error(filename, linenum, 'whitespace/parens', 0,
              'Extra space before ( in function call')
      else:
        error(filename, linenum, 'whitespace/parens', 4,
              'Extra space before ( in function call')
  # If the ) is followed only by a newline or a { + newline, assume it's
  # part of a control statement (if/while/etc), and don't complain
  if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
    # If the closing parenthesis is preceded by only whitespaces,
    # try to give a more descriptive error message.
    if Search(r'^\s+\)', fncall):
      error(filename, linenum, 'whitespace/parens', 2,
            'Closing ) should be moved to the previous line')
    else:
      error(filename, linenum, 'whitespace/parens', 2,
            'Extra space before )')
def IsBlankLine(line):
  """Returns true if the given line is blank.

  A line is considered blank when it is empty or contains nothing but
  whitespace characters.

  Args:
    line: A line of a string.

  Returns:
    True, if the given line is blank.
  """
  # strip() reduces a whitespace-only (or empty) line to the empty string.
  return line.strip() == ''
def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
                                 error):
  """Flags items that are indented inside a namespace body.

  Args:
    filename: The name of the current file.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    clean_lines: A CleansedLines instance containing the file.
    line: The number of the line to check.
    error: The function to call with any errors found.
  """
  stack = nesting_state.stack
  # The line belongs directly to a namespace body when the innermost block
  # sits inside a namespace and that namespace was already the enclosing
  # scope when this line began.
  is_namespace_indent_item = (
      len(stack) > 1 and
      stack[-1].check_namespace_indentation and
      isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
      nesting_state.previous_stack_top == stack[-2])
  if not ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
                                         clean_lines.elided, line):
    return
  CheckItemIndentationInNamespace(filename, clean_lines.elided, line, error)
def CheckForFunctionLengths(filename, clean_lines, linenum,
                            function_state, error):
  """Reports for long function bodies.

  For an overview why this is done, see:
  https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions

  Uses a simplistic algorithm assuming other style guidelines
  (especially spacing) are followed.
  Only checks unindented functions, so class members are unchecked.
  Trivial bodies are unchecked, so constructors with huge initializer lists
  may be missed.
  Blank/comment lines are not counted so as to avoid encouraging the removal
  of vertical space and comments just to get through a lint check.
  NOLINT *on the last line of a function* disables this check.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    function_state: Current function name and lines in body so far.
    error: The function to call with any errors found.
  """
  lines = clean_lines.lines
  line = lines[linenum]
  joined_line = ''
  starting_func = False
  regexp = r'(\w(\w|::|\*|\&|\s)*)\('  # decls * & space::name( ...
  match_result = Match(regexp, line)
  if match_result:
    # If the name is all caps and underscores, figure it's a macro and
    # ignore it, unless it's TEST or TEST_F.
    function_name = match_result.group(1).split()[-1]
    if function_name == 'TEST' or function_name == 'TEST_F' or (
        not Match(r'[A-Z_]+$', function_name)):
      starting_func = True
  if starting_func:
    body_found = False
    # Scan forward to find the start of the function body ('{') or
    # evidence that this is only a declaration (';' or '}').
    # NOTE(review): xrange is Python 2; cpplint normally aliases
    # xrange = range for Python 3 elsewhere in the file -- confirm the
    # shim exists in this copy.
    for start_linenum in xrange(linenum, clean_lines.NumLines()):
      start_line = lines[start_linenum]
      joined_line += ' ' + start_line.lstrip()
      if Search(r'(;|})', start_line):  # Declarations and trivial functions
        body_found = True
        break                              # ... ignore
      if Search(r'{', start_line):
        body_found = True
        function = Search(r'((\w|:)*)\(', line).group(1)
        if Match(r'TEST', function):    # Handle TEST... macros
          parameter_regexp = Search(r'(\(.*\))', joined_line)
          if parameter_regexp:             # Ignore bad syntax
            function += parameter_regexp.group(1)
        else:
          function += '()'
        function_state.Begin(function)
        break
    if not body_found:
      # No body for the function (or evidence of a non-function) was found.
      error(filename, linenum, 'readability/fn_size', 5,
            'Lint failed to find start of function body.')
  elif Match(r'^\}\s*$', line):  # function end
    function_state.Check(error, filename, linenum)
    function_state.End()
  elif not Match(r'^\s*$', line):
    function_state.Count()  # Count non-blank/non-comment lines.
# Matches "// TODO(username):" style comments; group(1) is whitespace
# between // and TODO, group(2) is the "(username)" part, group(3) is the
# whitespace (if any) after the colon.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
def CheckComment(line, filename, linenum, next_line_start, error):
  """Checks for common mistakes in comments.

  Flags missing space before //, malformed TODO comments, and a missing
  space between // and the comment text.

  Args:
    line: The line in question.
    filename: The name of the current file.
    linenum: The number of the line to check.
    next_line_start: The first non-whitespace column of the next line.
    error: The function to call with any errors found.
  """
  commentpos = line.find('//')
  if commentpos != -1:
    # Check if the // may be in quotes.  If so, ignore it
    # (an even number of unescaped quotes before // means it is real code).
    if re.sub(r'\\.', '', line[0:commentpos]).count('"') % 2 == 0:
      # Allow one space for new scopes, two spaces otherwise:
      if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
          ((commentpos >= 1 and
            line[commentpos-1] not in string.whitespace) or
           (commentpos >= 2 and
            line[commentpos-2] not in string.whitespace))):
        error(filename, linenum, 'whitespace/comments', 2,
              'At least two spaces is best between code and comments')
      # Checks for common mistakes in TODO comments.
      comment = line[commentpos:]
      match = _RE_PATTERN_TODO.match(comment)
      if match:
        # One whitespace is correct; zero whitespace is handled elsewhere.
        leading_whitespace = match.group(1)
        if len(leading_whitespace) > 1:
          error(filename, linenum, 'whitespace/todo', 2,
                'Too many spaces before TODO')
        username = match.group(2)
        if not username:
          error(filename, linenum, 'readability/todo', 2,
                'Missing username in TODO; it should look like '
                '"// TODO(my_username): Stuff."')
        middle_whitespace = match.group(3)
        # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
        if middle_whitespace != ' ' and middle_whitespace != '':
          error(filename, linenum, 'whitespace/todo', 2,
                'TODO(my_username) should be followed by a space')
      # If the comment contains an alphanumeric character, there
      # should be a space somewhere between it and the // unless
      # it's a /// or //! Doxygen comment.
      if (Match(r'//[^ ]*\w', comment) and
          not Match(r'(///|//\!)(\s+|$)', comment)):
        error(filename, linenum, 'whitespace/comments', 4,
              'Should have a space between // and comment')
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
  """Checks for the correctness of various spacing issues in the code.

  Things we check for: spaces around operators, spaces after
  if/for/while/switch, no spaces around parens in function calls, two
  spaces between code and comment, don't start a block with a blank
  line, don't end a function with a blank line, don't add a blank line
  after public/protected/private, don't have too many blank lines in a row.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Don't use "elided" lines here, otherwise we can't check commented lines.
  # Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings,
  raw = clean_lines.lines_without_raw_strings
  line = raw[linenum]
  # Before nixing comments, check if the line is blank for no good
  # reason.  This includes the first line after a block is opened, and
  # blank lines at the end of a function (ie, right before a line like '}'
  #
  # Skip all the blank line checks if we are immediately inside a
  # namespace body.  In other words, don't issue blank line warnings
  # for this block:
  #   namespace {
  #
  #   }
  #
  # A warning about missing end of namespace comments will be issued instead.
  #
  # Also skip blank line checks for 'extern "C"' blocks, which are formatted
  # like namespaces.
  if (IsBlankLine(line) and
      not nesting_state.InNamespaceBody() and
      not nesting_state.InExternC()):
    elided = clean_lines.elided
    prev_line = elided[linenum - 1]
    prevbrace = prev_line.rfind('{')
    # TODO(unknown): Don't complain if line before blank line, and line after,
    #                both start with alnums and are indented the same amount.
    #                This ignores whitespace at the start of a namespace block
    #                because those are not usually indented.
    if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
      # OK, we have a blank line at the start of a code block.  Before we
      # complain, we check if it is an exception to the rule: The previous
      # non-empty line has the parameters of a function header that are indented
      # 4 spaces (because they did not fit in a 80 column line when placed on
      # the same line as the function name).  We also check for the case where
      # the previous line is indented 6 spaces, which may happen when the
      # initializers of a constructor do not fit into a 80 column line.
      exception = False
      if Match(r' {6}\w', prev_line):  # Initializer list?
        # We are looking for the opening column of initializer list, which
        # should be indented 4 spaces to cause 6 space indentation afterwards.
        search_position = linenum-2
        while (search_position >= 0
               and Match(r' {6}\w', elided[search_position])):
          search_position -= 1
        exception = (search_position >= 0
                     and elided[search_position][:5] == '    :')
      else:
        # Search for the function arguments or an initializer list.  We use a
        # simple heuristic here: If the line is indented 4 spaces; and we have a
        # closing paren, without the opening paren, followed by an opening brace
        # or colon (for initializer lists) we assume that it is the last line of
        # a function header.  If we have a colon indented 4 spaces, it is an
        # initializer list.
        exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
                           prev_line)
                     or Match(r' {4}:', prev_line))
      if not exception:
        error(filename, linenum, 'whitespace/blank_line', 2,
              'Redundant blank line at the start of a code block '
              'should be deleted.')
    # Ignore blank lines at the end of a block in a long if-else
    # chain, like this:
    #   if (condition1) {
    #     // Something followed by a blank line
    #
    #   } else if (condition2) {
    #     // Something else
    #   }
    if linenum + 1 < clean_lines.NumLines():
      next_line = raw[linenum + 1]
      if (next_line
          and Match(r'\s*}', next_line)
          and next_line.find('} else ') == -1):
        error(filename, linenum, 'whitespace/blank_line', 3,
              'Redundant blank line at the end of a code block '
              'should be deleted.')
    matched = Match(r'\s*(public|protected|private):', prev_line)
    if matched:
      error(filename, linenum, 'whitespace/blank_line', 3,
            'Do not leave a blank line after "%s:"' % matched.group(1))
  # Next, check comments
  next_line_start = 0
  if linenum + 1 < clean_lines.NumLines():
    next_line = raw[linenum + 1]
    next_line_start = len(next_line) - len(next_line.lstrip())
  CheckComment(line, filename, linenum, next_line_start, error)
  # get rid of comments and strings
  line = clean_lines.elided[linenum]
  # You shouldn't have spaces before your brackets, except for C++11 attributes
  # or maybe after 'delete []', 'return []() {};', or 'auto [abc, ...] = ...;'.
  if (Search(r'\w\s+\[(?!\[)', line) and
      not Search(r'(?:auto&?|delete|return)\s+\[', line)):
    error(filename, linenum, 'whitespace/braces', 5,
          'Extra space before [')
  # In range-based for, we wanted spaces before and after the colon, but
  # not around "::" tokens that might appear.
  if (Search(r'for *\(.*[^:]:[^: ]', line) or
      Search(r'for *\(.*[^: ]:[^:]', line)):
    error(filename, linenum, 'whitespace/forcolon', 2,
          'Missing space around colon in range-based for loop')
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing around operators.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Don't try to do spacing checks for operator methods.  Do this by
  # replacing the troublesome characters with something else,
  # preserving column position for all other characters.
  #
  # The replacement is done repeatedly to avoid false positives from
  # operators that call operators.
  while True:
    match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
    if match:
      # Blank out the operator symbol with underscores of the same width.
      line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
    else:
      break

  # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
  # Otherwise not.  Note we only check for non-spaces on *both* sides;
  # sometimes people put non-spaces on one side when aligning ='s among
  # many lines (not that this is behavior that I approve of...)
  if ((Search(r'[\w.]=', line) or
       Search(r'=[\w.]', line))
      and not Search(r'\b(if|while|for) ', line)
      # Operators taken from [lex.operators] in C++11 standard.
      and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
      and not Search(r'operator=', line)):
    error(filename, linenum, 'whitespace/operators', 4,
          'Missing spaces around =')

  # It's ok not to have spaces around binary operators like + - * /, but if
  # there's too little whitespace, we get concerned.  It's hard to tell,
  # though, so we punt on this one for now.  TODO.

  # You should always have whitespace around binary operators.
  #
  # Check <= and >= first to avoid false positives with < and >, then
  # check non-include lines for spacing around < and >.
  #
  # If the operator is followed by a comma, assume it's being used in a
  # macro context and don't do any checks.  This avoids false
  # positives.
  #
  # Note that && is not included here.  This is because there are too
  # many false positives due to RValue references.
  match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around %s' % match.group(1))
  elif not Match(r'#.*include', line):
    # Look for < that is not surrounded by spaces.  This is only
    # triggered if both sides are missing spaces, even though
    # technically should flag if at least one side is missing a
    # space.  This is done to avoid some false positives with shifts.
    match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
    if match:
      (_, _, end_pos) = CloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if end_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around <')

    # Look for > that is not surrounded by spaces.  Similar to the
    # above, we only trigger if both sides are missing spaces to avoid
    # false positives with shifts.
    match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
    if match:
      (_, _, start_pos) = ReverseCloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if start_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around >')

  # We allow no-spaces around << when used like this: 10<<20, but
  # not otherwise (particularly, not when used as streams)
  #
  # We also allow operators following an opening parenthesis, since
  # those tend to be macros that deal with operators.
  match = Search(r'(operator|[^\s(<])(?:L|UL|LL|ULL|l|ul|ll|ull)?<<([^\s,=<])', line)
  if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
      not (match.group(1) == 'operator' and match.group(2) == ';')):
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around <<')

  # We allow no-spaces around >> for almost anything.  This is because
  # C++11 allows ">>" to close nested templates, which accounts for
  # most cases when ">>" is not followed by a space.
  #
  # We still warn on ">>" followed by alpha character, because that is
  # likely due to ">>" being used for right shifts, e.g.:
  #   value >> alpha
  #
  # When ">>" is used to close templates, the alphanumeric letter that
  # follows would be part of an identifier, and there should still be
  # a space separating the template type and the identifier.
  #   type<type<type>> alpha
  match = Search(r'>>[a-zA-Z_]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around >>')

  # There shouldn't be space around unary operators
  match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 4,
          'Extra space for operator %s' % match.group(1))
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing around parentheses.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # No spaces after an if, while, switch, or for
  match = Search(r' (if\(|for\(|while\(|switch\()', line)
  if match:
    error(filename, linenum, 'whitespace/parens', 5,
          'Missing space before ( in %s' % match.group(1))

  # For if/for/while/switch, the left and right parens should be
  # consistent about how many spaces are inside the parens, and
  # there should either be zero or one spaces inside the parens.
  # We don't want: "if ( foo)" or "if ( foo )".
  # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
  # Regex groups: (2) = spaces after '(', (3) = first non-space char,
  # (4) = spaces before ')'.
  match = Search(r'\b(if|for|while|switch)\s*'
                 r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
                 line)
  if match:
    if len(match.group(2)) != len(match.group(4)):
      if not (match.group(3) == ';' and
              len(match.group(2)) == 1 + len(match.group(4)) or
              not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
        error(filename, linenum, 'whitespace/parens', 5,
              'Mismatching spaces inside () in %s' % match.group(1))
    if len(match.group(2)) not in [0, 1]:
      error(filename, linenum, 'whitespace/parens', 5,
            'Should have zero or one spaces inside ( and ) in %s' %
            match.group(1))
def CheckCommaSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing near commas and semicolons.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  raw = clean_lines.lines_without_raw_strings
  line = clean_lines.elided[linenum]

  # You should always have a space after a comma (either as fn arg or operator)
  #
  # This does not apply when the non-space character following the
  # comma is another comma, since the only time when that happens is
  # for empty macro arguments.
  #
  # We run this check in two passes: first pass on elided lines to
  # verify that lines contain missing whitespaces, second pass on raw
  # lines to confirm that those missing whitespaces are not due to
  # elided comments.
  # "operator ,(" is rewritten to "F(" first so the comma in an
  # overloaded comma operator's name does not trigger the check.
  if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
      Search(r',[^,\s]', raw[linenum])):
    error(filename, linenum, 'whitespace/comma', 3,
          'Missing space after ,')

  # You should always have a space after a semicolon
  # except for few corner cases
  # TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
  # space after ;
  if Search(r';[^\s};\\)/]', line):
    error(filename, linenum, 'whitespace/semicolon', 3,
          'Missing space after ;')
def _IsType(clean_lines, nesting_state, expr):
  """Check if expression looks like a type name, returns true if so.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    expr: The expression to check.
  Returns:
    True, if token looks like a type.
  """
  # Keep only the last token in the expression
  last_word = Match(r'^.*(\b\S+)$', expr)
  if last_word:
    token = last_word.group(1)
  else:
    token = expr

  # Match native types and stdint types
  if _TYPES.match(token):
    return True

  # Try a bit harder to match templated types.  Walk up the nesting
  # stack until we find something that resembles a typename
  # declaration for what we are looking for.
  typename_pattern = (r'\b(?:typename|class|struct)\s+' + re.escape(token) +
                      r'\b')
  block_index = len(nesting_state.stack) - 1
  while block_index >= 0:
    # Hitting a namespace means we've left any enclosing template scope,
    # so the token cannot be a template parameter here.
    if isinstance(nesting_state.stack[block_index], _NamespaceInfo):
      return False

    # Found where the opening brace is.  We want to scan from this
    # line up to the beginning of the function, minus a few lines.
    #   template <typename Type1,  // stop scanning here
    #             ...>
    #   class C
    #     : public ... {  // start scanning here
    last_line = nesting_state.stack[block_index].starting_linenum

    next_block_start = 0
    if block_index > 0:
      next_block_start = nesting_state.stack[block_index - 1].starting_linenum
    first_line = last_line
    while first_line >= next_block_start:
      if clean_lines.elided[first_line].find('template') >= 0:
        break
      first_line -= 1
    if first_line < next_block_start:
      # Didn't find any "template" keyword before reaching the next block,
      # there are probably no template things to check for this block
      block_index -= 1
      continue

    # Look for typename in the specified range
    for i in xrange(first_line, last_line + 1, 1):
      if Search(typename_pattern, clean_lines.elided[i]):
        return True
    block_index -= 1

  return False
def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):
  """Checks for horizontal spacing near commas.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Except after an opening paren, or after another opening brace (in case of
  # an initializer list, for instance), you should have spaces before your
  # braces when they are delimiting blocks, classes, namespaces etc.
  # And since you should never have braces at the beginning of a line,
  # this is an easy test.  Except that braces used for initialization don't
  # follow the same rule; we often don't want spaces before those.
  match = Match(r'^(.*[^ ({>]){', line)

  if match:
    # Try a bit harder to check for brace initialization.  This
    # happens in one of the following forms:
    #   Constructor() : initializer_list_{} { ... }
    #   Constructor{}.MemberFunction()
    #   Type variable{};
    #   FunctionCall(type{}, ...);
    #   LastArgument(..., type{});
    #   LOG(INFO) << type{} << " ...";
    #   map_of_type[{...}] = ...;
    #   ternary = expr ? new type{} : nullptr;
    #   OuterTemplate<InnerTemplateConstructor<Type>{}>
    #
    # We check for the character following the closing brace, and
    # silence the warning if it's one of those listed above, i.e.
    # "{.;,)<>]:".
    #
    # To account for nested initializer list, we allow any number of
    # closing braces up to "{;,)<".  We can't simply silence the
    # warning on first sight of closing brace, because that would
    # cause false negatives for things that are not initializer lists.
    #   Silence this:         But not this:
    #     Outer{                if (...) {
    #       Inner{...}            if (...){  // Missing space before {
    #     };                    }
    #
    # There is a false negative with this approach if people inserted
    # spurious semicolons, e.g. "if (cond){};", but we will catch the
    # spurious semicolon with a separate check.
    leading_text = match.group(1)
    (endline, endlinenum, endpos) = CloseExpression(
        clean_lines, linenum, len(match.group(1)))
    trailing_text = ''
    if endpos > -1:
      trailing_text = endline[endpos:]
    # Accumulate up to two following lines of context after the closing brace.
    for offset in xrange(endlinenum + 1,
                         min(endlinenum + 3, clean_lines.NumLines() - 1)):
      trailing_text += clean_lines.elided[offset]
    # We also suppress warnings for `uint64_t{expression}` etc., as the style
    # guide recommends brace initialization for integral types to avoid
    # overflow/truncation.
    if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text)
        and not _IsType(clean_lines, nesting_state, leading_text)):
      error(filename, linenum, 'whitespace/braces', 5,
            'Missing space before {')

  # Make sure '} else {' has spaces.
  if Search(r'}else', line):
    error(filename, linenum, 'whitespace/braces', 5,
          'Missing space before else')

  # You shouldn't have a space before a semicolon at the end of the line.
  # There's a special case for "for" since the style guide allows space before
  # the semicolon there.
  if Search(r':\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Semicolon defining empty statement. Use {} instead.')
  elif Search(r'^\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Line contains only semicolon. If this should be an empty statement, '
          'use {} instead.')
  elif (Search(r'\s+;\s*$', line) and
        not Search(r'\bfor\b', line)):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Extra space before last semicolon. If this should be an empty '
          'statement, use {} instead.')
def IsDecltype(clean_lines, linenum, column):
  """Check if the token ending on (linenum, column) is decltype().

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: the number of the line to check.
    column: end column of the token to check.
  Returns:
    True if this token is decltype() expression, False otherwise.
  """
  # Walk backwards over the matching parentheses; prefix_text is the line
  # content preceding the opening paren at paren_col.
  (prefix_text, _, paren_col) = ReverseCloseExpression(
      clean_lines, linenum, column)
  if paren_col < 0:
    # Unbalanced parentheses: cannot be a decltype() expression.
    return False
  # The token is decltype() exactly when 'decltype' immediately precedes
  # the opening parenthesis (allowing whitespace).
  return bool(Search(r'\bdecltype\s*$', prefix_text[0:paren_col]))
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
  """Checks for additional blank line issues related to sections.

  Currently the only thing checked here is blank line before protected/private.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    class_info: A _ClassInfo objects.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Skip checks if the class is small, where small means 25 lines or less.
  # 25 lines seems like a good cutoff since that's the usual height of
  # terminals, and any class that can't fit in one screen can't really
  # be considered "small".
  #
  # Also skip checks if we are on the first line.  This accounts for
  # classes that look like
  #   class Foo { public: ... };
  #
  # If we didn't find the end of the class, last_line would be zero,
  # and the check will be skipped by the first condition.
  if (class_info.last_line - class_info.starting_linenum <= 24 or
      linenum <= class_info.starting_linenum):
    return

  matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
  if matched:
    # Issue warning if the line before public/protected/private was
    # not a blank line, but don't do this if the previous line contains
    # "class" or "struct".  This can happen two ways:
    #  - We are at the beginning of the class.
    #  - We are forward-declaring an inner class that is semantically
    #    private, but needed to be public for implementation reasons.
    # Also ignores cases where the previous line ends with a backslash as can be
    # common when defining classes in C macros.
    prev_line = clean_lines.lines[linenum - 1]
    if (not IsBlankLine(prev_line) and
        not Search(r'\b(class|struct)\b', prev_line) and
        not Search(r'\\$', prev_line)):
      # Try a bit harder to find the beginning of the class.  This is to
      # account for multi-line base-specifier lists, e.g.:
      #   class Derived
      #       : public Base {
      end_class_head = class_info.starting_linenum
      for i in range(class_info.starting_linenum, linenum):
        if Search(r'\{\s*$', clean_lines.lines[i]):
          end_class_head = i
          break
      if end_class_head < linenum - 1:
        error(filename, linenum, 'whitespace/blank_line', 3,
              '"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
  """Return the most recent non-blank line and its line number.

  Args:
    clean_lines: A CleansedLines instance containing the file contents.
    linenum: The number of the line to check.

  Returns:
    A tuple with two elements.  The first element is the contents of the last
    non-blank line before the current line, or the empty string if this is the
    first non-blank line.  The second is the line number of that line, or -1
    if this is the first non-blank line.
  """
  # Scan upwards from the line just above linenum until we hit a
  # non-blank (elided) line or run off the top of the file.
  for candidate in range(linenum - 1, -1, -1):
    contents = clean_lines.elided[candidate]
    if not IsBlankLine(contents):
      return (contents, candidate)
  return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
  """Looks for misplaced braces (e.g. at the end of line).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]        # get rid of comments and strings

  if Match(r'\s*{\s*$', line):
    # We allow an open brace to start a line in the case where someone is using
    # braces in a block to explicitly create a new scope, which is commonly used
    # to control the lifetime of stack-allocated variables.  Braces are also
    # used for brace initializers inside function calls.  We don't detect this
    # perfectly: we just don't complain if the last non-whitespace character on
    # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
    # previous line starts a preprocessor block. We also allow a brace on the
    # following line if it is part of an array initialization and would not fit
    # within the 80 character limit of the preceding line.
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if (not Search(r'[,;:}{(]\s*$', prevline) and
        not Match(r'\s*#', prevline) and
        not (GetLineWidth(prevline) > _line_length - 2 and '[]' in prevline)):
      error(filename, linenum, 'whitespace/braces', 4,
            '{ should almost always be at the end of the previous line')

  # An else clause should be on the same line as the preceding closing brace.
  if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if Match(r'\s*}\s*$', prevline):
      error(filename, linenum, 'whitespace/newline', 4,
            'An else should appear on the same line as the preceding }')

  # If braces come on one side of an else, they should be on both.
  # However, we have to worry about "else if" that spans multiple lines!
  if Search(r'else if\s*\(', line):       # could be multi-line if
    brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
    # find the ( after the if
    pos = line.find('else if')
    pos = line.find('(', pos)
    if pos > 0:
      (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
      brace_on_right = endline[endpos:].find('{') != -1
      if brace_on_left != brace_on_right:    # must be brace after if
        error(filename, linenum, 'readability/braces', 5,
              'If an else has a brace on one side, it should have it on both')
  elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
    error(filename, linenum, 'readability/braces', 5,
          'If an else has a brace on one side, it should have it on both')

  # Likewise, an else should never have the else clause on the same line
  if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
    error(filename, linenum, 'whitespace/newline', 4,
          'Else clause should never be on same line as else (use 2 lines)')

  # In the same way, a do/while should never be on one line
  if Match(r'\s*do [^\s{]', line):
    error(filename, linenum, 'whitespace/newline', 4,
          'do/while clauses should not be on a single line')

  # Check single-line if/else bodies. The style guide says 'curly braces are not
  # required for single-line statements'. We additionally allow multi-line,
  # single statements, but we reject anything with more than one semicolon in
  # it. This means that the first semicolon after the if should be at the end of
  # its line, and the line after that should have an indent level equal to or
  # lower than the if. We also check for ambiguous if/else nesting without
  # braces.
  if_else_match = Search(r'\b(if\s*(|constexpr)\s*\(|else\b)', line)
  if if_else_match and not Match(r'\s*#', line):
    if_indent = GetIndentLevel(line)
    endline, endlinenum, endpos = line, linenum, if_else_match.end()
    if_match = Search(r'\bif\s*(|constexpr)\s*\(', line)
    if if_match:
      # This could be a multiline if condition, so find the end first.
      pos = if_match.end() - 1
      (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
    # Check for an opening brace, either directly after the if or on the next
    # line. If found, this isn't a single-statement conditional.
    if (not Match(r'\s*{', endline[endpos:])
        and not (Match(r'\s*$', endline[endpos:])
                 and endlinenum < (len(clean_lines.elided) - 1)
                 and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
      # Scan forward for the first semicolon after the condition.
      while (endlinenum < len(clean_lines.elided)
             and ';' not in clean_lines.elided[endlinenum][endpos:]):
        endlinenum += 1
        endpos = 0
      if endlinenum < len(clean_lines.elided):
        endline = clean_lines.elided[endlinenum]
        # We allow a mix of whitespace and closing braces (e.g. for one-liner
        # methods) and a single \ after the semicolon (for macros)
        endpos = endline.find(';')
        if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
          # Semicolon isn't the last character, there's something trailing.
          # Output a warning if the semicolon is not contained inside
          # a lambda expression.
          if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
                       endline):
            error(filename, linenum, 'readability/braces', 4,
                  'If/else bodies with multiple statements require braces')
        elif endlinenum < len(clean_lines.elided) - 1:
          # Make sure the next line is dedented
          next_line = clean_lines.elided[endlinenum + 1]
          next_indent = GetIndentLevel(next_line)
          # With ambiguous nested if statements, this will error out on the
          # if that *doesn't* match the else, regardless of whether it's the
          # inner one or outer one.
          if (if_match and Match(r'\s*else\b', next_line)
              and next_indent != if_indent):
            error(filename, linenum, 'readability/braces', 4,
                  'Else clause should be indented at the same level as if. '
                  'Ambiguous nested if/else chains require braces.')
          elif next_indent > if_indent:
            error(filename, linenum, 'readability/braces', 4,
                  'If/else bodies with multiple statements require braces')
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
  """Looks for redundant trailing semicolon.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """

  line = clean_lines.elided[linenum]

  # Block bodies should not be followed by a semicolon.  Due to C++11
  # brace initialization, there are more places where semicolons are
  # required than not, so we explicitly list the allowed rules rather
  # than listing the disallowed ones.  These are the places where "};"
  # should be replaced by just "}":
  # 1. Some flavor of block following closing parenthesis:
  #    for (;;) {};
  #    while (...) {};
  #    switch (...) {};
  #    Function(...) {};
  #    if (...) {};
  #    if (...) else if (...) {};
  #
  # 2. else block:
  #    if (...) else {};
  #
  # 3. const member function:
  #    Function(...) const {};
  #
  # 4. Block following some statement:
  #    x = 42;
  #    {};
  #
  # 5. Block at the beginning of a function:
  #    Function(...) {
  #      {};
  #    }
  #
  #    Note that naively checking for the preceding "{" will also match
  #    braces inside multi-dimensional arrays, but this is fine since
  #    that expression will not contain semicolons.
  #
  # 6. Block following another block:
  #    while (true) {}
  #    {};
  #
  # 7. End of namespaces:
  #    namespace {};
  #
  #    These semicolons seems far more common than other kinds of
  #    redundant semicolons, possibly due to people converting classes
  #    to namespaces.  For now we do not warn for this case.
  #
  # Try matching case 1 first.
  match = Match(r'^(.*\)\s*)\{', line)
  if match:
    # Matched closing parenthesis (case 1).  Check the token before the
    # matching opening parenthesis, and don't warn if it looks like a
    # macro.  This avoids these false positives:
    #  - macro that defines a base class
    #  - multi-line macro that defines a base class
    #  - macro that defines the whole class-head
    #
    # But we still issue warnings for macros that we know are safe to
    # warn, specifically:
    #  - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
    #  - TYPED_TEST
    #  - INTERFACE_DEF
    #  - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
    #
    # We implement a list of safe macros instead of a list of
    # unsafe macros, even though the latter appears less frequently in
    # google code and would have been easier to implement.  This is because
    # the downside for getting the allowed checks wrong means some extra
    # semicolons, while the downside for getting disallowed checks wrong
    # would result in compile errors.
    #
    # In addition to macros, we also don't want to warn on
    #  - Compound literals
    #  - Lambdas
    #  - alignas specifier with anonymous structs
    #  - decltype
    closing_brace_pos = match.group(1).rfind(')')
    opening_parenthesis = ReverseCloseExpression(
        clean_lines, linenum, closing_brace_pos)
    if opening_parenthesis[2] > -1:
      line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
      macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix)
      func = Match(r'^(.*\])\s*$', line_prefix)
      if ((macro and
           macro.group(1) not in (
               'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
               'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
               'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
          (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
          Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
          Search(r'\bdecltype$', line_prefix) or
          Search(r'\s+=\s*$', line_prefix)):
        match = None
    if (match and
        opening_parenthesis[1] > 1 and
        Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
      # Multi-line lambda-expression
      match = None

  else:
    # Try matching cases 2-3.
    match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
    if not match:
      # Try matching cases 4-6.  These are always matched on separate lines.
      #
      # Note that we can't simply concatenate the previous line to the
      # current line and do a single match, otherwise we may output
      # duplicate warnings for the blank line case:
      #   if (cond) {
      #     // blank line
      #   }
      prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
      if prevline and Search(r'[;{}]\s*$', prevline):
        match = Match(r'^(\s*)\{', line)

  # Check matching closing brace
  if match:
    (endline, endlinenum, endpos) = CloseExpression(
        clean_lines, linenum, len(match.group(1)))
    if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
      # Current {} pair is eligible for semicolon check, and we have found
      # the redundant semicolon, output warning here.
      #
      # Note: because we are scanning forward for opening braces, and
      # outputting warnings for the matching closing brace, if there are
      # nested blocks with trailing semicolons, we will get the error
      # messages in reversed order.

      # We need to check the line forward for NOLINT
      raw_lines = clean_lines.raw_lines
      ParseNolintSuppressions(filename, raw_lines[endlinenum-1], endlinenum-1,
                              error)
      ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum,
                              error)

      error(filename, endlinenum, 'readability/braces', 4,
            "You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
  """Look for empty loop/conditional body with only a single semicolon.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """

  # Search for loop keywords at the beginning of the line.  Because only
  # whitespaces are allowed before the keywords, this will also ignore most
  # do-while-loops, since those lines should start with closing brace.
  #
  # We also check "if" blocks here, since an empty conditional block
  # is likely an error.
  line = clean_lines.elided[linenum]
  matched = Match(r'\s*(for|while|if)\s*\(', line)
  if matched:
    # Find the end of the conditional expression.
    (end_line, end_linenum, end_pos) = CloseExpression(
        clean_lines, linenum, line.find('('))

    # Output warning if what follows the condition expression is a semicolon.
    # No warning for all other cases, including whitespace or newline, since we
    # have a separate check for semicolons preceded by whitespace.
    if end_pos >= 0 and Match(r';', end_line[end_pos:]):
      if matched.group(1) == 'if':
        error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
              'Empty conditional bodies should use {}')
      else:
        error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
              'Empty loop bodies should use {} or continue')

    # Check for if statements that have completely empty bodies (no comments)
    # and no else clauses.
    if end_pos >= 0 and matched.group(1) == 'if':
      # Find the position of the opening { for the if statement.
      # Return without logging an error if it has no brackets.
      opening_linenum = end_linenum
      opening_line_fragment = end_line[end_pos:]
      # Loop until EOF or find anything that's not whitespace or opening {.
      while not Search(r'^\s*\{', opening_line_fragment):
        if Search(r'^(?!\s*$)', opening_line_fragment):
          # Conditional has no brackets.
          return
        opening_linenum += 1
        if opening_linenum == len(clean_lines.elided):
          # Couldn't find conditional's opening { or any code before EOF.
          return
        opening_line_fragment = clean_lines.elided[opening_linenum]
      # Set opening_line (opening_line_fragment may not be entire opening line).
      opening_line = clean_lines.elided[opening_linenum]

      # Find the position of the closing }.
      opening_pos = opening_line_fragment.find('{')
      if opening_linenum == end_linenum:
        # We need to make opening_pos relative to the start of the entire line.
        opening_pos += end_pos
      (closing_line, closing_linenum, closing_pos) = CloseExpression(
          clean_lines, opening_linenum, opening_pos)
      if closing_pos < 0:
        return

      # Now construct the body of the conditional. This consists of the portion
      # of the opening line after the {, all lines until the closing line,
      # and the portion of the closing line before the }.
      if (clean_lines.raw_lines[opening_linenum] !=
          CleanseComments(clean_lines.raw_lines[opening_linenum])):
        # Opening line ends with a comment, so conditional isn't empty.
        return
      if closing_linenum > opening_linenum:
        # Opening line after the {. Ignore comments here since we checked above.
        bodylist = list(opening_line[opening_pos+1:])
        # All lines until closing line, excluding closing line, with comments.
        bodylist.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
        # Closing line before the }. Won't (and can't) have comments.
        bodylist.append(clean_lines.elided[closing_linenum][:closing_pos-1])
        body = '\n'.join(bodylist)
      else:
        # If statement has brackets and fits on a single line.
        body = opening_line[opening_pos+1:closing_pos-1]

      # Check if the body is empty
      if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body):
        return
      # The body is empty. Now make sure there's not an else clause.
      current_linenum = closing_linenum
      current_line_fragment = closing_line[closing_pos:]
      # Loop until EOF or find anything that's not whitespace or else clause.
      while Search(r'^\s*$|^(?=\s*else)', current_line_fragment):
        if Search(r'^(?=\s*else)', current_line_fragment):
          # Found an else clause, so don't log an error.
          return
        current_linenum += 1
        if current_linenum == len(clean_lines.elided):
          break
        current_line_fragment = clean_lines.elided[current_linenum]

      # The body is empty and there's no else clause until EOF or other code.
      error(filename, end_linenum, 'whitespace/empty_if_body', 4,
            ('If statement had no body and no else clause'))
def FindCheckMacro(line):
  """Find a replaceable CHECK-like macro.

  Args:
    line: line to search on.
  Returns:
    (macro name, start position), or (None, -1) if no replaceable
    macro is found.
  """
  for macro in _CHECK_MACROS:
    # Fast path: skip macros whose name doesn't even occur in the line.
    if line.find(macro) < 0:
      continue
    # Confirm with a regular expression that this really is the expected
    # CHECK macro followed by an opening parenthesis, as opposed to some
    # other identifier that merely contains the CHECK substring.
    matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
    if matched:
      return (macro, len(matched.group(1)))
  return (None, -1)
def CheckCheck(filename, clean_lines, linenum, error):
  """Checks the use of CHECK and EXPECT macros.

  Suggests the dedicated comparison form (e.g. CHECK_EQ(a, b)) when a
  CHECK-style macro wraps a single binary comparison where at least one
  operand is a constant literal.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Decide the set of replacement macros that should be suggested
  lines = clean_lines.elided
  (check_macro, start_pos) = FindCheckMacro(lines[linenum])
  if not check_macro:
    return
  # Find end of the boolean expression by matching parentheses
  (last_line, end_line, end_pos) = CloseExpression(
      clean_lines, linenum, start_pos)
  if end_pos < 0:
    return
  # If the check macro is followed by something other than a
  # semicolon, assume users will log their own custom error messages
  # and don't suggest any replacements.
  if not Match(r'\s*;', last_line[end_pos:]):
    return
  # Collect the macro's argument text, which may span several lines.
  if linenum == end_line:
    expression = lines[linenum][start_pos + 1:end_pos - 1]
  else:
    expression = lines[linenum][start_pos + 1:]
    for i in xrange(linenum + 1, end_line):
      expression += lines[i]
    expression += last_line[0:end_pos - 1]
  # Parse expression so that we can take parentheses into account.
  # This avoids false positives for inputs like "CHECK((a < 4) == b)",
  # which is not replaceable by CHECK_LE.
  lhs = ''
  rhs = ''
  operator = None
  while expression:
    # Try to consume the next operator or opening parenthesis.
    matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
                    r'==|!=|>=|>|<=|<|\()(.*)$', expression)
    if matched:
      token = matched.group(1)
      if token == '(':
        # Parenthesized operand: swallow everything up to the matching
        # close paren as part of the left-hand side.
        expression = matched.group(2)
        (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
        if end < 0:
          return  # Unmatched parenthesis
        lhs += '(' + expression[0:end]
        expression = expression[end:]
      elif token in ('&&', '||'):
        # Logical and/or operators. This means the expression
        # contains more than one term, for example:
        #   CHECK(42 < a && a < b);
        #
        # These are not replaceable with CHECK_LE, so bail out early.
        return
      elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
        # Non-relational operator
        lhs += token
        expression = matched.group(2)
      else:
        # Relational operator: everything after it is the right-hand side.
        operator = token
        rhs = matched.group(2)
        break
    else:
      # Unparenthesized operand. Instead of appending to lhs one character
      # at a time, we do another regular expression match to consume several
      # characters at once if possible. Trivial benchmark shows that this
      # is more efficient when the operands are longer than a single
      # character, which is generally the case.
      matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
      if not matched:
        matched = Match(r'^(\s*\S)(.*)$', expression)
        if not matched:
          break
      lhs += matched.group(1)
      expression = matched.group(2)
  # Only apply checks if we got all parts of the boolean expression
  if not (lhs and operator and rhs):
    return
  # Check that rhs do not contain logical operators. We already know
  # that lhs is fine since the loop above parses out && and ||.
  if rhs.find('&&') > -1 or rhs.find('||') > -1:
    return
  # At least one of the operands must be a constant literal. This is
  # to avoid suggesting replacements for unprintable things like
  # CHECK(variable != iterator)
  #
  # The following pattern matches decimal, hex integers, strings, and
  # characters (in that order).
  lhs = lhs.strip()
  rhs = rhs.strip()
  match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
  if Match(match_constant, lhs) or Match(match_constant, rhs):
    # Note: since we know both lhs and rhs, we can provide a more
    # descriptive error message like:
    #   Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
    # Instead of:
    #   Consider using CHECK_EQ instead of CHECK(a == b)
    #
    # We are still keeping the less descriptive message because if lhs
    # or rhs gets long, the error message might become unreadable.
    error(filename, linenum, 'readability/check', 2,
          'Consider using %s instead of %s(a %s b)' % (
              _CHECK_REPLACEMENT[check_macro][operator],
              check_macro, operator))
def CheckAltTokens(filename, clean_lines, linenum, error):
  """Check alternative keywords being used in boolean expressions.

  Flags alternative operator spellings (e.g. 'and', 'or', 'not') and
  suggests the symbolic form.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Preprocessor directives are exempt from this check.
  if Match(r'^\s*#', line):
    return
  # Last ditch effort to avoid multi-line comments. This will not help
  # if the comment started before the current line or ended after the
  # current line, but it catches most of the false positives. At least,
  # it provides a way to workaround this warning for people who use
  # multi-line comments in preprocessor macros.
  #
  # TODO(unknown): remove this once cpplint has better support for
  # multi-line comments.
  if '/*' in line or '*/' in line:
    return
  for token_match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
    alt_token = token_match.group(1)
    error(filename, linenum, 'readability/alt_tokens', 2,
          'Use operator %s instead of %s' % (
              _ALT_TOKEN_REPLACEMENT[alt_token], alt_token))
def GetLineWidth(line):
  """Determines the width of the line in column positions.

  Args:
    line: A string, which may be a Unicode string.

  Returns:
    The width of the line in column positions, accounting for Unicode
    combining characters and wide characters.
  """
  # The 'unicode' name here comes from a Py2/Py3 compat shim defined
  # elsewhere in this file; on Python 3 byte strings fall to the else.
  if isinstance(line, unicode):
    width = 0
    for uc in unicodedata.normalize('NFC', line):
      # East-Asian Wide and Fullwidth characters occupy two columns.
      if unicodedata.east_asian_width(uc) in ('W', 'F'):
        width += 2
      elif not unicodedata.combining(uc):
        # Issue 337
        # https://mail.python.org/pipermail/python-list/2012-August/628809.html
        if (sys.version_info.major, sys.version_info.minor) <= (3, 2):
          # https://github.com/python/cpython/blob/2.7/Include/unicodeobject.h#L81
          is_wide_build = sysconfig.get_config_var("Py_UNICODE_SIZE") >= 4
          # https://github.com/python/cpython/blob/2.7/Objects/unicodeobject.c#L564
          is_low_surrogate = 0xDC00 <= ord(uc) <= 0xDFFF
          # On narrow builds, a non-BMP character iterates as a surrogate
          # pair; don't count the low half as an extra column.
          if not is_wide_build and is_low_surrogate:
            width -= 1
        width += 1
    return width
  else:
    return len(line)
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
               error):
  """Checks rules from the 'C++ style rules' section of cppguide.html.

  Most of these rules are hard to test (naming, comment style), but we
  do what we can. In particular we check for 2-space indents, line lengths,
  tab usage, spaces inside code, etc.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Don't use "elided" lines here, otherwise we can't check commented lines.
  # Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings,
  raw_lines = clean_lines.lines_without_raw_strings
  line = raw_lines[linenum]
  prev = raw_lines[linenum - 1] if linenum > 0 else ''
  if line.find('\t') != -1:
    error(filename, linenum, 'whitespace/tab', 1,
          'Tab found; better to use spaces')
  # One or three blank spaces at the beginning of the line is weird; it's
  # hard to reconcile that with 2-space indents.
  # NOTE: here are the conditions rob pike used for his tests. Mine aren't
  # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
  # if(RLENGTH > 20) complain = 0;
  # if(match($0, " +(error|private|public|protected):")) complain = 0;
  # if(match(prev, "&& *$")) complain = 0;
  # if(match(prev, "\\|\\| *$")) complain = 0;
  # if(match(prev, "[\",=><] *$")) complain = 0;
  # if(match($0, " <<")) complain = 0;
  # if(match(prev, " +for \\(")) complain = 0;
  # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
  scope_or_label_pattern = r'\s*(?:public|private|protected|signals)(?:\s+(?:slots\s*)?)?:\s*\\?$'
  # NOTE(review): this assignment is unused -- classinfo is recomputed
  # below (just before CheckSectionSpacing) before it is ever read.
  classinfo = nesting_state.InnermostClass()
  initial_spaces = 0
  cleansed_line = clean_lines.elided[linenum]
  while initial_spaces < len(line) and line[initial_spaces] == ' ':
    initial_spaces += 1
  # There are certain situations we allow one space, notably for
  # section labels, and also lines containing multi-line raw strings.
  # We also don't check for lines that look like continuation lines
  # (of lines ending in double quotes, commas, equals, or angle brackets)
  # because the rules for how to indent those are non-trivial.
  if (not Search(r'[",=><] *$', prev) and
      (initial_spaces == 1 or initial_spaces == 3) and
      not Match(scope_or_label_pattern, cleansed_line) and
      not (clean_lines.raw_lines[linenum] != line and
           Match(r'^\s*""', line))):
    error(filename, linenum, 'whitespace/indent', 3,
          'Weird number of spaces at line-start. '
          'Are you using a 2-space indent?')
  if line and line[-1].isspace():
    error(filename, linenum, 'whitespace/end_of_line', 4,
          'Line ends in whitespace. Consider deleting these extra spaces.')
  # Check if the line is a header guard.
  is_header_guard = False
  if IsHeaderExtension(file_extension):
    cppvar = GetHeaderGuardCPPVariable(filename)
    if (line.startswith('#ifndef %s' % cppvar) or
        line.startswith('#define %s' % cppvar) or
        line.startswith('#endif // %s' % cppvar)):
      is_header_guard = True
  # #include lines and header guards can be long, since there's no clean way to
  # split them.
  #
  # URLs can be long too. It's possible to split these, but it makes them
  # harder to cut&paste.
  #
  # The "$Id:...$" comment may also get very long without it being the
  # developers fault.
  #
  # Doxygen documentation copying can get pretty long when using an overloaded
  # function declaration
  if (not line.startswith('#include') and not is_header_guard and
      not Match(r'^\s*//.*http(s?)://\S*$', line) and
      not Match(r'^\s*//\s*[^\s]*$', line) and
      not Match(r'^// \$Id:.*#[0-9]+ \$$', line) and
      not Match(r'^\s*/// [@\\](copydoc|copydetails|copybrief) .*$', line)):
    line_width = GetLineWidth(line)
    if line_width > _line_length:
      error(filename, linenum, 'whitespace/line_length', 2,
            'Lines should be <= %i characters long' % _line_length)
  if (cleansed_line.count(';') > 1 and
      # allow simple single line lambdas
      not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}\n\r]*\}',
                line) and
      # for loops are allowed two ;'s (and may run over two lines).
      cleansed_line.find('for') == -1 and
      (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
       GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
      # It's ok to have many commands in a switch case that fits in 1 line
      not ((cleansed_line.find('case ') != -1 or
            cleansed_line.find('default:') != -1) and
           cleansed_line.find('break;') != -1)):
    error(filename, linenum, 'whitespace/newline', 0,
          'More than one command on the same line')
  # Some more style checks
  CheckBraces(filename, clean_lines, linenum, error)
  CheckTrailingSemicolon(filename, clean_lines, linenum, error)
  CheckEmptyBlockBody(filename, clean_lines, linenum, error)
  CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
  CheckOperatorSpacing(filename, clean_lines, linenum, error)
  CheckParenthesisSpacing(filename, clean_lines, linenum, error)
  CheckCommaSpacing(filename, clean_lines, linenum, error)
  CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error)
  CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
  CheckCheck(filename, clean_lines, linenum, error)
  CheckAltTokens(filename, clean_lines, linenum, error)
  classinfo = nesting_state.InnermostClass()
  if classinfo:
    CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
# Matches a #include line, capturing the opening delimiter (group 1:
# '<' or '"') and the included path (group 2).
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
#  _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
#  _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
#  _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
#  _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
  """Drops common suffixes like _test.cc or -inl.h from filename.

  For example:
    >>> _DropCommonSuffixes('foo/foo-inl.h')
    'foo/foo'
    >>> _DropCommonSuffixes('foo/bar/foo.cc')
    'foo/bar/foo'
    >>> _DropCommonSuffixes('foo/foo_internal.h')
    'foo/foo'
    >>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
    'foo/foo_unusualinternal'

  Args:
    filename: The input filename.

  Returns:
    The filename with the common suffix removed.
  """
  # Test-style suffixes ('test.cc', 'unittest.cpp', ...) paired with each
  # non-header extension.
  test_suffixes = ('%s.%s' % (test_suffix.lstrip('_'), ext)
                   for test_suffix, ext in itertools.product(
                       _test_suffixes, GetNonHeaderExtensions()))
  # Implementation-detail suffixes ('inl.h', 'internal.hpp', ...) paired
  # with each header extension.
  internal_suffixes = ('%s.%s' % (internal, ext)
                       for internal, ext in itertools.product(
                           ['inl', 'imp', 'internal'], GetHeaderExtensions()))
  for suffix in itertools.chain(test_suffixes, internal_suffixes):
    # Only strip the suffix when it is preceded by '-' or '_', e.g.
    # 'foo-inl.h' matches but 'fooinl.h' does not.
    if (filename.endswith(suffix) and len(filename) > len(suffix) and
        filename[-len(suffix) - 1] in ('-', '_')):
      return filename[:-len(suffix) - 1]
  return os.path.splitext(filename)[0]
def _ClassifyInclude(fileinfo, include, used_angle_brackets, include_order="default"):
  """Figures out what kind of header 'include' is.

  Args:
    fileinfo: The current file cpplint is running over. A FileInfo instance.
    include: The path to a #included file.
    used_angle_brackets: True if the #include used <> rather than "".
    include_order: "default" or other value allowed in program arguments

  Returns:
    One of the _XXX_HEADER constants.

  For example:
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
    _C_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
    _CPP_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', True, "standardcfirst")
    _OTHER_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
    _LIKELY_MY_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
    ...                  'bar/foo_other_ext.h', False)
    _POSSIBLE_MY_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
    _OTHER_HEADER
  """
  # This is a list of all standard c++ header files, except
  # those already checked for above.
  is_cpp_header = include in _CPP_HEADERS
  # Mark include as C header if in list or in a known folder for standard-ish C headers.
  # Note: with include_order "default" every system header that is not a C++
  # header is treated as a C header (the flag short-circuits the membership
  # test); only non-default orders consult _C_HEADERS / the glibc folders.
  is_std_c_header = (include_order == "default") or (include in _C_HEADERS
            # additional linux glibc header folders
            or Search(r'(?:%s)\/.*\.h' % "|".join(C_STANDARD_HEADER_FOLDERS), include))
  # Headers with C++ extensions shouldn't be considered C system headers
  is_system = used_angle_brackets and not os.path.splitext(include)[1] in ['.hpp', '.hxx', '.h++']
  if is_system:
    if is_cpp_header:
      return _CPP_SYS_HEADER
    if is_std_c_header:
      return _C_SYS_HEADER
    else:
      return _OTHER_SYS_HEADER
  # If the target file and the include we're checking share a
  # basename when we drop common extensions, and the include
  # lives in . , then it's likely to be owned by the target file.
  target_dir, target_base = (
      os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
  include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
  target_dir_pub = os.path.normpath(target_dir + '/../public')
  target_dir_pub = target_dir_pub.replace('\\', '/')
  if target_base == include_base and (
      include_dir == target_dir or
      include_dir == target_dir_pub):
    return _LIKELY_MY_HEADER
  # If the target and include share some initial basename
  # component, it's possible the target is implementing the
  # include, so it's allowed to be first, but we'll never
  # complain if it's not there.
  target_first_component = _RE_FIRST_COMPONENT.match(target_base)
  include_first_component = _RE_FIRST_COMPONENT.match(include_base)
  if (target_first_component and include_first_component and
      target_first_component.group(0) ==
      include_first_component.group(0)):
    return _POSSIBLE_MY_HEADER
  return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
  """Check rules that are applicable to #include lines.

  Strings on #include lines are NOT removed from elided line, to make
  certain tasks easier. However, to prevent false positives, checks
  applicable to #include lines in CheckLanguage must be put here.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    include_state: An _IncludeState instance in which the headers are inserted.
    error: The function to call with any errors found.
  """
  fileinfo = FileInfo(filename)
  line = clean_lines.lines[linenum]
  # "include" should use the new style "foo/bar.h" instead of just "bar.h"
  # Only do this check if the included header follows google naming
  # conventions. If not, assume that it's a 3rd party API that
  # requires special include conventions.
  #
  # We also make an exception for Lua headers, which follow google
  # naming convention but not the include convention.
  match = Match(r'#include\s*"([^/]+\.h)"', line)
  if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
    error(filename, linenum, 'build/include_subdir', 4,
          'Include the directory when naming .h files')
  # we shouldn't include a file more than once. actually, there are a
  # handful of instances where doing so is okay, but in general it's
  # not.
  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    include = match.group(2)
    used_angle_brackets = (match.group(1) == '<')
    duplicate_line = include_state.FindHeader(include)
    if duplicate_line >= 0:
      error(filename, linenum, 'build/include', 4,
            '"%s" already included at %s:%s' %
            (include, filename, duplicate_line))
      return
    # Including a .cc/.cpp file from another package is never OK.
    for extension in GetNonHeaderExtensions():
      if (include.endswith('.' + extension) and
          os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
        error(filename, linenum, 'build/include', 4,
              'Do not include .' + extension + ' files from other packages')
        return
    # We DO want to include a 3rd party looking header if it matches the
    # filename. Otherwise we get an erroneous error "...should include its
    # header" error later.
    third_src_header = False
    for ext in GetHeaderExtensions():
      basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
      headerfile = basefilename + '.' + ext
      headername = FileInfo(headerfile).RepositoryName()
      if headername in include or include in headername:
        third_src_header = True
        break
    if third_src_header or not _THIRD_PARTY_HEADERS_PATTERN.match(include):
      include_state.include_list[-1].append((include, linenum))
      # We want to ensure that headers appear in the right order:
      # 1) for foo.cc, foo.h  (preferred location)
      # 2) c system files
      # 3) cpp system files
      # 4) for foo.cc, foo.h  (deprecated location)
      # 5) other google headers
      #
      # We classify each include statement as one of those 5 types
      # using a number of techniques. The include_state object keeps
      # track of the highest type seen, and complains if we see a
      # lower type after that.
      error_message = include_state.CheckNextIncludeOrder(
          _ClassifyInclude(fileinfo, include, used_angle_brackets, _include_order))
      if error_message:
        error(filename, linenum, 'build/include_order', 4,
              '%s. Should be: %s.h, c system, c++ system, other.' %
              (error_message, fileinfo.BaseName()))
      canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
      if not include_state.IsInAlphabeticalOrder(
          clean_lines, linenum, canonical_include):
        error(filename, linenum, 'build/include_alpha', 4,
              'Include "%s" not in alphabetical order' % include)
      include_state.SetLastHeader(canonical_include)
def _GetTextInside(text, start_pattern):
  r"""Retrieves all the text between matching open and close parentheses.

  Given a string of lines and a regular expression string, retrieve all the
  text following the expression and between opening punctuation symbols like
  (, [, or {, and the matching close-punctuation symbol. This properly nested
  occurrences of the punctuations, so for the text like
    printf(a(), b(c()));
  a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
  start_pattern must match string having an open punctuation symbol at the end.

  Args:
    text: The lines to extract text. Its comments and strings must be elided.
          It can be single line and can span multiple lines.
    start_pattern: The regexp string indicating where to start extracting
                   the text.
  Returns:
    The extracted text.
    None if either the opening string or ending punctuation could not be found.
  """
  # TODO(unknown): Audit cpplint.py to see what places could be profitably
  # rewritten to use _GetTextInside (and use inferior regexp matching today).
  # Map each opening punctuation to the closer that must balance it.
  openers = {'(': ')', '{': '}', '[': ']'}
  closers = set(openers.values())
  # Locate where extraction begins.
  start_match = re.search(start_pattern, text, re.M)
  if not start_match:  # start_pattern not found in text.
    return None
  begin = start_match.end(0)
  assert begin > 0, (
      'start_pattern must ends with an opening punctuation.')
  assert text[begin - 1] in openers, (
      'start_pattern must ends with an opening punctuation.')
  # Stack of closing punctuations we still expect to encounter.
  expected_closers = [openers[text[begin - 1]]]
  idx = begin
  while expected_closers and idx < len(text):
    ch = text[idx]
    if ch == expected_closers[-1]:
      expected_closers.pop()
    elif ch in closers:
      # A closing punctuation without matching opening punctuations.
      return None
    elif ch in openers:
      expected_closers.append(openers[ch])
    idx += 1
  if expected_closers:
    # Opening punctuations left without matching close-punctuations.
    return None
  # All punctuations balanced: return the enclosed text.
  return text[begin:idx - 1]
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
#   < (?: < (?: < [^<>]*
#               >
#           |   [^<>] )*
#         >
#       |   [^<>] )*
#   >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*'  # =~ [[:alpha:]][[:alnum:]]*
# A (possibly const-qualified, possibly templated, possibly scoped) type name.
_RE_PATTERN_TYPE = (
    r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
    r'(?:\w|'
    r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
    r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
    r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
    r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
    r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
    r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
# Stream types (anything ending in 'stream') passed by non-const reference
# are allowed; this pattern recognizes them.
_RE_PATTERN_REF_STREAM_PARAM = (
    r'(?:.*stream\s*&\s*' + _RE_PATTERN_IDENT + r')')
def CheckLanguage(filename, clean_lines, linenum, file_extension,
                  include_state, nesting_state, error):
  """Checks rules from the 'C++ language rules' section of cppguide.html.

  Some of these rules are hard to test (function overloading, using
  uint32 inappropriately), but we do the best we can.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    include_state: An _IncludeState instance in which the headers are inserted.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # If the line is empty or consists of entirely a comment, no need to
  # check it.
  line = clean_lines.elided[linenum]
  if not line:
    return
  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
    return
  # Reset include state across preprocessor directives. This is meant
  # to silence warnings for conditional includes.
  match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
  if match:
    include_state.ResetSection(match.group(1))
  # Perform other checks now that we are sure that this is not an include line
  CheckCasts(filename, clean_lines, linenum, error)
  CheckGlobalStatic(filename, clean_lines, linenum, error)
  CheckPrintf(filename, clean_lines, linenum, error)
  if IsHeaderExtension(file_extension):
    # TODO(unknown): check that 1-arg constructors are explicit.
    #                How to tell it's a constructor?
    #                (handled in CheckForNonStandardConstructs for now)
    # TODO(unknown): check that classes declare or disable copy/assign
    #                (level 1 error)
    pass
  # Check if people are using the verboten C basic types. The only exception
  # we regularly allow is "unsigned short port" for port.
  if Search(r'\bshort port\b', line):
    if not Search(r'\bunsigned short port\b', line):
      error(filename, linenum, 'runtime/int', 4,
            'Use "unsigned short" for ports, not "short"')
  else:
    match = Search(r'\b(short|long(?! +double)|long long)\b', line)
    if match:
      error(filename, linenum, 'runtime/int', 4,
            'Use int16/int64/etc, rather than the C type %s' % match.group(1))
  # Check if some verboten operator overloading is going on
  # TODO(unknown): catch out-of-line unary operator&:
  #   class X {};
  #   int operator&(const X& x) { return 42; }  // unary operator&
  # The trick is it's hard to tell apart from binary operator&:
  #   class Y { int operator&(const Y& x) { return 23; } };  // binary operator&
  if Search(r'\boperator\s*&\s*\(\s*\)', line):
    error(filename, linenum, 'runtime/operator', 4,
          'Unary operator& is dangerous. Do not use it.')
  # Check for suspicious usage of "if" like
  # } if (a == b) {
  if Search(r'\}\s*if\s*\(', line):
    error(filename, linenum, 'readability/braces', 4,
          'Did you mean "else if"? If not, start a new line for "if".')
  # Check for potential format string bugs like printf(foo).
  # We constrain the pattern not to pick things like DocidForPrintf(foo).
  # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
  # TODO(unknown): Catch the following case. Need to change the calling
  # convention of the whole function to process multiple line to handle it.
  #   printf(
  #       boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
  printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
  if printf_args:
    match = Match(r'([\w.\->()]+)$', printf_args)
    if match and match.group(1) != '__VA_ARGS__':
      function_name = re.search(r'\b((?:string)?printf)\s*\(',
                                line, re.I).group(1)
      error(filename, linenum, 'runtime/printf', 4,
            'Potential format string bug. Do %s("%%s", %s) instead.'
            % (function_name, match.group(1)))
  # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
  match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
  if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
    error(filename, linenum, 'runtime/memset', 4,
          'Did you mean "memset(%s, 0, %s)"?'
          % (match.group(1), match.group(2)))
  if Search(r'\busing namespace\b', line):
    if Search(r'\bliterals\b', line):
      error(filename, linenum, 'build/namespaces_literals', 5,
            'Do not use namespace using-directives. '
            'Use using-declarations instead.')
    else:
      error(filename, linenum, 'build/namespaces', 5,
            'Do not use namespace using-directives. '
            'Use using-declarations instead.')
  # Detect variable-length arrays.
  match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
  if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
      match.group(3).find(']') == -1):
    # Split the size using space and arithmetic operators as delimiters.
    # If any of the resulting tokens are not compile time constants then
    # report the error.
    # NOTE(review): the trailing ']' makes the last alternative '>>]'
    # rather than '>>'; since group(3) is known to contain no ']', the
    # expression is effectively never split on '>>'. Looks like a
    # long-standing quirk -- confirm against upstream before changing.
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
    is_const = True
    skip_next = False
    for tok in tokens:
      if skip_next:
        skip_next = False
        continue
      if Search(r'sizeof\(.+\)', tok): continue
      if Search(r'arraysize\(\w+\)', tok): continue
      tok = tok.lstrip('(')
      tok = tok.rstrip(')')
      if not tok: continue
      if Match(r'\d+', tok): continue
      if Match(r'0[xX][0-9a-fA-F]+', tok): continue
      if Match(r'k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
      # A catch all for tricky sizeof cases, including 'sizeof expression',
      # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
      # requires skipping the next token because we split on ' ' and '*'.
      if tok.startswith('sizeof'):
        skip_next = True
        continue
      is_const = False
      break
    if not is_const:
      error(filename, linenum, 'runtime/arrays', 1,
            'Do not use variable-length arrays. Use an appropriately named '
            "('k' followed by CamelCase) compile-time constant for the size.")
  # Check for use of unnamed namespaces in header files. Registration
  # macros are typically OK, so we allow use of "namespace {" on lines
  # that end with backslashes.
  if (IsHeaderExtension(file_extension)
      and Search(r'\bnamespace\s*{', line)
      and line[-1] != '\\'):
    error(filename, linenum, 'build/namespaces_headers', 4,
          'Do not use unnamed namespaces in header files. See '
          'https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
          ' for more information.')
def CheckGlobalStatic(filename, clean_lines, linenum, error):
  """Check for unsafe global or static objects.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Match two lines at a time to support multiline declarations
  if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
    line += clean_lines.elided[linenum + 1].strip()
  # Check for people declaring static/global STL strings at the top level.
  # This is dangerous because the C++ language does not guarantee that
  # globals with constructors are initialized before the first access, and
  # also because globals can be destroyed when some threads are still running.
  # TODO(unknown): Generalize this to also find static unique_ptr instances.
  # TODO(unknown): File bugs for clang-tidy to find these.
  # Groups: 1 = static/const prefix, 2 = trailing 'const', 3 = variable
  # name, 4 = the rest of the declaration (used to rule out functions).
  match = Match(
      r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +'
      r'([a-zA-Z0-9_:]+)\b(.*)',
      line)
  # Remove false positives:
  # - String pointers (as opposed to values).
  #    string *pointer
  #    const string *pointer
  #    string const *pointer
  #    string *const pointer
  #
  # - Functions and template specializations.
  #    string Function<Type>(...
  #    string Class<Type>::Method(...
  #
  # - Operators. These are matched separately because operator names
  #   cross non-word boundaries, and trying to match both operators
  #   and functions at the same time would decrease accuracy of
  #   matching identifiers.
  #    string Class::operator*()
  if (match and
      not Search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line) and
      not Search(r'\boperator\W', line) and
      not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))):
    if Search(r'\bconst\b', line):
      error(filename, linenum, 'runtime/string', 4,
            'For a static/global string constant, use a C style string '
            'instead: "%schar%s %s[]".' %
            (match.group(1), match.group(2) or '', match.group(3)))
    else:
      error(filename, linenum, 'runtime/string', 4,
            'Static/global string variables are not permitted.')
  # Catch 'member_(member_)' style self-initialization in initializer lists.
  if (Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or
      Search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)):
    error(filename, linenum, 'runtime/init', 4,
          'You seem to be initializing a member variable with itself.')
def CheckPrintf(filename, clean_lines, linenum, error):
  """Check for printf related issues.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # When snprintf is used, the second argument shouldn't be a literal.
  snprintf_match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
  if snprintf_match:
    dest, size_arg = snprintf_match.group(1), snprintf_match.group(2)
    # A literal zero size means snprintf is being used to compute the
    # required buffer length, which is fine.
    if size_arg != '0':
      error(filename, linenum, 'runtime/printf', 3,
            'If you can, use sizeof(%s) instead of %s as the 2nd arg '
            'to snprintf.' % (dest, size_arg))
  # Check if some verboten C functions are being used.
  if Search(r'\bsprintf\s*\(', line):
    error(filename, linenum, 'runtime/printf', 5,
          'Never use sprintf. Use snprintf instead.')
  unsafe_match = Search(r'\b(strcpy|strcat)\s*\(', line)
  if unsafe_match:
    error(filename, linenum, 'runtime/printf', 4,
          'Almost always, snprintf is better than %s' % unsafe_match.group(1))
def IsDerivedFunction(clean_lines, linenum):
  """Check if current line contains an inherited function.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if current line contains a function with "override"
    virt-specifier.
  """
  # Scan back a few lines for the start of the current function.
  for candidate in xrange(linenum, max(-1, linenum - 10), -1):
    declaration = Match(r'^([^()]*\w+)\(', clean_lines.elided[candidate])
    if not declaration:
      continue
    # Look for "override" after the matching closing parenthesis.
    tail, _, closing_paren = CloseExpression(
        clean_lines, candidate, len(declaration.group(1)))
    return (closing_paren >= 0 and
            Search(r'\boverride\b', tail[closing_paren:]))
  return False
def IsOutOfLineMethodDefinition(clean_lines, linenum):
  """Check if current line contains an out-of-line method definition.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if current line contains an out-of-line method definition.
  """
  # Scan back a few lines for the start of the current function.
  for candidate in xrange(linenum, max(-1, linenum - 10), -1):
    text = clean_lines.elided[candidate]
    if Match(r'^([^()]*\w+)\(', text):
      # A qualified name ("Class::Method(") marks an out-of-line definition.
      return Match(r'^[^()]*\w+::\w+\(', text) is not None
  return False
def IsInitializerList(clean_lines, linenum):
  """Check if current line is inside constructor initializer list.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if current line appears to be inside constructor initializer
    list, False otherwise.
  """
  # Scan backwards from the current line. Note the range stops at index 2
  # (lines 0 and 1 are never inspected) — part of the original heuristic.
  for i in xrange(linenum, 1, -1):
    line = clean_lines.elided[i]
    if i == linenum:
      # Strip a trailing '{' that opens the constructor body so it is not
      # mistaken for the end-of-statement patterns checked below.
      remove_function_body = Match(r'^(.*)\{\s*$', line)
      if remove_function_body:
        line = remove_function_body.group(1)

    if Search(r'\s:\s*\w+[({]', line):
      # A lone colon tend to indicate the start of a constructor
      # initializer list. It could also be a ternary operator, which
      # also tend to appear in constructor initializer lists as
      # opposed to parameter lists.
      return True
    if Search(r'\}\s*,\s*$', line):
      # A closing brace followed by a comma is probably the end of a
      # brace-initialized member in constructor initializer list.
      return True
    if Search(r'[{};]\s*$', line):
      # Found one of the following:
      # - A closing brace or semicolon, probably the end of the previous
      #   function.
      # - An opening brace, probably the start of current class or namespace.
      #
      # Current line is probably not inside an initializer list since
      # we saw one of those things without seeing the starting colon.
      return False

  # Got to the beginning of the file without seeing the start of
  # constructor initializer list.
  return False
def CheckForNonConstReference(filename, clean_lines, linenum,
                              nesting_state, error):
  """Check for non-const references.

  Separate from CheckLanguage since it scans backwards from current
  line, instead of scanning forward.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Do nothing if there is no '&' on current line.
  line = clean_lines.elided[linenum]
  if '&' not in line:
    return

  # If a function is inherited, current function doesn't have much of
  # a choice, so any non-const references should not be blamed on
  # derived function.
  if IsDerivedFunction(clean_lines, linenum):
    return

  # Don't warn on out-of-line method definitions, as we would warn on the
  # in-line declaration, if it isn't marked with 'override'.
  if IsOutOfLineMethodDefinition(clean_lines, linenum):
    return

  # Long type names may be broken across multiple lines, usually in one
  # of these forms:
  #   LongType
  #       ::LongTypeContinued &identifier
  #   LongType::
  #       LongTypeContinued &identifier
  #   LongType<
  #       ...>::LongTypeContinued &identifier
  #
  # If we detected a type split across two lines, join the previous
  # line to current line so that we can match const references
  # accordingly.
  #
  # Note that this only scans back one line, since scanning back
  # arbitrary number of lines would be expensive. If you have a type
  # that spans more than 2 lines, please use a typedef.
  if linenum > 1:
    previous = None
    if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
      # previous_line\n + ::current_line
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
                        clean_lines.elided[linenum - 1])
    elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
      # previous_line::\n + current_line
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
                        clean_lines.elided[linenum - 1])
    if previous:
      line = previous.group(1) + line.lstrip()
    else:
      # Check for templated parameter that is split across multiple lines
      endpos = line.rfind('>')
      if endpos > -1:
        (_, startline, startpos) = ReverseCloseExpression(
            clean_lines, linenum, endpos)
        if startpos > -1 and startline < linenum:
          # Found the matching < on an earlier line, collect all
          # pieces up to current line.
          line = ''
          for i in xrange(startline, linenum + 1):
            line += clean_lines.elided[i].strip()

  # Check for non-const references in function parameters. A single '&' may
  # be found in the following places:
  #   inside expression: binary & for bitwise AND
  #   inside expression: unary & for taking the address of something
  #   inside declarators: reference parameter
  # We will exclude the first two cases by checking that we are not inside a
  # function body, including one that was just introduced by a trailing '{'.
  # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
  if (nesting_state.previous_stack_top and
      not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
           isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
    # Not at toplevel, not within a class, and not within a namespace
    return

  # Avoid initializer lists. We only need to scan back from the
  # current line for something that starts with ':'.
  #
  # We don't need to check the current line, since the '&' would
  # appear inside the second set of parentheses on the current line as
  # opposed to the first set.
  if linenum > 0:
    for i in xrange(linenum - 1, max(0, linenum - 10), -1):
      previous_line = clean_lines.elided[i]
      if not Search(r'[),]\s*$', previous_line):
        break
      if Match(r'^\s*:\s+\S', previous_line):
        return

  # Avoid preprocessors
  if Search(r'\\\s*$', line):
    return

  # Avoid constructor initializer lists
  if IsInitializerList(clean_lines, linenum):
    return

  # We allow non-const references in a few standard places, like functions
  # called "swap()" or iostream operators like "<<" or ">>". Do not check
  # those function parameters.
  #
  # We also accept & in static_assert, which looks like a function but
  # it's actually a declaration expression.
  allowed_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
                       r'operator\s*[<>][<>]|'
                       r'static_assert|COMPILE_ASSERT'
                       r')\s*\(')
  if Search(allowed_functions, line):
    return
  elif not Search(r'\S+\([^)]*$', line):
    # Don't see an allowed function on this line. Actually we
    # didn't see any function name on this line, so this is likely a
    # multi-line parameter list. Try a bit harder to catch this case.
    for i in xrange(2):
      if (linenum > i and
          Search(allowed_functions, clean_lines.elided[linenum - i - 1])):
        return

  decls = ReplaceAll(r'{[^}]*}', ' ', line)  # exclude function body
  for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
    if (not Match(_RE_PATTERN_CONST_REF_PARAM, parameter) and
        not Match(_RE_PATTERN_REF_STREAM_PARAM, parameter)):
      error(filename, linenum, 'runtime/references', 2,
            'Is this a non-const reference? '
            'If so, make const or use a pointer: ' +
            ReplaceAll(' *<', '<', parameter))
def CheckCasts(filename, clean_lines, linenum, error):
  """Various cast related checks.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Check to see if they're using a conversion function cast.
  # I just try to capture the most common basic types, though there are more.
  # Parameterless conversion functions, such as bool(), are allowed as they are
  # probably a member operator declaration or default constructor.
  match = Search(
      r'(\bnew\s+(?:const\s+)?|\S<\s*(?:const\s+)?)?\b'
      r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
      r'(\([^)].*)', line)
  expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
  if match and not expecting_function:
    matched_type = match.group(2)

    # matched_new_or_template is used to silence two false positives:
    # - New operators
    # - Template arguments with function types
    #
    # For template arguments, we match on types immediately following
    # an opening bracket without any spaces. This is a fast way to
    # silence the common case where the function type is the first
    # template argument. False negative with less-than comparison is
    # avoided because those operators are usually followed by a space.
    #
    #   function<double(double)> // bracket + no space = false positive
    #   value < double(42) // bracket + space = true positive
    matched_new_or_template = match.group(1)

    # Avoid arrays by looking for brackets that come after the closing
    # parenthesis.
    if Match(r'\([^()]+\)\s*\[', match.group(3)):
      return

    # Other things to ignore:
    # - Function pointers
    # - Casts to pointer types
    # - Placement new
    # - Alias declarations
    matched_funcptr = match.group(3)
    if (matched_new_or_template is None and
        not (matched_funcptr and
             (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
                    matched_funcptr) or
              matched_funcptr.startswith('(*)'))) and
        not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
        not Search(r'new\(\S+\)\s*' + matched_type, line)):
      error(filename, linenum, 'readability/casting', 4,
            'Using deprecated casting style. '
            'Use static_cast<%s>(...) instead' %
            matched_type)

  if not expecting_function:
    CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
                    r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)

  # This doesn't catch all cases. Consider (const char * const)"hello".
  #
  # (char *) "foo" should always be a const_cast (reinterpret_cast won't
  # compile).
  if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
                     r'\((char\s?\*+\s?)\)\s*"', error):
    pass
  else:
    # Check pointer casts for other than string constants
    CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
                    r'\((\w+\s?\*+\s?)\)', error)

  # In addition, we look for people taking the address of a cast. This
  # is dangerous -- casts can assign to temporaries, so the pointer doesn't
  # point where you think.
  #
  # Some non-identifier character is required before the '&' for the
  # expression to be recognized as a cast. These are casts:
  #   expression = &static_cast<int*>(temporary());
  #   function(&(int*)(temporary()));
  #
  # This is not a cast:
  #   reference_type&(int* function_param);
  match = Search(
      r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|'
      r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
  if match:
    # Try a better error message when the & is bound to something
    # dereferenced by the casted pointer, as opposed to the casted
    # pointer itself.
    parenthesis_error = False
    match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
    if match:
      _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
      if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
        _, y2, x2 = CloseExpression(clean_lines, y1, x1)
        if x2 >= 0:
          extended_line = clean_lines.elided[y2][x2:]
          if y2 < clean_lines.NumLines() - 1:
            extended_line += clean_lines.elided[y2 + 1]
          if Match(r'\s*(?:->|\[)', extended_line):
            parenthesis_error = True

    if parenthesis_error:
      error(filename, linenum, 'readability/casting', 4,
            ('Are you taking an address of something dereferenced '
             'from a cast? Wrapping the dereferenced expression in '
             'parentheses will make the binding more obvious'))
    else:
      error(filename, linenum, 'runtime/casting', 4,
            ('Are you taking an address of a cast? '
             'This is dangerous: could be a temp var. '
             'Take the address before doing the cast, rather than after'))
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
  """Checks for a C-style cast by looking for the pattern.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    cast_type: The string for the C++ cast to recommend. This is either
      reinterpret_cast, static_cast, or const_cast, depending.
    pattern: The regular expression used to find C-style casts.
    error: The function to call with any errors found.

  Returns:
    True if an error was emitted.
    False otherwise.
  """
  line = clean_lines.elided[linenum]
  match = Search(pattern, line)
  if not match:
    return False

  # Exclude lines with keywords that tend to look like casts
  context = line[0:match.start(1) - 1]
  if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
    return False

  # Try expanding current context to see if we are one level of
  # parentheses inside a macro.
  if linenum > 0:
    for i in xrange(linenum - 1, max(0, linenum - 5), -1):
      context = clean_lines.elided[i] + context
      if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
        return False

  # operator++(int) and operator--(int)
  if context.endswith(' operator++') or context.endswith(' operator--'):
    return False

  # A single unnamed argument for a function tends to look like old style cast.
  # If we see those, don't issue warnings for deprecated casts.
  remainder = line[match.end(0):]
  if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
           remainder):
    return False

  # At this point, all that should be left is actual casts.
  error(filename, linenum, 'readability/casting', 4,
        'Using C-style cast. Use %s<%s>(...) instead' %
        (cast_type, match.group(1)))

  return True
def ExpectingFunctionArgs(clean_lines, linenum):
  """Checks whether the current line is where function type arguments are
  expected.

  Recognizes gMock MOCK_METHOD macros (possibly split across the previous
  one or two lines) and std::function/std::mem_fn template argument lists
  opened on the previous line.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    Truthy if the line at 'linenum' is inside something that expects
    arguments of function types (a match object or boolean expression
    result; callers use it only for truthiness).
  """
  line = clean_lines.elided[linenum]
  return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
          (linenum >= 2 and
           (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
                  clean_lines.elided[linenum - 1]) or
            Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
                  clean_lines.elided[linenum - 2]) or
            Search(r'\bstd::m?function\s*\<\s*$',
                   clean_lines.elided[linenum - 1]))))
# Table mapping standard headers to the template names they provide. Used by
# CheckForIncludeWhatYouUse: seeing one of these names (std-qualified or
# unqualified) makes the corresponding #include required.
_HEADERS_CONTAINING_TEMPLATES = (
    ('<deque>', ('deque',)),
    ('<functional>', ('unary_function', 'binary_function',
                      'plus', 'minus', 'multiplies', 'divides', 'modulus',
                      'negate',
                      'equal_to', 'not_equal_to', 'greater', 'less',
                      'greater_equal', 'less_equal',
                      'logical_and', 'logical_or', 'logical_not',
                      'unary_negate', 'not1', 'binary_negate', 'not2',
                      'bind1st', 'bind2nd',
                      'pointer_to_unary_function',
                      'pointer_to_binary_function',
                      'ptr_fun',
                      'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
                      'mem_fun_ref_t',
                      'const_mem_fun_t', 'const_mem_fun1_t',
                      'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
                      'mem_fun_ref',
                      )),
    ('<limits>', ('numeric_limits',)),
    ('<list>', ('list',)),
    ('<map>', ('multimap',)),
    ('<memory>', ('allocator', 'make_shared', 'make_unique', 'shared_ptr',
                  'unique_ptr', 'weak_ptr')),
    ('<queue>', ('queue', 'priority_queue',)),
    ('<set>', ('multiset',)),
    ('<stack>', ('stack',)),
    ('<string>', ('char_traits', 'basic_string',)),
    ('<tuple>', ('tuple',)),
    ('<unordered_map>', ('unordered_map', 'unordered_multimap')),
    ('<unordered_set>', ('unordered_set', 'unordered_multiset')),
    ('<utility>', ('pair',)),
    ('<vector>', ('vector',)),

    # gcc extensions.
    # Note: std::hash is their hash, ::hash is our hash
    ('<hash_map>', ('hash_map', 'hash_multimap',)),
    ('<hash_set>', ('hash_set', 'hash_multiset',)),
    ('<slist>', ('slist',)),
    )
# Names that commonly appear unqualified and may or may not come from these
# headers; matched more conservatively via
# _re_pattern_headers_maybe_templates below.
_HEADERS_MAYBE_TEMPLATES = (
    ('<algorithm>', ('copy', 'max', 'min', 'min_element', 'sort',
                     'transform',
                     )),
    ('<utility>', ('forward', 'make_pair', 'move', 'swap')),
    )
# Matches any occurrence of the word 'string' (namespace filtering happens
# at the use site in CheckForIncludeWhatYouUse).
_RE_PATTERN_STRING = re.compile(r'\bstring\b')

# Compiled (pattern, template-name, header) triples built from the tables
# above; evaluated once at import time.
_re_pattern_headers_maybe_templates = []
for _header, _templates in _HEADERS_MAYBE_TEMPLATES:
  for _template in _templates:
    # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
    # 'type::max()'.
    _re_pattern_headers_maybe_templates.append(
        (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
         _template,
         _header))

# Match set<type>, but not foo->set<type>, foo.set<type>
_re_pattern_headers_maybe_templates.append(
    (re.compile(r'[^>.]\bset\s*\<'),
     'set<>',
     '<set>'))
# Match 'map<type> var' and 'std::map<type>(...)', but not 'map<type>(...)'
_re_pattern_headers_maybe_templates.append(
    (re.compile(r'(std\b::\bmap\s*\<)|(^(std\b::\b)map\b\(\s*\<)'),
     'map<>',
     '<map>'))

# Other scripts may reach in and modify this pattern.
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
  for _template in _templates:
    _re_pattern_templates.append(
        (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
         _template + '<>',
         _header))
def FilesBelongToSameModule(filename_cc, filename_h):
  """Check if these two filenames belong to the same module.

  The concept of a 'module' here is a as follows: foo.h, foo-inl.h, foo.cc,
  foo_test.cc and foo_unittest.cc belong to the same 'module' if they are in
  the same directory. some/path/public/xyzzy and some/path/internal/xyzzy are
  also considered to belong to the same module here.

  If filename_cc has a longer path than filename_h (e.g.
  '/absolute/path/to/base/sysinfo.cc' including 'base/sysinfo.h'), the
  returned string is the prefix the caller must prepend to the header path
  to open it. We have no access to the real include paths here, so this is
  guesswork.

  Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
  according to this implementation. Because of this, this function gives
  some false positives. This should be sufficiently rare in practice.

  Args:
    filename_cc: is the path for the source (e.g. .cc) file
    filename_h: is the path for the header path

  Returns:
    Tuple with a bool and a string:
    bool: True if filename_cc and filename_h belong to the same module.
    string: the additional prefix needed to open the header file.
  """
  source_info = FileInfo(filename_cc)
  if source_info.Extension().lstrip('.') not in GetNonHeaderExtensions():
    return (False, '')

  header_info = FileInfo(filename_h)
  if not IsHeaderExtension(header_info.Extension().lstrip('.')):
    return (False, '')

  # Reduce the source path to a comparable stem: drop the extension and any
  # test suffix, and fold public/internal directories together.
  source_stem = filename_cc[:-len(source_info.Extension())]
  test_suffix = Search(_TEST_FILE_SUFFIX, source_info.BaseName())
  if test_suffix:
    source_stem = source_stem[:-len(test_suffix.group(1))]
  source_stem = source_stem.replace('/public/', '/').replace('/internal/', '/')

  # Same reduction for the header, additionally dropping an '-inl' suffix.
  header_stem = filename_h[:-len(header_info.Extension())]
  if header_stem.endswith('-inl'):
    header_stem = header_stem[:-len('-inl')]
  header_stem = header_stem.replace('/public/', '/').replace('/internal/', '/')

  same_module = source_stem.endswith(header_stem)
  prefix = source_stem[:-len(header_stem)] if same_module else ''
  return same_module, prefix
def UpdateIncludeState(filename, include_dict, io=codecs):
  """Fill up the include_dict with new includes found from the file.

  Args:
    filename: the name of the header to read.
    include_dict: a dictionary in which the headers are inserted.
    io: The io factory to use to read the file. Provided for testability.

  Returns:
    True if a header was successfully added. False otherwise.
  """
  # Note: the dead 'headerfile = None' pre-assignment that used to precede
  # this try block was removed; the 'with' statement both binds and closes
  # the file, so the name needs no prior initialization.
  try:
    # codecs.open positional signature: (filename, mode, encoding, errors).
    with io.open(filename, 'r', 'utf8', 'replace') as headerfile:
      linenum = 0
      for line in headerfile:
        linenum += 1
        clean_line = CleanseComments(line)
        match = _RE_PATTERN_INCLUDE.search(clean_line)
        if match:
          include = match.group(2)
          # setdefault keeps the first line on which each header was seen.
          include_dict.setdefault(include, linenum)
    return True
  except IOError:
    return False
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
                              io=codecs):
  """Reports for missing stl includes.

  This function will output warnings to make sure you are including the headers
  necessary for the stl containers and functions that you use. We only give one
  reason to include a header. For example, if you use both equal_to<> and
  less<> in a .h file, only one (the latter in the file) of these will be
  reported as a reason to include the <functional>.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    include_state: An _IncludeState instance.
    error: The function to call with any errors found.
    io: The IO factory to use to read the header file. Provided for unittest
      injection.
  """
  required = {}  # A map of header name to linenumber and the template entity.
                 # Example of required: { '<functional>': (1219, 'less<>') }

  # Pass 1: scan every line for uses of STL names and record which header
  # each one requires.
  for linenum in xrange(clean_lines.NumLines()):
    line = clean_lines.elided[linenum]
    if not line or line[0] == '#':
      continue

    # String is special -- it is a non-templatized type in STL.
    matched = _RE_PATTERN_STRING.search(line)
    if matched:
      # Don't warn about strings in non-STL namespaces:
      # (We check only the first match per line; good enough.)
      prefix = line[:matched.start()]
      if prefix.endswith('std::') or not prefix.endswith('::'):
        required['<string>'] = (linenum, 'string')

    for pattern, template, header in _re_pattern_headers_maybe_templates:
      if pattern.search(line):
        required[header] = (linenum, template)

    # The following function is just a speed up, no semantics are changed.
    if not '<' in line:  # Reduces the cpu time usage by skipping lines.
      continue

    for pattern, template, header in _re_pattern_templates:
      matched = pattern.search(line)
      if matched:
        # Don't warn about IWYU in non-STL namespaces:
        # (We check only the first match per line; good enough.)
        prefix = line[:matched.start()]
        if prefix.endswith('std::') or not prefix.endswith('::'):
          required[header] = (linenum, template)

  # The policy is that if you #include something in foo.h you don't need to
  # include it again in foo.cc. Here, we will look at possible includes.
  # Let's flatten the include_state include_list and copy it into a dictionary.
  include_dict = dict([item for sublist in include_state.include_list
                       for item in sublist])

  # Did we find the header for this file (if any) and successfully load it?
  header_found = False

  # Use the absolute path so that matching works properly.
  abs_filename = FileInfo(filename).FullName()

  # For Emacs's flymake.
  # If cpplint is invoked from Emacs's flymake, a temporary file is generated
  # by flymake and that file name might end with '_flymake.cc'. In that case,
  # restore original file name here so that the corresponding header file can be
  # found.
  # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
  # instead of 'foo_flymake.h'
  abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)

  # include_dict is modified during iteration, so we iterate over a copy of
  # the keys.
  header_keys = list(include_dict.keys())
  for header in header_keys:
    (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
    fullpath = common_path + header
    if same_module and UpdateIncludeState(fullpath, include_dict, io):
      header_found = True

  # If we can't find the header file for a .cc, assume it's because we don't
  # know where to look. In that case we'll give up as we're not sure they
  # didn't include it in the .h file.
  # TODO(unknown): Do a better job of finding .h files so we are confident that
  # not having the .h file means there isn't one.
  if not header_found:
    for extension in GetNonHeaderExtensions():
      if filename.endswith('.' + extension):
        return

  # All the lines have been processed, report the errors found.
  for required_header_unstripped in sorted(required, key=required.__getitem__):
    template = required[required_header_unstripped][1]
    if required_header_unstripped.strip('<>"') not in include_dict:
      error(filename, required[required_header_unstripped][0],
            'build/include_what_you_use', 4,
            'Add #include ' + required_header_unstripped + ' for ' + template)
# Matches make_pair with explicit template arguments, e.g. 'make_pair<int,'.
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
  """Check that make_pair's template arguments are deduced.

  G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
  specified explicitly, and such use isn't intended in any case.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Flag any 'make_pair<' occurrence on the elided line.
  if _RE_PATTERN_EXPLICIT_MAKEPAIR.search(clean_lines.elided[linenum]):
    error(filename, linenum, 'build/explicit_make_pair',
          4,  # 4 = high confidence
          'For C++11-compatibility, omit template arguments from make_pair'
          ' OR use pair directly OR if appropriate, construct a pair directly')
def CheckRedundantVirtual(filename, clean_lines, linenum, error):
  """Check if line contains a redundant "virtual" function-specifier.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Look for "virtual" on current line.
  line = clean_lines.elided[linenum]
  virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line)
  if not virtual: return

  # Ignore "virtual" keywords that are near access-specifiers. These
  # are only used in class base-specifier and do not apply to member
  # functions.
  if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or
      Match(r'^\s+(public|protected|private)\b', virtual.group(3))):
    return

  # Ignore the "virtual" keyword from virtual base classes. Usually
  # there is a colon on the same line in these cases (virtual base
  # classes are rare in google3 because multiple inheritance is rare).
  if Match(r'^.*[^:]:[^:].*$', line): return

  # Look for the next opening parenthesis. This is the start of the
  # parameter list (possibly on the next line shortly after virtual).
  # TODO(unknown): doesn't work if there are virtual functions with
  # decltype() or other things that use parentheses, but csearch suggests
  # that this is rare.
  end_col = -1
  end_line = -1
  start_col = len(virtual.group(2))
  for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
    line = clean_lines.elided[start_line][start_col:]
    parameter_list = Match(r'^([^(]*)\(', line)
    if parameter_list:
      # Match parentheses to find the end of the parameter list
      (_, end_line, end_col) = CloseExpression(
          clean_lines, start_line, start_col + len(parameter_list.group(1)))
      break
    # After the first line, scan subsequent lines from column 0.
    start_col = 0

  if end_col < 0:
    return  # Couldn't find end of parameter list, give up

  # Look for "override" or "final" after the parameter list
  # (possibly on the next few lines).
  for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
    line = clean_lines.elided[i][end_col:]
    match = Search(r'\b(override|final)\b', line)
    if match:
      error(filename, linenum, 'readability/inheritance', 4,
            ('"virtual" is redundant since function is '
             'already declared as "%s"' % match.group(1)))

    # Set end_col to check whole lines after we are done with the
    # first line.
    end_col = 0
    if Search(r'[^\w]\s*$', line):
      break
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
  """Check if line contains a redundant "override" or "final" virt-specifier.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Look for closing parenthesis nearby. We need one to confirm where
  # the declarator ends and where the virt-specifier starts to avoid
  # false positives.
  line = clean_lines.elided[linenum]
  declarator_end = line.rfind(')')
  if declarator_end >= 0:
    fragment = line[declarator_end:]
  else:
    # No ')' on this line; if the previous line had one, the whole current
    # line is past the declarator and can be checked as the fragment.
    if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:
      fragment = line
    else:
      return

  # Check that at most one of "override" or "final" is present, not both
  if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment):
    error(filename, linenum, 'readability/inheritance', 4,
          ('"override" is redundant since function is '
           'already declared as "final"'))
# Returns true if we are at a new block, and it is directly
# inside of a namespace.
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
  """Checks that the new block is directly in a namespace.

  Args:
    nesting_state: The _NestingState object that contains info about our state.
    is_forward_declaration: If the class is a forward declared class.
  Returns:
    Whether or not the new block is directly in a namespace.
  """
  stack = nesting_state.stack
  if is_forward_declaration:
    # A forward declaration only needs the innermost scope to be a namespace.
    return bool(stack) and isinstance(stack[-1], _NamespaceInfo)

  if len(stack) <= 1:
    return False
  # A real block sits on top of the stack; its parent must be a namespace.
  return (stack[-1].check_namespace_indentation and
          isinstance(stack[-2], _NamespaceInfo))
def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
                                    raw_lines_no_comments, linenum):
  """This method determines if we should apply our namespace indentation check.

  Args:
    nesting_state: The current nesting state.
    is_namespace_indent_item: If we just put a new class on the stack, True.
      If the top of the stack is not a class, or we did not recently
      add the class, False.
    raw_lines_no_comments: The lines without the comments.
    linenum: The current line number we are processing.

  Returns:
    True if we should apply our namespace indentation check. Currently, it
    only works for classes and namespaces inside of a namespace.
  """
  is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
                                                     linenum)

  # Only newly-pushed classes/namespaces and forward declarations qualify.
  if not is_namespace_indent_item and not is_forward_declaration:
    return False

  # If we are in a macro, we do not want to check the namespace indentation.
  if IsMacroDefinition(raw_lines_no_comments, linenum):
    return False

  return IsBlockInNameSpace(nesting_state, is_forward_declaration)
# Call this method if the line is directly inside of a namespace.
# If the line above is blank (excluding comments) or the start of
# an inner namespace, it cannot be indented.
def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
                                    error):
  """Flag a line that is indented while sitting directly inside a namespace.

  Args:
    filename: The name of the current file.
    raw_lines_no_comments: The file's lines with comments stripped.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Any leading whitespace at namespace scope is an indentation violation.
  if Match(r'^\s+', raw_lines_no_comments[linenum]):
    error(filename, linenum, 'runtime/indentation_namespace', 4,
          'Do not indent within a namespace')
def ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions=None):
  """Processes a single line in the file.

  Dispatches every per-line check in a fixed order; the nesting state must
  be updated before the checks that consult it run.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    clean_lines: An array of strings, each representing a line of the file,
                 with comments stripped.
    line: Number of line being processed.
    include_state: An _IncludeState instance in which the headers are inserted.
    function_state: A _FunctionState instance which counts function lines, etc.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  raw_lines = clean_lines.raw_lines
  # NOLINT suppressions are parsed from the raw (uncleansed) line.
  ParseNolintSuppressions(filename, raw_lines[line], line, error)
  nesting_state.Update(filename, clean_lines, line, error)
  CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
                               error)
  # No further checks apply inside inline assembly blocks.
  if nesting_state.InAsmBlock(): return
  CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
  CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
  CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
  CheckLanguage(filename, clean_lines, line, file_extension, include_state,
                nesting_state, error)
  CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
  CheckForNonStandardConstructs(filename, clean_lines, line,
                                nesting_state, error)
  CheckVlogArguments(filename, clean_lines, line, error)
  CheckPosixThreading(filename, clean_lines, line, error)
  CheckInvalidIncrement(filename, clean_lines, line, error)
  CheckMakePairUsesDeduction(filename, clean_lines, line, error)
  CheckRedundantVirtual(filename, clean_lines, line, error)
  CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
  if extra_check_functions:
    for check_fn in extra_check_functions:
      check_fn(filename, clean_lines, line, error)
def FlagCxx11Features(filename, clean_lines, linenum, error):
  """Flag those c++11 features that we only allow in certain places.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # A single #include regex serves both the TR1 and the C++11 header checks.
  include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)

  # Flag unapproved C++ TR1 headers.
  if include and include.group(1).startswith('tr1/'):
    error(filename, linenum, 'build/c++tr1', 5,
          ('C++ TR1 headers such as <%s> are unapproved.') % include.group(1))

  # Flag unapproved C++11 headers.
  if include and include.group(1) in ('cfenv',
                                      'condition_variable',
                                      'fenv.h',
                                      'future',
                                      'mutex',
                                      'thread',
                                      'chrono',
                                      'ratio',
                                      'regex',
                                      'system_error',
                                      ):
    error(filename, linenum, 'build/c++11', 5,
          ('<%s> is an unapproved C++11 header.') % include.group(1))

  # The only place where we need to worry about C++11 keywords and library
  # features in preprocessor directives is in macro definitions.
  if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return

  # These are classes and free functions. The classes are always
  # mentioned as std::*, but we only catch the free functions if
  # they're not found by ADL. They're alphabetical by header.
  for top_name in (
      # type_traits
      'alignment_of',
      'aligned_union',
      ):
    if Search(r'\bstd::%s\b' % top_name, line):
      error(filename, linenum, 'build/c++11', 5,
            ('std::%s is an unapproved C++11 class or function. Send c-style '
             'an example of where it would make your code more readable, and '
             'they may let you use it.') % top_name)
def FlagCxx14Features(filename, clean_lines, linenum, error):
  """Flag those C++14 features that we restrict.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  current_line = clean_lines.elided[linenum]
  include_match = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', current_line)
  if not include_match:
    return
  # Flag unapproved C++14 headers.
  header_name = include_match.group(1)
  if header_name in ('scoped_allocator', 'shared_mutex'):
    error(filename, linenum, 'build/c++14', 5,
          ('<%s> is an unapproved C++14 header.') % header_name)
def ProcessFileData(filename, file_extension, lines, error,
                    extra_check_functions=None):
  """Performs lint checks and reports any errors to the given error function.
  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    lines: An array of strings, each representing a line of the file, with the
           last element being empty if the file is terminated with a newline.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  # Pad with sentinel lines so line numbers and list indices both start at 1
  # and the last real line has a known successor.
  lines = (['// marker so line numbers and indices both start at 1'] + lines +
           ['// marker so line numbers end in a known way'])

  include_state = _IncludeState()
  function_state = _FunctionState()
  nesting_state = NestingState()

  # NOLINT suppressions are per-file state; clear leftovers from earlier files.
  ResetNolintSuppressions()

  CheckForCopyright(filename, lines, error)
  ProcessGlobalSuppresions(lines)
  # Multi-line comments are removed before CleansedLines is built; the
  # per-line checks below operate on the cleaned view.
  RemoveMultiLineComments(filename, lines, error)
  clean_lines = CleansedLines(lines)

  if IsHeaderExtension(file_extension):
    CheckForHeaderGuard(filename, clean_lines, error)

  for line in xrange(clean_lines.NumLines()):
    ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions)
    FlagCxx11Features(filename, clean_lines, line, error)
  # Report blocks (namespaces, classes, ...) that were never closed.
  nesting_state.CheckCompletedBlocks(filename, error)

  CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)

  # Check that the .cc file has included its header if it exists.
  if _IsSourceExtension(file_extension):
    CheckHeaderFileIncluded(filename, include_state, error)

  # We check here rather than inside ProcessLine so that we see raw
  # lines rather than "cleaned" lines.
  CheckForBadCharacters(filename, lines, error)

  CheckForNewlineAtEOF(filename, lines, error)
def ProcessConfigOverrides(filename):
  """ Loads the configuration files and processes the config overrides.
  Args:
    filename: The name of the file being processed by the linter.
  Returns:
    False if the current |filename| should not be processed further.
  """
  abs_filename = os.path.abspath(filename)
  cfg_filters = []
  keep_looking = True
  # Walk upward from the file's directory toward the filesystem root,
  # collecting CPPLINT.cfg overrides; "set noparent" stops the search.
  while keep_looking:
    abs_path, base_name = os.path.split(abs_filename)
    if not base_name:
      break  # Reached the root directory.

    cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
    abs_filename = abs_path
    if not os.path.isfile(cfg_file):
      continue

    try:
      with open(cfg_file) as file_handle:
        for line in file_handle:
          line, _, _ = line.partition('#')  # Remove comments.
          if not line.strip():
            continue

          name, _, val = line.partition('=')
          name = name.strip()
          val = val.strip()
          if name == 'set noparent':
            keep_looking = False
          elif name == 'filter':
            cfg_filters.append(val)
          elif name == 'exclude_files':
            # When matching exclude_files pattern, use the base_name of
            # the current file name or the directory name we are processing.
            # For example, if we are checking for lint errors in /foo/bar/baz.cc
            # and we found the .cfg file at /foo/CPPLINT.cfg, then the config
            # file's "exclude_files" filter is meant to be checked against "bar"
            # and not "baz" nor "bar/baz.cc".
            if base_name:
              pattern = re.compile(val)
              if pattern.match(base_name):
                if _cpplint_state.quiet:
                  # Suppress "Ignoring file" warning when using --quiet.
                  return False
                _cpplint_state.PrintInfo('Ignoring "%s": file excluded by "%s". '
                                         'File path component "%s" matches '
                                         'pattern "%s"\n' %
                                         (filename, cfg_file, base_name, val))
                return False
          elif name == 'linelength':
            global _line_length
            try:
              _line_length = int(val)
            except ValueError:
              _cpplint_state.PrintError('Line length must be numeric.')
          elif name == 'extensions':
            ProcessExtensionsOption(val)
          elif name == 'root':
            global _root
            # root directories are specified relative to CPPLINT.cfg dir.
            _root = os.path.join(os.path.dirname(cfg_file), val)
          elif name == 'headers':
            ProcessHppHeadersOption(val)
          elif name == 'includeorder':
            ProcessIncludeOrderOption(val)
          else:
            _cpplint_state.PrintError(
                'Invalid configuration option (%s) in file %s\n' %
                (name, cfg_file))
    except IOError:
      # A config file that exists but cannot be read stops the upward search.
      _cpplint_state.PrintError(
          "Skipping config file '%s': Can't open for reading\n" % cfg_file)
      keep_looking = False

  # Apply all the accumulated filters in reverse order (top-level directory
  # config options having the least priority).
  for cfg_filter in reversed(cfg_filters):
    _AddFilters(cfg_filter)

  return True
def ProcessFile(filename, vlevel, extra_check_functions=None):
  """Does google-lint on a single file.
  Args:
    filename: The name of the file to parse.
    vlevel: The level of errors to report. Every error of confidence
            >= verbose_level will be reported. 0 is a good default.
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  _SetVerboseLevel(vlevel)
  # Per-directory CPPLINT.cfg may alter the filters; save them for restore.
  _BackupFilters()
  old_errors = _cpplint_state.error_count

  if not ProcessConfigOverrides(filename):
    _RestoreFilters()
    return

  # Track which (1-based) line numbers end in LF vs CRLF so a mixture
  # can be reported after processing.
  lf_lines = []
  crlf_lines = []
  try:
    # Support the UNIX convention of using "-" for stdin. Note that
    # we are not opening the file with universal newline support
    # (which codecs doesn't support anyway), so the resulting lines do
    # contain trailing '\r' characters if we are reading a file that
    # has CRLF endings.
    # If after the split a trailing '\r' is present, it is removed
    # below.
    if filename == '-':
      lines = codecs.StreamReaderWriter(sys.stdin,
                                        codecs.getreader('utf8'),
                                        codecs.getwriter('utf8'),
                                        'replace').read().split('\n')
    else:
      with codecs.open(filename, 'r', 'utf8', 'replace') as target_file:
        lines = target_file.read().split('\n')

    # Remove trailing '\r'.
    # The -1 accounts for the extra trailing blank line we get from split()
    for linenum in range(len(lines) - 1):
      if lines[linenum].endswith('\r'):
        lines[linenum] = lines[linenum].rstrip('\r')
        crlf_lines.append(linenum + 1)
      else:
        lf_lines.append(linenum + 1)

  except IOError:
    _cpplint_state.PrintError(
        "Skipping input '%s': Can't open for reading\n" % filename)
    _RestoreFilters()
    return

  # Note, if no dot is found, this will give the entire filename as the ext.
  file_extension = filename[filename.rfind('.') + 1:]

  # When reading from stdin, the extension is unknown, so no cpplint tests
  # should rely on the extension.
  if filename != '-' and file_extension not in GetAllExtensions():
    _cpplint_state.PrintError('Ignoring %s; not a valid file name '
                              '(%s)\n' % (filename, ', '.join(GetAllExtensions())))
  else:
    ProcessFileData(filename, file_extension, lines, Error,
                    extra_check_functions)

    # If end-of-line sequences are a mix of LF and CR-LF, issue
    # warnings on the lines with CR.
    #
    # Don't issue any warnings if all lines are uniformly LF or CR-LF,
    # since critique can handle these just fine, and the style guide
    # doesn't dictate a particular end of line sequence.
    #
    # We can't depend on os.linesep to determine what the desired
    # end-of-line sequence should be, since that will return the
    # server-side end-of-line sequence.
    if lf_lines and crlf_lines:
      # Warn on every line with CR. An alternative approach might be to
      # check whether the file is mostly CRLF or just LF, and warn on the
      # minority, we bias toward LF here since most tools prefer LF.
      for linenum in crlf_lines:
        Error(filename, linenum, 'whitespace/newline', 1,
              'Unexpected \\r (^M) found; better to use only \\n')

  # Suppress printing anything if --quiet was passed unless the error
  # count has increased after processing this file.
  if not _cpplint_state.quiet or old_errors != _cpplint_state.error_count:
    _cpplint_state.PrintInfo('Done processing %s\n' % filename)
  _RestoreFilters()
def PrintUsage(message):
  """Prints a brief usage string and exits, optionally with an error message.
  Args:
    message: The optional error message.
  """
  all_extensions = list(GetAllExtensions())
  header_extensions = GetHeaderExtensions()
  sys.stderr.write(_USAGE % (all_extensions,
                             ','.join(all_extensions),
                             header_extensions,
                             ','.join(header_extensions)))
  if not message:
    sys.exit(0)
  sys.exit('\nFATAL ERROR: ' + message)
def PrintVersion():
  """Prints the cpplint fork origin, cpplint version, and Python version."""
  for info_line in ('Cpplint fork (https://github.com/cpplint/cpplint)\n',
                    'cpplint ' + __VERSION__ + '\n',
                    'Python ' + sys.version + '\n'):
    sys.stdout.write(info_line)
  sys.exit(0)
def PrintCategories():
  """Prints a list of all the error-categories used by error messages.
  These are the categories used to filter messages via --filter.
  """
  for category in _ERROR_CATEGORIES:
    sys.stderr.write(' %s\n' % category)
  sys.exit(0)
def ParseArguments(args):
  """Parses the command line arguments.
  This may set the output format and verbosity level as side-effects.
  Args:
    args: The command line arguments:
  Returns:
    The list of filenames to lint.
  """
  try:
    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
                                                 'v=',
                                                 'version',
                                                 'counting=',
                                                 'filter=',
                                                 'root=',
                                                 'repository=',
                                                 'linelength=',
                                                 'extensions=',
                                                 'exclude=',
                                                 'recursive',
                                                 'headers=',
                                                 'includeorder=',
                                                 'quiet'])
  except getopt.GetoptError:
    PrintUsage('Invalid arguments.')

  # Start from the current global state so values from config files survive
  # unless explicitly overridden on the command line.
  verbosity = _VerboseLevel()
  output_format = _OutputFormat()
  filters = ''
  quiet = _Quiet()
  counting_style = ''
  recursive = False

  for (opt, val) in opts:
    if opt == '--help':
      PrintUsage(None)
    if opt == '--version':
      PrintVersion()
    elif opt == '--output':
      if val not in ('emacs', 'vs7', 'eclipse', 'junit', 'sed', 'gsed'):
        # Fixed usage message: it was missing the separator after "eclipse".
        PrintUsage('The only allowed output formats are emacs, vs7, eclipse, '
                   'sed, gsed and junit.')
      output_format = val
    elif opt == '--quiet':
      quiet = True
    elif opt == '--verbose' or opt == '--v':
      verbosity = int(val)
    elif opt == '--filter':
      filters = val
      if not filters:
        # An empty --filter means "list available categories and exit".
        PrintCategories()
    elif opt == '--counting':
      if val not in ('total', 'toplevel', 'detailed'):
        PrintUsage('Valid counting options are total, toplevel, and detailed')
      counting_style = val
    elif opt == '--root':
      global _root
      _root = val
    elif opt == '--repository':
      global _repository
      _repository = val
    elif opt == '--linelength':
      global _line_length
      try:
        _line_length = int(val)
      except ValueError:
        PrintUsage('Line length must be digits.')
    elif opt == '--exclude':
      global _excludes
      if not _excludes:
        _excludes = set()
      _excludes.update(glob.glob(val))
    elif opt == '--extensions':
      ProcessExtensionsOption(val)
    elif opt == '--headers':
      ProcessHppHeadersOption(val)
    elif opt == '--recursive':
      recursive = True
    elif opt == '--includeorder':
      ProcessIncludeOrderOption(val)

  if not filenames:
    PrintUsage('No files were specified.')

  if recursive:
    filenames = _ExpandDirectories(filenames)

  if _excludes:
    filenames = _FilterExcludedFiles(filenames)

  # Commit the parsed settings to the global linter state.
  _SetOutputFormat(output_format)
  _SetQuiet(quiet)
  _SetVerboseLevel(verbosity)
  _SetFilters(filters)
  _SetCountingStyle(counting_style)

  filenames.sort()
  return filenames
def _ExpandDirectories(filenames):
  """Searches a list of filenames and replaces directories in the list with
  all files descending from those directories. Files with extensions not in
  the valid extensions list are excluded.
  Args:
    filenames: A list of files or directories
  Returns:
    A list of all files that are members of filenames or descended from a
    directory in filenames
  """
  candidates = set()
  for name in filenames:
    if not os.path.isdir(name):
      # Plain files are kept as-is (extension filtering happens below).
      candidates.add(name)
      continue
    # Recursively collect every file below the directory, stripping a
    # leading "./" so paths look the way users typed them.
    for dirpath, _, basenames in os.walk(name):
      for basename in basenames:
        full_path = os.path.join(dirpath, basename)
        dot_slash = '.' + os.path.sep
        if full_path.startswith(dot_slash):
          full_path = full_path[len(dot_slash):]
        candidates.add(full_path)
  return [name for name in candidates
          if os.path.splitext(name)[1][1:] in GetAllExtensions()]
def _FilterExcludedFiles(fnames):
  """Filters out files listed in the --exclude command line switch. File paths
  in the switch are evaluated relative to the current working directory
  """
  exclude_paths = [os.path.abspath(path) for path in _excludes]

  def is_excluded(fname):
    # because globbing does not work recursively, exclude all subpath of all
    # excluded entries
    abs_fname = os.path.abspath(fname)
    return any(_IsParentOrSame(excluded, abs_fname)
               for excluded in exclude_paths)

  return [fname for fname in fnames if not is_excluded(fname)]
def _IsParentOrSame(parent, child):
"""Return true if child is subdirectory of parent.
Assumes both paths are absolute and don't contain symlinks.
"""
parent = os.path.normpath(parent)
child = os.path.normpath(child)
if parent == child:
return True
prefix = os.path.commonprefix([parent, child])
if prefix != parent:
return False
# Note: os.path.commonprefix operates on character basis, so
# take extra care of situations like '/foo/ba' and '/foo/bar/baz'
child_suffix = child[len(prefix):]
child_suffix = child_suffix.lstrip(os.sep)
return child == os.path.join(prefix, child_suffix)
def main():
  """Command-line entry point: parse arguments, lint each file, and exit
  with status 1 if any lint errors were found."""
  filenames = ParseArguments(sys.argv[1:])
  backup_err = sys.stderr
  try:
    # Change stderr to write with replacement characters so we don't die
    # if we try to print something containing non-ASCII characters.
    sys.stderr = codecs.StreamReader(sys.stderr, 'replace')

    _cpplint_state.ResetErrorCounts()
    for filename in filenames:
      ProcessFile(filename, _cpplint_state.verbose_level)
    # If --quiet is passed, suppress printing error count unless there are errors.
    if not _cpplint_state.quiet or _cpplint_state.error_count > 0:
      _cpplint_state.PrintErrorCounts()

    if _cpplint_state.output_format == 'junit':
      sys.stderr.write(_cpplint_state.FormatJUnitXML())

  finally:
    # Restore the real stderr no matter what happened above.
    sys.stderr = backup_err

  sys.exit(_cpplint_state.error_count > 0)
# Standard script entry point.
if __name__ == '__main__':
  main()
| Python |
3D | OpenMS/OpenMS | share/OpenMS/SCRIPTS/ProduceQCFigures_tic.R | .R | 716 | 24 | ## This is an R script to produce the figures that are attached to the qcML format
library("ggplot2")
library(scales)
options(warn=-1) #suppress warnings
#options
options(digits=10)
# Command line arguments: 1) input TIC table (tsv), 2) output png path.
file<-commandArgs(TRUE)[1]
post<-commandArgs(TRUE)[2]
# Tab-separated table; first two columns are retention time and TIC.
knime.in<-read.csv(file=file,head=TRUE,sep="\t")
names(knime.in)<- c("RT", "TIC")
png(post)
######################################
###TIC
######################################
# Convert RT in seconds to a time-of-day value so the x axis can show HH:MM.
knime.in$rt <- as.POSIXct(as.character(0),format="%S")+knime.in$RT
ggplot(data=knime.in, aes(x=rt, y=TIC)) +
  geom_line() +
  #scale_x_datetime( breaks="5 mins", minor_breaks="1 mins", labels=date_format("%H:%M")) +
  xlab("RT (HH:MM)")
######################################
garbage<-dev.off()
| R |
3D | OpenMS/OpenMS | share/OpenMS/SCRIPTS/mzTab2tsv_PSM.R | .R | 2,934 | 105 | ## This is an R script for the conversion of mzTab to a better readable tsv format.
# clear entire workspace
rm(list = ls())
# Command line arguments: 1) input mzTab file, 2) output tsv file.
input.file <- commandArgs(TRUE)[1]
output.file <- commandArgs(TRUE)[2]
# Find the (1-based) row number of the first line in |file| that starts with
# the three-character section identifier (e.g. "PSH").
# Fixes: the connection is now closed on every path (via on.exit), and
# reaching EOF without a match raises a clear error instead of the obscure
# "argument is of length zero" failure that also leaked the connection.
startSection <- function(file, section.identifier) {
  data <- file(file, "r")
  on.exit(close(data))
  row = 0
  while (TRUE) {
    row = row + 1
    line = readLines(data, n=1)
    if (length(line) == 0) {
      stop(paste0("Section '", section.identifier, "' not found in file '", file, "'"))
    }
    if (substr(line, 1, 3)==section.identifier) {
      break
    }
  }
  return (row)
}
# Collapse the rows of one aggregation group into a single value.
# Character vectors: the unique non-NA entries are joined into one
# comma-separated string (NA if nothing remains).
# All other types: the first entry is returned unchanged.
collapseRows <- function(x) {
  if (!is.character(x)) {
    return (x[1])
  }
  joined <- paste(unique(x[!is.na(x)]), collapse=",")
  if (joined=="") {
    return(NA)
  }
  return (joined)
}
# Count how many times pattern |c| occurs in string |s|.
# Implemented by deleting every occurrence and comparing string lengths.
countOccurrences <- function(c,s) {
  stripped <- gsub(c,"",s)
  return (nchar(s) - nchar(stripped))
}
# check that the protein accession is of the format *|*|*
# Note that NA returns TRUE.
checkAccessionFormat <- function(accession) {
  if (is.na(accession)) {
    return (TRUE)
  }
  pipe.counts <- countOccurrences("[|]",accession)
  n.total <- length(accession)
  n.valid <- length(which(pipe.counts==2))
  return (n.total==n.valid)
}
# Extracts the second entry from a string of the form *|*|*.
getAccession <- function(string) {
  if (is.na(string)) {
    return (NA)
  }
  parts <- unlist(strsplit(string, "[|]"))
  return (parts[2])
}
# Extracts the third entry from a string of the form *|*|*.
getGene <- function(string) {
  if (is.na(string)) {
    return (NA)
  }
  parts <- unlist(strsplit(string, "[|]"))
  return (parts[3])
}
# read the PSM section of an mzTab file
# Returns a data frame with one row per PSM; multiple protein accessions of
# the same PSM are collapsed into comma-separated columns.
readMzTabPSM <- function(file) {
  # find start of the PSM section
  first.row <- startSection(file, "PSH")
  # read entire mzTab
  data <- read.table(file, sep="\t", skip=first.row-1, fill=TRUE, header=TRUE, quote="", na.strings=c("null","NA"), stringsAsFactors=FALSE, check.names=FALSE)
  # extract PSM data (rows whose first column carries the "PSM" prefix)
  psm.data <- data[which(data[,1]=="PSM"),]
  psm.data$PSH <- NULL
  # In case the accession column is of the format *|*|*, we split this column into an accession and a gene column.
  if (all(sapply(psm.data$accession, checkAccessionFormat))) {
    psm.data$gene <- sapply(psm.data$accession, getGene)
    psm.data$accession <- sapply(psm.data$accession, getAccession)
  }
  # In the mzTab format, PSMs with multiple protein accessions are written to multiple rows.
  # Here we collapse these rows and separate multiple accessions/genes by comma.
  psm.data <- aggregate(psm.data, by=list(temp = psm.data$PSM_ID), FUN=collapseRows)
  psm.data$temp <- NULL
  return (psm.data)
}
# Convert the PSM section of the input mzTab and write it as tsv.
psm.data <- readMzTabPSM(input.file)
write.table(psm.data, output.file, sep="\t", row.names=FALSE, col.names=TRUE, quote=FALSE)
| R |
3D | OpenMS/OpenMS | share/OpenMS/SCRIPTS/ProteomicsLFQ.R | .R | 9,013 | 227 | # example script to perform an MSstats analysis
Sys.setenv(LANG = "en")
library(MSstats)
library(dplyr)
library(tibble)
library(tidyr)
args = commandArgs(trailingOnly=TRUE)
MSstats_input <- args[1]
mzTab_input <- args[2]
mzTab_output <- args[3]
Sys.setenv(LANG = "en")
# Read the MSstats-formatted CSV exported by OpenMS and convert it to the
# internal MSstats format; proteins supported by a single feature are dropped.
data <- read.csv(MSstats_input, sep=",", header=T, stringsAsFactors=T)
quant <- OpenMStoMSstatsFormat(data, removeProtein_with1Feature=T)
# Normalize/summarize; censored (missing) intensities are encoded as 'NA'.
processed.quant <- dataProcess(quant, censoredInt = 'NA')
# generating these plots takes quite a while. Disable if needed.
dataProcessPlots(data=processed.quant, type="QCPlot",which.Protein="allonly")
dataProcessPlots(data=processed.quant, type="ProfilePlot",which.Protein="all")
# iPRG2015 study matrix
# All pairwise contrasts of the four conditions C1..C4; each row is one
# comparison, columns are conditions, -1/+1 select the compared pair.
comparison1<-matrix(c(-1,1,0,0),nrow=1)
comparison2<-matrix(c(-1,0,1,0),nrow=1)
comparison3<-matrix(c(-1,0,0,1),nrow=1)
comparison4<-matrix(c(0,-1,1,0),nrow=1)
comparison5<-matrix(c(0,-1,0,1),nrow=1)
comparison6<-matrix(c(0,0,-1,1),nrow=1)
comparison <- rbind(comparison1, comparison2, comparison3, comparison4, comparison5, comparison6)
row.names(comparison)<-c("C2-C1","C3-C1","C4-C1","C3-C2","C4-C2","C4-C3")
############ also calculate missingness on condition level
# input: ProcessedData matrix of MSstats
# output:
# calculate fraction of na in condition (per protein)
# Groups: PROTEIN [762]
# PROTEIN `1` `2`
# <fct> <dbl> <dbl>
# 1 sp|A1ANS1|HTPG_PELPD 0 0.5
# 2 sp|A2I7N3|SPA37_BOVIN 0 0.5
# 3 sp|A2VDF0|FUCM_HUMAN 0 0.5
# 4 sp|A6ND91|ASPD_HUMAN 0.5 0.5
# 5 sp|A7E3W2|LG3BP_BOVIN 0.5 0.5
# 6 sp|B8FGT4|ATPB_DESAA 0 0.5
getMissingInCondition <- function(processedData)
{
  p <- processedData
  # count number of samples per condition
  n_samples = p %>% group_by(GROUP) %>% summarize(n_samples = length(unique((as.numeric(SUBJECT)))))
  p <- p %>%
    filter(!is.na(INTENSITY)) %>% # remove rows with INTENSITY=NA
    select(PROTEIN, GROUP, SUBJECT) %>%
    distinct() %>%
    group_by(PROTEIN, GROUP) %>%
    summarize(non_na = n())  # count non-NA values for this protein and condition
  # join per-condition sample counts back in, then derive the missing fraction
  p <- left_join(p, n_samples) %>%
    mutate(missingInCondition = 1 - non_na/n_samples) # calculate fraction of missing values in condition
  # create one column for every condition containing the missingness
  p <- spread(data = p[,c("PROTEIN", "GROUP", "missingInCondition")], key = GROUP, value = missingInCondition)
  return(p)
}
# Fraction of missing values per protein and condition (wide format).
mic <- getMissingInCondition(processed.quant$ProcessedData)
#filtered.quant <- processed.quant
#filtered.quant$RunlevelData <- merge(x=processed.quant$RunlevelData, y=mic, by.y="PROTEIN", by.x="Protein")
#filtered.quant$RunlevelData[is.na(filtered.quant$RunlevelData)] <- 1 # set completely missing to 1.0 (had no matching entry in join and were set to NA)
#filtered.quant$ProcessedData <- merge(x=processed.quant$ProcessedData, y=mic, by="PROTEIN")
#filtered.quant$ProcessedData[is.na(filtered.quant$ProcessedData)] <- 1 # set completely missing to 1.0 (had no matching entry in join and were set to NA)
# Statistical group comparison across all contrasts defined above.
groupcomp <- groupComparison(contrast.matrix=comparison, data=processed.quant)
# for plotting, remove proteins with infinite fold change / p-value NA (e.g., those only present in one condition)
groupcomp$Volcano = groupcomp$ComparisonResult[!is.na(groupcomp$ComparisonResult$pvalue),]
groupComparisonPlots(data=groupcomp$Volcano, type="VolcanoPlot", width=12, height=12,dot.size = 2,ylimUp = 7)
# annotate how often the protein was quantified in each condition (NA values introduced by merge of completely missing are set to 1.0)
groupcomp$ComparisonResult <- merge(x=groupcomp$ComparisonResult, y=mic, by.x="Protein", by.y="PROTEIN")
commoncols <- intersect(colnames(mic), colnames(groupcomp$ComparisonResult))
groupcomp$ComparisonResult[, commoncols]<-groupcomp$ComparisonResult %>% select(commoncols) %>% mutate_all(funs(replace(., is.na(.), 1)))
# Write one contrast's comparison results to "comparison_<label>.csv"
# (tab-separated despite the extension) and return the input unchanged so
# the function can be used inside dplyr::do().
writeComparisonToCSV <- function(DF)
{
  out.name <- paste0("comparison_", unique(DF$Label), ".csv")
  write.table(DF, file=out.name, quote=FALSE, sep='\t', row.names = FALSE)
  return(DF)
}
# Write one CSV per contrast label.
groupcomp$ComparisonResult %>% group_by(Label) %>% do(writeComparisonToCSV(as.data.frame(.)))
################# MzTab
# Find the (1-based) row number of the first line in |file| that starts with
# the three-character section identifier (e.g. "PRH").
# Fixes: the connection is now closed on every path (via on.exit), and
# reaching EOF without a match raises a clear error instead of the obscure
# "argument is of length zero" failure that also leaked the connection.
startSection <- function(file, section.identifier) {
  data <- file(file, "r")
  on.exit(close(data))
  row = 0
  while (TRUE) {
    row = row + 1
    line = readLines(data, n=1)
    if (length(line) == 0) {
      stop(paste0("Section '", section.identifier, "' not found in file '", file, "'"))
    }
    if (substr(line, 1, 3)==section.identifier) {
      break
    }
  }
  return (row)
}
# find start of the mzTab section tables
MTD.first_row <- startSection(mzTab_input, "MTD")
PRT.first_row <- startSection(mzTab_input, "PRH")
PEP.first_row <- startSection(mzTab_input, "PEH")
PSM.first_row <- startSection(mzTab_input, "PSH")
# read entire mzTab and extract protein data
# Each section is bounded by the start of the next; the "-1 -1" in nrows
# accounts for the header row and one extra empty line between sections.
MTD <- read.table(mzTab_input, sep="\t",
                  skip=MTD.first_row-1,
                  nrows=PRT.first_row - MTD.first_row - 1 -1, # one extra empty line
                  fill=TRUE,
                  header=TRUE,
                  quote="",
                  na.strings=c("null","NA"),
                  stringsAsFactors=FALSE,
                  check.names=FALSE)
PRT <- read.table(mzTab_input, sep="\t",
                  skip=PRT.first_row-1,
                  nrows=PEP.first_row - PRT.first_row - 1 -1, # one extra empty line
                  fill=TRUE,
                  header=TRUE,
                  quote="",
                  na.strings=c("null","NA"),
                  stringsAsFactors=FALSE,
                  check.names=FALSE)
PEP <- read.table(mzTab_input, sep="\t",
                  skip=PEP.first_row-1,
                  nrows=PSM.first_row - PEP.first_row - 1 - 1, # one extra empty line
                  fill=TRUE,
                  header=TRUE,
                  quote="",
                  na.strings=c("null","NA"),
                  stringsAsFactors=FALSE,
                  check.names=FALSE)
# PSM is the last section, so it is read to the end of the file.
PSM <- read.table(mzTab_input, sep="\t",
                  skip=PSM.first_row-1,
                  fill=TRUE,
                  header=TRUE,
                  quote="",
                  na.strings=c("null","NA"),
                  stringsAsFactors=FALSE,
                  check.names=FALSE)
#### Insert quantification data from MSstats into PRT section
# first we create a run level protein table form MSstats output
# then we merge the values into the mzTab PRT table
# Input: MSstats RunLevelData
# Output: Run level quantification
# Create a run level protein table
# PROTEIN `1` `2` `3` `4` `5` `6` `7` `8` `9` `10` `11` `12` `13` `14` `15` `16` `17` `18` `19` `20`
# <fct> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
# 1 sp|A1ANS1|HTPG_PELPD 24.2 24.9 22.8 25.3 24.7 22.9 24.6 25.1 24.0 22.1 25.0 24.3 23.6 NA NA NA NA NA NA NA
# 2 sp|A2I7N1|SPA35_BOVIN 22.9 23.6 22.4 23.8 23.4 NA 23.6 23.9 22.5 NA 23.7 23.5 22.5 22.5 23.0 23.0 22.6 22.2 22.1 22.8
# Build a wide run-level protein quantification table: one row per protein,
# one column per run, values are log-intensities.
getRunLevelQuant <- function(runLevelData)
{
  long.format <- tibble(RUN=as.numeric(runLevelData$RUN),
                        PROTEIN=runLevelData$Protein,
                        INTENSITY=runLevelData$LogIntensities)
  return(spread(data = long.format, key = RUN, value = INTENSITY))
}
quant.runLevel=getRunLevelQuant(processed.quant$RunlevelData)
colnames(quant.runLevel)[1] = "accession"
quant.runLevel$accession<-as.character(quant.runLevel$accession)
# Rename run columns to the mzTab assay-column naming convention.
for (col_nr in seq(from=2, to=length(colnames(quant.runLevel))))
{
  colnames(quant.runLevel)[col_nr]=(paste0("protein_abundance_assay[", colnames(quant.runLevel)[col_nr] , "]"))
}
# TODO: check if assays in MzTab match to runs. Also true for fractionated data?
# clear old quant values from ProteinQuantifier
PRT[,grepl( "protein_abundance_assay" , names(PRT))] = NA
PRT[,grepl( "protein_abundance_study_variable" , names(PRT))] = NA
# merge in quant.runLevel values into PRT
PRT_assay_cols <- grepl("protein_abundance_assay", names(PRT))
PRT_stdv_cols <- grepl("protein_abundance_study_variable", names(PRT))
RL_assay_cols <- grepl("protein_abundance_assay", names(quant.runLevel))
for (acc in quant.runLevel$accession)
{
  q<-which(quant.runLevel$accession==acc)
  # acc from MSstats might be a group e.g., "A;B" so
  # we check the single leader protein in mzTab PRT$accession against both A and B
  w<-which(PRT$accession %in% strsplit(acc, ";", fixed=TRUE)[[1]])
  if (length(w) == 0)
  {
    # TODO: check why not all summarized protein accessions are in the mzTab. Minimum number of peptides/features different?
    print(paste("Warning: ", acc, " not in mzTab but reported by MSstats"))
  }
  else
  {
    PRT[w, PRT_assay_cols] <- quant.runLevel[q, RL_assay_cols]
    PRT[w, PRT_stdv_cols] <- quant.runLevel[q, RL_assay_cols] # we currently store same data in stdv and assay column
  }
}
# Write the four sections back out, separated by blank lines; NA -> "null".
write.table(MTD, mzTab_output, sep = "\t", quote=FALSE, row.names = FALSE, na = "null")
write("\n",file=mzTab_output,append=TRUE)
write.table(PRT, mzTab_output, sep = "\t", quote=FALSE, row.names = FALSE, append=TRUE, na = "null")
write("\n",file=mzTab_output,append=TRUE)
write.table(PEP, mzTab_output, sep = "\t", quote=FALSE, row.names = FALSE, append=TRUE, na = "null")
write("\n",file=mzTab_output,append=TRUE)
write.table(PSM, mzTab_output, sep = "\t", quote=FALSE, row.names = FALSE, append=TRUE, na = "null")
| R |
3D | OpenMS/OpenMS | share/OpenMS/SCRIPTS/mzTab2tsv_PEP.R | .R | 2,234 | 82 | ## This is an R script for the conversion of mzTab to a better readable tsv format.
# clear entire workspace
rm(list = ls())
# Command line arguments: 1) input mzTab file, 2) output tsv file.
input.file <- commandArgs(TRUE)[1]
output.file <- commandArgs(TRUE)[2]
# Find the (1-based) row number of the first line in |file| that starts with
# the three-character section identifier (e.g. "PEH").
# Fixes: the connection is now closed on every path (via on.exit), and
# reaching EOF without a match raises a clear error instead of the obscure
# "argument is of length zero" failure that also leaked the connection.
startSection <- function(file, section.identifier) {
  data <- file(file, "r")
  on.exit(close(data))
  row = 0
  while (TRUE) {
    row = row + 1
    line = readLines(data, n=1)
    if (length(line) == 0) {
      stop(paste0("Section '", section.identifier, "' not found in file '", file, "'"))
    }
    if (substr(line, 1, 3)==section.identifier) {
      break
    }
  }
  return (row)
}
# Count how many times pattern |char| occurs in string |s|.
# Implemented by deleting every occurrence and comparing string lengths.
countOccurrences <- function(char,s) {
  stripped <- gsub(char,"",s)
  return (nchar(s) - nchar(stripped))
}
# check that the protein accession is of the format *|*|*
# Note that NA returns TRUE.
checkAccessionFormat <- function(accession) {
  if (is.na(accession)) {
    return (TRUE)
  }
  pipe.counts <- countOccurrences("[|]",accession)
  n.total <- length(accession)
  n.valid <- length(which(pipe.counts==2))
  return (n.total==n.valid)
}
# Extracts the second entry from a string of the form *|*|*.
getAccession <- function(string) {
  if (is.na(string)) {
    return (NA)
  }
  parts <- unlist(strsplit(string, "[|]"))
  return (parts[2])
}
# Extracts the third entry from a string of the form *|*|*.
getGene <- function(string) {
  if (is.na(string)) {
    return (NA)
  }
  parts <- unlist(strsplit(string, "[|]"))
  return (parts[3])
}
# read the PEP section of an mzTab file
# Returns a data frame with one row per peptide row of the PEP section.
readMzTabPEP <- function(file) {
  # find start of the PEP section
  first.row <- startSection(file, "PEH")
  # read entire mzTab
  data <- read.table(file, sep="\t", skip=first.row-1, fill=TRUE, header=TRUE, quote="", na.strings=c("null","NA"), stringsAsFactors=FALSE, check.names=FALSE)
  # extract PEP data (rows whose first column carries the "PEP" prefix)
  peptide.data <- data[which(data[,1]=="PEP"),]
  peptide.data$PEH <- NULL
  # In case the accession column is of the format *|*|*, we split this column into an accession and a gene column.
  if (all(sapply(peptide.data$accession, checkAccessionFormat))) {
    peptide.data$gene <- sapply(peptide.data$accession, getGene)
    peptide.data$accession <- sapply(peptide.data$accession, getAccession)
  }
  return (peptide.data)
}
# Convert the PEP section of the input mzTab and write it as tsv.
peptide.data <- readMzTabPEP(input.file)
write.table(peptide.data, output.file, sep="\t", row.names=FALSE, col.names=TRUE, quote=FALSE)
| R |
3D | OpenMS/OpenMS | share/OpenMS/SCRIPTS/InternalCalibration_Residuals.R | .R | 2,857 | 69 | ## add default CRAN mirror in case the user's config does not have one
options(repos = list(CRAN="http://cran.rstudio.com/"))
if (!require(ggplot2)) install.packages("ggplot2")
library("ggplot2")
if (!require(reshape2)) install.packages("reshape2")
library("reshape2")
if (!require(plyr)) install.packages("plyr")
library("plyr")
# Command line arguments: 1) residuals csv from InternalCalibration, 2) output png.
file.table.in = commandArgs(TRUE)[1]  ## file.table.in = "residuals.csv"
file.plot.out = commandArgs(TRUE)[2]  ## file.plot.out = "residuals.png"
cat(paste0("Reading file '", file.table.in, "' to plot residual masses ..."))
d = read.csv(file.table.in, comment.char = "#", strip.white = TRUE, fill = FALSE)
head(d)
## check if header is complete
required_cols = c("RT", "intensity", "mz.ref", "mz.before", "mz.after", "ppm.before", "ppm.after")
if (!all(required_cols %in% colnames(d))) {
  stop(paste0("File '", file.table.in, "' has missing columns. Required are: ", paste(required_cols, sep="", collapse=", "), "."))
}
# Melt ppm errors into long format; the absolute mz.before/mz.after columns
# are dropped so only ppm.before/ppm.after remain as plotted variables.
dpm = melt(d[, grep("^mz.[ab]", colnames(d), invert = TRUE)], id.vars = c("RT", "mz.ref", "intensity"))
head(dpm)
## for peptide ID data:
## - mz.ref will be mostly unique (assume oversampling of at most 3)
## - and RT of more than 10min
## otherwise we assume direct-injection (and facet-wrap by each mz-ref)
## - unless its more than 100 facets... which will be hard to read
if ((length(unique(d$mz.ref)) / nrow(d) > 0.333 && diff(range(d$RT, na.rm = TRUE)) > 600)
    || length(unique(d$mz.ref)) > 100 ) {
  # peptide-ID-like data: single facet
  dpm2 = dpm
  dpm2$masstrace = ""
} else {
  ## for direct-injection, every spectrum will repeatedly give multiple 'mz.ref'
  ## annotate mz.ref with average intensity
  dpm2 = ddply(dpm, "mz.ref", function(x) {
    x$masstrace = paste0("m/z ",
                         # we want zero/space padded masses, such that ggplot will sort them by mass automatically
                         format(round(x$mz.ref, 5), nsmall = 5, width = 9, zero.print = TRUE),
                         " ~ int ",
                         format(median(x$intensity), scientific = TRUE, digits = 2))
    return(x)
  })
}
head(dpm2)
#getOption("device") ## RStudioGD
#options(device = "pdf")
#dev.new(filename = file.plot.out, file = file.plot.out)
png(filename = file.plot.out, width=15, height=10, units="cm", res=300)
# Scatter plot of ppm error before/after calibration over RT, with guide
# lines at 0 and +/- 1 ppm; one facet per (annotated) mass trace.
pl = ggplot(dpm2) +
  geom_hline(yintercept = 0, colour="grey") +
  geom_hline(yintercept = c(-1,1), colour = "grey", linetype = "dotdash") +
  facet_wrap(~ masstrace) +
  geom_point(aes(x = RT, y = value, color = variable), alpha=0.6) +
  scale_color_manual(values = c("ppm.before" = "#FF2222", "ppm.after" = "#2222FF"),
                     labels = c("before", "after"),
                     name = "error") +
  ggtitle("Calibrant's mass error over time") +
  xlab("RT [sec]") +
  ylab("mass error [ppm]")
print(pl)
dev.off()
| R |
3D | OpenMS/OpenMS | share/OpenMS/SCRIPTS/ProduceQCFigures_idmap.R | .R | 1,292 | 40 | ## This is an R script to produce the figures that are attached to the qcML format
library("ggplot2")
library(scales)
options(warn=-1) #suppress warnings
#options
options(digits=10)
file_p<-commandArgs(TRUE)[1]
file_id<-commandArgs(TRUE)[2]
post<-commandArgs(TRUE)[3]
png(post)
#file_p<-"/tmp/TOPPAS_out/024-QCExtractor-out_csv/old1.csv"
#file_id<-"/tmp/TOPPAS_out/023-QCExtractor-out_csv/old1.csv"
#precs<-read.table(file_p, header=TRUE, sep="", na.strings="NA", dec=".", strip.white=TRUE)
#ids<-read.table(file_id, header=TRUE, sep="", na.strings="NA", dec=".", strip.white=TRUE)
precs<-read.csv(file=file_p,head=TRUE,sep="\t")
ids<-read.csv(file=file_id,head=TRUE,sep="\t")
names(precs)<- c("RT", "MZ")
names(ids)<- c("RT", "MZ", "Score", "PeptideSequence", "Charge", "TheoreticalWeight", "DeltaPpm")
##########################
###IDs on rt/mz map vs precursors
##########################
spec<-cbind(precs[])
id<-cbind(ids[,1:2])
spec$color<-"is_recorded"
id$color<-"is_identified"
spec$rt<-as.POSIXct(as.character(0),format="%S")+spec$RT
id$rt<-as.POSIXct(as.character(0),format="%S")+id$RT
ggplot(spec, aes(rt, MZ, color=color)) +
geom_point() +
geom_point(data=id, aes(rt, MZ, color=color)) +
xlab("RT (HH:MM)")
######################################
garbage<-dev.off()
| R |
3D | OpenMS/OpenMS | share/OpenMS/SCRIPTS/ProduceQCFigures_setid.R | .R | 510 | 28 | ## This is an R script to produce the figures that are attached to the qcML format
#options
options(digits=10)
file<-commandArgs(TRUE)[1]
post<-commandArgs(TRUE)[2]
######
###setid
######
a<-read.table(file=file, header=TRUE, sep="\t", na.strings="NA", dec=".", strip.white=TRUE)
######################################
png(post)
bxpo=list()
bxpo$names=a[,1]
a <- as.matrix(a[,-1])
a <- t(a[,c("min","Q1","Q2","Q3","max")])
bxpo$stats = a
bxp(bxpo)
######################################
dev.off()
#
#
#
#
#
| R |
3D | OpenMS/OpenMS | share/OpenMS/SCRIPTS/Rscript_generic_example.R | .R | 4,366 | 89 | ## This is an exemplary R-Script which can be used in conjunction with TOPP:GenericWrapper (of type: RScript_General)
## In this mode, the GenericWrapper provides four 'in' and six 'out' slots (four single files, two lists), which the user can couple to in/out files as desired
## Slots can be empty, and depending on who is invoking this script, you should not rely on
## argument strings being present (even empty) or not.
## e.g. a user may write
## ...... -out3 "" ...
## or leave it out completely.
## grabbing command args
## you might want to use a dedicated R package to do this
## The script will be invoked like this when used with GenericWrapper, where <inX> and <outX> might be missing completely:
## <thisScript> -in1 <in1> -in2 <in2> -in3 <in3> -in4 <in4> -out1 <out1> -out2 <out2> -out3 <out3> -out4 <out4> -outlist1 <outlist1> -outlist2 <outlist2>
argv = commandArgs(TRUE)
#argv = c("-in1", "bla", "-in3", "-out1", "o1", "-out3", "") ## internal debug, worst combination of arguments.. and we should be able to deal with it
cat("Arguments passed are:")
cat(argv)
cat("\n\nLooking at parameters now ... \n\n")
## sanity check for input. This script (arbitrarily demands that the first input file (in1) is provided plus an optional output (out1)
## while assuming that the outer GenericWrapper node provides up to four inputs plus six outputs)
## everything that starts with a "-" is assumed to be a parameter name (not a value)
idx_in1 = which(argv == "-in1") + 1
if (length(idx_in1)!=1 | is.na(argv[idx_in1]) | nchar(argv[idx_in1])==0 | substr(argv[idx_in1],1,1)=="-")
{
stop("This script requires one input file for slot 'in1' for arbitrary reasons. The value must not start with '-'\n",
"Usage:", "<thisScript> -in1 <in1> -in2 <list> [[-in3 <ignored> -in4 <ignored>] -out1 <optional> -out2 <ignored> -out3 <ignored> -out4 <ignored>] -outlist1 <optional> [-outlist2 <ignored>]", " \n");
}
in1 = argv[2]
cat(paste0("Argument -in1: '", in1, "'\n"))
idx_in2 = which(argv == "-in2") + 1
if (length(idx_in2)!=1 | is.na(argv[idx_in2]) | nchar(argv[idx_in2])==0 | substr(argv[idx_in2],1,1)=="-")
{
stop("This script requires a second input in list format (in2) for arbitrary reasons. The values must not start with '-'\n",
"Usage:", "<thisScript> -in1 <in1> -in2 <list> [[-in3 <ignored> -in4 <ignored>] -out1 <optional> -out2 <ignored> -out3 <ignored> -out4 <ignored>] -outlist1 <optional> [-outlist2 <ignored>]", " \n");
}
idx_in2_end = idx_in2 + 1
while (!(length(idx_in2_end)!=1 | is.na(argv[idx_in2_end]) | nchar(argv[idx_in2_end])==0 | substr(argv[idx_in2_end],1,1)=="-"))
{ ## consume as many files as present until a new parameter shows up
idx_in2_end = idx_in2_end + 1
}
idx_in2_end = idx_in2_end - 1
in2 = argv[idx_in2:idx_in2_end]
cat(paste0("Argument -in2 (list): '", paste0(in2, collapse=" + "), "'\n"))
## do something with input ...
## ...
## deal with output (here we only look at -out1 and -outlist1 ...)
idx_out1 = which(argv == "-out1") + 1
if (length(idx_out1)==1 && !is.na(argv[idx_out1]) && nchar(argv[idx_out1])>0 && substr(argv[idx_out1],1,1)!="-")
{
out1 = argv[idx_out1]
cat(paste0("Argument -out1 provided as: '", out1, "'\n"))
## if the file is requested, we need to deliver
cat(file=out1, "The R script wrote some output here...")
} else {
cat("No output out1 requested!\n")
}
## deal with output (here we only look at -out1 ...
idx_outlist1 = which(argv == "-outlist1") + 1
if (length(idx_outlist1)==1 && !is.na(argv[idx_outlist1]) && nchar(argv[idx_outlist1])>0 && substr(argv[idx_outlist1],1,1)!="-")
{
idx_outlist1_end = idx_outlist1 + 1
while (!(length(idx_outlist1_end)!=1 | is.na(argv[idx_outlist1_end]) | nchar(argv[idx_outlist1_end])==0 | substr(argv[idx_outlist1_end],1,1)=="-"))
{ ## consume as many files as present until a new parameter shows up
idx_outlist1_end = idx_outlist1_end + 1
}
idx_outlist1_end = idx_outlist1_end - 1
outlist1 = argv[idx_outlist1:idx_outlist1_end]
cat(paste0("Argument -outlist1 provided as: '", paste0(outlist1, collapse=" + "), "'\n"))
## if the file is requested, we need to deliver
for (outlist_entry in outlist1)
{
cat(paste0("Writing some content to : '", outlist_entry, "' ...\n"))
cat(file=outlist_entry, "The R script wrote some output here...")
}
} else {
cat("No output outlist1 requested!\n")
}
| R |
3D | OpenMS/OpenMS | share/OpenMS/SCRIPTS/ropenms.R | .R | 3,163 | 107 | ################### OpenMS in R ###################
#### Some simple scripts how to use OpenMS in R ####
### if not installed:
### - install pyopenms (https://pyopenms.readthedocs.io/en/latest/installation.html)
### make sure R is using the same python environment as your pyopenms installation
### eg. reticulate::use_python("/usr/local/miniconda3/envs/py37/bin/python")
### or before loading the reticulate library
### Sys.setenv(RETICULATE_PYTHON = "/usr/local/miniconda3/envs/py37/bin/python")
### - install reticulate (https://rstudio.github.io/reticulate/)
### eg. install.packages('reticulate')
library("reticulate")
ropenms=import("pyopenms", convert = FALSE)
### load and parse idXML
f="/OpenMS/OpenMS/share/OpenMS/examples/BSA/BSA1_OMSSA.idXML"
idXML=ropenms$IdXMLFile()
pepids=r_to_py(list())
protids=r_to_py(list())
idXML$load(f,protids,pepids)
pepids=py_to_r(pepids)
protids=py_to_r(protids)
pephits=pepids[[1]]$getHits()
pepseq=pephits[[1]]$getSequence()
### load and parse featureXML
f="/OpenMS/OpenMS/share/OpenMS/examples/FRACTIONS/BSA1_F1.featureXML"
featXML=ropenms$FeatureXMLFile()
fmap = ropenms$FeatureMap()
featXML$load(f, fmap)
print(paste0("FeatureID: ", fmap[1]$getUniqueId()))
print(paste0("Charge: ", fmap[1]$getCharge()))
print(paste0("M/z: ", fmap[1]$getMZ()))
print(paste0("RT: ", fmap[1]$getRT()))
### load and parse mzML
f="/OpenMS/OpenMS/share/OpenMS/examples/BSA/BSA1.mzML"
mzML= ropenms$MzMLFile()
msexp = ropenms$MSExperiment()
mzML$load(f,msexp)
spectra = py_to_r(msexp$getSpectra())
#ms1
ms1=sapply(spectra, function(x) x$getMSLevel()==1)
peaks=sapply(spectra[ms1], function(x) cbind(do.call("cbind", x$get_peaks()),x$getRT()))
peaks=do.call("rbind", peaks)
peaks_df=data.frame(peaks)
colnames(peaks_df)=c('MZ','Intensity','RT')
peaks_df$Intensity=log10(peaks_df$Intensity)
ggplot(peaks_df, aes(x=RT, y=MZ) )+geom_point(size=1, aes(colour = Intensity), alpha=0.25) + theme_minimal() + scale_colour_gradient(low = "blue", high = "yellow")
#ms2
ms2=spectra[!ms1][[1]]$get_peaks()
peaks_ms2=do.call("cbind", ms2)
peaks_ms2=data.frame(peaks_ms2)
colnames(peaks_ms2)=c("MZ","Intensity")
ggplot(peaks_ms2, aes(x=MZ, y=Intensity)) +
geom_segment( aes(x=MZ, xend=MZ, y=0, yend=Intensity)) +
geom_segment( aes(x=MZ, xend=MZ, y=0, yend=-Intensity)) + # mirror spectrum possibly useful for synthetic peptide spectra comparison
theme_minimal()
### Spectrum
spectrum = ropenms$MSSpectrum()
mz = seq(1500, 500, -100)
i = seq(10, 2000, length.out = length(mz))
spectrum$set_peaks(list(mz, i))
# Sort the peaks according to ascending mass-to-charge ratio
spectrum$sortByPosition()
# Iterate using the reticulate::iterate() function
iterate(spectrum, function(x) {print(paste0("M/z :" , x$getMZ(), " Intensity: ", x$getIntensity()))})
# Iterate over spectrum of those peaks
for (i in seq(0,py_to_r(spectrum$size())-1)) {
print(spectrum[i]$getMZ())
print(spectrum[i]$getIntensity())
}
# More efficient peak access with get_peaks()
peak_df=do.call("cbind", py_to_r(spectrum$get_peaks()))
apply(peak_df,1,c)
# Access a peak by index
print(c(spectrum[1]$getMZ(), spectrum[1]$getIntensity()))
| R |
3D | OpenMS/OpenMS | share/OpenMS/SCRIPTS/ProduceQCFigures_inj.R | .R | 954 | 28 | #install.packages("scales")
#install.packages("ggplot2")
## This is an R script to produce the figures that are attached to the qcML format
library("ggplot2")
library(scales)
options(warn=-1) #suppress warnings
file<-commandArgs(TRUE)[1]
post<-commandArgs(TRUE)[2]
#file<-"/tmp/2015-10-28_171407_Cetirizin_2990_1/TOPPAS_tmp/QCWorkflow_fr/019_QCExtractor/out_csv/old1.dta"
knime.in<-read.csv(file=file,head=TRUE,sep="\t")
names(knime.in)<-c("rt","it")
knime.in$rt <- as.POSIXct(as.character(0),format="%S")+knime.in$rt
png(post)
##########################
###Injection time
##########################
ggplot(data=knime.in, aes(x=rt, y=it)) +
geom_point(shape=2) +
geom_line(y=0, colour="blue") +
stat_smooth(colour="red", method=loess) +
#scale_x_datetime( breaks="10 mins", minor_breaks="1 mins", labels=date_format("%H:%M")) +
xlab("RT (HH:MM)") +
ylab("Injection time (ms)")
######################################
garbage<-dev.off()
| R |
3D | OpenMS/OpenMS | share/OpenMS/SCRIPTS/plot_trafo.R | .R | 3,936 | 127 | #!/usr/bin/env Rscript
library(XML)
## utility function:
"%within%" <- function(x, range) {
(x >= range[1]) & (x <= range[2])
}
## read pairs of data points from trafoXML file:
read.pairs <- function(filename) {
pairs <- matrix(nrow=0, ncol=2)
pair.handler <- function(name, attrs) {
pairs <<- rbind(pairs, as.numeric(c(attrs["from"], attrs["to"])))
}
xmlEventParse(filename, list("Pair"=pair.handler))
pairs
}
## create short, but unique names from trafoXML files:
unique.names <- function(paths) {
stopifnot(!any(duplicated(paths)))
paths <- sub("\\.trafoXML", "", paths, ignore.case=TRUE)
labels <- basename(paths)
if (!any(duplicated(labels)))
return(labels)
parts <- strsplit(paths, .Platform$file.sep, fixed=TRUE)
parts <- lapply(parts, rev)
i <- 2
repeat {
labels <- file.path(sapply(parts, function(p) p[i]), labels)
if (!any(duplicated(labels)))
return(labels)
i <- i + 1
}
}
## plot data points:
plot.pairs <- function(filenames, percent=90, pch=1, legend.loc="topleft",
legend.ncol=2) {
filenames <- unique(filenames)
pairs <- lapply(filenames, read.pairs)
lens <- sapply(pairs, nrow)
pairs <- do.call(rbind, pairs)
diffs <- pairs[, 2] - pairs[, 1]
diffs.range <- range(diffs)
if (percent < 100) {
frac <- (100 - percent) / 2 / 100
q <- quantile(diffs, c(frac, 1 - frac))
## double the quantile range:
yrange <- q + (diff(q) / 2) * c(-1, 1)
## ...unless the data range is smaller:
yrange[1] <- max(yrange[1], diffs.range[1])
yrange[2] <- min(yrange[2], diffs.range[2])
xrange <- range(pairs[diffs %within% yrange, 1])
}
else {
yrange <- xrange <- NULL
}
colors <- rainbow(length(filenames))
plot(pairs[, 1], diffs, xlim=xrange, ylim=yrange, col=rep(colors, lens),
pch=pch, main="Retention time transformation", xlab="original RT [s]",
ylab=expression(paste(Delta, "RT [s]", sep="")))
abline(h=0, col="grey")
if (legend.loc != "none")
legend(legend.loc, legend=unique.names(filenames), pch=20, col=colors,
ncol=legend.ncol, cex=0.8)
}
## command line parameters:
opt <- data.frame(
c("percent", "pch", "legend.loc", "legend.ncol"),
desc=c("Percentage of data points to define (half the) visible range",
"Plotting character", "Location of legend",
"Number of columns for legend"),
value=c("90", ".", "topleft", "2"), row.names=1,
stringsAsFactors=FALSE)
params <- commandArgs(trailingOnly=TRUE)
if (length(params) < 2) {
cat("Usage: Rscript Plot_trafoXML.R",
paste0("[", rownames(opt), "=?]", collapse=" "),
"in1.trafoXML [in2.trafoXML ...] out.pdf\n\n")
cat("Generate a plot of RT transformation data.\n\n")
cat("Input: trafoXML file(s)\n")
cat("Output: PDF file with plot\n")
cat("Optional parameters:\n")
width <- max(nchar(rownames(opt)))
cat(paste0(" ", format(rownames(opt), width=width), " ", opt$desc,
" (default: ", opt$value, ")", collapse="\n"), "\n")
quit("no")
}
## no R package for handling command line parameters installed by default :-(
params.split <- strsplit(params, "=", fixed=TRUE)
for (i in 1:length(params)) {
parts <- params.split[[i]]
if (length(parts) == 1)
break # no "=", therefore no optional parameter
if (!(parts[[1]] %in% rownames(opt))) {
cat("Unknown parameter:", parts[[1]], "- ignored.\n")
next
}
parts[[2]] <- sub("^['\"](.*)['\"]$", "\\1", parts[[2]]) # remove quotes
opt[parts[[1]], "value"] <- parts[[2]]
}
filenames <- params[i:(length(params) - 1)]
outfile <- params[length(params)]
for (i in 1:nrow(opt)) {
assign(rownames(opt)[i], opt[i, "value"])
}
percent <- as.numeric(percent)
legend.ncol <- as.numeric(legend.ncol)
if (pch %in% as.character(1:25))
pch <- as.numeric(pch)
pdf(outfile)
plot.pairs(filenames, percent, pch, legend.loc, legend.ncol)
invisible(dev.off())
cat("Done.\n")
| R |
3D | OpenMS/OpenMS | share/OpenMS/SCRIPTS/ProduceQCFigures_acc.R | .R | 977 | 29 | library("ggplot2")
library(scales)
options(warn=-1) #suppress warnings
#options
options(digits=10)
file<-commandArgs(TRUE)[1]
post<-commandArgs(TRUE)[2]
#file<-"/tmp/TOPPAS_out/023-QCExtractor-out_csv/old1.csv"
knime.in<-read.csv(file=file,head=TRUE,sep="\t")
names(knime.in)<- c("RT", "MZ", "Score", "PeptideSequence", "Charge", "TheoreticalWeight", "DeltaPpm")
png(post)
##########################
###Mass accuracy
##########################
ggplot(knime.in, aes(x=DeltaPpm)) +
geom_histogram(aes(y=..density..), # Histogram with density instead of count on y-axis
binwidth=.5,
colour="black", fill="white") +
geom_density(alpha=.1, fill="green") + # Overlay with transparent density plot
geom_vline(aes(xintercept=median(DeltaPpm, na.rm=T)), # Ignore NA values for mean
color="red", linetype="dashed", size=1) +
xlim(c(-10,10)) +
ylab("Density")
######################################
garbage<-dev.off()
| R |
3D | OpenMS/OpenMS | share/OpenMS/SCRIPTS/InternalCalibration_Models.R | .R | 1,511 | 46 | ## add default CRAN mirror in case the user's config does not have one
options(repos = list(CRAN="http://cran.rstudio.com/"))
if (!require(ggplot2)) install.packages("ggplot2")
library("ggplot2")
if (!require(reshape2)) install.packages("reshape2")
library("reshape2")
file.table.in = commandArgs(TRUE)[1] ## file.table.in = "models.csv"
file.plot.out = commandArgs(TRUE)[2] ## file.plot.out = "output.png"
cat(paste0("Reading file '", file.table.in, "' to plot model parameters ..."))
d = read.csv(file.table.in, check.names = FALSE, comment.char = "#", strip.white = TRUE)
model_count = sum(d$source == "local")
dm = melt(d, id.vars = c("RT", "source"))
head(dm)
## for linear models: remove 'power' graph (it's all 0)
if (all(dm$value[grep("power", dm$variable)] == 0, na.rm=TRUE))
{
dm = dm[grep("power", dm$variable, invert=TRUE), ]
}
#options(device = "pdf")
#dev.new(filename = file.plot.out, file = file.plot.out)
png(filename = file.plot.out, width=15, height=10, units="cm", res=300)
if (model_count == 0)
{
plot(c(0, 1), c(0, 1), ann = F, bty = 'n', type = 'n', xaxt = 'n', yaxt = 'n')
text(0.5,0.5,"Model fitting failed!\nCheck your tool parameters and/or data!")
} else {
pl = ggplot(dm) +
geom_point(aes(x=RT, y=value, col=source)) +
ggtitle(paste("Model coefficients over time\n", model_count, "model(s)", collapse="")) +
xlab("RT [sec]") +
ylab("model coefficient") +
facet_grid( variable ~ ., scales="free_y")
print(pl)
}
dev.off()
| R |
3D | OpenMS/OpenMS | share/OpenMS/SCRIPTS/ProduceQCFigures_rt_acc.R | .R | 904 | 32 | library("ggplot2")
library(scales)
options(warn=-1) #suppress warnings
#options
options(digits=10)
file<-commandArgs(TRUE)[1]
post<-commandArgs(TRUE)[2]
#file<-"/tmp/TOPPAS_out/023-QCExtractor-out_csv/old1.csv"
knime.in<-read.csv(file=file,head=TRUE,sep="\t")
names(knime.in)<- c("RT", "MZ", "Score", "PeptideSequence", "Charge", "TheoreticalWeight", "DeltaPpm")
png(post)
##########################
###Mass accuracy
##########################
if(nrow(knime.in) < 2){
df <- data.frame()
ggplot(df) + geom_point() + xlim(0, 10) + ylim(0, 10)
}else{
knime.in$rt <- as.POSIXct(as.character(0),format="%S")+knime.in$RT
ggplot(data=knime.in, aes(x=rt , y=DeltaPpm)) +
geom_point(alpha=0.5) +
ylim(c(-10,10)) +
geom_line(y=0, colour="blue") +
stat_smooth(colour="red", method=loess, span=1/5) +
xlab("RT (HH:MM)")
}
######################################
garbage<-dev.off()
| R |
3D | OpenMS/OpenMS | share/OpenMS/SCRIPTS/mzTab2tsv_PRT.R | .R | 3,771 | 120 | ## This is an R script for the conversion of mzTab to a better readable tsv format.
# clear entire workspace
rm(list = ls())
input.file <- commandArgs(TRUE)[1]
output.file <- commandArgs(TRUE)[2]
# find start of the section
startSection <- function(file, section.identifier) {
data <- file(file, "r")
row = 0
while (TRUE) {
row = row + 1
line = readLines(data, n=1)
if (substr(line, 1, 3)==section.identifier) {
break
}
}
close(data)
return (row)
}
# get index in protein groups list containing protein x
getIndex <- function(x, members) {
g <- gsub(x, "", members, fixed=TRUE)
d <- nchar(members)-nchar(g)
return (which(d>0)[1])
}
# returns first entry of a comma-separated list
firstEntry <- function(x) {
list <- strsplit(as.character(x),",",fixed=TRUE)
return (unlist(lapply(list, '[[', 1)))
}
# count the occurrences of character c in string s
countOccurrences <- function(char,s) {
s2 <- gsub(char,"",s)
return (nchar(s) - nchar(s2))
}
# check that the protein accession is of the format *|*|*
# Note that NA returns TRUE.
checkAccessionFormat <- function(accession) {
if (is.na(accession)) {
return (TRUE)
}
n <- length(accession)
count <- countOccurrences("[|]",accession)
m <- length(which(count==2))
return (n==m)
}
# Extracts the second entry from a string of the form *|*|*.
getAccession <- function(string) {
if (is.na(string)) {
return (NA)
}
return (unlist(strsplit(string, "[|]"))[2])
}
# Extracts the third entry from a string of the form *|*|*.
getGene <- function(string) {
if (is.na(string)) {
return (NA)
}
return (unlist(strsplit(string, "[|]"))[3])
}
# read the PRT section of an mzTab file
readMzTabPRT <-function(file) {
# find start of the PRT section
first.row <- startSection(file, "PRH")
# read entire mzTab
data <- read.table(file, sep="\t", skip=first.row-1, fill=TRUE, header=TRUE, quote="", na.strings=c("null","NA"), stringsAsFactors=FALSE, check.names=FALSE)
# extract PRT data
protein.data <- data[which(data[,1]=="PRT"),]
protein.data$PRH <- NULL
# In case the accession column is of the format *|*|*, we split this column into an accession and a gene column.
if (all(sapply(protein.data$accession, checkAccessionFormat))) {
protein.data$gene <- sapply(protein.data$accession, getGene)
protein.data$accession <- sapply(protein.data$accession, getAccession)
}
# split into different types
proteins <- protein.data[which(protein.data$opt_global_protein_group_type=="single_protein"),]
protein.groups <- protein.data[which(protein.data$opt_global_protein_group_type=="protein_group"),]
indistinguishable.groups <- protein.data[which(protein.data$opt_global_protein_group_type=="indistinguishable_group"),]
if ((dim(protein.groups)[1] > 0) && (dim(indistinguishable.groups)[1] > 0)) {
# match indistinguishable groups to protein groups
group.index <- as.vector(sapply(firstEntry(indistinguishable.groups.members), getIndex, members=protein.groups.members))
table <- data.frame(cbind(group.index, indistinguishable.groups.members))
# merge information from the protein list
colnames(table) <- c("protein group","accessions")
table$accession <- firstEntry(table$accessions)
table <- merge(table, proteins, by="accession")
table$accession <- NULL
# order table by protein.group
table$"protein group" <- as.integer(table$"protein group")
table <- table[order(table$"protein group"),]
}
else {
table <- proteins
colnames(table) <- gsub("accession","accessions", colnames(table))
}
return (table)
}
protein.data <- readMzTabPRT(input.file)
write.table(protein.data, output.file, sep="\t", row.names=FALSE, col.names=TRUE, quote=FALSE)
| R |
3D | OpenMS/OpenMS | share/OpenMS/examples/external_code/Main.cpp | .cpp | 560 | 24 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/CHEMISTRY/AASequence.h>
#include "ExampleLibraryFile.h"
using namespace OpenMS;
using namespace OpenMSExternal;
int main(int argc, char * argv[])
{
FeatureMap fm;
Feature feature;
fm.push_back(feature);
std::string s = ExampleLibraryFile::printSomething();
std::cout << "From external lib: " << s << "\n";
std::cout << "All good and well!\n";
return 0;
}
| C++ |
3D | OpenMS/OpenMS | share/OpenMS/examples/external_code/ExampleLibraryFile.h | .h | 541 | 24 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#pragma once
#include <string>
namespace OpenMSExternal //optional namespace... however you like it
{
class ExampleLibraryFile
{
public:
static std::string printSomething();
};
}
| Unknown |
3D | OpenMS/OpenMS | share/OpenMS/examples/external_code/ExampleLibraryFile.cpp | .cpp | 579 | 21 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Chris Bielow $
// --------------------------------------------------------------------------
#include "ExampleLibraryFile.h"
using namespace std;
namespace OpenMSExternal //optional namespace... however you like it
{
std::string ExampleLibraryFile::printSomething()
{
return "this is the external library.";
}
}
| C++ |
3D | OpenMS/OpenMS | cmake/Modules/Findcppcheck.cpp | .cpp | 266 | 18 | /**
* \file Findcppcheck.cpp
* \brief Dummy C++ source file used by CMake module Findcppcheck.cmake
*
* \author
* Ryan Pavlik, 2009-2010
* <rpavlik@iastate.edu>
* http://academic.cleardefinition.com/
*
*/
int main(int argc, char* argv[]) {
return 0;
}
| C++ |
3D | OpenMS/OpenMS | cmake/MacOSX/fixdmg.sh | .sh | 2,848 | 105 | #!/bin/bash
# 2012/01/06 - Stephan Aiche
# based on http://stackoverflow.com/questions/96882/how-do-i-create-a-nice-looking-dmg-for-mac-os-x-using-command-line-tools
# bash script to further customize the OpenMS dmg
# if the apple script part fails with "timed out" error try
# defaults write com.apple.Finder AppleShowAllFiles TRUE && killall Finder
# <wait 1-2min>
# defaults write com.apple.Finder AppleShowAllFiles FALSE && killall Finder
DISK_NAME=OpenMS-2.3.0-Darwin
DMG_NAME=${DISK_NAME}.dmg
OPENMS_NAME=OpenMS-2.3.0
backgroundPictureName=.background.png
LICENSE=${PWD}/_CPack_Packages/Darwin/DragNDrop/sla.r
if [ ! -e ${DMG_NAME} ]
then
echo "Please execute make package first"
exit 1
fi
# make dmg writable
hdiutil convert ${DMG_NAME} -format UDRW -o temp.dmg
#hdiutil attach temp.dmg
device=$(hdiutil attach -readwrite -noverify -noautoopen "temp.dmg" | \
egrep '^/dev/' | sed 1q | awk '{print $1}')
# remove original dmg
rm -f ${DMG_NAME}
# wait till package is open
#sleep 10
echo 'tell application "Finder"
tell disk "'${DISK_NAME}'"
with timeout of 300 seconds
open
set theXOrigin to 400
set theYOrigin to 200
set theBottomRightX to 1030
set theBottomRightY to 785
tell container window
set current view to icon view
set toolbar visible to false
set statusbar visible to false
set the bounds to {theXOrigin, theYOrigin, theBottomRightX, theBottomRightY}
set statusbar visible to false
end tell
update without registering applications
delay 1
set theViewOptions to the icon view options of container window
set arrangement of theViewOptions to not arranged
-- if we have a fixed resolution we can also set this
set icon size of theViewOptions to 72
set bgimg to "'${OPENMS_NAME}':share:OpenMS:background.png" as text
set background picture of theViewOptions to file bgimg
set the position of item "'${OPENMS_NAME}'" of container window to {470, 140}
set the position of item "Applications" of container window to {160, 140}
--give the finder some time to write the .DS_Store file
delay 3
-- work around for Snow Leopard bug
close
open
update without registering applications
-- delay 5
-- try eject
eject
end timeout
end tell
end tell
' | osascript
# be sure that everything is done
#sleep 10
chmod -Rf go-w /Volumes/"${DISK_NAME}"
sync
sync
hdiutil detach ${device}
hdiutil convert "temp.dmg" -format UDZO -imagekey zlib-level=9 -o "${DMG_NAME}"
#rm -f /pack.temp.dmg
# hdiutil convert temp.dmg -format UDZO -imagekey zlib-level=9 -o ${DMG_NAME}
rm -f temp.dmg
if [ ! -z "${LICENSE}" -a "${LICENSE}" != "-null-" ]; then
echo "adding EULA resources"
hdiutil unflatten "${DMG_NAME}"
ResMerger "${LICENSE}" -a -o "${DMG_NAME}"
hdiutil flatten "${DMG_NAME}"
fi
| Shell |
3D | OpenMS/OpenMS | cmake/MacOSX/notarize.sh | .sh | 5,847 | 176 | #!/bin/bash
# macOS notarization script using notarytool (requires Xcode 13+ / macOS 11.3+)
# altool was deprecated and unsupported after Fall 2023.
#
# Usage: notarize.sh <bundle_pkg> <bundle_id> <apple_id> <password_env_var> [log_folder]
#
# Arguments:
# bundle_pkg - The package to notarize (.dmg, .pkg, .zip, or .app)
# bundle_id - The bundle identifier (e.g., de.openms)
# apple_id - Apple ID email for notarization
# password_env_var - Environment variable name containing app-specific password
# log_folder - Optional: folder for log files (defaults to current directory)
# Exit on error and fail on any error in a pipeline
set -e
set -o pipefail
BUNDLE_PKG="$1"
BUNDLE_ID="$2"
ASC_USERNAME="$3"
ASC_PASSWORD_ENVVAR="$4"
ASC_TEAMID="$5"
LOG_FOLDER="${6:-.}"
NOTARIZE_LOG="$LOG_FOLDER/notarize.log"
mkdir -p "$LOG_FOLDER"
touch "$NOTARIZE_LOG"
REMOVE_PKG=false
IS_ZIP=false
echo "=== macOS Notarization Script ==="
echo "Bundle: $BUNDLE_PKG"
echo "Bundle ID: $BUNDLE_ID"
echo "Apple ID: $ASC_USERNAME"
echo "Log folder: $LOG_FOLDER"
# Validate inputs
if [[ -z "$BUNDLE_PKG" ]] || [[ -z "$BUNDLE_ID" ]] || [[ -z "$ASC_USERNAME" ]] || [[ -z "$ASC_PASSWORD_ENVVAR" ]]; then
echo "Error: Missing required arguments"
echo "Usage: $0 <bundle_pkg> <bundle_id> <apple_id> <password_env_var> [log_folder]"
exit 1
fi
# Check that the password environment variable is set
if [[ -z "${!ASC_PASSWORD_ENVVAR}" ]]; then
echo "Error: Environment variable '$ASC_PASSWORD_ENVVAR' is not set or empty"
exit 1
fi
# Determine file type and prepare for notarization
# Only notarize the outermost container (zip, pkg, dmg)
if [[ $BUNDLE_PKG == *.dmg ]]; then
BUNDLE_FILE=$BUNDLE_PKG
echo "Notarizing DMG: $BUNDLE_PKG"
elif [[ $BUNDLE_PKG == *.pkg ]]; then
BUNDLE_FILE=$BUNDLE_PKG
echo "Notarizing PKG: $BUNDLE_PKG"
elif [[ $BUNDLE_PKG == *.zip ]]; then
# For zip files, we need to unzip to staple, then re-zip
BUNDLE_FILE="$BUNDLE_PKG"
IS_ZIP=true
echo "Notarizing ZIP: $BUNDLE_PKG (will staple contents)"
elif [[ $BUNDLE_PKG == *.app ]]; then
# Apps need to be zipped for upload, then unzipped for stapling
BUNDLE_FILE=$BUNDLE_PKG
BUNDLE_PKG="${BUNDLE_PKG}.zip"
echo "Zipping app bundle for notarization..."
ditto -c -k --rsrc --keepParent "$BUNDLE_FILE" "$BUNDLE_PKG"
REMOVE_PKG=true
echo "Notarizing APP (via ZIP): $BUNDLE_PKG"
else
echo "Error: Unsupported filetype for notarization: $BUNDLE_PKG"
echo "Supported types: .dmg, .pkg, .zip, .app"
exit 1
fi
# Verify the file exists
if [[ ! -f "$BUNDLE_PKG" ]]; then
echo "Error: File not found: $BUNDLE_PKG"
exit 1
fi
echo ""
echo "=== Submitting for notarization ==="
# Submit for notarization using notarytool
# --wait makes the command block until notarization is complete
# Ensure pipefail is set for this block in case of subshells
set -o pipefail
if xcrun notarytool submit "$BUNDLE_PKG" \
--apple-id "$ASC_USERNAME" \
--password "${!ASC_PASSWORD_ENVVAR}" \
--team-id "$ASC_TEAMID" \
--wait \
2>&1 | tee "$NOTARIZE_LOG"; then
echo ""
echo "=== Notarization submission completed ==="
# Check if notarization was successful
if grep -q "status: Accepted" "$NOTARIZE_LOG"; then
echo "Notarization successful!"
# Staple the notarization ticket to the bundle
echo ""
echo "=== Stapling notarization ticket ==="
# Note: You cannot staple a .zip file directly
# If the original was a zip, we need to handle it differently
if [[ "$IS_ZIP" = true ]]; then
echo "Warning: Cannot staple a .zip file. The notarization is stored with Apple."
echo "Users will need to be online for Gatekeeper to verify the notarization."
else
if xcrun stapler staple "$BUNDLE_FILE"; then
echo "Stapling successful!"
# Verify the stapling worked
echo ""
echo "=== Verifying notarization ==="
xcrun stapler validate "$BUNDLE_FILE" || echo "Warning: Stapler validation returned non-zero"
else
echo "Warning: Stapling failed, but notarization was successful."
echo "Users will need to be online for Gatekeeper to verify."
fi
fi
# Clean up temporary zip if we created one
if [ "$REMOVE_PKG" = true ]; then
rm -f "$BUNDLE_PKG"
fi
echo ""
echo "=== Notarization complete ==="
exit 0
else
echo "Error: Notarization failed!"
echo "Check the log for details: $NOTARIZE_LOG"
# Try to get the submission ID and fetch detailed logs
SUBMISSION_ID=$(grep -o 'id: [a-f0-9-]*' "$NOTARIZE_LOG" | head -1 | cut -d' ' -f2)
if [[ -n "$SUBMISSION_ID" ]]; then
echo ""
echo "=== Fetching detailed notarization log ==="
xcrun notarytool log "$SUBMISSION_ID" \
--apple-id "$ASC_USERNAME" \
--password "${!ASC_PASSWORD_ENVVAR}" \
--team-id "$ASC_TEAMID" \
"$LOG_FOLDER/notarization_details.json" 2>&1 || true
if [[ -f "$LOG_FOLDER/notarization_details.json" ]]; then
echo "Detailed log saved to: $LOG_FOLDER/notarization_details.json"
cat "$LOG_FOLDER/notarization_details.json"
fi
fi
# Clean up temporary zip if we created one
if [ "$REMOVE_PKG" = true ]; then
rm -f "$BUNDLE_PKG"
fi
exit 1
fi
else
echo "Error: notarytool submission failed!"
cat "$NOTARIZE_LOG"
# Clean up temporary zip if we created one
if [ "$REMOVE_PKG" = true ]; then
rm -f "$BUNDLE_PKG"
fi
exit 1
fi
| Shell |
3D | OpenMS/OpenMS | cmake/Windows/Contrib/Inetc/pluginapi.c | .c | 7,045 | 295 | #include <windows.h>
#include "pluginapi.h"
#ifdef _countof
#define COUNTOF _countof
#else
#define COUNTOF(a) (sizeof(a)/sizeof(a[0]))
#endif
unsigned int g_stringsize;
stack_t **g_stacktop;
TCHAR *g_variables;
// utility functions (not required but often useful)
// Pops the top string off the NSIS plugin stack into str (if str is non-NULL).
// Returns 0 on success, 1 if the stack pointer is missing or the stack is empty.
// NOTE(review): assumes str can hold at least g_stringsize TCHARs — confirm at call sites.
int NSISCALL popstring(TCHAR *str)
{
  stack_t *th;
  if (!g_stacktop || !*g_stacktop) return 1;
  th=(*g_stacktop);
  if (str) lstrcpy(str,th->text);
  *g_stacktop = th->next;   // unlink the top node...
  GlobalFree((HGLOBAL)th);  // ...and release it
  return 0;
}
// Pops the top stack string into str, copying at most maxlen characters
// (or g_stringsize when maxlen is 0). Returns 0 on success, 1 on empty stack.
int NSISCALL popstringn(TCHAR *str, int maxlen)
{
  stack_t *th;
  if (!g_stacktop || !*g_stacktop) return 1;
  th=(*g_stacktop);
  if (str) lstrcpyn(str,th->text,maxlen?maxlen:g_stringsize);
  *g_stacktop = th->next;
  GlobalFree((HGLOBAL)th);
  return 0;
}
// Pushes a copy of str (truncated to g_stringsize TCHARs) onto the NSIS
// plugin stack. No-op if the installer never supplied a stack pointer.
// Fix: the GlobalAlloc result was previously dereferenced without a NULL
// check; on allocation failure the push is now silently dropped instead
// of crashing the installer.
void NSISCALL pushstring(const TCHAR *str)
{
  stack_t *th;
  if (!g_stacktop) return;
  th=(stack_t*)GlobalAlloc(GPTR,(sizeof(stack_t)+(g_stringsize)*sizeof(TCHAR)));
  if (!th) return; // out of memory: cannot push
  lstrcpyn(th->text,str,g_stringsize);
  th->next=*g_stacktop;
  *g_stacktop=th;
}
// Returns a pointer to installer variable slot varnum ($0..$9, $R0.., etc),
// or NULL when varnum is out of range. Each slot is g_stringsize TCHARs wide
// inside the flat g_variables array.
TCHAR* NSISCALL getuservariable(const int varnum)
{
  if (varnum >= 0 && varnum < __INST_LAST)
    return g_variables + varnum * g_stringsize;
  return NULL;
}
// Copies var into installer variable slot varnum; silently ignores a NULL
// input string or an out-of-range index.
void NSISCALL setuservariable(const int varnum, const TCHAR *var)
{
  if (var != NULL && varnum >= 0 && varnum < __INST_LAST)
    lstrcpy(g_variables + varnum*g_stringsize, var);
}
#ifdef _UNICODE
// --- Unicode build: ANSI bridge helpers -----------------------------------
// The stack and variables hold wide strings here; these wrappers let
// ANSI-oriented plugin code keep using char buffers, converting via CP_ACP.
// NOTE(review): the temporary GlobalAlloc results are not NULL-checked.

// Pops a wide string off the stack and converts it to ANSI into ansiStr.
int NSISCALL PopStringA(char* ansiStr)
{
  wchar_t* wideStr = (wchar_t*) GlobalAlloc(GPTR, g_stringsize*sizeof(wchar_t));
  int rval = popstring(wideStr);
  WideCharToMultiByte(CP_ACP, 0, wideStr, -1, ansiStr, g_stringsize, NULL, NULL);
  GlobalFree((HGLOBAL)wideStr);
  return rval;
}

// Length-limited variant of PopStringA (maxlen 0 means g_stringsize).
int NSISCALL PopStringNA(char* ansiStr, int maxlen)
{
  int realLen = maxlen ? maxlen : g_stringsize;
  wchar_t* wideStr = (wchar_t*) GlobalAlloc(GPTR, realLen*sizeof(wchar_t));
  int rval = popstringn(wideStr, realLen);
  WideCharToMultiByte(CP_ACP, 0, wideStr, -1, ansiStr, realLen, NULL, NULL);
  GlobalFree((HGLOBAL)wideStr);
  return rval;
}

// Converts ansiStr to wide and pushes it onto the stack.
void NSISCALL PushStringA(const char* ansiStr)
{
  wchar_t* wideStr = (wchar_t*) GlobalAlloc(GPTR, g_stringsize*sizeof(wchar_t));
  MultiByteToWideChar(CP_ACP, 0, ansiStr, -1, wideStr, g_stringsize);
  pushstring(wideStr);
  GlobalFree((HGLOBAL)wideStr);
  return;
}

// Copies variable varnum into wideStr (no conversion needed in this build).
// NOTE(review): getuservariable may return NULL for a bad index — not checked.
void NSISCALL GetUserVariableW(const int varnum, wchar_t* wideStr)
{
  lstrcpyW(wideStr, getuservariable(varnum));
}

// Copies variable varnum into ansiStr, converting wide -> ANSI.
void NSISCALL GetUserVariableA(const int varnum, char* ansiStr)
{
  wchar_t* wideStr = getuservariable(varnum);
  WideCharToMultiByte(CP_ACP, 0, wideStr, -1, ansiStr, g_stringsize, NULL, NULL);
}

// Sets variable varnum from an ANSI string (converted to wide in place).
void NSISCALL SetUserVariableA(const int varnum, const char* ansiStr)
{
  if (ansiStr != NULL && varnum >= 0 && varnum < __INST_LAST)
  {
    wchar_t* wideStr = g_variables + varnum * g_stringsize;
    MultiByteToWideChar(CP_ACP, 0, ansiStr, -1, wideStr, g_stringsize);
  }
}
#else
// --- ANSI build: wide-string bridge helpers -------------------------------
// Mirror image of the Unicode branch above: the stack and variables hold
// ANSI strings, and these wrappers convert to/from wide via CP_ACP.
// NOTE(review): the temporary GlobalAlloc results are not NULL-checked.

// Pops an ANSI string off the stack and converts it to wide into wideStr.
int NSISCALL PopStringW(wchar_t* wideStr)
{
  char* ansiStr = (char*) GlobalAlloc(GPTR, g_stringsize);
  int rval = popstring(ansiStr);
  MultiByteToWideChar(CP_ACP, 0, ansiStr, -1, wideStr, g_stringsize);
  GlobalFree((HGLOBAL)ansiStr);
  return rval;
}

// Length-limited variant of PopStringW (maxlen 0 means g_stringsize).
int NSISCALL PopStringNW(wchar_t* wideStr, int maxlen)
{
  int realLen = maxlen ? maxlen : g_stringsize;
  char* ansiStr = (char*) GlobalAlloc(GPTR, realLen);
  int rval = popstringn(ansiStr, realLen);
  MultiByteToWideChar(CP_ACP, 0, ansiStr, -1, wideStr, realLen);
  GlobalFree((HGLOBAL)ansiStr);
  return rval;
}

// Converts wideStr to ANSI and pushes it onto the stack.
void NSISCALL PushStringW(wchar_t* wideStr)
{
  char* ansiStr = (char*) GlobalAlloc(GPTR, g_stringsize);
  WideCharToMultiByte(CP_ACP, 0, wideStr, -1, ansiStr, g_stringsize, NULL, NULL);
  pushstring(ansiStr);
  GlobalFree((HGLOBAL)ansiStr);
}

// Copies variable varnum into wideStr, converting ANSI -> wide.
void NSISCALL GetUserVariableW(const int varnum, wchar_t* wideStr)
{
  char* ansiStr = getuservariable(varnum);
  MultiByteToWideChar(CP_ACP, 0, ansiStr, -1, wideStr, g_stringsize);
}

// Copies variable varnum into ansiStr (no conversion needed in this build).
// NOTE(review): getuservariable may return NULL for a bad index — not checked.
void NSISCALL GetUserVariableA(const int varnum, char* ansiStr)
{
  lstrcpyA(ansiStr, getuservariable(varnum));
}

// Sets variable varnum from a wide string (converted to ANSI in place).
void NSISCALL SetUserVariableW(const int varnum, const wchar_t* wideStr)
{
  if (wideStr != NULL && varnum >= 0 && varnum < __INST_LAST)
  {
    char* ansiStr = g_variables + varnum * g_stringsize;
    WideCharToMultiByte(CP_ACP, 0, wideStr, -1, ansiStr, g_stringsize, NULL, NULL);
  }
}
#endif
// playing with integers
// Parses s into a pointer-sized integer, accepting C-style literals:
// "0x"/"0X" hex, leading-zero octal, otherwise signed decimal.
// Parsing stops at the first character invalid for the chosen base;
// no-digit input yields 0. No overflow detection.
INT_PTR NSISCALL nsishelper_str_to_ptr(const TCHAR *s)
{
  INT_PTR v=0;
  if (*s == _T('0') && (s[1] == _T('x') || s[1] == _T('X')))
  {
    s++; // skip '0'; the 'x' is skipped by the pre-increment below
    for (;;)
    {
      int c=*(++s);
      if (c >= _T('0') && c <= _T('9')) c-=_T('0');
      else if (c >= _T('a') && c <= _T('f')) c-=_T('a')-10;
      else if (c >= _T('A') && c <= _T('F')) c-=_T('A')-10;
      else break;
      v<<=4;
      v+=c;
    }
  }
  else if (*s == _T('0') && s[1] <= _T('7') && s[1] >= _T('0'))
  {
    for (;;)
    {
      int c=*(++s); // starts at s[1]; the leading '0' contributes nothing
      if (c >= _T('0') && c <= _T('7')) c-=_T('0');
      else break;
      v<<=3;
      v+=c;
    }
  }
  else
  {
    int sign=0;
    if (*s == _T('-')) sign++; else s--; // back up so ++s lands on the first digit
    for (;;)
    {
      int c=*(++s) - _T('0');
      if (c < 0 || c > 9) break;
      v*=10;
      v+=c;
    }
    if (sign) v = -v;
  }
  return v;
}
// Parses an unsigned decimal number from the start of s.
// Stops at the first non-digit character; returns 0 when s starts
// with no digit. No overflow detection.
unsigned int NSISCALL myatou(const TCHAR *s)
{
  unsigned int result = 0;
  while (*s >= _T('0') && *s <= _T('9'))
  {
    result = result * 10 + (unsigned int)(*s - _T('0'));
    ++s;
  }
  return result;
}
// int-sized twin of nsishelper_str_to_ptr (hex / octal / signed decimal)
// that additionally supports simple OR expressions: "a|b|c" is parsed
// recursively and the values are bitwise-ORed together.
int NSISCALL myatoi_or(const TCHAR *s)
{
  int v=0;
  if (*s == _T('0') && (s[1] == _T('x') || s[1] == _T('X')))
  {
    s++; // skip '0'; the 'x' is skipped by the pre-increment below
    for (;;)
    {
      int c=*(++s);
      if (c >= _T('0') && c <= _T('9')) c-=_T('0');
      else if (c >= _T('a') && c <= _T('f')) c-=_T('a')-10;
      else if (c >= _T('A') && c <= _T('F')) c-=_T('A')-10;
      else break;
      v<<=4;
      v+=c;
    }
  }
  else if (*s == _T('0') && s[1] <= _T('7') && s[1] >= _T('0'))
  {
    for (;;)
    {
      int c=*(++s);
      if (c >= _T('0') && c <= _T('7')) c-=_T('0');
      else break;
      v<<=3;
      v+=c;
    }
  }
  else
  {
    int sign=0;
    if (*s == _T('-')) sign++; else s--; // back up so ++s lands on the first digit
    for (;;)
    {
      int c=*(++s) - _T('0');
      if (c < 0 || c > 9) break;
      v*=10;
      v+=c;
    }
    if (sign) v = -v;
  }
  // Support for simple ORed expressions
  if (*s == _T('|'))
  {
    v |= myatoi_or(s+1);
  }
  return v;
}
// Pops a string off the stack and converts it to a pointer-sized integer.
// Returns 0 when the stack is empty (indistinguishable from a popped "0").
INT_PTR NSISCALL popintptr()
{
  TCHAR buf[128];
  if (popstringn(buf,COUNTOF(buf)))
    return 0;
  return nsishelper_str_to_ptr(buf);
}
// Pops a string off the stack and parses it with myatoi_or, so ORed
// expressions like "0x1|0x4" are supported. Returns 0 on empty stack.
int NSISCALL popint_or()
{
  TCHAR buf[128];
  if (popstringn(buf,COUNTOF(buf)))
    return 0;
  return myatoi_or(buf);
}
// Formats value as signed decimal and pushes it onto the stack.
// "%Id" is wsprintf's pointer-sized-integer conversion, selected at
// compile time for 64-bit builds.
void NSISCALL pushintptr(INT_PTR value)
{
  TCHAR buffer[30];
  wsprintf(buffer, sizeof(void*) > 4 ? _T("%Id") : _T("%d"), value);
  pushstring(buffer);
}
| C |
3D | OpenMS/OpenMS | cmake/Windows/Contrib/Inetc/afxres.h | .h | 48 | 3 | #include <windows.h>
#define IDC_STATIC (-1)
| Unknown |
3D | OpenMS/OpenMS | cmake/Windows/Contrib/Inetc/resource.h | .h | 1,860 | 48 | //{{NO_DEPENDENCIES}}
// Microsoft Developer Studio generated include file.
// Used by inetc.rc
//
#define IDC_SLOGIN 8
#define IDC_PROGRESS 10
#define IDC_SUBTEXT 11
#define IDC_SPWD 11
#define IDC_ICON1 12
#define IDD_DIALOG1 101
#define IDI_ICON1 102
#define IDI_ICON2 103
#define IDD_AUTH 104
#define IDI_ICON3 105
#define IDI_ICON4 106
#define IDI_ICON5 107
#define IDD_DIALOG2 108
#define IDI_ICON6 109
#define IDD_DIALOG3 110
#define IDC_STATIC1 1001
#define IDC_STATIC2 1002
#define IDC_STATIC3 1003
#define IDC_STATIC4 1004
#define IDC_PROGRESS1 1005
#define IDC_STATIC5 1006
#define IDC_STATIC6 1007
#define IDC_STATIC12 1008
#define IDC_STATIC13 1009
#define IDC_STATIC20 1009
#define IDC_STATIC21 1010
#define IDC_STATIC22 1011
#define IDC_STATIC23 1012
#define IDC_STATIC24 1013
#define IDC_STATIC25 1014
#define IDC_ELOGIN 1015
#define IDC_EPWD 1016
// Next default values for new objects
//
#ifdef APSTUDIO_INVOKED
#ifndef APSTUDIO_READONLY_SYMBOLS
#define _APS_NEXT_RESOURCE_VALUE 111
#define _APS_NEXT_COMMAND_VALUE 40001
#define _APS_NEXT_CONTROL_VALUE 1018
#define _APS_NEXT_SYMED_VALUE 101
#endif
#endif
| Unknown |
3D | OpenMS/OpenMS | cmake/Windows/Contrib/Inetc/nsis_tchar.h | .h | 5,028 | 230 | /*
* nsis_tchar.h
*
* This file is a part of NSIS.
*
* Copyright (C) 1999-2013 Nullsoft and Contributors
*
* This software is provided 'as-is', without any express or implied
* warranty.
*
* For Unicode support by Jim Park -- 08/30/2007
*/
// Jim Park: Only those we use are listed here.
#pragma once
#ifdef _UNICODE
#ifndef _T
#define __T(x) L ## x
#define _T(x) __T(x)
#define _TEXT(x) __T(x)
#endif
#ifndef _TCHAR_DEFINED
#define _TCHAR_DEFINED
#if !defined(_NATIVE_WCHAR_T_DEFINED) && !defined(_WCHAR_T_DEFINED)
typedef unsigned short TCHAR;
#else
typedef wchar_t TCHAR;
#endif
#endif
// program
#define _tenviron _wenviron
#define __targv __wargv
// printfs
#define _ftprintf fwprintf
#define _sntprintf _snwprintf
#if (defined(_MSC_VER) && (_MSC_VER<=1310)) || defined(__MINGW32__)
# define _stprintf swprintf
#else
# define _stprintf _swprintf
#endif
#define _tprintf wprintf
#define _vftprintf vfwprintf
#define _vsntprintf _vsnwprintf
#if defined(_MSC_VER) && (_MSC_VER<=1310)
# define _vstprintf vswprintf
#else
# define _vstprintf _vswprintf
#endif
// scanfs
#define _tscanf wscanf
#define _stscanf swscanf
// string manipulations
#define _tcscat wcscat
#define _tcschr wcschr
#define _tcsclen wcslen
#define _tcscpy wcscpy
#define _tcsdup _wcsdup
#define _tcslen wcslen
#define _tcsnccpy wcsncpy
#define _tcsncpy wcsncpy
#define _tcsrchr wcsrchr
#define _tcsstr wcsstr
#define _tcstok wcstok
// string comparisons
#define _tcscmp wcscmp
#define _tcsicmp _wcsicmp
#define _tcsncicmp _wcsnicmp
#define _tcsncmp wcsncmp
#define _tcsnicmp _wcsnicmp
// upper / lower
#define _tcslwr _wcslwr
#define _tcsupr _wcsupr
#define _totlower towlower
#define _totupper towupper
// conversions to numbers
#define _tcstoi64 _wcstoi64
#define _tcstol wcstol
#define _tcstoul wcstoul
#define _tstof _wtof
#define _tstoi _wtoi
#define _tstoi64 _wtoi64
#define _ttoi _wtoi
#define _ttoi64 _wtoi64
#define _ttol _wtol
// conversion from numbers to strings
#define _itot _itow
#define _ltot _ltow
#define _i64tot _i64tow
#define _ui64tot _ui64tow
// file manipulations
#define _tfopen _wfopen
#define _topen _wopen
#define _tremove _wremove
#define _tunlink _wunlink
// reading and writing to i/o
#define _fgettc fgetwc
#define _fgetts fgetws
#define _fputts fputws
#define _gettchar getwchar
// directory
#define _tchdir _wchdir
// environment
#define _tgetenv _wgetenv
#define _tsystem _wsystem
// time
#define _tcsftime wcsftime
#else // ANSI
#ifndef _T
#define _T(x) x
#define _TEXT(x) x
#endif
#ifndef _TCHAR_DEFINED
#define _TCHAR_DEFINED
typedef char TCHAR;
#endif
// program
#define _tenviron environ
#define __targv __argv
// printfs
#define _ftprintf fprintf
#define _sntprintf _snprintf
#define _stprintf sprintf
#define _tprintf printf
#define _vftprintf vfprintf
#define _vsntprintf _vsnprintf
#define _vstprintf vsprintf
// scanfs
#define _tscanf scanf
#define _stscanf sscanf
// string manipulations
#define _tcscat strcat
#define _tcschr strchr
#define _tcsclen strlen
#define _tcscnlen strnlen
#define _tcscpy strcpy
#define _tcsdup _strdup
#define _tcslen strlen
#define _tcsnccpy strncpy
#define _tcsrchr strrchr
#define _tcsstr strstr
#define _tcstok strtok
// string comparisons
#define _tcscmp strcmp
#define _tcsicmp _stricmp
#define _tcsncmp strncmp
#define _tcsncicmp _strnicmp
#define _tcsnicmp _strnicmp
// upper / lower
#define _tcslwr _strlwr
#define _tcsupr _strupr
#define _totupper toupper
#define _totlower tolower
// conversions to numbers
#define _tcstol strtol
#define _tcstoul strtoul
#define _tstof atof
#define _tstoi atoi
#define _tstoi64 _atoi64
#define _tstoi64 _atoi64
#define _ttoi atoi
#define _ttoi64 _atoi64
#define _ttol atol
// conversion from numbers to strings
#define _i64tot _i64toa
#define _itot _itoa
#define _ltot _ltoa
#define _ui64tot _ui64toa
// file manipulations
#define _tfopen fopen
#define _topen _open
#define _tremove remove
#define _tunlink _unlink
// reading and writing to i/o
#define _fgettc fgetc
#define _fgetts fgets
#define _fputts fputs
#define _gettchar getchar
// directory
#define _tchdir _chdir
// environment
#define _tgetenv getenv
#define _tsystem system
// time
#define _tcsftime strftime
#endif
// is functions (the same in Unicode / ANSI)
#define _istgraph isgraph
#define _istascii __isascii
#define __TFILE__ _T(__FILE__)
#define __TDATE__ _T(__DATE__)
#define __TTIME__ _T(__TIME__)
| Unknown |
3D | OpenMS/OpenMS | cmake/Windows/Contrib/Inetc/api.h | .h | 2,760 | 84 | /*
* apih
*
* This file is a part of NSIS.
*
* Copyright (C) 1999-2013 Nullsoft and Contributors
*
* Licensed under the zlib/libpng license (the "License");
* you may not use this file except in compliance with the License.
*
* Licence details can be found in the file COPYING.
*
* This software is provided 'as-is', without any express or implied
* warranty.
*/
#ifndef _NSIS_EXEHEAD_API_H_
#define _NSIS_EXEHEAD_API_H_
// Starting with NSIS 2.42, you can check the version of the plugin API in exec_flags->plugin_api_version
// The format is 0xXXXXYYYY where X is the major version and Y is the minor version (MAKELONG(y,x))
// When doing version checks, always remember to use >=, ex: if (pX->exec_flags->plugin_api_version >= NSISPIAPIVER_1_0) {}
#define NSISPIAPIVER_1_0 0x00010000
#define NSISPIAPIVER_CURR NSISPIAPIVER_1_0
// NSIS Plug-In Callback Messages
enum NSPIM
{
NSPIM_UNLOAD, // This is the last message a plugin gets, do final cleanup
NSPIM_GUIUNLOAD, // Called after .onGUIEnd
};
// Prototype for callbacks registered with extra_parameters->RegisterPluginCallback()
// Return NULL for unknown messages
// Should always be __cdecl for future expansion possibilities
typedef UINT_PTR (*NSISPLUGINCALLBACK)(enum NSPIM);
// extra_parameters data structures containing other interesting stuff
// but the stack, variables and HWND passed on to plug-ins.
typedef struct
{
int autoclose;
int all_user_var;
int exec_error;
int abort;
int exec_reboot; // NSIS_SUPPORT_REBOOT
int reboot_called; // NSIS_SUPPORT_REBOOT
  int XXX_cur_insttype; // deprecated
int plugin_api_version; // see NSISPIAPIVER_CURR
// used to be XXX_insttype_changed
int silent; // NSIS_CONFIG_SILENT_SUPPORT
int instdir_error;
int rtl;
int errlvl;
int alter_reg_view;
int status_update;
} exec_flags_t;
#ifndef NSISCALL
# define NSISCALL __stdcall
#endif
typedef struct {
exec_flags_t *exec_flags;
int (NSISCALL *ExecuteCodeSegment)(int, HWND);
void (NSISCALL *validate_filename)(LPTSTR);
int (NSISCALL *RegisterPluginCallback)(HMODULE, NSISPLUGINCALLBACK); // returns 0 on success, 1 if already registered and < 0 on errors
} extra_parameters;
// Definitions for page showing plug-ins
// See Ui.c to understand better how they're used
// sent to the outer window to tell it to go to the next inner window
#define WM_NOTIFY_OUTER_NEXT (WM_USER+0x8)
// custom pages should send this message to let NSIS know they're ready
#define WM_NOTIFY_CUSTOM_READY (WM_USER+0xd)
// sent as wParam with WM_NOTIFY_OUTER_NEXT when user cancels - heed its warning
#define NOTIFY_BYE_BYE 'x'
#endif /* _PLUGIN_H_ */
| Unknown |
3D | OpenMS/OpenMS | cmake/Windows/Contrib/Inetc/crt.cpp | .cpp | 2,167 | 106 | #include <Windows.h>
#if defined(_MSC_VER) && _MSC_VER+0 >= 1400
#if defined(_MSC_FULL_VER) && _MSC_FULL_VER+0 >= 140050727
#include <intrin.h>
#else
EXTERN_C void __stosb(BYTE*,BYTE,size_t);
#endif
#pragma intrinsic(__stosb)
#define CRTINTRINSIC_memset(p,c,s) __stosb((BYTE*)(p),(BYTE)(c),(s))
#endif
// Minimal CRT replacement: fills z bytes at p with byte value c, returns p.
// Uses the __stosb intrinsic when available (see CRTINTRINSIC_memset above),
// otherwise a plain byte-by-byte loop.
extern "C" void* __cdecl memset(void *p, int c, size_t z)
{
#ifdef CRTINTRINSIC_memset
  CRTINTRINSIC_memset(p, c, z);
#else
  BYTE *pb = reinterpret_cast<BYTE*>(p);
  for(size_t i=0; i<z; ++i, ++pb)
    (*pb) = c;
#endif
  return p;
}
// Minimal CRT replacement for strstr: naive substring search.
// Returns str when target is empty, NULL when target does not occur.
// p1Adv is kept strlen(target)-1 characters ahead of the match start so the
// outer loop stops once fewer than strlen(target) characters remain.
extern "C" const char* __cdecl strstr(const char *str, const char *target)
{
  if (!*target) return (char*)str;
  char *p1 = (char*)str, *p2 = (char*)target;
  char *p1Adv = (char*)str;
  while (*++p2)
    p1Adv++;
  while (*p1Adv)
  {
    char *p1Begin = p1;
    p2 = (char*)target;
    while (*p1 && *p2 && *p1 == *p2) // compare candidate window
    {
      p1++;
      p2++;
    }
    if (!*p2)        // whole target matched
      return p1Begin;
    p1 = p1Begin + 1; // restart one char later
    p1Adv++;
  }
  return NULL;
}
// Wide-character twin of the strstr replacement above: naive substring
// search. Returns str when target is empty, NULL when not found.
extern "C" const wchar_t* __cdecl wcsstr(const wchar_t *str, const wchar_t *target)
{
  if (!*target) return (wchar_t*)str;
  wchar_t *p1 = (wchar_t*)str, *p2 = (wchar_t*)target;
  wchar_t *p1Adv = (wchar_t*)str; // trails strlen(target)-1 behind the scan limit
  while (*++p2)
    p1Adv++;
  while (*p1Adv)
  {
    wchar_t *p1Begin = p1;
    p2 = (wchar_t*)target;
    while (*p1 && *p2 && *p1 == *p2)
    {
      p1++;
      p2++;
    }
    if (!*p2)        // whole target matched
      return p1Begin;
    p1 = p1Begin + 1;
    p1Adv++;
  }
  return NULL;
}
// Minimal CRT replacement for strchr.
// NOTE(review): non-standard failure mode — when ch is absent this returns a
// pointer to the terminating NUL instead of NULL; callers in this plugin
// appear to rely on a non-NULL result (e.g. _tcschr(...)+1) — confirm before
// "fixing" to the standard behavior.
extern "C" const char* __cdecl strchr(const char* s, int ch)
{
  while(*s && *s != ch)
    ++s;
  return s;
}
// Wide-character twin of the strchr replacement above; shares its
// non-standard behavior of returning a pointer to the terminator
// (not NULL) when ch is absent.
extern "C" const wchar_t* __cdecl wcschr(const wchar_t* s, wchar_t ch)
{
  while(*s && *s != ch)
    ++s;
  return s;
}
// Minimal CRT replacement for strrchr: returns a pointer to the LAST
// occurrence of c in s (the terminating NUL is included in the search,
// per the standard), or NULL when c does not occur.
extern "C" const char* __cdecl strrchr(const char* s, int c)
{
  const char *last = 0;
  for (;;)
  {
    if (*s == c)
      last = s;
    if (!*s)
      break;
    ++s;
  }
  return last;
}
// Wide-character twin of the strrchr replacement: last occurrence of c in s
// (terminator included in the search), or NULL when not found.
extern "C" const wchar_t* __cdecl wcsrchr(const wchar_t* s, wchar_t c)
{
  wchar_t *rtnval = 0;
  do {
    if (*s == c)
      rtnval = (wchar_t*) s;
  } while (*s++);
  return rtnval;
}
| C++ |
3D | OpenMS/OpenMS | cmake/Windows/Contrib/Inetc/inetc.cpp | .cpp | 56,098 | 1,732 | /*******************************************************
* FILE NAME: inetc.cpp
*
* Copyright (c) 2004-2015 Takhir Bedertdinov and NSIS contributors
*
* PURPOSE:
* ftp/http file download plug-in
* on the base of MS Inet API
* 4 GB limit (http support?)
*
* CHANGE HISTORY
*
* Author Date Modifications
* Takhir Bedertdinov
* Nov 11, 2004 Original
* Dec 17, 2004 Embedded edition -
* NSISdl GUI style as default
* (nsisdl.cpp code was partly used)
* Dec 17, 2004 MSI Banner style
* Feb 20, 2005 Resume download
* feature for big files and bad connections
* Mar 05, 2005 Proxy authentication
* and /POPUP caption prefix option
* Mar 25, 2005 Connect timeout option
* and FTP switched to passive mode
* Apr 18, 2005 Crack URL buffer size
* bug fixed (256->string_size)
* HTTP POST added
* Jun 06, 2005 IDOK on "Enter" key locked
* POST HTTP header added
* Jun 22, 2005 non-interaptable mode /nocancel
* and direct connect /noproxy
* Jun 29, 2005 post.php written and tested
* Jul 05, 2005 60 sec delay on WinInet detach problem
* solved (not fine, but works including
* installer exit and system reboot)
* Jul 08, 2005 'set foreground' finally removed
* Jul 26, 2005 POPUP translate option
* Aug 23, 2005 https service type in InternetConnect
* and "ignore certificate" flags
* Sep 30, 2005 https with bad certificate from old OS;
* Forbidden handling
* Dec 23, 2005 'put' entry point, new names, 12003
* ftp error handling (on ftp write permission)
* 405 http error (method not allowed)
* Mar 12, 2006 Internal authorization via InternetErrorDlg()
* and Unauthorized (401) handling.
* Jun 10, 2006 Caption text option for Resume download
* MessageBox
* Jun 24, 2006 HEAD method, silent mode clean up
* Sep 05, 2006 Center dialog code from Backland
* Sep 07, 2006 NSISdl crash fix /Backland idea/
* Sep 08, 2006 POST as dll entry point.
* Sep 21, 2006 parent dlg progr.bar style and font,
* nocancel via ws_sysmenu
* Sep 22, 2006 current lang IDCANCEL text, /canceltext
* and /useragent options
* Sep 24, 2006 .onInit improvements and defaults
* Nov 11, 2006 FTP path creation, root|current dir mgmt
* Jan 01, 2007 Global char[] cleanup, GetLastError() to
* status string on ERR_DIALOG, few MSVCRT replaces
* Jan 13, 2007 /HEADER option added
* Jan 28, 2007 _open -> CreateFile and related
* Feb 18, 2007 Speed calculating improved (pauses),
* /popup text parameter to hide URL
* Jun 07, 2007 Local file truncation added for download
* (CREATE_ALWAYS)
* Jun 11, 2007 FTP download permitted even if server rejects
* SIZE request (ProFTPD).
* Aug 11, 2007 Backland' fix for progress bar redraw/style
* issue in NSISdl display mode.
* Jan 09, 2008 {_trueparuex^}' fix - InternetSetFilePointer()
* returns -1 on error.
* /question option added for cancel question.
* Feb 15, 2008 PUT content-length file size fix
* Feb 17, 2008 char -> TCHAR replace for UNICODE option
* Feb 19, 2008 janekschwarz fix for HTTP PUT with auth
* CreateFile INVALID_HANDLE_VALUE on error fix
* Feb 20, 2008 base64 encoder update for unicode
* Feb 27, 2008 Unicode configurations added to VS 6 dsp
* Mar 20, 2008 HTTP PUT with proxy auth finally fixed
* FTP errors handling improved.
* HEAD bug fixed
* Mar 27, 2008 Details window hide/show in NSISdl mode
* Apr 10, 2008 Auth test method changed to HEAD for
* old proxy's
* Apr 30, 2008 InternetErrorDlg() ERROR_SUCESS on cancel
* click patched
* 3xx errors added to status list.
* May 20, 2008 InternetReadFile on cable disconnect patched
* May 20, 2008 Reply status "0" patch (name resolution?)
* Jul 15, 2008 HTTP 304 parsing. Incorrect size reported fix.
* Aug 21, 2009 Escape sequence convertion removed (caused
* error in signature with %2b requests)
* Marqueue progess bar style for unknown file size.
* Feb 04, 2010 Unicode POST patch - body converted to multibyte
* Jul 11, 2010 /FILE POST option added
* Nov 04, 2010 Disabled cookies and cache for cleanliness
* Feb 14, 2011 Fixed reget bug introduced in previous commit
* Feb 18, 2011 /NOCOOKIES option added
* Mar 02, 2011 User-agent buffer increased. Small memory leak fix
* Mar 23, 2011 Use caption on embedded progressbar - zenpoy
* Apr 05, 2011 reget fix - INTERNET_FLAG_RELOAD for first connect only
* Apr 27, 2011 /receivetimeout option added for big files and antivirus
* Jun 15, 2011 Stack clean up fix on cancel - zenpoy
* Oct 19, 2011 FTP PUT error parsing fix - tperquin
* Aug 19, 2013 Fix focus stealing when in silent - negativesir, JohnTHaller
* Jul 20, 2014 - 1.0.4.4 - Stuart 'Afrow UK' Welch
* /tostack & /tostackconv added
* Version information resource added
* Updated to NSIS 3.0 plugin API
* Upgraded to Visual Studio 2012
* 64-bit build added
* MSVCRT dependency removed
* Sep 04, 2015 - 1.0.5.0 - anders_k
* HTTPS connections are more secure by default
* Added /weaksecurity switch, reverts to old cert. security checks
* Sep 06, 2015 - 1.0.5.1 - anders_k
* Don't allow infinite FtpCreateDirectory tries
* Use memset intrinsic when possible to avoid VC code generation bug
* Oct 17, 2015 - 1.0.5.2 - anders_k
* Tries to set FTP mode to binary before querying the size.
* Calls FtpGetFileSize if it exists.
*******************************************************/
#define _WIN32_WINNT 0x0500
#include <windows.h>
//#include <tchar.h>
#include <wininet.h>
#include <commctrl.h>
#include "pluginapi.h"
#include "resource.h"
#include <string.h> // strstr etc
#ifndef PBM_SETMARQUEE
#define PBM_SETMARQUEE (WM_USER + 10)
#define PBS_MARQUEE 0x08
#endif
#ifndef HTTP_QUERY_PROXY_AUTHORIZATION
#define HTTP_QUERY_PROXY_AUTHORIZATION 61
#endif
#ifndef SECURITY_FLAG_IGNORE_REVOCATION
#define SECURITY_FLAG_IGNORE_REVOCATION 0x00000080
#endif
#ifndef SECURITY_FLAG_IGNORE_UNKNOWN_CA
#define SECURITY_FLAG_IGNORE_UNKNOWN_CA 0x00000100
#endif
// IE 4 safety and VS 6 compatibility
typedef BOOL (__stdcall *FTP_CMD)(HINTERNET,BOOL,DWORD,LPCTSTR,DWORD,HINTERNET *);
FTP_CMD myFtpCommand;
#define PLUGIN_NAME TEXT("Inetc plug-in")
#define INETC_USERAGENT TEXT("NSIS_Inetc (Mozilla)")
#define PB_RANGE 400 // progress bar values range
#define PAUSE1_SEC 2 // transfer error indication time, for reget only
#define PAUSE2_SEC 3 // paused state time, increase this if need (60?)
#define PAUSE3_SEC 1 // pause after resume button pressed
#define NOT_AVAILABLE 0xffffffff
#define POST_HEADER TEXT("Content-Type: application/x-www-form-urlencoded")
#define PUT_HEADER TEXT("Content-Type: octet-stream\nContent-Length: %d")
#define INTERNAL_OK 0xFFEE
#define PROGRESS_MS 1000 // screen values update interval
#define DEF_QUESTION TEXT("Are you sure that you want to stop download?")
#define HOST_AUTH_HDR TEXT("Authorization: basic %s")
#define PROXY_AUTH_HDR TEXT("Proxy-authorization: basic %s")
//#define MY_WEAKSECURITY_CERT_FLAGS SECURITY_FLAG_IGNORE_UNKNOWN_CA | SECURITY_FLAG_IGNORE_REVOCATION | SECURITY_FLAG_IGNORE_CERT_DATE_INVALID | SECURITY_FLAG_IGNORE_CERT_CN_INVALID
#define MY_WEAKSECURITY_CERT_FLAGS SECURITY_FLAG_IGNORE_UNKNOWN_CA | SECURITY_FLAG_IGNORE_REVOCATION
#define MY_REDIR_FLAGS INTERNET_FLAG_IGNORE_REDIRECT_TO_HTTP | INTERNET_FLAG_IGNORE_REDIRECT_TO_HTTPS
#define MY_HTTPS_FLAGS (MY_REDIR_FLAGS | INTERNET_FLAG_SECURE)
enum STATUS_CODES {
ST_OK = 0,
ST_CONNECTING,
ST_DOWNLOAD,
ST_CANCELLED,
ST_URLOPEN,
// ST_OPENING,
ST_PAUSE,
ERR_TERMINATED,
ERR_DIALOG,
ERR_INETOPEN,
ERR_URLOPEN,
ERR_TRANSFER,
ERR_FILEOPEN,
ERR_FILEWRITE,
ERR_FILEREAD,
ERR_REGET,
ERR_CONNECT,
ERR_OPENREQUEST,
ERR_SENDREQUEST,
ERR_CRACKURL,
ERR_NOTFOUND,
ERR_THREAD,
ERR_PROXY,
ERR_FORBIDDEN,
ERR_NOTALLOWED,
ERR_REQUEST,
ERR_SERVER,
ERR_AUTH,
ERR_CREATEDIR,
ERR_PATH,
ERR_NOTMODIFIED,
ERR_REDIRECTION
};
static TCHAR szStatus[][32] = {
TEXT("OK"),TEXT("Connecting"),TEXT("Downloading"),TEXT("Cancelled"),TEXT("Connecting"), //TEXT("Opening URL")),
TEXT("Reconnect Pause"),TEXT("Terminated"),TEXT("Dialog Error"),TEXT("Open Internet Error"),
TEXT("Open URL Error"),TEXT("Transfer Error"),TEXT("File Open Error"),TEXT("File Write Error"),TEXT("File Read Error"),
TEXT("Reget Error"),TEXT("Connection Error"),TEXT("OpenRequest Error"),TEXT("SendRequest Error"),
TEXT("URL Parts Error"),TEXT("File Not Found (404)"),TEXT("CreateThread Error"),TEXT("Proxy Error (407)"),
TEXT("Access Forbidden (403)"),TEXT("Not Allowed (405)"),TEXT("Request Error"),TEXT("Server Error"),
TEXT("Unauthorized (401)"),TEXT("FtpCreateDir failed (550)"),TEXT("Error FTP path (550)"),TEXT("Not Modified"),
TEXT("Redirection")
};
HINSTANCE g_hInstance;
TCHAR fn[MAX_PATH]=TEXT(""),
*url = NULL,
*szAlias = NULL,
*szProxy = NULL,
*szHeader = NULL,
*szBanner = NULL,
*szQuestion = NULL,
szCancel[64]=TEXT(""),
szCaption[128]=TEXT(""),
szUserAgent[256]=TEXT(""),
szResume[256] = TEXT("Your internet connection seems to be not permitted or dropped out!\nPlease reconnect and click Retry to resume installation.");
CHAR *szPost = NULL,
post_fname[MAX_PATH] = "";
DWORD fSize = 0;
TCHAR *szToStack = NULL;
int status;
DWORD cnt = 0,
cntToStack = 0,
fs = 0,
timeout = 0,
receivetimeout = 0;
DWORD startTime, transfStart, openType;
bool silent, popup, resume, nocancel, noproxy, nocookies, convToStack, g_ignorecertissues;
HWND childwnd;
HWND hDlg;
bool fput = false, fhead = false;
#define Option_IgnoreCertIssues() ( g_ignorecertissues )
static FARPROC GetWininetProcAddress(LPCSTR Name)
{
return GetProcAddress(LoadLibraryA("WININET"), Name);
}
/*****************************************************
* FUNCTION NAME: sf(HWND)
* PURPOSE:
* moves HWND to top and activates it
* SPECIAL CONSIDERATIONS:
* commented because annoying
*****************************************************/
/*
void sf(HWND hw)
{
DWORD ctid = GetCurrentThreadId();
DWORD ftid = GetWindowThreadProcessId(GetForegroundWindow(), NULL);
AttachThreadInput(ftid, ctid, TRUE);
SetForegroundWindow(hw);
AttachThreadInput(ftid, ctid, FALSE);
}
*/
static TCHAR szUrl[64] = TEXT("");
static TCHAR szDownloading[64] = TEXT("Downloading %s");
static TCHAR szConnecting[64] = TEXT("Connecting ...");
static TCHAR szSecond[64] = TEXT("second");
static TCHAR szMinute[32] = TEXT("minute");
static TCHAR szHour[32] = TEXT("hour");
static TCHAR szPlural[32] = TEXT("s");
static TCHAR szProgress[128] = TEXT("%dkB (%d%%) of %dkB @ %d.%01dkB/s");
static TCHAR szRemaining[64] = TEXT(" (%d %s%s remaining)");
static TCHAR szBasic[128] = TEXT("");
static TCHAR szAuth[128] = TEXT("");
// is it possible to make it working with unicode strings?
/* Base64 encode one byte */
/* Maps one 6-bit value (0..63) to its Base64 alphabet character
   (A-Z, a-z, 0-9, '+', '/'). Values above 63 also map to '/'. */
static TCHAR encode(unsigned char u) {
  if(u < 26) return TEXT('A')+u;
  if(u < 52) return TEXT('a')+(u-26);
  if(u < 62) return TEXT('0')+(u-52);
  if(u == 62) return TEXT('+');
  return TEXT('/');
}
/* Base64-encodes `size` characters of src into dst (with '=' padding) and
   returns dst, or NULL when src is NULL. size==0 means "use lstrlen(src)".
   NOTE(review): dst is NOT NUL-terminated here — callers rely on dst being
   zero-initialized (the static szBasic/szAuth buffers). Also each TCHAR is
   narrowed to unsigned char, so in Unicode builds only the low byte of each
   character is encoded (see the "is it possible..." comment above). */
TCHAR *encode_base64(int size, TCHAR *src, TCHAR *dst) {
  int i;
  TCHAR *p;
  if(!src)
    return NULL;
  if(!size)
    size= lstrlen(src);
  p = dst;
  for(i=0; i<size; i+=3) {  // process input 3 bytes -> 4 output chars
    unsigned char b1=0, b2=0, b3=0, b4=0, b5=0, b6=0, b7=0;
    b1 = (unsigned char)src[i];
    if(i+1<size)
      b2 = (unsigned char)src[i+1];
    if(i+2<size)
      b3 = (unsigned char)src[i+2];
    b4= b1>>2;                  // four 6-bit groups out of the 24-bit block
    b5= ((b1&0x3)<<4)|(b2>>4);
    b6= ((b2&0xf)<<2)|(b3>>6);
    b7= b3&0x3f;
    *p++= encode(b4);
    *p++= encode(b5);
    if(i+1<size) {
      *p++= encode(b6);
    } else {
      *p++= TEXT('=');          // pad when fewer than 2 bytes remained
    }
    if(i+2<size) {
      *p++= encode(b7);
    } else {
      *p++= TEXT('=');          // pad when fewer than 3 bytes remained
    }
  }
  return dst;
}
/*****************************************************
* FUNCTION NAME: fileTransfer()
* PURPOSE:
* http/ftp file transfer itself
* for any protocol and both directions I guess
* SPECIAL CONSIDERATIONS:
*
*****************************************************/
/*****************************************************
 * FUNCTION NAME: fileTransfer()
 * PURPOSE:
 *    http/ftp file transfer itself
 *    for any protocol and both directions I guess
 * SPECIAL CONSIDERATIONS:
 *    Loops until the global `status` leaves ST_DOWNLOAD (set here on
 *    EOF/error, or externally on cancel). Uploads read from localFile
 *    and write via InternetWriteFile; downloads read via
 *    InternetReadFile into localFile or the szToStack buffer.
 *    Fix: on a partial InternetWriteFile the source pointer `dw` was
 *    reset to the start of data_buf on every inner-loop pass, so the
 *    remaining bytes were re-sent from the buffer start, corrupting
 *    the upload. `dw` is now initialized once per chunk and advanced.
 *****************************************************/
void fileTransfer(HANDLE localFile,
                  HINTERNET hFile)
{
  static BYTE data_buf[1024*8];
  BYTE *dw;
  DWORD rslt = 0;
  DWORD bytesDone;
  status = ST_DOWNLOAD;
  while(status == ST_DOWNLOAD)
  {
    if(fput) // upload: local file -> remote
    {
      if(!ReadFile(localFile, data_buf, rslt = sizeof(data_buf), &bytesDone, NULL))
      {
        status = ERR_FILEREAD;
        break;
      }
      if(bytesDone == 0) // EOF reached
      {
        status = ST_OK;
        break;
      }
      dw = data_buf; // set once per chunk; advanced across partial writes
      while(bytesDone > 0)
      {
        if(!InternetWriteFile(hFile, dw, bytesDone, &rslt) || rslt == 0)
        {
          status = ERR_TRANSFER;
          break;
        }
        dw += rslt;
        cnt += rslt;
        bytesDone -= rslt;
      }
    }
    else // download: remote -> local file or in-memory stack buffer
    {
      if(!InternetReadFile(hFile, data_buf, sizeof(data_buf), &rslt))
      {
        status = ERR_TRANSFER;
        break;
      }
      if(rslt == 0) // EOF reached or cable disconnect
      {
        // on cable disconnect returns TRUE and 0 bytes. is cnt == 0 OK (zero file size)?
        // cannot check this if reply is chunked (no content-length, http 1.1)
        status = (fs != NOT_AVAILABLE && cnt < fs) ? ERR_TRANSFER : ST_OK;
        break;
      }
      if(szToStack) // /tostack mode: accumulate into the string buffer
      {
        for (DWORD i = 0; cntToStack < g_stringsize && i < rslt; i++, cntToStack++)
          *(szToStack + cntToStack) = data_buf[i];
      }
      else if(!WriteFile(localFile, data_buf, rslt, &bytesDone, NULL) ||
        rslt != bytesDone)
      {
        status = ERR_FILEWRITE;
        break;
      }
      cnt += rslt;
    }
  }
}
/*****************************************************
* FUNCTION NAME: mySendRequest()
* PURPOSE:
* HttpSendRequestEx() sends headers only - for PUT
* We also can use InetWriteFile for POST body I guess
* SPECIAL CONSIDERATIONS:
*
*****************************************************/
/*****************************************************
 * FUNCTION NAME: mySendRequest()
 * PURPOSE:
 *    Sends the HTTP request. For PUT only the headers are sent here
 *    (HSR_INITIATE) with dwBufferTotal = fs (the file size); the body
 *    follows via InternetWriteFile in fileTransfer(). Otherwise the
 *    optional POST body szPost (fSize bytes) is sent in a single call.
 *****************************************************/
int mySendRequest(HINTERNET hFile)
{
  INTERNET_BUFFERS BufferIn = {0};
  if(fput)
  {
    BufferIn.dwStructSize = sizeof( INTERNET_BUFFERS );
    BufferIn.dwBufferTotal = fs;
    return HttpSendRequestEx( hFile, &BufferIn, NULL, HSR_INITIATE, 0);
  }
  return HttpSendRequest(hFile, NULL, 0, szPost, fSize);
}
/*****************************************************
* FUNCTION NAME: queryStatus()
* PURPOSE:
* http status code comes before download (get) and
* after upload (put), so this is called from 2 places
* SPECIAL CONSIDERATIONS:
*
*****************************************************/
// Reads the HTTP reply status code and maps it onto the global `status`
// enum / szStatus message table. Returns true when HttpQueryInfo succeeded
// (even if the code itself indicates an error), false when the query failed.
// NOTE(review): for 3xx/4xx/5xx the numeric code is appended in place to the
// static szStatus entry, so repeated calls would keep appending — confirm
// this is only reached once per transfer.
bool queryStatus(HINTERNET hFile)
{
  TCHAR buf[256] = TEXT("");
  DWORD rslt;
  if(HttpQueryInfo(hFile, HTTP_QUERY_STATUS_CODE,
    buf, &(rslt = sizeof(buf)), NULL))
  {
    buf[3] = 0; // keep only the 3-digit code
    if(lstrcmp(buf, TEXT("0")) == 0 || *buf == 0)
      status = ERR_SENDREQUEST;   // empty/"0" reply (name resolution?)
    else if(lstrcmp(buf, TEXT("401")) == 0)
      status = ERR_AUTH;
    else if(lstrcmp(buf, TEXT("403")) == 0)
      status = ERR_FORBIDDEN;
    else if(lstrcmp(buf, TEXT("404")) == 0)
      status = ERR_NOTFOUND;
    else if(lstrcmp(buf, TEXT("407")) == 0)
      status = ERR_PROXY;
    else if(lstrcmp(buf, TEXT("405")) == 0)
      status = ERR_NOTALLOWED;
    else if(lstrcmp(buf, TEXT("304")) == 0)
      status = ERR_NOTMODIFIED;
    else if(*buf == TEXT('3')) // other 3xx: redirection
    {
      status = ERR_REDIRECTION;
      wsprintf(szStatus[status] + lstrlen(szStatus[status]), TEXT(" (%s)"), buf);
    }
    else if(*buf == TEXT('4')) // other 4xx: client-side request error
    {
      status = ERR_REQUEST;
      wsprintf(szStatus[status] + lstrlen(szStatus[status]), TEXT(" (%s)"), buf);
    }
    else if(*buf == TEXT('5')) // 5xx: server error
    {
      status = ERR_SERVER;
      wsprintf(szStatus[status] + lstrlen(szStatus[status]), TEXT(" (%s)"), buf);
    }
    return true;
  }
  return false;
}
/*****************************************************
* FUNCTION NAME: openFtpFile()
* PURPOSE:
* control connection, size request, re-get lseek
* SPECIAL CONSIDERATIONS:
*
*****************************************************/
/* Opens the remote file over an established FTP control connection.
   - fresh download (cnt == 0): switches to binary TYPE and issues SIZE
     to learn the remote length into the global 'fs' (NOT_AVAILABLE on
     failure);
   - resume (cnt != 0): issues REST to seek to the restart offset;
   - upload answered with 550/553: creates the missing directory chain
     segment by segment, then retries the open.
   Returns the opened file handle, or NULL with the global 'status'
   describing the failure. */
HINTERNET openFtpFile(HINTERNET hConn,
                      TCHAR *path)
{
    TCHAR buf[256] = TEXT(""), *movp;
    HINTERNET hFile;
    DWORD rslt, err, gle;
    bool https_req_ok = false; // NOTE(review): unused in this function
    /* reads connection / auth response info and clears the 'control' buffer this way */
    InternetGetLastResponseInfo(&err, buf, &(rslt = sizeof(buf)));
    if(cnt == 0)
    {
        if(!fput) // we know local file size already
        {
            if (myFtpCommand)
            {
                /* Try to set the REPRESENTATION TYPE to I[mage] (Binary) because some servers
                   don't accept the SIZE command in ASCII mode */
                myFtpCommand(hConn, false, FTP_TRANSFER_TYPE_ASCII, TEXT("TYPE I"), 0, &hFile);
            }
            /* too clever myFtpCommand returns false on the valid "550 Not found/Not permitted"
               server answer; to still read the answer its return value is effectively
               ignored (the != 9999 test always holds) :-(
               GetLastError is also possible, but the MSDN description of codes is very limited */
            wsprintf(buf, TEXT("SIZE %s"), path + 1); // path + 1 skips the leading '/'
            if(myFtpCommand != NULL &&
                myFtpCommand(hConn, false, FTP_TRANSFER_TYPE_ASCII, buf, 0, &hFile) != 9999 &&
                memset(buf, 0, sizeof(buf)) != NULL &&
                InternetGetLastResponseInfo(&err, buf, &(rslt = sizeof(buf))))
            {
                // "213 <bytes>" is the successful SIZE reply
                if(_tcsstr(buf, TEXT("213 ")))
                {
                    fs = myatou(_tcschr(buf, TEXT(' ')) + 1);
                }
                /* stupid ProFTPD returns error on SIZE request. let's continue without size.
                   But IE knows some trick to get size from ProFTPD......
                else if(mystrstr(buf, TEXT("550 TEXT("))
                {
                status = ERR_SIZE_NOT_PERMITTED;
                return NULL;
                }
                */
            }
            if(fs == 0)
            {
                fs = NOT_AVAILABLE; // size unknown - the UI switches to marquee mode
            }
        }
    }
    else
    {
        // resume: REST sets the restart offset; "350"/"110" acknowledge it
        wsprintf(buf, TEXT("REST %d"), cnt);
        if(myFtpCommand == NULL ||
            !myFtpCommand(hConn, false, FTP_TRANSFER_TYPE_BINARY, buf, 0, &hFile) ||
            memset(buf, 0, sizeof(buf)) == NULL ||
            !InternetGetLastResponseInfo(&err, buf, &(rslt = sizeof(buf))) ||
            (_tcsstr(buf, TEXT("350")) == NULL && _tcsstr(buf, TEXT("110")) == NULL))
        {
            status = ERR_REGET;
            return NULL;
        }
    }
    if((hFile = FtpOpenFile(hConn, path + 1, fput ? GENERIC_WRITE : GENERIC_READ,
        FTP_TRANSFER_TYPE_BINARY|INTERNET_FLAG_RELOAD,0)) == NULL)
    {
        gle = GetLastError();
        *buf = 0;
        InternetGetLastResponseInfo(&err, buf, &(rslt = sizeof(buf)));
        // wrong path - dir may not exist or upload may be not allowed
        // we use ftp://host//path (double /) to define path from FS root
        if(fput && (_tcsstr(buf, TEXT("550")) != NULL || _tcsstr(buf, TEXT("553")) != NULL))
        {
            // create the directory chain one '/'-separated segment at a time
            movp = path + 1;
            if(*movp == TEXT('/')) movp++; // don't need to create root
            for (UINT8 escapehatch = 0; ++escapehatch;) // Weak workaround for http://forums.winamp.com/showpost.php?p=3031692&postcount=513 bug
            {
                TCHAR *pbs = _tcschr(movp, TEXT('/'));
                if (!pbs) break;
                *pbs = TEXT('\0'); // truncate at this segment ...
                FtpCreateDirectory(hConn, path + 1);
                InternetGetLastResponseInfo(&err, buf, &(rslt = sizeof(buf)));
                *(movp + lstrlen(movp)) = TEXT('/'); // ... and restore the separator
                movp = _tcschr(movp, TEXT('/')) + 1;
            }
            if(status != ERR_CREATEDIR &&
                (hFile = FtpOpenFile(hConn, path + 1, GENERIC_WRITE,
                FTP_TRANSFER_TYPE_BINARY|INTERNET_FLAG_RELOAD,0)) == NULL)
            {
                status = ERR_PATH;
                if(InternetGetLastResponseInfo(&err, buf, &(rslt = sizeof(buf))))
                    lstrcpyn(szStatus[status], _tcsstr(buf, TEXT("550")), sizeof(szStatus[0]) / sizeof(TCHAR));
            }
        }
        // may be firewall related error, let's give user time to disable it
        else if(gle == 12003) // ERROR_INTERNET_EXTENDED_ERROR
        {
            if(_tcsstr(buf, TEXT("550")))
            {
                status = ERR_NOTFOUND;
                lstrcpyn(szStatus[status], _tcsstr(buf, TEXT("550")), sizeof(szStatus[0]) / sizeof(TCHAR));
            }
            else
            {
                lstrcpyn(szStatus[status], buf, sizeof(szStatus[0]) / sizeof(TCHAR));
            }
        }
        // timeout (firewall or dropped connection problem)
        else if(gle == 12002) // ERROR_INTERNET_TIMEOUT
        {
            if(!silent)
                resume = true;
            status = ERR_URLOPEN;
        }
    }
    else
        InternetGetLastResponseInfo(&err, buf, &(rslt = sizeof(buf)));
    // last chance to learn an unknown size: FtpGetFileSize (IE5+ only)
    if (hFile && NOT_AVAILABLE == fs)
    {
        FARPROC ftpgfs = GetWininetProcAddress("FtpGetFileSize"); // IE5+
        if (ftpgfs)
        {
            DWORD shi, slo = ((DWORD(WINAPI*)(HINTERNET,DWORD*))ftpgfs)(hFile, &shi);
            if (slo != -1 && !shi) fs = slo; // only sizes < 4 GB are representable
        }
    }
    return hFile;
}
/*****************************************************
* FUNCTION NAME: openHttpFile()
* PURPOSE:
* file open, size request, re-get lseek
* SPECIAL CONSIDERATIONS:
*
*****************************************************/
/* Opens the HTTP/HTTPS request for the current transfer.
   For uploads a preliminary HEAD request probes the server first (the
   only way to validate auth/proxy credentials before the PUT body goes
   out).  Then the real PUT/POST/HEAD/GET request is opened, custom and
   auth headers are attached, and the request is sent - twice for HTTPS
   when the first send fails, after relaxing the security flags.  On
   success the remote size is queried (fresh transfer) or the read
   pointer is moved to 'cnt' (resume).  Returns the request handle, or
   NULL/closed handle with the global 'status' set on failure. */
HINTERNET openHttpFile(HINTERNET hConn, INTERNET_SCHEME nScheme, TCHAR *path)
{
    TCHAR buf[256] = TEXT("");
    HINTERNET hFile;
    DWORD rslt, err;
    bool first_attempt = true;
    // test connection for PUT, the only way to do this before sending data.
    // OPTIONS fails on the HttpOpenRequest step for HTTPS, HEAD works.
    if(fput)// && nScheme != INTERNET_SCHEME_HTTPS)
    {
        // old proxies may not support the OPTIONS request, so HEAD is used
        if((hFile = HttpOpenRequest(hConn, TEXT("HEAD"), path, NULL, NULL, NULL,
            INTERNET_FLAG_RELOAD | INTERNET_FLAG_KEEP_CONNECTION |
            (nocookies ? (INTERNET_FLAG_NO_CACHE_WRITE | INTERNET_FLAG_NO_COOKIES) : 0), 0)) != NULL)
        {
            if(*szAuth)
            {
                wsprintf(buf, PROXY_AUTH_HDR, szAuth);
                HttpAddRequestHeaders(hFile, buf, -1,
                    HTTP_ADDREQ_FLAG_ADD | HTTP_ADDREQ_FLAG_REPLACE);
            }
resend_proxy1:
            if(*szBasic)
            {
                wsprintf(buf, HOST_AUTH_HDR, szBasic);
                HttpAddRequestHeaders(hFile, buf, -1,
                    HTTP_ADDREQ_FLAG_ADD | HTTP_ADDREQ_FLAG_REPLACE);
            }
resend_auth1:
            if(HttpSendRequest(hFile, NULL, 0, NULL, 0))
            {
                queryStatus(hFile);
                // may be don't need to read all from socket, but this looks safer
                while(InternetReadFile(hFile, buf, sizeof(buf), &rslt) && rslt > 0) {}
                if(!silent && (status == ERR_PROXY || status == ERR_AUTH))// || status == ERR_FORBIDDEN))
                {
                    rslt = InternetErrorDlg(hDlg, hFile,
                        ERROR_INTERNET_INCORRECT_PASSWORD,
                        FLAGS_ERROR_UI_FILTER_FOR_ERRORS |
                        FLAGS_ERROR_UI_FLAGS_GENERATE_DATA |
                        FLAGS_ERROR_UI_FLAGS_CHANGE_OPTIONS,
                        NULL);
                    if (rslt == ERROR_INTERNET_FORCE_RETRY)
                    {
                        /* BUGFIX: the original reset status = ST_URLOPEN
                           *before* comparing it with ERR_PROXY, so the proxy
                           retry label was unreachable.  Latch the cause first. */
                        bool proxyRetry = (status == ERR_PROXY);
                        status = ST_URLOPEN;
                        if(proxyRetry) goto resend_proxy1;
                        else goto resend_auth1;
                    }
                    else
                    {
                        status = ST_CANCELLED;
                    }
                }
                // no such file is OK for PUT. server first checks authentication
                if(status == ERR_NOTFOUND || status == ERR_FORBIDDEN || status == ERR_NOTALLOWED)
                {
                    status = ST_URLOPEN;
                }
                // parameters might be updated during dialog popup
                if(status == ST_URLOPEN)
                {
                    *buf = 0;
                    if(HttpQueryInfo(hFile, HTTP_QUERY_AUTHORIZATION, buf, &(rslt = sizeof(buf)), NULL) && *buf)
                        lstrcpyn(szBasic, buf, rslt);
                    *buf = 0;
                    if(HttpQueryInfo(hFile, HTTP_QUERY_PROXY_AUTHORIZATION, buf, &(rslt = sizeof(buf)), NULL) && *buf)
                        lstrcpyn(szAuth, buf, rslt);
                }
            }
            else status = ERR_SENDREQUEST;
            InternetCloseHandle(hFile); // the probe handle is never reused
        }
        else status = ERR_OPENREQUEST;
    }
    // request itself
    if(status == ST_URLOPEN)
    {
        DWORD secflags = nScheme == INTERNET_SCHEME_HTTPS ? MY_HTTPS_FLAGS : 0;
        if (Option_IgnoreCertIssues()) secflags |= MY_WEAKSECURITY_CERT_FLAGS;
        DWORD cokflags = nocookies ? (INTERNET_FLAG_NO_CACHE_WRITE | INTERNET_FLAG_NO_COOKIES) : 0;
        if((hFile = HttpOpenRequest(hConn, fput ? TEXT("PUT") : (fhead ? TEXT("HEAD") : (szPost ? TEXT("POST") : NULL)),
            path, NULL, NULL, NULL,
            // INTERNET_FLAG_RELOAD conflicts with reget - hidden re-read from beginning has place
            // INTERNET_FLAG_RESYNCHRONIZE // note - sync may not work with some http servers
            // reload on first connect (and any req. except GET), just continue on resume.
            // HTTP Proxy still is a problem for reget
            (cnt ? 0 : INTERNET_FLAG_RELOAD) | INTERNET_FLAG_KEEP_CONNECTION | cokflags | secflags, 0)) != NULL)
        {
            if(*szAuth)
            {
                wsprintf(buf, PROXY_AUTH_HDR, szAuth);
                HttpAddRequestHeaders(hFile, buf, -1,
                    HTTP_ADDREQ_FLAG_ADD | HTTP_ADDREQ_FLAG_REPLACE);
            }
resend_proxy2:
            if(szPost != NULL)
                HttpAddRequestHeaders(hFile, POST_HEADER,
                    -1, HTTP_ADDREQ_FLAG_ADD | HTTP_ADDREQ_FLAG_REPLACE);
            if(*post_fname)
                HttpAddRequestHeadersA(hFile, post_fname,
                    -1, HTTP_ADDREQ_FLAG_ADD | HTTP_ADDREQ_FLAG_REPLACE);
            if(szHeader != NULL)
                HttpAddRequestHeaders(hFile, szHeader, -1,
                    HTTP_ADDREQ_FLAG_ADD | HTTP_ADDREQ_FLAG_REPLACE);
            if(*szBasic)
            {
                wsprintf(buf, HOST_AUTH_HDR, szBasic);
                HttpAddRequestHeaders(hFile, buf, -1,
                    HTTP_ADDREQ_FLAG_ADD | HTTP_ADDREQ_FLAG_REPLACE);
            }
            if(fput)
            {
                wsprintf(buf, PUT_HEADER, fs);
                HttpAddRequestHeaders(hFile, buf, -1,
                    HTTP_ADDREQ_FLAG_ADD | HTTP_ADDREQ_FLAG_REPLACE);
            }
resend_auth2:
            first_attempt = true;
            if(nScheme == INTERNET_SCHEME_HTTPS)
            {
                if(!mySendRequest(hFile))
                {
                    // first HTTPS send failed: relax the security flags
                    // and fall through to the retry send below
                    InternetQueryOption (hFile, INTERNET_OPTION_SECURITY_FLAGS,
                        (LPVOID)&rslt, &(err = sizeof(rslt)));
                    rslt |= Option_IgnoreCertIssues() ? MY_WEAKSECURITY_CERT_FLAGS : 0;
                    InternetSetOption (hFile, INTERNET_OPTION_SECURITY_FLAGS,
                        &rslt, sizeof(rslt) );
                }
                else first_attempt = false;
            }
            // https Request answer may be after optional second Send only on Win98
            if(!first_attempt || mySendRequest(hFile))
            {
                // no status for PUT - headers were sent only. And no need to get size / set position
                if(!fput)
                {
                    queryStatus(hFile);
                    if(!silent && (status == ERR_PROXY || status == ERR_AUTH))
                    {
                        rslt = InternetErrorDlg(hDlg, hFile,
                            ERROR_INTERNET_INCORRECT_PASSWORD,
                            FLAGS_ERROR_UI_FILTER_FOR_ERRORS |
                            FLAGS_ERROR_UI_FLAGS_GENERATE_DATA |
                            FLAGS_ERROR_UI_FLAGS_CHANGE_OPTIONS,
                            NULL);
                        if (rslt == ERROR_INTERNET_FORCE_RETRY)
                        {
                            /* BUGFIX: same reset-before-test bug as in the
                               upload probe above; latch the cause first. */
                            bool proxyRetry = (status == ERR_PROXY);
                            status = ST_URLOPEN;
                            if(proxyRetry) goto resend_proxy2;
                            else goto resend_auth2;
                        }
                        else
                            status = ST_CANCELLED;
                    }
                    // get size / set position
                    if(status == ST_URLOPEN)
                    {
                        if(cnt == 0)
                        {
                            if(HttpQueryInfo(hFile, HTTP_QUERY_CONTENT_LENGTH, buf,
                                &(rslt = sizeof(buf)), NULL))
                                fs = myatou(buf);
                            else
                                fs = NOT_AVAILABLE; // chunked / unknown length
                        }
                        else
                        {
                            // resume: skip the part we already have
                            if((int)InternetSetFilePointer(hFile, cnt, NULL, FILE_BEGIN, 0) == -1)
                                status = ERR_REGET;
                        }
                    }
                }
            }
            else
            {
                if(!queryStatus(hFile))
                    status = ERR_SENDREQUEST;
            }
        }
        else status = ERR_OPENREQUEST;
    }
    return hFile;
}
/*****************************************************
* FUNCTION NAME: inetTransfer()
* PURPOSE:
* http/ftp file transfer
* SPECIAL CONSIDERATIONS:
*
*****************************************************/
/* Worker-thread entry point (spawned from get()); 'hw' is the progress
   dialog handle.  Pops url/filename pairs from the NSIS stack (until
   "/end"), opens the WinInet session and per-url connection, delegates
   the protocol-specific open to openFtpFile()/openHttpFile(), moves
   the bytes with fileTransfer() and implements the pause / resume
   retry policy.  Returns (and leaves in the global) the final status. */
DWORD __stdcall inetTransfer(void *hw)
{
    HINTERNET hSes, hConn, hFile;
    HANDLE localFile = NULL;
    HWND hDlg = (HWND)hw;
    DWORD lastCnt, rslt, err;
    static TCHAR hdr[2048]; // scratch: raw headers / ftp replies / "user:pass"
    // NSIS-string-sized buffers receiving the cracked URL components
    TCHAR *host = (TCHAR*)LocalAlloc(LPTR, g_stringsize * sizeof(TCHAR)),
        *path = (TCHAR*)LocalAlloc(LPTR, g_stringsize * sizeof(TCHAR)),
        *params = (TCHAR*)LocalAlloc(LPTR, g_stringsize * sizeof(TCHAR)),
        *user = (TCHAR*)LocalAlloc(LPTR, g_stringsize * sizeof(TCHAR)),
        *passwd = (TCHAR*)LocalAlloc(LPTR, g_stringsize * sizeof(TCHAR));
    URL_COMPONENTS uc = {sizeof(URL_COMPONENTS), NULL, 0,
        (INTERNET_SCHEME)0, host, g_stringsize, 0 , user, g_stringsize,
        passwd, g_stringsize, path, g_stringsize, params, g_stringsize};
    if((hSes = InternetOpen(szUserAgent, openType, szProxy, NULL, 0)) != NULL)
    {
        // leave "work offline" mode if the user had switched it on
        if(InternetQueryOption(hSes, INTERNET_OPTION_CONNECTED_STATE, &(rslt=0),
            &(lastCnt=sizeof(DWORD))) &&
            (rslt & INTERNET_STATE_DISCONNECTED_BY_USER))
        {
            INTERNET_CONNECTED_INFO ci = {INTERNET_STATE_CONNECTED, 0};
            InternetSetOption(hSes,
                INTERNET_OPTION_CONNECTED_STATE, &ci, sizeof(ci));
        }
        if(timeout > 0)
            lastCnt = InternetSetOption(hSes, INTERNET_OPTION_CONNECT_TIMEOUT, &timeout, sizeof(timeout));
        if(receivetimeout > 0)
            InternetSetOption(hSes, INTERNET_OPTION_RECEIVE_TIMEOUT, &receivetimeout, sizeof(receivetimeout));
        // 60 sec WinInet.dll detach delay on socket time_wait fix
        myFtpCommand = (FTP_CMD) GetWininetProcAddress(
#ifdef UNICODE
            "FtpCommandW"
#else
            "FtpCommandA"
#endif
            );
        // one url / local-file pair per iteration, until the "/end" marker
        while(!popstring(url) && lstrcmpi(url, TEXT("/end")) != 0)
        {
            // too many customers requested not to do this
            // sf(hDlg);
            if(popstring(fn) != 0 || lstrcmpi(url, TEXT("/end")) == 0) break;
            status = ST_CONNECTING;
            cnt = fs = *host = *user = *passwd = *path = *params = 0;
            PostMessage(hDlg, WM_TIMER, 1, 0); // show url & fn, do it sync
            // NOTE(review): when szToStack is set, localFile keeps its previous
            // value (NULL initially) and CloseHandle(localFile) below runs on it.
            if(szToStack || (localFile = CreateFile(fn, fput ? GENERIC_READ : GENERIC_WRITE, FILE_SHARE_READ,
                NULL, fput ? OPEN_EXISTING : CREATE_ALWAYS, 0, NULL)) != INVALID_HANDLE_VALUE)
            {
                // InternetCrackUrl consumed these lengths on the previous pass
                uc.dwHostNameLength = uc.dwUserNameLength = uc.dwPasswordLength =
                    uc.dwUrlPathLength = uc.dwExtraInfoLength = g_stringsize;
                if(fput)
                {
                    fs = GetFileSize(localFile, NULL);
                }
                if(InternetCrackUrl(url, 0, 0/*ICU_ESCAPE*/ , &uc))
                {
                    // auth headers for HTTP PUT seem to be lost, so prepare encoded login:password
                    if(*user && *passwd)
                    {
                        wsprintf(hdr, TEXT("%s:%s"), user, passwd);
                        // does the unicode version of the encoding work correctly?
                        // are user and passwd ascii only?
                        encode_base64(lstrlen(hdr), hdr, szBasic);
                        *hdr = 0;
                    }
                    lstrcat(path, params); // BUGBUG: Could overflow path?
                    transfStart = GetTickCount();
                    do
                    {
                        // re-PUT to already deleted tmp file on http server is not possible.
                        // the same with POST - must re-send data to server. for 'resume' loop
                        if((fput && uc.nScheme != INTERNET_SCHEME_FTP) || szPost)
                        {
                            cnt = 0;
                            SetFilePointer(localFile, 0, NULL, FILE_BEGIN);
                        }
                        status = ST_CONNECTING;
                        lastCnt = cnt; // remember progress to detect a stalled retry
                        if((hConn = InternetConnect(hSes, host, uc.nPort,
                            lstrlen(user) > 0 ? user : NULL,
                            lstrlen(passwd) > 0 ? passwd : NULL,
                            uc.nScheme == INTERNET_SCHEME_FTP ? INTERNET_SERVICE_FTP : INTERNET_SERVICE_HTTP,
                            uc.nScheme == INTERNET_SCHEME_FTP ? INTERNET_FLAG_PASSIVE : 0, 0)) != NULL)
                        {
                            status = ST_URLOPEN;
                            hFile = uc.nScheme == INTERNET_SCHEME_FTP ?
                                openFtpFile(hConn, path) : openHttpFile(hConn, uc.nScheme, path);
                            // the open helpers report failure via the global 'status'
                            if(status != ST_URLOPEN && hFile != NULL)
                            {
                                InternetCloseHandle(hFile);
                                hFile = NULL;
                            }
                            if(hFile != NULL)
                            {
                                if(fhead)
                                {// repeating calls clear headers..
                                    if(HttpQueryInfo(hFile, HTTP_QUERY_RAW_HEADERS_CRLF, hdr, &(rslt=2048), NULL))
                                    {
                                        if(szToStack)
                                        {
                                            // accumulate headers into the in-memory buffer
                                            for (DWORD i = 0; cntToStack < g_stringsize && i < rslt; i++, cntToStack++)
                                                *(szToStack + cntToStack) = hdr[i];
                                        }
                                        else
                                        {
                                            WriteFile(localFile, hdr, rslt, &lastCnt, NULL);
                                        }
                                    }
                                    status = ST_OK;
                                }
                                else
                                {
                                    // unknown total size -> marquee-style progress bar
                                    HWND hBar = GetDlgItem(hDlg, IDC_PROGRESS1);
                                    SendDlgItemMessage(hDlg, IDC_PROGRESS1, PBM_SETPOS, 0, 0);
                                    SetWindowText(GetDlgItem(hDlg, IDC_STATIC5), fs == NOT_AVAILABLE ? TEXT("Not Available") : TEXT(""));
                                    SetWindowText(GetDlgItem(hDlg, IDC_STATIC4), fs == NOT_AVAILABLE ? TEXT("Unknown") : TEXT(""));
                                    SetWindowLong(hBar, GWL_STYLE, fs == NOT_AVAILABLE ?
                                        (GetWindowLong(hBar, GWL_STYLE) | PBS_MARQUEE) : (GetWindowLong(hBar, GWL_STYLE) & ~PBS_MARQUEE));
                                    SendDlgItemMessage(hDlg, IDC_PROGRESS1, PBM_SETMARQUEE, (WPARAM)(fs == NOT_AVAILABLE ? 1 : 0), (LPARAM)50 );
                                    fileTransfer(localFile, hFile);
                                    if(fput && uc.nScheme != INTERNET_SCHEME_FTP)
                                    {
                                        // PUT body sent - finish the request and read the verdict
                                        rslt = HttpEndRequest(hFile, NULL, 0, 0);
                                        queryStatus(hFile);
                                    }
                                }
                                InternetCloseHandle(hFile);
                            }
                            InternetCloseHandle(hConn);
                        }
                        else
                        {
                            status = ERR_CONNECT;
                            // "530" is the ftp "login incorrect" reply - show it verbatim
                            if(uc.nScheme == INTERNET_SCHEME_FTP &&
                                InternetGetLastResponseInfo(&err, hdr, &(rslt = sizeof(hdr))) &&
                                _tcsstr(hdr, TEXT("530")))
                            {
                                lstrcpyn(szStatus[status], _tcsstr(hdr, TEXT("530")), sizeof(szStatus[0]) / sizeof(TCHAR));
                            }
                            else
                            {
                                rslt = GetLastError();
                                // 12002/12003: timeout / extended error - offer the resume dialog
                                if((rslt == 12003 || rslt == 12002) && !silent)
                                    resume = true;
                            }
                        }
                        // retry policy (note the side effects inside the condition):
                        // 1st clause: auto-retry a broken transfer that made progress,
                        //             sleeping PAUSE1+PAUSE2 seconds in ST_PAUSE state;
                        // 2nd clause: /resume mode - hide the dialog, ask the user,
                        //             and retry after PAUSE3 seconds on IDRETRY.
                    } while(((!fput || uc.nScheme == INTERNET_SCHEME_FTP) &&
                        cnt > lastCnt &&
                        status == ERR_TRANSFER &&
                        SleepEx(PAUSE1_SEC * 1000, false) == 0 &&
                        (status = ST_PAUSE) != ST_OK &&
                        SleepEx(PAUSE2_SEC * 1000, false) == 0)
                        || (resume &&
                        status != ST_OK &&
                        status != ST_CANCELLED &&
                        status != ERR_NOTFOUND &&
                        ShowWindow(hDlg, SW_HIDE) != -1 &&
                        MessageBox(GetParent(hDlg), szResume, *szCaption ? szCaption : PLUGIN_NAME, MB_RETRYCANCEL|MB_ICONWARNING) == IDRETRY &&
                        (status = ST_PAUSE) != ST_OK &&
                        ShowWindow(hDlg, silent ? SW_HIDE : SW_SHOW) == false &&
                        SleepEx(PAUSE3_SEC * 1000, false) == 0));
                }
                else status = ERR_CRACKURL;
                CloseHandle(localFile);
                // failed download: remove the partial local file and stop the batch
                if(!fput && status != ST_OK && !szToStack)
                {
                    rslt = DeleteFile(fn);
                    break;
                }
            }
            else status = ERR_FILEOPEN;
        }
        InternetCloseHandle(hSes);
        // loop ended on "/end": push the marker back for get()'s drain loop
        if (lstrcmpi(url, TEXT("/end"))==0)
            pushstring(url);
    }
    else status = ERR_INETOPEN;
    LocalFree(host);
    LocalFree(path);
    LocalFree(user);
    LocalFree(passwd);
    LocalFree(params);
    // tell the dialog (and get()'s message pump) that we are done
    if(IsWindow(hDlg))
        PostMessage(hDlg, WM_COMMAND, MAKELONG(IDOK, INTERNAL_OK), 0);
    return status;
}
/*****************************************************
* FUNCTION NAME: fsFormat()
* PURPOSE:
* formats DWORD (max 4 GB) file size for dialog, big MB
* SPECIAL CONSIDERATIONS:
*
*****************************************************/
/* Formats a DWORD byte count (max 4 GB) into 'b' for the dialog:
   "???" when unknown, "0", "<n> bytes" below 10 kB, "<n> kB" below
   10 MB, "<n> MB" otherwise. */
void fsFormat(DWORD bfs, TCHAR *b)
{
    if(bfs == NOT_AVAILABLE)
    {
        lstrcpy(b, TEXT("???"));
        return;
    }
    if(bfs == 0)
    {
        lstrcpy(b, TEXT("0"));
        return;
    }
    const DWORD tenKB = 10 * 1024;
    const DWORD tenMB = 10 * 1024 * 1024;
    if(bfs < tenKB)
        wsprintf(b, TEXT("%u bytes"), bfs);
    else if(bfs < tenMB)
        wsprintf(b, TEXT("%u kB"), bfs / 1024);
    else
        wsprintf(b, TEXT("%u MB"), bfs / 1024 / 1024);
}
/*****************************************************
* FUNCTION NAME: progress_callback
* PURPOSE:
* old-style progress bar text updates
* SPECIAL CONSIDERATIONS:
*
*****************************************************/
/* Refreshes the embedded ("old style") progress texts: builds the
   received/total/percent/rate line from the szProgress template, an
   optional remaining-time tail from szRemaining, and mirrors the
   caption into the NSIS parent's status control (id 1006). */
void progress_callback(void)
{
    static TCHAR buf[1024] = TEXT(""), b[1024] = TEXT("");
    int time_sofar = max(1, (GetTickCount() - transfStart) / 1000);
    int bps = cnt / time_sofar; // average bytes/sec so far
    // remaining seconds projected from the current average rate
    int remain = (cnt > 0 && fs != NOT_AVAILABLE) ? (MulDiv(time_sofar, fs, cnt) - time_sofar) : 0;
    TCHAR *rtext = szSecond;
    if(remain < 0) remain = 0;
    // scale seconds -> minutes -> hours, keeping the matching unit text
    if (remain >= 60)
    {
        remain /= 60;
        rtext = szMinute;
        if (remain >= 60)
        {
            remain /= 60;
            rtext = szHour;
        }
    }
    wsprintf(buf,
        szProgress,
        cnt/1024,
        fs > 0 && fs != NOT_AVAILABLE ? MulDiv(100, cnt, fs) : 0,
        fs != NOT_AVAILABLE ? fs/1024 : 0,
        bps/1024, ((bps*10)/1024)%10   // rate with one decimal digit
        );
    if (remain) wsprintf(buf + lstrlen(buf),
        szRemaining,
        remain,
        rtext,
        remain == 1 ? TEXT("") : szPlural
        );
    SetDlgItemText(hDlg, IDC_STATIC1, (cnt == 0 || status == ST_CONNECTING) ? szConnecting : buf);
    if(fs > 0 && fs != NOT_AVAILABLE)
        SendMessage(GetDlgItem(hDlg, IDC_PROGRESS1), PBM_SETPOS, MulDiv(cnt, PB_RANGE, fs), 0);
    if (*szCaption == 0)
        wsprintf(buf, szDownloading, _tcsrchr(fn, TEXT('\\')) ? _tcsrchr(fn, TEXT('\\')) + 1 : fn);
    else
        wsprintf(buf, TEXT("%s"), szCaption);
    HWND hwndS = GetDlgItem(childwnd, 1006);
    if(!silent && hwndS != NULL && IsWindow(hwndS))
    {
        /* BUGFIX: GetWindowText() takes the buffer size in TCHARs, not bytes.
           sizeof(b) is 2048 in Unicode builds and overstated the capacity of
           the 1024-TCHAR buffer, allowing an overrun on long control text. */
        GetWindowText(hwndS, b, sizeof(b) / sizeof(TCHAR));
        // only touch the control when the text actually changed (less flicker)
        if(lstrcmp(b, buf) != 0)
            SetWindowText(hwndS, buf);
    }
}
/*****************************************************
* FUNCTION NAME: onTimer()
* PURPOSE:
* updates text fields every second
* SPECIAL CONSIDERATIONS:
*
*****************************************************/
/* Once-a-second refresh of the popup dialog texts: caption with
   percentage, url/alias, file name, received bytes with average rate,
   elapsed time, and - when the total size is known - size, ETA and
   progress bar position. */
void onTimer(HWND hDlg)
{
    TCHAR line[128];
    DWORD xferSecs  = (GetTickCount() - transfStart) / 1000;
    DWORD totalSecs = (GetTickCount() - startTime) / 1000;
    // dialog caption: "<caption> - <status>", plus percent while downloading
    wsprintf(line, TEXT("%s - %s"), *szCaption ? szCaption : PLUGIN_NAME, szStatus[status]);
    if(status == ST_DOWNLOAD && fs > 0 && fs != NOT_AVAILABLE)
    {
        wsprintf(line + lstrlen(line), TEXT(" %d%%"), MulDiv(100, cnt, fs));
    }
    if(szBanner == NULL)
        SetWindowText(hDlg, line);
    // current url (or its /popup alias) and local file name
    SetDlgItemText(hDlg, IDC_STATIC1, (szAlias && *szAlias) ? szAlias : url);
    SetDlgItemText(hDlg, IDC_STATIC2, fn);
    // bytes done plus average rate once the transfer is under way
    if(cnt > 0)
    {
        fsFormat(cnt, line);
        if(status == ST_DOWNLOAD && xferSecs > 1)
        {
            lstrcat(line, TEXT(" ( "));
            fsFormat(cnt / xferSecs, line + lstrlen(line));
            lstrcat(line, TEXT("/sec )"));
        }
    }
    else
        *line = 0;
    SetDlgItemText(hDlg, IDC_STATIC3, line);
    // total elapsed time as h:mm:ss
    wsprintf(line, TEXT("%d:%02d:%02d"), totalSecs / 3600, (totalSecs / 60) % 60, totalSecs % 60);
    SetDlgItemText(hDlg, IDC_STATIC6, line);
    // file size, remaining time estimate and progress bar (known size only)
    if(fs > 0 && fs != NOT_AVAILABLE)
    {
        fsFormat(fs, line);
        SetDlgItemText(hDlg, IDC_STATIC5, line);
        SendDlgItemMessage(hDlg, IDC_PROGRESS1, PBM_SETPOS, MulDiv(cnt, PB_RANGE, fs), 0);
        if(cnt > 5000)
        {
            // ETA projected from the average rate so far
            xferSecs = MulDiv(fs - cnt, xferSecs, cnt);
            wsprintf(line, TEXT("%d:%02d:%02d"), xferSecs / 3600, (xferSecs / 60) % 60, xferSecs % 60);
        }
        else
            *line = 0;
        SetWindowText(GetDlgItem(hDlg, IDC_STATIC4), line);
    }
}
/*****************************************************
* FUNCTION NAME: centerDlg()
* PURPOSE:
* centers dlg on NSIS parent
* SPECIAL CONSIDERATIONS:
*
*****************************************************/
/* Centers the progress dialog on the NSIS parent: over the parent
   window (popup mode, clamped to the desktop work area) or inside the
   parent's client area, nudged 20 px down (embedded mode).  No-op in
   silent mode or without a parent. */
void centerDlg(HWND hDlg)
{
    HWND parent = GetParent(hDlg);
    if(parent == NULL || silent)
        return;
    RECT ref;  // rectangle we center against
    RECT dr;   // our own window rectangle
    if(popup)
        GetWindowRect(parent, &ref);
    else
        GetClientRect(parent, &ref);
    GetWindowRect(hDlg, &dr);
    int w = dr.right - dr.left;
    int h = dr.bottom - dr.top;
    int x = (ref.left + ref.right - w) / 2;
    int y = (ref.top + ref.bottom - h) / 2;
    if(popup)
    {
        // keep the popup fully inside the desktop work area
        RECT wa;
        SystemParametersInfo(SPI_GETWORKAREA, 0, &wa, 0);
        if(x > wa.right - w)  x = wa.right - w;
        if(x < wa.left)       x = wa.left;
        if(y > wa.bottom - h) y = wa.bottom - h;
        if(y < wa.top)        y = wa.top;
    }
    else
        y += 20; // embedded child: drop below the page header
    SetWindowPos(hDlg, HWND_TOP, x, y, 0, 0, SWP_NOSIZE);
}
/*****************************************************
* FUNCTION NAME: onInitDlg()
* PURPOSE:
* dlg init
* SPECIAL CONSIDERATIONS:
*
*****************************************************/
/* One-time progress dialog setup: clones the parent's progress-bar
   style and page font for the embedded layout, applies the /nocancel
   and /banner options, sets the bar range and starts the 1 s UI timer
   serviced by dlgProc()'s WM_TIMER handler. */
void onInitDlg(HWND hDlg)
{
    HFONT hFont;
    HWND hPrbNew;
    HWND hPrbOld;
    HWND hCan = GetDlgItem(hDlg, IDCANCEL);
    if(childwnd)
    {
        hPrbNew = GetDlgItem(hDlg, IDC_PROGRESS1);
        hPrbOld = GetDlgItem(childwnd, 0x3ec);
        // Backland's fix for progress bar redraw/style issue.
        // Original bar may be hidden because of interference with other plug-ins.
        LONG prbStyle = WS_VISIBLE | WS_CHILD | WS_CLIPSIBLINGS | WS_CLIPCHILDREN;
        if(hPrbOld != NULL)
        {
            prbStyle |= GetWindowLong(hPrbOld, GWL_STYLE);
        }
        SetWindowLong(hPrbNew, GWL_STYLE, prbStyle);
        if(!popup)
        {
            // embedded mode: reuse the NSIS page font for a seamless look
            if((hFont = (HFONT)SendMessage(childwnd, WM_GETFONT, 0, 0)) != NULL)
            {
                SendDlgItemMessage(hDlg, IDC_STATIC1, WM_SETFONT, (WPARAM)hFont, 0);
                SendDlgItemMessage(hDlg, IDCANCEL, WM_SETFONT, (WPARAM)hFont, 0);
            }
            if(*szCancel == 0)
                /* BUGFIX: GetWindowText() takes the size in TCHARs, not bytes;
                   sizeof() overstated the buffer capacity in Unicode builds. */
                GetWindowText(GetDlgItem(GetParent(childwnd), IDCANCEL), szCancel, sizeof(szCancel) / sizeof(TCHAR));
            SetWindowText(hCan, szCancel);
            SetWindowPos(hPrbNew, HWND_TOP, 0, 0, 0, 0, SWP_NOSIZE | SWP_NOMOVE);
        }
    }
    if(nocancel)
    {
        if(hCan != NULL)
            ShowWindow(hCan, SW_HIDE);
        if(popup)
            /* BUGFIX: the original XOR-toggled WS_SYSMENU, which would *add*
               the system menu if the style did not have it; /nocancel wants
               the bit cleared unconditionally. */
            SetWindowLong(hDlg, GWL_STYLE, GetWindowLong(hDlg, GWL_STYLE) & ~WS_SYSMENU);
    }
    SendDlgItemMessage(hDlg, IDC_PROGRESS1, PBM_SETRANGE,
        0, MAKELPARAM(0, PB_RANGE));
    if(szBanner != NULL)
    {
        // /banner mode: icon + banner text + static caption
        SendDlgItemMessage(hDlg, IDC_STATIC13, STM_SETICON,
            (WPARAM)LoadIcon(GetModuleHandle(NULL), MAKEINTRESOURCE(103)), 0);
        SetDlgItemText(hDlg, IDC_STATIC12, szBanner);
        SetWindowText(hDlg, *szCaption ? szCaption : PLUGIN_NAME);
    }
    SetTimer(hDlg, 1, 1000, NULL); // 1 s UI refresh tick
    if(*szUrl != 0)
    {
        // labels supplied via /translate in popup mode
        SetDlgItemText(hDlg, IDC_STATIC20, szUrl);
        SetDlgItemText(hDlg, IDC_STATIC21, szDownloading);
        SetDlgItemText(hDlg, IDC_STATIC22, szConnecting);
        SetDlgItemText(hDlg, IDC_STATIC23, szProgress);
        SetDlgItemText(hDlg, IDC_STATIC24, szSecond);
        SetDlgItemText(hDlg, IDC_STATIC25, szRemaining);
    }
}
/*****************************************************
* FUNCTION NAME: dlgProc()
* PURPOSE:
* dlg message handling procedure
* SPECIAL CONSIDERATIONS:
* todo: better dialog design
*****************************************************/
/* Progress dialog window procedure.
   WM_TIMER (1 s, armed in onInitDlg) refreshes the texts via onTimer()
   (popup layout) or progress_callback() (embedded layout).  IDCANCEL
   optionally asks the /question confirmation and flags ST_CANCELLED,
   then falls through to IDOK, which is honored only when cancelled or
   when posted by the worker thread with the INTERNAL_OK marker and
   tears the dialog down. */
INT_PTR CALLBACK dlgProc(HWND hDlg, UINT message, WPARAM wParam, LPARAM lParam )
{
    switch(message)
    {
    case WM_INITDIALOG:
        onInitDlg(hDlg);
        centerDlg(hDlg);
        return false;
    case WM_PAINT:
        // child dialog redraw problem. return false is important
        RedrawWindow(GetDlgItem(hDlg, IDC_STATIC1), NULL, NULL, RDW_INVALIDATE);
        RedrawWindow(GetDlgItem(hDlg, IDCANCEL), NULL, NULL, RDW_INVALIDATE);
        RedrawWindow(GetDlgItem(hDlg, IDC_PROGRESS1), NULL, NULL, RDW_INVALIDATE);
        UpdateWindow(GetDlgItem(hDlg, IDC_STATIC1));
        UpdateWindow(GetDlgItem(hDlg, IDCANCEL));
        UpdateWindow(GetDlgItem(hDlg, IDC_PROGRESS1));
        return false;
    case WM_TIMER:
        if(!silent && IsWindow(hDlg))
        {
            // long connection period and paused state updates:
            // keep the rate/ETA math from counting non-transfer time
            if(status != ST_DOWNLOAD && GetTickCount() - transfStart > PROGRESS_MS)
                transfStart += PROGRESS_MS;
            if(popup) onTimer(hDlg); else progress_callback();
            RedrawWindow(GetDlgItem(hDlg, IDC_STATIC1), NULL, NULL, RDW_INVALIDATE);
            RedrawWindow(GetDlgItem(hDlg, IDCANCEL), NULL, NULL, RDW_INVALIDATE);
            RedrawWindow(GetDlgItem(hDlg, IDC_PROGRESS1), NULL, NULL, RDW_INVALIDATE);
        }
        break;
    case WM_COMMAND:
        switch(LOWORD(wParam))
        {
        case IDCANCEL:
            if(nocancel) break;
            // optional confirmation set with the /question option
            if(szQuestion &&
                MessageBox(hDlg, szQuestion, *szCaption ? szCaption : PLUGIN_NAME, MB_ICONWARNING|MB_YESNO) == IDNO)
                break;
            status = ST_CANCELLED;
            // FallThrough
        case IDOK:
            // only the worker thread (INTERNAL_OK) or a real cancel may close us
            if(status != ST_CANCELLED && HIWORD(wParam) != INTERNAL_OK) break;
            // otherwise in the silent mode next banner windows may go to background
            // if(silent) sf(hDlg);
            KillTimer(hDlg, 1);
            DestroyWindow(hDlg);
            break;
        }
        return false;
    default:
        return false;
    }
    return true;
}
/*****************************************************
* FUNCTION NAME: get()
* PURPOSE:
* http/https/ftp file download entry point
* SPECIAL CONSIDERATIONS:
*
*****************************************************/
/* NSIS plug-in entry point: downloads (and, via the put/post/head
   wrapper exports, uploads / posts / HEAD-requests) url/file pairs.
   Parses all leading "/option" strings from the NSIS stack, prepares
   the globals, creates the progress dialog plus the inetTransfer()
   worker thread, pumps messages until the transfer finishes, then
   drains the remaining stack entries and pushes the textual status
   (and, for /tostack, the received data). */
extern "C"
void __declspec(dllexport) __cdecl get(HWND hwndParent,
                                       int string_size,
                                       TCHAR *variables,
                                       stack_t **stacktop,
                                       extra_parameters *extra
                                       )
{
    HANDLE hThread;
    DWORD dwThreadId;
    MSG msg;
    TCHAR szUsername[64]=TEXT(""), // proxy params
        szPassword[64]=TEXT("");
    EXDLL_INIT();
    // for repeating /nounload plug-in calls - global vars clean up
    silent = popup = resume = nocancel = noproxy = nocookies = false;
    g_ignorecertissues = false;
    myFtpCommand = NULL;
    openType = INTERNET_OPEN_TYPE_PRECONFIG;
    status = ST_CONNECTING;
    *szCaption = *szCancel = *szUserAgent = *szBasic = *szAuth = 0;
    url = (TCHAR*)LocalAlloc(LPTR, string_size * sizeof(TCHAR));
    // post() pre-allocated szPost: the first stack string is the POST body
    if(szPost)
    {
        popstring(url);
#ifdef UNICODE
        WideCharToMultiByte(CP_ACP, 0, url, -1, szPost, string_size, NULL, NULL);
#else
        lstrcpy(szPost, url);
#endif
        fSize = (DWORD)lstrlenA(szPost);
    }
    // global silent option
    if(extra->exec_flags->silent != 0)
        silent = true;
    // we must take this from stack, or push url back
    while(!popstring(url) && *url == TEXT('/'))
    {
        if(lstrcmpi(url, TEXT("/silent")) == 0)
            silent = true;
        else if(lstrcmpi(url, TEXT("/weaksecurity")) == 0)
            g_ignorecertissues = true;
        else if(lstrcmpi(url, TEXT("/caption")) == 0)
            popstring(szCaption);
        else if(lstrcmpi(url, TEXT("/username")) == 0)
            popstring(szUsername);
        else if(lstrcmpi(url, TEXT("/password")) == 0)
            popstring(szPassword);
        else if(lstrcmpi(url, TEXT("/nocancel")) == 0)
            nocancel = true;
        else if(lstrcmpi(url, TEXT("/nocookies")) == 0)
            nocookies = true;
        else if(lstrcmpi(url, TEXT("/noproxy")) == 0)
            openType = INTERNET_OPEN_TYPE_DIRECT;
        else if(lstrcmpi(url, TEXT("/popup")) == 0)
        {
            popup = true;
            szAlias = (TCHAR*)LocalAlloc(LPTR, string_size * sizeof(TCHAR));
            popstring(szAlias);
        }
        else if(lstrcmpi(url, TEXT("/resume")) == 0)
        {
            popstring(url);
            if(url[0]) lstrcpy(szResume, url);
            resume = true;
        }
        else if(lstrcmpi(url, TEXT("/translate")) == 0)
        {
            // two different label sets: popup layout vs embedded layout
            if(popup)
            {
                popstring(szUrl);
                popstring(szStatus[ST_DOWNLOAD]); // Downloading
                popstring(szStatus[ST_CONNECTING]); // Connecting
                lstrcpy(szStatus[ST_URLOPEN], szStatus[ST_CONNECTING]);
                popstring(szDownloading);// file name
                popstring(szConnecting);// received
                popstring(szProgress);// file size
                popstring(szSecond);// remaining time
                popstring(szRemaining);// total time
            }
            else
            {
                popstring(szDownloading);
                popstring(szConnecting);
                popstring(szSecond);
                popstring(szMinute);
                popstring(szHour);
                popstring(szPlural);
                popstring(szProgress);
                popstring(szRemaining);
            }
        }
        else if(lstrcmpi(url, TEXT("/banner")) == 0)
        {
            popup = true;
            szBanner = (TCHAR*)LocalAlloc(LPTR, string_size * sizeof(TCHAR));
            popstring(szBanner);
        }
        else if(lstrcmpi(url, TEXT("/canceltext")) == 0)
        {
            popstring(szCancel);
        }
        else if(lstrcmpi(url, TEXT("/question")) == 0)
        {
            szQuestion = (TCHAR*)LocalAlloc(LPTR, string_size * sizeof(TCHAR));
            popstring(szQuestion);
            if(*szQuestion == 0) lstrcpy(szQuestion, DEF_QUESTION);
        }
        else if(lstrcmpi(url, TEXT("/useragent")) == 0)
        {
            popstring(szUserAgent);
        }
        else if(lstrcmpi(url, TEXT("/proxy")) == 0)
        {
            szProxy = (TCHAR*)LocalAlloc(LPTR, string_size * sizeof(TCHAR));
            popstring(szProxy);
            openType = INTERNET_OPEN_TYPE_PROXY;
        }
        else if(lstrcmpi(url, TEXT("/connecttimeout")) == 0)
        {
            popstring(url);
            timeout = myatou(url) * 1000;
        }
        else if(lstrcmpi(url, TEXT("/receivetimeout")) == 0)
        {
            popstring(url);
            receivetimeout = myatou(url) * 1000;
        }
        else if(lstrcmpi(url, TEXT("/header")) == 0)
        {
            szHeader = (TCHAR*)LocalAlloc(LPTR, string_size * sizeof(TCHAR));
            popstring(szHeader);
        }
        else if(!fput && ((convToStack = lstrcmpi(url, TEXT("/tostackconv")) == 0) || lstrcmpi(url, TEXT("/tostack")) == 0))
        {
            // capture the response into memory instead of a file
            szToStack = (TCHAR*)LocalAlloc(LPTR, string_size * sizeof(TCHAR));
            cntToStack = 0;
            lstrcpy(fn, TEXT("file"));
        }
        else if(lstrcmpi(url, TEXT("/file")) == 0)
        {
            // POST body comes from a local file instead of the stack string
            HANDLE hFile = CreateFileA(szPost, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, 0, NULL);
            DWORD rslt;
            if(hFile == INVALID_HANDLE_VALUE)
            {
                status = ERR_FILEOPEN;
                goto cleanup;
            }
            if((fSize = GetFileSize(hFile, NULL)) == 0)
            {
                CloseHandle(hFile);
                status = ERR_FILEREAD;
                goto cleanup;
            }
            wsprintfA(post_fname, "Filename: %s",
                strchr(szPost, '\\') ? strrchr(szPost, '\\') + 1 : szPost);
            LocalFree(szPost);
            szPost = (char*)LocalAlloc(LPTR, fSize);
            if(ReadFile(hFile, szPost, fSize, &rslt, NULL) == 0 || rslt != fSize)
            {
                CloseHandle(hFile);
                status = ERR_FILEREAD;
                goto cleanup;
            }
            CloseHandle(hFile);
        }
    }
    pushstring(url); // not an option: push the first real url back for the worker
    if(*szUserAgent == 0) lstrcpy(szUserAgent, INETC_USERAGENT);
    if(*szPassword && *szUsername)
    {
        // pre-encode "user:password" for the Basic auth header
        wsprintf(url, TEXT("%s:%s"), szUsername, szPassword);
        encode_base64(lstrlen(url), url, szAuth);
    }
    // may be silent for plug-in, but not so for installer itself - let's try to define 'progress text'
    if(hwndParent != NULL &&
        (childwnd = FindWindowEx(hwndParent, NULL, TEXT("#32770"), NULL)) != NULL &&
        !silent)
        SetDlgItemText(childwnd, 1006, *szCaption ? szCaption : PLUGIN_NAME);
    else InitCommonControls(); // or NSIS do this before .onInit?
    // cannot embed child dialog to non-existing parent. Using 'silent' to hide it
    if(childwnd == NULL && !popup) silent = true;
    // let's use hidden popup dlg in the silent mode - works both on .onInit and Page
    if(silent) { resume = false; popup = true; }
    // google says WS_CLIPSIBLINGS helps to redraw... not in my tests...
    if(!popup)
    {
        unsigned int wstyle = GetWindowLong(childwnd, GWL_STYLE);
        wstyle |= WS_CLIPSIBLINGS;
        SetWindowLong(childwnd, GWL_STYLE, wstyle);
    }
    startTime = GetTickCount();
    if((hDlg = CreateDialog(g_hInstance,
        MAKEINTRESOURCE(szBanner ? IDD_DIALOG2 : (popup ? IDD_DIALOG1 : IDD_DIALOG3)),
        (popup ? hwndParent : childwnd), dlgProc)) != NULL)
    {
        if((hThread = CreateThread(NULL, 0, inetTransfer, (LPVOID)hDlg, 0,
            &dwThreadId)) != NULL)
        {
            HWND hButton = GetDlgItem(childwnd, 0x403);
            HWND hList = GetDlgItem(childwnd, 0x3f8);
            DWORD dwStyleButton = 0;
            BOOL fVisibleList = false;
            if(!silent)
            {
                ShowWindow(hDlg, SW_NORMAL);
                if(childwnd && !popup)
                {
                    // embedded mode: disable "details" button, hide the log list
                    if(hButton)
                    {
                        dwStyleButton = GetWindowLong(hButton, GWL_STYLE);
                        EnableWindow(hButton, false);
                    }
                    if(hList)
                    {
                        fVisibleList = IsWindowVisible(hList);
                        ShowWindow(hList, SW_HIDE);
                    }
                }
            }
            // pump messages until the worker thread destroys the dialog
            while(IsWindow(hDlg) &&
                GetMessage(&msg, NULL, 0, 0) > 0)
            {
                if(!IsDialogMessage(hDlg, &msg) &&
                    !IsDialogMessage(hwndParent, &msg) &&
                    !TranslateMessage(&msg))
                    DispatchMessage(&msg);
            }
            if(WaitForSingleObject(hThread, 3000) == WAIT_TIMEOUT)
            {
                TerminateThread(hThread, 1);
                status = ERR_TERMINATED;
            }
            CloseHandle(hThread);
            if(!silent && childwnd)
            {
                // restore the NSIS page controls changed above
                SetDlgItemText(childwnd, 1006, TEXT(""));
                if(!popup)
                {
                    if(hButton)
                        SetWindowLong(hButton, GWL_STYLE, dwStyleButton);
                    if(hList && fVisibleList)
                        ShowWindow(hList, SW_SHOW);
                }
            }
        }
        else
        {
            status = ERR_THREAD;
            DestroyWindow(hDlg);
        }
    }
    else {
        status = ERR_DIALOG;
        wsprintf(szStatus[status] + lstrlen(szStatus[status]), TEXT(" (Err=%d)"), GetLastError());
    }
cleanup:
    // we need to clean up stack from remaining url/file pairs.
    // this multiple files download head pain and may be not safe
    while(!popstring(url) && lstrcmpi(url, TEXT("/end")) != 0)
    {
    }
    LocalFree(url);
    if(szAlias) LocalFree(szAlias);
    /* BUGFIX: the original line here was "if(szBanner) LocalFree(szAlias);",
       which freed szAlias a second time (heap corruption when both /popup
       and /banner were given) and leaked the szBanner allocation. */
    if(szBanner) LocalFree(szBanner);
    if(szQuestion) LocalFree(szQuestion);
    if(szProxy) LocalFree(szProxy);
    if(szPost) LocalFree(szPost);
    if(szHeader) LocalFree(szHeader);
    url = szProxy = szHeader = szAlias = szQuestion = NULL;
    szBanner = NULL; // also reset: a stale pointer broke repeated /nounload calls
    szPost = NULL;
    fput = fhead = false;
    if(szToStack && status == ST_OK)
    {
        if(cntToStack > 0 && convToStack)
        {
#ifdef UNICODE
            // data was accumulated as ANSI bytes: convert before pushing
            int required = MultiByteToWideChar(CP_ACP, 0, (CHAR*)szToStack, string_size * sizeof(TCHAR), NULL, 0);
            if(required > 0)
            {
                WCHAR* pszToStackNew = (WCHAR*)LocalAlloc(LPTR, sizeof(WCHAR) * (required + 1));
                if(pszToStackNew)
                {
                    if(MultiByteToWideChar(CP_ACP, 0, (CHAR*)szToStack, string_size * sizeof(TCHAR), pszToStackNew, required) > 0)
                        pushstring(pszToStackNew);
                    LocalFree(pszToStackNew);
                }
            }
#else
            int required = WideCharToMultiByte(CP_ACP, 0, (WCHAR*)szToStack, -1, NULL, 0, NULL, NULL);
            if(required > 0)
            {
                CHAR* pszToStackNew = (CHAR*)LocalAlloc(LPTR, required + 1);
                if(pszToStackNew)
                {
                    if(WideCharToMultiByte(CP_ACP, 0, (WCHAR*)szToStack, -1, pszToStackNew, required, NULL, NULL) > 0)
                        pushstring(pszToStackNew);
                    LocalFree(pszToStackNew);
                }
            }
#endif
        }
        else
        {
            pushstring(szToStack);
        }
        LocalFree(szToStack);
        szToStack = NULL;
    }
    pushstring(szStatus[status]);
}
/*****************************************************
* FUNCTION NAME: put()
* PURPOSE:
*   Entry point for http/ftp file upload. Switches the
*   shared get() transfer engine into upload mode.
* SPECIAL CONSIDERATIONS:
*   Resumed upload does not work over http; ftp REST
*   may allow it.
*****************************************************/
extern "C"
void __declspec(dllexport) __cdecl put(HWND hwndParent,
                                       int string_size,
                                       TCHAR *variables,
                                       stack_t **stacktop,
                                       extra_parameters *extra
                                       )
{
    // Adjust the user-visible status strings for upload mode,
    // raise the global upload flag, then delegate to get().
    lstrcpy(szDownloading, TEXT("Uploading %s"));
    lstrcpy(szStatus[2], TEXT("Uploading"));
    fput = true;
    get(hwndParent, string_size, variables, stacktop, extra);
}
/*****************************************************
* FUNCTION NAME: post()
* PURPOSE:
*   http post entry point
* SPECIAL CONSIDERATIONS:
*   A non-NULL szPost buffer signals POST mode to the
*   shared get() engine; the buffer is released in
*   get()'s cleanup path.
*****************************************************/
extern "C"
void __declspec(dllexport) __cdecl post(HWND hwndParent,
                                        int string_size,
                                        TCHAR *variables,
                                        stack_t **stacktop,
                                        extra_parameters *extra
                                        )
{
    // LPTR returns a zero-initialized ANSI buffer for the request body.
    HLOCAL body = LocalAlloc(LPTR, string_size);
    szPost = (CHAR*)body;
    get(hwndParent, string_size, variables, stacktop, extra);
}
/*****************************************************
* FUNCTION NAME: head()
* PURPOSE:
*   http HEAD request entry point: retrieves response
*   headers only, no message body.
*   (The previous header comment was copy-pasted from
*   put() and wrongly described this as file upload.)
* SPECIAL CONSIDERATIONS:
*   Sets the global fhead flag and delegates to the
*   shared get() transfer engine.
*****************************************************/
extern "C"
void __declspec(dllexport) __cdecl head(HWND hwndParent,
                                        int string_size,
                                        TCHAR *variables,
                                        stack_t **stacktop,
                                        extra_parameters *extra
                                        )
{
    fhead = true;
    get(hwndParent, string_size, variables, stacktop, extra);
}
/*****************************************************
* FUNCTION NAME: DllMain()
* PURPOSE:
*   Dll main (initialization) entry point
* SPECIAL CONSIDERATIONS:
*   Only records the module handle; no per-thread or
*   per-process work is done here.
*****************************************************/
// When building without the default CRT, the raw CRT startup symbol
// becomes the DLL entry point, so alias DllMain onto it.
#ifdef _VC_NODEFAULTLIB
#define DllMain _DllMainCRTStartup
#endif
EXTERN_C BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
{
// Stash the module handle for later use (e.g. dialog/resource loading).
g_hInstance = hinstDLL;
return TRUE;
}
| C++ |
#ifndef ___NSIS_PLUGIN__H___
#define ___NSIS_PLUGIN__H___
#ifdef __cplusplus
extern "C" {
#endif
#include "api.h"
#include "nsis_tchar.h"
#ifndef NSISCALL
# define NSISCALL __stdcall
#endif
#define EXDLL_INIT() { \
g_stringsize=string_size; \
g_stacktop=stacktop; \
g_variables=variables; }
// One node of the NSIS runtime string stack (see g_stacktop and
// pushstring()/popstring()). Although declared with text[1], each node is
// allocated large enough to hold a full NSIS string of string_size TCHARs.
typedef struct _stack_t {
  struct _stack_t *next;
  TCHAR text[1]; // this should be the length of string_size
} stack_t;
// Indices of the NSIS script variables ($0-$9, $R0-$R9 and the built-in
// state variables), used with getuservariable()/setuservariable().
enum
{
  INST_0,       // $0
  INST_1,       // $1
  INST_2,       // $2
  INST_3,       // $3
  INST_4,       // $4
  INST_5,       // $5
  INST_6,       // $6
  INST_7,       // $7
  INST_8,       // $8
  INST_9,       // $9
  INST_R0,      // $R0
  INST_R1,      // $R1
  INST_R2,      // $R2
  INST_R3,      // $R3
  INST_R4,      // $R4
  INST_R5,      // $R5
  INST_R6,      // $R6
  INST_R7,      // $R7
  INST_R8,      // $R8
  INST_R9,      // $R9
  INST_CMDLINE, // $CMDLINE
  INST_INSTDIR, // $INSTDIR
  INST_OUTDIR,  // $OUTDIR
  INST_EXEDIR,  // $EXEDIR
  INST_LANG,    // $LANGUAGE
  __INST_LAST   // total count, not a real variable
};
extern unsigned int g_stringsize;
extern stack_t **g_stacktop;
extern TCHAR *g_variables;
void NSISCALL pushstring(const TCHAR *str);
void NSISCALL pushintptr(INT_PTR value);
#define pushint(v) pushintptr((INT_PTR)(v))
int NSISCALL popstring(TCHAR *str); // 0 on success, 1 on empty stack
int NSISCALL popstringn(TCHAR *str, int maxlen); // with length limit, pass 0 for g_stringsize
INT_PTR NSISCALL popintptr();
#define popint() ( (int) popintptr() )
int NSISCALL popint_or(); // with support for or'ing (2|4|8)
INT_PTR NSISCALL nsishelper_str_to_ptr(const TCHAR *s);
#define myatoi(s) ( (int) nsishelper_str_to_ptr(s) ) // converts a string to an integer
unsigned int NSISCALL myatou(const TCHAR *s); // converts a string to an unsigned integer, decimal only
int NSISCALL myatoi_or(const TCHAR *s); // with support for or'ing (2|4|8)
TCHAR* NSISCALL getuservariable(const int varnum);
void NSISCALL setuservariable(const int varnum, const TCHAR *var);
#ifdef _UNICODE
#define PopStringW(x) popstring(x)
#define PushStringW(x) pushstring(x)
#define SetUserVariableW(x,y) setuservariable(x,y)
int NSISCALL PopStringA(char* ansiStr);
void NSISCALL PushStringA(const char* ansiStr);
void NSISCALL GetUserVariableW(const int varnum, wchar_t* wideStr);
void NSISCALL GetUserVariableA(const int varnum, char* ansiStr);
void NSISCALL SetUserVariableA(const int varnum, const char* ansiStr);
#else
// ANSI defs
#define PopStringA(x) popstring(x)
#define PushStringA(x) pushstring(x)
#define SetUserVariableA(x,y) setuservariable(x,y)
int NSISCALL PopStringW(wchar_t* wideStr);
void NSISCALL PushStringW(wchar_t* wideStr);
void NSISCALL GetUserVariableW(const int varnum, wchar_t* wideStr);
void NSISCALL GetUserVariableA(const int varnum, char* ansiStr);
void NSISCALL SetUserVariableW(const int varnum, const wchar_t* wideStr);
#endif
#ifdef __cplusplus
}
#endif
#endif//!___NSIS_PLUGIN__H___
| Unknown |
import h5py
import math
import nibabel as nib
import numpy as np
from medpy import metric
import torch
import torch.nn.functional as F
from tqdm import tqdm
from utils import ramps, losses
def test_all_case(net1, image_list, num_classes, patch_size=(112, 112, 80), stride_xy=18, stride_z=4, save_result=True, test_save_path=None, preproc_fn=None, dataset='la'):
    """Evaluate `net1` on every HDF5 volume in `image_list` with sliding-window
    inference and accumulate per-class Dice / IoU / HD95 / ASD.

    Returns (avg_dice, avg_iou, avg_hd, avg_asd, dicelist), where each avg_*
    is a numpy array with one entry per foreground class plus a trailing
    across-class mean, and dicelist holds the per-volume dice vectors.
    """
    total_metric = 0.0  # unused accumulator kept for interface stability
    totaldice = 0.0
    totalasd = 0.0
    totalhd = 0.0
    totaliou = 0.0
    dicelist = []
    # delete after
    #slice_random=[5,0,3,11,3,7,9,3 , 5, 2 , 4, 7, 6, 8, 8, 10]
    #img_idx=0
    for image_path in tqdm(image_list):
        print(image_path)
        id = image_path.split('/')[-1]
        h5f = h5py.File(image_path, 'r')
        image = h5f['image'][:]
        label = h5f['label'][:]
        # MMWHS volumes are z-score normalised here; other datasets are
        # assumed to be pre-normalised.
        if dataset == 'mmwhs':
            image = (image - np.mean(image)) / np.std(image)
        if preproc_fn is not None:
            image = preproc_fn(image)
        prediction, score_map = test_single_case(net1, image, stride_xy, stride_z, patch_size, num_classes=num_classes)
        classdice = []
        classiou = []
        classasd = []
        classhd = []
        for c in range(1, num_classes):
            # Degenerate cases: missing prediction for a present class is
            # penalised with fixed distances (50); an absent class that is
            # correctly not predicted scores perfectly.
            if np.count_nonzero(prediction == c) == 0 and np.count_nonzero(label[:] == c) != 0:
                curdice, curiou, curhd, curasd = (0, 0, 50, 50)
            elif np.count_nonzero(prediction == c) == 0 and np.count_nonzero(label[:] == c) == 0:
                curdice, curiou, curhd, curasd = (1, 1, 0, 0)
            else:
                curdice, curiou, curhd, curasd = calculate_metric_percase(prediction == c, label[:] == c)
            classdice.append(curdice)
            classiou.append(curiou)
            classhd.append(curhd)
            classasd.append(curasd)
        # Append the across-class mean as the final entry of each vector.
        classdice.append(np.mean(classdice))
        classiou.append(np.mean(classiou))
        classhd.append(np.mean(classhd))
        classasd.append(np.mean(classasd))
        totaldice += np.asarray(classdice)
        totalhd += np.asarray(classhd)
        totaliou += np.asarray(classiou)
        totalasd += np.asarray(classasd)
        print(classdice)
        print(classiou)
        print(classhd)
        print(classasd)
        dicelist.append(classdice)
        if save_result:
            nib.save(nib.Nifti1Image(score_map.astype(np.float32), np.eye(4)), test_save_path + id + "_prob.nii.gz")
            nib.save(nib.Nifti1Image(prediction.astype(np.float32), np.eye(4)), test_save_path + id + "_pred.nii.gz")
            # NOTE: this zeroes low-confidence voxels *after* the plain
            # prediction has been saved; metrics above use the unthresholded map.
            prediction[score_map < 0.7] = 0
            nib.save(nib.Nifti1Image(prediction.astype(np.float32), np.eye(4)), test_save_path + id + "_threspred.nii.gz")
            nib.save(nib.Nifti1Image(image[:].astype(np.float32), np.eye(4)), test_save_path + id + "_img.nii.gz")
            # "fake" label keeps only a single coronal slice of the ground truth
            fakelabel = np.zeros_like(label)
            fakelabel[:, 50, :] = label[:, 50, :]
            nib.save(nib.Nifti1Image(label[:].astype(np.float32), np.eye(4)), test_save_path + id + "_gt.nii.gz")
            nib.save(nib.Nifti1Image(fakelabel[:].astype(np.float32), np.eye(4)), test_save_path + id + "_fakegt.nii.gz")
    avg_dice = totaldice / len(image_list)
    avg_iou = totaliou / len(image_list)
    avg_hd = totalhd / len(image_list)
    avg_asd = totalasd / len(image_list)
    print('average metric is {},{},{},{}'.format(avg_dice, avg_iou, avg_hd, avg_asd))
    print(dicelist)
    return avg_dice, avg_iou, avg_hd, avg_asd, dicelist
def test_single_case(net1, image, stride_xy, stride_z, patch_size, num_classes=1):
    """Sliding-window inference over one 3-D volume.

    Pads the volume up to `patch_size` if needed, runs `net1` on overlapping
    patches (strides `stride_xy`/`stride_z`), averages the softmax scores of
    overlapping windows, and returns (label_map, score_map) cropped back to
    the original volume shape. `score_map` holds the winning-class confidence.
    """
    w, h, d = image.shape
    # if the size of image is less than patch_size, then padding it
    add_pad = False
    if w < patch_size[0]:
        w_pad = patch_size[0]-w
        add_pad = True
    else:
        w_pad = 0
    if h < patch_size[1]:
        h_pad = patch_size[1]-h
        add_pad = True
    else:
        h_pad = 0
    if d < patch_size[2]:
        d_pad = patch_size[2]-d
        add_pad = True
    else:
        d_pad = 0
    # Split padding symmetrically (extra voxel goes to the right side).
    wl_pad, wr_pad = w_pad//2, w_pad-w_pad//2
    hl_pad, hr_pad = h_pad//2, h_pad-h_pad//2
    dl_pad, dr_pad = d_pad//2, d_pad-d_pad//2
    if add_pad:
        image = np.pad(image, [(wl_pad, wr_pad), (hl_pad, hr_pad), (dl_pad, dr_pad)], mode='constant', constant_values=0)
    ww, hh, dd = image.shape
    # Number of window positions along each axis (last window is clamped to the border).
    sx = math.ceil((ww - patch_size[0]) / stride_xy) + 1
    sy = math.ceil((hh - patch_size[1]) / stride_xy) + 1
    sz = math.ceil((dd - patch_size[2]) / stride_z) + 1
    print("{}, {}, {}".format(sx, sy, sz))
    score_map = np.zeros((num_classes, ) + image.shape).astype(np.float32)
    cnt = np.zeros(image.shape).astype(np.float32)  # per-voxel window count for averaging
    for x in range(0, sx):
        xs = min(stride_xy*x, ww-patch_size[0])
        for y in range(0, sy):
            ys = min(stride_xy * y, hh-patch_size[1])
            for z in range(0, sz):
                zs = min(stride_z * z, dd-patch_size[2])
                test_patch = image[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]]
                # add batch and channel dims: (1, 1, W, H, D)
                test_patch = np.expand_dims(np.expand_dims(test_patch, axis=0), axis=0).astype(np.float32)
                test_patch = torch.from_numpy(test_patch).cuda()
                y1 = net1(test_patch)
                y = F.softmax(y1, dim=1)
                y = y.cpu().data.numpy()
                y = y[0, :, :, :, :]
                score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] \
                    = score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] + y
                cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] \
                    = cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] + 1
    # Average overlapping windows, then collapse to hard labels + confidence.
    score_map = score_map/np.expand_dims(cnt, axis=0)
    label_map = np.argmax(score_map, axis=0)
    score_map = np.max(score_map, axis=0)
    if add_pad:
        label_map = label_map[wl_pad:wl_pad+w, hl_pad:hl_pad+h, dl_pad:dl_pad+d]
        score_map = score_map[wl_pad:wl_pad+w, hl_pad:hl_pad+h, dl_pad:dl_pad+d]
        #score_map = score_map[:,wl_pad:wl_pad+w,hl_pad:hl_pad+h,dl_pad:dl_pad+d]
    return label_map, score_map
def cal_dice(prediction, label, num=2):
    """Per-class Dice between two integer label volumes.

    :param prediction: integer array of predicted class labels
    :param label: integer array of ground-truth class labels
    :param num: number of classes including background; Dice is computed
        for foreground classes 1..num-1
    :return: numpy array of shape (num-1,) with the Dice per foreground class
        (NaN when a class is absent from both inputs, as before)
    """
    total_dice = np.zeros(num - 1)
    for i in range(1, num):
        # np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # use the explicit float64 dtype instead.
        prediction_tmp = (prediction == i).astype(np.float64)
        label_tmp = (label == i).astype(np.float64)
        dice = 2 * np.sum(prediction_tmp * label_tmp) / (np.sum(prediction_tmp) + np.sum(label_tmp))
        total_dice[i - 1] += dice
    return total_dice
def calculate_metric_percase(pred, gt):
    """Compute (Dice, Jaccard, 95% Hausdorff, average surface distance)
    for one binary prediction / ground-truth pair using medpy."""
    binary = metric.binary
    return (binary.dc(pred, gt),
            binary.jc(pred, gt),
            binary.hd95(pred, gt),
            binary.asd(pred, gt))
| Python |
import argparse
# Command-line configuration for checkpoint evaluation.
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str, default='../data/2018LA_Seg_Training Set/', help='Name of Experiment')
parser.add_argument('--model', type=str, default='UAMT_unlabel', help='model_name')
parser.add_argument('--gpu', type=str, default='4', help='GPU to use')
parser.add_argument('--dataset', type=str, default="la", help='dataset to use')
parser.add_argument('--modeleffe', type=int, default=1, help='model to use')
parser.add_argument('--mid_iterations', type=int, default=6000)
parser.add_argument('--max_iteration', type=int, default=2000)
parser.add_argument('--iteration_step', type=int, default=100)
parser.add_argument('--split', type=str, default='test', help='testlist to use')
parser.add_argument('--min_iteration', type=int, default=100)
FLAGS = parser.parse_args()
import os
# Set before importing torch so the visible-device restriction takes effect.
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
import torch
from networks.vnet import VNet
from test_util import test_all_case
snapshot_path = "../../../../data/xx/3d2d/model/" + FLAGS.model + "/"
test_save_path = "../../../../data/xx/3d2d/model/prediction/" + FLAGS.model + "_post/"
if not os.path.exists(test_save_path):
    os.makedirs(test_save_path)
# Build the module-level image_list consumed by test_calculate_metric below.
if FLAGS.dataset == 'mmwhs':
    num_classes = 8
    with open('../data/' + FLAGS.split + '_mmwhs.txt', 'r') as f:
        image_list = f.readlines()
    image_list = ['../../../../data/caiheng/sas/data/MMWHS/' + item.replace('\n', '').split(",")[0] + '.h5' for item in image_list]
def test_calculate_metric(epoch_num):
    """Build a VNet, load the checkpoint saved after `epoch_num` iterations,
    and evaluate it on the module-level `image_list` with sliding-window
    inference.

    Returns (avg_dice, avg_iou, avg_hd, avg_asd, dice_list) as produced by
    test_all_case.
    """
    net1 = VNet(n_channels=1, n_classes=num_classes, normalization='batchnorm', has_dropout=False).cuda()
    save_mode_path1 = os.path.join(snapshot_path, 'iter_' + str(epoch_num) + '.pth')
    print(save_mode_path1)
    #for param in net2.block_one.parameters():
    # print(param)
    print("init weight from {}".format(save_mode_path1))
    net1.eval()  # inference mode: disables dropout, uses running BN stats
    # Weights are only loaded for the mmwhs configuration; other datasets
    # would evaluate a randomly-initialised net here.
    if FLAGS.dataset == 'mmwhs':
        net1.load_state_dict(torch.load(save_mode_path1), strict=True)
    avg_dice, avg_iou, avg_hd, avg_asd, dice_list = test_all_case(net1, image_list, num_classes=num_classes,
                                                                  patch_size=(192, 192, 96), stride_xy=16, stride_z=4,
                                                                  save_result=True, test_save_path=test_save_path, dataset=FLAGS.dataset)
    return avg_dice, avg_iou, avg_hd, avg_asd, dice_list
if __name__ == '__main__':
    # Sweep every saved checkpoint, log its metrics, and track the iteration
    # with the best mean Dice (the trailing entry of avg_dice).
    maxmetric = 0
    maxi = -1
    path = os.path.join(snapshot_path, 'test.txt')
    for i in range(FLAGS.min_iteration, FLAGS.max_iteration + 1, FLAGS.iteration_step):
        avg_dice, avg_iou, avg_hd, avg_asd, dice_list = test_calculate_metric(i)
        strmetric = 'net' + str(FLAGS.modeleffe) + ": iter" + str(i) + ":\n" + str(avg_dice) + '\n' + str(avg_iou) + '\n' + str(avg_hd) + '\n' + str(avg_asd) + '\n'
        with open(path, "a") as f:
            f.writelines(strmetric)
        if avg_dice[-1] > maxmetric:
            maxi = i
            maxmetric = avg_dice[-1]
        print(maxmetric, "||", maxi)
    with open(path, "a") as f:
        f.writelines(str(maxi) + '\n')
    # Re-point the module-level image_list at the held-out test split, then
    # re-evaluate the best validation checkpoint on it.
    with open('../data/' + (FLAGS.split).replace('valid', 'test') + '_mmwhs.txt', 'r') as f:
        image_list = f.readlines()
    image_list = ['../../../../data/xx/3d2d/data/MMWHS/' + item.replace('\n', '').split(",")[0] + '.h5' for item in image_list]
    avg_dice, avg_iou, avg_hd, avg_asd, dice_list = test_calculate_metric(maxi)
    strmetric = 'net' + str(FLAGS.modeleffe) + ": iter" + str(maxi) + ":\n" + str(avg_dice) + '\n' + str(
        avg_iou) + '\n' + str(avg_hd) + '\n' + str(avg_asd) + '\n'
    with open(path, "a") as f:
        f.writelines(strmetric)
#!/bin/sh
# Fixes extraction artifact: dataset metadata was fused onto the first command.
# Train the 3D/2D cross-teaching model on MMWHS fold 0, then sweep all saved
# checkpoints on the fold-0 validation split to select the best iteration.
python train_3d2d.py --dataset mmwhs --max_iteration 6000 --exp whs0 --consistency 0.1 --slice_strategy 12 --split 'train0' --quality_bar 0.98 --ht 0.9 --st 0.7
python test.py --gpu 0 --dataset mmwhs --model whs0 --min_iteration 100 --max_iteration 6000 --iteration_step 100 --split 'valid0'
import argparse
# Command-line configuration for 3D/2D cross-teaching training.
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str, default='../data/2018LA_Seg_Training Set/', help='Name of Experiment')
parser.add_argument('--exp', type=str, default='MT31', help='model_name')
parser.add_argument('--dataset', type=str, default='effe', help='dataset to use')
parser.add_argument('--label_num', type=int, default=5, help='number of labeled data')
parser.add_argument('--max_iterations', type=int, default=6000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=4, help='batch_size per gpu')
parser.add_argument('--labeled_bs', type=int, default=1, help='labeled_batch_size per gpu')
parser.add_argument('--base_lr', type=float, default=0.001, help='maximum epoch number to train')
parser.add_argument('--deterministic', type=int, default=1, help='whether use deterministic training')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--sliceseed', type=int, default=0, help='random seed')
parser.add_argument('--gpu', type=str, default='0', help='GPU to use')
parser.add_argument('--split', type=str, default='train', help='datalist to use')
parser.add_argument('--num', type=int, default=16, help='number of labeled volume')
parser.add_argument('--quality_bar', type=float, default=0.98, help='quality bar')
parser.add_argument('--ht', type=float, default=0.9, help='hard threshold')
parser.add_argument('--st', type=float, default=0.7, help='soft threshold')
### costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--slice_strategy', type=int, default=0, help='ema_decay')
parser.add_argument('--consistency_type', type=str, default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float, default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float, default=40.0, help='consistency_rampup')
args = parser.parse_args()
import os
# NOTE(review): the device list is hard-coded to "0,1,2" and the --gpu flag
# is ignored here — confirm whether this is intentional. Must stay before
# `import torch` for the restriction to apply.
os.environ['CUDA_VISIBLE_DEVICES'] = "0,1,2"
import sys
from tqdm import tqdm
from tensorboardX import SummaryWriter
import shutil
import logging
import time
import random
import numpy as np
import pdb
import torch
import torch.optim as optim
from torchvision import transforms
from torch.nn import DataParallel
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from torchvision.utils import make_grid
from networks.vnet import VNet
from networks.unet import UNet
gpus = [0, 1, 2]  # matches the CUDA_VISIBLE_DEVICES setting above
from dataloaders import utils
from utils import ramps, losses
from dataloaders.mmwhs import MMWHS_Sparse, MMRandomCrop_Sparse, MMRandomRotFlip, ToTensor, TwoStreamBatchSampler
train_data_path = args.root_path
snapshot_path = "../../../../data/xx/3d2dct/model/" + args.exp + "/"
if args.dataset == 'mmwhs':
    train_data_path = "../../../../data/caiheng/sas/data/MMWHS"
batch_size = 1  # overrides --batch_size; one volume per step
max_iterations = args.max_iterations
base_lr = args.base_lr
labeled_bs = args.labeled_bs
def get_current_consistency_weight(epoch):
    """Consistency-loss weight at `epoch`, following the sigmoid ramp-up
    schedule of https://arxiv.org/abs/1610.02242."""
    rampup = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * rampup
def update_ema_variables(model, ema_model, alpha, global_step):
    """In-place EMA update of ema_model's parameters:
    ema = alpha * ema + (1 - alpha) * param.

    :param model: the student model providing current parameters
    :param ema_model: the teacher model updated in place
    :param alpha: target EMA decay
    :param global_step: step counter; early on, alpha is clamped to
        1 - 1/(step+1) so the EMA tracks the true running average
    """
    # Use the true average until the exponential average is more correct
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # Tensor.add_(Number, Tensor) was deprecated and later removed;
        # use the keyword `alpha` overload instead.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
if __name__ == "__main__":
    ## make logger file
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # Snapshot the source tree next to the checkpoints for reproducibility.
    shutil.copytree('.', snapshot_path + '/code', shutil.ignore_patterns(['.git', '__pycache__']))
    logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    if args.deterministic:
        cudnn.benchmark = False
        cudnn.deterministic = True
        random.seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)
    # Slice selection uses its own seed so the sparse-annotation pattern is
    # reproducible independently of the training seed.
    np.random.seed(args.sliceseed)
    if args.dataset == 'mmwhs':
        num_classes = 8
        patch_size = (176, 176, 96)
        db_train1 = MMWHS_Sparse(base_dir=train_data_path,
                                 split=args.split,
                                 num=args.num,
                                 slice_strategy=args.slice_strategy,
                                 transform=transforms.Compose([
                                     MMRandomCrop_Sparse(patch_size),
                                     #MMRandomRotFlip(),
                                     ToTensor(),
                                 ]))
    # Restore the main seed after dataset construction consumed sliceseed.
    np.random.seed(args.seed)

    def create_3dmodel(ema=False):
        # Network definition: 3-D VNet with dropout enabled for training.
        net = VNet(n_channels=1, n_classes=num_classes, normalization='batchnorm', has_dropout=True)
        model = net.cuda()
        return model

    def create_2dmodel(ema=False):
        # Network definition: 2-D UNet operating on volume slices.
        net = UNet(n_channels=1, n_classes=num_classes)
        model = net.cuda()
        return model

    # model1: 3-D branch; model2/model3: 2-D branches on two slicing axes.
    model1 = create_3dmodel()
    model1 = DataParallel(model1, device_ids=gpus, output_device=gpus[0])
    model2 = create_2dmodel()
    model2 = DataParallel(model2, device_ids=gpus, output_device=gpus[0])
    model3 = create_2dmodel()
    model3 = DataParallel(model3, device_ids=gpus, output_device=gpus[0])

    def worker_init_fn(worker_id):
        # Give each DataLoader worker a distinct but deterministic seed.
        random.seed(args.seed + worker_id)

    trainloader1 = DataLoader(db_train1, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
    model1.train()
    model2.train()
    model3.train()
    #optimizer1 = optim.SGD(model1.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001)
    optimizer1 = optim.Adam(model1.parameters(), lr=base_lr, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0001)
    optimizer2 = optim.Adam(model2.parameters(), lr=base_lr, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0001)
    optimizer3 = optim.Adam(model3.parameters(), lr=base_lr, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0001)
    writer = SummaryWriter(snapshot_path + '/log')
    logging.info("{} itertations per epoch".format(len(trainloader1)))
    iter_num = 0
    max_epoch = max_iterations // len(trainloader1) + 1
    lr_ = base_lr
    for epoch_num in tqdm(range(max_epoch), ncols=70):
        time1 = time.time()
        for i_batch, sampled_batch1 in enumerate(trainloader1):
            time2 = time.time()
            # print('fetch data cost {}'.format(time2-time1))
            # 'weight' marks the sparsely annotated slices (1 = labeled voxel).
            volume_batch1, label_batch1, maskz1 = sampled_batch1['image'], sampled_batch1['label'], sampled_batch1['weight']
            maskz1 = maskz1.cuda()
            maskzz1 = torch.unsqueeze(maskz1, 1).cuda()
            maskzz1 = maskzz1.repeat(1, 1, 1, 1, 1).cuda()
            volume_batch1, label_batch1 = volume_batch1.cuda(), label_batch1.cuda()
            # 3-D branch forward pass.
            outputs1 = model1(volume_batch1)
            outputs_soft1 = F.softmax(outputs1, dim=1)
            # 2-D branch #1: slice the volume along the depth axis.
            volume_batch2 = volume_batch1[0].transpose(0, 3).squeeze().unsqueeze(1) #[1,1,192,192,96]->[96,1,192,192]
            #print(torch.sum(outputs_soft1[:,1]>0.5))
            outputs2 = model2(volume_batch2) # [96,8,192,192]
            outputs_soft2 = F.softmax(outputs2, dim=1)
            # Fold slice predictions back into a single 5-D volume tensor.
            outputs2 = outputs2.unsqueeze(4).transpose(0, 4) #[1,8,192,192,96]
            outputs_soft2 = outputs_soft2.unsqueeze(4).transpose(0, 4)
            # 2-D branch #2: slice along a second axis.
            volume_batch3 = volume_batch1[0].transpose(0, 2).squeeze().unsqueeze(1) #[192,1,192,96]
            outputs3 = model3(volume_batch3) #[192,8,192,96]
            outputs_soft3 = F.softmax(outputs3, dim=1)
            outputs3 = outputs3.unsqueeze(3).transpose(0, 3)
            outputs_soft3 = outputs_soft3.unsqueeze(3).transpose(0, 3)
            # Hard pseudo-labels and per-voxel confidences from each branch.
            twodthreshold = 0.0
            twod1 = torch.argmax(outputs_soft2.detach(), dim=1, keepdim=False)
            confidence2d1, _ = torch.max(outputs_soft2.detach(), dim=1, keepdim=False)
            twod2 = torch.argmax(outputs_soft3.detach(), dim=1, keepdim=False)
            confidence2d2, _ = torch.max(outputs_soft3.detach(), dim=1, keepdim=False)
            threed = torch.argmax(outputs_soft1.detach(), dim=1, keepdim=False)
            confidence3d, _ = torch.max(outputs_soft1.detach(), dim=1, keepdim=False)
            # Veto 2-D pseudo-labels where the 3-D branch disagrees with
            # higher confidence than both 2-D branches.
            threedcorrection = (twod1 != threed) * (confidence3d > confidence2d1) * (confidence3d > confidence2d2)
            threedcorrection = ~threedcorrection
            # 2-D supervision mask: both 2-D branches agree and survive the veto.
            twodmask = (twod1 == twod2) * threedcorrection * (confidence2d1 > twodthreshold) * (confidence2d2 > twodthreshold)
            hardthreedthreshold = args.ht
            softthreedthreshold = args.st
            threedmask = confidence3d > hardthreedthreshold
            twodcorrection1 = (confidence2d1 > confidence3d) * (twod1 != threed)
            twodcorrection1 = ~twodcorrection1
            threedmask1 = threedmask
            twodcorrection2 = (confidence2d2 > confidence3d) * (twod2 != threed)
            twodcorrection2 = ~twodcorrection2
            threedmask2 = threedmask
            consistency_weight = get_current_consistency_weight(iter_num // 150)
            # 3d quality verification and good sample selection
            print(threed.shape)
            print(label_batch1.shape)
            print(np.count_nonzero(threed[maskz1 == 1] == label_batch1[maskz1 == 1]), np.count_nonzero(maskz1 == 1))
            # Accuracy of the 3-D pseudo-labels on the annotated slices.
            quality = np.count_nonzero(threed[maskz1 == 1] == label_batch1[maskz1 == 1]) / np.count_nonzero(maskz1 == 1)
            if quality > args.quality_bar:
                # High-quality 3-D predictions: relax to the soft threshold.
                threedmask1 = confidence3d > softthreedthreshold
                threedmask2 = confidence3d > softthreedthreshold
            ## calculate the loss
            # Ground-truth labels override pseudo-labels on annotated voxels,
            # which always get full weight 1.
            twod1[maskz1 == 1] = label_batch1[maskz1 == 1]
            twodmask = consistency_weight * twodmask
            twodmask[maskz1 == 1] = 1
            loss_seg1 = losses.wce(outputs1, twod1, twodmask, batch_size, patch_size[0], patch_size[1],
                                   patch_size[2])
            loss_seg_dice1 = losses.multi_dice_loss_weight(outputs_soft1, twod1, twodmask, classnum=7)
            supervised_loss1 = 0.5 * (loss_seg1 + loss_seg_dice1)
            threed[maskz1 == 1] = label_batch1[maskz1 == 1]
            threedmask1 = consistency_weight * threedmask1
            threedmask1[maskz1 == 1] = 1
            loss_seg2 = losses.wce(outputs2, threed, threedmask1, batch_size, patch_size[0], patch_size[1],
                                   patch_size[2])
            loss_seg_dice2 = losses.multi_dice_loss_weight(outputs_soft2, threed, threedmask1, classnum=7)
            supervised_loss2 = 0.5 * (loss_seg2 + loss_seg_dice2)
            threedmask2 = consistency_weight * threedmask2
            threedmask2[maskz1 == 1] = 1
            loss_seg3 = losses.wce(outputs3, threed, threedmask2, batch_size, patch_size[0], patch_size[1],
                                   patch_size[2])
            loss_seg_dice3 = losses.multi_dice_loss_weight(outputs_soft3, threed, threedmask2, classnum=7)
            supervised_loss3 = 0.5 * (loss_seg3 + loss_seg_dice3)
            # total loss
            loss = supervised_loss1 + supervised_loss2 + supervised_loss3
            #loss=loss_seg_dice1+loss_seg_dice2
            optimizer1.zero_grad()
            optimizer2.zero_grad()
            optimizer3.zero_grad()
            loss.backward()
            optimizer1.step()
            optimizer2.step()
            optimizer3.step()
            iter_num = iter_num + 1
            writer.add_scalar('lr', lr_, iter_num)
            writer.add_scalar('loss/loss', loss, iter_num)
            logging.info('iteration %d : 3d loss : %f 2d loss : %f, %f, mask num %d %d %d, quality %f ' % (iter_num, supervised_loss1.item(), supervised_loss2.item(), supervised_loss3.item(), torch.count_nonzero(twodmask).item(), torch.count_nonzero(threedmask1).item(), torch.count_nonzero(threedmask2).item(), quality))
            if iter_num % 100 == 0 and iter_num <= 6000:
                # Only the 3-D model is checkpointed (unwrap DataParallel).
                save_mode_path = os.path.join(snapshot_path, 'iter_' + str(iter_num) + '.pth')
                torch.save(model1.module.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
            time1 = time.time()
        if iter_num >= max_iterations:
            break
    save_mode_path = os.path.join(snapshot_path, 'iter_' + str(max_iterations) + '.pth')
    torch.save(model1.module.state_dict(), save_mode_path)
    logging.info("save model to {}".format(save_mode_path))
    writer.close()
| Python |
import torch
from torch.nn import functional as F
import numpy as np
import torch.nn as nn
import torch
from torch.autograd import Variable
def multi_dice_loss_weight(outputs_soft1, twod1, twodmask, classnum=7):
    """Mean masked soft-Dice loss over foreground classes 1..classnum.

    outputs_soft1 is a (N, C, ...) probability volume, twod1 holds integer
    class labels, and twodmask weights each voxel's contribution.
    """
    total = 0
    for cls in range(1, classnum + 1):
        total += dice_loss_weight(outputs_soft1[:, cls, :, :, :],
                                  twod1[:] == cls, twodmask)
    return 1 / classnum * total
def dice_loss_weight(score, target, mask):
    """Soft Dice loss restricted to voxels where `mask` is non-zero:
    1 - (2*<s,t>_m + eps) / (|s|^2_m + |t|^2_m + eps)."""
    eps = 1e-5
    tgt = target.float()
    overlap = torch.sum(score * tgt * mask)
    tgt_norm = torch.sum(tgt * tgt * mask)
    pred_norm = torch.sum(score * score * mask)
    return 1 - (2 * overlap + eps) / (pred_norm + tgt_norm + eps)
def wce(logits, target, weights, batch_size, H, W, D):
    """Per-voxel weighted cross-entropy over a dense 3-D volume.

    logits:  (batch, C, H, W, D) raw scores
    target:  class indices, reshapeable to (batch, 1, H, W, D)
    weights: per-voxel weights, broadcastable to the gathered log-probs
    """
    # Calculate log probabilities
    log_probs = F.log_softmax(logits, dim=1)
    # Pick the log-probability of the ground-truth class at each voxel
    picked = log_probs.gather(1, target.view(batch_size, 1, H, W, D))
    # Multiply with weights
    weighted = (picked * weights).view(batch_size, -1)
    # Rescale so that loss is in approx. same interval; the epsilon terms
    # keep the ratio finite when the weight sum is zero.
    #weighted_loss = weighted_logp.sum(1) / weights.view(batch_size, -1).sum(1)
    per_sample = (weighted.sum(1) - 0.00001) / (weights.view(batch_size, -1).sum(1) + 0.00001)
    # Average over mini-batch
    return -1.0 * per_sample.mean()
def dice_loss(score, target):
    """Soft Dice loss with squared-magnitude normalisation:
    1 - (2*<s,t> + eps) / (|s|^2 + |t|^2 + eps)."""
    eps = 1e-5
    t = target.float()
    numerator = 2 * torch.sum(score * t) + eps
    denominator = torch.sum(score * score) + torch.sum(t * t) + eps
    return 1 - numerator / denominator
def dice_loss1(score, target):
    """Soft Dice loss variant normalised by plain sums (not squares):
    1 - (2*<s,t> + eps) / (sum(s) + sum(t) + eps)."""
    eps = 1e-5
    t = target.float()
    numerator = 2 * torch.sum(score * t) + eps
    denominator = torch.sum(score) + torch.sum(t) + eps
    return 1 - numerator / denominator
def entropy_loss(p, C=2):
    """Mean normalized entropy of per-voxel class probabilities.

    :param p: probability tensor of shape (N, C, ...), channels on dim 1
    :param C: number of classes; entropy is divided by log(C) so the
        result lies in [0, 1]
    :return: scalar tensor, mean entropy over all voxels

    Fix: the original divided by ``torch.tensor(np.log(C)).cuda()``, which
    crashed on CPU tensors / CUDA-less hosts; dividing by the scalar value
    is numerically identical and device-agnostic.
    """
    ## p N*C*W*H*D
    y1 = -1 * torch.sum(p * torch.log(p + 1e-6), dim=1) / float(np.log(C))
    ent = torch.mean(y1)
    return ent
def softmax_dice_loss(input_logits, target_logits):
    """Mean per-channel soft Dice between the softmax distributions of two
    logit tensors.

    Note:
    - Returns the sum over all examples. Divide by the batch size afterwards
      if you want the mean.
    - Sends gradients to inputs but not the targets.
    """
    assert input_logits.size() == target_logits.size()
    probs_in = F.softmax(input_logits, dim=1)
    probs_tgt = F.softmax(target_logits, dim=1)
    n_channels = input_logits.shape[1]
    total = 0
    for c in range(0, n_channels):
        total += dice_loss1(probs_in[:, c], probs_tgt[:, c])
    return total / n_channels
def entropy_loss_map(p, C=2):
    """Per-voxel normalized entropy map of a probability tensor.

    :param p: probability tensor of shape (N, C, ...), channels on dim 1
    :param C: number of classes; values are divided by log(C)
    :return: tensor shaped like p with the channel dim collapsed to size 1

    Fix: replaced the hard-coded ``torch.tensor(np.log(C)).cuda()`` divisor
    with the scalar value, so the function also works on CPU tensors.
    """
    ent = -1 * torch.sum(p * torch.log(p + 1e-6), dim=1, keepdim=True) / float(np.log(C))
    return ent
def softmax_mse_loss(input_logits, target_logits):
    """Element-wise squared difference between the softmax distributions of
    two logit tensors (no reduction is applied).

    Note:
    - Returns the sum over all examples. Divide by the batch size afterwards
      if you want the mean.
    - Sends gradients to inputs but not the targets.
    """
    assert input_logits.size() == target_logits.size()
    diff = F.softmax(input_logits, dim=1) - F.softmax(target_logits, dim=1)
    return diff ** 2
def softmax_kl_loss(input_logits, target_logits):
    """Element-wise KL divergence between the softmax of `target_logits`
    and the softmax of `input_logits` (reduction='none').

    Note:
    - Returns the sum over all examples. Divide by the batch size afterwards
      if you want the mean.
    - Sends gradients to inputs but not the targets.
    """
    assert input_logits.size() == target_logits.size()
    log_probs = F.log_softmax(input_logits, dim=1)
    target_probs = F.softmax(target_logits, dim=1)
    # return F.kl_div(input_log_softmax, target_softmax)
    # mean_kl_div = torch.mean(0.2*kl_div[:,0,...]+0.8*kl_div[:,1,...])
    return F.kl_div(log_probs, target_probs, reduction='none')
def symmetric_mse_loss(input1, input2):
    """Mean squared difference; unlike F.mse_loss, gradients flow to BOTH
    arguments.

    Note:
    - Returns the sum over all examples. Divide by the batch size afterwards
      if you want the mean.
    """
    assert input1.size() == input2.size()
    diff = input1 - input2
    return torch.mean(diff * diff)
def get_probability(logits):
    """ Get probability from logits, if the channel of logits is 1 then use sigmoid else use softmax.
    :param logits: [N, C, H, W] or [N, C, D, H, W]
    :return: (pred, nclass) where pred always has >= 2 channels
    """
    size = logits.size()
    # N x 1 x H x W
    if size[1] > 1:
        pred = F.softmax(logits, dim=1)
        nclass = size[1]
    else:
        # F.sigmoid is deprecated in modern PyTorch; torch.sigmoid is the
        # supported, numerically identical replacement.
        pred = torch.sigmoid(logits)
        # Stack background/foreground so downstream code sees 2 channels.
        pred = torch.cat([1 - pred, pred], 1)
        nclass = 2
    return pred, nclass
def to_one_hot(tensor, nClasses):
    """ Input tensor : Nx1xHxW of integer class indices; returns an
    NxCxHxW float-zero tensor with 1s scattered at the index channel.
    """
    assert tensor.max().item() < nClasses, 'one hot tensor.max() = {} < {}'.format(torch.max(tensor), nClasses)
    assert tensor.min().item() >= 0, 'one hot tensor.min() = {} < {}'.format(tensor.min(), 0)
    shape = list(tensor.size())
    assert shape[1] == 1
    # Expand the singleton channel dim to one channel per class.
    shape[1] = nClasses
    result = torch.zeros(*shape)
    if tensor.is_cuda:
        result = result.cuda(tensor.device)
    return result.scatter_(1, tensor, 1)
class DiceLoss(nn.Module):
    """Multi-class soft Dice loss with optional per-class weights and an
    optional per-voxel mask."""

    def __init__(self, nclass, class_weights=None, smooth=1e-5):
        super(DiceLoss, self).__init__()
        self.smooth = smooth
        if class_weights is None:
            # default weight is all 1
            self.class_weights = nn.Parameter(torch.ones((1, nclass)).type(torch.float32), requires_grad=False)
        else:
            class_weights = np.array(class_weights)
            assert nclass == class_weights.shape[0]
            self.class_weights = nn.Parameter(torch.tensor(class_weights, dtype=torch.float32), requires_grad=False)
        # NOTE(review): class_weights is stored but never applied in
        # forward() below — confirm whether weighting was intended.

    def forward(self, logits, target, mask=None):
        # logits: (N, C, *spatial); target: (N, 1, *spatial) class indices;
        # mask (optional): per-voxel weights, reshapeable to (N, 1, -1).
        size = logits.size()
        N, nclass = size[0], size[1]
        logits = logits.view(N, nclass, -1)
        target = target.view(N, 1, -1)
        # get_probability handles both the softmax and the 1-channel sigmoid
        # case (which expands nclass to 2).
        pred, nclass = get_probability(logits)
        # N x C x H x W
        pred_one_hot = pred
        target_one_hot = to_one_hot(target.type(torch.long), nclass).type(torch.float32)
        # N x C x H x W
        inter = pred_one_hot * target_one_hot
        union = pred_one_hot + target_one_hot
        if mask is not None:
            mask = mask.view(N, 1, -1)
            inter = (inter.view(N, nclass, -1)*mask).sum(2)
            union = (union.view(N, nclass, -1)*mask).sum(2)
        else:
            # N x C
            inter = inter.view(N, nclass, -1).sum(2)
            union = union.view(N, nclass, -1).sum(2)
        # smooth to prevent overfitting
        # [https://github.com/pytorch/pytorch/issues/1249]
        # NxC
        dice = (2 * inter + self.smooth) / (union + self.smooth)
        return 1 - dice.mean()
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import pickle
import numpy as np
import torch
from torch.utils.data.sampler import Sampler
import networks
def load_model(path):
    """Loads model and return it without DataParallel table.

    Args:
        path: filesystem path to a checkpoint written by torch.save.

    Returns:
        The rebuilt model with its weights loaded, or None when no
        checkpoint file exists at `path`.
    """
    if os.path.isfile(path):
        print("=> loading checkpoint '{}'".format(path))
        checkpoint = torch.load(path)
        # size of the top layer
        N = checkpoint['state_dict']['top_layer.bias'].size()
        # build skeleton of the model
        sob = 'sobel.0.weight' in checkpoint['state_dict'].keys()
        # BUG FIX: architectures live in the `networks` module imported at the
        # top of this file; the previous `models.__dict__` raised NameError.
        model = networks.__dict__[checkpoint['arch']](sobel=sob, out=int(N[0]))

        # deal with a dataparallel table: strip the '.module' prefix that
        # nn.DataParallel inserts into state-dict keys
        def rename_key(key):
            if 'module' not in key:
                return key
            return ''.join(key.split('.module'))

        checkpoint['state_dict'] = {rename_key(key): val
                                    for key, val
                                    in checkpoint['state_dict'].items()}
        # load weights
        model.load_state_dict(checkpoint['state_dict'])
        print("Loaded")
    else:
        model = None
        print("=> no checkpoint found at '{}'".format(path))
    return model
class UnifLabelSampler(Sampler):
    """Samples elements uniformely accross pseudolabels.
    Args:
        N (int): size of returned iterator.
        images_lists: dict of key (target), value (list of data with this target)
    """

    def __init__(self, N, images_lists):
        self.N = N
        self.images_lists = images_lists
        self.indexes = self.generate_indexes_epoch()

    def generate_indexes_epoch(self):
        """Draw an equal number of samples from every pseudolabel group."""
        per_label = int(self.N / len(self.images_lists)) + 1
        chunks = []
        for group in self.images_lists:
            # sample with replacement only when the group is too small
            chunks.append(np.random.choice(
                group,
                per_label,
                replace=(len(group) <= per_label)
            ))
        res = np.concatenate(chunks)
        np.random.shuffle(res)
        return res[:self.N].astype('int')

    def __iter__(self):
        return iter(self.indexes)

    def __len__(self):
        return self.N
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out every statistic."""
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Fold in `val`, counted `n` times, and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def learning_rate_decay(optimizer, t, lr_0):
    """Anneal every param group's lr to lr_0 / sqrt(1 + lr_0 * wd * t)."""
    for group in optimizer.param_groups:
        denom = np.sqrt(1 + lr_0 * group['weight_decay'] * t)
        group['lr'] = lr_0 / denom
class Logger():
    """ Class to update every epoch to keep trace of the results
    Methods:
        - log() log and save
    """

    def __init__(self, path):
        self.path = path
        self.data = []

    def log(self, train_point):
        """Append one record and re-pickle the full history to self.path."""
        self.data.append(train_point)
        with open(self.path, 'wb') as fp:
            # highest pickle protocol for compactness
            pickle.dump(self.data, fp, -1)
| Python |
3D | HengCai-NJU/3D2DCT | code/utils/ramps.py | .py | 1,319 | 42 | # Copyright (c) 2018, Curious AI Ltd. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Functions for ramping hyperparameters up or down
Each function takes the current training step or epoch, and the
ramp length in the same format, and returns a multiplier between
0 and 1.
"""
import numpy as np
def sigmoid_rampup(current, rampup_length):
    """Exponential rampup from https://arxiv.org/abs/1610.02242"""
    if rampup_length == 0:
        return 1.0
    # clamp progress into [0, rampup_length], then map to exp(-5 * (1-p)^2)
    phase = 1.0 - np.clip(current, 0.0, rampup_length) / rampup_length
    return float(np.exp(-5.0 * phase * phase))
def linear_rampup(current, rampup_length):
    """Linear rampup"""
    assert current >= 0 and rampup_length >= 0
    # saturates at 1.0 once `current` reaches the ramp length
    return 1.0 if current >= rampup_length else current / rampup_length
def cosine_rampdown(current, rampdown_length):
    """Cosine rampdown from https://arxiv.org/abs/1608.03983"""
    assert 0 <= current <= rampdown_length
    # half-cosine schedule: 1.0 at current=0, 0.0 at current=rampdown_length
    cos_term = np.cos(np.pi * current / rampdown_length)
    return float(.5 * (cos_term + 1))
| Python |
3D | HengCai-NJU/3D2DCT | code/networks/unetr.py | .py | 8,715 | 223 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, Union
import torch
import torch.nn as nn
from monai.networks.blocks.dynunet_block import UnetOutBlock
from monai.networks.blocks import UnetrBasicBlock, UnetrPrUpBlock, UnetrUpBlock
from monai.networks.nets import ViT
class UNETR(nn.Module):
    """
    UNETR based on: "Hatamizadeh et al.,
    UNETR: Transformers for 3D Medical Image Segmentation <https://arxiv.org/abs/2103.10504>"
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        img_size: Tuple[int, int, int],
        feature_size: int = 16,
        hidden_size: int = 768,
        mlp_dim: int = 3072,
        num_heads: int = 12,
        pos_embed: str = "perceptron",
        norm_name: Union[Tuple, str] = "instance",
        conv_block: bool = False,
        res_block: bool = True,
        dropout_rate: float = 0.0,
    ) -> None:
        """
        Args:
            in_channels: dimension of input channels.
            out_channels: dimension of output channels.
            img_size: dimension of input image.
            feature_size: dimension of network feature size.
            hidden_size: dimension of hidden layer.
            mlp_dim: dimension of feedforward layer.
            num_heads: number of attention heads.
            pos_embed: position embedding layer type.
            norm_name: feature normalization type and arguments.
            conv_block: bool argument to determine if convolutional block is used.
            res_block: bool argument to determine if residual block is used.
            dropout_rate: fraction of the input units to drop.
        Examples::
            # for single channel input 4-channel output with patch size of (96,96,96), feature size of 32 and batch norm
            >>> net = UNETR(in_channels=1, out_channels=4, img_size=(96,96,96), feature_size=32, norm_name='batch')
            # for 4-channel input 3-channel output with patch size of (128,128,128), conv position embedding and instance norm
            >>> net = UNETR(in_channels=4, out_channels=3, img_size=(128,128,128), pos_embed='conv', norm_name='instance')
        """
        super().__init__()
        # validate hyper-parameters before building any sub-modules
        if not (0 <= dropout_rate <= 1):
            raise AssertionError("dropout_rate should be between 0 and 1.")
        if hidden_size % num_heads != 0:
            raise AssertionError("hidden size should be divisible by num_heads.")
        if pos_embed not in ["conv", "perceptron"]:
            raise KeyError(f"Position embedding layer of type {pos_embed} is not supported.")
        self.num_layers = 12
        self.patch_size = (16, 16, 16)
        # token-grid resolution: image extent divided by the fixed patch size
        self.feat_size = (
            img_size[0] // self.patch_size[0],
            img_size[1] // self.patch_size[1],
            img_size[2] // self.patch_size[2],
        )
        self.hidden_size = hidden_size
        self.classification = False
        # ViT backbone; classification=False so hidden states are returned
        self.vit = ViT(
            in_channels=in_channels,
            img_size=img_size,
            patch_size=self.patch_size,
            hidden_size=hidden_size,
            mlp_dim=mlp_dim,
            num_layers=self.num_layers,
            num_heads=num_heads,
            pos_embed=pos_embed,
            classification=self.classification,
            dropout_rate=dropout_rate,
        )
        # encoder1 works on the raw input; encoder2-4 upsample transformer
        # features tapped at increasing depth (see forward()).
        self.encoder1 = UnetrBasicBlock(
            spatial_dims=3,
            in_channels=in_channels,
            out_channels=feature_size,
            kernel_size=3,
            stride=1,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.encoder2 = UnetrPrUpBlock(
            spatial_dims=3,
            in_channels=hidden_size,
            out_channels=feature_size * 2,
            num_layer=2,
            kernel_size=3,
            stride=1,
            upsample_kernel_size=2,
            norm_name=norm_name,
            conv_block=conv_block,
            res_block=res_block,
        )
        self.encoder3 = UnetrPrUpBlock(
            spatial_dims=3,
            in_channels=hidden_size,
            out_channels=feature_size * 4,
            num_layer=1,
            kernel_size=3,
            stride=1,
            upsample_kernel_size=2,
            norm_name=norm_name,
            conv_block=conv_block,
            res_block=res_block,
        )
        self.encoder4 = UnetrPrUpBlock(
            spatial_dims=3,
            in_channels=hidden_size,
            out_channels=feature_size * 8,
            num_layer=0,
            kernel_size=3,
            stride=1,
            upsample_kernel_size=2,
            norm_name=norm_name,
            conv_block=conv_block,
            res_block=res_block,
        )
        # decoder5..decoder2 progressively upsample and fuse skip features
        self.decoder5 = UnetrUpBlock(
            spatial_dims=3,
            in_channels=hidden_size,
            out_channels=feature_size * 8,
            kernel_size=3,
            upsample_kernel_size=2,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.decoder4 = UnetrUpBlock(
            spatial_dims=3,
            in_channels=feature_size * 8,
            out_channels=feature_size * 4,
            kernel_size=3,
            upsample_kernel_size=2,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.decoder3 = UnetrUpBlock(
            spatial_dims=3,
            in_channels=feature_size * 4,
            out_channels=feature_size * 2,
            kernel_size=3,
            upsample_kernel_size=2,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.decoder2 = UnetrUpBlock(
            spatial_dims=3,
            in_channels=feature_size * 2,
            out_channels=feature_size,
            kernel_size=3,
            upsample_kernel_size=2,
            norm_name=norm_name,
            res_block=res_block,
        )
        # final 1x1x1 projection to per-class logits
        self.out = UnetOutBlock(spatial_dims=3, in_channels=feature_size, out_channels=out_channels)  # type: ignore

    def proj_feat(self, x, hidden_size, feat_size):
        # reshape flat transformer tokens (B, n_tokens, hidden) back into a
        # 3D feature volume (B, hidden, D, H, W) matching the token grid
        x = x.view(x.size(0), feat_size[0], feat_size[1], feat_size[2], hidden_size)
        x = x.permute(0, 4, 1, 2, 3).contiguous()
        return x

    def load_from(self, weights):
        """Copy pretrained ViT weights (a checkpoint dict) into self.vit."""
        with torch.no_grad():
            res_weight = weights  # NOTE(review): kept but never used below
            # copy weights from patch embedding
            for i in weights['state_dict']:
                print(i)
            self.vit.patch_embedding.position_embeddings.copy_(weights['state_dict']['module.transformer.patch_embedding.position_embeddings_3d'])
            self.vit.patch_embedding.cls_token.copy_(weights['state_dict']['module.transformer.patch_embedding.cls_token'])
            self.vit.patch_embedding.patch_embeddings[1].weight.copy_(weights['state_dict']['module.transformer.patch_embedding.patch_embeddings.1.weight'])
            self.vit.patch_embedding.patch_embeddings[1].bias.copy_(weights['state_dict']['module.transformer.patch_embedding.patch_embeddings.1.bias'])
            # copy weights from encoding blocks (default: num of blocks: 12)
            for bname, block in self.vit.blocks.named_children():
                print(block)
                block.loadFrom(weights, n_block=bname)
            # last norm layer of transformer
            self.vit.norm.weight.copy_(weights['state_dict']['module.transformer.norm.weight'])
            self.vit.norm.bias.copy_(weights['state_dict']['module.transformer.norm.bias'])

    def forward(self, x_in):
        # ViT returns final tokens plus the hidden state of every layer
        x, hidden_states_out = self.vit(x_in)
        enc1 = self.encoder1(x_in)
        # tap transformer layers 3 / 6 / 9 as intermediate skip features
        x2 = hidden_states_out[3]
        enc2 = self.encoder2(self.proj_feat(x2, self.hidden_size, self.feat_size))
        x3 = hidden_states_out[6]
        enc3 = self.encoder3(self.proj_feat(x3, self.hidden_size, self.feat_size))
        x4 = hidden_states_out[9]
        enc4 = self.encoder4(self.proj_feat(x4, self.hidden_size, self.feat_size))
        dec4 = self.proj_feat(x, self.hidden_size, self.feat_size)
        dec3 = self.decoder5(dec4, enc4)
        dec2 = self.decoder4(dec3, enc3)
        dec1 = self.decoder3(dec2, enc2)
        out = self.decoder2(dec1, enc1)
        logits = self.out(out)
        return logits
| Python |
3D | HengCai-NJU/3D2DCT | code/networks/unet.py | .py | 3,366 | 106 | import torch
import torch.nn as nn
import torch.nn.functional as F
class DoubleConv(nn.Module):
    """(convolution => [BN] => ReLU) * 2"""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        layers = []
        # two identical conv stages; only the first changes the channel count
        for cin in (in_channels, out_channels):
            layers += [
                nn.Conv2d(cin, out_channels, kernel_size=3, padding=1),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
            ]
        self.double_conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.double_conv(x)
class Down(nn.Module):
    """Downscaling with maxpool then double conv"""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # halve the spatial resolution, then refine features
        self.maxpool_conv = nn.Sequential(
            nn.MaxPool2d(2),
            DoubleConv(in_channels, out_channels),
        )

    def forward(self, x):
        return self.maxpool_conv(x)
class Up(nn.Module):
    """Upscaling then double conv"""

    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        if bilinear:
            # parameter-free upsampling; channel count is unchanged
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
        self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1, x2):
        x1 = self.up(x1)
        # pad x1 so its spatial size matches the skip feature x2 (input is CHW)
        dy = torch.tensor([x2.size()[2] - x1.size()[2]])
        dx = torch.tensor([x2.size()[3] - x1.size()[3]])
        x1 = F.pad(x1, [dx // 2, dx - dx // 2,
                        dy // 2, dy - dy // 2])
        # if you have padding issues, see
        # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
        # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
        return self.conv(torch.cat([x2, x1], dim=1))
class OutConv(nn.Module):
    """1x1 convolution mapping feature channels to per-class logits."""

    def __init__(self, in_channels, out_channels):
        super(OutConv, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        return self.conv(x)
class UNet(nn.Module):
    """Classic 2D U-Net: 4 down-sampling and 4 up-sampling stages."""

    def __init__(self, n_channels, n_classes, bilinear=False):
        super(UNet, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear
        # encoder: widths 64 -> 1024
        self.inc = DoubleConv(n_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        self.down4 = Down(512, 1024)
        # decoder: widths 1024 -> 64
        self.up1 = Up(1024, 512, bilinear)
        self.up2 = Up(512, 256, bilinear)
        self.up3 = Up(256, 128, bilinear)
        self.up4 = Up(128, 64, bilinear)
        self.outc = OutConv(64, n_classes)

    def forward(self, x):
        # encoder: keep each scale for its skip connection
        skip1 = self.inc(x)
        skip2 = self.down1(skip1)
        skip3 = self.down2(skip2)
        skip4 = self.down3(skip3)
        bottom = self.down4(skip4)
        # decoder: fuse with the matching encoder feature map
        out = self.up1(bottom, skip4)
        out = self.up2(out, skip3)
        out = self.up3(out, skip2)
        out = self.up4(out, skip1)
        return self.outc(out)
| Python |
3D | HengCai-NJU/3D2DCT | code/networks/crn.py | .py | 9,450 | 259 | import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
class ConvBlock(nn.Module):
    """Stack of n_stages (Conv3d -> optional norm -> ReLU) layers."""

    @staticmethod
    def _norm_layers(kind, channels):
        # map a normalization name onto the matching 3D norm module
        if kind == 'batchnorm':
            return [nn.BatchNorm3d(channels)]
        if kind == 'groupnorm':
            return [nn.GroupNorm(num_groups=16, num_channels=channels)]
        if kind == 'instancenorm':
            return [nn.InstanceNorm3d(channels)]
        assert kind == 'none'
        return []

    def __init__(self, n_stages, n_filters_in, n_filters_out, normalization='none'):
        super(ConvBlock, self).__init__()
        ops = []
        for stage in range(n_stages):
            cin = n_filters_in if stage == 0 else n_filters_out
            ops.append(nn.Conv3d(cin, n_filters_out, 3, padding=1))
            ops.extend(self._norm_layers(normalization, n_filters_out))
            ops.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*ops)

    def forward(self, x):
        return self.conv(x)
class ResidualConvBlock(nn.Module):
    """ConvBlock variant with a skip connection: the last stage's ReLU is
    applied after adding the input (requires n_filters_in == n_filters_out)."""

    def __init__(self, n_stages, n_filters_in, n_filters_out, normalization='none'):
        super(ResidualConvBlock, self).__init__()
        ops = []
        for stage in range(n_stages):
            cin = n_filters_in if stage == 0 else n_filters_out
            ops.append(nn.Conv3d(cin, n_filters_out, 3, padding=1))
            if normalization == 'batchnorm':
                ops.append(nn.BatchNorm3d(n_filters_out))
            elif normalization == 'groupnorm':
                ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
            elif normalization == 'instancenorm':
                ops.append(nn.InstanceNorm3d(n_filters_out))
            else:
                assert normalization == 'none'
            # the final stage's activation happens after the skip-add
            if stage != n_stages - 1:
                ops.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*ops)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.conv(x) + x)
class DownsamplingConvBlock(nn.Module):
    """Strided Conv3d (kernel == stride) -> optional norm -> ReLU."""

    def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
        super(DownsamplingConvBlock, self).__init__()
        # kernel size equals stride, so the output is exactly 1/stride the size
        ops = [nn.Conv3d(n_filters_in, n_filters_out, stride, padding=0, stride=stride)]
        if normalization == 'batchnorm':
            ops.append(nn.BatchNorm3d(n_filters_out))
        elif normalization == 'groupnorm':
            ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
        elif normalization == 'instancenorm':
            ops.append(nn.InstanceNorm3d(n_filters_out))
        else:
            assert normalization == 'none'
        ops.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*ops)

    def forward(self, x):
        return self.conv(x)
class UpsamplingDeconvBlock(nn.Module):
    """ConvTranspose3d (kernel == stride) -> optional norm -> ReLU."""

    def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
        super(UpsamplingDeconvBlock, self).__init__()
        # kernel size equals stride, so the output is exactly stride x the size
        ops = [nn.ConvTranspose3d(n_filters_in, n_filters_out, stride, padding=0, stride=stride)]
        if normalization == 'batchnorm':
            ops.append(nn.BatchNorm3d(n_filters_out))
        elif normalization == 'groupnorm':
            ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
        elif normalization == 'instancenorm':
            ops.append(nn.InstanceNorm3d(n_filters_out))
        else:
            assert normalization == 'none'
        ops.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*ops)

    def forward(self, x):
        return self.conv(x)
class Upsampling(nn.Module):
    """Trilinear upsampling followed by 3x3x3 conv, optional norm, ReLU."""

    def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
        super(Upsampling, self).__init__()
        ops = [
            nn.Upsample(scale_factor=stride, mode='trilinear', align_corners=False),
            nn.Conv3d(n_filters_in, n_filters_out, kernel_size=3, padding=1),
        ]
        if normalization == 'batchnorm':
            ops.append(nn.BatchNorm3d(n_filters_out))
        elif normalization == 'groupnorm':
            ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
        elif normalization == 'instancenorm':
            ops.append(nn.InstanceNorm3d(n_filters_out))
        else:
            assert normalization == 'none'
        ops.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*ops)

    def forward(self, x):
        return self.conv(x)
class crnVNet(nn.Module):
    """V-Net style 3D encoder/decoder whose single output head is replaced by
    three independent prediction branches (self.branchs); forward() therefore
    returns a list of three logits tensors."""

    def __init__(self, n_channels=1, n_classes=2, n_filters=16, normalization='none', has_dropout=False):
        super(crnVNet, self).__init__()
        self.has_dropout = has_dropout
        # contracting path: feature width doubles at each down-sampling
        self.block_one = ConvBlock(1, n_channels, n_filters, normalization=normalization)
        self.block_one_dw = DownsamplingConvBlock(n_filters, 2 * n_filters, normalization=normalization)
        self.block_two = ConvBlock(2, n_filters * 2, n_filters * 2, normalization=normalization)
        self.block_two_dw = DownsamplingConvBlock(n_filters * 2, n_filters * 4, normalization=normalization)
        self.block_three = ConvBlock(3, n_filters * 4, n_filters * 4, normalization=normalization)
        self.block_three_dw = DownsamplingConvBlock(n_filters * 4, n_filters * 8, normalization=normalization)
        self.block_four = ConvBlock(3, n_filters * 8, n_filters * 8, normalization=normalization)
        self.block_four_dw = DownsamplingConvBlock(n_filters * 8, n_filters * 16, normalization=normalization)
        self.block_five = ConvBlock(3, n_filters * 16, n_filters * 16, normalization=normalization)
        # expanding path: mirror of the encoder using transposed convolutions
        self.block_five_up = UpsamplingDeconvBlock(n_filters * 16, n_filters * 8, normalization=normalization)
        self.block_six = ConvBlock(3, n_filters * 8, n_filters * 8, normalization=normalization)
        self.block_six_up = UpsamplingDeconvBlock(n_filters * 8, n_filters * 4, normalization=normalization)
        self.block_seven = ConvBlock(3, n_filters * 4, n_filters * 4, normalization=normalization)
        self.block_seven_up = UpsamplingDeconvBlock(n_filters * 4, n_filters * 2, normalization=normalization)
        self.block_eight = ConvBlock(2, n_filters * 2, n_filters * 2, normalization=normalization)
        self.block_eight_up = UpsamplingDeconvBlock(n_filters * 2, n_filters, normalization=normalization)
        # three independent prediction heads sharing the decoder trunk
        self.branchs = nn.ModuleList()
        for i in range(3):
            if has_dropout:
                seq = nn.Sequential(
                    ConvBlock(1, n_filters, n_filters, normalization=normalization),
                    nn.Dropout3d(p=0.5, inplace=False),
                    nn.Conv3d(n_filters, n_classes, 1, padding=0)
                )
            else:
                seq = nn.Sequential(
                    ConvBlock(1, n_filters, n_filters, normalization=normalization),
                    nn.Conv3d(n_filters, n_classes, 1, padding=0)
                )
            self.branchs.append(seq)
        # self.block_nine =
        # self.out_conv =
        # self.__init_weight()

    def encoder(self, input):
        """Run the contracting path; returns the five per-scale feature maps."""
        x1 = self.block_one(input)
        x1_dw = self.block_one_dw(x1)
        x2 = self.block_two(x1_dw)
        x2_dw = self.block_two_dw(x2)
        x3 = self.block_three(x2_dw)
        x3_dw = self.block_three_dw(x3)
        x4 = self.block_four(x3_dw)
        x4_dw = self.block_four_dw(x4)
        x5 = self.block_five(x4_dw)
        # x5 = F.dropout3d(x5, p=0.5, training=True)
        if self.has_dropout:
            #x5 = self.dropout(x5)
            # NOTE(review): training=True keeps dropout active even in eval
            # mode — confirm this is intentional (e.g. MC-dropout sampling).
            x5 = F.dropout3d(x5, p=0.5, training=True)
        res = [x1, x2, x3, x4, x5]
        return res

    def decoder(self, features):
        """Run the expanding path with additive skips; returns a list with
        one logits tensor per branch."""
        x1 = features[0]
        x2 = features[1]
        x3 = features[2]
        x4 = features[3]
        x5 = features[4]
        x5_up = self.block_five_up(x5)
        x5_up = x5_up + x4
        x6 = self.block_six(x5_up)
        x6_up = self.block_six_up(x6)
        x6_up = x6_up + x3
        x7 = self.block_seven(x6_up)
        x7_up = self.block_seven_up(x7)
        x7_up = x7_up + x2
        x8 = self.block_eight(x7_up)
        x8_up = self.block_eight_up(x8)
        x8_up = x8_up + x1
        out = []
        for branch in self.branchs:
            o = branch(x8_up)
            out.append(o)
        # x9 = self.block_nine(x8_up)
        # # x9 = F.dropout3d(x9, p=0.5, training=True)
        # if self.has_dropout:
        #     x9 = self.dropout(x9)
        # out = self.out_conv(x9)
        return out

    def forward(self, input, turnoff_drop=False):
        """Full pass; turnoff_drop temporarily disables self.has_dropout."""
        if turnoff_drop:
            has_dropout = self.has_dropout
            self.has_dropout = False
        features = self.encoder(input)
        out = self.decoder(features)
        if turnoff_drop:
            self.has_dropout = has_dropout
        return out
3D | HengCai-NJU/3D2DCT | code/networks/vnet.py | .py | 13,105 | 352 | import torch
from torch import nn
import torch.nn.functional as F
class ConvBlock(nn.Module):
    """n_stages repetitions of Conv3d -> optional normalization -> ReLU."""

    def __init__(self, n_stages, n_filters_in, n_filters_out, normalization='none'):
        super(ConvBlock, self).__init__()
        ops = []
        for stage in range(n_stages):
            # only the first stage changes the channel count
            in_ch = n_filters_in if stage == 0 else n_filters_out
            ops.append(nn.Conv3d(in_ch, n_filters_out, 3, padding=1))
            if normalization == 'batchnorm':
                ops.append(nn.BatchNorm3d(n_filters_out))
            elif normalization == 'groupnorm':
                ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
            elif normalization == 'instancenorm':
                ops.append(nn.InstanceNorm3d(n_filters_out))
            else:
                assert normalization == 'none'
            ops.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*ops)

    def forward(self, x):
        return self.conv(x)
class ResidualConvBlock(nn.Module):
    """Residual ConvBlock: last-stage ReLU is deferred until after adding
    the input (so n_filters_in must equal n_filters_out)."""

    def __init__(self, n_stages, n_filters_in, n_filters_out, normalization='none'):
        super(ResidualConvBlock, self).__init__()
        ops = []
        last = n_stages - 1
        for stage in range(n_stages):
            in_ch = n_filters_in if stage == 0 else n_filters_out
            ops.append(nn.Conv3d(in_ch, n_filters_out, 3, padding=1))
            if normalization == 'batchnorm':
                ops.append(nn.BatchNorm3d(n_filters_out))
            elif normalization == 'groupnorm':
                ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
            elif normalization == 'instancenorm':
                ops.append(nn.InstanceNorm3d(n_filters_out))
            else:
                assert normalization == 'none'
            if stage != last:
                ops.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*ops)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.conv(x) + x)
class DownsamplingConvBlock(nn.Module):
def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
super(DownsamplingConvBlock, self).__init__()
ops = []
if normalization != 'none':
ops.append(nn.Conv3d(n_filters_in, n_filters_out, stride, padding=0, stride=stride))
if normalization == 'batchnorm':
ops.append(nn.BatchNorm3d(n_filters_out))
elif normalization == 'groupnorm':
ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
elif normalization == 'instancenorm':
ops.append(nn.InstanceNorm3d(n_filters_out))
else:
assert False
else:
ops.append(nn.Conv3d(n_filters_in, n_filters_out, stride, padding=0, stride=stride))
ops.append(nn.ReLU(inplace=True))
self.conv = nn.Sequential(*ops)
def forward(self, x):
x = self.conv(x)
return x
class UpsamplingDeconvBlock(nn.Module):
    """ConvTranspose3d (kernel size == stride) -> optional norm -> ReLU."""

    def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
        super(UpsamplingDeconvBlock, self).__init__()
        # kernel == stride, so spatial dims grow by exactly `stride`
        ops = [nn.ConvTranspose3d(n_filters_in, n_filters_out, stride, padding=0, stride=stride)]
        if normalization == 'batchnorm':
            ops.append(nn.BatchNorm3d(n_filters_out))
        elif normalization == 'groupnorm':
            ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
        elif normalization == 'instancenorm':
            ops.append(nn.InstanceNorm3d(n_filters_out))
        else:
            assert normalization == 'none'
        ops.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*ops)

    def forward(self, x):
        return self.conv(x)
class Upsampling(nn.Module):
    """Trilinear interpolation upsample, then 3x3x3 conv, optional norm, ReLU."""

    def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
        super(Upsampling, self).__init__()
        ops = [
            nn.Upsample(scale_factor=stride, mode='trilinear', align_corners=False),
            nn.Conv3d(n_filters_in, n_filters_out, kernel_size=3, padding=1),
        ]
        if normalization == 'batchnorm':
            ops.append(nn.BatchNorm3d(n_filters_out))
        elif normalization == 'groupnorm':
            ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
        elif normalization == 'instancenorm':
            ops.append(nn.InstanceNorm3d(n_filters_out))
        else:
            assert normalization == 'none'
        ops.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*ops)

    def forward(self, x):
        return self.conv(x)
class VNet(nn.Module):
    """Standard 3D V-Net encoder/decoder with additive skip connections and a
    single segmentation head; feature width starts at n_filters and doubles
    at each of the four down-sampling stages."""

    def __init__(self, n_channels=3, n_classes=2, n_filters=16, normalization='none', has_dropout=False):
        super(VNet, self).__init__()
        self.has_dropout = has_dropout
        # contracting path
        self.block_one = ConvBlock(1, n_channels, n_filters, normalization=normalization)
        self.block_one_dw = DownsamplingConvBlock(n_filters, 2 * n_filters, normalization=normalization)
        self.block_two = ConvBlock(2, n_filters * 2, n_filters * 2, normalization=normalization)
        self.block_two_dw = DownsamplingConvBlock(n_filters * 2, n_filters * 4, normalization=normalization)
        self.block_three = ConvBlock(3, n_filters * 4, n_filters * 4, normalization=normalization)
        self.block_three_dw = DownsamplingConvBlock(n_filters * 4, n_filters * 8, normalization=normalization)
        self.block_four = ConvBlock(3, n_filters * 8, n_filters * 8, normalization=normalization)
        self.block_four_dw = DownsamplingConvBlock(n_filters * 8, n_filters * 16, normalization=normalization)
        self.block_five = ConvBlock(3, n_filters * 16, n_filters * 16, normalization=normalization)
        # expanding path
        self.block_five_up = UpsamplingDeconvBlock(n_filters * 16, n_filters * 8, normalization=normalization)
        self.block_six = ConvBlock(3, n_filters * 8, n_filters * 8, normalization=normalization)
        self.block_six_up = UpsamplingDeconvBlock(n_filters * 8, n_filters * 4, normalization=normalization)
        self.block_seven = ConvBlock(3, n_filters * 4, n_filters * 4, normalization=normalization)
        self.block_seven_up = UpsamplingDeconvBlock(n_filters * 4, n_filters * 2, normalization=normalization)
        self.block_eight = ConvBlock(2, n_filters * 2, n_filters * 2, normalization=normalization)
        self.block_eight_up = UpsamplingDeconvBlock(n_filters * 2, n_filters, normalization=normalization)
        self.block_nine = ConvBlock(1, n_filters, n_filters, normalization=normalization)
        # 1x1x1 projection to per-class logits
        self.out_conv = nn.Conv3d(n_filters, n_classes, 1, padding=0)
        self.dropout = nn.Dropout3d(p=0.5, inplace=False)
        # self.__init_weight()

    def encoder(self, input):
        """Contracting path; returns the five per-scale feature maps."""
        x1 = self.block_one(input)
        x1_dw = self.block_one_dw(x1)
        x2 = self.block_two(x1_dw)
        x2_dw = self.block_two_dw(x2)
        x3 = self.block_three(x2_dw)
        x3_dw = self.block_three_dw(x3)
        x4 = self.block_four(x3_dw)
        x4_dw = self.block_four_dw(x4)
        x5 = self.block_five(x4_dw)
        # x5 = F.dropout3d(x5, p=0.5, training=True)
        if self.has_dropout:
            x5 = self.dropout(x5)
        res = [x1, x2, x3, x4, x5]
        return res

    def decoder(self, features):
        """Expanding path with additive skips; returns the logits tensor."""
        x1 = features[0]
        x2 = features[1]
        x3 = features[2]
        x4 = features[3]
        x5 = features[4]
        x5_up = self.block_five_up(x5)
        x5_up = x5_up + x4
        x6 = self.block_six(x5_up)
        x6_up = self.block_six_up(x6)
        x6_up = x6_up + x3
        x7 = self.block_seven(x6_up)
        x7_up = self.block_seven_up(x7)
        x7_up = x7_up + x2
        x8 = self.block_eight(x7_up)
        x8_up = self.block_eight_up(x8)
        x8_up = x8_up + x1
        x9 = self.block_nine(x8_up)
        # x9 = F.dropout3d(x9, p=0.5, training=True)
        if self.has_dropout:
            x9 = self.dropout(x9)
        out = self.out_conv(x9)
        return out

    def forward(self, input, turnoff_drop=False):
        """Full pass; turnoff_drop temporarily disables self.has_dropout."""
        if turnoff_drop:
            has_dropout = self.has_dropout
            self.has_dropout = False
        features = self.encoder(input)
        out = self.decoder(features)
        if turnoff_drop:
            self.has_dropout = has_dropout
        return out

    # def __init_weight(self):
    #     for m in self.modules():
    #         if isinstance(m, nn.Conv3d):
    #             torch.nn.init.kaiming_normal_(m.weight)
    #         elif isinstance(m, nn.BatchNorm3d):
    #             m.weight.data.fill_(1)
    #             m.bias.data.zero_()
class VNet_coranet(nn.Module):
    """V-Net variant for the CoraNet experiments.

    NOTE(review): the visible definition is line-for-line identical to VNet
    in this file — presumably kept as a separate class so it can be modified
    independently; confirm before merging the two.
    """

    def __init__(self, n_channels=3, n_classes=2, n_filters=16, normalization='none', has_dropout=False):
        super(VNet_coranet, self).__init__()
        self.has_dropout = has_dropout
        # contracting path
        self.block_one = ConvBlock(1, n_channels, n_filters, normalization=normalization)
        self.block_one_dw = DownsamplingConvBlock(n_filters, 2 * n_filters, normalization=normalization)
        self.block_two = ConvBlock(2, n_filters * 2, n_filters * 2, normalization=normalization)
        self.block_two_dw = DownsamplingConvBlock(n_filters * 2, n_filters * 4, normalization=normalization)
        self.block_three = ConvBlock(3, n_filters * 4, n_filters * 4, normalization=normalization)
        self.block_three_dw = DownsamplingConvBlock(n_filters * 4, n_filters * 8, normalization=normalization)
        self.block_four = ConvBlock(3, n_filters * 8, n_filters * 8, normalization=normalization)
        self.block_four_dw = DownsamplingConvBlock(n_filters * 8, n_filters * 16, normalization=normalization)
        self.block_five = ConvBlock(3, n_filters * 16, n_filters * 16, normalization=normalization)
        # expanding path
        self.block_five_up = UpsamplingDeconvBlock(n_filters * 16, n_filters * 8, normalization=normalization)
        self.block_six = ConvBlock(3, n_filters * 8, n_filters * 8, normalization=normalization)
        self.block_six_up = UpsamplingDeconvBlock(n_filters * 8, n_filters * 4, normalization=normalization)
        self.block_seven = ConvBlock(3, n_filters * 4, n_filters * 4, normalization=normalization)
        self.block_seven_up = UpsamplingDeconvBlock(n_filters * 4, n_filters * 2, normalization=normalization)
        self.block_eight = ConvBlock(2, n_filters * 2, n_filters * 2, normalization=normalization)
        self.block_eight_up = UpsamplingDeconvBlock(n_filters * 2, n_filters, normalization=normalization)
        self.block_nine = ConvBlock(1, n_filters, n_filters, normalization=normalization)
        # 1x1x1 projection to per-class logits
        self.out_conv = nn.Conv3d(n_filters, n_classes, 1, padding=0)
        self.dropout = nn.Dropout3d(p=0.5, inplace=False)
        # self.__init_weight()

    def encoder(self, input):
        """Contracting path; returns the five per-scale feature maps."""
        x1 = self.block_one(input)
        x1_dw = self.block_one_dw(x1)
        x2 = self.block_two(x1_dw)
        x2_dw = self.block_two_dw(x2)
        x3 = self.block_three(x2_dw)
        x3_dw = self.block_three_dw(x3)
        x4 = self.block_four(x3_dw)
        x4_dw = self.block_four_dw(x4)
        x5 = self.block_five(x4_dw)
        # x5 = F.dropout3d(x5, p=0.5, training=True)
        if self.has_dropout:
            x5 = self.dropout(x5)
        res = [x1, x2, x3, x4, x5]
        return res

    def decoder(self, features):
        """Expanding path with additive skips; returns the logits tensor."""
        x1 = features[0]
        x2 = features[1]
        x3 = features[2]
        x4 = features[3]
        x5 = features[4]
        x5_up = self.block_five_up(x5)
        x5_up = x5_up + x4
        x6 = self.block_six(x5_up)
        x6_up = self.block_six_up(x6)
        x6_up = x6_up + x3
        x7 = self.block_seven(x6_up)
        x7_up = self.block_seven_up(x7)
        x7_up = x7_up + x2
        x8 = self.block_eight(x7_up)
        x8_up = self.block_eight_up(x8)
        x8_up = x8_up + x1
        x9 = self.block_nine(x8_up)
        # x9 = F.dropout3d(x9, p=0.5, training=True)
        if self.has_dropout:
            x9 = self.dropout(x9)
        out = self.out_conv(x9)
        return out

    def forward(self, input, turnoff_drop=False):
        """Full pass; turnoff_drop temporarily disables self.has_dropout."""
        if turnoff_drop:
            has_dropout = self.has_dropout
            self.has_dropout = False
        features = self.encoder(input)
        out = self.decoder(features)
        if turnoff_drop:
            self.has_dropout = has_dropout
        return out

    # def __init_weight(self):
    #     for m in self.modules():
    #         if isinstance(m, nn.Conv3d):
    #             torch.nn.init.kaiming_normal_(m.weight)
    #         elif isinstance(m, nn.BatchNorm3d):
    #             m.weight.data.fill_(1)
    #             m.bias.data.zero_()
3D | HengCai-NJU/3D2DCT | code/dataloaders/utils.py | .py | 6,729 | 215 | import os
import torch
import numpy as np
import torch.nn as nn
import matplotlib.pyplot as plt
from skimage import measure
import scipy.ndimage as nd
def recursive_glob(rootdir='.', suffix=''):
    """Recursively collect files under ``rootdir`` whose names end in ``suffix``.

    :param rootdir: root directory to walk
    :param suffix: filename suffix to match (empty string matches everything)
    :return: list of full file paths
    """
    matches = []
    for dirpath, _, filenames in os.walk(rootdir):
        for filename in filenames:
            if filename.endswith(suffix):
                matches.append(os.path.join(dirpath, filename))
    return matches
def get_cityscapes_labels():
    """Return the 19 Cityscapes class colours as a (19, 3) ndarray."""
    colours = [
        # [0, 0, 0] (void) intentionally excluded
        (128, 64, 128), (244, 35, 232), (70, 70, 70), (102, 102, 156),
        (190, 153, 153), (153, 153, 153), (250, 170, 30), (220, 220, 0),
        (107, 142, 35), (152, 251, 152), (0, 130, 180), (220, 20, 60),
        (255, 0, 0), (0, 0, 142), (0, 0, 70), (0, 60, 100),
        (0, 80, 100), (0, 0, 230), (119, 11, 32),
    ]
    return np.array(colours)
def get_pascal_labels():
    """Load the mapping that associates pascal classes with label colors

    Returns:
        np.ndarray with dimensions (21, 3)
    """
    palette = [
        (0, 0, 0), (128, 0, 0), (0, 128, 0), (128, 128, 0),
        (0, 0, 128), (128, 0, 128), (0, 128, 128), (128, 128, 128),
        (64, 0, 0), (192, 0, 0), (64, 128, 0), (192, 128, 0),
        (64, 0, 128), (192, 0, 128), (64, 128, 128), (192, 128, 128),
        (0, 64, 0), (128, 64, 0), (0, 192, 0), (128, 192, 0),
        (0, 64, 128),
    ]
    return np.asarray(palette)
def encode_segmap(mask):
    """Encode segmentation label images as pascal classes

    Args:
        mask (np.ndarray): raw segmentation label image of dimension
            (M, N, 3), in which the Pascal classes are encoded as colours.
    Returns:
        (np.ndarray): class map with dimensions (M,N), where the value at
        a given location is the integer denoting the class index.
    """
    mask = mask.astype(int)
    class_map = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)
    for class_index, colour in enumerate(get_pascal_labels()):
        # pixels whose RGB triple matches this class colour
        hit = np.where(np.all(mask == colour, axis=-1))[:2]
        class_map[hit] = class_index
    return class_map.astype(int)
def decode_seg_map_sequence(label_masks, dataset='pascal'):
    """Colour-decode a batch of label masks into an (N, 3, H, W) float tensor."""
    decoded = [decode_segmap(mask, dataset) for mask in label_masks]
    stacked = np.array(decoded).transpose([0, 3, 1, 2])  # NHWC -> NCHW
    return torch.from_numpy(stacked)
def decode_segmap(label_mask, dataset, plot=False):
    """Decode segmentation class labels into a color image

    Args:
        label_mask (np.ndarray): an (M,N) array of integer values denoting
            the class label at each spatial location.
        plot (bool, optional): whether to show the resulting color image
            in a figure.
    Returns:
        (np.ndarray, optional): the resulting decoded color image
        (None when ``plot`` is True).
    """
    if dataset == 'pascal':
        n_classes, label_colours = 21, get_pascal_labels()
    elif dataset == 'cityscapes':
        n_classes, label_colours = 19, get_cityscapes_labels()
    else:
        raise NotImplementedError
    rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))
    for channel in range(3):
        plane = label_mask.copy()
        for cls in range(n_classes):
            plane[label_mask == cls] = label_colours[cls, channel]
        rgb[:, :, channel] = plane / 255.0
    if plot:
        plt.imshow(rgb)
        plt.show()
    else:
        return rgb
def generate_param_report(logfile, param):
    """Write the string representation of ``param`` to ``logfile``.

    Args:
        logfile: path of the report file (overwritten if it exists).
        param: any object; its ``str()`` form is written verbatim.
    """
    # for key, val in param.items():
    #     log_file.write(key + ':' + str(val) + '\n')
    # Context manager guarantees the handle is closed even if write() raises
    # (the previous open/close pair leaked the handle on exceptions).
    with open(logfile, 'w') as log_file:
        log_file.write(str(param))
def cross_entropy2d(logit, target, ignore_index=255, weight=None, size_average=True, batch_average=True):
    """2-D cross-entropy loss with optional spatial and batch averaging.

    Args:
        logit: (N, C, H, W) raw class scores.
        target: (N, 1, H, W) integer labels (squeezed on dim 1 internally).
        ignore_index: label value excluded from the loss.
        weight: optional per-class weights (sequence/ndarray); moved to CUDA
            as in the original implementation.
        size_average: divide the summed loss by H*W.
        batch_average: divide the loss by the batch size N.
    Returns:
        scalar loss tensor.
    """
    n, c, h, w = logit.size()
    # logit = logit.permute(0, 2, 3, 1)
    target = target.squeeze(1)
    # reduction='sum' replaces the deprecated (and since removed)
    # size_average=False keyword of nn.CrossEntropyLoss.
    if weight is None:
        criterion = nn.CrossEntropyLoss(ignore_index=ignore_index, reduction='sum')
    else:
        criterion = nn.CrossEntropyLoss(weight=torch.from_numpy(np.array(weight)).float().cuda(),
                                        ignore_index=ignore_index, reduction='sum')
    loss = criterion(logit, target.long())
    if size_average:
        loss /= (h * w)
    if batch_average:
        loss /= n
    return loss
def lr_poly(base_lr, iter_, max_iter=100, power=0.9):
    """Polynomial learning-rate decay: base_lr * (1 - iter/max_iter) ** power."""
    remaining_fraction = 1 - float(iter_) / max_iter
    return base_lr * remaining_fraction ** power
def get_iou(pred, gt, n_classes=21):
    """Sum over the batch of each image's mean IoU across present classes.

    Args:
        pred, gt: sequences of integer label tensors of identical shape.
        n_classes: number of classes to evaluate.
    Returns:
        float: sum of per-image mean IoUs (caller divides by batch size
        for the average).
    """
    total_iou = 0.0
    for i in range(len(pred)):
        pred_tmp = pred[i]
        gt_tmp = gt[i]

        intersect = [0] * n_classes
        union = [0] * n_classes
        for j in range(n_classes):
            # match == 2 where pred and gt both equal class j; > 0 where either does.
            # NOTE(review): assumes bool + bool promotes to an integer count —
            # confirm this holds for the torch version in use.
            match = (pred_tmp == j) + (gt_tmp == j)
            it = torch.sum(match == 2).item()
            un = torch.sum(match > 0).item()
            intersect[j] += it
            union[j] += un

        iou = []
        for k in range(n_classes):
            if union[k] == 0:
                continue  # class absent from both prediction and GT: skip it
            iou.append(intersect[k] / union[k])
        img_iou = (sum(iou) / len(iou))
        total_iou += img_iou
    return total_iou
def get_dice(pred, gt):
    """Sum of per-sample squared-denominator Dice scores over the batch.

    dice_i = 2*sum(p_i*g_i) / (1 + sum(p_i^2) + sum(g_i^2)); the +1 keeps
    the denominator nonzero when both volumes are all background.

    Args:
        pred, gt: batched tensors of identical shape (cast to long).
    Returns:
        float: summed dice over the batch.
    """
    total_dice = 0.0
    pred = pred.long()
    gt = gt.long()
    for i in range(len(pred)):
        pred_tmp = pred[i]
        gt_tmp = gt[i]
        dice = 2.0*torch.sum(pred_tmp*gt_tmp).item()/(1.0+torch.sum(pred_tmp**2)+torch.sum(gt_tmp**2)).item()
        # (removed a stray per-sample debug print of `dice`)
        total_dice += dice
    return total_dice
def get_mc_dice(pred, gt, num=2):
    """Per-foreground-class summed Dice over the batch.

    Returns:
        np.ndarray of length num-1; entry j-1 accumulates class j's dice.
    """
    # num is the total number of classes, include the background
    total_dice = np.zeros(num-1)
    pred = pred.long()
    gt = gt.long()
    for i in range(len(pred)):
        for j in range(1, num):
            pred_tmp = (pred[i]==j)
            gt_tmp = (gt[i]==j)
            # NOTE(review): pred_tmp/gt_tmp are Bool tensors; `**` on Bool may
            # be unsupported in newer torch — confirm, or cast to long first.
            dice = 2.0*torch.sum(pred_tmp*gt_tmp).item()/(1.0+torch.sum(pred_tmp**2)+torch.sum(gt_tmp**2)).item()
            total_dice[j-1] +=dice
    return total_dice
def post_processing(prediction):
    """Fill holes, then drop connected components smaller than 20% of the
    foreground volume.

    Args:
        prediction: binary ndarray mask.
    Returns:
        the cleaned mask (modified in place and returned).
    """
    prediction = nd.binary_fill_holes(prediction)
    label_cc, num_cc = measure.label(prediction, return_num=True)
    total_cc = np.sum(prediction)
    # (removed a measure.regionprops(label_cc) call whose result was discarded)
    for cc in range(1, num_cc + 1):
        single_cc = (label_cc == cc)
        single_vol = np.sum(single_cc)
        # discard components occupying less than 20% of the foreground
        if single_vol / total_cc < 0.2:
            prediction[single_cc] = 0
    return prediction
| Python |
3D | HengCai-NJU/3D2DCT | code/dataloaders/mmwhs_preprocessing.py | .py | 11,368 | 288 | import nibabel as nib
import numpy as np
import matplotlib.pyplot as plt
import SimpleITK as sitk
import os
from skimage import morphology, exposure
import scipy
from scipy import ndimage
import h5py
import torch
v = "v1"
class CFG:
    """Switches and constants controlling the CT pre-processing pipeline."""
    do_windowing = True  # apply intensity windowing first
    window_width = 2000  # -160
    window_center = 350  # 240
    do_background_cropping = True  # keep only the largest connected body region per slice
    cropping_width = 0.45  # passed as WL (half band width) to image_background_segmentation
    cropping_center = 0.5  # passed as WW (band centre) to image_background_segmentation
    do_cropping = True  # crop volumes to the body bounding box
    do_mask_cropping = True  # crop z-range to the labelled slices (+10 margin)
    do_spacing = False  # resample to target_spacing (takes precedence over do_reshape)
    target_spacing = [1, 1, 1]
    do_reshape = True  # resample to a fixed voxel grid
    new_size = [192, 192, 96]  # v1
# windowing
def transform_ctdata(image, windowWidth, windowCenter, normal=False):
    """Clip ``image`` to a CT intensity window and rescale it into [0, 1].

    When ``normal`` is False the result is additionally scaled to uint8
    in [0, 255].
    """
    window_floor = float(windowCenter) - 0.5 * float(windowWidth)
    scaled = (image - window_floor) / float(windowWidth)
    scaled = np.clip(scaled, 0, 1)
    if normal:
        return scaled
    return (scaled * 255).astype('uint8')
# background removing
def image_background_segmentation(img, WW=40, WL=80):
    """Separate the body from the background of one 2-D CT slice.

    Keeps pixels inside the intensity band (WW-WL, WW+WL), labels the
    connected regions, keeps only the largest one, then dilates and
    fills holes.

    Args:
        img: 2-D slice.
        WW, WL: band centre and half-width of the kept intensity window.
    Returns:
        (mask, masked_img) where mask is the body mask.
    """
    # Calculate the outside values by hand (again)
    lB = WW - WL
    uB = WW + WL
    # Keep only values inside of the window
    background_seperation = np.logical_and(img > lB, img < uB)
    background_seperation = morphology.dilation(background_seperation, np.ones((5, 5)))
    labels, label_nb = scipy.ndimage.label(background_seperation)
    # np.int was removed in NumPy 1.24 — use the builtin int instead
    label_count = np.bincount(labels.ravel().astype(int))
    # discard the 0 label
    label_count[0] = 0
    mask = labels == label_count.argmax()  # keep the most frequent (largest) component
    mask = morphology.dilation(mask, np.ones((4, 4)))  # dilate the mask for less fuzzy edges
    # scipy.ndimage.morphology is deprecated; call binary_fill_holes directly
    mask = scipy.ndimage.binary_fill_holes(mask)
    mask = morphology.dilation(mask, np.ones((3, 3)))  # dilate the mask again
    return mask, mask * img
# cropping
def crop(mask, vol):
    """Crop ``vol`` in-plane to the merged bounding box of ``mask``'s nonzero
    voxels, padded by 20 pixels on each side (clamped to the volume bounds).

    Args:
        mask: (D, H, W) array; each slice must contain at least one nonzero voxel.
        vol: (D, H, W) array to crop; the depth axis is left untouched.
    Returns:
        vol[:, top:bottom, left:right]
    """
    mins, maxs = [], []
    for plane in mask:
        coords = np.array(np.nonzero(plane))
        mins.append(np.min(coords, axis=1))
        maxs.append(np.max(coords, axis=1))
    # np.array keeps a 2-D shape even for a single slice; the previous
    # np.vstack chain left a 1-D array when D == 1 and crashed on [:, 0].
    top_left = np.array(mins)
    bottom_right = np.array(maxs)
    top = max(0, min(top_left[:, 0]) - 20)
    left = max(0, min(top_left[:, 1]) - 20)
    bottom = min(mask.shape[1], max(bottom_right[:, 0]) + 20)
    right = min(mask.shape[2], max(bottom_right[:, 1]) + 20)
    return vol[:, top:bottom, left:right]
def getRangImageDepth(image):
    """Return (start, end) z-indices of the first and last nonzero slices.

    :param image: (D, H, W) volume.
    :return: (startposition, endposition); both 0 when the volume is empty.
    """
    startposition, endposition = 0, 0
    seen_nonzero = False
    for z in range(image.shape[0]):
        if np.max(image[z]):
            if not seen_nonzero:
                startposition = z
                seen_nonzero = True
            endposition = z
    return startposition, endposition
def make_patch(image, mask, startpostion, endpostion):
    """Extract the labelled z-range plus a 10-slice margin on each side.

    :param image: [depth, H, W] volume
    :param mask: [depth, H, W] label volume sliced identically
    :return: (imageroi, maskroi) restricted to the padded range
    """
    depth = np.shape(image)[0]
    lo = max(startpostion - 10, 0)
    hi = min(endpostion + 10, depth)
    return image[lo:hi, :, :], mask[lo:hi, :, :]
# reshape
def resampling(roiImg, new_size, lbl=False):
    """Resample a SimpleITK image onto a fixed ``new_size`` voxel grid.

    Spacing is rescaled so the physical extent is preserved. Label images
    (``lbl=True``) use nearest-neighbour interpolation; intensity images
    use linear interpolation.
    """
    new_spacing = [old_sz * old_spc / new_sz for old_sz, old_spc, new_sz in
                   zip(roiImg.GetSize(), roiImg.GetSpacing(), new_size)]
    if lbl:
        resampled_sitk = sitk.Resample(roiImg, new_size, sitk.Transform(), sitk.sitkNearestNeighbor, roiImg.GetOrigin(),
                                       new_spacing, roiImg.GetDirection(), 0.0, roiImg.GetPixelIDValue())
    else:
        resampled_sitk = sitk.Resample(roiImg, new_size, sitk.Transform(), sitk.sitkLinear, roiImg.GetOrigin(),
                                       new_spacing, roiImg.GetDirection(), 0.0, roiImg.GetPixelIDValue())
    return resampled_sitk
# space resampling
def space_resampling(roiImg, new_spacing, lbl=False):
    """Resample a SimpleITK image to ``new_spacing`` (size adjusted to keep
    the physical extent). Labels use nearest-neighbour, images linear."""
    # print('old spacing: ', roiImg.GetSpacing())
    new_size = [int(old_sz * old_spc / new_spc) for old_sz, old_spc, new_spc in
                zip(roiImg.GetSize(), roiImg.GetSpacing(), new_spacing)]
    if lbl:
        resampled_sitk = sitk.Resample(roiImg, new_size, sitk.Transform(), sitk.sitkNearestNeighbor, roiImg.GetOrigin(),
                                       new_spacing, roiImg.GetDirection(), 0.0, roiImg.GetPixelIDValue())
    else:
        resampled_sitk = sitk.Resample(roiImg, new_size, sitk.Transform(), sitk.sitkLinear, roiImg.GetOrigin(),
                                       new_spacing, roiImg.GetDirection(), 0.0, roiImg.GetPixelIDValue())
    return resampled_sitk
# I/O locations for the KiTS19 branch of this script; the output
# directories are created eagerly at import time.
loadPath = "../data/KiTS19/data/"
savePath = "../data/KiTS19/processed_" + v + "_h5/"
savePath2 = "../data/KiTS19/processed_" + v + "_nii/"
if not os.path.exists(savePath):
    os.makedirs(savePath)
if not os.path.exists(savePath2):
    os.makedirs(savePath2)
if __name__ == "__main__":
    # --- Branch 1: MM-WHS CT volumes -> windowed/cropped/resampled .h5 ---
    basedir = '../../data/MMWHS-CT'
    for i in range(1, 21):
        path = os.path.join(basedir, 'ct_train_10' + str(i).zfill(2))
        ct = sitk.ReadImage(path + '_image.nii.gz')
        lbl = sitk.ReadImage(path + '_label.nii.gz')
        print(ct)
        ct_array = sitk.GetArrayFromImage(ct)
        lbl_array = sitk.GetArrayFromImage(lbl)
        print(ct_array.shape)
        if CFG.do_windowing:
            # normal=True keeps the image as floats in [0, 1]
            ct_array = transform_ctdata(ct_array, CFG.window_width, CFG.window_center, True)
        if CFG.do_background_cropping:
            mask = np.zeros(ct_array.shape)
            for j in range(ct_array.shape[0]):
                mask[j], ct_array[j] = image_background_segmentation(ct_array[j], WW=CFG.cropping_center, WL=CFG.cropping_width)
        if CFG.do_cropping:
            ct_array = crop(mask, ct_array)
            lbl_array = crop(mask, lbl_array)
            print('crop shape:', ct_array.shape)
            print('crop shape:', lbl_array.shape)
        if CFG.do_mask_cropping:
            startpostion, endpostion = getRangImageDepth(lbl_array)
            ct_array, lbl_array = make_patch(ct_array, lbl_array, startpostion=startpostion, endpostion=endpostion)
            print('crop mask shape:', ct_array.shape)
            print('crop mask shape:', lbl_array.shape)
        # Rebuild SimpleITK images, preserving the original geometry metadata.
        new_ct = sitk.GetImageFromArray(ct_array)
        new_ct.SetOrigin(ct.GetOrigin())
        new_ct.SetSpacing(ct.GetSpacing())
        new_ct.SetDirection(ct.GetDirection())
        new_lbl = sitk.GetImageFromArray(lbl_array)
        new_lbl.SetOrigin(lbl.GetOrigin())
        new_lbl.SetSpacing(lbl.GetSpacing())
        new_lbl.SetDirection(lbl.GetDirection())
        print(new_ct)
        if CFG.do_spacing:
            new_ct = space_resampling(new_ct, CFG.target_spacing, lbl=False)
            new_lbl = space_resampling(new_lbl, CFG.target_spacing, lbl=True)
        elif CFG.do_reshape:
            new_ct = resampling(new_ct, CFG.new_size, lbl=False)
            new_lbl = resampling(new_lbl, CFG.new_size, lbl=True)
        save_ct_array = sitk.GetArrayFromImage(new_ct)
        save_lbl_array = sitk.GetArrayFromImage(new_lbl)
        # Remap the MM-WHS anatomical label codes to compact ids 1..7.
        from_labels = [500, 600, 420, 550, 205, 820, 850]
        to_labels = [1, 2, 3, 4, 5, 6, 7]
        for from_label, to_label in zip(from_labels, to_labels):
            save_lbl_array[save_lbl_array == from_label] = to_label
        # set everything else to zero
        save_lbl_array[save_lbl_array > 7] = 0
        save_ct_array = save_ct_array.swapaxes(0, 2)
        save_lbl_array = save_lbl_array.swapaxes(0, 2)
        save_file = h5py.File('../../data/MMWHS/volume-'+str(i).zfill(2)+ ".h5", 'w')
        save_file.create_dataset('image', data=save_ct_array)
        save_file.create_dataset('label', data=save_lbl_array)
        print('array shape: ', save_ct_array.shape)
        save_file.close()
        sitk.WriteImage(new_ct,'../../data/MMWHS/volume-'+str(i).zfill(2)+'.nii.gz')
        sitk.WriteImage(new_lbl,'../../data/MMWHS/label-'+str(i).zfill(2)+'.nii.gz')
        # NOTE(review): exit(0) here stops after the FIRST volume and makes the
        # KiTS19 branch below unreachable — confirm the intended indentation.
        exit(0)
    # --- Branch 2 (dead code while exit(0) above remains): KiTS19 cases ---
    fileList = os.listdir(loadPath)
    for i in range(210):
        subpath = "case_" + str(i).zfill(5)
        print(subpath)
        ct = sitk.ReadImage(loadPath + subpath + "/imaging.nii.gz")
        lbl = sitk.ReadImage(loadPath + subpath + "/segmentation.nii.gz")
        ct_array = sitk.GetArrayFromImage(ct)
        lbl_array = sitk.GetArrayFromImage(lbl)
        ct_array = ct_array.swapaxes(0, 2)
        lbl_array = lbl_array.swapaxes(0, 2)
        new_ct_array = ct_array
        new_lbl_array = lbl_array
        print('raw img shape:', ct_array.shape)  # (611, 512, 512)
        if CFG.do_windowing:
            new_ct_array = transform_ctdata(new_ct_array, CFG.window_width, CFG.window_center, True)
        if CFG.do_background_cropping:
            mask = np.zeros(new_ct_array.shape)
            # NOTE(review): this inner loop reuses `i`, shadowing the outer
            # case index — harmless only because of the exit(0) above.
            for i in range(new_ct_array.shape[0]):
                mask[i], new_ct_array[i] = image_background_segmentation(new_ct_array[i], WW=CFG.cropping_center, WL=CFG.cropping_width)
        if CFG.do_cropping:
            new_ct_array = crop(mask, new_ct_array)
            new_lbl_array = crop(mask, new_lbl_array)
            print('crop shape:', new_ct_array.shape)
            print('crop shape:', new_lbl_array.shape)
        if CFG.do_mask_cropping:
            startpostion, endpostion = getRangImageDepth(new_lbl_array)
            new_ct_array, new_lbl_array = make_patch(new_ct_array, new_lbl_array, startpostion=startpostion, endpostion=endpostion)
            print('crop mask shape:', new_ct_array.shape)
            print('crop mask shape:', new_lbl_array.shape)
        new_ct = sitk.GetImageFromArray(new_ct_array)
        new_ct.SetOrigin(ct.GetOrigin())
        new_ct.SetSpacing(ct.GetSpacing())
        new_ct.SetDirection(ct.GetDirection())
        new_lbl = sitk.GetImageFromArray(new_lbl_array)
        new_lbl.SetOrigin(lbl.GetOrigin())
        new_lbl.SetSpacing(lbl.GetSpacing())
        new_lbl.SetDirection(lbl.GetDirection())
        if CFG.do_spacing:
            new_ct = space_resampling(new_ct, CFG.target_spacing, lbl=False)
            new_lbl = space_resampling(new_lbl, CFG.target_spacing, lbl=True)
        elif CFG.do_reshape:
            new_ct = resampling(new_ct, CFG.new_size, lbl=False)
            new_lbl = resampling(new_lbl, CFG.new_size, lbl=True)
        save_ct_array = sitk.GetArrayFromImage(new_ct)
        save_lbl_array = sitk.GetArrayFromImage(new_lbl)
        # output shape
        # print('new img shape:', save_ct_array.shape)
        # print('new lbl shape:', save_lbl_array.shape)
        # save
        # sitk.WriteImage(new_ct, os.path.join(savePath2, subpath + '_img.nii'))
        # sitk.WriteImage(new_lbl, os.path.join(savePath2, subpath + '_lbl.nii'))
        save_file = h5py.File(savePath + subpath + ".h5", 'w')
        save_file.create_dataset('image', data=save_ct_array)
        save_file.create_dataset('label', data=save_lbl_array)
        save_file.close()
| Python |
3D | HengCai-NJU/3D2DCT | code/dataloaders/mmwhs.py | .py | 15,737 | 389 | import os
import torch
import numpy as np
from glob import glob
from torch.utils.data import Dataset
import h5py
import itertools
from torch.utils.data.sampler import Sampler
from torchvision.transforms import Compose
import pdb
class MMWHS(Dataset):
    """MM-WHS heart CT dataset reading per-volume .h5 files.

    Each item is z-score normalised and returned as
    ``{'image': float array, 'label': uint8 array}``.
    """
    # base_dir="../data/processed_v1_h5"
    def __init__(self, base_dir=None, split='train', num=None, transform=None):
        # base_dir: directory holding "<name>.h5" volumes; split selects the
        # list file; num (optional) truncates the sample list.
        self._base_dir = base_dir
        self.transform = transform
        self.sample_list = []
        if 'train' in split:
            with open('../data/' + split + '_mmwhs.txt', 'r') as f:
                self.image_list = f.readlines()
        elif split == 'test':
            with open(self._base_dir + '/test_mmwhs.txt', 'r') as f:
                self.image_list = f.readlines()
        # keep only the first comma-separated field of each line
        self.image_list = [item.replace('\n', '').split(",")[0] for item in self.image_list]
        if num is not None:
            self.image_list = self.image_list[:num]
        print("total {} samples".format(len(self.image_list)))

    def __len__(self):
        return len(self.image_list)

    def __getitem__(self, idx):
        image_name = self.image_list[idx]
        h5f=h5py.File(self._base_dir+"/{}.h5".format(image_name),'r')
        image = h5f['image'][:] # 64*192*192
        label = h5f['label'][:]
        # z-score normalisation of the whole volume
        image = (image - np.mean(image)) / np.std(image)
        sample = {'image': image, 'label': label.astype(np.uint8)}
        #sample = {'image': image, 'label': label}
        if self.transform:
            sample = self.transform(sample)
        return sample
class MMWHS_Sparse(Dataset):
    """MM-WHS dataset with sparsely annotated labels: only a regular grid of
    2-D slices (one axis stride ``slice_strategy``, the other twice that,
    each with a per-volume random offset) keeps its ground truth; every
    other voxel of the returned label volume is zeroed.
    """
    # base_dir="../data/processed_v1_h5"
    def __init__(self, base_dir=None, split='train', slice_strategy=1,num=None, transform=None):
        self._base_dir = base_dir
        self.transform = transform
        self.sample_list = []
        # one random slice offset per volume (16 = expected number of volumes)
        self.slice_random=np.random.randint(0,slice_strategy,16)
        print(self.slice_random)
        # base slice grids along the last (depth 96) and middle (192) axes
        self.slice1=[i for i in list(range(0,96,slice_strategy))]
        self.slice2=[i for i in list(range(0,192,slice_strategy*2))]
        if 'train' in split:
            with open( '../data/'+split+'_mmwhs.txt', 'r') as f:
                self.image_list = f.readlines()
        elif split == 'test':
            with open(self._base_dir + '/test_mmwhs.txt', 'r') as f:
                self.image_list = f.readlines()
        self.image_list = [item.replace('\n', '').split(",")[0] for item in self.image_list]
        if num is not None:
            self.image_list = self.image_list[:num]
        print("total {} samples".format(len(self.image_list)))

    def __len__(self):
        return len(self.image_list)

    def __getitem__(self, idx):
        image_name = self.image_list[idx]
        h5f=h5py.File(self._base_dir+"/{}.h5".format(image_name),'r')
        image = h5f['image'][:] # 64*192*192
        labeltmp = h5f['label'][:]
        label = np.zeros_like(labeltmp)
        # shift the base grids by this volume's random offset
        slice1=[i+self.slice_random[idx] for i in self.slice1]
        slice2 = [i + 2*self.slice_random[idx] for i in self.slice2]
        # copy ground truth only on the selected slices
        for i in slice1:
            label[:,:,i]=labeltmp[:,:,i]
        for i in slice2:
            label[:,i,:]=labeltmp[:,i,:]
        # label[:, label.shape[1] // 2-10, :] = labeltmp[:, label.shape[1] // 2-10, :]
        # label[:, slice2, :] = labeltmp[:,slice2, :]
        # label=labeltmp
        image = (image - np.mean(image)) / np.std(image)
        sample = {'image': image, 'label': label.astype(np.uint8), 'slice1': slice1,'slice2':slice2}
        # sample = {'image': image, 'label': label, 'slice1': slice1,'idx':idx}
        if self.transform:
            sample = self.transform(sample)
        return sample
class CenterCrop(object):
    """Deterministically crop the centre ``output_size`` region of the
    image/label pair, zero-padding first when any dimension is too small."""

    def __init__(self, output_size):
        self.output_size = output_size

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        out_w, out_h, out_d = self.output_size
        # pad the sample if necessary (<= keeps a safety margin of 3)
        if label.shape[0] <= out_w or label.shape[1] <= out_h or label.shape[2] <= out_d:
            pw = max((out_w - label.shape[0]) // 2 + 3, 0)
            ph = max((out_h - label.shape[1]) // 2 + 3, 0)
            pd = max((out_d - label.shape[2]) // 2 + 3, 0)
            padding = [(pw, pw), (ph, ph), (pd, pd)]
            image = np.pad(image, padding, mode='constant', constant_values=0)
            label = np.pad(label, padding, mode='constant', constant_values=0)

        w, h, d = image.shape
        w1 = int(round((w - out_w) / 2.))
        h1 = int(round((h - out_h) / 2.))
        d1 = int(round((d - out_d) / 2.))

        label = label[w1:w1 + out_w, h1:h1 + out_h, d1:d1 + out_d]
        image = image[w1:w1 + out_w, h1:h1 + out_h, d1:d1 + out_d]
        return {'image': image, 'label': label}
class RandomCrop(object):
    """
    Crop randomly the image in a sample
    Args:
    output_size (int): Desired output size
    """

    def __init__(self, output_size):
        self.output_size = output_size

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        # pad the sample if necessary; the <= comparison pads even at equality,
        # which guarantees the randint ranges below are never empty
        if label.shape[0] <= self.output_size[0] or label.shape[1] <= self.output_size[1] or label.shape[2] <= \
                self.output_size[2]:
            pw = max((self.output_size[0] - label.shape[0]) // 2 + 3, 0)
            ph = max((self.output_size[1] - label.shape[1]) // 2 + 3, 0)
            pd = max((self.output_size[2] - label.shape[2]) // 2 + 3, 0)
            image = np.pad(image, [(pw, pw), (ph, ph), (pd, pd)], mode='constant', constant_values=0)
            label = np.pad(label, [(pw, pw), (ph, ph), (pd, pd)], mode='constant', constant_values=0)

        (w, h, d) = image.shape
        # if np.random.uniform() > 0.33:
        #     w1 = np.random.randint((w - self.output_size[0])//4, 3*(w - self.output_size[0])//4)
        #     h1 = np.random.randint((h - self.output_size[1])//4, 3*(h - self.output_size[1])//4)
        # else:
        # uniformly random crop origin
        w1 = np.random.randint(0, w - self.output_size[0])
        h1 = np.random.randint(0, h - self.output_size[1])
        d1 = np.random.randint(0, d - self.output_size[2])

        label = label[w1:w1 + self.output_size[0], h1:h1 + self.output_size[1], d1:d1 + self.output_size[2]]
        image = image[w1:w1 + self.output_size[0], h1:h1 + self.output_size[1], d1:d1 + self.output_size[2]]
        return {'image': image, 'label': label}
class MMRandomCrop_Sparse(object):
    """Random crop for sparsely annotated samples.

    Besides cropping image/label, builds a binary ``weight`` volume marking
    the annotated slices (from ``slice1``/``slice2``) so the loss can be
    restricted to supervised voxels.
    """

    def __init__(self, output_size):
        self.output_size = output_size

    def __call__(self, sample):
        image, label,slice1,slice2 = sample['image'], sample['label'],sample['slice1'],sample['slice2']
        #image, label, slice1 = sample['image'], sample['label'], sample['slice1']
        # pad the sample if necessary
        if label.shape[0] <= self.output_size[0] or label.shape[1] <= self.output_size[1] or label.shape[2] <= \
                self.output_size[2]:
            pw = max((self.output_size[0] - label.shape[0]) // 2 + 3, 0)
            ph = max((self.output_size[1] - label.shape[1]) // 2 + 3, 0)
            pd = max((self.output_size[2] - label.shape[2]) // 2 + 3, 0)
            image = np.pad(image, [(pw, pw), (ph, ph), (pd, pd)], mode='constant', constant_values=0)
            label = np.pad(label, [(pw, pw), (ph, ph), (pd, pd)], mode='constant', constant_values=0)

        (w, h, d) = image.shape
        # if np.random.uniform() > 0.33:
        #     w1 = np.random.randint((w - self.output_size[0])//4, 3*(w - self.output_size[0])//4)
        #     h1 = np.random.randint((h - self.output_size[1])//4, 3*(h - self.output_size[1])//4)
        # else:
        w1 = np.random.randint(0, w - self.output_size[0])
        h1 = np.random.randint(0, h - self.output_size[1])
        d1 = np.random.randint(0, d - self.output_size[2])

        # weight covers the (possibly padded) full volume, then is cropped
        # with the same offsets as image/label
        weight = np.zeros_like(image)
        label = label[w1:w1 + self.output_size[0], h1:h1 + self.output_size[1], d1:d1 + self.output_size[2]]
        image = image[w1:w1 + self.output_size[0], h1:h1 + self.output_size[1], d1:d1 + self.output_size[2]]
        #weight[:]=1
        for i in slice1:
            # NOTE(review): slice1 indices are shifted by +3 while slice2
            # indices are used as-is — looks like compensation for padding;
            # confirm the intended offset.
            weight[:, :, i+3] = 1
        for i in slice2:
            weight[:,i,:]=1
        weight = weight[w1:w1 + self.output_size[0], h1:h1 + self.output_size[1], d1:d1 + self.output_size[2]]
        return {'image': image, 'label': label, 'weight': weight}
class MMRandomRotFlip(object):
    """Random k*90-degree rotation plus a random flip, applied identically to
    image, label and (when present) the supervision weight volume."""

    def __call__(self, sample):
        if 'weight' in sample:
            image, label,weight = sample['image'], sample['label'],sample['weight']
            k = np.random.randint(0, 4)  # number of 90-degree rotations
            image = np.rot90(image, k)
            label = np.rot90(label, k)
            weight=np.rot90(weight, k)
            axis = np.random.randint(0, 2)  # flip along axis 0 or 1
            # .copy() materialises the flipped view (torch.from_numpy rejects
            # negative strides)
            image = np.flip(image, axis=axis).copy()
            label = np.flip(label, axis=axis).copy()
            weight=np.flip(weight, axis=axis).copy()
            return {'image': image, 'label': label,'weight':weight}
        else:
            image, label = sample['image'], sample['label']
            k = np.random.randint(0, 4)
            image = np.rot90(image, k)
            label = np.rot90(label, k)
            axis = np.random.randint(0, 2)
            image = np.flip(image, axis=axis).copy()
            label = np.flip(label, axis=axis).copy()
            return {'image': image, 'label': label}
class RandomNoise(object):
    """Add clipped Gaussian noise (std ``sigma``, clipped to +/-2*sigma, then
    shifted by ``mu``) to the image only; the label is untouched."""

    def __init__(self, mu=0, sigma=0.1):
        self.mu = mu
        self.sigma = sigma

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        # noise ~ N(0, sigma), clipped to [-2*sigma, 2*sigma]
        noise = np.clip(self.sigma * np.random.randn(image.shape[0], image.shape[1], image.shape[2]), -2*self.sigma, 2*self.sigma)
        noise = noise + self.mu
        image = image + noise
        return {'image': image, 'label': label}
class CreateOnehotLabel(object):
    """Add a float32 one-hot encoding of the label volume to the sample."""

    def __init__(self, num_classes):
        self.num_classes = num_classes

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        # channel c holds the indicator of label == c
        onehot_label = np.zeros((self.num_classes,) + label.shape, dtype=np.float32)
        for cls in range(self.num_classes):
            onehot_label[cls] = (label == cls).astype(np.float32)
        return {'image': image, 'label': label, 'onehot_label': onehot_label}
class ToTensor(object):
    """Convert ndarrays in sample to Tensors (image gains a channel dim)."""

    def __call__(self, sample):
        image = sample['image']
        # prepend a singleton channel axis and force float32
        image = image.reshape(1, *image.shape).astype(np.float32)
        result = {'image': torch.from_numpy(image),
                  'label': torch.from_numpy(sample['label']).long()}
        if 'onehot_label' in sample:
            result['onehot_label'] = torch.from_numpy(sample['onehot_label']).long()
        elif 'weight' in sample:
            result['weight'] = torch.from_numpy(sample['weight'])
        return result
class TwoStreamBatchSampler(Sampler):
    """Iterate two sets of indices

    An 'epoch' is one iteration through the primary indices.
    During the epoch, the secondary indices are iterated through
    as many times as needed.
    """
    def __init__(self, primary_indices, secondary_indices, batch_size, secondary_batch_size):
        # primary/secondary: the two index pools (presumably labelled vs.
        # unlabelled samples — confirm with the caller); each batch contains
        # (batch_size - secondary_batch_size) primary + secondary_batch_size
        # secondary indices.
        self.primary_indices = primary_indices
        self.secondary_indices = secondary_indices
        self.secondary_batch_size = secondary_batch_size
        self.primary_batch_size = batch_size - secondary_batch_size

        assert len(self.primary_indices) >= self.primary_batch_size > 0
        assert len(self.secondary_indices) >= self.secondary_batch_size > 0

    def __iter__(self):
        # primary shuffled once per epoch; secondary reshuffled endlessly
        primary_iter = iterate_once(self.primary_indices)
        secondary_iter = iterate_eternally(self.secondary_indices)
        return (
            primary_batch + secondary_batch
            for (primary_batch, secondary_batch)
            in zip(grouper(primary_iter, self.primary_batch_size),
                   grouper(secondary_iter, self.secondary_batch_size))
        )

    def __len__(self):
        return len(self.primary_indices) // self.primary_batch_size
def iterate_once(iterable):
    """Return a single random permutation of ``iterable``."""
    shuffled = np.random.permutation(iterable)
    return shuffled
def iterate_eternally(indices):
    """Yield elements of ``indices`` forever, reshuffling on every pass."""
    def endless():
        while True:
            for index in np.random.permutation(indices):
                yield index
    return endless()
def grouper(iterable, n):
    "Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3) --> ABC DEF (any trailing partial chunk is dropped,
    # because zip stops at the shortest input)
    shared = iter(iterable)
    return zip(*([shared] * n))
class CenterCrop_2(object):
    """Center-crop a list of equally shaped volumes with one shared transform.

    Padding and crop offsets are computed once from the first array, so
    every array in the sample is cut at exactly the same location.
    """
    def __init__(self, output_size):
        self.output_size = output_size

    def _get_transform(self, label):
        # Derive pad widths and crop origin from the reference array.
        if label.shape[0] <= self.output_size[0] or label.shape[1] <= self.output_size[1] or label.shape[2] <= \
                self.output_size[2]:
            pw = max((self.output_size[0] - label.shape[0]) // 2 + 3, 0)
            ph = max((self.output_size[1] - label.shape[1]) // 2 + 3, 0)
            pd = max((self.output_size[2] - label.shape[2]) // 2 + 3, 0)
            label = np.pad(label, [(pw, pw), (ph, ph), (pd, pd)], mode='constant', constant_values=0)
        else:
            pw,ph,pd=0,0,0
        (w, h, d) = label.shape
        w1 = int(round((w - self.output_size[0]) / 2.))
        h1 = int(round((h - self.output_size[1]) / 2.))
        d1 = int(round((d - self.output_size[2]) / 2.))

        def do_transform(x):
            # Apply the shared pad + center crop to a single array.
            if x.shape[0] <= self.output_size[0] or x.shape[1] <= self.output_size[1] or x.shape[2] <= self.output_size[2] :
                x = np.pad(x, [(pw, pw), (ph, ph),(pd,pd)], mode='constant', constant_values=0)
            x = x[w1:w1 + self.output_size[0], h1:h1 + self.output_size[1],d1:d1+self.output_size[2]]
            return x
        return do_transform

    def __call__(self, samples):
        transform = self._get_transform(samples[0])
        return [transform(s) for s in samples]
class ToTensor_2(object):
    """Convert ndarrays in sample to Tensors."""

    def __call__(self, sample):
        head = sample[0]
        # the first array (the image) gains a leading channel axis
        head = head.reshape(1, *head.shape).astype(np.float32)
        arrays = [head, *sample[1:]]
        return [torch.from_numpy(arr.astype(np.float32)) for arr in arrays]
class make_data_3d(Dataset):
    """Dataset over pre-loaded (image, pseudo-label, mask, label) volumes.

    Every item is center-cropped to ``patch_size`` (shared offsets across
    the four volumes) and converted to tensors.
    """
    def __init__(self, imgs, plabs, masks, labs,patch_size):
        self.img = [img.squeeze() for img in imgs]
        self.plab = [np.squeeze(lab) for lab in plabs]
        self.mask = [np.squeeze(mask) for mask in masks]
        self.lab = [np.squeeze(lab) for lab in labs]
        self.num = len(self.img)
        self.tr_transform = Compose([
            CenterCrop_2(patch_size),
            # RandomNoise(),
            ToTensor_2()
        ])

    def __getitem__(self, idx):
        samples = self.img[idx], self.plab[idx], self.mask[idx], self.lab[idx]
        # pdb.set_trace()
        samples = self.tr_transform(samples)
        imgs, plabs, masks, labs = samples
        return imgs, plabs.long(), masks.float(), labs.long()

    def __len__(self):
        return self.num
| Python |
3D | HengCai-NJU/3D2DCT | code/dataloaders/make_dataset.py | .py | 3,951 | 116 | # -*- coding: utf-8 -*-
import h5py, os
import torch, cv2
import numpy as np
from torch.utils.data import Dataset, DataLoader
import os
from pathlib import Path
import torch
import numpy as np
from glob import glob
from torch.utils.data import Dataset
import h5py
import itertools
from torch.utils.data.sampler import Sampler
from torchvision.transforms import Compose
class make_data(Dataset):
    """Minimal dataset over pre-loaded (img, lab, mask) sequences.

    ``img[idx]`` is squeezed on dim 1, ``lab``/``mask`` on dim 0; items are
    returned as-is otherwise.
    """
    def __init__(self, img, lab, mask):
        self.img = img
        self.lab = lab
        self.mask = mask
        self.num = len(self.img)

    def __getitem__(self, idx):
        # global i
        # imgs = []
        imgs = self.img[idx].squeeze(1)
        labs = self.lab[idx].squeeze(0)
        masks = self.mask[idx].squeeze(0)
        # print(imgs.shape,labs.shape)
        # for m in self.modalities:
        #     img = img[m,:,:]
        # if self.transform:
        #     img = cv2.resize(img,dsize=self.transform)
        #     labs = cv2.resize(labs,dsize=self.transform)
        # img = self.img_pre(img)
        # labs = self.lab_pre(lab)
        return imgs, labs, masks

    def __len__(self):
        return self.num
class RandomCrop(object):
    """
    Crop randomly the image in a sample
    Args:
    output_size (int): Desired output size
    """

    def __init__(self, output_size, with_sdf=False):
        # with_sdf is stored but not used anywhere in this class
        self.output_size = output_size
        self.with_sdf = with_sdf

    def _get_transform(self, x):
        # Compute shared pad widths and a random crop origin from one array,
        # so every array of a sample is cropped at the same location.
        if x.shape[0] <= self.output_size[0] or x.shape[1] <= self.output_size[1] or x.shape[2] <= self.output_size[2]:
            pw = max((self.output_size[0] - x.shape[0]) // 2 + 1, 0)
            ph = max((self.output_size[1] - x.shape[1]) // 2 + 1, 0)
            pd = max((self.output_size[2] - x.shape[2]) // 2 + 1, 0)
            x = np.pad(x, [(pw, pw), (ph, ph), (pd, pd)], mode='constant', constant_values=0)
        else:
            pw, ph, pd = 0, 0, 0
        (w, h, d) = x.shape
        w1 = np.random.randint(0, w - self.output_size[0])
        h1 = np.random.randint(0, h - self.output_size[1])
        d1 = np.random.randint(0, d - self.output_size[2])

        def do_transform(image):
            # Apply the shared pad + crop to one array.
            if image.shape[0] <= self.output_size[0] or image.shape[1] <= self.output_size[1] or image.shape[2] <= self.output_size[2]:
                try:
                    image = np.pad(image, [(pw, pw), (ph, ph), (pd, pd)], mode='constant', constant_values=0)
                except Exception as e:
                    print(e)
            image = image[w1:w1 + self.output_size[0], h1:h1 + self.output_size[1], d1:d1 + self.output_size[2]]
            return image
        return do_transform

    def __call__(self, samples):
        transform = self._get_transform(samples[0])
        return [transform(s) for s in samples]
class ToTensor(object):
    """Convert ndarrays in sample to Tensors."""

    def __call__(self, sample):
        head = sample[0]
        # the first array (the image) gains a leading channel axis
        head = head.reshape(1, *head.shape).astype(np.float32)
        converted = [head, *sample[1:]]
        return [torch.from_numpy(item.astype(np.float32)) for item in converted]
class make_data_3d(Dataset):
    """Dataset over pre-loaded GPU/CPU tensors (image, pseudo-label, mask,
    label); everything is moved to CPU and squeezed up front, then randomly
    cropped to 96^3 and converted to float tensors per item.
    """
    def __init__(self, imgs, plabs, masks, labs):
        # .cpu() detaches the data from any CUDA device before numpy ops
        self.img = [img.cpu().squeeze() for img in imgs]
        self.plab = [np.squeeze(lab.cpu()) for lab in plabs]
        self.mask = [np.squeeze(mask.cpu()) for mask in masks]
        self.lab = [np.squeeze(lab.cpu()) for lab in labs]
        self.num = len(self.img)
        self.tr_transform = Compose([
            # RandomRotFlip(),
            RandomCrop((96, 96, 96)),
            # RandomNoise(),
            ToTensor()
        ])

    def __getitem__(self, idx):
        samples = self.img[idx], self.plab[idx], self.mask[idx], self.lab[idx]
        samples = self.tr_transform(samples)
        imgs, plabs, masks, labs = samples
        return imgs, plabs.long(), masks.float(), labs.long()

    def __len__(self):
        return self.num
| Python |
3D | theaidenlab/AGWG-merge | run-asm-pipeline.sh | .sh | 42,279 | 822 | #!/bin/bash
##########
#The MIT License (MIT)
#
# Copyright (c) 2018 Aiden Lab
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##########
#
# 3D DNA de novo genome assembly pipeline: 180114 version.
#
# Log the resolved script path and the full command line for reproducibility.
echo `readlink -f $0`" "$*
#set -x
USAGE_short="
*****************************************************
3D de novo assembly: version 180114
USAGE: ./run-asm-pipeline.sh [options] <path_to_input_fasta> <path_to_input_mnd>
DESCRIPTION:
This is a script to assemble draft assemblies (represented in input by draft fasta and deduplicated list of alignments of Hi-C reads to this fasta as produced by the Juicer pipeline) into chromosome-length scaffolds. The script will produce an output fasta file, a Hi-C map of the final assembly, and a few supplementary annotation files to help review the result in Juicebox.
ARGUMENTS:
path_to_input_fasta	Specify file path to draft assembly fasta file.
path_to_input_mnd	Specify path to deduplicated list of alignments of Hi-C reads to the draft assembly fasta as produced by the Juicer pipeline: the merged_nodups file (mnd).
OPTIONS:
-m|--mode haploid/diploid	Runs in specific mode, either haploid or diploid (default is haploid).
-i|--input input_size	Specifies threshold input contig/scaffold size (default is 15000). Contigs/scaffolds smaller than input_size are going to be ignored.
-r|--rounds number_of_edit_rounds	Specifies number of iterative rounds for misjoin correction (default is 2).
-s|--stage stage	Fast forward to later assembly steps, can be polish, split, seal, merge and finalize.
-h|--help	Shows this help. Type --help for a full set of options.
*****************************************************
"
# Fix: the long usage previously advertised a stale version (170123); it now
# matches the script header and USAGE_short (180114).
USAGE_long="
*****************************************************
3D de novo assembly: version 180114
USAGE: ./run-asm-pipeline.sh [options] <path_to_input_fasta> <path_to_input_mnd>
DESCRIPTION:
This is a script to assemble draft assemblies (represented in input by draft fasta and deduplicated list of alignments of Hi-C reads to this fasta as produced by the Juicer pipeline) into chromosome-length scaffolds. The script will produce an output fasta file, a Hi-C map of the final assembly, and a few supplementary annotation files to help review the result in Juicebox.
ARGUMENTS:
path_to_input_fasta	Specify file path to draft assembly fasta file.
path_to_input_mnd	Specify path to deduplicated list of alignments of Hi-C reads to the draft assembly fasta as produced by the Juicer pipeline: the merged_nodups file (mnd).
OPTIONS:
-h	Shows main options.
--help	Shows this help.
-m|--mode haploid/diploid	Runs in specific mode, either haploid or diploid (default is haploid).
-i|--input input_size	Specifies threshold input contig/scaffold size (default is 15000). Contigs/scaffolds smaller than input_size are going to be ignored.
-r|--rounds number_of_edit_rounds	Specifies number of iterative rounds for misjoin correction (default is 2).
-s|--stage stage	Fast forward to later assembly steps, can be polish, split, seal, merge and finalize.
ADDITIONAL OPTIONS:
**scaffolder**
-q|--mapq mapq	Mapq threshold for scaffolding and visualization (default is 1).
**misjoin detector**
--editor-coarse-resolution editor_coarse_resolution	Misjoin editor coarse matrix resolution, should be one of the following: 2500000, 1000000, 500000, 250000, 100000, 50000, 25000, 10000, 5000, 1000 (default is 25000).
--editor-coarse-region editor_coarse_region	Misjoin editor triangular motif region size (default is 125000).
--editor-coarse-stringency editor_coarse_stringency	Misjoin editor stringency parameter (default is 55).
--editor-saturation-centile editor_saturation_centile	Misjoin editor saturation parameter (default is 5).
--editor-fine-resolution editor_fine_resiolution	Misjoin editor fine matrix resolution, should be one of the following: 2500000, 1000000, 500000, 250000, 100000, 50000, 25000, 10000, 5000, 1000 (default is 1000).
--editor-repeat-coverage editor_repeat_coverage	Misjoin editor threshold repeat coverage (default is 2).
**polisher**
--polisher-input-size polisher_input_size	Polisher input size threshold. Scaffolds smaller than polisher_input_size are going to be placed into unresolved (default is 1000000).
--polisher-coarse-resolution editor_coarse_resolution	Polisher coarse matrix resolution, should be one of the following: 2500000, 1000000, 500000, 250000, 100000, 50000, 25000, 10000, 5000, 1000 (default is 25000).
--polisher-coarse-region editor_coarse_region	Polisher triangular motif region size (default is 3000000).
--polisher-coarse-stringency editor_coarse_stringency	Polisher stringency parameter (default is 55).
--polisher-saturation-centile editor_saturation_centile	Polisher saturation parameter (default is 5).
--polisher-fine-resolution editor_fine_resiolution	Polisher fine matrix resolution, should be one of the following: 2500000, 1000000, 500000, 250000, 100000, 50000, 25000, 10000, 5000, 1000 (default is 1000).
**splitter**
--splitter-input-size splitter_input_size
		Splitter input size threshold. Scaffolds smaller than polisher_input_size are going to be placed into unresolved (Default: 1000000).
--splitter-coarse-resolution splitter_coarse_resolution
		Splitter coarse matrix resolution, should be one of the following: 2500000, 1000000, 500000, 250000, 100000, 50000, 25000, 10000, 5000, 1000 (Default: 25000).
--splitter-coarse-region splitter_coarse_region
		Splitter triangular motif region size (Default: 3000000).
--splitter-coarse-stringency splitter_coarse_stringency
		Splitter stringency parameter (Default: 55).
--splitter-saturation-centile splitter_saturation_centile
		Splitter saturation parameter (Default: 5).
--splitter-fine-resolution splitter_fine_resiolution
		Splitter fine matrix resolution, should be one of the following: 2500000, 1000000, 500000, 250000, 100000, 50000, 25000, 10000, 5000, 1000 (Default: 1000).
**merger**
--merger-search-band merger_search_band
		Distance (in bp) within which to locally search for alternative haplotypes to a given contig or scaffold, from the position of their suggested incorporation in the assembly. The larger the original input contigs/scaffolds, the larger band size it might be necessary to set. Default: 3000000.
--merger-alignment-score merger_alignment_score
		Minimal LASTZ alignment score for nearby sequences (located in the assembly within the distance defined by the merger_search_band parameter) to be recongnized as alternative haplotypes. Default: 50000000.
--merger-alignment-identity merger_alignment_identity
		Minimal identity score required from similar nearby sequences (per length) for them to be classified as alternative haplotypes. Default: 20.
--merger-alignment-length merger_alignment_length
		Minimal length necessary to recognize similar nearby sequences as alternative haplotypes. Default: 20000.
--merger-lastz-options merger_lastz_options
		Option string to customize LASTZ alignment. Default: \"--gfextend\ --gapped\ --chain=200,200\"
*****************************************************
"
# Absolute path to the directory containing this script (and helper scripts).
pipeline=`cd "$( dirname $0)" && pwd`
## default parameter setup
diploid="false" # by default run haploid pipeline
input_size=15000 # contigs/scaffolds smaller than input_size are ignored
MAX_ROUNDS=2 # use 2 for Hs2 and 9 for AaegL4
mapq=1 # read mapping quality threshold for Hi-C scaffolder
# misassembly detector and editor default params
editor_coarse_resolution=25000
editor_fine_resolution=1000
editor_coarse_region=125000
editor_coarse_stringency=55
editor_saturation_centile=5
editor_repeat_coverage=2
# polisher default params
# NOTE(review): help text above says polisher coarse resolution default is
# 25000, but the code uses 100000 — confirm which is intended.
polisher_coarse_resolution=100000
polisher_fine_resolution=1000
polisher_coarse_region=3000000
polisher_coarse_stringency=55
polisher_saturation_centile=5
polisher_input_size=1000000
# splitter detection default params
# NOTE(review): help text above says splitter input size default is 1000000,
# but the code uses 100000 — confirm which is intended.
splitter_input_size=100000 # in principle don't really need this, just moves smallish scaffolds to the back
splitter_coarse_resolution=100000
splitter_fine_resolution=1000
splitter_coarse_region=3000000
splitter_coarse_stringency=55
splitter_saturation_centile=5
# merger default params after option handling
default_merger_search_band=3000000
default_merger_alignment_score=50000000
default_merger_alignment_identity=20
default_merger_alignment_length=20000
# The escaped quotes are literal: the value itself contains quote characters,
# matched later by the --merger-lastz-options validation regex ^\"--.+\"$.
default_merger_lastz_options=\"--gfextend\ --gapped\ --chain=200,200\"
stage="" # by default run full pipeline
early=false
fast=false
sort_output=false
############### HANDLE OPTIONS ###############
# Classic manual option loop: each arm validates $2, assigns the parameter,
# then shifts past the option argument; the trailing `shift` after `esac`
# consumes the option itself.
while :; do
	case $1 in
		-h)
			echo "$USAGE_short" >&1
			exit 0
			;;
		--help)
			echo "$USAGE_long" >&1
			exit 0
			;;
## short menu options
		-m|--mode) OPTARG=$2
			if [ "$OPTARG" == "haploid" ] || [ "$OPTARG" == "diploid" ]; then
				echo >&1 " -m|--mode flag was triggered. Running in $OPTARG mode."
			else
				echo ":( Unrecognized value for mode flag. Running with default parameters (--mode haploid)." >&2
			fi
			if [ "$OPTARG" == "diploid" ]; then
				diploid="true"
			fi
			shift
			;;
		-r|--rounds) OPTARG=$2
			re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]] && [[ $OPTARG -ge 0 ]]; then
				echo " -r|--rounds flag was triggered, will run $OPTARG round(s) of misjoin correction." >&1
				MAX_ROUNDS=$OPTARG
			else
				echo ":( Wrong syntax for number of iterative rounds of misjoin correction. Using the default value ${MAX_ROUNDS}." >&2
			fi
			shift
			;;
		-i|--input) OPTARG=$2
			re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]]; then
				echo " -i|--input flag was triggered, filtering draft contigs/scaffolds smaller than $OPTARG." >&1
				input_size=$OPTARG
			else
				echo ":( Wrong syntax for input size threshold. Using the default value ${input_size}." >&2
			fi
			shift
			;;
		-s|--stage) OPTARG=$2
			if [ "$OPTARG" == "scaffold" ] || [ "$OPTARG" == "polish" ] || [ "$OPTARG" == "split" ] || [ "$OPTARG" == "seal" ] || [ "$OPTARG" == "merge" ] || [ "$OPTARG" == "finalize" ]; then
				echo " -s|--stage flag was triggered, fast-forwarding to \"$OPTARG\" pipeline section." >&1
				stage=$OPTARG
			else
				# Fix: the message promised "Exiting!" but the script kept
				# running with an empty stage; now actually exit.
				echo ":( Wrong syntax for pipeline stage. Exiting!" >&2
				exit 1
			fi
			shift
			;;
		-e|--early-exit)
			echo " -e|--early-exit flag was triggered, will do early exit." >&1
			early=true
			;;
		-f|--fast-start)
			echo " -f|--fast-start flag was triggered, will start assuming first iterative round and map are available." >&1
			fast=true
			;;
# scaffolder
		-q|--mapq) OPTARG=$2 ##TODO: check that propagates consistently, not tested sufficiently
			re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]]; then
				echo " -q|--mapq flag was triggered, scaffolding using reads with at least $OPTARG mapping quality." >&1
				mapq=$OPTARG
			else
				echo ":( Wrong syntax for mapping quality. Using the default value ${mapq}." >&2
			fi
			shift
			;;
## long menu options
		--sort-output)
			echo " --sort-output was triggered, will sort output scaffolds by size." >&1
			sort_output=true
			;;
# misjoin editor
		--editor-saturation-centile) OPTARG=$2
			# Accept a decimal in (0, 100); the extra clauses reject 0/0.0
			# and values above 100.
			re='^[0-9]+\.?[0-9]*$'
			if [[ $OPTARG =~ $re ]] && [[ ${OPTARG%.*} -ge 0 ]] && ! [[ "$OPTARG" =~ ^0*(\.)?0*$ ]] && [[ $((${OPTARG%.*} + 1)) -le 100 ]]; then
				echo " --editor-saturation-centile flag was triggered, misjoin editor saturation parameter set to ${OPTARG}%." >&1
				editor_saturation_centile=$OPTARG
			else
				echo ":( Wrong syntax for misjoin editor saturation threshold. Using the default value ${editor_saturation_centile}%." >&2
			fi
			shift
			;;
		--editor-coarse-resolution) OPTARG=$2
			re='^[0-9]+$' ## TODO: specify/generalize re matrix resolutions size
			if [[ $OPTARG =~ $re ]]; then
				echo " --editor-coarse-resolution flag was triggered, misjoin editor coarse matrix resolution set to $OPTARG." >&1
				editor_coarse_resolution=$OPTARG
			else
				echo ":( Wrong syntax for misjoin editor coarse matrix resolution. Using the default value ${editor_coarse_resolution}." >&2
			fi
			shift
			;;
		--editor-coarse-region) OPTARG=$2
			re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]]; then
				echo " --editor-coarse-region flag was triggered, misjoin editor coarse resolution depletion region size set to $OPTARG." >&1
				editor_coarse_region=$OPTARG
			else
				echo ":( Wrong syntax for misjoin editor coarse resolution depletion region size. Using the default value ${editor_coarse_region}." >&2
			fi
			shift
			;;
		--editor-coarse-stringency) OPTARG=$2
			re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]] && [[ $OPTARG -lt 100 ]]; then
				echo " --editor-coarse-stringency flag was triggered, misjoin detection stringency parameter set to $OPTARG%." >&1
				editor_coarse_stringency=$OPTARG
			else
				echo ":( Wrong syntax for misjoin detection stringency parameter. Using the default value ${editor_coarse_stringency}%." >&2
			fi
			shift
			;;
		--editor-fine-resolution) OPTARG=$2
			re='^[0-9]+$' ## TODO: specify/generalize re matrix resolutions size
			if [[ $OPTARG =~ $re ]]; then
				echo " --editor-fine-resolution flag was triggered, misjoin detection fine matrix resolution set to $OPTARG." >&1
				editor_fine_resolution=$OPTARG
			else
				echo ":( Wrong syntax for misjoin editor fine matrix resolution. Using the default value ${editor_fine_resolution}." >&2
			fi
			shift
			;;
		--editor-repeat-coverage) OPTARG=$2
			re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]]; then
				echo " --editor-repeat-coverage flag was triggered, threshold repeat coverage parameter set to $OPTARG." >&1
				editor_repeat_coverage=$OPTARG
			else
				# Fix: error message previously referred to the stringency
				# parameter (copy-paste); it now names repeat coverage.
				echo ":( Wrong syntax for threshold repeat coverage parameter. Using the default value ${editor_repeat_coverage}." >&2
			fi
			shift
			;;
# polisher
		--polisher-input-size) OPTARG=$2
			re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]]; then
				echo " --polisher-input-size flag was triggered, excluding scaffolds smaller than $OPTARG when polishing." >&1
				polisher_input_size=$OPTARG
			else
				echo ":( Wrong syntax for polisher scaffold input size threshold. Using the default value ${polisher_input_size}." >&2
			fi
			shift
			;;
		--polisher-saturation-centile) OPTARG=$2
			re='^[0-9]+\.?[0-9]*$'
			if [[ $OPTARG =~ $re ]] && [[ ${OPTARG%.*} -ge 0 ]] && ! [[ "$OPTARG" =~ ^0*(\.)?0*$ ]] && [[ $((${OPTARG%.*} + 1)) -le 100 ]]; then
				echo " --polisher-saturation-centile flag was triggered, polisher saturation parameter set to ${OPTARG}%." >&1
				polisher_saturation_centile=$OPTARG
			else
				echo ":( Wrong syntax for polisher saturation threshold. Using the default value ${polisher_saturation_centile}%." >&2
			fi
			shift
			;;
		--polisher-coarse-resolution) OPTARG=$2
			re='^[0-9]+$' ## TODO: specify/generalize re matrix resolutions size
			if [[ $OPTARG =~ $re ]]; then
				echo " --polisher-coarse-resolution flag was triggered, polisher coarse matrix resolution set to $OPTARG." >&1
				polisher_coarse_resolution=$OPTARG
			else
				echo ":( Wrong syntax for polisher coarse matrix resolution. Using the default value ${polisher_coarse_resolution}." >&2
			fi
			shift
			;;
		--polisher-coarse-region) OPTARG=$2
			re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]]; then
				echo " --polisher-coarse-region flag was triggered, polisher coarse resolution depletion region size set to $OPTARG." >&1
				polisher_coarse_region=$OPTARG
			else
				echo ":( Wrong syntax for polisher coarse resolution depletion region size. Using the default value ${polisher_coarse_region}." >&2
			fi
			shift
			;;
		--polisher-coarse-stringency) OPTARG=$2
			re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]] && [[ $OPTARG -lt 100 ]]; then
				echo " --polisher-coarse-stringency flag was triggered, polisher stringency parameter set to $OPTARG%." >&1
				polisher_coarse_stringency=$OPTARG
			else
				echo ":( Wrong syntax for polisher stringency parameter. Using the default value ${polisher_coarse_stringency}%." >&2
			fi
			shift
			;;
		--polisher-fine-resolution) OPTARG=$2
			re='^[0-9]+$' ## TODO: specify/generalize re matrix resolutions size
			if [[ $OPTARG =~ $re ]]; then
				echo " --polisher-fine-resolution flag was triggered, polisher fine matrix resolution set to $OPTARG." >&1
				polisher_fine_resolution=$OPTARG
			else
				echo ":( Wrong syntax for polisher fine matrix resolution. Using the default value ${polisher_fine_resolution}." >&2
			fi
			shift
			;;
# splitter
		--splitter-input-size) OPTARG=$2 ##TODO: should get rid of this, don't think I really need it
			re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]]; then
				echo " --splitter-input-size flag was triggered, excluding scaffolds smaller than $OPTARG when splitting." >&1
				splitter_input_size=$OPTARG
			else
				echo ":( Wrong syntax for splitter scaffold input size threshold. Using the default value ${splitter_input_size}." >&2
			fi
			shift
			;;
		--splitter-saturation-centile) OPTARG=$2
			re='^[0-9]+\.?[0-9]*$'
			if [[ $OPTARG =~ $re ]] && [[ ${OPTARG%.*} -ge 0 ]] && ! [[ "$OPTARG" =~ ^0*(\.)?0*$ ]] && [[ $((${OPTARG%.*} + 1)) -le 100 ]]; then
				echo " --splitter-saturation-centile flag was triggered, splitter saturation parameter set to ${OPTARG}%." >&1
				splitter_saturation_centile=$OPTARG
			else
				echo ":( Wrong syntax for splitter saturation threshold. Using the default value ${splitter_saturation_centile}%." >&2
			fi
			shift
			;;
		--splitter-coarse-resolution) OPTARG=$2
			re='^[0-9]+$' ## TODO: specify/generalize re matrix resolutions size
			if [[ $OPTARG =~ $re ]]; then
				echo " --splitter-coarse-resolution flag was triggered, splitter coarse matrix resolution set to $OPTARG." >&1
				splitter_coarse_resolution=$OPTARG
			else
				echo ":( Wrong syntax for splitter coarse matrix resolution. Using the default value ${splitter_coarse_resolution}." >&2
			fi
			shift
			;;
		--splitter-coarse-region) OPTARG=$2
			re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]]; then
				echo " --splitter-coarse-region flag was triggered, splitter coarse resolution depletion region size set to $OPTARG." >&1
				splitter_coarse_region=$OPTARG
			else
				echo ":( Wrong syntax for splitter coarse resolution depletion region size. Using the default value ${splitter_coarse_region}." >&2
			fi
			shift
			;;
		--splitter-coarse-stringency) OPTARG=$2
			re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]] && [[ $OPTARG -lt 100 ]]; then
				echo " --splitter-coarse-stringency flag was triggered, splitter stringency parameter set to $OPTARG%." >&1
				splitter_coarse_stringency=$OPTARG
			else
				echo ":( Wrong syntax for splitter stringency parameter. Using the default value ${splitter_coarse_stringency}%." >&2
			fi
			shift
			;;
		--splitter-fine-resolution) OPTARG=$2
			re='^[0-9]+$' ## TODO: specify/generalize re matrix resolutions size
			if [[ $OPTARG =~ $re ]]; then
				echo " --splitter-fine-resolution flag was triggered, splitter fine matrix resolution set to $OPTARG." >&1
				splitter_fine_resolution=$OPTARG
			else
				echo ":( Wrong syntax for splitter fine matrix resolution. Using the default value ${splitter_fine_resolution}." >&2
			fi
			shift
			;;
# merger
		--merger-search-band) OPTARG=$2
			re='^[0-9]+$' ## TODO: specify/generalize re matrix resolutions size
			if [[ $OPTARG =~ $re ]]; then
				echo " --merger-search-band flag was triggered, merger will look for alternative haplotypes to input contigs and scaffolds within $OPTARG bases from their suggested location in the assembly." >&1
				merger_search_band=$OPTARG
			else
				echo ":( Wrong syntax for alternative haplotype search region size. Exiting!" >&2
				# Fix: bare `exit` returned the status of the preceding echo;
				# exit 1 matches the other merger error paths.
				exit 1
			fi
			shift
			;;
		--merger-alignment-length) OPTARG=$2
			re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]]; then
				echo " --merger-alignment-length flag was triggered, overlap length threshold for sequences to be recognized as alternative haplotypes is set to $OPTARG." >&1
				merger_alignment_length=$OPTARG
			else
				echo ":( Wrong syntax for alternative haplotype search alignment length. Exiting!" >&2
				exit 1
			fi
			shift
			;;
		--merger-alignment-identity) OPTARG=$2
			re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]]; then
				echo " --merger-alignment-identity flag was triggered, lastz alignment identity threshold for sequences to be recognized as alternative haplotypes is set to $OPTARG." >&1
				merger_alignment_identity=$OPTARG
			else
				echo ":( Wrong syntax for alternative haplotype search alignment identity. Exiting!" >&2
				exit 1
			fi
			shift
			;;
		--merger-alignment-score) OPTARG=$2
			re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]]; then
				echo " --merger-alignment-score flag was triggered, lastz alignment score threshold for sequences to be recognized as alternative haplotypes is set to $OPTARG." >&1
				merger_alignment_score=$OPTARG
			else
				echo ":( Wrong syntax for alternative haplotype search alignment score. Exiting!" >&2
				# Fix: bare `exit` -> exit 1, consistent with sibling arms.
				exit 1
			fi
			shift
			;;
		--merger-lastz-options) OPTARG=$2
			# The value must be wrapped in literal quote characters, matching
			# the default_merger_lastz_options format.
			re='^\"--.+\"$'
			if [[ $OPTARG =~ $re ]]; then
				echo " --merger-lastz-options flag was triggered, overlap length threshold for sequences to be recognized as alternative haplotypes is set to $OPTARG." >&1
				merger_lastz_options="$OPTARG"
			else
				echo ":( Wrong syntax for alternative haplotype search lastz option string. Exiting!" >&2
				exit 1
			fi
			shift
			;;
# TODO: merger, sealer, etc options
		--) # End of all options
			shift
			break
			;;
		-?*)
			echo ":| WARNING: Unknown option. Ignoring: ${1}" >&2
			;;
		*) # Default case: If no more options then break out of the loop.
			break
	esac
	shift
done
## check parameters for compatibility
# Coarse depletion regions must be larger than their bin size, and fine
# resolutions must be finer than coarse ones, for editor/polisher/splitter.
[[ ${editor_coarse_region} -le ${editor_coarse_resolution} ]] && echo >&2 ":( Requested depletion region size ${editor_coarse_region} and bin size ${editor_coarse_resolution} parameters for editor are incompatible. Run ${pipeline}/edit/run-mismatch-detector.sh -h for instructions. Exiting!" && exit 1
[[ ${editor_coarse_resolution} -le ${editor_fine_resolution} ]] && echo >&2 ":( Requested mismatch localization resolution ${editor_fine_resolution} and coarse search bin size ${editor_coarse_resolution} parameters for editor are incompatible. Run ${pipeline}/edit/run-mismatch-detector.sh -h for instructions. Exiting!" && exit 1
[[ ${polisher_coarse_region} -le ${polisher_coarse_resolution} ]] && echo >&2 ":( Requested depletion region size ${polisher_coarse_region} and bin size ${polisher_coarse_resolution} parameters for polisher are incompatible. Run ${pipeline}/edit/run-mismatch-detector.sh -h for instructions. Exiting!" && exit 1
[[ ${polisher_coarse_resolution} -le ${polisher_fine_resolution} ]] && echo >&2 ":( Requested mismatch localization resolution ${polisher_fine_resolution} and coarse search bin size ${polisher_coarse_resolution} parameters for polisher are incompatible. Run ${pipeline}/edit/run-mismatch-detector.sh -h for instructions. Exiting!" && exit 1
[[ ${splitter_coarse_region} -le ${splitter_coarse_resolution} ]] && echo >&2 ":( Requested depletion region size ${splitter_coarse_region} and bin size ${splitter_coarse_resolution} parameters for splitter are incompatible. Run ${pipeline}/edit/run-mismatch-detector.sh -h for instructions. Exiting!" && exit 1
[[ ${splitter_coarse_resolution} -le ${splitter_fine_resolution} ]] && echo >&2 ":( Requested mismatch localization resolution ${splitter_fine_resolution} and coarse search bin size ${splitter_coarse_resolution} parameters for splitter are incompatible. Run ${pipeline}/edit/run-mismatch-detector.sh -h for instructions. Exiting!" && exit 1
# Reject merge-specific flags in haploid mode.
# Fix: this guard previously tested ${merger_band_width}, a variable that is
# never set anywhere — the option parser assigns merger_search_band — so
# --merger-search-band silently escaped the haploid-mode compatibility check.
([[ $diploid == "false" ]] && [[ ! -z ${merger_search_band} || ! -z ${merger_alignment_score} || ! -z ${merger_alignment_identity} || ! -z ${merger_alignment_length} || ! -z ${merger_lastz_options} || $stage == "merge" ]]) && echo >&2 ":( Some options were requested that are not compatible with default haploid mode. Please include --mode diploid in your option list or remove flag calls associated with the merge block of the pipeline. Exiting!" && exit 1
## set merger default parameters if missing any
if [[ $diploid == "true" ]]; then
	[[ -z ${merger_search_band} ]] && merger_search_band=${default_merger_search_band}
	[[ -z ${merger_alignment_score} ]] && merger_alignment_score=${default_merger_alignment_score}
	[[ -z ${merger_alignment_identity} ]] && merger_alignment_identity=${default_merger_alignment_identity}
	[[ -z ${merger_alignment_length} ]] && merger_alignment_length=${default_merger_alignment_length}
	[[ -z ${merger_lastz_options} ]] && merger_lastz_options=${default_merger_lastz_options}
fi
############### HANDLE EXTERNAL DEPENDENCIES ###############
## GNU Parallel Dependency
# Parallelization is optional: enabled only if GNU Parallel >= 20150322
# (versions are dates, so a plain numeric comparison works) is on the PATH.
parallel="false"
if hash parallel 2>/dev/null; then
	ver=`parallel --version | awk 'NR==1{print \$3}'`
	[ $ver -ge 20150322 ] && parallel="true"
fi
[ $parallel == "false" ] && echo ":| WARNING: GNU Parallel version 20150322 or later not installed. We highly recommend to install it to increase performance. Starting pipeline without parallelization!" >&2
## LASTZ dependency
# LASTZ is mandatory only for diploid mode (alternative-haplotype merging).
lastz="false"
if hash lastz 2>/dev/null; then
	lastz="true"
fi
[[ $lastz == "false" && $diploid == "true" ]] && echo >&2 ":( LASTZ not installed or not in the path while diploid mode is triggered. The merge section of the pipeline will not run. Exiting!" && exit 1
############### HANDLE ARGUMENTS ###############
# Both positional arguments must be present and point to non-empty files.
# Note: `A || B && C` is left-associative in shell, i.e. (A || B) && C —
# the intended grouping here.
[ -z $1 ] || [ -z $2 ] && echo >&2 "Not sure how to parse your input: files not listed or not found at expected locations. Exiting!" && echo >&2 "$USAGE_short" && exit 1
[ ! -s $1 ] || [ ! -s $2 ] && echo >&2 "Not sure how to parse your input: files not listed or not found at expected locations. Exiting!" && echo >&2 "$USAGE_short" && exit 1
## TODO: check file format
if [ "$#" -ne 2 ]; then
	echo >&2 "Illegal number of arguments. Please double check your input. Exiting!" && echo >&2 "$USAGE_short" && exit 1
fi
orig_fasta=$1
orig_mnd=$2
# Derive the genome id by stripping any of the common fasta extensions.
genomeid=$(basename "$orig_fasta" .fasta)
genomeid=$(basename "$genomeid" .fna)
genomeid=$(basename "$genomeid" .fa)
# Symlink the mnd file to a canonical name; refuse to clobber a different
# pre-existing file of the same name.
if [ ${orig_mnd} != ${genomeid}.mnd.txt ]; then
	if [ -f ${genomeid}.mnd.txt ]; then
		cmp --silent ${orig_mnd} ${genomeid}.mnd.txt || { echo >&2 ":( Please remove or rename file ${genomeid}.mnd.txt. Exiting!" && exit 1; }
	fi
	ln -sf ${orig_mnd} ${genomeid}.mnd.txt
fi
orig_mnd=${genomeid}.mnd.txt
# Generate the cprops (contig properties) file unless fast-forwarding to a
# later stage or fast-starting from existing round-0 files.
if [ "$stage" != "scaffold" ] && [ "$stage" != "polish" ] && [ "$stage" != "split" ] && [ "$stage" != "seal" ] && [ "$stage" != "merge" ] && [ "$stage" != "finalize" ] && [ "$fast" != "true" ]; then
	awk -f ${pipeline}/utils/generate-sorted-cprops-file.awk ${orig_fasta} > ${genomeid}.cprops
fi
orig_cprops=${genomeid}.cprops
[ ! -f ${orig_cprops} ] && echo >&2 ":( No cprops file found. Please rerun the pipeline from scratch. Exiting!" && exit 1
## calculate zoom
# TODO: move this to mismatch detector, pass only scale
# Scale down all resolutions when the genome exceeds ~2.1 Gb — presumably to
# stay within a signed-32-bit coordinate limit of the Hi-C map format; confirm.
totlength=`awk '{total+=$3}END{print total}' ${orig_cprops}`
scale=$(( 1 + $totlength / 2100000000 ))
if [ $scale -ne 1 ]; then
	editor_coarse_resolution=$((editor_coarse_resolution/scale))
	editor_coarse_region=$((editor_coarse_region/scale))
	editor_fine_resolution=$((editor_fine_resolution/scale))
	polisher_coarse_resolution=$((polisher_coarse_resolution/scale))
	polisher_coarse_region=$((polisher_coarse_region/scale))
	polisher_fine_resolution=$((polisher_fine_resolution/scale))
	splitter_coarse_resolution=$((splitter_coarse_resolution/scale))
	splitter_coarse_region=$((splitter_coarse_region/scale))
	splitter_fine_resolution=$((splitter_fine_resolution/scale))
fi
############### ITERATIVE SCAFFOLDING/MISJOIN CORRECTION ###############
# Runs up to MAX_ROUNDS iterations of: scaffold -> visualize -> detect
# misjoins/repeats -> convert annotations to edits -> apply edits. Skipped
# entirely when fast-forwarding to a later stage.
if [ "$stage" != "polish" ] && [ "$stage" != "split" ] && [ "$stage" != "seal" ] && [ "$stage" != "merge" ] && [ "$stage" != "finalize" ]; then
	ROUND=0
	if ! [ "$fast" == "true" ]; then
		# NOTE(review): -f on a glob works only when it expands to a single
		# file; with multiple matches the test errors out. Also, the bare
		# `exit` below returns the status of the last command, unlike the
		# `exit 1` used elsewhere.
		if [ -f ${genomeid}.*.cprops ] || [ -f ${genomeid}.mnd.*.txt ] ; then
			echo >&2 ":( Please remove or rename files ${genomeid}.*.cprops ${genomeid}.mnd.*.txt. Exiting!" && exit
		else
			ln -sf ${orig_cprops} ${genomeid}.${ROUND}.cprops
			ln -sf ${orig_mnd} ${genomeid}.mnd.${ROUND}.txt
		fi
	else
		# Fast start: require all round-0 artifacts from a previous run.
		[ ! -f ${genomeid}.0.cprops ] || [ ! -f ${genomeid}.0.asm ] || [ ! -f ${genomeid}.0.hic ] || [ ! -f ${genomeid}.mnd.0.txt ] || [ ! -f ${genomeid}.0_asm.scaffold_track.txt ] || [ ! -f ${genomeid}.0_asm.superscaf_track.txt ] && echo >&2 ":( No early exit files are found. Please rerun the pipeline to include the round 0 assembly. Exiting!" && exit 1
	fi
	current_cprops=${genomeid}.${ROUND}.cprops
	current_mnd=${genomeid}.mnd.${ROUND}.txt
	echo "###############" >&1
	echo "Starting iterating scaffolding with editing:" >&1
	# run liger
	while true; do
		{
			# On a fast start, round 0 scaffolding/visualization already exists.
			if !([ "$fast" == "true" ] && [ ${ROUND} -eq 0 ] ); then
				# scaffold
				echo "...starting round ${ROUND} of scaffolding:" >&1
				bash ${pipeline}/scaffold/run-liger-scaffolder.sh -p ${parallel} -s ${input_size} -q ${mapq} ${current_cprops} ${current_mnd}
				# build a hic map of the resulting assembly
				echo "...visualizing round ${ROUND} results:" >&1
				bash ${pipeline}/visualize/run-asm-visualizer.sh -p ${parallel} -q ${mapq} -i ${current_cprops} ${genomeid}.${ROUND}.asm ${current_mnd}
				rm temp.${genomeid}.${ROUND}.asm_mnd.txt
				# early exit on round zero if requested
				[ "$early" == "true" ] && exit 0
			fi
			# break out of the scaffold-mismatch detection loop if the max number of steps is reached
			[ ${ROUND} -eq ${MAX_ROUNDS} ] && break
			# annotate near-diagonal mismatches in the map
			echo "...detecting misjoins in round ${ROUND} assembly:" >&1
			bash ${pipeline}/edit/run-mismatch-detector.sh -p ${parallel} -c ${editor_saturation_centile} -w ${editor_coarse_resolution} -d ${editor_coarse_region} -k ${editor_coarse_stringency} -n ${editor_fine_resolution} ${genomeid}.${ROUND}.hic
			# annotate repeats by coverage analysis
			bash ${pipeline}/edit/run-coverage-analyzer.sh -w ${editor_coarse_resolution} -t ${editor_repeat_coverage} ${genomeid}.${ROUND}.hic
			# store intermediate mismatch stuff - not necessary
			mv depletion_score_wide.wig depletion_score_wide.at.step.${ROUND}.wig
			mv depletion_score_narrow.wig depletion_score_narrow.at.step.${ROUND}.wig
			mv mismatch_wide.bed mismatch_wide.at.step.${ROUND}.bed
			mv mismatch_narrow.bed mismatch_narrow.at.step.${ROUND}.bed
			# store intermediate repeat stuff - not necessary
			mv coverage_wide.wig coverage_wide.at.step.${ROUND}.wig
			mv repeats_wide.bed repeats_wide.at.step.${ROUND}.bed
			# consolidate bed annotations: sort by start and merge overlapping
			# mismatch/repeat intervals into a single "suspect" track
			cat mismatch_narrow.at.step.${ROUND}.bed repeats_wide.at.step.${ROUND}.bed | sort -k 2,2n | awk 'BEGIN{FS="\t"; OFS="\t"}NR==1{start=$2; end=$3; next}$2<=end{if($3>end){end=$3}; next}{print "assembly", start, end; start=$2; end=$3}END{print "assembly", start, end}' > suspect.at.step.${ROUND}.bed
			# convert bed track into 2D annotations
			resolved=$(awk 'NR==2{print $3}' ${genomeid}.${ROUND}_asm.superscaf_track.txt) # scaled coordinates
			#!!PROBLEM!!
			awk -v bin_size=${editor_fine_resolution} -f ${pipeline}/edit/overlay-edits.awk ${genomeid}.${ROUND}_asm.scaffold_track.txt suspect.at.step.${ROUND}.bed | awk -v r=${resolved} 'NR==1||$3<=r' > suspect_2D.at.step.${ROUND}.txt
			# separate intra and inter-input scaffold mismatches
			awk 'NR==1||$8=="debris"' suspect_2D.at.step.${ROUND}.txt > edits.for.step.$((ROUND+1)).txt
			# optional
			awk 'NR==1||$8=="mismatch"' suspect_2D.at.step.${ROUND}.txt > mismatches.at.step.$ROUND.txt
			# only the header line left => no edits remain; stop iterating
			test=`wc -l < edits.for.step.$((ROUND+1)).txt`
			[ $test -eq 1 ] && echo >&1 ":) No more input edits to be done. Moving to polishing!" && rm edits.for.step.$((ROUND+1)).txt && break
			# move on to the next step
			ROUND=$((ROUND+1))
			[ -f ${genomeid}".edits.txt" ] && cp ${genomeid}".edits.txt" "archive."${genomeid}".edits.at.step."$((ROUND-1))".txt" # not necessary
			# reconstruct current edits: lift existing debris annotations from
			# the current cprops back to original input coordinates
			awk 'BEGIN{OFS="\t"; print "chr1", "x1", "x2", "chr2", "y1", "y2", "color", "id", "X1", "X2", "Y1", "Y2"}$1~/:::debris/{print $1, 0, $3, $1, 0, $3, "0,0,0", "debris", 0, $3, 0, $3}' ${current_cprops} | awk -f ${pipeline}/lift/lift-input-annotations-to-asm-annotations.awk ${current_cprops} <(awk '{print $2}' ${current_cprops}) - | awk -f ${pipeline}/lift/lift-asm-annotations-to-input-annotations.awk ${orig_cprops} <(awk '{print $2}' ${orig_cprops}) - > h.old.edits.txt
			# add new edits
			bash ${pipeline}/lift/lift-edit-asm-annotations-to-original-input-annotations.sh ${orig_cprops} ${current_cprops} ${genomeid}.$((ROUND-1)).asm edits.for.step.${ROUND}.txt > h.new.edits.txt
			# merge old and new edits (keep a single header, sort the rest)
			awk 'NR==1' "h.new.edits.txt" > temp
			{ awk 'NR>1' h.old.edits.txt ; awk 'NR>1' "h.new.edits.txt" ; } | sort -k 1,1 -k 2,2n >> temp
			mv temp ${genomeid}".edits.txt"
			rm h.old.edits.txt h.new.edits.txt
			# apply edits
			bash ${pipeline}/edit/apply-edits-prep-for-next-round.sh -p ${parallel} -r ${ROUND} ${genomeid}".edits.txt" ${orig_cprops} ${orig_mnd}
			current_cprops=${genomeid}.${ROUND}.cprops
			current_mnd=${genomeid}.mnd.${ROUND}.txt
		}
	done
	# expose the final round's outputs under stable "resolved" names
	ln -sf ${genomeid}.${ROUND}.cprops ${genomeid}.resolved.cprops
	ln -sf ${genomeid}.${ROUND}.asm ${genomeid}.resolved.asm
	ln -sf ${genomeid}.${ROUND}_asm.scaffold_track.txt ${genomeid}.resolved_asm.scaffold_track.txt
	ln -sf ${genomeid}.${ROUND}_asm.superscaf_track.txt ${genomeid}.resolved_asm.superscaf_track.txt
	ln -sf ${genomeid}.${ROUND}.hic ${genomeid}.resolved.hic
	ln -sf ${genomeid}.mnd.${ROUND}.txt ${genomeid}.mnd.resolved.txt
fi
############### POLISHING ###############
# Polishing stage: runs the assembly polisher on the "resolved" assembly and
# renames its outputs to the ${genomeid}.polished.* convention used downstream.
# Skipped when the user fast-forwards to a later stage via -s/--stage.
if [ "$stage" != "split" ] && [ "$stage" != "seal" ] && [ "$stage" != "merge" ] && [ "$stage" != "finalize" ]; then
# Guard: all four resolved-stage outputs must exist. Relies on bash's
# left-to-right, equal-precedence chaining of || and && — the echo+exit
# pair fires if ANY of the files is missing.
[ ! -f ${genomeid}.resolved.cprops ] || [ ! -f ${genomeid}.resolved.asm ] || [ ! -f ${genomeid}.resolved.hic ] || [ ! -f ${genomeid}.mnd.resolved.txt ] && echo >&2 ":( No resolved files are found. Please rerun the pipeline to include the scaffold segment. Exiting!" && exit 1
echo "###############" >&1
echo "Starting polish:" >&1
# Run the polisher with the polisher_* tuning parameters (set earlier in the
# script, outside this excerpt) against the original mnd and resolved assembly.
bash ${pipeline}/polish/run-asm-polisher.sh -p ${parallel} -j ${genomeid}.resolved.hic -a ${genomeid}.resolved_asm.scaffold_track.txt -b ${genomeid}.resolved_asm.superscaf_track.txt -s ${polisher_input_size} -c ${polisher_saturation_centile} -w ${polisher_coarse_resolution} -d ${polisher_coarse_region} -k ${polisher_coarse_stringency} -n ${polisher_fine_resolution} ${genomeid}.cprops ${orig_mnd} ${genomeid}.resolved.cprops ${genomeid}.resolved.asm
# Rename polisher outputs from *.resolved.polish.* to the *.polished.* names
# that the splitting stage expects.
mv ${genomeid}.resolved.polish.cprops ${genomeid}.polished.cprops
mv ${genomeid}.resolved.polish.asm ${genomeid}.polished.asm
mv ${genomeid}.resolved.polish.edits_2D.txt ${genomeid}.polished.edits_2D.txt
mv ${genomeid}.resolved.polish.mismatches_2D.txt ${genomeid}.polished.mismatches_2D.txt
mv ${genomeid}.resolved.polish.suspect_2D.txt ${genomeid}.polished.suspect_2D.txt
mv ${genomeid}.resolved.polish.mismatch_narrow.bed ${genomeid}.polished.mismatch_narrow.bed
mv ${genomeid}.resolved.polish.depletion_score_narrow.wig ${genomeid}.polished.depletion_score_narrow.wig
mv ${genomeid}.resolved.polish.mismatch_wide.bed ${genomeid}.polished.mismatch_wide.bed
mv ${genomeid}.resolved.polish.depletion_score_wide.wig ${genomeid}.polished.depletion_score_wide.wig
mv ${genomeid}.resolved.polish.hic ${genomeid}.polished.hic
mv ${genomeid}.resolved.polish_asm.superscaf_track.txt ${genomeid}.polished_asm.superscaf_track.txt
mv ${genomeid}.resolved.polish_asm.scaffold_track.txt ${genomeid}.polished_asm.scaffold_track.txt
fi
############### SPLITTING ###############
# Splitting stage: partitions the polished assembly into chromosomes using the
# splitter, then renames outputs to the ${genomeid}.split.* convention.
if [ "$stage" != "seal" ] && [ "$stage" != "merge" ] && [ "$stage" != "finalize" ]; then
# Guard: both polished outputs must exist and be non-empty (-s).
[ ! -s ${genomeid}.polished.cprops ] || [ ! -s ${genomeid}.polished.asm ] && echo >&2 ":( No resolved files are found. Please rerun the pipeline to include the scaffold/scaffold+polish segment. Exiting!" && exit 1
# Older invocation kept for reference (chromosome-count driven splitter).
# [ $chrom_num -ne 1 ] && bash ${pipeline}/split/run-asm-splitter.sh -c ${chrom_num} -r ${diploid} ${genomeid}.polished.cprops ${genomeid}.polished.asm ${genomeid}.mnd.polished.txt || cp ${genomeid}.polished.cprops ${genomeid}.polished.split.asm
echo "###############" >&1
echo "Starting split:" >&1
# Run the splitter with the splitter_* tuning parameters (set outside this excerpt).
bash ${pipeline}/split/run-asm-splitter.sh -p ${parallel} -j ${genomeid}.polished.hic -a ${genomeid}.polished_asm.scaffold_track.txt -b ${genomeid}.polished_asm.superscaf_track.txt -s ${splitter_input_size} -c ${splitter_saturation_centile} -w ${splitter_coarse_resolution} -d ${splitter_coarse_region} -k ${splitter_coarse_stringency} -n ${splitter_fine_resolution} ${genomeid}.cprops ${orig_mnd} ${genomeid}.polished.cprops ${genomeid}.polished.asm
# Rename splitter outputs to the *.split.* names the sealing stage expects.
mv ${genomeid}.polished.split.cprops ${genomeid}.split.cprops
mv ${genomeid}.polished.split.asm ${genomeid}.split.asm
mv ${genomeid}.polished.split.hic ${genomeid}.split.hic
mv ${genomeid}.polished.split_asm.superscaf_track.txt ${genomeid}.split_asm.superscaf_track.txt
mv ${genomeid}.polished.split_asm.scaffold_track.txt ${genomeid}.split_asm.scaffold_track.txt
fi
############### SEALING ###############
# Sealing stage: re-inserts small "unattempted" input sequences, optionally
# sorts scaffolds by size, rebuilds the Hi-C map and fasta, and — in the
# haploid case — links the rawchrom outputs as the final outputs.
if [ "$stage" != "merge" ] && [ "$stage" != "finalize" ]; then
# Guard: both split-stage outputs must exist and be non-empty.
[ ! -s ${genomeid}.split.cprops ] || [ ! -s ${genomeid}.split.asm ] && echo >&2 ":( No split files are found. Please rerun the pipeline to include the split segment. Exiting!" && exit 1
echo "###############" >&1
echo "Starting sealing:" >&1
bash ${pipeline}/seal/seal-asm.sh -s ${input_size} ${genomeid}.split.cprops ${genomeid}.split.asm
mv ${genomeid}.split.sealed.cprops ${genomeid}.rawchrom.cprops
mv ${genomeid}.split.sealed.asm ${genomeid}.rawchrom.asm
# sort output by scaffold size if requested (except for unattempted which we keep in the end)
if [ "$sort_output" == "true" ]; then
awk -v input_size=${input_size} -f ${pipeline}/utils/sort-asm-by-size.awk ${genomeid}.rawchrom.cprops ${genomeid}.rawchrom.asm > ${genomeid}.rawchrom.asm.tmp && mv ${genomeid}.rawchrom.asm.tmp ${genomeid}.rawchrom.asm
fi
# Rebuild the mnd for the sealed cprops, visualize, then drop the temporaries.
bash ${pipeline}/edit/edit-mnd-according-to-new-cprops.sh ${genomeid}.rawchrom.cprops ${orig_mnd} > ${genomeid}.rawchrom.mnd.txt
bash ${pipeline}/visualize/run-asm-visualizer.sh -p ${parallel} -q ${mapq} -i ${genomeid}.rawchrom.cprops ${genomeid}.rawchrom.asm ${genomeid}.rawchrom.mnd.txt
rm ${genomeid}.rawchrom.mnd.txt temp.${genomeid}.rawchrom.asm_mnd.txt
# prep for merging and finalizing
awk -f ${pipeline}/edit/edit-fasta-according-to-new-cprops.awk ${genomeid}.rawchrom.cprops ${orig_fasta} > ${genomeid}.rawchrom.fasta
# Haploid assemblies skip the merge stage: rawchrom IS the final assembly.
if [ $diploid == "false" ]; then
ln -sf ${genomeid}.rawchrom.cprops ${genomeid}.final.cprops
ln -sf ${genomeid}.rawchrom.asm ${genomeid}.final.asm
ln -sf ${genomeid}.rawchrom.fasta ${genomeid}.final.fasta
ln -sf ${genomeid}.rawchrom.hic ${genomeid}.final.hic
ln -sf ${genomeid}.rawchrom.assembly ${genomeid}.final.assembly
fi
fi
############### MERGING ###############
# Merging stage (diploid assemblies only): splits the rawchrom fasta per
# contig, runs the alternative-haplotype merger, and promotes the merged
# result to the *.final.* outputs.
if [ "$stage" != "finalize" ] && [ $diploid == "true" ]; then
# Guard: rawchrom assembly + fasta must exist and be non-empty.
# Fixed message typo: "rerun he pipeline" -> "rerun the pipeline".
[ ! -s ${genomeid}.rawchrom.assembly ] || [ ! -s ${genomeid}.rawchrom.fasta ] && echo >&2 ":( No raw chromosomal files were found. Please rerun the pipeline to include the seal segment" && exit 1
echo "###############" >&1
echo "Starting merge:" >&1
# Regenerate cprops/asm from the .assembly file if neither exists.
[[ -f ${genomeid}.rawchrom.cprops || -f ${genomeid}.rawchrom.asm ]] || awk -f ${pipeline}/utils/convert-assembly-to-cprop-and-asm.awk ${genomeid}.rawchrom.assembly
## TODO: split unsafe, redo via indexing as in haploid case
if [ -d faSplit ]; then
echo >&2 ":| WARNING: Using existing faSplit folder for merge. Totally fine if you know what you are doing. If unsure delete the faSplit folder and restart pipeline."
else
echo "...preparing fasta..." >&1
# One fasta per contig name, consumed by the merger below.
mkdir faSplit && cd faSplit && awk -f ${pipeline}/merge/split-fasta-by-cname.awk ../${genomeid}.rawchrom.cprops ../${genomeid}.rawchrom.fasta && cd ..
fi
# Merge alternative haplotypes via lastz alignment with the merger_* parameters.
bash ${pipeline}/merge/run-asm-merger.sh -b ${merger_search_band} -s ${merger_alignment_score} -i ${merger_alignment_identity} -l ${merger_alignment_length} -o "${merger_lastz_options}" ${genomeid}.rawchrom.cprops ${genomeid}.rawchrom.asm faSplit
cp ${genomeid}.rawchrom/${genomeid}.rawchrom_merged.asm ${genomeid}.final.asm
ln -sf ${genomeid}.rawchrom/merged_${genomeid}.rawchrom.fa ${genomeid}.final.fasta
# Rebuild cprops for the merged fasta and emit the combined .assembly file
# (cprops lines prefixed with '>' followed by the asm lines).
awk -f ${pipeline}/utils/generate-cprops-file.awk ${genomeid}.final.fasta > ${genomeid}.final.cprops
cat <(awk '{$0=">"$0}1' ${genomeid}.final.cprops) ${genomeid}.final.asm > ${genomeid}.final.assembly
# cleanup
rm -r faSplit
fi
############### FINALIZING ###############
# finalize fasta: add gaps, restore small sequences, and label output with the
# genome id via the finalize helper.
echo "###############" >&1
# Fixed typo in user-facing message: "Finilizing" -> "Finalizing".
echo "Finalizing output:" >&1
bash ${pipeline}/finalize/finalize-output.sh -s ${input_size} -l ${genomeid} ${genomeid}.final.cprops ${genomeid}.final.asm ${genomeid}.final.fasta final
| Shell |
3D | theaidenlab/AGWG-merge | JBAT_demo_instructions.md | .md | 3,397 | 28 | # Demo Instructions for the Juicebox Assembly Tools Module
### Overview
Assembly Tools [[1]](https://www.biorxiv.org/content/early/2018/01/28/254797) is a new module in the Juicebox desktop application [[2]](https://www.cell.com/cell-systems/abstract/S2405-4712(15)00054-X) that extends the Juicebox interface for Hi-C data visualization to allow for visualization and interactive refinement of genome assemblies. When assembly errors are found, users can correct them, using a simple point-and-click interface, in a matter of seconds. Both the heatmap and the reference genome are updated in real-time to reflect these changes. The Juicebox Assembly Tools is available as part of the compiled standalone software Juicebox (versions 1.8.8 and higher). The code is open source.
### Code/software availability
The compiled software can be downloaded from https://github.com/theaidenlab/juicebox/wiki/Download. This demo was tested using Juicebox 1.9.0 (Juicebox with Assembly Tools for Mac).
The source code is shared at https://github.com/theaidenlab/Juicebox. The code includes a README listing System requirements, download guide and instruction for use.
For a quick start guide on the Assembly Tools Module, please see [this tutorial video](https://www.youtube.com/watch?v=Nj7RhQZHM18). The tutorial demonstrates how to use Juicebox Assembly Tools in the context of a demo problem (see also the Juicebox Assembly Tools preprint [[1]](https://www.biorxiv.org/content/early/2018/01/28/254797), figure 2 caption and discussion).
The demo data files can be found [here](https://www.dropbox.com/s/13cppe80692oee9/demo.zip?dl=0). The shared *.zip* archive contains two files: *figure_2.hic* and *figure_2.assembly*. The tutorial video covers opening the two demo files in the compiled version of the software, provides operation instructions and displays expected output. The run time for the demo problem is 8 minutes on a “normal” desktop computer. We recommend computers running a Macintosh Operating System.
Juicebox Assembly Tools requires Java. Download Java [here](https://java.com/en/download/).
For a detailed description of how Juicebox Assembly Tools was used in conjunction with Juicer [[3]](https://www.cell.com/fulltext/S2405-4712(16)30219-8) and 3D-DNA [[4]](http://science.sciencemag.org/content/356/6333/92) to process Hi-C data for AaegL5 genome assembly see [README.md](https://github.com/theaidenlab/AGWG-merge/blob/master/README.md) in this GitHub repository (https://github.com/theaidenlab/AGWG-merge).
### Additional information
The software is distributed under MIT License.
See our ["Genome Assembly Cookbook"](http://aidenlab.org/assembly/manual_180322.pdf) for more information about Juicebox Assembly Tools.
### References
1. Dudchenko, O. et al. The Juicebox Assembly Tools module facilitates *de novo* assembly of mammalian genomes with chromosome-length scaffolds for under $1000. *bioRxiv* 254797 (2018). doi:10.1101/254797.
2. Durand, N. C. et al. Juicebox Provides a Visualization System for Hi-C Contact Maps with Unlimited Zoom. *Cell Syst.* 3, 99–101 (2016).
3. Durand, N. C. et al. Juicer Provides a One-Click System for Analyzing Loop-Resolution Hi-C Experiments. *Cell Syst.* 3, 95–98 (2016).
4. Dudchenko, O. et al. *De novo* assembly of the *Aedes aegypti* genome using Hi-C yields chromosome-length scaffolds. *Science* 356, 92–95 (2017).
| Markdown |
3D | theaidenlab/AGWG-merge | run-asm-pipeline-post-review.sh | .sh | 9,382 | 226 | #!/bin/bash
##########
#The MIT License (MIT)
#
# Copyright (c) 2018 Aiden Lab
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##########
#
# 3D DNA de novo genome assembly pipeline: 180114 version.
#
# Log the resolved script path and its arguments for reproducibility.
echo `readlink -f $0`" "$*
USAGE_short="
*****************************************************
3D de novo assembly: version 180114
USAGE: ./run-asm-pipeline-post-review.sh [options] -r <review.assembly> <path_to_input_fasta> <path_to_input_mnd>
DESCRIPTION:
This is a script to finalize assemblies (represented in input by draft fasta and deduplicated list of alignments of Hi-C reads to this fasta as produced by the Juicer pipeline) into chromosome-length fasta sequences, after review in Juicebox Assembly Tools Module (represented by review.assembly file). The script will produce an output fasta file, a Hi-C map of the final assembly, and a few supplementary annotation files to help review the result in Juicebox.
ARGUMENTS:
path_to_input_fasta Specify file path to draft assembly fasta file.
path_to_input_mnd Specify path to deduplicated list of alignments of Hi-C reads to the draft assembly fasta as produced by the Juicer pipeline: the merged_nodups file (mnd).
OPTIONS:
-r|--review Path to review \".assembly\" file.
--sort-output Sorts output scaffolds by size (prior to adding gaps), default is false.
-i|--input input_size Specifies threshold input contig/scaffold size (default is 15000). Contigs/scaffolds smaller than input_size are going to be ignored. Only matters if running including seal. Should be the same as used for running the original script.
-s|--stage stage Assembly steps to run on top of the reviewed assembly, can be seal and finalize. Default is finalize.
-q|--mapq mapq Mapq threshold for final map visualization, default is 1.
-h|--help Shows this help. Type --help for a full set of options.
*****************************************************
"
# Absolute path to the pipeline root (directory containing this script).
pipeline=`cd "$( dirname $0)" && pwd`
## default parameter setup
input_size=15000 # contigs/scaffolds smaller than input_size are ignored
mapq=1 # read mapping quality threshold for Hi-C scaffolder
stage="finalize" # by default run only final pipeline
sort_output=false
############### HANDLE OPTIONS ###############
# Manual option parser (supports long options, which getopts cannot).
# Each flag that takes a value reads it from $2 and performs an extra shift;
# the trailing shift at the loop bottom consumes the flag itself.
while :; do
case $1 in
-h|--help)
echo "$USAGE_short" >&1
exit 0
;;
# -r/--review: mandatory review .assembly file; must exist on disk.
-r|--review) OPTARG=$2
if [[ -f $OPTARG ]]; then
echo " -r|--review flag was triggered, treating file $OPTARG as a JB4A review file for draft fasta in arguments." >&1
review_assembly=$OPTARG
else
echo ":( File not found in the suggested review assembly file path. Exiting!" >&2
exit 1
fi
shift
;;
# -i/--input: non-negative integer size threshold; falls back to default on bad input.
-i|--input) OPTARG=$2
re='^[0-9]+$'
if [[ $OPTARG =~ $re ]]; then
echo " -i|--input flag was triggered, filtering draft contigs/scaffolds smaller than $OPTARG." >&1
input_size=$OPTARG
else
echo ":( Wrong syntax for input size threshold. Using the default value ${input_size}." >&2
fi
shift
;;
# -s/--stage: only "seal" and "finalize" are valid here.
# NOTE(review): on a bad value this prints "Exiting!" but does NOT exit —
# it keeps the default stage; confirm whether an exit 1 was intended.
-s|--stage) OPTARG=$2
if [ "$OPTARG" == "seal" ] || [ "$OPTARG" == "finalize" ]; then
echo " -s|--stage flag was triggered, fast-forwarding to \"$OPTARG\" pipeline section." >&1
stage=$OPTARG
else
echo ":( Wrong syntax for pipeline stage. Exiting!" >&2
fi
shift
;;
# -q/--mapq: non-negative integer mapq threshold for the final map.
-q|--mapq) OPTARG=$2 ##TODO: check that propagates consistently, not tested sufficiently
re='^[0-9]+$'
if [[ $OPTARG =~ $re ]]; then
echo " -q|--mapq flag was triggered, scaffolding using reads with at least $OPTARG mapping quality." >&1
mapq=$OPTARG
else
echo ":( Wrong syntax for mapping quality. Using the default value ${mapq}." >&2
fi
shift
;;
## long menu options
--sort-output)
echo " --sort-output was triggered, will sort output scaffolds by size." >&1
sort_output=true
;;
--) # End of all options
shift
break
;;
-?*)
echo ":| WARNING: Unknown option. Ignoring: ${1}" >&2
;;
*) # Default case: If no more options then break out of the loop.
break
esac
shift
done
############### HANDLE EXTERNAL DEPENDENCIES ###############
## GNU Parallel Dependency
# Probe the PATH for GNU Parallel and require a release no older than
# 20150322; downstream helpers receive the verdict via ${parallel}.
parallel="false"
if hash parallel 2>/dev/null; then
	ver=$(parallel --version | awk 'NR==1{print $3}')
	if [ $ver -ge 20150322 ]; then
		parallel="true"
	fi
fi
if [ $parallel == "false" ]; then
	echo ":| WARNING: GNU Parallel version 20150322 or later not installed. We highly recommend to install it to increase performance. Starting pipeline without parallelization!" >&2
fi
############### HANDLE ARGUMENTS ###############
# Validate inputs: the review file (-r) plus exactly two positional arguments
# (draft fasta, mnd file) must be present and non-empty.
# NOTE(review): expansions here are unquoted, so paths containing spaces will
# break these tests — confirm inputs are space-free by convention.
[ -z ${review_assembly} ] || [ -z $1 ] || [ -z $2 ] && echo >&2 "Not sure how to parse your input: files not listed or not found at expected locations. Exiting!" && echo >&2 "$USAGE_short" && exit 1
[ ! -s ${review_assembly} ] || [ ! -s $1 ] || [ ! -s $2 ] && echo >&2 "Not sure how to parse your input: files not listed or not found at expected locations. Exiting!" && echo >&2 "$USAGE_short" && exit 1
## TODO: check file format
if [ "$#" -ne 2 ]; then
echo >&2 "Illegal number of arguments. Please double check your input. Exiting!" && echo >&2 "$USAGE_short" && exit 1
fi
orig_fasta=$1
orig_mnd=$2
# Derive the genome id by stripping any of the common fasta extensions.
genomeid=$(basename "$orig_fasta" .fasta)
genomeid=$(basename "$genomeid" .fna)
genomeid=$(basename "$genomeid" .fa)
# Canonicalize the mnd path to ${genomeid}.mnd.txt via symlink; refuse to
# clobber a pre-existing, different file of that name.
if [ ${orig_mnd} != ${genomeid}.mnd.txt ]; then
if [ -f ${genomeid}.mnd.txt ]; then
cmp --silent ${orig_mnd} ${genomeid}.mnd.txt || { echo >&2 ":( Please remove or rename file ${genomeid}.mnd.txt. Exiting!" && exit 1; }
fi
ln -sf ${orig_mnd} ${genomeid}.mnd.txt
fi
orig_mnd=${genomeid}.mnd.txt
## TODO: add some checks to make more user-proof against swapping
# Reuse the cprops from the original pipeline run if present, else regenerate.
[ -f ${genomeid}.cprops ] || awk -f ${pipeline}/utils/generate-sorted-cprops-file.awk ${orig_fasta} > ${genomeid}.cprops
orig_cprops=${genomeid}.cprops
[ ! -f ${orig_cprops} ] && echo >&2 ":( No cprops file found. Please rerun the pipeline from scratch. Exiting!" && exit 1
## calculate zoom
# TODO: move this to mismatch detector, pass only scale
# Genomes larger than ~2.1 Gb (the .hic coordinate limit) are scaled down by
# an integer factor so the whole assembly fits on one "assembly chromosome".
totlength=`awk '{total+=$3}END{print total}' ${orig_cprops}`
scale=$(( 1 + $totlength / 2100000000 ))
if [ $scale -ne 1 ]; then
# NOTE(review): none of the editor_*/polisher_*/splitter_* variables below are
# defined anywhere in THIS script (they belong to run-asm-pipeline.sh, from
# which this section appears to be copied). Bash arithmetic treats unset
# variables as 0, so these lines currently just assign 0 — confirm whether
# they can be removed or whether the variables should be sourced.
editor_coarse_resolution=$((editor_coarse_resolution/scale))
editor_coarse_region=$((editor_coarse_region/scale))
editor_fine_resolution=$((editor_fine_resolution/scale))
polisher_coarse_resolution=$((polisher_coarse_resolution/scale))
polisher_coarse_region=$((polisher_coarse_region/scale))
polisher_fine_resolution=$((polisher_fine_resolution/scale))
splitter_coarse_resolution=$((splitter_coarse_resolution/scale))
splitter_coarse_region=$((splitter_coarse_region/scale))
splitter_fine_resolution=$((splitter_fine_resolution/scale))
fi
# Dispatch on the requested stage: either re-run the main pipeline from the
# seal step on the reviewed assembly, or go straight to finalizing.
if [ "$stage" != "finalize" ]; then
############### SEALING ###############
# Split the reviewed .assembly back into cprops ('>'-prefixed lines) and asm.
awk -v cprops=${genomeid}.split.cprops -v asm=${genomeid}.split.asm '$1~/^>/{$1=substr($1,2); print > cprops;next}{print > asm}' ${review_assembly}
if [ "$sort_output" == "true" ]; then
bash ${pipeline}/run-asm-pipeline.sh -s seal -i ${input_size} --sort-output ${orig_fasta} ${orig_mnd}
else
bash ${pipeline}/run-asm-pipeline.sh -s seal -i ${input_size} ${orig_fasta} ${orig_mnd}
fi
else
############### FINALIZING ###############
echo "###############" >&1
# Fixed typo in user-facing message: "Finilizing" -> "Finalizing".
echo "Finalizing output:" >&1
# Split the reviewed .assembly into final cprops and asm files.
awk -v cprops=${genomeid}.final.cprops -v asm=${genomeid}.final.asm '$1~/^>/{$1=substr($1,2); print > cprops;next}{print > asm}' ${review_assembly}
if [ "$sort_output" == "true" ]; then
awk -v input_size=${input_size} -f ${pipeline}/utils/sort-asm-by-size.awk ${genomeid}.final.cprops ${genomeid}.final.asm > ${genomeid}.final.asm.tmp && mv ${genomeid}.final.asm.tmp ${genomeid}.final.asm
fi
# build final map
bash ${pipeline}/edit/edit-mnd-according-to-new-cprops.sh ${genomeid}.final.cprops ${orig_mnd} > ${genomeid}.final.mnd.txt
bash ${pipeline}/visualize/run-asm-visualizer.sh -p ${parallel} -q ${mapq} -i ${genomeid}.final.cprops ${genomeid}.final.asm ${genomeid}.final.mnd.txt
rm ${genomeid}.final.mnd.txt temp.${genomeid}.final.asm_mnd.txt
# build final fasta
awk -f ${pipeline}/edit/edit-fasta-according-to-new-cprops.awk ${genomeid}.final.cprops ${orig_fasta} > ${genomeid}.final.fasta
bash ${pipeline}/finalize/finalize-output.sh -s ${input_size} -l ${genomeid} ${genomeid}.final.cprops ${genomeid}.final.asm ${genomeid}.final.fasta final
fi
| Shell |
3D | theaidenlab/AGWG-merge | lift/lift-edit-asm-annotations-to-original-input-annotations.sh | .sh | 3,181 | 72 | #!/bin/bash
#### Description: Does a series of liftovers to map annotations from an edited assembly to original scaffold/contig input.
#### Usage: lift-edit-asm-annotations-to-original-input-annotations.sh <path_to_original_cprops> <path_to_edited_cprops> <path_to_edited_asm> <path_to_edited_annotation_file>
#### Input: Current and original cprops, current asm file.
#### Optional input: list of previous edits to original contigs/scaffolds.
#### Output: Stdout of original contig/scaffold edits as a 2D annotation file (0-based, 12-column).
#### Written by: Olga Dudchenko - olga.dudchenko@bcm.edu. Version date 01/11/2017.
USAGE="
*****************************************************
Translates list of assembly mismatch positions into mismatches in original contigs/scaffolds (and merges them with previous annotations, optional).
./lift-edit-asm-annotations-to-original-input-annotations.sh [-s scale] <path_to_original_cprops> <path_to_edited_cprops> <path_to_edited_asm> <path_to_edited_annotation_file>
ARGUMENTS:
path_to_original_cprops Path to original input cprops file.
path_to_edited_cprops Path to cprops file of the assembly made from edited input
path_to_edited_asm Path to asm file of the assembly made from edited input
path_to_edited_annotation_file Path to a 2D annotation file to be lifted from current edited assembly to original input scaffolds/contigs
OPTIONS:
-h Shows this help
#-s scale Scaling coefficient to stretch or squeeze the annotations [currently hard-set to default 1].
*****************************************************
"
## SET DEFAULTS
scale=1	# default: no coordinate scaling

## HANDLE OPTIONS
# -h prints the usage text; -s accepts a positive integer scaling coefficient
# (note: per USAGE, scaling is effectively hard-set to the default of 1).
while getopts "s:h" opt; do
case $opt in
h) echo "$USAGE"
exit 0
;;
s) re='^[0-9]+$'
if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]]; then
echo "...-s flag was triggered. Will scale the output according to $OPTARG scaling coefficient" >&2
# Fixed: was scale="${suffix}${OPTARG}" — $suffix is never defined
# anywhere in this script, so it always expanded to the empty string.
scale=$OPTARG
else
echo ":( Wrong syntax for scaling coefficient. Continuing without scaling" >&2
fi
;;
*) echo ":( Illegal options. Exiting."
echo "$USAGE"
exit 1
;;
esac
done
shift $(( OPTIND-1 ))
## HANDLE ARGUMENTS, TODO: check formats
# Four positional arguments, all required and non-empty.
orig_cprops=$1
edit_cprops=$2
edit_asm=$3
annotations=$4
[ $# -eq 4 ] && [ -s ${orig_cprops} ] && [ -s ${edit_cprops} ] && [ -s ${edit_asm} ] && [ -s ${annotations} ] || { echo >&2 ":( Not sure how to parse your input or input files not found at intended locations. Exiting!" && echo "$USAGE" >&2 && exit 1; }
## HANDLE DEPENDENCIES
# Locate the sibling lift awk scripts relative to this script's directory.
path_to_scripts=`cd "$( dirname $0)" && pwd`
path_to_lift=$(dirname ${path_to_scripts})"/lift"
lift_asm_annotations_script=${path_to_lift}"/lift-asm-annotations-to-input-annotations.awk"
lift_input_annotations_script=${path_to_lift}"/lift-input-annotations-to-asm-annotations.awk"
## MAIN
# Three chained liftovers: edited-assembly coords -> edited-input coords ->
# edited pseudo-assembly coords -> original input coords. The header line of
# the annotation file is echoed first, then stripped from the lifted body
# (tail -n +2) before sorting by scaffold and start position.
head -n 1 ${annotations} && awk '{print $2}' ${orig_cprops} | awk -v scale=${scale} -f ${lift_asm_annotations_script} ${orig_cprops} - <(awk '{print $2}' ${edit_cprops} | awk -v scale=${scale} -f ${lift_input_annotations_script} ${edit_cprops} - <(awk -f ${lift_asm_annotations_script} ${edit_cprops} ${edit_asm} ${annotations})) | tail -n +2 | sort -k 1,1 -k 2,2n
| Shell |
3D | theaidenlab/AGWG-merge | lift/lift-input-mnd-to-asm-mnd.sh | .sh | 1,155 | 40 | #!/bin/bash
## Handle options
scale=1
while getopts "s:" opt; do
case $opt in
s) re='^[0-9]+$'
if [[ $OPTARG =~ $re ]]; then
scale=$OPTARG
else
echo ":( Wrong syntax for scale quality. Using the default value ${scale}" >&2
fi
;;
*) echo "$USAGE" >&2
exit 1
;;
esac
done
shift $(( OPTIND-1 ))
cprops=$1
asm=$2
mnd=$3
pipeline=`cd "$( dirname $0)" && cd .. && pwd`
lift_mnd_script=${pipeline}/lift/lift-input-mnd-to-asm-mnd.awk
## GNU Parallel Dependency
parallel="false"
if hash parallel 2>/dev/null; then
ver=`parallel --version | awk 'NR==1{print \$3}'`
[ $ver -ge 20150322 ] && parallel="true"
fi
[ $parallel == "false" ] && echo >&2 ":| WARNING: GNU Parallel version 20150322 or later not installed. We highly recommend to install it to increase performance. Starting pipeline without parallelization!"
[ $parallel == "true" ] && parallel -a ${mnd} --pipepart --will-cite --jobs 80% --block 1G "awk -v scale=${scale} -f ${lift_mnd_script} ${cprops} ${asm} - "
[ $parallel == "false" ] && awk -v scale=${scale} -f ${lift_mnd_script} ${cprops} ${asm} ${mnd}
| Shell |
3D | theaidenlab/AGWG-merge | visualize/juicebox_tools.sh | .sh | 84 | 4 | #!/bin/sh
set -e
java -Xms49152m -Xmx49152m -jar `dirname $0`/juicebox_tools.jar $*
| Shell |
3D | theaidenlab/AGWG-merge | visualize/run-assembly-visualizer.sh | .sh | 9,191 | 205 | #!/bin/bash
#### Description: Script to visualize assemblies: a wrapper around remap-input-mnd-to-asm-mnd.awk and Juicebox pre.
#### Usage: bash ./run-assembly-visualizer.sh [options] <path_to_input_assembly_file> <path_to_input-mnd-file>.
#### Input: assembly file, mnd file.
#### Output: hic file, scaffold and super-scaffold 2D annotation files (for juicebox.js).
#### Parameters: zoom (default is >=1 calculated to fit the assembly chromosome); mapq threshold (0, 1, 30, all; default is 1 for mapq>=1).
#### Options: -l <path_to_gap_bed>, -p <true/false> to use parallelization for speed-up.
#### Dependencies: Java; GNU Parallel if available.
#### NOTE: if .assembly involves editing sequences, i.e. splitting them, the original mnd-file should be edited using scripts in the edit section of the pipeline.
#### Written by: Olga Dudchenko, version date 02/12/2018
USAGE="
*****************************************************
Visualizing draft genomes in juicebox: 18 July 2016
USAGE: ./run-assembly-visualizer.sh [options] <path_to_input_assembly_file> <path_to_input_mnd_file>
DESCRIPTION:
This is a script to visualize fasta sequences (represented by the .assembly file) with pairwise contact data encoded in the Juicer merged_nodups.txt (mnd) file that describes alignment of Hi-C data to the input fasta scaffolds. The script allows for changes in order and/or orientation of sequences in the input fasta as described by the .assembly file. The script will produce a .hic file for viewing in Juicebox in a format compatible with Assembly Tools as well as scaffold and superscaffold annotation files. Metadata can be attached to the map by passing -i and -g flags with paths to stats and graph files.
ARGUMENTS:
path_to_input_assembly_file Specify path to assembly file generated from the input fasta directly or by the assembler such as 3D-DNA.
path_to_input_mnd_file Specify path to mnd file describing pairwise Hi-C alignments to the input fasta sequences.
OPTIONS:
-q mapq Build map for a specific mapq threshold (default is 1).
-p true/false Use GNU Parallel to speed up computation (default is true).
-z zoom Build map with hardcoded zoom level. By default this is calculated based on the cprops file and applied only to genomes >= 2.1 Gb.
**unprompted**
-m path_to_asm_mnd Path to mnd already lifted from input to assembly chromosome: used to skip the remapping step.
-n Skip normalization.
-r Build for specific resolutions (default is -r 2500000,1000000,500000,250000,100000,50000,25000,10000,5000,1000)
-c Clean up when done (default: no cleanup.)
-i Ignore mapq suffix.
-h Shows this help
*****************************************************
"
## Defaults
mapq=1	# read mapping-quality threshold for the map
use_parallel=true	# use GNU Parallel for the remap step
res_string="2500000,1000000,500000,250000,100000,50000,25000,10000,5000,1000"	# hic resolutions, comma-separated
skip_norm=false	# -n: skip Juicebox normalization
clean_up=false	# -c: remove temporary files when done
ignore_mapq_suffix=false;	# -i: do not add a mapq suffix to output names
add_metadata=false;	# -a: attach juicer stats/graph metadata if present
## HANDLE OPTIONS
# NOTE(review): the optstring declares "l:" (documented as -l <path_to_gap_bed>
# in the header comment) but there is no l) case below, so -l currently falls
# through to the illegal-option branch and exits — confirm intended behavior.
while getopts "q:p:z:m:l:r:incah" opt; do
case $opt in
h) echo "$USAGE"
exit 0
;;
q) re='^[0-9]+$'
if [[ $OPTARG =~ $re ]]; then
echo ":) -q flag was triggered, starting calculations for $OPTARG threshold mapping quality" >&1
mapq=$OPTARG
else
# Fixed: message referenced undefined array element ${MAPQ[0]} and printed "MAPQ=".
echo ":( Wrong syntax for mapping quality. Using the default value mapq=${mapq}" >&2
fi
;;
p) if [ $OPTARG == true ] || [ $OPTARG == false ]; then
echo ":) -p flag was triggered. Running with GNU Parallel support parameter set to $OPTARG." >&1
use_parallel=$OPTARG
else
echo ":( Unrecognized value for -p flag. Running with default parameters (-p true)." >&2
fi
;;
z) re='^[0-9]+$'
if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]]; then
echo ":) -z flag was triggered, starting calculations with specified scaling" >&1
scale=$OPTARG
else
echo ":( Wrong syntax for scaling coefficient. Deriving the default scaling to fit the genome into 2.1Gb" >&2
fi
;;
m) if [ -s $OPTARG ]; then
echo ":) Skipping remap step and using $OPTARG as premapped input" >&1
remapped_mnd=$OPTARG
else
echo ":( Tentative remapped file not found. Building one as part of the workflow"
fi
;;
i) ignore_mapq_suffix=true;
# Fixed truncated message: previously read "building mapq without".
echo ":) -i flag was triggered, building without mapq suffix in output names" >&1
;;
n) skip_norm=true
echo ":) -n flag was triggered, building maps without normalization" >&1
;;
c) clean_up=true
echo ":) -c flag was triggered, will remove temporary files after completion" >&1
;;
r) re='^[0-9]*(\,?[0-9]*)*$'
if [[ $OPTARG =~ $re ]]; then
echo ":) -r flag was triggered, starting calculations for resolution list: ${OPTARG}" >&1
res_string=$OPTARG
else
# Fixed: message referenced an unrelated "pct=5" default (copy-paste).
echo ":( Wrong syntax for resolution flag. Using the default resolution list ${res_string}" >&2
fi
;;
a) add_metadata=true
echo ":) -a flag was triggered, will look for juicer metadata files and add if present" >&1
;;
*) echo ":( Illegal options. Exiting."
echo "$USAGE"
exit 1
;;
esac
done
shift $(( OPTIND-1 ))
# Split the comma-separated resolution string into the res array.
IFS=',' read -r -a res <<< "$res_string"
## HANDLE ARGUMENTS, TODO: check formats
# Two positional arguments: the .assembly file and the mnd file; both must
# exist and be non-empty.
assembly=$1
mnd=$2
[ $# -eq 2 ] && [ -s ${assembly} ] && [ -s ${mnd} ] || { echo ":( Not sure how to parse your input or input files not found at intended locations. Exiting!" && echo "$USAGE" && exit 1 ; }
## CHECK DEPENDENCIES
# Java is required for Juicebox tools; GNU Parallel only when -p true (default).
type java >/dev/null 2>&1 || { echo >&2 ":( Java is not available, please install/add to path Java to run Juicer and Juicebox. Exiting!"; exit 1; }
if [ $use_parallel == true ]; then
type parallel >/dev/null 2>&1 || { echo >&2 ":( GNU Parallel support is set to true (default) but GNU Parallel is not in the path. Please install GNU Parallel or set -p option to false. Exiting!"; exit 1; }
fi
# Locate sibling helper scripts relative to this script's directory.
path_to_scripts=`cd "$( dirname $0)" && pwd`
path_to_lift=$(dirname ${path_to_scripts})"/lift"
juicebox=${path_to_scripts}/"juicebox_tools.sh"
lift_input_mnd_script=${path_to_lift}/lift-input-mnd-to-asm-mnd.awk
lift_input_annotations_script=${path_to_lift}/lift-input-annotations-to-asm-annotations.awk
if [ ! -f $juicebox ] || [ ! -f $lift_input_mnd_script ] || [ ! -f $lift_input_annotations_script ] ; then
echo ":( Relevant dependency scripts not found. Exiting!" && exit 1
fi
## Handle default zoom if not specified
# Scale total assembly length down by an integer factor so it fits within the
# ~2.1 Gb coordinate limit of a single .hic "assembly chromosome".
if [ -z $scale ]; then
# calculate necessary zoom
totlength=`awk '$0~/^>/{total+=$3}END{print total}' ${assembly}`
scale=$(( 1 + $totlength / 2100000000 ))
fi
genomeid="`basename $assembly .assembly`"
if [ -z ${remapped_mnd} ]; then
## Remap merged_nodups
echo "...Remapping contact data from the original contig set to assembly"
if [ $use_parallel == true ]; then
cmd="parallel --will-cite -a ${mnd} --pipepart -j 80% --block 1G \"awk -v scale=${scale} -f ${lift_input_mnd_script} <(awk '\\\$0~/^>/{\\\$1=substr(\\\$1,2); print}' ${assembly}) <(awk '\\\$0!~/^>/' ${assembly}) - \" > temp.${genomeid}.asm_mnd.txt"
else
cmd="awk -v scale=${scale} -f ${lift_input_mnd_script} <(awk '\$0~/^>/{\$1=substr(\$1,2); print}' ${assembly}) <(awk '\$0!~/^>/' ${assembly}) ${mnd} > temp.${genomeid}.asm_mnd.txt"
fi
eval ${cmd}
remapped_mnd="temp."${genomeid}".asm_mnd.txt"
fi
## Make tracks
echo "...Building track files"
awk 'BEGIN{OFS="\t"; print "chr1", "sx1", "sx2", "chr2", "sy1", "sy2", "color", "Scaffold_ID", "x1", "x2", "y1", "y2"}{print $1, 0, $3, $1, 0, $3, "0,255,0", "+"$1" (+"$2")", 0, $3, 0, $3}' <(awk '$0~/^>/{$1=substr($1,2); print}' ${assembly}) | awk -v scale=${scale} -f ${lift_input_annotations_script} <(awk '$0~/^>/{$1=substr($1,2); print}' ${assembly}) <(awk '$0!~/^>/' ${assembly}) - > ${genomeid}_asm.scaffold_track.txt
awk -v scale=${scale} 'BEGIN{OFS="\t"; print "chr1", "sx1", "sx2", "chr2", "sy1", "sy2", "color", "Superscaffold_ID", "x1", "x2", "y1", "y2"; pos+=0}(FILENAME==ARGV[1]){clength[$2]=$3; next}{gsub("-",""); n=split($0,a); c=0; for (i=1; i<=n; i++) {c+=clength[a[i]]}; print "assembly", int(pos/scale), int((pos+c)/scale), "assembly", int(pos/scale), int((pos+c)/scale), "0,0,255", FNR, pos, pos+c, pos, pos+c; pos+=c}' <(awk '$0~/^>/{$1=substr($1,2); print}' ${assembly}) <(awk '$0!~/^>/' ${assembly}) > ${genomeid}_asm.superscaf_track.txt
## Build .hic files
echo "...Building the hic file"
# Lab convention: the mapq-1 map keeps no suffix; other thresholds get "_<mapq>" so
# multiple maps can coexist. Bug fix: the original tested the never-assigned
# ignore_q_suffix instead of ignore_mapq_suffix (set by -i), so -i had no effect, and
# the unquoted empty expansion made the [ ] test malformed when the variable was unset.
[ $mapq -eq 1 ] && ignore_mapq_suffix=true
[ "${ignore_mapq_suffix}" == "true" ] && mapqsuf="" || mapqsuf="_"${mapq}
# Divide each requested resolution by the zoom factor and assemble the "pre" -r option.
rLen=${#res[@]}
add_options=$(( res[0]/scale ))
for (( i=1; i<$rLen; i++ ))
do
add_options=$add_options","$(( res[$i]/scale ))
done
#[ ${scale} -ne 1 ] && add_options=${res[0]}","${add_options}
add_options="-r "${add_options}
# -n disables normalization; -a attaches Juicer stats/hists files when they exist.
[ "$skip_norm" == "true" ] && add_options=${add_options}" -n"
if [ "$add_metadata" == "true" ]; then
[ -f inter${mapqsuf}.txt ] && add_options=${add_options}" -s inter${mapqsuf}.txt"
[ -f inter${mapqsuf}_hists.m ] && add_options=${add_options}" -g inter${mapqsuf}_hists.m"
fi
# Build the hic file on a single synthetic "assembly" chromosome of scaled total length
# (chrom sizes supplied via process substitution).
bash ${juicebox} pre -q ${mapq} ${add_options} ${remapped_mnd} ${genomeid}${mapqsuf}.hic <(echo "assembly "$((totlength / scale)))
## Cleanup
[ "$clean_up" == "true" ] && rm ${remapped_mnd}
#
#
| Shell |
3D | theaidenlab/AGWG-merge | visualize/run-asm-visualizer.sh | .sh | 8,648 | 213 | #!/bin/bash
#### Description: Script to visualize draft assemblies: a wrapper around remap-contig-mnd-to-asm-mnd.awk and Juicebox pre. Also dumps 2D annotation files.
#### Usage: bash ./run-asm-visualizer.sh [options] path_to_cprops path_to_asm <contig-mnd-file>.
#### Input: cprops file, asm file, mnd file.
#### Output: hic file, scaffold and super-scaffold 2D annotation files. Optional: contig annotation file?
#### Parameters: zoom (default is >=1 calculated to fit the assembly chromosome); mapq threshold (0, 1, 30, all; default is 1 for mapq>=1).
#### Options: -l <path_to_gap_bed>, -p <true/false> to use parallelization for speed-up.
#### Dependencies: Java; GNU Parallel if available.
#### Written by: Olga Dudchenko & Sanjit Batra, version date 07/18/2016
# Usage text printed by -h and on argument errors.
# Fix: metadata attachment is triggered by the -a flag (which picks up Juicer
# inter.txt / inter_hists.m files from the working directory), not by "-i and -g"
# flags as the old text claimed; -a was also missing from the option list.
USAGE="
*****************************************************
Visualizing draft genomes in juicebox: 18 July 2016
USAGE: ./run-asm-visualizer.sh [options] <path_to_cprops> <path_to_asm> <path_to_contig_mnd>
DESCRIPTION:
This is a script to visualize draft assemblies (represented in input by their cprops and asm files) from pairwise contact data represented by Juicer merged_nodups.txt file. The script will produce hic files for viewing in Juicebox as well as scaffold annotation files. Juicer metadata (inter.txt stats and inter_hists.m graph files, if present in the working directory) can be attached to the map by passing the -a flag.
ARGUMENTS:
path_to_cprops Specify cprops file path.
path_to_asm Specify asm file.
path_to_contig_mnd Specify path to mnd file describing pairwise Hi-C contacts between assembly contigs.
OPTIONS:
-q mapq Build map for a specific mapq threshold (default is 1).
-p true/false Use GNU Parallel to speed up computation (default is true).
-z zoom Build map with hardcoded zoom level. By default this is calculated based on the cprops file and applied only to genomes >= 2.1 Gb.
**unprompted**
#-l gap_file Path to gap bed file - necessary to build contig annotation track.
-m path_to_asm_mnd Path to mnd already lifted from input to assembly chromosome: used to skip the remapping step.
-n Skip normalization.
-r Build for specific resolutions (default is -r 2500000,1000000,500000,250000,100000,50000,25000,10000,5000,1000)
-c Clean up when done (default: no cleanup.)
-a Attach Juicer metadata files (inter.txt/inter_hists.m) to the map if present.
-i Ignore mapq suffix.
-h Shows this help
*****************************************************
"
## Defaults
mapq=1                  # mapq threshold for reads included in the map
use_parallel=true       # use GNU Parallel to speed up the remap step
res_string="2500000,1000000,500000,250000,100000,50000,25000,10000,5000,1000"   # resolutions (bp) before zoom scaling
skip_norm=false         # -n: skip matrix normalization
clean_up=false          # -c: remove temporary files when done
ignore_mapq_suffix=false;   # -i: drop the "_<mapq>" suffix from output names
add_metadata=false;     # -a: attach Juicer stats/hists metadata if present
## HANDLE OPTIONS
# Fixes to user-facing messages: the -q fallback referenced a nonexistent ${MAPQ[0]}
# array (actual variable is ${mapq}); the -i message was truncated; the -r fallback
# was a copy-paste from another script ("pct=5") and now names the resolution default.
while getopts "q:p:z:m:l:r:incah" opt; do
case $opt in
# -h: print usage and exit.
h) echo "$USAGE"
exit 0
;;
# -q: mapq threshold (non-negative integer).
q) re='^[0-9]+$'
if [[ $OPTARG =~ $re ]]; then
echo ":) -q flag was triggered, starting calculations for $OPTARG threshold mapping quality" >&1
mapq=$OPTARG
else
echo ":( Wrong syntax for mapping quality. Using the default value MAPQ=${mapq}" >&2
fi
;;
# -p: toggle GNU Parallel support (true/false).
p) if [ $OPTARG == true ] || [ $OPTARG == false ]; then
echo ":) -p flag was triggered. Running with GNU Parallel support parameter set to $OPTARG." >&1
use_parallel=$OPTARG
else
echo ":( Unrecognized value for -p flag. Running with default parameters (-p true)." >&2
fi
;;
# -z: hardcoded scaling (zoom) coefficient; positive integer.
z) re='^[0-9]+$'
if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]]; then
echo ":) -z flag was triggered, starting calculations with specified scaling" >&1
scale=$OPTARG
else
echo ":( Wrong syntax for scaling coefficient. Deriving the default scaling to fit the genome into 2.1Gb" >&2
fi
;;
# -m: reuse an existing assembly-coordinate mnd file to skip remapping.
m) if [ -s $OPTARG ]; then
echo ":) Skipping remap step and using $OPTARG as premapped input" >&1
remapped_mnd=$OPTARG
else
echo ":( Tentative remapped file not found. Building one as part of the workflow"
fi
;;
# l) gap_file=$OPTARG
# ;;
# -i: suppress the "_<mapq>" suffix on output file names.
i) ignore_mapq_suffix=true;
echo ":) -i flag was triggered, building map without mapq suffix" >&1
;;
# -n: skip normalization when building the .hic file.
n) skip_norm=true
echo ":) -n flag was triggered, building maps without normalization" >&1
;;
# -c: remove temporary files on completion.
c) clean_up=true
echo ":) -c flag was triggered, will remove temporary files after completion" >&1
;;
# -r: comma-separated resolution list (bp, pre-scaling).
r) re='^[0-9]*(\,?[0-9]*)*$'
if [[ $OPTARG =~ $re ]]; then
echo ":) -r flag was triggered, starting calculations for resolution list: ${OPTARG}" >&1
res_string=$OPTARG
else
echo ":( Wrong syntax for resolution flag. Using the default resolution list ${res_string}" >&2
fi
;;
# -a: attach Juicer metadata files to the map if present.
a) add_metadata=true
echo ":) -a flag was triggered, will look for juicer metadata files and add if present" >&1
;;
*) echo ":( Illegal options. Exiting."
echo "$USAGE"
exit 1
;;
esac
done
shift $(( OPTIND-1 ))
# Split the comma-separated resolution string into the res array.
IFS=',' read -r -a res <<< "$res_string"
## HANDLE ARGUMENTS, TODO: check formats
# Three positional arguments: cprops, asm, and merged_nodups file; all must be non-empty.
cprops=$1
asm=$2
mnd=$3
[ $# -eq 3 ] && [ -s ${cprops} ] && [ -s ${asm} ] && [ -s ${mnd} ] || { echo ":( Not sure how to parse your input or input files not found at intended locations. Exiting!" && echo "$USAGE" && exit 1 ; }
## CHECK DEPENDENCIES
type java >/dev/null 2>&1 || { echo >&2 ":( Java is not available, please install/add to path Java to run Juicer and Juicebox. Exiting!"; exit 1; }
if [ $use_parallel == true ]; then
type parallel >/dev/null 2>&1 || { echo >&2 ":( GNU Parallel support is set to true (default) but GNU Parallel is not in the path. Please install GNU Parallel or set -p option to false. Exiting!"; exit 1; }
fi
# Locate companion scripts relative to this script's own directory.
path_to_scripts=`cd "$( dirname $0)" && pwd`
path_to_lift=$(dirname ${path_to_scripts})"/lift"
juicebox=${path_to_scripts}/"juicebox_tools.sh"
lift_input_mnd_script=${path_to_lift}/lift-input-mnd-to-asm-mnd.awk
lift_input_annotations_script=${path_to_lift}/lift-input-annotations-to-asm-annotations.awk
if [ ! -f $juicebox ] || [ ! -f $lift_input_mnd_script ] || [ ! -f $lift_input_annotations_script ] ; then
echo ":( Relevant dependency scripts not found. Exiting!" && exit 1
fi
## Handle default zoom if not specified
if [ -z $scale ]; then
# Calculate necessary zoom: total genome length (sum of the 3rd cprops column)
# divided by ~2.1 Gb so the single "assembly" chromosome fits in Juicebox.
totlength=`awk '{total+=$3}END{print total}' ${cprops}`
scale=$(( 1 + $totlength / 2100000000 ))
fi
genomeid="`basename $asm .asm`"
# Lift contacts from input-contig coordinates to assembly coordinates unless a
# premapped file was supplied via -m.
if [ -z ${remapped_mnd} ]; then
## Remap merged_nodups
echo "...Remapping contact data from the original contig set to assembly"
if [ $use_parallel == true ]; then
# pipepart splits the mnd into ~1G chunks processed by parallel awk jobs (80% of cores).
cmd="parallel --will-cite -a ${mnd} --pipepart -j 80% --block 1G \"awk -v scale=${scale} -f ${lift_input_mnd_script} ${cprops} ${asm} - \" > temp.${genomeid}.asm_mnd.txt"
else
cmd="awk -v scale=${scale} -f ${lift_input_mnd_script} ${cprops} ${asm} ${mnd} > temp.${genomeid}.asm_mnd.txt"
fi
eval ${cmd}
remapped_mnd="temp."${genomeid}".asm_mnd.txt"
fi
## Make tracks
echo "...Building track files"
# 2D annotation of individual scaffolds, lifted to assembly coordinates.
awk 'BEGIN{OFS="\t"; print "chr1", "sx1", "sx2", "chr2", "sy1", "sy2", "color", "Scaffold_ID", "x1", "x2", "y1", "y2"}{print $1, 0, $3, $1, 0, $3, "0,255,0", "+"$1" (+"$2")", 0, $3, 0, $3}' $cprops | awk -v scale=${scale} -f ${lift_input_annotations_script} $cprops $asm - > ${genomeid}_asm.scaffold_track.txt
# 2D annotation of superscaffolds: one row per asm line, coordinates scaled by zoom.
awk -v scale=${scale} 'BEGIN{OFS="\t"; print "chr1", "sx1", "sx2", "chr2", "sy1", "sy2", "color", "Superscaffold_ID", "x1", "x2", "y1", "y2"; pos+=0}(FILENAME==ARGV[1]){clength[$2]=$3; next}{gsub("-",""); n=split($0,a); c=0; for (i=1; i<=n; i++) {c+=clength[a[i]]}; print "assembly", int(pos/scale), int((pos+c)/scale), "assembly", int(pos/scale), int((pos+c)/scale), "0,0,255", FNR, pos, pos+c, pos, pos+c; pos+=c}' $cprops $asm > ${genomeid}_asm.superscaf_track.txt
# trial single-file for jb4a
# Merge cprops (with ">"-prefixed names) and asm into a single .assembly file.
awk '{$1=">"$1}1' ${cprops} | cat - ${asm} > ${genomeid}".assembly"
## Build .hic files
echo "...Building the hic file"
# Lab convention: the mapq-1 map keeps no suffix; other thresholds get "_<mapq>" so
# multiple maps can coexist. Bug fix: the original tested the never-assigned
# ignore_q_suffix instead of ignore_mapq_suffix (set by -i), so -i had no effect, and
# the unquoted empty expansion made the [ ] test malformed when the variable was unset.
[ $mapq -eq 1 ] && ignore_mapq_suffix=true
[ "${ignore_mapq_suffix}" == "true" ] && mapqsuf="" || mapqsuf="_"${mapq}
# Divide each requested resolution by the zoom factor and assemble the "pre" -r option.
rLen=${#res[@]}
add_options=$(( res[0]/scale ))
for (( i=1; i<$rLen; i++ ))
do
add_options=$add_options","$(( res[$i]/scale ))
done
#[ ${scale} -ne 1 ] && add_options=${res[0]}","${add_options}
add_options="-r "${add_options}
# -n disables normalization; -a attaches Juicer stats/hists files when they exist.
[ "$skip_norm" == "true" ] && add_options=${add_options}" -n"
if [ "$add_metadata" == "true" ]; then
[ -f inter${mapqsuf}.txt ] && add_options=${add_options}" -s inter${mapqsuf}.txt"
[ -f inter${mapqsuf}_hists.m ] && add_options=${add_options}" -g inter${mapqsuf}_hists.m"
fi
# Build the hic file on a single synthetic "assembly" chromosome of scaled total length.
bash ${juicebox} pre -q ${mapq} ${add_options} ${remapped_mnd} ${genomeid}${mapqsuf}.hic <(echo "assembly "$((totlength / scale)))
## Cleanup
[ "$clean_up" == "true" ] && rm ${remapped_mnd}
#
#
| Shell |
3D | theaidenlab/AGWG-merge | polish/run-asm-polisher.sh | .sh | 12,416 | 232 | #!/bin/bash
#### Description: Wrapper script to polish any given assembly. Polish is a process in which a mismatch detector is run and pieces of an assembly between mismatches are treated as an input. Polish cannot insert pieces unlike a typical iterative assembly step but it can avoid some pitfalls caused by very small or unusually behaving contigs and scaffolds. The idea is that you typically run mismatch detector at a pretty large scale to make sure there are no gaping global misassemblies left. To control what is going on one might want to check if the number of mismatches pre to post-polish has decreased (currently disabled).
#### Usage: run-asm-polisher.sh -j <path_to_current_hic_file> -a <path_to_scaf_annotation_file> -b <path_to_superscaf_annotation_file> -w <coarse_res_for_mismatch> -n <fine_res_for_mismatch> -d <depletion_region> <path_to_original_cprops> <path_to_original_mnd_file> <path_to_current_cprops> <path_to_current_asm_file>
#### Input: cprops and mnd for original input contigs/scaffolds; cprops and asm for current input contigs/scaffolds.
#### Optional input: Juicebox .hic file for current assembly (-j option).
#### Output: cprops and asm of the polished assembly. Additional files include the new mnd and the new .hic files.
#### Parameters: primarily those that will be passed to the mismatch detector.
#### Unprompted: -p for use of GNU Parallel; -c for percent to saturate, -k for sensitivity, -b for balancing type (mismatch detector unprompted parameters).
#### Dependencies: mismatch-detector, editor, scaffolder, polish-specific files (edit-asm-according-to-new-cprops.sh).
#### Written by: Olga Dudchenko - olga.dudchenko@bcm.edu. Version dated 01/22/2017
## Set defaults
input_size=1000000      # minimal scaffold size (bp) accepted as polish input; smaller pieces drop out
wide_bin=100000         # coarse mismatch-detector resolution (bp)
wide_depletion=3000000  # superdiagonal bound (bp) over which the depletion score is averaged
narrow_bin=1000         # fine resolution (bp) for mismatch-region thinning
## Set unprompted defaults
use_parallel=true # use GNU Parallel to speed-up calculations (default)
k=55 # sensitivity to depletion score (k% of expected is labeled as a mismatch)
pct=5 # default percent of map to saturate
norm="KR" # balancing type passed to the mismatch detector (KR-balanced matrix)
## HANDLE OPTIONS
while getopts "hs:j:a:b:w:n:d:k:c:b:p:" opt; do
case $opt in
h) echo "$USAGE" >&1
exit 0
;;
p) if [ $OPTARG == true ] || [ $OPTARG == false ]; then
echo ":) -p flag was triggered. Running with GNU Parallel support parameter set to $OPTARG." >&1
use_parallel=$OPTARG
else
echo ":( Unrecognized value for -p flag. Running with default parameters (-p true)." >&2
fi
;;
s) re='^[0-9]+$'
if [[ $OPTARG =~ $re ]]; then
echo "...-s flag was triggered, will ignore all scaffolds shorter than $OPTARG for polishing" >&1
input_size=$OPTARG
else
echo ":( Wrong syntax for input size. Using the default value ${input_size}" >&2
fi
;;
j) if [ -s $OPTARG ]; then
echo "...-j flag was triggered, will use Juicebox map $OPTARG" >&1
current_hic=$OPTARG
else
echo ":( Juicebox file not found. Will run visualize script from scratch" >&2
fi
;;
a) if [ -s $OPTARG ]; then
echo "...-a flag was triggered, will use scaffold annotation file $OPTARG" >&1
current_scaf=$OPTARG
else
echo ":( Scaffold annotation file not found. Will run visualize script from scratch" >&2
fi
;;
b) if [ -s $OPTARG ]; then
echo "...-b flag was triggered, will use superscaffold annotation file $OPTARG" >&1
current_superscaf=$OPTARG
else
echo ":( Superscaffold annotation file not found. Will run visualize script from scratch" >&2
fi
;;
w) re='^[0-9]+$'
if [[ $OPTARG =~ $re ]]; then
echo ":) -w flag was triggered, performing cursory search for mismatches at $OPTARG resolution" >&1
wide_bin=$OPTARG
else
echo ":( Wrong syntax for bin size. Using the default value 25000" >&2
fi
;;
n) re='^[0-9]+$'
if [[ $OPTARG =~ $re ]]; then
echo ":) -n flag was triggered, performing mismatch region thinning at $OPTARG resolution" >&1
narrow_bin=$OPTARG
else
echo ":( Wrong syntax for mismatch localization resolution. Using the default value 1000" >&2
fi
;;
d) re='^[0-9]+$'
if [[ $OPTARG =~ $re ]]; then
echo ":) -d flag was triggered, depletion score will be averaged across a region bounded by $OPTARG superdiagonal" >&1
wide_depletion=$OPTARG
else
echo ":( Wrong syntax for mapping quality. Using the default value dep_size=100000" >&2
fi
;;
k) re='^[0-9]+$'
if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]] && [[ $OPTARG -lt 100 ]]; then
echo ":) -k flag was triggered, starting calculations with ${OPTARG}% depletion as mismatch threshold" >&1
k=$OPTARG
else
echo ":( Wrong syntax for mismatch threshold. Using the default value k=50" >&2
fi
;;
c) re='^[0-9]+\.?[0-9]*$'
if [[ $OPTARG =~ $re ]] && [[ ${OPTARG%.*} -ge 0 ]] && ! [[ "$OPTARG" =~ ^0*(\.)?0*$ ]] && [[ $((${OPTARG%.*} + 1)) -le 100 ]]; then
echo ":) -c flag was triggered, starting calculations with ${OPTARG}% saturation level" >&1
pct=$OPTARG
else
echo ":( Wrong syntax for saturation threshold. Using the default value pct=${pct}" >&2
fi
;;
b) if [ $OPTARG == NONE ] || [ $OPTARG == VC ] || [ $OPTARG == VC_SQRT ] || [ $OPTARG == KR ]; then
echo ":) -b flag was triggered. Type of norm chosen for the contact matrix is $OPTARG." >&1
norm=$OPTARG
else
echo ":( Unrecognized value for -b flag. Running with default parameters (-b NONE)." >&2
fi
;;
*) echo "$USAGE" >&2
exit 1
;;
esac
done
shift $(( OPTIND-1 ))
## HANDLE ARGUMENTS: TODO check file format
if [ $# -lt 4 ]; then
echo ":( Required arguments not found. Please double-check your input!!" >&2
echo "$USAGE" >&2
exit 1
fi
orig_cprops=$1
orig_mnd=$2
current_cprops=$3
current_asm=$4
if [ ! -f ${orig_cprops} ] || [ ! -f ${orig_mnd} ] || [ ! -f ${current_cprops} ] || [ ! -f ${current_asm} ] ; then
echo >&2 ":( Required files not found. Please double-check your input!!" && exit 1
fi
## CHECK DEPENDENCIES
pipeline=`cd "$( dirname $0)" && cd .. && pwd`
## PREP
id=`basename ${current_cprops} .cprops`
STEP="polish"
# PREPARATORY: split asm into resolved and unresolved pieces:
awk 'NR==1' ${current_asm} > temp_resolved.asm
awk 'NR>1' ${current_asm} > temp_unresolved.asm
## MAIN FUNCTION
# 0) Check if Juicebox file for the current assembly has been passed. If not build a map. TODO: make sure that the required resolution is included. TODO: Enable at some point.
if [ -z ${current_hic} ] || [ -z ${current_scaf} ] || [ -z ${current_superscaf} ] ; then
echo "...no hic file and/or annotation files have been provided with input, building the hic map from scratch" >&1
## TODO: check w/o parallel
bash ${pipeline}/edit/edit-mnd-according-to-new-cprops.sh ${current_cprops} ${orig_mnd} > `basename ${current_cprops} .cprops`.mnd.txt
current_mnd=`basename ${current_cprops} .cprops`.mnd.txt
bash ${pipeline}/visualize/run-asm-visualizer.sh ${current_cprops} ${current_asm} ${current_mnd}
current_hic=`basename ${current_asm} .asm`.hic
current_scaf=`basename ${current_asm} .asm`_asm.scaffold_track.txt
current_superscaf=`basename ${current_asm} .asm`_asm.superscaf_track.txt
fi
# 1) Annotate mismatches in current assembly
bash ${pipeline}/edit/run-mismatch-detector.sh -p ${use_parallel} -c ${pct} -w ${wide_bin} -k ${k} -d ${wide_depletion} -n ${narrow_bin} ${current_hic}
# store intermediate mismatch stuff - not necessary
mv depletion_score_wide.wig ${id}.${STEP}.depletion_score_wide.wig
mv depletion_score_narrow.wig ${id}.${STEP}.depletion_score_narrow.wig
mv mismatch_wide.bed ${id}.${STEP}.mismatch_wide.bed
mv mismatch_narrow.bed ${id}.${STEP}.mismatch_narrow.bed
# convert bed track into 2D annotations, restricted to the resolved portion of the map
resolved=$(awk 'NR==2{print $3}' ${current_superscaf}) #scaled coordinates
awk -v bin_size=${narrow_bin} -f ${pipeline}/edit/overlay-edits.awk ${current_scaf} ${id}.${STEP}.mismatch_narrow.bed | awk -v r=${resolved} 'NR==1||$3<=r' > ${id}.${STEP}.suspect_2D.txt
# split into mismatches and edits
awk 'NR==1||$8=="mismatch"' ${id}.${STEP}.suspect_2D.txt > ${id}.${STEP}.mismatches_2D.txt
awk 'NR==1||$8=="debris"' ${id}.${STEP}.suspect_2D.txt > ${id}.${STEP}.edits_2D.txt
# 2) Deal with mismatches: break resolved asm at joints associated with mismatches
# (lift mismatch annotations back to input coordinates, classify affected contig ends
# as head/tail, then split the resolved asm line at those ends).
awk -f ${pipeline}/lift/lift-asm-annotations-to-input-annotations.awk ${current_cprops} ${current_asm} ${id}.${STEP}.mismatches_2D.txt | awk 'FILENAME==ARGV[1]{cname[$1]=$2;next}FNR>1{if($2==0){type[cname[$1]]="h"}else{type[cname[$1]]="t"}}END{for(i in type){print i, type[i]}}' ${current_cprops} - | awk 'FILENAME==ARGV[1]{type[$1]=$2; if($2=="h"){type[-$1]="t"}else{type[-$1]="h"}; next}{str=""; for(i=1;i<=NF;i++){if($i in type){if (type[$i]=="h"){print str; str=$i}else{str=str" "$i; print str; str=""}}else{str=str" "$i}}; print str}' - temp_resolved.asm | sed '/^$/d' | awk '{$1=$1}1' > temp_resolved.asm.new && mv temp_resolved.asm.new temp_resolved.asm
# 3) Apply edits
# reconstruct the edits file: regenerate pre-existing ":::debris" annotations in the
# original input coordinate system.
awk 'BEGIN{OFS="\t"; print "chr1", "x1", "x2", "chr2", "y1", "y2", "color", "id", "X1", "X2", "Y1", "Y2"}$1~/:::debris/{print $1, 0, $3, $1, 0, $3, "0,0,0", "debris", 0, $3, 0, $3}' ${current_cprops} | awk -f ${pipeline}/lift/lift-input-annotations-to-asm-annotations.awk ${current_cprops} <(awk '{print $2}' ${current_cprops}) - | awk -f ${pipeline}/lift/lift-asm-annotations-to-input-annotations.awk ${orig_cprops} <(awk '{print $2}' ${orig_cprops}) - > temp.pre_polish_edits.txt
current_edits=temp.pre_polish_edits.txt
# Lift the newly detected edits to original input coordinates and merge with the
# pre-existing edits (header from the new file, bodies concatenated and sorted).
bash ${pipeline}/lift/lift-edit-asm-annotations-to-original-input-annotations.sh ${orig_cprops} ${current_cprops} ${current_asm} ${id}.${STEP}.edits_2D.txt > h.edits.txt
awk 'NR==1' "h.edits.txt" > temp
{ awk 'NR>1' ${current_edits} ; awk 'NR>1' "h.edits.txt" ; } | sort -k 1,1 -k 2,2n >> temp
mv temp temp.post_polish_edits.txt
polish_edits=temp.post_polish_edits.txt
# Apply the merged edits to the original cprops/mnd, producing the "polish" round inputs.
bash ${pipeline}/edit/apply-edits-prep-for-next-round.sh -p ${use_parallel} -r ${STEP} ${polish_edits} ${orig_cprops} ${orig_mnd}
mv `basename ${orig_cprops} .cprops`.${STEP}.cprops $id.${STEP}.cprops
mv `basename ${orig_mnd} .txt`.${STEP}.txt $id.${STEP}.mnd.txt
polish_cprops=$id.${STEP}.cprops
polish_mnd=$id.${STEP}.mnd.txt
# 4) Lift current assembly to new cprops
bash ${pipeline}/edit/edit-asm-according-to-new-cprops.sh ${polish_cprops} ${current_cprops} temp_resolved.asm > new_resolved.asm
bash ${pipeline}/edit/edit-asm-according-to-new-cprops.sh ${polish_cprops} ${current_cprops} temp_unresolved.asm > new_unresolved.asm
# 5) Prepare for polish run: break resolved at debris, filter pieces smaller than input_size
# (kept pieces seed the scaffolder; dropouts are preserved and re-appended at the end).
awk -v input_size=${input_size} 'function printout(str){if(c>=input_size){print substr(str,2)>"h.scaffolds.original.notation.step.0.txt"}else{print substr(str,2)>"h.dropouts.step.0.txt"}}FILENAME==ARGV[1]{len[$2]=$3; len[-$2]=$3; if($1~/:::debris/){remove[$2]=1; remove[-$2]=1}; next}{str=""; for(i=1;i<=NF;i++){if($i in remove){if(str!=""){printout(str)}; print $i > "h.dropouts.step.0.txt"; str=""; c=0}else{str=str" "$i; c+=len[$i]}}; if(str!=""){printout(str)}}' ${polish_cprops} new_resolved.asm
cat new_unresolved.asm >> h.dropouts.step.0.txt
# Protect the dropouts file: the scaffolders rewrite h.dropouts.step.0.txt internally.
mv h.dropouts.step.0.txt do_not_delete.dropouts.step.0.txt
# 6) Run TIGer
bash ${pipeline}/scaffold/run-tiger-scaffolder.sh -p ${use_parallel} -s ${input_size} ${polish_cprops} ${polish_mnd}
polish_asm=`basename ${polish_cprops} .cprops`.asm
mv do_not_delete.dropouts.step.0.txt h.dropouts.step.0.txt
mv ${polish_asm} h.scaffolds.original.notation.step.0.txt
# 6) Run LIGer (for things TIGer was not able to join - not necessary, but for megascaffold consistency)
bash ${pipeline}/scaffold/run-liger-scaffolder.sh -p ${use_parallel} -s ${input_size} ${polish_cprops} ${polish_mnd}
polish_asm=`basename ${polish_cprops} .cprops`.asm
# 7) Visualize output
bash ${pipeline}/visualize/run-asm-visualizer.sh -p ${use_parallel} ${polish_cprops} ${polish_asm} ${polish_mnd}
# 8) Cleanup
rm ${polish_mnd} temp_resolved.asm temp_unresolved.asm temp.pre_polish_edits.txt temp.post_polish_edits.txt new_resolved.asm new_unresolved.asm temp.${id}.${STEP}.asm_mnd.txt
| Shell |
3D | theaidenlab/AGWG-merge | scaffold/run-tiger-scaffolder.sh | .sh | 10,104 | 228 | #!/bin/bash
## TIGer scaffolder wrapper script
## Written by Olga Dudchenko and Sanjit Batra
# Usage text printed by -h and on argument errors.
# Fixes: "link_threshod" typo, and "LIGer scaffolding" corrected to TIGer — this is
# the TIGer wrapper (copy-paste residue from the LIGer script).
USAGE="
*****************************************************
This is a wrapper for a Hi-C Tail Iterative Greedy genome assembly (TIGer) algorithm, version date: Dec 7, 2016.
Usage: ./run-tiger-scaffolder.sh [-h] [-s minimal_scaffold_size] [-t link_threshold] [-q mapq] path_to_cprops_file path_to_merge_nodups_file
ARGUMENTS:
path_to_cprops_file Path to (prefiltered) cprops file listing contigs for which TIGer scaffolding will be attempted
path_to_merge_nodups_file Path to merge_nodups Juicer output file
OPTIONS:
-h Shows this help
-s size Set minimal contig/scaffold size to use as input
-q mapq Set threshold for Hi-C reads mapping quality (default is 1)
-p true/false Use GNU Parallel to speed up calculations (default is true)
-t link_threshold Set threshold for joining links [not working yet, uses default]
Uses scrape-mnd-tiger.awk, generate-unsorted-confidence-table.awk, confidence-to-assembly.awk, scaffolds-to-original-notation.awk and drop-smallest-dubious-element.awk that should be in the same folder as the wrapper script.
In the current version ordering and orienting contigs at each iteration is based on the data from whole inter-contig contact matrices. At each iteraction the contigs and corresponding matrices are updated, the idea being that some low-confidence scaffolds links may benefit from more data provided by the already-joined contigs and get resolved.
Note that in the current version the input is expected in the cprops format.
*****************************************************
"
## Set current defaults
SIZE=15000          # minimal contig/scaffold size (bp) accepted as input
MAPQ=1              # mapq threshold for Hi-C reads
use_parallel=true   # use GNU Parallel to speed up the mnd scrape
## HANDLE OPTIONS
# Fix: the -p messages said "Running LIGer ..." — copy-paste residue from the LIGer
# wrapper; this is the TIGer wrapper.
while getopts "s:q:p:t:h" opt; do
case $opt in
# -h: print usage and exit.
h) echo "$USAGE" >&2
exit 0
;;
# -s: minimal input contig/scaffold size (positive integer, bp).
s) re='^[0-9]+$'
if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]]; then
echo ":) -s flag was triggered, starting calculations with $OPTARG threshold starting contig/scaffold size" >&1
SIZE=$OPTARG
else
echo ":( Wrong syntax for minimal input contig/scaffold size. Using the default value SIZE=$SIZE" >&2
fi
;;
# -q: mapq threshold (non-negative integer).
q) re='^[0-9]+$'
if [[ $OPTARG =~ $re ]]; then
echo ":) -q flag was triggered, starting calculations with $OPTARG threshold mapping quality" >&1
MAPQ=$OPTARG
else
echo ":( Wrong syntax for mapping quality. Using the default value MAPQ=$MAPQ" >&2
fi
;;
# -p: toggle GNU Parallel support (true/false).
p) if [ $OPTARG == true ] || [ $OPTARG == false ]; then
echo ":) -p flag was triggered. Running TIGer with GNU Parallel support parameter set to $OPTARG." >&1
use_parallel=$OPTARG
else
echo ":( Unrecognized value for -p flag. Running TIGer with default parameters (-p true)." >&2
fi
;;
# -t: link threshold — accepted but not implemented yet.
t) echo "...-thr flag was triggered. Sorry, option not functional yet: using default thresholds" >&2
;;
*) echo "$USAGE" >&2
exit 1
;;
esac
done
shift $(( OPTIND-1 ))
## HANDLE ARGUMENTS: TODO check file format
# Two positional arguments are required: cprops file and merged_nodups file.
if [ $# -lt 2 ]; then
echo ":( Required arguments not found. Please double-check your input!!" >&2
echo "$USAGE" >&2
exit 1
fi
# Handle arguments: cprops file
contigPropFile=$1
if [[ ! -s "$contigPropFile" ]]; then
echo ":( Cprops file not found. Please double-check your input!!" >&2
echo "$USAGE" >&2
exit 1
else
# TODO: check that cprops file is in proper format
echo "...Using cprops file: $contigPropFile"
fi
# Handle arguments: merged_nodups files
if [ $# -eq 2 ]; then
# TODO: check that merged_nodups file is in proper format
mergelib=$2
echo "...Using merged_nodups file: $mergelib"
fi
## CHECK DEPENDENCIES
if [ $use_parallel == true ]; then
type parallel >/dev/null 2>&1 || { echo >&2 ":( GNU Parallel support is set to true (default) but GNU Parallel is not in the path. Please install GNU Parallel or set -p option to false. Exiting!"; exit 1; }
fi
# All helper awk scripts must sit in the same directory as this wrapper.
path_to_scripts=`cd "$( dirname $0)" && pwd`
scrape_contacts_script="$path_to_scripts""/scrape-mnd-tiger.awk"
merge_scores_script="$path_to_scripts""/merge-scores-tiger.awk"
compute_confidences_script="$path_to_scripts""/generate-unsorted-confidence-table.awk"
accept_links_script="$path_to_scripts""/confidence-to-assembly.awk"
update_assembly_script="$path_to_scripts""/scaffolds-to-original-notation.awk"
drop_dubious_script="$path_to_scripts""/drop-smallest-dubious-element.awk"
if [ ! -f $scrape_contacts_script ] || [ ! -f $merge_scores_script ] || [ ! -f $compute_confidences_script ] || [ ! -f $accept_links_script ] || [ ! -f $update_assembly_script ] || [ ! -f $drop_dubious_script ]; then
echo ":( Relevant dependency scripts not found in bin folder. Exiting!" >&2
exit 1
fi
## PREP FOR FIRST STEP - TODO: make restarting from any step possible
# Seed iteration 0 from the cprops file unless an explicit scaffold set already exists:
# keep contigs >= SIZE that are not flagged ":::debris"; everything else goes to dropouts.
if [ ! -f "h.scaffolds.original.notation.step.""0"".txt" ]; then
echo "...Scaffolding all scaffolds and contigs greater or equal to $SIZE bp."
# thinking of introducing the unattempted flag here
gawk -v SIZE=${SIZE} '$3>=SIZE && $1!~/:::debris$/{print $2; next}{print $2 >"/dev/stderr"}' $contigPropFile > "h.scaffolds.original.notation.step.""0"".txt" 2>"h.dropouts.step.""0"".txt"
else
echo "...Explicit scaffold set has been listed as input. Using set as a first iteration."
fi
STEP=1
echo "...Starting iteration # $STEP"
# MAIN LOOP
# Each iteration: scrape contacts from the mnd for the current scaffold set, score
# candidate joins, accept the confident ones, and rewrite the scaffold set. When no
# join is accepted, progressively drop the smallest "dubious" element and retry.
while true; do
#do not enter the next iteration if nothing to assemble
if [[ $(wc -l <"h.scaffolds.original.notation.step.""$((STEP-1))"".txt") -eq 1 ]]; then
STEP=$((-1+$STEP))
break
fi
#extract, relable and count reads from merged-nodups [TODO: rethink this part once mnd is deprecated]
if [ $use_parallel == true ]; then
parallel -a $mergelib --will-cite --jobs 80% --pipepart --block 1G "gawk -v MAPQ=$MAPQ -v SIZE=${SIZE} -f $scrape_contacts_script $contigPropFile h.scaffolds.original.notation.step.$(($STEP-1)).txt - " | LC_ALL=C sort -k1,1 -k2,2 -k3,3n -s | gawk -f ${merge_scores_script} $contigPropFile "h.scaffolds.original.notation.step.""$(($STEP-1))"".txt" - > "h.scores.step.""$STEP"".txt"
else
gawk -v MAPQ="$MAPQ" -v SIZE=${SIZE} -f $scrape_contacts_script $contigPropFile "h.scaffolds.original.notation.step.""$(($STEP-1))"".txt" "$mergelib" | gawk -f ${merge_scores_script} $contigPropFile "h.scaffolds.original.notation.step.""$(($STEP-1))"".txt" - > "h.scores.step.""$STEP"".txt"
fi
#consolidate scrape data into double-sorted-confidence file
gawk -f $compute_confidences_script "h.scores.step.""$STEP"".txt" | sort -r -gk4 -gk5 -S8G --parallel=48 -s > "h.double.sorted.confidence.step.""$STEP"".txt"
#create new links between contigs based on confidence file
gawk -f $accept_links_script "h.double.sorted.confidence.step.""$STEP"".txt" > "h.scaffolds.step.""$STEP"".txt"
#update assembly file given the new links set
gawk -f $update_assembly_script "h.scaffolds.step.""$STEP"".txt" "h.scaffolds.original.notation.step.""$(($STEP-1))"".txt" > "h.scaffolds.original.notation.step.""$STEP"".txt"
#move to next step if the assembly was updated
if [[ -s "h.scaffolds.step.""$STEP"".txt" ]]; then
STEP=$((1+$STEP))
echo "...Starting iteration # $STEP"
#handle case when there are no links to add
else
## TODO: is this the ultimate end?
#try to push further by dropping dubious contigs
if [ -f "h.dropouts.step.""$STEP"".txt" ]; then
rm -f "h.dropouts.step.""$STEP"".txt"
fi
while true; do
# Choose the smallest dubious contig to be dropped
drop=$(gawk '($4==1){a[$1]; a[$2]}END{for (tmp in a) print tmp}' "h.double.sorted.confidence.step.""$STEP"".txt" | gawk -f $drop_dubious_script - $contigPropFile "h.scaffolds.original.notation.step.""$STEP"".txt")
# One of ultimate end scenarios, probably there are better ways to get out
if [ -z "$drop" ]; then
break 2
fi
# split and overwrite input: move the dropped line to the dropouts file
sed -n "$drop""p" "h.scaffolds.original.notation.step.""$STEP"".txt" >> "h.dropouts.step.""$STEP"".txt"
gawk -v DROP="$drop" 'NR!=DROP' "h.scaffolds.original.notation.step.""$STEP"".txt" > "h.scaffolds.original.notation.tmp.step.""$STEP"".txt"
mv "h.scaffolds.original.notation.tmp.step.""$STEP"".txt" "h.scaffolds.original.notation.step.""$STEP"".txt"
# fix and overwrite scores: discard rows touching the dropped index and renumber
gawk -v DROP="$drop" '($1!=DROP)&&($2!=DROP){if ($1>DROP) $1--; if ($2>DROP) $2--; print}' "h.scores.step.""$STEP"".txt" > "h.scores.tmp.step.""$STEP"".txt"
mv "h.scores.tmp.step.""$STEP"".txt" "h.scores.step.""$STEP"".txt"
# procede with LIGer: fix and overwrite confidence file, accept links and update assembly
gawk -f $compute_confidences_script "h.scores.step.""$STEP"".txt" | sort -r -nk4 -nk5 > "h.double.sorted.confidence.step.""$STEP"".txt"
gawk -f $accept_links_script "h.double.sorted.confidence.step.""$STEP"".txt" > "h.scaffolds.step.""$STEP"".txt"
gawk -f $update_assembly_script "h.scaffolds.step.""$STEP"".txt" "h.scaffolds.original.notation.step.""$STEP"".txt" > "h.scaffolds.original.notation.tmp.step.""$STEP"".txt"
mv "h.scaffolds.original.notation.tmp.step.""$STEP"".txt" "h.scaffolds.original.notation.step.""$STEP"".txt"
# check if was helpful?
if [[ -s "h.scaffolds.step.""$STEP"".txt" ]]; then
STEP=$((1+$STEP))
echo "...Starting iteration # $STEP"
break
fi
done
fi
done
# CONSOLIDATE FINAL OUTPUT
basenamefile="$(basename $contigPropFile .cprops)"
cp "h.scaffolds.original.notation.step.""$STEP"".txt" "$basenamefile"".asm"
for i in $(find . -name 'h.dropouts.step.*.txt' | sort -t "." -nr -k 5); do
cat $i >> "$basenamefile"".asm"
done
## CLEAN LEFTOVER HELPER FILES. TODO: delete some from inside the loop to save space.
find . -name "h.*.txt" -delete
echo ":) DONE!"
##
##
##
##
##
##
##
| Shell |
3D | theaidenlab/AGWG-merge | scaffold/run-liger-scaffolder.sh | .sh | 10,086 | 228 | #!/bin/bash
## LIGer scaffolder wrapper script
## Written by Olga Dudchenko and Sanjit Batra
USAGE="
*****************************************************
This is a wrapper for a Hi-C Limitless Iterative Greedy genome assembly (LIGer) algorithm, version date: Dec 7, 2016.
Usage: ./run-liger-scaffolder.sh [-h] [-s minimal_scaffold_size] [-t link_threshold] [-q mapq] path_to_cprops_file path_to_merge_nodups_file
ARGUMENTS:
path_to_cprops_file Path to (prefiltered) cprops file listing contigs for which LIGer scaffolding will be attempted
path_to_merge_nodups_file Path to merge_nodups Juicer output file
OPTIONS:
-h Shows this help
-s size Set minimal contig/scaffold size to use as input
-q mapq Set threshold for Hi-C reads mapping quality (default is 1)
-p true/false Use GNU Parallel to speed up calculations (default is true)
-t link_threshod Set threshold for joining links [not working yet, uses default]
Uses scrape-mnd.awk, generate-unsorted-confidence-table.awk, confidence-to-assembly.awk, scaffolds-to-original-notation.awk and drop-smallest-dubious-element.awk that should be in the same folder as the wrapper script.
In the current version ordering and orienting contigs at each iteration is based on the data from whole inter-contig contact matrices. At each iteraction the contigs and corresponding matrices are updated, the idea being that some low-confidence scaffolds links may benefit from more data provided by the already-joined contigs and get resolved.
Note that in the current version the input is expected in the cprops format.
*****************************************************
"
## Set current defaults
SIZE=15000
MAPQ=1
use_parallel=true
## HANDLE OPTIONS
# Parse command-line flags: -s min size, -q mapq, -p parallel toggle, -t link threshold, -h help.
while getopts "s:q:p:t:h" opt; do
	case $opt in
		h) echo "$USAGE" >&2
			exit 0
		;;
		s) # minimal contig/scaffold size: must be a positive integer
			re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]]; then
				echo ":) -s flag was triggered, starting calculations with $OPTARG threshold starting contig/scaffold size" >&1
				SIZE=$OPTARG
			else
				echo ":( Wrong syntax for minimal input contig/scaffold size. Using the default value SIZE=$SIZE" >&2
			fi
		;;
		q) # Hi-C read mapping quality threshold: non-negative integer
			re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]]; then
				echo ":) -q flag was triggered, starting calculations with $OPTARG threshold mapping quality" >&1
				MAPQ=$OPTARG
			else
				echo ":( Wrong syntax for mapping quality. Using the default value MAPQ=$MAPQ" >&2
			fi
		;;
		p) # GNU Parallel toggle: accepts literal true/false only
			if [ $OPTARG == true ] || [ $OPTARG == false ]; then
				echo ":) -p flag was triggered. Running LIGer with GNU Parallel support parameter set to $OPTARG." >&1
				use_parallel=$OPTARG
			else
				echo ":( Unrecognized value for -p flag. Running LIGer with default parameters (-p true)." >&2
			fi
		;;
		t) # link threshold: accepted for forward compatibility but not implemented yet
			# bug fix: message referred to a nonexistent "-thr" flag; the flag is -t
			echo "...-t flag was triggered. Sorry, option not functional yet: using default thresholds" >&2
		;;
		*) echo "$USAGE" >&2
			exit 1
		;;
	esac
done
shift $(( OPTIND-1 ))
## HANDLE ARGUMENTS: TODO check file format
# Two positional arguments are required: the cprops contig file and the merged_nodups file.
if [ $# -lt 2 ]; then
echo ":( Required arguments not found. Please double-check your input!!" >&2
echo "$USAGE" >&2
exit 1
fi
# Handle arguments: cprops file
contigPropFile=$1
# -s: the cprops file must exist and be non-empty.
if [[ ! -s "$contigPropFile" ]]; then
echo ":( Cprops file not found. Please double-check your input!!" >&2
echo "$USAGE" >&2
exit 1
else
# TODO: check that cprops file is in proper format
echo "...Using cprops file: $contigPropFile"
fi
# Handle arguments: merged_nodups files
if [ $# -eq 2 ]; then
# TODO: check that merged_nodups file is in proper format
mergelib=$2
echo "...Using merged_nodups file: $mergelib"
fi
## CHECK DEPENDENCIES
# GNU Parallel is only required when -p true (the default); bail out early if it is missing.
if [ $use_parallel == true ]; then
type parallel >/dev/null 2>&1 || { echo >&2 ":( GNU Parallel support is set to true (default) but GNU Parallel is not in the path. Please install GNU Parallel or set -p option to false. Exiting!"; exit 1; }
fi
# Resolve the directory this wrapper lives in; all helper awk scripts are expected alongside it.
path_to_scripts=`cd "$( dirname $0)" && pwd`
scrape_contacts_script="$path_to_scripts""/scrape-mnd.awk"
merge_scores_script="$path_to_scripts""/merge-scores.awk"
compute_confidences_script="$path_to_scripts""/generate-unsorted-confidence-table.awk"
accept_links_script="$path_to_scripts""/confidence-to-assembly.awk"
update_assembly_script="$path_to_scripts""/scaffolds-to-original-notation.awk"
drop_dubious_script="$path_to_scripts""/drop-smallest-dubious-element.awk"
# Fail fast if any of the helper scripts is missing.
if [ ! -f $scrape_contacts_script ] || [ ! -f $merge_scores_script ] || [ ! -f $compute_confidences_script ] || [ ! -f $accept_links_script ] || [ ! -f $update_assembly_script ] || [ ! -f $drop_dubious_script ]; then
echo ":( Relevant dependency scripts not found in bin folder. Exiting!" >&2
exit 1
fi
## PREP FOR FIRST STEP - TODO: make restarting from any step possible
# Seed iteration 0: keep contigs >= SIZE bp whose names are not flagged ":::debris";
# everything else is written (via stderr) to the step-0 dropout file and is re-appended
# to the final .asm during output consolidation.
if [ ! -f "h.scaffolds.original.notation.step.""0"".txt" ]; then
echo "...Scaffolding all scaffolds and contigs greater or equal to $SIZE bp."
# thinking of introducing an unattempted flag, would influence things here
gawk -v SIZE=${SIZE} '$3>=SIZE && $1!~/:::debris$/{print $2; next}{print $2 >"/dev/stderr"}' $contigPropFile > "h.scaffolds.original.notation.step.""0"".txt" 2>"h.dropouts.step.""0"".txt"
else
# A pre-existing step-0 file is treated as an explicit user-supplied starting set.
echo "...Explicit scaffold set has been listed as input. Using set as a first iteration."
fi
STEP=1
echo "...Starting iteration # $STEP"
# MAIN LOOP
# Each iteration rescores Hi-C contacts against the current scaffold set, accepts the most
# confident new links, and rewrites the scaffold file. Iteration stops when the set
# collapses to a single scaffold, or when no further links can be made even after dropouts.
while true; do
#do not enter the next iteration if nothing to assemble
if [[ $(wc -l <"h.scaffolds.original.notation.step.""$((STEP-1))"".txt") -eq 1 ]]; then
STEP=$((-1+$STEP))
break
fi
#extract, relable and count reads from merged-nodups [TODO: rethink this part once mnd is deprecated]
# Parallel branch splits the mnd file into 1G chunks; per-chunk results are stably sorted
# and then merged into a single per-pair score file.
if [ $use_parallel == true ]; then
parallel -a $mergelib --will-cite --jobs 80% --pipepart --block 1G "gawk -v MAPQ=$MAPQ -f $scrape_contacts_script $contigPropFile h.scaffolds.original.notation.step.$(($STEP-1)).txt - " | LC_ALL=C sort -k1,1 -k2,2 -k3,3n -s | gawk -f ${merge_scores_script} $contigPropFile "h.scaffolds.original.notation.step.""$(($STEP-1))"".txt" - > "h.scores.step.""$STEP"".txt"
else
gawk -v MAPQ="$MAPQ" -f $scrape_contacts_script $contigPropFile "h.scaffolds.original.notation.step.""$(($STEP-1))"".txt" "$mergelib" | gawk -f ${merge_scores_script} $contigPropFile "h.scaffolds.original.notation.step.""$(($STEP-1))"".txt" - > "h.scores.step.""$STEP"".txt"
fi
#consolidate scrape data into double-sorted-confidence file
gawk -f $compute_confidences_script "h.scores.step.""$STEP"".txt" | sort -r -gk4 -gk5 -S8G --parallel=48 -s > "h.double.sorted.confidence.step.""$STEP"".txt"
#create new links between contigs based on confidence file
gawk -f $accept_links_script "h.double.sorted.confidence.step.""$STEP"".txt" > "h.scaffolds.step.""$STEP"".txt"
#update assembly file given the new links set
gawk -f $update_assembly_script "h.scaffolds.step.""$STEP"".txt" "h.scaffolds.original.notation.step.""$(($STEP-1))"".txt" > "h.scaffolds.original.notation.step.""$STEP"".txt"
#move to next step if the assembly was updated
if [[ -s "h.scaffolds.step.""$STEP"".txt" ]]; then
STEP=$((1+$STEP))
echo "...Starting iteration # $STEP"
#handle case when there are no links to add
else
## TODO: is this the ultimate end?
#try to push further by dropping dubious contigs
if [ -f "h.dropouts.step.""$STEP"".txt" ]; then
rm -f "h.dropouts.step.""$STEP"".txt"
fi
# Dropout loop: repeatedly remove the contig selected by the drop-dubious helper and
# retry link acceptance; 'break 2' exits the whole MAIN LOOP once nothing is left to drop.
while true; do
# Choose the smallest dubious contig to be dropped
drop=$(gawk '($4==1){a[$1]; a[$2]}END{for (tmp in a) print tmp}' "h.double.sorted.confidence.step.""$STEP"".txt" | gawk -f $drop_dubious_script - $contigPropFile "h.scaffolds.original.notation.step.""$STEP"".txt")
# One of ultimate end scenarios, probably there are better ways to get out
if [ -z "$drop" ]; then
break 2
fi
# split and overwrite input
sed -n "$drop""p" "h.scaffolds.original.notation.step.""$STEP"".txt" >> "h.dropouts.step.""$STEP"".txt"
gawk -v DROP="$drop" 'NR!=DROP' "h.scaffolds.original.notation.step.""$STEP"".txt" > "h.scaffolds.original.notation.tmp.step.""$STEP"".txt"
mv "h.scaffolds.original.notation.tmp.step.""$STEP"".txt" "h.scaffolds.original.notation.step.""$STEP"".txt"
# fix and overwrite scores
# Renumber the surviving contigs so indices stay dense after the removal.
gawk -v DROP="$drop" '($1!=DROP)&&($2!=DROP){if ($1>DROP) $1--; if ($2>DROP) $2--; print}' "h.scores.step.""$STEP"".txt" > "h.scores.tmp.step.""$STEP"".txt"
mv "h.scores.tmp.step.""$STEP"".txt" "h.scores.step.""$STEP"".txt"
# procede with LIGer: fix and overwrite confidence file, accept links and update assembly
# NOTE(review): this sort uses -n while the main-loop sort above uses -g (plus -S/--parallel);
# if confidence values can appear in scientific notation the two branches may order rows
# differently — TODO confirm this is intended.
gawk -f $compute_confidences_script "h.scores.step.""$STEP"".txt" | sort -r -nk4 -nk5 > "h.double.sorted.confidence.step.""$STEP"".txt"
gawk -f $accept_links_script "h.double.sorted.confidence.step.""$STEP"".txt" > "h.scaffolds.step.""$STEP"".txt"
gawk -f $update_assembly_script "h.scaffolds.step.""$STEP"".txt" "h.scaffolds.original.notation.step.""$STEP"".txt" > "h.scaffolds.original.notation.tmp.step.""$STEP"".txt"
mv "h.scaffolds.original.notation.tmp.step.""$STEP"".txt" "h.scaffolds.original.notation.step.""$STEP"".txt"
# check if was helpful?
if [[ -s "h.scaffolds.step.""$STEP"".txt" ]]; then
STEP=$((1+$STEP))
echo "...Starting iteration # $STEP"
break
fi
done
fi
done
# CONSOLIDATE FINAL OUTPUT
basenamefile="$(basename $contigPropFile .cprops)"
# The last surviving scaffold set becomes the .asm output...
cp "h.scaffolds.original.notation.step.""$STEP"".txt" "$basenamefile"".asm"
# ...followed by all dropped contigs, most recent step first (sort on the step-number field).
for i in $(find . -name 'h.dropouts.step.*.txt' | sort -t "." -nr -k 5); do
cat $i >> "$basenamefile"".asm"
done
## CLEAN LEFTOVER HELPER FILES. TODO: delete some from inside the loop to save space.
find . -name "h.*.txt" -delete
echo ":) DONE!"
##
##
##
##
##
##
##
| Shell |
3D | theaidenlab/AGWG-merge | merge/align-nearby-sequences-and-filter-overlaps.sh | .sh | 5,679 | 152 | #!/bin/bash
## Part of the merge segment of the diploid pipeline.
## Requires LASTZ in path! Requires PARALLEL in path!
## TODO: Add scaling for annotations? Or fully switch to bpe? Add warning!!
## Written by: Olga Dudchenko
USAGE="
*****************************
./run-pairwise-alignment.sh [options] <path_to_cprops> <path_to_asm> <path_to_faSplitDir>
*****************************
"
# Defaults:
merger_search_band=3000000
merger_alignment_score=50000000
merger_alignment_identity=20
merger_alignment_length=20000
merger_lastz_options=\"--gfextend\ --gapped\ --chain=200,200\"
## HANDLE OPTIONS
# -b band, -l min overlap length, -i min identity, -s saturation score, -o LASTZ options, -h help.
while getopts "b:s:i:l:o:h" opt; do
	case $opt in
		b) re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]]; then
				echo ":) -b flag was triggered, assuming ${OPTARG} bp-wide band for alternative haplotype detection." >&1
				merger_search_band=$OPTARG
			else
				# bug fix: the fallback message referenced ${band_size}, a variable never
				# defined in this script; report the actual default (merger_search_band).
				echo ":( Wrong syntax for band size. Using the default value ${merger_search_band}." >&2
			fi
		;;
		l) re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]]; then
				echo ":) -l flag was triggered, assuming ${OPTARG} bp as minimal overlap length at which sequences might be classified as alternative haplotypes." >&1
				merger_alignment_length=$OPTARG
			else
				echo ":( Wrong syntax for acceptable synteny length. Using the default value ${merger_alignment_length}." >&2
			fi
		;;
		i) re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]]; then
				echo ":) -i flag was triggered, assuming ${OPTARG} as minimal identity (per length) at which similar sequences might be classified as alternative haplotypes." >&1
				merger_alignment_identity=$OPTARG
			else
				echo ":( Wrong syntax for acceptable identity score. Using the default value ${merger_alignment_identity}." >&2
			fi
		;;
		s) re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]]; then
				echo ":) -s flag was triggered, assuming ${OPTARG} as saturated alignment score at which similar sequences are classified as alternative haplotypes, irrespective of length." >&1
				merger_alignment_score=$OPTARG
			else
				echo ":( Wrong syntax for acceptable alignment score. Using the default value ${merger_alignment_score}." >&2
			fi
		;;
		o) # LASTZ option string must arrive wrapped in escaped quotes: \"--...\"
			re='^\"--.+\"$'
			if [[ $OPTARG =~ $re ]]; then
				echo ":) -o flag was triggered, assuming ${OPTARG} as a list of options to pass on to LASTZ to tune alignment." >&1
				merger_lastz_options=${OPTARG}
			else
				echo ":( Wrong syntax for LASTZ option string. Using the default value ${merger_lastz_options}." >&2
			fi
		;;
		h) echo "$USAGE" >&1
			exit 0
		;;
		*) echo "$USAGE" >&2
			exit 1
		;;
	esac
done
shift $(( OPTIND-1 ))
## HANDLE ARGUMENTS
# All three positional arguments are mandatory: cprops, asm, split-fasta directory.
# NOTE(review): the -z tests are unquoted; they happen to detect empty/unset args only
# because `[ -z ]` degenerates to a non-empty-string test on the literal "-z".
[ -z $1 ] || [ -z $2 ] || [ -z $3 ] && echo >&2 ":( Some input seems missing." && echo >&2 "$USAGE" && exit 1
cprops=$1
asm=$2
faSplit=$3
# HANDLE DEPENDENCIES
# Pipeline root = parent of the directory containing this script.
pipeline=`cd "$( dirname $0)" && cd .. && pwd`
## LASTZ dependency
lastz="false"
if hash lastz 2>/dev/null; then
lastz="true"
fi
[[ $lastz == "false" ]] && echo >&2 ":( LASTZ not installed or not in the path. The merge section of the pipeline will not run. Exiting!" && exit 1
## GNU Parallel Dependency
# Parallelization is optional but requires GNU Parallel >= 20150322.
parallel="false"
if hash parallel 2>/dev/null; then
ver=`parallel --version | awk 'NR==1{print \$3}'`
[ $ver -ge 20150322 ] && parallel="true"
fi
[ $parallel == "false" ] && echo ":| WARNING: GNU Parallel version 20150322 or later not installed. We highly recommend to install it to increase performance. Starting pipeline without parallelization!" >&2
# remove comments from LASTZ options
# (actually strips the escaped quotes wrapping the default/user LASTZ option string)
merger_lastz_options="${merger_lastz_options%\"}"
merger_lastz_options="${merger_lastz_options#\"}"
# read in the lengths as associative array
# len[contig_name] = contig_length, keyed by column 2 of the cprops file.
declare -A len
while read -r -a array
do
len["${array[1]}"]="${array[2]}"
done < $cprops
# cleanup
# Remove stale outputs from a previous run since both files are appended to below.
[ -f alignments.txt ] && rm alignments.txt
[ -f joblist.txt ] && rm joblist.txt
# read asm line by line
# For every scaffold chain in the asm, schedule a pairwise LASTZ job between each contig and
# each downstream neighbour whose cumulative offset stays within merger_search_band bp.
# Orientation signs are stripped first since job scheduling is orientation-agnostic here.
while read -r line
do
	arr=($line)
	# bug fix: loop bound changed from 'index <= ${#arr[@]}' (off-by-one). The extra
	# iteration referenced an empty element and could never schedule a job, so the fix
	# is behavior-identical but removes the out-of-range access.
	for ((index=0; index < ${#arr[@]}; index++)); do
		shift=0
		k=$((index+1))
		while [ $shift -le ${merger_search_band} ] && [ $k -lt ${#arr[@]} ]; do
			## create a job to align $index to $k
			echo ${arr[index]}" "${arr[k]} >> joblist.txt
			## count band from lower end of the contig/scaffold
			shift=$((shift + ${len[${arr[k]}]}))
			k=$((k+1))
		done
	done
done < <(awk '{gsub("-",""); print}' $asm)
# Run all scheduled LASTZ jobs; each job's output is reduced to a per-pair total stanza.
# bug fix: use the user-supplied split-fasta directory (${faSplit}, positional argument 3)
# instead of a hard-coded "faSplit" directory name, which only worked when the script was
# invoked from the directory containing a folder literally named faSplit.
if [ $parallel == "true" ]; then
	parallel --colsep=" " --will-cite --jobs 80% -a joblist.txt "lastz ${faSplit}/{1}.fa ${faSplit}/{2}.fa ${merger_lastz_options} | awk -f ${pipeline}/merge/extract-total-stanza.awk" > alignments.txt
else
	# Sequential fallback when GNU Parallel is unavailable or too old.
	while read oname1 oname2
	do
		lastz ${faSplit}/${oname1}.fa ${faSplit}/${oname2}.fa ${merger_lastz_options} | awk -f ${pipeline}/merge/extract-total-stanza.awk >> alignments.txt
	done < joblist.txt
fi
# Emit a 2D annotation header plus one row per accepted overlap.
echo "chr1 x1 x2 chr2 y1 y2 color id X1 X2 Y1 Y2" > overlaps_2D_input.txt
# Filter alignments: keep pairs whose overlap is longer than min_overlap AND whose score
# clears identity*overlap (or the saturated max_k), and whose aligned block ends appear to
# reach the contig ends within a 0.5*overlap tolerance (separate cases for same-strand
# $10==0 and reverse-strand $10==1 hits) — field semantics per extract-total-stanza.awk,
# TODO confirm. The second awk reshapes survivors into 2D annotation rows.
awk -v min_overlap=${merger_alignment_length} -v identity=${merger_alignment_identity} -v max_k=${merger_alignment_score} '{if($6-$5<$9-$8){overlap=($6-$5)}else{overlap=($9-$8)}; tol=.5*overlap}(overlap>min_overlap)&&($1>identity*overlap||$1>max_k)&&(($10==0&&($5<tol||$8<tol)&&($2-$6<tol||$3-$9<tol))||($10==1&&($5<tol||$3-$9<tol)&&($2-$6<tol||$8<tol))){print}' alignments.txt | awk 'BEGIN{OFS="\t"}{for(i=1; i<=NF-3; i++){$i=$(i+3)}; $2--; $5--; $8=$7; $7="0,0,0"; $9=$2; $10=$3; $11=$5; $12=$6}1' >> overlaps_2D_input.txt
awk -f ${pipeline}/supp/lift-input-annotations-to-asm-annotations.awk ${cprops} ${asm} overlaps_2D_input.txt > overlaps_2D_asm.txt | Shell |
3D | theaidenlab/AGWG-merge | merge/run-asm-merger.sh | .sh | 4,202 | 104 | #!/bin/bash
## Wrapper script to do the merging block in the diploid pipeline workflow to merge assembly errors due to undercollapsed heterozygosity.
## Requires LASTZ aligner in path
## The idea is that sequence similarities identified via LASTZ alignments need to be long enough and with high enough identity score to meet a certain (length x identity) threshold. An additional (very high) alignment score threshold is listed that represents saturation of the length x identity condition.
## Written by: OD
## Version: 170217
USAGE="
*********************************
bash run-asm-merger.sh [options] <cprops> <asm> <faSplitFolder>
*********************************
"
# defaults
merger_search_band=3000000
merger_alignment_score=50000000
merger_alignment_identity=20
merger_alignment_length=20000
merger_lastz_options=\"--gfextend\ --gapped\ --chain=200,200\"
## HANDLE OPTIONS
# -b band, -l min overlap, -i min identity, -s saturation score, -o LASTZ option string,
# -t precomputed tiled asm (fast-forward), -h help.
while getopts "b:s:i:l:o:t:h" opt; do
	case $opt in
		b) re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]]; then
				echo ":) -b flag was triggered, assuming ${OPTARG} bp-wide band for alternative haplotype detection." >&1
				merger_search_band=$OPTARG
			else
				echo ":( Wrong syntax for band size. Using the default value ${merger_search_band}." >&2
			fi
		;;
		l) re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]]; then
				echo ":) -l flag was triggered, assuming ${OPTARG} bp as minimal overlap length at which sequences might be classified as alternative haplotypes." >&1
				merger_alignment_length=$OPTARG
			else
				echo ":( Wrong syntax for acceptable synteny length. Using the default value ${merger_alignment_length}." >&2
			fi
		;;
		i) re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]]; then
				echo ":) -i flag was triggered, assuming ${OPTARG} as minimal identity (per length) at which similar sequences might be classified as alternative haplotypes." >&1
				merger_alignment_identity=$OPTARG
			else
				echo ":( Wrong syntax for acceptable identity score. Using the default value ${merger_alignment_identity}." >&2
			fi
		;;
		s) re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]]; then
				echo ":) -s flag was triggered, assuming ${OPTARG} as saturated alignment score at which similar sequences are classified as alternative haplotypes, irrespective of length." >&1
				merger_alignment_score=$OPTARG
			else
				echo ":( Wrong syntax for acceptable alignment score. Using the default value ${merger_alignment_score}." >&2
			fi
		;;
		o) re='^\"--.+\"$'
			if [[ $OPTARG =~ $re ]]; then
				echo ":) -o flag was triggered, assuming ${OPTARG} as a list of options to pass on to LASTZ to tune alignment." >&1
				merger_lastz_options=${OPTARG}
			else
				echo ":( Wrong syntax for LASTZ option string. Using the default value ${merger_lastz_options}." >&2
			fi
		;;
		t)
			if [ -f ${OPTARG} ]; then
				echo ":) -t flag was triggered, assuming $OPTARG contains tiling results and fast-forwarding to merging per se." >&1
				tiled_asm=$OPTARG
			else
				# robustness fix: a missing -t file used to be ignored silently; warn so the
				# user knows the full tiling workflow will run instead.
				echo ":( Could not find tiling file ${OPTARG}. Ignoring -t flag and running the full tiling workflow." >&2
			fi
		;;
		h) echo "$USAGE"
			exit 0
		;;
		*) echo "$USAGE" >&2
			exit 1
		;;
	esac
done
shift $(( OPTIND-1 ))
# Handle dependencies
pipeline=`cd "$( dirname $0)" && cd .. && pwd`
# Handle arguments
# robustness fix: validate the required positional arguments (consistent with the other
# merge-stage scripts, which all refuse to run on missing input).
([ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]) && echo >&2 ":( Some input seems missing." && echo >&2 "$USAGE" && exit 1
cprops=$1
asm=$2
faSplit=$3
if [ -z ${tiled_asm} ]; then
	# 1) Perform pairwise alignment in a band of predefined size (default: 1Mb) and output reliable overlap input and original_asm annotation track (overlaps_2D_input.txt and overlaps_2D_asm.txt)
	bash ${pipeline}/merge/align-nearby-sequences-and-filter-overlaps.sh -b ${merger_search_band} -s ${merger_alignment_score} -i ${merger_alignment_identity} -l ${merger_alignment_length} -o "${merger_lastz_options}" ${cprops} ${asm} ${faSplit}
	# 2) Extract connected components, vote orientation, tile cluster, vote order
	bash ${pipeline}/merge/tile-assembly-based-on-overlaps.sh ${cprops} ${asm} overlaps_2D_input.txt
	tiled_asm=`basename ${asm} .asm`"_tiled.asm"
fi
# 3) Merge tiled asm
bash ${pipeline}/merge/merge-tiled-asm.sh -o "${merger_lastz_options}" ${cprops} ${tiled_asm} ${faSplit}
| Shell |
3D | theaidenlab/AGWG-merge | merge/merge-tiled-asm.sh | .sh | 7,181 | 196 | #!/bin/bash
## Wrapper script to merge contigs inside clusters as indicated by the annotated asm file.
## Requires LASTZ
## Written by: OD
USAGE="
*****************************************************
USAGE: merge-tiled-asm.sh -a <tiled-annotations> <path-to-cprops> <path-to-annotated-asm> <path-to-fasta-split-folder>
*****************************************************
"
# DEPENDENCIES: handle better!
# Pipeline root = parent of the directory containing this script.
pipeline=`cd "$( dirname $0)" && cd .. && pwd`
# DEFAULTS:
# LASTZ options ship wrapped in escaped quotes; they are stripped after getopts below.
merger_lastz_options=\"--gfextend\ --gapped\ --chain=200,200\"
# coloring for the annotation file, probably overkill
cc_intra="255,255,255"
cc_break="255,0,0"
cc_overlap="255,255,0"
## HANDLE OPTIONS
# -a: tiled annotation file used to build QC tracks; -o: LASTZ option string; -h: help.
while getopts "a:o:h" opt; do
case $opt in
h) echo "$USAGE"
exit 0
;;
a) if [ -f $OPTARG ]; then
echo ":) -a flag was triggered, using $OPTARG annotations to build a qc annotation track" >&1
tiled_annotations=`readlink -f $OPTARG`
else
echo ":( Could not find the file $OPTARG. Will skip building a qc annotation track" >&2
fi
;;
o) re='^\"--.+\"$'
if [[ $OPTARG =~ $re ]]; then
echo ":) -o flag was triggered, assuming ${OPTARG} as a list of options to pass on to LASTZ to tune alignment." >&1
merger_lastz_options=${OPTARG}
else
echo ":( Wrong syntax for LASTZ option string. Using the default value ${merger_lastz_options}." >&2
fi
;;
*) echo "$USAGE" >&2
exit 1
;;
esac
done
shift $(( OPTIND-1 ))
# remove comments from LASTZ options
# (actually strips the escaped quotes wrapping the option string)
merger_lastz_options="${merger_lastz_options%\"}"
merger_lastz_options="${merger_lastz_options#\"}"
## HANDLE ARGUMENTS
# All three positional arguments are mandatory: cprops, annotated asm, split-fasta dir.
[ -z $1 ] || [ -z $2 ] || [ -z $3 ] && echo >&2 ":( Some input seems missing." && echo >&2 "$USAGE" && exit 1
cprops=`readlink -f $1`
asm=`readlink -f $2`
splitFas=`readlink -f $3`
filename=`basename $cprops .cprops`
## PREP WORKING DIR
# NOTE(review): $(unknown) runs a command literally named "unknown" — almost certainly a
# placeholder or a corrupted line. It fails, leaving chrname empty, so the work dir and all
# output names below lose their prefix. TODO: confirm the intended value (e.g. $filename).
chrname=$(unknown)
outDir="${chrname}"
[ -d $outDir ] && rm -r ${outDir}
mkdir $outDir && cd $outDir
# Scratch and output file names inside the work dir.
main_fa_file="merged_${chrname}.fa"
tmp_merge_file="tmp_merged_${chrname}.fa"
tmp_fa_file="tmp_${chrname}.fa"
merged_asm=${chrname}"_merged.asm"
touch "${main_fa_file}"
# 2D annotation track header for the shift/overlap QC output.
echo "chr1 x1 x2 chr2 y1 y2 color id X1 X2 Y1 Y2" > shifts_2D_input.txt
# State for the merge loop: whether we are inside a {...} cluster, and a running output counter.
merge="false"
contig_counter=0
# Walk the tiled asm line by line. Contigs inside {...} brackets form a cluster whose
# members are tiled/merged via LASTZ overlaps; a leading '-' marks reverse orientation.
# Merged sequence accumulates in ${tmp_merge_file} and is flushed to ${main_fa_file}
# whenever a cluster breaks or ends; shifts_2D_input.txt records annotation rows.
while read -r line
do
#echo $line
first=$((contig_counter+1))
for contig in $line
do
#echo $contig
# Remember the raw token (with any {,} markers) to update the in-cluster state at the bottom.
anno_contig=$contig
if [[ $contig = \{* ]]; then
contig=$(echo $contig | cut -d'{' -f 2)
fi
if [[ $contig = *\} ]]; then
contig=$(echo $contig | rev | cut -d'}' -f 2 | rev)
fi
#echo $contig
## might use more annotations
# A leading '-' means the contig enters in reverse-complement orientation.
if [[ $contig == -* ]]; then
reverse=1
contig=$(echo $contig | cut -d'-' -f 2)
else
reverse=0
fi
if [ ${contig_counter} -eq 0 ]; then ## handle first
if [ $reverse -eq 0 ]; then
cat ${splitFas}/${contig}.fa > "${tmp_merge_file}"
else
awk -f ${pipeline}/utils/reverse-fasta.awk ${splitFas}/${contig}.fa > "${tmp_merge_file}"
fi
globalpos=0
len=$(awk -f ${pipeline}/utils/generate-cprops-file.awk ${splitFas}/${contig}.fa | awk '{print $NF}')
let contig_counter=contig_counter+1
## TODO: redo this via shifts, scaling also needs to be incorporated
echo "assembly $(( globalpos )) $(( globalpos+len )) assembly $(( globalpos )) $(( globalpos+len )) ${cc_break} ${contig} $(( globalpos )) $(( globalpos+len )) $(( globalpos )) $(( globalpos+len ))" >> "shifts_2D_input.txt"
globalpos=$(( globalpos+len ))
last_match_pos=1
last_tiled_pos=1
else
if [ $reverse -eq 0 ]; then
contig_file=${splitFas}/${contig}.fa
else
awk -f ${pipeline}/utils/reverse-fasta.awk ${splitFas}/${contig}.fa > "RC_contig.fa"
contig_file="RC_contig.fa"
fi
#head ${contig_file}
if [ $merge = "true" ]; then # attempt to tile next if in cluster
align=$(lastz "${tmp_merge_file}" ${contig_file} ${merger_lastz_options} | awk -v last_tiled_pos=${last_tiled_pos} -f ${pipeline}/merge/extract-highest-oriented-tiled-stanza.awk)
else
align=""
fi
#echo $align >> alignments.txt
# Parsed alignment fields: target start/end, previous sequence length, query start/end,
# new contig length, score — order per extract-highest-oriented-tiled-stanza.awk (TODO confirm).
read ts te prevlen qs qe len score <<< "$align"
if [ -z "$ts" ]; then # not merged or no overlap found
echo "...No reliable overlap found. Breaking cluster."
# Flush the accumulated merged sequence and restart accumulation from this contig.
awk -f ${pipeline}/utils/wrap-fasta-sequence.awk "${tmp_merge_file}" >> "${main_fa_file}"
if [ $reverse -eq 0 ]; then
cat ${splitFas}/${contig}.fa > "${tmp_merge_file}"
else
awk -f ${pipeline}/utils/reverse-fasta.awk ${splitFas}/${contig}.fa > "${tmp_merge_file}"
fi
last_match_pos=1
last_tiled_pos=1
### add annotations
len=$(awk -f ${pipeline}/utils/generate-cprops-file.awk ${splitFas}/${contig}.fa | awk '{print $NF}')
let contig_counter=contig_counter+1
echo "assembly $(( globalpos )) $(( globalpos+len )) assembly $(( globalpos )) $(( globalpos+len )) ${cc_break} ${contig} $(( globalpos )) $(( globalpos+len )) $(( globalpos )) $(( globalpos+len ))" >> "shifts_2D_input.txt"
# echo ${contig}" " >> ${chrname}"_new_annotations.txt"
globalpos=$(( globalpos+len ))
else # merging
# NOTE: merged-in contigs do not bump contig_counter; only cluster starts/breaks are
# numbered in the output asm — appears intentional since merged contigs are absorbed.
echo "...Overlap found!"
last_match_pos=${ts}
last_tiled_pos=$((ts-qs))
#echo $align
#echo $(( len-prevlen+te-qe ))
if [ $(( len-prevlen+te-qe )) -le 0 ]; then
echo "....new scaffold overlaps fully with previous ones. Skipping merge."
# new contig fully inside
echo "assembly $(( globalpos-prevlen+ts-qs )) $(( globalpos+len-prevlen+te-qe )) assembly $(( globalpos-prevlen+ts-qs )) $(( globalpos+len-prevlen+te-qe )) ${cc_intra} ${contig} $(( globalpos-prevlen+ts-qs )) $(( globalpos+len-prevlen+te-qe )) $(( globalpos-prevlen+ts-qs )) $(( globalpos+len-prevlen+te-qe ))" >> "shifts_2D_input.txt"
else
### second contig needs to be incorporated
echo "....merging scaffolds"
echo "assembly $(( globalpos-prevlen+ts-qs )) $(( globalpos+len-prevlen+te-qe )) assembly $(( globalpos-prevlen+ts-qs )) $(( globalpos+len-prevlen+te-qe )) ${cc_overlap} ${contig} $(( globalpos-prevlen+ts-qs )) $(( globalpos+len-prevlen+te-qe )) $(( globalpos-prevlen+ts-qs )) $(( globalpos+len-prevlen+te-qe ))" >> "shifts_2D_input.txt"
# Keep the previous sequence up to te, then append the new contig from qe+1 onwards.
awk -v test=1 -v pos=${te} -f ${pipeline}/merge/grab-fix-sequence.awk ${tmp_merge_file} > ${tmp_fa_file}
awk -v test=0 -v pos=$((qe+1)) -f ${pipeline}/merge/grab-fix-sequence.awk ${contig_file} | awk 'NR>1' >> ${tmp_fa_file}
awk -f ${pipeline}/utils/wrap-fasta-sequence.awk ${tmp_fa_file} > ${tmp_merge_file}
globalpos=$(( globalpos+len-prevlen+te-qe ))
fi
fi
fi
# Update in-cluster state from the raw token: '{' opens a cluster, '}' closes it.
if [[ $anno_contig = \{* ]]; then
merge="true"
fi
if [[ $anno_contig = *\} ]]; then
merge="false"
fi
done
# One output asm line per input line: the indices of the entries emitted for it.
seq ${first} ${contig_counter} | xargs >> ${merged_asm}
done < $asm
# Flush the last pending merged sequence to the main fasta output.
awk -f ${pipeline}/utils/wrap-fasta-sequence.awk ${tmp_merge_file} >> ${main_fa_file}
# If tiled annotations were supplied (-a), lift them onto the merged assembly as QC tracks.
[ -z ${tiled_annotations} ] || (awk -f ${pipeline}/merge/lift-merged-annotations-to-unmerged-map-shifts.awk ${tiled_annotations} shifts_2D_input.txt > shift_qc_track_2D_asm.txt && awk -f ${pipeline}/merge/lift-merged-annotations-to-unmerged-map-overlaps.awk ${tiled_annotations} shifts_2D_input.txt > overlap_qc_track_2D_asm.txt)
# Clean up temporary fasta scratch files.
[ -f ${tmp_merge_file} ] && rm ${tmp_merge_file}
[ -f ${tmp_fa_file} ] && rm ${tmp_fa_file}
| Shell |
3D | theaidenlab/AGWG-merge | merge/tile-assembly-based-on-overlaps.sh | .sh | 1,166 | 21 | #!/bin/bash
## Sandboxing part of the merge pipeline that performs connected component analysis and tiling based on pairwise alignment data
## USAGE: bash ./merge/tile-assembly-based-on-overlaps.sh <cprops> <asm> <overlaps-from-alignment-2D-annotation-file>
## Input: cprops, asm, overlaps 2D annotation files
## Output: _tiled.asm file, an annotated .asm file in which connected components are annotated with {...}
## Steps involved: extract connected components, vote orientation, tile cluster, vote order
## Written by: OD
## Version: 180402
# Handle dependencies
pipeline=`cd "$( dirname $0)" && cd .. && pwd`
# bug fix: $USAGE_short was referenced on the error path below but never defined anywhere
# in this script, so bad input printed an empty line instead of usage help.
USAGE_short="USAGE: bash ./merge/tile-assembly-based-on-overlaps.sh <cprops> <asm> <overlaps-from-alignment-2D-annotation-file>"
# Handle arguments
cprops=$1
asm=$2
overlaps=$3
([ -z $1 ] || [ -z $2 ] || [ -z $3 ]) && echo >&2 "Not sure how to parse your input: files not listed or not found at expected locations. Exiting!" && echo >&2 "$USAGE_short" && exit 1
# Chain: identify connected components with orientation -> vote orientation -> tile inside
# each cluster -> vote order; the result is an asm with clusters annotated as {...}.
awk -f ${pipeline}/merge/identify-clusters-w-orientation.awk ${overlaps} | awk -f ${pipeline}/merge/vote-orientation.awk ${cprops} ${asm} - | awk -f ${pipeline}/merge/tile-cluster-insides.awk ${cprops} ${overlaps} - | awk -f ${pipeline}/merge/vote-order.awk ${cprops} - ${asm} > `basename ${asm} .asm`"_tiled.asm"
| Shell |
3D | theaidenlab/AGWG-merge | split_chrom_aware/run-asm-splitter.sh | .sh | 6,070 | 164 | #!/bin/bash
#### Description: Wrapper script to split megascaffold into individual chromosome-length scaffolds.
#### Written by: Sanjit Batra - sanjit.batra@bcm.edu. Version date 12/19/2016.
pipeline=`cd "$( dirname $0)" && cd .. && pwd`
USAGE="
*****************************************************
This is a wrapper script to split megascaffold into individual chromosome-length scaffolds. The output is a new asm file with additional chrom_num-1 number of lines where chrom_num is the number of chromosomes that are expected.
USAGE: ./run-asm-splitter.sh [options] <path_to_cprops> <path_to_asm> <path_to_contig_mnd>
DESCRIPTION:
This is a script to split an assembly chromosome into individual chromosomes.
ARGUMENTS:
path_to_cprops Specify cprops file path.
path_to_asm Specify asm file.
path_to_contig_mnd Specify path to mnd file describing pairwise Hi-C contacts between assembly contigs.
OPTIONS:
-c chr_num Number of chromosomes in the input genome (default is 23).
-r true/false Specify if the input is a rabl genome (true) or not (false), (default is false).
-h Shows this help
*****************************************************
"
## Defaults
chr_num=23
rabl="false"
## unprompted
# mapq threshold used when dumping Hi-C contacts (not exposed as a CLI option)
mapq=1
# non-rable default params
res=100000
# rable default params
rabl_wide=500000
rabl_narrow=50000
## HANDLE OPTIONS
# -c expected chromosome count, -r rabl-genome toggle, -h help.
while getopts "c:r:h" opt; do
	case $opt in
		h) echo "$USAGE"
			exit 0
		;;
		c) # expected chromosome count: must be a positive integer
			re='^[0-9]+$'
			if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]]; then
				# bug fix: message mislabeled this flag as "-n"; the flag is -c
				echo ":) -c flag was triggered, running chromosome splitter with specified number of chromosomes = $OPTARG" >&1
				chr_num=$OPTARG
			else
				echo ":( Wrong syntax for specifying number of chromosomes. Using default number of chromosomes as ${chr_num}" >&2
			fi
		;;
		r) # rabl-genome toggle: accepts literal true/false only
			if [ $OPTARG == true ] || [ $OPTARG == false ]; then
				echo ":) -r flag was triggered. Rabl status of genome is $OPTARG." >&1
				rabl=$OPTARG
			else
				echo ":( Unrecognized value for -r flag. Running with default parameters (-r ${rabl})." >&2
			fi
		;;
		*) echo >&2 ":( Illegal options. Exiting."
			echo >&1 "$USAGE"
			exit 1
		;;
	esac
done
shift $(( OPTIND-1 ))
## HANDLE ARGUMENTS, TODO: check formats
cprops=$1
asm=$2
mnd=$3
id=`basename ${cprops} .cprops`
# All three inputs must exist and be non-empty.
[ -s ${cprops} ] && [ -s ${asm} ] && [ -s ${mnd} ] || { echo ":( Not sure how to parse your input or input files not found at intended locations. Exiting!" && echo >&2 "$USAGE" && exit 1 ; }
## Handle zoom. TODO: get rid of zoom. What's the point if direct dump?
# Scale all coordinates down so the total assembly length stays under 2100000000
# (presumably to fit signed-32-bit coordinate limits in the hic tooling — TODO confirm).
totlength=`awk '{total+=$3}END{print total}' ${cprops}`
scale=$(( 1 + $totlength / 2100000000 ))
if [ $scale -ne 1 ]; then
res=$((res/scale))
rabl_wide=$((rabl_wide/scale))
rabl_narrow=$((rabl_narrow/scale))
fi
#Work with just the first line of the asm which is the first superscaffold
head -1 ${asm} > "asm_head"
# Total length of the first superscaffold: sum contig lengths (cprops column 3) for the
# (sign-stripped) contig indices listed on the first asm line.
maxl=`awk 'function abs(x){if(x<0){return -x}else{return x}}{if(FILENAME==ARGV[1]){clen[$2]=$3;next}}{if(FILENAME==ARGV[2]){for(i=1;i<=NF;i++){c+=clen[abs($i)]};next}}END{print c}' $cprops "asm_head"`
maxlength=$(( $maxl / $scale ))
buffer=10000000
#Lift over contig merged_nodups to assembly merged_nodups
bash ${pipeline}/lift/lift-input-mnd-to-asm-mnd.sh -s ${scale} ${cprops} "asm_head" ${mnd} > "asm_mnd.txt"
if [ $rabl == "false" ]; then
# Handle non-rabl situation
#Dump matrix data
bash ${pipeline}/split/mnd-dump.sh -q ${mapq} "asm_mnd.txt" ${res} > ${res}.jbdump
#Run main recursive chrom splitter script
python -W ignore ${pipeline}/split/recursive-chromosome-splitter.py ${res}.jbdump "boundaries.wig" ${chr_num} ${res}
awk 'FNR>1{print $1}' "boundaries.wig" > "boundaries.list"
#Here we perform two sanity checks on the boundaries:
#First, the number of lines in it should be chr_num : if not, we declare something went wrong during chromosome splitting
nl=$(wc -l < "boundaries.wig")
if [ $nl -ne $chr_num ]; then
echo >&2 ":( Number of split chromosomes = $nl and input number of chromosomes = ${chr_num}, don't match. Refer to the hic map: continuing without splitting." && cp $asm ${id}.split.asm && exit
#echo "Number of split chromosomes = "$nl
fi
#Second, we make sure that chromosome end positions are > 0
test=`awk 'BEGIN{test=1}{if(\$1<=0){print ":( Chromosome boundary position not positive!" > "/dev/stderr"; test=0; exit}}END{print test}' "boundaries.wig"`
[ $test -eq 0 ] && echo >&2 ":( Chromosome splitter failed. Refer to the hic map: continuing without splitting." && cp $asm ${id}.split.asm && exit
#echo "Sanity check passed"
#Produce boundary scaffolds
awk -f ${pipeline}/split/find_scaffold_at_break.awk $cprops "asm_head" $mnd "boundaries.list" $scale | awk '{print $NF}' - > "scafs.list"
# cleanup
rm ${res}.jbdump boundaries.wig boundaries.list
else
# Handle rabl situation
#First we find the coarse chromosome boundaries using the coarse map juicebox dump
bash ${pipeline}/split/mnd-dump.sh -q ${mapq} "asm_mnd.txt" ${rabl_wide} | awk '{if($1==0){print}}' - | sort -k 3,3nr | awk -v b=$buffer -v m=$maxlength '{if(!( ($2<b) || ($2>(m-b)) )){print $2}}' - | head -n$(( chr_num - 1 )) | sort -n > ${rabl_wide}.positions
#Now we search for the maximum value in a radius of 1Mb??? 50kb centered at each of the coarse positions and find the scaffolds inside these positions lie
bash ${pipeline}/split/mnd-dump.sh -q ${mapq} "asm_mnd.txt" ${rabl_narrow} | awk '{if($1==0){print $2,$3}}' - | sort -k 1,1n | awk -f ${pipeline}/split/find_max.awk - ${rabl_wide}.positions | awk -f ${pipeline}/split/find_scaffold_at_rabl_peak.awk $cprops "asm_head" - $scale > "scafs.list"
# cleanup
rm ${rabl_wide}.positions
fi
# Finalize: break asm at scafs
awk -f ${pipeline}/split/split_asm_at_chr_boundaries.awk "scafs.list" "asm_head" > "chr.asm"
#cat "chr.asm" <(tail -n+2 $asm) > ${id}.split.asm
awk 'NR>1' $asm >> "chr.asm" && mv chr.asm ${id}.split.asm
awk '{$1=$1}1' ${id}.split.asm > ${id}.split.asm.tmp && mv ${id}.split.asm.tmp ${id}.split.asm
# cleanup
rm "scafs.list" "asm_head" "asm_mnd.txt"
| Shell |
3D | theaidenlab/AGWG-merge | split_chrom_aware/recursive-chromosome-splitter.py | .py | 6,047 | 233 | import numpy as np
#import matplotlib.pyplot as plt
import sys
from scipy.sparse import *
import math
# #First we define a function that takes in a matrix and computes the point where
# #S(i) is minimized, splits the matrix at that position and makes two recursive
# #calls to itself on both submatrices
# #This function will return an array of arrays, i.e. a 2D matrix
# #Each row of the matrix looks like this:
# #[position_in_bp_coordinates level S(i)_value(=Value_B) Value_A]
# Number of chromosomes expected in the input genome (3rd CLI argument).
N_chr = int(sys.argv[3])
# Bin size (bp) of the Juicebox dump (4th CLI argument); used to convert
# bin indices back into bp positions when reporting break points.
resolution = int(sys.argv[4])
def find_breaks(m,start_m,level,N_chr,global_mm):
    """Recursively bisect contact matrix `m` at its strongest boundary signal.

    Returns a flat array made of consecutive 6-field records, one per split
    examined: [position_bp, recursion_level, S_min, Value_A, global_B,
    size_of_smaller_submatrix].
    """
    # Locate the optimal split of this submatrix together with the two
    # halves it produces and their global start offsets.
    (split_bin, left, left_start, right, right_start,
     s_min, value_a, global_b) = find_break_point(m, start_m, global_mm)
    record = [split_bin*resolution, level, s_min, value_a, global_b,
              min(left.shape[0], right.shape[0])]
    # Recurse for N_chr+1 levels to ensure all chromosome boundaries are hit,
    # and stop once a submatrix gets too small (chr sizes assumed > 1Mb).
    if (level >= N_chr+1) or (left.shape[0] < 20) or (right.shape[0] < 20):
        return record
    deeper = np.concatenate((find_breaks(left, left_start, level+1, N_chr, global_mm),
                             find_breaks(right, right_start, level+1, N_chr, global_mm)))
    return np.concatenate((record, deeper))
def find_break_point(m,start_m,global_mm):
    """Find the single best split position of sparse contact matrix `m`.

    Returns (break_point, m1, s1, m2, s2, min_val, value_A, global_B) where
    break_point is in global bin coordinates, m1/m2 are the two diagonal
    submatrices, s1/s2 their global start offsets, and the remaining values
    are density statistics used downstream to rank candidate breaks.
    """
    # Integral image (summed-area table): mm[i,j] = sum of m[0..i, 0..j].
    mm = np.cumsum(np.cumsum(m.toarray(),1),0)
    L = mm.shape[0]
    S = np.zeros(L)
    # print("The size of this matrix is",L)
    # S[i] = mean contact density of the off-diagonal block rows [0..i] x
    # cols [i+1..L-1]; a chromosome boundary minimizes this inter-block signal.
    for i in range(L):
        S[i] = float(mm[i,L-1]-mm[i,i])/((i+1)*(L-i))
    #We don't want the first or last entry of S to be the breakpoint
    # Mask the first/last 20 bins (or fewer for tiny matrices) with max(S).
    index_1 = min(20,L-1)
    S[:index_1] = [np.amax(S)]*(index_1)
    index_2 = min(20,L-1)
    S[-index_2:] = [np.amax(S)]*(index_2)
    # print(L)
    # print(mm)
    # print(S)
    # edge_point = int(7*L/9)
    # float(mm[int(8L/9),int(2L/9)]-mm[int()])/((break_point+1)*(L-break_point))
    #Now find the point where S is minimized
    break_point,min_val = np.argmin(S),np.amin(S) #+/- 1?
    #We also find the value of the region A described by IntegralImage(break_point)
    # value_A: mean density of the upper-left diagonal block [0..bp] x [0..bp].
    value_A = float(mm[break_point,break_point])/((break_point+1)*(break_point+1))
    # min_val is recomputed from mm directly (same formula as S[break_point]).
    min_val = float(mm[break_point,L-1]-mm[break_point,break_point])/((break_point+1)*(L-break_point))
    # global_B: same off-diagonal statistic, but evaluated on the full-matrix
    # integral image at the global position of this break.
    global_break_point = break_point+start_m
    gbp = global_break_point
    GL = global_mm.shape[0]
    global_B = float(global_mm[gbp,GL-1]-global_mm[gbp,gbp])/((gbp+1)*(GL-gbp))
    #Shifting the breakpoint forward by 1
    break_point = break_point+1
    # print("break_point is:",break_point)
    # print(break_point,min_val,value_A)
    #Now break the matrix about the break_point
    list1 = range(break_point)
    list2 = range(break_point,L)
    # m1 = mm[0:break_point,0:break_point]
    # m2 = mm[break_point:L,break_point:L]
    # Slice the sparse matrix into its two diagonal blocks (rows then cols).
    m1 = m.tocsr()[list1, :].tocsc()[:,list1]
    m2 = m.tocsr()[list2, :].tocsc()[:,list2]
    # Global start offsets of the two submatrices.
    s1 = 0+start_m
    s2 = break_point+start_m
    break_point = break_point+start_m
    # print(mm)
    # print(break_point,np.shape(m1))
    # print(m1)
    # print(s1,np.shape(m2))
    # print(m2)
    # print(s2)
    # plt.plot(S)
    # plt.show()
    return break_point,m1,s1,m2,s2,min_val,value_A,global_B
def iter_loadtxt(filename, delimiter='\t', skiprows=0, dtype=float):
    """Memory-frugal replacement for np.loadtxt.

    Streams `filename` token by token through a generator into
    np.fromiter, then reshapes the flat array using the observed number
    of fields per row (all rows are assumed to have the same width).
    """
    def token_stream():
        with open(filename, 'r') as handle:
            # Consume header lines that should be ignored.
            for _ in range(skiprows):
                next(handle)
            for raw in handle:
                fields = raw.rstrip().split(delimiter)
                # Remember the row width on the function object so the
                # reshape below can recover the 2D layout.
                iter_loadtxt.rowlength = len(fields)
                for field in fields:
                    yield dtype(field)
    flat = np.fromiter(token_stream(), dtype=dtype)
    return flat.reshape((-1, iter_loadtxt.rowlength))
#Load Hi-C Juicebox Dump
#i,j,value=np.loadtxt('jbdump.txt').T
# Columns of the dump: bp position i, bp position j, contact count.
data = iter_loadtxt(sys.argv[1])
i = np.asarray(data[:,0])
j = np.asarray(data[:,1])
value = np.asarray(data[:,2])
#Zero out the diagonal: self-contacts carry no boundary signal.
# (Vectorized; also avoids shadowing the builtin `iter` as the old loop did.)
value[i == j] = 0
#Convert into symmetric matrix with resolution level indices
i2 = [int(x/resolution) for x in i]
j2 = [int(y/resolution) for y in j]
i22 = [int(y/resolution) for y in j]
j22 = [int(x/resolution) for x in i]
value1 = [float(z) for z in value]
value2 = [float(z) for z in value]
# Mirror the upper triangle so the matrix is symmetric.
i222 = i2 + i22
j222 = j2 + j22
value222 = value1 + value2
m2=coo_matrix((value222,(i222,j222)))
#print("The dimensions of the complete input matrix are:")
#print(m2.shape)
#print("\n")
# Take only a part of the matrix for testing
#list1 = range(40000)
#m = m2.tocsr()[list1, :].tocsc()[:,list1]
# Or Take the entire matrix for testing
m=m2
# print(m.todense())
# Precompute the full-matrix integral image once; it is shared by every
# recursive call so global_B is always measured against the whole assembly.
global_mm = np.cumsum(np.cumsum(m.toarray(),1),0)
Breaks = find_breaks(m,0,0,N_chr,global_mm)
# Each candidate break is a 6-field record; reshape the flat output.
n = int(np.shape(Breaks)[0]/6)
Breaks_Output = np.reshape(Breaks,(n,6))
# Sort candidates by the boundary signal S_min (column 2), ascending.
Breaks_Sorted_by_Signal = Breaks_Output[Breaks_Output[:,2].argsort()]
# BUGFIX: sys.maxint was removed in Python 3; sys.maxsize is the portable
# spelling (available since Python 2.6) and keeps the full array printable.
np.set_printoptions(precision=8,threshold=sys.maxsize,linewidth=250,suppress=True)
#Now we iterate through this list and choose the top N_chr-1 boundaries,
#skipping splits whose smaller submatrix is below the size threshold.
Final_Breaks = np.zeros(N_chr-1)
curr_i = 0
m_size_threshold = 200
#Now: Potential change for the future
#m_size_threshold = 50
for idx in range(n):
    if(curr_i>=N_chr-1):
        break
    if(Breaks_Sorted_by_Signal[idx,5]>m_size_threshold):
        Final_Breaks[curr_i] = Breaks_Sorted_by_Signal[idx,0]
        curr_i += 1
#print(np.sort(Final_Breaks))
# Emit boundaries as a wig-format track (positions sorted ascending).
np.savetxt(sys.argv[2],np.sort(Final_Breaks),fmt='%d 100',header="variableStep chrom=assembly span=10000",comments='')
#print(Breaks_Sorted_by_Signal)
| Python |
3D | theaidenlab/AGWG-merge | split_chrom_aware/mnd-dump.sh | .sh | 1,601 | 52 | #!/bin/bash
#### Description: Script to dump matrix data from mnd.
#### Written by: Olga Dudchenko - olga.dudchenko@bcm.edu.
#TODO: Add usage
## Set defaults
# Minimum mapping quality; both reads of a pair must meet it to be counted.
mapq=1
## Handle options
while getopts "q:" opt; do
case $opt in
q) re='^[0-9]+$'
if [[ $OPTARG =~ $re ]]; then
mapq=$OPTARG
else
echo ":( Wrong syntax for mapping quality. Using the default value ${mapq}" >&2
fi
;;
# NOTE(review): $USAGE is never defined in this script, so this branch
# prints an empty line before exiting.
*) echo "$USAGE" >&2
exit 1
;;
esac
done
shift $(( OPTIND-1 ))
## Handle arguments
# Positional arguments: merged_nodups (mnd) file and bin size in bp.
mnd=$1
bin_size=$2
[ -s ${mnd} ] && [ ! -z ${bin_size} ] || { echo >&2 ":( Not sure how to parse your input or input files not found at intended locations. Exiting!" && exit 1 ; }
## Main function
## GNU Parallel Dependency
# Use GNU Parallel (>= 20150322) to bin the mnd file in 1G chunks if present.
parallel="false"
if hash parallel 2>/dev/null; then
ver=`parallel --version | awk 'NR==1{print \$3}'`
[ $ver -ge 20150322 ] && parallel="true"
fi
[ $parallel == "false" ] && echo >&2 ":| WARNING: GNU Parallel version 20150322 or later not installed. We highly recommend to install it to increase performance. Starting pipeline without parallelization!"
# Binning command: keep pairs with mapq >= q on both sides ($9, $12) mapping
# to different fragments ($4 != $8); count contacts per ordered
# (smaller-coordinate-bin, larger-coordinate-bin) pair.
cmd="awk -v b=${bin_size} -v q=${mapq} '(\$9>=q)&&(\$12>=q)&&(\$4!=\$8){if(\$3<=\$7){c[b*int(\$3/b)\" \"b*int(\$7/b)]+=1}else{c[b*int(\$7/b)\" \"b*int(\$3/b)]+=1}}END{for(i in c){print i, c[i]}}'"
# Parallel path: per-chunk partial counts are merged by a second awk pass.
[ $parallel == "true" ] && parallel --will-cite -a ${mnd} --pipepart -j 80% --block 1G ${cmd} | awk -v b=${bin_size} -v OFS='\t' '{c[$1"\t"$2]+=$3}END{for(i in c){print i, c[i]}}'
# Serial fallback: run the same command once over the whole file.
[ $parallel == "false" ] && { eval ${cmd}" "${mnd} | awk '{print $1"\t"$2"\t"$3}' -; }
| Shell |
3D | theaidenlab/AGWG-merge | seal/seal-asm.sh | .sh | 1,837 | 57 | #!/bin/bash
## Wrapper script to analyze the assembly and place fragments from among the small scaffolds back into the assembly
## NOTE: now is run after splitting but for diploid pipeline could be run for for tiled assembly
## NOTE: Relies on standard annotations :::fragment_ and :::debris!
## NOTE: Probably should be done after chrom splitting to avoid a slim chance that a misassembled contig/scaffold spanned chromosomes, and the chromosomes ended up in the order and orientation to fit the edges of the contig/scaffold
# NOTE(review): the -s option (max size of debris to place back) is accepted
# below but is not mentioned in this usage string.
USAGE="
**********************************
	./seal-asm.sh <current_cprops> <current_asm>
**********************************
"
## HANDLE OPTIONS
while getopts "s:h" opt; do
case $opt in
h) echo "$USAGE"
exit 0
;;
s) re='^[0-9]+$'
if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]]; then
echo "... -s flag was triggered, will attempt to place back only singleton debris contigs/scaffolds and those less than $OPTARG" >&1
SIZE=$OPTARG
else
# NOTE(review): despite the "Exiting!" message, this branch does not
# actually exit; the script continues with SIZE unset.
echo ":( Wrong syntax for minimal input contig/scaffold size. Exiting!" >&2
fi
;;
*) echo "$USAGE" >&2
exit 1
;;
esac
done
shift $(( OPTIND-1 ))
## HANDLE ARGUMENTS
[ -z $1 ] || [ -z $2 ] && echo >&2 ":( Some input seems missing." && echo >&2 "$USAGE" && exit 1
current_cprops=$1
current_asm=$2
# Fallback when no -s limit given: use max contig length + 1 so every
# singleton qualifies.
[ -z ${SIZE} ] && echo >&2 ":| Warning: no size limit was listed. Will put back all singletons without preferential alternative location." && SIZE=`awk '\$3>max{max=\$3}END{print max+1}' ${current_cprops}`
## HANDLE DEPENDENCIES
pipeline=`cd "$( dirname $0)" && cd .. && pwd`
## MAIN FUNCTION
id=`basename ${current_cprops} .cprops`
# dump new asm and cprops
# The awk helper writes the sealed asm to stdout and the sealed cprops to
# stderr; both streams are redirected to their respective files.
awk -v size=${SIZE} -f ${pipeline}/seal/build-sealed-asm.awk ${current_cprops} ${current_asm} 2>${id}.sealed.cprops 1>${id}.sealed.asm
| Shell |
3D | theaidenlab/AGWG-merge | finalize/construct-fasta-from-asm.sh | .sh | 1,575 | 71 | #!/bin/bash
## Wrapper script to generate a fasta from internal datatypes: cprops and asm
## Input: internally consistent cprops, asm and fasta
## TODO: parallelize for speed
## Prints into STDOUT
USAGE="
*****************************************************
	USAGE: construct-fasta-from-asm.sh <path-to-cprops> <path-to-asm> <path-to-fasta>
*****************************************************
"
## HANDLE OPTIONS
# -l sets the prefix of output scaffold names (default HiC_scaffold_#).
label="HiC_scaffold"
while getopts "l:h" opt; do
case $opt in
h) echo "$USAGE"
exit 0
;;
l) label=$OPTARG
;;
*) echo "$USAGE" >&2
exit 1
;;
esac
done
shift $(( OPTIND-1 ))
# DEPENDENCIES:
pipeline=`cd "$( dirname $0)" && cd .. && pwd`
## HANDLE ARGUMENTS
[ -z $1 ] || [ -z $2 ] || [ -z $3 ] && echo >&2 ":( Some input seems missing." && echo >&2 "$USAGE" && exit 1
cprops=$1
asm=$2
fasta=$3
# Build a byte-offset index of the fasta so each contig's sequence can be
# seeked directly with tail -c below.
awk -f ${pipeline}/utils/index-fasta.awk $fasta > tmp.index.txt
declare -A index
# Key the index by the numeric cprops id (cprops maps name -> id).
while read name startbyte skip
do
index[$name]=$startbyte
done < <(awk 'FILENAME==ARGV[1]{cname[$1]=$2;next}{$1=cname[$1]; print}' $cprops tmp.index.txt)
# read asm line by line
# Each asm line is one output scaffold: a list of signed contig ids, where a
# leading '-' means reverse-complement.
scaffold_counter=1
while read -r line
do
echo ">"${label}"_"${scaffold_counter}
for contig in $line
do
## might use more annotations
if [[ $contig == -* ]]; then
# Strip the leading '-' and emit the reverse-complemented sequence.
contig=$(echo $contig | cut -d'-' -f 2)
tail -c +${index[${contig}]} ${fasta} | awk '$0~/>/{exit}1' | awk -f ${pipeline}/utils/reverse-fasta.awk -
else
# Forward orientation: dump bytes from the contig start to the next header.
tail -c +${index[${contig}]} ${fasta} | awk '$0~/>/{exit}1'
fi
done
let scaffold_counter=scaffold_counter+1
done < $asm
rm tmp.index.txt
| Shell |
3D | theaidenlab/AGWG-merge | finalize/remove-N-overhangs-from-asm.sh | .sh | 2,698 | 64 | #!/bin/bash
#### Description: Script to create an equivalent to a given assembly output (cprops asm fasta) but without N overhangs in the input contigs/scaffolds.
#### Usage: bash remove-N-overhangs-from-asm.sh <(modified) input cprops> <(modified) input asm> <(modified) input-fasta-file>
#### Input: cprops, asm, fasta
#### Output: "no_overhangs" cprops, asm, fasta.
#### Dependencies: make-gap-bed.awk, edit-cprops, edit-fasta, edit-asm scripts
#### Written by: Olga Dudchenko - olga.dudchenko@bcm.edu on 01/30/2017
USAGE="
*****************************************************
	./remove-N-overhangs-from-asm.sh <(edited) input cprops> <(edited) input asm> <(edited) input-fasta-file>
*****************************************************
"
## HANDLE OPTIONS
while getopts "h" opt; do
case $opt in
h) echo "$USAGE"
exit 0
;;
*) echo "$USAGE" >&2
exit 1
;;
esac
done
shift $(( OPTIND-1 ))
## HANDLE ARGUMENTS
[ -z $1 ] || [ -z $2 ] || [ -z $3 ] && echo >&2 ":( Some input seems missing." && echo >&2 "$USAGE" && exit 1
orig_cprops=$1
orig_asm=$2
orig_fasta=$3
if [ ! -f ${orig_cprops} ] || [ ! -f ${orig_asm} ] || [ ! -f ${orig_fasta} ]; then
echo "Files corresponding to original input not found. Exiting!" >&2
exit 1
fi
## HANDLE DEPENDENCIES
pipeline=`cd "$( dirname $0)" && cd .. && pwd`
make_gap_bed=${pipeline}/utils/make-gap-bed.awk
edit_cprops=${pipeline}/edit/edit-cprops-according-to-annotations.awk
edit_asm=${pipeline}/edit/edit-asm-according-to-new-cprops.sh
edit_fasta=${pipeline}/edit/edit-fasta-according-to-new-cprops.awk
prefix=`basename ${orig_cprops} .cprops`
# 1) Find N-runs (gaps) touching a contig end ($2==0 start overhang or
#    $3==len end overhang), format them as 2D annotations, and rewrite the
#    cprops splitting contigs at those overhangs (:::overhang_/:::gap labels).
awk -f ${make_gap_bed} ${orig_fasta} | awk 'BEGIN{OFS="\t"; print "chr1", "x1", "x2", "chr2", "y1", "y2","color", "id", "X1", "X2", "Y1", "Y2"}FILENAME==ARGV[1]{len[$1]=$3;next}$2==0{print $1, $2, $3, $1, $2, $3, "0,0,0", "gap", $2, $3, $2, $3; next}$3==len[$1]{print $1, $2, $3, $1, $2, $3, "0,0,0", "gap", $2, $3, $2, $3}' ${orig_cprops} - | awk -v label1=":::overhang_" -v label2=":::gap" -f ${edit_cprops} - ${orig_cprops} > ${prefix}.no_overhangs.cprops
new_cprops=${prefix}.no_overhangs.cprops
prefix=`basename ${orig_asm} .asm`
# 2) Rewrite the asm in terms of the new cprops ids and drop all :::gap
#    pieces from each scaffold line (empty lines are suppressed).
bash ${edit_asm} ${new_cprops} ${orig_cprops} ${orig_asm} | awk 'FILENAME==ARGV[1]{if($1~/:::gap/){skip[$2]=1; skip[-$2]=1}; next}{str=""; for(i=1;i<=NF;i++){if(! skip[$i]){str=str" "$i}}; if(str!=""){print substr(str,2)}}' ${new_cprops} - > ${prefix}.no_overhangs.asm
prefix=`basename ${orig_fasta} .fa`
prefix=`basename ${prefix} .fna`
prefix=`basename ${prefix} .fasta`
# 3) Rewrite the fasta per the new cprops and filter out the :::gap records.
awk -v label1=":::overhang_" -v label2=":::gap" -f ${edit_fasta} ${new_cprops} ${orig_fasta} | awk '$0~/>/{test=1; if($0~/:::gap/){test=0}}test{print}' > ${prefix}.no_overhangs.fasta
| Shell |
3D | theaidenlab/AGWG-merge | finalize/finalize-output-w-stats.sh | .sh | 7,826 | 183 | #!/bin/bash
## Wrapper script to generate final fasta as well as various component fastas (unprompted)
## Adds 500bp gaps between assembly components scaffolded via Hi-C
## TODO: make gap length a parameter
## Written by OD
pipeline=`cd "$( dirname $0)" && cd .. && pwd`
## unprompted
# Gap (bp of Ns) inserted between components joined by Hi-C scaffolding.
gap_size=500
#subtiny_size=1000
# In this "-w-stats" variant the subtiny threshold is disabled (0).
subtiny_size=0
# default
label="HiC"
USAGE="
***********************************************
	./finalize-output.sh -c <number_of_chromosomes> -s <tiny_threshold> -l <label> <cprops> <asm> <fasta> <type>
***********************************************
"
## HANDLE OPTIONS
while getopts "c:s:l:h" opt; do
case $opt in
h) echo "$USAGE"
exit 0
;;
c) re='^[0-9]+$'
if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]]; then
chrom_num=$OPTARG
else
echo ":( Wrong syntax for chromosome number. Exiting!" >&2 && exit 1
fi
;;
s) re='^[0-9]+$'
if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]]; then
echo "... -s flag was triggered, treating all contigs/scaffolds shorter than $OPTARG as unattempted" >&1
input_size=$OPTARG
else
echo ":( Wrong syntax for minimal input contig/scaffold size. Exiting!" >&2 && exit 1
fi
;;
l) label=$OPTARG
echo "... -l flag was triggered. Output will appear with headers of the form ${OPTARG}_hic_scaffold_#"
;;
*) echo "$USAGE" >&2
exit 1
;;
esac
done
shift $(( OPTIND-1 ))
## HANDLE ARGUMENTS
[ -z $1 ] || [ -z $2 ] || [ -z $3 ] || [ -z $4 ] && echo >&2 ":( Some input seems to be missing." && echo >&2 "$USAGE" && exit 1
cprops=$1
asm=$2
fasta=$3
type=$4
# Dispatch on analysis stage: draft / raw (tiled) / final (merged).
case $type in
"draft")
[ -z $input_size ] && echo >&2 ":( Please type in the minimal contig/scaffold input size -s <num>. Exiting!" && exit 1
echo "Analyzing the draft assembly"
# Note that if the draft is not trimmed might need to trim first...
# Compute N50-style stats for the full draft, the attempted input
# (>= tiny threshold), and the tiny leftovers.
awk -v except=1 -f ${pipeline}/finalize/make-fasta-subset.awk <( awk -v subtiny=${subtiny_size} '$3<subtiny' ${cprops}) ${fasta} | awk -f ${pipeline}/supp/print-n50.awk > draft.fasta.stats
awk -v except=1 -f ${pipeline}/finalize/make-fasta-subset.awk <( awk -v tiny=${input_size} '$3<tiny' ${cprops}) ${fasta} | awk -f ${pipeline}/supp/print-n50.awk > input.fasta.stats
awk -f ${pipeline}/finalize/make-fasta-subset.awk <( awk -v subtiny=${subtiny_size} -v tiny=${input_size} '$3>=subtiny&&$3<tiny' ${cprops}) ${fasta} | awk -f ${pipeline}/supp/print-n50.awk > tiny.fasta.stats
cp tiny.fasta.stats unattempted.fasta.stats
;;
"raw")
echo "Analyzing the tiled assembly"
[ -z ${input_size} ] || [ -z ${chrom_num} ] && echo >&2 ":( Please type in the number of chromosomes -c <num> and the tiny size -s <num>. Exiting!" && exit 1
# Strip {|| } tiling markers from the asm before processing.
awk 'gsub("{||}","")' ${asm} > temp.asm
asm=temp.asm
bash ${pipeline}/finalize/remove-N-overhangs-from-asm.sh ${cprops} ${asm} ${fasta}
prefix=`basename ${cprops} .cprops`
cprops=${prefix}.no_overhangs.cprops
prefix=`basename ${asm} .asm`
asm=${prefix}.no_overhangs.asm
prefix=`basename ${fasta} .fa`
prefix=`basename ${prefix} .fna`
prefix=`basename ${prefix} .fasta`
fasta=${prefix}.no_overhangs.fasta
# Stats for chromosome-resolved scaffolds (first chrom_num asm lines) vs
# the unresolved/inconsistent remainder.
awk -v chrom_num=${chrom_num} 'FILENAME==ARGV[1]{oname[$2]=$1;next}FNR<=chrom_num{gsub("-","");for(i=1;i<=NF;i++){print oname[$i]}}' ${cprops} ${asm} | awk -f ${pipeline}/finalize/make-fasta-subset.awk - ${fasta} | awk -f ${pipeline}/supp/print-n50.awk > resolved.fasta.stats
awk -v chrom_num=${chrom_num} -v tiny=${input_size} 'FILENAME==ARGV[1]{oname[$2]=$1;split($1,b,":::overhang_||:::gap");len[b[1]]+=$3;next}FNR>chrom_num{gsub("-","");for(i=1;i<=NF;i++){split(oname[$i],b,":::overhang_||:::gap");if(oname[$i]~/:::fragment_/||len[b[1]]>=tiny){print oname[$i]}}}' ${cprops} ${asm} | awk -f ${pipeline}/finalize/make-fasta-subset.awk - ${fasta} | awk -f ${pipeline}/supp/print-n50.awk > unresolved-and-inconsistent.fasta.stats
rm temp.asm
;;
"final")
echo "Analyzing the merged assembly"
[ -z ${input_size} ] || [ -z ${chrom_num} ] || [ -z ${label} ] && echo >&2 ":( Please type in the number of chromosomes -c <num>, the tiny size -s <num> and a label for final fasta -l <label>. Exiting!" && exit 1
# trim N overhangs
echo "...trimming N overhangs"
bash ${pipeline}/finalize/remove-N-overhangs-from-asm.sh ${cprops} ${asm} ${fasta}
prefix=`basename ${cprops} .cprops`
cprops=${prefix}.no_overhangs.cprops
prefix=`basename ${asm} .asm`
asm=${prefix}.no_overhangs.asm
prefix=`basename ${fasta} .fa`
prefix=`basename ${prefix} .fna`
prefix=`basename ${prefix} .fasta`
fasta=${prefix}.no_overhangs.fasta
# riffle
echo "...adding gaps"
# Register a synthetic gap contig (all Ns) and interleave it between
# neighboring components on every asm line.
gap_id=`awk 'END{print \$2+1}' ${cprops}`
awk -v riffle=${gap_id} -f ${pipeline}/finalize/riffle-asm.awk ${asm} > temp.asm
cp ${cprops} temp.cprops
echo "hic_gap_${gap_id} ${gap_id} ${gap_size}" >> temp.cprops
cp ${fasta} temp.fasta
echo ">hic_gap_${gap_id}" >> temp.fasta
awk -v gap_size=${gap_size} 'BEGIN{for(i=1; i<=gap_size;i++){str=str"N"}; print str}' >> temp.fasta
# Partition the asm into chromosome-length / subtiny / tiny / small groups.
awk -v chrom_num=${chrom_num} 'NR<=chrom_num' temp.asm > temp.chr-length.asm
awk -v subtiny=${subtiny_size} 'FILENAME==ARGV[1]{split($1,a,":::overhang_"); len[a[1]]+=$3; oname[$2]=a[1]; next}{gsub("-","")}(NF==1&&oname[$1]!~/:::fragment_/&&len[oname[$1]]<subtiny){print}' temp.cprops temp.asm > temp.subtiny.asm
awk -v subtiny=${subtiny_size} -v tiny=${input_size} 'FILENAME==ARGV[1]{split($1,a,":::overhang_"); len[a[1]]+=$3; oname[$2]=a[1]; next}{gsub("-","")}(NF==1&&oname[$1]!~/:::fragment_/&&len[oname[$1]]>=subtiny&&len[oname[$1]]<tiny){print}' temp.cprops temp.asm > temp.tiny.asm
cat temp.chr-length.asm temp.subtiny.asm temp.tiny.asm | awk 'FILENAME==ARGV[1]{skip[$0]=1;next}(!skip[$0]){print}' - temp.asm > temp.small.asm
# make scaffold sequences of groups and calculate statistics
echo "...generating fastas"
bash ${pipeline}/finalize/construct-fasta-from-asm.sh temp.cprops temp.chr-length.asm temp.fasta | awk -f ${pipeline}/utils/wrap-fasta-sequence.awk - > chr-length.fasta
awk -f ${pipeline}/supp/print-n50.awk chr-length.fasta > chr-length.fasta.stats
# T3.dat: per-scaffold total length paired with N-free length.
awk '$0~/>/{if(c){print c}; c=0; next}{c+=length}END{print c}' chr-length.fasta > T3.dat
awk '$0~/>/{if(c){print c}; c=0; next}{gsub("N","");gsub("n","");c+=length}END{print c}' chr-length.fasta > tmp
paste T3.dat tmp > T3.dat.tmp && mv T3.dat.tmp T3.dat && rm tmp
bash ${pipeline}/finalize/construct-fasta-from-asm.sh temp.cprops temp.small.asm temp.fasta | awk -f ${pipeline}/utils/wrap-fasta-sequence.awk - > small.fasta
awk -f ${pipeline}/supp/print-n50.awk small.fasta > small.fasta.stats
bash ${pipeline}/finalize/construct-fasta-from-asm.sh temp.cprops temp.tiny.asm temp.fasta | awk -f ${pipeline}/utils/wrap-fasta-sequence.awk - > tiny.fasta
awk -f ${pipeline}/supp/print-n50.awk tiny.fasta > tiny.fasta.stats
# merge final output
# Renumber headers sequentially as <label>_hic_scaffold_# in merged files.
cat chr-length.fasta small.fasta | awk -v label=${label} '$0~/>/{counter++; $0=">"label"_hic_scaffold_"counter}1' > ${label}.FINAL.from_input.fasta
awk -f ${pipeline}/supp/print-n50.awk ${label}.FINAL.from_input.fasta > chr-length-and-small.fasta.stats
cat chr-length.fasta small.fasta tiny.fasta | awk -v label=${label} '$0~/>/{counter++; $0=">"label"_hic_scaffold_"counter}1' > ${label}.FINAL.from_draft.fasta
awk -f ${pipeline}/supp/print-n50.awk ${label}.FINAL.from_draft.fasta > chr-length-and-small-and-tiny.fasta.stats
cat small.fasta tiny.fasta > temp.fasta && awk -f ${pipeline}/supp/print-n50.awk temp.fasta > small-and-tiny.fasta.stats
# clean up
rm temp.fasta temp.cprops temp.asm temp.small.asm temp.tiny.asm temp.chr-length.asm temp.subtiny.asm
;;
*)
# NOTE(review): the accepted values above are draft/raw/final, but this
# message advertises draft/tiled/merged.
echo >&2 ":( Unknown type. Please choose one of the following: draft/tiled/merged. Exiting!" && exit 1
;;
esac
| Shell |
3D | theaidenlab/AGWG-merge | finalize/finalize-output.sh | .sh | 7,681 | 182 | #!/bin/bash
## Wrapper script to generate final fasta as well as various component fastas (unprompted)
## Adds 500bp gaps between assembly components scaffolded via Hi-C
## TODO: make gap length a parameter
## Written by OD
pipeline=`cd "$( dirname $0)" && cd .. && pwd`
## unprompted
# Gap (bp of Ns) inserted between components joined by Hi-C scaffolding.
gap_size=500
# Contigs shorter than this are treated as "subtiny" in component analysis.
subtiny_size=1000
# default
label="HiC"
USAGE="
***********************************************
	./finalize-output.sh -c <number_of_chromosomes> -s <tiny_threshold> -l <label> <cprops> <asm> <fasta> <type>
***********************************************
"
## HANDLE OPTIONS
while getopts "c:s:l:h" opt; do
case $opt in
h) echo "$USAGE"
exit 0
;;
c) re='^[0-9]+$'
if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]]; then
chrom_num=$OPTARG
else
echo ":( Wrong syntax for chromosome number. Exiting!" >&2 && exit 1
fi
;;
s) re='^[0-9]+$'
if [[ $OPTARG =~ $re ]] && [[ $OPTARG -gt 0 ]]; then
echo "... -s flag was triggered, treating all contigs/scaffolds shorter than $OPTARG as unattempted" >&1
input_size=$OPTARG
else
echo ":( Wrong syntax for minimal input contig/scaffold size. Exiting!" >&2 && exit 1
fi
;;
l) label=$OPTARG
echo "... -l flag was triggered. Output will appear with headers of the form ${OPTARG}_hic_scaffold_#"
;;
*) echo "$USAGE" >&2
exit 1
;;
esac
done
shift $(( OPTIND-1 ))
## HANDLE ARGUMENTS
[ -z $1 ] || [ -z $2 ] || [ -z $3 ] || [ -z $4 ] && echo >&2 ":( Some input seems to be missing." && echo >&2 "$USAGE" && exit 1
cprops=$1
asm=$2
fasta=$3
type=$4
# Dispatch on analysis stage: draft / raw (tiled) / final (merged).
case $type in
"draft")
[ -z $input_size ] && echo >&2 ":( Please type in the minimal contig/scaffold input size -s <num>. Exiting!" && exit 1
echo "Analyzing the draft assembly"
# Note that if the draft is not trimmed might need to trim first...
# Extract component fastas: full draft, attempted input, and tiny leftovers.
awk -v except=1 -f ${pipeline}/finalize/make-fasta-subset.awk <( awk -v subtiny=${subtiny_size} '$3<subtiny' ${cprops}) ${fasta} > draft.fasta
awk -v except=1 -f ${pipeline}/finalize/make-fasta-subset.awk <( awk -v tiny=${input_size} '$3<tiny' ${cprops}) ${fasta} > input.fasta
awk -f ${pipeline}/finalize/make-fasta-subset.awk <( awk -v subtiny=${subtiny_size} -v tiny=${input_size} '$3>=subtiny&&$3<tiny' ${cprops}) ${fasta} > tiny.fasta
ln -sf tiny.fasta unattempted.fasta
;;
"raw")
echo "Analyzing the tiled assembly"
[ -z ${input_size} ] || [ -z ${chrom_num} ] && echo >&2 ":( Please type in the number of chromosomes -c <num> and the tiny size -s <num>. Exiting!" && exit 1
# Strip {|| } tiling markers from the asm before processing.
awk 'gsub("{||}","")' ${asm} > temp.asm
asm=temp.asm
bash ${pipeline}/finalize/remove-N-overhangs-from-asm.sh ${cprops} ${asm} ${fasta}
prefix=`basename ${cprops} .cprops`
cprops=${prefix}.no_overhangs.cprops
prefix=`basename ${asm} .asm`
asm=${prefix}.no_overhangs.asm
prefix=`basename ${fasta} .fa`
prefix=`basename ${prefix} .fna`
prefix=`basename ${prefix} .fasta`
fasta=${prefix}.no_overhangs.fasta
# Resolved = contigs on the first chrom_num asm lines; the rest is
# unresolved/inconsistent (fragments or >= tiny threshold).
awk -v chrom_num=${chrom_num} 'FILENAME==ARGV[1]{oname[$2]=$1;next}FNR<=chrom_num{gsub("-","");for(i=1;i<=NF;i++){print oname[$i]}}' ${cprops} ${asm} | awk -f ${pipeline}/finalize/make-fasta-subset.awk - ${fasta} > resolved.fasta
awk -v chrom_num=${chrom_num} -v tiny=${input_size} 'FILENAME==ARGV[1]{oname[$2]=$1;split($1,b,":::overhang_||:::gap");len[b[1]]+=$3;next}FNR>chrom_num{gsub("-","");for(i=1;i<=NF;i++){split(oname[$i],b,":::overhang_||:::gap");if(oname[$i]~/:::fragment_/||len[b[1]]>=tiny){print oname[$i]}}}' ${cprops} ${asm} | awk -f ${pipeline}/finalize/make-fasta-subset.awk - ${fasta} > unresolved-and-inconsistent.fasta
rm temp.asm
;;
"final")
echo "Analyzing the merged assembly"
# trim N overhangs
echo "...trimming N overhangs"
bash ${pipeline}/finalize/remove-N-overhangs-from-asm.sh ${cprops} ${asm} ${fasta}
prefix=`basename ${cprops} .cprops`
cprops=${prefix}.no_overhangs.cprops
prefix=`basename ${asm} .asm`
asm=${prefix}.no_overhangs.asm
prefix=`basename ${fasta} .fa`
prefix=`basename ${prefix} .fna`
prefix=`basename ${prefix} .fasta`
fasta=${prefix}.no_overhangs.fasta
# riffle
echo "...adding gaps"
# Register a synthetic gap contig (all Ns) and interleave it between
# neighboring components on every asm line.
gap_id=`awk 'END{print \$2+1}' ${cprops}`
awk -v riffle=${gap_id} -f ${pipeline}/finalize/riffle-asm.awk ${asm} > temp.asm
cp ${cprops} temp.cprops
echo "hic_gap_${gap_id} ${gap_id} ${gap_size}" >> temp.cprops
cp ${fasta} temp.fasta
echo ">hic_gap_${gap_id}" >> temp.fasta
awk -v gap_size=${gap_size} 'BEGIN{for(i=1; i<=gap_size;i++){str=str"N"}; print str}' >> temp.fasta
bash ${pipeline}/finalize/construct-fasta-from-asm.sh temp.cprops temp.asm temp.fasta | awk -f ${pipeline}/utils/wrap-fasta-sequence.awk - > ${label}.FINAL.fasta
# clean up: remove no_overhangs files
rm temp.cprops temp.asm temp.fasta ${cprops} ${asm} ${fasta}
# NOTE(review): unconditional exit here — everything below in this branch
# is dead code kept for reference (superseded component analysis).
exit
# remove rest from script, not specialized script for this
# skip component analysis if no optional data like chrom number
[ -z ${input_size} ] || [ -z ${chrom_num} ] || [ -z ${label} ] && echo >&2 ":| Do not know the number of expected chromosomes: skipping component fasta analysis. Done!" && rm temp.cprops temp.asm temp.fasta && exit 0
awk -v chrom_num=${chrom_num} 'NR<=chrom_num' temp.asm > temp.chr-length.asm
awk -v subtiny=${subtiny_size} 'FILENAME==ARGV[1]{split($1,a,":::overhang_"); len[a[1]]+=$3; oname[$2]=a[1]; next}{gsub("-","")}(NF==1&&oname[$1]!~/:::fragment_/&&len[oname[$1]]<subtiny){print}' temp.cprops temp.asm > temp.subtiny.asm
awk -v subtiny=${subtiny_size} -v tiny=${input_size} 'FILENAME==ARGV[1]{split($1,a,":::overhang_"); len[a[1]]+=$3; oname[$2]=a[1]; next}{gsub("-","")}(NF==1&&oname[$1]!~/:::fragment_/&&len[oname[$1]]>=subtiny&&len[oname[$1]]<tiny){print}' temp.cprops temp.asm > temp.tiny.asm
cat temp.chr-length.asm temp.subtiny.asm temp.tiny.asm | awk 'FILENAME==ARGV[1]{skip[$0]=1;next}(!skip[$0]){print}' - temp.asm > temp.small.asm
# make scaffold sequences of groups and calculate statistics
echo "...generating fastas"
bash ${pipeline}/finalize/construct-fasta-from-asm.sh temp.cprops temp.chr-length.asm temp.fasta | awk -f ${pipeline}/utils/wrap-fasta-sequence.awk - > chr-length.fasta
awk '$0~/>/{if(c){print c}; c=0; next}{c+=length}END{print c}' chr-length.fasta > T3.dat
awk '$0~/>/{if(c){print c}; c=0; next}{gsub("N","");gsub("n","");c+=length}END{print c}' chr-length.fasta > tmp
# paste T3.dat tmp > T3.dat.tmp && mv T3.dat.tmp T3.dat && rm tmp
bash ${pipeline}/finalize/construct-fasta-from-asm.sh temp.cprops temp.small.asm temp.fasta | awk -f ${pipeline}/utils/wrap-fasta-sequence.awk - > small.fasta
bash ${pipeline}/finalize/construct-fasta-from-asm.sh temp.cprops temp.tiny.asm temp.fasta | awk -f ${pipeline}/utils/wrap-fasta-sequence.awk - > tiny.fasta
# merge final output
cat chr-length.fasta small.fasta | awk -v label=${label} '$0~/>/{counter++; $0=">"label"_hic_scaffold_"counter}1' > ${label}.FINAL.from_input.fasta
cat chr-length.fasta small.fasta tiny.fasta | awk -v label=${label} '$0~/>/{counter++; $0=">"label"_hic_scaffold_"counter}1' > ${label}.FINAL.from_draft.fasta
cat small.fasta tiny.fasta > small-and-tiny.fasta
# clean up: remove no_overhangs files
rm ${cprops} ${asm} ${fasta}
# clean up: remove component cprops and asm
rm temp.fasta temp.cprops temp.asm temp.small.asm temp.tiny.asm temp.chr-length.asm temp.subtiny.asm
# clean up: comment if component fastas are needed
rm chr-length.fasta small.fasta tiny.fasta small-and-tiny.fasta
;;
*)
# NOTE(review): accepted values above are draft/raw/final, but this
# message advertises draft/tiled/merged.
echo >&2 ":( Unknown type. Please choose one of the following: draft/tiled/merged. Exiting!" && exit 1
;;
esac
| Shell |
3D | theaidenlab/AGWG-merge | edit/reconstruct-edits-from-cprops.sh | .sh | 550 | 9 | #!/bin/bash
# Reconstruct edit (debris) annotations in original-contig coordinates:
# for every :::debris entry in the current cprops, emit a full-length 2D
# annotation record, lift it up to assembly coordinates, then lift it back
# down onto the original cprops naming.
# Args: $1 = original cprops, $2 = current (edited) cprops. Prints to STDOUT.
pipeline=`cd "$( dirname $0)" && cd .. && pwd`
orig_cprops=$1
current_cprops=$2
awk 'BEGIN{OFS="\t"; print "chr1", "x1", "x2", "chr2", "y1", "y2", "color", "id", "X1", "X2", "Y1", "Y2"}$1~/:::debris/{print $1, 0, $3, $1, 0, $3, "0,0,0", "debris", 0, $3, 0, $3}' ${current_cprops} | awk -f ${pipeline}/lift/lift-input-annotations-to-asm-annotations.awk ${current_cprops} <(awk '{print $2}' ${current_cprops}) - | awk -f ${pipeline}/lift/lift-asm-annotations-to-input-annotations.awk ${orig_cprops} <(awk '{print $2}' ${orig_cprops}) -
| Shell |
3D | theaidenlab/AGWG-merge | edit/edit-asm-according-to-new-cprops.sh | .sh | 1,396 | 14 | #!/bin/bash
# Script to make an equivalent of the old asm file in terms of new cprops if contigs/scaffolds were edited
# Written by OD
#
# Usage: edit-asm-according-to-new-cprops.sh <new_cprops> <old_cprops> <old_asm>
# Output: rewritten asm stream on stdout.
new_cprops=$1
old_cprops=$2
old_asm=$3
pipeline=`cd "$( dirname $0)" && cd .. && pwd`
## TODO: SAFEGUARDS
# Single pipeline, in stages:
#  - each subshell lifts every contig of one cprops file into assembly
#    coordinates (as 12-column annotations), then reduces to
#    "start length id tag" rows (tag 2 = old cprops, tag 1 = new cprops);
#  - the merged stream is sorted by position so new-cprops pieces line up
#    under the old-cprops contig that contains them, producing a
#    substitution map old_id -> list of new ids;
#  - the final awk applies that map to old_asm, expanding each id (and
#    reversing + negating the list for negated ids via reverse()).
# NOTE(review): correctness depends on exact sort order (-k 2,2n -k 4,4n)
# and on the lift scripts' output format; do not reorder stages.
((awk 'BEGIN{OFS="\t"; print "chr1", "x1", "x2", "chr2", "y1", "y2", "color", "id", "X1", "X2", "Y1", "Y2"}{print $1, 0, $3, $1, 0, $3, "0,0,0", $2, 0, $3, 0, $3}' ${old_cprops} | awk -f ${pipeline}/lift/lift-input-annotations-to-asm-annotations.awk ${old_cprops} <(awk '{print $2}' ${old_cprops}) - | awk 'NR>1{print $2, $3, $8, 2}') && (awk 'BEGIN{OFS="\t"; print "chr1", "x1", "x2", "chr2", "y1", "y2", "color", "id", "X1", "X2", "Y1", "Y2"}{print $1, 0, $3, $1, 0, $3, "0,0,0", $2, 0, $3, 0, $3}' ${new_cprops} | awk -f ${pipeline}/lift/lift-input-annotations-to-asm-annotations.awk ${new_cprops} <(awk '{print $2}' ${new_cprops}) - | awk 'NR>1{print $2, $3, $8, 1}')) | sort -k 2,2n -k 4,4n | awk '$4==2{print $3, substr(new_id,2); new_id=""; next}{new_id=new_id" "$3}' | awk 'function reverse(seq){n=split(seq,s); tmpstring = -s[n]; for(k=n-1; k>=1; k--){tmpstring = sprintf("%s %s",tmpstring, -s[k])};return tmpstring}FILENAME==ARGV[1]{str=$2; for(i=3;i<=NF;i++){str=str" "$i}; substitute[$1]=str; next}{str=""; for(i=1;i<=NF;i++){if($i<0){str=str" "reverse(substitute[-$i])}else{str=str" "substitute[$i]}}; print substr(str,2)}' - ${old_asm}
| Shell |
3D | theaidenlab/AGWG-merge | edit/run-coverage-analyzer.sh | .sh | 3,975 | 94 | #!/bin/bash
#### Description: Wrapper script to annotate likely repeats. Analyzes normalization vector at a specified resolution (default 25kb).
#### Usage: run-coverage-analyzer.sh [-h] [-w bin_size] [-t thr_cov] <path-to-hic-file>
#### Dependencies: Juicebox_tools
#### Input: Juicebox hic file.
#### Output: "Wide" bed file highlighting likely repeat regions [repeat_wide.bed; maybe later: repeat_narrow.bed]. Additional output generated as part of this wrapper script includes repeat_score_wide.wig (repeat_score_narrow.wig) track files.
#### Written by: Olga Dudchenko - olga.dudchenko@bcm.edu. Version date 12/19/2016.
# NOTE(review): USAGE below previously advertised a different script name and
# options (-p/-b/-d) that getopts never implemented; fixed to match the real
# interface (-h, -w, -t).
USAGE="
*****************************************************
This is a wrapper for a fragment of Hi-C misassembly detection pipeline, version date: Dec 3, 2016. This fragment concerns with generating a repeat annotation file that will later be overlaid with scaffold boundaries.
Usage: ./run-coverage-analyzer.sh [-h] [-w bin_size_aka_resolution] [-t threshold_coverage] path_to_hic_file
ARGUMENTS:
path_to_hic_file Path to Juicebox .hic file of the current assembly.
OPTIONS:
-h Shows this help
-w wide_res Sets resolution for the first-pass search of repeats (default is 25000 bp)
-t thr_cov Threshold coverage [default is 2]
Unprompted:
...
Uses juicebox_tools.sh that should be in the same folder as the wrapper script.
*****************************************************
"
## Set defaults
bin_size=25000 # default bin size to do a first-pass search for repeats
thr_cov=2      # flag bins whose normalization-vector value is >= this

## HANDLE OPTIONS
while getopts "hw:t:" opt; do
case $opt in
	h) echo "$USAGE" >&1
		exit 0
	;;
	w) re='^[0-9]+$'
		# accept only non-negative integers; otherwise keep the default
		if [[ $OPTARG =~ $re ]]; then
			echo ":) -w flag was triggered, performing cursory search for repeat at $OPTARG resolution" >&1
			bin_size=$OPTARG
		else
			echo ":( Wrong syntax for bin size. Using the default value 25000" >&2
		fi
	;;
	t) re='^[0-9]+$'
		if [[ $OPTARG =~ $re ]]; then
			echo ":) -t flag was triggered, flagging regions with coverage higher than $OPTARG" >&1
			thr_cov=$OPTARG
		else
			echo ":( Wrong syntax for threshold coverage. Using the default value 2" >&2
		fi
	;;
	*) echo "$USAGE" >&2
		exit 1
	;;
esac
done
shift $(( OPTIND-1 ))

## HANDLE ARGUMENTS: TODO check file format
if [ $# -lt 1 ]; then
	echo ":( Required arguments not found. Please double-check your input!!" >&2
	echo "$USAGE" >&2
	exit 1
fi
hic_file=$1

## CHECK DEPENDENCIES
type java >/dev/null 2>&1 || { echo >&2 ":( Java is not available, please install/add to path Java to run Juicer and Juicebox. Exiting!"; exit 1; }

# juicebox_tools.sh is expected in the sibling "visualize" directory
path_to_scripts=$(cd "$(dirname "$0")" && pwd)
path_to_vis=$(dirname "${path_to_scripts}")"/visualize"
juicebox=${path_to_vis}/"juicebox_tools.sh"

## DUMP NV FOR ANALYSIS, ANNOTATE ALL >=2. TODO: check that the matrix is not too sparse for chosen resolution
echo "...Dumping ${bin_size} resolution coverage track"
if ! bash "${juicebox}" dump norm VC "${hic_file}" assembly BP "${bin_size}" "coverage_wide.wig"; then
	echo >&2 ":( Juicebox coverage dump is empty! Perhaps something is wrong with the hic file or the requested resolution is too high. Exiting!"
	exit 1
fi
# prepend the fixedStep wig header expected by track viewers
echo "fixedStep chrom=assembly start=1 step="${bin_size}" span="${bin_size} | cat - "coverage_wide.wig" > temp && mv temp "coverage_wide.wig"

## NOTE: if(start) was added after 3d-dna release. Caused bug when nothing was annotated due to coverage.
# Pass 1: convert each over-threshold bin to a [start, end) bp interval.
# Pass 2: merge adjacent intervals into bed rows; if(start) guards the END
# block when no bin exceeded the threshold (empty annotation).
awk -v thr_cov=${thr_cov} -v bin_size=${bin_size} 'NR>1&&$0>=thr_cov{print (NR-2)*bin_size, (NR-1)*bin_size}' coverage_wide.wig | awk 'BEGIN{OFS="\t"}NR==1{start=$1; end=$2;next}$1==end{end=$2;next}{print "assembly", start, end; start=$1; end=$2}END{if(start){print "assembly", start, end}}' > repeats_wide.bed

## TODO: maybe downstream add filtering by looking for nearby mismatches to get rid of false positives
| Shell |
3D | theaidenlab/AGWG-merge | edit/edit-mnd-according-to-new-cprops.sh | .sh | 919 | 22 | #!/bin/bash
# Wrapper around gnu parallel for editing the mnd in case all of apply-edits is not needed. Not employed at the moment.
# Written by: OD
#
# Usage: edit-mnd-according-to-new-cprops.sh <new_cprops> <mnd>
# Output: edited merged-nodups stream on stdout.

## GNU Parallel Dependency (need >= 20150322 for --pipepart)
parallel="false"
if hash parallel 2>/dev/null; then
	ver=$(parallel --version | awk 'NR==1{print $3}')
	[ "$ver" -ge 20150322 ] 2>/dev/null && parallel="true"
fi
[ $parallel == "false" ] && echo >&2 ":| WARNING: GNU Parallel version 20150322 or later not installed. We highly recommend to install it to increase performance. Starting pipeline without parallelization!"

## HANDLE ARGUMENTS TODO: handle arguments better
new_cprops=$1
mnd=$2

pipeline=$(cd "$(dirname "$0")" && cd .. && pwd)
edit_mnd_script=${pipeline}/edit/edit-mnd-according-to-new-cprops.awk

# Fix 1: the non-parallel fallback previously read stdin ("-") although the
# mnd file is passed as an argument, so it blocked/produced nothing; it must
# read ${mnd}.
# Fix 2: "A && B || C" also ran the fallback whenever the parallel branch
# itself failed, appending a second full pass on top of partial output; an
# explicit if/else removes that hazard.
if [ "$parallel" == "true" ]; then
	parallel -a "${mnd}" --pipepart --will-cite --jobs 80% --block 1G "awk -f ${edit_mnd_script} ${new_cprops} - "
else
	awk -f "${edit_mnd_script}" "${new_cprops}" "${mnd}"
fi
| Shell |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.