keyword stringclasses 7 values | repo_name stringlengths 8 98 | file_path stringlengths 4 244 | file_extension stringclasses 29 values | file_size int64 0 84.1M | line_count int64 0 1.6M | content stringlengths 1 84.1M ⌀ | language stringclasses 14 values |
|---|---|---|---|---|---|---|---|
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Convolution/fft_convolve.hpp | .hpp | 3,854 | 117 | #ifndef _FFT_CONVOLVE_HPP
#define _FFT_CONVOLVE_HPP
#include "naive_convolve.hpp"
#include "../FFT/FFT.hpp"
// Ceiling of log2(len), computed with exact integer arithmetic.
// The previous implementation used (unsigned char)ceil(log2(len)),
// which is vulnerable to double-precision rounding once len no longer
// fits in a 53-bit mantissa, and evaluates log2(0) = -inf for len == 0.
// This version returns 0 for len <= 1 and the exact ceiling otherwise
// (64 for len > 2^63, which cannot be shifted but is mathematically
// correct).
inline unsigned char log2_ceiling(unsigned long len){
  unsigned char result = 0;
  unsigned long power = 1;
  while (power < len) {
    power <<= 1;
    ++result;
    // power wrapped to 0: len exceeds 2^63, so the ceiling is 64.
    if (power == 0)
      break;
  }
  return result;
}
// Smallest power of 2 that is >= len, i.e. 2^ceil(log2(len)).
inline unsigned long power_of_2_ceiling(unsigned long len){
  const unsigned char bits = log2_ceiling(len);
  return 1ul << bits;
}
// Convolve two complex tensors via FFT: pad both operands to a common
// power-of-2 shape, multiply the transformed values elementwise, apply
// the inverse transform, and shrink to the linear-convolution shape
// (lhs + rhs - 1 along each axis). Returns an empty tensor for
// 0-dimensional input.
inline Tensor<cpx> fft_convolve(const Tensor<cpx> & lhs, const Tensor<cpx> & rhs) {
#ifdef SHAPE_CHECK
assert(lhs.dimension() == rhs.dimension());
assert(lhs.data_shape() + rhs.data_shape() >= 1ul);
#endif
// Degenerate case: nothing to convolve.
if (lhs.dimension() == 0)
return Tensor<cpx>();
unsigned long k;
Vector<unsigned long> conv_shape(lhs.dimension());
for (k=0; k<lhs.dimension(); ++k) {
unsigned long larger = std::max(lhs.data_shape()[k], rhs.data_shape()[k]);
// Doubling leaves room along this axis for the full (non-circular)
// convolution while keeping a power-of-2 length for the FFT:
conv_shape[k] = power_of_2_ceiling(larger) * 2;
}
// Zero-pad both operands into buffers of the common shape:
Tensor<cpx> lhs_padded(conv_shape);
embed(lhs_padded, lhs);
Tensor<cpx> rhs_padded(conv_shape);
embed(rhs_padded, rhs);
apply_fft<DIF, false, false, true>(lhs_padded);
apply_fft<DIF, false, false, true>(rhs_padded);
// Elementwise product of the transformed buffers:
lhs_padded.flat() *= rhs_padded.flat();
// Allow rhs_padded to deallocate:
rhs_padded.clear();
// Perform in-place inverse FFT on packed values:
apply_ifft<DIT, false, false>(lhs_padded);
// Keep only the valid linear-convolution region:
lhs_padded.shrink(lhs.data_shape() + rhs.data_shape() - 1ul);
return lhs_padded;
}
// Compute the padded buffer shape (counted in doubles) used by the
// packed real-FFT convolution: every axis except the last is rounded
// up to a power of 2 and doubled; the final axis is sized for packed
// complex storage (see comment below).
// Robustness fix: returns the (empty) shape immediately when lhs has
// dimension 0 — consistent with the dimension()==0 guards in the
// fft_convolve overloads. Without this guard, the loop bound
// lhs.dimension()-1u underflows to a huge unsigned value when
// SHAPE_CHECK is disabled.
inline Vector<unsigned long> padded_convolution_shape(const Tensor<double> & lhs, const Tensor<double> & rhs) {
  unsigned long k;
  Vector<unsigned long> conv_shape_doubles(lhs.dimension());
#ifdef SHAPE_CHECK
  assert(lhs.dimension() > 0);
#endif
  if (lhs.dimension() == 0)
    return conv_shape_doubles;
  for (k=0; k<lhs.dimension()-1u; ++k) {
    unsigned long larger = std::max(lhs.data_shape()[k], rhs.data_shape()[k]);
    conv_shape_doubles[k] = power_of_2_ceiling(larger) * 2;
  }
  // Final axis is n/2+1 cpx values, after *2 it becomes n+1 cpx
  // values, which will be 2*(n+1) double values:
  conv_shape_doubles[k] = 2*(power_of_2_ceiling(std::max(lhs.data_shape()[k], rhs.data_shape()[k])) + 1);
  return conv_shape_doubles;
}
// Real-data convolution core. Both operands arrive already embedded in
// padded double buffers with the shape produced by
// padded_convolution_shape. The buffers are reinterpreted in place as
// complex tensors (no copy), transformed with the packed real FFT,
// multiplied elementwise, inverse transformed, reinterpreted back to
// doubles, and shrunk to result_shape. Both inputs are consumed
// (taken by rvalue reference and moved from).
inline Tensor<double> fft_convolve_already_padded_rvalue(Tensor<double> && lhs_padded_doubles, Tensor<double> && rhs_padded_doubles, Vector<unsigned long> result_shape) {
#ifdef SHAPE_CHECK
assert(lhs_padded_doubles.dimension() == rhs_padded_doubles.dimension());
assert(lhs_padded_doubles.data_shape() + rhs_padded_doubles.data_shape() >= 1ul);
#endif
// Degenerate case: nothing to convolve.
if (lhs_padded_doubles.dimension() == 0)
return Tensor<double>();
// Reinterpret the double buffers as complex in place:
Tensor<cpx> lhs_padded = Tensor<cpx>::create_reinterpreted(std::move(lhs_padded_doubles));
Tensor<cpx> rhs_padded = Tensor<cpx>::create_reinterpreted(std::move(rhs_padded_doubles));
apply_real_fft_packed<DIF, false, false, true>(lhs_padded);
apply_real_fft_packed<DIF, false, false, true>(rhs_padded);
// Elementwise product of the packed transformed buffers:
lhs_padded.flat() *= rhs_padded.flat();
// Allow rhs_padded to deallocate:
rhs_padded.clear();
// Perform in-place inverse FFT on packed values:
apply_real_ifft_packed<DIT, false, false>(lhs_padded);
// Unpack:
Tensor<double> result = Tensor<double>::create_reinterpreted(std::move(lhs_padded));
result.shrink(result_shape);
return result;
}
// Convolve two real tensors via the packed real FFT: zero-pad both
// into buffers shaped by padded_convolution_shape, then delegate to
// fft_convolve_already_padded_rvalue. The result has the
// linear-convolution shape (lhs + rhs - 1 along each axis).
inline Tensor<double> fft_convolve(const Tensor<double> & lhs, const Tensor<double> & rhs) {
#ifdef SHAPE_CHECK
assert(lhs.dimension() == rhs.dimension());
assert(lhs.data_shape() + rhs.data_shape() >= 1ul);
#endif
// Degenerate case: nothing to convolve.
if (lhs.dimension() == 0)
return Tensor<double>();
Vector<unsigned long> conv_shape_doubles = padded_convolution_shape(lhs, rhs);
// Zero-pad both operands into buffers of the padded shape:
Tensor<double> lhs_padded_doubles(conv_shape_doubles);
embed(lhs_padded_doubles, lhs);
Tensor<double> rhs_padded_doubles(conv_shape_doubles);
embed(rhs_padded_doubles, rhs);
return fft_convolve_already_padded_rvalue(std::move(lhs_padded_doubles), std::move(rhs_padded_doubles), lhs.data_shape() + rhs.data_shape() - 1ul);
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/BitReversedShuffle/BitReversal.hpp | .hpp | 6,822 | 132 | #ifndef _BITREVERSAL_HPP
#define _BITREVERSAL_HPP
// Compile-time-parameterized bit-reversal utilities for LOG_N-bit
// indices (0 <= x < 2^LOG_N), used by the bit-reversed shuffle
// implementations.
template <unsigned char LOG_N>
class BitReversal {
protected:
  // Lookup table: reversed_byte_table[b] is byte b with its 8 bits in
  // reverse order (defined after the class).
  static const unsigned char reversed_byte_table[256];

  // floor(log2(i)) for i > 0, read from the exponent field of the
  // IEEE-754 single-precision representation of (float)i.
  inline static int fast_log2(unsigned int i) {
    // Note: This is left in for reference, but it can be performed
    // more generally (i.e., LOG_N>=25) using the clz opcode and then
    // subtracting.
    static_assert(LOG_N < 25, "Fast logarithm by float casting only works for 24 bits or less; result for larger numbers of bits can be built from the total number of bits and either __builtin_clz or __builtin_clzl.");
    float f = i;
    // Byte-wise copy of the float's object representation. Unlike the
    // previous *(unsigned int *)&f, access through unsigned char* does
    // not violate strict aliasing.
    unsigned int bits = 0;
    const unsigned char * src = (const unsigned char*)&f;
    unsigned char * dest = (unsigned char*)&bits;
    for (unsigned int b=0; b<sizeof(float); ++b)
      dest[b] = src[b];
    // Exponent field starts at bit 23; 0x7f is the exponent bias:
    return (int)((bits >> 23) - 0x7f);
  }
public:
  // Reverse all 32 bits of x with a logarithmic sequence of
  // mask-and-swap steps.
  inline static unsigned int reverse_int_logical(unsigned int x) {
    // swap odd and even bits
    x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1);
    // swap consecutive pairs
    x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2);
    // swap nibbles ...
    x = ((x >> 4) & 0x0F0F0F0F) | ((x & 0x0F0F0F0F) << 4);
    // swap bytes
    x = ((x >> 8) & 0x00FF00FF) | ((x & 0x00FF00FF) << 8);
    // swap 2-byte long pairs
    x = ( x >> 16 ) | ( x << 16);
    return x;
  }
  // Reverse all 16 bits of x using the byte lookup table.
  inline static unsigned short reverse_short_byte_table(unsigned short x){
    unsigned char inByte0 = (x & 0xFF);
    unsigned char inByte1 = (x & 0xFF00) >> 8;
    return (reversed_byte_table[inByte0] << 8) | reversed_byte_table[inByte1];
  }
  // Reverse all 32 bits of x using the byte lookup table.
  inline static unsigned int reverse_int_byte_table(unsigned int x){
    unsigned char inByte0 = (x & 0xFF);
    unsigned char inByte1 = (x & 0xFF00) >> 8;
    unsigned char inByte2 = (x & 0xFF0000) >> 16;
    unsigned char inByte3 = (x & 0xFF000000) >> 24;
    // The cast on the <<24 term prevents signed overflow: the table
    // value is promoted to int, and e.g. 0xFF << 24 overflows a
    // 32-bit signed int.
    return ((unsigned int)reversed_byte_table[inByte0] << 24) | (reversed_byte_table[inByte1] << 16) | (reversed_byte_table[inByte2] << 8) | reversed_byte_table[inByte3];
  }
  // Reverse all 64 bits of x using the byte lookup table.
  inline static unsigned long reverse_long_byte_table(unsigned long x){
    unsigned char inByte0 = (x & 0xFF);
    unsigned char inByte1 = (x & 0xFF00) >> 8;
    unsigned char inByte2 = (x & 0xFF0000) >> 16;
    unsigned char inByte3 = (x & 0xFF000000) >> 24;
    unsigned char inByte4 = (x & 0xFF00000000ul) >> 32;
    unsigned char inByte5 = (x & 0xFF0000000000ul) >> 40;
    unsigned char inByte6 = (x & 0xFF000000000000ul) >> 48;
    unsigned char inByte7 = (x & 0xFF00000000000000ul) >> 56;
    // The cast on the inByte4 term fixes a real defect: without it the
    // table value is promoted to int, a value >= 0x80 shifted left 24
    // places goes negative, and the subsequent conversion to unsigned
    // long sign-extends, setting bits 32-63 of the result
    // incorrectly. (The <<16 and <<8 terms cannot overflow int.)
    return ((unsigned long)reversed_byte_table[inByte0] << 56) | ((unsigned long)reversed_byte_table[inByte1] << 48) | ((unsigned long)reversed_byte_table[inByte2] << 40) | ((unsigned long)reversed_byte_table[inByte3] << 32) | ((unsigned long)reversed_byte_table[inByte4] << 24) | (reversed_byte_table[inByte5] << 16) | (reversed_byte_table[inByte6] << 8) | reversed_byte_table[inByte7];
  }
  // Reverse the low LOG_N bits of x one bit at a time (slow reference
  // implementation). Bits of x at position LOG_N and above are ignored.
  inline static unsigned long reverse_bitwise(unsigned long x) {
    // Move bit bit_num of x to position LOG_N-1-bit_num of the result.
    // (The previous version started its mask at 1<<LOG_N — an
    // int-typed shift that overflows for LOG_N >= 31 — and its first
    // iteration shifted by LOG_N-1-LOG_N, i.e. a negative/huge shift
    // count.)
    unsigned long res = 0;
    for (unsigned char bit_num = 0; bit_num < LOG_N; ++bit_num)
      res |= ((x >> bit_num) & 1ul) << (LOG_N - 1u - bit_num);
    return res;
  }
  // Reverse the low LOG_N bits of x: reverse the smallest supporting
  // word size via the byte table, then shift the result down so that
  // the reversal occupies the low LOG_N bits.
  inline static unsigned long reverse_bytewise(unsigned long x) {
    // if (constexpr) statements should be eliminated by compiler to
    // choose correct case at compile time:
    if (LOG_N > sizeof(unsigned int)*8) {
      // Work with long reversal:
      // Pure bit reversal of 1 will result in 1<<63; need to shift
      // right so that reversal of 1 yields 1<<(LOG_N-1):
      return reverse_long_byte_table(x) >> (sizeof(unsigned long)*8 - LOG_N);
    }
    else if (LOG_N > sizeof(unsigned short)*8) {
      // Work with int reversal:
      return reverse_int_byte_table(x) >> (sizeof(unsigned int)*8 - LOG_N);
    }
    else if (LOG_N > sizeof(unsigned char)*8) {
      // Work with short int reversal:
      return reverse_short_byte_table(x) >> (sizeof(unsigned short)*8 - LOG_N);
    }
    // Work with char reversal:
    return reversed_byte_table[x] >> (sizeof(unsigned char)*8 - LOG_N);
  }
  // 32-bit overload of the above.
  inline static unsigned int reverse_bytewise(unsigned int x) {
    // To prevent unnecessary warnings about (desired) behavior when LOG_N == 0:
    if (LOG_N == 0)
      return x;
    return reverse_int_byte_table(x) >> (sizeof(unsigned int)*8 - LOG_N);
  }
  // Using XOR recurrence: increments index and updates its LOG_N-bit
  // reversal in O(1), avoiding a full reversal per step.
  inline static void advance_index_and_reversed(unsigned long & index, unsigned long & reversed) {
    unsigned long temp = index+1;
    unsigned long tail = ( index ^ temp );
    // tail is of the form 00...011...1 (and is never 0, so the
    // GCC/Clang builtin below — undefined for a 0 argument — is safe):
    index = temp;
    // create the reverse of tail, which is of form 11...100...0:
    auto shift = __builtin_clzl(tail);
    tail <<= shift;
    tail >>= ((sizeof(unsigned long)*8)-LOG_N);
    // xor reversed with reversed tail gives reversed of index+1:
    reversed ^= tail;
  }
};
// Definition of the 256-entry byte bit-reversal table declared in
// BitReversal: reversed_byte_table[b] is byte b with its 8 bits in
// reverse order (e.g. table[0x01] == 0x80, table[0x0F] == 0xF0).
template<unsigned char LOG_N>
const unsigned char BitReversal<LOG_N>::reversed_byte_table[256] = {0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0, 0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8, 0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4, 0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC, 0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2, 0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA, 0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6, 0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE, 0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1, 0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9, 0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5, 0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD, 0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3, 0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB, 0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7, 0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/BitReversedShuffle/StockhamShuffle.hpp | .hpp | 1,068 | 36 | #ifndef _STOCKHAMSHUFFLE_HPP
#define _STOCKHAMSHUFFLE_HPP
#include <algorithm>
#include "BitReversal.hpp"
#include "RecursiveShuffle.hpp"
#include "../Tensor/Tensor.hpp"
// Stockham-style bit-reversed shuffle of 2^LOG_N elements: move the
// least-significant index bit to the most-significant position, then
// recursively shuffle each half.
template<typename T, unsigned char LOG_N>
class StockhamShuffle {
public:
  inline static void apply_with_existing_buffer(T* __restrict const v, T* __restrict const buffer) {
    // Half of the 2^LOG_N elements:
    constexpr unsigned long half = 1ul<<LOG_N>>1;
    lsb_to_msb_with_existing_buffer<T, LOG_N>(v, buffer);
    // Each half-sized sub-problem needs only a quarter-sized scratch
    // region:
    StockhamShuffle<T, LOG_N-1>::apply_with_existing_buffer(v, buffer);
    StockhamShuffle<T, LOG_N-1>::apply_with_existing_buffer(v + half, buffer + (half>>1));
  }
  inline static void apply_out_of_place(T* __restrict const v) {
    // A scratch buffer of N/2 elements suffices for every level:
    T* __restrict const scratch = (T*)aligned_malloc<T>(1ul<<LOG_N>>1);
    apply_with_existing_buffer(v, scratch);
    free(scratch);
  }
};
// Base case (2^1 = 2 elements): reversing a 1-bit index is the
// identity permutation, so there is nothing to do.
template<typename T>
class StockhamShuffle<T, 1> {
public:
inline static void apply_with_existing_buffer(T* __restrict const v, T* __restrict const buffer) {
// Do nothing
}
inline static void apply_out_of_place(T* __restrict const v) {
// Do nothing
}
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/BitReversedShuffle/LocalPairwiseShuffle.hpp | .hpp | 1,645 | 50 | #ifndef _LOCALPAIRWISESHUFFLE_HPP
#define _LOCALPAIRWISESHUFFLE_HPP
#include <algorithm>
// From José M. Pérez-Jordá 1997
// Helper for the local pairwise bit-reversal shuffle (see citation
// above). At each level, indices of the current 2^LOG_SUB_N-long
// window that have bit RECURSION_DEPTH set are swapped with the
// partner index that instead has the window's top bit
// (position LOG_SUB_N-1) set; the two window halves are then handled
// recursively.
template<typename T, unsigned char LOG_N, unsigned char LOG_SUB_N>
class LocalPairwiseShuffleHelper {
public:
inline static void apply(T* __restrict const v) {
constexpr unsigned long SUB_N = 1ul << LOG_SUB_N;
constexpr unsigned long RECURSION_DEPTH = LOG_N-LOG_SUB_N;
// RECURSION_DEPTH=0: skip 1 (start at 1), += 2 , do blocks of 1
// RECURSION_DEPTH=1: skip 2 (start at 2), += 4 , do blocks of 2
// ...
// Find indices with bitstrings ending with 1 (end defined by
// LOG_SUB_N). Swap them with the matching reversed index.
// Note: the inner loop's ++index deliberately advances the outer
// loop's variable as well, so the two loops together visit one
// block of 2^RECURSION_DEPTH consecutive indices per outer step.
for (unsigned long index=1ul<<RECURSION_DEPTH; index<(SUB_N>>1); index+=(1ul<<RECURSION_DEPTH)) {
for (unsigned long block=0; block<1ul<<RECURSION_DEPTH; ++block, ++index) {
// Clear bit RECURSION_DEPTH and set bit LOG_SUB_N-1 instead:
unsigned long pair_bit_reversed = (index & ~(1ul<<RECURSION_DEPTH)) | (1ul<<(LOG_SUB_N-1));
std::swap(v[index], v[pair_bit_reversed]);
}
}
// Recursively apply to first and second half of the list:
LocalPairwiseShuffleHelper<T, LOG_N, LOG_SUB_N-1>::apply(v);
LocalPairwiseShuffleHelper<T, LOG_N, LOG_SUB_N-1>::apply(v + (1ul<<(LOG_SUB_N-1)) );
}
};
// Base case: a single-element window needs no swapping.
template<typename T, unsigned char LOG_N>
class LocalPairwiseShuffleHelper<T, LOG_N, 0> {
public:
inline static void apply(T* __restrict const v) {
// Do nothing.
}
};
// Entry point: shuffle the full 2^LOG_N-element array in place. The
// initial sub-problem spans the whole array, so LOG_SUB_N starts at
// LOG_N.
template<typename T, unsigned char LOG_N>
class LocalPairwiseShuffle {
public:
inline static void apply(T* __restrict const v) {
LocalPairwiseShuffleHelper<T, LOG_N, LOG_N>::apply(v);
}
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/BitReversedShuffle/RecursiveShuffle.hpp | .hpp | 5,457 | 192 | #ifndef _RECURSIVESHUFFLE_HPP
#define _RECURSIVESHUFFLE_HPP
#include "UnrolledShuffle.hpp"
#include "../Tensor/Tensor.hpp"
// Recursive blocked in-place transpose of the 2^LOG_WIDTH-wide square
// matrix stored row-major in x: each level splits the current
// 2^LOG_SUB_WIDTH-wide region into four quadrants, recursing until
// the base-case block size (specialization below) is reached.
// (R, C) is the top-left corner of the current region.
template <typename T, unsigned char LOG_WIDTH, unsigned char LOG_SUB_WIDTH, unsigned long R, unsigned long C>
class LogSquareTranspose {
public:
static void apply(T* __restrict x) {
constexpr unsigned long SUB_WIDTH = 1ul<<LOG_SUB_WIDTH;
LogSquareTranspose<T, LOG_WIDTH, LOG_SUB_WIDTH-1, R, C>::apply(x);
LogSquareTranspose<T, LOG_WIDTH, LOG_SUB_WIDTH-1, R, C+(SUB_WIDTH>>1)>::apply(x);
LogSquareTranspose<T, LOG_WIDTH, LOG_SUB_WIDTH-1, R+(SUB_WIDTH>>1), C>::apply(x);
LogSquareTranspose<T, LOG_WIDTH, LOG_SUB_WIDTH-1, R+(SUB_WIDTH>>1), C+(SUB_WIDTH>>1)>::apply(x);
}
};
// Base-case tile width for the recursion above (2^4 = 16 elements):
const unsigned char LOG_BLOCK_WIDTH = 4;
// Base case: transpose a BLOCK_WIDTH x BLOCK_WIDTH tile with plain
// element swaps; the i < j test restricts work to one side of the
// diagonal so that each pair is exchanged exactly once.
// NOTE(review): the recursion assumes the starting LOG_SUB_WIDTH is
// >= LOG_BLOCK_WIDTH; a smaller start would recurse past this
// specialization — confirm at instantiation sites.
template <typename T, unsigned char LOG_WIDTH, unsigned long R, unsigned long C>
class LogSquareTranspose<T, LOG_WIDTH, LOG_BLOCK_WIDTH, R, C> {
public:
static void apply(T* __restrict x) {
constexpr unsigned long WIDTH = 1ul<<LOG_WIDTH;
constexpr unsigned long BLOCK_WIDTH = 1ul<<LOG_BLOCK_WIDTH;
for (unsigned int i=R; i<R+BLOCK_WIDTH; ++i)
for (unsigned int j=C; j<C+BLOCK_WIDTH; ++j)
if (i < j) {
std::swap(x[i*WIDTH+j], x[j*WIDTH+i]);
}
}
};
// Uses the idea that rev(AB) = rev(B)rev(A). The word reversal (i.e.,
// changing AB to BA) is a transposition; if A and B have the same
// number of bits (i.e., if the total number of bits is divisible by
// 2), then it is a square matrix transposition. The matrix
// transposition can be performed with improved cache
// performance. Likewise, the rev(A) operations are row operations,
// and so they have good cache locality. Thus the method is more
// efficient than TableShuffle or XORShuffle.
// When the number of bits is odd, first peel off the single least
// significant bit (LSB) and then perform a transposition with a
// buffer (this is essentially ordering of even and odd indices in
// FFT).
// Move the least-significant index bit to the most-significant
// position: even-indexed elements are packed into the first half of x
// and odd-indexed elements into the second half (the even/odd split
// used by an FFT stage).
// buffer must be at least N/2 = 2^(NUM_BITS-1) elements long.
// Fix: the original copied the odd elements back with memcpy, which is
// only valid for trivially copyable T (and required <cstring>); the
// element-wise assignment loop below is correct for any assignable T
// and compiles to equivalent code for PODs.
template <typename T, unsigned char NUM_BITS>
inline void lsb_to_msb_with_existing_buffer(T* __restrict x, T* __restrict buffer) {
  constexpr unsigned long N = 1ul<<NUM_BITS;
  // Copy odd indices to buffer:
  for (unsigned long k=1; k<N; k+=2)
    buffer[k>>1] = x[k];
  // Pack even indices into first half of x (start at x[1] = x[2]
  // since x[0] already = x[0]):
  for (unsigned long k=2; k<N; k+=2)
    x[k>>1] = x[k];
  // Copy odd indices from buffer into second half of x:
  for (unsigned long k=0; k<(N>>1); ++k)
    x[k+(N>>1)] = buffer[k];
}
// Convenience wrapper: allocate the N/2-element scratch buffer,
// perform the LSB -> MSB move, and release the buffer.
template <typename T, unsigned char NUM_BITS>
inline void lsb_to_msb(T* __restrict x) {
  constexpr unsigned long half = (1ul<<NUM_BITS)>>1;
  // Scratch space for the odd-indexed elements:
  T* __restrict scratch = aligned_malloc<T>(half);
  lsb_to_msb_with_existing_buffer<T, NUM_BITS>(x, scratch);
  free(scratch);
}
// Cache-friendly recursive bit-reversed shuffle of 2^NUM_BITS
// elements, using rev(AB) = rev(B)rev(A) (see comment above): for an
// even bit count, bit-reverse each row of the sqrt(N) x sqrt(N)
// matrix, transpose it, and bit-reverse each row again; for an odd
// bit count, first move the LSB to the MSB and recurse on each half.
template <typename T, unsigned char NUM_BITS>
class RecursiveShuffle {
private:
public:
inline static void apply(T* __restrict x) {
// & 1 is the same as % 2:
if ((NUM_BITS & 1) == 1) {
// allocate buffer and perform single LSB --> MSB:
lsb_to_msb<T,NUM_BITS>(x);
// The halves now hold the even- and odd-indexed elements; shuffle
// each independently:
RecursiveShuffle<T, NUM_BITS-1>::apply(x);
RecursiveShuffle<T, NUM_BITS-1>::apply(x+(1ul<<(NUM_BITS-1)));
}
else {
constexpr unsigned char HALF_NUM_BITS = NUM_BITS>>1;
constexpr unsigned long SQRT_N = 1ul<<HALF_NUM_BITS;
// Bit-reverse each row (the low half of the index bits):
for (unsigned long k=0; k<SQRT_N; ++k)
RecursiveShuffle< T, HALF_NUM_BITS >::apply(x+(k<<HALF_NUM_BITS));
// Exchange row bits with column bits:
MatrixTranspose<T>::apply_square(x, SQRT_N);
// LogSquareTranspose<T, NUM_BITS/2, NUM_BITS/2, 0, 0>::apply(x);
// Bit-reverse each row again to complete the permutation:
for (unsigned long k=0; k<SQRT_N; ++k)
RecursiveShuffle< T, HALF_NUM_BITS >::apply(x+(k<<HALF_NUM_BITS));
}
}
};
// For small problems (NUM_BITS <= 9), simply use the closed form
// generated by UnrolledShuffle:
// (These specializations also terminate the recursion in the primary
// template above.)
template <typename T>
class RecursiveShuffle<T, 9> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 9>::apply(x);
}
};
template <typename T>
class RecursiveShuffle<T, 8> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 8>::apply(x);
}
};
template <typename T>
class RecursiveShuffle<T, 7> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 7>::apply(x);
}
};
template <typename T>
class RecursiveShuffle<T, 6> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 6>::apply(x);
}
};
template <typename T>
class RecursiveShuffle<T, 5> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 5>::apply(x);
}
};
template <typename T>
class RecursiveShuffle<T, 4> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 4>::apply(x);
}
};
template <typename T>
class RecursiveShuffle<T, 3> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 3>::apply(x);
}
};
template <typename T>
class RecursiveShuffle<T, 2> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 2>::apply(x);
}
};
template <typename T>
class RecursiveShuffle<T, 1> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 1>::apply(x);
}
};
template <typename T>
class RecursiveShuffle<T, 0> {
public:
inline static void apply(T* __restrict x) {
// 2^0 = 1 element: nothing to shuffle.
}
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/BitReversedShuffle/TableShuffle.hpp | .hpp | 561 | 25 | #ifndef _TABLESHUFFLE_HPP
#define _TABLESHUFFLE_HPP
#include <algorithm>
#include "BitReversal.hpp"
// Bit-reversed shuffle using the byte-table reversal: walk every
// index, compute its reversal, and swap each unique pair exactly once.
template<typename T, unsigned char LOG_N>
class TableShuffle {
public:
  inline static void apply(T* __restrict const v) {
    constexpr unsigned long N = 1ul << LOG_N;
    // Index 0 and index N-1 are their own reversals, so they are
    // skipped by the bounds:
    for (unsigned long i=1; i+1<N; ++i) {
      const unsigned long j = BitReversal<LOG_N>::reverse_bytewise(i);
      // Swap only when i is the smaller member of the pair, so each
      // pair is exchanged exactly once:
      if (i < j)
        std::swap(v[i], v[j]);
    }
  }
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/BitReversedShuffle/UnrolledShuffle.hpp | .hpp | 5,072 | 144 | #ifndef _UNROLLEDSHUFFLE_HPP
#define _UNROLLEDSHUFFLE_HPP
#include <algorithm>
// Tools for performing compile-time-optimized bit
// reversal. Substantially faster than other methods, but it requires
// much larger compilation times for large problems (10 bits <-->
// N=2^10 requires roughly 2s to compile).
// Note that the resulting assembly should closely resemble the code
// generated by the following python program:
/*
def rev(n, B):
return int(('{:0' + str(B) + 'b}').format(n)[::-1], 2)
def generate_reversal_code(B):
for i in xrange(2**B):
j = rev(i, B)
if i < j:
print ' std::swap( x[' + str(i) + '], x[' + str(j) + '] );'
# Generate a closed form for some specific numbers of bits (e.g., 8):
generate_reversal_code(8)
# The order of those swap operations could also be intelligently
ordered to minimize cache misses; however, the compiler does fairly
well with that.
*/
// Compile-time helpers used by the unrolled shuffle to grow an index
// and its reversal inward from both ends. rem_bits counts the bits not
// yet fixed; as it shrinks by 2 per recursion level, the "right" bit
// moves right from just below the middle and the "left" bit moves
// left from just above it.
static constexpr unsigned long set_bit_right(unsigned char num_bits, unsigned char rem_bits, unsigned long value) {
  return value | ((1ul << (num_bits >> 1)) >> (rem_bits >> 1));
}
static constexpr unsigned long set_bit_left(unsigned char num_bits, unsigned char rem_bits, unsigned long value) {
  return value | ((1ul << (num_bits - 1)) >> ((num_bits >> 1) - (rem_bits >> 1)));
}
// Set both the current left and right bits (OR is commutative, so the
// composition order is irrelevant):
static constexpr unsigned long set_bits_left_and_right(unsigned char num_bits, unsigned char rem_bits, unsigned long value) {
  return set_bit_left(num_bits, rem_bits, set_bit_right(num_bits, rem_bits, value));
}
// Emits swaps for every index pair reachable by extending VAL and REV
// inward by one bit from each end per level (the four combinations of
// left/right bit values). Recursion on REM_BITS terminates in the
// middle of the index: at REM_BITS == 1 for odd NUM_BITS, REM_BITS ==
// 0 for even NUM_BITS (specializations below).
template <typename T, unsigned char NUM_BITS, unsigned char REM_BITS, unsigned long VAL, unsigned long REV>
class ShuffleAllValuesHelper {
public:
// __attribute__((always_inline))
static void apply(T * __restrict x) {
// 0*0
ShuffleAllValuesHelper<T, NUM_BITS, REM_BITS-2, VAL, REV>::apply(x);
// 0*1
ShuffleAllValuesHelper<T, NUM_BITS, REM_BITS-2, set_bit_right(NUM_BITS, REM_BITS, VAL), set_bit_left(NUM_BITS, REM_BITS, REV)>::apply(x);
// 1*0
ShuffleAllValuesHelper<T, NUM_BITS, REM_BITS-2, set_bit_left(NUM_BITS, REM_BITS, VAL), set_bit_right(NUM_BITS, REM_BITS, REV)>::apply(x);
// 1*1
ShuffleAllValuesHelper<T, NUM_BITS, REM_BITS-2, set_bits_left_and_right(NUM_BITS, REM_BITS, VAL), set_bits_left_and_right(NUM_BITS, REM_BITS, REV)>::apply(x);
}
};
// When NUM_BITS % 2 == 1: one middle bit remains; emit swaps for both
// of its values.
template <typename T, unsigned char NUM_BITS, unsigned long VAL, unsigned long REV>
class ShuffleAllValuesHelper<T, NUM_BITS, 1, VAL, REV> {
public:
// __attribute__((always_inline))
static void apply(T * __restrict x) {
constexpr unsigned char MIDDLE_BIT = NUM_BITS>>1;
// With 0 in middle:
std::swap(x[VAL], x[REV]);
// With 1 in middle:
std::swap(x[VAL | (1ul<<MIDDLE_BIT)], x[REV | (1ul<<MIDDLE_BIT)]);
}
};
// When NUM_BITS % 2 == 0: all bits fixed; emit the single swap.
template <typename T, unsigned char NUM_BITS, unsigned long VAL, unsigned long REV>
class ShuffleAllValuesHelper<T, NUM_BITS, 0, VAL, REV> {
public:
// __attribute__((always_inline))
static void apply(T * __restrict x) {
std::swap(x[VAL], x[REV]);
}
};
// Enumerates the unique index pairs for the bit-reversal permutation
// at compile time: setting the right bit on VAL and the left bit on
// REV guarantees VAL < REV for all pairs produced by the inner
// enumeration, so each pair is swapped exactly once.
template <typename T, unsigned char NUM_BITS, unsigned char REM_BITS, unsigned long VAL, unsigned long REV>
class UnrolledShuffleHelper {
public:
// __attribute__((always_inline))
static void apply(T * __restrict x) {
// apply [current_bit digits]0...1[current_bit digits]
// Applies to all inner values (inequality is already guaranteed):
ShuffleAllValuesHelper<T, NUM_BITS, REM_BITS-2, set_bit_right(NUM_BITS, REM_BITS, VAL), set_bit_left(NUM_BITS, REM_BITS, REV)>::apply(x);
// apply [current_bit digits]0...0[current_bit digits]
UnrolledShuffleHelper<T, NUM_BITS, REM_BITS-2, VAL, REV>::apply(x);
// apply [current_bit digits]1...1[current_bit digits]
UnrolledShuffleHelper<T, NUM_BITS, REM_BITS-2, set_bits_left_and_right(NUM_BITS, REM_BITS, VAL), set_bits_left_and_right(NUM_BITS, REM_BITS, REV)>::apply(x);
}
};
// When NUM_BITS % 2 == 1: one middle bit remains.
template <typename T, unsigned char NUM_BITS, unsigned long VAL, unsigned long REV>
class UnrolledShuffleHelper<T, NUM_BITS, 1, VAL, REV> {
public:
// __attribute__((always_inline))
static void apply(T * __restrict x) {
constexpr unsigned char MIDDLE_BIT = NUM_BITS>>1;
// With 0 in middle:
std::swap(x[VAL], x[REV]);
// With 1 in middle:
// Note: this will swap 1111...1, with 1111...1 (this should be
// the only case where equality will occur); but there will be no
// effect:
std::swap(x[VAL | (1ul<<MIDDLE_BIT)], x[REV | (1ul<<MIDDLE_BIT)]);
}
};
// When NUM_BITS % 2 == 0: all bits fixed; emit the single swap.
template <typename T, unsigned char NUM_BITS, unsigned long VAL, unsigned long REV>
class UnrolledShuffleHelper<T, NUM_BITS, 0, VAL, REV> {
public:
// __attribute__((always_inline))
static void apply(T * __restrict x) {
std::swap(x[VAL], x[REV]);
}
};
// Entry point: fully compile-time-unrolled bit-reversed shuffle of
// 2^NUM_BITS elements (see the generator comment at the top of this
// file for the equivalent closed form).
template <typename T, unsigned char NUM_BITS>
class UnrolledShuffle {
public:
// __attribute__((always_inline))
static void apply(T * __restrict x) {
UnrolledShuffleHelper<T, NUM_BITS, NUM_BITS, 0ul, 0ul>::apply(x);
}
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/BitReversedShuffle/NaiveShuffle.hpp | .hpp | 564 | 25 | #ifndef _NAIVESHUFFLE_HPP
#define _NAIVESHUFFLE_HPP
#include <algorithm>
#include "BitReversal.hpp"
// Reference bit-reversed shuffle: reverse each index bit-by-bit and
// swap each unique pair exactly once.
template<typename T, unsigned char LOG_N>
class NaiveShuffle {
public:
  inline static void apply(T* __restrict const v) {
    constexpr unsigned long int N = 1ul << LOG_N;
    // 0 and N-1 are palindromic indices, so the bounds skip them:
    for (unsigned long i=1; i+1<N; ++i) {
      const unsigned long r = BitReversal<LOG_N>::reverse_bitwise(i);
      // Using '>' (rather than '<') still visits each unordered pair
      // exactly once:
      if (i > r)
        std::swap(v[i], v[r]);
    }
  }
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/BitReversedShuffle/XORShuffle.hpp | .hpp | 574 | 25 | #ifndef _XORSHUFFLE_HPP
#define _XORSHUFFLE_HPP
#include <algorithm>
#include "BitReversal.hpp"
// Bit-reversed shuffle that carries the reversed index alongside the
// index: BitReversal::advance_index_and_reversed updates both in O(1)
// via an XOR recurrence, avoiding a full reversal per element.
template<typename T, unsigned char LOG_N>
class XORShuffle {
public:
inline static void apply(T* __restrict const v) {
constexpr unsigned long N = 1ul << LOG_N;
unsigned long reversed = 0;
// Note: index (and reversed) are advanced inside the loop body, not
// in the for-statement header:
for (unsigned long index=0; index<(N-1); ) {
// Comparison ensures swap is performed only once per unique pair:
if (index<reversed)
std::swap(v[index], v[reversed]);
BitReversal<LOG_N>::advance_index_and_reversed(index, reversed);
}
}
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/BitReversedShuffle/SemiRecursiveShuffle.hpp | .hpp | 5,227 | 205 | #ifndef _SEMIRECURSIVESHUFFLE_HPP
#define _SEMIRECURSIVESHUFFLE_HPP
#include "RecursiveShuffle.hpp"
// Identical to RecursiveShuffle but limits the number of recursions
// to 1. Therefore, it will compile longer, but can be slightly faster
// (empirically tested, reason unclear).
// Same scheme as RecursiveShuffle (LSB->MSB move for odd bit counts,
// row-shuffle / transpose / row-shuffle for even ones), but with an
// explicit RECURSIONS_REMAINING budget; when it reaches 0 the
// remaining work is delegated to the fully unrolled shuffle.
template <typename T, unsigned char NUM_BITS, unsigned char RECURSIONS_REMAINING>
class SemiRecursiveShuffle {
public:
inline static void apply(T* __restrict x) {
// & 1 is the same as % 2:
if ((NUM_BITS & 1) == 1) {
// allocate buffer and perform single LSB --> MSB:
lsb_to_msb<T, NUM_BITS>(x);
// Shuffle the even- and odd-element halves independently (does not
// consume a recursion, matching the original accounting):
SemiRecursiveShuffle<T, NUM_BITS-1, RECURSIONS_REMAINING>::apply(x);
SemiRecursiveShuffle<T, NUM_BITS-1, RECURSIONS_REMAINING>::apply(x+(1ul<<(NUM_BITS-1)));
}
else {
constexpr unsigned char SUB_NUM_BITS = NUM_BITS>>1;
constexpr unsigned long SUB_N = 1ul<<SUB_NUM_BITS;
// Bit-reverse each row, transpose, then bit-reverse each row again:
for (unsigned long k=0; k<SUB_N; ++k)
SemiRecursiveShuffle<T, SUB_NUM_BITS, RECURSIONS_REMAINING-1>::apply(x+(k<<SUB_NUM_BITS));
MatrixTranspose<T>::apply_square(x, SUB_N);
for (unsigned long k=0; k<SUB_N; ++k)
SemiRecursiveShuffle<T, SUB_NUM_BITS, RECURSIONS_REMAINING-1>::apply(x+(k<<SUB_NUM_BITS));
}
}
};
// Recursion budget exhausted: finish with the fully unrolled
// compile-time shuffle.
template <typename T, unsigned char NUM_BITS>
class SemiRecursiveShuffle<T, NUM_BITS, 0> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, NUM_BITS>::apply(x);
}
};
// NUM_BITS <= 9, simply use UnrolledShuffle:
// (These specializations terminate the size recursion regardless of
// the remaining recursion budget.)
template <typename T, unsigned char RECURSIONS_REMAINING>
class SemiRecursiveShuffle<T, 9, RECURSIONS_REMAINING> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 9>::apply(x);
}
};
template <typename T, unsigned char RECURSIONS_REMAINING>
class SemiRecursiveShuffle<T, 8, RECURSIONS_REMAINING> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 8>::apply(x);
}
};
template <typename T, unsigned char RECURSIONS_REMAINING>
class SemiRecursiveShuffle<T, 7, RECURSIONS_REMAINING> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 7>::apply(x);
}
};
template <typename T, unsigned char RECURSIONS_REMAINING>
class SemiRecursiveShuffle<T, 6, RECURSIONS_REMAINING> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 6>::apply(x);
}
};
template <typename T, unsigned char RECURSIONS_REMAINING>
class SemiRecursiveShuffle<T, 5, RECURSIONS_REMAINING> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 5>::apply(x);
}
};
template <typename T, unsigned char RECURSIONS_REMAINING>
class SemiRecursiveShuffle<T, 4, RECURSIONS_REMAINING> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 4>::apply(x);
}
};
template <typename T, unsigned char RECURSIONS_REMAINING>
class SemiRecursiveShuffle<T, 3, RECURSIONS_REMAINING> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 3>::apply(x);
}
};
template <typename T, unsigned char RECURSIONS_REMAINING>
class SemiRecursiveShuffle<T, 2, RECURSIONS_REMAINING> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 2>::apply(x);
}
};
template <typename T, unsigned char RECURSIONS_REMAINING>
class SemiRecursiveShuffle<T, 1, RECURSIONS_REMAINING> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 1>::apply(x);
}
};
template <typename T, unsigned char RECURSIONS_REMAINING>
class SemiRecursiveShuffle<T, 0, RECURSIONS_REMAINING> {
public:
inline static void apply(T* __restrict x) {
// 2^0 = 1 element: nothing to shuffle.
}
};
// Special cases to prevent ambiguity in template specialization:
template <typename T>
class SemiRecursiveShuffle<T, 9, 0> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 9>::apply(x);
}
};
template <typename T>
class SemiRecursiveShuffle<T, 8, 0> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 8>::apply(x);
}
};
template <typename T>
class SemiRecursiveShuffle<T, 7, 0> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 7>::apply(x);
}
};
template <typename T>
class SemiRecursiveShuffle<T, 6, 0> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 6>::apply(x);
}
};
template <typename T>
class SemiRecursiveShuffle<T, 5, 0> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 5>::apply(x);
}
};
template <typename T>
class SemiRecursiveShuffle<T, 4, 0> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 4>::apply(x);
}
};
template <typename T>
class SemiRecursiveShuffle<T, 3, 0> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 3>::apply(x);
}
};
template <typename T>
class SemiRecursiveShuffle<T, 2, 0> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 2>::apply(x);
}
};
template <typename T>
class SemiRecursiveShuffle<T, 1, 0> {
public:
inline static void apply(T* __restrict x) {
UnrolledShuffle<T, 1>::apply(x);
}
};
template <typename T>
class SemiRecursiveShuffle<T, 0, 0> {
public:
inline static void apply(T* __restrict x) {
// 2^0 = 1 element: nothing to shuffle.
}
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/BitReversedShuffle/COBRAShuffle.hpp | .hpp | 4,055 | 104 | #ifndef _COBRASHUFFLE_HPP
#define _COBRASHUFFLE_HPP
#include <algorithm>
#include "BitReversal.hpp"
#include "../Tensor/Tensor.hpp"
// From Carter and Gatlin 1998
template<typename T, unsigned char LOG_N, unsigned char LOG_BLOCK_WIDTH>
class COBRAShuffle {
public:
// In-place cache-optimal (COBRA) bit-reversed shuffle of the
// 2^LOG_N-element array v (Carter and Gatlin 1998). The LOG_N index
// bits are split a|b|c with |a| = |c| = LOG_BLOCK_WIDTH and
// |b| = LOG_N - 2*LOG_BLOCK_WIDTH; for each middle value b, a
// BLOCK_WIDTH x BLOCK_WIDTH tile is staged through a small buffer so
// that element moves mostly occur within the buffer.
inline static void apply(T* __restrict const v) {
constexpr unsigned char NUM_B_BITS = LOG_N - 2*LOG_BLOCK_WIDTH;
constexpr unsigned long B_SIZE = 1ul << NUM_B_BITS;
constexpr unsigned long BLOCK_WIDTH = 1ul << LOG_BLOCK_WIDTH;
T* __restrict buffer = aligned_malloc<T>(BLOCK_WIDTH*BLOCK_WIDTH);
for (unsigned long b=0; b<B_SIZE; ++b) {
unsigned long b_rev = BitReversal<NUM_B_BITS>::reverse_bytewise(b);
// Copy block to buffer:
for (unsigned long a=0; a<BLOCK_WIDTH; ++a) {
unsigned long a_rev = BitReversal<LOG_BLOCK_WIDTH>::reverse_bytewise(a);
for (unsigned long c=0; c<BLOCK_WIDTH; ++c)
buffer[ (a_rev << LOG_BLOCK_WIDTH) | c ] = v[ (a << NUM_B_BITS << LOG_BLOCK_WIDTH) | (b << LOG_BLOCK_WIDTH) | c ];
}
// Swap v[rev_index] with buffer:
for (unsigned long c=0; c<BLOCK_WIDTH; ++c) {
// Note: Typo in original pseudocode by Carter and Gatlin at
// the following line:
unsigned long c_rev = BitReversal<LOG_BLOCK_WIDTH>::reverse_bytewise(c);
for (unsigned long a_rev=0; a_rev<BLOCK_WIDTH; ++a_rev) {
unsigned long a = BitReversal<LOG_BLOCK_WIDTH>::reverse_bytewise(a_rev);
// To guarantee each value is swapped only one time:
// index < reversed_index <-->
// a b c < c' b' a' <-->
// a < c' ||
// a <= c' && b < b' ||
// a <= c' && b <= b' && a' < c
bool index_less_than_reverse = a < c_rev || (a == c_rev && b < b_rev) || (a == c_rev && b == b_rev && a_rev < c);
if ( index_less_than_reverse )
std::swap( v[(c_rev << NUM_B_BITS << LOG_BLOCK_WIDTH) | (b_rev<<LOG_BLOCK_WIDTH) | a_rev], buffer[ (a_rev<<LOG_BLOCK_WIDTH) | c ]);
}
}
// Copy changes that were swapped into buffer above:
for (unsigned long a=0; a<BLOCK_WIDTH; ++a) {
unsigned long a_rev = BitReversal<LOG_BLOCK_WIDTH>::reverse_bytewise(a);
for (unsigned long c=0; c<BLOCK_WIDTH; ++c) {
unsigned long c_rev = BitReversal<LOG_BLOCK_WIDTH>::reverse_bytewise(c);
// Same pair-ordering predicate as above, so exactly the swapped
// entries are written back:
bool index_less_than_reverse = a < c_rev || (a == c_rev && b < b_rev) || (a == c_rev && b == b_rev && a_rev < c);
if (index_less_than_reverse)
std::swap(v[ (a << NUM_B_BITS << LOG_BLOCK_WIDTH) | (b << LOG_BLOCK_WIDTH) | c ], buffer[ (a_rev << LOG_BLOCK_WIDTH) | c ]);
}
}
}
free(buffer);
}
inline static void apply_out_of_place(T* __restrict const v) {
T* __restrict const result = (T*)aligned_malloc<T>(1ul<<LOG_N);
constexpr unsigned char NUM_B_BITS = LOG_N - 2*LOG_BLOCK_WIDTH;
constexpr unsigned long B_SIZE = 1ul << NUM_B_BITS;
constexpr unsigned long BLOCK_WIDTH = 1ul << LOG_BLOCK_WIDTH;
T* __restrict buffer = (T*)aligned_malloc<T>(BLOCK_WIDTH*BLOCK_WIDTH);
for (unsigned long b=0; b<B_SIZE; ++b) {
unsigned long b_rev = BitReversal<NUM_B_BITS>::reverse_bytewise(b);
// Copy block to buffer:
for (unsigned long a=0; a<BLOCK_WIDTH; ++a) {
unsigned long a_rev = BitReversal<LOG_BLOCK_WIDTH>::reverse_bytewise(a);
for (unsigned long c=0; c<BLOCK_WIDTH; ++c)
buffer[ (a_rev << LOG_BLOCK_WIDTH) | c ] = v[ (a << NUM_B_BITS << LOG_BLOCK_WIDTH) | (b << LOG_BLOCK_WIDTH) | c ];
}
// Swap from buffer:
for (unsigned long c=0; c<BLOCK_WIDTH; ++c) {
// Note: Typo in original pseudocode by Carter and Gatlin at
// the following line:
unsigned long c_rev = BitReversal<LOG_BLOCK_WIDTH>::reverse_bytewise(c);
for (unsigned long a_rev=0; a_rev<BLOCK_WIDTH; ++a_rev)
result[(c_rev << NUM_B_BITS << LOG_BLOCK_WIDTH) | (b_rev<<LOG_BLOCK_WIDTH) | a_rev] = buffer[ (a_rev<<LOG_BLOCK_WIDTH) | c ];
}
}
free(buffer);
memcpy(v, result, (1ul<<(LOG_N))*sizeof(T));
free(result);
}
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Tensor/VectorArithmetic.hpp | .hpp | 6,673 | 185 | #ifndef _VECTORARITHMETIC_HPP
#define _VECTORARITHMETIC_HPP
#include "VectorTRIOT.hpp"
template <typename T>
class Vector;
template <typename T>
class VectorView;
// +=, -=, ... with & lhs:
// Element-wise compound assignment (+=, -=, *=, /=) between two
// vector-like operands. Sizes must match (checked under SHAPE_CHECK);
// rhs elements of type T are combined into lhs elements of type S by
// the per-element lambda.
template <typename S, typename T, template <typename> class VECTOR_A, template <typename> class VECTOR_B>
const WritableVectorLike<S, VECTOR_A> & operator +=(WritableVectorLike<S, VECTOR_A> & lhs, const VectorLike<T, VECTOR_B> & rhs) {
#ifdef SHAPE_CHECK
  assert(lhs.size() == rhs.size());
#endif
  apply_vectors([](S & vL, T vR){ vL += vR; }, lhs.size(), lhs, rhs);
  return lhs;
}
template <typename S, typename T, template <typename> class VECTOR_A, template <typename> class VECTOR_B>
const WritableVectorLike<S, VECTOR_A> & operator -=(WritableVectorLike<S, VECTOR_A> & lhs, const VectorLike<T, VECTOR_B> & rhs) {
#ifdef SHAPE_CHECK
  assert(lhs.size() == rhs.size());
#endif
  apply_vectors([](S & vL, T vR){ vL -= vR; }, lhs.size(), lhs, rhs);
  return lhs;
}
template <typename S, typename T, template <typename> class VECTOR_A, template <typename> class VECTOR_B>
const WritableVectorLike<S, VECTOR_A> & operator *=(WritableVectorLike<S, VECTOR_A> & lhs, const VectorLike<T, VECTOR_B> & rhs) {
#ifdef SHAPE_CHECK
  assert(lhs.size() == rhs.size());
#endif
  apply_vectors([](S & vL, T vR){ vL *= vR; }, lhs.size(), lhs, rhs);
  return lhs;
}
// Note: division by zero elements of rhs is not checked here.
template <typename S, typename T, template <typename> class VECTOR_A, template <typename> class VECTOR_B>
const WritableVectorLike<S, VECTOR_A> & operator /=(WritableVectorLike<S, VECTOR_A> & lhs, const VectorLike<T, VECTOR_B> & rhs) {
#ifdef SHAPE_CHECK
  assert(lhs.size() == rhs.size());
#endif
  apply_vectors([](S & vL, T vR){ vL /= vR; }, lhs.size(), lhs, rhs);
  return lhs;
}
// +=, -=, ... with & lhs, value rhs:
// Compound assignment with a scalar rhs, broadcast to every element of
// the vector-like lhs:
template <typename T, template <typename> class VECTOR_A>
const WritableVectorLike<T, VECTOR_A> & operator +=(WritableVectorLike<T, VECTOR_A> & lhs, T rhs) {
  apply_vectors([&rhs](T & vL){ vL += rhs; }, lhs.size(), lhs);
  return lhs;
}
template <typename T, template <typename> class VECTOR_A>
const WritableVectorLike<T, VECTOR_A> & operator -=(WritableVectorLike<T, VECTOR_A> & lhs, T rhs) {
  apply_vectors([&rhs](T & vL){ vL -= rhs; }, lhs.size(), lhs);
  return lhs;
}
template <typename T, template <typename> class VECTOR_A>
const WritableVectorLike<T, VECTOR_A> & operator *=(WritableVectorLike<T, VECTOR_A> & lhs, T rhs) {
  apply_vectors([&rhs](T & vL){ vL *= rhs; }, lhs.size(), lhs);
  return lhs;
}
template <typename T, template <typename> class VECTOR_A>
const WritableVectorLike<T, VECTOR_A> & operator /=(WritableVectorLike<T, VECTOR_A> & lhs, T rhs) {
  apply_vectors([&rhs](T & vL){ vL /= rhs; }, lhs.size(), lhs);
  return lhs;
}
// +=, -=, ... with && lhs:
// Rvalue-lhs overloads: allow compound assignment into a temporary
// writable view (e.g. `vec.start_at(k) += rhs`). Each simply forwards
// to the corresponding lvalue overload above.
template <typename T, template <typename> class VECTOR_A, template <typename> class VECTOR_B>
const WritableVectorLike<T, VECTOR_A> & operator +=(WritableVectorLike<T, VECTOR_A> && lhs, const VectorLike<T, VECTOR_B> & rhs) {
  return lhs += rhs;
}
template <typename T, template <typename> class VECTOR_A, template <typename> class VECTOR_B>
const WritableVectorLike<T, VECTOR_A> & operator -=(WritableVectorLike<T, VECTOR_A> && lhs, const VectorLike<T, VECTOR_B> & rhs) {
  return lhs -= rhs;
}
template <typename T, template <typename> class VECTOR_A, template <typename> class VECTOR_B>
const WritableVectorLike<T, VECTOR_A> & operator *=(WritableVectorLike<T, VECTOR_A> && lhs, const VectorLike<T, VECTOR_B> & rhs) {
  return lhs *= rhs;
}
template <typename T, template <typename> class VECTOR_A, template <typename> class VECTOR_B>
const WritableVectorLike<T, VECTOR_A> & operator /=(WritableVectorLike<T, VECTOR_A> && lhs, const VectorLike<T, VECTOR_B> & rhs) {
  return lhs /= rhs;
}
// +=, -=, ... with && lhs, value rhs:
// Rvalue-lhs overloads with a scalar rhs; forward to the lvalue
// overloads above.
template <typename T, template <typename> class VECTOR_A>
const WritableVectorLike<T, VECTOR_A> & operator +=(WritableVectorLike<T, VECTOR_A> && lhs, T rhs) {
  return lhs += rhs;
}
template <typename T, template <typename> class VECTOR_A>
const WritableVectorLike<T, VECTOR_A> & operator -=(WritableVectorLike<T, VECTOR_A> && lhs, T rhs) {
  return lhs -= rhs;
}
template <typename T, template <typename> class VECTOR_A>
const WritableVectorLike<T, VECTOR_A> & operator *=(WritableVectorLike<T, VECTOR_A> && lhs, T rhs) {
  return lhs *= rhs;
}
template <typename T, template <typename> class VECTOR_A>
const WritableVectorLike<T, VECTOR_A> & operator /=(WritableVectorLike<T, VECTOR_A> && lhs, T rhs) {
  return lhs /= rhs;
}
// +, -, ...
// Binary arithmetic operators: copy the left operand, update the copy
// in place, and return the named local. Previously these were written
// `return result += rhs;`, which returns a const reference and forced
// an extra O(n) copy through the converting constructor; returning the
// named Vector lets NRVO / the move constructor elide that copy. The
// element values produced are unchanged.
template <typename S, typename T, template <typename> class VECTOR_A, template <typename> class VECTOR_B>
Vector<S> operator +(const VectorLike<S, VECTOR_A> & lhs, const VectorLike<T, VECTOR_B> & rhs) {
  Vector<S> result = lhs;
  result += rhs;
  return result;
}
template <typename S, typename T, template <typename> class VECTOR_A, template <typename> class VECTOR_B>
Vector<S> operator -(const VectorLike<S, VECTOR_A> & lhs, const VectorLike<T, VECTOR_B> & rhs) {
  Vector<S> result = lhs;
  result -= rhs;
  return result;
}
template <typename S, typename T, template <typename> class VECTOR_A, template <typename> class VECTOR_B>
Vector<S> operator *(const VectorLike<S, VECTOR_A> & lhs, const VectorLike<T, VECTOR_B> & rhs) {
  Vector<S> result = lhs;
  result *= rhs;
  return result;
}
template <typename S, typename T, template <typename> class VECTOR_A, template <typename> class VECTOR_B>
Vector<S> operator /(const VectorLike<S, VECTOR_A> & lhs, const VectorLike<T, VECTOR_B> & rhs) {
  Vector<S> result = lhs;
  result /= rhs;
  return result;
}
// +, -, ... Vector with scalar rhs (broadcast):
template <typename T, template <typename> class VECTOR_A>
Vector<T> operator +(const VectorLike<T, VECTOR_A> & lhs, T rhs) {
  Vector<T> result = lhs;
  result += rhs;
  return result;
}
template <typename T, template <typename> class VECTOR_A>
Vector<T> operator -(const VectorLike<T, VECTOR_A> & lhs, T rhs) {
  Vector<T> result = lhs;
  result -= rhs;
  return result;
}
template <typename T, template <typename> class VECTOR_A>
Vector<T> operator *(const VectorLike<T, VECTOR_A> & lhs, T rhs) {
  Vector<T> result = lhs;
  result *= rhs;
  return result;
}
template <typename T, template <typename> class VECTOR_A>
Vector<T> operator /(const VectorLike<T, VECTOR_A> & lhs, T rhs) {
  Vector<T> result = lhs;
  result /= rhs;
  return result;
}
// Scalar numerator divided element-wise by a vector:
template <typename T, template <typename> class VECTOR_A>
Vector<T> operator /(T lhs, const VectorLike<T, VECTOR_A> & rhs) {
  Vector<T> result(rhs.size(), lhs);
  result /= rhs;
  return result;
}
// Returns the Vector [T(0), T(1), ..., T(length-1)].
template <typename T>
Vector<T> seq(long length) {
#ifdef SHAPE_CHECK
  // A negative length would silently wrap to a huge unsigned value
  // when passed to the Vector constructor; catch it in checked builds
  // (consistent with the SHAPE_CHECK asserts used elsewhere):
  assert(length >= 0);
#endif
  Vector<T> result(length);
  for (unsigned long k=0; k<result.size(); ++k)
    result[k] = T(k);
  return result;
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Tensor/TemplateSearch.hpp | .hpp | 1,612 | 44 | #ifndef _TEMPLATESEARCH_HPP
#define _TEMPLATESEARCH_HPP
#include <assert.h>
#include <utility>
typedef unsigned char TEMPLATE_SEARCH_INT_TYPE;
// For dynamically looking up a class and calling the static
// function apply(...). This can be preferred to LogSearch when you
// are amortizing the cost of lookoup. For instance, if the log size
// of an FFT is being looked up, then proceeding in ascending order
// from 0 will guarantee that the search cost is linear in the log
// size, meaning that it's in O(log(N)), which is dwarfed by the FFT
// cost O(N log(N)). This can therefore be more efficient when
// you're processing many short FFTs.
// Linearly scans the compile-time range [MINIMUM, MAXIMUM] for the
// runtime value v and dispatches to WORKER<v>::apply(args...). The
// scan proceeds in ascending order, so for workloads like FFTs (where
// small sizes dominate) the lookup cost is negligible.
template <TEMPLATE_SEARCH_INT_TYPE MINIMUM, TEMPLATE_SEARCH_INT_TYPE MAXIMUM, template <TEMPLATE_SEARCH_INT_TYPE> class WORKER>
class LinearTemplateSearch {
public:
  template <typename...ARG_TYPES>
  inline static void apply(TEMPLATE_SEARCH_INT_TYPE v, ARG_TYPES && ... args) {
    // Matched the current candidate: dispatch and stop recursing.
    if (v == MINIMUM) {
      WORKER<MINIMUM>::apply(std::forward<ARG_TYPES>(args)...);
      return;
    }
    // Otherwise continue scanning upward through the template range:
    LinearTemplateSearch<MINIMUM+1, MAXIMUM, WORKER>::apply(v, std::forward<ARG_TYPES>(args)...);
  }
};
// Recursion terminus: v must equal MAXIMUM by the time the scan gets
// here. Previously the signature was duplicated under #ifdef NDEBUG
// solely to avoid an unused-parameter warning when the assert compiles
// away; a single signature with an explicit (void)v is equivalent and
// cannot drift out of sync.
template <TEMPLATE_SEARCH_INT_TYPE MAXIMUM, template <TEMPLATE_SEARCH_INT_TYPE> class WORKER>
class LinearTemplateSearch<MAXIMUM, MAXIMUM, WORKER> {
public:
  template <typename...ARG_TYPES>
  inline static void apply(TEMPLATE_SEARCH_INT_TYPE v, ARG_TYPES&&... args) {
    assert(v == MAXIMUM);
    (void)v;  // silences the unused warning when NDEBUG strips the assert
    WORKER<MAXIMUM>::apply(std::forward<ARG_TYPES>(args)...);
  }
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Tensor/TensorLike.hpp | .hpp | 4,028 | 115 | #ifndef _TENSORLIKE_HPP
#define _TENSORLIKE_HPP
template <typename T>
class TensorView;
template <typename T>
class WritableTensorView;
// Never instantiate these; always pass by reference & or &&:
// CRTP base class for read-only tensor-shaped types (Tensor,
// TensorView, ...). Every accessor forwards to the derived TENSOR<T>
// via static_cast, so there is no virtual dispatch.
template <typename T, template <typename> class TENSOR >
class TensorLike {
public:
  // Number of axes:
  unsigned char dimension() const {
    return static_cast<const TENSOR<T> &>(*this).dimension();
  }
  // Total number of elements in the view:
  unsigned long flat_size() const {
    return static_cast<const TENSOR<T> &>(*this).flat_size();
  }
  // Flat (row-major) element access, delegated to the derived type:
  const T & operator[](unsigned long i) const {
    return static_cast<const TENSOR<T> &>(*this)[i];
  }
  // Tuple (one index per axis) access; the tuple is flattened against
  // data_shape() and then routed through the flat [] above.
  const T & operator[](const_tup_t tuple) const {
#ifdef BOUNDS_CHECK
    for (unsigned char k=0; k<dimension(); ++k)
      assert( tuple[k] < view_shape()[k] );
#endif
    return (*this)[ tuple_to_index(tuple, this->data_shape(), this->dimension()) ];
  }
  template <template <typename> class VECTOR>
  const T & operator[](const VectorLike<unsigned long, VECTOR> & tuple) const {
    return (*this)[ static_cast<const_tup_t>(tuple) ];
  }
  // Shape of the backing allocation:
  const Vector<unsigned long> & data_shape() const {
    return static_cast<const TENSOR<T> &>(*this).data_shape();
  }
  // Shape of the visible window into that allocation:
  const Vector<unsigned long> & view_shape() const {
    return static_cast<const TENSOR<T> &>(*this).view_shape();
  }
  // Read-only sub-views beginning at the given tuple:
  template <template <typename> class VECTOR>
  TensorView<T> start_at_const(const VectorLike<unsigned long, VECTOR> & start) const {
    return static_cast<const TENSOR<T> &>(*this).start_at_const(start);
  }
  template <template <typename> class VECTOR>
  TensorView<T> start_at_const(const VectorLike<unsigned long, VECTOR> & start, const VectorLike<unsigned long, VECTOR> & new_view_shape) const {
    return static_cast<const TENSOR<T> &>(*this).start_at_const(start, new_view_shape);
  }
  // Recursively prints a nested-bracket rendering of the tensor.
  // data_shape strides the flat storage; view_shape bounds what is
  // actually printed on each axis.
  static void print_helper(std::ostream & os, const T*const rhs, const_tup_t data_shape, const_tup_t view_shape, unsigned char dimension) {
    os << "[";
    if (dimension > 1) {
      // Stride between consecutive slices along the first axis:
      unsigned long flat_size_without_first = flat_length(data_shape+1, dimension-1);
      for (unsigned long i=0; i<view_shape[0]; ++i) {
        print_helper(os, rhs + i*flat_size_without_first, data_shape+1, view_shape+1, dimension-1);
        if (i != (view_shape[0]-1))
          os << ", ";
      }
    }
    else {
      // 1D base case: print the elements directly.
      for (unsigned long i=0; i<view_shape[0]; ++i) {
        os << rhs[i];
        if (i != (view_shape[0]-1))
          os << ", ";
      }
    }
    os << "]";
  }
};
// CRTP base adding mutable element access and writable sub-views on
// top of TensorLike.
template <typename T, template <typename> class TENSOR >
class WritableTensorLike : public TensorLike<T, TENSOR> {
public:
  // Mutable flat (row-major) element access:
  T & operator[](unsigned long i) {
    return static_cast<TENSOR<T> &>(*this)[i];
  }
  // Mutable tuple access; flattened against data_shape():
  T & operator[](const_tup_t tuple) {
#ifdef BOUNDS_CHECK
    for (unsigned char k=0; k<this->dimension(); ++k)
      assert( tuple[k] < this->view_shape()[k] );
#endif
    return (*this)[ tuple_to_index(tuple, this->data_shape(), this->dimension()) ];
  }
  template <template <typename> class VECTOR>
  T & operator[](const VectorLike<unsigned long, VECTOR> & tuple) {
    return (*this)[ static_cast<const_tup_t>(tuple) ];
  }
  // Writable sub-views. Bug fix: these previously cast *this to a
  // *const* TENSOR<T>& before calling the non-const start_at(), which
  // cannot produce a WritableTensorView (its constructor requires a
  // non-const Tensor<T>&) and would fail on instantiation. Cast to a
  // mutable reference instead:
  template <template <typename> class VECTOR>
  WritableTensorView<T> start_at(const VectorLike<unsigned long, VECTOR> & start) {
    return static_cast<TENSOR<T> &>(*this).start_at(start);
  }
  template <template <typename> class VECTOR>
  WritableTensorView<T> start_at(const VectorLike<unsigned long, VECTOR> & start, const VectorLike<unsigned long, VECTOR> & new_view_shape) {
    return static_cast<TENSOR<T> &>(*this).start_at(start, new_view_shape);
  }
};
// Stream output for any TensorLike; the "t:" prefix distinguishes a
// printed 1D Tensor from a printed Vector.
template <typename T, template <typename> class TENSOR>
std::ostream & operator<<(std::ostream & os, const TensorLike<T, TENSOR> & rhs) {
  // To distinguish 1D Tensor from Vector:
  os << "t:";
  if ( rhs.flat_size() == 0 ) {
    // Empty tensor: emit one nested bracket pair per axis (e.g.
    // "[[]]" for a 2D tensor) rather than dereferencing element 0.
    for (unsigned char k=0; k<rhs.dimension(); ++k)
      os << "[";
    for (unsigned char k=0; k<rhs.dimension(); ++k)
      os << "]";
  }
  else
    rhs.print_helper(os, &rhs[0ul], rhs.data_shape(), rhs.view_shape(), rhs.dimension());
  return os;
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Tensor/product.hpp | .hpp | 415 | 19 | #ifndef _PRODUCT_HPP
#define _PRODUCT_HPP
// Product of the first `length` elements of v; returns T(1) for an
// empty range. Bug fix: the return type was previously declared
// `unsigned long` even though the accumulator is T, silently
// truncating the result for floating-point element types (the
// VectorLike overload below already declares a T return, confirming
// the intent). For T = unsigned long the behavior is unchanged.
template <typename T>
inline T product(const T* __restrict const v, unsigned long length) {
  T res = 1ul;
  for (unsigned long k=0; k<length; ++k)
    res *= v[k];
  return res;
}
// Product of all elements of a vector-like (delegates to the raw
// pointer overload above; empty input yields 1).
template <typename T, template <typename> class VECTOR>
inline T product(const VectorLike<T, VECTOR> & v) {
  return product(static_cast<const T*const>(v), v.size());
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Tensor/any_all.hpp | .hpp | 437 | 21 | #ifndef _ANY_ALL_HPP
#define _ANY_ALL_HPP
// True iff at least one element of rhs is true (false for an empty
// vector).
template <template <typename> class VECTOR>
bool any(const VectorLike<bool, VECTOR> & rhs) {
  const unsigned long n = rhs.size();
  unsigned long k = 0;
  // Scan until the first true element (or the end):
  while (k < n && ! rhs[k])
    ++k;
  return k < n;
}
// True iff every element of rhs is true (vacuously true for an empty
// vector).
template <template <typename> class VECTOR>
bool all(const VectorLike<bool, VECTOR> & rhs) {
  const unsigned long n = rhs.size();
  unsigned long k = 0;
  // Scan until the first false element (or the end):
  while (k < n && rhs[k])
    ++k;
  return k == n;
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Tensor/embed.hpp | .hpp | 713 | 26 | #ifndef _EMBED_HPP
#define _EMBED_HPP
template <typename T>
class Tensor;
// Copies every element of source into the top-left corner of dest
// (element-type cast S <- T); dest must be at least as large as
// source on every axis (checked under SHAPE_CHECK).
template <typename S, typename T, template <typename> class TENSOR_A, template <typename> class TENSOR_B>
void embed(WritableTensorLike<S, TENSOR_A> & dest, const TensorLike<T, TENSOR_B> & source) {
#ifdef SHAPE_CHECK
  assert( dest.view_shape() >= source.view_shape() );
#endif
  // Iterate only over source's extent, writing into dest in place:
  apply_tensors([](S & out, const T & in) {
      out = (S)in;
    },
    source.view_shape(),
    dest, source);
}
// Rvalue overload so a temporary writable view (e.g. the result of
// start_at) can be used directly as the destination:
template <typename S, typename T, template <typename> class TENSOR_A, template <typename> class TENSOR_B>
void embed(WritableTensorLike<S, TENSOR_A> && dest, const TensorLike<T, TENSOR_B> & source) {
  embed(dest, source);
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Tensor/alloc.hpp | .hpp | 1,329 | 52 | #ifndef _ALLOC_HPP
#define _ALLOC_HPP
//#include <stdlib.h>
//#include <stdio.h>
#include <string.h>
#include <assert.h>
#ifdef _MSC_VER
#include <malloc.h>
#define alloca _alloca
#else
#include <alloca.h>
#endif
// Note: benefits from being tuned for specific architecture. Could do this with #ifdef...s for AVX512, etc.
const unsigned long int ALLOCATION_ALIGNMENT = 512;
// Allocates uninitialized storage for num_elements values of T.
// Despite the name this currently wraps plain malloc: the
// posix_memalign/_aligned_malloc implementation (aligned to
// ALLOCATION_ALIGNMENT, safe for AVX etc.) caused a roughly 3x
// slowdown and was disabled; see revision history for that variant.
template <typename T>
T* aligned_malloc(unsigned long num_elements) {
  T*result = (T*)malloc(num_elements*sizeof(T));
  // Bug fix: malloc(0) may legitimately return NULL, so a NULL result
  // only indicates out-of-memory when a nonzero size was requested
  // (previously a zero-element allocation could trip the assert):
  assert(num_elements == 0 || result != NULL);
  return result;
}
// Allocates storage for num_elements values of T and zero-fills it.
template <typename T>
T* aligned_calloc(unsigned long num_elements) {
  // Allocate first, then zero the whole buffer:
  T* result = aligned_malloc<T>(num_elements);
  assert(result != NULL);
  memset(result, 0, num_elements*sizeof(T));
  return result;
}
// Variable length array stack allocation:
// NOTE(review): alloca's storage is released when the function that
// *called* alloca returns — here that is vla_alloc itself, so unless
// this wrapper is inlined into its caller the returned pointer dangles
// immediately. Verify callers are compiled with inlining enabled, or
// move the alloca to the call site.
template <typename T>
T* vla_alloc(unsigned long num_elements) {
  return (T*)alloca(num_elements*sizeof(T));
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Tensor/min_max.hpp | .hpp | 610 | 31 | #ifndef _MIN_MAX_HPP
#define _MIN_MAX_HPP
template <typename T, template <typename> class VECTOR>
T min(const VectorLike<T, VECTOR> & rhs) {
#ifdef SHAPE_CHECK
assert(rhs.size() > 0);
#endif
T res = rhs[0];
for (unsigned long k=1; k<rhs.size(); ++k)
if (res > rhs[k])
res = rhs[k];
return res;
}
template <typename T, template <typename> class VECTOR>
T max(const VectorLike<T, VECTOR> & rhs) {
#ifdef SHAPE_CHECK
assert(rhs.size() > 0);
#endif
T res = rhs[0];
for (unsigned long k=1; k<rhs.size(); ++k)
if (res < rhs[k])
res = rhs[k];
return res;
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Tensor/Vector.hpp | .hpp | 6,471 | 248 | #ifndef _VECTOR_HPP
#define _VECTOR_HPP
#include <iostream>
#include <vector>
#include "alloc.hpp"
#include "VectorLike.hpp"
#include "VectorView.hpp"
#include "VectorArithmetic.hpp"
#include "VectorComparison.hpp"
#include "min_max.hpp"
#include "p_norm.hpp"
#include "any_all.hpp"
// Note: Vector<T> is for simple numeric T types; uses aligned_malloc
// rather than new[], so no constructor is called:
// Owning, heap-allocated numeric vector. Storage comes from
// aligned_malloc/aligned_calloc (plain malloc under the hood), so T's
// constructors and destructors are never run — T must be a trivially
// constructible/destructible numeric type.
template <typename T>
class Vector : public WritableVectorLike<T, Vector> {
protected:
  unsigned long _length;  // number of elements currently owned
  T* __restrict _data;    // malloc'd storage; NULL when empty
public:
  // Allow Vector<S> to reach into Vector<T>'s internals (used by
  // create_reinterpreted below):
  template <typename S>
  friend class Vector;
  // Empty vector: no allocation.
  Vector():
    _length(0),
    _data(NULL)
  { }
  // Zero-initialized vector of the given length.
  explicit Vector(unsigned long length_param):
    _length(length_param)
  {
    _data = aligned_calloc<T>(_length);
  }
  // Vector of the given length with every element set to fill_value.
  explicit Vector(unsigned long length_param, T fill_value):
    _length(length_param)
  {
    _data = aligned_malloc<T>(_length);
    this->fill(fill_value);
  }
  // Copies length_param elements out of the raw array fill_vec.
  explicit Vector(unsigned long length_param, const T*const fill_vec):
    _length(length_param)
  {
    _data = aligned_malloc<T>(_length);
    for (unsigned long k=0; k<_length; ++k)
      _data[k] = fill_vec[k];
  }
  // Note: the following two constructors would be duplicates; this is
  // necessary because copy ctor is deleted when defining move
  // ctor. Delegate the constructor to the most general type to avoid
  // duplicate code:
  // Casts element S to element T:
  template <typename S, template <typename> class VECTOR>
  Vector(const VectorLike<S, VECTOR> & rhs):
    _length(rhs.size())
  {
    _data = aligned_malloc<T>(_length);
    for (unsigned long k=0; k<_length; ++k)
      _data[k] = (T)rhs[k];
  }
  Vector(const Vector<T> & rhs):
    Vector( static_cast<const VectorLike<T, evergreen::Vector> &>(rhs) )
  { }
  // Move ctor: steals rhs's buffer and leaves rhs empty but valid.
  Vector(Vector<T> && rhs):
    _length(rhs._length)
  {
    _data = rhs._data;
    rhs._data = NULL;
    rhs._length = 0;
  }
  Vector(const std::initializer_list<T> & rhs):
    _length(rhs.size())
  {
    _data = aligned_malloc<T>(_length);
    unsigned long k=0;
    for (auto iter = rhs.begin(); iter != rhs.end(); ++k, ++iter)
      _data[k] = *iter;
  }
  Vector(const std::vector<T> & rhs):
    _length(rhs.size())
  {
    _data = aligned_malloc<T>(_length);
    for (unsigned long k=0; k<_length; ++k)
      _data[k] = rhs[k];
  }
  // Casts element S to element T:
  template <typename S, template <typename> class VECTOR>
  const Vector & operator =(const VectorLike<S, VECTOR> & rhs) {
    // Ensure the vectors do not refer to the same memory (otherwise
    // freeing _data would also free rhs._data):
    assert((_data + _length <= &rhs[0ul]) || (&rhs[0ul] + rhs.size() <= _data));
    clear();
    _length = rhs.size();
    _data = aligned_malloc<T>(_length);
    for (unsigned long k=0; k<_length; ++k)
      _data[k] = (T)rhs[k];
    return *this;
  }
  // Necessary because = (const&) operator is deleted when = (&&)
  // operator is defined:
  const Vector & operator =(const Vector & rhs) {
    (*this) = static_cast<const VectorLike<T, evergreen::Vector> &>(rhs);
    return *this;
  }
  // Move assignment: swaps buffers so rhs's destructor frees ours.
  const Vector & operator =(Vector && rhs) {
    // Ensure no overlap (as above):
    assert((_data + _length <= rhs._data) || (rhs._data + rhs._length <= _data));
    clear();
    std::swap(_length, rhs._length);
    std::swap(_data, rhs._data);
    return *this;
  }
  ~Vector() {
    clear();
  }
  const T & operator [](unsigned long i) const {
#ifdef BOUNDS_CHECK
    assert(i < size());
#endif
    return _data[i];
  }
  T & operator [](unsigned long i) {
#ifdef BOUNDS_CHECK
    assert(i < size());
#endif
    return _data[i];
  }
  unsigned long size() const {
    return _length;
  }
  // Frees the buffer and resets to the empty state.
  void clear() {
    _length = 0;
    if (_data != NULL) {
      free(_data);
      _data = NULL;
    }
  }
  operator const T*const() const {
    return _data;
  }
  // NOTE(review): this conversion hands out a mutable pointer from a
  // const member function, bypassing const-correctness — confirm this
  // is intentional before relying on it.
  operator T*const() const {
    return _data;
  }
  // Writable view covering [start, _length):
  WritableVectorView<T> start_at(unsigned long start) {
#ifdef SHAPE_CHECK
    assert(start < _length);
#endif
    return WritableVectorView<T>(*this, start);
  }
  // Writable view covering [start, start+length):
  WritableVectorView<T> start_at(unsigned long start, unsigned long length) {
#ifdef SHAPE_CHECK
    assert(start + length <= _length);
#endif
    return WritableVectorView<T>(*this, start, length);
  }
  // TODO: should the const versions have different names? The
  // compiler should be able to choose the const version if necessary,
  // so the only reason to say so explicitly is when a Vector is
  // passed by & (non const) and you explicitly want a non-writable
  // view...
  VectorView<T> start_at_const(unsigned long start) const {
#ifdef SHAPE_CHECK
    assert(start < _length);
#endif
    return VectorView<T>(*this, start);
  }
  VectorView<T> start_at_const(unsigned long start, unsigned long length) const {
#ifdef SHAPE_CHECK
    assert(start + length <= _length);
#endif
    return VectorView<T>(*this, start, length);
  }
  // Truncates to new_length elements, reallocating to release memory.
  void shrink(unsigned long new_length) {
#ifdef SHAPE_CHECK
    assert(new_length <= _length);
#endif
    // Note: This may not be cache aligned; it may be worth
    // investigating if there is a method to perform an
    // aligned_realloc.
    // NOTE(review): the realloc return value is not checked; a
    // shrinking realloc rarely fails, but new_length == 0 may yield
    // NULL here — verify callers never shrink to 0 expecting _data to
    // stay valid.
    _data = (T*) realloc(_data, new_length*sizeof(T));
    _length = new_length;
  }
  // Only bother creating a && version of this function; if rhs cannot
  // be destroyed by directly moving the pointer, then it would be
  // trivial to simply copy element by element in O(n) time. This O(1)
  // solution is necessarily destructive.
  // Don't use this unless you know what you're doing.
  template <typename S>
  static Vector<T> create_reinterpreted(Vector<S> && rhs) {
#ifdef SHAPE_CHECK
    assert(rhs._length * sizeof(S) % sizeof(T) == 0);
#endif
    Vector<T> res;
    res._data = (T*)rhs._data;
    rhs._data = NULL;
    res._length = (rhs._length * sizeof(S)) /sizeof(T);
    rhs._length = 0ul;
    return res;
  }
};
// Returns a new Vector containing rhs's elements in reverse order.
template <typename T>
Vector<T> reversed(const Vector<T> & rhs) {
  const unsigned long n = rhs.size();
  Vector<T> result(n);
  for (unsigned long k=0; k<n; ++k)
    result[k] = rhs[n-1-k];
  return result;
}
// Returns a new Vector holding lhs's elements followed by rhs's.
template <typename T>
Vector<T> concatenate(const Vector<T> & lhs, const Vector<T> & rhs) {
  Vector<T> joined(lhs.size() + rhs.size());
  unsigned long pos = 0;
  for (unsigned long k=0; k<lhs.size(); ++k, ++pos)
    joined[pos] = lhs[k];
  for (unsigned long k=0; k<rhs.size(); ++k, ++pos)
    joined[pos] = rhs[k];
  return joined;
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Tensor/VectorComparison.hpp | .hpp | 3,818 | 116 | #ifndef _VECTORCOMPARISON_HPP
#define _VECTORCOMPARISON_HPP
// Note: Vector comparison is currently performed overall rather than
// element-wise. This could be changed by having every function return
// a Vector<bool>, which could be used with any(...) and all(...)
// functions, which would aggregate. The downside of that approach is
// that [1,2] == [1,4,5] would no longer return false; instead, it
// would assert(false).
// However, a downside to the current implementation is that x <= y
// can be false and x > y can be false (typically, exactly one of
// these must be true).
// Whole-vector comparisons: a comparison holds only when the sizes
// match AND every element pair satisfies it (so, as documented above,
// x <= y and x > y can both be false).
template <typename T, template <typename> class VECTOR_A, template <typename> class VECTOR_B>
bool operator ==(const VectorLike<T, VECTOR_A> & lhs, const VectorLike<T, VECTOR_B> & rhs) {
  const unsigned long n = lhs.size();
  if (n != rhs.size())
    return false;
  unsigned long i = 0;
  while (i < n && lhs[i] == rhs[i])
    ++i;
  return i == n;
}
template <typename T, template <typename> class VECTOR_A, template <typename> class VECTOR_B>
bool operator !=(const VectorLike<T, VECTOR_A> & lhs, const VectorLike<T, VECTOR_B> & rhs) {
  return !( lhs == rhs );
}
template <typename T, template <typename> class VECTOR_A, template <typename> class VECTOR_B>
bool operator <(const VectorLike<T, VECTOR_A> & lhs, const VectorLike<T, VECTOR_B> & rhs) {
  const unsigned long n = lhs.size();
  if (n != rhs.size())
    return false;
  // Every element must be strictly less:
  for (unsigned long i=0; i<n; ++i)
    if (lhs[i] >= rhs[i])
      return false;
  return true;
}
template <typename T, template <typename> class VECTOR_A, template <typename> class VECTOR_B>
bool operator >(const VectorLike<T, VECTOR_A> & lhs, const VectorLike<T, VECTOR_B> & rhs) {
  const unsigned long n = lhs.size();
  if (n != rhs.size())
    return false;
  // Every element must be strictly greater:
  for (unsigned long i=0; i<n; ++i)
    if (lhs[i] <= rhs[i])
      return false;
  return true;
}
template <typename T, template <typename> class VECTOR_A, template <typename> class VECTOR_B>
bool operator <=(const VectorLike<T, VECTOR_A> & lhs, const VectorLike<T, VECTOR_B> & rhs) {
  const unsigned long n = lhs.size();
  if (n != rhs.size())
    return false;
  for (unsigned long i=0; i<n; ++i)
    if (lhs[i] > rhs[i])
      return false;
  return true;
}
template <typename T, template <typename> class VECTOR_A, template <typename> class VECTOR_B>
bool operator >=(const VectorLike<T, VECTOR_A> & lhs, const VectorLike<T, VECTOR_B> & rhs) {
  const unsigned long n = lhs.size();
  if (n != rhs.size())
    return false;
  for (unsigned long i=0; i<n; ++i)
    if (lhs[i] < rhs[i])
      return false;
  return true;
}
// Vector-vs-scalar comparisons: the comparison holds when EVERY
// element satisfies it against the scalar (vacuously true for an
// empty vector).
template <typename T, template <typename> class VECTOR_A>
bool operator ==(const VectorLike<T, VECTOR_A> & lhs, T rhs) {
  const unsigned long n = lhs.size();
  unsigned long i = 0;
  while (i < n && lhs[i] == rhs)
    ++i;
  return i == n;
}
template <typename T, template <typename> class VECTOR_A>
bool operator !=(const VectorLike<T, VECTOR_A> & lhs, T rhs) {
  return ! (lhs == rhs);
}
template <typename T, template <typename> class VECTOR_A>
bool operator <(const VectorLike<T, VECTOR_A> & lhs, T rhs) {
  const unsigned long n = lhs.size();
  for (unsigned long i=0; i<n; ++i)
    if (lhs[i] >= rhs)
      return false;
  return true;
}
template <typename T, template <typename> class VECTOR_A>
bool operator >(const VectorLike<T, VECTOR_A> & lhs, T rhs) {
  const unsigned long n = lhs.size();
  for (unsigned long i=0; i<n; ++i)
    if (lhs[i] <= rhs)
      return false;
  return true;
}
template <typename T, template <typename> class VECTOR_A>
bool operator <=(const VectorLike<T, VECTOR_A> & lhs, T rhs) {
  const unsigned long n = lhs.size();
  for (unsigned long i=0; i<n; ++i)
    if (lhs[i] > rhs)
      return false;
  return true;
}
template <typename T, template <typename> class VECTOR_A>
bool operator >=(const VectorLike<T, VECTOR_A> & lhs, T rhs) {
  const unsigned long n = lhs.size();
  for (unsigned long i=0; i<n; ++i)
    if (lhs[i] < rhs)
      return false;
  return true;
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Tensor/TensorView.hpp | .hpp | 8,144 | 236 | #ifndef _TENSORVIEW_HPP
#define _TENSORVIEW_HPP
template <typename T>
class Tensor;
// Note: TensorView types are for local, temporary use only (e.g.,
// to allow TRIOT expressions that do not start at the same tuple
// indices). But they should not be stored long term; the tensor
// pointer may change, so long-term use is unsafe.
template <typename T>
class TensorView : public TensorLike<T, TensorView> {
protected:
const Tensor<T> & _tensor_ref;
const unsigned long _flat_start;
const Vector<unsigned long> _view_shape;
const unsigned long _flat_size;
// For constructing a TensorView from another TensorView:
explicit TensorView(const TensorView<T> & ten_con, const Vector<unsigned long> & start):
_tensor_ref(ten_con._tensor_ref),
_flat_start( ten_con._flat_start + tuple_to_index(start, data_shape(), ten_con.dimension()) ),
_view_shape(ten_con.data_shape() - start),
_flat_size(flat_length(_view_shape))
{
#ifdef SHAPE_CHECK
// Allows views of size 0:
assert( start <= ten_con.data_shape() );
#endif
}
public:
// View full Tensor:
explicit TensorView(const Tensor<T> & ten, const Vector<unsigned long> & start):
_tensor_ref(ten),
_flat_start(tuple_to_index(start, data_shape(), ten.dimension())),
_view_shape(ten.data_shape() - start),
_flat_size(flat_length(_view_shape))
{
#ifdef SHAPE_CHECK
// Allows views of size 0:
assert( start <= ten.data_shape() );
#endif
}
// View partial Tensor:
template <template <typename> class VECTOR_A, template <typename> class VECTOR_B>
explicit TensorView(const Tensor<T> & ten, const VectorLike<unsigned long, VECTOR_A> & start, const VectorLike<unsigned long, VECTOR_B> & new_view_shape):
_tensor_ref(ten),
_flat_start(tuple_to_index(start, data_shape(), ten.dimension())),
_view_shape(new_view_shape),
_flat_size(flat_length(_view_shape))
{
#ifdef SHAPE_CHECK
// Allows views of size 0:
assert( start + new_view_shape <= ten.data_shape() );
#endif
}
// Note: This is used by TRIOT expressions of views, but is
// deceptive for external use (the view is not guaranteed to be
// contiguous). One option to address this would be to make the []
// operator private and making the TRIOT helper classes friend
// classes; however, access to the [] operator is still desirable to
// the informed user, so it has not yet been changed.
const T & operator[] (unsigned long index) const {
#ifdef BOUNDS_CHECK
// Note: This is expensive; it may be preferred to simply not
// allow [] operation outside of TRIOT expressions (as described
// above).
// Compute starting tuple and starting index + current index tuple
// in reference tensor:
unsigned long* __restrict start_tuple = index_to_tuple(_flat_start, data_shape(), dimension());
unsigned long* __restrict current_tuple = index_to_tuple(_flat_start+index, data_shape(), dimension());
// Subtract starting tuple to get tuple corresponding to view:
for (unsigned char k=0; k<dimension(); ++k)
current_tuple[k] -= start_tuple[k];
// Check that every axis is in range for this view:
for (unsigned char k=0; k<dimension(); ++k)
assert(current_tuple[k] < view_shape()[k]);
free(start_tuple);
free(current_tuple);
#endif
return _tensor_ref[_flat_start + index];
}
// Rewiring other [] operators to TensorLike (unfortunately,
// the compiler cannot detect the appropriate one on its own):
const T & operator[](const_tup_t tuple) const {
return static_cast<const TensorLike<T, evergreen::TensorView> &>(*this)[tuple];
}
template <template <typename> class VECTOR>
const T & operator[](const VectorLike<unsigned long, VECTOR> & tuple) const {
return static_cast<const TensorLike<T, evergreen::TensorView> &>(*this)[tuple];
}
unsigned char dimension() const {
return _tensor_ref.dimension();
}
TensorView<T> start_at_const(const Vector<unsigned long> & start) const {
#ifdef SHAPE_CHECK
assert(start.size() == dimension());
assert(start < view_shape());
#endif
return TensorView<T>(*this, start);
}
// Shape of the full underlying tensor (not of this view):
const Vector<unsigned long> & data_shape() const {
return _tensor_ref.data_shape();
}
// Shape of this view:
const Vector<unsigned long> & view_shape() const {
return _view_shape;
}
// Number of elements spanned by the view shape:
unsigned long flat_size() const {
return _flat_size;
}
};
// Mutable view into a (sub-)region of a Tensor<T>. Stores a reference
// to the viewed tensor plus a flat starting offset and a view shape;
// no data is copied. A view is not guaranteed to be contiguous in the
// underlying flat storage.
template <typename T>
class WritableTensorView : public WritableTensorLike<T, WritableTensorView> {
protected:
// Referenced tensor (not owned; must outlive the view):
Tensor<T> & _tensor_ref;
// Flat index (into _tensor_ref's storage) of the view's first element:
const unsigned long _flat_start;
// Length of each axis of the view:
const Vector<unsigned long> _view_shape;
// Product of _view_shape:
const unsigned long _flat_size;
// For constructing a TensorView from another TensorView:
// (Members initialize in declaration order, so _tensor_ref is bound
// before data_shape() is called in the initializer list below.)
// NOTE(review): the sub-view's shape and bounds are computed from
// ten_con.data_shape(), not ten_con.view_shape(); for a view of an
// already-restricted view this looks like it could extend past the
// parent view -- confirm intended semantics.
explicit WritableTensorView(WritableTensorView<T> & ten_con, const Vector<unsigned long> & start):
_tensor_ref(ten_con._tensor_ref),
_flat_start( ten_con._flat_start + tuple_to_index(start, data_shape(), ten_con.dimension()) ),
_view_shape(ten_con.data_shape() - start),
_flat_size(flat_length(_view_shape))
{
#ifdef SHAPE_CHECK
// Allows views of size 0:
assert( start <= ten_con.data_shape() );
#endif
}
public:
// View full Tensor: spans from `start` to the end of each axis:
explicit WritableTensorView(Tensor<T> & ten, const Vector<unsigned long> & start):
_tensor_ref(ten),
_flat_start(tuple_to_index(start, data_shape(), ten.dimension())),
_view_shape(ten.data_shape() - start),
_flat_size(flat_length(_view_shape))
{
#ifdef SHAPE_CHECK
// Allows views of size 0:
assert( start <= ten.data_shape() );
#endif
}
// View partial Tensor: explicit start and view shape:
template <template <typename> class VECTOR_A, template <typename> class VECTOR_B>
explicit WritableTensorView(Tensor<T> & ten, const VectorLike<unsigned long, VECTOR_A> & start, const VectorLike<unsigned long, VECTOR_B> & new_view_shape):
_tensor_ref(ten),
_flat_start(tuple_to_index(start, data_shape(), ten.dimension())),
_view_shape(new_view_shape),
_flat_size(flat_length(_view_shape))
{
#ifdef SHAPE_CHECK
// Allows views of size 0:
assert( start + new_view_shape <= ten.data_shape() );
#endif
}
// Note: These are used by TRIOT expressions of views, but are
// deceptive for external use (the view is not guaranteed to be
// contiguous). See above. `index` is an offset into the underlying
// tensor's flat layout, relative to _flat_start:
T & operator[] (unsigned long index) {
return _tensor_ref[_flat_start + index];
}
const T & operator[] (unsigned long index) const {
return _tensor_ref[_flat_start + index];
}
// Rewiring other [] operators to TensorLike (unfortunately,
// the compiler cannot detect the appropriate one on its own):
const T & operator[](const_tup_t tuple) const {
return static_cast<const TensorLike<T, evergreen::WritableTensorView> &>(*this)[tuple];
}
T & operator[](const_tup_t tuple) {
return static_cast<WritableTensorLike<T, evergreen::WritableTensorView> &>(*this)[tuple];
}
template <template <typename> class VECTOR>
const T & operator[](const VectorLike<unsigned long, VECTOR> & tuple) const {
return static_cast<const TensorLike<T, evergreen::WritableTensorView> &>(*this)[tuple];
}
template <template <typename> class VECTOR>
T & operator[](const VectorLike<unsigned long, VECTOR> & tuple) {
return static_cast<WritableTensorLike<T, evergreen::WritableTensorView> &>(*this)[tuple];
}
// Dimensionality matches the referenced tensor:
unsigned char dimension() const {
return _tensor_ref.dimension();
}
// Mutable sub-view starting at `start` (relative to this view):
WritableTensorView<T> start_at(const Vector<unsigned long> & start) {
#ifdef SHAPE_CHECK
assert(start.size() == dimension());
assert(start < view_shape());
#endif
return WritableTensorView<T>(*this, start);
}
// Read-only sub-view starting at `start`:
TensorView<T> start_at_const(const Vector<unsigned long> & start) const {
#ifdef SHAPE_CHECK
assert(start.size() == dimension());
assert(start < view_shape());
#endif
return TensorView<T>(*this, start);
}
// Shape of the full underlying tensor (not of this view):
const Vector<unsigned long> & data_shape() const {
return _tensor_ref.data_shape();
}
// Shape of this view:
const Vector<unsigned long> & view_shape() const {
return _view_shape;
}
// Number of elements spanned by the view:
unsigned long flat_size() const {
return _flat_size;
}
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Tensor/Tensor.hpp | .hpp | 11,030 | 302 | #ifndef _TENSOR_HPP
#define _TENSOR_HPP
// #include this file to import Vector, Tensor, TRIOT, and everything
// else from this subdirectory.
#include <cmath>
#include "Vector.hpp"
#include "TensorUtils.hpp"
#include "TensorLike.hpp"
#include "TensorView.hpp"
#include "TRIOT.hpp"
#include "transpose.hpp"
#include "embed.hpp"
#include "ArrayShape.hpp"
// Note: Tensor<T> is for simple numeric T types; underneath, it uses
// Vector<T>, which uses aligned_malloc rather than new[], so no
// constructor is called:
template <typename T>
class Tensor : public WritableTensorLike<T, Tensor> {
protected:
// Length of each axis; dimension() == _data_shape.size():
Vector<unsigned long> _data_shape;
// Row-major flat storage; length equals the product of _data_shape:
Vector<T> _flat_vector;
public:
template <typename S>
friend class Tensor;
// Inits to dimension 0:
Tensor()
{ }
// Builds a Tensor from a nested C array T[A][B]...[Z], with the
// shape deduced at compile time by ArrayShape:
template <typename ARRAY>
static Tensor<T> from_array(const ARRAY & arr) {
// ARRAY should be T[A][B]...[Z]:
// Cast will be unsafe when ARRAY is not T[A][B]...[Z], but if so,
// the ArrayShape call below should not compile.
const T*arr_head = (const T*)arr;
constexpr unsigned long flat_length = sizeof(arr) / sizeof(T);
return Tensor<T>(std::move(ArrayShape<const T &,decltype(arr)>::eval(arr)), std::move(Vector<T>(flat_length, arr_head)));
}
// Allocates a tensor of the given shape (elements uninitialized;
// Vector<T> uses aligned_malloc, so no element constructors run):
explicit Tensor(Vector<unsigned long> && shape):
_data_shape(std::move(shape)),
_flat_vector(flat_length(_data_shape, _data_shape.size()))
{
#ifdef SHAPE_CHECK
assert(dimension() <= MAX_TENSOR_DIMENSION && "Tensor dimension is too large; adjust MAX_TENSOR_DIMENSION value");
#endif
}
// As above, but copies the shape from any VectorLike:
template <template <typename> class VECTOR>
explicit Tensor(const VectorLike<unsigned long, VECTOR> & shape):
_data_shape(shape),
_flat_vector(flat_length(_data_shape, _data_shape.size()))
{
#ifdef SHAPE_CHECK
assert(dimension() <= MAX_TENSOR_DIMENSION && "Tensor dimension is too large; adjust MAX_TENSOR_DIMENSION value");
#endif
}
// Shape + flat data (both copied); data length must match the shape:
template <template <typename> class VECTOR_A, template <typename> class VECTOR_B>
explicit Tensor(const VectorLike<unsigned long, VECTOR_A> & shape, const VectorLike<T, VECTOR_B> & data):
_data_shape(shape),
_flat_vector(data)
{
#ifdef SHAPE_CHECK
assert( flat_size() == flat_length(_data_shape, _data_shape.size()) );
assert(dimension() <= MAX_TENSOR_DIMENSION && "Tensor dimension is too large; adjust MAX_TENSOR_DIMENSION value");
#endif
}
// Shape copied, flat data moved in:
template <template <typename> class VECTOR>
explicit Tensor(const VectorLike<unsigned long, VECTOR> & shape, Vector<T> && data):
_data_shape(shape),
_flat_vector(std::move(data))
{
#ifdef SHAPE_CHECK
assert( flat_size() == flat_length(_data_shape, _data_shape.size()) );
assert(dimension() <= MAX_TENSOR_DIMENSION && "Tensor dimension is too large; adjust MAX_TENSOR_DIMENSION value");
#endif
}
// Shape copied from a VectorLike, flat data copied from a Vector:
template <template <typename> class VECTOR>
explicit Tensor(const VectorLike<unsigned long, VECTOR> & shape, const Vector<T> & data):
_data_shape(shape),
_flat_vector(data)
{
#ifdef SHAPE_CHECK
assert( flat_size() == flat_length(_data_shape, _data_shape.size()) );
assert(dimension() <= MAX_TENSOR_DIMENSION && "Tensor dimension is too large; adjust MAX_TENSOR_DIMENSION value");
#endif
}
// Note: the following constructor is used to help the compiler
// detect when you are creating using an initializer list Tensor<T>({1,2}):
explicit Tensor(Vector<unsigned long> && shape, const Vector<T> & data):
_data_shape(std::move(shape)),
_flat_vector(data)
{
#ifdef SHAPE_CHECK
assert( flat_size() == flat_length(_data_shape, _data_shape.size()) );
assert(dimension() <= MAX_TENSOR_DIMENSION && "Tensor dimension is too large; adjust MAX_TENSOR_DIMENSION value");
#endif
}
// Shape and flat data both moved in (no copies):
explicit Tensor(Vector<unsigned long> && shape, Vector<T> && data):
_data_shape(std::move(shape)),
_flat_vector(std::move(data))
{
#ifdef SHAPE_CHECK
assert( flat_size() == flat_length(_data_shape, _data_shape.size()) );
assert(dimension() <= MAX_TENSOR_DIMENSION && "Tensor dimension is too large; adjust MAX_TENSOR_DIMENSION value");
#endif
}
// Materialize any TensorLike (including views) into an owning
// Tensor; the shape is taken from the argument's view_shape():
template <template <typename> class TENSOR>
Tensor(const TensorLike<T, TENSOR> & rhs):
_data_shape(rhs.view_shape()),
_flat_vector(rhs.flat_size()) // FIXME: figure out why aligned_malloc results in indirect loss of memory
{
embed(*this, rhs);
}
// Copy constructor delegates to the TensorLike constructor above:
Tensor(const Tensor & rhs):
Tensor( static_cast<const TensorLike<T, evergreen::Tensor> &>(rhs) )
{ }
// Move constructor: steals shape and storage:
Tensor(Tensor<T> && rhs):
_data_shape( std::move(rhs._data_shape) ),
_flat_vector( std::move(rhs._flat_vector) )
{ }
// Assignment from any TensorLike: reallocates and copies via embed.
// NOTE(review): this uses rhs.data_shape() where the analogous
// constructor uses rhs.view_shape(); for restricted views these can
// differ -- confirm which is intended.
template <template <typename> class TENSOR>
const Tensor & operator =(const TensorLike<T, TENSOR> & rhs) {
_data_shape = rhs.data_shape();
_flat_vector = Vector<T>(flat_length(_data_shape, _data_shape.size()));
embed(*this, rhs);
return *this;
}
// Move assignment:
const Tensor & operator =(Tensor<T> && rhs) {
_data_shape = std::move(rhs._data_shape);
_flat_vector = std::move(rhs._flat_vector);
return *this;
}
// Copy assignment delegates to the TensorLike overload above:
const Tensor & operator =(const Tensor<T> & rhs) {
*this = static_cast<const TensorLike<T, evergreen::Tensor> &>(rhs);
return *this;
}
// Providing access as a view essentially gives access as a raw flat
// vector, but prevents assigning tensor.flat() = new_vector, which
// could allow the flat length and the shape to become inconsistent.
WritableVectorView<T> flat() {
return _flat_vector.start_at(0);
}
VectorView<T> flat_const() const {
return _flat_vector.start_at_const(0);
}
VectorView<T> flat() const {
return flat_const();
}
// Flat (row-major) element access:
T & operator[] (unsigned long index) {
return _flat_vector[index];
}
const T & operator[] (unsigned long index) const {
return _flat_vector[index];
}
// Rewiring other [] operators to TensorLike (unfortunately,
// the compiler cannot detect the appropriate one on its own):
const T & operator[](const_tup_t tuple) const {
return static_cast<const TensorLike<T, evergreen::Tensor> &>(*this)[tuple];
}
T & operator[](const_tup_t tuple) {
return static_cast<WritableTensorLike<T, evergreen::Tensor> &>(*this)[tuple];
}
template <template <typename> class VECTOR>
const T & operator[](const VectorLike<unsigned long, VECTOR> & tuple) const {
return static_cast<const TensorLike<T, evergreen::Tensor> &>(*this)[tuple];
}
template <template <typename> class VECTOR>
T & operator[](const VectorLike<unsigned long, VECTOR> & tuple) {
return static_cast<WritableTensorLike<T, evergreen::Tensor> &>(*this)[tuple];
}
// Number of axes:
unsigned char dimension() const {
return _data_shape.size();
}
// Mutable view spanning from `start` to the end of each axis:
WritableTensorView<T> start_at(const Vector<unsigned long> & start) {
#ifdef SHAPE_CHECK
assert(start.size() == dimension());
// Shape bounds will be checked by TensorView constructor.
#endif
return WritableTensorView<T>(*this, start);
}
// Read-only view spanning from `start` to the end of each axis:
TensorView<T> start_at_const(const Vector<unsigned long> & start) const {
#ifdef SHAPE_CHECK
assert(start.size() == dimension());
// Shape bounds will be checked by TensorView constructor.
#endif
return TensorView<T>(*this, start);
}
// Mutable view with an explicit shape:
WritableTensorView<T> start_at(const Vector<unsigned long> & start, const Vector<unsigned long> & new_view_shape) {
#ifdef SHAPE_CHECK
assert(start.size() == dimension());
// Shape bounds will be checked by TensorView constructor.
#endif
return WritableTensorView<T>(*this, start, new_view_shape);
}
// Read-only view with an explicit shape:
TensorView<T> start_at_const(const Vector<unsigned long> & start, const Vector<unsigned long> & new_view_shape) const {
#ifdef SHAPE_CHECK
assert(start.size() == dimension());
// Shape bounds will be checked by TensorView constructor.
#endif
return TensorView<T>(*this, start, new_view_shape);
}
const Vector<unsigned long> & data_shape() const {
return _data_shape;
}
// For an owning tensor the view is the whole data:
const Vector<unsigned long> & view_shape() const {
return data_shape();
}
unsigned long flat_size() const {
return _flat_vector.size();
}
// In-place crop to new_shape (keeping the [0, new_shape) corner).
void shrink(const Vector<unsigned long> & new_shape) {
#ifdef SHAPE_CHECK
assert(new_shape <= data_shape());
#endif
// Move elements from larger (or equal) indices into smaller ones
// (guaranteed because new_shape <= data_shape(), so each dest
// flattened index must be <= source flattened index; therefore,
// this should not overwrite any data until it's already been
// copied to the new location).
enumerate_for_each_tensors([this, &new_shape](const_tup_t counter, const unsigned long dim) {
unsigned long old_index = tuple_to_index(counter, _data_shape, dim);
unsigned long new_index = tuple_to_index(counter, new_shape, dim);
_flat_vector[new_index] = _flat_vector[old_index];
},
new_shape
);
_data_shape = new_shape;
_flat_vector.shrink( flat_length(_data_shape, _data_shape.size()) );
}
// In-place crop to the window [start, start + new_shape).
void shrink(const Vector<unsigned long> & start, const Vector<unsigned long> & new_shape) {
#ifdef SHAPE_CHECK
assert(new_shape <= data_shape());
#endif
// As above but with a start index; the tensor will be seen twice
// from two different perspectives, but this should be compatible
// with the restrict view in the underlying vector since the
// TensorView stores a reference to the flat vector rather than
// storing an alternate pointer:
TensorView<T> view = start_at_const(start);
enumerate_for_each_tensors([this, &view, &new_shape](const_tup_t counter, const unsigned long dim) {
unsigned long old_index = tuple_to_index(counter, _data_shape, dim);
unsigned long new_index = tuple_to_index(counter, new_shape, dim);
_flat_vector[new_index] = view[old_index];
},
new_shape
);
_data_shape = new_shape;
_flat_vector.shrink( flat_length(_data_shape, _data_shape.size()) );
}
// Reinterpret the same flat data under a new shape of equal size:
void reshape(const Vector<unsigned long> & new_shape) {
#ifdef SHAPE_CHECK
assert( flat_length(new_shape, new_shape.size()) == flat_size() );
#endif
_data_shape = new_shape;
}
// Release storage and reset every axis length to 0:
void clear() {
_flat_vector.clear();
_data_shape.fill(0ul);
}
// See Vector<T>::create_reinterpreted for description: the flat
// bytes are reinterpreted as T, and the innermost (fastest-varying)
// axis is rescaled by sizeof(S)/sizeof(T) to match:
template <typename S>
static Tensor<T> create_reinterpreted(Tensor<S> && rhs) {
#ifdef SHAPE_CHECK
assert(rhs.flat_size() * sizeof(S) % sizeof(T) == 0);
#endif
Tensor<T> res;
res._flat_vector = Vector<T>::create_reinterpreted(std::move(rhs._flat_vector));
res._data_shape = std::move(rhs._data_shape);
res._data_shape[res._data_shape.size()-1] *= sizeof(S);
res._data_shape[res._data_shape.size()-1] /= sizeof(T);
return res;
}
};
// TODO: these operators should be written for TensorLike (as in
// VectorComparison.hpp):
// Two tensors compare equal iff their shapes agree and every flat
// element agrees.
template <typename T>
bool operator ==(const Tensor<T> & lhs, const Tensor<T> & rhs) {
  const bool shape_mismatch = (lhs.data_shape() != rhs.data_shape());
  if (shape_mismatch)
    return false;
  // Shapes match; compare the flat storage elementwise:
  return lhs.flat() == rhs.flat();
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Tensor/p_norm.hpp | .hpp | 394 | 20 | #ifndef _P_NORM_HPP
#define _P_NORM_HPP
// Computes the p-norm ||rhs||_p = (sum_k (rhs[k])^p)^(1/p), rescaled
// by the maximum element for numeric stability (keeps the
// intermediate powers from overflowing/underflowing).
// NOTE(review): assumes rhs is elementwise nonnegative (e.g., a
// probability vector); pow() of a negative base with non-integer p
// would yield NaN -- confirm against callers.
// @param rhs  nonempty vector (asserted under SHAPE_CHECK)
// @param p    norm order, p > 0
// @return the p-norm; 0 for an all-zero vector
template <typename T, template <typename> class VECTOR>
T p_norm(const VectorLike<T, VECTOR> & rhs, T p) {
#ifdef SHAPE_CHECK
  assert(rhs.size() > 0);
#endif
  T max_val = max(rhs);
  // An all-zero vector has norm 0; without this guard the divisions
  // below would compute 0/0 = NaN:
  if (max_val == T(0))
    return T(0);
  T res = pow((rhs[0]/max_val), p);
  for (unsigned long k=1; k<rhs.size(); ++k)
    res += pow(rhs[k]/max_val, p);
  return max_val*pow(res, T(1.0)/p);
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Tensor/VectorLike.hpp | .hpp | 2,682 | 86 | #ifndef _VECTORLIKE_HPP
#define _VECTORLIKE_HPP
template <typename T>
class VectorView;
template <typename T>
class WritableVectorView;
// Never instantiate these; always pass by reference & or &&:
// CRTP base for read-only vector types (Vector, VectorView, ...):
// every operation forwards statically to the derived VECTOR<T>, so
// there is no virtual dispatch. Never instantiate directly; always
// pass by reference & or &&.
template <typename T, template <typename> class VECTOR >
class VectorLike {
public:
// Number of elements:
unsigned long size() const {
return static_cast<const VECTOR<T> &>(*this).size();
}
// Read-only element access:
const T & operator [](unsigned long i) const {
return static_cast<const VECTOR<T> &>(*this)[i];
}
// Implicit conversion to a raw const pointer (derived type decides
// what that points to):
operator const T*const() const {
return (const T*const) static_cast<const VECTOR<T> &>(*this);
}
// Read-only sub-view from `start` to the end:
VectorView<T> start_at_const(unsigned long start) const {
return static_cast<const VECTOR<T> &>(*this).start_at_const(start);
}
// Read-only sub-view of `length` elements from `start`:
VectorView<T> start_at_const(unsigned long start, unsigned long length) const {
return static_cast<const VECTOR<T> &>(*this).start_at_const(start, length);
}
};
// CRTP base adding mutation on top of VectorLike:
template <typename T, template <typename> class VECTOR >
class WritableVectorLike : public VectorLike<T, VECTOR> {
public:
// Mutable element access:
T & operator [](unsigned long i) {
return static_cast<VECTOR<T> &>(*this)[i];
}
// Assign `val` to every element:
void fill(T val) {
for (unsigned long k=0; k<this->size(); ++k)
(*this)[k] = val;
}
// Implicit conversion to a raw mutable pointer:
operator T*const() const {
return (T*const)static_cast<const VECTOR<T> &>(*this);
}
// Mutable sub-view from `start` to the end:
WritableVectorView<T> start_at(unsigned long start) const {
return static_cast<const VECTOR<T> &>(*this).start_at(start);
}
// Mutable sub-view of `length` elements from `start`:
WritableVectorView<T> start_at(unsigned long start, unsigned long length) const {
return static_cast<const VECTOR<T> &>(*this).start_at(start, length);
}
};
// Copies the elements of rhs into the prefix of lhs (lvalue
// destination overload), casting element type S -> T. Non-destructive
// with respect to rhs; lhs must be at least as long as rhs.
template <typename T, typename S, template <typename> class VECTOR_A, template <typename> class VECTOR_B>
void copy(WritableVectorLike<T, VECTOR_A> & lhs, const VectorLike<S, VECTOR_B> & rhs) {
#ifdef SHAPE_CHECK
  assert(lhs.size() >= rhs.size());
#endif
  // Counter is unsigned long to match size()'s type (the previous
  // unsigned int counter would wrap for vectors with more than
  // 2^32-1 elements):
  for (unsigned long k=0; k<rhs.size(); ++k)
    lhs[k] = (T)rhs[k];
}
template <typename T, typename S, template <typename> class VECTOR_A, template <typename> class VECTOR_B>
// rvalue reference so that it can accept temporary destination views
// (e.g., ten.flat()); however, the function is non-destructive:
void copy(WritableVectorLike<T, VECTOR_A> && lhs, const VectorLike<S, VECTOR_B> & rhs) {
// Calls & version above:
copy(lhs, rhs);
}
// Streams a vector in the form "[a, b, c]" (empty vector -> "[]"):
template <typename T, template <typename> class VECTOR>
std::ostream & operator <<(std::ostream & os, const VectorLike<T, VECTOR> & rhs) {
  os << "[";
  const unsigned long count = rhs.size();
  for (unsigned long idx=0; idx<count; ++idx) {
    // Separator precedes every element except the first:
    if (idx != 0)
      os << ", ";
    os << rhs[idx];
  }
  os << "]";
  return os;
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Tensor/transpose.hpp | .hpp | 6,102 | 168 | #ifndef _TRANSPOSE_HPP
#define _TRANSPOSE_HPP
#include "MatrixTranspose.hpp"
template <typename T>
class Tensor;
// Empirically chosen:
const unsigned long SIZE_WHERE_NAIVE_TRANSPOSE_BECOMES_SLOWER = 8;
// Returns ten with its axes permuted according to new_axis_order:
// result axis i takes its length (and data) from input axis
// new_axis_order[i]. Simple enumerate-and-scatter implementation;
// preferred for small tensors (see transpose() below).
template <typename T>
inline Tensor<T> naive_transposed(const Tensor<T> & ten, const Vector<unsigned char> & new_axis_order) {
#ifdef SHAPE_CHECK
assert(ten.dimension() == new_axis_order.size());
verify_permutation(new_axis_order);
#endif
// Result shape: permute the input axis lengths:
Vector<unsigned long> new_shape(ten.dimension());
for (unsigned char i=0; i<ten.dimension(); ++i)
new_shape[i] = ten.data_shape()[ new_axis_order[i] ];
Tensor<T> result(new_shape);
// Scratch tuple reused for every element (avoids per-element
// allocation):
Vector<unsigned long> reordered_tup(ten.dimension());
enumerate_for_each_tensors([&result, &reordered_tup, &new_axis_order](const_tup_t tup, const unsigned char dim, const T & val){
// Scatter each source element to its permuted destination tuple:
for (unsigned char i=0; i<dim; ++i)
reordered_tup[i] = tup[ new_axis_order[i] ];
result[ tuple_to_index(reordered_tup, result.data_shape(), dim) ] = val;
},
ten.data_shape(),
ten);
return result;
}
// In-place naive transposition: builds the transposed copy and
// assigns it over ten.
template <typename T>
void naive_transpose(Tensor<T> & ten, const Vector<unsigned char> & new_axis_order) {
  // naive_transposed returns a prvalue, so move assignment applies
  // automatically; the former std::move wrapper was redundant.
  ten = naive_transposed(ten, new_axis_order);
}
// Cache friendly version:
// Cache friendly version: performs the axis permutation in-place (via
// one temporary buffer) as a sequence of 2D matrix transpositions,
// each done with the cache-oblivious MatrixTranspose:
template <typename T>
void cache_friendly_transpose(Tensor<T> & ten, const Vector<unsigned char> & new_axis_order) {
#ifdef SHAPE_CHECK
assert(ten.dimension() == new_axis_order.size());
verify_permutation(new_axis_order);
#endif
// For performance: when the prefix of the new order is already
// partially in order, do not visit those indices.
unsigned char already_ordered_prefix;
for (already_ordered_prefix=0; already_ordered_prefix<new_axis_order.size(); ++already_ordered_prefix)
if ( new_axis_order[already_ordered_prefix] != already_ordered_prefix)
break;
if (already_ordered_prefix < ten.dimension()) {
// Ping-pong buffers: data alternates between ten's storage and a
// same-shaped scratch tensor as the 2D transposes are applied:
T* __restrict buffer_from = &ten.flat()[0];
Tensor<T> buffer(ten.data_shape());
T* __restrict buffer_to = &buffer.flat()[0];
/*
This function performs tensor transposition in O(d) matrix
transpositions. This allows it to use the cache-oblivious matrix
transposition, and therefore be more performant.
(a,b,c,d,e,f,g) @
(3,1,5,0,6,4,2) -->
(d,b,c,f,e,g,a)
2D contiguous transposes only swap adjacent inner
indices. Therefore, you can send a given index to the far right
and shift the others left:
(0,1,2,3,4,5,6) -->
(0,1,2,4,5,6,3) -->
(0,2,4,5,6,3,1) -->
(0,2,4,6,3,1,5) -->
(2,4,6,3,1,5,0) -->
(2,4,3,1,5,0,6) -->
(2,3,1,5,0,6,4) -->
(3,1,5,0,6,4,2)
*/
// Note: this method is in O( N d + d^2 ). It could likely be done
// in O( N d + d log(d) ) or better, but O( N d + d^2 ) = O( d ( N +
// d ) ), which will equal O( N d ) when d is in O(N). When d is not
// in O(N), some axes must have a length of 1. Therefore, these axes
// could also alternatively be suppressed, since they will not
// effect the result. If a o(d^2) solution is necessary in general,
// this is likely preferred over using a tree / map to somehow
// reduce the d^2 --> d log(d).
Vector<unsigned char> current_axis_order = seq<unsigned char>(ten.dimension());
for (unsigned char i=already_ordered_prefix; i<ten.dimension(); ++i) {
// The axis that must end up at position i; locate where it
// currently sits:
unsigned char next_axis = new_axis_order[i];
unsigned char next_axis_index = 0;
for (next_axis_index=0; next_axis_index<ten.dimension(); ++next_axis_index)
if (current_axis_order[next_axis_index] == next_axis)
break;
// Compute number of 2D transposes and the R,C:
unsigned long number_of_2d_transposes = 1;
for (unsigned char j=0; j<next_axis_index; ++j)
number_of_2d_transposes *= ten.data_shape()[ current_axis_order[j] ];
unsigned long R = ten.data_shape()[ current_axis_order[next_axis_index] ];
unsigned long C = 1;
for (unsigned char j=next_axis_index+1; j<ten.dimension(); ++j)
C *= ten.data_shape()[ current_axis_order[j] ];
// Note that this could be sped up by swapping in the largest
// blocks possible (e.g., a contig of the first indices may
// already be in order).
// Perform 2D transposes if non-trivial:
if (R > 1 && C > 1) {
for (unsigned long j=0; j<number_of_2d_transposes; ++j)
MatrixTranspose<T>::apply_buffered(buffer_to + j*R*C, buffer_from + j*R*C, R, C);
// The following does not change the vector pointer inside source:
std::swap(buffer_from, buffer_to);
}
// Shift axes to reflect the updated order:
for (unsigned char j=next_axis_index; j<ten.dimension()-1; ++j)
current_axis_order[j] = current_axis_order[j+1];
current_axis_order[ten.dimension()-1] = next_axis;
}
// Data was last transposed into buffer_to, which was swapped with
// buffer_from. If buffer_from is not the true source array, then
// move it into ten.
if (buffer_from != &ten[0ul])
ten = std::move(buffer);
// Change the shape to correspond:
Vector<unsigned long> old_shape = ten.data_shape();
Vector<unsigned long> new_shape(ten.dimension());
for (unsigned char i=0; i<ten.dimension(); ++i)
new_shape[i] = old_shape[ new_axis_order[i] ];
ten.reshape(new_shape);
}
}
// Out-of-place cache-friendly transposition: copies ten and permutes
// the copy's axes according to new_axis_order.
template <typename T>
Tensor<T> cache_friendly_transposed(const Tensor<T> & ten, const Vector<unsigned char> & new_axis_order) {
  Tensor<T> res = ten;
  // Call the cache-friendly routine directly; previously this went
  // through the size-dispatching transpose(), which re-checks the
  // size and could bounce small inputs back to the naive path
  // despite this function's name. The transposed result is the same
  // either way.
  cache_friendly_transpose(res, new_axis_order);
  return res;
}
template <typename T>
void transpose(Tensor<T> & ten, const Vector<unsigned char> & new_axis_order) {
if (ten.flat_size() < SIZE_WHERE_NAIVE_TRANSPOSE_BECOMES_SLOWER)
naive_transpose(ten, new_axis_order);
else
cache_friendly_transpose(ten, new_axis_order);
}
// Out-of-place transposition dispatcher; same size-based choice as
// transpose() above.
template <typename T>
Tensor<T> transposed(const Tensor<T> & ten, const Vector<unsigned char> & new_axis_order) {
  if (ten.flat_size() >= SIZE_WHERE_NAIVE_TRANSPOSE_BECOMES_SLOWER)
    return cache_friendly_transposed(ten, new_axis_order);
  return naive_transposed(ten, new_axis_order);
}
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Tensor/TRIOT.hpp | .hpp | 9,718 | 231 | #ifndef _TRIOT_HPP
#define _TRIOT_HPP
#include "TemplateSearch.hpp"
#include "TensorUtils.hpp"
// TODO: Can a namespace casing like this improve compilation time
// by restricting lookup of template classes within this
// namespace?
// TODO: Would explicit template arguments enable faster compilation?
// TRIOT: template-recursive iteration over tensors. The helpers below
// unroll a runtime dimensionality into a compile-time-fixed nest of
// for loops, one loop per axis.
namespace TRIOT {
//---------------------------------------------
// For each:
//---------------------------------------------
// Recursion step: peels one axis per template level, looping
// counter[CURRENT] over shape[CURRENT]:
template <unsigned char DIMENSION, unsigned char CURRENT>
class ForEachFixedDimensionHelper {
public:
template <typename FUNCTION, typename ...TENSORS>
inline static void apply(tup_t counter, const_tup_t shape, FUNCTION function, TENSORS & ...args) {
for (counter[CURRENT]=0; counter[CURRENT]<shape[CURRENT]; ++counter[CURRENT])
TRIOT::ForEachFixedDimensionHelper<DIMENSION-1, CURRENT+1>::template apply<FUNCTION, TENSORS...>(counter, shape, function, args...);
}
};
// Innermost axis: apply `function` to the element of each tensor at
// the current counter tuple:
template <unsigned char CURRENT>
class ForEachFixedDimensionHelper<1u, CURRENT> {
public:
template <typename FUNCTION, typename ...TENSORS>
inline static void apply(tup_t counter, const_tup_t shape, FUNCTION function, TENSORS & ...args) {
for (counter[CURRENT]=0; counter[CURRENT]<shape[CURRENT]; ++counter[CURRENT])
// For explicitly forcing the compiler to recognize that the
// dimensionality is constant; this will be necessary on older
// compilers:
function(args[tuple_to_index_fixed_dimension<CURRENT+1>(counter, args.data_shape())]...);
// function(args[tuple_to_index(counter, args.data_shape(), CURRENT+1)]...);
}
};
// for a tensor with dimension 0
template <unsigned char CURRENT>
class ForEachFixedDimensionHelper<0u, CURRENT> {
public:
template <typename FUNCTION, typename ...TENSORS>
inline static void apply(tup_t counter, const_tup_t shape, FUNCTION function, TENSORS & ...args) {
// Do nothing
}
};
// Entry point for a known dimension: allocates and zeroes the
// counter tuple on the stack, then starts the recursion:
template <unsigned char DIMENSION>
class ForEachFixedDimension {
public:
template <typename FUNCTION, typename ...TENSORS>
inline static void apply(const_tup_t shape, FUNCTION function, TENSORS & ...args) {
unsigned long counter[DIMENSION];
memset(counter, 0, DIMENSION*sizeof(unsigned long));
TRIOT::ForEachFixedDimensionHelper<DIMENSION,0>::template apply<FUNCTION, TENSORS...>(counter, shape, function, args...);
}
};
template<>
class ForEachFixedDimension<0U> {
public:
template <typename FUNCTION, typename ...TENSORS>
inline static void apply(const_tup_t /*shape*/, FUNCTION /*function*/, TENSORS & .../*args*/) {
// do nothing, so that memset is not called with size = 0 which is a GCC extension
}
};
//---------------------------------------------
// For each, with visible counter:
//---------------------------------------------
// As above, but `function` also receives the current index tuple and
// the dimensionality:
template <unsigned char DIMENSION, unsigned char CURRENT>
class ForEachVisibleCounterFixedDimensionHelper {
public:
template <typename FUNCTION, typename ...TENSORS>
inline static void apply(tup_t counter, const_tup_t shape, FUNCTION function, TENSORS & ...args) {
for (counter[CURRENT]=0; counter[CURRENT]<shape[CURRENT]; ++counter[CURRENT])
ForEachVisibleCounterFixedDimensionHelper<DIMENSION-1, CURRENT+1>::template apply<FUNCTION, TENSORS...>(counter, shape, function, args...);
}
};
template <unsigned char CURRENT>
class ForEachVisibleCounterFixedDimensionHelper<1u, CURRENT> {
public:
template <typename FUNCTION, typename ...TENSORS>
inline static void apply(tup_t counter, const_tup_t shape, FUNCTION function, TENSORS & ...args) {
for (counter[CURRENT]=0; counter[CURRENT]<shape[CURRENT]; ++counter[CURRENT])
// Cast the counter to a const_tup_t pointer so that its
// contents cannot be modified by function:
// For explicitly forcing the compiler to recognize that the
// dimensionality is constant; this will be necessary on older
// compilers (which may not see this through constant
// propagation):
function(static_cast<const_tup_t>(counter), CURRENT+1, args[tuple_to_index_fixed_dimension<CURRENT+1>(counter, args.data_shape())]...);
// function(static_cast<const_tup_t>(counter), CURRENT+1, args[tuple_to_index(counter, args.data_shape(), CURRENT+1)]...);
}
};
template <unsigned char CURRENT>
class ForEachVisibleCounterFixedDimensionHelper<0u, CURRENT> {
public:
template <typename FUNCTION, typename ...TENSORS>
inline static void apply(tup_t counter, const_tup_t shape, FUNCTION function, TENSORS & ...args) {
// Do nothing
}
};
// Entry point (visible-counter variant):
template <unsigned char DIMENSION>
class ForEachVisibleCounterFixedDimension {
public:
template <typename FUNCTION, typename ...TENSORS>
inline static void apply(const_tup_t shape, FUNCTION function, TENSORS & ...args) {
unsigned long counter[DIMENSION];
memset(counter, 0, DIMENSION*sizeof(unsigned long));
ForEachVisibleCounterFixedDimensionHelper<DIMENSION,0>::template apply<FUNCTION, TENSORS...>(counter, shape, function, args...);
}
};
template<>
class ForEachVisibleCounterFixedDimension<0U> {
public:
template <typename FUNCTION, typename ...TENSORS>
inline static void apply(const_tup_t /*shape*/, FUNCTION /*function*/, TENSORS & .../*args*/) {
// do nothing, so that memset is not called with size = 0 which is a GCC extension
}
};
}
// Verifies (under SHAPE_CHECK) that every tensor in the pack has the
// same dimensionality as `shape` and a view shape at least as large,
// so iterating over `shape` stays in bounds for all of them; compiles
// to a no-op otherwise:
template <typename ...TENSORS>
#ifndef SHAPE_CHECK
void check_tensor_pack_bounds(const TENSORS&... /*args*/, const Vector<unsigned long>& /*shape*/)
{
}
#else
void check_tensor_pack_bounds(const TENSORS&... args, const Vector<unsigned long>& shape)
{
// Verify same shapes:
// TODO: this could be faster by using an array of references; C++
// does not allow an array of references, but it would allow an
// array of structs containing only the reference.
Vector<unsigned long> shapes[] = { args.view_shape()... };
for (const Vector<unsigned long> & s : shapes) {
// Check that all dimensions match:
assert(s.size() == shape.size());
// Check that iterating over shape is in bounds with respect to
// the current view_shape:
assert(s >= shape);
}
}
#endif
// Zero-tensor overload: with no tensors in the pack there is nothing
// to check.
template <typename ...TENSORS>
void check_tensor_pack_bounds(const Vector<unsigned long> & /*shape*/) {
}
// Elementwise minimum of all argument view shapes: the largest shape
// over which every tensor in the pack can safely be iterated.
template <typename ...TENSORS>
Vector<unsigned long> bounding_shape(const TENSORS & ...args) {
  Vector<unsigned long> all_shapes[] = { args.view_shape()... };
  Vector<unsigned long> smallest = all_shapes[0];
  for (const Vector<unsigned long> & shp : all_shapes) {
#ifdef SHAPE_CHECK
    // Every tensor in the pack must share the same dimensionality:
    assert(shp.size() == smallest.size());
#endif
    for (unsigned int axis=0; axis<smallest.size(); ++axis)
      smallest[axis] = std::min(smallest[axis], shp[axis]);
  }
  return smallest;
}
// Interface for external use (note: these functions also work with
// the tensor view types); rather than TENSOR types, they could be
// treated as TensorLike and WritableTensorLike, but for now duck
// typing is a bit simpler. This works because const & parameter
// typing allows rvalues and lvalues to be passed, and && typing would
// normally only allow rvalue references, but because of templating,
// the compiler can automatically choose the type as T& && --> T&.
// Allows no modifications:
// Read-only elementwise iteration: calls function(element...) for
// every index tuple within `shape`. Dispatches the runtime
// shape.size() to a compile-time-fixed loop nest (up to
// MAX_TENSOR_DIMENSION).
template <typename FUNCTION, typename ...TENSORS>
void for_each_tensors(FUNCTION function, const Vector<unsigned long> & shape, const TENSORS & ...args) {
check_tensor_pack_bounds<TENSORS...>(args..., shape);
LinearTemplateSearch<0u,MAX_TENSOR_DIMENSION,TRIOT::ForEachFixedDimension>::apply(shape.size(), shape, function, args...);
}
// As for_each_tensors, but function also receives the current index
// tuple and the dimensionality:
template <typename FUNCTION, typename ...TENSORS>
void enumerate_for_each_tensors(FUNCTION function, const Vector<unsigned long> & shape, const TENSORS & ...args) {
check_tensor_pack_bounds<TENSORS...>(args..., shape);
LinearTemplateSearch<0u,MAX_TENSOR_DIMENSION,TRIOT::ForEachVisibleCounterFixedDimension>::apply(shape.size(), shape, function, args...);
}
// Allows modifications to all arguments:
// Elementwise iteration permitting mutation of every argument tensor:
template <typename FUNCTION, typename ...DEST_TENSORS>
void modify_tensors(FUNCTION function, const Vector<unsigned long> & shape, DEST_TENSORS && ...args) {
check_tensor_pack_bounds<DEST_TENSORS...>(args..., shape);
LinearTemplateSearch<0u,MAX_TENSOR_DIMENSION,TRIOT::ForEachFixedDimension>::apply(shape.size(), shape, function, args...);
}
// As modify_tensors, but function also receives the current index
// tuple and the dimensionality:
template <typename FUNCTION, typename ...TENSORS>
void enumerate_modify_tensors(FUNCTION function, const Vector<unsigned long> & shape, TENSORS && ...args) {
check_tensor_pack_bounds<TENSORS...>(args..., shape);
LinearTemplateSearch<0u,MAX_TENSOR_DIMENSION,TRIOT::ForEachVisibleCounterFixedDimension>::apply(shape.size(), shape, function, args...);
}
// Allow modifications only to dest:
// Elementwise iteration where only `dest` may be mutated; the
// remaining tensors are read-only sources:
template <typename FUNCTION, typename DEST_TENSOR, typename ...SOURCE_TENSORS>
void apply_tensors(FUNCTION function, const Vector<unsigned long> & shape, DEST_TENSOR && dest, const SOURCE_TENSORS & ...source_args) {
check_tensor_pack_bounds<DEST_TENSOR, SOURCE_TENSORS...>(dest, source_args..., shape);
LinearTemplateSearch<0u,MAX_TENSOR_DIMENSION,TRIOT::ForEachFixedDimension>::apply(shape.size(), shape, function, dest, source_args...);
}
// As apply_tensors, but function also receives the current index
// tuple and the dimensionality:
template <typename FUNCTION, typename DEST_TENSOR, typename ...SOURCE_TENSORS>
void enumerate_apply_tensors(FUNCTION function, const Vector<unsigned long> & shape, DEST_TENSOR && dest, const SOURCE_TENSORS & ...source_args) {
check_tensor_pack_bounds<DEST_TENSOR, SOURCE_TENSORS...>(dest, source_args..., shape);
LinearTemplateSearch<0u,MAX_TENSOR_DIMENSION,TRIOT::ForEachVisibleCounterFixedDimension>::apply(shape.size(), shape, function, dest, source_args...);
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Tensor/MatrixTranspose.hpp | .hpp | 4,391 | 115 | #ifndef _MATRIXTRANSPOSE_HPP
#define _MATRIXTRANSPOSE_HPP
#include <algorithm>
// Note: This code is a good candidate to perform on a GPU.
// Implements cache-oblivious strategy for transposition. In cases
// where recursion can simply be unrolled into a loop (e.g., tall,
// thin matrix or short, wide matrix) it is unrolled for greater
// performance.
// Cache-oblivious matrix transposition: recursively halves the longer
// side of the current block until a block fits in cache, then sweeps it
// with a simple double loop. Naive reference implementations are
// provided for testing/comparison.
template <typename T>
class MatrixTranspose {
private:
  // Base case for recursion; should fit in L1 cache. In practice, it
  // just needs to be large enough to amortize out the cost of the
  // recursions. (Was followed by a stray second semicolon.)
  static constexpr unsigned int BLOCK_SIZE = 128 / sizeof(T);

  // Square (in place): transposes the sub-block
  // [r_start,r_end) x [c_start,c_end) of the N x N matrix `mat`. Only
  // cells with c > r are swapped, so each pair is exchanged exactly once.
  static void square_helper(T* __restrict const mat, const unsigned long N, const unsigned long r_start, const unsigned long r_end, const unsigned long c_start, const unsigned long c_end) {
    unsigned long r_span = r_end-r_start;
    unsigned long c_span = c_end-c_start;
    if ( c_span <= BLOCK_SIZE ) {
      // Tall, narrow block: proceed row-by-row:
      for (unsigned long r=r_start; r<r_end; ++r)
        // Force c > r to not swap multiple times:
        for (unsigned long c=std::max(c_start, r+1); c<c_end; ++c)
          std::swap(mat[c*N + r], mat[r*N + c]);
    }
    else if ( r_span <= BLOCK_SIZE ) {
      // Short, fat block: proceeding column-by-column will be optimal:
      for (unsigned long c=c_start; c<c_end; ++c)
        // Force c > r to not swap multiple times:
        for (unsigned long r=r_start; r<std::min(r_end, c); ++r)
          std::swap(mat[c*N + r], mat[r*N + c]);
    }
    else {
      // Recurse, splitting the longer side; sub-blocks entirely below
      // the diagonal (no cells with c > r) are pruned.
      if (r_span > c_span) {
        // if there are any cells c>r for the first subproblem:
        if (c_end > r_start)
          square_helper(mat, N, r_start,r_start+r_span/2, c_start,c_end);
        // if there are any cells c>r for the second subproblem:
        if (c_end > r_start+r_span/2)
          square_helper(mat, N, r_start+r_span/2,r_end, c_start,c_end);
      }
      else {
        // if there are any cells c>r for the first subproblem:
        if (c_start+c_span/2 > r_start)
          square_helper(mat, N, r_start,r_end, c_start,c_start+c_span/2);
        // if there are any cells c>r for the second subproblem:
        if (c_end > r_start)
          square_helper(mat, N, r_start,r_end, c_start+c_span/2,c_end);
      }
    }
  }
  // Buffered (out of place): writes the transpose of the R x C matrix
  // `source` into the C x R matrix `dest` for the given sub-block.
  // `source` is read-only (now const-qualified, matching
  // apply_buffered_naive).
  static void buffered_helper(T* __restrict const dest, const T* __restrict const source, const unsigned long R, const unsigned long C, const unsigned long r_start, const unsigned long r_end, const unsigned long c_start, const unsigned long c_end) {
    unsigned long r_span = r_end-r_start;
    unsigned long c_span = c_end-c_start;
    if ( c_span <= BLOCK_SIZE ) {
      // Tall, narrow block: proceed row-by-row:
      for (unsigned long r=r_start; r<r_end; ++r)
        for (unsigned long c=c_start; c<c_end; ++c)
          // dest[c,r] = source[r,c];
          dest[c*R + r] = source[r*C + c];
    }
    else if ( r_span <= BLOCK_SIZE ) {
      // Short, fat block: proceeding column-by-column will be optimal:
      for (unsigned long c=c_start; c<c_end; ++c)
        for (unsigned long r=r_start; r<r_end; ++r)
          // dest[c,r] = source[r,c];
          dest[c*R + r] = source[r*C + c];
    }
    else {
      // Recurse, splitting the longer side:
      if (r_span > c_span) {
        buffered_helper(dest, source, R, C, r_start,r_start+r_span/2, c_start,c_end);
        buffered_helper(dest, source, R, C, r_start+r_span/2,r_end, c_start,c_end);
      }
      else {
        buffered_helper(dest, source, R, C, r_start,r_end, c_start,c_start+c_span/2);
        buffered_helper(dest, source, R, C, r_start,r_end, c_start+c_span/2,c_end);
      }
    }
  }
public:
  // In-place transpose of an N x N matrix (cache-oblivious).
  inline static void apply_square(T* __restrict const mat, const unsigned long N) {
    square_helper(mat, N, 0, N, 0, N);
  }
  // In-place transpose of an N x N matrix (naive reference).
  static void apply_square_naive(T* __restrict const mat, const unsigned long N) {
    for (unsigned long r=0; r<N; ++r)
      for (unsigned long c=r+1; c<N; ++c)
        std::swap(mat[r*N+c], mat[c*N+r]);
  }
  // Out-of-place transpose of the R x C matrix `source` into the C x R
  // matrix `dest` (cache-oblivious). Buffers must not alias.
  inline static void apply_buffered(T* __restrict const dest, const T* __restrict const source, const unsigned long R, const unsigned long C) {
    buffered_helper(dest, source, R, C, 0, R, 0, C);
  }
  // Out-of-place transpose (naive reference).
  static void apply_buffered_naive(T* __restrict const dest, const T* __restrict const source, const unsigned long R, const unsigned long C) {
    for (unsigned long r=0; r<R; ++r)
      for (unsigned long c=0; c<C; ++c)
        dest[c*R+r] = source[r*C+c];
  }
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Tensor/ArrayShape.hpp | .hpp | 624 | 23 | #ifndef _ARRAYSHAPE_HPP
#define _ARRAYSHAPE_HPP
// Used to expand the shape of an array into a variadic template pack
// at compile time. This particular version puts the shape into a
// Vector.
// Recursive case: peel one array axis per instantiation, appending its
// extent (sizeof(arg)/sizeof(arg[0])) to the SHAPE pack, and recurse on
// the element type. NOTE(review): this relies on ARR being deduced as a
// genuine array type (not decayed to a pointer) so that sizeof yields
// the axis length — confirm against the call sites.
template <typename T, typename ARR, unsigned long ...SHAPE>
struct ArrayShape {
static Vector<unsigned long> eval(ARR arg) {
return ArrayShape<T,decltype(arg[0]), SHAPE..., sizeof(arg) / sizeof(arg[0])>::eval(arg[0]);
}
};
// Base case: the element type T has been reached; materialize the
// accumulated extents as a Vector.
template <typename T, unsigned long ...SHAPE>
struct ArrayShape<T,T, SHAPE...> {
static Vector<unsigned long> eval(T /*element*/) {
return Vector<unsigned long>({SHAPE...});
}
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Tensor/VectorTRIOT.hpp | .hpp | 1,643 | 50 | #ifndef _VECTORTRIOT_HPP
#define _VECTORTRIOT_HPP
// Asserts that every vector in the pack has at least `length` elements.
// Compiles to a no-op unless SHAPE_CHECK is defined.
template <typename ...VECTORS>
#ifndef SHAPE_CHECK
void check_vector_pack_lengths(const VECTORS&... /*args*/, unsigned long /*length*/) {}
#else
void check_vector_pack_lengths(const VECTORS&... args, unsigned long length)
{
unsigned long sizes[] = { args.size()... };
for (unsigned long s : sizes)
assert(s >= length);
}
#endif
// Note: Vectorizing functions also work on VectorView types; a
// common base class could be used, but that would require virtual
// functions, which would considerably slow the methods. So for now,
// this is performed with duck typing:
// Allows no modifications:
// Calls function(args[k]...) for k in [0, length) with all vectors
// read-only.
template <typename FUNCTION, typename ...VECTORS>
void for_each_vectors(FUNCTION function, unsigned long length, const VECTORS & ...args) {
check_vector_pack_lengths<VECTORS...>(args..., length);
for(unsigned long k=0; k<length; ++k) {
function(args[k]...);
}
}
// Allows modifications to all arguments:
// Identical iteration to for_each_vectors, but the vectors are taken by
// non-const reference so the functor may mutate any of them.
template <typename FUNCTION, typename ...VECTORS>
void modify_vectors(FUNCTION function, unsigned long length, VECTORS & ...args) {
check_vector_pack_lengths<VECTORS...>(args..., length);
for(unsigned long k=0; k<length; ++k) {
function(args[k]...);
}
}
// Allows modifications only to dest:
// Calls function(dest[k], args[k]...) for k in [0, length); only dest's
// elements are mutable.
template <typename FUNCTION, typename DEST_VECTOR, typename ...SOURCE_VECTORS>
void apply_vectors(FUNCTION function, unsigned long length, DEST_VECTOR & dest, const SOURCE_VECTORS & ...args) {
check_vector_pack_lengths<DEST_VECTOR, SOURCE_VECTORS...>(dest, args..., length);
for(unsigned long k=0; k<length; ++k) {
function(dest[k], args[k]...);
}
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Tensor/TensorUtils.hpp | .hpp | 3,469 | 116 | #ifndef _TENSOR_UTILS_HPP
#define _TENSOR_UTILS_HPP
#include <vector>
#include "product.hpp"
#include "sum.hpp"
#include "Vector.hpp"
#ifndef MAX_TENSOR_DIMENSION
#define MAX_TENSOR_DIMENSION 12
#endif
// Tuple types: restrict-qualified pointers to an array of per-axis
// indices (mutable and read-only variants):
typedef unsigned long* __restrict const tup_t;
typedef const unsigned long* __restrict const const_tup_t;
// Debugging aid: prints the first `dim` entries of an index tuple,
// space-separated, to stdout.
inline void print_tuple(const_tup_t tup, unsigned char dim) {
for (unsigned char i=0; i<dim; ++i)
std::cout << tup[i] << " ";
std::cout << std::endl;
}
// Total number of elements in a tensor of the given shape: the product
// of the axis lengths, or 0 when dimension == 0.
inline unsigned long flat_length(const_tup_t shape, unsigned char dimension) {
if (dimension > 0)
return product(shape, dimension);
return 0;
}
// Convenience overload: takes the shape as a Vector and forwards to the
// raw-pointer version.
inline unsigned long flat_length(const Vector<unsigned long> & shape) {
return flat_length(static_cast<const unsigned long*const>(shape), shape.size());
}
// Advances `tup` to the next index tuple in row-major order over
// `shape`, propagating carries from the last axis toward the first.
// NOTE(review): assumes dimension >= 1, and axis 0 is never wrapped, so
// advancing past the final tuple leaves tup[0] == shape[0] (a useful
// end-of-iteration sentinel) — confirm callers rely on that.
inline void advance_tuple(unsigned long* __restrict tup, const_tup_t shape, unsigned char dimension) {
++tup[dimension-1];
for (unsigned char k=dimension-1; k>=1; --k)
if ( tup[k] >= shape[k] ) {
++tup[k-1];
tup[k] = 0;
}
else
// No more carry operations:
return;
}
// Converts an index tuple to its flat row-major offset using Horner's
// rule. NOTE(review): assumes dimension >= 1 (tup[k-1] with k=1 is read
// unconditionally after the loop).
inline unsigned long tuple_to_index(const_tup_t tup, const_tup_t shape, unsigned char dimension) {
unsigned long res = 0;
unsigned char k;
for (k=1; k<dimension; ++k) {
res += tup[k-1];
res *= shape[k];
}
res += tup[k-1];
return res;
}
// Compile-time-dimension variant of tuple_to_index (same Horner-rule
// row-major flattening); lets the compiler fully unroll the loop.
// Requires DIMENSION >= 1.
template <unsigned int DIMENSION>
inline unsigned long tuple_to_index_fixed_dimension(const_tup_t tup, const_tup_t shape) {
unsigned long res = 0;
unsigned int k;
for (k=0; k<DIMENSION-1; ++k) {
res += tup[k];
res *= shape[k+1]; }
res += tup[k];
return res;
}
// Note: This is not very efficient, but is useful for debugging.
// Inverse of tuple_to_index: decomposes a flat row-major offset into a
// freshly allocated index tuple of length `dimension`. The caller owns
// the returned buffer (allocated with aligned_calloc; release it with
// the project's matching deallocator).
inline unsigned long* index_to_tuple(unsigned long index, const unsigned long* __restrict const shape, unsigned int dimension) {
unsigned long* __restrict result = aligned_calloc<unsigned long>(dimension);
for (int i=dimension-1; index>0 && i>=0; --i) {
unsigned long next_axis = shape[i];
// Note: There may be a speedup lurking in here where shared work
// between index / next_axis and index % next_axis can be reused;
// however, this code will only be used for bounds checking, so
// speed is not very important.
unsigned long next_value = index % next_axis;
result[i] = next_value;
index /= next_axis;
}
return result;
}
// No assertions fire when there are no duplicate indices and all are in
// range. Could also be implemented with a set in O(n log(n)), but this
// version is in O(n + dim).
// Verifies that `permutation` is a valid subpermutation of
// {0, 1, ..., dim-1}: every value in range and no value repeated.
inline void verify_subpermutation(const Vector<unsigned char> & permutation, unsigned char dim) {
  std::vector<bool> indices(dim, false);
  for (unsigned char i=0; i<permutation.size(); ++i) {
    // All values must be in 0, 1, ... n-1, where n is the number of
    // dimensions allowed:
    assert(permutation[i] < dim);
    indices[ permutation[i] ] = true;
  }
  // Count distinct values used. This must sum over the presence table,
  // not over permutation itself: indices[permutation[i]] is true for
  // every i by construction, so the old sum always equaled
  // permutation.size() and duplicates were never detected.
  unsigned char cardinality = 0;
  for (unsigned char i=0; i<dim; ++i)
    cardinality += indices[i];
  // All indices must be included exactly once (therefore, there must
  // be no duplicates). Given all indices must also be in range (by
  // the assertion above), this means it is a valid subpermutation.
  assert(cardinality == permutation.size());
}
// A full permutation is a subpermutation whose length equals the number
// of dimensions, so every index must appear exactly once.
inline void verify_permutation(const Vector<unsigned char> & permutation) {
verify_subpermutation(permutation, permutation.size());
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Tensor/VectorView.hpp | .hpp | 3,346 | 131 | #ifndef _VECTORVIEW_HPP
#define _VECTORVIEW_HPP
template <typename T>
class Vector;
// Read-only, non-owning view of the contiguous slice
// [start, start+length) of a Vector<T>. Holds a reference to the
// underlying vector, so the view must not outlive it. Size-0 views are
// permitted.
template <typename T>
class VectorView : public VectorLike<T, VectorView> {
protected:
const Vector<T> & _vec_ref;
const unsigned long _start;
const unsigned long _length;
public:
// View from `start` to the end of `vec`.
explicit VectorView(const Vector<T> & vec, unsigned long start):
_vec_ref(vec),
_start(start),
_length(vec.size() - _start)
{
#ifdef SHAPE_CHECK
// Allows views of size 0:
assert( start <= vec.size() );
#endif
}
// View of `length` elements beginning at `start`.
explicit VectorView(const Vector<T> & vec, unsigned long start, unsigned long length):
_vec_ref(vec),
_start(start),
_length(length)
{
#ifdef SHAPE_CHECK
// Allows views of size 0:
assert( _start + _length <= vec.size() );
#endif
}
// Indexing is relative to the view's start.
const T & operator [] (unsigned long i) const {
#ifdef BOUNDS_CHECK
assert(i < size());
#endif
return _vec_ref[_start + i];
}
// Raw pointer to the first viewed element.
operator const T*const() const {
return (const T*const)(_vec_ref) + _start;
}
unsigned long size() const {
return _length;
}
// Sub-views: offsets are relative to this view, not the underlying
// vector.
VectorView<T> start_at_const(unsigned long start) const {
return VectorView(_vec_ref, start+_start);
}
VectorView<T> start_at_const(unsigned long start, unsigned long length) const {
return VectorView(_vec_ref, start+_start, length);
}
};
// Mutable, non-owning view of the contiguous slice
// [start, start+length) of a Vector<T>. As with VectorView, the view
// must not outlive the underlying vector; size-0 views are permitted.
template <typename T>
class WritableVectorView : public WritableVectorLike<T, WritableVectorView> {
protected:
Vector<T> & _vec_ref;
const unsigned long _start;
const unsigned long _length;
public:
// View from `start` to the end of `vec`.
explicit WritableVectorView(Vector<T> & vec, unsigned long start):
_vec_ref(vec),
_start(start),
_length(vec.size() - _start)
{
#ifdef SHAPE_CHECK
// Allows views of size 0:
assert( start <= vec.size() );
#endif
}
// View of `length` elements beginning at `start`.
explicit WritableVectorView(Vector<T> & vec, unsigned long start, unsigned long length):
_vec_ref(vec),
_start(start),
_length(length)
{
#ifdef SHAPE_CHECK
// Allows views of size 0:
assert( _start + _length <= vec.size() );
#endif
}
// Element-wise assignment from any vector-like source into the viewed
// range (delegates to copy()).
template <typename S, template <typename> class VECTOR>
const WritableVectorView & operator =(const VectorLike<S, VECTOR> & rhs) {
copy(*this, rhs);
return *this;
}
// Indexing is relative to the view's start.
const T & operator [] (unsigned long i) const {
#ifdef BOUNDS_CHECK
assert(i < size());
#endif
return _vec_ref[_start + i];
}
T & operator [] (unsigned long i) {
#ifdef BOUNDS_CHECK
assert(i < size());
#endif
return _vec_ref[_start + i];
}
// Raw pointers to the first viewed element (const and mutable).
operator const T*const() const {
return (const T*const)(_vec_ref) + _start;
}
operator T*const() const {
return (T*const)(_vec_ref) + _start;
}
unsigned long size() const {
return _length;
}
// Sets every element in the viewed range to `value`.
void fill(T value) {
for (unsigned long k=0; k<_length; ++k)
(*this)[k] = value;
}
// Sub-views: offsets are relative to this view, not the underlying
// vector.
WritableVectorView start_at(unsigned long start) {
return WritableVectorView(_vec_ref, start+_start);
}
WritableVectorView start_at(unsigned long start, unsigned long length) {
return WritableVectorView(_vec_ref, start+_start, length);
}
VectorView<T> start_at_const(unsigned long start) const {
return VectorView<T>(_vec_ref, start+_start);
}
VectorView<T> start_at_const(unsigned long start, unsigned long length) const {
return VectorView<T>(_vec_ref, start+_start, length);
}
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Tensor/sum.hpp | .hpp | 381 | 19 | #ifndef _SUM_HPP
#define _SUM_HPP
// Sums the first `length` entries of the raw array `v`, accumulating in
// type T in ascending index order (order matters for floating point).
template <typename T>
inline T sum(const T* __restrict const v, unsigned long length) {
  T total = 0;
  unsigned long index = 0;
  while (index < length) {
    total += v[index];
    ++index;
  }
  return total;
}
// Convenience overload for any VectorLike: forwards its raw storage and
// size to the pointer-based sum above.
template <typename T, template <typename> class VECTOR>
inline T sum(const VectorLike<T, VECTOR> & v) {
return sum(static_cast<const T*const>(v), v.size());
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/1-convolution/numpy_benchmark.py | .py | 397 | 23 | import numpy as np
from scipy.signal import fftconvolve
from time import time
import sys
def main(argv):
    """Benchmark scipy's fftconvolve on two length-2**LOG_N complex ramps.

    argv: command-line arguments after the script name; argv[0] is LOG_N.
    Prints "<n> <seconds>" like the companion C++ benchmarks.
    """
    if len(argv) != 1:
        # print() with a single argument is valid in both Python 2 and 3;
        # the old print-statement form was a SyntaxError under Python 3.
        print('Usage: numpy_benchmark.py <LOG_N>')
        return
    log_n = int(argv[0])
    n = 2**log_n
    # Complex test signals, mirroring the C++ benchmarks' inputs.
    x = np.arange(n)*(1+1j)
    y = np.arange(n)*(-1-1j)
    t1 = time()
    z = fftconvolve(x, y)
    t2 = time()
    # Previously the timing was measured but never reported; emit it in
    # the same "<n> <seconds>" format as the sibling benchmarks.
    print('%d %f' % (n, t2 - t1))
if __name__ == '__main__':
    main(sys.argv[1:])
| Python |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/1-convolution/fft_benchmark.cpp | .cpp | 660 | 31 | #include <iostream>
#include <cstring>
#include "../../Utility/Clock.hpp"
#include "../../Convolution/p_convolve.hpp"
// Benchmarks fft_convolve on two length-2^LOG_N complex ramp signals
// (x[i] = i+i*I, y[i] = -i-i*I) and prints "<n> <seconds>".
int main(int argc, char**argv) {
if (argc != 2) {
std::cerr << "Usage: fft_conv_benchmark <LOG_N>" << std::endl;
return 1;
}
int log_n = atoi(argv[1]);
unsigned long n = 1ul<<log_n;
// Actual inputs:
Tensor<cpx> x({n});
for (unsigned long i=0; i<n; ++i)
x[i] = cpx{double(i),double(i)};
Tensor<cpx> y({n});
for (unsigned long i=0; i<n; ++i)
y[i] = cpx{-double(i),-double(i)};
std::cout << n << " ";
// Time only the convolution itself, not input construction:
Clock c;
Tensor<cpx> z = fft_convolve(x, y);
std::cout << c.tock() << std::endl;
return 0;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/1-convolution/naive_benchmark.cpp | .cpp | 658 | 31 | #include <iostream>
#include <cstring>
#include "../../Utility/Clock.hpp"
#include "../../Convolution/p_convolve.hpp"
// Benchmarks naive_convolve on the same inputs as the FFT benchmark
// (two length-2^LOG_N complex ramps) and prints "<n> <seconds>".
int main(int argc, char**argv) {
if (argc != 2) {
std::cerr << "Usage: conv_benchmark <LOG_N>" << std::endl;
return 1;
}
int log_n = atoi(argv[1]);
unsigned long n = 1ul<<log_n;
// Actual inputs:
Tensor<cpx> x({n});
for (unsigned long i=0; i<n; ++i)
x[i] = cpx{double(i),double(i)};
Tensor<cpx> y({n});
for (unsigned long i=0; i<n; ++i)
y[i] = cpx{-double(i),-double(i)};
std::cout << n << " ";
// Time only the convolution itself, not input construction:
Clock c;
Tensor<cpx> z = naive_convolve(x, y);
std::cout << c.tock() << std::endl;
return 0;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/1-convolution/fftw_benchmark.cpp | .cpp | 2,701 | 120 | #include <iostream>
#include <cstring>
#include "fftw3.h"
#include "../../Utility/Clock.hpp"
// Full linear convolution of two length-n complex signals via FFTW.
// Both inputs are zero padded to length 2n, and a single FFTW_FORWARD
// plan is reused for all three transforms: the inverse transform is
// obtained by conjugating the spectrum before, and the output after,
// the forward transform (then dividing by 2n). Returns a
// new[]-allocated array of length 2*n-1; the caller must delete[] it.
fftw_complex* convolve(fftw_complex*x, fftw_complex*y, int n) {
// Buffers:
fftw_complex *input = (fftw_complex*)fftw_malloc(2*n*sizeof(fftw_complex));
fftw_complex *output = (fftw_complex*)fftw_malloc(2*n*sizeof(fftw_complex));
fftw_complex *temp = (fftw_complex*)fftw_malloc(2*n*sizeof(fftw_complex));
const int shape[] = {2*n};
auto plan = fftw_plan_dft(sizeof(shape)/sizeof(int), &shape[0], input, output, FFTW_FORWARD, FFTW_ESTIMATE);
// Zero pad x:
for (int i=0; i<n; ++i) {
input[i][0] = x[i][0];
input[i][1] = x[i][1];
}
for (int i=n; i<2*n; ++i) {
input[i][0] = 0;
input[i][1] = 0;
}
// FFT zero padded x:
fftw_execute(plan);
// Copy FFT of zero padded x to temp:
for (int i=0; i<2*n; ++i) {
temp[i][0] = output[i][0];
temp[i][1] = output[i][1];
}
// Zero pad y:
for (int i=0; i<n; ++i) {
input[i][0] = y[i][0];
input[i][1] = y[i][1];
}
for (int i=n; i<2*n; ++i) {
input[i][0] = 0;
input[i][1] = 0;
}
// FFT zero padded y:
fftw_execute(plan);
// Multiply FFT results:
for (int i=0; i<2*n; ++i) {
const double r1 = output[i][0];
const double i1 = output[i][1];
const double r2 = temp[i][0];
const double i2 = temp[i][1];
input[i][0] = r1*r2 - i1*i2;
// Conjugate inline:
input[i][1] = -(i1*r2 + r1*i2);
}
// input contains conjugated FFT of result
// Conjugate input and output to reuse plan (input conjugation is
// already performed above):
fftw_execute(plan);
fftw_destroy_plan(plan);
// Conjugate output and divide by 2*n:
fftw_complex *z = new fftw_complex[2*n-1];
double one_over_two_n = 1.0 / (2*n);
for (int i=0; i<2*n-1; ++i) {
// Multiplication is faster than division:
z[i][0] = output[i][0] * one_over_two_n;
// Conjugation inline:
z[i][1] = output[i][1] * -one_over_two_n;
}
fftw_free(input);
fftw_free(output);
fftw_free(temp);
return z;
}
// Benchmarks the FFTW-based convolve() above on two length-2^LOG_N
// complex ramps and prints "<n> <seconds>".
int main(int argc, char**argv) {
if (argc != 2) {
std::cerr << "Usage: fftw_conv_benchmark <LOG_N>" << std::endl;
return 1;
}
int log_n = atoi(argv[1]);
int n = 1<<log_n;
// Actual inputs:
fftw_complex*x = new fftw_complex[n];
fftw_complex*y = new fftw_complex[n];
// Initialize input data:
for (int i=0; i<n; ++i) {
x[i][0] = i;
x[i][1] = i;
}
// Initialize input data:
for (int i=0; i<n; ++i) {
y[i][0] = -i;
y[i][1] = -i;
}
std::cout << n << " ";
// Time planning + all three transforms (cold start):
Clock c;
fftw_complex*z = convolve(x, y, n);
std::cout << c.tock() << std::endl;
delete[] x;
delete[] y;
delete[] z;
return 0;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/fft/numpy_benchmark.py | .py | 347 | 23 | import numpy as np
from time import time
import sys
def main(argv):
    """Benchmark numpy's FFT on a length-2**LOG_N complex ramp.

    argv: command-line arguments after the script name; argv[0] is LOG_N.
    Prints "<N> <seconds>" like the companion C++ benchmarks.
    """
    if len(argv) != 1:
        # print() with these arguments produces identical output in
        # Python 2 and 3; the old print-statement form was a SyntaxError
        # under Python 3.
        print('Usage: numpy_benchmark.py <LOG_N>')
        return
    logN = int(argv[0])
    N = 2**logN
    x = np.arange(N)*(1+1j)
    t1 = time()
    y = np.fft.fftn(x)
    t2 = time()
    print(N, t2-t1)
if __name__ == '__main__':
    main(sys.argv[1:])
| Python |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/fft/fftw_estimate_benchmark.cpp | .cpp | 1,032 | 41 | #include <iostream>
#include <cstring>
#include "fftw3.h"
#include "../../Utility/Clock.hpp"
// Benchmarks a cold-start FFTW forward FFT of a length-2^LOG_N complex
// ramp using the FFTW_ESTIMATE planner flag (planning time included in
// the measurement); prints "<n> <seconds>".
int main(int argc, char**argv) {
if (argc != 2) {
std::cerr << "Usage: fftw_benchmark <LOG_N>" << std::endl;
return 1;
}
int log_n = atoi(argv[1]);
int n = 1<<log_n;
// To avoid allocating buffers, use existing memory (which in C++,
// will usually be allocated with new); the alternative would be to
// use fftw_malloc and then copy memory.
fftw_complex*x = new fftw_complex[n];
fftw_complex*y = new fftw_complex[n];
// Initialize input data:
for (int i=0; i<n; ++i) {
x[i][0] = i;
x[i][1] = i;
}
std::cout << n << " ";
// Cold start with FFTW_ESTIMATE (good use-case for FFTs of unknown
// size, no buffers needed):
Clock c;
const int shape[] = {n};
auto plan = fftw_plan_dft(sizeof(shape)/sizeof(int), &shape[0], x, y, FFTW_FORWARD, FFTW_ESTIMATE);
fftw_execute(plan);
fftw_destroy_plan(plan);
std::cout << c.tock() << std::endl;
delete[] x;
delete[] y;
return 0;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/fft/fft_benchmark.cpp | .cpp | 713 | 29 | #include <iostream>
#include "../../FFT/FFT.hpp"
#include "../../Utility/Clock.hpp"
// Benchmarks the library's in-place FFT on a length-2^LOG_N complex ramp
// and prints "<n> <seconds>". (The file-scope `Clock c;` previously
// declared here was shadowed by the local clock below and never used, so
// it has been removed.)
int main(int argc, char**argv) {
  if (argc != 2) {
    std::cerr << "Usage: fft_benchmark <LOG_N>" << std::endl;
    return 1;
  }
  int log_n = atoi(argv[1]);
  unsigned long n = 1ul<<log_n;
  Tensor<cpx> x({n});
  for (unsigned long i=0; i<n; ++i)
    x[i] = cpx{double(i), double(i)};
  std::cout << n << " ";
  Clock c;
  // In-place FFT:
  // true, true arguments say to apply shuffling and to undo
  // transpositions. If the application was complex convolution, both
  // of these could be false to get the convolution result faster.
  apply_fft<DIF, true, true>(x);
  std::cout << c.tock() << std::endl;
  return 0;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/fft/fftw_measure_benchmark.cpp | .cpp | 1,374 | 60 | #include <iostream>
#include <cstring>
#include "fftw3.h"
#include "../../Utility/Clock.hpp"
// Benchmarks FFTW with the FFTW_MEASURE planner flag, reporting two
// timings for a length-2^LOG_N complex FFT: a cold start (planning +
// copy + transform) and a warm start reusing the plan; prints
// "<n> <cold_seconds> <warm_seconds>".
int main(int argc, char**argv) {
if (argc != 2) {
std::cerr << "Usage: fftw_benchmark <LOG_N>" << std::endl;
return 1;
}
int log_n = atoi(argv[1]);
int n = 1<<log_n;
// Actual inputs:
fftw_complex*x = new fftw_complex[n];
fftw_complex*y = new fftw_complex[n];
// Initialize input data:
for (int i=0; i<n; ++i) {
x[i][0] = i;
x[i][1] = i;
}
// Buffers (must be hard-coded in place to reuse plan)
fftw_complex*in = (fftw_complex*)fftw_malloc(n*sizeof(fftw_complex));
fftw_complex*out = (fftw_complex*)fftw_malloc(n*sizeof(fftw_complex));
std::cout << n << " ";
// Cold start:
Clock c;
const int shape[] = {n};
memcpy(in, x, n*sizeof(fftw_complex));
auto plan = fftw_plan_dft(sizeof(shape)/sizeof(int), &shape[0], in, out, FFTW_FORWARD, FFTW_MEASURE);
fftw_execute(plan);
memcpy(y, out, n*sizeof(fftw_complex));
std::cout << c.tock() << " ";
// Re-initialize the data (FFTW_MEASURE planning may overwrite `in`):
for (int i=0; i<n; ++i) {
x[i][0] = i;
x[i][1] = -i;
}
// Warm start:
c.tick();
memcpy(in, x, n*sizeof(fftw_complex));
fftw_execute(plan);
memcpy(y, out, n*sizeof(fftw_complex));
std::cout << c.tock() << std::endl;
fftw_destroy_plan(plan);
fftw_free(in);
fftw_free(out);
delete[] x;
delete[] y;
return 0;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/binary-additive/main.cpp | .cpp | 1,340 | 40 | #include "../../Evergreen/evergreen.hpp"
#include "../../Utility/inference_utilities.hpp"
// Builds a Bethe inference graph with n = 2^LOG_N binary variables
// (each given a randomly perturbed two-outcome prior), constrains
// variable n to be their sum via an AdditiveDependency, runs loopy
// belief propagation with a FIFO scheduler, and prints the posteriors
// of variables {0} and {n}. Note: rand() is never seeded, so runs are
// deterministic.
int main(int argc, char**argv) {
if (argc != 2) {
std::cerr << "Usage: binary_tree <LOG_N>" << std::endl;
exit(1);
}
int log_n = atoi(argv[1]);
// p = infinity selects max-convolution (rather than sum-product)
// semantics for the dependency; see the evergreen engine.
const double p=std::numeric_limits<double>::infinity();
BetheInferenceGraphBuilder<unsigned long> igb;
const unsigned long n=1ul<<log_n;
std::cout << "Creating dependencies..." << std::endl;
for (unsigned long i=0; i<=n; ++i) {
// Random prior in (0,1), offset by 0.01 so neither outcome has
// exactly zero mass:
double prob0 = rand() % 1000 / 999.0;
double prob[] = {prob0+0.01, 1-prob0+0.01};
// LabeledPMF<unsigned long> lpmf({i},PMF({0L},Tensor<double>::from_array(prob)));
LabeledPMF<unsigned long> lpmf({i}, PMF({0L},Tensor<double>::from_array(prob)));
igb.insert_dependency( TableDependency<unsigned long>(lpmf,p) );
}
std::vector<std::vector<unsigned long> > inputs;
for (unsigned long i=0; i<n; ++i)
inputs.push_back({i});
igb.insert_dependency( AdditiveDependency<unsigned long>(inputs,{n},p) );
std::cout << "Constructing graph..." << std::endl;
InferenceGraph<unsigned long> ig = igb.to_graph();
FIFOScheduler<unsigned long> fifo(0.001, 1e-16, 1ul<<32);
fifo.add_ab_initio_edges(ig);
BeliefPropagationInferenceEngine<unsigned long> bpie(fifo, ig);
estimate_and_print_posteriors(bpie, {{0}, {n}});
}
| C++ |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/peptide-decomposition/PeptideSolver.hpp | .hpp | 4,806 | 116 | #ifndef _PEPTIESOLVER_HPP
#define _PEPTIESOLVER_HPP
#include "../../Evergreen/evergreen.hpp"
#include "../../Utility/inference_utilities.hpp"
#include "../../Utility/Clock.hpp"
#include "Peptide.hpp"
#include "../../Utility/graph_to_dot.hpp"
// Builds and solves an inference graph that estimates amino-acid
// composition from an observed total mass and total hydrophobicity:
// one count variable per amino acid (uniform prior over
// [0, max_num_copies]), scaled by the residue's mass/hydrophobicity via
// ConstantMultiplierDependency, with AdditiveDependency constraints
// tying the scaled counts to the observed totals.
class PeptideSolver {
private:
Scheduler<std::string> & _sched;
InferenceGraph<std::string> *_ig_ptr;
// Dithering spread used when scaling count distributions:
static constexpr double DITHERING_SIGMA = 0.1;
// When goal mass or goal hydrophobicity is non-integral, distribute
// mass equally over both adjacent bins.
static constexpr double DITHERING_SIGMA_GOALS = 10000.0;
public:
// Note: This could easily use total mass / amino acid mass (+ some
// small amount for stability) to make a custom maximum number of
// copies for each amino acid. Same could be done with hydrophobicity,
// and the minimum of both maxes could be used:
PeptideSolver(double mass_goal, double hydrophobicity_goal, const double & p, const unsigned int max_num_copies, const double mass_discretization, const double hydrophobicity_discretization, Scheduler<std::string> & sched):
_sched(sched)
{
///////////////////////////
///// Construct Graph /////
///////////////////////////
BetheInferenceGraphBuilder<std::string> igb;
// One single-character label per amino acid:
std::vector<std::string> amino_acid_strings(Peptide::amino_acids.size());
for (unsigned int i=0; i<Peptide::amino_acids.size(); ++i)
amino_acid_strings[i] += Peptide::amino_acids[i];
// Vectors used later on for graph construction.
std::vector<std::vector<std::string> > aa_mass_singletons;
std::vector<std::vector<std::string> > aa_hydrophobicity_singletons;
//// Add Table Dependencies ////
// Make uniform distribution for each amino acid count
for (const std::string & aa : amino_acid_strings) {
// Note: max_num_copies could be inferred for each amino acid by
// dividing goal mass (or hydrophobicity) by the mass of each
// amino acid; but to start with, make it simple:
aa_mass_singletons.push_back({"mass " + aa});
aa_hydrophobicity_singletons.push_back({"hydrophobicity " + aa});
igb.insert_dependency( TableDependency<std::string>(make_nonneg_uniform(aa, max_num_copies), p) );
}
//// Add Constant Multiplication Dependencies ////
// For each amino acid, make constant mult. dep. for both mass and hydrophobicity.
for (unsigned long i=0; i<amino_acid_strings.size(); ++i) {
igb.insert_dependency( ConstantMultiplierDependency<std::string>({amino_acid_strings[i]}, {aa_mass_singletons[i]}, {Peptide::masses[i]*mass_discretization}, false, true, DITHERING_SIGMA) );
igb.insert_dependency( ConstantMultiplierDependency<std::string>({amino_acid_strings[i]}, {aa_hydrophobicity_singletons[i]}, {Peptide::hydrophobicities[i]*hydrophobicity_discretization}, false, true, DITHERING_SIGMA) );
}
// Make additive dep. for total mass.
LabeledPMF<std::string> total_mass = LabeledPMF<std::string>( {"total_mass"}, scaled_pmf_dither(PMF({1L},Tensor<double>({1ul},{1.0})), {mass_goal*mass_discretization}, DITHERING_SIGMA_GOALS) );
igb.insert_dependency( TableDependency<std::string>(total_mass, p) );
igb.insert_dependency( AdditiveDependency<std::string>(aa_mass_singletons, {"total_mass"}, p) );
// Make additive dep. for total hydrophobicity.
LabeledPMF<std::string> total_hydrophobicity = LabeledPMF<std::string>( {"total_hydrophobicity"}, scaled_pmf_dither(PMF({1L},Tensor<double>({1ul},{1.0})), {hydrophobicity_goal*hydrophobicity_discretization}, DITHERING_SIGMA_GOALS) );
igb.insert_dependency( TableDependency<std::string>(total_hydrophobicity, p) );
igb.insert_dependency( AdditiveDependency<std::string>(aa_hydrophobicity_singletons, {"total_hydrophobicity"}, p) );
// create inference graph
_ig_ptr = new InferenceGraph<std::string>(igb.to_graph());
// Dump the graph for visualization with graphviz:
write_graph_to_dot_file(*_ig_ptr, "peptide_graph.dot");
}
~PeptideSolver() {
delete _ig_ptr;
}
// Runs belief propagation with the supplied scheduler and prints the
// posterior count distribution of every amino acid, plus the wall time.
void solve_and_print() {
///////////////////////
///// Solve Graph /////
///////////////////////
//ig.print(std::cout);
std::cout << "solving..." << std::endl;
// apply message scheduler to inference graph
_sched.add_ab_initio_edges(*_ig_ptr);
// apply belief propagation to inference graph
BeliefPropagationInferenceEngine<std::string> bpie(_sched, *_ig_ptr);
std::vector<std::vector<std::string> > aa_singletons;
for (char aa : Peptide::amino_acids)
aa_singletons.push_back({std::string("")+aa});
Clock c;
c.tick();
auto result = bpie.estimate_posteriors(aa_singletons);
std::cout << "Time " << c.tock() << " in seconds" << std::endl;
for (auto res : result)
std::cout << res << std::endl;
}
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/peptide-decomposition/Peptide.hpp | .hpp | 3,175 | 105 | #ifndef _Peptide_HPP
#define _Peptide_HPP
#include <string>
#include <set>
#include <iostream>
#include <vector>
#include <map>
#include <assert.h>
// A peptide (amino-acid sequence) together with its total average mass
// and total wwHydrophobicity, both computed once at construction.
class Peptide {
protected:
  std::string _amino_acids;
  double _mass;
  double _hydrophobicity;

  // Asserts that every character of the sequence is one of the 20
  // canonical amino-acid codes. The valid set must be built from the
  // static amino_acids table: the previous version built it from the
  // sequence itself, so every character was trivially a member of the
  // set of its own characters and the check could never fail.
  // (As with the other checks here, this is a no-op under NDEBUG.)
  void verify_valid_characters() {
    std::set<char> amino_set(amino_acids.begin(), amino_acids.end());
    for (char c : _amino_acids) {
      if (amino_set.find(c) == amino_set.end()) {
        std::cerr << "Invalid character: " << c << std::endl;
        assert(false);
      }
    }
  }
  // Calculate the mass of the peptide (sum of residue average masses).
  void init_mass() {
    std::map<char, double> amino_acid_to_mass;
    for (unsigned long i=0; i<amino_acids.size(); ++i)
      amino_acid_to_mass[amino_acids[i]] = masses[i];
    _mass = 0.0;
    for (char aa : _amino_acids) {
      assert(amino_acid_to_mass.find(aa) != amino_acid_to_mass.end() && "Error: Amino acid not found.");
      _mass += amino_acid_to_mass[aa];
    }
  }
  // Calculate the hydrophobicity of the peptide (sum of residue values).
  void init_hydrophobicity() {
    std::map<char, double> amino_acid_to_hydrophobicity;
    for (unsigned long i=0; i<amino_acids.size(); ++i)
      amino_acid_to_hydrophobicity[amino_acids[i]] = hydrophobicities[i];
    _hydrophobicity = 0.0;
    for (char aa : _amino_acids) {
      assert(amino_acid_to_hydrophobicity.find(aa) != amino_acid_to_hydrophobicity.end() && "Error: Amino acid not found.");
      _hydrophobicity += amino_acid_to_hydrophobicity[aa];
    }
  }
public:
  // Parallel tables: amino_acids[i] has mass masses[i] and
  // hydrophobicity hydrophobicities[i].
  static const std::vector<char> amino_acids;
  static const std::vector<double> masses;
  static const std::vector<double> hydrophobicities;
  // Constructs from a sequence string; asserts on any character outside
  // the 20 canonical amino acids.
  Peptide(const std::string & seq):
    _amino_acids(seq)
  {
    verify_valid_characters();
    init_mass();
    init_hydrophobicity();
  }
  // Number of residues in the sequence.
  unsigned long size() const {
    return _amino_acids.size();
  }
  // Residue at position i (unchecked).
  char operator [] (unsigned long i) const {
    return _amino_acids[i];
  }
  const double & mass() const{
    return _mass;
  }
  const double & hydrophobicity() const{
    return _hydrophobicity;
  }
};
// {A:Ala, R:Arg, N:Asn, D:Asp, C:Cys, E:Glu, Q:Gln, G:Gly, H:His, I:Ile, L:Leu, K:Lys,
// M:Met, F:Phe, P:Pro, S:Ser, T:Thr ,W:Trp , Y:Tyr, V:Val}
const std::vector<char> Peptide::amino_acids = {'A','R','N','D','C','E','Q','G','H','I','L','K','M','F','P','S','T','W','Y','V'};
// http://www.matrixscience.com/help/aa_help.html (average mass)
const std::vector<double> Peptide::masses = {71.0779, 156.1857, 114.1026, 115.0874, 103.1429, 129.114, 128.1292, 57.0513, 137.1393, 113.1576, 113.1576, 128.1723, 131.1961, 147.1739, 97.1152, 87.0773, 101.1039, 186.2099, 163.1733, 99.1311};
// wwHydrophobicity from
// https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/midas/hydrophob.html
const std::vector<double> Peptide::hydrophobicities = {-0.17, -0.81, -0.42, -1.23, 0.24, -2.02, -0.58, -0.01, -0.96, 0.31, 0.56, -0.99, 0.23, 1.13, -0.45, -0.13, -0.14, 1.85, 0.94, -0.07};
// Prints "<sequence>: mass=<m> hydrophobicity=<h>".
std::ostream & operator<<(std::ostream & os, const Peptide & rhs) {
  for (unsigned long i=0; i<rhs.size(); ++i)
    os << rhs[i];
  os << ": mass=" << rhs.mass() << " hydrophobicity=" << rhs.hydrophobicity();
  return os;
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/peptide-decomposition/estimate_amino_acids_hydro.cpp | .cpp | 646 | 22 | #include "HydrophobicityPeptideSolver.hpp"
// Command-line driver: estimates amino-acid composition from an observed
// hydrophobicity alone, using HydrophobicityPeptideSolver with a FIFO
// belief-propagation scheduler.
int main(int argc, char**argv) {
if (argc != 5) {
std::cout << "Usage: hydro_pep_solver <observed hydrophobicity> <hydrophobicity discretization> <maximum peptide length> <p>" << std::endl;
exit(1);
}
double hydrophobicity = atof(argv[1]);
double hydrophobicity_discretization = atof(argv[2]);
unsigned long max_length = atoi(argv[3]);
double p = atof(argv[4]);
// Scheduler parameters: convergence threshold, epsilon, max iterations.
FIFOScheduler<std::string> sched(0.01, 1e-8, 10000);
HydrophobicityPeptideSolver pep_solver(hydrophobicity, p, max_length, hydrophobicity_discretization, sched);
pep_solver.solve_and_print();
return 0;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/peptide-decomposition/estimate_amino_acids.cpp | .cpp | 755 | 25 | #include "PeptideSolver.hpp"
// Command-line driver: estimates amino-acid composition from an observed
// mass AND hydrophobicity, using PeptideSolver with a FIFO
// belief-propagation scheduler.
int main(int argc, char**argv) {
if (argc != 7) {
std::cout << "Usage: pep_solver <observed mass> <observed hydrophobicity> <mass discretization> <hydrophobicity discretization> <maximum peptide length> <p>" << std::endl;
exit(1);
}
double mass = atof(argv[1]);
double hydrophobicity = atof(argv[2]);
double mass_discretization = atof(argv[3]);
double hydrophobicity_discretization = atof(argv[4]);
unsigned long max_length = atoi(argv[5]);
double p = atof(argv[6]);
// Scheduler parameters: convergence threshold, epsilon, max iterations.
FIFOScheduler<std::string> sched(0.01, 1e-8, 10000);
PeptideSolver pep_solver(mass, hydrophobicity, p, max_length, mass_discretization, hydrophobicity_discretization, sched);
pep_solver.solve_and_print();
return 0;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/peptide-decomposition/peptide_to_mass_and_hydrophibicity.cpp | .cpp | 290 | 15 | #include "Peptide.hpp"
// Print "<mass> <hydrophobicity>" for a peptide sequence given on the
// command line.
int main(int argc, char**argv) {
  // Expect exactly one user argument: the peptide sequence.
  if (argc != 2) {
    // Usage goes to stderr so stdout carries only the result:
    std::cerr << "Usage: <peptide sequence>" << std::endl;
    return 1;
  }
  std::string seq = argv[1];
  Peptide pep(seq);
  std::cout << pep.mass() << " " << pep.hydrophobicity() << std::endl;
  return 0;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/peptide-decomposition/estimate_amino_acids_mass.cpp | .cpp | 565 | 22 | #include "MassPeptideSolver.hpp"
// Estimate amino-acid composition from an observed mass alone.
int main(int argc, char**argv) {
  // Expect exactly four user arguments (see usage string below).
  if (argc != 5) {
    // Diagnostics belong on stderr (consistent with the isotope demo tools):
    std::cerr << "Usage: mass_pep_solver <observed mass> <mass discretization> <maximum peptide length> <p>" << std::endl;
    return 1;
  }
  double mass = atof(argv[1]);
  double mass_discretization = atof(argv[2]);
  // strtoul instead of atoi: avoids int truncation/overflow for an unsigned long.
  unsigned long max_length = strtoul(argv[3], nullptr, 10);
  double p = atof(argv[4]);
  // Dampening 0.01, convergence threshold 1e-8, at most 10000 iterations:
  FIFOScheduler<std::string> sched(0.01, 1e-8, 10000);
  MassPeptideSolver pep_solver(mass, p, max_length, mass_discretization, sched);
  pep_solver.solve_and_print();
  return 0;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/peptide-decomposition/MassPeptideSolver.hpp | .hpp | 3,289 | 97 | #ifndef _MASSPEPTIDESOLVER_HPP
#define _MASSPEPTIDESOLVER_HPP
#include "../../Evergreen/evergreen.hpp"
#include "../../Utility/inference_utilities.hpp"
#include "../../Utility/Clock.hpp"
#include "Peptide.hpp"
#include "../../Utility/graph_to_dot.hpp"
// Infers posterior distributions over the amino-acid composition of a
// peptide from its observed (discretized) total mass, using loopy belief
// propagation on a Bethe-factorized inference graph.
class MassPeptideSolver {
private:
  Scheduler<std::string> & _sched;
  // Owned: allocated in the constructor, released in the destructor.
  InferenceGraph<std::string> *_ig_ptr;
  // Std. dev. used to dither scaled values onto the integer grid:
  static constexpr double DITHERING_SIGMA = 0.1;
  // The value beyond which Gaussian tails are no longer considered:
  static constexpr double GAUSSIAN_TAIL_EPSILON = 0.005;
public:
  // mass_goal: observed peptide mass (pre-discretization).
  // p: p-norm parameter used by all probabilistic dependencies.
  // max_num_copies: maximum count considered for any single amino acid.
  // mass_discretization: scale factor applied when discretizing masses.
  MassPeptideSolver(double mass_goal, const double & p, const unsigned int max_num_copies, const double mass_discretization, Scheduler<std::string> & sched):
    _sched(sched)
  {
    ///////////////////////////
    ///// Construct Graph /////
    ///////////////////////////
    BetheInferenceGraphBuilder<std::string> igb;
    std::vector<std::string> amino_acid_strings(Peptide::amino_acids.size());
    for (unsigned int i=0; i<Peptide::amino_acids.size(); ++i)
      amino_acid_strings[i] += Peptide::amino_acids[i];
    // Vectors used later on for graph construction.
    std::vector<std::vector<std::string> > aa_mass_singletons;
    //// Add Table Dependencies ////
    // Make uniform distribution for each amino acid count
    for (const std::string & aa : amino_acid_strings) {
      aa_mass_singletons.push_back({"mass_" + aa});
      igb.insert_dependency( TableDependency<std::string>(make_nonneg_uniform(aa, max_num_copies), p) );
    }
    //// Add Constant Multiplication Dependencies ////
    // mass_<aa> = count(<aa>) * masses[i], scaled onto the discretized grid:
    for (unsigned long i=0; i<amino_acid_strings.size(); ++i)
      igb.insert_dependency( ConstantMultiplierDependency<std::string>({amino_acid_strings[i]}, {aa_mass_singletons[i]}, {Peptide::masses[i]*mass_discretization}, false, true, DITHERING_SIGMA) );
    // Make additive dep. for total mass.
    LabeledPMF<std::string> total_mass = LabeledPMF<std::string>( {"total_mass"}, scaled_pmf_dither(PMF({1L},Tensor<double>({1ul},{1.0})), {mass_goal*mass_discretization}, DITHERING_SIGMA) );
    igb.insert_dependency( TableDependency<std::string>(total_mass, p) );
    igb.insert_dependency( AdditiveDependency<std::string>(aa_mass_singletons, {"total_mass"}, p) );
    // create inference graph
    _ig_ptr = new InferenceGraph<std::string>(igb.to_graph());
    write_graph_to_dot_file(*_ig_ptr, "mass_peptide_graph.dot");
  }
  // Non-copyable: _ig_ptr is an owning raw pointer, so the implicitly
  // generated copy operations would lead to a double delete.
  MassPeptideSolver(const MassPeptideSolver &) = delete;
  MassPeptideSolver & operator=(const MassPeptideSolver &) = delete;
  ~MassPeptideSolver() {
    delete _ig_ptr;
  }
  // Run belief propagation and print the posterior for every amino acid.
  void solve_and_print() {
    ///////////////////////
    ///// Solve Graph /////
    ///////////////////////
    //ig.print(std::cout);
    std::cout << "solving..." << std::endl;
    // apply message scheduler to inference graph
    _sched.add_ab_initio_edges(*_ig_ptr);
    // apply belief propagation to inference graph
    BeliefPropagationInferenceEngine<std::string> bpie(_sched, *_ig_ptr);
    std::vector<std::vector<std::string> > aa_singletons;
    for (char aa : Peptide::amino_acids)
      aa_singletons.push_back({std::string("")+aa});
    Clock c;
    c.tick();
    auto result = bpie.estimate_posteriors(aa_singletons);
    std::cout << "Time " << c.tock() << " in seconds" << std::endl;
    // const ref: LabeledPMF copies per iteration are unnecessary here.
    for (const auto & res : result)
      std::cout << res << std::endl;
  }
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/peptide-decomposition/HydrophobicityPeptideSolver.hpp | .hpp | 3,472 | 96 | #ifndef _HYDROPHOBICITYPEPTIDESOLVER_HPP
#define _HYDROPHOBICITYPEPTIDESOLVER_HPP
#include "../../Evergreen/evergreen.hpp"
#include "../../Utility/inference_utilities.hpp"
#include "../../Utility/Clock.hpp"
#include "Peptide.hpp"
#include "../../Utility/graph_to_dot.hpp"
// Infers posterior distributions over the amino-acid composition of a
// peptide from its observed (discretized) total hydrophobicity, using loopy
// belief propagation on a Bethe-factorized inference graph.
class HydrophobicityPeptideSolver {
private:
  Scheduler<std::string> & _sched;
  // Owned: allocated in the constructor, released in the destructor.
  InferenceGraph<std::string> *_ig_ptr;
  // Std. dev. used to dither scaled values onto the integer grid:
  static constexpr double DITHERING_SIGMA = 0.1;
  // The value beyond which Gaussian tails are no longer considered:
  static constexpr double GAUSSIAN_TAIL_EPSILON = 0.005;
public:
  // hydrophobicity_goal: observed hydrophobicity (pre-discretization).
  // p: p-norm parameter used by all probabilistic dependencies.
  // max_num_copies: maximum count considered for any single amino acid.
  // hydrophobicity_discretization: scale factor used for discretization.
  HydrophobicityPeptideSolver(double hydrophobicity_goal, const double & p, const unsigned int max_num_copies, const double hydrophobicity_discretization, Scheduler<std::string> & sched):
    _sched(sched)
  {
    ///////////////////////////
    ///// Construct Graph /////
    ///////////////////////////
    BetheInferenceGraphBuilder<std::string> igb;
    std::vector<std::string> amino_acid_strings(Peptide::amino_acids.size());
    for (unsigned int i=0; i<Peptide::amino_acids.size(); ++i)
      amino_acid_strings[i] += Peptide::amino_acids[i];
    // Vectors used later on for graph construction.
    std::vector<std::vector<std::string> > aa_hydrophobicity_singletons;
    //// Add Table Dependencies ////
    // Make uniform distribution for each amino acid count
    for (const std::string & aa : amino_acid_strings) {
      aa_hydrophobicity_singletons.push_back({"hydrophobicity_" + aa});
      igb.insert_dependency( TableDependency<std::string>(make_nonneg_uniform(aa, max_num_copies), p) );
    }
    //// Add Constant Multiplication Dependencies ////
    // hydrophobicity_<aa> = count(<aa>) * hydrophobicities[i], scaled:
    for (unsigned long i=0; i<amino_acid_strings.size(); ++i) {
      igb.insert_dependency( ConstantMultiplierDependency<std::string>({amino_acid_strings[i]}, {aa_hydrophobicity_singletons[i]}, {Peptide::hydrophobicities[i]*hydrophobicity_discretization}, false, true, DITHERING_SIGMA) );
    }
    // Make additive dep. for total hydrophobicity.
    LabeledPMF<std::string> total_hydrophobicity = LabeledPMF<std::string>( {"total_hydrophobicity"}, scaled_pmf_dither(PMF({1L},Tensor<double>({1ul},{1.0})), {hydrophobicity_goal*hydrophobicity_discretization}, DITHERING_SIGMA) );
    igb.insert_dependency( TableDependency<std::string>(total_hydrophobicity, p) );
    igb.insert_dependency( AdditiveDependency<std::string>(aa_hydrophobicity_singletons, {"total_hydrophobicity"}, p) );
    // create inference graph
    _ig_ptr = new InferenceGraph<std::string>(igb.to_graph());
    write_graph_to_dot_file(*_ig_ptr, "hydro_peptide_graph.dot");
  }
  // Non-copyable: _ig_ptr is an owning raw pointer, so the implicitly
  // generated copy operations would lead to a double delete.
  HydrophobicityPeptideSolver(const HydrophobicityPeptideSolver &) = delete;
  HydrophobicityPeptideSolver & operator=(const HydrophobicityPeptideSolver &) = delete;
  ~HydrophobicityPeptideSolver() {
    delete _ig_ptr;
  }
  // Run belief propagation and print the posterior for every amino acid.
  void solve_and_print() {
    ///////////////////////
    ///// Solve Graph /////
    ///////////////////////
    std::cout << "solving..." << std::endl;
    // apply message scheduler to inference graph
    _sched.add_ab_initio_edges(*_ig_ptr);
    // apply belief propagation to inference graph
    BeliefPropagationInferenceEngine<std::string> bpie(_sched, *_ig_ptr);
    std::vector<std::vector<std::string> > aa_singletons;
    for (char aa : Peptide::amino_acids)
      aa_singletons.push_back({std::string("")+aa});
    Clock c;
    c.tick();
    auto result = bpie.estimate_posteriors(aa_singletons);
    std::cout << "Time " << c.tock() << " in seconds" << std::endl;
    // const ref: LabeledPMF copies per iteration are unnecessary here.
    for (const auto & res : result)
      std::cout << res << std::endl;
  }
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/gc-rich-hmm/HMMScheduler.hpp | .hpp | 1,234 | 38 | #ifndef _HMMSCHEDULER_HPP
#define _HMMSCHEDULER_HPP
// A small, custom-made scheduler for HMMs. HMM should be constructed
// manually (without hyperedge types that would be produced by
// BetheGraphBuilder).
#include "../../Evergreen/evergreen.hpp"
template <typename VARIABLE_KEY>
class HMMScheduler : public FIFOScheduler<VARIABLE_KEY> {
public:
HMMScheduler():
// HMM graphs should have no loops, and hence dampening and
// convergence threshold are moot. Likewise, use maximum unsigned
// long as allowed number of iterations (convergence will occur
// when no messages are woken).
FIFOScheduler<VARIABLE_KEY>(0.0, 1e-6, -1ul)
{}
void add_ab_initio_edges(InferenceGraph<VARIABLE_KEY> & graph) {
for (Edge<VARIABLE_KEY>* edge : graph.edges_ready_ab_initio()) {
// Only allow ab initio edges coming from leaf nodes. Note that
// this will not guaranteee all messages will be passed on
// general graphs.
bool source_is_leaf = edge->source->number_edges() == 1;
if (source_is_leaf)
this->_queue.push_if_not_in_queue(edge);
}
}
// Note: An alternative approach would be to simply hard-code
// message passing by overriding run_until_convergence.
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/gc-rich-hmm/HMM.hpp | .hpp | 4,188 | 132 | #ifndef _ISOTOPESOLVER_HPP
#define _ISOTOPESOLVER_HPP
#include "../../Evergreen/evergreen.hpp"
#include "../../Utility/Clock.hpp"
class HMM {
private:
PMF _prior;
PMF _transition;
PMF _emission;
// std::vector<unsigned long> _hidden_variables;
// std::vector<unsigned long> _observed_variables;
std::vector<std::string> _hidden_variables;
std::vector<std::string> _observed_variables;
const std::string & _evidence;
InferenceGraph<std::string> *_ig;
Scheduler<std::string> & _sched;
// Note: could be done faster via a table of 256 chars --> indices
// in {G,A,T,C}.
PMF create_nucleotide_evidence_pmf(char gatc) {
Vector<double> evidence({0.0, 0.0, 0.0, 0.0});
switch (gatc) {
case 'G':
evidence[0] = 1.0;
break;
case 'A':
evidence[1] = 1.0;
break;
case 'T':
evidence[2] = 1.0;
break;
case 'C':
evidence[3] = 1.0;
break;
default:
assert(false && "Not a valid nucleotide 'G' 'A' 'T' or 'C'");
break;
}
return PMF({0L}, Tensor<double>({4ul}, evidence));
}
void construct_graph(double p) {
// std::vector<MessagePasser<unsigned long>* > mps;
std::vector<MessagePasser<std::string>* > mps;
const unsigned long n = _evidence.size();
HUGINMessagePasser<std::string>*current_node = new HUGINMessagePasser<std::string>(LabeledPMF<std::string>({_hidden_variables[0]}, _prior), p);
for(unsigned long i=0; i<n; ++i) {
// Create observed DNA evidence:
HUGINMessagePasser<std::string>*hmp_data = new HUGINMessagePasser<std::string>(LabeledPMF<std::string>({_observed_variables[i]}, create_nucleotide_evidence_pmf(_evidence[i])), p);
mps.push_back(hmp_data);
// Create emission between hypotheses and observed DNA evidence:
HUGINMessagePasser<std::string>*hmp_emission = new HUGINMessagePasser<std::string>(LabeledPMF<std::string>({_hidden_variables[i], _observed_variables[i]}, _emission), p);
mps.push_back(hmp_emission);
hmp_emission->bind_to(hmp_data, new std::vector<std::string>{_observed_variables[i]});
// Note: the above two HUGINMessagePasser types could be
// compressed into one, which basically inlines hmp_emission
// conditional on data=_evidence[i].
current_node->bind_to(hmp_emission, new std::vector<std::string>{_hidden_variables[i]});
mps.push_back(current_node);
// Create transition to next nucleotide (if not at the final node):
if (i+1 < n) {
HUGINMessagePasser<std::string>*hmp_transition = new HUGINMessagePasser<std::string>(LabeledPMF<std::string>({_hidden_variables[i], _hidden_variables[i+1]}, _transition), p);
current_node->bind_to(hmp_transition, new std::vector<std::string>{_hidden_variables[i]});
mps.push_back(hmp_transition);
current_node = new HUGINMessagePasser<std::string>(p);
hmp_transition->bind_to(current_node, new std::vector<std::string>{_hidden_variables[i+1]});
}
}
_ig = new InferenceGraph<std::string>(std::move(mps));
}
public:
HMM(const PMF & prior, const PMF & transition, const PMF & emission, const std::string & evidence, double p, Scheduler<std::string> & sched):
_prior(prior),
_transition(transition),
_emission(emission),
_evidence(evidence),
_sched(sched)
{
for(unsigned long i=0; i<_evidence.size(); ++i) {
_hidden_variables.push_back("H" + to_string(i));
_observed_variables.push_back("D" + to_string(i));
}
// create inference graph
construct_graph(p);
}
~HMM() {
delete _ig;
}
std::vector<LabeledPMF<std::string> > solve() {
std::cout << "solving..." << std::endl;
// apply belief propagation to inference graph
_sched.add_ab_initio_edges(*_ig);
BeliefPropagationInferenceEngine<std::string> bpie(_sched, *_ig);
Clock c;
std::vector<std::vector<std::string> > hidden_variable_singletons;
for(unsigned long i=0; i<_evidence.size(); ++i)
hidden_variable_singletons.push_back({_hidden_variables[i]});
auto result = bpie.estimate_posteriors(hidden_variable_singletons);
c.ptock();
return result;
}
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/gc-rich-hmm/main.cpp | .cpp | 1,842 | 53 | #include <fstream>
#include <iostream>
#include "../../Evergreen/evergreen.hpp"
#include "HMM.hpp"
#include "HMMScheduler.hpp"
const double p = std::numeric_limits<double>::infinity(); // constant for p-norm approximation
// Load a nucleotide sequence from a FASTA-style file: header ('>') and
// comment (';') lines are skipped, and the remaining lines are concatenated.
// (The previous version kept only the last line's token, which silently
// truncates multi-line FASTA records and would include the header token.)
std::string load_sequence(std::string file) {
  std::ifstream myfile(file);
  assert(myfile.is_open() == true && "Error: File not found");
  std::string line;
  std::string sequence;
  while ( std::getline(myfile,line) ) {
    // Skip FASTA header / comment lines:
    if ( ! line.empty() && (line[0] == '>' || line[0] == ';') )
      continue;
    // Take the first whitespace-delimited token (drops stray spaces / CR):
    std::stringstream ss_input(line);
    std::string token;
    if (ss_input >> token)
      sequence += token;
  }
  return sequence;
}
int main() {
  // [Pr(H_1 = 0), Pr(H_1 = 1)]
  PMF prior({0L}, Tensor<double>({2ul}, {0.996, 0.004}));
  // [Pr(H_{i+1} = 0 | H_i = 0), Pr(H_{i+1} = 1 | H_i = 0), Pr(H_{i+1} = 0 | H_i = 1), Pr(H_{i+1} = 1 | H_i = 1)]
  PMF transition({0L,0L}, Tensor<double>({2ul,2ul},{0.99957, 0.00043, 0.00116954, 0.9988305}));
  // [Pr(D_i = G | H_i = 0), Pr(D_i = A | H_i = 0), Pr(D_i = T | H_i = 0), Pr(D_i = C | H_i = 0),
  //  Pr(D_i = G | H_i = 1), Pr(D_i = A | H_i = 1), Pr(D_i = T | H_i = 1), Pr(D_i = C | H_i = 1)]
  // (second row was previously mislabeled "H_i = 0"; it is the H_i = 1 row,
  //  which emits G/C with higher probability, i.e. the GC-rich state)
  PMF emission({0L, 0L}, Tensor<double>({2ul,4ul},{0.209, 0.291, 0.291, 0.209, 0.331, 0.169, 0.169, 0.331}));
  // Data obtained from: https://www.ncbi.nlm.nih.gov/nuccore/CP000037
  std::string sequence = load_sequence("Shigella_boydii.fasta");
  // General-purpose scheduler: no dampening, 1e-3 convergence threshold,
  // effectively unbounded iterations (-1ul):
  std::cout << "RandomSubtreeScheduler" << std::endl;
  RandomSubtreeScheduler<std::string> rs_sched(0.0, 1e-3, -1ul);
  HMM hmm(prior, transition, emission, sequence, p, rs_sched);
  auto posteriors = hmm.solve();
  std::cout << std::endl;
  // This custom HMMScheduler is faster, but less general:
  std::cout << "HMMScheduler" << std::endl;
  HMMScheduler<std::string> hmm_sched;
  HMM hmm2(prior, transition, emission, sequence, p, hmm_sched);
  auto posteriors2 = hmm2.solve();
  std::cout << std::endl;
  return 0;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/brute-force-vs-loopy/main.cpp | .cpp | 1,568 | 41 | #include <string>
#include "../../Evergreen/evergreen.hpp"
#include "../../Utility/inference_utilities.hpp"
const double p = 16.0;
// Compute exact posteriors for `vars` by brute-force marginalization over
// all of `deps`, then print them followed by a blank line.
void brute_force(const std::vector<TableDependency<std::string> > & deps, const std::vector<std::vector<std::string> > & vars) {
  BruteForceInferenceEngine<std::string> engine(deps, p);
  estimate_and_print_posteriors(engine, vars);
  std::cout << std::endl;
}
// Estimate posteriors for `vars` via loopy belief propagation on a
// Bethe-factorized graph built from `deps`, then print them and a blank line.
void loopy(const std::vector<TableDependency<std::string> > & deps, const std::vector<std::vector<std::string> > & vars) {
  BetheInferenceGraphBuilder<std::string> builder;
  for (const auto & dep : deps)
    builder.insert_dependency(dep);
  InferenceGraph<std::string> graph = builder.to_graph();
  // No dampening, 1e-8 convergence threshold, at most 10000 iterations:
  FIFOScheduler<std::string> scheduler(0.0, 1e-8, 10000);
  scheduler.add_ab_initio_edges(graph);
  BeliefPropagationInferenceEngine<std::string> engine(scheduler, graph);
  estimate_and_print_posteriors(engine, vars);
  std::cout << std::endl;
}
int main() {
TableDependency<std::string> td1(LabeledPMF<std::string>({"a", "b"}, PMF({0L,0L}, Tensor<double>({2ul,2ul}, {.87, .13, .74, .26}))), p);
TableDependency<std::string> td2(LabeledPMF<std::string>({"b", "c"}, PMF({0L,0L}, Tensor<double>({2ul,2ul}, {.4, .2, .1, .3}))), p);
TableDependency<std::string> td3(LabeledPMF<std::string>({"a", "c"}, PMF({0L,0L}, Tensor<double>({2ul,2ul}, {.3, .1, .45, .15}))), p);
std::cout << "Brute force" << std::endl;
brute_force({td1,td2,td3}, {{"a","b"}, {"b","c"}});
std::cout << "Loopy belief propagation" << std::endl;
loopy({td1,td2,td3}, {{"a","b"}, {"b","c"}});
return 0;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/isotope-quantification/IsotopeQuantifier.hpp | .hpp | 12,230 | 293 | #ifndef _ISOTOPEQUANTIFIER_HPP
#define _ISOTOPEQUANTIFIER_HPP
#include <string>
#include <sstream>
#include <fstream>
#include <iostream>
#include "../../Evergreen/evergreen.hpp"
#include "Elements.hpp"
#include "../../Utility/inference_utilities.hpp"
#include "../../Utility/to_string.hpp"
#include "../../Utility/Clock.hpp"
#include "../../Utility/L1Regularization.hpp"
#include "../../Utility/graph_to_dot.hpp"
#include <fstream>
// To consider missing peaks, insert them into the spectra as values
// with small or zero intensity.
class IsotopeQuantifier {
private:
const Elements _elements;
const unsigned int _prior_maximum_copies_of_element;
const unsigned int _maximum_number_unique_elements;
const unsigned int _intensity_discretization;
const double _sigma_observed_intensities;
double _mass_discretization;
static constexpr double DITHERING_SIGMA = 0.1;
// The value beyond which Gaussian tails are no longer considered:
static constexpr double GAUSSIAN_TAIL_EPSILON = 1e-32;
std::map<double, std::vector<Isotope> > _theoretical_peaks_to_isotopes;
bool _include_unobserved_peaks;
// observed:
std::map<double, double> _observed_peak_masses_to_intensities;
std::set<std::string> _used_elements;
std::set<std::string> _used_isotopes;
Scheduler<std::string> & _scheduler;
InferenceGraph<std::string>* _ig_ptr;
static const std::string intensity_prefix;
void load_peaks_from_file_and_discretize(const std::string & peak_file){
std::ifstream fin(peak_file);
assert(fin.is_open() && "Error: File not found");
std::string garbage;
fin >> garbage;
assert(garbage == "mass_discretization");
fin >> _mass_discretization;
std::string line;
double mass;
double intensity;
while ( fin >> mass >> intensity ) {
if (_observed_peak_masses_to_intensities.find(mass) == _observed_peak_masses_to_intensities.end())
_observed_peak_masses_to_intensities[mass] = 0.0;
_observed_peak_masses_to_intensities[mass] += intensity;
}
fin.close();
_observed_peak_masses_to_intensities = mass_discretized_peaks(_observed_peak_masses_to_intensities, _mass_discretization, _include_unobserved_peaks);
}
void map_observed_peaks_to_isotopes_with_similar_mass() {
for (const std::pair<std::string, std::vector<Isotope> > & ele: _elements) {
for (const Isotope & iso: ele.second) {
const double discretized_mass = round(iso.mass * _mass_discretization) / _mass_discretization;
auto iter = _observed_peak_masses_to_intensities.find(discretized_mass);
// If there is an observed peak at this discretized_mass:
if (iter != _observed_peak_masses_to_intensities.end()) {
// theoretical mass for isotope matches an observed mass
_theoretical_peaks_to_isotopes[discretized_mass].push_back(iso);
_used_isotopes.insert(iso.name + " " + to_string(iso.mass));
_used_elements.insert(ele.first);
}
}
}
}
void add_regularization(InferenceGraphBuilder<std::string> & igb, double p) {
LabeledPMF<std::string> sum_of_indicators = make_nonneg_uniform<std::string>("SumOfIndicators", _maximum_number_unique_elements);
std::vector<std::string> indicators_for_used_elements(_used_elements.size());
std::vector<std::string> used_elements_vector(_used_elements.begin(), _used_elements.end());
for (unsigned long i=0; i<indicators_for_used_elements.size(); ++i)
indicators_for_used_elements[i] = "Indicator[ " + used_elements_vector[i] + ">0 ]";
L1Regularization<std::string>::apply(igb, used_elements_vector, indicators_for_used_elements, sum_of_indicators, p, _prior_maximum_copies_of_element);
}
void print_isotopes_matching_observed_peaks() {
std::cout << "discretized data & matching isotopes" << std::endl;
for (auto pr : _observed_peak_masses_to_intensities) {
std::cout << pr.first << " " << pr.second << " ";
auto iter = _theoretical_peaks_to_isotopes.find(pr.first);
if (iter != _theoretical_peaks_to_isotopes.end()) {
const std::vector<Isotope> & matching_isos = iter->second;
for (const Isotope & iso : matching_isos) {
std::cout << iso << " ";
}
}
std::cout << std::endl;
}
std::cout << std::endl;
}
void add_constant_multipliers(InferenceGraphBuilder<std::string> & igb) {
// Make constant multiplier dependencies that say isotope
// abundance is some constant times the element abundance.
std::set<Isotope> isotopes_matching_any_observed;
for (const std::pair<double, std::vector<Isotope> > & peak_and_isotopes: _theoretical_peaks_to_isotopes)
for (const Isotope & iso: peak_and_isotopes.second)
isotopes_matching_any_observed.insert(iso);
for (const Isotope & iso : isotopes_matching_any_observed) {
std::string isotope_id = intensity_prefix + iso.name + " " + to_string(iso.mass);
// false, true --> when multiplying don't interpolate (since
// we're starting with counts), but interpolate when dividing:
igb.insert_dependency( ConstantMultiplierDependency<std::string>({iso.name}, {isotope_id}, {iso.abundance * _intensity_discretization}, false, true, DITHERING_SIGMA) );
}
}
void add_gaussians_for_observed_peaks(InferenceGraphBuilder<std::string> & igb, double p) {
// Make table dependency for intensity of each peak_i, where
// intensity is a nonnegative gaussian distribution with
// mean=observed intensity and standard
// deviation=_sigma_observed_intensities.
for (const std::pair<double, double> & peak: _observed_peak_masses_to_intensities ) {
double observed_mass = peak.first;
std::string peak_var = intensity_prefix + "peak" + to_string(observed_mass);
double pre_discretized_observed_intensity = peak.second * _intensity_discretization;
auto nonneg_gaussian_for_peak = make_nonneg_pseudo_gaussian(peak_var, pre_discretized_observed_intensity, _sigma_observed_intensities, GAUSSIAN_TAIL_EPSILON, long(pre_discretized_observed_intensity*10), 1e-5);
igb.insert_dependency( TableDependency<std::string>(nonneg_gaussian_for_peak, p));
}
}
void add_additive_dependencies(InferenceGraphBuilder<std::string> & igb, double p) {
// Make additive dep. for intensity of peak_i (it should equal the
// sum of the quantities of the element isotopes matching it).
for (const std::pair<double, std::vector<Isotope> > & peak : _theoretical_peaks_to_isotopes) {
double observed_mass = peak.first;
std::string peak_var = intensity_prefix + "peak" + to_string(observed_mass);
std::vector<std::vector<std::string> > isotopes_that_sum_to_this_peak;
for(const Isotope & responsible_iso : peak.second) {
assert(peak.second.size() != 0 && "Observed peak did not match any theoretical element isotope peaks");
isotopes_that_sum_to_this_peak.push_back({ intensity_prefix + responsible_iso.name + " " + to_string(responsible_iso.mass) });
}
igb.insert_dependency( AdditiveDependency<std::string>(isotopes_that_sum_to_this_peak, {peak_var}, p) );
}
}
void build_graph(const double p) {
BetheInferenceGraphBuilder<std::string> igb;
// Add uniform priors for each candidate element:
for (const std::string el : _used_elements)
igb.insert_dependency( TableDependency<std::string>(make_nonneg_uniform(el, _prior_maximum_copies_of_element), p) );
// Add regularization if it is used:
if (_maximum_number_unique_elements != 0)
add_regularization(igb, p);
add_constant_multipliers(igb);
add_gaussians_for_observed_peaks(igb, p);
add_additive_dependencies(igb, p);
// Create inference graph from the graph builder:
_ig_ptr = new InferenceGraph<std::string>(igb.to_graph());
write_graph_to_dot_file(*_ig_ptr, "isotope_graph.dot");
}
public:
// Default value of _maximum_number_unique_elements=0 --> don't use regularization.
IsotopeQuantifier(const std::string & peak_file, const Elements & ele, Scheduler<std::string> & scheduler, const double p, unsigned long intensity_discretization, const double standard_deviation_observed_intensities, unsigned long prior_maximum_copies_of_element, bool include_unobserved_peaks, unsigned long maximum_number_unique_elements=0):
_elements(ele),
_prior_maximum_copies_of_element(prior_maximum_copies_of_element),
_maximum_number_unique_elements(maximum_number_unique_elements),
_intensity_discretization(intensity_discretization),
_sigma_observed_intensities(standard_deviation_observed_intensities*intensity_discretization),
_include_unobserved_peaks(include_unobserved_peaks),
_scheduler(scheduler)
{
load_peaks_from_file_and_discretize(peak_file);
map_observed_peaks_to_isotopes_with_similar_mass();
build_graph(p);
print_isotopes_matching_observed_peaks();
}
static std::map<double, double> theoretical_peaks_from_chemical_formula(const std::map<std::string, unsigned int> & formula, const Elements & element_collection) {
std::map<double, double> result;
for (const std::pair<std::string, unsigned int> & element: formula) {
assert(element.second != 0 && "Error: Element count must be >0");
for(const Isotope & iso: element_collection.get(element.first) ) {
auto iter = result.find(iso.mass);
// Just in case two values have identical masses:
if (iter == result.end())
result[iso.mass] = 0.0;
result[iso.mass] += iso.abundance*element.second;
}
}
return result;
}
static std::map<double, double> mass_discretized_peaks(const std::map<double, double> & exact, double mass_discretization, bool include_unobserved_peaks) {
// Get the maximum by using the fact that map is sorted ascending (add 1 because of 0 bin):
std::vector<double> pre_result( (unsigned long)ceil(exact.rbegin()->first * mass_discretization) + 1, 0.0 );
for (const std::pair<double, double> & mass_and_intensity : exact) {
const double mass = mass_and_intensity.first;
const double intensity = mass_and_intensity.second;
const long discretized_mass = round(mass*mass_discretization);
pre_result[discretized_mass] += intensity;
}
std::map<double, double> result;
for (unsigned long i=0; i<pre_result.size(); ++i) {
if (pre_result[i] > 0.0 || include_unobserved_peaks) {
double mass = double(i) / mass_discretization;
// add to result map
if (result.find(mass) == result.end())
result[mass] = 0.0;
result[mass] += pre_result[i];
}
}
return result;
}
// mass_discretization = 100 means that accuracy is to 1/100 dalton
// (pre rounding).
static std::map<double, double> mass_discretized_theoretical_peaks_from_chemical_formula(const std::map<std::string, unsigned int> & formula, const Elements & element_collection, double mass_discretization, bool include_unobserved_peaks) {
std::map<double, double> exact = theoretical_peaks_from_chemical_formula(formula, element_collection);
return mass_discretized_peaks(exact, mass_discretization, include_unobserved_peaks);
}
void run_and_print_results() {
// apply message scheduler to inference graph
_scheduler.add_ab_initio_edges(*_ig_ptr);
// apply belief propagation to inference graph
BeliefPropagationInferenceEngine<std::string> bpie(_scheduler, *_ig_ptr);
Clock c;
c.tick();
std::vector<std::vector<std::string> > element_singletons;
for (const std::string & el : _used_elements)
element_singletons.push_back( {el} );
auto result = bpie.estimate_posteriors(element_singletons);
std::cout << "Time " << c.tock() << " in seconds" << std::endl;
for (auto res : result)
std::cout << res << std::endl;
std::cout << "Elements matching no observed peaks (treat as having 0 abundance with probability ~1):" << std::endl;
for (const std::pair<std::string, std::vector<Isotope> > & ele: _elements ) {
if ( _used_elements.find(ele.first) == _used_elements.end() ){
std::cout << ele.first << " " << PMF({0L}, Tensor<double>({1ul},{1.0})) << std::endl;
}
}
}
};
const std::string IsotopeQuantifier::intensity_prefix = "intensity ";
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/isotope-quantification/isotope_quantifier.cpp | .cpp | 1,611 | 45 | #include "IsotopeQuantifier.hpp"
const Elements elements("element_isotope_list.txt");
// Print the command-line synopsis to stderr and terminate with status 1.
void print_usage() {
  static const char usage[] =
    "Usage: isotope_quant <peak tsv filename> <intensity discretization> <intensity Gaussian std. dev> <maximum number copies for element> {missing, no_missing} <p> [maximum number of unique elements]";
  std::cerr << usage << std::endl;
  exit(1);
}
// Parse command-line arguments, echo them to stderr, and run the isotope
// quantification solver.
int main(int argc, char**argv) {
  const bool has_regularizer_arg = (argc == 8);
  if (argc != 7 && ! has_regularizer_arg)
    print_usage();
  const std::string peak_file = argv[1];
  std::cerr << "peak_file = " << peak_file << std::endl;
  const int intensity_discretization = atoi(argv[2]);
  std::cerr << "intensity_discretization = " << intensity_discretization << std::endl;
  const double intensity_std_dev = atof(argv[3]);
  std::cerr << "intensity_std_dev = " << intensity_std_dev << std::endl;
  const int maximum_copies_per_element = atoi(argv[4]);
  std::cerr << "maximum_copies_per_element = " << maximum_copies_per_element << std::endl;
  const std::string missing_str = argv[5];
  const bool include_missing = (missing_str == "missing");
  if ( ! include_missing && missing_str != "no_missing" )
    print_usage();
  const double p = atof(argv[6]);
  int maximum_unique_elements = 0;
  if (has_regularizer_arg) {
    maximum_unique_elements = atoi(argv[7]);
    std::cerr << "maximum_unique_elements = " << maximum_unique_elements << std::endl;
  }
  // Dampening 0.01, convergence threshold 1e-16, at most 1e6 iterations:
  FIFOScheduler<std::string> sched(0.01, 1e-16, 1000000ul);
  IsotopeQuantifier ms_solver(peak_file, elements, sched, p, intensity_discretization, intensity_std_dev, maximum_copies_per_element, include_missing, maximum_unique_elements);
  ms_solver.run_and_print_results();
  return 0;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/isotope-quantification/Elements.hpp | .hpp | 2,069 | 85 | #ifndef _Elements_HPP
#define _Elements_HPP
// One isotope row from the element table.
struct Isotope {
  std::string name;  // element symbol this isotope belongs to (e.g. "Ca")
  double mass;       // isotope mass in daltons
  double abundance;  // relative abundance (Elements stores the midpoint of the min/max range read from file)
};
// To enable std::set<Isotope>
// Lexicographic order on (name, mass). `inline` added: a non-inline
// function definition in a header violates the ODR when the header is
// included from more than one translation unit.
inline bool operator <(const Isotope & lhs, const Isotope & rhs) {
  return lhs.name < rhs.name || (lhs.name == rhs.name && lhs.mass < rhs.mass);
}
// Stream an isotope as "<name>: mass=<m> abundance=<a>". `inline` added for
// the same ODR reason as operator< above: this is a definition in a header.
inline std::ostream & operator<<(std::ostream & os, const Isotope & rhs) {
  os << rhs.name << ": mass=" << rhs.mass << " abundance=" << rhs.abundance;
  return os;
}
// Table of element symbol -> isotopes, loaded from a whitespace-separated
// file whose lines read: <element> <mass> <min_abundance> <max_abundance>.
// The stored abundance is the midpoint of the min/max range.
class Elements {
protected:
  std::map<std::string, std::vector<Isotope> > _isotope_list;
public:
  // explicit: prevent accidental implicit conversion from a file-name string.
  explicit Elements(const std::string & isotop_file) {
    std::ifstream myfile(isotop_file);
    assert(myfile.is_open() == true && "Error: File not found");
    std::string line;
    std::string element;
    double mass;
    double min_abundance;
    double max_abundance;
    while ( std::getline(myfile,line) ) {
      std::istringstream ist(line);
      // Only accept fully-parsed lines: previously a blank or malformed
      // (e.g. trailing) line left the variables holding the prior line's
      // values and re-inserted a stale duplicate isotope.
      if (ist >> element >> mass >> min_abundance >> max_abundance) {
        Isotope iso = {element, mass, (max_abundance + min_abundance)/2};
        _isotope_list[element].push_back(iso);
      }
    }
    myfile.close();
  }
  // Debug dump of the whole table.
  void print_elements_list() const {
    std::cout << "[ ";
    for(auto const & key: _isotope_list) {
      std::cout << "[";
      for(unsigned long i=0; i+1<key.second.size(); ++i) {
        std::cout << key.second[i] << ", ";
      }
      // Guard: .back() on an empty vector is undefined behavior.
      if ( ! key.second.empty() )
        std::cout << key.second.back();
      std::cout << "] ";
    }
    std::cout << "]" << std::endl;
  }
  std::map<std::string, std::vector<Isotope> >::const_iterator find(const std::string & key) const {
    return _isotope_list.find(key);
  }
  std::map<std::string, std::vector<Isotope> >::const_iterator begin() const {
    return _isotope_list.begin();
  }
  std::map<std::string, std::vector<Isotope> >::const_iterator end() const {
    return _isotope_list.end();
  }
  unsigned long size() const {
    return _isotope_list.size();
  }
  // Return by const reference; returning by value (as before) copied the
  // entire isotope vector on every call. Throws std::out_of_range for an
  // unknown key (std::map::at).
  const std::vector<Isotope> & get(const std::string & key) const {
    return _isotope_list.at(key);
  }
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/isotope-quantification/formula_to_spectrum.cpp | .cpp | 1,943 | 80 | #include "IsotopeQuantifier.hpp"
const Elements elements("element_isotope_list.txt");
// Prints the command-line usage message and terminates the program.
void print_usage() {
  std::cerr << "Usage:\n"
            << "\tformula2spectrum discretize_mass=15 Ca=10 [Ar=2 ...]" << std::endl;
  exit(1);
}
// Parses "discretize_mass=<v> El=count ..." and prints the discretized
// theoretical isotope spectrum of the given chemical formula.
int main(int argc, char**argv) {
  if (argc <= 1)
    print_usage();

  // First argument must be discretize_mass=<positive value>:
  double discretization = -1;
  std::string exact_or_disc = argv[1];
  // std::string::find returns size_type / npos; storing it in an int (as the
  // original did) narrows and only works by accident.
  std::string::size_type eq = exact_or_disc.find('=');
  if (eq == std::string::npos)
    print_usage();
  else {
    exact_or_disc[eq] = ' ';
    std::istringstream ist(exact_or_disc);
    std::string garbage;
    ist >> garbage;
    if (garbage != "discretize_mass")
      print_usage();
    ist >> discretization;
    if (discretization <= 0) {
      std::cerr << "discretize_mass must be >0" << std::endl;
      return 1;
    }
  }

  // Remaining arguments are Element=Count pairs:
  std::map<std::string, unsigned int> element_to_count;
  for (int i=2; i<argc; ++i) {
    std::string element_and_count = argv[i];
    std::string::size_type sep = element_and_count.find('=');
    if (sep == std::string::npos)
      print_usage();
    element_and_count[sep] = ' ';
    std::string element;
    int count = 0;  // initialized so a failed parse cannot leave garbage
    std::istringstream ist(element_and_count);
    ist >> element >> count;
    if (count <= 0) {
      std::cerr << "Abundance of element must be integer > 0" << std::endl;
      return 1;
    }
    if (element_to_count.find(element) != element_to_count.end()) {
      // BUG FIX: message previously read "...Caadded multiple times".
      std::cerr << "Error: " + element + " added multiple times" << std::endl;
      return 1;
    }
    element_to_count[element] = count;
  }

  std::map<double, double> peaks;
  // Discretize:
  // Use false to ignore unobserved peaks
  peaks = IsotopeQuantifier::mass_discretized_theoretical_peaks_from_chemical_formula(element_to_count, elements, discretization, false);
  // Print:
  std::cout << "mass_discretization " << discretization << std::endl;
  for (const std::pair<double, double> & x : peaks) {
    std::cout << x.first << "\t" << x.second << std::endl;
  }
  return 0;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/p-convolution/create_data_for_max_convolution.py | .py | 720 | 25 | import numpy as np
from scipy.signal import fftconvolve

# Normalize a sequence so its maximum becomes 1.0.
norm = lambda a : a / float(max(a))

N = 4096
# Pad so that after smoothing with the 8-tap kernel and trimming 8 samples
# from each side the signals have length N.
padded_N = N + 9
i = np.arange(padded_N)

# Two noisy multi-modal test signals, smoothed by a small symmetric kernel.
# (Consistency fix: use padded_N rather than the equivalent literal N+9.)
kernel = [1, 2, 3, 4, 4, 3, 2, 1]
x = fftconvolve(np.random.uniform(0.5, 1.0, padded_N) * ( np.exp( -((i-400)/2500.0)**2 ) + 0.3*np.exp( -((i-padded_N/2.0)/100.0)**2 ) + 0.7*np.exp( -((i-padded_N)/400.0)**2 ) ), kernel)[8:-8]
y = fftconvolve(np.random.uniform(0.5, 1.0, padded_N) * ( 3.0*np.exp( -((i**0.9)/1000.0)**2 ) + np.exp( -((i-padded_N)/600.0)**2 ) ), kernel)[8:-8]
x = norm(x)
y = norm(y)

# Output format: first line is N, then x and y each on one
# whitespace-separated line.  The context manager guarantees the file is
# closed/flushed (the original never closed it); the unused pylab import
# (which can fail without a display backend) was removed.
with open('x_and_y.txt', 'w') as outfile:
    outfile.write(str(N) + '\n')
    for v in x:
        outfile.write(str(v) + ' ')
    outfile.write('\n')
    for v in y:
        outfile.write(str(v) + ' ')
    outfile.write('\n')
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/p-convolution/max_convolution.cpp | .cpp | 768 | 37 | #include <iostream>
#include <fstream>
#include "../../Convolution/p_convolve.hpp"
#include "../../Utility/Clock.hpp"
// Reads n, x, y from the given file, then prints both the naive
// max-convolution of x and y and the numeric p-norm approximation
// (p = infinity approximates max), timing each.
// (The file-scope Clock that was shadowed by main's local has been removed.)
int main(int argc, char**argv) {
  if (argc != 2) {
    std::cerr << "usage: max_conv <filename with n and x and y>" << std::endl;
    exit(1);
  }
  std::cout.precision(100);

  std::ifstream fin(argv[1]);
  // Fail loudly instead of silently reading garbage from a missing file:
  if ( ! fin ) {
    std::cerr << "error: could not open " << argv[1] << std::endl;
    exit(1);
  }
  unsigned long n;
  fin >> n;
  Tensor<double> x({n});
  for (unsigned long i=0; i<n; ++i)
    fin >> x[i];
  Tensor<double> y({n});
  for (unsigned long i=0; i<n; ++i)
    fin >> y[i];

  // Time the naive O(n^2) max-convolution:
  Clock c;
  auto z = naive_max_convolve(x,y);
  c.ptock();
  std::cout << z.flat() << std::endl;

  // Time the numeric p-convolution with p = infinity:
  c.tick();
  auto z2 = numeric_p_convolve(x,y,std::numeric_limits<double>::infinity());
  c.ptock();
  std::cout << z2.flat() << std::endl;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/p-convolution/naive_vs_numeric_max_convolution_benchmark.cpp | .cpp | 979 | 42 | #include <iostream>
#include "../../Convolution/p_convolve.hpp"
#include "../../Utility/Clock.hpp"
// Fills x with a Gaussian bump centered at index 128; y gets that bump plus
// a narrower one centered at 700.  Reads x[k] while filling y, so it assumes
// y is no longer than x (callers pass equal sizes).
void init_data(Tensor<double> & x, Tensor<double> & y) {
  unsigned long k;
  for (k=0; k<x.flat_size(); ++k)
    x[k] = exp( - (k - 128.0)*(k - 128.0) / (100.0*100.0) );
  for (k=0; k<y.flat_size(); ++k)
    y[k] = x[k] + exp( - (k - 700.0)*(k - 700.0) / (10.0*10.0) );
}

// Benchmarks naive max-convolution against the numeric p-convolution with
// p = infinity on normalized signals of length 2^LOG_N, printing
// "n naive_seconds numeric_seconds".
// (The unused file-scope Clock that main's local shadowed has been removed.)
int main(int argc, char**argv) {
  if (argc != 2) {
    std::cerr << "Usage: convolution_benchmark <LOG_N>" << std::endl;
    return 1;
  }
  const unsigned int log_n = atoi(argv[1]);
  const unsigned long n = 1ul<<log_n;
  Tensor<double> x({n});
  Tensor<double> y({n});
  init_data(x,y);
  // Normalize both signals into probability distributions:
  x.flat() /= sum( x.flat() );
  y.flat() /= sum( y.flat() );
  Clock c;
  c.tick();
  auto z = naive_max_convolve(x,y);
  std::cout << n << " " << c.tock() << " ";
  c.tick();
  auto z2 = numeric_p_convolve(x,y,std::numeric_limits<double>::infinity());
  std::cout << c.tock() << std::endl;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/restaurant-bill/disable_trimming.cpp | .cpp | 49 | 5 | #define DISABLE_TRIM
#include "big_dipper.cpp"
| C++ |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/restaurant-bill/big_dipper.cpp | .cpp | 4,002 | 123 | #include "../../Evergreen/evergreen.hpp"
#include "../../Utility/inference_utilities.hpp"
#include <fstream>
// Models a menu of ice cream prices (stored in quarters) and generates
// random customer preference distributions over those prices.
class BigDipperIceCream {
private:
  // Reads "<item_name> <price>" pairs; returns the distinct prices.
  static std::set<double> load_prices(const std::string & menu_filename) {
    std::set<double> result;
    std::ifstream fin(menu_filename);
    std::string item_name;
    double price;
    while (fin >> item_name >> price)
      result.insert(price);
    return result;
  }
  // Converts each distinct price to an integer number of quarters.
  static std::set<unsigned int> load_prices_in_quarters(const std::string & menu_filename) {
    std::set<double> prices = load_prices(menu_filename);
    std::set<unsigned int> result;
    for (double price : prices)
      // Prices are all divisible by 0.25-- thanks Big Dipper!
      // Regardless, round just to be safe (the value 0.99999 would
      // cast to integer 0).
      result.insert( (unsigned int)round(price / 0.25) );
    return result;
  }
  std::vector<unsigned int> _prices_in_quarters;  // sorted ascending (built from a std::set)
public:
  BigDipperIceCream(const std::string & menu_filename) {
    std::set<unsigned int> price_set = load_prices_in_quarters(menu_filename);
    _prices_in_quarters = std::vector<unsigned int>(price_set.begin(), price_set.end());
    // Guard: an empty or missing menu would make the back()/rbegin()
    // accesses below undefined behavior.
    assert( ! _prices_in_quarters.empty() && "Error: menu file empty or not found");
    std::cout << "K=" << _prices_in_quarters.back() << std::endl;
  }
  // Builds a random (unnormalized) preference distribution over
  // {0, 1, ..., maximum price in quarters}; the PMF constructor normalizes.
  PMF generate_pmf_of_preferences() {
    // The vector is sorted, so its last element is the maximum; add 1 to
    // get the table size:
    Tensor<double> probability_table( {_prices_in_quarters.back()+1ul} );
    for (unsigned int price : _prices_in_quarters) {
      // Choose a probability that the person buys this item (note: it
      // is not yet a true probability, since we do not know if it
      // sums to 1 with the other items, but that will be normalized
      // in the PMF constructor).
      double prob = rand() % 10000 / 9999.0 + 0.1;
      probability_table[price] = prob;
    }
    return PMF({0L}, probability_table);
  }
};
// Draws one sample from a 1D PMF by inverse-CDF sampling: walk the table
// accumulating mass until the accumulated mass reaches a uniform draw.
unsigned int randomly_sample_from_1d_pmf(const PMF & pmf) {
  const double threshold = rand() % 10000 / 9999.0;
  double mass_so_far = 0.0;
  const unsigned long table_size = pmf.table().flat_size();
  for (unsigned long idx=0; idx<table_size; ++idx) {
    mass_so_far += pmf.table()[idx];
    if (mass_so_far >= threshold)
      return idx + pmf.first_support()[0];
  }
  // Unreachable when the masses sum to 1.0; kept as a safety net:
  return pmf.last_support()[0];
}
// Simulates N customers each buying one menu item according to a random
// preference PMF, then infers the posterior on the first customer's
// spending given only the (known) total bill Y = sum of X_i.
// Usage: bill_solver <N> <p>, where p is the p-norm used by the dependencies.
// NOTE(review): rand() is never seeded, so repeated runs are identical -- confirm intended.
int main(int argc, char**argv) {
if (argc != 3) {
std::cerr << "Usage: bill_solver <N> <p>" << std::endl;
exit(1);
}
const unsigned long N = atoi(argv[1]);
const double p = atof(argv[2]);
BetheInferenceGraphBuilder<std::string> igb;
/*
Prices from
Big Dipper Ice Cream
631S Higgins Ave.
Missoula Montana
*/
BigDipperIceCream bdic("big-dipper-prices.txt");
unsigned long total_spent_in_quarters = 0;
// One variable X_i per customer, with a random prior and a sampled spend:
for (unsigned long i=0; i<N; ++i) {
PMF pmf = bdic.generate_pmf_of_preferences();
unsigned int person_spent = randomly_sample_from_1d_pmf(pmf);
total_spent_in_quarters += person_spent;
LabeledPMF<std::string> lpmf( {"X_" + to_string(i)}, pmf );
igb.insert_dependency( TableDependency<std::string>(lpmf, p) );
std::cout << lpmf << " " << person_spent << std::endl;
}
// We know that Y = total_spent_in_quarters with 100% probability:
igb.insert_dependency( TableDependency<std::string>(LabeledPMF<std::string>({"Y"}, PMF({long(total_spent_in_quarters)}, Tensor<double>({1ul},{1.0}))), p) );
// We know that Y = X_0 + X_1 + ... + X_{n-1}
std::vector<std::vector<std::string> > input_singletons;
for (unsigned long i=0; i<N; ++i)
input_singletons.push_back( {"X_" + to_string(i)} );
igb.insert_dependency( AdditiveDependency<std::string>(input_singletons, {"Y"}, p) );
// Solve with FIFO-scheduled loopy BP and print the posterior on X_0:
InferenceGraph<std::string> ig = igb.to_graph();
FIFOScheduler<std::string> sched(0.0, 1e-8, N*8ul);
sched.add_ab_initio_edges(ig);
BeliefPropagationInferenceEngine<std::string> bpie(sched, ig);
estimate_and_print_posteriors(bpie, {{"X_0"}});
return 0;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/brute-force/prisoners_dilemma.cpp | .cpp | 1,338 | 40 | #include <string>
#include "../../Evergreen/evergreen.hpp"
#include "../../Utility/inference_utilities.hpp"
// A simple demo of brute force inference
// Problem explained in
// https://en.wikipedia.org/wiki/Prisoner's_dilemma
int main() {
const double p = 2.0;
//////////////////////////////////
///// Construct Dependencies /////
//////////////////////////////////
// prior distribution of person1
TableDependency<std::string> td1(LabeledPMF<std::string>({"person1"}, PMF({0L}, Tensor<double>({2ul}, {0.8, 0.2}))), p);
// prior distribution of person2
TableDependency<std::string> td2(LabeledPMF<std::string>({"person2"}, PMF({0L}, Tensor<double>({2ul}, {0.2, 0.8}))), p);
// conditional dependency of person1 and pearson2
TableDependency<std::string> td3(LabeledPMF<std::string>({"person1", "person2"}, PMF({0L,0L}, Tensor<double>({2ul,2ul}, {.87, .13, .74, .26}))), p);
///////////////////////
///// Solve Graph /////
///////////////////////
BruteForceInferenceEngine<std::string> bf({td1, td2, td3},p);
Clock c;
std::vector<LabeledPMF<std::string> > result = bf.estimate_posteriors({{"person1"}, {"person2"}});
std::cout << "BF Time: " << c.tock() << " in seconds" << std::endl;
for (auto res : result)
std::cout << res << std::endl;
std::cout << std::endl;
return 0;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/demos/simple-additive-2d/main.cpp | .cpp | 5,093 | 114 | #include <fstream>
#include "../../Evergreen/evergreen.hpp"
#include "../../Utility/inference_utilities.hpp"
#include "../../Utility/graph_to_dot.hpp"
const double p=16;

// Solves output = sum of inputs using two independent 1D additive
// dependencies (one per tensor axis) on a Bethe-construction graph, then
// prints the requested posteriors and dumps the graph to bethe_1d.dot.
void solve_1d_bethe(const std::vector<LabeledPMF<std::string> > & inputs, const LabeledPMF<std::string> & output, const std::vector<std::vector<std::string> > & vars_for_posteriors) {
  BetheInferenceGraphBuilder<std::string> builder;
  for (const LabeledPMF<std::string> & input : inputs)
    builder.insert_dependency( TableDependency<std::string>(input, p) );
  builder.insert_dependency( TableDependency<std::string>(output, p) );

  // One AdditiveDependency per axis (axis 0, then axis 1 -- same insertion
  // order as two hand-written blocks would give):
  for (unsigned int axis = 0; axis < 2u; ++axis) {
    std::vector<std::vector<std::string> > axis_input_vars;
    for (const LabeledPMF<std::string> & input : inputs)
      axis_input_vars.push_back( {input.ordered_variables()[axis]} );
    builder.insert_dependency( AdditiveDependency<std::string>(axis_input_vars, {output.ordered_variables()[axis]}, p) );
  }

  InferenceGraph<std::string> graph = builder.to_graph();
  FIFOScheduler<std::string> scheduler(0.01, 1e-8, 10000);
  scheduler.add_ab_initio_edges(graph);
  BeliefPropagationInferenceEngine<std::string> engine(scheduler, graph);
  estimate_and_print_posteriors(engine, vars_for_posteriors);
  write_graph_to_dot_file(graph, "bethe_1d.dot");
}
// Solves output = sum of inputs with a single 2D AdditiveDependency on a
// Bethe-construction graph, prints the requested posteriors, and dumps the
// graph to bethe_2d.dot.
void solve_2d_bethe(const std::vector<LabeledPMF<std::string> > & inputs, const LabeledPMF<std::string> & output, const std::vector<std::vector<std::string> > & vars_for_posteriors) {
  BetheInferenceGraphBuilder<std::string> builder;
  for (const LabeledPMF<std::string> & input : inputs)
    builder.insert_dependency( TableDependency<std::string>(input, p) );
  builder.insert_dependency( TableDependency<std::string>(output, p) );

  // One 2D AdditiveDependency over full variable tuples:
  std::vector<std::vector<std::string> > input_var_tuples;
  for (const LabeledPMF<std::string> & input : inputs)
    input_var_tuples.push_back(input.ordered_variables());
  builder.insert_dependency( AdditiveDependency<std::string>(input_var_tuples, output.ordered_variables(), p) );

  InferenceGraph<std::string> graph = builder.to_graph();
  FIFOScheduler<std::string> scheduler(0.01, 1e-8, 10000);
  scheduler.add_ab_initio_edges(graph);
  BeliefPropagationInferenceEngine<std::string> engine(scheduler, graph);
  estimate_and_print_posteriors(engine, vars_for_posteriors);
  write_graph_to_dot_file(graph, "bethe_2d.dot");
}
// Exact 2D construction: wires every input HUGIN message passer directly
// into one 2D ConvolutionTreeMessagePasser (no 1D bottlenecks), solves, and
// dumps the graph to exact_2d.dot.
// NOTE(review): message passers and label vectors are allocated with new
// and never deleted here; presumably InferenceGraph/ConvolutionTreeMessagePasser
// assume ownership -- confirm, otherwise the label vectors leak.
void solve_2d_exact(const std::vector<LabeledPMF<std::string> > & inputs, const LabeledPMF<std::string> & output, const std::vector<std::vector<std::string> > & vars_for_posteriors) {
std::vector<ContextFreeMessagePasser<std::string>* > input_mps;
std::vector<std::vector<std::string>* > input_labels;
for (const LabeledPMF<std::string> & lpmf : inputs) {
input_mps.push_back( new HUGINMessagePasser<std::string>(lpmf, p) );
input_labels.push_back( new std::vector<std::string>(lpmf.ordered_variables()) );
}
ContextFreeMessagePasser<std::string>*output_mp = new HUGINMessagePasser<std::string>(output, p);
std::vector<std::string>*output_label = new std::vector<std::string>(output.ordered_variables());
// The "2" below is the dimension of the convolution tree:
ConvolutionTreeMessagePasser<std::string>*ctmp = new ConvolutionTreeMessagePasser<std::string>(input_mps, input_labels, output_mp, output_label, 2, p);
// Collect every message passer (inputs, output, convolution tree) into the graph:
std::vector<MessagePasser<std::string>* > mps;
for (ContextFreeMessagePasser<std::string>*hmp : input_mps)
mps.push_back(hmp);
mps.push_back(output_mp);
mps.push_back(ctmp);
InferenceGraph<std::string> ig(std::move(mps));
FIFOScheduler<std::string> fifo(0.01, 1e-8, 10000);
fifo.add_ab_initio_edges(ig);
BeliefPropagationInferenceEngine<std::string> bpie(fifo, ig);
estimate_and_print_posteriors(bpie, vars_for_posteriors);
write_graph_to_dot_file(ig, "exact_2d.dot");
}
// Demo: infers (A,V) = (B,W) + (C,X) + (D,Y) + (E,Z) with three different
// constructions (2x 1D Bethe, 2D Bethe, exact 2D convolution tree) and
// prints the posteriors on {A,V} and {E,Z} for each.  The PMF tables below
// are fixed demo data (first argument: per-axis support offsets).
int main() {
LabeledPMF<std::string> av({"A","V"},PMF({2L,1L},Tensor<double>({3,3},{1,10,9,3,7,2,1,2,6})));
LabeledPMF<std::string> bw({"B","W"},PMF({1L,0L},Tensor<double>({3,3},{1,2,3,4,5,6,7,8,9})));
LabeledPMF<std::string> cx({"C","X"},PMF({-1L,0L},Tensor<double>({3,2},{2,8,4,1,2,3})));
LabeledPMF<std::string> dy({"D","Y"},PMF({0L,0L},Tensor<double>({2,3},{7,5,2,5,6,3})));
LabeledPMF<std::string> ez({"E","Z"},PMF({0L,1L},Tensor<double>({2,3},{10,3,6,4,1,7})));
// Echo the inputs:
std::cout << av << std::endl;
std::cout << bw << std::endl;
std::cout << cx << std::endl;
std::cout << dy << std::endl;
std::cout << ez << std::endl;
// (A,V) = (B,W) + (C,X) + (D,Y) + (E,Z)
std::cout << "2x 1D Convolution trees (Bethe construction)" << std::endl;
solve_1d_bethe({bw, cx, dy, ez}, av, {{"A","V"}, {"E","Z"}});
std::cout << std::endl;
std::cout << "2D Convolution tree (Bethe construction with 1D bottlenecks)" << std::endl;
solve_2d_bethe({bw, cx, dy, ez}, av, {{"A","V"}, {"E","Z"}});
std::cout << std::endl;
std::cout << "2D Convolution tree (exact)" << std::endl;
solve_2d_exact({bw, cx, dy, ez}, av, {{"A","V"}, {"E","Z"}});
std::cout << std::endl;
}
| C++ |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Language/TensorLang.hpp | .hpp | 106 | 5 | typedef struct TensorLangT {
// Tensor literal used by the language front end: values stored flat in
// flat_vector with the corresponding dimension sizes in shape
// (presumably row-major, matching Tensor -- confirm).
std::vector<double> flat_vector;
std::vector<long> shape;
} TensorLang;
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Language/PrintBlock.hpp | .hpp | 90 | 4 | typedef struct PrintBlockT {
// A print request from the language: each inner vector is one tuple of
// variable names whose joint posterior should be printed.
std::vector<std::vector<std::string> > vars;
} PrintBlock;
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Language/LangDigraph.hpp | .hpp | 1,842 | 55 | #ifndef _LANGGRAPH_HPP
#define _LANGGRAPH_HPP
#include <unordered_map>
#include <map>
#include "FrozenSet.hpp"
// Adjacency-set graph over language variables, used to split a model into
// connected components.  Note: dfs only follows stored edge direction
// (u -> its FrozenSet), so components are "connected" in that sense.
template <typename NODE>
class LangGraph {
private:
  // Each node maps to the frozen set of nodes it has edges to.
  std::unordered_map<NODE, FrozenSet<NODE> > node_to_edges;
public:
  // Inserts edges from u to every node in v.
  // BUG FIX: previously constructed FrozenSet<std::string> here, which was
  // only correct (and only compiled) when NODE == std::string.
  void insert_edge(const NODE & u, const std::set<NODE> & v) {
    FrozenSet<NODE> set(v);
    node_to_edges.insert(std::pair<NODE, FrozenSet<NODE> >(u, set));
  }
  void insert_clique(const FrozenSet<NODE> & fs);
  // Depth-first search accumulating into connected_component every node
  // reachable from u; node_is_connected marks visited nodes.
  std::set<NODE> dfs(const NODE & u, std::set<NODE> & connected_component, std::unordered_map<NODE, bool> & node_is_connected) const {
    if (node_is_connected[u]) {
      return connected_component;
    }
    connected_component.insert(u);
    node_is_connected[u] = true;
    auto adj_frozenset = node_to_edges.find(u);
    if (adj_frozenset != node_to_edges.end()) {
      // Iterate the stored set directly (the original copied it per visit):
      for (const NODE & v : adj_frozenset->second.get_set()) {
        dfs(v, connected_component, node_is_connected);
      }
    }
    return connected_component;
  }
  // Returns one FrozenSet per connected component of the graph.
  std::vector<FrozenSet<NODE> > get_connected_subgraphs() const {
    std::vector<FrozenSet<NODE> > connected_components;
    std::unordered_map<NODE, bool> node_is_connected;
    for (auto node_iter = node_to_edges.begin(); node_iter != node_to_edges.end(); ++node_iter)
      node_is_connected[node_iter->first] = false;
    for (auto node_iter = node_is_connected.begin(); node_iter != node_is_connected.end(); ++node_iter) {
      if (!node_iter->second) {
        std::set<NODE> connected_component;
        dfs(node_iter->first, connected_component, node_is_connected);
        // BUG FIX: also previously hard-coded FrozenSet<std::string> here.
        connected_components.push_back(FrozenSet<NODE>(connected_component));
      }
    }
    return connected_components;
  }
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Language/LoopyLangEngine.hpp | .hpp | 1,338 | 39 | #ifndef _LOOPYLANGENGINE_HPP
#define _LOOPYLANGENGINE_HPP
#include "LangEngine.hpp"
// LangEngine variant that solves each connected subgraph with
// FIFO-scheduled loopy belief propagation on a Bethe-construction graph.
// NOTE(review): ig_ptrs / sched_ptrs / eng_ptrs / is_built are presumably
// inherited members -- confirm against the LangEngineT base used here.
typedef struct LoopyLangEngineT : public LangEngineT {
  // Rebuilds one graph / scheduler / engine per connected subgraph,
  // freeing anything from a previous build first.
  void build(std::vector<std::vector<Dependency<std::string>* > > dependencies_of_subgraphs, double dampening, double epsilon, long max_iter) {
    for(InferenceGraph<std::string>* ptr: ig_ptrs)
      delete(ptr);
    ig_ptrs.clear();
    for(Scheduler<std::string>* ptr: sched_ptrs)
      delete(ptr);
    sched_ptrs.clear();  // added: never keep dangling pointers (was missing)
    for(InferenceEngine<std::string>* ptr : eng_ptrs)
      delete(ptr);
    eng_ptrs.clear();    // added: same as above
    eng_ptrs.resize(dependencies_of_subgraphs.size());
    sched_ptrs.resize(dependencies_of_subgraphs.size());
    ig_ptrs.resize(dependencies_of_subgraphs.size());
    // std::size_t index avoids the signed/unsigned comparison of the
    // original "int i < size()" loop:
    for (std::size_t i = 0; i < dependencies_of_subgraphs.size(); ++i) {
      const std::vector<Dependency<std::string>* > & deps = dependencies_of_subgraphs[i];
      BetheInferenceGraphBuilder<std::string> igb;
      for (Dependency<std::string>* dep : deps)
        igb.insert_dependency(*dep);
      sched_ptrs[i] = new FIFOScheduler<std::string>(dampening, epsilon, max_iter);
      ig_ptrs[i] = new InferenceGraph<std::string>(igb.to_graph());
      sched_ptrs[i]->add_ab_initio_edges(*ig_ptrs[i]);
      eng_ptrs[i] = new BeliefPropagationInferenceEngine<std::string>(*sched_ptrs[i], *ig_ptrs[i]);
    }
    is_built = true;
  }
} LoopyLangEngine;
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Language/InferenceEnginesBuilder.hpp | .hpp | 3,610 | 101 | #ifndef _INFERENCEENGINESBUILDER_HPP
#define _INFERENCEENGINESBUILDER_HPP
#include <string>
#include "../Evergreen/evergreen.hpp"
// Abstract factory: builds one InferenceEngine per connected subgraph of
// dependencies.  Returned pointers are raw and newly allocated; the caller
// is presumably responsible for deleting them (see how LangEngineT uses
// them) -- confirm the ownership convention.
class InferenceEnginesBuilder {
public:
// deps: one vector of dependency pointers per connected subgraph.
virtual std::vector<InferenceEngine<std::string>* > build_engines(const std::vector<std::vector<Dependency<std::string>*> > & deps) = 0;
virtual ~InferenceEnginesBuilder() {}
};
// Builds one exact BruteForceInferenceEngine per connected subgraph.
class BruteForceInferenceEnginesBuilder : public InferenceEnginesBuilder {
public:
  // @param dependencies_of_subgraphs one vector of dependencies per connected subgraph
  // @return one newly allocated engine per subgraph (caller deletes)
  std::vector<InferenceEngine<std::string>* > build_engines(const std::vector<std::vector<Dependency<std::string>* > > & dependencies_of_subgraphs) override {
    std::vector<InferenceEngine<std::string>* > result(dependencies_of_subgraphs.size());
    for (unsigned long i=0; i<dependencies_of_subgraphs.size(); ++i) {
      std::vector<TableDependency<std::string> > table_deps;
      std::vector<AdditiveDependency<std::string> > additive_deps;
      const std::vector<Dependency<std::string>* > & dependency_subgraph = dependencies_of_subgraphs[i];
      // Sort each dependency by concrete type (one dynamic_cast per
      // candidate type; the original cast twice per branch):
      for (Dependency<std::string>* dep : dependency_subgraph) {
        if (TableDependency<std::string>*table_dep = dynamic_cast<TableDependency<std::string>*>(dep)) {
          table_deps.push_back(*table_dep);
        }
        else if (AdditiveDependency<std::string>*additive_dep = dynamic_cast<AdditiveDependency<std::string>*>(dep)) {
          additive_deps.push_back(*additive_dep);
        }
        else {
          // error: user needs to define new dependency type in this if-else ladder
        }
      }
      // NOTE(review): `p` is not declared in this class or the visible
      // base; presumably a global p-norm parameter defined elsewhere -- confirm.
      result[i] = new BruteForceInferenceEngine<std::string>(table_deps, additive_deps, p);
    }
    return result;
  }
};
// Builds one loopy-BP engine (FIFO scheduler on a Bethe graph) per
// connected subgraph.  Owns the schedulers and graphs it allocates; the
// returned engines reference them, so this builder must outlive the engines.
class BeliefPropagationInferenceEnginesBuilder : public InferenceEnginesBuilder {
protected:
  double dampening_lambda;   // message dampening factor
  double epsilon;            // convergence threshold
  long max_iter;             // iteration cap
  std::vector<Scheduler<std::string>*> scheduler_ptrs;
  std::vector<InferenceGraph<std::string>*> graph_ptrs;
  // Frees schedulers and graphs from a previous build (shared by the
  // destructor and by build_engines on rebuild -- the original duplicated
  // these loops in both places).
  void free_schedulers_and_graphs() {
    for (Scheduler<std::string>*sp : scheduler_ptrs)
      delete sp;
    scheduler_ptrs.clear();
    for (InferenceGraph<std::string>*ig : graph_ptrs)
      delete ig;
    graph_ptrs.clear();
  }
public:
  BeliefPropagationInferenceEnginesBuilder(double damp, double eps, long max_it):
    dampening_lambda(damp),
    epsilon(eps),
    max_iter(max_it)
  { }
  ~BeliefPropagationInferenceEnginesBuilder() {
    free_schedulers_and_graphs();
  }
  // @return one newly allocated engine per subgraph (caller deletes)
  std::vector<InferenceEngine<std::string>* > build_engines(const std::vector<std::vector<Dependency<std::string>*> > & dependencies_of_subgraphs) override {
    free_schedulers_and_graphs();
    std::vector<InferenceEngine<std::string>* > result(dependencies_of_subgraphs.size());
    for (unsigned long i=0; i<dependencies_of_subgraphs.size(); ++i) {
      const std::vector<Dependency<std::string>* > & deps = dependencies_of_subgraphs[i];
      BetheInferenceGraphBuilder<std::string> igb;
      for (Dependency<std::string>* dep : deps)
        igb.insert_dependency(*dep);
      Scheduler<std::string>* sched = new FIFOScheduler<std::string>(dampening_lambda, epsilon, max_iter);
      scheduler_ptrs.push_back(sched);
      InferenceGraph<std::string> *ig_ptr = new InferenceGraph<std::string>(igb.to_graph());
      graph_ptrs.push_back(ig_ptr);
      sched->add_ab_initio_edges(*ig_ptr);
      result[i] = new BeliefPropagationInferenceEngine<std::string>(*sched, *ig_ptr);
    }
    return result;
  }
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Language/IntTuple.hpp | .hpp | 65 | 4 | typedef struct IntTupleT {
// A tuple of integers as parsed by the language front end.
std::vector<long> ints;
} IntTuple;
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Language/BruteForceLangEngine.hpp | .hpp | 2,109 | 53 | #ifndef _BRUTEFORCELANGENGINE_HPP
#define _BRUTEFORCELANGENGINE_HPP
#include "LangEngine.hpp"
// LangEngine variant that solves every connected subgraph exactly by brute
// force.  NOTE(review): eng_ptrs / sched_ptrs / ig_ptrs / is_built / p are
// presumably inherited or global -- confirm against the LangEngineT base used here.
typedef struct BruteForceLangEngineT : public LangEngineT {
  // Rebuilds one BruteForceInferenceEngine per connected subgraph, freeing
  // anything from a previous build first.  dampening/epsilon/max_iter are
  // accepted for interface parity with the loopy engine but unused here.
  void build(const std::vector<std::vector<Dependency<std::string>* > > & dependencies_of_subgraphs, double dampening, double epsilon, long max_iter) {
    for(InferenceGraph<std::string>* ptr: ig_ptrs)
      delete(ptr);
    ig_ptrs.clear();
    for(Scheduler<std::string>* ptr: sched_ptrs)
      delete(ptr);
    sched_ptrs.clear();  // was commented out: left dangling pointers behind
    for(InferenceEngine<std::string>* ptr : eng_ptrs)
      delete(ptr);
    eng_ptrs.clear();    // was commented out: same problem
    eng_ptrs.resize(dependencies_of_subgraphs.size());
    sched_ptrs.resize(dependencies_of_subgraphs.size());
    ig_ptrs.resize(dependencies_of_subgraphs.size());
    //#pragma omp parallel for // FIXME: Figure out why this makes valgrind throw a fit
    // std::size_t index avoids the signed/unsigned comparison of the
    // original "int i < size()" loop:
    for (std::size_t i = 0; i < dependencies_of_subgraphs.size(); ++i) {
      std::vector<TableDependency<std::string> > table_deps;
      std::vector<AdditiveDependency<std::string> > additive_deps;
      const std::vector<Dependency<std::string>* > & dependency_subgraph = dependencies_of_subgraphs[i];
      // Sort each dependency by concrete type (one dynamic_cast per branch):
      for (Dependency<std::string>* dep : dependency_subgraph) {
        if (TableDependency<std::string>*table_dep = dynamic_cast<TableDependency<std::string>*>(dep)) {
          table_deps.push_back(*table_dep);
        }
        else if (AdditiveDependency<std::string>*additive_dep = dynamic_cast<AdditiveDependency<std::string>*>(dep)) {
          additive_deps.push_back(*additive_dep);
        }
        else {
          // error: user needs to define new dependency type in this if-else ladder
        }
      }
      eng_ptrs[i] = new BruteForceInferenceEngine<std::string>(table_deps, additive_deps, p);
    }
    is_built = true;
  }
} BruteForceLangEngine;
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Language/VarTuple.hpp | .hpp | 72 | 4 | typedef struct VarTupleT {
// A tuple of variable names as parsed by the language front end.
std::vector<std::string> vars;
} VarTuple;
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Language/LangEngine.hpp | .hpp | 8,396 | 187 | #ifndef _LANGENGINE_HPP
#define _LANGENGINE_HPP
#include "InferenceEnginesBuilder.hpp"
#include "LangDigraph.hpp"
#include <unordered_map>
#include <fstream>
#include "../Utility/graph_to_dot.hpp"
// Core state for the evergreen language runtime: owns the parsed
// dependencies, maps variables to the dependencies that mention them, and
// builds/solves one inference engine per connected subgraph on demand.
typedef struct LangEngineT {
LangGraph<std::string> graph;
// Factory used to build the per-subgraph engines (BP by default); owned.
InferenceEnginesBuilder*ieb_ptr;
// For each variable: indices (into `dependencies`) of the dependencies using it.
std::unordered_map<std::string, std::vector<unsigned long> > var_to_graphs_containing;
// All inserted dependencies; owned (deleted in the destructor).
std::vector<Dependency<std::string>* > dependencies;
// Engines from the most recent build.  NOTE(review): they are deleted in
// print_normalization_constant(); print() alone replaces them without
// deleting, which presumably leaks -- confirm intended lifecycle.
std::vector<InferenceEngine<std::string>* > engine_ptrs;
LangEngineT(const double & default_damp, const double & default_eps, const long & default_max_iter):
ieb_ptr(new BeliefPropagationInferenceEnginesBuilder(default_damp, default_eps, default_max_iter))
{ }
~LangEngineT() {
delete ieb_ptr;
for (Dependency<std::string>*dep_ptr : dependencies)
delete dep_ptr;
}
// Takes ownership of dep and indexes it under every variable it uses.
void insert_dependency(Dependency<std::string> * dep) {
dependencies.push_back(dep);
const std::vector<std::string> & vars_used = dep->get_all_variables_used();
for (const std::string & var : vars_used)
var_to_graphs_containing[var].push_back(dependencies.size()-1);
}
// Replaces the engine factory (takes ownership of ieb).
void set_engine(InferenceEnginesBuilder*ieb) {
delete ieb_ptr;
ieb_ptr = ieb;
}
// Estimates and prints posteriors for each requested variable tuple,
// building one engine per connected subgraph and solving them in parallel.
void print(const std::vector<std::vector<std::string> > & result_vars) {
const std::vector<std::string> & flat_result_vars = flatten(result_vars);
const std::vector<std::vector<std::string> > & partitioned_subgraphs = partition_into_subgraphs(flat_result_vars);
const std::vector<std::vector<Dependency<std::string>* > > & dependencies_of_subgraphs = get_dependencies_of_subgraphs(partitioned_subgraphs);
engine_ptrs = ieb_ptr->build_engines(dependencies_of_subgraphs);
// Map each variable to the index of its connected subgraph:
std::unordered_map<std::string, int> var_to_graph_number;
for (unsigned long i=0; i<partitioned_subgraphs.size(); ++i) {
const std::vector<std::string> & vars_in_connected_graph = partitioned_subgraphs[i];
for (const std::string & var : vars_in_connected_graph)
var_to_graph_number[var] = i;
}
// outer vector is for subgraph
// middle vector is for the possibly many tuples on which we want posteriors
// inner vector is a tuple of variables.
std::vector<std::vector<std::vector<std::string> > > printed_partitioned_subgraphs;
printed_partitioned_subgraphs.resize(dependencies_of_subgraphs.size());
for (const std::vector<std::string> & result_var : result_vars) {
int graph_num = var_to_graph_number[result_var[0]];
// A joint posterior only exists when all tuple members share a subgraph:
for (const std::string & var : result_var)
if (var_to_graph_number[var] != graph_num)
std::cerr << "ERROR: Printing error, tried to print posteriors on set of vars that belong in different subgraphs." << std::endl;
printed_partitioned_subgraphs[var_to_graph_number[result_var[0]]].push_back(result_var);
}
// Solve subgraphs independently (hence the parallel for), then print:
std::vector<std::vector<LabeledPMF<std::string> > > all_results_to_print(printed_partitioned_subgraphs.size());
#pragma omp parallel for
for (unsigned long i=0; i<printed_partitioned_subgraphs.size(); ++i)
all_results_to_print[i] = engine_ptrs[i]->estimate_posteriors(printed_partitioned_subgraphs[i]);
for (const std::vector<LabeledPMF<std::string> > & results_to_print : all_results_to_print)
for (const LabeledPMF<std::string> & result_to_print : results_to_print)
std::cout << result_to_print << std::endl;
}
// ----------------------------------------------------
// not for client use (i.e., private helper functions):
// ----------------------------------------------------
// Groups the requested variables by the connected subgraph they live in.
std::vector<std::vector<std::string> > partition_into_subgraphs(const std::vector<std::string> & result_vars) {
std::vector<std::vector<std::string> > partitioned_subgraphs;
std::set<std::string> vars_visited;
std::set<std::string> result_vars_visited;
for(const std::string & result_var : result_vars) {
if (result_vars_visited.find(result_var) == result_vars_visited.end()) {
std::vector<std::string> subgraph;
partition_into_single_subgraph(result_var, subgraph, vars_visited, result_vars, result_vars_visited);
if (subgraph.size() > 0)
partitioned_subgraphs.push_back(subgraph);
}
}
return partitioned_subgraphs;
}
// DFS over dependency co-occurrence: collects into `subgraph` every
// variable transitively connected to result_var, marking requested
// variables in result_vars_visited so the caller can skip them.
void partition_into_single_subgraph(const std::string & result_var, std::vector<std::string> & subgraph, std::set<std::string> & vars_visited, const std::vector<std::string> & result_vars, std::set<std::string> & result_vars_visited) {
if (vars_visited.find(result_var) == vars_visited.end()) {
vars_visited.insert(result_var);
subgraph.push_back(result_var);
for(const int & dep_index : var_to_graphs_containing[result_var]) {
std::vector<std::string> adj_vars = dependencies[dep_index]->get_all_variables_used();
for (const std::string & adj_var : adj_vars) {
if (result_vars_visited.find(adj_var) == result_vars_visited.end()) {
if (find(result_vars.begin(), result_vars.end(), adj_var) != result_vars.end())
result_vars_visited.insert(adj_var);
partition_into_single_subgraph(adj_var, subgraph, vars_visited, result_vars, result_vars_visited);
}
}
}
}
}
// DFS over dependencies: collects every dependency transitively reachable
// from `var` (deps_visited is indexed like `dependencies`).
void get_dependencies_in_single_subgraph(const std::string & var, std::vector<bool> & deps_visited, std::vector<Dependency<std::string>* > & connected_dependencies) {
for (const int & dep_index : var_to_graphs_containing[var]) {
if (!deps_visited[dep_index]) {
deps_visited[dep_index] = true;
connected_dependencies.push_back(dependencies[dep_index]);
const std::vector<std::string> & vars_used = dependencies[dep_index]->get_all_variables_used();
for (const std::string & var_used : vars_used) {
get_dependencies_in_single_subgraph(var_used, deps_visited, connected_dependencies);
}
}
}
if (connected_dependencies.size() == 0) {
std::cerr << "ERROR: printing error, tried to print posteriors on var " << var << " that doesn't exist in any graph" << std::endl;
}
}
// For each variable partition, collects the dependencies of its subgraph
// (deps_visited is shared across partitions so no dependency is repeated).
std::vector<std::vector<Dependency<std::string>* > > get_dependencies_of_subgraphs(const std::vector<std::vector<std::string> > & partitioned_subgraphs) {
std::vector<std::vector<Dependency<std::string>* > > dependencies_of_subgraphs;
std::vector<bool> deps_visited;
deps_visited.resize(dependencies.size());
for(const std::vector<std::string> & subgraph : partitioned_subgraphs) {
std::vector<Dependency<std::string>* > dependencies_in_single_graph;
get_dependencies_in_single_subgraph(subgraph[0], deps_visited, dependencies_in_single_graph);
dependencies_of_subgraphs.push_back(dependencies_in_single_graph);
}
return dependencies_of_subgraphs;
}
// Rebuilds all engines, solves every subgraph on singleton posteriors, and
// prints the overall log normalization constant.
void recompute_and_print_normalization_constant() {
std::vector<std::string> all_vars;
for (const std::pair<std::string, std::vector<unsigned long> > & p : var_to_graphs_containing)
all_vars.push_back(p.first);
std::vector<std::vector<std::string> > partitioned_subgraphs = partition_into_subgraphs(all_vars);
std::vector<std::vector<Dependency<std::string>* > > all_partitioned_dependencies = get_dependencies_of_subgraphs(partitioned_subgraphs);
engine_ptrs = ieb_ptr->build_engines(all_partitioned_dependencies);
std::vector<std::vector<std::vector<std::string> > > singleton_partitions;
for (std::vector<std::string> & subgraph_vars : partitioned_subgraphs)
singleton_partitions.push_back(make_singletons(subgraph_vars));
#pragma omp parallel for
for (unsigned long i=0; i<partitioned_subgraphs.size(); ++i)
engine_ptrs[i]->estimate_posteriors(singleton_partitions[i]);
print_normalization_constant();
}
// Sums per-engine log normalization constants (model log probability),
// prints it, and deletes the engines built by the last solve.
void print_normalization_constant() {
double log_nc = 0.0;
for (InferenceEngine<std::string>*ie : engine_ptrs)
log_nc += ie->log_normalization_constant();
std::cout << "Log probability of model: " << log_nc << std::endl;
for (unsigned long i=0; i<engine_ptrs.size(); ++i)
delete engine_ptrs[i];
}
// Writes the full dependency graph (Bethe construction) in DOT format.
void save_graph(char*str) {
BetheInferenceGraphBuilder<std::string> igb;
for (Dependency<std::string>* dep : dependencies)
igb.insert_dependency(*dep);
std::ofstream fout(str);
graph_to_dot(igb.to_graph(), fout);
fout.close();
}
} LangEngine;
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Language/Additive.hpp | .hpp | 142 | 5 | typedef struct AdditiveT {
// An additive relation parsed by the language: tuples of variables that
// are presumably added (plus_vars) and subtracted (minus_vars) in the
// resulting AdditiveDependency -- confirm against the parser.
std::vector<std::vector<std::string>> plus_vars;
std::vector<std::vector<std::string>> minus_vars;
} Additive;
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Language/FrozenSet.hpp | .hpp | 1,322 | 57 | #ifndef _FROZENSET_HPP
#define _FROZENSET_HPP
// From Python's frozenset class:
// See: https://stackoverflow.com/questions/20832279/python-frozenset-hashing-algorithm-implementation
// Hash functor for std::set<K>, using the same mixing scheme as Python's
// frozenset hash: each element hash is whitened and XOR-folded into an
// order-independent accumulator, then the result is scrambled once more.
template <typename K>
struct SetFrozenHash {
  std::size_t operator() (const std::set<K> & s) const {
    std::hash<K> element_hasher;
    std::size_t accumulated = 0;
    for (const K & element : s) {
      const unsigned long h = element_hasher(element);
      accumulated ^= (h ^ (h << 16) ^ 89869747ul) * 3644798167ul;
    }
    return accumulated * 69069 + 907133923;
  }
};
// An immutable set wrapper whose hash is computed once at construction
// (modeled on Python's frozenset), making it cheap to use as a key in
// unordered containers via FrozenSetHash.
template <typename K>
class FrozenSet {
private:
  std::set<K> _data;
  std::size_t _hash_value;
public:
  FrozenSet(const std::set<K> & s):
    _data(s)
  {
    // Cache the hash once; _data is never mutated afterward.
    SetFrozenHash<K> sh;
    _hash_value = sh(_data);
  }
  // Read-only access to the underlying set.
  const std::set<K> & get_set() const {
    return _data;
  }
  // Pre-computed hash, O(1).
  // (Top-level const on a by-value return was meaningless and has been
  // removed; callers are unaffected.)
  std::size_t hash_value() const {
    return _hash_value;
  }
  friend bool operator <(const FrozenSet<K> & lhs, const FrozenSet<K> & rhs) {
    return lhs._data < rhs._data;
  }
  friend bool operator ==(const FrozenSet<K> & lhs, const FrozenSet<K> & rhs) {
    return lhs._data == rhs._data;
  }
};
template <typename K>
struct FrozenSetHash {
std::size_t operator() (const FrozenSet<K> & fs) const {
return fs.hash_value();
}
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Language/from_string.hpp | .hpp | 223 | 15 | #ifndef _FROM_STRING_HPP
#define _FROM_STRING_HPP
#include <sstream>
#include <string>
// Parses a double from s via stream extraction (leading whitespace is
// skipped). On parse failure the result is 0.0 (since C++11, a failed
// arithmetic extraction writes zero to the target).
//
// Marked inline: this definition lives in a header, so without inline it
// violates the one-definition rule when the header is included from more
// than one translation unit (every other function in these headers is
// already inline).
inline double from_string(const std::string & s) {
  std::istringstream ist(s);
  double result;
  ist >> result;
  return result;
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/PMF/squared.hpp | .hpp | 101 | 9 | #ifndef _SQUARED_HPP
#define _SQUARED_HPP
// Returns x*x. constexpr (the codebase already uses C++11 features) so the
// square can be evaluated in constant expressions as well as at runtime;
// also much faster than std::pow(x, 2) for this purpose.
inline constexpr double squared(double x) {
  return x*x;
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/PMF/LabeledPMF.hpp | .hpp | 16,412 | 437 | #ifndef _LABELEDPMF_H
#define _LABELEDPMF_H
#include <unordered_map>
#include <utility>
#include <vector>

#include "PMF.hpp"
#include "semi_outer_product_and_quotient.hpp"
#include "divergence.hpp"
#include "dampen.hpp"
// A PMF whose axes are labeled with variable names (VARIABLE_KEY). The
// labels let products, quotients, marginals, and transpositions be
// requested by variable name instead of raw axis index.
//
// Invariant: _ordered_variables[i] labels axis i of _pmf, and
// _variable_to_index maps each label back to its axis.
//
// Fixes relative to the previous revision:
//  * transposed(Vector<unsigned char>): the BOUNDSCHECK assert referenced
//    new_variable_order before its declaration (a compile error whenever
//    BOUNDSCHECK was defined); it now checks new_axis_order.
//  * The rvalue-reference constructors now std::move their parameters
//    (named rvalue references are lvalues, so they previously copied).
//  * Removed meaningless top-level const on by-value returns.
template <typename VARIABLE_KEY>
class LabeledPMF {
protected:
  std::vector<VARIABLE_KEY> _ordered_variables;
  // Reverse map: variable label --> axis index in _pmf.
  std::unordered_map<VARIABLE_KEY, unsigned char> _variable_to_index;
  PMF _pmf;
  // Builds _variable_to_index from _ordered_variables (labels must be unique).
  void construct_var_to_index() {
    for (unsigned char i=0; i<_ordered_variables.size(); ++i) {
#ifdef SHAPE_CHECK
      auto iter = _variable_to_index.find(_ordered_variables[i]);
      // The ordered variables must be unique:
      assert(iter == _variable_to_index.end() );
#endif
      _variable_to_index[_ordered_variables[i]] = i;
    }
  }
public:
  LabeledPMF() { }
  LabeledPMF(const std::vector<VARIABLE_KEY> & ordered_variables, const PMF & pmf_param):
    _ordered_variables(ordered_variables),
    _pmf(pmf_param)
  {
#ifdef SHAPE_CHECK
    assert(_ordered_variables.size() == _pmf.dimension());
#endif
    construct_var_to_index();
  }
  LabeledPMF(const std::vector<VARIABLE_KEY> & ordered_variables, PMF && pmf_param):
    _ordered_variables(ordered_variables),
    // std::move added: without it this overload silently copied the PMF.
    _pmf(std::move(pmf_param))
  {
#ifdef SHAPE_CHECK
    assert(_ordered_variables.size() == _pmf.dimension());
#endif
    construct_var_to_index();
  }
  LabeledPMF(std::vector<VARIABLE_KEY> && ordered_variables, PMF && pmf_param):
    _ordered_variables(std::move(ordered_variables)),
    _pmf(std::move(pmf_param))
  {
#ifdef SHAPE_CHECK
    assert(_ordered_variables.size() == _pmf.dimension());
#endif
    construct_var_to_index();
  }
  // Read-only access to the unlabeled table.
  const PMF & pmf() const {
    return _pmf;
  }
  // Number of axes (== number of variable labels).
  unsigned char dimension() const {
    return _pmf.dimension();
  }
  double log_normalization_constant() const {
    return _pmf.log_normalization_constant();
  }
  void add_to_log_normalization_constant(const double log_c) {
    _pmf.add_to_log_normalization_constant(log_c);
  }
  void reset_log_normalization_constant() {
    _pmf.reset_norm_constant();
  }
  const std::vector<VARIABLE_KEY> & ordered_variables() const {
    return _ordered_variables;
  }
  // Marginalizes onto ordered_vars_to_keep; p is the power parameter
  // forwarded to PMF::marginal (a p-norm-style marginalization).
  LabeledPMF marginal(const std::vector<VARIABLE_KEY> & ordered_vars_to_keep, double p) const {
    Vector<unsigned char> indices = lookup_indices(ordered_vars_to_keep);
#ifdef SHAPE_CHECK
    verify_subpermutation(indices, dimension());
#endif
    // When the vars are just a permutation (none are eliminated),
    // then simply transpose (this can be more efficient):
    if (ordered_vars_to_keep.size() == dimension())
      return transposed(ordered_vars_to_keep);
    return LabeledPMF(ordered_vars_to_keep, _pmf.marginal(indices,p));
  }
  // Returns a copy with axes permuted by index.
  LabeledPMF transposed(const Vector<unsigned char> & new_axis_order) const {
#ifdef BOUNDSCHECK
    // Fixed: previously asserted on new_variable_order, which is not
    // declared until below (compile error when BOUNDSCHECK is defined).
    assert(new_axis_order.size() == dimension());
    verify_permutation(new_axis_order);
#endif
    std::vector<VARIABLE_KEY> new_variable_order(dimension());
    for (unsigned char i=0; i<dimension(); ++i)
      new_variable_order[i] = _ordered_variables[ new_axis_order[i] ];
    return LabeledPMF(new_variable_order, _pmf.transposed(new_axis_order));
  }
  // Returns a copy with axes permuted by variable name.
  LabeledPMF transposed(const std::vector<VARIABLE_KEY> & new_variable_order) const {
    Vector<unsigned char> new_axis_order = lookup_indices(new_variable_order);
    // Note: there is code shared with the function above, but write
    // it from scratch because new_variable_order does not need to be
    // constructed for this version.
#ifdef BOUNDSCHECK
    assert(new_variable_order.size() == dimension());
    verify_permutation(new_axis_order);
#endif
    return LabeledPMF(new_variable_order, _pmf.transposed(new_axis_order));
  }
  // In-place transposition by variable name (no-op when already in order).
  void transpose(const std::vector<VARIABLE_KEY> & new_variable_order) {
    if (new_variable_order == _ordered_variables)
      return;
    Vector<unsigned char> new_axis_order = lookup_indices(new_variable_order);
#ifdef BOUNDSCHECK
    assert(new_variable_order.size() == dimension());
    verify_permutation(new_axis_order);
#endif
    _ordered_variables = new_variable_order;
    _pmf.transpose(new_axis_order);
  }
  // To avoid building sets (since the _variable_to_index table is
  // already built): returns the axis index of var, or -1 when absent.
  int variable_index(const VARIABLE_KEY & var) const {
    auto iter = _variable_to_index.find(var);
    if (iter != _variable_to_index.end())
      return iter->second;
    return -1;
  }
  // Maps each variable in vars to its axis index. Under SHAPE_CHECK,
  // asserts every variable is present and the result is a subpermutation.
  Vector<unsigned char> lookup_indices(const std::vector<VARIABLE_KEY> & vars) const {
    Vector<unsigned char> res(vars.size());
    for (unsigned char i=0; i<vars.size(); ++i) {
      auto iter = _variable_to_index.find(vars[i]);
#ifdef SHAPE_CHECK
      assert(iter != _variable_to_index.end() && "Variable not found in LabeledPMF");
#endif
      res[i] = iter->second;
    }
#ifdef SHAPE_CHECK
    verify_subpermutation(res, dimension());
#endif
    return res;
  }
  bool contains_variable(const VARIABLE_KEY & var) const {
    return variable_index(var) != -1;
  }
  // Helper function for efficiently computing products, quotients,
  // and other tasks that require aligned tables to be intersected:
  // returns a view of this table restricted (on variables shared with rhs)
  // to the intersection of the two supports, plus that view's first support.
  std::pair<TensorView<double>, Vector<long> > view_of_intersection_with(const LabeledPMF & rhs) const {
    // (An unused intersection-size tally was removed here.)
    Vector<long> first_sup = _pmf.first_support();
    Vector<long> view_shape(dimension());
    for (unsigned char i=0; i<dimension(); ++i) {
      const VARIABLE_KEY & var = ordered_variables()[i];
      int index_rhs = rhs.variable_index(var);
      // Compute the intersection over the minimum supports:
      if (index_rhs != -1)
        first_sup[i] = std::max(first_sup[i], rhs._pmf.first_support()[index_rhs]);
      // Compute the intersection over the maximum supports (as a
      // shape):
      const long max_sup_plus_one = _pmf.first_support()[i] + _pmf.table().data_shape()[i];
      view_shape[i] = max_sup_plus_one;
      if (index_rhs != -1) {
        const long rhs_max_sup_plus_one = rhs._pmf.first_support()[index_rhs] + rhs._pmf.table().data_shape()[index_rhs];
        view_shape[i] = std::min(view_shape[i], rhs_max_sup_plus_one);
      }
#ifdef SHAPE_CHECK
      if (view_shape[i] < first_sup[i]) {
        std::cerr << "Error: narrowing LabeledPMF would produce empty LabeledPMF" << std::endl;
        assert(false);
      }
#endif
      view_shape[i] -= first_sup[i];
    }
    return std::make_pair(_pmf.table().start_at_const(first_sup - _pmf.first_support(), view_shape), first_sup);
  }
  // True iff both distributions are over exactly the same variable set
  // (order may differ).
  bool has_same_variables(const LabeledPMF & rhs) const {
    for (unsigned char i=0; i<dimension(); ++i) {
      const VARIABLE_KEY & var = ordered_variables()[i];
      if ( ! rhs.contains_variable(var) )
        return false;
    }
    for (unsigned char i=0; i<rhs.dimension(); ++i) {
      const VARIABLE_KEY & var = rhs.ordered_variables()[i];
      if ( ! contains_variable(var) )
        return false;
    }
    return true;
  }
};
// Core implementation of LabeledPMF product (MULT=true) and quotient
// (MULT=false). The two tables are first narrowed to the intersection of
// their supports on shared variables; the result is laid out as
// [vars unique to lhs][vars unique to rhs][shared vars (lhs order)], with
// the shared variables inner-most so that semi_outer_product/
// semi_outer_quotient can treat them as an aligned trailing block.
// Throws std::runtime_error when the supports of a shared variable do not
// intersect (a contradiction in the model).
template <typename VARIABLE_KEY, bool MULT> // true means multiply, false means divide
LabeledPMF<VARIABLE_KEY> mult_or_div(const LabeledPMF<VARIABLE_KEY> & lhs, const LabeledPMF<VARIABLE_KEY> & rhs) {
//#ifdef SHAPE_CHECK
// Check that bounds intersect:
for (unsigned int lhs_index=0; lhs_index<lhs.ordered_variables().size(); ++lhs_index) {
const VARIABLE_KEY & var = lhs.ordered_variables()[lhs_index];
int rhs_index = rhs.variable_index(var);
if (rhs_index != -1) {
// Variable is in both lhs and rhs:
long min_lhs_outcome = lhs.pmf().first_support()[lhs_index];
long max_lhs_outcome = min_lhs_outcome + lhs.pmf().table().view_shape()[lhs_index] - 1;
long min_rhs_outcome = rhs.pmf().first_support()[rhs_index];
long max_rhs_outcome = min_rhs_outcome + rhs.pmf().table().view_shape()[rhs_index] - 1;
//assert( ((min_rhs_outcome <= max_lhs_outcome && max_rhs_outcome >= min_lhs_outcome) || (min_lhs_outcome <= max_rhs_outcome && max_lhs_outcome >= min_rhs_outcome)) && "Error: multiplying LabeledPMFs would produce empty product");
if (!((min_rhs_outcome <= max_lhs_outcome && max_rhs_outcome >= min_lhs_outcome) || (min_lhs_outcome <= max_rhs_outcome && max_lhs_outcome >= min_rhs_outcome)))
{
std::stringstream ss;
ss << "Multiplying/dividing PMFs with supports [" << min_lhs_outcome << "," << max_lhs_outcome << "] and ["
<< min_rhs_outcome << "," << max_rhs_outcome << "] would result in empty outcome. Contradiction occurred?" << std::endl;
throw std::runtime_error(ss.str());
}
}
}
//#endif
// Views of each table narrowed to the shared-support intersection, with
// each view's first support:
std::pair<TensorView<double>, Vector<long> > lhs_view_and_first_sup = lhs.view_of_intersection_with(rhs);
std::pair<TensorView<double>, Vector<long> > rhs_view_and_first_sup = rhs.view_of_intersection_with(lhs);
unsigned char intersection_size=0;
// Check if the shared variables are already in the same order and
// in the inner-most indices; in that case, transposition is
// unnecessary. Simultaneously compute the number of shared
// variables.
int last_shared_index = -1;
bool already_in_order = true;
int rhs_index = -1;
for (unsigned char i=0; i<lhs.dimension(); ++i) {
const VARIABLE_KEY & var = lhs.ordered_variables()[i];
rhs_index = rhs.variable_index(var);
if (rhs_index != -1) {
// Unrelated to other code here; these loops are fused to
// compute intersection_size without performing variable
// lookup again.
++intersection_size;
if (last_shared_index != -1 && last_shared_index != rhs_index-1) {
// A block of consecutive in-order intersecting variables was
// broken by a non-consecutive shared variable.
already_in_order = false;
}
last_shared_index = rhs_index;
}
else {
if (last_shared_index != -1) {
// A block of consecutive in-order intersecting variables was
// broken by a non-intersecting variable. This is valid
// because shared variables must be inner-most, so once a
// shared variable is found, the rest of the variables must be
// shared.
already_in_order = false;
}
}
}
// To be in order, a block must be consecutive and the final
// variable in that block (i.e., the final variable in lhs) must be
// the final variable in rhs:
already_in_order = already_in_order && rhs_index+1 == rhs.dimension();
const unsigned char unique_lhs_dims=lhs.dimension()-intersection_size;
const unsigned char unique_rhs_dims=rhs.dimension()-intersection_size;
// Result variable order: unique-to-lhs, then unique-to-rhs, then shared:
std::vector<VARIABLE_KEY> new_variable_order;
// First, insert variables unique to lhs:
for (unsigned char i=0; i<lhs.dimension(); ++i) {
const VARIABLE_KEY & var = lhs.ordered_variables()[i];
if ( ! rhs.contains_variable(var))
new_variable_order.push_back(var);
}
// Second, insert variables unique to rhs:
for (unsigned char i=0; i<rhs.dimension(); ++i) {
const VARIABLE_KEY & var = rhs.ordered_variables()[i];
if ( ! lhs.contains_variable(var))
new_variable_order.push_back(var);
}
// Lastly, insert variables shared (use order of lhs):
for (unsigned char i=0; i<lhs.dimension(); ++i) {
const VARIABLE_KEY & var = lhs.ordered_variables()[i];
if ( rhs.contains_variable(var))
new_variable_order.push_back(var);
}
Vector<long> new_first_support(lhs.dimension() + rhs.dimension() - intersection_size);
if (already_in_order) {
// Work directly with the tensor views:
// Compute the first support of the result distribution:
for (unsigned char i=0; i<unique_lhs_dims; ++i)
new_first_support[i] = lhs_view_and_first_sup.second[i];
for (unsigned char i=0; i<unique_rhs_dims; ++i)
new_first_support[unique_lhs_dims + i] = rhs_view_and_first_sup.second[i];
for (unsigned char i=0; i<intersection_size; ++i)
new_first_support[unique_lhs_dims + unique_rhs_dims + i] = lhs_view_and_first_sup.second[unique_lhs_dims + i];
// Combine the views; normalization constants add for products and
// subtract for quotients (log space):
if (MULT) {
PMF res( new_first_support, semi_outer_product(lhs_view_and_first_sup.first, rhs_view_and_first_sup.first, intersection_size) );
res.add_to_log_normalization_constant( lhs.log_normalization_constant() + rhs.log_normalization_constant() );
return LabeledPMF<VARIABLE_KEY>( new_variable_order, res );
}
else {
PMF res(new_first_support, semi_outer_quotient(lhs_view_and_first_sup.first, rhs_view_and_first_sup.first, intersection_size));
res.add_to_log_normalization_constant( lhs.log_normalization_constant() - rhs.log_normalization_constant() );
return LabeledPMF<VARIABLE_KEY>( new_variable_order, res);
}
}
else {
// Transpose into the proper order:
Tensor<double> lhs_part(lhs_view_and_first_sup.first);
Tensor<double> rhs_part(rhs_view_and_first_sup.first);
// Axis orders mapping each operand onto the result layout:
Vector<unsigned char> new_lhs_order(lhs.dimension());
for (unsigned char i=0; i<unique_lhs_dims; ++i)
new_lhs_order[i] = lhs.variable_index(new_variable_order[i]);
for (unsigned char i=0; i<intersection_size; ++i)
new_lhs_order[unique_lhs_dims+i] = lhs.variable_index(new_variable_order[unique_lhs_dims+unique_rhs_dims+i]);
Vector<unsigned char> new_rhs_order(rhs.dimension());
for (unsigned char i=0; i<unique_rhs_dims; ++i)
new_rhs_order[i] = rhs.variable_index(new_variable_order[unique_lhs_dims + i]);
for (unsigned char i=0; i<intersection_size; ++i)
new_rhs_order[unique_rhs_dims+i] = rhs.variable_index(new_variable_order[unique_lhs_dims+unique_rhs_dims+i]);
evergreen::transpose(lhs_part, new_lhs_order);
evergreen::transpose(rhs_part, new_rhs_order);
// Compute the first support of the result distribution:
for (unsigned char i=0; i<unique_lhs_dims; ++i) {
unsigned char new_index = new_lhs_order[i];
new_first_support[i] = lhs_view_and_first_sup.second[new_index];
}
for (unsigned char i=0; i<unique_rhs_dims; ++i) {
unsigned char new_index = new_rhs_order[i];
new_first_support[unique_lhs_dims+i] = rhs_view_and_first_sup.second[new_index];
}
for (unsigned char i=0; i<intersection_size; ++i) {
unsigned char new_index = new_lhs_order[unique_lhs_dims+i];
new_first_support[unique_lhs_dims+unique_rhs_dims+i] = lhs_view_and_first_sup.second[new_index];
}
if (MULT) {
PMF res( new_first_support, semi_outer_product(lhs_part, rhs_part, intersection_size));
res.add_to_log_normalization_constant(lhs.log_normalization_constant() + rhs.log_normalization_constant());
return LabeledPMF<VARIABLE_KEY>(new_variable_order, res);
}
else {
PMF res(new_first_support, semi_outer_quotient(lhs_part, rhs_part, intersection_size));
res.add_to_log_normalization_constant(lhs.log_normalization_constant() - rhs.log_normalization_constant());
return LabeledPMF<VARIABLE_KEY>(new_variable_order, res);
}
}
}
// Product of two labeled PMFs; a dimensionless (empty) operand acts as the
// multiplicative identity.
template <typename VARIABLE_KEY>
LabeledPMF<VARIABLE_KEY> operator *(const LabeledPMF<VARIABLE_KEY> & lhs, const LabeledPMF<VARIABLE_KEY> & rhs) {
  const bool rhs_is_empty = (rhs.dimension() == 0);
  if (rhs_is_empty)
    return lhs;
  const bool lhs_is_empty = (lhs.dimension() == 0);
  if (lhs_is_empty)
    return rhs;
  return mult_or_div<VARIABLE_KEY, true>(lhs, rhs);
}
// Quotient of two labeled PMFs; dividing by a dimensionless (empty)
// distribution is a no-op.
template <typename VARIABLE_KEY>
LabeledPMF<VARIABLE_KEY> operator /(const LabeledPMF<VARIABLE_KEY> & lhs, const LabeledPMF<VARIABLE_KEY> & rhs) {
#ifdef SHAPE_CHECK
  // Dividing an empty LabeledPMF by a non-empty LabeledPMF makes no
  // sense (unless you were to make every element 1.0/old_value; but
  // it is unclear when that would ever be useful):
  if (rhs.dimension() > 0)
    assert(lhs.dimension() > 0);
#endif
  return (rhs.dimension() == 0) ? lhs : mult_or_div<VARIABLE_KEY, false>(lhs, rhs);
}
// Prints the variable labels (each followed by a space) and then the table.
template <typename VARIABLE_KEY>
std::ostream & operator << (std::ostream & os, const LabeledPMF<VARIABLE_KEY> & rhs) {
  const std::vector<VARIABLE_KEY> & labels = rhs.ordered_variables();
  for (unsigned char axis=0; axis<rhs.dimension(); ++axis)
    os << labels[axis] << " ";
  os << rhs.pmf();
  return os;
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/PMF/scaled_pmf.hpp | .hpp | 2,235 | 60 | #ifndef _SCALED_PMF_HPP
#define _SCALED_PMF_HPP
#include "squared.hpp"
// Note: could be sped up to not make local Vector objects and to not
// use tuple indexing (using integer offset):
// Deposits probability mass `mass` at the (generally fractional) scaled
// outcome `scaled_tup` into `ten`, whose integer origin is
// `new_first_support`. The mass is split uniformly over the integer
// bounding box [floor(scaled_tup), ceil(scaled_tup)] on each axis.
inline void add_scaled_outcome(Tensor<double> & ten, const Vector<long> & new_first_support, const Vector<double> & scaled_tup, double mass) {
// For performance, don't bother if the mass is 0.
if (mass > 0.0) {
// Lower corner of the bounding box, in ten's index space:
Vector<unsigned long> start_index(ten.dimension());
for (unsigned char i=0; i<ten.dimension(); ++i)
start_index[i] = floor(scaled_tup[i]) - new_first_support[i];
// Per-axis extent of the box (1 when scaled_tup[i] is integral, else 2):
Vector<unsigned long> scaled_bounding_box(ten.dimension());
for (unsigned char i=0; i<ten.dimension(); ++i)
scaled_bounding_box[i] = (ceil(scaled_tup[i]) - new_first_support[i] + 1) - start_index[i];
// Split the mass over the partitioned boxes:
for (unsigned char i=0; i<ten.dimension(); ++i)
mass /= scaled_bounding_box[i];
enumerate_apply_tensors([mass](const_tup_t /*tup*/, const unsigned char /*dim*/, double & val){
val += mass;
},
scaled_bounding_box,
ten.start_at(start_index));
}
}
// Rescales each axis i of pmf by factor[i] (factors may be negative,
// flipping the axis). Each source outcome's mass is spread over the
// integer cells covering its scaled (fractional) location via
// add_scaled_outcome.
inline PMF scaled_pmf(const PMF & pmf, const Vector<double> & factor) {
// Scaled positions of the two extreme corners of the support:
Vector<double> extreme_a = pmf.first_support();
extreme_a *= factor;
Vector<double> extreme_b = pmf.last_support();
extreme_b *= factor;
// Negative factors can swap which corner is minimal, hence min/max:
Vector<long> new_first_support(pmf.dimension());
Vector<long> new_last_support(pmf.dimension());
for (unsigned char i=0; i<pmf.dimension(); ++i) {
new_first_support[i] = floor( std::min(extreme_a[i], extreme_b[i]) );
new_last_support[i] = ceil( std::max(extreme_a[i], extreme_b[i]) );
}
Tensor<double> result_table(new_last_support - new_first_support + 1L);
Vector<double> scaled_tup(pmf.dimension());
// Visit every source cell, scale its coordinates, and deposit its mass:
enumerate_for_each_tensors([&pmf, &result_table, &new_first_support, &scaled_tup, &factor](const_tup_t tup, const unsigned char dim, double mass){
for (unsigned char i=0; i<dim; ++i)
scaled_tup[i] = (pmf.first_support()[i] + long(tup[i])) * factor[i];
add_scaled_outcome(result_table, new_first_support, scaled_tup, mass);
},
pmf.table().data_shape(),
pmf.table());
return PMF(new_first_support, std::move(result_table));
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/PMF/nonzero_bounding_box.hpp | .hpp | 1,172 | 30 | #ifndef _NONZERO_BOUNDING_BOX_HPP
#define _NONZERO_BOUNDING_BOX_HPP
// Returns {min_tup, max_tup}: the inclusive index bounds of the tightest
// box containing every entry of rhs whose value exceeds
// max(rhs) * relative_mass_threshold_for_bounding_box.
// Asserts that at least one such entry exists (otherwise the model
// contains a contradiction).
inline std::array<Vector<unsigned long>, 2> nonzero_bounding_box(const Tensor<double> & rhs, const double relative_mass_threshold_for_bounding_box) {
// Initialize min with value greater than maximum possible, and
// initialize max with minimum possible:
Vector<unsigned long> min_tup = rhs.data_shape(), max_tup(rhs.dimension());
double max_mass = max(rhs.flat());
// Threshold is relative to the largest mass in the table:
const double epsilon = max_mass*relative_mass_threshold_for_bounding_box;
bool exist_any_nonzero = false;
enumerate_for_each_tensors([&min_tup, &max_tup, &exist_any_nonzero, epsilon](const_tup_t counter, const unsigned char dim, double val){
if (val > epsilon) {
exist_any_nonzero = true;
// Grow the box to include this above-threshold entry:
for (unsigned char i=0; i<dim; ++i) {
min_tup[i] = std::min(min_tup[i], counter[i]);
max_tup[i] = std::max(max_tup[i], counter[i]);
}
}
},
rhs.data_shape(),
rhs);
assert(exist_any_nonzero && "PMF must be constructed from a tensor with at least one nonzero entry; this model has a contradiction in it (or is numerically very close to a contradiction).");
return {{min_tup, max_tup}};
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/PMF/dampen.hpp | .hpp | 2,180 | 60 | #ifndef _DAMPEN_HPP
#define _DAMPEN_HPP
// For computing a convex combination of two LabeledPMFs (using only the
// intersecting support):
// For computing a convex combination of two LabeledPMFs (using only the
// intersecting support): result = lambda*lhs + (1-lambda)*rhs.
// Both distributions must be over the same variable set; lambda must be
// in [0,1] (checked under NUMERIC_CHECK).
template <typename VARIABLE_KEY>
LabeledPMF<VARIABLE_KEY> dampen(const LabeledPMF<VARIABLE_KEY> & lhs, const LabeledPMF<VARIABLE_KEY> & rhs, double lambda) {
#ifdef SHAPE_CHECK
assert(lhs.has_same_variables(rhs));
#endif
#ifdef NUMERIC_CHECK
assert(lambda >= 0 && lambda <= 1);
#endif
// It is important to call this in the consistent order (lhs,
// rhs) so that lambda and 1-lambda are multiplied with the
// appropriate respective values:
auto convex_combination = [lambda](double a, double b) {
return lambda*a + (1-lambda)*b;
};
// Views of both tables narrowed to the shared support intersection:
std::pair<TensorView<double>, Vector<long> > lhs_view_and_first_sup = lhs.view_of_intersection_with(rhs);
std::pair<TensorView<double>, Vector<long> > rhs_view_and_first_sup = rhs.view_of_intersection_with(lhs);
const TensorView<double> & lhs_view = lhs_view_and_first_sup.first;
const TensorView<double> & rhs_view = rhs_view_and_first_sup.first;
Vector<long> & first_support = lhs_view_and_first_sup.second;
if (lhs.ordered_variables() == rhs.ordered_variables()) {
// variables are in the same order; no need to transpose:
Tensor<double> res_table(lhs_view);
apply_tensors([&convex_combination](double & res_val, double rhs_val){
res_val = convex_combination(res_val, rhs_val);
},
res_table.data_shape(),
res_table, rhs_view);
PMF pmf(first_support, std::move(res_table));
return LabeledPMF<VARIABLE_KEY>(lhs.ordered_variables(), std::move(pmf));
}
else {
// transpose rhs to get variables in the same order:
// NOTE(review): res_table is initialized from the LHS view but is then
// transposed by new_rhs_order and combined with the untransposed
// rhs_view -- transposing a copy of rhs_view into lhs's order would be
// the more obvious formulation. Verify this branch against a test case
// with differently-ordered variables.
// NOTE(review): lookup_indices returns Vector<unsigned char>, but the
// result is stored in Vector<unsigned int>; confirm the conversion is
// intended (the sibling code in LabeledPMF uses unsigned char).
Tensor<double> res_table(lhs_view);
Vector<unsigned int> new_rhs_order = rhs.lookup_indices(lhs.ordered_variables());
transpose(res_table, new_rhs_order);
apply_tensors([&convex_combination](double & res_val, double rhs_val){
res_val = convex_combination(res_val, rhs_val);
},
res_table.data_shape(),
res_table, rhs_view);
PMF pmf(first_support, std::move(res_table));
return LabeledPMF<VARIABLE_KEY>(lhs.ordered_variables(), std::move(pmf));
}
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/PMF/scaled_pmf_dither.hpp | .hpp | 3,762 | 92 | #ifndef _SCALED_PMF_DITHER_HPP
#define _SCALED_PMF_DITHER_HPP
#include "squared.hpp"
// Note: For performance, it may be beneficial to manually add the
// offset and use the Tensor[unsigned long] operator instead;
// constructing a tensor view for each function call will construct
// a Vector, which may not get optimized out (unclear).
// Note: For performance, it may be beneficial to manually add the
// offset and use the Tensor[unsigned long] operator instead;
// constructing a tensor view for each function call will construct
// a Vector, which may not get optimized out (unclear).
//
// Deposits `mass` into the box of `ten` starting at scaled_counter_lower
// with per-axis extent scaled_bounding_box (each extent is 1 or 2).
// weighting_partition[i] is the fraction of mass assigned to the lower
// (+0) cell on axis i; the remainder goes to the +1 cell.
inline static void add_scaled_outcome_dither(Tensor<double> & ten, const Vector<double> & weighting_partition, const Vector<unsigned long> & scaled_counter_lower, const Vector<unsigned long> & scaled_bounding_box, double mass) {
if (mass > 0.0) {
enumerate_apply_tensors([mass, &weighting_partition](const_tup_t tup, const unsigned char dim, double & ten_value){
double mass_partition = 1.0;
for (unsigned char i=0; i<dim; ++i)
// When tup[i] == 0, use weighting_partition[i]
// When tup[i] == 1, use 1-weighting_partition[i]
mass_partition *= tup[i]*(1.0-weighting_partition[i]) + (1-tup[i])*weighting_partition[i];
ten_value += mass_partition*mass;
},
scaled_bounding_box,
ten.start_at(scaled_counter_lower));
}
}
// Rescales pmf by factor (per axis, possibly negative) using Gaussian
// dithering: each scaled outcome's fractional position is smoothed with a
// Gaussian of variance sigma_squared to decide how its mass is split
// between the two neighboring integer cells on each axis.
inline PMF scaled_pmf_dither(const PMF & pmf, const Vector<double> & factor, double sigma_squared) {
// Largest index is shape - 1:
Vector<double> abs_factor = factor;
for (unsigned char i=0; i<factor.size(); ++i)
abs_factor[i] = fabs(abs_factor[i]);
// Scale the index extent of each axis:
Vector<long> res_shape = pmf.table().view_shape();
res_shape -= 1L;
for (unsigned char i=0; i<res_shape.size(); ++i)
res_shape[i] = ceil( res_shape[i]*abs_factor[i] );
// For the result shape, add +2 (the first support could round
// down while last support could round up):
Tensor<double> res_table(std::move(res_shape+2L));
const Vector<long> & first_sup = pmf.first_support();
Vector<long> last_sup = pmf.last_support();
// New first support: the smaller of the two scaled support corners
// (negative factors swap which corner is minimal), rounded down:
Vector<double> new_first_sup_double(pmf.dimension());
for (unsigned char i=0; i<pmf.dimension(); ++i)
new_first_sup_double[i] = std::min(first_sup[i]*factor[i], last_sup[i]*factor[i]);
Vector<long> new_first_sup(pmf.dimension());
for (unsigned char i=0; i<pmf.dimension(); ++i)
new_first_sup[i] = floor(new_first_sup_double[i]);
// Scratch buffers reused for every source cell:
Vector<double> scaled_outcome(pmf.dimension());
Vector<unsigned long> scaled_counter_lower(pmf.dimension());
Vector<unsigned long> scaled_bounding_box(pmf.dimension());
enumerate_for_each_tensors([&res_table, &scaled_counter_lower, &scaled_bounding_box, &factor, &first_sup, &new_first_sup, &scaled_outcome, sigma_squared](const_tup_t index, const unsigned char dim, double mass){
// Scaled (fractional) position of this source cell:
for (unsigned char i=0; i<dim; ++i)
scaled_outcome[i] = (long(index[i]) + first_sup[i])*factor[i];
// Lower corner (in result index space) and per-axis box extent (1 or 2):
for (unsigned char i=0; i<dim; ++i)
scaled_counter_lower[i] = floor(scaled_outcome[i]) - new_first_sup[i];
for (unsigned char i=0; i<dim; ++i)
scaled_bounding_box[i] = ceil(scaled_outcome[i]) - floor(scaled_outcome[i]) + 1;
for (unsigned char i=0; i<dim; ++i) {
if (scaled_bounding_box[i] == 1)
// Integral position: all mass goes to the single (+0) cell.
scaled_outcome[i] = 1.0;
else {
// scaled_bounding_box[i] == 2:
scaled_outcome[i] -= floor(scaled_outcome[i]);
// Outcome is either +0 or +1. These are then smoothed to
// weight how much of the mass is partitioned into the +0
// outcome.
double smoothed_0 = exp(-squared(scaled_outcome[i])/sigma_squared);
double smoothed_1 = exp(-squared(scaled_outcome[i]-1.0)/sigma_squared);
scaled_outcome[i] = smoothed_0 / (smoothed_0 + smoothed_1);
}
// scaled_outcome[i] is now the partition in the +0 category.
}
add_scaled_outcome_dither(res_table, scaled_outcome, scaled_counter_lower, scaled_bounding_box, mass);
},
pmf.table().view_shape(),
pmf.table());
auto result = PMF(new_first_sup, std::move(res_table));
return result;
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/PMF/scaled_pmf_interpolate.hpp | .hpp | 3,158 | 77 | #ifndef _SCALED_PMF_INTERPOLATE_HPP
#define _SCALED_PMF_INTERPOLATE_HPP
#include "squared.hpp"
// Note: could be sped up to not make local Vector objects and to not
// use tuple indexing (using integer offset):
// Spreads `mass` uniformly over the integer cells of `ten` spanned by the
// interval [scaled_tup, next_scaled_tup] on each axis (the scaled image of
// one source cell), where new_first_support is ten's integer origin.
inline void add_scaled_outcome_interpolate(Tensor<double> & ten, const Vector<long> & new_first_support, const Vector<double> & scaled_tup, const Vector<double> & next_scaled_tup, double mass, const Vector<double> & /*factor*/) {
// For performance, don't bother if the mass is 0.
if (mass > 0.0) {
Vector<unsigned long> start_index(ten.dimension());
for (unsigned char i=0; i<ten.dimension(); ++i)
start_index[i] = floor( std::min(scaled_tup[i], next_scaled_tup[i]) ) - new_first_support[i];
Vector<unsigned long> scaled_bounding_box(ten.dimension());
// NOTE(review): here start_index (already offset by new_first_support)
// is subtracted inside ceil() and new_first_support is subtracted
// outside, the opposite grouping from add_scaled_outcome in
// scaled_pmf.hpp (ceil(max) - new_first_support + 1 - start_index).
// Verify this computes the intended box extent.
for (unsigned char i=0; i<ten.dimension(); ++i)
scaled_bounding_box[i] = ceil( std::max(scaled_tup[i], next_scaled_tup[i]) - start_index[i] ) - new_first_support[i];
// Split the mass over the partitioned boxes:
for (unsigned char i=0; i<ten.dimension(); ++i)
mass /= scaled_bounding_box[i];
enumerate_apply_tensors([mass](const_tup_t /*tup*/, const unsigned char /*dim*/, double& val) {
val += mass;
},
scaled_bounding_box,
ten.start_at(start_index));
}
}
// Rescales pmf by factor (per axis, possibly negative), treating each
// source cell as a continuous interval: the mass of cell t is spread over
// the scaled interval [t*factor, (t+1)*factor] instead of a single point.
// Appropriate when |factor[i]| > 1 (upscaling), where point-scaling would
// leave gaps.
inline PMF scaled_pmf_interpolate(const PMF & pmf, const Vector<double> & factor) {
// Scaled positions of the two extreme support corners:
Vector<double> extreme_a = pmf.first_support();
extreme_a *= factor;
Vector<double> extreme_b = pmf.last_support();
extreme_b *= factor;
Vector<long> new_first_support(pmf.dimension());
Vector<unsigned long> new_shape(pmf.dimension());
for (unsigned char i=0; i<pmf.dimension(); ++i) {
new_first_support[i] = floor( std::min(extreme_a[i], extreme_b[i]) );
// +ceil(|factor|) accounts for the width of the final scaled interval:
new_shape[i] = long(ceil( std::max(extreme_a[i], extreme_b[i]) )) - new_first_support[i] + long(ceil(fabs(factor[i]))) ;
}
Tensor<double> result_table(new_shape);
Vector<double> scaled_tup(pmf.dimension());
Vector<double> next_scaled_tup(pmf.dimension());
enumerate_for_each_tensors([&pmf, &result_table, &new_first_support, &scaled_tup, &next_scaled_tup, &factor](const_tup_t tup, const unsigned char dim, double mass){
for (unsigned char i=0; i<dim; ++i) {
// Interval covered by this source cell after scaling:
scaled_tup[i] = (pmf.first_support()[i] + long(tup[i])) * factor[i];
next_scaled_tup[i] = scaled_tup[i] + factor[i];
// This hack is necessary in order to allow scaling by S and
// then by 1/S come out the same as scaling by -S and -1/S. It
// occurs because the continuous interpretation of bin 1 is
// actually [1,2); however, this becomes inverted with
// negative support: -1 indicates [-1, 0), which is not
// symmetric. By shifting negatives left by 1 (meaning that
// their scaled interpretations will shift left by factor[i]),
// -1 will indicate (-2,-1].
if (factor[i] < 0) {
scaled_tup[i] -= factor[i];
next_scaled_tup[i] -= factor[i];
}
}
add_scaled_outcome_interpolate(result_table, new_first_support, scaled_tup, next_scaled_tup, mass, factor);
},
pmf.table().data_shape(),
pmf.table());
return PMF(new_first_support, std::move(result_table));
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/PMF/scaled_pmf_dither_interpolate.hpp | .hpp | 560 | 16 | #ifndef _SCALED_PMF_DITHER_INTERPOLATE_HPP
#define _SCALED_PMF_DITHER_INTERPOLATE_HPP
#include "scaled_pmf_dither.hpp"
// Dispatches to the dithering scaler when every scaling factor lies in
// [-1, 1] (downscaling, where interpolation is unnecessary), and to the
// interpolating scaler otherwise.
inline PMF scaled_pmf_dither_interpolate(const PMF & pmf, const Vector<double> & factor, double sigma_squared) {
  // TODO: implement more general form that simultaneously dithers and
  // interpolates.
  const bool all_factors_within_unit = (factor <= 1.0 && factor >= -1.0);
  if ( ! all_factors_within_unit )
    return scaled_pmf_interpolate(pmf, factor);
  return scaled_pmf_dither(pmf, factor, sigma_squared);
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/PMF/marginal.hpp | .hpp | 6,055 | 155 | #ifndef _MARGINAL_HPP
#define _MARGINAL_HPP
// Note: it may be possible to optimize these marginal routines for
// rvalue references, directly writing the results to the first
// element in each collapsed group, then collapsing values down, and
// then shrinking the tensor:
// Empirically chosen:
const unsigned long SIZE_WHERE_NAIVE_MARGINAL_BECOMES_SLOWER = 32;
// Naive marginal: more optimized than the obvious version; saves
// time by iterating over new tuple and then inside that iterating
// over remaining tuple. This allows the pow computation to be
// performed in a numerically stable manner (by dividing out the
// maximum) without computing a separate tensor full of the maximum
// marginals first.
// Computes the p-norm marginal of `table` onto axes_to_keep: each output
// cell is (sum over eliminated axes of (val/max)^p)^(1/p) * max, where max
// is the largest value in that cell's eliminated slice (dividing out the
// max keeps the pow numerically stable). axes_to_keep must be a
// subpermutation of the table's axes.
inline Tensor<double> naive_marginal(const Tensor<double> & table, Vector<unsigned char> axes_to_keep, double p) {
#ifdef SHAPE_CHECK
verify_subpermutation(axes_to_keep, table.dimension());
#endif
unsigned char k;
// Shape of the marginalized result:
Vector<unsigned long> new_shape(axes_to_keep.size());
for (k=0; k<axes_to_keep.size(); ++k)
new_shape[k] = table.data_shape()[ axes_to_keep[k] ];
// Complement of axes_to_keep = axes to eliminate:
std::vector<bool> axes_eliminated(table.dimension(), true);
for (unsigned char i=0; i<axes_to_keep.size(); ++i)
axes_eliminated[ axes_to_keep[i] ] = false;
Vector<unsigned char> axes_to_remove( table.dimension() - axes_to_keep.size() );
for (unsigned char i=0, j=0; i<axes_eliminated.size(); ++i)
if (axes_eliminated[i]) {
axes_to_remove[j] = i;
++j;
}
Vector<unsigned long> shape_removed( axes_to_remove.size() );
for (unsigned char i=0; i<shape_removed.size(); ++i)
shape_removed[i] = table.data_shape()[ axes_to_remove[i] ];
Tensor<double> new_table(new_shape);
// full_counter holds the full-dimensional index assembled from the kept
// and removed coordinates:
Vector<unsigned long> full_counter(table.dimension());
enumerate_apply_tensors([&axes_to_keep, &axes_to_remove, &full_counter, &table, p, &shape_removed](const_tup_t counter_kept, const unsigned char dim_kept, double & new_val){
for (unsigned char i=0; i<dim_kept; ++i)
full_counter[ axes_to_keep[i] ] = counter_kept[i];
// Pass 1: find the maximum over the eliminated slice:
double max_val = 0.0;
enumerate_for_each_tensors([&axes_to_remove, &full_counter, &table, p, &max_val, dim_kept](const_tup_t counter_removed, const unsigned char dim_removed){
for (unsigned char i=0; i<dim_removed; ++i)
full_counter[ axes_to_remove[i] ] = counter_removed[i];
unsigned long full_index = tuple_to_index(full_counter, table.data_shape(), dim_kept + dim_removed);
max_val = std::max(max_val, table[full_index]);
},
shape_removed);
// Pass 2: accumulate (val/max)^p, skipped entirely when the max is
// effectively zero (below tau_denom):
if ( max_val > tau_denom ) {
enumerate_for_each_tensors([&axes_to_remove, &full_counter, &table, p, max_val, dim_kept, &new_val](const_tup_t counter_removed, const unsigned char dim_removed){
for (unsigned char i=0; i<dim_removed; ++i)
full_counter[ axes_to_remove[i] ] = counter_removed[i];
unsigned long full_index = tuple_to_index(full_counter, table.data_shape(), dim_kept + dim_removed);
new_val += custom_pow(table[full_index] / max_val, p);
},
shape_removed);
}
// Otherwise, let result = 0.0.
// Note: The numeric stability of this could possibly be
// improved; e.g., when max is close to zero, the 1-norm may not
// necessarily be 0.
new_val = custom_pow(new_val, 1.0/p) * max_val;
},
new_table.data_shape(),
new_table);
return new_table;
}
// First transpose so that the innermost indices are the ones lost:
// (this makes marginalization more cache friendly):
// Computes the same p-norm marginal as naive_marginal, but first
// transposes the table so that the kept axes come first and the removed
// axes are innermost. The removed axes then form one contiguous flat
// run per output cell, which is more cache friendly for larger tables.
inline Tensor<double> transposed_marginal(const Tensor<double> & table, Vector<unsigned char> axes_to_keep, double p) {
#ifdef SHAPE_CHECK
  verify_subpermutation(axes_to_keep, table.dimension());
#endif
  // Initialize variables:
  unsigned long k;
  // Shape of the result: lengths of the kept axes, in kept order:
  Vector<unsigned long> new_shape(axes_to_keep.size());
  for (k=0; k<axes_to_keep.size(); ++k)
    new_shape[k] = table.data_shape()[ axes_to_keep[k] ];
  // Transpose so that the axes kept are first:
  Vector<unsigned char> new_axis_order(table.dimension());
  copy( new_axis_order, axes_to_keep );
  // Append the eliminated axes (in ascending order) after the kept ones:
  std::vector<bool> axes_eliminated(table.dimension(), true);
  for (unsigned char i=0; i<axes_to_keep.size(); ++i)
    axes_eliminated[ axes_to_keep[i] ] = false;
  for (unsigned char i=0, j=0; i<axes_eliminated.size(); ++i)
    if (axes_eliminated[i]) {
      new_axis_order[j+axes_to_keep.size()] = i;
      ++j;
    }
  Tensor<double> table_copy = table;
  transpose(table_copy, new_axis_order);
  if (axes_to_keep.size() == table.dimension())
    // all axes are kept:
    return table_copy;
  Tensor<double> new_table(new_shape);
  // Compute marginal:
  // Note: this strategy can be more efficient (unrolling final axes
  // into single, longer axis), but is not valid for TensorView since
  // the memory would not necessarily be contiguous. If this code were
  // generalized for TensorView, the following would therefore need to
  // be changed.
  // Number of elements collapsed into each output cell:
  unsigned long removed_axes_flat_length = flat_length( table_copy.data_shape().start_at_const(axes_to_keep.size() ) );
  enumerate_apply_tensors([&table_copy, &removed_axes_flat_length, p](const_tup_t counter, const unsigned char dim, double & new_val){
      // Flat offset of this output cell's contiguous run in table_copy:
      unsigned long bias = tuple_to_index(counter, table_copy.data_shape(), dim) * removed_axes_flat_length;
      // Divide out the max for numeric stability (as in naive_marginal):
      double max_val = 0.0;
      for (unsigned long k=0; k<removed_axes_flat_length; ++k)
        max_val = std::max(max_val, table_copy[bias + k]);
      if ( max_val > tau_denom ) {
        for (unsigned long k=0; k<removed_axes_flat_length; ++k)
          new_val += custom_pow(table_copy[bias + k]/max_val, p);
        new_val = custom_pow(new_val, 1.0/p) * max_val;
      }
      // otherwise the result will be 0.0:
    },
    new_table.data_shape(),
    new_table);
  return new_table;
}
// Dispatches to the marginalization strategy expected to be faster for
// the given table size: the naive version for small tables, the
// cache-friendly transposed version otherwise.
inline Tensor<double> marginal(const Tensor<double> & table, Vector<unsigned char> axes_to_keep, double p) {
  const bool use_naive = table.flat_size() < SIZE_WHERE_NAIVE_MARGINAL_BECOMES_SLOWER;
  if ( ! use_naive )
    return transposed_marginal(table, axes_to_keep, p);
  return naive_marginal(table, axes_to_keep, p);
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/PMF/divergence.hpp | .hpp | 2,268 | 74 | #ifndef _DIVERGENCE_HPP
#define _DIVERGENCE_HPP
#include "squared.hpp"
// Squared error between two tensors of identical view shape: the sum
// over all cells of (lhs - rhs)^2. Shapes are checked under SHAPE_CHECK.
template <template <typename> class TENSOR_LHS, template <typename> class TENSOR_RHS>
double se(const TensorLike<double, TENSOR_LHS> & lhs, const TensorLike<double, TENSOR_RHS> & rhs) {
#ifdef SHAPE_CHECK
  assert( lhs.view_shape() == rhs.view_shape() );
#endif
  double accumulated = 0.0;
  for_each_tensors([&accumulated](double a, double b){
      const double difference = a - b;
      accumulated += squared(difference);
    },
    lhs.view_shape(),
    lhs, rhs);
  return accumulated;
}
template <typename VARIABLE_KEY>
class LabeledPMF;
// Mean squared error style divergence between two LabeledPMFs over the
// same variables: squared differences on the intersection of their
// supports, plus the squared mass each distribution places outside the
// intersection, averaged over the union of their support sizes.
// Used by the schedulers as a convergence criterion for messages.
template <typename VARIABLE_KEY>
double mse_divergence(const LabeledPMF<VARIABLE_KEY> & lhs, const LabeledPMF<VARIABLE_KEY> & rhs) {
#ifdef SHAPE_CHECK
  assert(lhs.has_same_variables(rhs));
#endif
  // Views of each table restricted to the intersecting support
  // (second pair element is the first support of that intersection):
  std::pair<TensorView<double>, Vector<long> > lhs_view_and_first_sup = lhs.view_of_intersection_with(rhs);
  std::pair<TensorView<double>, Vector<long> > rhs_view_and_first_sup = rhs.view_of_intersection_with(lhs);
  const TensorView<double> & lhs_view = lhs_view_and_first_sup.first;
  const TensorView<double> & rhs_view = rhs_view_and_first_sup.first;
  // Total mass each PMF places inside the intersection:
  double lhs_view_mass = 0.0;
  for_each_tensors([&lhs_view_mass](double val){
      lhs_view_mass += val;
    },
    lhs_view.view_shape(),
    lhs_view
  );
  double rhs_view_mass = 0.0;
  for_each_tensors([&rhs_view_mass](double val){
      rhs_view_mass += val;
    },
    rhs_view.view_shape(),
    rhs_view
  );
  // Mass outside the intersection differs from 0 there, so it
  // contributes (1 - mass_inside)^2 for each distribution:
  double nonintersecting_se = squared(1.0 - lhs_view_mass) + squared(1.0 - rhs_view_mass);
  double intersecting_se;
  if (lhs.ordered_variables() == rhs.ordered_variables()) {
    // variables are in the same order; no need to transpose:
    intersecting_se = se(lhs_view, rhs_view);
  }
  else {
    // transpose rhs to get variables in the same order:
    Tensor<double> rhs_part(rhs_view);
    Vector<unsigned int> new_rhs_order = rhs.lookup_indices(lhs.ordered_variables());
    transpose(rhs_part, new_rhs_order);
    intersecting_se = se(lhs_view_and_first_sup.first, rhs_part);
  }
  // Note: lhs_view.flat_size() == rhs_view.flat_size()
  // Denominator is |support(lhs)| + |support(rhs)| - |intersection|,
  // i.e., the size of the union of the two supports:
  return (nonintersecting_se + intersecting_se) / (lhs.pmf().table().flat_size() + rhs.pmf().table().flat_size() - lhs_view.flat_size());
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/PMF/PMF.hpp | .hpp | 9,881 | 314 | #ifndef _PMF_HPP
#define _PMF_HPP
#include "../Utility/Clock.hpp"
#include "../Convolution/p_convolve.hpp"
#include "marginal.hpp"
#include "nonzero_bounding_box.hpp"
// Forward declarations to allow ostream << PMF in this file.
class PMF;
std::ostream & operator <<(std::ostream & os, const PMF & rhs);
// A probability mass function over an integer lattice: a nonnegative,
// normalized Tensor<double> of probabilities together with the integer
// coordinates (_first_support) of its first cell. The total mass
// divided out by normalization is accumulated (in log space) in
// _log_normalization_constant so callers can recover unnormalized mass.
class PMF {
public:
  // Mass at or below this threshold cannot be normalized; normalize()
  // throws in that case.
  static constexpr double mass_threshold_for_normalization = 0.0;
  // Relative threshold used when trimming the support to the nonzero
  // bounding box.
  static constexpr double relative_mass_threshold_for_bounding_box = 0.0;
protected:
  // Coordinates of the first (lowest-index) table cell in support space.
  Vector<long> _first_support;
  // Normalized, nonnegative probability table.
  Tensor<double> _table;
  // Sum of log(scale factor) over all normalizations applied so far.
  double _log_normalization_constant;

  // Shrink the support to the bounding box of (relatively) nonzero mass.
  void narrow_to_nonzero_support() {
    std::array<Vector<unsigned long>, 2> nonzero_box = nonzero_bounding_box(_table, relative_mass_threshold_for_bounding_box);
    narrow_support(_first_support + nonzero_box[0], _first_support + nonzero_box[1]);
  }

  // Scales _table so its total mass is 1 and returns the previous total
  // mass. Throws std::runtime_error when the mass is too small to
  // normalize (e.g., a contradiction zeroed out the table).
  double normalize() {
    double tot = sum(_table.flat());
    if (tot <= mass_threshold_for_normalization)
    {
      std::stringstream ss;
      // Fixed: a separating space after "mass" (previously the value
      // was fused onto the word in the error message).
      ss << "Total probability mass " << tot << " in " << _table
         << " is too small to normalize. Contradiction occurred?" << std::endl;
      throw std::runtime_error(ss.str());
    }
    _table.flat() /= tot;
    return tot;
  }

  // Invariant check used under NUMERIC_CHECK: all entries must be >= 0.
  void verify_nonnegative() const {
    assert( _table.flat() >= 0.0 && "PMF must be constructed from nonnegative Tensor<double>" );
  }
public:
  // Construct dimension 0 by default.
  PMF():
    _log_normalization_constant(0.0)
  { }

  // Construct from a first-support vector and a nonnegative table; the
  // table is normalized and the support narrowed to its nonzero
  // bounding box.
  PMF(const Vector<long> & sup, const Tensor<double> & tab):
    _first_support(sup),
    _table(tab)
  {
#ifdef SHAPE_CHECK
    assert(_first_support.size() == _table.dimension());
#endif
#ifdef NUMERIC_CHECK
    verify_nonnegative();
#endif
    _log_normalization_constant = log(normalize());
    narrow_to_nonzero_support();
  }

  // As above, but takes ownership of the table.
  PMF(const Vector<long> & sup, Tensor<double> && tab):
    _first_support(sup),
    _table(std::move(tab))
  {
#ifdef SHAPE_CHECK
    assert(_first_support.size() == _table.dimension());
#endif
#ifdef NUMERIC_CHECK
    verify_nonnegative();
#endif
    _log_normalization_constant = log(normalize());
    narrow_to_nonzero_support();
  }

  // As above, but takes ownership of both the support and the table.
  PMF(Vector<long> && sup, Tensor<double> && tab):
    _first_support(sup),
    _table(std::move(tab))
  {
#ifdef SHAPE_CHECK
    assert(_first_support.size() == _table.dimension());
#endif
#ifdef NUMERIC_CHECK
    verify_nonnegative();
#endif
    _log_normalization_constant = log(normalize());
    narrow_to_nonzero_support();
  }

  PMF(const PMF & rhs):
    _first_support(rhs._first_support),
    _table(rhs._table), _log_normalization_constant(rhs._log_normalization_constant)
  {
    // Do not need to normalize or check bounding box
  }

  PMF(PMF && rhs):
    _first_support(std::move(rhs._first_support)),
    _table(std::move(rhs._table)),
    _log_normalization_constant(rhs._log_normalization_constant)
  {
    // Do not need to normalize or check bounding box
  }

  const PMF & operator =(const PMF & rhs) {
    _first_support = rhs._first_support;
    _table = rhs._table;
    _log_normalization_constant = rhs._log_normalization_constant;
    return *this;
  }

  const PMF & operator =(PMF && rhs) {
    _first_support = std::move(rhs._first_support);
    _table = std::move(rhs._table);
    _log_normalization_constant = rhs._log_normalization_constant;
    return *this;
  }

  // Restricts the PMF to the window [new_first_support, new_last_support]
  // intersected with the current support, renormalizing afterwards.
  // Throws std::runtime_error if the intersection is empty along any axis.
  void narrow_support(const Vector<long> & new_first_support, const Vector<long> & new_last_support) {
#ifdef SHAPE_CHECK
    assert(dimension() == new_first_support.size() && new_first_support.size() == new_last_support.size());
    assert(new_first_support <= new_last_support);
#endif
    Vector<long> intersecting_first_support = _first_support;
    Vector<unsigned long> new_shape(new_last_support.size());
    // (A previous redundant loop pre-filling new_shape from the raw
    // window was removed; every element is assigned below.)
    for (unsigned char i=0; i<new_shape.size(); ++i) {
      // Clip the requested window to the table actually stored:
      long new_last = std::min(new_last_support[i], (long)(intersecting_first_support[i] + _table.data_shape()[i]) - 1);
      intersecting_first_support[i] = std::max(intersecting_first_support[i], new_first_support[i]);
      long new_shape_i = new_last - intersecting_first_support[i] + 1;
      if (new_shape_i <= 0) {
        std::stringstream ss;
        ss << "Narrowing to " << new_first_support << " " << new_last_support << " results in empty PMF" << std::endl;
        throw std::runtime_error(ss.str());
      }
      new_shape[i] = (unsigned long) new_shape_i;
    }
    // intersecting_first_support will only have increased compared to
    // _first_support:
    Vector<unsigned long> tensor_start = intersecting_first_support - _first_support;
    _table.shrink(tensor_start, new_shape);
    add_to_log_normalization_constant( log(normalize()) );
    copy(_first_support, intersecting_first_support);
  }

  unsigned char dimension() const {
    return _first_support.size();
  }

  double log_normalization_constant() const {
    return _log_normalization_constant;
  }

  void reset_norm_constant() {
    _log_normalization_constant = 0.0;
  }

  void add_to_log_normalization_constant(const double log_scale_factor) {
    _log_normalization_constant += log_scale_factor;
  }

  const Tensor<double> & table() const {
    return _table;
  }

  const Vector<long> & first_support() const {
    return _first_support;
  }

  // Note: The following could also be cached during construction, but
  // it isn't really a large performance benefit and would take up
  // more memory and make construction more expensive.
  Vector<long> last_support() const {
    return _first_support + _table.view_shape() - 1L;
  }

  // Slow: for end use, not inside engine. Returns 0.0 outside the
  // support.
  double get_probability(const Vector<long> & tuple) const {
#ifdef SHAPE_CHECK
    assert(tuple.size() == dimension());
#endif
    bool all_at_least_first_support = tuple >= _first_support;
    bool all_at_most_last_support = tuple <= last_support();
    // If out of bounds for support, return 0.0:
    if ( ! all_at_least_first_support || ! all_at_most_last_support )
      return 0.0;
    Vector<unsigned long> table_index = tuple - _first_support;
    return table()[table_index];
  }

  // p-norm marginal over the axes NOT in axes_to_keep; the result's
  // normalization constant includes this PMF's.
  PMF marginal(const Vector<unsigned char> & axes_to_keep, double p) const {
#ifdef SHAPE_CHECK
    verify_subpermutation(axes_to_keep, dimension());
#endif
    if (axes_to_keep.size() == dimension())
      // all axes are kept (transpose to avoid pow computation and
      // normalization)
      return transposed(axes_to_keep);
    if ( axes_to_keep.size() == 0 )
      return PMF();
    Vector<long> new_first_support(axes_to_keep.size());
    unsigned char k;
    for (k=0; k<axes_to_keep.size(); ++k)
      new_first_support[k] = _first_support[ axes_to_keep[k] ];
    PMF result( new_first_support, evergreen::marginal(_table, axes_to_keep, p));
    result.add_to_log_normalization_constant( _log_normalization_constant );
    return result;
  }

  // Returns a copy with axes permuted to new_order (a full permutation).
  PMF transposed(const Vector<unsigned char> & new_order) const {
#ifdef SHAPE_CHECK
    assert(new_order.size() == dimension());
    verify_permutation(new_order);
#endif
    // Does not need to renormalize:
    PMF result(*this);
    result.transpose(new_order);
    return result;
  }

  // In-place axis permutation (support and table stay in sync).
  void transpose(const Vector<unsigned char> & new_order) {
#ifdef SHAPE_CHECK
    assert(new_order.size() == dimension());
    verify_permutation(new_order);
#endif
    Vector<long> new_first_support(new_order.size());
    for (unsigned char i=0; i<new_first_support.size(); ++i)
      new_first_support[i] = _first_support[ new_order[i] ];
    _first_support = std::move(new_first_support);
    evergreen::transpose(_table, new_order);
  }
};
// p-convolves the two PMF tables (see numeric_p_convolve) and adds the
// first supports; per the construction in p_sub below, this corresponds
// to the distribution of the sum of the two quantities. The result's
// log-normalization constant accumulates both inputs' constants.
inline PMF p_add(const PMF & lhs, const PMF & rhs, double p) {
#ifdef SHAPE_CHECK
  assert(lhs.table().dimension() == rhs.table().dimension());
#endif
  PMF result(lhs.first_support() + rhs.first_support(), numeric_p_convolve(lhs.table(), rhs.table(), p) );
  result.add_to_log_normalization_constant(lhs.log_normalization_constant() + rhs.log_normalization_constant());
  return result;
}
// Subtraction counterpart of p_add: reverses the rhs table along every
// axis and p-convolves, so the result's support runs from
// lhs.first_support() - rhs.last_support().
inline PMF p_sub(const PMF & lhs, const PMF & rhs, double p) {
#ifdef SHAPE_CHECK
  assert(lhs.table().dimension() == rhs.table().dimension());
#endif
  // Flip the rhs table along every axis so that addition of the
  // flipped table corresponds to subtraction with the original table:
  Tensor<double> rhs_table_flipped(rhs.table().data_shape());
  Vector<unsigned long> counter_flipped(lhs.dimension());
  enumerate_for_each_tensors([&rhs_table_flipped, &counter_flipped](const_tup_t counter, const unsigned char dim, double val){
      // Mirror each coordinate: i -> (len - 1 - i) along every axis:
      for (unsigned char i=0; i<dim; ++i)
        counter_flipped[i] = rhs_table_flipped.data_shape()[i] - counter[i] - 1ul;
      rhs_table_flipped[ tuple_to_index(counter_flipped, rhs_table_flipped.data_shape(), dim) ] = val;
    },
    rhs_table_flipped.data_shape(),
    rhs.table());
  PMF result(lhs.first_support() - rhs.last_support(), numeric_p_convolve(lhs.table(), rhs_table_flipped, p) );
  result.add_to_log_normalization_constant(lhs.log_normalization_constant() + rhs.log_normalization_constant());
  return result;
}
// Streams a PMF as "PMF:{<first support> to <last support>} <table>".
inline std::ostream & operator <<(std::ostream & os, const PMF & rhs) {
  os << "PMF:" << "{";
  os << rhs.first_support();
  os << " to ";
  os << rhs.last_support();
  os << "} ";
  os << rhs.table();
  return os;
}
#include "scaled_pmf.hpp"
#include "scaled_pmf_interpolate.hpp"
#include "scaled_pmf_dither.hpp"
#include "scaled_pmf_dither_interpolate.hpp"
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/PMF/semi_outer_product_and_quotient.hpp | .hpp | 3,713 | 82 | #ifndef _SEMI_OUTER_PRODUCT_AND_QUOTIENT_HPP
#define _SEMI_OUTER_PRODUCT_AND_QUOTIENT_HPP
// For performing semi_outer_... functions:
// Combines two tensors whose trailing `overlapping_inner_dims` axes
// match: the result has shape
// [unique lhs axes] x [unique rhs axes] x [shared inner axes], and each
// cell is semi_outer_function(lhs cell, rhs cell) with the shared inner
// coordinates equal on both sides (an outer product/quotient over the
// unique axes, element-wise over the shared ones).
template <typename FUNCTION, template <typename> class TENSOR>
Tensor<double> semi_outer_apply(const TensorLike<double, TENSOR> & lhs, const TensorLike<double, TENSOR> & rhs, const unsigned char overlapping_inner_dims, FUNCTION semi_outer_function) {
#ifdef SHAPE_CHECK
  assert(lhs.dimension() > 0 && rhs.dimension() > 0);
#endif
  // semi_outer_function is either semi_outer_product or semi_outer_quotient
  const unsigned char unique_lhs_dims = lhs.dimension() - overlapping_inner_dims;
  const unsigned char unique_rhs_dims = rhs.dimension() - overlapping_inner_dims;
  // Split each shape into the unique leading axes and shared trailing axes:
  Vector<unsigned long> outer_shape_lhs = lhs.view_shape().start_at_const(0, unique_lhs_dims);
  Vector<unsigned long> outer_shape_rhs = rhs.view_shape().start_at_const(0, unique_rhs_dims);
  Vector<unsigned long> inner_shape_lhs = lhs.view_shape().start_at_const(unique_lhs_dims, overlapping_inner_dims);
  Vector<unsigned long> inner_shape_rhs = rhs.view_shape().start_at_const(unique_rhs_dims, overlapping_inner_dims);
  Vector<unsigned long> result_shape = concatenate(concatenate(outer_shape_lhs, outer_shape_rhs), inner_shape_lhs);
#ifdef SHAPE_CHECK
  assert( lhs.dimension() >= overlapping_inner_dims );
  assert( rhs.dimension() >= overlapping_inner_dims );
  // Inner shapes must match:
  assert(inner_shape_lhs == inner_shape_rhs);
#endif
  Tensor<double> result( result_shape );
  if (unique_lhs_dims > 0 || unique_rhs_dims > 0) {
    // Scratch tuples for indexing into lhs and rhs from the result tuple:
    Vector<unsigned long> counter_lhs(lhs.dimension());
    Vector<unsigned long> counter_rhs(rhs.dimension());
    enumerate_apply_tensors([&counter_lhs, &counter_rhs, &lhs, &rhs, unique_lhs_dims, unique_rhs_dims, overlapping_inner_dims, &semi_outer_function](const_tup_t counter_result, const unsigned char /*result_dims*/, double & res_val) {
        // Note: This could be optimized to not use the counter Vectors:
        // result tuple layout: [lhs unique | rhs unique | shared inner]
        for (unsigned char i=0; i<unique_lhs_dims; ++i)
          counter_lhs[i] = counter_result[i];
        for (unsigned char i=0; i<overlapping_inner_dims; ++i)
          counter_lhs[unique_lhs_dims+i] = counter_result[unique_lhs_dims+unique_rhs_dims+i];
        for (unsigned char i=0; i<unique_rhs_dims; ++i)
          counter_rhs[i] = counter_result[unique_lhs_dims+i];
        for (unsigned char i=0; i<overlapping_inner_dims; ++i)
          counter_rhs[unique_rhs_dims+i] = counter_result[unique_lhs_dims+unique_rhs_dims+i];
        res_val = semi_outer_function(lhs[counter_lhs], rhs[counter_rhs]);
      },
      result.data_shape(),
      result);
  }
  else // unique_lhs_dims == 0 && unique_rhs_dims == 0 (compute element-wise product):
    apply_tensors([&semi_outer_function](double & res_val, double lhs_val, double rhs_val) {
        res_val = semi_outer_function(lhs_val, rhs_val);
      },
      result.data_shape(),
      result, lhs, rhs);
  return result;
}
// Semi-outer product: outer product over the unique leading axes,
// element-wise product over the shared trailing axes.
template <template <typename> class TENSOR>
Tensor<double> semi_outer_product(const TensorLike<double, TENSOR> & lhs, const TensorLike<double, TENSOR> & rhs, const unsigned char overlapping_inner_dims) {
  auto multiply = [](double lhs_val, double rhs_val) {
    return lhs_val * rhs_val;
  };
  return semi_outer_apply(lhs, rhs, overlapping_inner_dims, multiply);
}
// Semi-outer quotient: like semi_outer_product, but dividing. A
// denominator whose magnitude is at or below tau_denom yields 0.0
// instead of dividing.
template <template <typename> class TENSOR>
Tensor<double> semi_outer_quotient(const TensorLike<double, TENSOR> & lhs, const TensorLike<double, TENSOR> & rhs, const unsigned char overlapping_inner_dims) {
  auto guarded_divide = [](double x, double y) {
    // Note: fabs not necessary for probabilistic problems (since
    // PMFs are >=0), but it's better to be tidy:
    return ( fabs(y) > tau_denom ) ? x / y : 0.0;
  };
  return semi_outer_apply(lhs, rhs, overlapping_inner_dims, guarded_divide);
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Engine/RandomSubtreeScheduler.hpp | .hpp | 3,275 | 91 | #ifndef _RANDOMSUBTREESCHEDULER_HPP
#define _RANDOMSUBTREESCHEDULER_HPP
#include "Scheduler.hpp"
#include "random_tree_subgraph.hpp"
// Scheduler that passes messages along two randomly chosen spanning
// (sub)trees of the inference graph, alternating between them. Each
// batch does a gather pass (reverse order) followed by a scatter pass
// (forward order) over the current tree's message-passer ordering.
template <typename VARIABLE_KEY>
class RandomSubtreeScheduler : public Scheduler<VARIABLE_KEY> {
private:
  // Two precomputed tree orderings; the scheduler oscillates between them.
  std::list<MessagePasser<VARIABLE_KEY>* > _mp_ordering_1, _mp_ordering_2;
  // Points at whichever ordering is used for the current batch.
  std::list<MessagePasser<VARIABLE_KEY>* >* _current_mp_ordering;
  // True when any non-convergent message was passed in the last batch;
  // drives has_converged().
  bool _any_passed_this_batch;

  // Passes every message mp can currently send (including ab initio
  // ones) whose change versus the edge's previous message exceeds the
  // convergence threshold. Returns true if any such message was passed,
  // false otherwise.
  bool pass_all_messages_possible(MessagePasser<VARIABLE_KEY>*mp) {
    bool any_passed = false;
    // Note: Could save some time by ignoring cases where it's clear
    // no message can be passed:
    // if (mp->can_potentially_pass_any_messages())
    // However, this would be a little tricky since it does not
    // include the ab initio case.
    for (unsigned long i=0; i<mp->number_edges(); ++i) {
      if (mp->ready_to_send_message_ab_initio(i) || mp->ready_to_send_message(i)) {
        Edge<VARIABLE_KEY>*edge = mp->get_edge_out(i);
        LabeledPMF<VARIABLE_KEY> new_msg = mp->update_and_get_message_out(i);
        // Only forward the message if it is new or differs enough from
        // the previous message on this edge:
        if ( ! edge->has_message() || (edge->has_message() && mse_divergence(edge->get_possibly_outdated_message(), new_msg) > this->_convergence_threshold) ) {
          any_passed = true;
          if (edge->has_message())
            // Dampen:
            new_msg = dampen(edge->get_possibly_outdated_message(), new_msg, this->_dampening_lambda).transposed(*edge->variables_ptr);
          edge->set_message( std::move(new_msg) );
          MessagePasser<VARIABLE_KEY>*dest_mp = edge->dest;
          dest_mp->receive_message_in_and_update(edge->dest_edge_index);
        }
      }
    }
    return any_passed;
  }
public:
  RandomSubtreeScheduler(double dampening_lambda_param, double convergence_threshold_param, unsigned long maximum_iterations_param):
    Scheduler<VARIABLE_KEY>(dampening_lambda_param, convergence_threshold_param, maximum_iterations_param),
    _current_mp_ordering(NULL),
    _any_passed_this_batch(true)
  { }

  // Builds the two random tree orderings for the graph and selects the
  // first as the current ordering.
  void add_ab_initio_edges(InferenceGraph<VARIABLE_KEY> & ig) {
    _mp_ordering_1 = random_tree_subgraph(ig);
    _mp_ordering_2 = random_tree_subgraph(ig);
    _current_mp_ordering = &_mp_ordering_1;
  }

  // Runs one batch (gather then scatter) over the current tree, capped
  // at _maximum_iterations message-passer visits; returns the number of
  // visits performed.
  unsigned long process_next_edges() {
    unsigned long iteration = 0;
    _any_passed_this_batch = false;
    // Gather messages in:
    for (auto iter = _current_mp_ordering->rbegin(); iter != _current_mp_ordering->rend() && iteration < this->_maximum_iterations; ++iter, ++iteration) {
      bool iter_passes = pass_all_messages_possible(*iter);
      _any_passed_this_batch = _any_passed_this_batch || iter_passes;
    }
    // Scatter messages out:
    for (auto iter = _current_mp_ordering->begin(); iter != _current_mp_ordering->end() && iteration < this->_maximum_iterations; ++iter, ++iteration) {
      bool iter_passes = pass_all_messages_possible(*iter);
      _any_passed_this_batch = _any_passed_this_batch || iter_passes;
    }
    // Oscillate the current ordering between the two trees:
    if (_current_mp_ordering == &_mp_ordering_1)
      _current_mp_ordering = &_mp_ordering_2;
    else
      _current_mp_ordering = &_mp_ordering_1;
    return iteration;
  }

  // Converged when the previous batch passed no non-convergent message.
  bool has_converged() const {
    return ! _any_passed_this_batch;
  }
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Engine/MessagePasser.hpp | .hpp | 7,743 | 207 | #ifndef _MESSAGEPASSER_HPP
#define _MESSAGEPASSER_HPP
#include "Edge.hpp"
#include <unordered_set>
template <typename VARIABLE_KEY>
class ContextFreeMessagePasser;
// Interface for message passers in the engine:
// Interface for message passers in the engine: a node in the inference
// graph that receives messages on incoming edges and produces messages
// on the paired outgoing edges. Tracks, in amortized O(1), which
// outgoing messages are stale so edges can be lazily dirtied.
template <typename VARIABLE_KEY>
class MessagePasser {
protected:
  // Note: _edges_in[i] must be the reverse edge of
  // _edges_out[i]. This is ensured by only modifying via
  // add_input_and_output_edges.
  // Note: The members _edges_in and _edges_out would ideally be
  // private for the reason above. However, the
  // add_input_and_output_edges function, which decides whether a
  // particular edge begins ready to pass or not, modifies them. For
  // this reason, it is simpler for them to be protected. However, at
  // a later date, it could be nice to refactor so that they're
  // private and there is a virtual bool ready_to_pass function that
  // is called when adding edges. This would also require that all
  // accesses of _edges_in and _edges_out were performed through
  // accessors.
  std::vector<Edge<VARIABLE_KEY>* > _edges_in;
  std::vector<Edge<VARIABLE_KEY>* > _edges_out;
  // Registers a paired (in, out) edge; keeps the i-th in/out edges as
  // reverses of one another and marks the new edge as not yet received.
  virtual void add_input_and_output_edges(Edge<VARIABLE_KEY>*edge_in, Edge<VARIABLE_KEY>*edge_out) {
    _edges_in.push_back(edge_in);
    _edges_out.push_back(edge_out);
    _edge_received.push_back(false);
  }
  // _edge_received[i] is true iff a message has been received on edge i.
  std::vector<bool> _edge_received;
  // For determining the edges that need to be dirtied (possibly in
  // amortized O(1)):
  unsigned long _number_edges_with_messages_received;
  bool _all_edges_out_not_up_to_date;
  bool _all_edges_out_but_one_not_up_to_date;
  // Index of the single up-to-date outgoing edge when
  // _all_edges_out_but_one_not_up_to_date holds; -1 when irrelevant.
  long _up_to_date_edge_if_one_exists;
  // Derived classes will override the following functions:
  virtual void receive_message_in(unsigned long edge_index) = 0;
  virtual LabeledPMF<VARIABLE_KEY> get_message_out(unsigned long edge_index) = 0;
  // Bookkeeping after a message arrives on edge_index: updates received
  // counts and lazily dirties the other outgoing edges.
  void update_after_receiving_message_in(unsigned long edge_index) {
    // Update which messages have been received and the count:
    if ( ! _edge_received[edge_index] ) {
      _edge_received[edge_index] = true;
      ++_number_edges_with_messages_received;
    }
    // Make local vars so that these can be modified below:
    bool all_not_up_to_date = _all_edges_out_not_up_to_date;
    bool all_but_this_one_not_up_to_date = _number_edges_with_messages_received > 0 && _all_edges_out_but_one_not_up_to_date && (_up_to_date_edge_if_one_exists == (long)edge_index);
    // after receiving a message, either all edges out or all edges
    // out but one are not up to date.
    if (_edges_out[edge_index]->up_to_date()) {
      _all_edges_out_not_up_to_date = false;
      _all_edges_out_but_one_not_up_to_date = true;
      _up_to_date_edge_if_one_exists = edge_index;
    }
    else {
      _all_edges_out_not_up_to_date = true;
      _all_edges_out_but_one_not_up_to_date = false;
      // value of _up_to_date_edge_if_one_exists does not matter if
      // _all_edges_out_not_up_to_date is true.
      _up_to_date_edge_if_one_exists = -1L;
    }
    // Don't bother dirtying edges if they were all already
    // dirty. Likewise, don't bother dirtying edges if all edges that
    // would be dirtied are already dirty.
    if ( ! all_not_up_to_date && ! all_but_this_one_not_up_to_date )
      for (unsigned long i=0; i<number_edges(); ++i)
        if (i != edge_index) {
          // Messages out along every edge (except the opposite edge of
          // the message being received) now become invalid:
          // Mark old messages along e as no longer valid:
          _edges_out[i]->set_not_up_to_date();
        }
  }
  // Only allows rhs to be ContextFreeMessagePasser* so that calling
  // bind_to does not violate existing context.
  // Creates the paired edges between this and rhs (both directions) and
  // registers them on both endpoints.
  void bind_to(ContextFreeMessagePasser<VARIABLE_KEY>*rhs, const std::vector<VARIABLE_KEY>*const ordered_edge_vars) {
    unsigned long num_this_edges = number_edges();
    unsigned long num_rhs_edges = rhs->number_edges();
    // Edges should only ever be created as pairs (as below):
    Edge<VARIABLE_KEY>*edge = new Edge<VARIABLE_KEY>(this, rhs, ordered_edge_vars, num_this_edges, num_rhs_edges);
    Edge<VARIABLE_KEY>*opposite_edge = new Edge<VARIABLE_KEY>(rhs, this, ordered_edge_vars, num_rhs_edges, num_this_edges);
    add_input_and_output_edges(opposite_edge, edge);
    rhs->add_input_and_output_edges(edge, opposite_edge);
  }
  MessagePasser():
    _number_edges_with_messages_received(0),
    _all_edges_out_not_up_to_date(true),
    _all_edges_out_but_one_not_up_to_date(false),
    _up_to_date_edge_if_one_exists(-1L),
    color(0)
  { }
public:
  // To permit basic graph operations by marking in O(n):
  long color;
  // Note: costs Omega(n) each call, so shouldn't be called
  // frequently; however, it's a useful shorthand to prevent duplicate
  // code in other functions that repeatedly do the same thing.
  std::unordered_set<VARIABLE_KEY> variables_used_by_incident_edges() const {
    std::unordered_set<VARIABLE_KEY> result;
    for (const Edge<VARIABLE_KEY>*edge : _edges_in)
      for (const VARIABLE_KEY & var : *edge->variables_ptr)
        result.insert( var );
    return result;
  }
  virtual ~MessagePasser() {}
  // Receives on the given incoming edge, then updates staleness
  // bookkeeping using the edge's index at this (destination) endpoint.
  void receive_message_in_and_update(unsigned long edge_index) {
    receive_message_in(edge_index);
    Edge<VARIABLE_KEY>*incoming_edge = _edges_in[edge_index];
    update_after_receiving_message_in(incoming_edge->dest_edge_index);
  }
  LabeledPMF<VARIABLE_KEY> update_and_get_message_out(unsigned long edge_index) {
    // Assume this will be used to set _edges_out[edge_index] is up-to-date.
    _all_edges_out_but_one_not_up_to_date = _all_edges_out_not_up_to_date;
    _up_to_date_edge_if_one_exists = edge_index;
    _all_edges_out_not_up_to_date = false;
    return get_message_out(edge_index);
  }
  // Note: excludes ab initio messages; to check ab initio, use
  // ready_to_send_message_ab_initio.
  virtual bool ready_to_send_message(unsigned long edge_index) const {
    // To be ready to send, either all messages should be received, or
    // all but the message requested out.
    return _number_edges_with_messages_received == number_edges() || (_number_edges_with_messages_received+1 == number_edges() && !_edge_received[edge_index]);
  }
  // Default: no ab initio messages; overridden by sources of evidence.
  virtual bool ready_to_send_message_ab_initio(unsigned long /*edge_index*/) const {
    return false;
  }
  // Provides access for passing on new messages:
  Edge<VARIABLE_KEY>* get_edge_out(unsigned long edge_index) const {
    return _edges_out[edge_index];
  }
  unsigned long number_edges() const {
    // Equivalent to _edges_out.size():
    return _edges_in.size();
  }
  // Note: excludes ab initio messages.
  virtual bool can_potentially_pass_any_messages() const {
    return _number_edges_with_messages_received+1 >= number_edges();
  }
  bool edge_received(unsigned long edge_index) const {
    return _edge_received[edge_index];
  }
  virtual void print(std::ostream & os) const = 0;
  // Used to replace edges; Edge type has immutable source, dest
  // pointers and integer indices, so they must be replaced to edit
  // the graph. This is primarily used to merge hyperedges within
  // InferenceGraphBuilder.
  void rewire_edge(unsigned long edge_index, Edge<VARIABLE_KEY>*new_edge_in, Edge<VARIABLE_KEY>*new_edge_out) {
    Edge<VARIABLE_KEY>*edge_in = _edges_in[edge_index];
    Edge<VARIABLE_KEY>*edge_out = _edges_out[edge_index];
    _edges_in[edge_index] = new_edge_in;
    _edges_out[edge_index] = new_edge_out;
    // Only free the variables list when the new edge does not share it:
    if (edge_in->variables_ptr != new_edge_in->variables_ptr)
      delete edge_in->variables_ptr;
    delete edge_out;
    delete edge_in;
  }
};
// Stream insertion delegates to the polymorphic print() hook so each
// concrete MessagePasser controls its own textual representation.
template <typename VARIABLE_KEY>
std::ostream & operator << (std::ostream & os, const MessagePasser<VARIABLE_KEY> & rhs) {
  rhs.print(os);
  return os;
}
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Engine/ConvolutionTree.hpp | .hpp | 14,858 | 424 | #ifndef _CONVOLUTIONTREE_HPP
#define _CONVOLUTIONTREE_HPP
#include "../PMF/PMF.hpp"
#include <limits>
// A convolution tree optimized for online processing. Note that the
// messages received through a given channel should never grow in
// support (otherwise, the cache of possible supports can become
// corrupted). This should always be the case in loopy belief
// propagation, since a growing product of PMFs will be passed. This
// is currently not checked because only the narrowed prior and
// narrowed likelihood are stored (not the raw prior and likelihood
// that were sent in). It could be checked, but at the expense of
// storing additional PMFs.
// Note: the recursive design of the convolution tree isn't the
// fastest or most robust (iterative would be better), but it easily
// allows lazy updating (i.e., messages are not propagated until a
// message out is requested). It could also be beneficial to allocate
// nodes in a single block rather, so that there is greater
// localization.
// The #define DISABLE_TRIM turns off trimming; since trimming has no
// real disadvantages, this is only used to measure the benefits of
// trimming.
class TreeNode {
#ifdef CONVOLUTIONTREE_CONVOLUTION_SIZE_CHECK
public:
// For testing that supports are properly trimmed:
static unsigned long largest_convolution_size;
#endif
protected:
PMF _prior;
PMF _likelihood;
// Used to conservatively bound the support (this allows PMFs to be
// narrowed in certain cases, which can dramatically improve
// runtime). Note that the updating scheme updates these supports
// before updating the related distribution; this is necessary to
// allow the distribution to be fully trimmed (consider a tree where
// nothing is yet cached, but where all inputs are binary and all
// outputs are binary; without first setting support for all nodes
// to binary, requesting the prior message out from the root will
// produce long convolutions throughout the tree). This does have a
// quirk where the supports will not be "dirtied" when a
// distribution is narrowed additionally because initial narrowing
// produces a distribution whose initial bounding box is full of
// zeros. However, this case is addressed in the direction messages
// have been requested, because changing the distrubution also
// narrows both the support and distribution to the intersection of
// both. The only case not addressed is dirtying the supports going
// in the opposite direction. As a result, messages requested in
// that direction may not benefit from this extra narrowing of
// supports; however, dirtying them would be excessive, since this
// case of a zero bounding box can be considered non-general, and
// aggressively dirtying the supports in every direction could harm
// the general runtime by making updating of supports no longer lazy
// (as implemented, the dirtying cost can be amortized out).
Vector<long> _minimum_possible_first_support;
Vector<long> _maximum_possible_last_support;
bool _prior_ready;
bool _likelihood_ready;
bool _support_from_below_ready;
bool _support_from_above_ready;
// Note that (with the exception of the root) sibling should exist,
// and this function should never return NULL, because the tree
// should be full.
TreeNode* sibling_ptr() {
if (parent->child_lhs == this)
return parent->child_rhs;
return parent->child_lhs;
}
bool has_children() {
// Note that nodes have either 0 or 2 children since it is a full
// binary tree; therefore, the following line can be simplified:
//return child_lhs != NULL || child_rhs != NULL;
return child_lhs != NULL;
}
void set_dependents_up_not_ready() {
// Continue until reaching node that is not ready from below
// (_prior_ready and _support_from_below_ready should match, but
// use both to be tidy):
if ( _prior_ready || _support_from_below_ready ) {
_prior_ready = false;
_support_from_below_ready = false;
if (parent != NULL) {
parent->set_dependents_up_not_ready();
// If parent != NULL, sib should exist because the tree should
// be full.
TreeNode*sib = sibling_ptr();
sib->set_dependents_down_not_ready();
}
}
}
void set_dependents_down_not_ready() {
// Continue until reaching node that is not ready from above
// (_likelihood_ready and _support_from_above_ready should match,
// but use both to be tidy):
if ( _likelihood_ready || _support_from_above_ready ) {
_likelihood_ready = false;
_support_from_above_ready = false;
if (child_lhs != NULL)
child_lhs->set_dependents_down_not_ready();
if (child_rhs != NULL)
child_rhs->set_dependents_down_not_ready();
}
}
// add and sub return only the initialized sum or difference,
// otherwise only add/subtract the initialized argument:
inline static PMF add(const PMF & lhs, const PMF & rhs, double p) {
if (lhs.dimension() == 0)
return rhs;
if (rhs.dimension() == 0)
return lhs;
#ifdef CONVOLUTIONTREE_CONVOLUTION_SIZE_CHECK
// Update the size of the maximum convolution performed:
unsigned long n = std::max(lhs.table().flat_size(), rhs.table().flat_size());
largest_convolution_size = std::max(largest_convolution_size, n);
#endif
return p_add(lhs, rhs, p);
}
inline static PMF sub(const PMF & lhs, const PMF & rhs, double p) {
if (lhs.dimension() == 0)
return rhs;
if (rhs.dimension() == 0)
return lhs;
#ifdef CONVOLUTIONTREE_CONVOLUTION_SIZE_CHECK
// Update the size of the maximum convolution performed:
unsigned long n = std::max(lhs.table().flat_size(), rhs.table().flat_size());
largest_convolution_size = std::max(largest_convolution_size, n);
#endif
return p_sub(lhs, rhs, p);
}
void narrow_support_with(PMF & dist) {
if (dist.dimension() != 0) {
// Narrow dist to minimum and maximum supports:
dist.narrow_support(_minimum_possible_first_support, _maximum_possible_last_support);
// Narrow minimum and maximum supports to dist:
for (unsigned char i=0; i<_minimum_possible_first_support.size(); ++i) {
_minimum_possible_first_support[i] = std::max(_minimum_possible_first_support[i], dist.first_support()[i]);
_maximum_possible_last_support[i] = std::min(_maximum_possible_last_support[i], long(dist.first_support()[i] + dist.table().view_shape()[i]) - 1);
}
}
}
void narrow_all() {
#ifndef DISABLE_TRIM
narrow_support_with(_likelihood);
narrow_support_with(_prior);
// Just in case prior narrows min/max supports, propagate that
// change to likelihood:
narrow_support_with(_likelihood);
#endif
}
void update_prior(double p) {
if ( ! _prior_ready ) {
// Full binary tree means both must be non-null
// simultaneously, so no need to consider cases where exactly
// one is NULL:
if (has_children()) {
child_lhs->update_prior(p);
child_rhs->update_prior(p);
if (child_lhs->_prior_ready && child_rhs->_prior_ready)
set_prior( add(child_lhs->get_prior(p), child_rhs->get_prior(p), p) );
}
}
}
void update_likelihood(double p) {
if ( ! _likelihood_ready ) {
if (parent != NULL) {
parent->update_likelihood(p);
TreeNode*sib = sibling_ptr();
sib->update_prior(p);
if (parent->_likelihood_ready && sib->_prior_ready)
set_likelihood( sub(parent->get_likelihood(p), sib->get_prior(p), p) );
}
}
}
void update_support_from_below() {
if ( ! _support_from_below_ready ) {
if (child_lhs != NULL && child_rhs != NULL) {
child_lhs->update_support_from_below();
child_rhs->update_support_from_below();
if ( child_lhs->_support_from_below_ready && child_rhs->_support_from_below_ready ) {
for (unsigned char i=0; i<_minimum_possible_first_support.size(); ++i) {
_minimum_possible_first_support[i] = std::max(_minimum_possible_first_support[i], child_lhs->_minimum_possible_first_support[i] + child_rhs->_minimum_possible_first_support[i]);
_maximum_possible_last_support[i] = std::min(_maximum_possible_last_support[i], child_lhs->_maximum_possible_last_support[i] + child_rhs->_maximum_possible_last_support[i]);
}
narrow_all();
_support_from_below_ready = true;
}
}
}
}
void update_support_from_above() {
if ( ! _support_from_above_ready ) {
if (parent != NULL) {
parent->update_support_from_above();
TreeNode*sib = sibling_ptr();
sib->update_support_from_below();
if (parent->_support_from_above_ready && sib->_support_from_below_ready) {
// Note: This can be done more efficiently by inlining the
// following two lines into the loop below (memory
// allocation is more expensive than performing
// elementwise).
Vector<long> likelihood_minimum_possible_first_support = parent->_minimum_possible_first_support - sib->_maximum_possible_last_support;
Vector<long> likelihood_maximum_possible_last_support = parent->_maximum_possible_last_support - sib->_minimum_possible_first_support;
for (unsigned char i=0; i<likelihood_minimum_possible_first_support.size(); ++i) {
_minimum_possible_first_support[i] = std::max(_minimum_possible_first_support[i], likelihood_minimum_possible_first_support[i]);
_maximum_possible_last_support[i] = std::min(_maximum_possible_last_support[i], likelihood_maximum_possible_last_support[i]);
}
narrow_all();
_support_from_above_ready = true;
}
}
}
}
public:
TreeNode *parent, *child_lhs, *child_rhs;
TreeNode(unsigned char dimension):
_minimum_possible_first_support(dimension),
_maximum_possible_last_support(dimension),
_prior_ready(false),
_likelihood_ready(false),
_support_from_below_ready(false),
_support_from_above_ready(false),
parent(NULL),
child_lhs(NULL),
child_rhs(NULL)
{
for (unsigned char i=0; i<dimension; ++i) {
_minimum_possible_first_support[i] = std::numeric_limits<long>::min();
_maximum_possible_last_support[i] = std::numeric_limits<long>::max();
}
}
void set_prior(PMF && pmf) {
_prior = std::move(pmf);
narrow_all();
// Set all dependents of prior to be unready:
// Note: this will be called multiple times when updating many
// nodes that are on a path to the root, but it will not matter
// since the runtime will be O(1) per call after the first call:
set_dependents_up_not_ready();
// Set local prior to be ready:
_prior_ready = true;
if (! has_children())
_support_from_below_ready = true;
}
void set_likelihood(PMF && pmf) {
_likelihood = std::move(pmf);
narrow_all();
// Set all dependents of likelihood to be unready:
// Note: this will be called multiple times when updating many
// nodes that are on a path to the root, but it will not matter
// since the runtime will be O(1) per call after the first call:
set_dependents_down_not_ready();
// Set local likelihood to be ready:
_likelihood_ready = true;
if (parent == NULL)
_support_from_above_ready = true;
}
const PMF & get_prior(double p) {
update_support_from_above();
update_prior(p);
#ifdef ENGINE_CHECK
assert(_prior_ready);
#endif
return _prior;
}
const PMF & get_likelihood(double p) {
update_support_from_above();
update_likelihood(p);
#ifdef ENGINE_CHECK
assert(_likelihood_ready);
#endif
return _likelihood;
}
void add_child_lhs(TreeNode*lhs) {
child_lhs = lhs;
lhs->parent = this;
}
void add_child_rhs(TreeNode*rhs) {
child_rhs = rhs;
rhs->parent = this;
}
// For debugging:
void print(std::ostream & os, unsigned int depth=0) {
for (unsigned int i=0; i<3*depth; ++i)
os << " ";
os << this << " prior&support " << _prior_ready << _support_from_below_ready << " likelihood&support " << _likelihood_ready << _support_from_above_ready << " min/max possible support " << _minimum_possible_first_support << " " << _maximum_possible_last_support << " prior/likelihood " << _prior << " " << _likelihood << std::endl;
if (child_lhs != NULL && child_rhs != NULL) {
child_lhs->print(os, depth+1);
child_rhs->print(os, depth+1);
}
}
};
#ifdef CONVOLUTIONTREE_CONVOLUTION_SIZE_CHECK
unsigned long TreeNode::largest_convolution_size = 0;
#endif
// Owns a full binary tree of TreeNode objects and exposes it as a
// message passer: priors come in at the leaves, a likelihood comes in
// at the root, and messages out are computed lazily.
class ConvolutionTree {
protected:
  // Dimension of every PMF handled by this tree.
  const unsigned char _dimension;
  // p-norm parameter forwarded to the TreeNode add/sub operations.
  const double _p;
  TreeNode * _root;
  // Leaves, in creation order; input index i maps to _inputs[i].
  std::vector<TreeNode*> _inputs;
  // _output is the same as _root:
  // Construct a full binary tree with n leaves:
  TreeNode* create_tree(unsigned long number_priors_to_add) {
    TreeNode*res = new TreeNode(_dimension);
    if (number_priors_to_add > 1) {
      // When n == 1, allocate a single leaf. Otherwise, allocate
      // floor(n/2) leaves on the left subtree and n - floor(n/2)
      // leaves on the right subtree. When n>1, the left subtree will
      // get at least one child and the right subtree will get at
      // least one child, guaranteeing a full tree (when n != 1,
      // two subtrees will be created).
      res->add_child_lhs( create_tree(number_priors_to_add >> 1) );
      res->add_child_rhs( create_tree( number_priors_to_add - (number_priors_to_add >> 1) ) );
    }
    else {
      // leaf: add it to _inputs:
      _inputs.push_back(res);
    }
    return res;
  }
  // Recursively frees the subtree rooted at node and nulls the
  // caller's pointer.
  void destroy_tree(TreeNode*&node) {
    if (node == NULL)
      return;
    if (node->child_lhs != NULL)
      destroy_tree(node->child_lhs);
    if (node->child_rhs != NULL)
      destroy_tree(node->child_rhs);
    delete node;
    node = NULL;
  }
public:
  // For debugging:
  void print(std::ostream & os) {
    _root->print(os);
  }
  ConvolutionTree(unsigned long number_priors_to_add, const unsigned char dim, const double p):
    _dimension(dim),
    _p(p)
  {
    _root = create_tree(number_priors_to_add);
  }
  // Disable copying so that the raw TreeNode pointers are not deleted
  // multiple times (same policy as InferenceGraph):
  ConvolutionTree(const ConvolutionTree &) = delete;
  const ConvolutionTree & operator =(const ConvolutionTree &) = delete;
  ~ConvolutionTree() {
    destroy_tree(_root);
  }
  // Routes an incoming message: indices 0..n-1 address input priors;
  // any larger index addresses the output likelihood at the root.
  void receive_message_in(unsigned long index, PMF msg) {
    if (index < _inputs.size())
      // If the index is in 0, ... n-1, it refers to an input prior:
      _inputs[index]->set_prior(std::move(msg));
    else
      // Otherwise, the index refers to the output likelihood:
      _root->set_likelihood(std::move(msg));
  }
  // Computes (lazily) and returns the requested outgoing message,
  // using the same index convention as receive_message_in.
  PMF get_message_out(unsigned long index) {
    // The check as to whether this message can be computed will be
    // handled by the TreeNode types.
    if (index < _inputs.size()) {
      // If the index is in 0, ... n-1, it refers to an input prior:
      return _inputs[index]->get_likelihood(_p);
    }
    // Otherwise, the index refers to the output likelihood:
    return _root->get_prior(_p);
  }
  unsigned char dimension() const {
    return _dimension;
  }
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Engine/ListQueue.hpp | .hpp | 954 | 46 | #ifndef _LISTQUEUE_HPP
#define _LISTQUEUE_HPP
#include <list>
// Simple FIFO of edges. Each edge can appear at most once; membership
// is tracked via the Edge::in_queue flag rather than by scanning the
// list.
template <typename VARIABLE_KEY>
class ListQueue {
protected:
  std::list<Edge<VARIABLE_KEY>* > _next_edges;
public:
  // True when no edges are waiting.
  bool is_empty() const {
    return _next_edges.empty();
  }
  // Number of edges currently enqueued.
  std::size_t size() const {
    return _next_edges.size();
  }
  // Enqueues val unless its in_queue flag says it is already present.
  void push_if_not_in_queue(Edge<VARIABLE_KEY>* val) {
    if ( ! val->in_queue ) {
      val->in_queue = true;
      _next_edges.push_back(val);
    }
  }
  // Removes and returns the oldest edge, clearing its in_queue flag.
  Edge<VARIABLE_KEY>* pop_next() {
#ifdef ENGINE_CHECK
    assert( ! is_empty() );
#endif
    Edge<VARIABLE_KEY>* front_edge = _next_edges.front();
    _next_edges.pop_front();
    front_edge->in_queue = false;
    return front_edge;
  }
  // For debugging:
  void print(std::ostream & os) const {
    os << "Size " << size() << std::endl;
    for (const Edge<VARIABLE_KEY>* val : _next_edges) {
      os << val << " from " << val->source << " to " << val->dest << std::endl;
    }
    os << std::endl;
  }
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Engine/KikuchiGraph.hpp | .hpp | 1,300 | 45 | #ifndef _KIKUCHIGRAPH_HPP
#define _KIKUCHIGRAPH_HPP
// Owns a set of MessagePasser nodes and all of their edges; the
// destructor tears everything down in an order that keeps pointers
// valid while they are still needed.
template <typename VARIABLE_KEY>
class KikuchiGraph {
public:
  // Permits modification of MessagePasser types via pointer, but not
  // modification of the pointers themselves:
  const std::vector<MessagePasser<VARIABLE_KEY>* > _message_passers;
  KikuchiGraph(std::vector<MessagePasser<VARIABLE_KEY>* > && message_passers):
    _message_passers(std::move(message_passers))
  { }
  // Disable copying so that the owned raw pointers are not deleted
  // multiple times (same policy as InferenceGraph):
  KikuchiGraph(const KikuchiGraph &) = delete;
  const KikuchiGraph & operator =(const KikuchiGraph &) = delete;
  ~KikuchiGraph() {
    // Delete _variables_ptr collections first so that edges are still
    // available (to get opposite):
    for (MessagePasser<VARIABLE_KEY>*mp : _message_passers) {
      for (unsigned long k=0; k<mp->number_edges(); ++k) {
        Edge<VARIABLE_KEY>*edge = mp->get_edge_out(k);
        if (edge->variables_ptr != NULL) {
          delete edge->variables_ptr;
          // Null both directions so the shared pointer is freed once:
          edge->variables_ptr = NULL;
          edge->opposite()->variables_ptr = NULL;
        }
      }
    }
    // Delete all edges out (ensures every edge will be deleted
    // exactly once):
    for (MessagePasser<VARIABLE_KEY>*mp : _message_passers) {
      for (unsigned long k=0; k<mp->number_edges(); ++k) {
        Edge<VARIABLE_KEY>*edge = mp->get_edge_out(k);
        delete edge;
      }
    }
    // Delete message passers:
    for (MessagePasser<VARIABLE_KEY>*mp : _message_passers) {
      delete mp;
    }
  }
  // Bug fix: the destructor and the class are now both properly
  // closed (the original text was missing one closing brace, leaving
  // the class body unterminated before #endif).
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Engine/Queueable.hpp | .hpp | 328 | 18 | #ifndef _QUEUEABLE_HPP
#define _QUEUEABLE_HPP
// Mixin to allow pointers of objects to be inserted into queues:
struct Queueable {
  // A member flag is faster than maintaining a separate map of which
  // messages are currently enqueued:
  double priority = 0.0;  // scheduling priority (larger passes sooner)
  bool in_queue = false;  // true while present in a scheduler queue
  Queueable() = default;
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Engine/Hyperedge.hpp | .hpp | 3,799 | 110 | #ifndef _HYPEREDGE_HPP
#define _HYPEREDGE_HPP
#include "HUGINMessagePasser.hpp"
// Like a HUGINMessagePasser, but it is eligible to pass once every
// variable along an edge has been received.
template <typename VARIABLE_KEY>
class Hyperedge : public HUGINMessagePasser<VARIABLE_KEY> {
private:
  // Union of all variables seen on any message received so far:
  std::unordered_set<VARIABLE_KEY> _vars_received;
  // Per-edge readiness: edge i may send once _vars_received covers
  // every variable along edge i:
  std::vector<bool> _ready_to_send;
  // Cached conjunction of _ready_to_send; once true, no more
  // per-edge bookkeeping is needed:
  bool _all_ready_to_send;
protected:
  // Registers a new bidirectional connection; the new outgoing edge
  // starts not ready to send.
  void add_input_and_output_edges(Edge<VARIABLE_KEY>*edge_in, Edge<VARIABLE_KEY>*edge_out) {
    HUGINMessagePasser<VARIABLE_KEY>::add_input_and_output_edges(edge_in, edge_out);
    _ready_to_send.push_back(false);
  }
  // Note that this relies on the fact that MessagePasser updates by
  // performing _ready_to_send[i] = _ready_to_send[i] | other_edges_received;
  // therefore, setting _ready_to_send[edge_index] here will ensure
  // that when _vars_received is a superset of the variables along
  // the edge, then the edge will be marked as ready.
  void receive_message_in(unsigned long edge_index) {
    HUGINMessagePasser<VARIABLE_KEY>::receive_message_in(edge_index);
    // Set edges out as ready to send where appropriate.
    if (! _all_ready_to_send) {
      // For greater performance, don't bother updating if this edge has
      // already been received.
      if (! this->_edge_received[edge_index]) {
        // Add the variables to the set _vars_received.
        Edge<VARIABLE_KEY>*incoming_edge = this->_edges_in[edge_index];
        for (const VARIABLE_KEY & var : *incoming_edge->variables_ptr)
          _vars_received.insert(var);
        for (unsigned long i=0; i<this->number_edges(); ++i) {
          // Don't bother waking edge opposite to the message received (it
          // will by definition be elligible to send, but nothing will
          // have changed since the message received will not be used to
          // send back).
          if (i != edge_index) {
            bool vars_received_are_superset = true;
            Edge<VARIABLE_KEY>*other_edge = this->_edges_in[i];
            for (const VARIABLE_KEY & var : *other_edge->variables_ptr)
              vars_received_are_superset = vars_received_are_superset && _vars_received.find(var) != _vars_received.end();
            _ready_to_send[i] = vars_received_are_superset;
          }
        }
        // Recompute the cached all-ready flag:
        _all_ready_to_send = true;
        for (unsigned long i=0; i<this->number_edges(); ++i)
          _all_ready_to_send = _all_ready_to_send && _ready_to_send[i];
      }
    }
  }
  virtual bool ready_to_send_message(unsigned long edge_index) const {
    return _ready_to_send[edge_index];
  }
  // A hyperedge can always potentially pass (per-edge readiness is
  // tracked separately in _ready_to_send).
  bool can_potentially_pass_any_messages() const {
    return true;
  }
public:
  Hyperedge():
    // Hyperedges use p=1.0; they exist solely to cache products via
    // the HUGIN algorithm.
    HUGINMessagePasser<VARIABLE_KEY>(1.0),
    _all_ready_to_send(false)
  { }
  // Splices every connection of he_to_absorb (except any edge to this
  // node itself) into this hyperedge, rewires the far endpoints, and
  // deletes the absorbed hyperedge. Note: the absorbed node's own
  // edge objects are not freed here; the graph owns edge cleanup.
  void absorb_hyperedge(Hyperedge<VARIABLE_KEY>* he_to_absorb) {
    // Add edges from he_to_absorb into this:
    for (unsigned long i=0; i<he_to_absorb->number_edges(); ++i) {
      Edge<VARIABLE_KEY>*edge = he_to_absorb->get_edge_out(i);
      MessagePasser<VARIABLE_KEY>*dest_mp = edge->dest;
      if (dest_mp != this) {
        unsigned long source_edge_index = this->number_edges();
        unsigned long dest_edge_index = edge->dest_edge_index;
        Edge<VARIABLE_KEY>*edge_in = new Edge<VARIABLE_KEY>(dest_mp, this, edge->variables_ptr, dest_edge_index, source_edge_index);
        Edge<VARIABLE_KEY>*edge_out = new Edge<VARIABLE_KEY>(this, dest_mp, edge->variables_ptr, source_edge_index, dest_edge_index);
        this->add_input_and_output_edges(edge_in, edge_out);
        // Edge into this becomes edge out of dest_mp and vice versa:
        dest_mp->rewire_edge(edge->dest_edge_index, edge_out, edge_in);
      }
    }
    delete he_to_absorb;
  }
  // For debugging:
  void print(std::ostream & os) const {
    os << "Hyperedge " << this->_product;
  }
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Engine/FIFOScheduler.hpp | .hpp | 2,849 | 88 | #ifndef _FIFOSCHEDULER_HPP
#define _FIFOSCHEDULER_HPP
#include "ListQueue.hpp"
// Streams a vector as "[ a b c ]" (space-separated, bracketed); used
// for debugging output of edge variable lists.
template <typename VARIABLE_KEY>
std::ostream & operator <<(std::ostream & os, const std::vector<VARIABLE_KEY> & rhs) {
  os << "[ ";
  for (std::size_t i=0; i<rhs.size(); ++i)
    os << rhs[i] << " ";
  os << "]";
  return os;
}
// First-in-first-out message passing scheduler: edges are processed
// in the order they become ready, with dampening applied when an edge
// already carries a message.
template <typename VARIABLE_KEY>
class FIFOScheduler : public Scheduler<VARIABLE_KEY> {
protected:
  // FIFO of edges awaiting passing; Edge::in_queue guarantees each
  // edge is enqueued at most once.
  ListQueue<VARIABLE_KEY> _queue;
public:
  FIFOScheduler(double dampening_lambda, double convergence_threshold, unsigned long maximum_iterations):
    Scheduler<VARIABLE_KEY>(dampening_lambda, convergence_threshold, maximum_iterations)
  {}
  // Seeds the queue with every edge that can pass a message before
  // any messages have been received, in shuffled order.
  void add_ab_initio_edges(InferenceGraph<VARIABLE_KEY> & graph){
    // todo: shuffles ab initio edges (could do them in DFS/BFS formation for greater efficiency)
    std::vector<Edge<VARIABLE_KEY>*> starters;
    for (Edge<VARIABLE_KEY>* edge : graph.edges_ready_ab_initio())
      starters.push_back(edge);
    // shuffle:
    // NOTE(review): rand()%size is a biased shuffle (not
    // Fisher--Yates); adequate for randomizing the start order, but
    // not a uniform permutation.
    for (unsigned int i=0; i<starters.size(); ++i) {
      int j = rand()%starters.size();
      std::swap(starters[i], starters[j]);
    }
    for (Edge<VARIABLE_KEY>* edge : starters)
      _queue.push_if_not_in_queue(edge);
  }
  // Pops one edge, refreshes its message, and (if it changed by more
  // than the convergence threshold) delivers it and wakes dependent
  // edges. Returns the number of edges processed (0 or 1).
  unsigned long process_next_edges() {
    if ( _queue.is_empty() )
      return 0;
    Edge<VARIABLE_KEY>*edge = _queue.pop_next();
    MessagePasser<VARIABLE_KEY>*source_mp = edge->source;
    // Update the message in the edge immediately before use (in a
    // lazy manner):
    LabeledPMF<VARIABLE_KEY> new_msg = source_mp->update_and_get_message_out(edge->source_edge_index);
    if ( ! edge->has_message() || (edge->has_message() && mse_divergence(edge->get_possibly_outdated_message(), new_msg) > this->_convergence_threshold) ) {
      if (edge->has_message())
        // Dampen:
        new_msg = dampen(edge->get_possibly_outdated_message(), new_msg, this->_dampening_lambda).transposed(*edge->variables_ptr);
      edge->set_message( std::move(new_msg) );
      // Receive the message:
      MessagePasser<VARIABLE_KEY>*dest_mp = edge->dest;
      dest_mp->receive_message_in_and_update(edge->dest_edge_index);
      // Wake up other edges:
      // Do not bother trying to wake any edges if <n-1 messages have
      // been received by dest_mp:
      if (dest_mp->can_potentially_pass_any_messages()) {
        unsigned long edge_index_received = edge->dest_edge_index;
        for (unsigned long edge_index_out=0; edge_index_out<dest_mp->number_edges(); ++edge_index_out) {
          // Do not wake edge opposite to the edge received:
          if (edge_index_out != edge_index_received && dest_mp->ready_to_send_message(edge_index_out)) {
            Edge<VARIABLE_KEY>*e = dest_mp->get_edge_out(edge_index_out);
            _queue.push_if_not_in_queue(e);
          }
        }
      }
    }
    return 1;
  }
  // Converged when no edge has a pending (sufficiently changed)
  // message.
  bool has_converged() const {
    return _queue.is_empty();
  }
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Engine/PriorityScheduler.hpp | .hpp | 6,113 | 160 | #ifndef _PRIORITYSCHEDULER_HPP
#define _PRIORITYSCHEDULER_HPP
#include "Scheduler.hpp"
#include "SetQueue.hpp"
// Note: for some graphs (e.g., trees and HMM-like graphs), the
// runtime may be in O(n log(n)) instead of O(n), because of
// SetQueue. These graphs will be solved in O(n) with
// FIFOScheduler.
// Priority-driven scheduler: edges with the largest message change
// (MSE divergence) are passed first; edges without a previous message
// are prioritized by sparsity plus a bias so they go before any
// divergence-ranked edge.
template <typename VARIABLE_KEY>
class PriorityScheduler : public Scheduler<VARIABLE_KEY> {
protected:
  // Max-priority queue keyed on Edge::priority.
  SetQueue<VARIABLE_KEY> _queue;
  // Enqueues (or re-prioritizes) an edge without computing its
  // message; the message will be refreshed lazily at pass time.
  void set_priority_without_updating_message_and_update_queue(Edge<VARIABLE_KEY>*e, double new_priority) {
    // If message is not up to date, it will be refreshed when
    // passing.
    if ( ! e->in_queue ) {
      // If the edge is not in the queue, only add it to the queue
      // (which will update its priority) as long as convergence has
      // not been reached:
      if (new_priority > this->_convergence_threshold) {
        // This edge has changed more than the convergence criteria
        // allows; add it to the queue.
        _queue.push_or_update(e, new_priority);
      }
    }
  }
  // Installs msg on edge e (dampened against the previous message if
  // one exists) and updates e's queue position. priority_bias biases
  // first-time messages ahead of divergence-ranked ones.
  void set_message_at_edge_and_update_queue(Edge<VARIABLE_KEY>*e, LabeledPMF<VARIABLE_KEY> && msg, double priority_bias=0.0) {
    double new_priority;
    if (e->has_message()) {
      // Transpose to guarantee that the message gets correct variable
      // order (this is important for some context-dependent message
      // passers, e.g. a multidimensional ConvolutionTreeMessagePasser):
      new_priority = mse_divergence(e->get_possibly_outdated_message(), msg);
      // Note that since the edge has been awoken, it will be out of
      // date (that is what this function is addressing); therefore,
      // call get_possibly_outdated_message(), because that does not
      // enforce check of whether or not it is up to date.
      msg = dampen(e->get_possibly_outdated_message(), msg, this->_dampening_lambda).transposed(*e->variables_ptr);
    }
    else {
      // Otherwise rank with sparsest messages first:
      const Tensor<double> & tab = msg.pmf().table();
#ifdef SHAPE_CHECK
      assert( tab.flat_size() > 0 );
#endif
      // When priority_bias > 1.0, ensures priority > 1 >= max MSE,
      // which means this sparsity score will always trump the
      // divergence score (and therefore edges with no previous
      // message will always be prioritized earlier than messages with
      // a previous message, regardless of the respective
      // sparsity-based priority and MSE).
      // Therefore, use priority_bias=2.0 for initial edges to
      // hyperedges and priority_bias=1.0 for initial edges back from
      // hyperedges. This will ensure ab initio messages are first
      // passed to hyperedges, then edges back from hyperedges, then
      // edges woken up.
      new_priority = priority_bias + 1.0 / tab.flat_size();
    }
    if ( ! e->in_queue ) {
      // If the edge is not in the queue, only add it to the queue
      // (which will update its priority) as long as convergence has
      // not been reached:
      if (new_priority >= this->_convergence_threshold)
        // This edge has changed more than the convergence criteria
        // allows; add it to the queue.
        _queue.push_or_update(e, new_priority);
    }
    else {
      // If the edge is in the queue, it has not yet passed the old
      // message. Therefore, even if the change between the old
      // message and the new message is very small, neither have been
      // passed, and so convergence is not necessarily reached. Thus,
      // only allow the edge to move forward in the queue, but not
      // backward.
      if (new_priority > e->priority)
        _queue.push_or_update(e, new_priority);
    }
    e->set_message(std::move(msg));
  }
public:
  PriorityScheduler(double dampening_lambda, double convergence_threshold, unsigned long maximum_iterations):
    Scheduler<VARIABLE_KEY>(dampening_lambda, convergence_threshold, maximum_iterations)
  {}
  // Seeds the queue with every ab initio edge at maximal bias (2.0)
  // so they are passed before everything else.
  void add_ab_initio_edges(InferenceGraph<VARIABLE_KEY> & graph){
    for (Edge<VARIABLE_KEY>* edge : graph.edges_ready_ab_initio())
      set_priority_without_updating_message_and_update_queue(edge, 2.0);
  }
  // Pops the highest-priority edge, delivers its message, and wakes
  // dependent edges with freshly computed messages. Returns the
  // number of edges processed (0 or 1).
  unsigned long process_next_edges() {
    if ( _queue.is_empty() )
      return 0;
    Edge<VARIABLE_KEY>*edge = _queue.pop_max();
    // If this edge was enqueued lazily (i.e., if the message has not
    // been set) or if the edge is not up to date, set its message
    // now:
    MessagePasser<VARIABLE_KEY>*source_mp = edge->source;
    if ( ! edge->ready_to_pass() ) {
      edge->set_message( std::move(source_mp->update_and_get_message_out(edge->source_edge_index)) );
    }
    MessagePasser<VARIABLE_KEY>*dest_mp = edge->dest;
#ifdef PRINT_MESSAGES
    std::cout << "Message Passed: " << std::endl;
    std::cout << "FROM ";
    edge->source->print(std::cout);
    std::cout << " TO ";
    edge->dest->print(std::cout);
    std::cout << " WITH " << edge->get_message() << std::endl;
#endif
    dest_mp->receive_message_in_and_update(edge->dest_edge_index);
    // Iterate through the outgoing edges other than the one just
    // received:
    // Relies on the fact that edges must be constructed symmetrically
    // (i.e., input and output edges must be added simultaneously, so
    // for any MessagePasser, edge->dest_edge_index ==
    // edge->get_opposite_edge_ptr()->source_edge_index).
    unsigned long edge_index_received = edge->dest_edge_index;
    for (unsigned long edge_index_out=0; edge_index_out<dest_mp->number_edges(); ++edge_index_out) {
      // Do not wake edge opposite to the edge received:
      if (edge_index_out != edge_index_received && dest_mp->ready_to_send_message(edge_index_out)) {
        Edge<VARIABLE_KEY>*e = dest_mp->get_edge_out(edge_index_out);
        set_message_at_edge_and_update_queue(e, dest_mp->update_and_get_message_out(edge_index_out));
      }
    }
    return 1;
  }
  bool has_converged() const {
    // Edges will be added to the queue in a non-lazy fashion;
    // therefore, all edges with messages that are not converged
    // should be in the queue.
    return _queue.is_empty();
  }
};
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Engine/InferenceGraph.hpp | .hpp | 6,979 | 201 | #ifndef _INFERENCEGRAPH_HPP
#define _INFERENCEGRAPH_HPP
#include <unordered_set>
#include <list>
#include "MessagePasser.hpp"
#include "HUGINMessagePasser.hpp"
#include "../Utility/shuffled_sequence.hpp"
// Owns all MessagePasser nodes, their edges, and the edge variable
// labels; verifies graph consistency on construction (when
// ENGINE_CHECK is defined) and tears everything down in the
// destructor.
template <typename VARIABLE_KEY>
class InferenceGraph {
protected:
  // Checks that every node reachable through an edge is also a member
  // of message_passers (so the graph owns everything it can reach).
  void verify_all_connected_message_passers_included() {
    std::unordered_set<MessagePasser<VARIABLE_KEY>* > connected_mps(message_passers.begin(), message_passers.end());
    for (MessagePasser<VARIABLE_KEY>*mp : message_passers) {
      for (unsigned long edge_ind=0; edge_ind<mp->number_edges(); ++edge_ind) {
        Edge<VARIABLE_KEY>*edge = mp->get_edge_out(edge_ind);
        assert( connected_mps.find(edge->dest) != connected_mps.end() );
      }
    }
  }
  // Checks that each edge and its opposite are wired symmetrically.
  void verify_edges() {
    // Verify opposite edges:
    for (MessagePasser<VARIABLE_KEY>*mp : message_passers) {
      for (unsigned long edge_ind=0; edge_ind<mp->number_edges(); ++edge_ind) {
        Edge<VARIABLE_KEY>*edge = mp->get_edge_out(edge_ind);
        assert(edge->source == mp);
        assert(edge->source_edge_index == edge_ind);
        assert(edge->get_opposite_edge_ptr()->dest == mp);
      }
    }
  }
  void verify() {
    verify_all_connected_message_passers_included();
    verify_edges();
  }
public:
  // Permits modification of MessagePasser types via pointer, but not
  // modification of the pointers themselves:
  std::vector<MessagePasser<VARIABLE_KEY>* > message_passers;
  // Using rvalue references in constructors is efficient, but it also
  // pushes a bit for the InferenceGraph to own the underlying data
  // from here on out (InferenceGraph will delete allocated Edge and
  // MessagePasser types when it destructs).
  InferenceGraph(std::vector<MessagePasser<VARIABLE_KEY>* > && message_passers_param):
    message_passers(std::move(message_passers_param))
  {
#ifdef ENGINE_CHECK
    // Only necessary on new construction (if constructing from
    // another InferenceGraph, that graph should have already been
    // verified).
    verify();
#endif
  }
  InferenceGraph(InferenceGraph<VARIABLE_KEY> && ig):
    message_passers(std::move(ig.message_passers))
  { }
  // Disable copying so that destructor will not be called multiple times:
  InferenceGraph(const InferenceGraph<VARIABLE_KEY> &) = delete;
  // Disable copying so that destructor will not be called multiple times:
  const InferenceGraph & operator =(const InferenceGraph<VARIABLE_KEY> &) = delete;
  ~InferenceGraph() {
    // Delete _variables_ptr collections first so that edges are still
    // available. This is slightly tricky since these pointers are
    // shared between the forward and reverse edges, which could lead
    // to them being deleted multiple times. One solution would be to
    // set to NULL when deleted and only delete if not NULL, but since
    // that pointer is a *const, it cannot be assigned. Therefore,
    // implemented using a set for simplicity; can be implemented more
    // efficiently:
    std::unordered_set<const std::vector<VARIABLE_KEY>* > all_edge_labels;
    for (MessagePasser<VARIABLE_KEY>*mp : message_passers) {
      for (unsigned long k=0; k<mp->number_edges(); ++k) {
        const Edge<VARIABLE_KEY>*edge = mp->get_edge_out(k);
        all_edge_labels.insert(edge->variables_ptr);
      }
    }
    for (const std::vector<VARIABLE_KEY>*edge_label : all_edge_labels)
      delete edge_label;
    // Delete all edges out (ensures every edge will be deleted
    // exactly once):
    for (MessagePasser<VARIABLE_KEY>*mp : message_passers) {
      for (unsigned long k=0; k<mp->number_edges(); ++k) {
        Edge<VARIABLE_KEY>*edge = mp->get_edge_out(k);
        delete edge;
      }
    }
    // Delete message passers:
    for (MessagePasser<VARIABLE_KEY>*mp : message_passers)
      delete mp;
  }
  // Returns the edges that can pass a message before any messages
  // have been received (used to seed schedulers).
  std::vector<Edge<VARIABLE_KEY>*> edges_ready_ab_initio() const {
    // Find edges that can pass from the first iteration (e.g.,
    // HUGINMessagePasser nodes with priors may sometimes start ready
    // to pass some of their edges):
    std::vector<Edge<VARIABLE_KEY>*> result;
    for (MessagePasser<VARIABLE_KEY>*mp : message_passers) {
      for (unsigned long edge_index=0; edge_index<mp->number_edges(); ++edge_index)
        if (mp->ready_to_send_message_ab_initio(edge_index)) {
          Edge<VARIABLE_KEY>*edge = mp->get_edge_out(edge_index);
          result.push_back(edge);
        }
    }
    return result;
  }
  // For debugging:
  void print(std::ostream & os) const {
    for (MessagePasser<VARIABLE_KEY>*mp : message_passers) {
      os << mp << " ";
      mp->print(os);
      os << std::endl;
      for (unsigned long k=0; k<mp->number_edges(); ++k) {
        Edge<VARIABLE_KEY>*edge = mp->get_edge_out(k);
        os << "\t";
        for (const VARIABLE_KEY & var : *edge->variables_ptr)
          os << var << " ";
        os << edge->ready_to_pass() << " ";
        os << edge << ": ";
        os << edge->dest << " ";
        edge->dest->print(os);
        os << " received opposite on " << edge->get_opposite_edge_ptr() << " " << edge->source->edge_received( edge->get_opposite_edge_ptr()->dest_edge_index );
        os << std::endl;
      }
      os << std::endl;
    }
  }
};
// For applying depth and breadth first search on any lambda on
// MessagePasser<VARIABLE_KEY>*.
// Note: function is responsible for coloring edges. This is important
// to prevent infinite looping and infinite memory use (= crash).
// Depth-first traversal over MessagePasser nodes, applying function
// to each unvisited node (color < 0 means unvisited; function is
// responsible for coloring). Neighbors are visited in random order.
template <typename VARIABLE_KEY, typename FUNCTION>
void node_dfs(std::list<MessagePasser<VARIABLE_KEY>* > queued_mps, FUNCTION function) {
  while ( ! queued_mps.empty() ) {
    MessagePasser<VARIABLE_KEY>*current = queued_mps.front();
    queued_mps.pop_front();
    if (current->color < 0) {
      function(current);
      // Push unvisited neighbors in shuffled order (front = DFS):
      for (unsigned long edge_index : shuffled_sequence(current->number_edges())) {
        MessagePasser<VARIABLE_KEY>*neighbor = current->get_edge_out(edge_index)->dest;
        if (neighbor->color < 0)
          queued_mps.push_front(neighbor);
      }
    }
  }
}
// To help the compiler wire an inlined list {a,b,...} to a std::list:
template <typename VARIABLE_KEY, typename FUNCTION>
void node_dfs(std::initializer_list<MessagePasser<VARIABLE_KEY>* > queued_mps_il, FUNCTION function) {
  // Materialize the initializer list as a std::list and delegate:
  std::list<MessagePasser<VARIABLE_KEY>* > as_list(queued_mps_il);
  node_dfs(std::move(as_list), function);
}
// Breadth-first traversal over MessagePasser nodes, applying function
// to each unvisited node (color < 0 means unvisited; function is
// responsible for coloring). Neighbors are visited in random order.
template <typename VARIABLE_KEY, typename FUNCTION>
void node_bfs(std::list<MessagePasser<VARIABLE_KEY>* > queued_mps, FUNCTION function) {
  while ( ! queued_mps.empty() ) {
    MessagePasser<VARIABLE_KEY>*current = queued_mps.front();
    queued_mps.pop_front();
    if (current->color < 0) {
      function(current);
      // Push unvisited neighbors in shuffled order (back = BFS):
      for (unsigned long edge_index : shuffled_sequence(current->number_edges())) {
        MessagePasser<VARIABLE_KEY>*neighbor = current->get_edge_out(edge_index)->dest;
        if (neighbor->color < 0)
          queued_mps.push_back(neighbor);
      }
    }
  }
}
#include "split_connected_components.hpp"
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/evergreen/src/Engine/Edge.hpp | .hpp | 2,320 | 93 | #ifndef _EDGE_HPP
#define _EDGE_HPP
#include "Queueable.hpp"
#include "../PMF/LabeledPMF.hpp"
template <typename VARIABLE_KEY>
class MessagePasser;
// Note: this is currently hard coded to use MSE-based divergence; it
// could be made more general later.
template <typename VARIABLE_KEY>
// A directed edge between two MessagePasser nodes in the inference
// graph. It caches the most recent message passed from source to dest
// and tracks whether that message is still up to date.
class Edge : public Queueable {
public:
// Endpoints of this directed edge (non-owning; pointers themselves are
// immutable after construction).
MessagePasser<VARIABLE_KEY> *const source, *const dest;
// Index of this edge in source's outgoing edge list and in dest's
// incoming bookkeeping, respectively.
const unsigned long source_edge_index, dest_edge_index;
// Variables whose joint distribution the message over this edge covers
// (non-owning).
const std::vector<VARIABLE_KEY> *const variables_ptr;
// Graph-coloring scratch field used by traversals (e.g. node_dfs/bfs);
// initialized to 0.
long color;
protected:
// True while _current_message reflects the latest state of source.
bool _up_to_date;
// Most recent message passed along this edge. (NOTE(review): an earlier
// design apparently also kept the previous message for dampening and
// divergence-based priority; only the current message is stored here.)
LabeledPMF<VARIABLE_KEY> _current_message;
public:
// Constructs an edge from source_param to dest_param over the given
// variables, recording its index within each endpoint's edge lists.
// Starts uncolored (color 0) and with no up-to-date message.
Edge(MessagePasser<VARIABLE_KEY>*source_param, MessagePasser<VARIABLE_KEY>*dest_param, const std::vector<VARIABLE_KEY>*variables_ptr_param, unsigned long source_edge_index_param, unsigned long dest_edge_index_param):
source(source_param),
dest(dest_param),
source_edge_index(source_edge_index_param),
dest_edge_index(dest_edge_index_param),
variables_ptr(variables_ptr_param),
color(0),
_up_to_date(false)
{ }
// Installs a new message (taking ownership) and marks it up to date.
void set_message(LabeledPMF<VARIABLE_KEY> && msg) {
// To prevent exponential feedback:
msg.reset_log_normalization_constant();
_current_message = std::move(msg);
_up_to_date = true;
}
// Returns the edge running in the opposite direction (dest --> source),
// looked up via this edge's index at dest.
Edge*get_opposite_edge_ptr() const {
return dest->get_edge_out(dest_edge_index);
}
// Returns the current message; requires the edge to be ready to pass
// (checked only when ENGINE_CHECK is defined).
const LabeledPMF<VARIABLE_KEY> & get_message() const {
#ifdef ENGINE_CHECK
assert( ready_to_pass() );
#endif
return _current_message;
}
// Re-zeroes the stored message's log normalization constant.
void reset_message_norm_constant() {
_current_message.reset_log_normalization_constant();
}
// Does not require ready_to_pass(), only has_message():
const LabeledPMF<VARIABLE_KEY> & get_possibly_outdated_message() const {
#ifdef ENGINE_CHECK
assert( has_message() );
#endif
return _current_message;
}
// Invalidates the cached message (e.g. after source's inputs change).
void set_not_up_to_date() {
_up_to_date = false;
}
// True if the cached message still reflects source's latest state.
bool up_to_date() const {
return _up_to_date;
}
// True once any message has ever been set (a set message has dimension > 0).
bool has_message() const {
return _current_message.dimension() > 0;
}
// An edge may pass only when it holds a message that is up to date.
bool ready_to_pass() const {
return has_message() && _up_to_date;
}
};
#endif
| Unknown |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.