keyword stringclasses 7 values | repo_name stringlengths 8 98 | file_path stringlengths 4 244 | file_extension stringclasses 29 values | file_size int64 0 84.1M | line_count int64 0 1.6M | content stringlengths 1 84.1M ⌀ | language stringclasses 14 values |
|---|---|---|---|---|---|---|---|
2D | dean0x7d/pybinding | cppcore/include/Model.hpp | .hpp | 3,994 | 117 | #pragma once
#include "system/Registry.hpp"
#include "system/System.hpp"
#include "system/Shape.hpp"
#include "system/Symmetry.hpp"
#include "system/StructureModifiers.hpp"
#include "leads/Leads.hpp"
#include "hamiltonian/Hamiltonian.hpp"
#include "hamiltonian/HamiltonianModifiers.hpp"
#include "utils/Chrono.hpp"
#include "detail/sugar.hpp"
#include <string>
#include <vector>
namespace cpb {
/**
 Main tight-binding model builder

 Collects the lattice, shape, symmetry and modifier parameters and lazily
 builds the resulting `System`, `Leads` and `Hamiltonian` on demand.
*/
class Model {
public:
Model(Lattice const& lattice);
/// The arguments can be any type accepted by `Model::add()`
template<class... Args>
Model(Lattice const& lattice, Args&&... args) : Model(lattice) {
// Braced init list + comma operator: guarantees left-to-right `add()` calls
detail::eval_ordered({(add(std::forward<Args>(args)), 0)...});
}
public: // add parameters
void add(Primitive primitive);
void add(Shape const& shape);
void add(TranslationalSymmetry const& s);
void attach_lead(int direction, Shape const& shape);
void add(SiteStateModifier const& m);
void add(PositionModifier const& m);
void add(OnsiteModifier const& m);
void add(HoppingModifier const& m);
void add(SiteGenerator const& g);
void add(HoppingGenerator const& g);
/// Set the Bloch wave vector used when constructing the Hamiltonian
void set_wave_vector(Cartesian const& k);
public:
/// Are any of the onsite or hopping energies given as matrices instead of scalars?
bool is_multiorbital() const;
/// Uses double precision values in the Hamiltonian matrix?
bool is_double() const;
/// Uses complex values in the Hamiltonian matrix?
bool is_complex() const;
public: // get parameters
Lattice const& get_lattice() const { return lattice; }
SiteRegistry const& get_site_registry() const { return site_registry; }
HoppingRegistry const& get_hopping_registry() const { return hopping_registry; }
Primitive const& get_primitive() const { return primitive; }
Shape const& get_shape() const { return shape; }
TranslationalSymmetry const& get_symmetry() const { return symmetry; }
public: // get properties
std::shared_ptr<System const> const& system() const;
Hamiltonian const& hamiltonian() const;
/// Return all leads
Leads const& leads() const;
/// Return lead at index
Lead lead(size_t i) const { return leads()[i]; }
/// The model properties listed above are usually evaluated lazily, only as needed.
/// Calling this function will evaluate the entire model ahead of time. Always returns itself.
Model const& eval() const;
public: // get information
/// Report of the last build operation: system and Hamiltonian
std::string report();
double system_build_seconds() const { return system_build_time.elapsed_seconds(); }
double hamiltonian_build_seconds() const { return hamiltonian_build_time.elapsed_seconds(); }
public:
void clear_structure_modifiers() { structure_modifiers.clear(); }
void clear_hamiltonian_modifiers() { hamiltonian_modifiers.clear(); }
void clear_all_modifiers() { clear_structure_modifiers(); clear_hamiltonian_modifiers(); }
private:
std::shared_ptr<System> make_system() const;
Hamiltonian make_hamiltonian() const;
/// Clear any existing structural data, implies clearing Hamiltonian
void clear_structure();
/// Clear Hamiltonian, but leave structural data untouched
void clear_hamiltonian();
private:
Lattice lattice;
SiteRegistry site_registry;
HoppingRegistry hopping_registry;
Primitive primitive;
Shape shape;
TranslationalSymmetry symmetry;
Cartesian wave_vector = {0, 0, 0}; ///< Bloch wave vector, see `set_wave_vector()`
std::vector<StructureModifier> structure_modifiers;
HamiltonianModifiers hamiltonian_modifiers;
// Lazily built products -- `mutable` so the const getters above can cache them
mutable std::shared_ptr<System const> _system;
mutable Hamiltonian _hamiltonian;
mutable Leads _leads;
mutable Chrono system_build_time;
mutable Chrono hamiltonian_build_time;
mutable bool complex_override = false; ///< set if a modifier was found to (dynamically)
///< return complex output for real input data
};
} // namespace cpb
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/utils/Chrono.hpp | .hpp | 1,112 | 53 | #pragma once
#include <string>
#include <chrono>
#include <ostream>
namespace cpb {
/**
 High resolution timer (below 1 microsecond accuracy).

 Uses `std::chrono::steady_clock` because it is guaranteed to be monotonic:
 `high_resolution_clock` may alias the system clock, in which case wall-clock
 adjustments (NTP, manual changes) could produce negative or wrong intervals.
*/
class Chrono {
    using Clock = std::chrono::steady_clock;

public:
    Chrono() { tic(); }

    /// Start (or restart) the timer
    void tic() {
        tic_time = Clock::now();
    }

    /// Stop the timer and record the time elapsed since the last `tic()`
    Chrono& toc() {
        // Explicit cast: the standard does not guarantee the clock's native
        // duration is convertible to nanoseconds without loss
        elapsed = std::chrono::duration_cast<std::chrono::nanoseconds>(Clock::now() - tic_time);
        return *this;
    }

    /// Like `toc()`, but add to the previously recorded elapsed time
    Chrono& toc_accumulate() {
        elapsed += std::chrono::duration_cast<std::chrono::nanoseconds>(Clock::now() - tic_time);
        return *this;
    }

    /// Measure the execution time of the `lambda` function
    template<class Fn>
    Chrono& timeit(Fn lambda) {
        tic(); lambda(); toc();
        return *this;
    }

    /// Elapsed time in seconds, as recorded by the last `toc*()` call
    double elapsed_seconds() const {
        return std::chrono::duration<double>(elapsed).count();
    }

    std::string str() const;
    Chrono& print(std::string msg = "");

    friend std::ostream& operator<<(std::ostream& os, Chrono const& chrono) {
        os << chrono.str();
        return os;
    }

private:
    std::chrono::time_point<Clock> tic_time; ///< start point set by `tic()`
    std::chrono::nanoseconds elapsed{0};     ///< recorded by `toc()`/`toc_accumulate()`
};
} // namespace cpb
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/system/CompressedSublattices.hpp | .hpp | 3,740 | 102 | #pragma once
#include "Lattice.hpp"
namespace cpb {
/**
Stores the sublattice IDs for all sites in a system in a compressed format
Since sublattices with the same ID are always arranged as consecutive elements, this
data is easily compressed using RLE (run-length encoding). The same IDs are consecutive
and appear in one block. But the IDs are not necessarily sorted:
E.g. possible sublattice IDs are: 111000002222 --> encoded as: [1, 0, 2], [3, 5, 4]
*/
class CompressedSublattices {
public:
/// One run-length-encoded block: a sublattice ID with its site and orbital counts
struct Element {
SiteID id; ///< the alias ID of each sublattice (unique among elements)
storage_idx_t num_sites; ///< number of sublattice sites in the final system
storage_idx_t num_orbitals; ///< number of orbitals on this sublattice
};
/// Input iterator over the RLE blocks; also tracks the running system and
/// Hamiltonian start offsets of the current block (`sys_start()`, `ham_start()`)
class It {
public:
using iterator_category = std::input_iterator_tag;
using difference_type = std::ptrdiff_t;
using value_type = It;
using reference = value_type const&;
using pointer = value_type const*;
It(std::vector<Element>::const_iterator it) : it(it) {}
/// Directly correspond to fields in `Element`
SiteID id() const { return it->id; }
idx_t num_sites() const { return it->num_sites; }
idx_t num_orbitals() const { return it->num_orbitals; }
/// The starting system index for sites of this sublattice
idx_t sys_start() const { return sys_idx; }
/// The past end system site index (== sys_start + num_sites)
idx_t sys_end() const { return sys_idx + it->num_sites; }
/// The starting hamiltonian index (>= sys_start due to multiple orbitals)
idx_t ham_start() const { return ham_idx; }
/// The past end hamiltonian index
idx_t ham_end() const { return ham_idx + ham_size(); }
/// The number of Hamiltonian matrix elements for this sublattice
idx_t ham_size() const { return it->num_sites * it->num_orbitals; }
reference operator*() const { return *this; }
pointer operator->() const { return this; }
It& operator++() {
// Advance the running system/Hamiltonian offsets past the current block
sys_idx += it->num_sites;
ham_idx += it->num_sites * it->num_orbitals;
++it;
return *this;
}
friend bool operator==(It const& a, It const& b) { return a.it == b.it; }
friend bool operator!=(It const& a, It const& b) { return !(a == b); }
private:
std::vector<Element>::const_iterator it;
idx_t sys_idx = 0; ///< system site offset of the current block
idx_t ham_idx = 0; ///< Hamiltonian offset of the current block
};
CompressedSublattices() = default;
CompressedSublattices(ArrayXi const& alias_ids, ArrayXi const& site_counts,
ArrayXi const& orbital_counts);
/// Start a new sublattice block or increment the site count for the existing block
void add(SiteID id, idx_t norb, idx_t count = 1);
/// Remove sites for which `keep == false`
void filter(VectorX<bool> const& keep);
/// Verify that the stored data is correct: `sum(site_counts) == num_sites`
void verify(idx_t num_sites) const;
/// Return the index of the first site with the given number of orbitals
idx_t start_index(idx_t num_orbitals) const;
/// Total size if decompressed (sum of the number of sites in all sublattices)
idx_t decompressed_size() const;
/// Return the full uncompressed array of IDs
ArrayX<storage_idx_t> decompressed() const;
It begin() const { return data.begin(); }
It end() const { return data.end(); }
/// Access raw data
ArrayXi alias_ids() const;
ArrayXi site_counts() const;
ArrayXi orbital_counts() const;
private:
std::vector<Element> data; ///< sorted by `num_orbitals` (not `alias_id`)
};
} // namespace cpb
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/system/Symmetry.hpp | .hpp | 1,706 | 54 | #pragma once
#include "detail/slice.hpp"
#include "numeric/dense.hpp"
#include <vector>
namespace cpb {
class Foundation;
/// The part of a foundation covered by translational symmetry, described by
/// its left/right boundary cells and the middle cell (in lattice coordinates)
struct SymmetryArea {
Index3D left, right, middle;
/// Is this lattice site contained in the symmetry unit cell?
bool contains(Index3D const& index) const;
};
/// Describes one translation unit of a periodic boundary
struct Translation {
Index3D direction; ///< translation direction in number of unit cell
SliceIndex3D boundary_slice; ///< Foundation slice which has the boundary sites
Index3D shift_index; ///< width of a translation unit in lattice sites
Cartesian shift_lenght; ///< width of a translation unit in nanometers
///< NOTE(review): member name is misspelled ("lenght"); renaming it
///< would break existing users, so the typo is only documented here
};
/**
Translational symmetry
The constructor takes the translation length in each lattice vector direction.
A positive number is a valid length. A negative number disables translation in that direction.
Zero is a special value which automatically sets the minimal translation length for the lattice,
i.e. the lattice vector length.
*/
class TranslationalSymmetry {
public:
/// Negative length disables translation in that direction; zero selects the
/// minimal (lattice vector) length -- see the class description above
TranslationalSymmetry(float a1 = -1, float a2 = -1, float a3 = -1);
/// The symmetry unit cell area for the given foundation
SymmetryArea area(Foundation const& foundation) const;
/// The translation units covering all enabled directions
std::vector<Translation> translations(Foundation const& foundation) const;
/// Apply this symmetry to the foundation sites
void apply(Foundation& foundation) const;
/// True if translation is enabled in at least one direction
explicit operator bool() const { return enabled_directions != Vector3b{false, false, false}; }
private:
Cartesian length; ///< translation length per lattice vector direction
Vector3b enabled_directions = {false, false, false};
};
namespace detail {
/// Return all the combinations of enabled directions
/// Example: [1, 1, 0] -> [0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0]
std::vector<Index3D> make_masks(Vector3b enabled_directions, int ndim);
}
} // namespace cpb
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/system/Registry.hpp | .hpp | 2,544 | 77 | #pragma once
#include "numeric/dense.hpp"
#include "detail/opaque_alias.hpp"
#include <string>
#include <vector>
#include <unordered_map>
namespace cpb {
// TODO: replace with proper string_view
using string_view = std::string const&;
/// Sublattice and hopping ID data types
using SubID = detail::OpaqueIntegerAlias<class SubIDTag>;
using SubAliasID = detail::OpaqueIntegerAlias<class SubAliasIDTag>;
using SiteID = detail::OpaqueIntegerAlias<class SiteIDTag>;
using HopID = detail::OpaqueIntegerAlias<class HopIDTag>;
/// Map from friendly sublattice/hopping name to numeric ID
using NameMap = std::unordered_map<std::string, storage_idx_t>;
/**
 Keeps the energies and friendly names of site or hopping families

 `ID` is an opaque integer alias (`SiteID` or `HopID`) which indexes
 the parallel `energies` and `names` vectors.
*/
template<class ID>
class Registry {
public:
Registry(std::vector<MatrixXcd> energies, std::vector<std::string> names);
std::vector<MatrixXcd> const& get_energies() const { return energies; }
std::vector<std::string> const& get_names() const { return names; }
/// Add a new family with the given unique name and energy term
void register_family(string_view name, MatrixXcd const& energy);
idx_t size() const { return static_cast<idx_t>(names.size()); }
/// Mapping from friendly names to unique IDs
NameMap name_map() const;
string_view name(ID id) const;
MatrixXcd const& energy(ID id) const;
ID id(string_view name) const;
/// Is at least one energy term not equal to zero?
bool has_nonzero_energy() const;
/// Is at least one energy term complex?
bool any_complex_terms() const;
/// Is at least one energy term a matrix?
bool has_multiple_orbitals() const;
private:
std::vector<MatrixXcd> energies; ///< indexed by ID, parallel to `names`
std::vector<std::string> names; ///< indexed by ID, parallel to `energies`
};
using SiteRegistry = Registry<SiteID>;
using HoppingRegistry = Registry<HopID>;
extern template class Registry<SiteID>;
extern template class Registry<HopID>;
namespace detail {
/// Check that the onsite energy matrix satisfies all the requirements
void check_onsite_energy(MatrixXcd const& energy);
/// Convert the onsite energy into the canonical format
MatrixXcd canonical_onsite_energy(std::complex<double> energy);
MatrixXcd canonical_onsite_energy(VectorXd const& energy);
inline MatrixXcd canonical_onsite_energy(MatrixXcd const& energy) { return energy; }
/// Check that the hopping energy matrix satisfies all the requirements
void check_hopping_energy(MatrixXcd const& energy);
/// Convert the hopping energy into the canonical format
MatrixXcd canonical_hopping_energy(std::complex<double> energy);
inline MatrixXcd canonical_hopping_energy(MatrixXcd const& energy) { return energy; }
} // namespace detail
} // namespace cpb
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/system/HoppingBlocks.hpp | .hpp | 5,479 | 146 | #pragma once
#include "Lattice.hpp"
#include "numeric/dense.hpp"
#include "numeric/sparse.hpp"
namespace cpb {
/// Alternative CSR mapping of hopping IDs
using HoppingCSR = SparseMatrixX<storage_idx_t>;
/**
A simple row and column index pair
*/
struct COO {
    storage_idx_t row;
    storage_idx_t col;

    COO() = default;
    /// Narrowing from `idx_t` is intentional and made explicit
    COO(idx_t row, idx_t col)
        : row(static_cast<storage_idx_t>(row)),
          col(static_cast<storage_idx_t>(col)) {}

    /// Member-wise equality
    friend bool operator==(COO const& a, COO const& b) {
        return a.row == b.row && a.col == b.col;
    }
    /// Lexicographic order: by row first, then by column
    friend bool operator<(COO const& a, COO const& b) {
        if (a.row != b.row) { return a.row < b.row; }
        return a.col < b.col;
    }
};
/**
Hopping coordinates arranged in per-family blocks
A hopping here represents a connection between two sites, not orbitals.
The `row` and `col` index sites which are connected by a hopping family
represented by numerical ID (the sparse matrix `data`). If a hopping
family has an energy matrix (instead of a scalar) then it will need to
be expanded to get the full orbital-to-orbital hoppings. But this happens
at a later stage. This data structure is only concerned with site-to-site
hoppings arranged in per-family blocks.
Each block corresponds to a COO sparse matrix where all the elements in
the data array are the same and correspond to the index of the block,
i.e. the hopping family ID:
block 0 block 1 block 2
row | col | data row | col | data row | col | data
---------------- ---------------- ----------------
0 | 1 | 0 0 | 4 | 1 1 | 3 | 2
0 | 4 | 0 2 | 3 | 1 4 | 4 | 2
1 | 2 | 0 2 | 0 | 1 7 | 9 | 2
3 | 2 | 0 ---------------- 8 | 1 | 2
7 | 5 | 0 ----------------
----------------
Because the data array is trivial, it doesn't actually need to be stored.
The full COO sparse matrix can be reconstructed by appending all the blocks
and reconstructing the implicit data array.
The row-col coordinate pairs are unique (over all blocks) and sorted to
maintain an upper triangular matrix per block (implied hermiticity supplies
the lower triangular portion and is not actually stored in memory).
*/
class HoppingBlocks {
public:
using Block = std::vector<COO>;
using Blocks = std::vector<Block>;
using SerializedBlocks = std::vector<std::pair<ArrayXi, ArrayXi>>; // format for saving to file
/// Input iterator over the per-family blocks; the block index is the family ID
class Iterator {
public:
using iterator_category = std::input_iterator_tag;
using difference_type = std::ptrdiff_t;
using value_type = Iterator;
using reference = value_type const&;
using pointer = value_type const*;
Iterator(Blocks::const_iterator it) : it(it) {}
HopID family_id() const { return HopID(id); }
Block const& coordinates() const { return *it; }
idx_t size() const { return static_cast<idx_t>(it->size()); }
reference operator*() { return *this; }
pointer operator->() { return this; }
Iterator& operator++() { ++it; ++id; return *this; }
friend bool operator==(Iterator const& a, Iterator const& b) { return a.it == b.it; }
friend bool operator!=(Iterator const& a, Iterator const& b) { return !(a == b); }
private:
Blocks::const_iterator it;
storage_idx_t id = 0; ///< family ID of the current block
};
public:
HoppingBlocks() = default;
HoppingBlocks(idx_t num_sites, NameMap name_map)
: num_sites(num_sites), blocks(name_map.size()), name_map(std::move(name_map)) {}
/// Internal: construct from serialized data
HoppingBlocks(idx_t num_sites, SerializedBlocks const& data, NameMap name_map);
idx_t get_num_sites() const { return num_sites; }
/// Internal: return serialized data
SerializedBlocks get_serialized_blocks() const;
NameMap const& get_name_map() const { return name_map; }
Iterator begin() const { return blocks.begin(); }
Iterator end() const { return blocks.end(); }
/// Number of non-zeros in this COO sparse matrix, i.e. the total number of hoppings.
/// This only includes the upper triangular part (i.e. does not include 2x for hermiticity).
idx_t nnz() const;
/// Return the number of neighbors for each site
ArrayXi count_neighbors() const;
/// Reserve space for the given number of hoppings per family
void reserve(ArrayXi const& counts);
/// Add a single coordinate pair to the given family block
void add(HopID family_id, idx_t row, idx_t col) {
blocks[family_id.as<size_t>()].push_back({row, col});
}
/// Append a range of coordinates to the given family block
void append(HopID family_id, ArrayXi&& rows, ArrayXi&& cols);
/// Remove sites for which `keep == false`
void filter(VectorX<bool> const& keep);
/// Account for the addition of new sites (no new hoppings)
void add_sites(idx_t num_new_sites);
/// Return the matrix in the CSR sparse matrix format
HoppingCSR tocsr() const;
private:
idx_t num_sites; ///< number of lattice sites, i.e. the size of the square matrix
Blocks blocks; ///< the coordinate blocks indexed by hopping family ID
NameMap name_map; ///< map from friendly hopping family names to their numeric IDs
};
} // namespace cpb
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/system/Shape.hpp | .hpp | 2,216 | 85 | #pragma once
#include "numeric/dense.hpp"
#include <vector>
#include <functional>
namespace cpb {
/**
Shape of the primitive unit cell
*/
class Primitive {
public:
/// Number of unit cells in each lattice vector direction
Primitive(int a1 = 1, int a2 = 1, int a3 = 1);
Index3D size; ///< unit cell count per lattice vector
};
/**
Shape defined by bounding vertices and `contains` function
The bounding vertices specify the maximum area (or volume) where the shape will be located.
The entire volume is filled with lattice sites and then the `contains` function decides which
of those sites are actually located within the desired shape. It's like carving a sculpture
from a block of stone.
*/
class Shape {
public:
using Vertices = std::vector<Cartesian>;
/// Predicate: for each input position, is it inside the shape?
using Contains = std::function<ArrayX<bool>(CartesianArrayConstRef)>;
Shape() = default;
/// Construct from the bounding `vertices` and an optional `contains` predicate
explicit Shape(Vertices const& vertices, Contains const& contains = {});
/// A shape is valid if it has a `contains` function
explicit operator bool() const { return static_cast<bool>(contains); }
Vertices vertices; ///< bounding vertices which define the initial volume
Contains contains; ///< return `true` for `positions` located within the shape
Cartesian lattice_offset = {0, 0, 0}; ///< set a specific lattice offset, see Lattice class
};
/**
1D line
*/
class Line : public Shape {
public:
/// The line segment between points `a` and `b`
Line(Cartesian a, Cartesian b);
};
/**
Polygon shape defined by a list of points
Strictly 2D within the xy plane.
*/
class Polygon : public Shape {
public:
/// Construct from the polygon's corner points (implicitly closed)
Polygon(Vertices const& vertices);
};
/**
Shape defined by a bounding box and a function
*/
class FreeformShape : public Shape {
public:
/// Bounding box given as `width` around `center`; `contains` carves the shape
FreeformShape(Contains const& contains, Cartesian width, Cartesian center = {0, 0, 0});
};
namespace detail {
// Is the angle formed by three points acute? The vertex is `b`.
ArrayX<bool> is_acute_angle(Cartesian a, Cartesian b, CartesianArrayConstRef c);
/// Function object which determines if a point is within a polygon
class WithinPolygon {
public:
/// Precompute per-axis vertex coordinate arrays from the polygon vertices
WithinPolygon(Shape::Vertices const& vertices);
/// Return `true` for each of the `positions` located within the polygon
ArrayX<bool> operator()(CartesianArrayConstRef positions) const;
private:
ArrayX<float> x, y; ///< polygon vertex coordinates, split per axis
};
} // namespace detail
} // namespace cpb
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/system/System.hpp | .hpp | 2,748 | 81 | #pragma once
#include "Lattice.hpp"
#include "system/CompressedSublattices.hpp"
#include "system/HoppingBlocks.hpp"
#include "system/Registry.hpp"
#include "numeric/dense.hpp"
#include "numeric/sparse.hpp"
#include <vector>
#include <memory>
namespace cpb {
class Foundation;
class FinalizedIndices;
class TranslationalSymmetry;
/// A half-open [start, end) pair of system site indices
struct Range { idx_t start, end; };
/**
Stores the positions, sublattice and hopping IDs for all lattice sites.
*/
struct System {
struct Boundary;
SiteRegistry site_registry;
HoppingRegistry hopping_registry;
CartesianArray positions; ///< real space coordinates, one per site
CompressedSublattices compressed_sublattices; ///< RLE-encoded sublattice ID per site
HoppingBlocks hopping_blocks; ///< site-to-site hoppings grouped by family
std::vector<Boundary> boundaries; ///< periodic boundary hoppings, if any
ArrayX<bool> is_valid; ///< per-site validity flags, see `detail::remove_invalid`
System(SiteRegistry const& site_registry, HoppingRegistry const& hopping_registry)
: site_registry(site_registry), hopping_registry(hopping_registry) {}
/// The total number of lattice sites i.e. unique positions. Note that a single site may
/// consist of several orbitals/spins which means that the size of the Hamiltonian matrix
/// must be >= to the number of sites. See `System::hamiltonian_size()`.
idx_t num_sites() const { return positions.size(); }
/// The square matrix size required to hold all the Hamiltonian terms after taking into
/// account the number of orbitals/spins at each lattice site.
idx_t hamiltonian_size() const;
/// Total number of non-zero values which need to be reserved for a Hamiltonian.
/// This function takes multi-orbital hopping terms into account.
idx_t hamiltonian_nnz() const;
/// Translate the given System site index into its corresponding Hamiltonian indices
ArrayXi to_hamiltonian_indices(idx_t system_index) const;
/// The [start, end) range (pair of system indices) of all sites of a sublattice
Range sublattice_range(string_view sublattice) const;
/// Find the index of the site nearest to the given position. Optional: filter by sublattice.
idx_t find_nearest(Cartesian position, string_view sublattice_name = "") const;
/// Expand `positions` to `hamiltonian_size` by replicating site positions for each orbital
CartesianArray expanded_positions() const;
};
/**
Stores sites that belong to a system boundary
*/
struct System::Boundary {
HoppingBlocks hopping_blocks; ///< hoppings which cross this boundary
Cartesian shift; ///< shift length (periodic boundary condition)
};
namespace detail {
void populate_system(System& system, Foundation const& foundation);
void populate_boundaries(System& system, Foundation const& foundation,
TranslationalSymmetry const& symmetry);
void remove_invalid(System& system);
} // namespace detail
} // namespace cpb
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/system/Foundation.hpp | .hpp | 13,581 | 388 | #pragma once
#include "Lattice.hpp"
#include "detail/slice.hpp"
#include "numeric/dense.hpp"
#include "support/cppfuture.hpp"
namespace cpb {
class Primitive;
class Shape;
class Site;
class Foundation;
namespace detail {
/// Return the lower and upper bounds of the shape in lattice vector coordinates
std::pair<Index3D, Index3D> find_bounds(Shape const& shape, Lattice const& lattice);
/// Generate real space coordinates for a block of lattice sites
CartesianArray generate_positions(Cartesian origin, Index3D size, Lattice const& lattice);
/// Initialize the neighbor count for each site
ArrayXi count_neighbors(Foundation const& foundation);
/// Reduce this site's neighbor count to zero and inform its neighbors of the change
void clear_neighbors(Site& site, ArrayXi& neighbor_count, int min_neighbors);
} // namespace detail
/// Remove sites which have a neighbor count lower than `min_neighbors`
void remove_dangling(Foundation& foundation, int min_neighbors);
/**
Keeps the final indices of valid `Foundation` sites as they should appear in `System`
*/
class FinalizedIndices {
public:
FinalizedIndices() = default;
FinalizedIndices(ArrayXi indices, ArrayXi hopping_counts, idx_t total_valid_sites);
/// True once the indices have actually been finalized
explicit operator bool() const { return total_valid_sites != 0; }
/// Return the System index for the given site
storage_idx_t operator[](Site const& site) const;
/// Size of the Hamiltonian matrix
idx_t size() const { return total_valid_sites; }
/// Upper limit for the number of hoppings, indexed by family ID (useful for reservation)
ArrayXi const& max_hoppings_per_family() const { return hopping_counts; }
private:
ArrayXi indices; ///< final System index for each foundation site (flat index)
ArrayXi hopping_counts; ///< per-family hopping count upper bounds
idx_t total_valid_sites = 0;
};
/**
The foundation class creates a lattice-vector-aligned set of sites. The number of sites is high
enough to encompass the given shape. After creation, the foundation can be cut down to the shape.
*/
class Foundation {
// Iterators and slice views come in const and non-const flavors
template<bool is_const> class Iterator;
using ConstIterator = Iterator<true>;
using NonConstIterator = Iterator<false>;
template<bool is_const> class SpatialSlice;
using ConstSpatialSlice = SpatialSlice<true>;
using NonConstSpatialSlice = SpatialSlice<false>;
template<bool is_const> class SublatticeSlice;
using ConstSublatticeSlice = SublatticeSlice<true>;
using NonConstSublatticeSlice = SublatticeSlice<false>;
public:
Foundation(Lattice const& lattice, Primitive const& shape);
Foundation(Lattice const& lattice, Shape const& shape);
ConstIterator begin() const;
ConstIterator end() const;
NonConstIterator begin();
NonConstIterator end();
/// 3D slice view of the foundation
ConstSpatialSlice operator[](SliceIndex3D const& index) const;
NonConstSpatialSlice operator[](SliceIndex3D const& index);
/// Single-sublattice view of the foundation
ConstSublatticeSlice operator[](SubID id) const;
NonConstSublatticeSlice operator[](SubID id);
/// Total number of sites: product of all sizes (3D space and sublattice)
idx_t size() const { return spatial_size.prod() * sub_size; }
Lattice const& get_lattice() const { return lattice; }
OptimizedUnitCell const& get_optimized_unit_cell() const { return unit_cell; }
std::pair<Index3D, Index3D> const& get_bounds() const { return bounds; }
Index3D const& get_spatial_size() const { return spatial_size; }
idx_t get_sub_size() const { return sub_size; }
CartesianArray const& get_positions() const { return positions; }
CartesianArray& get_positions() { return positions; }
ArrayX<bool> const& get_states() const { return is_valid; }
ArrayX<bool>& get_states() { return is_valid; }
FinalizedIndices const& get_finalized_indices() const;
private:
Lattice const& lattice; ///< reference member: the lattice must outlive this foundation
OptimizedUnitCell unit_cell;
std::pair<Index3D, Index3D> bounds; ///< in lattice vector coordinates
Index3D spatial_size; ///< number of unit cells in each lattice vector direction
idx_t sub_size; ///< number of sites in a unit cell (sublattices)
CartesianArray positions; ///< real space coordinates of lattice sites
ArrayX<bool> is_valid; ///< indicates if the site should be included in the final system
mutable FinalizedIndices finalized_indices; ///< lazily computed cache
friend class Site;
};
/// Convenient alias
using Hopping = OptimizedUnitCell::Hopping;
/**
Describes a site on the lattice foundation
Proxy type for a single index in the foundation arrays.
*/
class Site {
public:
/// Direct index assignment
Site(Foundation* foundation, Index3D spatial_idx, idx_t sub_idx, idx_t idx)
: foundation(foundation), spatial_idx(spatial_idx), sub_idx(sub_idx), flat_idx(idx) {}
/// Compute flat index based on 3D space + sub_idx
Site(Foundation* foundation, Index3D spatial_idx, idx_t sub_idx)
: foundation(foundation), spatial_idx(spatial_idx), sub_idx(sub_idx) { reset_idx(); }
Index3D const& get_spatial_idx() const { return spatial_idx; }
idx_t get_sub_idx() const { return sub_idx; }
idx_t get_flat_idx() const { return flat_idx; }
SubAliasID get_alias_id() const { return foundation->unit_cell[sub_idx].alias_id; }
storage_idx_t get_norb() const { return foundation->unit_cell[sub_idx].norb; }
Cartesian get_position() const { return foundation->positions[flat_idx]; }
bool is_valid() const { return foundation->is_valid[flat_idx]; }
void set_valid(bool state) {foundation->is_valid[flat_idx] = state; }
/// Return a new site which has a shifted spatial index
Site shifted(Index3D shift) const { return {foundation, spatial_idx + shift, sub_idx}; }
/// Loop over all neighbours of this site
template<class Fn>
void for_each_neighbor(Fn lambda) const {
auto const spatial_size = foundation->spatial_size.array();
for (auto const& hopping : foundation->unit_cell[sub_idx].hoppings) {
auto const neighbor_index = Array3i(spatial_idx + hopping.relative_index);
if ((neighbor_index < 0).any() || (neighbor_index >= spatial_size).any())
continue; // out of bounds
// `lambda` receives the neighboring Site and the connecting hopping
lambda(Site(foundation, neighbor_index, hopping.to_sub_idx), hopping);
}
}
// Note: equality only compares the flat index, assuming the same foundation
friend bool operator==(Site const& l, Site const& r) { return l.flat_idx == r.flat_idx; }
friend bool operator!=(Site const& l, Site const& r) { return !(l == r); }
protected:
/// Recalculate `flat_idx`
void reset_idx() {
auto const& i = spatial_idx;
auto const& size = foundation->spatial_size;
// Layout: sublattice is the slowest-varying dimension, then z, y, x
flat_idx = ((sub_idx * size[2] + i[2]) * size[1] + i[1]) * size[0] + i[0];
}
protected:
Foundation* foundation; ///< the site's parent foundation
Index3D spatial_idx; ///< unit cell spatial index
idx_t sub_idx; ///< sublattice index
idx_t flat_idx; ///< directly corresponds to array elements
};
/**
Iterate over all foundation sites
*/
template<bool is_const>
class Foundation::Iterator : public Site {
// NOTE(review): these iterator aliases sit before `public:` and are therefore
// private; fine for the manual loops used here, but standard algorithms could
// not introspect them -- confirm whether this is intentional
using iterator_category = std::input_iterator_tag;
using difference_type = std::ptrdiff_t;
using value_type = std14::conditional_t<is_const, Site const, Site>;
using reference = value_type&;
using pointer = value_type*;
public:
Iterator(Foundation* foundation, idx_t flat_idx) : Site(foundation, {0, 0, 0}, 0, flat_idx) {}
reference operator*() { return *this; }
pointer operator->() { return this; }
// Odometer-style increment: x is fastest, then y, z, and finally sublattice,
// matching the flat index layout computed by `Site::reset_idx()`
Iterator& operator++() {
++flat_idx;
++spatial_idx[0];
if (spatial_idx[0] == foundation->spatial_size[0]) {
spatial_idx[0] = 0;
++spatial_idx[1];
if (spatial_idx[1] == foundation->spatial_size[1]) {
spatial_idx[1] = 0;
++spatial_idx[2];
if (spatial_idx[2] == foundation->spatial_size[2]) {
spatial_idx[2] = 0;
++sub_idx;
}
}
}
return *this;
}
};
/**
Iterate only over sites inside the spatial slice
*/
template<bool is_const>
class SpatialSliceIterator : public Site {
using It = SpatialSliceIterator<is_const>;
using iterator_category = std::input_iterator_tag;
using difference_type = std::ptrdiff_t;
using value_type = std14::conditional_t<is_const, It const, It>;
using reference = value_type&;
using pointer = value_type*;
public:
SpatialSliceIterator(Foundation* foundation, SliceIndex3D range, idx_t slice_idx)
: Site(foundation, {range[0].start, range[1].start, range[2].start}, 0),
range(range), slice_idx(slice_idx) {}
/// Flat index within the slice: `0 < slice_idx < slice_size`
idx_t get_slice_idx() const { return slice_idx; }
reference operator*() { return *this; }
pointer operator->() { return this; }
// Odometer-style increment restricted to the slice range; unlike
// `Foundation::Iterator`, the flat index must be recomputed each step
// because slice sites are not contiguous in the foundation arrays
It& operator++() {
++slice_idx;
++spatial_idx[0];
if (spatial_idx[0] == range[0].end) {
spatial_idx[0] = range[0].start;
++spatial_idx[1];
if (spatial_idx[1] == range[1].end) {
spatial_idx[1] = range[1].start;
++spatial_idx[2];
if (spatial_idx[2] == range[2].end) {
spatial_idx[2] = range[2].start;
++sub_idx;
}
}
}
reset_idx();
return *this;
}
friend bool operator==(It const& l, It const& r) { return l.slice_idx == r.slice_idx; }
friend bool operator!=(It const& l, It const& r) { return !(l == r); }
private:
SliceIndex3D range; ///< slice start and end indices in 3 dimensions
idx_t slice_idx; ///< flat index within the slice
};
/**
A 3D slice view of a foundation
*/
template<bool is_const>
class Foundation::SpatialSlice {
using Iterator = SpatialSliceIterator<is_const>;
public:
SpatialSlice(Foundation* foundation, SliceIndex3D const& range)
: foundation(foundation), range(range) { normalize(); }
Iterator begin() const { return {foundation, range, 0}; }
Iterator end() const { return {foundation, range, size()}; }
/// Number of sites in the slice (all sublattices included)
idx_t size() const { return range.size() * foundation->get_sub_size(); }
SliceIndex const& operator[](int n) const { return range[n]; }
SliceIndex& operator[](int n) { return range[n]; }
/// Replace open ended indices [0, -1) with proper [0, size) indices
void normalize() {
for (auto i = 0; i < range.ndims(); ++i) {
if (range[i].end < 0) {
range[i].end = foundation->get_spatial_size()[i];
}
}
}
/// Gather the real space positions of all slice sites into a new array
CartesianArray positions() const {
auto positions = CartesianArray(size());
for (auto const& site : *this) {
positions[site.get_slice_idx()] = site.get_position();
}
return positions;
}
private:
Foundation* foundation;
SliceIndex3D range;
};
/**
 A single sublattice slice view of a foundation
*/
template<bool is_const>
class Foundation::SublatticeSlice {
    using Iterator = SpatialSliceIterator<is_const>;

public:
    SublatticeSlice(Foundation* foundation, SubID unique_id) : foundation(foundation) {
        using CellSite = OptimizedUnitCell::Site;
        auto const& unit_cell = foundation->get_optimized_unit_cell();
        auto const match = std::find_if(
            unit_cell.begin(), unit_cell.end(),
            [&](CellSite const& site) { return site.unique_id == unique_id; }
        );
        if (match == unit_cell.end()) {
            throw std::runtime_error("Foundation::SublatticeSlice: invalid sublattice unique_id");
        }

        // Each sublattice occupies one contiguous segment of the flat site arrays
        slice_size = foundation->get_spatial_size().prod();
        start_idx = slice_size * (match - unit_cell.begin());
    }

    idx_t size() const { return slice_size; }

    Foundation::Iterator<is_const> begin() const { return {foundation, start_idx}; }
    Foundation::Iterator<is_const> end() const { return {foundation, start_idx + slice_size}; }

    /// Validity flags of the sites belonging to this sublattice
    Eigen::Ref<ArrayX<bool> const> get_states() const {
        return foundation->is_valid.segment(start_idx, slice_size);
    }
    Eigen::Ref<ArrayX<bool>> get_states() {
        return foundation->is_valid.segment(start_idx, slice_size);
    }

    /// Positions of the sites belonging to this sublattice
    CartesianArrayConstRef get_positions() const {
        return foundation->positions.segment(start_idx, slice_size);
    }
    CartesianArrayRef get_positions() {
        return foundation->positions.segment(start_idx, slice_size);
    }

private:
    Foundation* foundation;
    idx_t start_idx;
    idx_t slice_size;
};
/// Look up the finalized index assigned to the given foundation site
inline storage_idx_t FinalizedIndices::operator[](Site const& site) const {
    return indices[site.get_flat_idx()];
}
/// Iterate over every site of the foundation.
/// NOTE(review): the const_cast is needed because iterators store a plain
/// `Foundation*`; the ConstIterator is assumed to expose read-only access.
inline Foundation::ConstIterator Foundation::begin() const {
    return {const_cast<Foundation*>(this), 0};
}
inline Foundation::ConstIterator Foundation::end() const {
    return {const_cast<Foundation*>(this), size()};
}
inline Foundation::NonConstIterator Foundation::begin() {
    return {this, 0};
}
inline Foundation::NonConstIterator Foundation::end() {
    return {this, size()};
}
/// Slice the foundation: by 3D spatial index ranges, or by sublattice unique ID
inline Foundation::ConstSpatialSlice Foundation::operator[](SliceIndex3D const& index) const {
    return {const_cast<Foundation*>(this), index};
}
inline Foundation::NonConstSpatialSlice Foundation::operator[](SliceIndex3D const& index) {
    return {this, index};
}
inline Foundation::ConstSublatticeSlice Foundation::operator[](SubID unique_id) const {
    return {const_cast<Foundation*>(this), unique_id};
}
inline Foundation::NonConstSublatticeSlice Foundation::operator[](SubID unique_id) {
    return {this, unique_id};
}
} // namespace cpb
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/system/StructureModifiers.hpp | .hpp | 5,700 | 158 | #pragma once
#include "CompressedSublattices.hpp"
#include "HoppingBlocks.hpp"
#include "numeric/dense.hpp"
#include <vector>
#include <algorithm>
#include <memory>
namespace cpb {
class Foundation;
struct System;
/**
Modify the state (valid or invalid) of lattice sites, e.g. to create vacancies
*/
class SiteStateModifier {
public:
    /// Receives the mutable validity flags, the site positions, and the name of
    /// the sublattice currently being processed
    using Function = std::function<void(Eigen::Ref<ArrayX<bool>> state, CartesianArrayConstRef pos,
                                        string_view sublattice)>;
    Function apply; ///< to be user-implemented
    int min_neighbors; ///< afterwards, remove sites with less than this number of neighbors

    SiteStateModifier(Function const& apply, int min_neighbors = 0)
        : apply(apply), min_neighbors(min_neighbors) {}
};
/**
Modify the position of lattice sites, e.g. to apply geometric deformations
*/
class PositionModifier {
public:
    /// Receives mutable site positions and the name of the sublattice being processed
    using Function = std::function<void(CartesianArrayRef position, string_view sublattice)>;
    Function apply; ///< to be user-implemented

    PositionModifier(Function const& apply) : apply(apply) {}
};
/**
Introduces a new site family (with new sub_id)
This can be used to create new sites independent of the translations of the main unit cell
as defined by the `Lattice` class. It's useful for disorder or terminating system edges with
atoms of a different element.
*/
class SiteGenerator {
public:
    /// Given the system built so far, return the positions of the new sites
    using Function = std::function<CartesianArray(System const&)>;

    std::string name; ///< friendly site family identifier
    MatrixXcd energy; ///< onsite energy - also added to the site registry
    Function make; ///< function which will generate the new site positions

    SiteGenerator(string_view name, MatrixXcd const& energy, Function const& make)
        : name(name), energy(energy), make(make) {}

    /// False when no `make` function has been set
    explicit operator bool() const { return static_cast<bool>(make); }
};
/**
Introduces a new hopping family (with new hop_id) via a list of index pairs
This can be used to create new hoppings independent of the main Lattice definition.
It's especially useful for creating additional local hoppings, e.g. to model defects.
*/
class HoppingGenerator {
public:
    /// Site index pairs which should form new hoppings
    struct Result {
        ArrayXi from;
        ArrayXi to;
    };
    /// Given the system built so far, return the new hopping index pairs
    using Function = std::function<Result(System const&)>;

    std::string name; ///< friendly hopping identifier - will be added to lattice registry
    MatrixXcd energy; ///< hopping energy - also added to hopping registry
    Function make; ///< function which will generate the new hopping index pairs

    HoppingGenerator(string_view name, MatrixXcd const& energy, Function const& make)
        : name(name), energy(energy), make(make) {}
    /// Convenience overload: wraps a scalar energy into a 1x1 matrix
    HoppingGenerator(string_view name, std::complex<double> energy, Function const& make)
        : HoppingGenerator(name, MatrixXcd::Constant(1, 1, energy), make) {}

    /// False when no `make` function has been set
    explicit operator bool() const { return static_cast<bool>(make); }
};
/// Apply a modifier to a `Foundation`: the unconstrained templates are no-op
/// fallbacks for modifier types which don't participate at that stage; the
/// concrete overloads are implemented in the .cpp file.
template<class M> void apply(M const&, Foundation&) {}
void apply(SiteStateModifier const& m, Foundation& f);
void apply(PositionModifier const& m, Foundation& f);

/// Apply a modifier/generator to an already constructed `System`
template<class M> void apply(M const&, System&) {}
void apply(SiteStateModifier const& m, System& s);
void apply(PositionModifier const& m, System& s);
void apply(SiteGenerator const& g, System& s);
void apply(HoppingGenerator const& g, System& s);

/// Does this modifier need a fully built `System` (rather than a `Foundation`)?
template<class M> constexpr bool requires_system(M const&) { return false; }
constexpr bool requires_system(SiteGenerator const&) { return true; }
constexpr bool requires_system(HoppingGenerator const&) { return true; }

/// Does this modifier introduce new sites/hoppings (as opposed to modifying existing ones)?
template<class M> constexpr bool is_generator(M const&) { return false; }
constexpr bool is_generator(SiteGenerator const&) { return true; }
constexpr bool is_generator(HoppingGenerator const&) { return true; }
/**
Polymorphic storage for system/foundation modifiers
Behaves like a common base for several classes but without actually needing
to inherit from anything -- a class just needs to satisfy the interface.
This allows us to use value semantics with polymorphic behavior.
See: "Inheritance Is The Base Class of Evil" by Sean Parent.
*/
class StructureModifier {
public:
    /// Implicit conversion from any type satisfying the modifier interface
    template<class T>
    StructureModifier(T x) : impl(std::make_shared<Storage<T>>(std::move(x))) { }

    // Free-function interface: dispatches to the stored concrete modifier
    friend void apply(StructureModifier const& x, Foundation& f) { x.impl->v_apply(f); }
    friend void apply(StructureModifier const& x, System& s) { x.impl->v_apply(s); }
    friend bool requires_system(StructureModifier const& x) { return x.impl->v_requires_system(); }
    friend bool is_generator(StructureModifier const& x) { return x.impl->v_is_generator(); }

private:
    /// Virtual interface implemented by `Storage<T>` for each concrete modifier type
    struct Interface {
        Interface() = default;
        virtual ~Interface() = default;

        Interface(Interface const&) = delete;
        Interface(Interface&&) = delete;
        Interface& operator=(Interface const&) = delete;
        Interface& operator=(Interface&&) = delete;

        virtual void v_apply(Foundation&) const = 0;
        virtual void v_apply(System&) const = 0;
        virtual bool v_requires_system() const = 0;
        virtual bool v_is_generator() const = 0;
    };

    /// Wraps a concrete modifier `T` and forwards to the free
    /// `apply`/`requires_system`/`is_generator` overloads declared above
    template<class T>
    struct Storage : Interface {
        Storage(T x) : data(std::move(x)) { }

        void v_apply(Foundation& f) const override { apply(data, f); }
        void v_apply(System& s) const override { apply(data, s); }
        bool v_requires_system() const override { return requires_system(data); }
        bool v_is_generator() const override { return is_generator(data); }

        T data;
    };

    /// shared_ptr-to-const: cheap copies with value semantics (the payload is immutable)
    std::shared_ptr<Interface const> impl;
};
} // namespace cpb
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/support/cppfuture.hpp | .hpp | 1,229 | 51 | #pragma once
#include <memory>
#include <type_traits>
// helper functions for use until C++14 brings this into std
namespace cpb { namespace std14 {
/// C++14 std::make_unique backport: construct a `T` on the heap, forwarding
/// all arguments, and return it wrapped in a unique_ptr.
template<class T, class... Args>
std::unique_ptr<T> make_unique(Args&&... args) {
    T* raw = new T(std::forward<Args>(args)...);
    return std::unique_ptr<T>(raw);
}
/// C++14 std::cbegin backport: a const iterator to the start of the container
template<class T>
auto cbegin(const T& t) -> decltype(t.cbegin()) {
    auto first = t.cbegin();
    return first;
}
/// C++14 std::cend backport: a const iterator one past the end of the container
template<class T>
auto cend(const T& t) -> decltype(t.cend()) {
    auto last = t.cend();
    return last;
}
/// C++14-style `_t` alias templates for the C++11 type traits
template <bool condition, class T = void>
using enable_if_t = typename std::enable_if<condition, T>::type;

template<bool condition, class If, class Else>
using conditional_t = typename std::conditional<condition, If, Else>::type;

template<class T>
using add_const_t = typename std::add_const<T>::type;

template<class T>
using remove_const_t = typename std::remove_const<T>::type;

template<class T>
using remove_pointer_t = typename std::remove_pointer<T>::type;

template<class T>
using decay_t = typename std::decay<T>::type;
} // namespace std14
namespace std17 {
/// C++17 std::as_const backport: view a mutable object through a const reference
template<class T>
constexpr std14::add_const_t<T>& as_const(T& x) noexcept { return x; }

/// Deleted for rvalues: a const reference to a temporary would dangle
template <class T>
void as_const(const T&&) = delete;
}} // namespace cpb::std17
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/support/simd.hpp | .hpp | 14,323 | 438 | #pragma once
#if defined(__AVX2__)
# define SIMDPP_ARCH_X86_AVX2
#elif defined(__AVX__)
# define SIMDPP_ARCH_X86_AVX
#elif defined(__SSE3__)
# define SIMDPP_ARCH_X86_SSE3
#elif defined(__SSE2__) || defined(_M_X64) || _M_IX86_FP == 2
# define SIMDPP_ARCH_X86_SSE2
#endif
#if defined(__FMA__) || (defined(_MSC_VER) && defined(__AVX2__))
# define SIMDPP_ARCH_X86_FMA3
#endif
#ifdef _MSC_VER
# pragma warning(push)
# pragma warning(disable:4244)
# pragma warning(disable:4556)
#endif
#include <simdpp/simd.h>
#ifdef _MSC_VER
# pragma warning(pop)
#endif
#include "detail/config.hpp"
#include "detail/macros.hpp"
#include "support/cppfuture.hpp"
#include <complex>
namespace cpb { namespace simd {
using namespace simdpp;
/// Alignment and register-width assumptions shared by all SIMD code here
struct basic_traits {
    static constexpr auto align_bytes = 32;
    static constexpr auto register_size_bytes = 32; // emulated for < AVX-256
};

/// All SIMD vectors have the following traits
template<class T>
struct traits : basic_traits {
    /// Number of `T` elements per (possibly emulated) register
    static constexpr auto size = register_size_bytes / sizeof(T);
};
namespace detail {
    template<class T> struct select_vector;
    template<> struct select_vector<float> { using type = float32<traits<float>::size>; };
    template<> struct select_vector<double> { using type = float64<traits<double>::size>; };
    // Complex scalars reuse the real vector type (real/imaginary parts share lanes)
    template<> struct select_vector<std::complex<float>> : select_vector<float> {};
    template<> struct select_vector<std::complex<double>> : select_vector<double> {};

    /// SFINAE helpers: enable an overload only for real / only for complex scalars
    template<class T>
    using requires_real = std14::enable_if_t<std::is_floating_point<T>::value, int>;
    template<class T>
    using requires_complex = std14::enable_if_t<!std::is_floating_point<T>::value, int>;
} // namespace detail
/**
Select the proper SIMD vector type for the given scalar type
*/
template<class scalar_t>
using select_vector_t = typename detail::select_vector<scalar_t>::type;
/**
Stack array with size matching a SIMD register of the corresponding type
*/
template<class scalar_t>
using array = std::array<scalar_t, traits<scalar_t>::size>;
/**
Check if the data pointed to by `p` has `bytes` alignment
*/
/**
 Check if the data pointed to by `p` has `bytes` alignment
*/
template<std::size_t bytes>
inline bool is_aligned(void const* p) {
    auto const address = reinterpret_cast<std::uintptr_t>(p);
    return address % bytes == 0;
}
/**
Split loop data, see `split_loop()` function
*/
template<idx_t N>
struct split_loop_t {
    static constexpr auto step = N; ///< number of elements per SIMD iteration
    idx_t start, peel_end, vec_end, end;

    /**
     Failed experiment

     The intent here was to make writing vectorized loops easier by wrapping
     everything in a function and accepting two lambdas for the scalar and
     vector parts. Everything worked well on clang but GCC did not inline the
     lambdas as expected, thus crippling performance. Abandoned for now, but
     it may be nice to revisit this idea someday.
    */
    template<class FnScalar, class FnVector>
    CPB_ALWAYS_INLINE void for_each(FnScalar fn_scalar, FnVector fn_vector) const {
        // scalar peel loop over the unaligned head elements
#if defined(__clang__)
# pragma clang loop vectorize(disable) unroll(disable)
#endif
        for (auto i = start; i < peel_end; ++i) {
            fn_scalar(i);
        }
        // aligned SIMD middle section, a whole number of `step`s
        for (auto i = peel_end; i < vec_end; i += step) {
            fn_vector(i);
        }
        // scalar remainder loop over the leftover tail elements
#if defined(__clang__)
# pragma clang loop vectorize(disable) unroll(disable)
#endif
        for (auto i = vec_end; i < end; ++i) {
            fn_scalar(i);
        }
    };
};
/**
Split the loop into 3 sections:
1. Peel: [start, peel_end) scalar loop for the first few unaligned elements
2. Vector: [peel_end, vec_end) SIMD loop for aligned elements
3. Remainder: [vec_end, end) scalar loop for the leftover (end - vec_end < step) elements
*/
template<class scalar_t, idx_t step = traits<scalar_t>::size>
split_loop_t<step> split_loop(scalar_t const* p, idx_t start, idx_t end) {
    // advance until `p + peel_end` reaches the required SIMD alignment (or we run out)
    auto peel_end = start;
    static constexpr auto bytes = traits<scalar_t>::align_bytes;
    while (!is_aligned<bytes>(p + peel_end) && peel_end < end) {
        ++peel_end;
    }
    // shrink the vector section so its length is a whole multiple of `step`
    auto vec_end = end;
    while ((vec_end - peel_end) % step != 0) {
        --vec_end;
    }
    return {start, peel_end, vec_end, end};
}
/**
RAII class which disables floating-point denormals (flush-to-zero mode)
*/
struct scope_disable_denormals {
    CPB_ALWAYS_INLINE scope_disable_denormals() { _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); }
    // NOTE(review): the destructor unconditionally turns flush-to-zero off instead
    // of restoring the previous mode -- nested scopes will not restore correctly.
    CPB_ALWAYS_INLINE ~scope_disable_denormals() { _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_OFF); }
};
namespace detail {
    template<class Vector> struct Gather;

#if SIMDPP_USE_SSE2
    /// Gather two doubles -- or one complex<double> (16 contiguous bytes) per index
    template<>
    struct Gather<float64x2> {
        CPB_ALWAYS_INLINE
        static __m128d call(double const* data, std::int32_t const* indices) {
            auto const low = _mm_load_sd(data + indices[0]);
            return _mm_loadh_pd(low, data + indices[1]);
        }
        CPB_ALWAYS_INLINE
        static __m128d call(std::complex<double> const* data, std::int32_t const* indices) {
            // one complex<double> fills the whole register, so only indices[0] is used
            // NOTE(review): _mm_load_pd requires 16-byte alignment of data + indices[0]
            return _mm_load_pd(reinterpret_cast<double const*>(data + indices[0]));
        }
    };

    template<>
    struct Gather<float32x4> {
        CPB_ALWAYS_INLINE
        static __m128 call(float const* data, std::int32_t const* indices) {
            // load 4 scalars and interleave: (a,c) and (b,d) -> (a,b,c,d)
            auto const a = _mm_load_ss(data + indices[0]);
            auto const b = _mm_load_ss(data + indices[1]);
            auto const c = _mm_load_ss(data + indices[2]);
            auto const d = _mm_load_ss(data + indices[3]);
            auto const ac = _mm_unpacklo_ps(a, c);
            auto const bd = _mm_unpacklo_ps(b, d);
            return _mm_unpacklo_ps(ac, bd);
        }
        CPB_ALWAYS_INLINE
        static __m128 call(std::complex<float> const* data, std::int32_t const* indices) {
            // each complex<float> is 8 bytes: gather two of them as if they were doubles
            auto const r = Gather<float64x2>::call(reinterpret_cast<double const*>(data), indices);
            return _mm_castpd_ps(r);
        }
    };
#endif // SIMDPP_USE_SSE2
#if SIMDPP_USE_AVX && !SIMDPP_USE_AVX2
// AVX1 has no hardware gather: assemble 256-bit vectors from 128-bit halves
template<>
struct Gather<float64x4> {
    CPB_ALWAYS_INLINE
    static __m256d call(double const* data, std::int32_t const* indices) {
        // _mm256_set_pd takes its arguments from the high lane down to the low lane
        return _mm256_set_pd(data[indices[3]], data[indices[2]],
                             data[indices[1]], data[indices[0]]);
    }
    CPB_ALWAYS_INLINE
    static __m256d call(std::complex<double> const* data, std::int32_t const* indices) {
        auto const a = _mm256_castpd128_pd256(Gather<float64x2>::call(data, indices));
        auto const b = Gather<float64x2>::call(data, indices + 1);
        return _mm256_insertf128_pd(a, b, 1);
    }
};

template<>
struct Gather<float32x8> {
    CPB_ALWAYS_INLINE
    static __m256 call(float const* data, std::int32_t const* indices) {
        auto const a = _mm256_castps128_ps256(Gather<float32x4>::call(data, indices));
        auto const b = Gather<float32x4>::call(data, indices + 4);
        return _mm256_insertf128_ps(a, b, 1);
    }
    CPB_ALWAYS_INLINE
    static __m256 call(std::complex<float> const* data, std::int32_t const* indices) {
        // 8-byte complex<float> elements gathered as doubles, then reinterpreted
        auto const r = Gather<float64x4>::call(reinterpret_cast<double const*>(data), indices);
        return _mm256_castpd_ps(r);
    }
};
#elif SIMDPP_USE_AVX2
// AVX2 provides hardware gather instructions for the real-valued cases
template<>
struct Gather<float64x4> {
    CPB_ALWAYS_INLINE
    static __m256d call(double const* data, std::int32_t const* indices) {
        // NOTE(review): _mm_load_si128 requires `indices` to be 16-byte aligned
        auto const idx = _mm_load_si128(reinterpret_cast<__m128i const*>(indices));
        constexpr auto scale = sizeof(*data); // byte stride between indexed elements
        return _mm256_i32gather_pd(data, idx, scale);
    }
    CPB_ALWAYS_INLINE
    static __m256d call(std::complex<double> const* data, std::int32_t const* indices) {
        auto const a = _mm256_castpd128_pd256(Gather<float64x2>::call(data, indices));
        auto const b = Gather<float64x2>::call(data, indices + 1);
        return _mm256_insertf128_pd(a, b, 1);
    }
};

template<>
struct Gather<float32x8> {
    CPB_ALWAYS_INLINE
    static __m256 call(float const* data, std::int32_t const* indices) {
        // NOTE(review): _mm256_load_si256 requires `indices` to be 32-byte aligned
        auto const idx = _mm256_load_si256(reinterpret_cast<__m256i const*>(indices));
        constexpr auto scale = sizeof(*data);
        return _mm256_i32gather_ps(data, idx, scale);
    }
    CPB_ALWAYS_INLINE
    static __m256 call(std::complex<float> const* data, std::int32_t const* indices) {
        auto const r = Gather<float64x4>::call(reinterpret_cast<double const*>(data), indices);
        return _mm256_castpd_ps(r);
    }
};
#endif
// Generic case: a vector wider than one native register gathers into each
// base register separately
template<template<unsigned, class> class V, unsigned N>
struct Gather<V<N, void>> {
    using Vec = V<N, void>;
    using BaseVec = typename Vec::base_vector_type;
    static constexpr auto element_size = sizeof(typename Vec::element_type);

    template<class Scalar, class Index> CPB_ALWAYS_INLINE
    static Vec call(Scalar const* data, Index const* indices) {
        // number of `Scalar` elements (and thus indices) consumed per base register
        static constexpr auto index_step = Vec::base_length * element_size / sizeof(Scalar);
        Vec r;
        for (auto i = unsigned{0}; i < Vec::vec_length; ++i) {
            r.vec(i) = Gather<BaseVec>::call(data, indices + i * index_step);
        }
        return r;
    }
};
} // namespace detail
/**
 Make vector `V` by gathering N elements from `data` based on N indices from `indices`.
 The number of elements N is deduced from the type of vector `V`.

 Equivalent to:

     for (auto i = 0; i < N; ++i) {
         v[i] = data[indices[i]]
     }

 For complex `data`, each index addresses one whole complex element
 (see the `Gather` specializations above).
*/
template<class Vec, class Scalar, class Index> CPB_ALWAYS_INLINE
Vec gather(Scalar const* data, Index const* indices) {
    static_assert(std::is_integral<Index>::value, "");
    return detail::Gather<Vec>::call(data, indices);
}
/**
 Alternatively add and subtract elements

 Equivalent to:

     r0 = a0 - b0
     r1 = a1 + b1
     r2 = a2 - b2
     r3 = a3 + b3
     ...
*/
template<template<unsigned, class> class Vec, unsigned N, class E1, class E2> CPB_ALWAYS_INLINE
Vec<N, void> addsub(Vec<N, E1> const& a, Vec<N, E2> const& b) {
    // generic fallback: blend negated/original `b` lanes, then add
    return a + simd::shuffle4x2<0, 5, 2, 7>(simd::neg(b), b);
}

#if SIMDPP_USE_SSE3
// SSE3 has a dedicated addsub instruction
template<class E1, class E2> CPB_ALWAYS_INLINE
float32x4 addsub(float32<4, E1> const& a, float32<4, E2> const& b) {
    return _mm_addsub_ps(a.eval(), b.eval());
}

template<class E1, class E2> CPB_ALWAYS_INLINE
float64x2 addsub(float64<2, E1> const& a, float64<2, E2> const& b) {
    return _mm_addsub_pd(a.eval(), b.eval());
}
#endif // SIMDPP_USE_SSE3

#if SIMDPP_USE_AVX
template<class E1, class E2> CPB_ALWAYS_INLINE
float32x8 addsub(float32<8, E1> const& a, float32<8, E2> const& b) {
    return _mm256_addsub_ps(a.eval(), b.eval());
}

template<class E1, class E2> CPB_ALWAYS_INLINE
float64x4 addsub(float64<4, E1> const& a, float64<4, E2> const& b) {
    return _mm256_addsub_pd(a.eval(), b.eval());
}
#endif // SIMDPP_USE_AVX
/**
 Complex multiplication of vectors holding interleaved (real, imag) pairs
*/
template<template<unsigned, class> class Vec, unsigned N, class E1, class E2> CPB_ALWAYS_INLINE
Vec<N, void> complex_mul(Vec<N, E1> const& ab, Vec<N, E2> const& xy) {
    // (a + ib) * (x + iy) = (ax - by) + i(ay + bx)
    auto const aa = permute2<0, 0>(ab);   // duplicate real parts: (a, a)
    auto const axay = aa * xy;            // (ax, ay)
    auto const bb = permute2<1, 1>(ab);   // duplicate imaginary parts: (b, b)
    auto const yx = permute2<1, 0>(xy);   // swap within each pair: (y, x)
    auto const bybx = bb * yx;            // (by, bx)
    return addsub(axay, bybx);            // (ax - by, ay + bx)
}
/**
 Complex conjugate: flip the sign bit of every second lane (the imaginary parts)
*/
template<unsigned N, class E> CPB_ALWAYS_INLINE
float32<N> conjugate(float32<N, E> const& a) {
    return bit_xor(a, make_uint<uint32<N>>(0, 0x80000000));
}

template<unsigned N, class E> CPB_ALWAYS_INLINE
float64<N> conjugate(float64<N, E> const& a) {
    return bit_xor(a, make_uint<uint64<N>>(0, 0x8000000000000000));
}
/**
 Multiply and add `a * b + c` for real or complex arguments
 (the `scalar_t` template argument selects the overload)
*/
template<class scalar_t, class Vec, detail::requires_real<scalar_t> = 1> CPB_ALWAYS_INLINE
Vec madd_rc(Vec const& a, Vec const& b, Vec const& c) {
#if SIMDPP_USE_FMA3
    return fmadd(a, b, c); // single fused multiply-add instruction
#else
    return a * b + c;
#endif
}

template<class scalar_t, class Vec, detail::requires_complex<scalar_t> = 1> CPB_ALWAYS_INLINE
Vec madd_rc(Vec const& a, Vec const& b, Vec const& c) {
    return complex_mul(a, b) + c;
}
/**
 Conjugate multiply and add `conjugate(a) * b + c` for real or complex arguments
 (conjugation is a no-op for real scalars)
*/
template<class scalar_t, class Vec, detail::requires_real<scalar_t> = 1> CPB_ALWAYS_INLINE
Vec conjugate_madd_rc(Vec const& a, Vec const& b, Vec const& c) {
    return madd_rc<scalar_t>(a, b, c);
}

template<class scalar_t, class Vec, detail::requires_complex<scalar_t> = 1> CPB_ALWAYS_INLINE
Vec conjugate_madd_rc(Vec const& a, Vec const& b, Vec const& c) {
    return complex_mul(conjugate(a), b) + c;
}
/**
 Reduce add for real or complex arguments
*/
template<class scalar_t, class Vec, detail::requires_real<scalar_t> = 1> CPB_ALWAYS_INLINE
scalar_t reduce_add_rc(Vec const& a) {
    return reduce_add(a);
}

template<class scalar_t, class Vec, detail::requires_complex<scalar_t> = 1> CPB_ALWAYS_INLINE
scalar_t reduce_add_rc(Vec const& a) {
    // de-interleave: after transpose2, `real` holds the even lanes and `imag` the odd lanes
    auto real = a;
    auto imag = make_float<Vec>(0);
    transpose2(real, imag);
    return {reduce_add(real), reduce_add(imag)};
}
/**
 Add the imaginary part of a complex number to the real part (noop for real numbers)

     result = std::complex<real_t>(a.real() + a.imag(), 0);
*/
template<class scalar_t, class Vec, detail::requires_real<scalar_t> = 1> CPB_ALWAYS_INLINE
Vec reduce_imag(Vec const& a) {
    return a;
}

template<class scalar_t, class Vec, detail::requires_complex<scalar_t> = 1> CPB_ALWAYS_INLINE
Vec reduce_imag(Vec const& a) {
    // split interleaved pairs into real/imag lanes, then add them together
    auto real = a;
    auto imag = make_float<Vec>(0);
    transpose2(real, imag);
    return real + imag;
}
/**
 Load splat which works for real or complex arguments
*/
template<class Vec, class scalar_t> CPB_ALWAYS_INLINE
Vec load_splat_rc(scalar_t const* p) {
    return load_splat<Vec>(p);
}

template<class Vec> CPB_ALWAYS_INLINE
Vec load_splat_rc(std::complex<float> const* p) {
    // a complex<float> occupies 8 bytes: splatting it as a double broadcasts
    // the whole (re, im) pair into every 8-byte lane
    return bit_cast<Vec>(load_splat<select_vector_t<double>>(p));
}

template<class Vec, class real_t> CPB_ALWAYS_INLINE
Vec load_splat_rc(std::complex<real_t> const* p) {
    // broadcast the real and imaginary parts separately, then interleave them
    auto a = load_splat<Vec>(p);
    auto b = load_splat<Vec>(reinterpret_cast<real_t const*>(p) + 1);
    transpose2(a, b);
    return a;
}
}} // namespace cpb::simd
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/support/format.hpp | .hpp | 811 | 32 | #pragma once
#include <fmt/format.h>
#include <fmt/ostream.h>
namespace fmt {
/**
Convert number to string with SI suffix, e.g.: 14226 -> 14.2k, 5395984 -> 5.39M
*/
inline std::string with_suffix(double number) {
    struct Scaled {
        double value;
        char const* suffix;
    };
    static constexpr Scaled mapping[] = {{1e9, "G"}, {1e6, "M"}, {1e3, "k"}};

    // Pick the largest bucket the number (nearly) reaches, scanning G -> M -> k.
    // The 0.999 factor presumably compensates for "{:.3g}" rounding -- TODO confirm.
    auto scaled = Scaled{number, ""};
    for (auto const& bucket : mapping) {
        if (number > 0.999 * bucket.value) {
            scaled = Scaled{number / bucket.value, bucket.suffix};
            break;
        }
    }
    return fmt::format("{:.3g}{}", scaled.value, scaled.suffix);
}
inline std::string with_suffix(std::ptrdiff_t n) { return with_suffix(static_cast<double>(n)); }
} // namespace fmt
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/support/variant.hpp | .hpp | 685 | 25 | #pragma once
#ifdef _MSC_VER // suppress 'static_visitor' deprecation warning
# pragma warning(disable : 4996)
#endif
#include <mapbox/variant.hpp>
namespace cpb { namespace var {
using namespace mapbox::util;

/// Variant of a container with real elements
template<template<class> class... C>
using real = var::variant<C<float>..., C<double>...>;

/// Variant of a container with real or complex elements
template<template<class> class... C>
using complex = var::variant<C<float>..., C<std::complex<float>>...,
                             C<double>..., C<std::complex<double>>...>;

/// Empty tag type: `scalar_tag` selects one of the four scalar types at runtime
template<class T> struct tag {};
using scalar_tag = var::complex<tag>;
}} // namespace cpb::var
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/kpm/Moments.hpp | .hpp | 4,505 | 151 | #pragma once
#include "kpm/OptimizedHamiltonian.hpp"
#include <mutex>
namespace cpb { namespace kpm {
/// Moment data for a batch of vectors: either a single column (`ArrayX`)
/// or one column per vector (`ArrayXX`), in any of the four scalar types
using BatchData = var::complex<ArrayX, ArrayXX>;

/**
 Collects moments in the form of simple expectation values:
 `mu_n = <r|Tn(H)|r>` where `bra == ket == r`. It's only
 compatible with the diagonal `calc_moments` algorithms.
*/
struct DiagonalMoments {
    idx_t num_moments; ///< number of moments to collect
    var::complex<ArrayX> data; ///< one entry per moment

    DiagonalMoments(idx_t num_moments) : num_moments(num_moments) {}
};
/**
Same as `DiagonalMoments` but stores multiple moment vectors as columns of `data`.
*/
struct BatchDiagonalMoments {
    /// Merges new data (2nd arg) into the accumulated `data` (1st arg);
    /// the last two arguments are the vector index and total number of vectors
    using Collect = std::function<void (BatchData&, BatchData const&, idx_t, idx_t)>;

    idx_t num_moments;
    idx_t num_vectors;
    Collect collect;
    BatchData data;
    /// std::mutex is neither movable nor copyable; the unique_ptr indirection
    /// keeps this struct movable
    std::unique_ptr<std::mutex> mutex = std14::make_unique<std::mutex>();

    BatchDiagonalMoments(idx_t num_moments, idx_t num_vectors, Collect collect)
        : num_moments(num_moments), num_vectors(num_vectors), collect(std::move(collect)) {}

    /// `idx` is the index of the `new_data` within `data`.
    /// Serialized with a mutex so multiple workers may call this concurrently.
    void add(BatchData const& new_data, idx_t idx) {
        std::unique_lock<std::mutex> lk(*mutex);
        collect(data, new_data, idx, num_vectors);
    }
};
/**
Collects moments in the form of expectation values with an optional operator:
`mu_n = <beta|op Tn(H)|alpha>` where `beta != alpha`. The `op` can be empty,
in which case it is not applied.
*/
struct GenericMoments {
    idx_t num_moments;
    // NOTE(review): reference members -- the referenced vectors/operator must
    // outlive this struct (no copies are made)
    VectorXcd const& alpha;
    VectorXcd const& beta;
    SparseMatrixXcd const& op;
    var::complex<ArrayX> data;

    GenericMoments(idx_t num_moments, VectorXcd const& alpha, VectorXcd const& beta,
                   SparseMatrixXcd const& op)
        : num_moments(num_moments), alpha(alpha), beta(beta), op(op) {}
};
/**
Collects the computed moments in the form `mu_n = <l|Tn(H)|r>`
where `l` is a unit vector with `l[i] = 1` and `i` is some
Hamiltonian index. Multiple `l` vectors can be defined simply
by defining a vector of Hamiltonian indices `idx` where each
index is used to form an `l` vector and collect a moment.
The resulting `data` is a vector of vectors where each outer
index corresponds to an index from `idx`.
*/
struct MultiUnitMoments {
    /// One moment array per unit vector (outer index matches `idx`)
    template<class scalar_t> using Data = std::vector<ArrayX<scalar_t>>;

    idx_t num_moments;
    Indices const& idx; ///< Hamiltonian indices, one unit vector `l` per entry
    var::complex<Data> data;

    MultiUnitMoments(idx_t num_moments, Indices const& idx)
        : num_moments(num_moments), idx(idx) {}
};
/**
Collects vectors of the form `vec_n = op * Tn(H)|r>` into a matrix
of shape `num_moments * ham_size`. The sparse matrix operator `op`
is optional.
*/
struct DenseMatrixMoments {
    idx_t num_moments;
    VariantCSR op; ///< optional sparse operator (may be empty)
    var::complex<MatrixX> data; ///< `num_moments * ham_size` result matrix

    DenseMatrixMoments(idx_t num_moments, VariantCSR op = {})
        : num_moments(num_moments), op(std::move(op)) {}
};
/**
 Adds up moments for the stochastic KPM procedure
*/
struct BatchAccumulator {
    /// Fold `new_data` into `result`; `idx`/`num_vectors` locate it within the batch
    /// (implemented in the .cpp file)
    void operator()(BatchData& result, BatchData const& new_data, idx_t idx, idx_t num_vectors);

private:
    idx_t count = 0; ///< keeps track of how many moments have been summed up so far
};

/**
 Concatenate successive moment arrays
*/
struct BatchConcatenator {
    void operator()(BatchData& result, BatchData const& new_data, idx_t idx, idx_t num_vectors);

private:
    idx_t count = 0; ///< number of arrays concatenated so far
};
/// Product of two dense moment matrices (implemented in the .cpp file)
struct MomentMultiplication {
    var::complex<MatrixX> data;

    MomentMultiplication(idx_t num_moments, var::scalar_tag tag);

    void matrix_mul_add(DenseMatrixMoments const& a, DenseMatrixMoments const& b);
    void normalize(idx_t total);
};

/// Non-owning reference to any one of the moment collector types above
using MomentsRef = var::variant<DiagonalMoments*, BatchDiagonalMoments*, GenericMoments*,
                                MultiUnitMoments*, DenseMatrixMoments*>;
/// Apply the damping `kernel` in-place to the collected moment data
template<class M>
void apply_damping(M& moments, Kernel const& kernel) {
    var::apply_visitor(kernel, moments.data);
}
/// Visitor: convert moments to `complex<double>` and truncate to `num_moments`
struct ExtractData {
    idx_t num_moments;

    template<class scalar_t>
    ArrayXcd operator()(ArrayX<scalar_t> const& data) const {
        return data.template cast<std::complex<double>>().head(num_moments);
    }
};

/// Return the first `num_moments` moments as a `complex<double>` array
template<class M>
ArrayXcd extract_data(M const& moments, idx_t num_moments) {
    return moments.data.match(ExtractData{num_moments});
}
/// Return the velocity operator for the direction given by the `alpha` position vector
VariantCSR velocity(Hamiltonian const& hamiltonian, ArrayXf const& alpha);
}} // namespace cpb::kpm
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/kpm/reconstruct.hpp | .hpp | 6,134 | 146 | #pragma once
#include "kpm/Bounds.hpp"
namespace cpb { namespace kpm {
/// Reconstruct function `F` from the given moments and function arguments.
/// `F` can be any of the function objects defined below: SpectralDensity, GreensFunction, etc.
///
/// @param moments  moment collector whose `.data` variant is visited by an `F` instance
/// @param args     forwarded to `F`'s constructor
template<class F, class M, class... Args>
auto reconstruct(M const& moments, Args&&... args)
    -> decltype(var::apply_visitor(F{std::forward<Args>(args)...}, moments.data)) {
    return var::apply_visitor(F{std::forward<Args>(args)...}, moments.data);
}
// note: the stray `;` that used to follow this function body (an empty
// declaration which triggers -Wextra-semi) has been removed
/// Reconstruct spectral density based on the given KPM moments
/// f(E) = 2 / (a * pi * sqrt(1 - E^2)) * sum_n( moments * cos(n * acos(E)) )
struct SpectralDensity {
    ArrayXd const& energy; ///< output energy samples (unscaled)
    Scale<> const& s; ///< KPM energy scale; `scale.a` also rescales the result

    /// Single moment vector -> one column of results
    template<class scalar_t>
    ArrayXXdCM operator()(ArrayX<scalar_t> const& moments) const {
        using real_t = num::get_real_t<scalar_t>;
        auto const scale = Scale<real_t>(s);
        auto const scaled_energy = scale(energy.cast<real_t>());
        auto const real_moments = ArrayX<real_t>(moments.real());
        auto const ns = make_integer_range<real_t>(moments.size());
        auto const k = real_t{2 / constant::pi} / scale.a;

        // Chebyshev series: cos(n * acos(E)) == Tn(E)
        return transform<ArrayX>(scaled_energy, [&](real_t E) {
            return k / sqrt(1 - E*E) * sum(real_moments * cos(ns * acos(E)));
        }).template cast<double>();
    }

    /// Multiple moment vectors (matrix columns) -> matrix of results
    template<class scalar_t>
    ArrayXXdCM operator()(ArrayXX<scalar_t> const& moments) const {
        using real_t = num::get_real_t<scalar_t>;
        auto const num_moments = moments.rows();
        auto const scale = Scale<real_t>(s);
        auto const scaled_energy = scale(energy.cast<real_t>());
        auto const real_moments = ArrayXX<real_t>(moments.real());
        auto const ns = make_integer_range<real_t>(num_moments);
        auto const k = real_t{2 / constant::pi} / scale.a;

        auto result = ArrayXXdCM(scaled_energy.size(), moments.cols());
        for (auto i = idx_t{0}; i < scaled_energy.size(); ++i) {
            auto const E = scaled_energy[i];
            auto const cos_n = cos(ns * acos(E)).eval(); // shared for all columns at this E
            auto const r = k / sqrt(1 - E*E) * (real_moments.colwise() * cos_n).colwise().sum();
            result.row(i) = r.transpose().template cast<double>();
        }
        return result;
    }
};
/// Reconstruct Green's function based on the given KPM moments
/// g(E) = -2*i / (a * sqrt(1 - E^2)) * sum_n( moments * exp(-i*n*acos(E)) )
struct GreensFunction {
    ArrayXd const& energy; ///< output energy samples (unscaled)
    Scale<> const& s; ///< KPM energy scale; `scale.a` also rescales the result

    template<class scalar_t>
    ArrayXcd operator()(ArrayX<scalar_t> const& moments) const {
        using real_t = num::get_real_t<scalar_t>;
        using complex_t = num::get_complex_t<scalar_t>;
        constexpr auto i1 = complex_t{constant::i1}; // imaginary unit
        auto const scale = Scale<real_t>(s);
        auto const scaled_energy = scale(energy.cast<real_t>());
        auto const ns = make_integer_range<real_t>(moments.size());
        auto const k = -real_t{2} * i1 / scale.a;

        return transform<ArrayX>(scaled_energy.eval(), [&](real_t E) {
            return k / sqrt(1 - E*E) * sum(moments * exp(-i1 * ns * acos(E)));
        }).template cast<std::complex<double>>();
    }

    /// Overload: reconstruct each moment vector independently
    template<class scalar_t>
    std::vector<ArrayXcd> operator()(std::vector<ArrayX<scalar_t>> const& moments_vector) const {
        return transform<std::vector>(moments_vector, [&](ArrayX<scalar_t> const& moments) {
            return operator()(moments);
        });
    }
};
/// Reconstruct the Kubo-Bastin formula for the conductivity:
/// sigma(mu, T) = 4 / a^2 * int_-1^1 fd(E) / (1 - E^2)^2 sum(momenta * gamma(E)) dE
/// The resulting conductivity is in units of `e^2 / h * Omega` where Omega is the volume.
struct KuboBastin {
    ArrayXd const& chemical_pot; ///< chemical potential sample points (unscaled)
    ArrayXd const& energy_samples; ///< integration grid over the energy axis (unscaled)
    double temperature; ///< for the Fermi-Dirac distribution
    Scale<> s; ///< KPM energy scale

    template<class scalar_t>
    ArrayXcd operator()(MatrixX<scalar_t> const& moments) const {
        using real_t = num::get_real_t<scalar_t>;
        using complex_t = num::get_complex_t<scalar_t>;
        auto const scale = Scale<real_t>(s);
        auto const inv_kbt_sc = static_cast<real_t>(scale.a / (constant::kb * temperature));
        auto const num_moments = moments.rows();
        auto const scaled_chemical_potential = scale(chemical_pot.cast<real_t>());
        auto const scaled_energy_samples = scale(energy_samples.cast<real_t>());

        // gamma(E): num_moments x num_moments matrix of Chebyshev factors at energy E
        auto gamma = [&](real_t en) {
            constexpr auto i1 = static_cast<complex_t>(constant::i1);
            using Row = Eigen::Array<real_t, 1, Eigen::Dynamic>;
            auto const ns = Row::LinSpaced(num_moments, 0, static_cast<real_t>(num_moments - 1));
            auto const ns_2d = ns.replicate(num_moments, 1).eval();
            auto const sqrt_n = en - i1 * ns_2d * sqrt(real_t{1} - en * en);
            auto const exp_n = exp(i1 * acos(en) * ns_2d);
            auto const t_m = cos(acos(en) * ns_2d.transpose());
            auto const g_p = MatrixX<complex_t>(sqrt_n * exp_n * t_m);
            return (g_p + g_p.adjoint()).array().eval();
        };

        auto const coeff = (scaled_energy_samples.maxCoeff() - scaled_energy_samples.minCoeff())
                           / static_cast<real_t>(2 * scaled_energy_samples.size());
        // trapezoid-like rule: endpoints get half weight
        auto integrate = [&](ArrayX<complex_t> const& func) {
            return coeff * (real_t{2} * func.sum() - func(0) - func(func.size() - 1));
        };
        auto fermi_dirac = [&](real_t mi) {
            return real_t{1} / (real_t{1} + exp((scaled_energy_samples - mi) * inv_kbt_sc));
        };

        // precompute the moment/gamma contraction at every energy sample
        auto sum_nm = transform<ArrayX>(scaled_energy_samples, [&](real_t en) {
            auto const k = real_t{1} / ((real_t{1} - en * en) * (real_t{1} - en * en));
            return k * sum(moments.array() * gamma(en));
        });

        auto const prefix = scalar_t{4} / (scale.a * scale.a);
        return transform<ArrayX>(scaled_chemical_potential, [&](real_t mu) {
            return prefix * integrate(fermi_dirac(mu) * sum_nm);
        }).template cast<std::complex<double>>();
    }
};
}} // namespace cpb::kpm
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/kpm/Stats.hpp | .hpp | 1,513 | 48 | #pragma once
#include "utils/Chrono.hpp"
#include "detail/config.hpp"
#include "support/format.hpp"
namespace cpb { namespace kpm {
/// Format a single report line: compact inline form for `shortform`,
/// otherwise a padded table row terminated by a newline.
inline std::string format_report(std::string msg, Chrono const& time, bool shortform) {
    if (shortform) {
        return fmt::format("{:s} [{}] ", msg, time);
    }
    return fmt::format("- {:<80s} | {}\n", msg, time);
}
struct AlgorithmConfig;
class OptimizedHamiltonian;
/**
Stats of the KPM calculation
*/
struct Stats {
    idx_t num_moments;     ///< number of KPM expansion moments computed
    bool uses_full_system; ///< was a full matrix-vector product ever required?
    size_t nnz; ///< original number of processed non-zero matrix elements (over all iterations)
    size_t opt_nnz; ///< same as above, but with optimizations applied (if any)
    size_t vec; ///< number of elements in a single KPM vector times the number of moments
    size_t opt_vec; ///< same as above, but with optimizations applied (if any)
    double multiplier = 1; ///< account for any repeated calculations
    size_t matrix_memory; ///< memory used by the Hamiltonian matrix
    size_t vector_memory; ///< memory used by a single KPM vector
    Chrono hamiltonian_timer; ///< time spent building/optimizing the Hamiltonian
    Chrono moments_timer;     ///< time spent in the main KPM moments loop

    /// Reinitialize all counters for a new calculation
    void reset(idx_t num_moments, OptimizedHamiltonian const& oh,
               AlgorithmConfig const& ac, idx_t multiplier = 1);

    /// Non-zero elements per second
    double eps() const;
    /// Approximate number of executed mul + add operations per second
    double ops(bool is_diagonal, bool non_unit_vector) const;
    /// Human-readable summary of the timers and throughput
    std::string report(bool shortform) const;
};
}} // namespace cpb::kpm
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/kpm/Bounds.hpp | .hpp | 2,393 | 73 | #pragma once
#include "hamiltonian/Hamiltonian.hpp"
#include "utils/Chrono.hpp"
namespace cpb { namespace kpm {
/**
The KPM scaling factors `a` and `b`
*/
template<class real_t = double>
struct Scale {
    static constexpr auto tolerance = 0.01f; ///< needed because the energy bounds are not precise
    real_t a = 0; ///< half the (padded) energy range
    real_t b = 0; ///< center of the energy range

    Scale() = default;
    /// Compute the scaling factors from the spectrum bounds [min_energy, max_energy]
    Scale(real_t min_energy, real_t max_energy)
        : a(0.5f * (max_energy - min_energy) * (1 + tolerance)),
          b(0.5f * (max_energy + min_energy)) {
        if (std::abs(b / a) < 0.01f * tolerance) {
            b = 0; // rounding to zero saves space in the sparse matrix
        }
    }
    /// Precision-converting constructor (e.g. Scale<double> -> Scale<float>)
    template<class T>
    Scale(Scale<T> const& other)
        : a(static_cast<real_t>(other.a)), b(static_cast<real_t>(other.b)) {}

    /// A default-constructed Scale (a == 0) means "not yet computed".
    /// Marked `const` (fix) so validity can also be queried on a const object.
    explicit operator bool() const { return a != 0; }

    /// Apply the scaling factors to a vector
    ArrayX<real_t> operator()(ArrayX<real_t> const& v) const { return (v - b) / a; }
};
/**
Min and max eigenvalues of the Hamiltonian
The bounds can be determined automatically using the Lanczos procedure,
or set manually by the user. Also computes the KPM scaling factors a and b.
*/
class Bounds {
public:
    /// Bounds will be computed lazily via Lanczos the first time they are requested
    Bounds(Hamiltonian const& hamiltonian, double precision_percent)
        : hamiltonian(hamiltonian), precision_percent(precision_percent) {}
    /// Set the energy bounds manually, therefore skipping the Lanczos computation
    Bounds(double min_energy, double max_energy) : min(min_energy), max(max_energy) {}

    // Non-const by design: the first call triggers the (cached) Lanczos computation
    double min_energy() { compute_bounds(); return min; }
    double max_energy() { compute_bounds(); return max; }
    /// The KPM scaling factors a and b
    Scale<> scaling_factors() { compute_bounds(); return {min, max}; }
    /// Return an array with `size` linearly spaced values within the bounds
    ArrayXd linspaced(idx_t size) { return ArrayXd::LinSpaced(size, min_energy(), max_energy()); }
    /// Summary of the Lanczos convergence (iterations, timing)
    std::string report(bool shortform = false) const;

private:
    /// Compute the scaling factors using the Lanczos procedure
    void compute_bounds();

private:
    double min = .0; ///< the lowest eigenvalue
    double max = .0; ///< the highest eigenvalue
    int lanczos_loops = 0; ///< number of iterations needed to converge the Lanczos procedure
    Hamiltonian hamiltonian;
    double precision_percent;
    Chrono timer; ///< times the Lanczos computation for `report()`
};
}} // namespace cpb::kpm
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/kpm/calc_moments.hpp | .hpp | 5,725 | 149 | #pragma once
namespace cpb { namespace kpm { namespace calc_moments {
// Hand-rolled `void_t`: routed through a struct so the parameter pack reliably
// participates in SFINAE on pre-C++17 compilers (std::void_t is C++17)
template<class...> struct void_type { using type = void; };
template<class... Ts> using void_t = typename void_type<Ts...>::type;

// Detection idiom: a collector counts as "diagonal" if it provides a
// static `zero()` member (see DiagonalCollector), otherwise it's off-diagonal
template<class Collector, class = void>
struct is_diagonal : std::false_type {};
template<class Collector>
struct is_diagonal<Collector, void_t<decltype(Collector::zero())>> : std::true_type {};

// enable_if aliases used to select between the diagonal and off-diagonal
// overloads of `basic()` and `interleaved()` below
template<class Collector>
using requires_diagonal = typename std::enable_if<is_diagonal<Collector>::value, int>::type;
template<class Collector>
using requires_offdiagonal = typename std::enable_if<!is_diagonal<Collector>::value, int>::type;
/************************************************************************\
Diagonal KPM implementation: the left and right vectors are identical,
i.e. `mu_n = <r|Tn(H)|r>` where `bra == ket == r`. It's 1.5x to 2x times
faster than the general (off-diagonal) version.
\************************************************************************/
/**
Basic implementation with an optional size optimization (when applicable)
When `opt_size == true`, the optimal size optimization is used. This requires
a specially ordered matrix as input. This matrix is divided into slices which
are mapped by `SliceMap`. At each iteration, the computation is performed only
for a subset of the total system which contains non-zero values. The speedup
is about equal to the amount of removed work.
*/
template<class Collector, class Vector, class Matrix, requires_diagonal<Collector> = 1>
void basic(Collector& collect, Vector r0, Vector r1, Matrix const& h2,
SliceMap const& map, bool opt_size) {
auto const num_moments = collect.size();
assert(num_moments % 2 == 0);
auto const zero = Collector::zero();
for (auto n = 2; n <= num_moments / 2; ++n) {
auto m2 = zero, m3 = zero;
auto const size = opt_size ? map.optimal_size(n, num_moments) : h2.rows();
compute::kpm_spmv_diagonal(0, size, h2, r1, r0, m2, m3);
collect(n, m2, m3);
r1.swap(r0);
}
}
/**
Optimized implementation: interleave two consecutive moment calculations
Requires a specially ordered matrix as input.
The two concurrent operations share some of the same data, thus promoting cache
usage and reducing main memory bandwidth.
*/
template<class Collector, class Vector, class Matrix, requires_diagonal<Collector> = 1>
void interleaved(Collector& collect, Vector r0, Vector r1, Matrix const& h2,
                 SliceMap const& map, bool opt_size) {
    auto const num_moments = collect.size();
    assert((num_moments - 2) % 4 == 0);

    // Interleave moments `n` and `n + 1` for better data locality
    // Diagonal + interleaved computes 4 moments per iteration
    auto const zero = Collector::zero();
    for (auto n = idx_t{2}; n <= num_moments / 2; n += 2) {
        auto m2 = zero, m3 = zero, m4 = zero, m5 = zero;

        // Without `opt_size`, both passes cover the full slice range
        auto const max1 = opt_size ? map.index(n, num_moments) : map.last_index();
        auto const max2 = opt_size ? map.index(n + 1, num_moments) : map.last_index();

        // The second spmv trails the first by one slice: both touch
        // neighboring matrix/vector data, which promotes cache reuse
        for (auto k = idx_t{0}, start0 = idx_t{0}, start1 = idx_t{0}; k <= max1; ++k) {
            auto const end0 = map[k];
            // the trailing pass catches up to `max2` on the final slice
            auto const end1 = (k == max1) ? map[max2] : start0;

            compute::kpm_spmv_diagonal(start0, end0, h2, r1, r0, m2, m3);
            compute::kpm_spmv_diagonal(start1, end1, h2, r0, r1, m4, m5);

            start1 = end1;
            start0 = end0;
        }

        collect(n, m2, m3);
        collect(n + 1, m4, m5);
    }
}
/******************************************************************\
Off-diagonal KPM implementation: different left and right vectors,
i.e. `mu_n = <l|Tn(H)|r>` where `l != r`. The `Moments` collector
contains the left vector.
\******************************************************************/
/**
Basic implementation with an optional size optimization (when applicable)
See the diagonal version of this function for more information.
*/
template<class Collector, class Vector, class Matrix, requires_offdiagonal<Collector> = 1>
void basic(Collector& collect, Vector r0, Vector r1, Matrix const& h2,
           SliceMap const& map, bool opt_size) {
    auto const num_moments = collect.size();
    // One moment per iteration: the collector applies the left vector
    // (and any operator) to the full result vector `r1`
    for (auto n = idx_t{2}; n < num_moments; ++n) {
        auto const size = opt_size ? map.optimal_size(n, num_moments) : h2.rows();
        compute::kpm_spmv(0, size, h2, r1, r0); // r0 = matrix * r1 - r0
        r1.swap(r0);
        collect(n, r1);
    }
}
/**
Optimized implementation: interleave two consecutive moment calculations
See the diagonal version of this function for more information.
*/
template<class C, class Vector, class Matrix, requires_offdiagonal<C> = 1>
void interleaved(C& collect, Vector r0, Vector r1, Matrix const& h2,
                 SliceMap const& map, bool opt_size) {
    auto const num_moments = collect.size();
    assert(num_moments % 2 == 0);

    // Interleave moments `n` and `n + 1` for better data locality
    for (auto n = idx_t{2}; n < num_moments; n += 2) {
        auto const max1 = opt_size ? map.index(n, num_moments) : map.last_index();
        auto const max2 = opt_size ? map.index(n + 1, num_moments) : map.last_index();

        // Second spmv trails the first by one slice (same scheme as the
        // diagonal version above)
        for (auto k = idx_t{0}, start0 = idx_t{0}, start1 = idx_t{0}; k <= max1; ++k) {
            auto const end0 = map[k];
            auto const end1 = (k == max1) ? map[max2] : start0;
            compute::kpm_spmv(start0, end0, h2, r1, r0);
            compute::kpm_spmv(start1, end1, h2, r0, r1);
            // NOTE(review): the diagonal version assigns `start1 = end1` here;
            // `start1 = start0` is equivalent for every iteration where
            // `start1` is still read (end1 == old start0 when k < max1)
            start1 = start0;
            start0 = end0;
        }

        collect(n, r0);
        collect(n + 1, r1);
    }
}
}}} // namespace cpb::kpm::calc_moments
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/kpm/Core.hpp | .hpp | 3,086 | 93 | #pragma once
#include "Model.hpp"
#include "hamiltonian/Hamiltonian.hpp"
#include "kpm/Bounds.hpp"
#include "kpm/Config.hpp"
#include "kpm/OptimizedHamiltonian.hpp"
#include "kpm/Starter.hpp"
#include "kpm/Moments.hpp"
#include "kpm/Stats.hpp"
#include "utils/Chrono.hpp"
namespace cpb { namespace kpm {
/**
Does the actual work of computing KPM moments
Different derived classes are optimized for specific hardware (CPU, GPU).
*/
class Compute {
public:
    /// Implementations override `moments()` with hardware-specific kernels
    class Interface {
    public:
        virtual ~Interface() = default;
        virtual void moments(MomentsRef, Starter const&, AlgorithmConfig const&,
                             OptimizedHamiltonian const&) const = 0;
    };

    /// Type-erasing constructor: wraps any concrete implementation
    /// (e.g. DefaultCompute) in a shared, immutable handle
    template<class T>
    Compute(T x) : ptr(std::make_shared<T>(std::move(x))) {}

    /// Forward calls to the wrapped implementation
    Interface const* operator->() const { return ptr.get(); }

private:
    std::shared_ptr<Interface const> ptr; // shared so Compute is cheap to copy
};
/**
Low-level KPM implementation
No Model information (sublattice, positions, etc.), just the Hamiltonian matrix and indices.
*/
class Core {
public:
    explicit Core(Hamiltonian const& h, Compute const& compute, Config const& config = {});

    /// Replace the Hamiltonian; subsequent calculations use the new matrix
    void set_hamiltonian(Hamiltonian const& h);

    Config const& get_config() const { return config; }
    Stats const& get_stats() const { return stats; }

    /// The KPM scaling factors `a` and `b`
    Scale<> scaling_factors() { return bounds.scaling_factors(); }
    /// Information about what happened during the last calculation
    std::string report(bool shortform = false) const;

    /// Return KPM moments in the form `mu_n = <beta|op Tn(H)|alpha>`
    ArrayXcd moments(idx_t num_moments, VectorXcd const& alpha, VectorXcd const& beta,
                     SparseMatrixXcd const& op);
    /// LDOS at the given Hamiltonian indices for the energy range and broadening
    ArrayXXdCM ldos(std::vector<idx_t> const& idx, ArrayXd const& energy, double broadening);
    /// DOS for the given energy range and broadening
    ArrayXd dos(ArrayXd const& energy, double broadening, idx_t num_random);
    /// Green's function matrix element (row, col) for the given energy range
    ArrayXcd greens(idx_t row, idx_t col, ArrayXd const& energy, double broadening);
    /// Multiple Green's matrix elements for a single `row` and multiple `cols`
    std::vector<ArrayXcd> greens_vector(idx_t row, std::vector<idx_t> const& cols,
                                        ArrayXd const& energy, double broadening);
    /// Kubo-Bastin conductivity in the directions defined by the `left` and `right` coordinates
    ArrayXcd conductivity(ArrayXf const& left_coords, ArrayXf const& right_coords,
                          ArrayXd const& chemical_potential, double broadening,
                          double temperature, idx_t num_random, idx_t num_points);

private:
    /// Run the moment computation and record timing into `stats`
    void timed_compute(MomentsRef, Starter const&, AlgorithmConfig const&);

private:
    Hamiltonian hamiltonian;
    Compute compute;   ///< hardware-specific moment computation backend
    Config config;
    Stats stats;       ///< timing/throughput info from the last calculation
    Bounds bounds;     ///< lazily computed spectrum bounds
    OptimizedHamiltonian optimized_hamiltonian;
};
}} // namespace cpb::kpm
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/kpm/Starter.hpp | .hpp | 4,187 | 121 | #pragma once
#include "kpm/OptimizedHamiltonian.hpp"
#include "numeric/dense.hpp"
#include "compute/detail.hpp"
#include <mutex>
namespace cpb { namespace kpm {
/// Produce the r0 starter vector for the KPM procedure
struct Starter {
    /// Factory: given a scalar-type tag, produce one starter vector of that type
    using Make = std::function<var::complex<VectorX> (var::scalar_tag)>;

    Make make;
    idx_t vector_size;       ///< number of elements in each produced vector
    mutable idx_t count = 0; ///< the number of vectors this starter has produced

    // Heap-allocated so Starter stays movable (std::mutex itself is not)
    std::unique_ptr<std::mutex> mutex = std14::make_unique<std::mutex>();
    void lock() const { mutex->lock(); }
    void unlock() const { mutex->unlock(); }

    Starter(Make make, idx_t vector_size) : make(std::move(make)), vector_size(vector_size) {}
};
/// Starter vector equal to the constant `alpha` (`oh` is needed for reordering)
Starter constant_starter(OptimizedHamiltonian const& oh, VectorXcd const& alpha);
/// Unit vector starter (`oh` encodes the unit index)
Starter unit_starter(OptimizedHamiltonian const& oh);
/// Starter vector for the stochastic KPM procedure (`oh` is needed for size and reordering)
Starter random_starter(OptimizedHamiltonian const& oh, VariantCSR const& op = {});
/// Construct a concrete scalar type r0 vector based on a `Starter`
template<class scalar_t>
VectorX<scalar_t> make_r0(Starter const& starter, var::tag<VectorX<scalar_t>>, idx_t /*cols=1*/) {
    ++starter.count; // single-vector variant always produces exactly one
    return starter.make(var::tag<scalar_t>{}).template get<VectorX<scalar_t>>();
}
// Batched variant: produce `cols` starter vectors as the columns of a matrix
template<class scalar_t>
MatrixX<scalar_t> make_r0(Starter const& starter, var::tag<MatrixX<scalar_t>>, idx_t cols) {
    starter.count += cols;
    auto r0 = MatrixX<scalar_t>(starter.vector_size, cols);
    for (auto i = idx_t{0}; i < cols; ++i) {
        r0.col(i) = starter.make(var::tag<scalar_t>{}).template get<VectorX<scalar_t>>();
    }
    return r0;
}
/// Return the vector following the starter: r1 = h2 * r0 * 0.5
/// -> multiply by 0.5 because h2 was pre-multiplied by 2
template<class scalar_t>
VectorX<scalar_t> make_r1(SparseMatrixX<scalar_t> const& h2, VectorX<scalar_t> const& r0) {
    // Manual CSR sparse matrix-vector product: data/indices/indptr are the
    // standard CSR value, column-index and row-pointer arrays.
    auto const size = h2.rows();
    auto const data = h2.valuePtr();
    auto const indices = h2.innerIndexPtr();
    auto const indptr = h2.outerIndexPtr();

    auto r1 = VectorX<scalar_t>(size);
    // idx_t counter (fix): matches the 64-bit row count instead of `int`
    for (auto row = idx_t{0}; row < size; ++row) {
        auto tmp = scalar_t{0};
        for (auto n = indptr[row]; n < indptr[row + 1]; ++n) {
            tmp += compute::detail::mul(data[n], r0[indices[n]]);
        }
        r1[row] = tmp * scalar_t{0.5}; // undo the pre-multiplication of h2 by 2
    }
    return r1;
}
// Batched variant: r1 = h2 * r0 * 0.5 where the columns of r0 are multiple
// KPM vectors processed together (one row of results per sparse row).
template<class scalar_t>
MatrixX<scalar_t> make_r1(SparseMatrixX<scalar_t> const& h2, MatrixX<scalar_t> const& r0) {
    auto const size = h2.rows();
    auto const data = h2.valuePtr();
    auto const indices = h2.innerIndexPtr();
    auto const indptr = h2.outerIndexPtr();

    using Row = Eigen::Matrix<scalar_t, 1, Eigen::Dynamic>;
    auto tmp = Row(r0.cols()); // accumulator reused across rows (no realloc)
    auto r1 = MatrixX<scalar_t>(r0.rows(), r0.cols());
    // idx_t counter (fix): matches the 64-bit row count instead of `int`
    for (auto row = idx_t{0}; row < size; ++row) {
        tmp.setZero();
        for (auto n = indptr[row]; n < indptr[row + 1]; ++n) {
            tmp += data[n] * r0.row(indices[n]);
        }
        r1.row(row) = tmp * scalar_t{0.5}; // undo the pre-multiplication of h2 by 2
    }
    return r1;
}
// ELLPACK variant of r1 = h2 * r0 * 0.5: iterate over the fixed number of
// non-zeros per row in the outer loop, which is friendlier to vectorization.
template<class scalar_t>
VectorX<scalar_t> make_r1(num::EllMatrix<scalar_t> const& h2, VectorX<scalar_t> const& r0) {
    auto const size = h2.rows();
    auto r1 = VectorX<scalar_t>::Zero(size).eval();
    // idx_t counters (fix): consistent with the codebase's 64-bit index type
    for (auto n = idx_t{0}; n < h2.nnz_per_row; ++n) {
        for (auto row = idx_t{0}; row < size; ++row) {
            auto const a = h2.data(row, n);
            auto const b = r0[h2.indices(row, n)];
            r1[row] += compute::detail::mul(a, b) * scalar_t{0.5}; // 0.5 undoes h2's pre-x2
        }
    }
    return r1;
}
// Batched ELLPACK variant: each column of r0 is an independent KPM vector
template<class scalar_t>
MatrixX<scalar_t> make_r1(num::EllMatrix<scalar_t> const& h2, MatrixX<scalar_t> const& r0) {
    auto const size = h2.rows();
    auto r1 = MatrixX<scalar_t>::Zero(r0.rows(), r0.cols()).eval();
    // idx_t counters (fix): consistent with the codebase's 64-bit index type
    for (auto n = idx_t{0}; n < h2.nnz_per_row; ++n) {
        for (auto row = idx_t{0}; row < size; ++row) {
            auto const a = h2.data(row, n);
            r1.row(row) += a * r0.row(h2.indices(row, n)) * scalar_t{0.5}; // 0.5 undoes h2's pre-x2
        }
    }
    return r1;
}
}} // namespace cpb::kpm
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/kpm/OptimizedHamiltonian.hpp | .hpp | 8,361 | 199 | #pragma once
#include "hamiltonian/Hamiltonian.hpp"
#include "kpm/Bounds.hpp"
#include "kpm/Config.hpp"
#include "numeric/sparse.hpp"
#include "numeric/ellmatrix.hpp"
#include "support/variant.hpp"
#include "utils/Chrono.hpp"
#include "detail/macros.hpp"
namespace cpb { namespace kpm {
/**
Source and destination indices for Hamiltonian optimization and local KPM calculations
*/
struct Indices {
    ArrayXi src;  ///< source (row) indices, e.g. <l| in <l|Tn(H)|r>
    ArrayXi dest; ///< destination (column) indices, e.g. |r>

    Indices() = default;
    /// Single source -> single destination element
    Indices(idx_t source, idx_t destination) : src(1), dest(1) {
        src[0] = static_cast<storage_idx_t>(source);
        dest[0] = static_cast<storage_idx_t>(destination);
    }
    /// Single source -> multiple destinations
    Indices(idx_t source, ArrayXi destination)
        : src(1), dest(std::move(destination)) { src[0] = static_cast<storage_idx_t>(source); }
    Indices(idx_t source, std::vector<idx_t> const& destination)
        : Indices(source, eigen_cast<ArrayX>(destination).cast<storage_idx_t>()) {}
    /// Multiple sources and destinations
    Indices(ArrayXi source, ArrayXi destination)
        : src(std::move(source)), dest(std::move(destination)) {}
    Indices(std::vector<idx_t> const& source, std::vector<idx_t> const& destination)
        : Indices(eigen_cast<ArrayX>(source).cast<storage_idx_t>(),
                  eigen_cast<ArrayX>(destination).cast<storage_idx_t>()) {}

    /// Indicates a single element on the main diagonal
    bool is_diagonal() const {
        return src.size() == dest.size() && (src == dest).all();
    }

    friend bool operator==(Indices const& l, Indices const& r) {
        return l.src.size() == r.src.size() && (l.src == r.src).all()
               && l.dest.size() == r.dest.size() && (l.dest == r.dest).all();
    }
};
/**
Optimized slice mapping for `optimal_size` and `interleaved` KPM algorithms.
*/
class SliceMap {
    std::vector<storage_idx_t> data; ///< optimized Hamiltonian indices marking slice borders
    idx_t src_offset = 0;  ///< needed when there are multiple source indices (start at offset)
    idx_t dest_offset = 0; ///< indicates the slice of the highest destination index

public:
    /// Simple constructor for non-optimized case -> single slice equal to full system size
    explicit SliceMap(idx_t system_size) { data = {static_cast<storage_idx_t>(system_size)}; }
    /// Map for optimized matrix. The `optimized_idx` is needed to compute the `offset`
    SliceMap(std::vector<storage_idx_t> border_indices, Indices const& optimized_idx);

    /// Return an index into `data`, indicating the optimal system size for
    /// the calculation of KPM moment number `n` out of total `num_moments`
    idx_t index(idx_t n, idx_t num_moments) const {
        assert(n < num_moments);

        auto const mid = (num_moments - 1 + dest_offset - src_offset) / 2;
        auto const max = std::min(last_index(), mid + src_offset);
        if (n < mid) {
            return std::min(max, n + src_offset); // the size grows in the beginning
        } else { // constant in the middle and shrinking near the end as reverse `n`
            return std::min(max, num_moments - 1 - n + dest_offset);
        }
    }

    /// Last index into `data`
    idx_t last_index() const { return static_cast<idx_t>(data.size()) - 1; }

    /// Return the optimal system size for KPM moment number `n` out of total `num_moments`
    idx_t optimal_size(idx_t n, idx_t num_moments) const {
        return data[index(n, num_moments)];
    }

    /// Would calculating this number of moments ever do a full matrix-vector multiplication?
    bool uses_full_system(idx_t num_moments) const {
        return static_cast<idx_t>(data.size()) < num_moments / 2;
    }

    idx_t operator[](idx_t i) const { return data[i]; } // system size at slice border `i`
    std::vector<storage_idx_t> const& get_data() const { return data; }
    idx_t get_src_offset() const { return src_offset; }
    idx_t get_dest_offset() const { return dest_offset; }
};
/**
Stores a scaled Hamiltonian `(H - b)/a` which limits it to (-1, 1) boundaries required for KPM.
In addition, three optimisations are applied (last two are optional, see `MatrixConfig`):
1) The matrix is multiplied by 2. This benefits most calculations (e.g. `y = 2*H*x - y`),
because the 2x multiplication is done only once, but it will need to be divided by 2
when the original element values are needed (very rarely).
2) Reorder the elements so that target indices are placed at the start of the matrix.
This produces a `SliceMap` which may be used to reduce calculation time by skipping
sparse matrix-vector multiplication of zero values or by interleaving calculations
of neighboring slices.
3) Convert the sparse matrix into the ELLPACK format. The sparse matrix-vector
multiplication algorithm for this format is much easier to vectorize compared
to the classic CSR format.
*/
class OptimizedHamiltonian {
public:
    /// The optimized matrix may be CSR or ELLPACK, in any scalar precision
    using VariantMatrix = var::complex<SparseMatrixX, num::EllMatrix>;

    OptimizedHamiltonian(Hamiltonian const& h, MatrixFormat const& mf, bool reorder)
        : original_h(h), slice_map(h.rows()), matrix_format(mf), is_reordered(reorder) {}

    /// Create the optimized Hamiltonian targeting specific indices and scale factors
    void optimize_for(Indices const& idx, Scale<> scale);

    /// Apply new Hamiltonian index ordering to a container
    template<class Vector>
    void reorder(Vector& v) const {
        if (reorder_map.empty()) { return; } // no reordering was performed
        assert(reorder_map.size() == static_cast<size_t>(v.size()));
        auto reordered_v = Vector(v.size());
        for (auto i = idx_t{0}; i < reordered_v.size(); ++i) {
            reordered_v[reorder_map[i]] = v[i];
        }
        v.swap(reordered_v);
    }

    /// Apply the index ordering to both rows and columns of a sparse matrix
    template<class scalar_t>
    void reorder(SparseMatrixX<scalar_t>& matrix) const {
        if (reorder_map.empty()) { return; }
        auto reordered_matrix = SparseMatrixX<scalar_t>(matrix.rows(), matrix.cols());
        auto const reserve_per_row = static_cast<int>(sparse::max_nnz_per_row(matrix));
        reordered_matrix.reserve(ArrayXi::Constant(matrix.rows(), reserve_per_row));
        sparse::make_loop(matrix).for_each([&](idx_t row, idx_t col, scalar_t value) {
            reordered_matrix.insert(reorder_map[row], reorder_map[col]) = value;
        });
        reordered_matrix.makeCompressed();
        matrix.swap(reordered_matrix);
    }

    idx_t size() const { return original_h.rows(); }
    Indices const& idx() const { return optimized_idx; }
    SliceMap const& map() const { return slice_map; }
    VariantMatrix const& matrix() const { return optimized_matrix; }
    var::scalar_tag scalar_tag() const { return tag; }

private:
    /// Just scale the Hamiltonian: H2 = (H - I*b) * (2/a)
    template<class scalar_t>
    void create_scaled(Indices const& idx, Scale<> scale);
    /// Scale and reorder the Hamiltonian so that idx is at the start of the optimized matrix
    template<class scalar_t>
    void create_reordered(Indices const& idx, Scale<> scale);
    /// Get optimized indices which map to the given originals
    static Indices reorder_indices(Indices const& original_idx,
                                   std::vector<storage_idx_t> const& reorder_map);

    /// Total non-zeros processed when computing `num_moments` with or without size optimizations
    size_t num_nonzeros(idx_t num_moments, bool optimal_size) const;
    /// Same as above but with vector elements instead of sparse matrix non-zeros
    size_t num_vec_elements(idx_t num_moments, bool optimal_size) const;
    /// The amount of memory (in bytes) used by the Hamiltonian matrix
    size_t matrix_memory() const;
    /// Memory used by a single KPM vector
    size_t vector_memory() const;

private:
    Hamiltonian original_h; ///< original unoptimized Hamiltonian
    Indices original_idx; ///< original target indices for which the optimization was done
    VariantMatrix optimized_matrix; ///< reordered for faster compute
    var::scalar_tag tag; ///< indicates the scalar type of the matrix
    Indices optimized_idx; ///< reordered target indices in the optimized matrix
    SliceMap slice_map; ///< slice border indices
    std::vector<storage_idx_t> reorder_map; ///< mapping from original matrix indices to reordered indices
    MatrixFormat matrix_format;
    bool is_reordered;
    Chrono timer; ///< times the optimization step (reported via Stats)

    friend struct Stats;
    friend struct Optimize;
};
}} // namespace cpb::kpm
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/kpm/Kernel.hpp | .hpp | 3,493 | 91 | #pragma once
#include "numeric/dense.hpp"
namespace cpb { namespace kpm {
/// Moment calculations at higher optimization levels require specific rounding.
/// `n - 2` considers only moments in the main KPM loop. Divisible by 4 because
/// that is the strictest requirement imposed by `opt_size_and_interleaved`.
/// Round `n` up so the main KPM loop requirement `(n - 2) % 4 == 0` holds
/// (the strictest constraint, imposed by `opt_size_and_interleaved`).
/// The minimum is 2 moments.
inline idx_t round_num_moments(idx_t n) {
    if (n < 2) { return 2; }
    auto const remainder = (n - 2) % 4;
    return remainder == 0 ? n : n + (4 - remainder);
}
/**
Put the kernel in *Kernel* Polynomial Method
This provides the general kernel interface. For concrete implementations
see the `lorentz_kernel` and `jackson_kernel` functions below.
*/
struct Kernel {
    /// Produce the KPM damping coefficients which depend on the number of expansion moments
    std::function<ArrayXd(idx_t num_moments)> damping_coefficients;
    /// The number of moments required to reconstruct a function at the specified scaled broadening
    std::function<idx_t(double scaled_broadening)> required_num_moments;

    /// Apply the kernel damping to an array of moments
    template<class scalar_t>
    void operator()(ArrayX<scalar_t>& moments) const {
        using real_t = num::get_real_t<scalar_t>;
        auto const N = static_cast<int>(moments.size());
        moments *= damping_coefficients(N).template cast<real_t>();
    }

    /// Apply to each moments array in a vector (e.g. multiple matrix elements)
    template<class scalar_t>
    void operator()(std::vector<ArrayX<scalar_t>>& moments) const {
        for (auto& m : moments) { operator()(m); }
    }

    /// Apply to each column of a 2D moments array independently
    template<class scalar_t>
    void operator()(ArrayXX<scalar_t>& moments) const {
        using real_t = num::get_real_t<scalar_t>;
        auto const N = static_cast<int>(moments.rows());
        moments.colwise() *= damping_coefficients(N).template cast<real_t>().eval();
    }

    /// Apply the kernel damping to a matrix of moments
    /// (2D expansion: damping factor g_n * g_m for element (n, m))
    template<class scalar_t>
    void operator()(MatrixX<scalar_t>& moments) const {
        using real_t = num::get_real_t<scalar_t>;
        assert(moments.rows() == moments.cols());
        auto const N = static_cast<int>(moments.rows());
        auto const g = damping_coefficients(N).template cast<real_t>().eval();
        moments.array() *= g.replicate(1, N).rowwise() * g.transpose();
    }
};
/**
The Jackson kernel
This is a good general-purpose kernel, appropriate for most applications. It imposes a
Gaussian broadening of `sigma = pi / N`. Therefore, the resolution of the reconstructed
function will improve directly with the number of moments N.
*/
Kernel jackson_kernel();
/**
The Lorentz kernel
This kernel is most appropriate for the expansion of the Green’s function because it most
closely mimics the divergences near the true eigenvalues of the Hamiltonian. The lambda
value is found empirically to be between 3 and 5, and it may be used to fine-tune the
smoothness of the convergence. The Lorentzian broadening is given by `lambda / N`.
*/
Kernel lorentz_kernel(double lambda = 4.0);
/**
The Dirichlet kernel
This kernel doesn't modify the moments at all. The resulting moments represent just
a truncated series which results in lots of oscillation in the reconstructed function.
Therefore, this kernel should almost never be used. It's only here in case the raw
moment values are needed for some other purpose. Note that `required_num_moments()`
returns `N = pi / sigma` for compatibility with the Jackson kernel, but there is no
actual broadening associated with the Dirichlet kernel.
*/
Kernel dirichlet_kernel();
}} // namespace cpb::kpm
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/kpm/Config.hpp | .hpp | 988 | 35 | #pragma once
#include "kpm/Kernel.hpp"
namespace cpb { namespace kpm {
/// Sparse matrix format for the optimized Hamiltonian
enum class MatrixFormat { CSR, ELL };
/**
Algorithm selection, see the corresponding functions in `calc_moments.hpp`
*/
struct AlgorithmConfig {
    bool optimal_size; ///< process only the non-zero slice of the system at each iteration
    bool interleaved;  ///< compute two consecutive moments together for cache locality

    /// Does the Hamiltonian matrix need to be reordered?
    /// (both optimizations require the specially ordered matrix)
    bool reorder() const { return optimal_size || interleaved; }
};
/**
KPM configuration struct with defaults
*/
struct Config {
    // Zero min/max energy means "unknown" -- computed automatically via Lanczos
    float min_energy = 0.0f; ///< lowest eigenvalue of the Hamiltonian
    float max_energy = 0.0f; ///< highest eigenvalue of the Hamiltonian
    Kernel kernel = jackson_kernel(); ///< produces the damping coefficients

    MatrixFormat matrix_format = MatrixFormat::ELL; ///< ELLPACK vectorizes better than CSR
    AlgorithmConfig algorithm = {/*optimal_size*/true, /*interleaved*/true};

    float lanczos_precision = 0.002f; ///< how precise should the min/max energy estimation be
};
}} // namespace cpb::kpm
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/kpm/default/collectors.hpp | .hpp | 4,120 | 132 | #pragma once
#include "kpm/OptimizedHamiltonian.hpp"
#include "detail/macros.hpp"
#include "support/simd.hpp"
namespace cpb { namespace kpm {
/// Collects the diagonal KPM moments `mu_n = <r|Tn(H)|r>` two at a time
template<class scalar_t>
class DiagonalCollector {
public:
    using Vector = VectorX<scalar_t>;
    using VectorRef = Eigen::Ref<Vector>;

    ArrayX<scalar_t> moments;
    scalar_t m0; ///< moment 0, saved by `initial()` for reuse
    scalar_t m1; ///< moment 1, saved by `initial()` for reuse

    DiagonalCollector(idx_t num_moments) : moments(num_moments) {}

    idx_t size() const { return moments.size(); }

    /// Collect the first 2 moments which are computed outside the main KPM loop
    void initial(VectorRef r0, VectorRef r1);

    /// Collect moments `n` and `n + 1` from the result vectors. Expects `n >= 2`.
    void operator()(idx_t n, scalar_t m2, scalar_t m3);

    /// Zero of the same scalar type as the moments
    static constexpr scalar_t zero() { return scalar_t{0}; }
};
/// Diagonal moments collector for batched (multi-vector) calculations:
/// one moments column per KPM vector, accumulated via SIMD arrays
template<class scalar_t>
class BatchDiagonalCollector {
public:
    using Vector = MatrixX<scalar_t>; // each column is one KPM vector
    using VectorRef = Eigen::Ref<Vector>;

    ArrayXX<scalar_t> moments;
    simd::array<scalar_t> m0; ///< per-vector moment 0
    simd::array<scalar_t> m1; ///< per-vector moment 1

    BatchDiagonalCollector(idx_t num_moments, idx_t batch_size)
        : moments(num_moments, batch_size) {}

    idx_t size() const { return moments.rows(); }
    void initial(VectorRef r0, VectorRef r1);
    void operator()(idx_t n, simd::array<scalar_t> m2, simd::array<scalar_t> m3);
    static constexpr simd::array<scalar_t> zero() { return {{0}}; }
};
/**
Moments collector interface for the off-diagonal algorithm.
Concrete implementations define what part of the KPM vectors
should be collected and/or apply operators.
*/
template<class scalar_t>
class OffDiagonalCollector {
public:
    using Vector = VectorX<scalar_t>;
    using VectorRef = Eigen::Ref<Vector>;

    virtual ~OffDiagonalCollector() = default;

    /// Number of moments
    virtual idx_t size() const = 0;

    /// Collect the first 2 moments which are computed outside the main KPM loop
    virtual void initial(VectorRef r0, VectorRef r1) = 0;

    /// Collect moment `n` from the result vector `r1`. Expects `n >= 2`.
    virtual void operator()(idx_t n, VectorRef r1) = 0;
};
/// Collects `mu_n = <beta|op Tn(H)|alpha>` for arbitrary left vector and operator
template<class scalar_t>
class GenericCollector : public OffDiagonalCollector<scalar_t> {
    using VectorRef = typename OffDiagonalCollector<scalar_t>::VectorRef;

public:
    ArrayX<scalar_t> moments;
    VectorX<scalar_t> beta;     ///< left vector, cast/reordered to the working precision
    SparseMatrixX<scalar_t> op; ///< operator applied before projecting onto `beta`

    GenericCollector(idx_t num_moments, OptimizedHamiltonian const& oh, VectorXcd const& alpha_,
                     VectorXcd const& beta_, SparseMatrixXcd const& op_);

    idx_t size() const override { return moments.size(); }
    void initial(VectorRef r0, VectorRef r1) override;
    void operator()(idx_t n, VectorRef r1) override;
};
/**
 Off-diagonal collector which keeps a separate moment series for each
 destination index in `idx.dest`.
 */
template<class scalar_t>
class MultiUnitCollector : public OffDiagonalCollector<scalar_t> {
    using VectorRef = typename OffDiagonalCollector<scalar_t>::VectorRef;
public:
    Indices const& idx;                      // NOTE: non-owning reference -- `idx` must outlive the collector
    std::vector<ArrayX<scalar_t>> moments;   // one series of `num_moments` per destination index
    MultiUnitCollector(idx_t num_moments, Indices const& idx)
        : idx(idx), moments(idx.dest.size(), ArrayX<scalar_t>(num_moments)) {}
    // Assumes `idx.dest` is non-empty, otherwise `moments[0]` is out of bounds
    idx_t size() const override { return moments[0].size(); }
    void initial(VectorRef r0, VectorRef r1) override;
    void operator()(idx_t n, VectorRef r1) override;
};
/**
 Off-diagonal collector which stores one dense row of results per moment,
 optionally applying the sparse operator `op` (see .cpp implementation).
 */
template<class scalar_t>
class DenseMatrixCollector : public OffDiagonalCollector<scalar_t> {
    using VectorRef = typename OffDiagonalCollector<scalar_t>::VectorRef;
public:
    SparseMatrixX<scalar_t> op;   // operator applied during collection
    MatrixX<scalar_t> moments;    // one row per moment
    DenseMatrixCollector(idx_t num_moments, OptimizedHamiltonian const& oh,
                         VariantCSR const& op_);
    idx_t size() const override { return moments.rows(); }
    void initial(VectorRef r0, VectorRef r1) override;
    void operator()(idx_t n, VectorRef r1) override;
};
// Presumably expands to `extern template class` declarations for the common
// scalar types -- the instantiations live in the corresponding .cpp file.
CPB_EXTERN_TEMPLATE_CLASS(DiagonalCollector)
CPB_EXTERN_TEMPLATE_CLASS(BatchDiagonalCollector)
CPB_EXTERN_TEMPLATE_CLASS(GenericCollector)
CPB_EXTERN_TEMPLATE_CLASS(MultiUnitCollector)
CPB_EXTERN_TEMPLATE_CLASS(DenseMatrixCollector)
}} // namespace cpb::kpm
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/kpm/default/Compute.hpp | .hpp | 818 | 30 | #pragma once
#include "kpm/Core.hpp"
namespace cpb { namespace kpm {
/**
 Default CPU implementation for computing KPM moments, see `Core`
 */
class DefaultCompute : public Compute::Interface {
public:
    /// Invoked as work progresses: `delta` newly finished units out of `total`
    using ProgressCallback = std::function<void (idx_t delta, idx_t total)>;

    // num_threads = -1: presumably "use all available threads" -- confirm in the .cpp
    DefaultCompute(idx_t num_threads = -1, ProgressCallback progress_callback = {});

    /// Compute the KPM moments `m` (see `Compute::Interface`)
    void moments(MomentsRef m, Starter const& s, AlgorithmConfig const& ac,
                 OptimizedHamiltonian const& oh) const override;

    idx_t get_num_threads() const { return num_threads; }

    // Progress reporting hooks, forwarded to `progress_callback`
    void progress_start(idx_t total) const;
    void progress_update(idx_t delta, idx_t total) const;
    void progress_finish(idx_t total) const;

private:
    idx_t num_threads;
    ProgressCallback progress_callback;
};
}} // namespace cpb::kpm
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/numeric/constant.hpp | .hpp | 685 | 22 | #pragma once
#include <complex>
namespace cpb { namespace constant {
    // Note: all constants are stored in single precision (float)
    // imaginary unit
    constexpr std::complex<float> i1(0, 1);
    // the omnipresent pi
    constexpr float pi = 3.14159265358979323846f;
    // electron charge [C]
    constexpr float e = 1.602e-19f;
    // reduced Planck constant [eV*s]
    constexpr float hbar = 6.58211899e-16f;
    // electron rest mass [kg]
    constexpr float m0 = 9.10938188e-31f;
    // vacuum permittivity [F/m == C/V/m]
    constexpr float epsilon0 = 8.854e-12f;
    // magnetic flux quantum (h/e) [V*s]: h = 2*pi*hbar, with hbar in eV*s
    constexpr float phi0 = 2 * pi*hbar;
    // Boltzmann constant [eV/K]
    constexpr float kb = 8.6173303e-5f;
}} // namespace cpb::constant
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/numeric/arrayref.hpp | .hpp | 11,030 | 253 | #pragma once
#include "detail/typelist.hpp"
#include "support/cppfuture.hpp"
#include "numeric/traits.hpp"
#include <algorithm>
#include <numeric>
#include <stdexcept>
#include <array>
namespace cpb { namespace num {
/// Array scalar type
enum class Tag : std::int8_t {f32, cf32, f64, cf64, b, i8, i16, i32, i64, u8, u16, u32, u64};
namespace detail {
// Map each supported scalar type to its runtime `Tag` value
template<class scalar_t> constexpr Tag get_tag();
template<> constexpr Tag get_tag<float>() { return Tag::f32; }
template<> constexpr Tag get_tag<std::complex<float>>() { return Tag::cf32; }
template<> constexpr Tag get_tag<double>() { return Tag::f64; }
template<> constexpr Tag get_tag<std::complex<double>>() { return Tag::cf64; }
template<> constexpr Tag get_tag<bool>() { return Tag::b; }
template<> constexpr Tag get_tag<std::int8_t>() { return Tag::i8; }
template<> constexpr Tag get_tag<std::int16_t>() { return Tag::i16; }
template<> constexpr Tag get_tag<std::int32_t>() { return Tag::i32; }
template<> constexpr Tag get_tag<std::int64_t>() { return Tag::i64; }
template<> constexpr Tag get_tag<std::uint8_t>() { return Tag::u8; }
template<> constexpr Tag get_tag<std::uint16_t>() { return Tag::u16; }
template<> constexpr Tag get_tag<std::uint32_t>() { return Tag::u32; }
template<> constexpr Tag get_tag<std::uint64_t>() { return Tag::u64; }

// Fallback for integral types which don't match a fixed-width typedef exactly
// (e.g. `long` vs `long long` across platforms): dispatch on signedness and
// size -- only 4- and 8-byte integrals are accepted
template<class T>
constexpr Tag get_tag() {
    static_assert(std::is_integral<T>::value && (sizeof(T) == 8 || sizeof(T) == 4), "");
    using type = std14::conditional_t<
        std::is_signed<T>::value,
        std14::conditional_t<sizeof(T) == 8, std::int64_t, std::int32_t>,
        std14::conditional_t<sizeof(T) == 8, std::uint64_t, std::uint32_t>
    >;
    return get_tag<type>();
}
/// Reference to any 1D, 2D or 3D array with any scalar type supported by Tag
template<bool is_const>
struct BasicArrayRef {
    using Ptr = std14::conditional_t<is_const, void const*, void*>;
    using Dim = std::int8_t;
    using Shape = std::array<idx_t, 3>;

    Ptr data;          // type-erased pointer; actual scalar type is given by `tag`
    Tag tag;           // runtime scalar type tag
    Dim ndim;          // number of dimensions actually used: 1, 2 or 3
    bool is_row_major; // memory layout of 2D/3D data
    Shape shape;       // sizes per dimension; unused dimensions are 0

    BasicArrayRef(Ptr data, Tag tag, Dim ndim, bool rm, Shape shape)
        : data(data), tag(tag), ndim(ndim), is_row_major(rm), shape(shape) {}
    BasicArrayRef(Ptr data, Tag tag, Dim ndim, bool rm, idx_t x, idx_t y = 0, idx_t z = 0)
        : BasicArrayRef(data, tag, ndim, rm, {{x, y, z}}) {}
    // Deduce the tag (and constness) from the pointer's scalar type
    template<class T>
    BasicArrayRef(T* data, Dim ndim, bool rm, idx_t x, idx_t y = 0, idx_t z = 0)
        : BasicArrayRef(data, get_tag<std14::remove_const_t<T>>(), ndim, rm, x, y, z) {}

    /// Non-const array can be converted to const, but not the other way around
    BasicArrayRef(BasicArrayRef<false> const& a)
        : BasicArrayRef(a.data, a.tag, a.ndim, a.is_row_major, a.shape) {}

    /// Total number of elements: product of the first `ndim` shape entries
    idx_t size() const { return std::accumulate(shape.begin(), shape.begin() + ndim,
                                                idx_t{1}, std::multiplies<idx_t>()); }
};
/// Selects a const or mutable array ref based on the constness of `T`
template<class T>
using MakeArrayRef = BasicArrayRef<std::is_const<std14::remove_pointer_t<T>>::value>;

/// Construct an array ref, deducing the tag and constness from `T`
template<class T>
auto make_arrayref(T* data, std::int8_t ndim, bool is_row_major, idx_t x, idx_t y, idx_t z)
    -> MakeArrayRef<T> {
    return {data, get_tag<T>(), ndim, is_row_major, {{x, y, z}}};
}
} // namespace detail
/// Const or mutable reference to any 1D, 2D or 3D array with any scalar type
using ArrayConstRef = detail::BasicArrayRef<true>;
using ArrayRef = detail::BasicArrayRef<false>;
/// Reference to an array which is limited to a set of scalar types
template<class Scalar, class... Scalars>
struct VariantArrayConstRef : ArrayConstRef {
    using First = Scalar;
    using Types = TypeList<Scalar, Scalars...>;

    // Both conversions validate the tag at runtime -- see `check()`
    VariantArrayConstRef(ArrayConstRef const& other) : ArrayConstRef(other) { check(other.tag); }
    VariantArrayConstRef(ArrayRef const& other) : ArrayConstRef(other) { check(other.tag); }

    /// Throw if `candidate_tag` is not one of the allowed scalar types
    void check(Tag candidate_tag) const {
        auto const possible_tags = {detail::get_tag<Scalar>(), detail::get_tag<Scalars>()...};
        auto const is_invalid = std::none_of(possible_tags.begin(), possible_tags.end(),
                                             [&](Tag tag) { return candidate_tag == tag; });
        if (is_invalid) {
            throw std::runtime_error("Invalid VariantArrayConstRef assignment");
        }
    }
};
/// Mutable counterpart of `VariantArrayConstRef`: limited to a set of scalar types
template<class Scalar, class... Scalars>
struct VariantArrayRef : ArrayRef {
    using First = Scalar;
    using Types = TypeList<Scalar, Scalars...>;

    // Validates the tag at runtime, mirroring `VariantArrayConstRef::check()`
    VariantArrayRef(ArrayRef const& other) : ArrayRef(other) {
        auto const possible_tags = {detail::get_tag<Scalar>(), detail::get_tag<Scalars>()...};
        auto const is_invalid = std::none_of(possible_tags.begin(), possible_tags.end(),
                                             [&](Tag tag) { return other.tag == tag; });
        if (is_invalid) {
            throw std::runtime_error("Invalid VariantArrayRef assignment");
        }
    }
};
/// Return a 1D array reference from pointer and size
template<class T>
auto arrayref(T* data, idx_t size) -> detail::MakeArrayRef<T> {
    return detail::make_arrayref(data, 1, true, size, 0, 0);
}

/// Return a 2D (row-major) array reference from pointer and sizes
template<class T>
auto arrayref(T* data, idx_t x, idx_t y) -> detail::MakeArrayRef<T> {
    return detail::make_arrayref(data, 2, true, x, y, 0);
}

/// Return a 3D (row-major) array reference from pointer and sizes
template<class T>
auto arrayref(T* data, idx_t x, idx_t y, idx_t z) -> detail::MakeArrayRef<T> {
    return detail::make_arrayref(data, 3, true, x, y, z);
}
// Common aliases
using RealArrayConstRef = VariantArrayConstRef<float, double>;
using ComplexArrayConstRef = VariantArrayConstRef<
float, double, std::complex<float>, std::complex<double>
>;
using RealArrayRef = VariantArrayRef<float, double>;
using ComplexArrayRef = VariantArrayRef<
float, double, std::complex<float>, std::complex<double>
>;
/**
 Creates an actual container from an ArrayRef

 To be specialized by concrete containers. The container can adopt the reference via
 a proxy type (e.g. Eigen::Map) or it can create a copy of the ArrayRef's data.
 */
template<class Container>
struct MakeContainer {
    // Intentionally unimplemented, specializations should do it
    static Container make(ArrayConstRef const&);
    static Container make(ArrayRef const&);
};
namespace detail {
template<template<class> class Container, class Variant>
using DeclContainer = decltype(
MakeContainer<Container<typename Variant::First>>::make(std::declval<Variant>())
);
template<class Function, template<class> class Container, class Variant>
using MatchResult = typename std::result_of<
Function(DeclContainer<Container, Variant>)
>::type;
template<class Result, template<class> class /*Container*/, class Variant, class Function>
Result try_match(Variant, Function, TypeList<>) {
throw std::runtime_error{"A match was not found"};
};
template<class Result, template<class> class Container, class Variant, class Function,
class Scalar, class... Tail>
Result try_match(Variant ref, Function lambda, TypeList<Scalar, Tail...>) {
if (ref.tag == detail::get_tag<Scalar>()) {
return lambda(MakeContainer<Container<Scalar>>::make(ref));
} else {
return try_match<Result, Container>(ref, lambda, TypeList<Tail...>{});
}
};
template<class Function, template<class> class Container1, template<class> class Container2,
class Variant1, class Variant2>
using Match2Result = typename std::result_of<
Function(DeclContainer<Container1, Variant1>, DeclContainer<Container2, Variant2>)
>::type;
template<class Result, template<class> class /*Container1*/,
template<class> class /*Container2*/, class Variant1, class Variant2, class Function>
Result try_match2(Variant1, Variant2, Function, TypeList<>) {
throw std::runtime_error{"A match was not found"};
};
template<class Result, template<class> class Container1, template<class> class Container2,
class Variant1, class Variant2, class Function,
class Scalar1, class Scalar2, class... Tail>
Result try_match2(Variant1 ref1, Variant2 ref2, Function lambda,
TypeList<TypeList<Scalar1, Scalar2>, Tail...>) {
if (ref1.tag == detail::get_tag<Scalar1>() && ref2.tag == detail::get_tag<Scalar2>()) {
return lambda(MakeContainer<Container1<Scalar1>>::make(ref1),
MakeContainer<Container2<Scalar2>>::make(ref2));
} else {
return try_match2<Result, Container1, Container2>(ref1, ref2, lambda,
TypeList<Tail...>{});
}
};
template<class List>
struct IsSamePrecision;
template<class T1, class T2>
struct IsSamePrecision<TypeList<T1, T2>> {
static constexpr auto value = std::is_same<
num::get_real_t<T1>, num::get_real_t<T2>
>::value;
};
} // namespace detail
/// Match a VariantArrayRef to a Container and pass it to Function.
/// Throws std::runtime_error if the ref's tag is not in the variant's type list.
template<template<class> class Container, class Variant, class Function,
         class Result = detail::MatchResult<Function, Container, Variant>>
Result match(Variant ref, Function lambda) {
    return detail::try_match<Result, Container>(ref, lambda, typename Variant::Types{});
}

/// Match two VariantArrayRefs to Containers (in all combinations) and pass them to Function
template<template<class> class Container1, template<class> class Container2,
         class Variant1, class Variant2, class Function,
         class Result = detail::Match2Result<Function, Container1, Container2, Variant1, Variant2>>
Result match2(Variant1 ref1, Variant2 ref2, Function lambda) {
    using List = tl::Combinations<typename Variant1::Types, typename Variant2::Types>;
    return detail::try_match2<Result, Container1, Container2>(ref1, ref2, lambda, List{});
}

/// Same as match2, but only considers matches where both scalar types have the same precision
template<template<class> class Container1, template<class> class Container2,
         class Variant1, class Variant2, class Function,
         class Result = detail::Match2Result<Function, Container1, Container2, Variant1, Variant2>>
Result match2sp(Variant1 ref1, Variant2 ref2, Function lambda) {
    using List = tl::Combinations<typename Variant1::Types, typename Variant2::Types>;
    using FilteredList = tl::Filter<List, detail::IsSamePrecision>;
    return detail::try_match2<Result, Container1, Container2>(ref1, ref2, lambda, FilteredList{});
}
}} // namespace cpb::num
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/numeric/dense.hpp | .hpp | 12,283 | 361 | #pragma once
#include "detail/config.hpp"
#include "numeric/traits.hpp"
#include "numeric/arrayref.hpp"
#include <Eigen/Core>
#include <algorithm>
#include <numeric>
#include <vector>
namespace cpb {
// add common math functions to the global namespace
using std::abs;
using std::exp;
using std::pow;
using std::sqrt;
using std::sin;
using std::cos;
using std::tan;
using std::asin;
using std::acos;
// add common Eigen types to the global namespace
using Eigen::Ref;
using Eigen::Map;
using Eigen::DenseBase;
using Eigen::Array3i;
using Eigen::ArrayXi;
using Eigen::ArrayXf;
using Eigen::ArrayXcf;
using Eigen::ArrayXd;
using Eigen::ArrayXcd;
using Eigen::ArrayXXi;
using Eigen::ArrayXXf;
using Eigen::ArrayXXcf;
using Eigen::ArrayXXd;
using Eigen::ArrayXXcd;
using Eigen::Vector3f;
using Eigen::VectorXi;
using Eigen::VectorXf;
using Eigen::VectorXcf;
using Eigen::VectorXd;
using Eigen::VectorXcd;
using Eigen::MatrixXf;
using Eigen::MatrixXcf;
using Eigen::MatrixXd;
using Eigen::MatrixXcd;
// convenient type aliases
using Cartesian = Eigen::Vector3f;
using Index3D = Eigen::Vector3i;
using Vector3b = Eigen::Matrix<bool, 3, 1>;
template<class T> using ArrayX = Eigen::Array<T, Eigen::Dynamic, 1>;
template<class T> using ArrayXX = Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic>;
template<class T> using VectorX = Eigen::Matrix<T, Eigen::Dynamic, 1>;
template<class T> using MatrixX = Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>;
template<class T>
using ColMajorArrayXX = Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor>;
template<class T>
using ColMajorMatrixX = Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor>;
using ArrayXXdCM = ColMajorArrayXX<double>;
// array variants
using num::arrayref;
using num::ArrayConstRef;
using num::RealArrayConstRef;
using num::ComplexArrayConstRef;
using num::ArrayRef;
using num::RealArrayRef;
using num::ComplexArrayRef;
} // namespace cpb
namespace Eigen {
// add begin() and end() to the Eigen namespace
// this will enable using Eigen objects in ranged for loops
template<class Derived>
inline auto begin(EigenBase<Derived>& v) -> decltype(v.derived().data()) {
    return v.derived().data();
}

template<class Derived>
inline auto end(EigenBase<Derived>& v) -> decltype(v.derived().data()) {
    return v.derived().data() + v.size();
}

template<class Derived>
inline auto begin(const EigenBase<Derived>& v) -> decltype(v.derived().data()) {
    return v.derived().data();
}

template<class Derived>
inline auto end(const EigenBase<Derived>& v) -> decltype(v.derived().data()) {
    return v.derived().data() + v.size();
}
} // namespace Eigen
namespace cpb {
/**
 Map std::vector-like object data to an Eigen type

 No copy is made: the returned Map aliases `v`'s buffer, so it must not
 outlive `v` or any reallocation of it.
 */
template<template<class> class EigenType, class Vector,
         class scalar_t = typename Vector::value_type>
inline Eigen::Map<EigenType<scalar_t> const> eigen_cast(Vector const& v) {
    return {v.data(), static_cast<idx_t>(v.size())};
}
// utility functions

/// Sum of all coefficients of an Eigen expression
template<class Derived>
inline auto sum(const DenseBase<Derived>& v) -> decltype(v.sum()) {
    return v.sum();
}

/// Element-wise `out[i] = func(in[i])`; `out` must be at least as big as `in`
template<class DerivedIn, class DerivedOut, class Fn>
inline void transform(const DenseBase<DerivedIn>& in, DenseBase<DerivedOut>& out, Fn func) {
    std::transform(begin(in), end(in), begin(out), func);
}

/// Element-wise `out[i] = func(in1[i], in2[i])`
template<class DerivedIn1, class DerivedIn2, class DerivedOut, class Fn>
inline void transform(const DenseBase<DerivedIn1>& in1, const DenseBase<DerivedIn2>& in2,
                      DenseBase<DerivedOut>& out, Fn func) {
    std::transform(begin(in1), end(in1), begin(in2), begin(out), func);
}

/// Apply the function to the elements of the input container
/// and return the results in a new container of the given type
template<template<class...> class Container, class In, class Fn>
auto transform(In const& in, Fn func) -> Container<decltype(func(in[0]))> {
    using Out = decltype(func(in[0]));
    auto out = Container<Out>(in.size());
    std::transform(begin(in), end(in), begin(out), func);
    return out;
}

// Readable wrappers for Eigen's boolean reductions
template<class Derived> inline bool any_of(const DenseBase<Derived>& v) { return v.any(); }
template<class Derived> inline bool all_of(const DenseBase<Derived>& v) { return v.all(); }
template<class Derived> inline bool none_of(const DenseBase<Derived>& v) { return !v.any(); }
/// Const or mutable reference to the x/y/z coordinate arrays of a `CartesianArray`
template<bool is_const>
class BasicCartesianArrayRef {
public:
    using Reference = std14::conditional_t<!is_const, Eigen::Ref<ArrayXf>,
                                           Eigen::Ref<ArrayXf const>>;

    BasicCartesianArrayRef(Reference x, Reference y, Reference z)
        : x_ref(x), y_ref(y), z_ref(z) {}
    /// A mutable ref converts to const, but not the other way around
    BasicCartesianArrayRef(BasicCartesianArrayRef<false> const& o)
        : x_ref(o.x()), y_ref(o.y()), z_ref(o.z()) {}

    Reference const& x() const { return x_ref; }
    Reference const& y() const { return y_ref; }
    Reference const& z() const { return z_ref; }
    Reference& x() { return x_ref; }
    Reference& y() { return y_ref; }
    Reference& z() { return z_ref; }

    /// Number of points (all three coordinate arrays share this size)
    idx_t size() const { return x_ref.size(); }

private:
    Reference x_ref, y_ref, z_ref;
};

using CartesianArrayConstRef = BasicCartesianArrayRef<true>;
using CartesianArrayRef = BasicCartesianArrayRef<false>;
/// Structure-of-arrays storage for 3D points: separate x, y, z coordinate arrays
class CartesianArray {
private:
    /// Proxy which lets `operator[]` read/write one point across all three arrays
    struct CartesianRef {
        float &x, &y, &z;
        CartesianRef& operator=(const Cartesian& r) { x = r[0]; y = r[1]; z = r[2]; return *this; }
        operator Cartesian() { return {x, y, z}; }
    };

public:
    CartesianArray() = default;
    CartesianArray(idx_t size) : x(size), y(size), z(size) {}
    CartesianArray(ArrayXf x, ArrayXf y, ArrayXf z)
        : x(std::move(x)), y(std::move(y)), z(std::move(z)) {}

    CartesianRef operator[](idx_t i) { return {x[i], y[i], z[i]}; }
    Cartesian operator[](idx_t i) const { return {x[i], y[i], z[i]}; }

    idx_t size() const { return x.size(); }

    // Views over the first `size` points
    CartesianArrayConstRef head(idx_t size) const {
        return {x.head(size), y.head(size), z.head(size)};
    }
    CartesianArrayRef head(idx_t size) {
        return {x.head(size), y.head(size), z.head(size)};
    }
    // Views over `size` points starting at `start`
    CartesianArrayConstRef segment(idx_t start, idx_t size) const {
        return {x.segment(start, size), y.segment(start, size), z.segment(start, size)};
    }
    CartesianArrayRef segment(idx_t start, idx_t size) {
        return {x.segment(start, size), y.segment(start, size), z.segment(start, size)};
    }

    operator CartesianArrayConstRef() const { return {x, y, z}; }
    operator CartesianArrayRef() { return {x, y, z}; }

    /// Apply `lambda` to each of the three coordinate arrays
    template<class Fn>
    void for_each(Fn lambda) {
        lambda(x); lambda(y); lambda(z);
    }

    void resize(idx_t size) {
        for_each([size](ArrayX<float>& a) { a.resize(size); });
    }

    /// Resize while preserving existing values (Eigen `conservativeResize`)
    void conservativeResize(idx_t size) {
        for_each([size](ArrayX<float>& a) { a.conservativeResize(size); });
    }

public:
    ArrayX<float> x, y, z;
};
namespace num {

// ArrayRef's MakeContainer specializations for Eigen types

// Row-vector Eigen types (1 x cols)
template<template<class, int...> class EigenType, class scalar_t, int cols, int... options>
struct MakeContainer<EigenType<scalar_t, 1, cols, options...>> {
    using ConstMap = Eigen::Map<const EigenType<scalar_t, 1, cols, options...>>;
    static ConstMap make(ArrayConstRef const& ref) {
        return ConstMap{static_cast<scalar_t const*>(ref.data), ref.size()};
    }
    using Map = Eigen::Map<EigenType<scalar_t, 1, cols, options...>>;
    static Map make(ArrayRef const& ref) {
        return Map{static_cast<scalar_t*>(ref.data), ref.size()};
    }
};

// Column-vector Eigen types (rows x 1)
template<template<class, int...> class EigenType, class scalar_t, int rows, int... options>
struct MakeContainer<EigenType<scalar_t, rows, 1, options...>> {
    using ConstMap = Eigen::Map<const EigenType<scalar_t, rows, 1, options...>>;
    static ConstMap make(ArrayConstRef const& ref) {
        return ConstMap{static_cast<scalar_t const*>(ref.data), ref.size()};
    }
    using Map = Eigen::Map<EigenType<scalar_t, rows, 1, options...>>;
    static Map make(ArrayRef const& ref) {
        return Map{static_cast<scalar_t*>(ref.data), ref.size()};
    }
};

// General 2D Eigen types -- uses shape[0]/shape[1], i.e. expects ref.ndim == 2
template<template<class, int...> class EigenType,
         class scalar_t, int rows, int cols, int... options>
struct MakeContainer<EigenType<scalar_t, rows, cols, options...>> {
    using ConstMap = Eigen::Map<const EigenType<scalar_t, rows, cols, options...>>;
    static ConstMap make(ArrayConstRef const& ref) {
        return ConstMap{static_cast<scalar_t const*>(ref.data), ref.shape[0], ref.shape[1]};
    }
    using Map = Eigen::Map<EigenType<scalar_t, rows, cols, options...>>;
    static Map make(ArrayRef const& ref) {
        return Map{static_cast<scalar_t*>(ref.data), ref.shape[0], ref.shape[1]};
    }
};

/// Force cast a matrix to any scalar type (lose precision and/or imaginary part)
template<class scalar_t>
MatrixX<scalar_t> force_cast(MatrixXcd const& m) { return m.cast<scalar_t>(); }
template<>
inline MatrixX<double> force_cast<double>(MatrixXcd const& m) { return m.real(); }
template<>
inline MatrixX<float> force_cast<float>(MatrixXcd const& m) { return m.real().cast<float>(); }

/// Same, for vectors
template<class scalar_t>
VectorX<scalar_t> force_cast(VectorXcd const& m) { return m.cast<scalar_t>(); }
template<>
inline VectorX<double> force_cast<double>(VectorXcd const& m) { return m.real(); }
template<>
inline VectorX<float> force_cast<float>(VectorXcd const& m) { return m.real().cast<float>(); }

} // namespace num
/// Return a const array reference to an Eigen expression's data
/// (1D for vectors, 2D otherwise)
template<class Derived>
ArrayConstRef arrayref(DenseBase<Derived> const& v) {
    auto const& d = v.derived();  // use `d` consistently instead of re-deriving
    return {d.data(),
            Derived::IsVectorAtCompileTime ? 1 : 2,
            Derived::IsRowMajor,
            Derived::IsVectorAtCompileTime ? d.size() : d.rows(),
            Derived::IsVectorAtCompileTime ? 0 : d.cols()};
}
/// Return a mutable (or const, matching `Derived`'s data constness) array
/// reference to an Eigen expression's data (1D for vectors, 2D otherwise)
template<class Derived>
auto arrayref(DenseBase<Derived>& v) -> num::detail::MakeArrayRef<decltype(v.derived().data())> {
    auto& d = v.derived();  // use `d` consistently instead of re-deriving
    return {d.data(),
            Derived::IsVectorAtCompileTime ? 1 : 2,
            Derived::IsRowMajor,
            Derived::IsVectorAtCompileTime ? d.size() : d.rows(),
            Derived::IsVectorAtCompileTime ? 0 : d.cols()};
}
/// Return a const 1D array reference to the vector's data
template<class scalar_t>
ArrayConstRef arrayref(std::vector<scalar_t> const& v) {
    return arrayref(v.data(), static_cast<idx_t>(v.size()));
}
/// Return a mutable 1D array reference to the vector's data
template<class scalar_t>
ArrayRef arrayref(std::vector<scalar_t>& v) {
    // Bug fix: this used to call `arrayref(v.data(), 1, true, size)` which
    // resolves to the 3D overload (ndim == 3, shape {1, 1, size}) -- the
    // const overload above produces a 1D ref. Use the 1D overload here too.
    return arrayref(v.data(), static_cast<idx_t>(v.size()));
}
/// Range from 0 to `size` of scalar type `T` which does not have to be an integral type
template<class T>
ArrayX<T> make_integer_range(idx_t size) {
    auto result = ArrayX<T>(size);
    // idx_t loop index: `auto n = 0` deduced `int`, mismatching the idx_t bound
    for (auto n = idx_t{0}; n < size; ++n) {
        result[n] = static_cast<T>(n);
    }
    return result;
}
template<class Vector, class Bools>
Vector slice(Vector const& v, Bools const& keep) {
using std::begin; using std::end;
auto const original_size = v.size();
auto const result_size = std::accumulate(begin(keep), end(keep), idx_t{0});
auto result = Vector(result_size);
auto count = 0;
for (auto i = idx_t{0}; i < original_size; ++i) {
if (keep[i])
result[count++] = v[i];
}
return result;
};
/// Concatenate two 1D arrays/vectors
template<class Vector>
Vector concat(Ref<Vector const> v1, Ref<Vector const> v2) {
    using std::begin; using std::end;
    auto result = Vector(v1.size() + v2.size());
    std::copy(begin(v1), end(v1), begin(result));
    std::copy(begin(v2), end(v2), begin(result) + v1.size());
    return result;
}

/// Convenience overload: wraps owning vectors in `Ref`s and forwards
template<class Vector, typename R = Ref<Vector const>>
Vector concat(Vector const& v1, Vector const& v2) {
    return concat(R(v1), R(v2));
}

/// Concatenate two Cartesian arrays (per coordinate)
inline CartesianArray concat(CartesianArrayConstRef const& ca1,
                             CartesianArrayConstRef const& ca2) {
    return {concat(ca1.x(), ca2.x()),
            concat(ca1.y(), ca2.y()),
            concat(ca1.z(), ca2.z())};
}
} // namespace cpb
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/numeric/traits.hpp | .hpp | 3,622 | 117 | #pragma once
#include <complex>
#include <string>
#include <type_traits>
#include <limits>
namespace cpb { namespace num {
namespace detail {

// Primary template: arithmetic (real) scalar types
template<class T>
struct complex_traits {
    static_assert(std::is_arithmetic<T>::value, "");
    using real_t = T;
    using complex_t = std::complex<T>;
    static constexpr bool is_complex = false;
};

// Specialization: std::complex scalar types
template<class T>
struct complex_traits<std::complex<T>> {
    using real_t = T;
    using complex_t = std::complex<T>;
    static constexpr bool is_complex = true;
};

} // namespace detail
/**
 Return the real type corresponding to the given scalar type

 For example:
   std::complex<float> -> float
   float -> float
 */
template<class scalar_t>
using get_real_t = typename detail::complex_traits<scalar_t>::real_t;

/**
 Return the complex type corresponding to the given scalar type

 For example:
   std::complex<float> -> std::complex<float>
   float -> std::complex<float>
 */
template<class scalar_t>
using get_complex_t = typename detail::complex_traits<scalar_t>::complex_t;

/**
 Is the given scalar type complex?
 */
template<class scalar_t>
inline constexpr bool is_complex() { return detail::complex_traits<scalar_t>::is_complex; }
/**
 Return the complex conjugate while preserving the argument's scalar type

 The standard `conj` always yields `std::complex<T>`, which gets in the way
 of generic algorithms; these overloads keep real inputs real.
 */
template<class scalar_t>
inline scalar_t conjugate(scalar_t x) { return x; }

template<class scalar_t>
inline std::complex<scalar_t> conjugate(std::complex<scalar_t> x) {
    return {x.real(), -x.imag()};
}
/**
 Cast a `std::complex<double>` to another scalar type

 The conversion intentionally drops precision and/or the imaginary part.
 */
template<class scalar_t>
inline scalar_t force_cast(std::complex<double> v) { return v; }

template<>
inline std::complex<float> force_cast<std::complex<float>>(std::complex<double> v) {
    auto const re = static_cast<float>(v.real());
    auto const im = static_cast<float>(v.imag());
    return std::complex<float>(re, im);
}

template<>
inline double force_cast<double>(std::complex<double> v) { return v.real(); }

template<>
inline float force_cast<float>(std::complex<double> v) {
    return static_cast<float>(v.real());
}
/**
 Return a human readable name of the scalar type
 */
template<class scalar_t> inline std::string scalar_name();
template<> inline std::string scalar_name<float>() { return "float"; }
template<> inline std::string scalar_name<double>() { return "double"; }
template<> inline std::string scalar_name<std::complex<float>>() { return "complex<float>"; }
template<> inline std::string scalar_name<std::complex<double>>() { return "complex<double>"; }
/**
 Floating-point equality with precision in ULP (units in the last place)

 Two values compare equal when their difference is within `ulp` units of the
 type's epsilon (scaled by the magnitude of the operands), or when the
 difference itself is subnormal.
 */
template<class T, class = typename std::enable_if<std::is_floating_point<T>::value, void>::type>
bool approx_equal(T x, T y, int ulp = 1) {
    auto const difference = std::abs(x - y);
    if (difference <= std::numeric_limits<T>::min()) {
        return true;  // subnormal difference -- treat as equal
    }
    auto const tolerance = std::numeric_limits<T>::epsilon()
                           * std::abs(x + y) * static_cast<T>(ulp);
    return difference <= tolerance;
}
/**
 Return the minimum aligned size for the given scalar type and alignment in bytes

 I.e. the smallest multiple of `align_bytes / sizeof(scalar_t)` elements that
 is >= `size`. `size` is expected to be non-negative. For example:

     aligned_size<float, 16>(3) -> 4
     aligned_size<std::complex<double>, 16>(2) -> 2
 */
template<class scalar_t, int align_bytes, class T>
T aligned_size(T size) {
    static constexpr auto step = static_cast<T>(align_bytes / sizeof(scalar_t));
    // Guard against `sizeof(scalar_t) > align_bytes`: `step` would be 0 and
    // the rounding below (or the old `size % step` loop) would divide by zero
    static_assert(sizeof(scalar_t) <= align_bytes,
                  "scalar_t must not be larger than the alignment");
    // Round up to the next multiple of `step` in O(1) instead of a ++ loop
    return (size + step - 1) / step * step;
}
}} // namespace cpb::num
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/numeric/sparseref.hpp | .hpp | 4,029 | 133 | #pragma once
#include "detail/config.hpp"
#include "numeric/arrayref.hpp"
namespace cpb { namespace num {

namespace detail {
/**
 Reference to CSR matrix of any type

 Non-owning: the data/indices/indptr buffers must outlive the reference.
 */
struct BasicCsrConstRef {
    int const rows;
    int const cols;
    int const nnz;                       // number of stored (non-zero) values
    void const* const void_data;         // type-erased pointer to the values
    storage_idx_t const* const indices;  // column index per value
    storage_idx_t const* const indptr;   // row start offsets, size `rows + 1`
};
} // namespace detail

/**
 Template reference to a CSR matrix with specific scalar type
 */
template<class scalar_t>
struct CsrConstRef : detail::BasicCsrConstRef {
    using type = scalar_t;

    CsrConstRef(int rows, int cols, int nnz, scalar_t const* data,
                storage_idx_t const* indices, storage_idx_t const* indptr)
        : detail::BasicCsrConstRef{rows, cols, nnz, data, indices, indptr} {}

    /// The values pointer with its concrete scalar type restored
    scalar_t const* data() const { return static_cast<scalar_t const*>(void_data); }
};

/**
 Tagged reference to a CSR matrix of any scalar type
 */
struct AnyCsrConstRef : detail::BasicCsrConstRef {
    Tag const tag;  // runtime scalar type of `void_data`

    template<class scalar_t>
    AnyCsrConstRef(CsrConstRef<scalar_t> const& other)
        : detail::BasicCsrConstRef(other), tag(detail::get_tag<scalar_t>()) {}

    // 1D array views over the three CSR buffers
    ArrayConstRef data_ref() const { return {void_data, tag, 1, true, nnz}; }
    ArrayConstRef indices_ref() const { return arrayref(indices, nnz); }
    ArrayConstRef indptr_ref() const { return arrayref(indptr, rows + 1); }
};

/**
 Template reference to a CSR matrix with a few possible scalar types
 */
template<class Scalar, class... Scalars>
struct VariantCsrConstRef : AnyCsrConstRef {
    using Types = TypeList<Scalar, Scalars...>;

    // Only convertible from refs whose scalar type is in `Types` (SFINAE)
    template<class scalar_t, class = std14::enable_if_t<tl::AnyOf<Types, scalar_t>::value>>
    VariantCsrConstRef(CsrConstRef<scalar_t> const& other) : AnyCsrConstRef(other) {}
};

/**
 Common VariantCsrConstRef aliases
 */
using RealCsrConstRef = VariantCsrConstRef<float, double>;
using ComplexCsrConstRef = VariantCsrConstRef<
    float, double, std::complex<float>, std::complex<double>
>;

namespace detail {
/**
 Reference to ELLPACK matrix of any type
 */
struct BasicEllConstRef {
    int const rows;
    int const cols;
    int const nnz_per_row;               // stored values per row
    int const pitch;                     // row stride of the backing arrays (>= rows)
    void const* const void_data;         // type-erased values, column-major (pitch x nnz_per_row)
    storage_idx_t const* const indices;  // column index per stored value

    /// Total number of stored elements including pitch padding
    int size() const { return nnz_per_row * pitch; }
};
} // namespace detail

/**
 Template reference to an ELLPACK matrix with one specific scalar type
 */
template<class scalar_t>
struct EllConstRef : detail::BasicEllConstRef {
    using type = scalar_t;

    EllConstRef(int rows, int cols, int nnz_per_row, int pitch,
                scalar_t const* data, storage_idx_t const* indices)
        : detail::BasicEllConstRef{rows, cols, nnz_per_row, pitch, data, indices} {}

    /// The values pointer with its concrete scalar type restored
    scalar_t const* data() const { return static_cast<scalar_t const*>(void_data); }
};

/**
 Tagged reference to an ELLPACK matrix of any scalar type
 */
struct AnyEllConstRef : detail::BasicEllConstRef {
    Tag const tag;  // runtime scalar type of `void_data`

    template<class scalar_t>
    AnyEllConstRef(EllConstRef<scalar_t> const& other)
        : detail::BasicEllConstRef(other), tag(num::detail::get_tag<scalar_t>()) {}

    // 1D array views over the two ELLPACK buffers
    ArrayConstRef data_ref() const { return {void_data, tag, 1, true, size()}; }
    ArrayConstRef indices_ref() const { return arrayref(indices, size()); }
};

/**
 Template reference to a ELLPACK matrix with a few possible scalar types
 */
template<class Scalar, class... Scalars>
struct VariantEllConstRef : AnyEllConstRef {
    using Types = TypeList<Scalar, Scalars...>;

    // Only convertible from refs whose scalar type is in `Types` (SFINAE)
    template<class scalar_t, class = std14::enable_if_t<tl::AnyOf<Types, scalar_t>::value>>
    VariantEllConstRef(EllConstRef<scalar_t> const& other) : AnyEllConstRef(other) {}
};

/**
 Common VariantEllConstRef aliases
 */
using RealEllConstRef = VariantEllConstRef<float, double>;
using ComplexEllConstRef = VariantEllConstRef<
    float, double, std::complex<float>, std::complex<double>
>;

}} // namespace cpb::num
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/numeric/random.hpp | .hpp | 1,481 | 53 | #pragma once
#include "numeric/dense.hpp"
#include "support/cppfuture.hpp"
#include <random>
namespace cpb { namespace num {
namespace detail {
template<class Container>
using get_element_t = get_real_t<std14::decay_t<decltype(std::declval<Container>()[0])>>;
template<class scalar_t>
using select_distribution = std14::conditional_t<
std::is_floating_point<scalar_t>::value,
std::uniform_real_distribution<scalar_t>,
std::uniform_int_distribution<scalar_t>
>;
}
/**
Fill the container with uniformly distributed random data
*/
template<class Container>
void random_fill(Container& container, std::mt19937& generator) {
using scalar_t = detail::get_element_t<Container>;
static_assert(std::is_arithmetic<scalar_t>::value, "");
auto distribution = detail::select_distribution<scalar_t>();
for (auto& value : container) {
value = distribution(generator);
}
}
/**
 Create a `Container` of the given `size` and fill it with random data
 uniformly distributed on the interval [0, 1) for real numbers
 or [0, int_max] for integers
 */
template<class Container, class Size>
Container make_random(Size size, std::mt19937& generator) {
    auto container = Container(size);
    random_fill(container, generator);
    return container;
}
/// Overload with a default-seeded generator: results are deterministic across
/// calls because `std::mt19937` always starts from its default seed constant
template<class Container, class Size>
Container make_random(Size size) {
    auto generator = std::mt19937{};
    return make_random<Container>(size, generator);
}
}} // namespace cpb::num
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/numeric/ellmatrix.hpp | .hpp | 2,564 | 85 | #pragma once
#include "numeric/dense.hpp"
#include "numeric/sparseref.hpp"
namespace cpb { namespace num {
/**
ELLPACK format sparse matrix
*/
template<class scalar_t>
class EllMatrix {
using DataArray = ColMajorArrayXX<scalar_t>;
using IndexArray = ColMajorArrayXX<storage_idx_t>;
static constexpr auto align_bytes = 32;
public:
idx_t _rows, _cols;
idx_t nnz_per_row;
DataArray data;
IndexArray indices;
public:
using Scalar = scalar_t;
using StorageIndex = storage_idx_t;
EllMatrix() = default;
EllMatrix(idx_t rows, idx_t cols, idx_t nnz_per_row)
: _rows(rows), _cols(cols), nnz_per_row(nnz_per_row) {
data.resize(aligned_size<scalar_t, align_bytes>(rows), nnz_per_row);
indices.resize(aligned_size<storage_idx_t, align_bytes>(rows), nnz_per_row);
}
idx_t rows() const { return _rows; }
idx_t cols() const { return _cols; }
idx_t nonZeros() const { return _rows * nnz_per_row; }
template<class F>
void for_each(F lambda) const {
for (auto n = 0; n < nnz_per_row; ++n) {
for (auto row = 0; row < _rows; ++row) {
lambda(row, indices(row, n), data(row, n));
}
}
}
template<class F>
void for_slice(idx_t start, idx_t end, F lambda) const {
for (auto n = 0; n < nnz_per_row; ++n) {
for (auto row = start; row < end; ++row) {
lambda(row, indices(row, n), data(row, n));
}
}
}
};
/// Return a non-owning ELLPACK matrix reference (`m` must outlive it)
template<class scalar_t>
inline EllConstRef<scalar_t> ellref(EllMatrix<scalar_t> const& m) {
    // The pitch is the aligned (padded) row count of the backing arrays,
    // which may be larger than the logical `m.rows()`
    return {m.rows(), m.cols(), m.nnz_per_row, static_cast<int>(m.data.rows()),
            m.data.data(), m.indices.data()};
}
/// Convert an Eigen CSR matrix to ELLPACK
template<class scalar_t>
num::EllMatrix<scalar_t> csr_to_ell(SparseMatrixX<scalar_t> const& csr) {
    // The ELL width is set by the densest CSR row; sparser rows get padded
    auto ell = num::EllMatrix<scalar_t>(csr.rows(), csr.cols(),
                                        sparse::max_nnz_per_row(csr));
    auto const loop = sparse::make_loop(csr);
    for (auto row = 0; row < csr.rows(); ++row) {
        auto n = 0;
        // Copy this row's actual non-zeros into ELL slots 0..n-1
        loop.for_each_in_row(row, [&](storage_idx_t col, scalar_t value) {
            ell.data(row, n) = value;
            ell.indices(row, n) = col;
            ++n;
        });
        // Pad the remaining slots with zero values. The padding index copies
        // the previous row's index in the same slot rather than a fixed 0 --
        // presumably so padded gathers stay close to recently touched data
        // during SpMV (TODO(review): confirm intent).
        for (; n < ell.nnz_per_row; ++n) {
            ell.data(row, n) = scalar_t{0};
            ell.indices(row, n) = (row > 0) ? ell.indices(row - 1, n) : 0;
        }
    }
    return ell;
}
}} // namespace cpb::num
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/numeric/sparse.hpp | .hpp | 6,234 | 194 | #pragma once
#include "detail/config.hpp"
#include "numeric/sparseref.hpp"
#include <Eigen/SparseCore>
namespace cpb {
template <class scalar_t>
using SparseMatrixX = Eigen::SparseMatrix<scalar_t, Eigen::RowMajor, storage_idx_t>;
using SparseMatrixXf = SparseMatrixX<float>;
using SparseMatrixXcf = SparseMatrixX<std::complex<float>>;
using SparseMatrixXd = SparseMatrixX<double>;
using SparseMatrixXcd = SparseMatrixX<std::complex<double>>;
using num::RealCsrConstRef;
using num::ComplexCsrConstRef;
using num::RealEllConstRef;
using num::ComplexEllConstRef;
/**
 Return a CSR matrix reference

 The reference only borrows the matrix data: `m` must outlive the result.
 */
template<class scalar_t>
inline num::CsrConstRef<scalar_t> csrref(SparseMatrixX<scalar_t> const& m) {
    // Eigen reports sizes as `Index` (typically ptrdiff_t) but the ref stores `int`
    return {static_cast<int>(m.rows()), static_cast<int>(m.cols()),
            static_cast<int>(m.nonZeros()),
            m.valuePtr(), m.innerIndexPtr(), m.outerIndexPtr()};
} // note: the stray semicolon after this function body has been removed
/**
 Incrementally builds a compressed (CSR) sparse matrix, row by row

 Usage: call `start_row()` once per row, `insert()` for each element of that
 row (in any column order -- elements are kept sorted), then `compress()`
 once at the end to close the outer index array and trim excess capacity.
 */
template<class scalar_t>
class CompressedInserter {
public:
    /// Reserve room for `size` non-zero values up front
    CompressedInserter(SparseMatrixX<scalar_t>& mat, idx_t size)
        : matrix(mat) { matrix.reserve(size); }

    /// Begin the next row: record the current value count as its start offset
    void start_row() {
        matrix.outerIndexPtr()[row++] = static_cast<storage_idx_t>(idx);
    }

    /// Begin row `row_index`, closing any skipped (empty) rows along the way
    void start_row(idx_t row_index) {
        while (row <= row_index)
            matrix.outerIndexPtr()[row++] = static_cast<storage_idx_t>(idx);
    }

    /// Insert a value into the current row, keeping column indices sorted
    void insert(idx_t column, scalar_t value) {
        auto start_idx = matrix.outerIndexPtr()[row-1];
        auto n = idx++;
        // Insertion sort within the row: shift larger columns one slot right
        while (n > start_idx && matrix.innerIndexPtr()[n - 1] > column) {
            matrix.innerIndexPtr()[n] = matrix.innerIndexPtr()[n - 1];
            matrix.valuePtr()[n] = matrix.valuePtr()[n - 1];
            --n;
        }
        matrix.innerIndexPtr()[n] = static_cast<storage_idx_t>(column);
        matrix.valuePtr()[n] = value;
    }

    /// Finalize the matrix: close the outer index array and trim capacity
    void compress() {
        // close outerIndexPtr
        start_row(matrix.outerSize());
        // trim valuePtr and innerIndexPtr
        matrix.resizeNonZeros(idx);
    }

private:
    idx_t idx = 0; ///< total number of values inserted so far
    idx_t row = 0; ///< number of rows started so far
    SparseMatrixX<scalar_t>& matrix;
};
/// Convenience factory for `CompressedInserter` (deduces the scalar type)
template<class scalar_t>
inline CompressedInserter<scalar_t> compressed_inserter(SparseMatrixX<scalar_t>& mat, idx_t size) {
    return CompressedInserter<scalar_t>(mat, size);
}
/// Return an inner iterator over the non-zeros of the row (or column, for
/// column-major matrices) at `outer_index`
template<class SparseMatrix, class Index>
inline auto sparse_row(const SparseMatrix& mat, Index outer_index)
    -> typename SparseMatrix::InnerIterator
{
    return {mat, outer_index};
}
namespace num {
    /// Cast a complex-double sparse matrix to `scalar_t`; the real
    /// specializations below discard the imaginary part entirely
    template<class scalar_t>
    SparseMatrixX<scalar_t> force_cast(SparseMatrixXcd const& m) { return m.cast<scalar_t>(); }
    template<>
    inline SparseMatrixX<double> force_cast<double>(SparseMatrixXcd const& m) { return m.real(); }
    template<>
    inline SparseMatrixX<float> force_cast<float>(SparseMatrixXcd const& m) {
        return m.real().cast<float>();
    }
} // namespace num
namespace sparse {
/// SparseMatrix wrapper with several functions for efficient CSR matrix element access
///
/// Only borrows raw pointers from the wrapped matrix: the matrix must outlive
/// the `Loop` and must already be in compressed form.
template<class scalar_t>
class Loop {
public:
    Loop(SparseMatrixX<scalar_t> const& matrix)
        : outer_size(matrix.outerSize()), data(matrix.valuePtr()),
          indices(matrix.innerIndexPtr()), indptr(matrix.outerIndexPtr()) {}

    /// Visit each index and value of the sparse matrix:
    ///     lambda(idx_t outer, idx_t inner, scalar_t value)
    template<class F>
    void for_each(F lambda) const {
        for (auto outer = idx_t{0}; outer < outer_size; ++outer) {
            for (auto idx = indptr[outer]; idx < indptr[outer + 1]; ++idx) {
                lambda(outer, indices[idx], data[idx]);
            }
        }
    }

    /// Visit each index and value of the sparse matrix:
    ///     lambda(idx_t outer, idx_t inner, scalar_t value, idx_t buffer_position)
    /// After every 'buffer_size' iterations, the 'process_buffer' function is called:
    ///     process_buffer(idx_t start_outer, idx_t start_data, idx_t last_buffer_size)
    template<class F1, class F2>
    void buffered_for_each(idx_t buffer_size, F1 lambda, F2 process_buffer) const {
        auto n = idx_t{0};
        auto previous_outer = idx_t{0};
        auto previous_idx = static_cast<idx_t>(indptr[0]);
        for (auto outer = idx_t{0}; outer < outer_size; ++outer) {
            for (auto idx = indptr[outer]; idx < indptr[outer + 1]; ++idx, ++n) {
                if (n == buffer_size) {
                    // Buffer is full: flush it before continuing the iteration
                    process_buffer(previous_outer, previous_idx, buffer_size);
                    previous_outer = outer;
                    previous_idx = idx;
                    n = 0;
                }
                lambda(outer, indices[idx], data[idx], n);
            }
        }
        // Flush the final, possibly partially filled, buffer
        process_buffer(previous_outer, previous_idx, n);
    }

    /// Iterate over all elements in a single row (or column) at the 'outer' index:
    ///     lambda(idx_t inner, scalar_t value)
    template<class F>
    void for_each_in_row(idx_t outer, F lambda) const {
        for (auto idx = indptr[outer]; idx < indptr[outer + 1]; ++idx) {
            lambda(indices[idx], data[idx]);
        }
    }

    /// Start iteration from some position given by 'outer' and 'data' indices
    /// and loop for 'slice_size' iterations:
    ///     lambda(idx_t outer, idx_t inner, scalar_t value, idx_t current_iteration)
    template<class F>
    void slice_for_each(idx_t outer, idx_t idx, idx_t slice_size, F lambda) const {
        auto n = idx_t{0};
        for (; outer < outer_size; ++outer) {
            for (; idx < indptr[outer + 1]; ++idx, ++n) {
                if (n == slice_size)
                    return;
                lambda(outer, indices[idx], data[idx], n);
            }
        }
    }

private:
    idx_t const outer_size; ///< number of rows (CSR) / columns (CSC)
    scalar_t const* const data; ///< non-zero values
    storage_idx_t const* const indices; ///< inner index of each value
    storage_idx_t const* const indptr; ///< start offset of each outer index
};
/// Convenience factory for `Loop` (deduces the scalar type)
template<class scalar_t>
inline Loop<scalar_t> make_loop(SparseMatrixX<scalar_t> const& m) {
    return Loop<scalar_t>(m);
}
/**
 Return the maximum number of non-zeros per row

 The matrix must be in compressed form (valid outer index array).
 */
template<class scalar_t>
idx_t max_nnz_per_row(SparseMatrixX<scalar_t> const& m) {
    // `idx_t` (not `int`) to match the return type and avoid mixed-width
    // comparisons against Eigen's index types
    auto max = idx_t{0};
    for (auto i = idx_t{0}; i < m.outerSize(); ++i) {
        // Row length = difference of consecutive outer index offsets
        auto const nnz = static_cast<idx_t>(m.outerIndexPtr()[i + 1] - m.outerIndexPtr()[i]);
        if (nnz > max) {
            max = nnz;
        }
    }
    return max;
}
}} // namespace cpb::sparse
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/hamiltonian/Hamiltonian.hpp | .hpp | 6,535 | 179 | #pragma once
#include "hamiltonian/HamiltonianModifiers.hpp"
#include "numeric/dense.hpp"
#include "numeric/sparse.hpp"
#include "numeric/traits.hpp"
#include "numeric/constant.hpp"
#include "support/variant.hpp"
namespace cpb {
/**
 Stores a CSR matrix variant of various scalar types: real or complex, single or double precision.
 The internal storage is reference counted which makes instances of this class relatively cheap
 to copy. The matrix itself is immutable (for safety with the reference counting).
 */
class VariantCSR {
    using Variant = var::complex<SparseMatrixX>;
    std::shared_ptr<Variant const> ptr; ///< shared, immutable storage

public:
    VariantCSR() = default;
    /// Copy the given matrix into shared storage
    template<class scalar_t>
    VariantCSR(SparseMatrixX<scalar_t> const& m) : ptr(std::make_shared<Variant>(m)) {}
    /// Move the given matrix into shared storage (no copy of the data arrays)
    template<class scalar_t>
    VariantCSR(SparseMatrixX<scalar_t>&& m) : ptr(std::make_shared<Variant>(m.markAsRValue())) {}

    /// False if no matrix is stored
    explicit operator bool() const { return static_cast<bool>(ptr); }
    void reset() { ptr.reset(); }

    /// Access the stored matrix as the given scalar type (must match the stored type)
    template<class scalar_t>
    auto get() const -> decltype(ptr->template get<SparseMatrixX<scalar_t>>()) {
        return ptr->template get<SparseMatrixX<scalar_t>>();
    }

    /// Visit the stored matrix with the given overload set, whatever its scalar type
    template<class... Args>
    auto match(Args&&... args) const -> decltype(ptr->match(std::forward<Args>(args)...)) {
        return ptr->match(std::forward<Args>(args)...);
    }
};
template<class scalar_t>
using SparseMatrixRC = std::shared_ptr<SparseMatrixX<scalar_t> const>;
/**
 Stores a tight-binding Hamiltonian as a sparse matrix variant
 with real or complex scalar type and single or double precision.
 */
class Hamiltonian {
public:
    Hamiltonian() = default;
    /// Take shared ownership of an existing matrix of any supported scalar type
    template<class scalar_t>
    Hamiltonian(std::shared_ptr<SparseMatrixX<scalar_t>> p) : variant_matrix(std::move(p)) {}
    template<class scalar_t>
    Hamiltonian(std::shared_ptr<SparseMatrixX<scalar_t> const> p) : variant_matrix(std::move(p)) {}

    var::complex<SparseMatrixRC> const& get_variant() const { return variant_matrix; }

    /// False if no matrix has been set
    explicit operator bool() const;
    void reset();

    /// Type-erased CSR reference to the stored matrix
    ComplexCsrConstRef csrref() const;
    idx_t non_zeros() const;
    idx_t rows() const;
    idx_t cols() const;

private:
    var::complex<SparseMatrixRC> variant_matrix;
};
namespace detail {
/// Build the main (non-periodic) part of the Hamiltonian: onsite terms on the
/// diagonal blocks and hoppings (plus their Hermitian conjugates) off-diagonal
template<class scalar_t>
void build_main(SparseMatrixX<scalar_t>& matrix, System const& system, Lattice const& lattice,
                HamiltonianModifiers const& modifiers, bool simple_build) {
    auto const size = system.hamiltonian_size();
    matrix.resize(size, size);
    if (simple_build) {
        // Fast path: No generators were used (only unit cell replication + modifiers)
        // so we can easily predict the maximum number of non-zero values per row.
        auto const has_diagonal = lattice.has_diagonal_terms() || !modifiers.onsite.empty();
        auto const num_per_row = lattice.max_hoppings() + has_diagonal;
        matrix.reserve(ArrayXi::Constant(size, num_per_row));
        modifiers.apply_to_onsite<scalar_t>(system, [&](idx_t i, idx_t j, scalar_t onsite) {
            matrix.insert(i, j) = onsite;
        });
        // Each hopping is inserted together with its Hermitian conjugate
        modifiers.apply_to_hoppings<scalar_t>(system, [&](idx_t i, idx_t j, scalar_t hopping) {
            matrix.insert(i, j) = hopping;
            matrix.insert(j, i) = num::conjugate(hopping);
        });
    } else {
        // Slow path: Users can do anything with generators which makes the number of non-zeros
        // per row difficult to count (possible but not worth it over building from triplets).
        auto triplets = std::vector<Eigen::Triplet<scalar_t>>();
        triplets.reserve(system.hamiltonian_nnz());
        // Helper lambda which does a `idx_t` -> `storage_idx_t` cast for the indices.
        auto to_triplet = [](idx_t i, idx_t j, scalar_t value) -> Eigen::Triplet<scalar_t> {
            return {static_cast<storage_idx_t>(i), static_cast<storage_idx_t>(j), value};
        };
        modifiers.apply_to_onsite<scalar_t>(system, [&](idx_t i, idx_t j, scalar_t onsite) {
            triplets.push_back(to_triplet(i, j, onsite));
        });
        modifiers.apply_to_hoppings<scalar_t>(system, [&](idx_t i, idx_t j, scalar_t hopping) {
            triplets.push_back(to_triplet(i, j, hopping));
            triplets.push_back(to_triplet(j, i, num::conjugate(hopping)));
        });
        matrix.setFromTriplets(triplets.begin(), triplets.end());
    }
}
/// Add boundary hoppings for periodic systems, weighted by the Bloch
/// phase factor exp(i k.d) of each boundary's translation vector `d`
template<class scalar_t>
void build_periodic(SparseMatrixX<scalar_t>& matrix, System const& system,
                    HamiltonianModifiers const& modifiers, Cartesian k_vector) {
    for (auto n = size_t{0}, size = system.boundaries.size(); n < size; ++n) {
        using constant::i1;
        auto const& d = system.boundaries[n].shift;
        // NOTE(review): `force_cast` keeps only the real part when `scalar_t`
        // is real -- presumably a real Hamiltonian is only requested when the
        // phase is real (k.d multiple of pi). Confirm against the caller.
        auto const phase = num::force_cast<scalar_t>(exp(i1 * k_vector.dot(d)));
        modifiers.apply_to_hoppings<scalar_t>(system, n, [&](idx_t i, idx_t j, scalar_t hopping) {
            // Accumulate (+=): several boundaries may touch the same element
            matrix.coeffRef(i, j) += hopping * phase;
            matrix.coeffRef(j, i) += num::conjugate(hopping * phase);
        });
    }
}
/// Check that all the values in the matrix are finite; throws otherwise
template<class scalar_t>
void throw_if_invalid(SparseMatrixX<scalar_t> const& m) {
    // Map the raw value array so the whole check is a single Eigen reduction
    auto const values = Eigen::Map<ArrayX<scalar_t> const>(m.valuePtr(), m.nonZeros());
    if (values.allFinite()) { return; }
    throw std::runtime_error("The Hamiltonian contains invalid values: NaN or INF.\n"
                             "Check the lattice and/or modifier functions.");
}
} // namespace detail
namespace ham {
/// Extract the reference-counted matrix pointer of the given scalar type
/// (the Hamiltonian must currently hold that type)
template<class scalar_t>
inline SparseMatrixRC<scalar_t> get_shared_ptr(Hamiltonian const& h) {
    return var::get<SparseMatrixRC<scalar_t>>(h.get_variant());
}
/// Extract a matrix reference of the given scalar type (must match the stored type)
template<class scalar_t>
inline SparseMatrixX<scalar_t> const& get_reference(Hamiltonian const& h) {
    return *var::get<SparseMatrixRC<scalar_t>>(h.get_variant());
}
/// Does the Hamiltonian currently hold a matrix of the given scalar type?
template<class scalar_t>
inline bool is(Hamiltonian const& h) {
    return h.get_variant().template is<SparseMatrixRC<scalar_t>>();
}
/// Assemble the full Hamiltonian for the given system: main terms first,
/// then periodic boundary terms, finishing with compression and validation
template<class scalar_t>
Hamiltonian make(System const& system, Lattice const& lattice,
                 HamiltonianModifiers const& modifiers, Cartesian k_vector, bool simple_build) {
    auto result = std::make_shared<SparseMatrixX<scalar_t>>();
    auto& h = *result;
    detail::build_main(h, system, lattice, modifiers, simple_build);
    detail::build_periodic(h, system, modifiers, k_vector);
    h.makeCompressed();
    detail::throw_if_invalid(h);
    return result;
}
} // namespace ham
} // namespace cpb
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/hamiltonian/HamiltonianModifiers.hpp | .hpp | 11,713 | 292 | #pragma once
#include "system/System.hpp"
#include "numeric/dense.hpp"
#include "numeric/sparse.hpp"
#include "detail/macros.hpp"
#include "detail/algorithm.hpp"
#include <vector>
#include <memory>
namespace cpb {
/**
 Thrown by a modifier if it determines that complex numbers must be
 returned even though it was given real input data. The model will
 catch this and switch the scalar type to complex.
 */
class ComplexOverride : public std::exception {
public:
    char const* what() const noexcept override {
        // Static storage: the message stays valid after the exception unwinds
        static constexpr char const msg[] =
            "Trying to return a complex result from a real modifier.";
        return msg;
    }
};
/**
 Modify the onsite energy, e.g. to apply an electric field
 */
class OnsiteModifier {
public:
    /// Signature of the user function: writes new values into `energy` in place,
    /// given the site `positions` and the `sublattice` name
    using Function = std::function<void(ComplexArrayRef energy, CartesianArrayConstRef positions,
                                        string_view sublattice)>;
    Function apply; ///< to be user-implemented
    bool is_complex = false; ///< the modeled effect requires complex values
    bool is_double = false; ///< the modeled effect requires double precision

    OnsiteModifier(Function const& apply, bool is_complex = false, bool is_double = false)
        : apply(apply), is_complex(is_complex), is_double(is_double) {}

    /// False for an empty (default-constructed) `apply` function
    explicit operator bool() const { return static_cast<bool>(apply); }
};
/**
 Modify the hopping energy, e.g. to apply a magnetic field
 */
class HoppingModifier {
public:
    /// Signature of the user function: writes new values into `energy` in place,
    /// given both endpoint positions and the hopping family name
    using Function = std::function<void(ComplexArrayRef energy, CartesianArrayConstRef pos1,
                                        CartesianArrayConstRef pos2, string_view hopping_family)>;
    Function apply; ///< to be user-implemented
    bool is_complex = false; ///< the modeled effect requires complex values
    bool is_double = false; ///< the modeled effect requires double precision

    HoppingModifier(Function const& apply, bool is_complex = false, bool is_double = false)
        : apply(apply), is_complex(is_complex), is_double(is_double) {}

    /// False for an empty (default-constructed) `apply` function
    explicit operator bool() const { return static_cast<bool>(apply); }
};
/**
 Container with some convenience functions

 Holds all user-registered onsite and hopping modifiers and applies them
 to a system when the Hamiltonian is built.
 */
struct HamiltonianModifiers {
    std::vector<OnsiteModifier> onsite;
    std::vector<HoppingModifier> hopping;

    /// Do any of the modifiers require complex numbers?
    bool any_complex() const;
    /// Do any of the modifiers require double precision?
    bool any_double() const;
    /// Remove all modifiers
    void clear();

    /// Apply onsite modifiers to the given system and pass results to function:
    ///     lambda(int i, scalar_t onsite)
    template<class scalar_t, class Fn>
    void apply_to_onsite(System const& system, Fn lambda) const;

    /// Apply hopping modifiers to the given system (or boundary) and pass results to:
    ///     lambda(int i, int j, scalar_t hopping)
    // (the stray semicolons that followed these inline definitions were removed)
    template<class scalar_t, class Fn>
    void apply_to_hoppings(System const& system, Fn fn) const {
        apply_to_hoppings_impl<scalar_t>(system, system, fn);
    }

    /// Same, but only for hoppings crossing the boundary at `boundary_index`
    template<class scalar_t, class Fn>
    void apply_to_hoppings(System const& system, size_t boundary_index, Fn fn) const {
        apply_to_hoppings_impl<scalar_t>(system, system.boundaries[boundary_index], fn);
    }

private:
    template<class scalar_t, class SystemOrBoundary, class Fn>
    void apply_to_hoppings_impl(System const& system, SystemOrBoundary const& system_or_boundary,
                                Fn lambda) const;
};
namespace detail {
    /// Destination position of a hopping: unchanged within the main system...
    inline Cartesian shifted(Cartesian pos, System const&) { return pos; }
    /// ...but translated back by the boundary shift for boundary hoppings
    inline Cartesian shifted(Cartesian pos, System::Boundary const& b) { return pos - b.shift; }
}
/// Combine the lattice's intrinsic onsite energy with all user modifiers and
/// report each non-zero value at its Hamiltonian (row, col) position via
/// `lambda(idx_t row, idx_t col, scalar_t value)`
template<class scalar_t, class Fn>
void HamiltonianModifiers::apply_to_onsite(System const& system, Fn lambda) const {
    auto const has_intrinsic_onsite = system.site_registry.has_nonzero_energy();
    // Nothing to do when the lattice has no onsite energy and no modifiers exist
    if (!has_intrinsic_onsite && onsite.empty()) {
        return;
    }
    for (auto const& sub : system.compressed_sublattices) {
        auto const nsites = sub.num_sites();
        auto const norb = sub.num_orbitals();
        // Flat buffer laid out as norb x norb blocks of `nsites` values each
        auto onsite_energy = ArrayX<scalar_t>::Zero(nsites * norb * norb).eval();
        if (has_intrinsic_onsite) {
            // Intrinsic lattice onsite energy -- just replicate the value at each site
            auto const intrinsic_energy = num::force_cast<scalar_t>(
                system.site_registry.energy(sub.id())
            );
            auto start = idx_t{0};
            for (auto const& value : intrinsic_energy) {
                onsite_energy.segment(start, nsites).setConstant(value);
                start += nsites;
            }
        }
        if (!onsite.empty()) {
            // Apply all user-defined onsite modifier functions
            auto onsite_ref = (norb == 1) ? arrayref(onsite_energy.data(), nsites)
                                          : arrayref(onsite_energy.data(), norb, norb, nsites);
            auto const position_ref = system.positions.segment(sub.sys_start(), nsites);
            auto const sub_name = system.site_registry.name(sub.id());
            for (auto const& modifier : onsite) {
                modifier.apply(onsite_ref, position_ref, sub_name);
            }
        }
        // Pass along each onsite value at the correct Hamiltonian row and column indices
        auto const* data = onsite_energy.data();
        for (auto i = idx_t{0}; i < norb; ++i) {
            for (auto j = idx_t{0}; j < norb; ++j) {
                // Step by `norb`: consecutive Hamiltonian rows of the same site
                for (auto idx = sub.ham_start(), end = sub.ham_end(); idx < end; idx += norb) {
                    auto const value = *data++;
                    // Skip zeros to keep the matrix sparse
                    if (value != scalar_t{0}) {
                        lambda(i + idx, j + idx, value);
                    }
                }
            }
        }
    } // for (auto const& sub : system.compressed_sublattices)
}
/**
 Translate System indices into Hamiltonian indices

 In multi-orbital models a single system site maps onto a block of
 Hamiltonian rows/columns; this class performs that mapping.
 */
class IndexTranslator {
public:
    /// A translator is specific to a system and hopping family -- the matrix dimensions
    IndexTranslator(System const& system, MatrixXcd const& hopping_matrix)
        : term_size(hopping_matrix.rows(), hopping_matrix.cols()),
          sys_start(system.compressed_sublattices.start_index(term_size.row),
                    system.compressed_sublattices.start_index(term_size.col)),
          ham_start(system.to_hamiltonian_indices(sys_start.row)[0],
                    system.to_hamiltonian_indices(sys_start.col)[0]) {}

    /// Loop over all Hamiltonian indices matching the System indices given in `coordinates`:
    ///     lambda(idx_t row, idx_t col, scalar_t value)
    /// where `row` and `col` are the Hamiltonian indices.
    template<class C, class F, class V, class scalar_t = typename V::Scalar> CPB_ALWAYS_INLINE
    void for_each(C const& coordinates, V const& hopping_buffer, F lambda) const {
        // The buffer is consumed orbital-pair-major: all coordinates for one
        // (i, j) orbital pair are contiguous (matches HoppingBuffer layout)
        auto const* data = hopping_buffer.data();
        for (auto i = ham_start.row; i < ham_start.row + term_size.row; ++i) {
            for (auto j = ham_start.col; j < ham_start.col + term_size.col; ++j) {
                for (auto const& coo : coordinates) {
                    auto const ham_row = i + (coo.row - sys_start.row) * term_size.row;
                    auto const ham_col = j + (coo.col - sys_start.col) * term_size.col;
                    auto const value = *data++;
                    // Skip explicit zeros to keep the Hamiltonian sparse
                    if (value != scalar_t{0}) {
                        lambda(ham_row, ham_col, value);
                    }
                }
            }
        }
    }

private:
    COO term_size; ///< size of the hopping matrix (single hopping term)
    COO sys_start; ///< start index in System coordinates
    COO ham_start; ///< start index in Hamiltonian coordinates
};
/**
 Buffer for intermediate hoppings and positions required by hopping modifiers

 Applying modifiers to each hopping individually would be slow.
 Passing all the values in one call would require a lot of memory.
 Buffering the hoppings balances performance and memory usage.
 */
template<class scalar_t>
struct HoppingBuffer {
    static constexpr auto max_buffer_size = idx_t{100000};

    idx_t size; ///< number of elements in the buffer
    MatrixX<scalar_t> unit_hopping; ///< to be replicated `size` times
    ArrayX<scalar_t> hoppings; ///< actually a 3D array: `size` * `unit.rows()` * `unit.cols()`
    CartesianArray pos1; ///< hopping source position
    CartesianArray pos2; ///< hopping destination position

    // NOTE(review): if `unit_hopping.size()` ever exceeded `max_buffer_size`,
    // `size` would become 0 -- presumably hopping matrices are always far
    // smaller than 100000 elements; confirm that invariant holds upstream.
    HoppingBuffer(MatrixXcd const& unit_hopping, idx_t block_size)
        : size(std::min(max_buffer_size / unit_hopping.size(), block_size)),
          unit_hopping(num::force_cast<scalar_t>(unit_hopping)),
          hoppings(size * unit_hopping.size()),
          pos1(size), pos2(size) {}

    /// Replicate each value from the `unit_hopping` matrix `num` times
    void reset_hoppings(idx_t num) {
        auto start = idx_t{0};
        for (auto const& value : unit_hopping) {
            hoppings.segment(start, num).setConstant(value);
            start += num;
        }
    }

    /// Return an `arrayref` of the first `num` elements (each element is a hopping matrix)
    ComplexArrayRef hoppings_ref(idx_t num) {
        auto const rows = unit_hopping.rows();
        auto const cols = unit_hopping.cols();
        return (rows == 1 && cols == 1) ? arrayref(hoppings.data(), num)
                                        : arrayref(hoppings.data(), rows, cols, num);
    }
};
/// Shared implementation for the main system and for boundaries: visits every
/// hopping as `lambda(idx_t row, idx_t col, scalar_t value)` in Hamiltonian indices
template<class scalar_t, class SystemOrBoundary, class Fn>
void HamiltonianModifiers::apply_to_hoppings_impl(System const& system,
                                                  SystemOrBoundary const& system_or_boundary,
                                                  Fn lambda) const {
    auto const& hopping_registry = system.hopping_registry;
    // Fast path: Modifiers don't need to be applied and the single-orbital model
    // allows direct mapping between sites and Hamiltonian matrix elements.
    if (hopping.empty() && !hopping_registry.has_multiple_orbitals()) {
        for (auto const& block : system_or_boundary.hopping_blocks) {
            auto const energy = num::force_cast<scalar_t>(
                hopping_registry.energy(block.family_id())
            );
            auto const value = energy(0, 0); // single orbital
            for (auto const& coo : block.coordinates()) {
                lambda(coo.row, coo.col, value);
            }
        }
        return;
    }
    // Slow path: Apply modifiers and/or consider multiple orbitals which
    // require translating between site and Hamiltonian matrix indices.
    for (auto const& block : system_or_boundary.hopping_blocks) {
        if (block.size() == 0) { continue; }
        auto const& hopping_energy = hopping_registry.energy(block.family_id());
        auto const hopping_name = hopping_registry.name(block.family_id());
        auto const index_translator = IndexTranslator(system, hopping_energy);
        auto buffer = HoppingBuffer<scalar_t>(hopping_energy, block.size());
        // Process the block in buffer-sized slices to bound peak memory use
        for (auto const coo_slice : sliced(block.coordinates(), buffer.size)) {
            auto size = idx_t{0};
            // Gather endpoint positions (boundary destinations are shifted back)
            for (auto const& coo : coo_slice) {
                buffer.pos1[size] = system.positions[coo.row];
                buffer.pos2[size] = detail::shifted(system.positions[coo.col], system_or_boundary);
                ++size;
            }
            // Seed the buffer with the unmodified hopping values, then let
            // every modifier transform them in place
            buffer.reset_hoppings(size);
            for (auto const& modifier : hopping) {
                modifier.apply(buffer.hoppings_ref(size), buffer.pos1.head(size),
                               buffer.pos2.head(size), hopping_name);
            }
            index_translator.for_each(coo_slice, buffer.hoppings, lambda);
        }
    }
}
} // namespace cpb
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/compute/kernel_polynomial.hpp | .hpp | 12,253 | 327 | #pragma once
#include "numeric/dense.hpp"
#include "numeric/sparse.hpp"
#include "numeric/ellmatrix.hpp"
#include "numeric/traits.hpp"
#include "compute/detail.hpp"
#include "detail/macros.hpp"
#include "support/simd.hpp"
namespace cpb { namespace compute {
/**
 KPM-specialized sparse matrix-vector multiplication (CSR, off-diagonal)

 Equivalent to: y = matrix * x - y, applied to rows in [start, end)
 */
template<class scalar_t> CPB_ALWAYS_INLINE
void kpm_spmv(idx_t start, idx_t end, SparseMatrixX<scalar_t> const& matrix,
              VectorX<scalar_t> const& x, VectorX<scalar_t>& y) {
    auto const values = matrix.valuePtr();
    auto const cols = matrix.innerIndexPtr();
    auto const row_ptr = matrix.outerIndexPtr();
    for (auto row = start; row < end; ++row) {
        // Accumulate the row product, then fold in the `- y` term at the end
        auto sum = scalar_t{0};
        for (auto k = row_ptr[row]; k < row_ptr[row + 1]; ++k) {
            sum += detail::mul(values[k], x[cols[k]]);
        }
        y[row] = sum - y[row];
    }
}
/// Multi-vector variant: every column of `x`/`y` is processed at once,
/// equivalent to y = matrix * x - y for rows in [start, end)
template<class scalar_t> CPB_ALWAYS_INLINE
void kpm_spmv(idx_t start, idx_t end, SparseMatrixX<scalar_t> const& matrix,
              MatrixX<scalar_t> const& x, MatrixX<scalar_t>& y) {
    auto const values = matrix.valuePtr();
    auto const cols = matrix.innerIndexPtr();
    auto const row_ptr = matrix.outerIndexPtr();
    using RowVector = Eigen::Matrix<scalar_t, 1, Eigen::Dynamic>;
    // Accumulator row reused across iterations to avoid reallocation
    auto acc = RowVector(x.cols());
    for (auto row = start; row < end; ++row) {
        acc.setZero();
        for (auto k = row_ptr[row]; k < row_ptr[row + 1]; ++k) {
            acc += values[k] * x.row(cols[k]);
        }
        y.row(row) = acc - y.row(row);
    }
}
/**
 KPM-specialized sparse matrix-vector multiplication (CSR, diagonal)

 Equivalent to:
     y = matrix * x - y
     m2 = x^2
     m3 = dot(x, y)
 */
template<class scalar_t> CPB_ALWAYS_INLINE
void kpm_spmv_diagonal(idx_t start, idx_t end, SparseMatrixX<scalar_t> const& matrix,
                       VectorX<scalar_t> const& x, VectorX<scalar_t>& y,
                       scalar_t& m2, scalar_t& m3) {
    // Delegate the multiplication, then accumulate the KPM moment sums
    kpm_spmv(start, end, matrix, x, y);
    auto const len = end - start;
    auto const xs = x.segment(start, len);
    m2 += xs.squaredNorm();
    m3 += y.segment(start, len).dot(xs);
}
/// Multi-vector variant: moments are accumulated per column of `x`/`y`
template<class scalar_t> CPB_ALWAYS_INLINE
void kpm_spmv_diagonal(idx_t start, idx_t end, SparseMatrixX<scalar_t> const& matrix,
                       MatrixX<scalar_t> const& x, MatrixX<scalar_t>& y,
                       simd::array<scalar_t>& m2, simd::array<scalar_t>& m3) {
    kpm_spmv(start, end, matrix, x, y);
    auto const len = end - start;
    for (auto col = 0; col < x.cols(); ++col) {
        m2[col] += x.col(col).segment(start, len).squaredNorm();
        m3[col] += y.col(col).segment(start, len).dot(x.col(col).segment(start, len));
    }
}
/**
KPM-specialized sparse matrix-vector multiplication (ELLPACK, off-diagonal)
Equivalent to: y = matrix * x - y
*/
#if SIMDPP_USE_NULL // generic version
/// Generic (non-SIMD) ELLPACK version: y = matrix * x - y for rows [start, end)
template<class scalar_t> CPB_ALWAYS_INLINE
void kpm_spmv(idx_t start, idx_t end, num::EllMatrix<scalar_t> const& matrix,
              VectorX<scalar_t> const& x, VectorX<scalar_t>& y) {
    // Negate `y` up front so the `- y` term folds into the accumulation
    for (auto row = start; row < end; ++row) {
        y[row] = -y[row];
    }
    // Iterate one ELL column (slot `n`) at a time -- matches the column-major
    // storage layout of `EllMatrix`
    for (auto n = 0; n < matrix.nnz_per_row; ++n) {
        for (auto row = start; row < end; ++row) {
            auto const a = matrix.data(row, n);
            auto const b = x[matrix.indices(row, n)];
            y[row] += detail::mul(a, b);
        }
    }
}
/// Generic (non-SIMD) ELLPACK version for multiple vectors (matrix `x`/`y`)
template<class scalar_t> CPB_ALWAYS_INLINE
void kpm_spmv(idx_t start, idx_t end, num::EllMatrix<scalar_t> const& matrix,
              MatrixX<scalar_t> const& x, MatrixX<scalar_t>& y) {
    // Negate `y` up front so the `- y` term folds into the accumulation
    for (auto row = start; row < end; ++row) {
        y.row(row) = -y.row(row);
    }
    for (auto n = 0; n < matrix.nnz_per_row; ++n) {
        for (auto row = start; row < end; ++row) {
            y.row(row) += matrix.data(row, n) * x.row(matrix.indices(row, n));
        }
    }
}
#else // vectorized using SIMD intrinsics
/// SIMD ELLPACK version: y = matrix * x - y for rows in [start, end).
/// `skip_last_n` lets a caller leave the last `n` ELL columns unprocessed
/// (the diagonal kernel finishes them fused with the moment sums).
/// Returns the loop-split descriptor so callers can reuse the same bounds.
template<class scalar_t, idx_t skip_last_n = 0,
         idx_t step = simd::traits<scalar_t>::size> CPB_ALWAYS_INLINE
simd::split_loop_t<step> kpm_spmv(idx_t start, idx_t end, num::EllMatrix<scalar_t> const& matrix,
                                  VectorX<scalar_t> const& x, VectorX<scalar_t>& y) {
    using simd_register_t = simd::select_vector_t<scalar_t>;
    // Split [start, end) into a scalar peel section, an aligned vectorized
    // middle and a scalar remainder
    auto const loop = simd::split_loop(y.data(), start, end);
    auto const px0 = x.data();
    for (auto n = 0; n < matrix.nnz_per_row - skip_last_n; ++n) {
        auto data = &matrix.data(0, n) + start;
        auto idx = &matrix.indices(0, n) + start;
        auto py = y.data() + start;
        // Scalar peel loop (up to the first aligned element)
        for (auto _ = loop.start; _ < loop.peel_end; ++_, ++data, ++idx, ++py) {
            auto const a = *data;
            auto const b = px0[*idx];
            // The first ELL column (n == 0) folds in the `- y` negation
            auto const c = (n == 0) ? -*py : *py;
            *py = detail::mul(a, b) + c;
        }
        // Vectorized main loop: gather x by index, fused multiply-add into y
        for (auto _ = loop.peel_end; _ < loop.vec_end;
             _ += step, data += step, idx += step, py += step) {
            auto const a = simd::load<simd_register_t>(data);
            auto const b = simd::gather<simd_register_t>(px0, idx);
            auto c = simd::load<simd_register_t>(py);
            if (n == 0) { c = simd::neg(c); }
            simd::store(py, simd::madd_rc<scalar_t>(a, b, c));
        }
        // Scalar remainder loop (past the last full SIMD register)
        for (auto _ = loop.vec_end; _ < loop.end; ++_, ++data, ++idx, ++py) {
            auto const a = *data;
            auto const b = px0[*idx];
            auto const c = (n == 0) ? -*py : *py;
            *py = detail::mul(a, b) + c;
        }
    }
    return loop;
}
/// SIMD ELLPACK version for multiple vectors. The indexing `px0 + *idx * step`
/// treats each logical row of `x`/`y` as exactly `step` contiguous scalars,
/// i.e. one full SIMD register per row -- assumes x.cols() == step
/// (TODO(review): confirm against callers).
template<class scalar_t, idx_t skip_last_n = 0> CPB_ALWAYS_INLINE
void kpm_spmv(idx_t start, idx_t end, num::EllMatrix<scalar_t> const& matrix,
              MatrixX<scalar_t> const& x, MatrixX<scalar_t>& y) {
    using simd_register_t = simd::select_vector_t<scalar_t>;
    static constexpr auto step = simd::traits<scalar_t>::size;
    auto const px0 = x.data();
    for (auto n = 0; n < matrix.nnz_per_row - skip_last_n; ++n) {
        auto data = &matrix.data(0, n) + start;
        auto data_end = &matrix.data(0, n) + end;
        auto idx = &matrix.indices(0, n) + start;
        auto py = y.data() + start * step;
        for (; data < data_end; ++data, ++idx, py += step) {
            // Broadcast the single matrix value across the register
            auto const a = simd::load_splat_rc<simd_register_t>(data);
            auto const b = simd::load<simd_register_t>(px0 + *idx * step);
            auto c = simd::load<simd_register_t>(py);
            // First ELL column folds in the `- y` negation
            if (n == 0) { c = simd::neg(c); }
            simd::store(py, simd::madd_rc<scalar_t>(a, b, c));
        }
    }
}
#endif // SIMDPP_USE_NULL
/**
KPM-specialized sparse matrix-vector multiplication (ELLPACK, diagonal)
Equivalent to:
y = matrix * x - y
m2 = x^2
m3 = dot(x, y)
*/
#if SIMDPP_USE_NULL // generic version
/// Generic (non-SIMD) ELLPACK diagonal kernel: multiply, then moment sums
template<class scalar_t> CPB_ALWAYS_INLINE
void kpm_spmv_diagonal(idx_t start, idx_t end, num::EllMatrix<scalar_t> const& matrix,
                       VectorX<scalar_t> const& x, VectorX<scalar_t>& y,
                       scalar_t& m2, scalar_t& m3) {
    kpm_spmv(start, end, matrix, x, y);
    auto const len = end - start;
    auto const xs = x.segment(start, len);
    m2 += xs.squaredNorm();
    m3 += y.segment(start, len).dot(xs);
}
/// Generic (non-SIMD) ELLPACK diagonal kernel, multi-vector variant:
/// moments are accumulated per column of `x`/`y`
template<class scalar_t> CPB_ALWAYS_INLINE
void kpm_spmv_diagonal(idx_t start, idx_t end, num::EllMatrix<scalar_t> const& matrix,
                       MatrixX<scalar_t> const& x, MatrixX<scalar_t>& y,
                       simd::array<scalar_t>& m2, simd::array<scalar_t>& m3) {
    kpm_spmv(start, end, matrix, x, y);
    auto const len = end - start;
    for (auto col = 0; col < x.cols(); ++col) {
        m2[col] += x.col(col).segment(start, len).squaredNorm();
        m3[col] += y.col(col).segment(start, len).dot(x.col(col).segment(start, len));
    }
}
#else // vectorized using SIMD intrinsics
/// SIMD ELLPACK diagonal kernel: y = matrix * x - y plus the KPM moment sums
/// m2 += |x|^2 and m3 += dot(x, y), fused into the final ELL column pass
template<class scalar_t> CPB_ALWAYS_INLINE
void kpm_spmv_diagonal(idx_t start, idx_t end, num::EllMatrix<scalar_t> const& matrix,
                       VectorX<scalar_t> const& x, VectorX<scalar_t>& y,
                       scalar_t& m2, scalar_t& m3) {
    // Call the regular compute function, but skip the last loop iteration.
    auto const loop = kpm_spmv<scalar_t, 1>(start, end, matrix, x, y);
    // The last iteration will be done here together with the m2 and m3 sums.
    // This saves memory bandwidth by reusing `y` data (`r2`) which is already
    // in a register. While `x` data (`r1`) is not strictly reused, there is good
    // locality between the `b = gather(x)` and `r2 = load(x)` operations which
    // improves the cache hit rate. Overall, this offers a nice speed improvement.
    using simd_register_t = simd::select_vector_t<scalar_t>;
    static constexpr auto step = simd::traits<scalar_t>::size;
    auto const n = matrix.nnz_per_row - 1;
    auto data = &matrix.data(0, n) + start;
    auto idx = &matrix.indices(0, n) + start;
    auto px0 = x.data();
    auto px = x.data() + start;
    auto py = y.data() + start;
    // Vector accumulators for the moment sums (reduced to scalars at the end)
    auto m2_vec = simd::make_float<simd_register_t>(0);
    auto m3_vec = simd::make_float<simd_register_t>(0);
    // Scalar peel loop
    for (auto _ = loop.start; _ < loop.peel_end; ++_, ++data, ++idx, ++py, ++px) {
        auto const a = *data;
        auto const b = px0[*idx];
        auto const c = (n == 0 ? -*py : *py);
        auto const r1 = *px;
        auto const r2 = a * b + c;
        m2 += detail::square(r1);
        m3 += detail::mul(num::conjugate(r2), r1);
        *py = r2;
    }
    // Vectorized main loop
    for (auto _ = loop.peel_end; _ < loop.vec_end;
         _ += step, data += step, idx += step, py += step, px += step) {
        auto const a = simd::load<simd_register_t>(data);
        auto const b = simd::gather<simd_register_t>(px0, idx);
        auto c = simd::load<simd_register_t>(py);
        if (n == 0) { c = simd::neg(c); }
        auto const r1 = simd::load<simd_register_t>(px);
        auto const r2 = simd::madd_rc<scalar_t>(a, b, c);
        m2_vec = m2_vec + r1 * r1;
        m3_vec = simd::conjugate_madd_rc<scalar_t>(r2, r1, m3_vec);
        simd::store(py, r2);
    }
    // Scalar remainder loop
    for (auto _ = loop.vec_end; _ < loop.end; ++_, ++data, ++idx, ++py, ++px) {
        auto const a = *data;
        auto const b = px0[*idx];
        auto const c = (n == 0 ? -*py : *py);
        auto const r1 = *px;
        auto const r2 = a * b + c;
        m2 += detail::square(r1);
        m3 += detail::mul(num::conjugate(r2), r1);
        *py = r2;
    }
    // Fold the vector accumulators into the scalar results
    m2 += simd::reduce_add(m2_vec);
    m3 += simd::reduce_add_rc<scalar_t>(m3_vec);
}
template<class scalar_t> CPB_ALWAYS_INLINE
void kpm_spmv_diagonal(idx_t start, idx_t end, num::EllMatrix<scalar_t> const& matrix,
                       MatrixX<scalar_t> const& x, MatrixX<scalar_t>& y,
                       simd::array<scalar_t>& m2, simd::array<scalar_t>& m3) {
    // Multi-column variant: one SIMD register spans the columns of a single
    // row (row-major layout with `step` scalars per row), so the moments are
    // accumulated per column. As in the vector variant, the regular compute
    // skips the last ELLPACK column, which is fused with the moment sums here.
    kpm_spmv<scalar_t, 1>(start, end, matrix, x, y);

    using simd_register_t = simd::select_vector_t<scalar_t>;
    static constexpr auto step = simd::traits<scalar_t>::size;
    auto const n = matrix.nnz_per_row - 1; // index of the last ELLPACK column
    auto data = &matrix.data(0, n) + start;
    auto data_end = &matrix.data(0, n) + end;
    auto idx = &matrix.indices(0, n) + start;
    auto px0 = x.data();
    auto px = x.data() + start * step;
    auto py = y.data() + start * step;
    auto m2_vec = simd::make_float<simd_register_t>(0);
    auto m3_vec = simd::make_float<simd_register_t>(0);
    for (; data < data_end; ++data, ++idx, px += step, py += step) {
        // The matrix element is broadcast across all columns of the row.
        auto const a = simd::load_splat_rc<simd_register_t>(data);
        auto const b = simd::load<simd_register_t>(px0 + *idx * step);
        auto c = simd::load<simd_register_t>(py);
        if (n == 0) { c = simd::neg(c); } // first pass computes y = A*x - y
        auto const r1 = simd::load<simd_register_t>(px);
        auto const r2 = simd::madd_rc<scalar_t>(a, b, c);
        m2_vec = m2_vec + r1 * r1;
        m3_vec = simd::conjugate_madd_rc<scalar_t>(r2, r1, m3_vec);
        simd::store(py, r2);
    }
    // NOTE(review): `reduce_imag` presumably folds the imaginary-lane squares
    // into the real lanes so that each complex column ends up with re^2 + im^2
    // -- the exact semantics live in support/simd.hpp; confirm there.
    m2_vec = simd::reduce_imag<scalar_t>(m2_vec);
    simd::store_u(m2.data(), simd::load_u<simd_register_t>(m2.data()) + m2_vec);
    simd::store_u(m3.data(), simd::load_u<simd_register_t>(m3.data()) + m3_vec);
}
#endif // SIMDPP_USE_NULL
}} // namespace cpb::compute
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/compute/lanczos.hpp | .hpp | 5,685 | 157 | #pragma once
#ifdef CPB_USE_MKL
# include "mkl/lanczos.hpp"
#else
# include "eigen3/lanczos.hpp"
#endif
#include "numeric/sparse.hpp"
#include "numeric/random.hpp"
#include "compute/detail.hpp"
#include "support/simd.hpp"
namespace cpb { namespace compute {
/**
Lanczos-specialized sparse matrix-vector multiplication + dot product
Equivalent to:
tmp = matrix * v1
a = real(dot_product(tmp, v1))
v0 = tmp - b_prev * v0
return a
*/
template<class scalar_t, class real_t = num::get_real_t<scalar_t>> CPB_ALWAYS_INLINE
real_t lanczos_spmv(real_t b_prev, SparseMatrixX<scalar_t> const& matrix,
                    VectorX<scalar_t> const& v1, VectorX<scalar_t>& v0) {
    auto const size = matrix.rows();
    auto const data = matrix.valuePtr();         // CSR: non-zero values
    auto const indices = matrix.innerIndexPtr(); // CSR: column index of each value
    auto const indptr = matrix.outerIndexPtr();  // CSR: start offset of each row
    auto a = real_t{0};
    for (auto row = 0; row < size; ++row) {
        // tmp = (matrix * v1)[row]
        auto tmp = scalar_t{0};
        for (auto n = indptr[row]; n < indptr[row + 1]; ++n) {
            tmp += detail::mul(data[n], v1[indices[n]]);
        }
        // Fused single pass: v0 = matrix * v1 - b_prev * v0 ...
        v0[row] = tmp - b_prev * v0[row];
        // ... while accumulating a = Re(<matrix * v1, v1>) row by row.
        a += detail::real_dot(tmp, v1[row]);
    }
    return a;
}
/**
Lanczos-specialized a * x + y
Equivalent to:
v0 -= a * v1
b = norm(v0)
return b
*/
#if SIMDPP_USE_NULL // generic version
template<class scalar_t, class real_t = num::get_real_t<scalar_t>> CPB_ALWAYS_INLINE
real_t lanczos_axpy(real_t a, VectorX<scalar_t> const& v1, VectorX<scalar_t>& v0) {
    // Generic fallback: update v0 -= a * v1 in a single pass while
    // accumulating the squared norm of the updated vector.
    auto norm2 = real_t{0};
    auto const count = v0.size();
    for (auto i = decltype(count){0}; i < count; ++i) {
        auto const updated = v0[i] - a * v1[i];
        norm2 += detail::square(updated);
        v0[i] = updated;
    }
    return std::sqrt(norm2); // b = ||v0|| after the update
}
#else // vectorized using SIMD intrinsics
template<class scalar_t, class real_t = num::get_real_t<scalar_t>> CPB_ALWAYS_INLINE
real_t lanczos_axpy(real_t a, VectorX<scalar_t> const& v1, VectorX<scalar_t>& v0) {
    using simd_register_t = simd::select_vector_t<scalar_t>;
    auto const loop = simd::split_loop(v0.data(), 0, v0.size());
    assert(loop.peel_end == 0); // all eigen vectors are properly aligned when starting from 0
    // Vectorized main loop: v0 -= a * v1, accumulating the squares lane-wise
    // (for complex scalars, re^2 and im^2 land in separate lanes).
    auto norm2_vec = simd::make_float<simd_register_t>(0);
    for (auto i = idx_t{0}; i < loop.vec_end; i += loop.step) {
        auto const r0 = simd::load<simd_register_t>(v0.data() + i);
        auto const r1 = simd::load<simd_register_t>(v1.data() + i);
        auto const tmp = simd_register_t{r0 - a * r1};
        norm2_vec = norm2_vec + tmp * tmp;
        simd::store(v0.data() + i, tmp);
    }
    // Scalar remainder: elements that don't fill a whole SIMD register.
    auto norm2_remainder = real_t{0};
    for (auto i = loop.vec_end; i < loop.end; ++i) {
        auto const tmp = v0[i] - a * v1[i];
        norm2_remainder += detail::square(tmp);
        v0[i] = tmp;
    }
    // reduce_add sums every lane, so the separate re^2/im^2 lanes of complex
    // scalars combine into |.|^2 automatically.
    return std::sqrt(simd::reduce_add(norm2_vec) + norm2_remainder);
}
#endif // SIMDPP_USE_NULL
/// Result of the Lanczos min/max eigenvalue search (see `minmax_eigenvalues`)
struct LanczosBounds {
    double min; ///< the lowest eigenvalue
    double max; ///< the highest eigenvalue
    int loops; ///< number of iterations needed to converge
};
/// Use the Lanczos algorithm to find the min and max eigenvalues at given precision (%)
template<class scalar_t>
LanczosBounds minmax_eigenvalues(SparseMatrixX<scalar_t> const& matrix, double precision_percent) {
    using real_t = num::get_real_t<scalar_t>;
    simd::scope_disable_denormals guard;

    auto v0 = VectorX<scalar_t>::Zero(matrix.rows()).eval();
    auto v1 = num::make_random<VectorX<scalar_t>>(matrix.rows());
    v1.normalize();

    // Alpha and beta are the diagonals of the tridiagonal matrix.
    // The final size is not known ahead of time, but it will be small.
    auto alpha = std::vector<real_t>(); alpha.reserve(100);
    auto beta = std::vector<real_t>(); beta.reserve(100);

    // Energy values from the previous iteration. Used to test convergence.
    // Initial values as far away from expected as possible.
    auto previous_min = std::numeric_limits<real_t>::max();
    auto previous_max = std::numeric_limits<real_t>::lowest();

    auto const precision = static_cast<real_t>(precision_percent / 100);
    constexpr auto loop_limit = 1000;

    // This may iterate up to matrix_size, but since only the extreme eigenvalues are required it
    // will converge very quickly. Exceeding `loop_limit` would suggest something is wrong.
    for (int i = 0; i < loop_limit; ++i) {
        // PART 1: Calculate tridiagonal matrix elements a and b
        // =====================================================
        auto const b_prev = !beta.empty() ? beta.back() : real_t{0};
        auto const a = lanczos_spmv(b_prev, matrix, v1, v0);
        auto const b = lanczos_axpy(a, v1, v0);

        v0 *= 1 / b; // normalize the new Lanczos vector
        v0.swap(v1);

        alpha.push_back(a);
        beta.push_back(b);

        // PART 2: Check if the largest magnitude eigenvalues have converged
        // =================================================================
        auto const eigenvalues = compute::tridiagonal_eigenvalues(eigen_cast<ArrayX>(alpha),
                                                                  eigen_cast<ArrayX>(beta));
        auto const min = eigenvalues.minCoeff();
        auto const max = eigenvalues.maxCoeff();

        // `std::abs` must be qualified here: an unqualified `abs` may resolve
        // to the C library's integer `::abs`, which would truncate the
        // floating-point relative error toward zero and report convergence
        // prematurely.
        auto const is_converged_min = std::abs((previous_min - min) / min) < precision;
        auto const is_converged_max = std::abs((previous_max - max) / max) < precision;
        if (is_converged_min && is_converged_max) {
            return {min, max, i};
        }

        previous_min = min;
        previous_max = max;
    }
    throw std::runtime_error{"Lanczos algorithm did not converge for the min/max eigenvalues."};
}
}} // namespace cpb::compute
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/compute/detail.hpp | .hpp | 1,368 | 44 | #pragma once
#include "detail/macros.hpp"
#include <complex>
namespace cpb { namespace compute { namespace detail {
/**
These functions are needed because std::complex<T> operator* does additional
checking which significantly slows down critical loops. This `mul` overload
does a raw multiplication where the user must make sure there are no numerical
complications.
*/
template<class real_t> CPB_ALWAYS_INLINE
real_t mul(real_t a, real_t b) { return a * b; }

template<class real_t> CPB_ALWAYS_INLINE
std::complex<real_t> mul(std::complex<real_t> a, std::complex<real_t> b) {
    // Hand-written complex product: (ar + i*ai)(br + i*bi)
    // = (ar*br - ai*bi) + i*(ar*bi + ai*br), with no NaN/Inf fix-up pass.
    auto const ar = a.real(); auto const ai = a.imag();
    auto const br = b.real(); auto const bi = b.imag();
    return {ar * br - ai * bi, ar * bi + ai * br};
}
/**
Return only the real part of a dot product multiplication. Compared to
`mul(conj(a), b)`, this saves a few operations for complex scalars.
*/
template<class real_t> CPB_ALWAYS_INLINE
real_t real_dot(real_t a, real_t b) { return a * b; }

template<class real_t> CPB_ALWAYS_INLINE
real_t real_dot(std::complex<real_t> a, std::complex<real_t> b) {
    // Re(conj(a) * b), expanded so the imaginary part is never computed.
    auto const real_part = a.real() * b.real();
    auto const imag_part = a.imag() * b.imag();
    return real_part + imag_part;
}
template<class real_t> CPB_ALWAYS_INLINE
real_t square(real_t a) { return a * a; }

template<class real_t> CPB_ALWAYS_INLINE
real_t square(std::complex<real_t> a) {
    // |a|^2 == Re(conj(a) * a); reuse the expanded real dot product.
    return real_dot(a, a);
}
}}} // namespace cpb::compute::detail
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/compute/mkl/lanczos.hpp | .hpp | 763 | 24 | #pragma once
#include "numeric/dense.hpp"
#include "compute/mkl/wrapper.hpp"
namespace cpb { namespace compute {
template<class Derived, class scalar_t = typename Derived::Scalar>
inline ArrayX<scalar_t> tridiagonal_eigenvalues(DenseBase<Derived> const& alpha,
                                                DenseBase<Derived> const& beta) {
    // LAPACK's stev routine overwrites its inputs, so work on copies:
    // `eigenvalues` starts out as the main diagonal and is replaced in place
    // by the eigenvalues (ascending); `temp` holds the sub-diagonal.
    ArrayX<scalar_t> eigenvalues = alpha;
    ArrayX<scalar_t> temp = beta;
    // Job 'N': eigenvalues only -- the eigenvector array `z` is nullptr and
    // its leading dimension (the last argument) is ignored for this job.
    auto const error_id = mkl::stev<scalar_t>::call(
        LAPACK_COL_MAJOR, 'N', eigenvalues.size(), eigenvalues.data(),
        temp.data(), nullptr, eigenvalues.size()
    );
    if (error_id)
        throw std::runtime_error{"LAPACK stev() error: " + std::to_string(error_id)};
    return eigenvalues;
}
}} // namespace cpb::compute
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/compute/mkl/wrapper.hpp | .hpp | 2,847 | 58 | #pragma once
#include <mkl.h>
#include <complex>
namespace cpb { namespace mkl {
namespace detail {
    /// Map a C++ scalar type to the matching MKL C API type
    template<class scalar_t> struct typemap;
    template<> struct typemap<float> { using type = float; };
    template<> struct typemap<double> { using type = double; };
    template<> struct typemap<std::complex<float>> { using type = MKL_Complex8; };
    template<> struct typemap<std::complex<double>> { using type = MKL_Complex16; };
}

/// Get the corresponding MKL C API type from the C++ type `scalar_t`
template<class scalar_t>
using type = typename detail::typemap<scalar_t>::type;

// Each wrapper struct below selects the correctly-typed MKL function for a
// given `scalar_t` at compile time via its static `call` member. Types with
// no specialization are simply unsupported for that operation.

/// Dot product
template<class scalar_t> struct dotc;
template<> struct dotc<std::complex<float>> { static constexpr auto call = cblas_cdotc_sub; };
template<> struct dotc<std::complex<double>> { static constexpr auto call = cblas_zdotc_sub; };

/// axpy: y = a*x + y
template<class scalar_t> struct axpy;
template<> struct axpy<float> { static constexpr auto call = cblas_saxpy; };
template<> struct axpy<double> { static constexpr auto call = cblas_daxpy; };
template<> struct axpy<std::complex<float>> { static constexpr auto call = cblas_caxpy; };
template<> struct axpy<std::complex<double>> { static constexpr auto call = cblas_zaxpy; };

/// Eigenvalue and eigenvectors of a real symmetry tridiagonal matrix
template<class scalar_t> struct stev;
template<> struct stev<float> { static constexpr auto call = LAPACKE_sstev; };
template<> struct stev<double> { static constexpr auto call = LAPACKE_dstev; };

/// CSR matrix vector multiplication
template<class scalar_t> struct csrmv;
template<> struct csrmv<float> { static constexpr auto call = mkl_scsrmv; };
template<> struct csrmv<double> { static constexpr auto call = mkl_dcsrmv; };
template<> struct csrmv<std::complex<float>> { static constexpr auto call = mkl_ccsrmv; };
template<> struct csrmv<std::complex<double>> { static constexpr auto call = mkl_zcsrmv; };

/// CSR general matrix vector multiplication
template<class scalar_t> struct csrgemv;
template<> struct csrgemv<float> { static constexpr auto call = mkl_cspblas_scsrgemv; };
template<> struct csrgemv<double> { static constexpr auto call = mkl_cspblas_dcsrgemv; };
template<> struct csrgemv<std::complex<float>> { static constexpr auto call = mkl_cspblas_ccsrgemv; };
template<> struct csrgemv<std::complex<double>> { static constexpr auto call = mkl_cspblas_zcsrgemv; };

/// FEAST eigensolver for a CSR matrix: symmetric driver for real scalar
/// types, Hermitian driver for complex ones
template<class scalar_t> struct feast_hcsrev;
template<> struct feast_hcsrev<float> { static constexpr auto call = sfeast_scsrev; };
template<> struct feast_hcsrev<double> { static constexpr auto call = dfeast_scsrev; };
template<> struct feast_hcsrev<std::complex<float>> { static constexpr auto call = cfeast_hcsrev; };
template<> struct feast_hcsrev<std::complex<double>> { static constexpr auto call = zfeast_hcsrev; };
}} // namespace cpb::mkl
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/compute/eigen3/lanczos.hpp | .hpp | 2,873 | 93 | #pragma once
#include "numeric/dense.hpp"
#include <Eigen/Jacobi>
namespace cpb { namespace compute {
namespace detail {
template<class real_t>
static void tridiagonal_qr_step(real_t* diag, real_t* subdiag, int start, int end) {
    // One implicit symmetric QR step with shift on the unreduced tridiagonal
    // block [start, end]. `diag` is the main diagonal, `subdiag` the
    // sub-diagonal. Follows the structure of Eigen's SelfAdjointEigenSolver
    // tridiagonal QR step.

    // Compute the shift `mu`: the eigenvalue of the trailing 2x2 block that
    // is closest to diag[end], written to avoid overflow and cancellation.
    auto td = (diag[end-1] - diag[end]) * real_t(0.5);
    auto e = subdiag[end-1];
    auto mu = diag[end];
    if (td == 0) {
        mu -= std::abs(e);
    }
    else {
        auto e2 = Eigen::numext::abs2(subdiag[end-1]);
        auto h = Eigen::numext::hypot(td, e);
        if (e2 == 0)
            mu -= (e / (td + (td>0 ? 1 : -1))) * (e / h);
        else
            mu -= e2 / (td + (td>0 ? h : -h));
    }

    // Chase the bulge down the block with a sequence of Givens rotations,
    // applying T = G' T G at each step so the matrix stays tridiagonal.
    auto x = diag[start] - mu;
    auto z = subdiag[start];
    for (auto k = start; k < end; ++k) {
        Eigen::JacobiRotation<real_t> rot;
        rot.makeGivens(x, z); // zero out `z` relative to `x`

        // do T = G' T G
        auto sdk = rot.s() * diag[k] + rot.c() * subdiag[k];
        auto dkp1 = rot.s() * subdiag[k] + rot.c() * diag[k+1];

        diag[k] = rot.c() * (rot.c() * diag[k] - rot.s() * subdiag[k])
                  - rot.s() * (rot.c() * subdiag[k] - rot.s() * diag[k+1]);
        diag[k+1] = rot.s() * sdk + rot.c() * dkp1;
        subdiag[k] = rot.c() * sdk - rot.s() * dkp1;

        if (k > start)
            subdiag[k - 1] = rot.c() * subdiag[k-1] - rot.s() * z;

        x = subdiag[k];
        if (k < end - 1) {
            z = -rot.s() * subdiag[k+1];
            subdiag[k + 1] = rot.c() * subdiag[k+1];
        }
    }
}
}
template<class Derived, class scalar_t = typename Derived::Scalar>
inline ArrayX<scalar_t> tridiagonal_eigenvalues(const DenseBase<Derived>& alpha,
                                                const DenseBase<Derived>& beta)
{
    // Iterative QR with deflation. `scalar_t` is assumed to be a real type
    // (the epsilon-based deflation below only makes sense for real scalars).
    ArrayX<scalar_t> eigenvalues = alpha; // main diagonal -> becomes the eigenvalues
    ArrayX<scalar_t> temp = beta;         // sub-diagonal working copy

    auto start = 0;
    auto end = static_cast<int>(eigenvalues.size()) - 1;
    auto iter = 0;
    constexpr auto max_iterations = 30; // per-eigenvalue budget, scaled by size below
    while (end > 0) {
        // Deflation: zero out negligible sub-diagonal entries.
        for (auto i = start; i < end; ++i) {
            auto a = std::abs(temp[i]);
            auto b = std::abs(eigenvalues[i]) + std::abs(eigenvalues[i + 1]);
            // if a is much smaller than b
            if (a < b * std::numeric_limits<scalar_t>::epsilon())
                temp[i] = 0;
        }

        // Shrink the active range from the bottom past converged eigenvalues.
        while (end > 0 && temp[end-1] == 0)
            end--;
        if (end <= 0)
            break;

        if (++iter > max_iterations * eigenvalues.size())
            throw std::runtime_error{"Tridiagonal QR error"};

        // Find the top of the largest unreduced block ending at `end`.
        start = end - 1;
        while (start > 0 && temp[start-1] != 0)
            start--;

        detail::tridiagonal_qr_step(eigenvalues.data(), temp.data(), start, end);
    }
    return eigenvalues;
}
}} // namespace cpb::compute
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/solver/FEAST.hpp | .hpp | 3,880 | 98 | #pragma once
#include "detail/config.hpp"
#ifdef CPB_USE_FEAST
#include "solver/Solver.hpp"
namespace cpb {
/// User-facing configuration for the FEAST eigensolver (see `FEAST` below)
struct FEASTConfig {
    // required user config
    double energy_min = 0; ///< lowest eigenvalue
    double energy_max = 0; ///< highest eigenvalue
    int initial_size_guess = 0; ///< initial user guess for the subspace size

    // optional user config
    bool is_verbose = false; ///< [false] print information directly to stdout
    bool recycle_subspace = false; ///< [false] use previous data as a starting point

    // advanced optional user config
    // NOTE(review): the bracketed values below read like documented defaults,
    // but three of them differ from the actual member initializers
    // (max_refinement_loops 5 vs [20], sp_stop_criteria 3 vs [5],
    // dp_stop_criteria 10 vs [12]). They may instead be the FEAST library's
    // own defaults -- confirm the intent and reconcile.
    int contour_points = 8; ///< [8] complex integral contour point
    int max_refinement_loops = 5; ///< [20] maximum number of refinement loops
    int sp_stop_criteria = 3; ///< [5] single precision error trace stopping criteria
    int dp_stop_criteria = 10; ///< [12] double precision error trace stopping criteria
    bool residual_convergence = false; /**< [false] use residual stop criteria
                                            instead of error trace criteria */

    // implementation detail config
    char matrix_format = 'F'; ///< full matrix 'F' or triangular: lower 'L' and upper 'U'
    int system_size = 0; ///< size of the Hamiltonian matrix, i.e. number of atoms in the system
};
/**
Implementation of the FEAST eigensolver
*/
template<class scalar_t>
class FEAST : public SolverStrategy {
    using real_t = num::get_real_t<scalar_t>;
    using complex_t = num::get_complex_t<scalar_t>;

public:
    /// Diagnostics collected from the most recent FEAST run
    struct Info {
        int suggested_size; ///< post-calculation suggested subspace size
        int final_size; ///< final subspace size
        int refinement_loops = 0; ///< the number of refinement loops executed
        real_t error_trace; ///< relative error on trace
        real_t max_residual; ///< biggest residual
        int return_code; ///< function return information and error codes
        bool recycle_warning = false; ///< error with recycled subspace, the calculation was rerun
        int recycle_warning_loops = 0; ///< total loop count including those reset after a warning
        bool size_warning = false; ///< the initial subspace size was too small
    };

public:
    using Config = FEASTConfig;
    explicit FEAST(SparseMatrixRC<scalar_t> hamiltonian, Config const& config = {})
        : hamiltonian(std::move(hamiltonian)), config(config) {}

public: // overrides
    bool change_hamiltonian(Hamiltonian const& h) override;
    void solve() override;
    std::string report(bool shortform) const override;

    // map eigenvalues and wavefunctions to only expose results up to the usable subspace size
    RealArrayConstRef eigenvalues() const override {
        return arrayref(Map<const ArrayX<real_t>>(_eigenvalues.data(), info.final_size));
    }
    ComplexArrayConstRef eigenvectors() const override {
        using MapType = Map<ColMajorArrayXX<scalar_t> const>;
        return arrayref(MapType(_eigenvectors.data(), _eigenvectors.rows(), info.final_size));
    }

private: // implementation
    void init_feast(); ///< initialize FEAST parameters
    void init_pardiso(); ///< initialize PARDISO (sparse linear solver) parameters
    void call_feast(); ///< setup and call FEAST solver
    void call_feast_impl(); ///< call for scalar_t specific solver
    void force_clear(); ///< clear eigenvalue, eigenvector and residual data

private:
    SparseMatrixRC<scalar_t> hamiltonian;
    Config config;

    ArrayX<real_t> _eigenvalues;
    ColMajorArrayXX<scalar_t> _eigenvectors;
    int fpm[128]; ///< FEAST init parameters (the FEAST API expects an array of 128 ints)
    Info info;
    ArrayX<real_t> residual; ///< relative residual
};
// Suppress implicit instantiation in every translation unit; the explicit
// instantiations for these scalar types live in the corresponding .cpp file.
extern template class cpb::FEAST<float>;
extern template class cpb::FEAST<std::complex<float>>;
extern template class cpb::FEAST<double>;
extern template class cpb::FEAST<std::complex<double>>;
} // namespace cpb
#endif // CPB_USE_FEAST
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/solver/Solver.hpp | .hpp | 2,019 | 74 | #pragma once
#include "Model.hpp"
#include "hamiltonian/Hamiltonian.hpp"
#include "utils/Chrono.hpp"
#include "numeric/dense.hpp"
#include "detail/strategy.hpp"
#include <memory>
namespace cpb {
/**
Abstract base class for an eigensolver
*/
class SolverStrategy {
public:
    virtual ~SolverStrategy() = default;

    /// Returns false if the given Hamiltonian is the wrong type for this SolverStrategy
    virtual bool change_hamiltonian(Hamiltonian const& h) = 0;
    /// Run the eigensolver for the current Hamiltonian
    virtual void solve() = 0;
    /// Human-readable summary of the last run (`shortform` presumably selects
    /// a more compact format -- see the concrete strategies)
    virtual std::string report(bool shortform) const = 0;

    virtual RealArrayConstRef eigenvalues() const = 0;
    virtual ComplexArrayConstRef eigenvectors() const = 0;
};
/**
Main solver interface
Internally it uses a SolverStrategy with the scalar of the given Hamiltonian.
*/
class BaseSolver {
public:
    /// Run the eigensolver for the current model's Hamiltonian
    void solve();
    /// Invalidate previous results; a subsequent access will re-solve
    void clear() { is_solved = false; }
    std::string report(bool shortform) const;

    void set_model(Model const&);
    Model const& get_model() const { return model; }
    std::shared_ptr<System const> system() const { return model.system(); }

    RealArrayConstRef eigenvalues();
    ComplexArrayConstRef eigenvectors();

    /// Density of states at the given energies, with Gaussian-style broadening
    ArrayXd calc_dos(ArrayXf energies, float broadening);
    /// Spatially-resolved local density of states at a single energy
    ArrayXd calc_spatial_ldos(float energy, float broadening);

protected:
    using MakeStrategy = std::function<std::unique_ptr<SolverStrategy>(Hamiltonian const&)>;
    BaseSolver(Model const& model, MakeStrategy const& make_strategy);

private:
    Model model;
    MakeStrategy make_strategy; // picks the scalar-specific strategy for the Hamiltonian
    std::unique_ptr<SolverStrategy> strategy;
    bool is_solved = false;
    mutable Chrono calculation_timer; ///< last calculation time
};
/// Convenience front end: binds a concrete `Strategy` template to `BaseSolver`
template<template<class> class Strategy>
class Solver : public BaseSolver {
    // The Config type is shared by all scalar instantiations of `Strategy`,
    // so the float one serves as the canonical source (same convention as
    // detail::MakeStrategy).
    using Config = typename Strategy<float>::Config;
    using MakeStrategy = detail::MakeStrategy<SolverStrategy, Strategy>;

public:
    explicit Solver(Model const& model, Config const& config = {})
        : BaseSolver(model, MakeStrategy(config)) {}
};
} // namespace cpb
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/detail/opaque_alias.hpp | .hpp | 1,530 | 45 | #pragma once
#include "detail/config.hpp"
#include "support/cppfuture.hpp"
namespace cpb { namespace detail {
/**
 Type-safe integer alias

 Wraps an integer of type `T` so that different `Tag` types yield mutually
 incompatible types: logically distinct indices cannot be mixed by accident
 even though they share the same underlying representation.
 */
template<class Tag, class T = storage_idx_t>
class OpaqueIntegerAlias {
    using Self = OpaqueIntegerAlias;

    // Don't create duplicate constructors
    static constexpr auto has_constructor = std::is_same<T, idx_t>::value
                                            || std::is_same<T, size_t>::value;

public:
    OpaqueIntegerAlias() = default;
    explicit OpaqueIntegerAlias(idx_t value) : _value(static_cast<T>(value)) {}
    explicit OpaqueIntegerAlias(size_t value) : _value(static_cast<T>(value)) {}

    // Only participates when `T` itself is not one of the types handled above
    template<class U, class = std14::enable_if_t<std::is_same<U, T>::value && !has_constructor>>
    explicit OpaqueIntegerAlias(U value) : _value(value) {}

    /// Conversion between different tags must be explicit
    template<class OtherTag>
    explicit OpaqueIntegerAlias(OpaqueIntegerAlias<OtherTag, T> const& other)
        : _value(other.value()) {}

    T value() const { return _value; }
    template<class U> U as() const { return static_cast<U>(_value); }

    friend bool operator==(Self a, Self b) { return a._value == b._value; }
    friend bool operator!=(Self a, Self b) { return !(a == b); }
    friend bool operator< (Self a, Self b) { return a._value < b._value; }
    friend bool operator> (Self a, Self b) { return b < a; }
    friend bool operator>=(Self a, Self b) { return !(a < b); }
    friend bool operator<=(Self a, Self b) { return !(a > b); }

private:
    // Value-initialized so a default-constructed alias holds 0 instead of an
    // indeterminate value (reading which would be undefined behavior).
    T _value{};
};
}} // namespace cpb::detail
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/detail/strategy.hpp | .hpp | 1,619 | 49 | #pragma once
#include "hamiltonian/Hamiltonian.hpp"
#include "support/cppfuture.hpp"
namespace cpb { namespace detail {
/**
Function object which creates a new Strategy with the appropriate scalar type for the given Model
BaseStrategy is the abstract base, to which a pointer will be returned.
Strategy<scalar_t> must be instantiable with float/double and std::complex<float/double>.
*/
template<class BaseStrategy, template<class> class Strategy>
class MakeStrategy {
    static_assert(std::is_base_of<BaseStrategy, Strategy<float>>::value, "");
    using Config = typename Strategy<float>::Config;
    Config config;

public:
    explicit MakeStrategy(Config const& config) : config(config) {}

    /// Try each supported scalar type in turn and return a strategy
    /// instantiated for the one that matches the Hamiltonian's scalar
    std::unique_ptr<BaseStrategy> operator()(Hamiltonian const& h) const {
        std::unique_ptr<BaseStrategy> strategy;

        if (!strategy) strategy = try_strategy<float>(h);
        if (!strategy) strategy = try_strategy<std::complex<float>>(h);
        if (!strategy) strategy = try_strategy<double>(h);
        if (!strategy) strategy = try_strategy<std::complex<double>>(h);
        if (!strategy) {
            throw std::runtime_error("MakeStrategy: unknown Hamiltonian type.");
        }

        return strategy;
    }

private:
    /// New strategy if the Hamiltonian's scalar type is `scalar_t`, else null
    template<class scalar_t>
    std::unique_ptr<BaseStrategy> try_strategy(Hamiltonian const& h) const {
        if (ham::is<scalar_t>(h)) {
            return std::unique_ptr<BaseStrategy>{
                std14::make_unique<Strategy<scalar_t>>(ham::get_shared_ptr<scalar_t>(h), config)
            };
        }
        return {};
    }
};
}} // namespace cpb::detail
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/detail/macros.hpp | .hpp | 1,498 | 39 | #pragma once
// Declare `extern` explicit instantiations of class template `T` for the four
// supported scalar types; pairs with CPB_INSTANTIATE_TEMPLATE_CLASS in a .cpp.
#define CPB_EXTERN_TEMPLATE_CLASS(T) \
    extern template class T<float>; \
    extern template class T<std::complex<float>>; \
    extern template class T<double>; \
    extern template class T<std::complex<double>>;

// Define the explicit instantiations declared by CPB_EXTERN_TEMPLATE_CLASS.
#define CPB_INSTANTIATE_TEMPLATE_CLASS(T) \
    template class T<float>; \
    template class T<std::complex<float>>; \
    template class T<double>; \
    template class T<std::complex<double>>;

// Same as above, but for templates taking extra arguments after the scalar.
#define CPB_EXTERN_TEMPLATE_CLASS_VARGS(T, ...) \
    extern template class T<float, __VA_ARGS__>; \
    extern template class T<std::complex<float>, __VA_ARGS__>; \
    extern template class T<double, __VA_ARGS__>; \
    extern template class T<std::complex<double>, __VA_ARGS__>;

#define CPB_INSTANTIATE_TEMPLATE_CLASS_VARGS(T, ...) \
    template class T<float, __VA_ARGS__>; \
    template class T<std::complex<float>, __VA_ARGS__>; \
    template class T<double, __VA_ARGS__>; \
    template class T<std::complex<double>, __VA_ARGS__>;

#ifndef __has_attribute
# define __has_attribute(x) 0 // Compatibility with non-clang compilers
#endif

// Force inlining of hot-path functions: GNU attribute syntax for clang/GCC,
// __forceinline for MSVC/ICC, plain `inline` as the portable fallback.
#if __has_attribute(always_inline) || defined(__GNUC__)
# define CPB_ALWAYS_INLINE __attribute__((always_inline)) inline
#elif defined(_MSC_VER) || defined(__INTEL_COMPILER)
# define CPB_ALWAYS_INLINE __forceinline
#else
# define CPB_ALWAYS_INLINE inline
#endif
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/detail/thread.hpp | .hpp | 5,172 | 204 | #pragma once
#include <thread>
#include <queue>
#include <condition_variable>
namespace cpb { namespace detail {
/**
 Bounded multi-producer/multi-consumer FIFO queue

 Consumers block in `pop()` until an item arrives or all registered producers
 have called `remove_producer()` (which closes the queue). Producers block in
 `push()` while the queue already holds `max_size` items.
 */
template<class T>
class Queue {
public:
    /// Result of `pop()`: holds a value, or is invalid if the queue was closed and empty
    struct Maybe {
        Maybe() = default;
        Maybe(T&& value) : value(std::move(value)), is_valid(true) {}

        operator bool() const { return is_valid; }
        /// Moves the value out -- call at most once on a valid `Maybe`
        T get() { return std::move(value); }

    private:
        T value;
        bool is_valid = false;
    };

public:
    Queue() = default;
    explicit Queue(std::size_t max_size) : max_size(max_size) {}
    Queue(const Queue&) = delete;
    Queue& operator=(const Queue&) = delete;

    /// Register a producer; reopens the queue if it had been closed
    void add_producer() {
        std::unique_lock<std::mutex> lk(m);
        num_producers++;
        if (num_producers > 0)
            is_closed = false;
    }

    /// Deregister a producer; the last one closes the queue and wakes all consumers
    void remove_producer() {
        std::unique_lock<std::mutex> lk(m);
        num_producers--;
        if (num_producers <= 0)
            is_closed = true;
        lk.unlock();
        consumption_cv.notify_all();
    }

    /// Blocks until an item is available; returns an invalid `Maybe` only
    /// when the queue is both empty and closed
    Maybe pop() {
        std::unique_lock<std::mutex> lk(m);
        consumption_cv.wait(lk, [&] { return !q.empty() || is_closed; });
        if (q.empty())
            return {};

        // Move instead of copy: T may be expensive (e.g. std::function or a
        // large job payload).
        auto val = std::move(q.front());
        q.pop();
        lk.unlock();
        production_cv.notify_one();
        return std::move(val); // the T&& converts into a valid Maybe
    }

    /// Blocks while the queue is full
    void push(const T& item) {
        std::unique_lock<std::mutex> lk(m);
        production_cv.wait(lk, [&] { return q.size() < max_size; });
        q.push(item);
        lk.unlock();
        consumption_cv.notify_one();
    }

    void push(T&& item) {
        std::unique_lock<std::mutex> lk(m);
        production_cv.wait(lk, [&] { return q.size() < max_size; });
        q.push(std::move(item));
        lk.unlock();
        consumption_cv.notify_one();
    }

    /// Thread-safe snapshot of the current element count
    std::size_t size() const {
        std::unique_lock<std::mutex> lk(m);
        return q.size();
    }

private:
    std::queue<T> q;
    mutable std::mutex m; // mutable so the const `size()` can lock it
    std::condition_variable production_cv;
    std::condition_variable consumption_cv;
    bool is_closed = false;
    int num_producers = 0;
    std::size_t max_size = std::numeric_limits<std::size_t>::max();
};
template<class T>
class QueueGuard {
Queue<T>& wq;
public:
QueueGuard(Queue<T>& q) : wq(q) { wq.add_producer(); }
~QueueGuard() { wq.remove_producer(); }
};
#ifdef CPB_USE_MKL
# include <mkl.h>
/// RAII guard: limits MKL to one internal thread while alive (when the given
/// condition holds) and restores the previous thread count on destruction
class MKLDisableThreading {
public:
    MKLDisableThreading(bool condition) : num_threads{mkl_get_max_threads()} {
        if (condition)
            mkl_set_num_threads(1);
    }
    ~MKLDisableThreading() { mkl_set_num_threads(num_threads); }

private:
    int num_threads; ///< captured at construction, restored at destruction
};
#endif
} // namespace detail
/**
 Pipelined parallel loop: produce -> compute (x num_threads) -> retire

 `produce(id)` creates the payload for job `id` (runs on a single producer
 thread), `compute(value)` does the heavy work on one of `num_threads`
 workers, and `retire(value, id)` consumes finished jobs on a single thread
 (jobs may arrive out of order since the workers run independently).
 `queue_size` bounds the number of produced-but-not-yet-computed jobs;
 0 means "use num_threads".
 */
template<class Produce, class Compute, class Retire>
void parallel_for(size_t size, size_t num_threads, size_t queue_size,
                  Produce produce, Compute compute, Retire retire) {
#ifdef CPB_USE_MKL
    // Our own compute threads would oversubscribe the CPU if MKL also
    // multithreaded internally.
    detail::MKLDisableThreading disable_mkl_internal_threading_if{num_threads > 1};
#endif
    using Value = decltype(produce(size_t{}));
    struct Job {
        size_t id; // original loop index, so `retire` can reorder results
        Value value;
    };

    detail::Queue<Job> work_queue{queue_size > 0 ? queue_size : num_threads};
    detail::Queue<Job> retirement_queue{};

    // This thread produces new jobs and adds them to the work queue
    std::thread production_thread([&] {
        // Guard closes the work queue when production is done.
        detail::QueueGuard<Job> guard{work_queue};
        for (auto id = size_t{0}; id < size; ++id) {
            work_queue.push({id, produce(id)});
        }
    });

    // Multiple compute threads consume the work queue
    // and send the completed jobs to the retirement queue
    auto work_threads = std::vector<std::thread>{num_threads};
    for (auto& thread : work_threads) {
        thread = std::thread([&] {
            // Each worker registers as a producer for the retirement queue, so
            // that queue only closes after the last worker finishes.
            detail::QueueGuard<Job> guard{retirement_queue};
            while (auto maybe_job = work_queue.pop()) {
                auto job = maybe_job.get();
                compute(job.value);
                retirement_queue.push(std::move(job));
            }
        });
    }

    // This thread consumes the retirement queue
    std::thread report_thread([&] {
        while (auto maybe_job = retirement_queue.pop()) {
            auto job = maybe_job.get();
            retire(std::move(job.value), job.id);
        }
    });

    production_thread.join();
    for (auto& thread : work_threads) {
        thread.join();
    }
    report_thread.join();
}
/**
 Fixed-size pool of worker threads consuming a shared job queue

 The pool registers itself as the queue's only producer; `join()` deregisters
 it, which closes the queue and lets the workers drain any remaining jobs and
 exit. Jobs added after `join()` will never run.
 */
class ThreadPool {
public:
    ThreadPool(idx_t num_threads) : workers(static_cast<size_t>(num_threads)) {
        queue.add_producer();
        for (auto& thread : workers) {
            thread = std::thread([&] {
                // pop() blocks until a job arrives and returns an invalid
                // Maybe once the queue is closed and empty -> worker exits.
                while (auto maybe_job = queue.pop()) {
                    maybe_job.get()();
                }
            });
        }
    }

    ~ThreadPool() { join(); }

    /// Enqueue a callable job for execution on one of the workers
    template<class F>
    void add(F&& f) {
        queue.push(std::forward<F>(f));
    }

    /// Close the queue and wait for all workers to finish (idempotent)
    void join() {
        if (is_joined) { return; }

        queue.remove_producer();
        for (auto& thread : workers) {
            thread.join();
        }
        is_joined = true;
    }

private:
    std::vector<std::thread> workers;
    detail::Queue<std::function<void()>> queue;
    bool is_joined = false;
};
} // namespace cpb
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/detail/sugar.hpp | .hpp | 590 | 25 | #pragma once
#include <initializer_list>
namespace cpb { namespace detail {
/// Prevents unused variable warnings when evaluating variadic parameter packs
/// (function-argument evaluation order is unspecified here)
template<class... Ts> void eval_unordered(Ts&&...) {}
/// Like `eval_unordered`, but the braced-init-list at the call site is
/// evaluated strictly left to right
template<class T> void eval_ordered(std::initializer_list<T>) {}
/// Minimal iterator-pair range so `for (auto x : make_range(first, last))` works
template<class It1, class It2>
struct range {
    It1 _begin;
    It2 _end;

    It1 begin() const { return _begin; }
    It2 end() const { return _end; }
};
} // namespace detail
/// Deduce a `detail::range` from an iterator pair
template<class It1, class It2>
detail::range<It1, It2> make_range(It1 begin, It2 end) {
    return detail::range<It1, It2>{begin, end};
}
} // namespace cpb
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/detail/algorithm.hpp | .hpp | 1,583 | 55 | #pragma once
#include "detail/config.hpp"
#include <algorithm>
namespace cpb {
/**
Slice a Vector into pieces of `slice_size`
*/
template<class Vector>
class Sliced {
    /// Input iterator where each dereference yields one slice: a [begin, end)
    /// sub-range of at most `step` elements
    struct Iterator {
        using Self = Iterator;
        using iterator_category = std::input_iterator_tag;
        using difference_type = std::ptrdiff_t;
        using value_type = Self const;
        using reference = value_type&;
        using pointer = value_type*;

        using InnerIt = typename Vector::const_iterator;
        InnerIt it, last;
        idx_t step;

        Iterator(InnerIt first, InnerIt last, idx_t step) : it(first), last(last), step(step) {}
        /// End sentinel. Only `it` participates in comparisons, but `last`
        /// and `step` are initialized too so no member is ever indeterminate.
        Iterator(InnerIt last) : it(last), last(last), step(0) {}

        /// First element of the current slice
        InnerIt begin() const { return it; }
        /// One past the last element of the slice, clamped to the vector's end
        InnerIt end() const { return std::min(it + step, last); }

        reference operator*() { return *this; }
        pointer operator->() { return this; }
        Self& operator++() { it = end(); return *this; }

        friend bool operator==(Self const& l, Self const& r) { return l.it == r.it; }
        friend bool operator!=(Self const& l, Self const& r) { return !(l == r); }
    };

public:
    Sliced(Vector const& vec, idx_t slice_size) : vec(vec), slice_size(slice_size) {}

    Iterator begin() const { return {vec.begin(), vec.end(), slice_size}; }
    Iterator end() const { return {vec.end()}; }

private:
    Vector const& vec; // non-owning: the sliced vector must outlive this view
    idx_t slice_size;
};
/// Iterate over slices of a vector
template<class Vector>
Sliced<Vector> sliced(Vector const& vec, idx_t slice_size) {
    return Sliced<Vector>(vec, slice_size);
}
} // namespace cpb
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/detail/slice.hpp | .hpp | 1,918 | 62 | #pragma once
#include <algorithm>
#include <numeric>
namespace cpb {
/**
Holds the start and end index of an array slice in 1 dimension
*/
struct SliceIndex {
    int start;
    int end;

    /// Entire array dimension
    SliceIndex() : start(0), end(-1) {}
    /// Single index -> intentionally implicit so that `array[7]` still works
    SliceIndex(int index) : start(index), end(index + 1) {}
    /// Slice range: to be used like `array[{2, 5}]`
    SliceIndex(int start, int end) : start(start), end(end) {}

    /// Number of indices covered by the slice (never negative)
    int size() const { return std::max(end - start, 0); }

    /// Shift the whole slice window by `n` positions
    SliceIndex& operator+=(int n) {
        start += n;
        end += n;
        return *this;
    }
    SliceIndex& operator-=(int n) { return operator+=(-n); }

    SliceIndex& operator++() { return operator+=(1); }
    SliceIndex operator++(int) {
        auto const previous = *this;
        operator+=(1);
        return previous;
    }
    SliceIndex& operator--() { return operator+=(-1); }
    SliceIndex operator--(int) {
        auto const previous = *this;
        operator-=(1);
        return previous;
    }

    friend bool operator==(SliceIndex const& a, SliceIndex const& b) {
        return a.start == b.start && a.end == b.end;
    }
    friend bool operator!=(SliceIndex const& a, SliceIndex const& b) { return !(a == b); }
};
/**
Multidimensional slice
*/
template<int N>
class SliceIndexND {
SliceIndex data[N];
public:
SliceIndexND() = default;
SliceIndexND(std::initializer_list<SliceIndex> indices) {
std::copy_n(indices.begin(), N, data);
}
int size() const {
return std::accumulate(std::begin(data), std::end(data), 1,
[](int a, SliceIndex b) { return a * b.size(); });
}
constexpr int ndims() const { return N; }
SliceIndex& operator[](int i) { return data[i]; }
SliceIndex const& operator[](int i) const { return data[i]; }
};
using SliceIndex3D = SliceIndexND<3>;
} // namespace cpb
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/detail/config.hpp | .hpp | 534 | 18 | #pragma once
#include <cstddef>

// Optional Intel MKL backend: provides optimized kernels for Eigen
// and enables the FEAST eigensolver
#ifdef CPB_USE_MKL
#define EIGEN_USE_MKL_ALL
#define CPB_USE_FEAST
#endif

// Global Eigen configuration -- this header is intended to be included
// before any Eigen header so these macros can take effect
#define EIGEN_DONT_PARALLELIZE // disable Eigen's internal multi-threading (doesn't do much anyway)
#define EIGEN_MAX_ALIGN_BYTES 32 // always use AVX alignment
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t
#define EIGEN_DEFAULT_TO_ROW_MAJOR

namespace cpb {

using idx_t = std::ptrdiff_t; // type for general indexing and interfaces
using storage_idx_t = int; // type used when storing indices in containers

}
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/detail/typelist.hpp | .hpp | 2,205 | 87 | #pragma once
#include <type_traits>
namespace cpb {
/// Compile-time list of types
template<class...> struct TypeList {};

namespace tl {

/// Concatenate two TypeLists into one
template<class List1, class List2>
struct ConcatImpl;

template<class List1, class List2>
using Concat = typename ConcatImpl<List1, List2>::type;

template<class... Ts1, class... Ts2>
struct ConcatImpl<TypeList<Ts1...>, TypeList<Ts2...>> {
    using type = TypeList<Ts1..., Ts2...>;
};

/// Cartesian product of two TypeLists: a TypeList of two-element TypeLists
template<class T1, class T2>
struct CombinationsImpl;

template<class T1, class T2>
using Combinations = typename CombinationsImpl<T1, T2>::type;

// Base case: a single type remains in the first list
template<class T1, class... Ts2>
struct CombinationsImpl<TypeList<T1>, TypeList<Ts2...>> {
    using type = TypeList<TypeList<T1, Ts2>...>;
};

// Recursive case: pair the head of the first list with every type of the
// second list, then recurse on the tail
template<class T1, class... Tail, class... Ts2>
struct CombinationsImpl<TypeList<T1, Tail...>, TypeList<Ts2...>> {
    using type = Concat<
        TypeList<TypeList<T1, Ts2>...>,
        Combinations<
            TypeList<Tail...>, TypeList<Ts2...>
        >
    >;
};

/// Keep only the types for which `Predicate<T>::value` is true
template<class List, template<class> class Predicate>
struct FilterImpl;

template<class List, template<class> class Predicate>
using Filter = typename FilterImpl<List, Predicate>::type;

template<template<class> class Predicate>
struct FilterImpl<TypeList<>, Predicate> {
    using type = TypeList<>;
};

template<class T, class... Ts, template<class> class Predicate>
struct FilterImpl<TypeList<T, Ts...>, Predicate> {
    using type = Concat<
        typename std::conditional<Predicate<T>::value, TypeList<T>, TypeList<>>::type,
        Filter<TypeList<Ts...>, Predicate>
    >;
};

namespace impl {
    template<bool...> struct Bools {};
    template<class> struct AlwaysFalse { static constexpr bool value = false; };

    template<class List, class Target>
    struct AnyOf;

    // The pack of `is_same` results is compared against an all-false pack:
    // they differ exactly when at least one list type matches `Target`
    template<class Target, class... Ts>
    struct AnyOf<TypeList<Ts...>, Target> {
        static constexpr bool value = !std::is_same<
            Bools<std::is_same<Ts, Target>::value...>,
            Bools<AlwaysFalse<Ts>::value...>
        >::value;
    };
}

/**
 Does any type in the list match the Target type?
 */
template<class List, class Target>
using AnyOf = std::integral_constant<bool, impl::AnyOf<List, Target>::value>;
}} // namespace cpb:tl
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/leads/Leads.hpp | .hpp | 2,011 | 62 | #pragma once
#include "leads/Spec.hpp"
#include "leads/Structure.hpp"
#include "leads/HamiltonianPair.hpp"
namespace cpb {
/**
Full description of a single lead
*/
class Lead {
leads::Spec specification;
leads::Structure structure;
leads::HamiltonianPair hamiltonian;
public:
Lead(leads::Spec const& spec, leads::Structure const& ls, leads::HamiltonianPair const& lh)
: specification(spec), structure(ls), hamiltonian(lh) {}
leads::Spec const& spec() const { return specification; }
std::vector<int> const& indices() const { return structure.indices; }
System const& system() const { return structure.system; }
Hamiltonian const& h0() const { return hamiltonian.h0; }
Hamiltonian const& h1() const { return hamiltonian.h1; }
};
/**
Container for all leads of a model
*/
class Leads {
    // The three vectors are parallel: element `i` of each describes lead `i`
    std::vector<leads::Spec> specs;
    std::vector<leads::Structure> structures;
    std::vector<leads::HamiltonianPair> hamiltonians;

public:
    /// The total number of leads
    int size() const { return static_cast<int>(specs.size()); }
    /// Description of lead number `i`; throws `std::out_of_range` for an
    /// invalid index (via `std::vector::at`)
    Lead operator[](size_t i) const { return {specs.at(i), structures.at(i), hamiltonians.at(i)}; }
    /// Add a lead specified by `direction` and `shape`
    void add(int direction, Shape const& shape) { specs.emplace_back(direction, shape); }
    /// Modify the `foundation` so that all leads can be attached
    void create_attachment_area(Foundation& foundation) const;
    /// Create the structure of each lead
    void make_structure(Foundation const& foundation);
    /// Create a Hamiltonian pair for each lead
    void make_hamiltonian(Lattice const& lattice, HamiltonianModifiers const& modifiers,
                          bool is_double, bool is_complex);
    /// Clear any existing structural data, implies clearing Hamiltonian
    void clear_structure();
    /// Clear Hamiltonian, but leave structural data untouched
    void clear_hamiltonian();
};
} // namespace cpb
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/leads/HamiltonianPair.hpp | .hpp | 2,508 | 68 | #pragma once
#include "hamiltonian/Hamiltonian.hpp"
namespace cpb { namespace leads {
namespace detail {
/// Build the lead Hamiltonian for hoppings within a single unit cell
template<class scalar_t>
Hamiltonian make_h0(System const& lead_system, Lattice const& lattice,
                    HamiltonianModifiers const& modifiers) {
    auto h0 = std::make_shared<SparseMatrixX<scalar_t>>();
    cpb::detail::build_main(*h0, lead_system, lattice, modifiers, /*simple_build*/true);
    h0->makeCompressed();
    cpb::detail::throw_if_invalid(*h0);
    return h0;
}

/// Build the lead Hamiltonian for hoppings between neighboring unit cells
template<class scalar_t>
Hamiltonian make_h1(System const& system, Lattice const& lattice,
                    HamiltonianModifiers const& modifiers) {
    auto h1 = std::make_shared<SparseMatrixX<scalar_t>>();
    auto& matrix = *h1;

    auto const size = system.hamiltonian_size();
    matrix.resize(size, size);
    // Per-row non-zero estimate: reserves space so `insert` does not reallocate
    matrix.reserve(ArrayXi::Constant(size, lattice.max_hoppings()));
    // Boundary index 0 -- NOTE(review): assumed to select the hoppings which
    // cross the unit cell boundary; confirm against `apply_to_hoppings`
    modifiers.apply_to_hoppings<scalar_t>(system, 0, [&](idx_t i, idx_t j, scalar_t hopping) {
        matrix.insert(i, j) = hopping;
    });

    h1->makeCompressed();
    cpb::detail::throw_if_invalid(*h1);
    return h1;
}
} // namespace detail
/**
Pair of Hamiltonians which describe the periodic structure of a lead
*/
struct HamiltonianPair {
    Hamiltonian h0; ///< hoppings within the unit cell
    Hamiltonian h1; ///< hoppings between unit cells

    /// Builds both matrices, selecting the scalar type from the
    /// `is_double` / `is_complex` flags (float/double x real/complex)
    HamiltonianPair(System const& lead_system, Lattice const& lattice,
                    HamiltonianModifiers const& modifiers, bool is_double, bool is_complex) {
        if (is_double) {
            if (is_complex) {
                h0 = detail::make_h0<std::complex<double>>(lead_system, lattice, modifiers);
                h1 = detail::make_h1<std::complex<double>>(lead_system, lattice, modifiers);
            } else {
                h0 = detail::make_h0<double>(lead_system, lattice, modifiers);
                h1 = detail::make_h1<double>(lead_system, lattice, modifiers);
            }
        } else {
            if (is_complex) {
                h0 = detail::make_h0<std::complex<float>>(lead_system, lattice, modifiers);
                h1 = detail::make_h1<std::complex<float>>(lead_system, lattice, modifiers);
            } else {
                h0 = detail::make_h0<float>(lead_system, lattice, modifiers);
                h1 = detail::make_h1<float>(lead_system, lattice, modifiers);
            }
        }
    }
};
}} // namespace cpb::leads
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/leads/Structure.hpp | .hpp | 836 | 30 | #pragma once
#include "leads/Spec.hpp"
#include "system/System.hpp"
#include <vector>
namespace cpb { namespace leads {
/**
Lead structural information: sites, hoppings and connection to main system
*/
struct Structure {
    std::vector<int> indices; ///< map from lead Hamiltonian indices to main system indices
    System system; ///< description of lead sites and boundaries

    Structure(Foundation const& foundation, Spec const& spec);

    /// Return the lead index corresponding to the main system Hamiltonian
    /// index, or -1 if the given index does not belong to this lead
    int lead_index(int system_index) const {
        auto const count = static_cast<int>(indices.size());
        for (auto i = 0; i < count; ++i) {
            if (indices[i] == system_index) { return i; }
        }
        return -1;
    }
};
}} // namespace cpb::leads
| Unknown |
2D | dean0x7d/pybinding | cppcore/include/leads/Spec.hpp | .hpp | 1,616 | 50 | #pragma once
#include "system/Shape.hpp"
#include "detail/slice.hpp"
#include "numeric/dense.hpp"
namespace cpb {
class Foundation;
namespace leads {
/**
Lead specification
The direction parameter needs to be one of: 1, 2, 3, -1, -2, -3.
The number indicates the lattice vector along which the lead is placed. It mustn't be
bigger than the number of lattice vectors. The sign indicates if the lead with go with
or opposite the given lattice vector.
*/
struct Spec {
    int axis; ///< the crystal axis of this lead (lattice vector direction)
    int sign; ///< +1 or -1: with or opposite the axis direction
    Shape shape; ///< determines the attachment area with the main system

    /// Splits `direction` (see the valid values documented above) into
    /// `axis` and `sign`; defined in the corresponding .cpp file
    Spec(int direction, Shape const& shape);
};
/**
Create a lead attachment area in the foundation
*/
/// Modify `foundation` so that the lead described by `spec` can be attached
void create_attachment_area(Foundation& foundation, Spec const& spec);

namespace detail {

/// Return the slice of the foundation which contains the shape
SliceIndex3D shape_slice(Foundation const& foundation, Shape const& shape);
/// Compute the slice where the lead can be attached to the foundation
SliceIndex3D attachment_slice(Foundation const& foundation, Spec const& spec);

/// Describes the area where the lead should be attached to the foundation
struct Junction {
    SliceIndex3D slice_index; ///< slice of the foundation where the lead can be attached
    ArrayX<bool> is_valid; ///< valid lead sites within the slice (determined from lead shape)

    Junction(Foundation const& foundation, Spec const& spec);
};

} // namespace detail
}} // namespace cpb::leads
| Unknown |
2D | dean0x7d/pybinding | cppcore/cuda/thrust.hpp | .hpp | 307 | 17 | #pragma once
#ifdef __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wall"
# pragma clang diagnostic ignored "-Wunused-parameter"
#endif
#include <thrust/host_vector.h>
#include <thrust/complex.h>
#ifdef __clang__
# pragma clang diagnostic pop
#endif
namespace thr = thrust;
| Unknown |
2D | dean0x7d/pybinding | cppcore/cuda/kpm/calc_moments.hpp | .hpp | 1,491 | 44 | #pragma once
#include "kpm/OptimizedSizes.hpp"
#include "detail/macros.hpp"
#include "numeric/sparseref.hpp"
#include "cuda/thrust.hpp"
namespace cpb { namespace cuda {
/**
The Cuda functions must be defined only in nvcc-compiled translation units,
but the declarations need to be visible to non-Cuda code as well. Since these
are templates, they need to be explicitly instantiated in a Cuda translation
unit for all relevant scalar types. To help with this, they are all wrapped in
a template class `I`. This way, a single explicit instantiation of `I` will
take care of everything. It's a bit weird but it works nicely.
*/
template<class scalar_t>
class I {
    using real_t = num::get_real_t<scalar_t>;
    using complex_t = num::get_complex_t<scalar_t>;

public:
    /**
     Diagonal KPM moments -- reference implementation, no optimizations

     Calculates moments for a single matrix element (i, i) on the main diagonal.
     It's 1.5x to 2x times faster than the general version.

     `ell` is the Hamiltonian in ELLPACK sparse format; `num_moments` is the
     length of the returned moment vector.
     */
    static thr::host_vector<scalar_t>
    calc_diag_moments0(num::EllConstRef<scalar_t> ell, int i, int num_moments);

    /**
     Diagonal KPM moments -- with reordering optimization (optimal system size for each iteration)
     */
    static thr::host_vector<scalar_t>
    calc_diag_moments1(num::EllConstRef<scalar_t> ell, int i, int num_moments,
                       kpm::OptimizedSizes const& sizes);
};
CPB_EXTERN_TEMPLATE_CLASS(I)
}} // namespace cpb::cuda
| Unknown |
2D | dean0x7d/pybinding | cppcore/src/Lattice.cpp | .cpp | 12,652 | 332 | #include "Lattice.hpp"
#include <Eigen/Dense> // for `colPivHouseholderQr()`
#include <support/format.hpp>
using namespace fmt::literals;
namespace cpb {
namespace {
/// Visit all hopping terms in all families
/// Invoke `lambda(family, term)` for every hopping term of every family
template<class F>
void for_each_term(Lattice::Hoppings const& hoppings, F lambda) {
    for (auto const& entry : hoppings) {
        for (auto const& term : entry.second.terms) {
            lambda(entry.second, term);
        }
    }
}
} // anonymous namespace
Lattice::Lattice(Cartesian a1, Cartesian a2, Cartesian a3) {
    // The first lattice vector is always kept; a2 and a3 are each skipped
    // independently when zero (a 1D or 2D lattice)
    auto const keep_if_nonzero = [&](Cartesian const& v) {
        if (!v.isZero()) { vectors.push_back(v); }
    };
    vectors.push_back(a1);
    keep_if_nonzero(a2);
    keep_if_nonzero(a3);
    vectors.shrink_to_fit();
}
/// Scalar onsite energy -> canonical matrix form (see `detail::canonical_onsite_energy`)
void Lattice::add_sublattice(string_view name, Cartesian position, double onsite_energy) {
    add_sublattice(name, position, detail::canonical_onsite_energy(onsite_energy));
}

/// Vector onsite energy (one value per orbital) -> canonical matrix form
void Lattice::add_sublattice(string_view name, Cartesian position, VectorXd const& onsite_energy) {
    add_sublattice(name, position, detail::canonical_onsite_energy(onsite_energy));
}

/// General form: a matrix of onsite energies (multi-orbital sublattice)
void Lattice::add_sublattice(string_view name, Cartesian position,
                             MatrixXcd const& onsite_energy) {
    detail::check_onsite_energy(onsite_energy);
    // The stored energy is made Hermitian from the upper triangle
    auto const hermitian_view = onsite_energy.selfadjointView<Eigen::Upper>();
    auto const unique_id = make_unique_sublattice_id(name);
    // A regular (non-alias) sublattice is its own alias
    auto const alias_id = SubAliasID(unique_id);
    sublattices[name] = {position, hermitian_view, unique_id, alias_id};
}

/// An alias shares the onsite energy and alias ID of an existing sublattice
/// but has its own name and position
void Lattice::add_alias(string_view alias_name, string_view original_name, Cartesian position) {
    // Look up the original first: this also validates `original_name`
    auto const& original = sublattice(original_name);
    auto const alias_id = SubAliasID(original.unique_id);
    auto const unique_id = make_unique_sublattice_id(alias_name);
    sublattices[alias_name] = {position, original.energy, unique_id, alias_id};
}

/// Scalar hopping energy -> canonical matrix form
void Lattice::register_hopping_energy(std::string const& name, std::complex<double> energy) {
    register_hopping_energy(name, detail::canonical_hopping_energy(energy));
}
/// Register a named hopping family given by its energy matrix
void Lattice::register_hopping_energy(std::string const& name, MatrixXcd const& energy) {
    if (name.empty()) { throw std::logic_error("Hopping name can't be blank"); }
    detail::check_hopping_energy(energy);
    // The new family ID is simply the next index in registration order
    auto const id = HopID(hoppings.size());
    auto const result = hoppings.insert({name, {energy, id, {}}});
    if (!result.second) {
        throw std::logic_error("Hopping '" + name + "' already exists");
    }
}
/// Add a hopping term to an already registered family
void Lattice::add_hopping(Index3D relative_index, string_view from_sub, string_view to_sub,
                          string_view hopping_family_name) {
    if (from_sub == to_sub && relative_index == Index3D::Zero()) {
        throw std::logic_error(
            "Hoppings from/to the same sublattice must have a non-zero relative "
            "index in at least one direction. Don't define onsite energy here."
        );
    }

    // These lookups throw if any of the names is unknown
    auto const& from = sublattice(from_sub);
    auto const& to = sublattice(to_sub);
    auto const& hop_matrix = hopping_family(hopping_family_name).energy;

    // The hopping matrix dimensions must match the orbital counts of the
    // source (rows) and destination (columns) sublattices
    if (from.energy.rows() != hop_matrix.rows() || to.energy.cols() != hop_matrix.cols()) {
        throw std::logic_error(
            "Hopping size mismatch: from '{}' ({}) to '{}' ({}) with matrix '{}' ({}, {})"_format(
                from_sub, from.energy.rows(), to_sub, to.energy.cols(),
                hopping_family_name, hop_matrix.rows(), hop_matrix.cols()
            )
        );
    }

    // Duplicates are rejected across all families, not just the target one
    auto const candidate = HoppingTerm{relative_index, from.unique_id, to.unique_id};
    for_each_term(hoppings, [&](HoppingFamily const&, HoppingTerm const& existing) {
        if (candidate == existing) {
            throw std::logic_error("The specified hopping already exists.");
        }
    });

    hoppings[hopping_family_name].terms.push_back(candidate);
}

/// Scalar hopping energy -> canonical matrix form
void Lattice::add_hopping(Index3D relative_index, string_view from_sub, string_view to_sub,
                          std::complex<double> energy) {
    add_hopping(relative_index, from_sub, to_sub, detail::canonical_hopping_energy(energy));
}

/// Add a hopping given by its energy matrix: reuses an existing family with
/// identical energy, otherwise registers a new anonymous family
void Lattice::add_hopping(Index3D relative_index, string_view from_sub, string_view to_sub,
                          MatrixXcd const& energy) {
    auto const hopping_name = [&] {
        // Look for an existing hopping ID with the same energy
        auto const it = std::find_if(hoppings.begin(), hoppings.end(), [&](Hoppings::reference r) {
            auto const& e = r.second.energy;
            // Dimensions are compared first: Eigen's == requires equal sizes
            return e.rows() == energy.rows() && e.cols() == energy.cols() && e == energy;
        });
        if (it != hoppings.end()) {
            return it->first;
        } else {
            auto const name = "__anonymous__{}"_format(hoppings.size());
            register_hopping_energy(name, energy);
            return name;
        }
    }();
    add_hopping(relative_index, from_sub, to_sub, hopping_name);
}

void Lattice::set_offset(Cartesian position) {
    // 0.55 = half a primitive vector plus a small tolerance
    if (any_of(translate_coordinates(position).array().abs() > 0.55f)) {
        throw std::logic_error("Lattice origin must not be moved by more than "
                               "half the length of a primitive lattice vector.");
    }
    offset = position;
}
/// Find a sublattice by name; throws `std::out_of_range` if it doesn't exist
Lattice::Sublattice const& Lattice::sublattice(std::string const& name) const {
    auto const it = sublattices.find(name);
    if (it != sublattices.end()) { return it->second; }
    throw std::out_of_range("There is no sublattice named '{}'"_format(name));
}

/// Find a sublattice by its unique ID (linear search over the name map)
Lattice::Sublattice const& Lattice::sublattice(SubID id) const {
    for (auto const& pair : sublattices) {
        if (pair.second.unique_id == id) { return pair.second; }
    }
    throw std::out_of_range("There is no sublattice with ID = {}"_format(id.value()));
}

/// Find a hopping family by name; throws `std::out_of_range` if it doesn't exist
Lattice::HoppingFamily const& Lattice::hopping_family(std::string const& name) const {
    auto const it = hoppings.find(name);
    if (it != hoppings.end()) { return it->second; }
    throw std::out_of_range("There is no hopping named '{}'"_format(name));
}

/// Find a hopping family by its ID (linear search over the name map)
Lattice::HoppingFamily const& Lattice::hopping_family(HopID id) const {
    for (auto const& pair : hoppings) {
        if (pair.second.family_id == id) { return pair.second; }
    }
    throw std::out_of_range("There is no hopping with ID = {}"_format(id.value()));
}
/// Upper bound for the number of scalar hoppings originating from any single site
int Lattice::max_hoppings() const {
    auto result = idx_t{0};
    // Maximum over sublattices of the scalar hoppings a site can have
    for (auto const& pair : sublattices) {
        auto const& sub = pair.second;
        // Include hoppings in onsite matrix (-1 for diagonal value which is not a hopping)
        auto num_scalar_hoppings = sub.energy.cols() - 1;
        // Conjugate term counts rows instead of columns
        for_each_term(hoppings, [&](HoppingFamily const& family, HoppingTerm const& term) {
            if (term.from == sub.unique_id) { num_scalar_hoppings += family.energy.cols(); }
            if (term.to == sub.unique_id) { num_scalar_hoppings += family.energy.rows(); }
        });
        result = std::max(result, num_scalar_hoppings);
    }
    return static_cast<int>(result);
}
/// Cartesian position of a unit cell (and optionally a sublattice site within it)
Cartesian Lattice::calc_position(Index3D index, string_view sublattice_name) const {
    auto result = offset;
    // Bravais lattice position: integer combination of the lattice vectors
    auto const dims = ndim();
    for (auto i = 0; i < dims; ++i) {
        result += static_cast<float>(index[i]) * vectors[i];
    }
    // Shift by the sublattice position within the unit cell, if requested
    if (!sublattice_name.empty()) {
        result += sublattice(sublattice_name).position;
    }
    return result;
}
/// Express a Cartesian position in units of the lattice vectors;
/// dimensions beyond `ndim()` are left as zero
Vector3f Lattice::translate_coordinates(Cartesian position) const {
    auto const size = ndim();
    // Square matrix with the lattice vectors as columns (first `size` components)
    auto const lattice_matrix = [&]{
        auto m = ColMajorMatrixX<float>(size, size);
        for (auto i = 0; i < size; ++i) {
            m.col(i) = vectors[i].head(size);
        }
        return m;
    }();

    // Solve `lattice_matrix * v = p`
    auto const& p = position.head(size);
    auto v = Vector3f(0, 0, 0);
    v.head(size) = lattice_matrix.colPivHouseholderQr().solve(p);
    return v;
}
/// Return a copy of this lattice with a new origin offset (original unchanged)
Lattice Lattice::with_offset(Cartesian position) const {
    auto new_lattice = *this;
    new_lattice.set_offset(position);
    return new_lattice;
}

/// Return a copy of this lattice with a new minimum-neighbor count (original unchanged)
Lattice Lattice::with_min_neighbors(int number) const {
    auto new_lattice = *this;
    new_lattice.min_neighbors = number;
    return new_lattice;
}
/// Does any sublattice have a non-zero onsite energy diagonal?
bool Lattice::has_diagonal_terms() const {
    for (auto const& pair : sublattices) {
        if (!pair.second.energy.diagonal().isZero()) { return true; }
    }
    return false;
}

/// Does any sublattice have a non-zero onsite energy matrix?
bool Lattice::has_onsite_energy() const {
    for (auto const& pair : sublattices) {
        if (!pair.second.energy.isZero()) { return true; }
    }
    return false;
}
OptimizedUnitCell Lattice::optimized_unit_cell() const {
    return OptimizedUnitCell(*this);
}

/// Build the registry of unique (non-alias) sublattices, indexed by unique ID
SiteRegistry Lattice::site_registry() const {
    // Aliases share their energy and ID with an original sublattice,
    // so only sublattices which are their own alias get an entry
    auto const num_unique_subs = std::count_if(
        sublattices.begin(), sublattices.end(),
        [](Sublattices::const_reference r) {
            return SubID(r.second.alias_id) == r.second.unique_id;
        }
    );

    auto energies = std::vector<MatrixXcd>(num_unique_subs);
    auto names = std::vector<std::string>(num_unique_subs);
    for (auto const& pair : sublattices) {
        auto const& sub = pair.second;
        if (SubID(sub.alias_id) != sub.unique_id) { continue; } // skip aliases
        // `unique_id` values index the registry vectors directly
        energies.at(sub.unique_id.value()) = sub.energy;
        names.at(sub.unique_id.value()) = pair.first;
    }
    return {energies, names};
}
/// Build the registry of all hopping families, indexed by family ID
HoppingRegistry Lattice::hopping_registry() const {
    auto const count = hoppings.size();
    auto energies = std::vector<MatrixXcd>(count);
    auto names = std::vector<std::string>(count);
    for (auto const& entry : hoppings) {
        auto const idx = entry.second.family_id.value();
        energies.at(idx) = entry.second.energy;
        names.at(idx) = entry.first;
    }
    return {energies, names};
}
/// Validate the name and hand out the next sublattice ID in registration order
SubID Lattice::make_unique_sublattice_id(string_view name) {
    if (name.empty()) { throw std::logic_error("Sublattice name can't be blank"); }
    auto const already_exists = sublattices.find(name) != sublattices.end();
    if (already_exists) {
        throw std::logic_error("Sublattice '" + name + "' already exists");
    }
    return SubID(sublattices.size());
}
/// Flatten the lattice into a site vector ordered for efficient system assembly:
/// grouped by orbital count, with alias groups kept contiguous
OptimizedUnitCell::OptimizedUnitCell(Lattice const& lattice) : sites(lattice.nsub()) {
    // Populate sites in ascending unique_id order
    for (auto const& pair : lattice.get_sublattices()) {
        auto const& sub = pair.second;
        auto const idx = sub.unique_id.as<size_t>();
        sites[idx] = {sub.position, /*norb*/static_cast<storage_idx_t>(sub.energy.cols()),
                      sub.unique_id, sub.alias_id, /*hoppings*/{}};
    }

    // Sites with equal `alias_id` will be merged in the final system. Stable sort
    // ensures that the ascending unique_id order is preserved within alias groups.
    std::stable_sort(sites.begin(), sites.end(), [](Site const& a, Site const& b) {
        return a.alias_id < b.alias_id;
    });

    // Sort by number of orbitals, but make sure alias ordering from the previous step
    // is preserved (stable sort). Aliases have the same `norb` so these sites will
    // remain as consecutive elements in the final sorted vector.
    std::stable_sort(sites.begin(), sites.end(), [](Site const& a, Site const& b) {
        return a.norb < b.norb;
    });

    // Find the index in `sites` of a site with the given unique ID
    // (needed because the sorts above shuffled the original positions)
    auto find_index = [&](SubID unique_id) {
        auto const it = std::find_if(sites.begin(), sites.end(), [&](Site const& s) {
            return s.unique_id == unique_id;
        });
        assert(it != sites.end());
        return static_cast<storage_idx_t>(it - sites.begin());
    };

    // Record each hopping term on both endpoints: once forward, once conjugated
    for_each_term(lattice.get_hoppings(), [&](Lattice::HoppingFamily const& hopping_family,
                                              Lattice::HoppingTerm const& term) {
        auto const idx1 = find_index(term.from);
        auto const idx2 = find_index(term.to);
        auto const id = hopping_family.family_id;
        // The other sublattice has an opposite relative index (conjugate)
        sites[idx1].hoppings.push_back({ term.relative_index, idx2, id, /*is_conjugate*/false});
        sites[idx2].hoppings.push_back({-term.relative_index, idx1, id, /*is_conjugate*/true });
    });
}
} // namespace cpb
| C++ |
2D | dean0x7d/pybinding | cppcore/src/KPM.cpp | .cpp | 5,880 | 151 | #include "KPM.hpp"
using namespace fmt::literals;
namespace cpb {
/// `eval()` forces the lazy system/Hamiltonian/leads build up front
KPM::KPM(Model const& model, kpm::Compute const& compute, kpm::Config const& config)
    : model(model.eval()), core(kpm::Core(model.hamiltonian(), compute, config)) {}

/// Replace the model and rebuild the core around its Hamiltonian
void KPM::set_model(Model const& new_model) {
    model = new_model;
    core.set_hamiltonian(model.hamiltonian());
}

/// Core report plus the accumulated calculation time
std::string KPM::report(bool shortform) const {
    return core.report(shortform) + " " + calculation_timer.str();
}
/// Compute generalized KPM moments `mu_n = <beta| op T_n(H) |alpha>`;
/// `beta` and `op` may be empty (optional). Throws `std::runtime_error`
/// on size or scalar-type mismatch with the model Hamiltonian.
ArrayXcd KPM::moments(idx_t num_moments, VectorXcd const& alpha, VectorXcd const& beta,
                      SparseMatrixXcd const& op) const {
    // Fix: reuse the cached size instead of re-querying `hamiltonian_size()`
    // for every check (the local was previously computed but mostly unused)
    auto const ham_size = model.system()->hamiltonian_size();
    auto const check_size = std::unordered_map<char const*, bool>{
        {"alpha", alpha.size() == ham_size},
        {"beta", beta.size() == 0 || beta.size() == ham_size},
        {"operator", op.size() == 0 || (op.rows() == ham_size && op.cols() == ham_size)}
    };
    for (auto const& pair : check_size) {
        if (!pair.second) {
            throw std::runtime_error("Size mismatch between the model Hamiltonian and the given "
                                     "argument '{}'"_format(pair.first));
        }
    }

    // A real Hamiltonian requires real-valued arguments as well
    if (!model.is_complex()) {
        auto const check_scalar_type = std::unordered_map<char const*, bool>{
            {"alpha", alpha.imag().isZero()},
            {"beta", beta.imag().isZero()},
            {"operator", Eigen::Map<ArrayXcd const>(op.valuePtr(), op.nonZeros()).imag().isZero()}
        };
        for (auto const& pair : check_scalar_type) {
            if (!pair.second) {
                throw std::runtime_error("The model Hamiltonian is real, but the given argument "
                                         "'{}' is complex"_format(pair.first));
            }
        }
    }

    calculation_timer.tic();
    auto moments = core.moments(num_moments, alpha, beta, op);
    calculation_timer.toc();
    return moments;
}
/// Local density of states at the site nearest to `position`;
/// with `reduce`, per-orbital columns are summed into one
ArrayXXdCM KPM::calc_ldos(ArrayXd const& energy, double broadening, Cartesian position,
                          string_view sublattice, bool reduce) const {
    // Map the nearest site to its Hamiltonian indices (one per orbital)
    auto const system_index = model.system()->find_nearest(position, sublattice);
    auto const ham_idx = model.system()->to_hamiltonian_indices(system_index);

    calculation_timer.tic();
    auto results = core.ldos({begin(ham_idx), end(ham_idx)}, energy, broadening);
    calculation_timer.toc();
    return (reduce && results.cols() > 1) ? results.rowwise().sum() : results;
}

/// LDOS for every site of the target sublattice which falls inside `shape`
ArrayXXdCM KPM::calc_spatial_ldos(ArrayXd const& energy, double broadening, Shape const& shape,
                                  string_view sublattice) const {
    if (model.is_multiorbital()) {
        throw std::runtime_error("This function doesn't currently support multi-orbital models");
    }
    auto const& system = *model.system();

    calculation_timer.tic();
    // Collect the indices of all matching sites contained in the shape
    auto const indices = [&]{
        auto const contains = shape.contains(system.positions);
        auto const range = system.sublattice_range(sublattice);

        auto v = std::vector<idx_t>();
        // Reserve the exact final size to avoid reallocations
        v.reserve(std::count(contains.data() + range.start, contains.data() + range.end, true));
        for (auto i = range.start; i < range.end; ++i) {
            if (contains[i]) { v.push_back(i); }
        }
        return v;
    }();
    auto results = core.ldos(indices, energy, broadening);
    calculation_timer.toc();
    return results;
}

/// Total density of states, stochastically estimated with `num_random` vectors
ArrayXd KPM::calc_dos(ArrayXd const& energy, double broadening, idx_t num_random) const {
    calculation_timer.tic();
    auto dos = core.dos(energy, broadening, num_random);
    calculation_timer.toc();
    return dos;
}
/// Green's function G(i, j) evaluated at the given energies.
/// Throws `std::logic_error` for out-of-range indices.
ArrayXcd KPM::calc_greens(idx_t row, idx_t col, ArrayXd const& energy, double broadening) const {
    auto const size = model.hamiltonian().rows();
    // Fix: valid indices are [0, size); the previous check used `> size`
    // which wrongly accepted the out-of-bounds index `size` itself
    if (row < 0 || row >= size || col < 0 || col >= size) {
        throw std::logic_error("KPM::calc_greens(i,j): invalid value for i or j.");
    }

    calculation_timer.tic();
    auto greens_function = core.greens(row, col, energy, broadening);
    calculation_timer.toc();
    return greens_function;
}
/// Green's functions G(row, col) for several columns at once.
/// Throws `std::logic_error` if any index is out of range.
std::vector<ArrayXcd> KPM::calc_greens_vector(idx_t row, std::vector<idx_t> const& cols,
                                              ArrayXd const& energy, double broadening) const {
    auto const size = model.hamiltonian().rows();
    // Fix: valid indices are [0, size); the previous checks used `> size`
    // which wrongly accepted the out-of-bounds index `size` itself
    auto const row_error = row < 0 || row >= size;
    auto const col_error = std::any_of(cols.begin(), cols.end(),
                                       [&](idx_t col) { return col < 0 || col >= size; });
    if (row_error || col_error) {
        throw std::logic_error("KPM::calc_greens(i,j): invalid value for i or j.");
    }

    calculation_timer.tic();
    auto greens_functions = core.greens_vector(row, cols, energy, broadening);
    calculation_timer.toc();
    return greens_functions;
}
/// Conductivity tensor component given by `direction` ("xx", "xy", ...)
/// as a function of chemical potential. Throws `std::logic_error` for an
/// invalid direction string.
ArrayXd KPM::calc_conductivity(ArrayXd const& chemical_potential, double broadening,
                               double temperature, string_view direction, idx_t num_random,
                               idx_t num_points) const {
    // Fix: each of the two characters must individually be one of 'x', 'y', 'z'.
    // The previous check (`xyz.find_first_of(direction)`) passed as soon as *any*
    // character matched, so e.g. "xq" slipped through and later dereferenced a
    // null pointer from the position map's operator[].
    auto const xyz = std::string("xyz");
    auto const is_valid = direction.size() == 2
                          && xyz.find(direction[0]) != std::string::npos
                          && xyz.find(direction[1]) != std::string::npos;
    if (!is_valid) {
        throw std::logic_error("Invalid direction: must be 'xx', 'xy', 'zz', or similar.");
    }

    auto const& system = *model.system();
    // Multi-orbital models repeat each position once per Hamiltonian index
    auto const& p = model.is_multiorbital() ? system.expanded_positions() : system.positions;
    auto map = std::unordered_map<char, ArrayXf const*>{{'x', &p.x}, {'y', &p.y}, {'z', &p.z}};

    calculation_timer.tic();
    auto result = core.conductivity(*map[direction[0]], *map[direction[1]], chemical_potential,
                                    broadening, temperature, num_random, num_points);
    calculation_timer.toc();
    return result.real();
}
} // namespace cpb
| C++ |
2D | dean0x7d/pybinding | cppcore/src/Model.cpp | .cpp | 6,388 | 218 | #include "Model.hpp"
#include "system/Foundation.hpp"
#include "support/format.hpp"
namespace cpb {
Model::Model(Lattice const& lattice)
    : lattice(lattice),
      site_registry(lattice.site_registry()),
      hopping_registry(lattice.hopping_registry()) {}

/// Set the system extent via `Primitive`; dimensions beyond the lattice's
/// dimensionality must remain size 1
void Model::add(Primitive new_primitive) {
    primitive = new_primitive;
    for (auto i = lattice.ndim(); i < 3; ++i) {
        if (primitive.size[i] != 1) {
            throw std::logic_error("Primitive shape has more dimensions than the lattice");
        }
    }
    clear_structure();
}

void Model::set_wave_vector(Cartesian const& new_wave_vector) {
    // Only the Hamiltonian depends on the wave vector -- the structure is kept
    if (wave_vector != new_wave_vector) {
        wave_vector = new_wave_vector;
        clear_hamiltonian();
    }
}

void Model::add(Shape const& new_shape) {
    shape = new_shape;
    // NOTE(review): the shape offset is accumulated into the stored lattice,
    // so adding several shapes applies several offsets -- confirm intended
    lattice.set_offset(lattice.get_offset() + shape.lattice_offset);
    clear_structure();
}

void Model::add(TranslationalSymmetry const& translational_symmetry) {
    symmetry = translational_symmetry;
    clear_structure();
}

/// Attach a lead along a lattice vector: direction in {1, 2, 3, -1, -2, -3}
void Model::attach_lead(int direction, Shape const& shape) {
    if (direction == 0) {
        throw std::logic_error("Lead direction must be one of: 1, 2, 3, -1, -2, -3");
    } else if (lattice.ndim() == 1) {
        throw std::logic_error("Attaching leads to 1D lattices is not supported");
    } else if (std::abs(direction) > lattice.ndim()) {
        throw std::logic_error(fmt::format("Direction {} is not valid for a {}D lattice",
                                           direction, lattice.ndim()));
    }
    _leads.add(direction, shape);
    clear_structure();
}

// Structure modifiers invalidate the built system (and with it the Hamiltonian)
void Model::add(SiteStateModifier const& m) {
    structure_modifiers.emplace_back(m);
    clear_structure();
}

void Model::add(PositionModifier const& m) {
    structure_modifiers.emplace_back(m);
    clear_structure();
}

// Hamiltonian modifiers only invalidate the Hamiltonian -- the structure is kept
void Model::add(OnsiteModifier const& m) {
    hamiltonian_modifiers.onsite.push_back(m);
    clear_hamiltonian();
}

void Model::add(HoppingModifier const& m) {
    hamiltonian_modifiers.hopping.push_back(m);
    clear_hamiltonian();
}

// Generators introduce new site/hopping families which must also be registered
void Model::add(SiteGenerator const& g) {
    structure_modifiers.emplace_back(g);
    site_registry.register_family(g.name, g.energy);
    clear_structure();
}

void Model::add(HoppingGenerator const& g) {
    structure_modifiers.emplace_back(g);
    hopping_registry.register_family(g.name, g.energy);
    clear_structure();
}
bool Model::is_multiorbital() const {
return site_registry.has_multiple_orbitals() || hopping_registry.has_multiple_orbitals();
}
bool Model::is_double() const {
return hamiltonian_modifiers.any_double();
}
// True if the Hamiltonian requires complex values: complex terms in either registry,
// complex modifiers, any translational symmetry (Bloch phases), or a previous build
// which discovered it needed complex numbers (`complex_override`).
bool Model::is_complex() const {
    return site_registry.any_complex_terms() || hopping_registry.any_complex_terms()
           || hamiltonian_modifiers.any_complex() || symmetry || complex_override;
}
// Lazily build and cache the system structure, timing the construction.
std::shared_ptr<System const> const& Model::system() const {
    if (!_system) {
        system_build_time.timeit([&]{
            _system = make_system();
        });
    }
    return _system;
}
// Lazily build and cache the Hamiltonian matrix, timing the construction.
Hamiltonian const& Model::hamiltonian() const {
    system(); // ensure the structure exists first
    if (!_hamiltonian) {
        hamiltonian_build_time.timeit([&]{
            _hamiltonian = make_hamiltonian();
        });
    }
    return _hamiltonian;
}
// Return the leads with their Hamiltonians built; forces the main system and
// Hamiltonian to be built first so is_double()/is_complex() are final.
Leads const& Model::leads() const {
    system();
    hamiltonian();
    _leads.make_hamiltonian(lattice, hamiltonian_modifiers, is_double(), is_complex());
    return _leads;
}
// Force evaluation of all lazily-built components (system, Hamiltonian, leads).
Model const& Model::eval() const {
    system();
    hamiltonian();
    leads();
    return *this;
}
// Human-readable summary: site count, Hamiltonian non-zeros and build times.
std::string Model::report() {
    auto const num_sites = fmt::with_suffix(static_cast<double>(system()->num_sites()));
    auto const nnz = fmt::with_suffix(static_cast<double>(hamiltonian().non_zeros()));
    return fmt::format("Built system with {} lattice sites, {}\n"
                       "The Hamiltonian has {} non-zero values, {}",
                       num_sites, system_build_time, nnz, hamiltonian_build_time);
}
// Build the system structure. Pipeline:
//   1. Lay out a Foundation from the lattice and shape (or bare primitive cell).
//   2. Apply symmetry and all foundation-stage modifiers.
//   3. Create lead attachment areas and lead structures.
//   4. Populate the System, then apply the remaining (system-stage) modifiers.
// Throws std::runtime_error if every site ends up invalid.
std::shared_ptr<System> Model::make_system() const {
    auto foundation = shape ? Foundation(lattice, shape)
                            : Foundation(lattice, primitive);
    if (symmetry) {
        symmetry.apply(foundation);
    }
    // Modifiers are split at the first one which needs a full System:
    // everything before it operates on the Foundation, the rest on the System.
    auto const it = std::find_if(structure_modifiers.begin(), structure_modifiers.end(),
                                 [](StructureModifier const& m) { return requires_system(m); });
    auto const foundation_modifiers = make_range(structure_modifiers.begin(), it);
    auto const system_modifiers = make_range(it, structure_modifiers.end());
    for (auto const& modifier : foundation_modifiers) {
        apply(modifier, foundation);
    }
    _leads.create_attachment_area(foundation);
    _leads.make_structure(foundation);
    auto sys = std::make_shared<System>(site_registry, hopping_registry);
    detail::populate_system(*sys, foundation);
    if (symmetry) {
        detail::populate_boundaries(*sys, foundation, symmetry);
    }
    for (auto const& modifier : system_modifiers) {
        apply(modifier, *sys);
    }
    detail::remove_invalid(*sys);
    if (sys->num_sites() == 0) { throw std::runtime_error{"Impossible system: 0 sites"}; }
    return sys;
}
// Build the Hamiltonian with the narrowest scalar type that suffices:
// float < double < complex<float> < complex<double>. If a real build throws
// ComplexOverride (a modifier produced complex values at runtime), remember
// that via `complex_override` and fall through to the complex build.
Hamiltonian Model::make_hamiltonian() const {
    auto const& built_system = *system();
    auto const& modifiers = hamiltonian_modifiers;
    auto const& k = wave_vector;
    // The fast build path is only possible when no generators are present
    auto const simple_build = std::none_of(
        structure_modifiers.begin(), structure_modifiers.end(),
        [](StructureModifier const& m) { return is_generator(m); }
    );
    if (!is_complex()) {
        try {
            if (!is_double()) {
                return ham::make<float>(built_system, lattice, modifiers, k, simple_build);
            } else {
                return ham::make<double>(built_system, lattice, modifiers, k, simple_build);
            }
        } catch (ComplexOverride const&) {
            complex_override = true; // future builds skip straight to complex
        }
    }
    if (!is_double()) {
        return ham::make<std::complex<float>>(built_system, lattice, modifiers, k, simple_build);
    } else {
        return ham::make<std::complex<double>>(built_system, lattice, modifiers, k, simple_build);
    }
}
// Drop the cached structure; the Hamiltonian depends on it so it goes too.
void Model::clear_structure() {
    _system.reset();
    _leads.clear_structure();
    clear_hamiltonian();
}
// Drop only the cached Hamiltonians (main system and leads).
void Model::clear_hamiltonian() {
    _hamiltonian.reset();
    _leads.clear_hamiltonian();
}
} // namespace cpb
| C++ |
2D | dean0x7d/pybinding | cppcore/src/utils/Chrono.cpp | .cpp | 1,583 | 43 | #include "utils/Chrono.hpp"
#include "support/format.hpp"
using namespace std::chrono;
using namespace cpb;
// Format the elapsed time with precision appropriate to its magnitude:
// ms with 2/1/0 decimals, seconds with 2/1/0 decimals, then m:ss and h:mm:ss.
std::string Chrono::str() const {
    auto ret = std::string{};
    if (elapsed < milliseconds{1}) {
        ret = fmt::format("{:.2f}ms", duration_cast<duration<float, std::milli>>(elapsed).count());
    } else if (elapsed < milliseconds{10}) {
        ret = fmt::format("{:.1f}ms", duration_cast<duration<float, std::milli>>(elapsed).count());
    } else if (elapsed < milliseconds{100}) {
        ret = fmt::format("{}ms", duration_cast<milliseconds>(elapsed).count());
    } else if (elapsed < seconds{10}) {
        ret = fmt::format("{:.2f}s", duration_cast<duration<float>>(elapsed).count());
    } else if (elapsed < seconds{20}) {
        ret = fmt::format("{:.1f}s", duration_cast<duration<float>>(elapsed).count());
    } else if (elapsed < seconds{60}) {
        ret = fmt::format("{}s", duration_cast<seconds>(elapsed).count());
    } else { // elapsed >= minutes{1}
        auto const min = duration_cast<minutes>(elapsed);
        auto const sec = duration_cast<seconds>(elapsed) - min; // leftover seconds
        if (min < minutes{60}) {
            ret = fmt::format("{}:{:02}", min.count(), sec.count());
        } else { // elapsed >= hours{1}
            auto const hr = duration_cast<hours>(min);
            ret = fmt::format("{}:{:02}:{:02}", hr.count(), (min - hr).count(), sec.count());
        }
    }
    return ret;
}
// Print the elapsed time, optionally prefixed with "msg: ". Returns *this
// so calls can be chained.
Chrono& Chrono::print(std::string msg) {
    auto prefix = std::move(msg);
    if (!prefix.empty()) {
        prefix += ": ";
    }
    fmt::print("{}{}\n", prefix, str());
    return *this;
}
| C++ |
2D | dean0x7d/pybinding | cppcore/src/system/StructureModifiers.cpp | .cpp | 2,160 | 70 | #include "system/StructureModifiers.hpp"
#include "system/Foundation.hpp"
#include "system/System.hpp"
namespace cpb {
// Apply a site-state modifier to every sublattice slice of the foundation,
// then optionally prune sites left with too few neighbors.
void apply(SiteStateModifier const& m, Foundation& f) {
    for (auto const& pair : f.get_lattice().get_sublattices()) {
        auto slice = f[pair.second.unique_id];
        m.apply(slice.get_states(), slice.get_positions(), pair.first); // pair.first = name
    }
    if (m.min_neighbors > 0) {
        remove_dangling(f, m.min_neighbors);
    }
}
// Apply a site-state modifier to an already-built System, sublattice by
// sublattice. `is_valid` is lazily initialized to all-true on first use.
// Dangling-bond removal is not supported at this stage.
void apply(SiteStateModifier const& m, System& s) {
    if (s.is_valid.size() == 0) {
        s.is_valid = ArrayX<bool>::Constant(s.num_sites(), true);
    }
    for (auto const& sub : s.compressed_sublattices) {
        m.apply(s.is_valid.segment(sub.sys_start(), sub.num_sites()),
                s.positions.segment(sub.sys_start(), sub.num_sites()),
                s.site_registry.name(sub.id()));
    }
    if (m.min_neighbors > 0) {
        throw std::runtime_error("Eliminating dangling bonds after a generator "
                                 "has not been implemented yet");
    }
}
// Apply a position modifier to every sublattice slice of the foundation.
void apply(PositionModifier const& m, Foundation& f) {
    for (auto const& pair : f.get_lattice().get_sublattices()) {
        auto slice = f[pair.second.unique_id];
        m.apply(slice.get_positions(), pair.first);
    }
}
// Apply a position modifier to an already-built System, one compressed
// sublattice segment at a time.
void apply(PositionModifier const& m, System& s) {
    for (auto const& sub : s.compressed_sublattices) {
        m.apply(s.positions.segment(sub.sys_start(), sub.num_sites()),
                s.site_registry.name(sub.id()));
    }
}
// Run a site generator: append its newly created sites to the system.
// Invalid sites are flushed first so indices of the new sites are stable.
void apply(SiteGenerator const& g, System& s) {
    detail::remove_invalid(s);
    auto const new_positions = g.make(s);
    auto const norb = g.energy.rows(); // orbitals per generated site
    auto const nsites = new_positions.size();
    s.compressed_sublattices.add(s.site_registry.id(g.name), norb, nsites);
    s.hopping_blocks.add_sites(nsites);
    s.positions = concat(s.positions, new_positions);
}
// Run a hopping generator: append its (from, to) index pairs as new hoppings.
void apply(HoppingGenerator const& g, System& s) {
    detail::remove_invalid(s); // ensure indices refer to valid sites only
    auto pairs = g.make(s);
    s.hopping_blocks.append(s.hopping_registry.id(g.name),
                            std::move(pairs.from), std::move(pairs.to));
}
} // namespace cpb
| C++ |
2D | dean0x7d/pybinding | cppcore/src/system/Registry.cpp | .cpp | 4,282 | 145 | #include "system/Registry.hpp"
#include "support/format.hpp"
using namespace fmt::literals;
namespace cpb {
namespace {
// Dispatch energy validation based on the registry type (tag dispatch on
// the unused first parameter).
void check_energy(SiteRegistry const&, MatrixXcd const& energy) {
    detail::check_onsite_energy(energy);
}
void check_energy(HoppingRegistry const&, MatrixXcd const& energy) {
    detail::check_hopping_energy(energy);
}
// Human-readable kind name for error messages, selected by the ID type.
template<class ID>
constexpr char const* kind() { return std::is_same<ID, SiteID>() ? "Site" : "Hopping"; }
} // anonymous namespace
// Construct a registry from parallel vectors of energies and names.
// Precondition (debug-checked): one name per energy matrix.
template<class ID>
Registry<ID>::Registry(std::vector<MatrixXcd> energies, std::vector<std::string> names)
    : energies(std::move(energies)), names(std::move(names)) {
    // Check the members, not the parameters: the parameters shadow the members
    // inside the body and were just moved-from in the init list, so comparing
    // their sizes was a vacuous (always-true) assertion.
    assert(this->energies.size() == this->names.size());
}
// Register a new family. Throws std::logic_error for a blank name, an invalid
// energy matrix, or a duplicate name.
template<class ID>
void Registry<ID>::register_family(std::string const& name, MatrixXcd const& energy) {
    if (name.empty()) {
        throw std::logic_error("{} family name can't be blank"_format(kind<ID>()));
    }
    check_energy(*this, energy); // onsite vs hopping rules via tag dispatch
    auto const not_unique = std::find(names.begin(), names.end(), name) != names.end();
    if (not_unique) {
        throw std::logic_error("{} family '{}' already exists"_format(kind<ID>(), name));
    }
    names.push_back(name);
    energies.push_back(energy);
}
// Build a name -> index map over all registered families.
template<class ID>
NameMap Registry<ID>::name_map() const {
    auto map = NameMap();
    for (auto i = size_t{0}; i < names.size(); ++i) {
        map[names[i]] = static_cast<storage_idx_t>(i);
    }
    return map;
}
// Return the family name for an ID; throws std::out_of_range for unknown IDs.
template<class ID>
string_view Registry<ID>::name(ID id) const {
    auto const index = id.value();
    if (index >= size()) {
        throw std::out_of_range("There is no {} with ID = {}"_format(kind<ID>(), index));
    }
    return names[index];
}
// Return the energy matrix for an ID; throws std::out_of_range for unknown IDs.
template<class ID>
MatrixXcd const& Registry<ID>::energy(ID id) const {
    auto const index = id.value();
    if (index >= size()) {
        throw std::out_of_range("There is no {} with ID = {}"_format(kind<ID>(), index));
    }
    return energies[index];
}
// Look up a family ID by name; throws std::out_of_range for unknown names.
template<class ID>
ID Registry<ID>::id(string_view name) const {
    auto const it = std::find(names.begin(), names.end(), name);
    if (it == names.end()) {
        throw std::out_of_range("There is no {} named '{}'"_format(kind<ID>(), name));
    }
    return ID(std::distance(names.begin(), it));
}
// True if any registered energy matrix has a non-zero element.
template<class ID>
bool Registry<ID>::has_nonzero_energy() const {
    return std::any_of(energies.begin(), energies.end(), [](MatrixXcd const& energy) {
        return !energy.isZero();
    });
}
// True if any registered energy matrix has a non-zero imaginary part.
template<class ID>
bool Registry<ID>::any_complex_terms() const {
    return std::any_of(energies.begin(), energies.end(), [](MatrixXcd const& energy) {
        return !energy.imag().isZero();
    });
}
// True if any registered energy term is larger than a 1x1 (scalar) matrix.
template<class ID>
bool Registry<ID>::has_multiple_orbitals() const {
    return std::any_of(energies.begin(), energies.end(), [](MatrixXcd const& energy) {
        return energy.size() != 1;
    });
}
template class Registry<SiteID>;
template class Registry<HopID>;
namespace detail {
// Validate an onsite energy matrix: square, non-empty, real main diagonal,
// and either upper triangular or Hermitian. Throws std::logic_error otherwise.
void check_onsite_energy(MatrixXcd const& energy) {
    if (energy.rows() != energy.cols()) {
        throw std::logic_error("The onsite hopping term must be a real vector or a square matrix");
    }
    if (energy.rows() == 0) {
        throw std::logic_error("The onsite hopping term can't be zero-dimensional");
    }
    if (!energy.diagonal().imag().isZero()) {
        throw std::logic_error("The main diagonal of the onsite hopping term must be real");
    }
    if (!energy.isUpperTriangular() && energy != energy.adjoint()) {
        throw std::logic_error("The onsite hopping matrix must be upper triangular or Hermitian");
    }
}
// Canonicalize a scalar onsite energy as a 1x1 matrix.
MatrixXcd canonical_onsite_energy(std::complex<double> energy) {
    auto result = MatrixXcd(1, 1);
    result(0, 0) = energy;
    return result;
}
// Canonicalize a real onsite energy vector as a diagonal complex matrix.
MatrixXcd canonical_onsite_energy(VectorXd const& energy) {
    auto const size = energy.size();
    auto result = MatrixXcd::Zero(size, size).eval();
    result.diagonal() = energy.cast<std::complex<double>>();
    return result;
}
// Validate a hopping energy matrix: it only needs to be non-empty
// (rectangular matrices are allowed for hoppings between different families).
void check_hopping_energy(MatrixXcd const& energy) {
    if (energy.rows() == 0 || energy.cols() == 0) {
        throw std::logic_error("Hoppings can't be zero-dimensional");
    }
}
// Canonicalize a scalar hopping energy as a 1x1 matrix.
MatrixXcd canonical_hopping_energy(std::complex<double> energy) {
    return MatrixXcd::Constant(1, 1, energy);
}
} // namespace detail
} // namespace cpb
| C++ |
2D | dean0x7d/pybinding | cppcore/src/system/Symmetry.cpp | .cpp | 3,939 | 124 | #include "system/Symmetry.hpp"
#include "system/Foundation.hpp"
namespace cpb {
// Period lengths per lattice-vector direction; a negative length disables
// the symmetry in that direction (hence the `>= 0` checks).
TranslationalSymmetry::TranslationalSymmetry(float a1, float a2, float a3)
    : length(a1, a2, a3), enabled_directions(a1 >= 0, a2 >= 0, a3 >= 0) {}
// True if `index` lies within the inclusive [left, right] bounds in all dimensions.
bool SymmetryArea::contains(Index3D const& index) const {
    return all_of(left.array() <= index.array()) && all_of(index.array() <= right.array());
}
// Compute the periodic cell of the foundation: for each enabled direction the
// cell is centered within the foundation and spans one period length; other
// directions cover the full foundation.
SymmetryArea TranslationalSymmetry::area(Foundation const& foundation) const {
    auto const& lattice = foundation.get_lattice();
    auto const size = static_cast<Array3i>(foundation.get_spatial_size());
    SymmetryArea a;
    a.left.setZero();
    a.right = size - 1;
    a.middle.setZero();
    // see if we have periodicities in any of the lattice vector directions
    for (auto i = 0; i < lattice.ndim(); ++i) {
        if (!enabled_directions[i]) {
            continue;
        }
        // number of lattice sites in one period length (at least one)
        auto const num_sites = [&]{
            auto const n = static_cast<int>(std::round(length[i] / lattice.vector(i).norm()));
            return (n > 0) ? n : 1;
        }();
        // left and right borders of the periodic cell
        a.left[i] = (size[i] - num_sites) / 2;
        a.right[i] = a.left[i] + num_sites - 1;
        // length of the periodic cell
        a.middle[i] = num_sites;
    }
    return a;
}
// Enumerate the unique boundary translations implied by the lattice hoppings
// and the enabled symmetry directions. Each translation records the boundary
// slice of the foundation it applies to and the index/Cartesian shift across
// the periodic cell.
std::vector<Translation> TranslationalSymmetry::translations(Foundation const& foundation) const {
    auto const& lattice = foundation.get_lattice();
    auto const symmetry_area = area(foundation);
    std::vector<Translation> translations;
    auto add_translation = [&](Index3D direction) {
        if (direction == Index3D::Zero()) {
            return; // not a valid translation
        }
        // check if the direction already exists
        for (auto const& t : translations) {
            if (t.direction == direction) {
                return;
            }
        }
        // Sites on the boundary of the periodic cell facing this direction
        auto boundary_slice = SliceIndex3D();
        for (auto n = 0, size = lattice.ndim(); n < size; ++n) {
            if (direction[n] > 0)
                boundary_slice[n] = symmetry_area.left[n];
            else if (direction[n] < 0)
                boundary_slice[n] = symmetry_area.right[n];
        }
        // Index and Cartesian shift across the whole periodic cell
        auto const shift_index = direction.cwiseProduct(symmetry_area.middle);
        auto shift_length = Cartesian{0, 0, 0};
        for (auto n = 0, size = lattice.ndim(); n < size; ++n) {
            auto const shift = static_cast<float>(direction[n] * symmetry_area.middle[n]);
            shift_length += shift * lattice.vector(n);
        }
        translations.push_back({direction, boundary_slice, shift_index, shift_length});
    };
    // Candidate directions come from each hopping's relative index, masked by
    // every combination of enabled symmetry directions.
    auto const masks = detail::make_masks(enabled_directions, lattice.ndim());
    for (auto const& sublattice : lattice.optimized_unit_cell()) {
        for (auto const& hopping : sublattice.hoppings) {
            for (auto const& mask : masks) {
                add_translation(hopping.relative_index.cwiseProduct(mask));
            }
        }
    }
    return translations;
}
// Invalidate every foundation site which falls outside the periodic cell.
void TranslationalSymmetry::apply(Foundation& foundation) const {
    auto symmetry_area = area(foundation);
    for (auto& site : foundation) {
        site.set_valid(site.is_valid() && symmetry_area.contains(site.get_spatial_idx()));
    }
}
namespace detail {
// Build all 0/1 direction masks from the enabled symmetry directions,
// ignoring dimensions beyond `ndim`. E.g. enabled = (1, 1, 0) yields
// {0,0,0}, {0,1,0}, {1,0,0}, {1,1,0}.
std::vector<Index3D> make_masks(Vector3b enabled_directions, int ndim) {
    auto const dirs = [&]{
        auto d = Index3D{enabled_directions.cast<int>()};
        for (auto i = ndim; i < d.size(); ++i) {
            d[i] = 0; // directions beyond the lattice dimensionality are off
        }
        return d;
    }();
    auto masks = std::vector<Index3D>();
    for (auto i = 0; i <= dirs[0]; ++i) {
        for (auto j = 0; j <= dirs[1]; ++j) {
            for (auto k = 0; k <= dirs[2]; ++k) {
                masks.push_back({i, j, k});
            }
        }
    }
    return masks;
}
} // namespace detail
} // namespace cpb
| C++ |
2D | dean0x7d/pybinding | cppcore/src/system/Shape.cpp | .cpp | 4,293 | 118 | #include "system/Shape.hpp"
namespace cpb {
// Size of the primitive cell in unit cells per direction; must be positive.
Primitive::Primitive(int a1, int a2, int a3) : size(a1, a2, a3) {
    if (any_of(size.array() <= 0)) {
        throw std::logic_error("Primitive: The size must be at least 1 in every direction.");
    }
}
// A shape is a bounding-box vertex list plus a predicate deciding which
// positions are inside. At least two vertices are needed for a bounding box.
Shape::Shape(Vertices const& vertices, Contains const& contains)
    : vertices(vertices), contains(contains) {
    if (vertices.size() < 2)
        throw std::logic_error("Shape: The bounding box must contain at least two vertices.");
}
// A 1D line shape between two end points.
Line::Line(Cartesian a, Cartesian b) : Shape({a, b}) {
    contains = [a, b](CartesianArrayConstRef positions) -> ArrayX<bool> {
        // Return `true` for all `positions` which are in the perpendicular space
        // between the two end points of the line
        return detail::is_acute_angle(a, b, positions)
               && detail::is_acute_angle(b, a, positions);
    };
}
namespace detail {
// For each position in `c`, return true if the angle at vertex `b` formed by
// points a-b-c is acute (or right), i.e. cos(angle) >= 0 via the dot product.
ArrayX<bool> is_acute_angle(Cartesian a, Cartesian b, CartesianArrayConstRef c) {
    // Vectors BA and BC which make the angle
    auto const ba = Cartesian{a - b};
    auto const bc_x = ArrayXf{c.x() - b.x()};
    auto const bc_y = ArrayXf{c.y() - b.y()};
    auto const bc_z = ArrayXf{c.z() - b.z()};
    // Compute the cosine between the two vectors based on the dot product
    auto const ba_dot_bc = ba.x() * bc_x + ba.y() * bc_y + ba.z() * bc_z;
    auto const ba_length = ba.norm();
    auto const bc_length = sqrt(bc_x.cwiseAbs2() + bc_y.cwiseAbs2() + bc_z.cwiseAbs2());
    auto const cos_theta = ba_dot_bc / (ba_length * bc_length);
    return cos_theta >= 0; // acute angle
} // fixed: removed stray ';' after the function body (empty declaration, -pedantic warning)
// Cache the polygon's vertex x/y coordinates for the raycasting test
// (the polygon is 2D: z coordinates are ignored).
WithinPolygon::WithinPolygon(Shape::Vertices const& vertices)
    : x(vertices.size()), y(vertices.size()) {
    for (auto i = size_t{0}, size = vertices.size(); i < size; ++i) {
        x[i] = vertices[i].x();
        y[i] = vertices[i].y();
    }
}
// Vectorized point-in-polygon test (even-odd raycasting rule): shoot a
// horizontal ray from each position and flip its state for every polygon
// side the ray crosses.
ArrayX<bool> WithinPolygon::operator()(CartesianArrayConstRef positions) const {
    // Raycasting algorithm checks if `positions` are inside this polygon
    ArrayX<bool> is_within = ArrayX<bool>::Constant(positions.size(), false);
    // Loop over all the sides of the polygon (neighbouring vertices)
    auto const num_vertices = static_cast<int>(x.size());
    for (auto i = 0, j = num_vertices - 1; i < num_vertices; j = i++) {
        // Aliases for readability
        auto const& x1 = x[i]; auto const& x2 = x[j];
        auto const& y1 = y[i]; auto const& y2 = y[j];
        // Check if ray is parallel to this side of the polygon
        if (num::approx_equal(y1, y2)) {
            continue; // avoid division by zero in the next step
        }
        // The slope of this side
        auto const k = (x2 - x1) / (y2 - y1);
        // Shoot the ray along the x direction and see if it passes between `y1` and `y2`
        auto intersects_y = (y1 > positions.y()) != (y2 > positions.y());
        // The ray is moving from left to right and may cross a side of the polygon
        auto x_side = k * (positions.y() - y1) + x1;
        auto intersects_x = positions.x() > x_side;
        // Eigen doesn't support `operator!`, so this will have to do...
        auto negate = is_within.select(
            ArrayX<bool>::Constant(is_within.size(), false),
            ArrayX<bool>::Constant(is_within.size(), true)
        );
        // Flip the states which intersect the side
        is_within = (intersects_y && intersects_x).select(negate, is_within);
    }
    return is_within;
}
} // namespace detail
// A 2D polygon shape: the vertices double as the bounding box and feed
// the raycasting contains-test.
Polygon::Polygon(Vertices const& vertices)
    : Shape(vertices, detail::WithinPolygon(vertices)) {}
namespace {
// The eight corners of the axis-aligned box of the given width around `center`,
// used as the bounding box of a freeform shape.
Shape::Vertices make_freeformshape_vertices(Cartesian width, Cartesian center) {
    auto const v1 = static_cast<Cartesian>(center - 0.5f * width); // min corner
    auto const v2 = static_cast<Cartesian>(center + 0.5f * width); // max corner
    return {
        {v1.x(), v1.y(), v1.z()},
        {v2.x(), v1.y(), v1.z()},
        {v1.x(), v2.y(), v1.z()},
        {v2.x(), v2.y(), v1.z()},
        {v1.x(), v1.y(), v2.z()},
        {v2.x(), v1.y(), v2.z()},
        {v1.x(), v2.y(), v2.z()},
        {v2.x(), v2.y(), v2.z()}
    };
}
} // anonymous namespace
// A shape defined by a user-supplied contains-predicate, bounded by a box.
FreeformShape::FreeformShape(Contains const& contains, Cartesian width, Cartesian center)
    : Shape(make_freeformshape_vertices(width, center), contains) {}
} // namespace cpb
| C++ |
2D | dean0x7d/pybinding | cppcore/src/system/HoppingBlocks.cpp | .cpp | 3,311 | 108 | #include "system/HoppingBlocks.hpp"
namespace cpb {
// Deserialize: rebuild the per-family COO blocks from parallel (row, col)
// index arrays, one pair per hopping family.
HoppingBlocks::HoppingBlocks(idx_t num_sites, SerializedBlocks const& data, NameMap name_map)
    : num_sites(num_sites), name_map(std::move(name_map)) {
    blocks.reserve(data.size());
    for (auto const& pair : data) { // pair = (rows, cols) for one family
        auto const size = pair.first.size();
        auto block = std::vector<COO>(static_cast<size_t>(size));
        for (auto i = 0; i < size; ++i) {
            block[i].row = pair.first[i];
            block[i].col = pair.second[i];
        }
        blocks.push_back(std::move(block));
    }
}
// Serialize: flatten each family's COO block into (row, col) index arrays.
HoppingBlocks::SerializedBlocks HoppingBlocks::get_serialized_blocks() const {
    auto data = SerializedBlocks();
    for (auto const& block : *this) {
        auto row = ArrayXi(block.size());
        auto col = ArrayXi(block.size());
        auto n = 0;
        for (auto const& coo : block.coordinates()) {
            row[n] = coo.row;
            col[n] = coo.col;
            ++n;
        }
        data.emplace_back(std::move(row), std::move(col));
    }
    return data;
}
// Total number of hoppings across all families.
idx_t HoppingBlocks::nnz() const {
    return std::accumulate(blocks.begin(), blocks.end(), idx_t{0}, [](idx_t n, Block const& b) {
        return n + static_cast<idx_t>(b.size());
    });
}
// Per-site neighbor counts. Each stored hopping is half of a symmetric pair,
// so it contributes to both its row and its column site.
ArrayXi HoppingBlocks::count_neighbors() const {
    auto counts = ArrayXi::Zero(num_sites).eval();
    for (auto const& block : blocks) {
        for (auto const& coo : block) {
            counts[coo.row] += 1;
            counts[coo.col] += 1;
        }
    }
    return counts;
}
// Pre-allocate storage per family block (counts indexed by family).
void HoppingBlocks::reserve(ArrayXi const& counts) {
    assert(counts.size() <= static_cast<idx_t>(blocks.size()));
    for (auto i = idx_t{0}; i < counts.size(); ++i) {
        blocks[i].reserve(counts[i]);
    }
}
// Append generated hoppings to one family block. Indices are normalized to
// upper-triangular (row <= col), then the block is sorted and deduplicated.
void HoppingBlocks::append(HopID family_id, ArrayXi&& rows, ArrayXi&& cols) {
    if (rows.size() != cols.size()) {
        throw std::runtime_error("When generating hoppings, the number of "
                                 "`from` and `to` indices must be equal");
    }
    auto& block = blocks[family_id.as<size_t>()];
    block.reserve(block.size() + rows.size());
    for (auto i = 0; i < rows.size(); ++i) {
        auto m = rows[i];
        auto n = cols[i];
        if (m > n) { std::swap(m, n); } // upper triangular format
        block.emplace_back(m, n);
    }
    // Maintain upper triangular format
    std::sort(block.begin(), block.end());
    block.erase(std::unique(block.begin(), block.end()), block.end());
}
// Drop hoppings which touch removed sites and update the site count.
// NOTE(review): the remaining row/col indices are presumably remapped by the
// caller after filtering -- confirm against System::remove_invalid usage.
void HoppingBlocks::filter(VectorX<bool> const& keep) {
    using std::begin; using std::end;
    num_sites = std::accumulate(begin(keep), end(keep), idx_t{0}); // count of kept sites
    for (auto& block : blocks) {
        block.erase(std::remove_if(block.begin(), block.end(), [&](COO coo) {
            return !keep[coo.row] || !keep[coo.col];
        }), block.end());
    }
}
// Grow the site count (e.g. after a site generator ran); no hoppings change.
void HoppingBlocks::add_sites(idx_t num_new_sites) {
    num_sites += num_new_sites;
}
// Convert to a CSR sparse matrix where each stored value is the hopping
// family ID (upper triangular, matching the block storage).
HoppingCSR HoppingBlocks::tocsr() const {
    auto csr = HoppingCSR(num_sites, num_sites);
    csr.reserve(nnz());
    for (auto const& block : *this) {
        for (auto const& coo : block.coordinates()) {
            csr.insert(coo.row, coo.col) = block.family_id().value();
        }
    }
    csr.makeCompressed();
    return csr.markAsRValue(); // allow the caller to move the storage
}
} // namespace cpb
| C++ |
2D | dean0x7d/pybinding | cppcore/src/system/CompressedSublattices.cpp | .cpp | 3,682 | 106 | #include "system/CompressedSublattices.hpp"
namespace cpb {
// Deserialize from parallel arrays: one (id, site count, orbital count)
// element per compressed sublattice.
CompressedSublattices::CompressedSublattices(ArrayXi const& alias_ids, ArrayXi const& site_counts,
                                             ArrayXi const& orbital_counts)
    : data(alias_ids.size()) {
    for (auto i = size_t{0}; i < data.size(); ++i) {
        data[i].id = SiteID{alias_ids[i]};
        data[i].num_sites = site_counts[i];
        data[i].num_orbitals = orbital_counts[i];
    }
}
// Record `count` sites of family `id` with `norb` orbitals. Consecutive calls
// with the same ID are merged into the current run (run-length compression).
void CompressedSublattices::add(SiteID id, idx_t norb, idx_t count) {
    if (data.empty() || data.back().id != id) {
        data.push_back({id, static_cast<storage_idx_t>(count), static_cast<storage_idx_t>(norb)});
    } else {
        data.back().num_sites += static_cast<storage_idx_t>(count);
    }
}
// Shrink each sublattice's site count to the number of kept sites in its
// [sys_start, sys_end) segment of the `keep` mask.
void CompressedSublattices::filter(VectorX<bool> const& keep) {
    using std::begin;
    auto new_counts = std::vector<storage_idx_t>();
    new_counts.reserve(data.size());
    for (auto const& sub : *this) {
        new_counts.push_back(std::accumulate(begin(keep) + sub.sys_start(),
                                             begin(keep) + sub.sys_end(), storage_idx_t{0}));
    }
    // Counts are applied in a second pass because the iteration above reads
    // sys_start/sys_end which derive from the current counts.
    for (auto i = size_t{0}; i < data.size(); ++i) {
        data[i].num_sites = new_counts[i];
    }
}
// Internal consistency check: total sites match, no sublattice ID repeats,
// and runs are sorted by orbital count. Throws on violation.
void CompressedSublattices::verify(idx_t num_sites) const {
    using std::begin; using std::end;
    auto const alias_ids_are_unique = [&]{
        // alias_ids: [1, 0, 2] --> OK
        //            [1, 0, 2, 1] --> Bad, repeating ID
        auto ids = alias_ids();
        std::sort(begin(ids), end(ids));
        auto const unique_size = std::unique(begin(ids), end(ids)) - begin(ids);
        return static_cast<size_t>(unique_size) == data.size();
    }();
    auto const is_sorted_by_orb_count = [&]{
        auto const norb = orbital_counts();
        return std::is_sorted(begin(norb), end(norb));
    }();
    if (decompressed_size() != num_sites || !alias_ids_are_unique || !is_sorted_by_orb_count) {
        throw std::runtime_error("CompressedSublatticeIDs: this should never happen");
    }
}
// System start index of the first sublattice with the given orbital count;
// throws if no sublattice has that many orbitals.
idx_t CompressedSublattices::start_index(idx_t num_orbitals) const {
    for (auto const& sub : *this) {
        if (sub.num_orbitals() == num_orbitals) {
            return sub.sys_start();
        }
    }
    throw std::runtime_error("CompressedSublattices::start_index(): invalid num_orbitals");
}
// Total number of sites across all runs (the uncompressed length).
idx_t CompressedSublattices::decompressed_size() const {
    return std::accumulate(data.begin(), data.end(), idx_t{0}, [](idx_t n, Element const& v) {
        return n + v.num_sites;
    });
}
// Expand the run-length encoding into one sublattice ID per site.
ArrayX<storage_idx_t> CompressedSublattices::decompressed() const {
    auto sublattices = ArrayX<storage_idx_t>(decompressed_size());
    for (auto const& sub : *this) {
        sublattices.segment(sub.sys_start(), sub.num_sites()).setConstant(sub.id().value());
    }
    return sublattices;
}
// Serialization helper: the sublattice ID of each run.
ArrayXi CompressedSublattices::alias_ids() const {
    auto result = ArrayXi(static_cast<idx_t>(data.size()));
    std::transform(data.begin(), data.end(), result.data(),
                   [](Element const& v) { return v.id.value(); });
    return result;
}
// Serialization helper: the site count of each run.
ArrayXi CompressedSublattices::site_counts() const {
    auto result = ArrayXi(static_cast<idx_t>(data.size()));
    std::transform(data.begin(), data.end(), result.data(),
                   [](Element const& v) { return v.num_sites; });
    return result;
}
// Serialization helper: the orbital count of each run.
ArrayXi CompressedSublattices::orbital_counts() const {
    auto result = ArrayXi(static_cast<idx_t>(data.size()));
    std::transform(data.begin(), data.end(), result.data(),
                   [](Element const& v) { return v.num_orbitals; });
    return result;
}
} // namespace cpb
| C++ |
2D | dean0x7d/pybinding | cppcore/src/system/System.cpp | .cpp | 6,450 | 178 | #include "system/System.hpp"
#include "system/Foundation.hpp"
#include "system/Symmetry.hpp"
namespace cpb {
// Size of the Hamiltonian matrix: sum of sites * orbitals over sublattices
// (larger than num_sites() for multi-orbital models).
idx_t System::hamiltonian_size() const {
    auto result = idx_t{0};
    for (auto const& sub : compressed_sublattices) {
        result += sub.ham_size();
    }
    return result;
}
// Upper bound on the Hamiltonian's non-zeros: dense onsite blocks plus both
// triangles of every hopping term (stored hoppings are only one triangle,
// hence the factor of 2).
idx_t System::hamiltonian_nnz() const {
    auto const onsite_nnz = std::accumulate(
        compressed_sublattices.begin(), compressed_sublattices.end(), idx_t{0},
        [](idx_t n, CompressedSublattices::It const& sub) {
            return n + sub.num_sites() * sub.num_orbitals() * sub.num_orbitals();
        }
    );
    auto const hopping_nnz = std::accumulate(
        hopping_blocks.begin(), hopping_blocks.end(), idx_t{0},
        [&](idx_t n, HoppingBlocks::Iterator const& block) {
            auto const term_size = hopping_registry.energy(block.family_id()).size();
            return n + static_cast<idx_t>(block.size()) * term_size;
        }
    );
    return onsite_nnz + 2 * hopping_nnz;
}
// Map a system (site) index to its Hamiltonian indices -- one per orbital.
// Throws if the index falls outside every sublattice (should not happen).
ArrayXi System::to_hamiltonian_indices(idx_t system_index) const {
    for (auto const& sub : compressed_sublattices) {
        if (sub.sys_start() <= system_index && system_index < sub.sys_end()) {
            auto const norb = sub.num_orbitals();
            auto const offset = (system_index - sub.sys_start()) * norb;
            auto const idx = static_cast<storage_idx_t>(sub.ham_start() + offset);
            auto ret = ArrayXi(norb);
            for (auto i = 0; i < norb; ++i) {
                ret[i] = idx + i; // consecutive rows, one per orbital
            }
            return ret;
        }
    }
    throw std::runtime_error("to_hamiltonian_indices: this should never happen");
}
// Site index range of the named sublattice; an empty name selects all sites.
Range System::sublattice_range(string_view sublattice) const {
    if (sublattice.empty()) {
        return {0, num_sites()};
    } else {
        // Only check sites belonging to the target sublattice
        auto const target_id = site_registry.id(sublattice); // throws for unknown names
        auto const it = std::find_if(
            compressed_sublattices.begin(), compressed_sublattices.end(),
            [&](CompressedSublattices::It const& sub) { return sub.id() == target_id; }
        );
        if (it == compressed_sublattices.end()) {
            throw std::runtime_error("System::sublattice_range() This should never happen");
        }
        return {it->sys_start(), it->sys_end()};
    }
}
// Linear search for the site closest to `target_position`, optionally limited
// to one sublattice. Assumes the selected range is non-empty -- TODO confirm
// an empty sublattice cannot reach this point.
idx_t System::find_nearest(Cartesian target_position, string_view sublattice_name) const {
    auto const range = sublattice_range(sublattice_name);
    auto nearest_index = range.start;
    auto min_distance = (positions[range.start] - target_position).norm();
    for (auto i = range.start + 1; i < range.end; ++i) {
        auto const distance = (positions[i] - target_position).norm();
        if (distance < min_distance) {
            min_distance = distance;
            nearest_index = i;
        }
    }
    return nearest_index;
}
// Positions in Hamiltonian space: each site's position is repeated once per
// orbital so the result has hamiltonian_size() entries.
CartesianArray System::expanded_positions() const {
    auto ep = CartesianArray(hamiltonian_size());
    for (auto const& sub : compressed_sublattices) {
        auto const norb = sub.num_orbitals();
        auto n = sub.ham_start();
        for (auto i = sub.sys_start(); i < sub.sys_end(); ++i) {
            ep.x.segment(n, norb).setConstant(positions.x[i]);
            ep.y.segment(n, norb).setConstant(positions.y[i]);
            ep.z.segment(n, norb).setConstant(positions.z[i]);
            n += norb;
        }
    }
    return ep;
}
namespace detail {
// Transfer valid foundation sites into the System: positions, compressed
// sublattice runs and (half of the) hoppings.
void populate_system(System& system, Foundation const& foundation) {
    auto const& finalized_indices = foundation.get_finalized_indices();
    auto const size = finalized_indices.size(); // number of valid sites
    system.positions.resize(size);
    system.hopping_blocks = {size, system.hopping_registry.name_map()};
    system.hopping_blocks.reserve(finalized_indices.max_hoppings_per_family());
    for (auto const& site : foundation) {
        auto const index = finalized_indices[site];
        if (index < 0) { continue; } // invalid site
        system.positions[index] = site.get_position();
        system.compressed_sublattices.add(SiteID{site.get_alias_id()}, site.get_norb());
        site.for_each_neighbor([&](Site neighbor, Hopping hopping) {
            auto const neighbor_index = finalized_indices[neighbor];
            if (neighbor_index < 0) { return; } // invalid neighbor
            if (!hopping.is_conjugate) { // only make half the matrix, other half is the conjugate
                system.hopping_blocks.add(hopping.family_id, index, neighbor_index);
            }
        });
    }
    system.compressed_sublattices.verify(size);
}
// For each symmetry translation, collect the hoppings which cross the
// periodic boundary into a System::Boundary. Empty boundaries are skipped.
void populate_boundaries(System& system, Foundation const& foundation,
                         TranslationalSymmetry const& symmetry) {
    auto const& finalized_indices = foundation.get_finalized_indices();
    auto const size = finalized_indices.size();
    for (const auto& translation : symmetry.translations(foundation)) {
        auto boundary = System::Boundary();
        // NOTE(review): "shift_lenght" is a misspelled field name declared elsewhere
        boundary.shift = translation.shift_lenght;
        boundary.hopping_blocks = {size, system.hopping_registry.name_map()};
        for (auto const& site : foundation[translation.boundary_slice]) {
            auto const index = finalized_indices[site];
            if (index < 0) { continue; }
            // The site is shifted to the opposite edge of the translation unit
            auto const shifted_site = site.shifted(translation.shift_index);
            shifted_site.for_each_neighbor([&](Site neighbor, Hopping hopping) {
                auto const neighbor_index = finalized_indices[neighbor];
                if (neighbor_index < 0) { return; }
                if (!hopping.is_conjugate) {
                    boundary.hopping_blocks.add(hopping.family_id, index, neighbor_index);
                }
            });
        }
        if (boundary.hopping_blocks.nnz() > 0) {
            system.boundaries.push_back(std::move(boundary));
        }
    }
}
// Physically remove sites flagged invalid in `is_valid`: compact positions,
// sublattice runs and hoppings, then clear the mask. No-op if the mask is empty.
void remove_invalid(System& s) {
    if (s.is_valid.size() == 0) { return; }
    s.positions.x = slice(s.positions.x, s.is_valid);
    s.positions.y = slice(s.positions.y, s.is_valid);
    s.positions.z = slice(s.positions.z, s.is_valid);
    s.compressed_sublattices.filter(s.is_valid);
    s.hopping_blocks.filter(s.is_valid);
    for (auto& b : s.boundaries) {
        b.hopping_blocks.filter(s.is_valid);
    }
    s.is_valid.resize(0); // mask consumed
}
} // namespace detail
} // namespace cpb
| C++ |
2D | dean0x7d/pybinding | cppcore/src/system/Foundation.cpp | .cpp | 6,401 | 167 | #include "system/Foundation.hpp"
#include "system/Shape.hpp"
namespace cpb { namespace detail {
// Bounding box of a shape in lattice-vector coordinates: min/max over the
// shape's vertices, padded by one unit cell to cover truncation from cast<int>.
std::pair<Index3D, Index3D> find_bounds(Shape const& shape, Lattice const& lattice) {
    Array3i lower_bound = Array3i::Constant(std::numeric_limits<int>::max());
    Array3i upper_bound = Array3i::Constant(std::numeric_limits<int>::min());
    for (auto const& point : shape.vertices) {
        // Translate Cartesian coordinates `point` into lattice vector coordinates `v`
        Array3i const v = lattice.translate_coordinates(point).cast<int>();
        lower_bound = (v < lower_bound).select(v, lower_bound);
        upper_bound = (v > upper_bound).select(v, upper_bound);
    }
    // Add +/- 1 padding to compensate for `cast<int>()` truncation
    auto const ndim = lattice.ndim();
    lower_bound.head(ndim) -= 1;
    upper_bound.head(ndim) += 1;
    return {lower_bound, upper_bound};
}
// Generate the Cartesian position of every site in the foundation, iterating
// sublattice-major then c/b/a so intermediate partial sums are reused.
CartesianArray generate_positions(Cartesian origin, Index3D size, Lattice const& lattice) {
    // The nested loops look messy, but it's the fastest way to calculate all the positions
    // because the intermediate a, b, c positions are reused.
    auto const nsub = lattice.nsub();
    auto const num_sites = size.prod() * nsub;
    auto const unit_cell = lattice.optimized_unit_cell();
    auto positions = CartesianArray(num_sites);
    auto idx = 0;
    for (auto n = 0; n < nsub; ++n) {
        Cartesian ps = origin + unit_cell[n].position;
        for (auto c = 0; c < size[2]; ++c) {
            Cartesian pc = (c == 0) ? ps : ps + static_cast<float>(c) * lattice.vector(2);
            for (auto b = 0; b < size[1]; ++b) {
                Cartesian pb = (b == 0) ? pc : pc + static_cast<float>(b) * lattice.vector(1);
                for (auto a = 0; a < size[0]; ++a) {
                    Cartesian pa = pb + static_cast<float>(a) * lattice.vector(0);
                    positions[idx++] = pa;
                } // a
            } // b
        } // c
    } // n
    return positions;
}
// Expected neighbor count for every foundation site, reduced for sites whose
// hoppings would reach outside the foundation's spatial bounds.
ArrayXi count_neighbors(Foundation const& foundation) {
    ArrayXi neighbor_count(foundation.size());
    auto const& unit_cell = foundation.get_optimized_unit_cell();
    auto const spatial_size = foundation.get_spatial_size().array();
    for (auto const& site : foundation) {
        auto const& sublattice = unit_cell[site.get_sub_idx()];
        auto num_neighbors = static_cast<storage_idx_t>(sublattice.hoppings.size());
        // Reduce the neighbor count for sites on the edges
        for (auto const& hopping : sublattice.hoppings) {
            auto const index = Array3i(site.get_spatial_idx() + hopping.relative_index);
            if ((index < 0).any() || (index >= spatial_size).any()) {
                num_neighbors -= 1;
            }
        }
        neighbor_count[site.get_flat_idx()] = num_neighbors;
    }
    return neighbor_count;
}
/// After `site` was invalidated, decrement the neighbor count of each of its
/// neighbors and invalidate any that drop below `min_neighbors`. Invalidation
/// cascades recursively through the newly removed sites.
void clear_neighbors(Site& site, ArrayXi& neighbor_count, int min_neighbors) {
    // A count of zero marks a site that was already processed -> stop
    if (neighbor_count[site.get_flat_idx()] == 0) { return; }

    site.for_each_neighbor([&](Site neighbor, Hopping) {
        if (!neighbor.is_valid()) { return; }

        auto const neighbor_idx = neighbor.get_flat_idx();
        neighbor_count[neighbor_idx] -= 1;
        if (neighbor_count[neighbor_idx] < min_neighbors) {
            neighbor.set_valid(false);
            // recursive call... but it will not be very deep
            clear_neighbors(neighbor, neighbor_count, min_neighbors);
        }
    });
    // Mark this site as processed
    neighbor_count[site.get_flat_idx()] = 0;
}
} // namespace detail
/// Cascade-remove sites which would end up with fewer than `min_neighbors`
/// neighbors, starting from the sites that are already invalid.
void remove_dangling(Foundation& foundation, int min_neighbors) {
    auto counts = detail::count_neighbors(foundation);
    for (auto& candidate : foundation) {
        if (candidate.is_valid()) { continue; }
        detail::clear_neighbors(candidate, counts, min_neighbors);
    }
}
FinalizedIndices::FinalizedIndices(ArrayXi i, ArrayXi h, idx_t n)
    : indices(std::move(i)), hopping_counts(std::move(h)), total_valid_sites(n) {}

/// Foundation from a plain translational repetition of the unit cell (no shape):
/// every site starts out valid.
Foundation::Foundation(Lattice const& lattice, Primitive const& primitive)
    : lattice(lattice),
      unit_cell(lattice.optimized_unit_cell()),
      // Spread the repeated cells symmetrically around the origin
      bounds(-primitive.size.array() / 2, (primitive.size.array() - 1) / 2),
      spatial_size(primitive.size),
      sub_size(lattice.nsub()),
      positions(detail::generate_positions(lattice.calc_position(bounds.first), spatial_size, lattice)),
      is_valid(ArrayX<bool>::Constant(size(), true)) {}

/// Foundation just large enough to accommodate the given shape: sites outside
/// the shape are invalid from the start, and dangling sites are pruned.
Foundation::Foundation(Lattice const& lattice, Shape const& shape)
    : lattice(lattice),
      unit_cell(lattice.optimized_unit_cell()),
      bounds(detail::find_bounds(shape, lattice)),
      spatial_size((bounds.second - bounds.first) + Index3D::Ones()),
      sub_size(lattice.nsub()),
      positions(detail::generate_positions(lattice.calc_position(bounds.first), spatial_size, lattice)),
      is_valid(shape.contains(positions)) {
    remove_dangling(*this, lattice.get_min_neighbors());
}
/// Lazily build (and cache) the mapping from foundation site index to the final
/// index of each valid site (-1 for invalid sites), together with per-family
/// hopping counts used for memory reservation.
FinalizedIndices const& Foundation::get_finalized_indices() const {
    if (finalized_indices) {
        return finalized_indices; // cached from a previous call
    }

    auto indices = ArrayXi::Constant(size(), -1).eval();
    auto hopping_counts = ArrayXi::Zero(lattice.nhop()).eval();
    auto total_valid_sites = storage_idx_t{0};

    // Each sublattice block has the same initial number of sites (block_size),
    // but the number of final valid sites may differ.
    auto const block_size = spatial_size.prod();
    for (auto n = 0; n < sub_size; ++n) {
        auto valid_sites_for_this_sublattice = 0;

        // Assign final indices to all valid sites
        for (auto i = n * block_size; i < (n + 1) * block_size; ++i) {
            if (is_valid[i]) {
                indices[i] = total_valid_sites;
                ++total_valid_sites;
                ++valid_sites_for_this_sublattice;
            }
        }

        // Count the number of non-conjugate hoppings per family ID. This is
        // overestimated, i.e. it includes some invalid hoppings, but it's a
        // good quick estimate for memory reservation.
        for (auto const& hop : unit_cell[n].hoppings) {
            if (!hop.is_conjugate) {
                hopping_counts[hop.family_id.value()] += valid_sites_for_this_sublattice;
            }
        }
    }

    finalized_indices = {std::move(indices), std::move(hopping_counts), total_valid_sites};
    return finalized_indices;
}
} // namespace cpb
| C++ |
2D | dean0x7d/pybinding | cppcore/src/kpm/Stats.cpp | .cpp | 2,777 | 72 | #include "kpm/Stats.hpp"
#include "kpm/Config.hpp"
#include "kpm/OptimizedHamiltonian.hpp"
namespace cpb { namespace kpm {
namespace {
/// Report line for the Hamiltonian optimization step: the percentage of the
/// workload removed by the reordering optimization. A '*' is appended when
/// `uses_full_system` is false.
std::string hamiltonian_report(Stats const& s, bool shortform) {
    auto const nnz_diff = static_cast<double>(s.nnz - s.opt_nnz);
    auto const percent_removed = 100.0 * nnz_diff / static_cast<double>(s.nnz);
    auto const not_efficient = s.uses_full_system ? "" : "*";
    auto const fmt_str = shortform ? "{:.0f}%{}"
                                   : "The reordering optimization was able to "
                                     "remove {:.0f}%{} of the workload";
    auto const msg = fmt::format(fmt_str, percent_removed, not_efficient);
    return format_report(msg, s.hamiltonian_timer, shortform);
}
/// Report line for the moments computation: the number of computed moments
/// and the throughput in non-zero matrix elements per second.
std::string moments_report(Stats const& s, bool shortform) {
    auto const fmt_str = shortform ? "{} @ {}eps"
                                   : "KPM calculated {} moments "
                                     "at {} non-zero elements per second";
    auto const msg = fmt::format(fmt_str,
                                 fmt::with_suffix(s.num_moments),
                                 fmt::with_suffix(s.eps()));
    return format_report(msg, s.moments_timer, shortform);
}
}
/// Capture the workload parameters of an upcoming moments calculation so that
/// throughput (`eps()`/`ops()`) can be derived once the timers are filled in.
/// `multiplier` scales the workload (e.g. for repeated random-vector runs).
void Stats::reset(idx_t num_moments, OptimizedHamiltonian const& oh,
                  AlgorithmConfig const& ac, idx_t multiplier) {
    this->num_moments = num_moments;
    uses_full_system = oh.map().uses_full_system(num_moments);
    // Workload sizes, both without and with the configured optimizations
    nnz = oh.num_nonzeros(num_moments, /*optimal_size*/false);
    opt_nnz = oh.num_nonzeros(num_moments, ac.optimal_size);
    vec = oh.num_vec_elements(num_moments, /*optimal_size*/false);
    opt_vec = oh.num_vec_elements(num_moments, ac.optimal_size);
    this->multiplier = static_cast<double>(multiplier);
    matrix_memory = oh.matrix_memory();
    vector_memory = oh.vector_memory();
    hamiltonian_timer = oh.timer;
    moments_timer = {}; // cleared; filled in by the upcoming computation
}
/// Throughput: non-zero matrix elements processed per second
double Stats::eps() const {
    return multiplier * static_cast<double>(opt_nnz) / moments_timer.elapsed_seconds();
}
/// Estimated scalar operations per second achieved by the moments computation,
/// derived from the recorded workload sizes and the moments timer.
double Stats::ops(bool is_diagonal, bool non_unit_vector) const {
    // Tally the scalar operations of one full moments run
    auto total = size_t{0};
    total += nnz * 2; // 1 mul + 1 add per non-zero matrix element
    total += vec;     // 1 sub per vector element
    if (is_diagonal) {
        total += vec * 4; // two dot products: 1 mul + 1 add each
    } else if (non_unit_vector) {
        total += vec * 2; // one dot product: 1 mul + 1 add
    }
    return multiplier * static_cast<double>(total) / moments_timer.elapsed_seconds();
}
/// Combined report: Hamiltonian optimization stats followed by moments stats
std::string Stats::report(bool shortform) const {
    return hamiltonian_report(*this, shortform) + moments_report(*this, shortform);
}
}} // namespace cpb::kpm
| C++ |
2D | dean0x7d/pybinding | cppcore/src/kpm/Core.cpp | .cpp | 6,991 | 159 | #include "kpm/Core.hpp"
#include "kpm/reconstruct.hpp"
namespace cpb { namespace kpm {
namespace {
/// Energy bounds for the KPM scaling: taken verbatim from the configuration
/// when the user set them, otherwise marked for automatic computation.
Bounds reset_bounds(Hamiltonian const& h, Config const& config) {
    auto const user_defined = config.min_energy != config.max_energy;
    if (user_defined) {
        return {config.min_energy, config.max_energy};
    }
    return {h, config.lanczos_precision}; // will be automatically computed
}
} // anonymous namespace
/// Set up the KPM core: the compute strategy, configuration, energy bounds and
/// the optimized Hamiltonian are initialized once and reused by all calculations.
/// Throws `std::invalid_argument` for an inverted user-defined energy range.
Core::Core(Hamiltonian const& h, Compute const& compute, Config const& config)
    : hamiltonian(h), compute(compute), config(config), bounds(reset_bounds(h, config)),
      optimized_hamiltonian(h, config.matrix_format, config.algorithm.reorder()) {
    if (config.min_energy > config.max_energy) {
        throw std::invalid_argument("KPM: Invalid energy range specified (min > max).");
    }
}
/// Replace the Hamiltonian: the optimized matrix and energy bounds are rebuilt
void Core::set_hamiltonian(Hamiltonian const& h) {
    hamiltonian = h;
    optimized_hamiltonian = {h, config.matrix_format, config.algorithm.reorder()};
    bounds = reset_bounds(h, config);
}
/// Human-readable report of the last calculation (bounds + stats + footer)
std::string Core::report(bool shortform) const {
    return bounds.report(shortform) + stats.report(shortform) + (shortform ? "|" : "Total time:");
}
/// Raw KPM expansion moments for the vectors `alpha`/`beta` and operator `op`.
/// When `beta` and `op` are empty the calculation reduces to the diagonal case
/// (see `GenericCollector`: an empty `beta` defaults to `alpha`).
ArrayXcd Core::moments(idx_t num_moments, VectorXcd const& alpha, VectorXcd const& beta,
                       SparseMatrixXcd const& op) {
    auto specialized_algorithm = config.algorithm;
    specialized_algorithm.optimal_size = false; // not applicable for this calculation

    optimized_hamiltonian.optimize_for({0, 0}, bounds.scaling_factors());
    stats.reset(num_moments, optimized_hamiltonian, specialized_algorithm);

    auto const starter = constant_starter(optimized_hamiltonian, alpha);

    if (beta.size() == 0 && op.size() == 0) {
        // Diagonal case: the specialized (faster) collector applies
        auto moments = DiagonalMoments(round_num_moments(num_moments));
        timed_compute(&moments, starter, specialized_algorithm);
        apply_damping(moments, config.kernel);
        return extract_data(moments, num_moments);
    } else {
        auto moments = GenericMoments(round_num_moments(num_moments), alpha, beta, op);
        timed_compute(&moments, starter, specialized_algorithm);
        apply_damping(moments, config.kernel);
        return extract_data(moments, num_moments);
    }
}
/// Local density of states at the given site indices, for all `energy` values.
/// One set of moments is computed per index; results are kept as separate
/// columns (BatchConcatenator).
ArrayXXdCM Core::ldos(std::vector<idx_t> const& idx, ArrayXd const& energy, double broadening) {
    auto const scale = bounds.scaling_factors();
    // The requested broadening determines how many moments are needed
    auto const num_moments = config.kernel.required_num_moments(broadening / scale.a);
    auto const num_indices = static_cast<idx_t>(idx.size());

    optimized_hamiltonian.optimize_for({idx, idx}, scale);
    stats.reset(num_moments, optimized_hamiltonian, config.algorithm, num_indices);

    auto starter = unit_starter(optimized_hamiltonian);
    auto moments = BatchDiagonalMoments(num_moments, num_indices, BatchConcatenator());

    timed_compute(&moments, starter, config.algorithm);
    apply_damping(moments, config.kernel);
    return reconstruct<SpectralDensity>(moments, energy, scale);
}
/// Total density of states: stochastic evaluation with `num_random` random
/// starter vectors whose moments are averaged (BatchAccumulator).
ArrayXd Core::dos(ArrayXd const& energy, double broadening, idx_t num_random) {
    auto const scale = bounds.scaling_factors();
    auto const num_moments = config.kernel.required_num_moments(broadening / scale.a);

    auto specialized_algorithm = config.algorithm;
    specialized_algorithm.optimal_size = false; // not applicable for this calculation

    optimized_hamiltonian.optimize_for({0, 0}, scale);
    stats.reset(num_moments, optimized_hamiltonian, specialized_algorithm, num_random);

    auto starter = random_starter(optimized_hamiltonian);
    auto moments = BatchDiagonalMoments(num_moments, num_random, BatchAccumulator());

    timed_compute(&moments, starter, specialized_algorithm);
    apply_damping(moments, config.kernel);
    return reconstruct<SpectralDensity>(moments, energy, scale);
}
/// Green's function for a single (row, col) pair: delegates to `greens_vector`
/// and moves the single result out of the returned temporary.
ArrayXcd Core::greens(idx_t row, idx_t col, ArrayXd const& energy, double broadening) {
    return std::move(greens_vector(row, {col}, energy, broadening).front());
}
/// Green's function between `row` and each of the `cols` sites (cols must not
/// be empty). The diagonal case uses a specialized, faster collector.
std::vector<ArrayXcd> Core::greens_vector(idx_t row, std::vector<idx_t> const& cols,
                                          ArrayXd const& energy, double broadening) {
    assert(!cols.empty());
    auto const scale = bounds.scaling_factors();
    auto const num_moments = config.kernel.required_num_moments(broadening / scale.a);

    auto& oh = optimized_hamiltonian;
    oh.optimize_for({row, cols}, scale);
    stats.reset(num_moments, oh, config.algorithm);

    if (oh.idx().is_diagonal()) {
        auto moments = DiagonalMoments(num_moments);
        timed_compute(&moments, unit_starter(oh), config.algorithm);
        apply_damping(moments, config.kernel);
        return {reconstruct<GreensFunction>(moments, energy, scale)};
    } else {
        // One moment array per destination index
        auto moments_vector = MultiUnitMoments(num_moments, oh.idx());
        timed_compute(&moments_vector, unit_starter(oh), config.algorithm);
        apply_damping(moments_vector, config.kernel);
        return reconstruct<GreensFunction>(moments_vector, energy, scale);
    }
}
/// Kubo-Bastin conductivity, stochastically averaged over `num_random` random
/// vectors. The `left_coords`/`right_coords` arrays are used to construct the
/// velocity operators (see `velocity()`) applied on either side.
ArrayXcd Core::conductivity(ArrayXf const& left_coords, ArrayXf const& right_coords,
                            ArrayXd const& chemical_potential, double broadening,
                            double temperature, idx_t num_random, idx_t num_points) {
    auto const scale = bounds.scaling_factors();
    auto const num_moments = config.kernel.required_num_moments(broadening / scale.a);

    auto specialized_algorithm = config.algorithm;
    specialized_algorithm.optimal_size = false; // not applicable for this calculation

    optimized_hamiltonian.optimize_for({0, 0}, scale);
    stats.reset(num_moments, optimized_hamiltonian, specialized_algorithm, num_random);

    // On the left, the velocity operator is only applied to the starter
    auto starter_l = random_starter(optimized_hamiltonian, velocity(hamiltonian, left_coords));
    auto moments_l = DenseMatrixMoments(num_moments);
    // On the right, the operator is applied at collection time to each vector
    auto starter_r = random_starter(optimized_hamiltonian);
    auto moments_r = DenseMatrixMoments(num_moments, velocity(hamiltonian, right_coords));

    // Accumulate the product of left and right moment matrices over all runs
    auto total_mu = MomentMultiplication(num_moments, optimized_hamiltonian.scalar_tag());
    for (auto j = 0; j < num_random; ++j) {
        timed_compute(&moments_l, starter_l, specialized_algorithm);
        timed_compute(&moments_r, starter_r, specialized_algorithm);
        total_mu.matrix_mul_add(moments_l, moments_r);
    }
    total_mu.normalize(num_random);

    apply_damping(total_mu, config.kernel);
    return reconstruct<KuboBastin>(total_mu, chemical_potential, bounds.linspaced(num_points),
                                   temperature, scale);
}
/// Run the moments calculation while accumulating its run time in `stats`
void Core::timed_compute(MomentsRef m, Starter const& starter, AlgorithmConfig const& ac) {
    stats.moments_timer.tic();
    compute->moments(std::move(m), starter, ac, optimized_hamiltonian);
    stats.moments_timer.toc_accumulate();
}
}} // namespace cpb::kpm
| C++ |
2D | dean0x7d/pybinding | cppcore/src/kpm/Kernel.cpp | .cpp | 1,831 | 52 | #include "kpm/Kernel.hpp"
#include "numeric/constant.hpp"
namespace cpb { namespace kpm {
/// The Jackson kernel: damping coefficients for the truncated Chebyshev series
/// (see the KPM literature). Paired with a function which translates a desired
/// scaled broadening into the required number of moments.
Kernel jackson_kernel() {
    return {
        [](idx_t num_moments) -> ArrayXd {
            auto const N = static_cast<double>(num_moments);
            auto const Np = N + 1;
            auto const ns = make_integer_range<double>(num_moments);
            constexpr auto pi = double{constant::pi};
            return ns.unaryExpr([&](double n) { // n is not an integer to get proper fp division
                return ((Np - n) * cos(pi * n / Np) + sin(pi * n / Np) / tan(pi / Np)) / Np;
            });
        },
        [](double scaled_broadening) {
            // Smaller broadening -> more moments required to resolve it
            auto const n = static_cast<idx_t>(constant::pi / scaled_broadening) + 1;
            return round_num_moments(n);
        }
    };
}
/// The Lorentz kernel with damping parameter `lambda` (must be positive).
/// Note: `lambda` is captured by value so the returned lambdas stay valid.
Kernel lorentz_kernel(double lambda) {
    if (lambda <= 0) { throw std::invalid_argument("Lorentz kernel: lambda must be positive."); }
    return {
        [=](idx_t num_moments) -> ArrayXd {
            auto const N = static_cast<double>(num_moments);
            auto const ns = make_integer_range<double>(num_moments);
            return ns.unaryExpr([&](double n) { // n is not an integer to get proper fp division
                return std::sinh(lambda * (1 - n / N)) / std::sinh(lambda);
            });
        },
        [=](double scaled_broadening) {
            // Smaller broadening -> more moments required to resolve it
            auto const n = static_cast<idx_t>(lambda / scaled_broadening) + 1;
            return round_num_moments(n);
        }
    };
}
/// The Dirichlet kernel: all damping coefficients are 1, i.e. the Chebyshev
/// series is simply truncated without any damping.
Kernel dirichlet_kernel() {
    auto damping = [](idx_t num_moments) -> ArrayXd {
        return ArrayXd::Ones(num_moments);
    };
    auto required_moments = [](double scaled_broadening) {
        auto const n = static_cast<idx_t>(constant::pi / scaled_broadening) + 1;
        return round_num_moments(n);
    };
    return {damping, required_moments};
}
}} // namespace cpb::kpm
| C++ |
2D | dean0x7d/pybinding | cppcore/src/kpm/Starter.cpp | .cpp | 2,804 | 100 | #include "kpm/Starter.hpp"
#include "numeric/random.hpp"
namespace cpb { namespace kpm {
namespace {

/// Starter which always produces the user-supplied vector `alpha`,
/// force-cast to the Hamiltonian's scalar type and reordered to match it.
struct ConstantStarter {
    OptimizedHamiltonian const& oh;
    VectorXcd const& alpha;

    ConstantStarter(OptimizedHamiltonian const& oh, VectorXcd const& alpha)
        : oh(oh), alpha(alpha) {}

    var::complex<VectorX> operator()(var::scalar_tag tag) const { return tag.match(*this); }

    template<class scalar_t>
    var::complex<VectorX> operator()(var::tag<scalar_t>) const {
        auto r0 = num::force_cast<scalar_t>(alpha);
        oh.reorder(r0); // needed to maintain consistent results for all optimizations
        return r0;
    }
};

/// Starter which produces unit vectors: each call yields the unit vector for
/// the next source index; an all-zero vector once the sources are exhausted.
struct UnitStarter {
    idx_t size;
    ArrayXi sources;
    idx_t i = 0; // which source index the next call will use

    UnitStarter(OptimizedHamiltonian const& oh) : size(oh.size()), sources(oh.idx().src) {}

    var::complex<VectorX> operator()(var::scalar_tag tag) {
        return var::apply_visitor(*this, tag);
    }

    template<class scalar_t>
    var::complex<VectorX> operator()(var::tag<scalar_t>) {
        auto r0 = VectorX<scalar_t>::Zero(size).eval();
        if (i < sources.size()) {
            r0[sources[i]] = 1;
            ++i;
        }
        return r0;
    }
};

/// Starter which produces random vectors: elements of +/-1 for real scalar
/// types, random phases of unit magnitude for complex types. The optional
/// operator `op` is applied to the vector before reordering.
struct RandomStarter {
    OptimizedHamiltonian const& oh;
    VariantCSR op;
    std::mt19937 generator; // default-seeded -> reproducible sequence

    RandomStarter(OptimizedHamiltonian const& oh, VariantCSR const& op) : oh(oh), op(op) {}

    var::complex<VectorX> operator()(var::scalar_tag tag) {
        return var::apply_visitor(*this, tag);
    }

    template<class real_t>
    var::complex<VectorX> operator()(var::tag<real_t>) {
        // Map uniform [0, 1) randoms to +/-1
        auto r0 = transform<VectorX>(
            num::make_random<VectorX<real_t>>(oh.size(), generator),
            [](real_t x) -> real_t { return (x < 0.5f) ? -1.f : 1.f; }
        );
        if (op) { r0 = op.get<real_t>() * r0; }
        oh.reorder(r0); // needed to maintain consistent results for all optimizations
        return r0;
    }

    template<class real_t>
    var::complex<VectorX> operator()(var::tag<std::complex<real_t>>) {
        // Unit-magnitude elements with uniformly random phase: exp(2*pi*i*phase)
        auto const phase = num::make_random<ArrayX<real_t>>(oh.size(), generator);
        auto const k = std::complex<real_t>{2 * constant::pi * constant::i1};
        auto r0 = exp(k * phase).matrix().eval();
        if (op) { r0 = op.get<std::complex<real_t>>() * r0; }
        oh.reorder(r0);
        return r0;
    }
};

} // anonymous namespace
/// Starter which always returns the given `alpha` vector
Starter constant_starter(OptimizedHamiltonian const& oh, VectorXcd const& alpha) {
    return {ConstantStarter(oh, alpha), oh.size()};
}

/// Starter which returns unit vectors for the Hamiltonian's source indices
Starter unit_starter(OptimizedHamiltonian const& oh) {
    return {UnitStarter(oh), oh.size()};
}

/// Starter which returns random vectors, optionally multiplied by `op`
Starter random_starter(OptimizedHamiltonian const& oh, VariantCSR const& op) {
    return {RandomStarter(oh, op), oh.size()};
}
}} // namespace cpb::kpm
| C++ |
2D | dean0x7d/pybinding | cppcore/src/kpm/Moments.cpp | .cpp | 4,673 | 159 | #include "kpm/Moments.hpp"
namespace cpb { namespace kpm {
namespace {

/// Visitor which averages incoming moment data into `var_result`.
/// Handles both single-vector (ArrayX) and batched (ArrayXX) input; the
/// running total is divided by `num_vectors` once all vectors are counted.
struct BatchAccumulatorImpl {
    BatchData& var_result;
    idx_t idx;         // index of the first vector in this batch
    idx_t num_vectors; // total number of vectors to accumulate
    idx_t& count;      // running count of vectors accumulated so far

    template<class scalar_t>
    void operator()(ArrayX<scalar_t> const& a) {
        if (count == 0) {
            var_result = a; // first vector initializes the result
        } else {
            auto& result = var_result.template get<ArrayX<scalar_t>>();
            result += a;
        }

        ++count;
        if (count >= num_vectors && num_vectors != 1) {
            // All vectors accumulated -> convert the sum into an average
            auto& result = var_result.template get<ArrayX<scalar_t>>();
            result /= static_cast<num::get_real_t<scalar_t>>(num_vectors);
            count = 0;
        }
    }

    template<class scalar_t>
    void operator()(ArrayXX<scalar_t> const& a) {
        if (count == 0) {
            var_result = ArrayX<scalar_t>::Zero(a.rows()).eval();
        }

        // The final batch may have fewer meaningful columns than batch_size
        auto const batch_size = a.cols();
        auto const remaining = num_vectors - idx;
        auto cols = remaining > batch_size ? batch_size : remaining;

        auto& result = var_result.template get<ArrayX<scalar_t>>();
        result += a.leftCols(cols).rowwise().sum();

        count += batch_size;
        if (count >= num_vectors && num_vectors != 1) {
            result /= static_cast<num::get_real_t<scalar_t>>(num_vectors);
            count = 0;
        }
    }
};

/// Visitor which collects incoming moment data as separate columns of
/// `var_result`. Handles single-vector and batched input like the accumulator.
struct BatchConcatenatorImpl {
    BatchData& var_result;
    idx_t idx;         // destination column for this vector/batch
    idx_t num_vectors; // total number of columns in the result
    idx_t& count;      // running count of stored vectors

    template<class scalar_t>
    void operator()(ArrayX<scalar_t> const& a) {
        if (count == 0) {
            var_result = ArrayXX<scalar_t>(a.size(), num_vectors);
        }
        auto& data = var_result.template get<ArrayXX<scalar_t>>();
        data.col(idx) = a;
        ++count;
    }

    template<class scalar_t>
    void operator()(ArrayXX<scalar_t> const& a) {
        if (count == 0) {
            var_result = ArrayXX<scalar_t>(a.rows(), num_vectors);
        }

        // Copy only as many columns as are actually needed from the last batch
        auto const batch_size = a.cols();
        auto const remaining_cols = num_vectors - idx;
        auto const cols = remaining_cols > batch_size ? batch_size : remaining_cols;

        auto& data = var_result.template get<ArrayXX<scalar_t>>();
        data.block(0, idx, data.rows(), cols) = a.leftCols(cols);
        count += cols;
    }
};

/// Create a zeroed square matrix of the requested scalar type
struct InitMatrix {
    idx_t size;

    template<class scalar_t>
    var::complex<MatrixX> operator()(var::tag<scalar_t>) const {
        return MatrixX<scalar_t>::Zero(size, size).eval();
    }
};

/// result += a * b.adjoint(), dispatched on the matrix scalar type
struct MatrixMulAdd {
    var::complex<MatrixX>& result;
    var::complex<MatrixX> const& a;

    template<class scalar_t>
    void operator()(MatrixX<scalar_t> const& b) {
        using T = MatrixX<scalar_t>;
        result.template get<T>() += a.template get<T>() * b.adjoint();
    }
};

/// Divide by `n`, using the real type matching the target's scalar type
struct Div {
    idx_t n;

    template<class T, class real_t = num::get_real_t<typename T::Scalar>>
    void operator()(T& x) const { x /= static_cast<real_t>(n); }
};

} // anonymous namespace
/// Average the incoming moment data `nd` into `result` over all `nvec` vectors
void BatchAccumulator::operator()(BatchData& result, BatchData const& nd, idx_t idx, idx_t nvec) {
    var::apply_visitor(BatchAccumulatorImpl{result, idx, nvec, count}, nd);
}

/// Store the incoming moment data `nd` as separate columns of `result`
void BatchConcatenator::operator()(BatchData& result, BatchData const& nd, idx_t idx, idx_t nvec) {
    var::apply_visitor(BatchConcatenatorImpl{result, idx, nvec, count}, nd);
}
/// Allocate a zeroed num_moments x num_moments matrix of the tagged scalar type
MomentMultiplication::MomentMultiplication(idx_t num_moments, var::scalar_tag tag)
    : data(var::apply_visitor(InitMatrix{num_moments}, tag)) {}

/// data += a * b.adjoint() (accumulates the product of two moment matrices)
void MomentMultiplication::matrix_mul_add(DenseMatrixMoments const& a,
                                          DenseMatrixMoments const& b) {
    var::apply_visitor(MatrixMulAdd{data, a.data}, b.data);
}

/// Divide the accumulated data by `total` (e.g. the number of random vectors)
void MomentMultiplication::normalize(idx_t total) {
    var::apply_visitor(Div{total}, data);
}
/// Visitor which builds a velocity-like operator from a Hamiltonian matrix:
/// every non-zero H(row, col) is multiplied by (alpha[row] - alpha[col]),
/// where `alpha` holds one coordinate per site (see `Core::conductivity`).
struct Velocity {
    ArrayXf const& alpha;

    template<class scalar_t>
    VariantCSR operator()(SparseMatrixRC<scalar_t> const& ham) const {
        auto result = *ham; // copy: the original Hamiltonian must stay intact
        auto const data = result.valuePtr();
        auto const indices = result.innerIndexPtr();
        auto const indptr = result.outerIndexPtr();
        auto const size = result.rows();

        // CSR layout: the non-zeros of `row` occupy [indptr[row], indptr[row + 1])
        for (auto row = idx_t{0}; row < size; ++row) {
            for (auto n = indptr[row]; n < indptr[row + 1]; ++n) {
                const auto col = indices[n];
                data[n] *= static_cast<scalar_t>(alpha[row] - alpha[col]);
            }
        }

        return std::move(result); // move the modified copy into the variant
    }
};

VariantCSR velocity(Hamiltonian const& hamiltonian, ArrayXf const& alpha) {
    return var::apply_visitor(Velocity{alpha}, hamiltonian.get_variant());
}
}} // namespace cpb::kpm
| C++ |
2D | dean0x7d/pybinding | cppcore/src/kpm/Bounds.cpp | .cpp | 1,113 | 42 | #include "kpm/Bounds.hpp"
#include "kpm/Stats.hpp"
#include "compute/lanczos.hpp"
namespace cpb { namespace kpm {
namespace {

/// Visitor which runs the Lanczos min/max eigenvalue search on the
/// Hamiltonian matrix, whatever its scalar type.
struct MinMaxEigenvalues {
    double precision_percent;

    template<class scalar_t>
    compute::LanczosBounds operator()(SparseMatrixRC<scalar_t> const& ph) const {
        return compute::minmax_eigenvalues(*ph, precision_percent);
    }
};

} // anonymous namespace
/// Find the min/max eigenvalues via the (timed) Lanczos procedure.
/// Skipped when there is no Hamiltonian or the bounds were already set
/// (min != max indicates user-defined or previously computed bounds).
void Bounds::compute_bounds() {
    if (!hamiltonian || min != max) { return; }

    timer.tic();
    auto const lanczos = hamiltonian.get_variant().match(MinMaxEigenvalues{precision_percent});
    timer.toc();

    min = lanczos.min;
    max = lanczos.max;
    lanczos_loops = lanczos.loops;
}
/// Report the found spectrum bounds and the number of Lanczos loops used
std::string Bounds::report(bool shortform) const {
    auto const fmt_str = shortform ? "{:.2f}, {:.2f}, {}"
                                   : "Spectrum bounds found ({:.2f}, {:.2f} eV) "
                                     "using Lanczos procedure with {} loops";
    auto const msg = fmt::format(fmt_str, min, max, lanczos_loops);
    return format_report(msg, timer, shortform);
}
}} // namespace cpb::kpm
| C++ |
2D | dean0x7d/pybinding | cppcore/src/kpm/OptimizedHamiltonian.cpp | .cpp | 8,861 | 243 | #include "kpm/OptimizedHamiltonian.hpp"
namespace cpb { namespace kpm {
/// Store the slice border indices of the reordered matrix and record the
/// offset of the first slice that contains all source/destination indices.
SliceMap::SliceMap(std::vector<storage_idx_t> indices, Indices const& optimized_idx)
    : data(std::move(indices)) {
    // Offset of the first slice border which lies past all of the given indices
    auto find_offset = [&](ArrayXi const& idx) {
        assert(idx.size() != 0);
        auto const max_index = *std::max_element(begin(idx), end(idx));
        auto const it = std::find_if(data.begin(), data.end(),
                                     [&](storage_idx_t index) { return index > max_index; });
        assert(it != data.end());
        return static_cast<idx_t>(it - data.begin());
    };

    src_offset = find_offset(optimized_idx.src);
    dest_offset = find_offset(optimized_idx.dest);
}
/// Visitor which rebuilds the optimized matrix using the scalar type of the
/// original Hamiltonian, in the matrix format requested by the configuration.
struct Optimize {
    OptimizedHamiltonian& oh;
    Indices const& idx;
    Scale<> scale;

    template<class scalar_t>
    void operator()(SparseMatrixRC<scalar_t> const&) {
        if (oh.is_reordered) {
            oh.create_reordered<scalar_t>(idx, scale);
        } else {
            oh.create_scaled<scalar_t>(idx, scale);
        }

        // Both creation paths produce CSR; convert if ELL format was requested
        if (oh.matrix_format == MatrixFormat::ELL) {
            auto const& csr = oh.optimized_matrix.template get<SparseMatrixX<scalar_t>>();
            oh.optimized_matrix = num::csr_to_ell(csr);
        }

        oh.tag = var::tag<scalar_t>{};
    }
};
/// Prepare the optimized matrix for the given target indices and scaling
/// factors; skipped (cached) when already optimized for the same indices.
void OptimizedHamiltonian::optimize_for(Indices const& idx, Scale<> scale) {
    if (original_idx == idx) {
        return; // already optimized for this idx
    }

    timer.tic();
    original_h.get_variant().match(Optimize{*this, idx, scale});
    timer.toc();

    original_idx = idx;
}
/// Build the KPM-scaled matrix H2 = (H - b*I) * (2 / a) without any reordering
template<class scalar_t>
void OptimizedHamiltonian::create_scaled(Indices const& idx, Scale<> s) {
    using real_t = num::get_real_t<scalar_t>;
    auto const scale = Scale<real_t>(s);

    auto const& h = ham::get_reference<scalar_t>(original_h);
    auto h2 = SparseMatrixX<scalar_t>();
    if (scale.b == 0) { // just scale, no b offset
        h2 = h * (2 / scale.a);
    } else { // scale and offset
        auto I = SparseMatrixX<scalar_t>{h.rows(), h.cols()};
        I.setIdentity();
        h2 = (h - I * scale.b) * (2 / scale.a);
    }
    h2.makeCompressed();
    optimized_matrix = h2.markAsRValue();

    optimized_idx = idx;
}
/// Build the scaled matrix like `create_scaled`, but with rows/columns
/// reordered breadth-first starting from the first source index, so that the
/// target index becomes 0 and the matrix is partitioned into slices which
/// the optimal-size optimization can exploit.
template<class scalar_t>
void OptimizedHamiltonian::create_reordered(Indices const& idx, Scale<> s) {
    using real_t = num::get_real_t<scalar_t>;
    auto scale = Scale<real_t>(s);

    auto const& h = ham::get_reference<scalar_t>(original_h);
    auto const system_size = h.rows();
    auto const inverted_a = real_t{2 / scale.a};

    auto h2 = SparseMatrixX<scalar_t>(system_size, system_size);
    // Reserve the same nnz per row as the original + 1 in case the scaling adds diagonal elements
    h2.reserve(VectorX<idx_t>::Constant(system_size, sparse::max_nnz_per_row(h) + 1));

    // Note: The following "queue" and "map" use vectors instead of other container types because
    //       they serve a very simple purpose. Using preallocated vectors results in better
    //       performance (this is not an assumption, it has been tested).

    // The index queue will contain the indices that need to be checked next
    auto index_queue = std::vector<storage_idx_t>();
    index_queue.reserve(system_size);
    index_queue.push_back(idx.src[0]); // starting from the given index

    // Map from original matrix indices to reordered matrix indices
    reorder_map = std::vector<storage_idx_t>(system_size, -1); // reset all to invalid state
    // The point of the reordering is to have the target become index number 0
    reorder_map[idx.src[0]] = 0;

    // As the reordered matrix is filled, the slice border indices are recorded
    auto slice_border_indices = std::vector<storage_idx_t>();
    slice_border_indices.push_back(1);

    // Fill the reordered matrix row by row
    auto const h_view = sparse::make_loop(h);
    for (auto h2_row = 0; h2_row < system_size; ++h2_row) {
        auto diagonal_inserted = false;

        // Loop over elements in the row of the original matrix
        // corresponding to the h2_row of the reordered matrix
        auto const row = index_queue[h2_row];
        h_view.for_each_in_row(row, [&](storage_idx_t col, scalar_t value) {
            // This may be a new index, map it
            if (reorder_map[col] < 0) {
                reorder_map[col] = static_cast<storage_idx_t>(index_queue.size());
                index_queue.push_back(col);
            }

            // Get the reordered column index
            auto const h2_col = reorder_map[col];

            // Calculate the new value that will be inserted into the scaled/reordered matrix
            auto h2_value = value * inverted_a;
            if (row == col) { // diagonal elements
                h2_value -= scale.b * inverted_a;
                diagonal_inserted = true;
            }

            h2.insert(h2_row, h2_col) = h2_value;
        });

        // A diagonal element may need to be inserted into the reordered matrix
        // even if the original matrix doesn't have an element on the main diagonal
        if (scale.b != 0 && !diagonal_inserted) {
            h2.insert(h2_row, h2_row) = -scale.b * inverted_a;
        }

        // Reached the end of a slice
        if (h2_row == slice_border_indices.back() - 1) {
            slice_border_indices.push_back(static_cast<storage_idx_t>(index_queue.size()));
        }
    }
    h2.makeCompressed();
    optimized_matrix = h2.markAsRValue();

    slice_border_indices.pop_back(); // the last element is a duplicate of the second to last
    slice_border_indices.shrink_to_fit();

    optimized_idx = reorder_indices(idx, reorder_map);
    slice_map = {std::move(slice_border_indices), optimized_idx};
}
/// Map the `src` and `dest` indices through the reorder `map` so that they
/// refer to positions in the reordered matrix.
Indices OptimizedHamiltonian::reorder_indices(Indices const& original_idx,
                                              std::vector<storage_idx_t> const& map) {
    auto const remap = [&](storage_idx_t i) { return map[i]; };
    auto src = transform<ArrayX>(original_idx.src, remap);
    auto dest = transform<ArrayX>(original_idx.dest, remap);
    return {std::move(src), std::move(dest)};
}
namespace {
    /// Return the number of non-zeros present up to `rows`
    struct NonZeros {
        idx_t rows;

        template<class scalar_t>
        size_t operator()(SparseMatrixX<scalar_t> const& csr) {
            // CSR: outerIndexPtr()[rows] is the cumulative nnz of the first `rows` rows
            return static_cast<size_t>(csr.outerIndexPtr()[rows]);
        }

        template<class scalar_t>
        size_t operator()(num::EllMatrix<scalar_t> const& ell) {
            // ELL stores a fixed number of non-zeros per row
            return static_cast<size_t>(rows * ell.nnz_per_row);
        }
    };
}
/// Total number of non-zero matrix elements that must be processed to compute
/// `num_moments`. With `optimal_size`, only the sub-matrix relevant to each
/// moment (from the slice map) is counted.
size_t OptimizedHamiltonian::num_nonzeros(idx_t num_moments, bool optimal_size) const {
    auto result = size_t{0};
    if (!optimal_size) {
        result = num_moments * var::apply_visitor(NonZeros{size()}, optimized_matrix);
    } else {
        for (auto n = 0; n < num_moments; ++n) {
            auto const opt_size = slice_map.optimal_size(n, num_moments);
            auto const num_nonzeros = var::apply_visitor(NonZeros{opt_size}, optimized_matrix);
            result += num_nonzeros;
        }
    }
    if (optimized_idx.is_diagonal()) {
        // The diagonal algorithm produces two moments per iteration
        // (see DiagonalCollector), halving the workload
        result /= 2;
    }
    return result;
}
/// Total number of vector elements that must be processed to compute
/// `num_moments`. With `optimal_size`, only the slice-map-limited part of
/// each vector is counted.
size_t OptimizedHamiltonian::num_vec_elements(idx_t num_moments, bool optimal_size) const {
    auto total = size_t{0};
    if (optimal_size) {
        for (auto n = 0; n < num_moments; ++n) {
            total += static_cast<size_t>(slice_map.optimal_size(n, num_moments));
        }
    } else {
        total = num_moments * size();
    }
    // The diagonal algorithm produces two moments per iteration -> half the work
    return optimized_idx.is_diagonal() ? total / 2 : total;
}
namespace {
    /// Return the data size in bytes
    struct MatrixMemory {
        template<class scalar_t>
        size_t operator()(SparseMatrixX<scalar_t> const& csr) const {
            // CSR stores: values, column indices, and row-start offsets
            using index_t = typename SparseMatrixX<scalar_t>::StorageIndex;
            auto const nnz = static_cast<size_t>(csr.nonZeros());
            auto const row_starts = static_cast<size_t>(csr.rows() + 1);
            return nnz * sizeof(scalar_t) + nnz * sizeof(index_t) + row_starts * sizeof(index_t);
        }

        template<class scalar_t>
        size_t operator()(num::EllMatrix<scalar_t> const& ell) const {
            // ELL stores: values and column indices (no row offsets needed)
            using index_t = typename num::EllMatrix<scalar_t>::StorageIndex;
            auto const nnz = static_cast<size_t>(ell.nonZeros());
            return nnz * sizeof(scalar_t) + nnz * sizeof(index_t);
        }
    };

    /// Size in bytes of a single vector element of the Hamiltonian's scalar type
    struct VectorMemory {
        template<class scalar_t>
        size_t operator()(SparseMatrixRC<scalar_t> const&) const { return sizeof(scalar_t); }
    };
}
/// Memory used by the optimized matrix data, in bytes
size_t OptimizedHamiltonian::matrix_memory() const {
    return var::apply_visitor(MatrixMemory{}, optimized_matrix);
}

/// Memory used by a single KPM vector, in bytes: system size * sizeof(scalar)
size_t OptimizedHamiltonian::vector_memory() const {
    return size() * original_h.get_variant().match(VectorMemory{});
}
}} // namespace cpb::kpm
| C++ |
2D | dean0x7d/pybinding | cppcore/src/kpm/default/collectors.cpp | .cpp | 3,879 | 115 | #include "kpm/default/collectors.hpp"
namespace cpb { namespace kpm {
/// Store the first two moments; m0 carries the special 0.5 factor of moment zero
template<class scalar_t>
void DiagonalCollector<scalar_t>::initial(VectorRef r0, VectorRef r1) {
    m0 = moments[0] = r0.squaredNorm() * scalar_t{0.5};
    m1 = moments[1] = r1.dot(r0);
}

/// Each iteration of the diagonal algorithm produces two moments (2n-2, 2n-1)
template<class scalar_t>
void DiagonalCollector<scalar_t>::operator()(idx_t n, scalar_t m2, scalar_t m3) {
    moments[2 * (n - 1)] = scalar_t{2} * (m2 - m0);
    moments[2 * (n - 1) + 1] = scalar_t{2} * m3 - m1;
}
/// Batched variant of `DiagonalCollector::initial`: one column per vector;
/// m0 carries the special 0.5 factor of moment zero
template<class scalar_t>
void BatchDiagonalCollector<scalar_t>::initial(VectorRef r0, VectorRef r1) {
    auto const size = m0.size();
    for (auto i = size_t{0}; i < size; ++i) {
        moments(0, i) = m0[i] = r0.col(i).squaredNorm() * scalar_t{0.5};
        moments(1, i) = m1[i] = r1.col(i).dot(r0.col(i));
    }
}

/// Each iteration produces two moments (2n-2, 2n-1) for every batched vector
template<class scalar_t>
void BatchDiagonalCollector<scalar_t>::operator()(idx_t n, simd::array<scalar_t> m2,
                                                  simd::array<scalar_t> m3) {
    auto const size = m0.size();
    for (auto i = size_t{0}; i < size; ++i) {
        moments(2 * (n - 1), i) = scalar_t{2} * (m2[i] - m0[i]);
        moments(2 * (n - 1) + 1, i) = scalar_t{2} * m3[i] - m1[i];
    }
}
/// Prepare `beta` and the optional operator `op` in the Hamiltonian's scalar
/// type and reorder them to match the optimized matrix.
template<class scalar_t>
GenericCollector<scalar_t>::GenericCollector(idx_t num_moments, OptimizedHamiltonian const& oh,
                                             VectorXcd const& alpha_, VectorXcd const& beta_,
                                             SparseMatrixXcd const& op_) : moments(num_moments) {
    // An empty `beta` defaults to `alpha`, i.e. the diagonal <alpha|...|alpha>
    beta = num::force_cast<scalar_t>(beta_.size() != 0 ? beta_ : alpha_);
    oh.reorder(beta);
    if (op_.size() != 0){
        op = num::force_cast<scalar_t>(op_);
        oh.reorder(op);
    }
}

/// First two moments: <beta|op|r_n>; moment zero carries the special 0.5 factor
template<class scalar_t>
void GenericCollector<scalar_t>::initial(VectorRef r0, VectorRef r1) {
    moments[0] = (op.size() != 0) ? beta.dot(op * r0) : beta.dot(r0);
    moments[0] *= 0.5f;
    moments[1] = (op.size() != 0) ? beta.dot(op * r1) : beta.dot(r1);
}

/// One moment per iteration: <beta|op|r_n>
template<class scalar_t>
void GenericCollector<scalar_t>::operator()(idx_t n, VectorRef r1) {
    moments[n] = (op.size() != 0) ? beta.dot(op * r1) : beta.dot(r1);
}
/// First two moments for each destination index: the relevant vector elements
template<class scalar_t>
void MultiUnitCollector<scalar_t>::initial(VectorRef r0, VectorRef r1) {
    using real_t = num::get_real_t<scalar_t>;
    for (auto i = 0; i < idx.dest.size(); ++i) {
        moments[i][0] = r0[idx.dest[i]] * real_t{0.5}; // 0.5 is special for the moment zero
        moments[i][1] = r1[idx.dest[i]];
    }
}

/// One moment per iteration for each destination index
template<class scalar_t>
void MultiUnitCollector<scalar_t>::operator()(idx_t n, VectorRef r1) {
    for (auto i = 0; i < idx.dest.size(); ++i) {
        moments[i][n] = r1[idx.dest[i]];
    }
}
/// Prepare the optional operator `op` (applied to every collected vector) in
/// this collector's scalar type, reordered to match the optimized matrix.
template<class scalar_t>
DenseMatrixCollector<scalar_t>::DenseMatrixCollector(
    idx_t num_moments, OptimizedHamiltonian const& oh, VariantCSR const& op_
) : moments(num_moments, oh.size()) {
    if (op_) {
        op = op_.template get<scalar_t>();
        oh.reorder(op);
    }
}

/// Store the first two vectors (optionally multiplied by `op`) as matrix rows
template<class scalar_t>
void DenseMatrixCollector<scalar_t>::initial(VectorRef r0, VectorRef r1) {
    using real_t = num::get_real_t<scalar_t>;
    if (op.size() != 0){
        moments.row(0) = op * r0 * real_t{0.5}; // 0.5 is special for the moment zero
        moments.row(1) = op * r1;
    } else {
        moments.row(0) = r0 * real_t{0.5}; // 0.5 is special for the moment zero
        moments.row(1) = r1;
    }
}

/// Store one vector (optionally multiplied by `op`) per iteration as row `n`
template<class scalar_t>
void DenseMatrixCollector<scalar_t>::operator()(idx_t n, VectorRef r1) {
    if (op.size() != 0) {
        moments.row(n) = op * r1;
    } else {
        moments.row(n) = r1;
    }
}
// Explicit instantiations of every collector (the macro expands to the
// supported scalar types -- see its definition for the exact list)
CPB_INSTANTIATE_TEMPLATE_CLASS(DiagonalCollector)
CPB_INSTANTIATE_TEMPLATE_CLASS(BatchDiagonalCollector)
CPB_INSTANTIATE_TEMPLATE_CLASS(GenericCollector)
CPB_INSTANTIATE_TEMPLATE_CLASS(MultiUnitCollector)
CPB_INSTANTIATE_TEMPLATE_CLASS(DenseMatrixCollector)
}} // namespace cpb::kpm
| C++ |
2D | dean0x7d/pybinding | cppcore/src/kpm/default/Compute.cpp | .cpp | 4,757 | 147 | #include "kpm/default/Compute.hpp"
#include "kpm/default/collectors.hpp"
#include "compute/kernel_polynomial.hpp"
#include "kpm/calc_moments.hpp"
#include "detail/thread.hpp"
namespace cpb { namespace kpm {
namespace {
/// Variant visitor: runs the configured KPM moment computation for one
/// concrete Hamiltonian matrix type (Matrix::Scalar fixes the precision)
template<class Matrix>
struct SelectAlgorithm {
    using scalar_t = typename Matrix::Scalar;
    Matrix const& h2;               // the optimized Hamiltonian's matrix
    Starter const& starter;         // source of starting vectors (shared, locked below)
    AlgorithmConfig const& config;
    OptimizedHamiltonian const& oh;
    DefaultCompute const& compute;  // supplies thread count and progress reporting
    /// Run the moment recursion, feeding results into `collect`;
    /// returns the index of the starter vector that was consumed
    template<template<class> class C, class Vector = typename C<scalar_t>::Vector>
    idx_t with(C<scalar_t>& collect) const {
        simd::scope_disable_denormals guard;
        // the starter may be shared between worker threads -> serialize access
        starter.lock();
        auto const idx = starter.count;
        auto r0 = make_r0(starter, var::tag<Vector>{}, simd::traits<scalar_t>::size);
        starter.unlock();
        auto r1 = make_r1(h2, r0);
        collect.initial(r0, r1);
        // two implementations of the same recursion with different memory access order
        if (config.interleaved) {
            calc_moments::interleaved(collect, std::move(r0), std::move(r1),
                                      h2, oh.map(), config.optimal_size);
        } else {
            calc_moments::basic(collect, std::move(r0), std::move(r1),
                                h2, oh.map(), config.optimal_size);
        }
        return idx;
    }
    /// Single diagonal expectation value
    void operator()(DiagonalMoments* m) {
        auto collect = DiagonalCollector<scalar_t>(m->num_moments);
        with<DiagonalCollector>(collect);
        m->data = std::move(collect.moments);
    }
    /// Several diagonal expectation values, processed as SIMD batches on a thread pool
    void operator()(BatchDiagonalMoments* m) {
        constexpr auto batch_size = static_cast<idx_t>(simd::traits<scalar_t>::size);
        auto const num_threads = compute.get_num_threads();
        auto num_batches = m->num_vectors / batch_size;
        auto num_singles = m->num_vectors % batch_size;
        // Heuristic: prefer SIMD execution when there's a low number of threads
        // (leftover vectors are rounded up into one extra full SIMD batch)
        if (num_singles > num_threads * batch_size / 2) {
            num_batches += 1;
            num_singles = 0;
        }
        ThreadPool pool(num_threads);
        compute.progress_start(m->num_vectors);
        for (auto i = 0; i < num_batches; ++i) {
            pool.add([&]() {
                auto collect = BatchDiagonalCollector<scalar_t>(m->num_moments, batch_size);
                auto const idx = with<BatchDiagonalCollector>(collect);
                m->add(collect.moments, idx);
                compute.progress_update(batch_size, m->num_vectors);
            });
        }
        for (auto i = 0; i < num_singles; ++i) {
            pool.add([&]() {
                auto collect = DiagonalCollector<scalar_t>(m->num_moments);
                auto const idx = with<DiagonalCollector>(collect);
                m->add(collect.moments, idx);
                compute.progress_update(1, m->num_vectors);
            });
        }
        pool.join();
        compute.progress_finish(m->num_vectors);
    }
    /// Generalized <alpha| op |beta> moments
    void operator()(GenericMoments* m) {
        auto collect = GenericCollector<scalar_t>(m->num_moments, oh, m->alpha, m->beta, m->op);
        with<OffDiagonalCollector>(collect);
        m->data = std::move(collect.moments);
    }
    /// Moments at a selected set of destination sites
    void operator()(MultiUnitMoments* m) {
        auto collect = MultiUnitCollector<scalar_t>(m->num_moments, m->idx);
        with<OffDiagonalCollector>(collect);
        m->data = std::move(collect.moments);
    }
    /// Full KPM vectors stored as a dense matrix
    void operator()(DenseMatrixMoments* m) {
        auto collect = DenseMatrixCollector<scalar_t>(m->num_moments, oh, m->op);
        with<OffDiagonalCollector>(collect);
        m->data = std::move(collect.moments);
    }
};
/// Variant visitor: dispatches on the concrete matrix type held by the
/// optimized Hamiltonian, then forwards to SelectAlgorithm
struct SelectMatrix {
    MomentsRef m;
    Starter const& s;
    AlgorithmConfig const& ac;
    OptimizedHamiltonian const& oh;
    DefaultCompute const& compute;
    template<class Matrix>
    void operator()(Matrix const& h2) {
        var::apply_visitor(SelectAlgorithm<Matrix>{h2, s, ac, oh, compute}, m);
    }
};
} // anonymous namespace
DefaultCompute::DefaultCompute(idx_t num_threads, ProgressCallback progress_callback)
: num_threads(num_threads > 0 ? num_threads : std::thread::hardware_concurrency()),
progress_callback(progress_callback) {}
/// Compute the requested KPM moments: dispatch on the Hamiltonian's concrete
/// matrix type, then on the requested moment kind (see SelectMatrix/SelectAlgorithm)
void DefaultCompute::moments(MomentsRef m, Starter const& s, AlgorithmConfig const& ac,
                             OptimizedHamiltonian const& oh) const {
    var::apply_visitor(SelectMatrix{std::move(m), s, ac, oh, *this}, oh.matrix());
}
/// Signal the start of a computation to the progress callback.
/// NOTE(review): delta == -1 appears to be the "starting" sentinel understood
/// by the callback -- confirm against the callback's documented contract.
void DefaultCompute::progress_start(idx_t total) const {
    progress_update(-1, total);
}
/// Forward a progress increment to the callback, if one was registered
void DefaultCompute::progress_update(idx_t delta, idx_t total) const {
    if (progress_callback) {
        progress_callback(delta, total);
    }
}
/// Signal completion: report a final delta equal to the total
void DefaultCompute::progress_finish(idx_t total) const {
    progress_update(total, total);
}
}} // namespace cpb::kpm
| C++ |
2D | dean0x7d/pybinding | cppcore/src/hamiltonian/Hamiltonian.cpp | .cpp | 1,497 | 63 | #include "hamiltonian/Hamiltonian.hpp"
namespace cpb {
namespace {
// Visitor functors applied to the variant matrix (one shared_ptr per
// supported scalar type); each forwards a simple query or operation.
/// Is the stored matrix pointer non-null?
struct IsValid {
    template<class scalar_t>
    bool operator()(SparseMatrixRC<scalar_t> const& p) const { return p != nullptr; }
};
/// Release the stored matrix
struct Reset {
    template<class scalar_t>
    void operator()(SparseMatrixRC<scalar_t>& p) const { p.reset(); }
};
/// Type-erased CSR view of the stored matrix
struct GetSparseRef {
    template<class scalar_t>
    ComplexCsrConstRef operator()(SparseMatrixRC<scalar_t> const& m) const { return csrref(*m); }
};
/// Number of structurally non-zero entries
struct NonZeros {
    template<class scalar_t>
    idx_t operator()(SparseMatrixRC<scalar_t> const& m) const { return m->nonZeros(); }
};
struct Rows {
    template<class scalar_t>
    idx_t operator()(SparseMatrixRC<scalar_t> const& m) const { return m->rows(); }
};
struct Cols {
    template<class scalar_t>
    idx_t operator()(SparseMatrixRC<scalar_t> const& m) const { return m->cols(); }
};
} // namespace
/// True if a matrix has been built (the stored shared pointer is non-null)
Hamiltonian::operator bool() const {
    return var::apply_visitor(IsValid(), variant_matrix);
}
/// Release the currently held matrix (resets the stored shared pointer)
void Hamiltonian::reset() {
    // the visitor returns void, so there is nothing to `return` here
    var::apply_visitor(Reset(), variant_matrix);
}
/// Type-erased CSR reference to the stored matrix (valid only while this
/// Hamiltonian keeps the matrix alive)
ComplexCsrConstRef Hamiltonian::csrref() const {
    return var::apply_visitor(GetSparseRef(), variant_matrix);
}
/// Number of structurally non-zero matrix entries
idx_t Hamiltonian::non_zeros() const {
    return var::apply_visitor(NonZeros(), variant_matrix);
}
/// Number of matrix rows
idx_t Hamiltonian::rows() const {
    return var::apply_visitor(Rows(), variant_matrix);
}
/// Number of matrix columns
idx_t Hamiltonian::cols() const {
    return var::apply_visitor(Cols(), variant_matrix);
}
} // namespace cpb
| C++ |
2D | dean0x7d/pybinding | cppcore/src/hamiltonian/HamiltonianModifiers.cpp | .cpp | 951 | 31 | #include "hamiltonian/HamiltonianModifiers.hpp"
namespace cpb {
/// Does any registered onsite or hopping modifier produce complex values?
/// (Determines whether the Hamiltonian must use a complex scalar type.)
bool HamiltonianModifiers::any_complex() const {
    // `auto const` ordering for consistency with the rest of this file
    auto const complex_potential = std::any_of(
        onsite.begin(), onsite.end(), [](OnsiteModifier const& o) { return o.is_complex; }
    );
    auto const complex_hoppings = std::any_of(
        hopping.begin(), hopping.end(), [](HoppingModifier const& h) { return h.is_complex; }
    );
    return complex_potential || complex_hoppings;
}
/// Does any registered onsite or hopping modifier require double precision?
bool HamiltonianModifiers::any_double() const {
    auto const requires_double = [](auto const& modifiers) {
        return std::any_of(modifiers.begin(), modifiers.end(),
                           [](auto const& m) { return m.is_double; });
    };
    return requires_double(onsite) || requires_double(hopping);
}
/// Remove all registered onsite and hopping modifiers
void HamiltonianModifiers::clear() {
    onsite.clear();
    hopping.clear();
}
} // namespace cpb
| C++ |
2D | dean0x7d/pybinding | cppcore/src/solver/Solver.cpp | .cpp | 3,095 | 104 | #include "solver/Solver.hpp"
namespace cpb { namespace compute {
/// Gaussian-broadened total density of states evaluated at `target_energies`
struct CalcDOS {
    ArrayXf const& target_energies;
    float broadening;
    /// DOS(E) = 1 / (broadening * sqrt(2pi)) * sum(exp(-0.5 * (En-E)^2 / broadening^2))
    template<class Array>
    ArrayXd operator()(Array En) {
        auto const scale = 1 / (broadening * sqrt(2 * constant::pi));
        // renamed from `constant`: that name shadowed the `constant` namespace
        // used for `constant::pi` just above
        auto const exponent_factor = -0.5f / pow(broadening, 2);
        ArrayXd dos(target_energies.size());
        transform(target_energies, dos, [&](float E) {
            auto gaussian = exp((En - E).square() * exponent_factor);
            return scale * sum(gaussian);
        });
        return dos;
    }
};
/// Gaussian-broadened local density of states at a single target energy
struct CalcSpatialLDOS {
    float target_energy;
    float broadening;
    /// LDOS(r) = 1 / (b * sqrt(2pi)) * sum(|psi(r)|^2 * exp(-0.5 * (En-E)^2 / b^2))
    template<class Array1D, class Array2D>
    ArrayXd operator()(Array1D En, Array2D psi) {
        using scalar_t = typename Array1D::Scalar;
        auto const scale = 1 / (broadening * sqrt(2 * constant::pi));
        auto const exponent_factor = -0.5f / pow(broadening, 2);
        // the gaussian weights depend only on the eigenvalues, not on the site:
        // compute them once instead of re-evaluating inside the loop below
        ArrayX<scalar_t> const gaussian = exp((En - target_energy).square() * exponent_factor);
        ArrayXd ldos(psi.rows());
        for (auto i = 0; i < ldos.size(); ++i) {
            ArrayX<scalar_t> psi2 = psi.row(i).abs2();
            ldos[i] = scale * sum(psi2 * gaussian);
        }
        return ldos;
    }
};
} // namespace compute
/// Eagerly evaluate the model and build a solver strategy whose scalar type
/// matches the model's Hamiltonian
BaseSolver::BaseSolver(Model const& model, MakeStrategy const& make_strategy)
    : model(model.eval()), make_strategy(make_strategy),
      strategy(make_strategy(model.hamiltonian())) {}
/// Replace the model, invalidating previous results. The existing strategy is
/// reused when possible; otherwise a new one is created for the new Hamiltonian.
void BaseSolver::set_model(Model const& new_model) {
    is_solved = false;
    model = new_model;
    if (strategy) {// try to assign a new Hamiltonian to the existing Solver strategy
        bool success = strategy->change_hamiltonian(model.hamiltonian());
        if (!success) { // fails if they have incompatible scalar types
            strategy.reset();
        }
    }
    if (!strategy) { // creates a SolverStrategy with a scalar type suited to the Hamiltonian
        strategy = make_strategy(model.hamiltonian());
    }
}
/// Run the eigensolver and time it; no-op if the current results are up to date
void BaseSolver::solve() {
    if (is_solved)
        return;
    calculation_timer.tic();
    strategy->solve();
    calculation_timer.toc();
    is_solved = true;
}
/// Solve (if needed) and return a type-erased view of the eigenvalues
RealArrayConstRef BaseSolver::eigenvalues() {
    solve();
    return strategy->eigenvalues();
}
/// Solve (if needed) and return a type-erased view of the eigenvectors
ComplexArrayConstRef BaseSolver::eigenvectors() {
    solve();
    return strategy->eigenvectors();
}
/// Gaussian-broadened density of states at each of the target energies
ArrayXd BaseSolver::calc_dos(ArrayXf target_energies, float broadening) {
    return num::match<ArrayX>(eigenvalues(), compute::CalcDOS{target_energies, broadening});
}
/// Gaussian-broadened local density of states at one energy, for every site
ArrayXd BaseSolver::calc_spatial_ldos(float target_energy, float broadening) {
    return num::match2sp<ArrayX, ColMajorArrayXX>(
        eigenvalues(), eigenvectors(),
        compute::CalcSpatialLDOS{target_energy, broadening}
    );
}
/// Strategy-specific report plus the duration of the last solve
std::string BaseSolver::report(bool shortform) const {
    return strategy->report(shortform) + " " + calculation_timer.str();
}
} // namespace cpb
| C++ |
2D | dean0x7d/pybinding | cppcore/src/solver/FEAST.cpp | .cpp | 8,997 | 250 | #include "solver/FEAST.hpp"
#ifdef CPB_USE_FEAST
# include "support/format.hpp"
# include "compute/mkl/wrapper.hpp"
using namespace fmt::literals;
using namespace cpb;
/// Run the FEAST eigensolver, growing the subspace size guess and retrying as
/// needed to recover from "subspace too small" (return code 3) and excessive
/// refinement-loop conditions.
template<class scalar_t>
void FEAST<scalar_t>::solve() {
    // Grow the subspace size guess with guaranteed strict progress: the old
    // `*= 1.7` could stall via integer truncation (1 * 1.7 -> 1), spinning
    // the recovery loops below forever.
    auto const grow_size_guess = [&] {
        auto const grown = static_cast<decltype(config.initial_size_guess)>(
            config.initial_size_guess * 1.7
        );
        config.initial_size_guess = (grown > config.initial_size_guess)
                                        ? grown : config.initial_size_guess + 1;
    };
    // size of the matrix
    config.system_size = hamiltonian->rows();
    // reset info flags
    info.recycle_warning = false;
    info.recycle_warning_loops = 0;
    info.size_warning = false;
    // call the solver
    call_feast();
    if (config.recycle_subspace) {
        // check for errors in case of recycled subspace
        while (info.refinement_loops >= config.max_refinement_loops || info.return_code == 3) {
            // refinement loop count is greater than allowed or subspace is too small
            info.recycle_warning = true;
            // make sure we don't do this forever
            info.recycle_warning_loops += info.refinement_loops;
            if (info.recycle_warning_loops > 2 * config.max_refinement_loops)
                throw std::runtime_error{"FEAST: failed to converge within desired loop count."};
            // clearData() will increase suggested_size back to initial_size_guess
            // but if that was already the case, try to increase the subspace size
            if (info.suggested_size == config.initial_size_guess)
                grow_size_guess();
            force_clear();
            // rerun calculation with cleared data
            call_feast();
        }
    }
    // check for any return code errors
    if (info.return_code != 0) {
        if (info.return_code == 3) {
            // FEAST error: Subspace guess M0 is too small
            info.size_warning = true;
            while (info.return_code == 3) {
                // try to increase the subspace size and rerun calculation
                grow_size_guess();
                force_clear();
                call_feast();
                // ran into a different error while trying to recover
                if (info.return_code != 3 && info.return_code != 0)
                    throw std::runtime_error{"FEAST: Subspace guess is too small. Failed to recover."};
            }
        } else if (info.return_code == 1) {
            // not really an error: "No eigenvalues found in the given energy range."
        } else {
            throw std::runtime_error{"FEAST error code: " + std::to_string(info.return_code)};
        }
    }
    info.max_residual = residual.head(info.final_size).maxCoeff();
    if (info.recycle_warning)
        info.refinement_loops += info.recycle_warning_loops;
}
/// Human-readable summary of the last run: subspace sizes, refinement loops,
/// error trace and maximum residual (short or long form)
template<class scalar_t>
std::string FEAST<scalar_t>::report(bool is_shortform) const {
    std::string report;
    if (info.size_warning)
        report += fmt::format("Resized initial guess: {}\n", config.initial_size_guess);
    std::string fmt_string;
    if (is_shortform) {
        fmt_string = "Subspace({final_size}|{suggested_size}|{ratio:.2f}), "
            "Refinement({loops}|{error_trace:.2e}|{residual:.2e})";
    } else {
        fmt_string = "Final subspace size is {final_size} | "
            "Suggested size is {suggested_size} ({ratio:.2f} ratio)\n"
            "Converged after {loops} refinement loop(s)\n"
            "Error trace: {error_trace:.2e} | Max. residual: {residual:.2e}\n"
            "\nCompleted in";
    }
    auto const ratio = static_cast<double>(info.suggested_size) / info.final_size;
    report += fmt::format(
        fmt_string, "final_size"_a=info.final_size, "suggested_size"_a=info.suggested_size,
        "ratio"_a=ratio, "loops"_a=info.refinement_loops, "error_trace"_a=info.error_trace,
        "residual"_a=info.max_residual
    );
    return report;
}
/// Try to adopt a new Hamiltonian. Returns false if its scalar type does not
/// match this instantiation (the caller must then rebuild the strategy).
/// Previous results are kept only when subspace recycling is enabled.
template<class scalar_t>
bool FEAST<scalar_t>::change_hamiltonian(Hamiltonian const& h) {
    if (!ham::is<scalar_t>(h)) {
        return false;
    }
    hamiltonian = ham::get_shared_ptr<scalar_t>(h);
    if (!config.recycle_subspace) {
        force_clear();
    }
    return true;
}
/// Discard all previous results (eigenvalues, eigenvectors and residuals)
template<class scalar_t>
void FEAST<scalar_t>::force_clear()
{
    _eigenvalues.resize(0);
    _eigenvectors.resize(0, 0);
    residual.resize(0);
}
/// Fill the FEAST input parameter array `fpm` from our configuration.
/// NOTE(review): the fpm[] index meanings follow Intel MKL's Extended
/// Eigensolver input parameters -- verify against the MKL feastinit docs
/// before changing any index.
template<class scalar_t>
void FEAST<scalar_t>::init_feast()
{
    feastinit(fpm);
    fpm[0] = config.is_verbose ? 1 : 0;
    // the subspace can only be recycled if we actually have data to recycle
    int can_recycle = (_eigenvalues.size() != 0) ? 1 : 0;
    fpm[4] = config.recycle_subspace ? can_recycle : 0;
    fpm[1] = config.contour_points;
    fpm[2] = config.dp_stop_criteria;
    fpm[3] = config.max_refinement_loops;
    fpm[5] = config.residual_convergence ? 1 : 0;
    fpm[6] = config.sp_stop_criteria;
}
/// Set the PARDISO (direct solver) portion of the FEAST parameter array.
/// NOTE: currently disabled here (fpm[63] = 0) and the call in call_feast()
/// is commented out -- kept for future solver-tuning experiments.
template<class scalar_t>
void FEAST<scalar_t>::init_pardiso()
{
    fpm[63] = 0; // disabled
    int* iparm = &fpm[64]; // PARDISO iparm array lives right after the FEAST params
    iparm[0] = 1; // use non-defaults
    iparm[1] = 2; // *** try 3
    // 2 // _reserved
    iparm[3] = 0; // preconditioned CGS/CG
    iparm[4] = 0; // user permutation // must be 0
    iparm[5] = 0; // write solution on x
    // 6 // _output
    iparm[7] = 0; // iterative refinement steps // best perf. 0
    // 8 // _reserved
    iparm[9] = 8;
    iparm[10] = 0; // scaling vectors
    iparm[11] = 0; // solve with transposed or conjugate transposed
    iparm[12] = 1; // matching
    // 13-16 // _output
    iparm[17] = 0; // report the number of non-zeros in the factors (-1)
    iparm[18] = 0; // more reporting
    // 19 // _output
    iparm[20] = 1; // pivoting // best perf. 1 for real
    // 21-22 // _output
    iparm[23] = 1; // *** parallel
    iparm[24] = 0; // *** parallel
    // 25 // _reserved
    iparm[26] = 0; // check for index errors
    iparm[27] = 0; // 1 for single precision
    // 28 // _reserved
    // 29 // _output
    iparm[30] = 0; // partial solve and computing selected components of the solution vectors
    // 31-32 // _reserved
    iparm[33] = 0; // CNR // 2 is good for some reason
    iparm[34] = 0; // zero-base indexing
    // 35-58 // _reserved
    iparm[59] = 0; // out-of-core mode
    // 60-61 // _reserved
    // 62 // _output
    // 63 // _reserved
}
/// Prepare FEAST parameters, allocate result storage on first run (or after
/// force_clear), then invoke the scalar-type-specific FEAST routine
template<class scalar_t>
void FEAST<scalar_t>::call_feast()
{
    init_feast();
    // init_pardiso();  // PARDISO tuning currently disabled -- see init_pardiso()
    // prepare resources for the results
    if (_eigenvalues.size() == 0)
    {
        // make sure the subspace isn't bigger than the system (or negative)
        if (config.initial_size_guess > config.system_size || config.initial_size_guess < 0)
            config.initial_size_guess = config.system_size;
        _eigenvalues.resize(config.initial_size_guess);
        info.suggested_size = config.initial_size_guess;
    }
    if (residual.size() == 0)
        residual.resize(config.initial_size_guess);
    if (_eigenvectors.size() == 0)
        _eigenvectors.resize(config.system_size, config.initial_size_guess);
    // solve real or complex Hamiltonian
    call_feast_impl();
}
/// Call the MKL FEAST routine for this scalar type. MKL's FEAST interface
/// expects one-based CSR indices, so the index arrays are copied and shifted
/// by +1 before the call; the value array is reinterpreted in place.
template<class scalar_t>
void FEAST<scalar_t>::call_feast_impl() {
    auto const& h_matrix = *hamiltonian;
    // convert to one-based index
    ArrayXi const outer_starts =
        Map<ArrayXi const>(h_matrix.outerIndexPtr(), h_matrix.outerSize() + 1) + 1;
    ArrayXi const inner_indices =
        Map<ArrayXi const>(h_matrix.innerIndexPtr(), h_matrix.nonZeros()) + 1;
    using mkl_scalar_t = mkl::type<scalar_t>;
    auto const data = reinterpret_cast<mkl_scalar_t const*>(h_matrix.valuePtr());
    auto eigvectors = reinterpret_cast<mkl_scalar_t*>(_eigenvectors.data());
    auto const emin = static_cast<real_t>(config.energy_min);
    auto const emax = static_cast<real_t>(config.energy_max);
    mkl::feast_hcsrev<scalar_t>::call(
        &config.matrix_format,   // (in) full matrix
        &config.system_size,     // (in) size of the matrix
        data,                    // (in) sparse matrix values
        outer_starts.data(),     // (in)
        inner_indices.data(),    // (in)
        fpm,                     // (in) FEAST parameters
        &info.error_trace,       // (out) relative error on trace
        &info.refinement_loops,  // (out) the number of refinement loops executed
        &emin,                   // (in) lower bound
        &emax,                   // (in) upper bound
        &info.suggested_size,    // (in/out) subspace size guess
        _eigenvalues.data(),     // (out) eigenvalues
        eigvectors,              // (in/out) eigenvectors
        &info.final_size,        // (out) total number of eigenvalues found
        residual.data(),         // (out) relative residual vector (must be length of M0)
        &info.return_code        // (out) info or error code
    );
}
// Explicit instantiations for every scalar type the solver supports
template class cpb::FEAST<float>;
template class cpb::FEAST<std::complex<float>>;
template class cpb::FEAST<double>;
template class cpb::FEAST<std::complex<double>>;
#else // CPB_USE_FEAST
// Keep this translation unit non-empty when FEAST support is compiled out
// (avoids "no symbols" warnings from some linkers/archivers)
void _suppress_FEAST_has_no_symbols_warning() {}
#endif // CPB_USE_FEAST
| C++ |
2D | dean0x7d/pybinding | cppcore/src/leads/Leads.cpp | .cpp | 955 | 42 | #include "leads/Leads.hpp"
namespace cpb {
/// Extend the foundation so each lead specification can attach to valid sites
void Leads::create_attachment_area(Foundation& foundation) const {
    for (auto const& spec : specs) {
        leads::create_attachment_area(foundation, spec);
    }
}
/// Build one lead structure per spec; does nothing if already built
void Leads::make_structure(Foundation const& foundation) {
    if (structures.empty()) {
        for (auto const& spec : specs) {
            structures.emplace_back(foundation, spec);
        }
    }
}
/// Build one Hamiltonian per lead structure; does nothing if already built.
/// `is_double`/`is_complex` select the scalar type, matching the main system.
void Leads::make_hamiltonian(Lattice const& lattice, HamiltonianModifiers const& modifiers,
                             bool is_double, bool is_complex) {
    if (!hamiltonians.empty()) {
        return;
    }
    for (auto const& structure : structures) {
        hamiltonians.emplace_back(structure.system, lattice, modifiers, is_double, is_complex);
    }
}
/// Drop the lead structures (and, since they depend on them, the Hamiltonians)
void Leads::clear_structure() {
    structures.clear();
    clear_hamiltonian();
}
/// Drop only the lead Hamiltonians (structures stay valid)
void Leads::clear_hamiltonian() {
    hamiltonians.clear();
}
} // namespace cpb
| C++ |
2D | dean0x7d/pybinding | cppcore/src/leads/Structure.cpp | .cpp | 3,003 | 81 | #include "leads/Structure.hpp"
#include "system/Foundation.hpp"
namespace cpb { namespace leads {
/// Build the lead's system description from the foundation slice where the
/// lead attaches (chosen by detail::Junction), plus one boundary describing
/// the hoppings into the next translated unit cell of the semi-infinite lead.
Structure::Structure(Foundation const& foundation, Spec const& lead)
    : system(foundation.get_lattice().site_registry(),
             foundation.get_lattice().hopping_registry()) {
    auto const& lattice = foundation.get_lattice();
    auto const& finalized_indices = foundation.get_finalized_indices();
    // translation from one lead unit cell to the next (sign picks the direction)
    auto const shift = Cartesian(static_cast<float>(lead.sign) * lattice.vector(lead.axis));
    auto const junction = detail::Junction(foundation, lead);
    auto const slice = foundation[junction.slice_index];
    // system indices of the foundation sites that belong to the lead junction
    indices = [&]{
        auto indices = std::vector<int>();
        indices.reserve(static_cast<size_t>(junction.is_valid.count()));
        for (auto const& site : slice) {
            if (junction.is_valid[site.get_slice_idx()]) {
                indices.push_back(finalized_indices[site]);
            }
        }
        return indices;
    }();
    /*system*/ {
        // positions (shifted into the lead), sublattices and intra-cell hoppings
        auto const size = static_cast<int>(indices.size());
        system.positions.resize(size);
        system.hopping_blocks = {size, system.hopping_registry.name_map()};
        for (auto const& site : slice) {
            if (!junction.is_valid[site.get_slice_idx()]) {
                continue;
            }
            auto const index = lead_index(finalized_indices[site]);
            system.positions[index] = site.get_position() + shift;
            system.compressed_sublattices.add(SiteID{site.get_alias_id()}, site.get_norb());
            site.for_each_neighbor([&](Site neighbor, Hopping hopping) {
                auto const neighbor_index = lead_index(finalized_indices[neighbor]);
                // only neighbors inside the junction; skip conjugate duplicates
                if (neighbor_index >= 0 && !hopping.is_conjugate) {
                    system.hopping_blocks.add(hopping.family_id, index, neighbor_index);
                }
            });
        }
        system.compressed_sublattices.verify(size);
    }
    // boundary: hoppings from this unit cell to the adjacent (shifted) one
    system.boundaries.push_back([&]{
        auto const size = static_cast<int>(indices.size());
        auto hopping_blocks = HoppingBlocks(size, system.hopping_registry.name_map());
        for (auto const& site : slice) {
            if (!junction.is_valid[site.get_slice_idx()]) {
                continue;
            }
            // look at the same site translated one unit cell along the lead axis
            auto const shifted_site = [&]{
                Index3D shift_index = Index3D::Zero();
                shift_index[lead.axis] = lead.sign;
                return site.shifted(shift_index);
            }();
            auto const index = lead_index(finalized_indices[site]);
            shifted_site.for_each_neighbor([&](Site neighbor, Hopping hopping) {
                auto const neighbor_index = lead_index(finalized_indices[neighbor]);
                if (neighbor_index >= 0) {
                    hopping_blocks.add(hopping.family_id, index, neighbor_index);
                }
            });
        }
        return System::Boundary{hopping_blocks, shift};
    }());
}
}} // namespace cpb::leads
| C++ |
2D | dean0x7d/pybinding | cppcore/src/leads/Spec.cpp | .cpp | 3,670 | 106 | #include "leads/Spec.hpp"
#include "system/Foundation.hpp"
namespace cpb { namespace leads {
/// Decode a one-based signed axis index: e.g. direction == -1 means the
/// negative side of axis 0. NOTE(review): direction == 0 would yield
/// axis == -1 and sign == 0 -- presumably rejected by the caller; confirm.
Spec::Spec(int direction, Shape const& shape)
    : axis(abs(direction) - 1),
      sign(direction != 0 ? direction / abs(direction) : 0),
      shape(shape) {}
/// Walk from the lead junction toward the main structure, marking empty
/// foundation sites as valid until every lead site meets an existing site.
/// Throws if any part of the lead walks off the foundation without connecting.
void create_attachment_area(Foundation& foundation, Spec const& spec) {
    auto const size = foundation.get_spatial_size();
    auto const step = -spec.sign; // move opposite to the lead direction, into the system
    auto const end = (step > 0) ? size[spec.axis] : -1;
    auto junction = detail::Junction(foundation, spec);
    auto slice = foundation[junction.slice_index];
    // Fill in the foundation until all lead sites can be connected to foundation sites
    for (; slice[spec.axis] != end; slice[spec.axis] += step) {
        for (auto& site : slice) {
            if (!junction.is_valid[site.get_slice_idx()])
                continue;
            if (!site.is_valid()) {
                // Empty sites should be filled in for the lead
                site.set_valid(true);
            } else {
                // Stop once we hit an existing site on the lead's path
                junction.is_valid[site.get_slice_idx()] = false;
            }
        }
        if (none_of(junction.is_valid)) {
            break;
        }
    }
    if (slice[spec.axis] == end) {
        throw std::runtime_error("Can't attach lead: partially misses main structure");
    }
}
namespace detail {
/// Slice of the foundation covered by the lead shape's bounding box,
/// clamped to the foundation's own bounds
SliceIndex3D shape_slice(Foundation const& foundation, Shape const& shape) {
    auto const size = foundation.get_spatial_size();
    auto const foundation_bounds = foundation.get_bounds();
    auto const lead_bounds = cpb::detail::find_bounds(shape, foundation.get_lattice());
    auto slice_index = SliceIndex3D();
    for (auto i = 0; i < slice_index.ndims(); ++i) {
        // translate lead bounds into foundation-relative indices, then clamp
        auto const lead_start = lead_bounds.first[i] - foundation_bounds.first[i];
        auto const lead_end = (lead_bounds.second[i] + 1) - foundation_bounds.first[i];
        slice_index[i] = {std::max(lead_start, 0), std::min(lead_end, size[i])};
    }
    return slice_index;
}
/// Find the 2D slice (along the lead's axis) where the lead first meets
/// existing valid sites of the main structure; throws if it never does.
SliceIndex3D attachment_slice(Foundation const& foundation, Spec const& spec) {
    auto const size = foundation.get_spatial_size();
    auto const step = -spec.sign; // scan opposite to the lead direction
    auto const start = (step > 0) ? 0 : size[spec.axis] - 1;
    auto const end = (step > 0) ? size[spec.axis] : -1;
    auto slice_index = shape_slice(foundation, spec.shape);
    auto slice = foundation[slice_index];
    // The first index on the lead's axis where there are any existing valid sites
    auto const lead_start = [&]{
        for (slice[spec.axis] = start; slice[spec.axis] != end; slice[spec.axis] += step) {
            for (auto& site : slice) {
                if (site.is_valid()) {
                    return slice[spec.axis];
                }
            }
        }
        return slice[spec.axis];
    }();
    if (lead_start == end) {
        throw std::runtime_error("Can't attach lead: completely misses main structure");
    }
    slice_index[spec.axis] = lead_start;
    return slice_index;
}
/// Locate the attachment slice and mark which of its sites fall inside the
/// lead shape (evaluated at the center of the shape's bounding slice).
/// Throws if the shape contains no sites at all.
Junction::Junction(Foundation const& foundation, Spec const& spec)
    : slice_index(attachment_slice(foundation, spec)) {
    // The lead's shape.contains() should be invoked in the center of the shape slice
    auto si3d = shape_slice(foundation, spec.shape);
    auto const si = si3d[spec.axis];
    si3d[spec.axis] = (si.start + si.end) / 2;
    auto const slice = foundation[si3d];
    is_valid = spec.shape.contains(slice.positions());
    if (none_of(is_valid)) {
        throw std::runtime_error("Can't attach lead: no sites in lead junction");
    }
}
} // namespace detail
}} // namespace cpb::leads
| C++ |
2D | dean0x7d/pybinding | cppcore/tests/test_leads.cpp | .cpp | 1,446 | 41 | #include <catch.hpp>
#include "fixtures.hpp"
using namespace cpb;
/// Return the data array of a Hamiltonian CSR matrix
template<class scalar_t = float>
ArrayXf matrix_data(Hamiltonian const& h) {
auto const matrix = ham::get_reference<scalar_t>(h);
return Eigen::Map<ArrayXf const>(matrix.valuePtr(), matrix.nonZeros());
}
// Leads attached on both sides of a small rectangle: checks the junction
// site indices and that lead Hamiltonians see the position-dependent fields
TEST_CASE("Attach leads") {
    auto const width = 2.0f;
    auto const height = 3.0f;
    auto model = Model(lattice::square(), shape::rectangle(width, height));
    REQUIRE(model.system()->num_sites() == 6);
    // vertical attachment line on the left (-1) and right (+1) of the system
    model.attach_lead(-1, Line({0, -height/2, 0}, {0, height/2, 0}));
    model.attach_lead(+1, Line({0, -height/2, 0}, {0, height/2, 0}));
    REQUIRE(model.leads().size() == 2);
    REQUIRE_THAT(model.lead(0).indices(), Catch::Equals(std::vector<int>{0, 2, 4}));
    REQUIRE_THAT(model.lead(1).indices(), Catch::Equals(std::vector<int>{1, 3, 5}));
    SECTION("Hoppings grow from lead 0 to system") {
        model.add(field::linear_hopping());
        auto const h = matrix_data<>(model.hamiltonian());
        auto const h1 = matrix_data<>(model.lead(0).h1());
        REQUIRE(h1.minCoeff() < h.minCoeff());
    }
    SECTION("Onsite potential grows from system to lead 1") {
        model.add(field::linear_onsite());
        auto const h = matrix_data<>(model.hamiltonian());
        auto const h0 = matrix_data<>(model.lead(1).h0());
        REQUIRE(h.maxCoeff() < h0.maxCoeff());
    }
}
| C++ |
2D | dean0x7d/pybinding | cppcore/tests/catch.cpp | .cpp | 47 | 3 | #define CATCH_CONFIG_MAIN
#include <catch.hpp>
| C++ |
2D | dean0x7d/pybinding | cppcore/tests/test_shape.cpp | .cpp | 1,770 | 54 | #include <catch.hpp>
#include "fixtures.hpp"
using namespace cpb;
// A 3D primitive on a 2D lattice must be rejected with a clear error
TEST_CASE("Primitive") {
    REQUIRE_THROWS_WITH(Model(lattice::square(), Primitive(2, 2, 2)),
                        Catch::Contains("more dimensions than the lattice"));
}
// FreeformShape defined by a predicate (x > 0.5): check that its bounding-box
// vertices and the contains() predicate behave as specified
TEST_CASE("FreeformShape", "[shape]") {
    auto const contains = [](CartesianArrayConstRef p) -> ArrayX<bool> { return p.x() > 0.5f; };
    auto const shape = FreeformShape(contains, {1, 1, 1}, {0.5f, 0.5f, 0.5f});
    SECTION("Bounding box") {
        // unit cube centered at (0.5, 0.5, 0.5): all 8 corner vertices
        auto expected_vertices = Shape::Vertices{
            {0, 0, 0},
            {1, 0, 0},
            {0, 1, 0},
            {1, 1, 0},
            {0, 0, 1},
            {1, 0, 1},
            {0, 1, 1},
            {1, 1, 1},
        };
        REQUIRE(shape.vertices == expected_vertices);
    }
    SECTION("Contains") {
        // 4 evenly spaced points on [0, 1]: only those with x > 0.5 are inside
        auto const size = 4;
        auto const v = ArrayXf::LinSpaced(size, 0, 1).eval();
        auto const p = CartesianArray(v, v, v);
        auto expected = ArrayX<bool>(size);
        expected << false, false, true, true;
        REQUIRE(all_of(shape.contains(p) == expected));
    }
}
// A shape's lattice_offset shifts the lattice before site generation,
// which changes both the site positions and the number of generated sites
TEST_CASE("Shape-imposed lattice offset") {
    auto shape = shape::rectangle(2.4f, 2.4f);
    auto const model = Model(lattice::square(), shape);
    auto const& system = *model.system();
    shape.lattice_offset = {-0.1f, 0.5f, .0f};
    auto const offset_model = Model(lattice::square(), shape);
    auto const& offset_system = *offset_model.system();
    REQUIRE(model.get_lattice().get_offset().isZero());
    REQUIRE(offset_model.get_lattice().get_offset().isApprox(shape.lattice_offset));
    REQUIRE(system.positions.x.minCoeff() > offset_system.positions.x.minCoeff());
    REQUIRE(system.num_sites() > offset_system.num_sites());
}
| C++ |
2D | dean0x7d/pybinding | cppcore/tests/fixtures.cpp | .cpp | 7,718 | 246 | #include "fixtures.hpp"
using namespace cpb;
namespace lattice {
/// Simple square lattice: one sublattice with onsite energy 4t and
/// nearest-neighbor hopping -t along both lattice vectors
Lattice square(float a, float t) {
    auto lattice = Lattice({a, 0, 0}, {0, a, 0});
    lattice.add_sublattice("A", {0, 0, 0}, 4 * t);
    lattice.register_hopping_energy("-t", -t);
    lattice.add_hopping({0, 1, 0}, "A", "A", "-t");
    lattice.add_hopping({1, 0, 0}, "A", "A", "-t");
    return lattice;
}
/// Square lattice with a two-site basis (B at the cell center) and two
/// distinct hopping energies: t1 between A and B, t2 between A neighbors
Lattice square_2atom(float a, float t1, float t2) {
    auto lattice = Lattice({a, 0, 0}, {0, a, 0});
    lattice.add_sublattice("A", {0, 0, 0});
    lattice.add_sublattice("B", {0.5f * a, 0.5f * a, 0});
    lattice.register_hopping_energy("t1", t1);
    lattice.register_hopping_energy("t2", t2);
    lattice.add_hopping({0, 0, 0}, "A", "B", "t1");
    lattice.add_hopping({1, 1, 0}, "A", "B", "t1");
    lattice.add_hopping({1, 0, 0}, "A", "A", "t2");
    return lattice;
}
/// Multi-orbital square lattice: sublattices with 2, 1, 2 and 3 orbitals and
/// matrix hopping energies of various shapes (exercises multi-orbital code paths)
Lattice square_multiorbital() {
    auto lattice = Lattice({1, 0, 0}, {0, 1, 0});
    lattice.add_sublattice("A", {0, 0, 0}, VectorXd::Constant(2, 0.0).eval());
    lattice.add_sublattice("B", {0, 0, 0}, VectorXd::Constant(1, 0.0).eval());
    lattice.add_sublattice("C", {0, 0, 0}, VectorXd::Constant(2, 0.0).eval());
    lattice.add_sublattice("D", {0, 0, 0}, VectorXd::Constant(3, 0.0).eval());
    lattice.register_hopping_energy("t22", MatrixXcd::Constant(2, 2, 1.0));
    lattice.register_hopping_energy("t12", MatrixXcd::Constant(1, 2, 1.0));
    lattice.register_hopping_energy("t13", MatrixXcd::Constant(1, 3, 1.0));
    lattice.register_hopping_energy("t23", MatrixXcd::Constant(2, 3, 1.0));
    lattice.register_hopping_energy("t32", MatrixXcd::Constant(3, 2, 1.0));
    lattice.add_hopping({0, 0, 0}, "A", "C", "t22");
    lattice.add_hopping({0, 0, 0}, "B", "A", "t12");
    lattice.add_hopping({1, 0, 0}, "B", "D", "t13");
    lattice.add_hopping({1, 0, 0}, "A", "A", "t22");
    lattice.add_hopping({0, 0, 0}, "C", "D", "t23");
    lattice.add_hopping({0, 1, 0}, "D", "A", "t32");
    return lattice;
}
/// Checkerboard lattice with 2-orbital sites, a complex 2x2 matrix hopping
/// and complex (hermitian) onsite energy matrices of opposite sign on A and B
Lattice checkerboard_multiorbital() {
    constexpr auto i1 = num::get_complex_t<double>{constant::i1};
    auto lattice = Lattice({1, 0, 0}, {0, 1, 0});
    // complex multi-orbital hopping and complex onsite energy
    auto hopping = MatrixXcd(2, 2);
    hopping << 2.0 + 2.0 * i1, 3.0 + 3.0 * i1, 4.0 + 4.0 * i1, 5.0 + 5.0 * i1;
    auto delta = MatrixXcd(2, 2);
    delta << 1.0, i1, -i1, 1.0;
    lattice.add_sublattice("A", {  0,   0, 0}, (-delta).eval());
    lattice.add_sublattice("B", {0.5, 0.5, 0}, delta);
    lattice.register_hopping_energy("t", hopping);
    lattice.add_hopping({ 0,  0, 0}, "A", "B", "t");
    lattice.add_hopping({ 0, -1, 0}, "A", "B", "t");
    lattice.add_hopping({-1,  0, 0}, "A", "B", "t");
    lattice.add_hopping({-1, -1, 0}, "A", "B", "t");
    return lattice;
}
/// Hexagonal (honeycomb-like) lattice with purely imaginary hopping energies
Lattice hexagonal_complex() {
    constexpr auto i1 = num::get_complex_t<double>{constant::i1};
    // lattice vectors
    auto a1 = Cartesian{ 0.5f, 0.5f * sqrt(3.0f), 0};
    auto a2 = Cartesian{-0.5f, 0.5f * sqrt(3.0f), 0};
    // positions
    auto const pos_a = Cartesian{0, 0, 0};
    auto const pos_b = Cartesian{0, -1.0f/3 * sqrt(3.0f), 0};
    auto lattice = Lattice(a1, a2);
    lattice.add_sublattice("A", pos_a);
    lattice.add_sublattice("B", pos_b);
    // complex hoppings
    lattice.register_hopping_energy("t1", -i1);
    lattice.register_hopping_energy("t2", 2.0 * i1);
    lattice.register_hopping_energy("t3", 3.0 * i1);
    lattice.add_hopping({0, 0, 0}, "A", "B", "t1");
    lattice.add_hopping({0, 1, 0}, "A", "B", "t2");
    lattice.add_hopping({1, 0, 0}, "A", "B", "t3");
    return lattice;
}
} // namespace lattice
namespace graphene {
/// Monolayer graphene: honeycomb lattice with A/B sublattices and
/// nearest-neighbor hopping t (constants a, a_cc, t come from fixtures.hpp)
Lattice monolayer() {
    auto lattice = Lattice({a, 0, 0}, {a/2, a/2 * sqrt(3.0f), 0});
    lattice.add_sublattice("A", {0, -a_cc/2, 0});
    lattice.add_sublattice("B", {0,  a_cc/2, 0});
    lattice.register_hopping_energy("t", t);
    lattice.add_hopping({0,  0, 0}, "A", "B", "t");
    lattice.add_hopping({1, -1, 0}, "A", "B", "t");
    lattice.add_hopping({0, -1, 0}, "A", "B", "t");
    return lattice;
}
} // namespace graphene
namespace shape {
/// Axis-aligned rectangle of total width `x` and height `y`, centered on the origin
Shape rectangle(float x, float y) {
    auto const half_width = x / 2;
    auto const half_height = y / 2;
    // vertices listed clockwise, starting from the top-right corner
    return Polygon({{half_width, half_height, 0},
                    {half_width, -half_height, 0},
                    {-half_width, -half_height, 0},
                    {-half_width, half_height, 0}});
}
} // namespace shape
namespace field {
namespace {
    /// Visitor: overwrite the onsite energy array with a constant value
    struct OnsiteEnergyOp {
        float value;
        template<class Array>
        void operator()(Array energy) const {
            using scalar_t = typename Array::Scalar;
            energy.setConstant(static_cast<scalar_t>(value));
        }
    };
}
/// Onsite modifier which sets a constant potential on every site
cpb::OnsiteModifier constant_potential(float value) {
    return {[value](ComplexArrayRef energy, CartesianArrayConstRef, string_view) {
        num::match<ArrayX>(energy, OnsiteEnergyOp{value});
    }};
}
namespace {
    /// Visitor: multiply hopping energies by a Peierls phase for a uniform
    /// perpendicular magnetic field; a no-op for non-complex scalar types
    struct MagneticFieldOp {
        float magnitude;
        CartesianArrayConstRef pos1;
        CartesianArrayConstRef pos2;
        static constexpr auto scale = 1e-18f; // NOTE(review): unit conversion factor -- confirm
        template<class Array>
        void operator()(Array) const {}
        template<class real_t>
        void operator()(Map<ArrayX<std::complex<real_t>>> energy) const {
            using scalar_t = std::complex<real_t>;
            auto const k = static_cast<scalar_t>(scale * 2 * constant::pi / constant::phi0);
            auto const vp_x = 0.5f * magnitude * (pos1.y() + pos2.y());
            auto const peierls = vp_x * (pos1.x() - pos2.x());
            energy *= exp(scalar_t{constant::i1} * k * peierls.template cast<scalar_t>());
        }
    };
}
/// Hopping modifier which applies a uniform magnetic field (requires complex numbers)
cpb::HoppingModifier constant_magnetic_field(float value) {
    return {[value](ComplexArrayRef energy, CartesianArrayConstRef pos1,
                    CartesianArrayConstRef pos2, string_view) {
        num::match<ArrayX>(energy, MagneticFieldOp{value, pos1, pos2});
    }, /*is_complex*/true, /*is_double*/false};
}
namespace {
    /// Visitor: onsite energy proportional to the site's x coordinate
    struct LinearOnsite {
        float k;
        Eigen::Ref<ArrayXf const> x;
        template<class Array>
        void operator()(Array energy) const {
            using scalar_t = typename Array::Scalar;
            energy = (k * x).template cast<scalar_t>();
        }
    };
}
/// Onsite modifier: potential V(x) = k * x
cpb::OnsiteModifier linear_onsite(float k) {
    return {[k](ComplexArrayRef energy, CartesianArrayConstRef pos, string_view) {
        num::match<ArrayX>(energy, LinearOnsite{k, pos.x()});
    }};
}
namespace {
    /// Visitor: hopping energy proportional to the bond's midpoint x coordinate
    struct LinearHopping {
        float k;
        ArrayXf x;
        template<class Array>
        void operator()(Array energy) const {
            using scalar_t = typename Array::Scalar;
            energy = (k * x).template cast<scalar_t>();
        }
    };
}
/// Hopping modifier: t(x) = k * x, with x evaluated at the bond midpoint
cpb::HoppingModifier linear_hopping(float k) {
    return {[k](ComplexArrayRef energy, CartesianArrayConstRef pos1,
                CartesianArrayConstRef pos2, string_view) {
        num::match<ArrayX>(energy, LinearHopping{k, 0.5f * (pos1.x() + pos2.x())});
    }, /*is_complex*/false, /*is_double*/false};
}
/// No-op hopping modifier which only forces double-precision Hamiltonians
cpb::HoppingModifier force_double_precision() {
    auto nop = [](ComplexArrayRef, CartesianArrayConstRef, CartesianArrayConstRef, string_view) {};
    return cpb::HoppingModifier(nop, /*is_complex*/false, /*is_double*/true);
}
/// No-op hopping modifier which only forces complex Hamiltonians
cpb::HoppingModifier force_complex_numbers() {
    auto nop = [](ComplexArrayRef, CartesianArrayConstRef, CartesianArrayConstRef, string_view) {};
    return cpb::HoppingModifier(nop, /*is_complex*/true, /*is_double*/false);
}
} // namespace field
namespace generator {

/// Registers a hopping family under `name` (with energy 0) whose generator
/// returns empty index arrays, i.e. it never actually creates any hoppings.
/// Useful for testing the generator machinery itself.
cpb::HoppingGenerator do_nothing_hopping(std::string const& name) {
    return {name, 0.0, [](System const&) {
        return HoppingGenerator::Result{ArrayXi{}, ArrayXi{}};
    }};
}

} // namespace generator
#include <catch.hpp>
#include "fixtures.hpp"
using namespace cpb;
TEST_CASE("CompressedSublattices") {
    // Append `size` consecutive sites belonging to sublattice `id`,
    // each with `norb` orbitals.
    auto inject = [](CompressedSublattices& cs, idx_t size, SiteID id, idx_t norb) {
        for (auto i = 0; i < size; ++i) {
            cs.add(id, norb);
        }
    };

    constexpr auto size = 30;
    auto cs = CompressedSublattices();
    // `ds` is the expected decompressed view: one sublattice ID per site.
    auto ds = VectorX<storage_idx_t>(size);

    // Four runs of sites; note the orbital counts are non-decreasing
    // (1, 2, 2, 3) across the runs.
    inject(cs, 10, SiteID{1}, 1);
    ds.segment(0, 10).setConstant(1);
    inject(cs, 15, SiteID{0}, 2);
    ds.segment(10, 15).setConstant(0);
    inject(cs, 2, SiteID{2}, 2);
    ds.segment(25, 2).setConstant(2);
    inject(cs, 3, SiteID{4}, 3);
    ds.segment(27, 3).setConstant(4);

    REQUIRE(cs.decompressed().matrix() == ds);
    REQUIRE_NOTHROW(cs.verify(size));
    REQUIRE(cs.alias_ids().size() == 4);
    REQUIRE(cs.decompressed_size() == size);

    // `start_index(n)` is the first site index with `n` orbitals;
    // requesting an orbital count that was never added must throw.
    REQUIRE(cs.start_index(1) == 0);
    REQUIRE(cs.start_index(2) == 10);
    REQUIRE(cs.start_index(3) == 27);
    REQUIRE_THROWS_WITH(cs.start_index(4), Catch::Contains("invalid num_orbitals"));
}
TEST_CASE("HoppingBlocks") {
    // 6x4 square-lattice rectangle: 4 corner sites with 2 neighbors each,
    // 12 edge sites with 3 and 8 interior sites with 4.
    auto model = Model(lattice::square(), Primitive(6, 4));
    auto const counts = model.system()->hopping_blocks.count_neighbors();
    auto const expected_total = 2 * 4 + 3 * 12 + 4 * 8;
    REQUIRE(counts.sum() == expected_total);
}
TEST_CASE("to_hamiltonian_indices") {
    // Helper: build a VectorXi from a braced list of indices.
    auto vec = [](std::initializer_list<storage_idx_t> const& init) -> VectorXi {
        auto v = VectorXi(static_cast<idx_t>(init.size()));
        std::copy(init.begin(), init.end(), v.data());
        return v;
    };

    SECTION("single-orbital") {
        // One orbital per site: the site index maps to itself.
        auto const model = Model(lattice::square(), Primitive(3, 3));
        auto const& system = *model.system();
        REQUIRE(system.num_sites() == 9);
        REQUIRE(system.hamiltonian_size() == 9);
        REQUIRE(system.hamiltonian_nnz() == 33);
        REQUIRE(system.to_hamiltonian_indices(0).matrix() == vec({0}));
        REQUIRE(system.to_hamiltonian_indices(4).matrix() == vec({4}));
        REQUIRE(system.to_hamiltonian_indices(8).matrix() == vec({8}));
    }
    SECTION("multi-orbital") {
        // Each site expands to as many Hamiltonian rows as it has orbitals,
        // so 8 sites produce a 16x16 Hamiltonian here.
        auto const model = Model(lattice::square_multiorbital(), Primitive(1, 2));
        auto const& system = *model.system();
        REQUIRE(system.num_sites() == 8);
        REQUIRE(system.hamiltonian_size() == 16);
        REQUIRE(system.hamiltonian_nnz() == 96);
        // Sites 0-1: one orbital, 2-5: two orbitals, 6-7: three orbitals.
        REQUIRE(system.to_hamiltonian_indices(0).matrix() == vec({0}));
        REQUIRE(system.to_hamiltonian_indices(1).matrix() == vec({1}));
        REQUIRE(system.to_hamiltonian_indices(2).matrix() == vec({2, 3}));
        REQUIRE(system.to_hamiltonian_indices(3).matrix() == vec({4, 5}));
        REQUIRE(system.to_hamiltonian_indices(4).matrix() == vec({6, 7}));
        REQUIRE(system.to_hamiltonian_indices(5).matrix() == vec({8, 9}));
        REQUIRE(system.to_hamiltonian_indices(6).matrix() == vec({10, 11, 12}));
        REQUIRE(system.to_hamiltonian_indices(7).matrix() == vec({13, 14, 15}));
    }
}
TEST_CASE("complex_valued_hoppings") {
    SECTION("single-orbital-complex") {
        using constant::i1;
        auto const lattice = lattice::hexagonal_complex();

        // Displacement vectors from an A site to its three B neighbors.
        auto const d1 = Cartesian{0.0f, -1.0f / 3 * sqrt(3.0f), 0.0f};
        auto const d2 = Cartesian{d1 + Cartesian{ 0.5f, 0.5f * sqrt(3.0f), 0.0f}};
        auto const d3 = Cartesian{d1 + Cartesian{-0.5f, 0.5f * sqrt(3.0f), 0.0f}};
        // The three (purely imaginary) hopping energies registered
        // by the fixture lattice.
        auto const t1 = -i1;
        auto const t2 = 2.0f * i1;
        auto const t3 = 3.0f * i1;

        auto model = Model(lattice, TranslationalSymmetry(1, 1));
        using constant::pi;
        // Bloch Hamiltonian evaluated at the K point.
        auto const k_vector = Cartesian{2 * pi, 0, 0};
        model.set_wave_vector(k_vector);
        auto const& system = *model.system();
        auto const& matrix = ham::get_reference<std::complex<float>>(model.hamiltonian());

        // Bloch sum over the three neighbors: sum_n t_n * exp(i k . d_n).
        auto const expected_hopping = t1 * exp(i1 * k_vector.dot(d1)) +
                                      t2 * exp(i1 * k_vector.dot(d2)) +
                                      t3 * exp(i1 * k_vector.dot(d3));

        REQUIRE(system.num_sites() == 2);
        REQUIRE(system.hamiltonian_size() == 2);
        REQUIRE(system.hamiltonian_nnz() == 4);
        // (1, 0) must be the complex conjugate of (0, 1) -- Hermiticity.
        REQUIRE(num::approx_equal(matrix.coeff(0, 1).real(), expected_hopping.real()));
        REQUIRE(num::approx_equal(matrix.coeff(0, 1).imag(), expected_hopping.imag()));
        REQUIRE(num::approx_equal(matrix.coeff(1, 0).real(), expected_hopping.real()));
        REQUIRE(num::approx_equal(matrix.coeff(1, 0).imag(), -expected_hopping.imag()));
    }
    SECTION("multi-orbital-complex") {
        auto const model = Model(lattice::checkerboard_multiorbital(),
                                 TranslationalSymmetry(1, 1),
                                 field::force_double_precision());
        auto const& system = *model.system();
        auto const& matrix = ham::get_reference<std::complex<double>>(model.hamiltonian());

        constexpr auto i1 = num::get_complex_t<double>{constant::i1};
        // Expected 2x2 hopping block, filled row-major by the
        // comma-initializer below.
        auto expected_hopping = MatrixXcd(2, 2);
        expected_hopping << 2.0 + 2.0 * i1,
                            3.0 + 3.0 * i1,
                            4.0 + 4.0 * i1,
                            5.0 + 5.0 * i1;

        REQUIRE(system.num_sites() == 2);
        REQUIRE(system.hamiltonian_size() == 4);
        REQUIRE(system.hamiltonian_nnz() == 16);
        // The two onsite blocks have opposite sign; the off-diagonal block
        // is the hopping matrix summed over 4 equivalent neighbors.
        REQUIRE(matrix.block(0, 0, 2, 2).isApprox((-matrix.block(2, 2, 2, 2)).eval()));
        REQUIRE(matrix.block(0, 2, 2, 2).isApprox(4.0 * expected_hopping));
    }
}
TEST_CASE("sublattice_range") {
    // Sites are grouped by sublattice: each named range must cover the
    // expected contiguous [start, end) span of site indices.
    auto const model = Model(lattice::square_multiorbital(), shape::rectangle(1, 2));
    auto const& system = *model.system();

    auto const expect_range = [&](char const* name, int start, int end) {
        auto const range = system.sublattice_range(name);
        REQUIRE(range.start == start);
        REQUIRE(range.end == end);
    };

    expect_range("A", 2, 4);
    expect_range("B", 0, 2);
    expect_range("C", 4, 6);
    expect_range("D", 6, 8);
}
TEST_CASE("expanded_positions") {
    // expanded_positions() repeats each site's position once per orbital:
    // here the 4 sites carry 2, 1, 2 and 3 orbitals respectively.
    auto const model = Model(lattice::square_multiorbital());
    auto const& sys = *model.system();
    auto const& pos = sys.positions;
    auto const& ep = sys.expanded_positions();

    REQUIRE(pos.size() == 4);
    REQUIRE(ep.size() == 8);

    // Expected site index for each of the 8 expanded (orbital) entries.
    auto const site_of_orbital = std::vector<int>{0, 0, 1, 2, 2, 3, 3, 3};
    for (auto i = 0; i < 8; ++i) {
        REQUIRE(ep[i] == pos[site_of_orbital[i]]);
    }
}
#include <catch.hpp>
#include "fixtures.hpp"
#include "KPM.hpp"
using namespace cpb;
/// Shared model for the KPM tests: a small graphene rectangle with a
/// constant onsite potential. The flags select the Hamiltonian's scalar
/// type -- `is_double` forces double precision and `is_complex` adds a
/// magnetic field (Peierls phases), which requires complex values.
Model make_test_model(bool is_double = false, bool is_complex = false) {
    auto model = Model(graphene::monolayer(), shape::rectangle(0.6f, 0.8f),
                       field::constant_potential(1));
    if (is_double) {
        model.add(field::force_double_precision());
    }
    if (is_complex) {
        model.add(field::constant_magnetic_field(1e4));
    }
    return model;
}
TEST_CASE("OptimizedHamiltonian reordering", "[kpm]") {
    auto const model = make_test_model();
    auto const num_sites = model.system()->num_sites();
    auto bounds = kpm::Bounds(model.hamiltonian(), kpm::Config{}.lanczos_precision);

    // For each KPM moment n, report which optimized-size index the map
    // selects. The expected patterns below ramp up, plateau and ramp down:
    // early/late moments only need the sites near source/destination.
    auto size_indices = [](kpm::OptimizedHamiltonian const& oh, int num_moments) {
        auto v = std::vector<idx_t>(num_moments);
        for (auto n = 0; n < num_moments; ++n) {
            v[n] = oh.map().index(n, num_moments);
        }
        return v;
    };
    auto equals = [](std::vector<idx_t> const& v) { return Catch::Equals(v); };

    SECTION("Diagonal single") {
        auto oh = kpm::OptimizedHamiltonian(model.hamiltonian(), kpm::MatrixFormat::CSR, true);
        auto const i = model.system()->find_nearest({0, 0.07f, 0}, "B");
        oh.optimize_for({i, i}, bounds.scaling_factors());

        // The source site is reordered to index 0.
        REQUIRE(oh.idx().src[0] == 0);
        REQUIRE(oh.idx().dest[0] == 0);
        REQUIRE(oh.idx().is_diagonal());
        // map data holds cumulative sizes: from 1 (just the source)
        // up to the full system.
        REQUIRE(oh.map().get_data().front() == 1);
        REQUIRE(oh.map().get_data().back() == num_sites);
        REQUIRE(oh.map().get_data().size() == 5);
        REQUIRE(oh.map().get_src_offset() == 0);
        REQUIRE(oh.map().get_dest_offset() == 0);
        REQUIRE_THAT(size_indices(oh, 6), equals({0, 1, 2, 2, 1, 0}));
        REQUIRE_THAT(size_indices(oh, 9), equals({0, 1, 2, 3, 4, 3, 2, 1, 0}));
        REQUIRE_THAT(size_indices(oh, 12), equals({0, 1, 2, 3, 4, 4, 4, 4, 3, 2, 1, 0}));
    }
    SECTION("Diagonal multi 1") {
        // Two nearby source sites: both must be placed early in the
        // reordered system and share the same offset.
        auto oh = kpm::OptimizedHamiltonian(model.hamiltonian(), kpm::MatrixFormat::CSR, true);
        auto const i1 = model.system()->find_nearest({0, -0.07f, 0}, "A");
        auto const i2 = model.system()->find_nearest({0, 0.07f, 0}, "B");
        REQUIRE(i1 != i2);
        auto const idx = std::vector<idx_t>{i1, i2};
        oh.optimize_for({idx, idx}, bounds.scaling_factors());

        REQUIRE(oh.idx().src[0] == 0);
        REQUIRE(oh.idx().src[1] == 3);
        REQUIRE(oh.idx().dest[0] == 0);
        REQUIRE(oh.idx().dest[1] == 3);
        REQUIRE(oh.idx().is_diagonal());
        REQUIRE(oh.map().get_data().front() == 1);
        REQUIRE(oh.map().get_data().back() == num_sites);
        REQUIRE(oh.map().get_data().size() == 5);
        REQUIRE(oh.map().get_src_offset() == 1);
        REQUIRE(oh.map().get_dest_offset() == 1);
        REQUIRE_THAT(size_indices(oh, 6), equals({1, 2, 3, 3, 2, 1}));
        REQUIRE_THAT(size_indices(oh, 9), equals({1, 2, 3, 4, 4, 4, 3, 2, 1}));
        REQUIRE_THAT(size_indices(oh, 12), equals({1, 2, 3, 4, 4, 4, 4, 4, 4, 3, 2, 1}));
    }
    SECTION("Diagonal multi 2") {
        // Three source sites, one of them further away -> larger offset.
        auto oh = kpm::OptimizedHamiltonian(model.hamiltonian(), kpm::MatrixFormat::CSR, true);
        auto const i1 = model.system()->find_nearest({0, 0.07f, 0}, "B");
        auto const i2 = model.system()->find_nearest({0, -0.07f, 0}, "A");
        auto const i3 = model.system()->find_nearest({0, 0.35f, 0}, "A");
        auto const idx = std::vector<idx_t>{i1, i2, i3};
        oh.optimize_for({idx, idx}, bounds.scaling_factors());

        REQUIRE(oh.idx().src[0] == 0);
        REQUIRE(oh.idx().src[1] == 1);
        REQUIRE(oh.idx().src[2] == 15);
        REQUIRE(oh.idx().dest[0] == 0);
        REQUIRE(oh.idx().dest[1] == 1);
        REQUIRE(oh.idx().dest[2] == 15);
        REQUIRE(oh.idx().is_diagonal());
        REQUIRE(oh.map().get_data().front() == 1);
        REQUIRE(oh.map().get_data().back() == num_sites);
        REQUIRE(oh.map().get_data().size() == 5);
        REQUIRE(oh.map().get_src_offset() == 3);
        REQUIRE(oh.map().get_dest_offset() == 3);
        REQUIRE_THAT(size_indices(oh, 6), equals({3, 4, 4, 4, 4, 3}));
        REQUIRE_THAT(size_indices(oh, 9), equals({3, 4, 4, 4, 4, 4, 4, 4, 3}));
        REQUIRE_THAT(size_indices(oh, 12), equals({3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3}));
    }
    SECTION("Off-diagonal single") {
        // Distinct source and destination: the size pattern is asymmetric
        // because the ramps are anchored at different offsets.
        auto oh = kpm::OptimizedHamiltonian(model.hamiltonian(), kpm::MatrixFormat::CSR, true);
        auto const i = model.system()->find_nearest({0, 0.35f, 0}, "A");
        auto const j = model.system()->find_nearest({0, 0.07f, 0}, "B");
        oh.optimize_for({i, j}, bounds.scaling_factors());

        REQUIRE(oh.idx().src[0] == 0);
        REQUIRE(oh.idx().dest[0] == 8);
        REQUIRE(oh.idx().is_diagonal() == false);
        REQUIRE(oh.map().get_data().front() == 1);
        REQUIRE(oh.map().get_data().back() == num_sites);
        REQUIRE(oh.map().get_data().size() == 8);
        REQUIRE(oh.map().get_src_offset() == 0);
        REQUIRE(oh.map().get_dest_offset() == 3);
        REQUIRE_THAT(size_indices(oh, 6), equals({0, 1, 2, 3, 4, 3}));
        REQUIRE_THAT(size_indices(oh, 9), equals({0, 1, 2, 3, 4, 5, 5, 4, 3}));
        REQUIRE_THAT(size_indices(oh, 12), equals({0, 1, 2, 3, 4, 5, 6, 7, 6, 5, 4, 3}));
        REQUIRE_THAT(size_indices(oh, 14), equals({0, 1, 2, 3, 4, 5, 6, 7, 7, 7, 6, 5, 4, 3}));
    }
    SECTION("Off-diagonal multi 1") {
        // One source, several destinations.
        auto oh = kpm::OptimizedHamiltonian(model.hamiltonian(), kpm::MatrixFormat::CSR, true);
        auto const i = model.system()->find_nearest({0, 0.35f, 0}, "A");
        auto const j1 = model.system()->find_nearest({0, 0.07f, 0}, "B");
        auto const j2 = model.system()->find_nearest({0.12f, 0.14f, 0}, "A");
        auto const j3 = model.system()->find_nearest({0.12f, 0.28f, 0}, "B");
        oh.optimize_for({i, std::vector<idx_t>{j1, j2, j3}}, bounds.scaling_factors());

        REQUIRE(oh.idx().src[0] == 0);
        REQUIRE(oh.idx().dest[0] == 8);
        REQUIRE(oh.idx().dest[1] == 5);
        REQUIRE(oh.idx().dest[2] == 2);
        REQUIRE(oh.map().get_data().front() == 1);
        REQUIRE(oh.map().get_data().back() == num_sites);
        REQUIRE(oh.map().get_data().size() == 8);
        REQUIRE(oh.map().get_src_offset() == 0);
        REQUIRE(oh.map().get_dest_offset() == 3);
        REQUIRE_THAT(size_indices(oh, 6), equals({0, 1, 2, 3, 4, 3}));
        REQUIRE_THAT(size_indices(oh, 9), equals({0, 1, 2, 3, 4, 5, 5, 4, 3}));
        REQUIRE_THAT(size_indices(oh, 12), equals({0, 1, 2, 3, 4, 5, 6, 7, 6, 5, 4, 3}));
    }
    SECTION("Off-diagonal multi 2") {
        // Multiple sources and destinations.
        auto oh = kpm::OptimizedHamiltonian(model.hamiltonian(), kpm::MatrixFormat::CSR, true);
        auto const i1 = model.system()->find_nearest({0, 0.35f, 0}, "A");
        auto const i2 = model.system()->find_nearest({0, -0.35f, 0}, "B");
        auto const idx1 = std::vector<idx_t>{i1, i2};
        auto const j1 = model.system()->find_nearest({ 0.12f, 0.28f, 0}, "B");
        auto const j2 = model.system()->find_nearest({-0.12f, 0.28f, 0}, "B");
        auto const idx2 = std::vector<idx_t>{j1, j2};
        oh.optimize_for({idx1, idx2}, bounds.scaling_factors());

        REQUIRE(oh.idx().src[0] == 0);
        REQUIRE(oh.idx().src[1] == 18);
        REQUIRE(oh.idx().dest[0] == 2);
        REQUIRE(oh.idx().dest[1] == 1);
        REQUIRE(oh.idx().is_diagonal() == false);
        REQUIRE(oh.map().get_data().front() == 1);
        REQUIRE(oh.map().get_data().back() == num_sites);
        REQUIRE(oh.map().get_data().size() == 8);
        REQUIRE(oh.map().get_src_offset() == 7);
        REQUIRE(oh.map().get_dest_offset() == 1);
        REQUIRE_THAT(size_indices(oh, 6), equals({6, 5, 4, 3, 2, 1}));
        REQUIRE_THAT(size_indices(oh, 9), equals({7, 7, 7, 6, 5, 4, 3, 2, 1}));
        REQUIRE_THAT(size_indices(oh, 12), equals({7, 7, 7, 7, 7, 7, 6, 5, 4, 3, 2, 1}));
    }
}
TEST_CASE("OptimizedHamiltonian scaling") {
    auto model = Model(graphene::monolayer(), shape::rectangle(0.6f, 0.8f));
    auto oh = kpm::OptimizedHamiltonian(model.hamiltonian(), kpm::MatrixFormat::CSR, true);
    // Asymmetric energy bounds ensure a non-zero scaling shift `b`.
    auto bounds = kpm::Bounds(-12, 10); // ensures `scale.b != 0`
    oh.optimize_for({0, 0}, bounds.scaling_factors());
    auto scaled = oh.matrix().get<SparseMatrixX<float>>();
    // Because `scale.b != 0` the scaled Hamiltonian matrix should get
    // a non-zero diagonal even if the original matrix didn't have one,
    // adding exactly one entry per row.
    REQUIRE(scaled.nonZeros() == model.hamiltonian().non_zeros() + model.hamiltonian().rows());
}
/// Baseline Green's function results (diagonal and off-diagonal) captured
/// from the unoptimized run and compared against every optimized run.
/// Kept a plain aggregate -- no declared constructors -- so that the
/// `= {g_ii, g_ij}` aggregate assignments below remain valid even in
/// C++20, where a user-declared (even `= default`) constructor would
/// disqualify the struct as an aggregate.
struct TestGreensResult {
    ArrayXcd g_ii, g_ij;
};
/// Baseline DOS results (for 1, 2, 3 and 20 random vectors) captured from
/// the unoptimized run and compared against every optimized run.
/// Kept a plain aggregate -- no declared constructors -- so that the
/// `= {dos1, dos2, dos3, dos20}` aggregate assignments below remain valid
/// even in C++20, where a user-declared (even `= default`) constructor
/// would disqualify the struct as an aggregate.
struct TestDosResult {
    ArrayXd dos1, dos2, dos3, dos20;
};
/// Exercise a KPM compute backend with every combination of scalar type
/// (float/double, real/complex) and each of the given configurations.
/// For each scalar variant it checks internal consistency (greens_vector
/// vs greens vs ldos) and that every optimized config reproduces the
/// results of configs[0] (the unoptimized baseline).
/// Returns the baseline Green's results, one entry per scalar variant.
std::vector<TestGreensResult> test_kpm_core(kpm::Compute const& compute,
                                            std::vector<kpm::Config> const& configs) {
    constexpr auto pi = double{constant::pi};
    auto results = std::vector<TestGreensResult>();

    for (auto is_double_precision : {false, true}) {
        for (auto is_complex : {false, true}) {
            INFO("double: " << is_double_precision << ", complex: " << is_complex);
            auto const model = make_test_model(is_double_precision, is_complex);

            // Two probe sites well inside the system.
            auto const num_sites = model.system()->num_sites();
            auto const i = num_sites / 2;
            auto const j = num_sites / 4;
            auto const energy_range = ArrayXd::LinSpaced(10, -0.3, 0.3);
            auto const broadening = 0.8;
            auto const cols = std::vector<idx_t>{i, j, j+1, j+2};
            auto const precision = Eigen::NumTraits<float>::dummy_precision();

            auto unoptimized_greens = TestGreensResult();
            auto unoptimized_dos = TestDosResult();

            for (auto opt_level = size_t{0}; opt_level < configs.size(); ++opt_level) {
                INFO("opt_level: " << opt_level);
                auto core = kpm::Core(model.hamiltonian(), compute, configs[opt_level]);

                // Batched Green's: one source, several destination columns.
                auto const gs = core.greens_vector(i, cols, energy_range, broadening);
                REQUIRE(gs.size() == cols.size());
                REQUIRE_FALSE(gs[0].isApprox(gs[1], precision));
                REQUIRE_FALSE(gs[1].isApprox(gs[2], precision));

                // Single-pair calls must match the batched results.
                core.set_hamiltonian(model.hamiltonian());
                auto const g_ii = core.greens(i, i, energy_range, broadening);
                REQUIRE(g_ii.isApprox(gs[0], precision));
                auto const g_ij = core.greens(i, j, energy_range, broadening);
                REQUIRE(g_ij.isApprox(gs[1], precision));

                // For real Hamiltonians the Green's function is symmetric.
                if (!is_complex) {
                    auto const g_ji = core.greens(j, i, energy_range, broadening);
                    REQUIRE(g_ij.isApprox(g_ji, precision));
                }

                // LDOS must equal -Im(G_ii)/pi and batched LDOS must match
                // the per-site calls, independent of ordering/duplication.
                auto const ldos0 = core.ldos({i}, energy_range, broadening);
                auto const ldos1 = core.ldos({j}, energy_range, broadening);
                REQUIRE(ldos0.isApprox(-1/pi * g_ii.imag(), precision));
                REQUIRE_FALSE(ldos0.isApprox(ldos1, precision));

                auto const ldos2 = core.ldos({i, j, i, j, i, j, i, j, i, j},
                                             energy_range, broadening);
                REQUIRE(ldos2.cols() == 10);
                for (auto n = 0; n < 10; n += 2) {
                    REQUIRE(ldos0.isApprox(ldos2.col(n + 0), precision));
                    REQUIRE(ldos1.isApprox(ldos2.col(n + 1), precision));
                }

                if (opt_level == 0) {
                    unoptimized_greens = {g_ii, g_ij};
                } else {
                    REQUIRE(g_ii.isApprox(unoptimized_greens.g_ii, precision));
                    REQUIRE(g_ij.isApprox(unoptimized_greens.g_ij, precision));
                }

                // DOS with different random-vector counts: all distinct,
                // but each must be stable across optimization levels.
                auto const dos1 = core.dos(energy_range, broadening, 1);
                auto const dos2 = core.dos(energy_range, broadening, 2);
                auto const dos3 = core.dos(energy_range, broadening, 3);
                auto const dos20 = core.dos(energy_range, broadening, 20);
                REQUIRE_FALSE(dos1.isApprox(dos2, precision));
                REQUIRE_FALSE(dos2.isApprox(dos3, precision));
                REQUIRE_FALSE(dos3.isApprox(dos20, precision));
                REQUIRE_FALSE(dos20.isApprox(dos1, precision));

                if (opt_level == 0) {
                    unoptimized_dos = {dos1, dos2, dos3, dos20};
                } else {
                    REQUIRE(dos1.isApprox(unoptimized_dos.dos1, precision));
                    REQUIRE(dos2.isApprox(unoptimized_dos.dos2, precision));
                    REQUIRE(dos3.isApprox(unoptimized_dos.dos3, precision));
                    REQUIRE(dos20.isApprox(unoptimized_dos.dos20, precision));
                }
            } // for opt_level

            results.push_back(unoptimized_greens);
        } // for is_complex
    } // for is_double_precision

    return results;
}
TEST_CASE("KPM core", "[kpm]") {
    // Build a config for a given matrix format and algorithm flags.
    auto make_config = [](kpm::MatrixFormat matrix_format, bool optimal_size, bool interleaved) {
        auto config = kpm::Config{};
        config.matrix_format = matrix_format;
        config.algorithm.optimal_size = optimal_size;
        config.algorithm.interleaved = interleaved;
        return config;
    };

#ifndef CPB_USE_CUDA
    // First config (CSR, no optimizations) is the baseline the others
    // are compared against inside test_kpm_core().
    test_kpm_core(kpm::DefaultCompute(), {
        make_config(kpm::MatrixFormat::CSR, false, false),
        make_config(kpm::MatrixFormat::CSR, true, false),
        make_config(kpm::MatrixFormat::CSR, false, true),
        make_config(kpm::MatrixFormat::CSR, true, true),
        make_config(kpm::MatrixFormat::ELL, false, false),
        make_config(kpm::MatrixFormat::ELL, true, false),
        make_config(kpm::MatrixFormat::ELL, false, true),
        make_config(kpm::MatrixFormat::ELL, true, true),
    });
#else
    // NOTE(review): this branch calls `test_kpm_strategy`,
    // `kpm::DefaultStrategy` and `kpm::CudaStrategy`, none of which appear
    // in this file (the non-CUDA path above uses `test_kpm_core`). Verify
    // this still compiles when CPB_USE_CUDA is defined -- it looks stale.
    auto const cpu_results = test_kpm_strategy<kpm::DefaultStrategy>({
        make_config(kpm::MatrixFormat::ELL, true, true)
    });
    auto const cuda_results = test_kpm_strategy<kpm::CudaStrategy>({
        make_config(kpm::MatrixFormat::ELL, false, false),
        make_config(kpm::MatrixFormat::ELL, true, false)
    });

    // CUDA results must agree with the CPU reference.
    auto const precision = Eigen::NumTraits<float>::dummy_precision();
    for (auto i = 0u; i < cuda_results.size(); ++i) {
        REQUIRE(cuda_results[i].g_ii.isApprox(cpu_results[0].g_ii, precision));
        REQUIRE(cuda_results[i].g_ij.isApprox(cpu_results[0].g_ij, precision));
    }
#endif // CPB_USE_CUDA
}
#include <catch.hpp>
#include "numeric/dense.hpp"
using namespace cpb;
/// match() functor: writes 0 into element 3 of the dispatched vector (to
/// prove the variant hands out a mutable view of the original data) and
/// returns the scalar-type tag that was selected.
struct ArrayRefTestOp {
    template<class Vector>
    num::Tag operator()(Vector v) const {
        v[3] = 0;
        return num::detail::get_tag<typename Vector::Scalar>();
    }
};
TEST_CASE("ArrayRef and match", "[arrayref]") {
    // A real (double) vector dispatches to the f64 branch, and the write
    // made by the functor must be visible in the original vector.
    Eigen::Vector4d v1(0, 1, 2, 3);
    auto ref1 = RealArrayRef{arrayref(v1)};
    REQUIRE(num::match<VectorX>(ref1, ArrayRefTestOp{}) == num::Tag::f64);
    REQUIRE(v1[3] == .0);

    // A complex vector cannot be wrapped in a RealArrayRef, but works
    // with ComplexArrayRef and dispatches to the cf32 branch.
    Eigen::VectorXcf v2(4); v2 << 0, 1, 2, 3;
    REQUIRE_THROWS_WITH(RealArrayRef{arrayref(v2)}, "Invalid VariantArrayRef assignment");

    auto ref2 = ComplexArrayRef{arrayref(v2)};
    REQUIRE(num::match<VectorX>(ref2, ArrayRefTestOp{}) == num::Tag::cf32);
    REQUIRE(v2[3] == .0f);
}
/// Two-argument match2() functor: copies the first element of the first
/// vector (converted to the second vector's scalar type) into element 3
/// of the second vector, then reports the second vector's scalar tag.
struct ArrayRefTestOp2 {
    template<class VecA, class VecB>
    num::Tag operator()(VecA lhs, VecB rhs) const {
        using target_t = typename VecB::Scalar;
        auto const converted = lhs.template cast<target_t>();
        rhs[3] = converted[0];
        return num::detail::get_tag<target_t>();
    }
};
TEST_CASE("ArrayRef and match2", "[arrayref]") {
    // Mixed pair: real float source, complex double destination.
    Eigen::Vector4f v1(0, 1, 2, 3);
    auto ref1 = RealArrayConstRef{arrayref(v1)};
    Eigen::Vector4cd v2(0, 1, 2, 3);
    auto ref2 = ComplexArrayRef{arrayref(v2)};

    // match2 dispatches on both scalar types; the functor writes v1[0]
    // into v2[3] and reports the destination tag (cf64).
    REQUIRE((num::match2<VectorX, VectorX>(ref1, ref2, ArrayRefTestOp2{})) == num::Tag::cf64);
    REQUIRE(v2[3] == .0);

    // match2sp only accepts same-precision pairs, so float/double fails.
    REQUIRE_THROWS_WITH((num::match2sp<VectorX, VectorX>(ref1, ref2, ArrayRefTestOp2{})),
                        "A match was not found");
}
TEST_CASE("Aligned size") {
    // aligned_size<Scalar, Bytes>(n) rounds n up so that n elements of
    // Scalar fill a whole number of Bytes-sized blocks.
    REQUIRE((num::aligned_size<float, 16>(4) == 4));                  // 16 B: already aligned
    REQUIRE((num::aligned_size<std::complex<double>, 16>(2) == 2));   // 32 B: already aligned
    REQUIRE((num::aligned_size<std::complex<float>, 32>(9) == 12));   // 72 B -> 96 B (12 elems)
}
TEST_CASE("concat") {
    // Plain 1D arrays: concat appends the second after the first.
    auto const x1 = ArrayXf::Constant(3, 1).eval();
    auto const x2 = ArrayXf::LinSpaced(3, 2, 4).eval();
    auto expected_x = ArrayXf(6);
    expected_x << 1, 1, 1, 2, 3, 4;
    auto const result_x = concat(x1, x2);
    REQUIRE(result_x.isApprox(expected_x));

    auto const y1 = ArrayXf::Constant(3, 2).eval();
    auto const y2 = ArrayXf::LinSpaced(3, 3, 5).eval();
    auto expected_y = ArrayXf(6);
    expected_y << 2, 2, 2, 3, 4, 5;
    auto const result_y = concat(y1, y2);
    REQUIRE(result_y.isApprox(expected_y));

    auto const z1 = ArrayXf::Constant(3, 0).eval();
    auto const z2 = ArrayXf::Constant(3, -1).eval();
    auto expected_z = ArrayXf(6);
    expected_z << 0, 0, 0, -1, -1, -1;
    auto const result_z = concat(z1, z2);
    REQUIRE(result_z.isApprox(expected_z));

    // CartesianArray: concat must apply component-wise to x, y and z.
    auto const r1 = CartesianArray(x1, y1, z1);
    auto const r2 = CartesianArray(x2, y2, z2);
    auto const result = concat(r1, r2);
    REQUIRE(result.x.isApprox(expected_x));
    REQUIRE(result.y.isApprox(expected_y));
    REQUIRE(result.z.isApprox(expected_z));
}
#include <catch.hpp>
#include <complex>
#include "Model.hpp"
#include "detail/algorithm.hpp"
using namespace cpb;
namespace static_test_typelist {
    /// Compile-time checks for the TypeList utility: AnyOf must detect
    /// types that are in the list and reject types that are not.
    using List = TypeList<float, double, std::complex<float>, std::complex<double>>;
    static_assert(tl::AnyOf<List, float>::value, "");
    static_assert(!tl::AnyOf<List, int>::value, "");
}
TEST_CASE("Symmetry masks") {
    // make_masks(enabled_directions, ndim) must enumerate every 0/1
    // combination of the enabled translation directions -- 2^k masks
    // for k enabled directions, always including the all-zero mask.
    SECTION("1") {
        auto const masks = detail::make_masks({true, false, false}, 1);
        REQUIRE_THAT(masks, Catch::Equals(std::vector<Index3D>{{0, 0, 0}, {1, 0, 0}}));
    }
    SECTION("2-1") {
        // Only the second direction is enabled in a 2D system.
        auto const masks = detail::make_masks({false, true, false}, 2);
        REQUIRE_THAT(masks, Catch::Equals(std::vector<Index3D>{{0, 0, 0}, {0, 1, 0}}));
    }
    SECTION("2-2") {
        auto const masks = detail::make_masks({true, true, false}, 2);
        REQUIRE_THAT(masks, Catch::Equals(std::vector<Index3D>{
            {0, 0, 0}, {0, 1, 0}, {1, 0, 0}, {1, 1, 0}
        }));
    }
    SECTION("3") {
        auto const masks = detail::make_masks({true, true, true}, 3);
        REQUIRE_THAT(masks, Catch::Equals(std::vector<Index3D>{
            {0, 0, 0}, {0, 0, 1}, {0, 1, 0}, {0, 1, 1},
            {1, 0, 0}, {1, 0, 1}, {1, 1, 0}, {1, 1, 1}
        }));
    }
}
/// sliced() must partition a range into chunks of the requested size,
/// with the final chunk holding whatever remainder is left.
TEST_CASE("sliced") {
    // v = [0, 1, ..., 9]
    auto const v = []{
        auto result = std::vector<int>(10);
        std::iota(result.begin(), result.end(), 0);
        return result;
    }();
    REQUIRE_THAT(v, Catch::Equals(std::vector<int>{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}));

    // Collect each slice into its own vector. Constructing directly from
    // the slice's iterator range replaces the previous manual
    // std::copy + back_inserter into a default-constructed vector.
    auto vectors = std::vector<std::vector<int>>();
    for (auto const& slice : sliced(v, 3)) {
        vectors.emplace_back(slice.begin(), slice.end());
    }

    REQUIRE(vectors.size() == 4);
    REQUIRE_THAT(vectors[0], Catch::Equals(std::vector<int>{0, 1, 2}));
    REQUIRE_THAT(vectors[1], Catch::Equals(std::vector<int>{3, 4, 5}));
    REQUIRE_THAT(vectors[2], Catch::Equals(std::vector<int>{6, 7, 8}));
    REQUIRE_THAT(vectors[3], Catch::Equals(std::vector<int>{9}));
}
#include <catch.hpp>
#include "Model.hpp"
using namespace cpb;
TEST_CASE("Lattice") {
    auto lattice = Lattice({1, 0, 0}, {0, 1, 0});
    REQUIRE(lattice.ndim() == 2);
    // NOTE(review): this asserts `.capacity()` rather than `.size()`;
    // vector capacity is only guaranteed to be >= size, so this looks
    // implementation-dependent -- confirm `.size()` isn't the intent.
    REQUIRE(lattice.get_vectors().capacity() == 2);
    REQUIRE(lattice.max_hoppings() == 0);

    SECTION("Add sublattices") {
        // Names must be non-empty and unique; onsite energy is optional.
        REQUIRE_THROWS_WITH(lattice.add_sublattice("", {0, 0, 0}),
                            "Sublattice name can't be blank");

        lattice.add_sublattice("A", {0, 0, 0});
        REQUIRE_FALSE(lattice.has_diagonal_terms());
        REQUIRE_FALSE(lattice.has_onsite_energy());
        REQUIRE_THROWS_WITH(lattice.add_sublattice("A", {0, 0, 0}),
                            "Sublattice 'A' already exists");

        lattice.add_sublattice("B", {0, 0, 0}, 1.0);
        REQUIRE(lattice.has_diagonal_terms());
        REQUIRE(lattice.has_onsite_energy());

        // Aliases must refer to an existing sublattice (and can't be
        // self-referential).
        lattice.add_alias("B2", "B", {1, 0, 0});
        REQUIRE_THROWS_WITH(lattice.add_alias("B3", "bad_name", {2, 0, 0}),
                            "There is no sublattice named 'bad_name'");
        REQUIRE_THROWS_WITH(lattice.add_alias("B4", "B4", {2, 0, 0}),
                            "There is no sublattice named 'B4'");
    }
    SECTION("Add multi-orbital sublattice") {
        // A vector/matrix onsite term makes the model multi-orbital.
        REQUIRE_FALSE(Model(lattice).is_multiorbital());
        lattice.add_sublattice("A", {0, 0, 0}, VectorXd::Constant(2, 0.0).eval());
        REQUIRE(Model(lattice).is_multiorbital());
        REQUIRE_FALSE(lattice.has_diagonal_terms());
        REQUIRE_FALSE(lattice.has_onsite_energy());

        // Validation of the onsite matrix: non-empty, square, real
        // diagonal, and either upper triangular or Hermitian.
        auto const zero_dim = MatrixXcd::Zero(0, 0).eval();
        REQUIRE_THROWS_WITH(lattice.add_sublattice("zero_dim", {0, 0, 0}, zero_dim),
                            "The onsite hopping term can't be zero-dimensional");
        auto const not_square = MatrixXcd::Zero(2, 3).eval();
        REQUIRE_THROWS_WITH(lattice.add_sublattice("not_square", {0, 0, 0}, not_square),
                            "The onsite hopping term must be a real vector or a square matrix");
        auto complex_diagonal = MatrixXcd::Zero(2, 2).eval();
        complex_diagonal(0, 0) = std::complex<double>{0.0, 1.0};
        REQUIRE_THROWS_WITH(lattice.add_sublattice("complex_diag", {0, 0, 0}, complex_diagonal),
                            "The main diagonal of the onsite hopping term must be real");
        auto not_hermitian = MatrixXcd::Zero(2, 2).eval();
        not_hermitian(0, 1) = std::complex<double>{0.0, 1.0};
        not_hermitian(1, 0) = std::complex<double>{0.0, 1.0};
        REQUIRE_THROWS_WITH(lattice.add_sublattice("not_hermitian", {0, 0, 0}, not_hermitian),
                            "The onsite hopping matrix must be upper triangular or Hermitian");
        auto not_symmetric = MatrixXcd::Zero(2, 2).eval();
        not_symmetric(0, 1) = std::complex<double>{1.0, 0.0};
        not_symmetric(1, 0) = std::complex<double>{2.0, 0.0};
        REQUIRE_THROWS_WITH(lattice.add_sublattice("not_symmetric", {0, 0, 0}, not_symmetric),
                            "The onsite hopping matrix must be upper triangular or Hermitian");

        auto upper_triangular = MatrixXcd::Zero(2, 2).eval();
        upper_triangular(0, 1) = std::complex<double>{0.0, 1.0};
        REQUIRE_NOTHROW(lattice.add_sublattice("upper_triangular", {0, 0, 0}, upper_triangular));
        REQUIRE_FALSE(lattice.has_diagonal_terms());
        REQUIRE(lattice.has_onsite_energy());

        REQUIRE_NOTHROW(lattice.add_sublattice("diagonal", {0, 0, 0}, VectorXd::Ones(3).eval()));
        REQUIRE(lattice.has_diagonal_terms());
        REQUIRE(lattice.has_onsite_energy());
    }
    SECTION("Register hoppings") {
        // Hopping names must be non-empty and unique; a complex energy
        // makes the whole model complex.
        REQUIRE_THROWS_WITH(lattice.register_hopping_energy("", 0.0),
                            "Hopping name can't be blank");
        lattice.register_hopping_energy("t1", 1.0);
        REQUIRE_FALSE(Model(lattice).is_complex());
        REQUIRE_THROWS_WITH(lattice.register_hopping_energy("t1", 1.0),
                            "Hopping 't1' already exists");
        lattice.register_hopping_energy("t2", std::complex<double>{0, 1.0});
        REQUIRE(Model(lattice).is_complex());
    }
    SECTION("Add scalar hoppings") {
        lattice.add_sublattice("A", {0, 0, 0});
        lattice.add_sublattice("B", {0, 0, 0});
        lattice.register_hopping_energy("t1", 1.0);

        // A same-cell, same-sublattice term would be an onsite energy.
        REQUIRE_THROWS_WITH(lattice.add_hopping({0, 0, 0}, "A", "A", "t1"),
                            Catch::Contains("Don't define onsite energy here"));
        REQUIRE_THROWS_WITH(lattice.add_hopping({0, 0, 0}, "bad_name", "A", "t1"),
                            "There is no sublattice named 'bad_name'");
        REQUIRE_THROWS_WITH(lattice.add_hopping({0, 0, 0}, "A", "B", "bad_name"),
                            "There is no hopping named 'bad_name'");

        // max_hoppings() tracks the largest per-site neighbor count.
        lattice.add_hopping({1, 0, 0}, "A", "A", "t1");
        REQUIRE_THROWS_WITH(lattice.add_hopping({1, 0, 0}, "A", "A", "t1"),
                            "The specified hopping already exists.");
        REQUIRE(lattice.max_hoppings() == 2);
        lattice.add_hopping({1, 0, 0}, "A", "B", "t1");
        REQUIRE(lattice.max_hoppings() == 3);
        lattice.add_hopping({1, 0, 0}, "B", "B", "t1");
        REQUIRE(lattice.max_hoppings() == 3);

        // Passing a raw energy value registers an anonymous hopping family
        // the first time and reuses it afterwards.
        lattice.add_hopping({1, 1, 0}, "A", "A", 2.0);
        REQUIRE(lattice.get_hoppings().size() == 2);
        lattice.add_hopping({1, 1, 0}, "A", "B", 2.0);
        REQUIRE(lattice.get_hoppings().size() == 2);
    }
    SECTION("Add matrix hoppings") {
        // Matrix hopping terms must match the orbital counts of the
        // sublattices they connect.
        lattice.add_sublattice("A", {0, 0, 0}, VectorXd::Constant(2, 0.0).eval());
        lattice.add_sublattice("B", {0, 0, 0}, VectorXd::Constant(2, 0.0).eval());
        lattice.add_sublattice("C", {0, 0, 0}, VectorXd::Constant(3, 0.0).eval());
        lattice.register_hopping_energy("t22", MatrixXcd::Constant(2, 2, 1.0));
        lattice.register_hopping_energy("t23", MatrixXcd::Constant(2, 3, 1.0));
        lattice.register_hopping_energy("t32", MatrixXcd::Constant(3, 2, 1.0));

        REQUIRE(lattice.max_hoppings() == 2);
        lattice.add_hopping({0, 0, 0}, "A", "B", "t22");
        REQUIRE(lattice.max_hoppings() == 3);
        lattice.add_hopping({1, 0, 0}, "A", "A", "t22");
        REQUIRE(lattice.max_hoppings() == 7);
        lattice.add_hopping({0, 0, 0}, "A", "C", "t23");
        REQUIRE(lattice.max_hoppings() == 10);
        lattice.add_hopping({1, 0, 0}, "C", "A", "t32");
        REQUIRE(lattice.max_hoppings() == 13);

        REQUIRE_THROWS_WITH(lattice.add_hopping({0, 0, 0}, "A", "A", "t22"),
                            Catch::Contains("Don't define onsite energy here."));
        REQUIRE_THROWS_WITH(lattice.add_hopping({0, 0, 0}, "B", "C", "t22"),
                            "Hopping size mismatch: from 'B' (2) to 'C' (3) "
                            "with matrix 't22' (2, 2)");
        REQUIRE_THROWS_WITH(lattice.add_hopping({0, 0, 0}, "C", "B", "t23"),
                            "Hopping size mismatch: from 'C' (3) to 'B' (2) "
                            "with matrix 't23' (2, 3)");

        REQUIRE_THROWS_WITH(lattice.register_hopping_energy("zero_dim", MatrixXcd::Zero(0, 0)),
                            "Hoppings can't be zero-dimensional");
    }
    SECTION("Calculate position") {
        lattice.add_sublattice("A", {0, 0, 0.5});
        // Cell index (1, 2) plus the sublattice offset along z.
        REQUIRE(lattice.calc_position({1, 2, 0}, "A").isApprox(Cartesian(1, 2, 0.5)));
    }
    SECTION("Set offset") {
        // Offsets are limited to half a unit cell in each direction.
        REQUIRE_NOTHROW(lattice.set_offset({0.5f, 0.5f, 0}));
        REQUIRE_THROWS_WITH(lattice.set_offset({0.6f, 0, 0}),
                            Catch::Contains("must not be moved by more than half"));
        REQUIRE_THROWS_WITH(lattice.set_offset({0, -0.6f, 0}),
                            Catch::Contains("must not be moved by more than half"));

        auto const copy = lattice.with_offset({0.5f, 0, 0});
        REQUIRE(copy.calc_position({1, 2, 0}).isApprox(Cartesian(1.5f, 2, 0)));
    }
    SECTION("Min neighbors") {
        auto const copy = lattice.with_min_neighbors(3);
        REQUIRE(copy.get_min_neighbors() == 3);
    }
}
TEST_CASE("Lattice translate coordinates") {
    // Non-orthogonal lattice vectors: translate_coordinates converts from
    // Cartesian to lattice-vector coordinates. The third component is
    // dropped for this 2D lattice.
    auto const lattice = Lattice({1, 0, 0}, {1, 1, 0});
    REQUIRE(lattice.translate_coordinates({1, 0, 0}).isApprox(Vector3f(1, 0, 0)));
    REQUIRE(lattice.translate_coordinates({1.5, 0.5, 0}).isApprox(Vector3f(1, 0.5, 0)));
    REQUIRE(lattice.translate_coordinates({0, 0, 1}).isApprox(Vector3f(0, 0, 0)));
}
TEST_CASE("Optimized unit cell") {
    auto lattice = Lattice({1, 0, 0});

    // Shorthands: add a sublattice with `norb` orbitals, or an alias of an
    // existing sublattice, all at the origin.
    auto add_sublattice = [&lattice](string_view name, int norb) {
        lattice.add_sublattice(name, Cartesian{0, 0, 0}, VectorXd::Constant(norb, 0.0).eval());
    };
    auto add_alias = [&lattice](string_view name, string_view original) {
        lattice.add_alias(name, original, Cartesian{0, 0, 0});
    };
    // The alias ID of every site in the optimized unit cell, in the order
    // the cell arranges them.
    auto alias_ids = [&lattice]() {
        auto const unit_cell = lattice.optimized_unit_cell();
        auto v = std::vector<storage_idx_t>(lattice.nsub());
        std::transform(unit_cell.begin(), unit_cell.end(), v.begin(),
                       [](OptimizedUnitCell::Site const& site) { return site.alias_id.value(); });
        return v;
    };
    auto equals = [](std::vector<storage_idx_t> const& v) { return Catch::Equals(v); };

    // The expected sequences below show sites ordered by orbital count and
    // aliases grouped next to the sublattice they refer to.
    add_sublattice("0", 1);
    add_sublattice("1", 1);
    add_alias("2", "0");
    REQUIRE_THAT(alias_ids(), equals({0, 0, 1}));

    add_sublattice("3", 2);
    REQUIRE_THAT(alias_ids(), equals({0, 0, 1, 3}));
    add_sublattice("4", 1);
    REQUIRE_THAT(alias_ids(), equals({0, 0, 1, 4, 3}));
    add_sublattice("5", 3);
    add_alias("6", "3");
    REQUIRE_THAT(alias_ids(), equals({0, 0, 1, 4, 3, 3, 5}));

    add_sublattice("7", 2);
    add_sublattice("8", 2);
    add_sublattice("9", 3);
    add_sublattice("10", 5);
    add_sublattice("11", 4);
    add_sublattice("12", 1);
    REQUIRE_THAT(alias_ids(), equals({0, 0, 1, 4, 12, 3, 3, 7, 8, 5, 9, 11, 10}));

    add_alias("13", "1");
    add_alias("14", "3");
    add_alias("15", "7");
    add_alias("16", "10");
    REQUIRE_THAT(alias_ids(), equals({0, 0, 1, 1, 4, 12, 3, 3, 3, 7, 7, 8, 5, 9, 11, 10, 10}));
}
| C++ |
2D | dean0x7d/pybinding | cppcore/tests/test_modifiers.cpp | .cpp | 10,490 | 295 | #include <catch.hpp>
#include "fixtures.hpp"
using namespace cpb;
// A SiteStateModifier can invalidate (remove) sites. Added before any generator
// it is applied to the foundation; added after a generator it is applied to the
// already finalized system. `count` records the size of the state array seen
// for each sublattice, which reveals which stage the modifier ran at.
TEST_CASE("SiteStateModifier") {
    auto model = Model(lattice::square_2atom(), Primitive(2));
    REQUIRE(model.system()->num_sites() == 4);
    auto count = std::unordered_map<std::string, idx_t>();
    // Marks the first "A" site as invalid (when there is one left).
    auto remove_site = [&](Eigen::Ref<ArrayX<bool>> state, CartesianArrayConstRef, string_view s) {
        count[s] = state.size();
        if (s == "A" && state.size() != 0) {
            state[0] = false;
        }
    };
    SECTION("Apply to foundation") {
        model.add(SiteStateModifier(remove_site));
        // Foundation arrays have 2 entries per sublattice; one "A" site is removed.
        REQUIRE(model.system()->num_sites() == 3);
        REQUIRE(count["A"] == 2);
        REQUIRE(count["B"] == 2);
        // NOTE(review): the second ctor argument appears to be a minimum-neighbor
        // requirement -- with 1 it removes an additional dangling site.
        model.add(SiteStateModifier(remove_site, 1));
        REQUIRE(model.system()->num_sites() == 2);
        REQUIRE(count["A"] == 2);
        REQUIRE(count["B"] == 2);
        // Requiring 2 neighbors leaves no valid structure -> building throws.
        model.add(SiteStateModifier(remove_site, 2));
        REQUIRE_THROWS(model.system());
        REQUIRE(count["A"] == 2);
        REQUIRE(count["B"] == 2);
    }
    SECTION("Apply to system") {
        model.add(SiteStateModifier(remove_site));
        // A generator finalizes the system, so subsequent modifiers see
        // per-sublattice system arrays instead of the foundation.
        model.add(generator::do_nothing_hopping());
        model.add(SiteStateModifier(remove_site));
        REQUIRE(model.system()->num_sites() == 2);
        REQUIRE(count["A"] == 1);  // only one "A" site had survived the first pass
        REQUIRE(count["B"] == 2);
        model.add(generator::do_nothing_hopping("_t2"));
        model.add(SiteStateModifier(remove_site));
        REQUIRE(model.system()->num_sites() == 2);
        REQUIRE(count["A"] == 0);  // no "A" sites remain at this point
        REQUIRE(count["B"] == 2);
        // The min-neighbor variant is not supported after a generator.
        model.add(SiteStateModifier(remove_site, 1));
        REQUIRE_THROWS_WITH(model.system(), Catch::Contains("has not been implemented yet"));
        REQUIRE(count["A"] == 0);
        REQUIRE(count["B"] == 2);
    }
}
// A PositionModifier moves sites. Applied to the foundation it sees all
// candidate sites (arrays of 25 per sublattice here); applied to the finalized
// system it only sees the sites that survived the shape cut.
TEST_CASE("SitePositionModifier") {
    auto model = Model(lattice::square_2atom(), shape::rectangle(2, 2));
    REQUIRE(model.system()->num_sites() == 6);
    REQUIRE(model.system()->positions.y[1] == Approx(-1));
    auto count = std::unordered_map<std::string, idx_t>();
    constexpr auto moved_pos = 10.0f;
    // Moves every "B" site to y = moved_pos and records the array size
    // passed in for each sublattice name.
    auto move_site = PositionModifier([&](CartesianArrayRef position, string_view sublattice) {
        count[sublattice] = position.size();
        if (sublattice == "B") {
            position.y().setConstant(moved_pos);
        }
    });
    SECTION("Apply to foundation") {
        model.add(move_site);
        model.eval();
        // Foundation arrays: all candidate sites, before the shape cut.
        REQUIRE(count["A"] == 25);
        REQUIRE(count["B"] == 25);
        REQUIRE(model.system()->num_sites() == 6);
        // The two "B" sites (system indices 4 and 5) ended up at y == moved_pos.
        REQUIRE(model.system()->positions.y.segment(4, 2).isApproxToConstant(moved_pos));
    }
    SECTION("Apply to system") {
        // After a generator the modifier only sees the finalized system sites.
        model.add(generator::do_nothing_hopping());
        model.add(move_site);
        model.eval();
        REQUIRE(count["A"] == 4);
        REQUIRE(count["B"] == 2);
        REQUIRE(model.system()->num_sites() == 6);
        REQUIRE(model.system()->positions.y.segment(4, 2).isApproxToConstant(moved_pos));
    }
}
// Modifiers are applied in the order they were added: a state modifier that
// reads a position interacts with a position modifier that changes it.
TEST_CASE("State and position modifier ordering") {
    auto model = Model(lattice::square_2atom(), Primitive(2));
    // Invalidates the first "A" site, but only while it sits at x < 0.
    auto delete_site = SiteStateModifier([](Eigen::Ref<ArrayX<bool>> state,
                                            CartesianArrayConstRef position,
                                            string_view sublattice) {
        if (sublattice == "A" && position.x()[0] < 0) {
            state[0] = false;
        }
    });
    // Moves the first "A" site to x = 10.
    auto move_site = PositionModifier([](CartesianArrayRef position, string_view sublattice) {
        if (sublattice == "A") {
            position.x()[0] = 10;
        }
    });
    SECTION("State before position") {
        REQUIRE(model.system()->num_sites() == 4);
        REQUIRE(model.system()->positions.x[0] == Approx(-1));
        REQUIRE(model.system()->positions.x[1] == Approx(0));
        // Deletion runs first (x == -1 triggers it), so the site is gone
        // before it could have been moved.
        model.add(delete_site);
        model.add(move_site);
        REQUIRE(model.system()->num_sites() == 3);
        REQUIRE(model.system()->positions.x[0] == Approx(0));
    }
    SECTION("Position before state") {
        REQUIRE(model.system()->num_sites() == 4);
        REQUIRE(model.system()->positions.x[0] == Approx(-1));
        REQUIRE(model.system()->positions.x[1] == Approx(0));
        // Moving first puts the site at x == 10, so the delete condition
        // (x < 0) never fires and all 4 sites survive.
        model.add(move_site);
        model.add(delete_site);
        REQUIRE(model.system()->num_sites() == 4);
        REQUIRE(model.system()->positions.x[0] == Approx(10));
        REQUIRE(model.system()->positions.x[1] == Approx(0));
    }
}
/// Generic functor which overwrites every onsite-energy element with 1.
/// Dispatched through `num::match` so it works for any scalar type.
struct OnsiteEnergyOp {
    template<class Array>
    void operator()(Array energy) { energy.fill(1); }
};
// Setting a nonzero onsite energy adds diagonal entries to the Hamiltonian.
TEST_CASE("OnsiteEnergyModifier") {
    auto model = Model(lattice::square_2atom());
    auto const& h_init = model.hamiltonian();
    REQUIRE(h_init.rows() == 2);
    REQUIRE(h_init.non_zeros() == 2);  // no onsite terms yet
    // Overwrite all onsite energies with 1, for whatever scalar type is in use.
    model.add(OnsiteModifier([](ComplexArrayRef energy, CartesianArrayConstRef, string_view) {
        num::match<ArrayX>(energy, OnsiteEnergyOp{});
    }));
    auto const& h = model.hamiltonian();
    REQUIRE(h.rows() == 2);
    REQUIRE(h.non_zeros() == 4);  // the 2 original entries + 2 new diagonal ones
}
/// Generic functor which zeroes out every hopping-energy element.
/// Dispatched through `num::match` so it works for any scalar type.
struct HoppingEnergyOp {
    template<class Array>
    void operator()(Array energy) { energy.fill(0); }
};
// Hoppings set to zero are pruned entirely from the Hamiltonian matrix.
TEST_CASE("HoppingEnergyModifier") {
    auto model = Model(lattice::square_2atom());
    auto const& h_init = model.hamiltonian();
    REQUIRE(h_init.rows() == 2);
    REQUIRE(h_init.non_zeros() == 2);
    // Zero out every hopping energy, for whatever scalar type is in use.
    model.add(HoppingModifier([](ComplexArrayRef energy, CartesianArrayConstRef,
                                 CartesianArrayConstRef, string_view) {
        num::match<ArrayX>(energy, HoppingEnergyOp{});
    }));
    auto const& h = model.hamiltonian();
    REQUIRE(h.rows() == 2);
    REQUIRE(h.non_zeros() == 0);  // zeroed entries are not stored at all
}
// A SiteGenerator appends entirely new sites (a new site family) to a system
// built from a lattice that knows nothing about them.
TEST_CASE("SiteGenerator") {
    auto model = Model([]{
        auto lattice = Lattice({1, 0, 0}, {0, 1, 0});
        lattice.add_sublattice("A", {0, 0, 0});
        lattice.add_sublattice("B", {0, 0, 0});
        lattice.register_hopping_energy("t1", 1.0);
        return lattice;
    }());
    REQUIRE_FALSE(model.is_complex());
    REQUIRE(model.get_lattice().get_hoppings().size() == 1);
    REQUIRE(model.system()->hopping_blocks.nnz() == 0);  // "t1" is registered but unused
    SECTION("Errors") {
        auto const noop = [](System const&) { return CartesianArray(); };
        // The onsite term must be a real vector or a square matrix...
        auto const complex_vector = MatrixXcd::Constant(1, 2, 2.0);
        REQUIRE_THROWS_WITH(model.add(SiteGenerator("C", complex_vector, noop)),
                            Catch::Contains("must be a real vector or a square matrix"));
        // ...and a square matrix must have a real diagonal.
        auto const complex_matrix = MatrixXcd::Constant(2, 2, {1.0, 1.0});
        REQUIRE_THROWS_WITH(model.add(SiteGenerator("C", complex_matrix, noop)),
                            Catch::Contains("diagonal of the onsite hopping term must be real"));
    }
    SECTION("Structure") {
        auto const energy = MatrixXcd::Constant(1, 1, 2.0);
        // Generate 5 new "C" sites along x = 1, y = 1..5.
        model.add(SiteGenerator("C", energy, [](System const&) {
            auto const size = 5;
            auto x = ArrayXf::Constant(size, 1);
            auto y = ArrayXf::LinSpaced(size, 1, 5);
            auto z = ArrayXf::Constant(size, 0);
            return CartesianArray(x, y, z);
        }));
        REQUIRE_FALSE(model.is_complex());
        // The generated family shows up in the site registry (3 entries)
        // but not in the lattice itself (still 2 sublattices).
        REQUIRE(model.get_lattice().get_sublattices().size() == 2);
        REQUIRE(model.get_site_registry().size() == 3);
        REQUIRE(model.system()->compressed_sublattices.alias_ids().size() == 3);
        REQUIRE(model.system()->num_sites() == 7);
        // Original A and B sites at the origin, then the 5 generated sites.
        REQUIRE(model.system()->positions[0].isApprox(Cartesian{0, 0, 0}));
        REQUIRE(model.system()->positions[1].isApprox(Cartesian{0, 0, 0}));
        REQUIRE(model.system()->positions[2].isApprox(Cartesian{1, 1, 0}));
        REQUIRE(model.system()->positions[3].isApprox(Cartesian{1, 2, 0}));
        REQUIRE(model.system()->positions[4].isApprox(Cartesian{1, 3, 0}));
        REQUIRE(model.system()->positions[5].isApprox(Cartesian{1, 4, 0}));
        REQUIRE(model.system()->positions[6].isApprox(Cartesian{1, 5, 0}));
        // The new sites form their own compressed sublattice:
        // 5 sites with a single orbital each, tagged with the "C" registry ID.
        auto const names = model.get_site_registry().name_map();
        auto const it = names.find("C");
        REQUIRE(it != names.end());
        auto const id = it->second;
        auto const& cs = model.system()->compressed_sublattices;
        REQUIRE(cs.alias_ids()[2] == id);
        REQUIRE(cs.orbital_counts()[2] == 1);
        REQUIRE(cs.site_counts()[2] == 5);
    }
}
// A HoppingGenerator adds a new hopping family and connects existing sites
// by plain site index.
TEST_CASE("HoppingGenerator") {
    auto model = Model([]{
        auto lattice = Lattice({1, 0, 0}, {0, 1, 0});
        lattice.add_sublattice("A", {0, 0, 0});
        lattice.add_sublattice("B", {0, 0, 0});
        lattice.register_hopping_energy("t1", 1.0);
        return lattice;
    }());
    REQUIRE_FALSE(model.is_complex());
    REQUIRE(model.get_lattice().get_hoppings().size() == 1);
    REQUIRE(model.system()->hopping_blocks.nnz() == 0);  // "t1" is registered but unused
    SECTION("Add real generator") {
        // Connect site 0 -> site 1 with a new real-valued hopping "t2".
        model.add(HoppingGenerator("t2", 2.0, [](System const&) {
            auto r = HoppingGenerator::Result{ArrayXi(1), ArrayXi(1)};
            r.from << 0;
            r.to << 1;
            return r;
        }));
        REQUIRE_FALSE(model.is_complex());
        // The generated hopping lives in the hopping registry (2 entries),
        // not in the lattice itself (still 1 entry).
        REQUIRE(model.get_lattice().get_hoppings().size() == 1);
        REQUIRE(model.get_hopping_registry().size() == 2);
        REQUIRE(model.system()->hopping_blocks.nnz() == 1);
        // The matrix entry at (0, 1) stores the registry ID of "t2".
        auto const hop_names = model.get_hopping_registry().name_map();
        auto const hopping_it = hop_names.find("t2");
        REQUIRE(hopping_it != hop_names.end());
        auto const hopping_id = hopping_it->second;
        REQUIRE(model.system()->hopping_blocks.tocsr().coeff(0, 1) == hopping_id);
    }
    SECTION("Add complex generator") {
        // A complex hopping energy forces the whole model to be complex,
        // even when the generator connects no sites at all.
        model.add(HoppingGenerator("t2", std::complex<double>{0.0, 1.0}, [](System const&) {
            return HoppingGenerator::Result{ArrayXi(), ArrayXi()};
        }));
        REQUIRE(model.is_complex());
        REQUIRE(model.system()->hopping_blocks.nnz() == 0);
    }
    SECTION("Upper triangular form should be preserved") {
        // (0 -> 1) and its reverse (1 -> 0) must collapse into a single
        // upper-triangular entry.
        model.add(HoppingGenerator("t2", 2.0, [](System const&) {
            auto r = HoppingGenerator::Result{ArrayXi(2), ArrayXi(2)};
            r.from << 0, 1;
            r.to << 1, 0;
            return r;
        }));
        REQUIRE(model.system()->hopping_blocks.nnz() == 1);
        auto const csr = model.system()->hopping_blocks.tocsr();
        REQUIRE(csr.coeff(0, 1) == 1);
        REQUIRE(csr.coeff(1, 0) == 0);
    }
}
| C++ |
2D | dean0x7d/pybinding | cppcore/tests/test_compute.cpp | .cpp | 5,711 | 163 | #include <catch.hpp>
#include "compute/lanczos.hpp"
#include "compute/kernel_polynomial.hpp"
#include "fixtures.hpp"
using namespace cpb;
// The Lanczos min/max eigenvalue bounds of periodic graphene must equal the
// analytic band edges (+/- |3t|) and converge in a deterministic number of loops.
TEST_CASE("Lanczos", "[lanczos]") {
    auto const model = Model(graphene::monolayer(), Primitive(5, 5),
                             TranslationalSymmetry(1, 1));
    auto const& matrix = ham::get_reference<std::complex<float>>(model.hamiltonian());
    // Run the computation several times: the result must not vary between runs.
    auto loop_counters = std::vector<int>(3);
    for (auto& count : loop_counters) {
        auto const bounds = compute::minmax_eigenvalues(matrix, 1e-3f);
        auto const expected = abs(3 * graphene::t);
        REQUIRE(bounds.max == Approx(expected));
        REQUIRE(bounds.min == Approx(-expected));
        count = bounds.loops;
    }
    // Every run must have taken exactly the same number of iterations.
    auto const all_equal = std::all_of(loop_counters.begin(), loop_counters.end(),
                                       [&](int c) { return c == loop_counters.front(); });
    REQUIRE(all_equal);
}
/// Make a random `rows x cols` CSR matrix with roughly 10% fill.
/// Values are drawn uniformly from [0, 1) and only those with |value| < 0.1
/// are kept. For complex scalar types the imaginary part is set to half of
/// the real part. The generator is default-seeded, so repeated calls with the
/// same arguments produce the same matrix.
template<class scalar_t>
SparseMatrixX<scalar_t> make_random_csr(idx_t rows, idx_t cols) {
    using real_t = num::get_real_t<scalar_t>;
    using complex_t = num::get_complex_t<scalar_t>;
    auto generator = std::mt19937();
    auto distribution = std::uniform_real_distribution<real_t>(0.0, 1.0);
    auto triplets = std::vector<Eigen::Triplet<scalar_t>>();
    for (auto i = storage_idx_t{0}; i < rows; ++i) {
        for (auto j = storage_idx_t{0}; j < cols; ++j) {
            auto value = static_cast<scalar_t>(distribution(generator));
            // Set the imaginary part only when `scalar_t` is actually complex.
            var::variant<real_t*, complex_t*>(&value).match(
                [](real_t*) { /* real scalar: nothing to do */ },
                [](complex_t* p) { p->imag(real_t{0.5} * p->real()); }
            );
            if (abs(value) < 0.1) { triplets.emplace_back(i, j, value); }
        }
    }
    auto m = SparseMatrixX<scalar_t>(rows, cols);
    m.setFromTriplets(triplets.begin(), triplets.end());
    m.makeCompressed();
    // Mark as rvalue so the return avoids a deep copy of the sparse storage.
    return m.markAsRValue();
}
/// Approximate floating-point equality for real scalars.
template<class real_t>
bool approx_equal(real_t a, real_t b) {
    return a == Approx(b);
}

/// Approximate equality for complex scalars. `Approx` only handles real
/// values, so the real and imaginary parts are compared separately.
template<class real_t>
bool approx_equal(std::complex<real_t> a, std::complex<real_t> b) {
    return a.real() == Approx(b.real()) && a.imag() == Approx(b.imag());
}

/// Approximate element-wise equality for fixed-size arrays: map the raw
/// storage as Eigen expressions and reuse `isApprox`.
template<class scalar_t, size_t size>
bool approx_equal(std::array<scalar_t, size> const& a, std::array<scalar_t, size> const& b) {
    auto ma = Eigen::Map<ArrayX<scalar_t> const>(a.data(), size);
    auto mb = Eigen::Map<ArrayX<scalar_t> const>(b.data(), size);
    return ma.isApprox(mb);
}
/// Identity conversion: the CSR matrix is already in the requested format.
template<class scalar_t>
SparseMatrixX<scalar_t> convert_sparse(SparseMatrixX<scalar_t> const& csr,
                                       var::tag<SparseMatrixX<scalar_t>>) {
    return csr;
}

/// Convert a CSR matrix into ELLPACK storage.
template<class scalar_t>
num::EllMatrix<scalar_t> convert_sparse(SparseMatrixX<scalar_t> const& csr,
                                        var::tag<num::EllMatrix<scalar_t>>) {
    return num::csr_to_ell(csr);
}
/// Exercise the KPM sparse matrix-vector product kernels for one matrix format.
/// Checks the basic product `r = M * x - y`, its multi-column variant, and the
/// "diagonal" variants which additionally accumulate the moments
/// m2 = |x|^2 and m3 = <r, x>, all against reference results computed with
/// plain Eigen expressions.
template<class SparseMatrix, class scalar_t = typename SparseMatrix::Scalar>
void test_kpm_spmv(idx_t size) {
    // The multi-column variant uses one column per SIMD lane of `scalar_t`.
    constexpr auto cols = static_cast<idx_t>(simd::traits<scalar_t>::size);
    auto const ref_matrix = make_random_csr<scalar_t>(size, size);
    auto const matrix = convert_sparse(ref_matrix, var::tag<SparseMatrix>{});
    auto const x = VectorX<scalar_t>::Random(size).eval();
    auto const y = VectorX<scalar_t>::Random(size).eval();
    auto const xx = MatrixX<scalar_t>::Random(size, cols).eval();
    auto const yy = MatrixX<scalar_t>::Random(size, cols).eval();
    // Reference results computed directly with Eigen.
    auto const expected_r = (ref_matrix * x - y).eval();
    auto const expected_m2 = static_cast<scalar_t>(x.squaredNorm());
    auto const expected_m3 = static_cast<scalar_t>(expected_r.dot(x));
    auto const expected_rr = (ref_matrix * xx - yy).eval();
    // Per-column moments for the multi-column variant.
    auto const expected_m22 = [&] {
        auto m = simd::array<scalar_t>{{0}};
        for (auto i = 0; i < cols; ++i) {
            m[i] = xx.col(i).squaredNorm();
        }
        return m;
    }();
    auto const expected_m33 = [&] {
        auto m = simd::array<scalar_t>{{0}};
        for (auto i = 0; i < cols; ++i) {
            m[i] = expected_rr.col(i).dot(xx.col(i));
        }
        return m;
    }();
    // Outputs, re-initialized before each kernel call. The kernels update
    // `r`/`rr` in place starting from `y`/`yy`.
    auto r = VectorX<scalar_t>();
    auto rr = MatrixX<scalar_t>();
    auto m2 = scalar_t{0};
    auto m3 = scalar_t{0};
    auto m22 = simd::array<scalar_t>{{0}};
    auto m33 = simd::array<scalar_t>{{0}};
    auto reset_variables = [&]() {
        r = y;
        rr = yy;
        m2 = scalar_t{0};
        m3 = scalar_t{0};
        m22 = simd::array<scalar_t>{{0}};
        m33 = simd::array<scalar_t>{{0}};
    };
    // Plain product, single column.
    reset_variables();
    compute::kpm_spmv(0, size, matrix, x, r);
    REQUIRE(r.isApprox(expected_r));
    // Plain product, multiple columns.
    reset_variables();
    compute::kpm_spmv(0, size, matrix, xx, rr);
    REQUIRE(rr.isApprox(expected_rr));
    // Diagonal variant: also accumulates the m2/m3 moments.
    reset_variables();
    compute::kpm_spmv_diagonal(0, size, matrix, x, r, m2, m3);
    REQUIRE(r.isApprox(expected_r));
    REQUIRE(approx_equal(m2, expected_m2));
    REQUIRE(approx_equal(m3, expected_m3));
    // Diagonal variant, multiple columns with per-column moments.
    reset_variables();
    compute::kpm_spmv_diagonal(0, size, matrix, xx, rr, m22, m33);
    REQUIRE(rr.isApprox(expected_rr));
    REQUIRE(approx_equal(m22, expected_m22));
    REQUIRE(approx_equal(m33, expected_m33));
}
// Instantiate the SpMV test for both storage formats (CSR and ELLPACK)
// and all four supported scalar types.
TEST_CASE("KPM SpMV") {
    constexpr auto size = 100;
    test_kpm_spmv<SparseMatrixX<float>>(size);
    test_kpm_spmv<SparseMatrixX<std::complex<float>>>(size);
    test_kpm_spmv<SparseMatrixX<double>>(size);
    test_kpm_spmv<SparseMatrixX<std::complex<double>>>(size);
    test_kpm_spmv<num::EllMatrix<float>>(size);
    test_kpm_spmv<num::EllMatrix<std::complex<float>>>(size);
    test_kpm_spmv<num::EllMatrix<double>>(size);
    test_kpm_spmv<num::EllMatrix<std::complex<double>>>(size);
}
| C++ |
2D | dean0x7d/pybinding | cppcore/tests/fixtures.hpp | .hpp | 1,144 | 48 | #pragma once
#include "Model.hpp"
// Lattice fixtures shared between the C++ tests.
namespace lattice {
    cpb::Lattice square(float a = 1.f, float t = 1.f);
    // Square lattice with two sublattices ("A" and "B") and two hopping
    // energies -- used extensively by the modifier tests.
    cpb::Lattice square_2atom(float a = 1.f, float t1 = 1.f, float t2 = 2.f);
    cpb::Lattice square_multiorbital();
    cpb::Lattice checkerboard_multiorbital();
    cpb::Lattice hexagonal_complex();
} // namespace lattice

// Graphene material constants and monolayer lattice fixture.
namespace graphene {
    static constexpr auto a = 0.24595f;   // [nm] unit cell length
    static constexpr auto a_cc = 0.142f;  // [nm] carbon-carbon distance
    static constexpr auto t = -2.8f;      // [eV] nearest neighbor hopping
    cpb::Lattice monolayer();
} // namespace graphene

// Shape fixtures.
namespace shape {
    cpb::Shape rectangle(float x, float y);
} // namespace shape

// Onsite/hopping modifier fixtures acting as external fields.
namespace field {
    cpb::OnsiteModifier constant_potential(float value);
    cpb::HoppingModifier constant_magnetic_field(float value);
    cpb::OnsiteModifier linear_onsite(float k = 1.f);
    cpb::HoppingModifier linear_hopping(float k = 1.f);
    // Modifiers which only change the Hamiltonian's scalar type, not its values.
    cpb::HoppingModifier force_double_precision();
    cpb::HoppingModifier force_complex_numbers();
} // namespace field

namespace generator {
    // Generator that adds a hopping family without changing the structure;
    // the tests use it to force later modifiers to apply to the finalized system.
    cpb::HoppingGenerator do_nothing_hopping(std::string const& name = "_t");
} // namespace generator
| Unknown |
2D | dean0x7d/pybinding | pybinding/__about__.py | .py | 359 | 11 | """Package for numerical tight-binding calculations in solid state physics"""
# Distribution metadata for the pybinding package.
__title__ = "pybinding"
__version__ = "0.9.6.dev"  # PEP 440 development version
__summary__ = "Package for tight-binding calculations"
__url__ = "https://github.com/dean0x7d/pybinding"
__author__ = "Dean Moldovan"
__copyright__ = "2015-2020, " + __author__
__email__ = "dean0x7d@gmail.com"
__license__ = "BSD"
| Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.