Dataset schema (each record below lists these fields in order):

column           dtype           range
keyword          stringclasses   7 values
repo_name        stringlengths   8 to 98
file_path        stringlengths   4 to 244
file_extension   stringclasses   29 values
file_size        int64           0 to 84.1M
line_count       int64           0 to 1.6M
content          stringlengths   1 to 84.1M
language         stringclasses   14 values
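The records that follow repeat these eight fields in order, one source file per record. As a rough illustration only, here is a minimal Python sketch of how rows with this schema could be loaded and filtered with the Hugging Face datasets library; the dataset id "user/keyword-code-files" is a hypothetical placeholder, since the actual repository name is not stated here.

# Minimal sketch, assuming these rows are published as a Hugging Face dataset.
# "user/keyword-code-files" is a hypothetical placeholder id, not a real repository.
from datasets import load_dataset

ds = load_dataset("user/keyword-code-files", split="train")

# Keep only the C++ records tagged with the "2D" keyword, as in the sample rows below.
cpp_2d = ds.filter(lambda row: row["keyword"] == "2D" and row["language"] == "C++")

# Inspect a few records: repository, path, size metadata and the raw file contents.
for row in cpp_2d.select(range(3)):
    print(row["repo_name"], row["file_path"], row["file_size"], row["line_count"])
    print(row["content"][:200])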
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp
.cpp
362
8
SelfAdjointEigenSolver<Matrix4f> es;
Matrix4f X = Matrix4f::Random(4,4);
Matrix4f A = X + X.transpose();
es.compute(A);
cout << "The eigenvalues of A are: " << es.eigenvalues().transpose() << endl;
es.compute(A + Matrix4f::Identity(4,4)); // re-use es to compute eigenvalues of A+I
cout << "The eigenvalues of A+I are: " << es.eigenvalues().transpose() << endl;
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/snippets/Matrix_setZero_int.cpp
.cpp
45
4
VectorXf v;
v.setZero(3);
cout << v << endl;
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/snippets/MatrixBase_rightCols_int.cpp
.cpp
239
7
Array44i a = Array44i::Random();
cout << "Here is the array a:" << endl << a << endl;
cout << "Here is a.rightCols(2):" << endl;
cout << a.rightCols(2) << endl;
a.rightCols(2).setZero();
cout << "Now the array a is:" << endl << a << endl;
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/snippets/Matrix_setOnes_int_int.cpp
.cpp
48
4
MatrixXf m;
m.setOnes(3, 3);
cout << m << endl;
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/snippets/Cwise_log.cpp
.cpp
43
3
Array3d v(1,2,3);
cout << v.log() << endl;
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/snippets/MatrixBase_template_int_int_topLeftCorner.cpp
.cpp
265
7
Matrix4i m = Matrix4i::Random();
cout << "Here is the matrix m:" << endl << m << endl;
cout << "Here is m.topLeftCorner<2,2>():" << endl;
cout << m.topLeftCorner<2,2>() << endl;
m.topLeftCorner<2,2>().setZero();
cout << "Now the matrix m is:" << endl << m << endl;
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/snippets/Cwise_greater_equal.cpp
.cpp
52
3
Array3d v(1,2,3), w(3,2,1);
cout << (v>=w) << endl;
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/snippets/DenseBase_setLinSpaced.cpp
.cpp
60
4
VectorXf v;
v.setLinSpaced(5,0.5f,1.5f);
cout << v << endl;
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/snippets/Cwise_cosh.cpp
.cpp
64
3
ArrayXd v = ArrayXd::LinSpaced(5,0,1);
cout << cosh(v) << endl;
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/snippets/ComplexSchur_matrixT.cpp
.cpp
263
5
MatrixXcf A = MatrixXcf::Random(4,4);
cout << "Here is a random 4x4 matrix, A:" << endl << A << endl << endl;
ComplexSchur<MatrixXcf> schurOfA(A, false); // false means do not compute U
cout << "The triangular matrix T is:" << endl << schurOfA.matrixT() << endl;
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/snippets/DirectionWise_replicate_int.cpp
.cpp
179
5
Vector3i v = Vector3i::Random(); cout << "Here is the vector v:" << endl << v << endl; cout << "v.rowwise().replicate(5) = ..." << endl; cout << v.rowwise().replicate(5) << endl;
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/snippets/MatrixBase_replicate_int_int.cpp
.cpp
163
5
Vector3i v = Vector3i::Random(); cout << "Here is the vector v:" << endl << v << endl; cout << "v.replicate(2,5) = ..." << endl; cout << v.replicate(2,5) << endl;
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/snippets/MatrixBase_cwiseProduct.cpp
.cpp
153
5
Matrix3i a = Matrix3i::Random(), b = Matrix3i::Random();
Matrix3i c = a.cwiseProduct(b);
cout << "a:\n" << a << "\nb:\n" << b << "\nc:\n" << c << endl;
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/snippets/MatrixBase_cwiseEqual.cpp
.cpp
276
8
MatrixXi m(2,2);
m << 1, 0, 1, 1;
cout << "Comparing m with identity matrix:" << endl;
cout << m.cwiseEqual(MatrixXi::Identity(2,2)) << endl;
Index count = m.cwiseEqual(MatrixXi::Identity(2,2)).count();
cout << "Number of coefficients that are equal: " << count << endl;
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/snippets/Cwise_exp.cpp
.cpp
43
3
Array3d v(1,2,3);
cout << v.exp() << endl;
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/snippets/ComplexSchur_compute.cpp
.cpp
301
7
MatrixXcf A = MatrixXcf::Random(4,4);
ComplexSchur<MatrixXcf> schur(4);
schur.compute(A);
cout << "The matrix T in the decomposition of A is:" << endl << schur.matrixT() << endl;
schur.compute(A.inverse());
cout << "The matrix T in the decomposition of A^(-1) is:" << endl << schur.matrixT() << endl;
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_1nn.cpp
.cpp
440
25
#include <iostream> #include <Eigen/Dense> using namespace std; using namespace Eigen; int main() { Eigen::MatrixXf m(2,4); Eigen::VectorXf v(2); m << 1, 23, 6, 9, 3, 11, 7, 2; v << 2, 3; MatrixXf::Index index; // find nearest neighbour (m.colwise() - v).colwise().squaredNorm().minCoeff(&index); cout << "Nearest neighbour is column " << index << ":" << endl; cout << m.col(index) << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/DenseBase_template_int_middleCols.cpp
.cpp
283
16
#include <Eigen/Core> #include <iostream> using namespace Eigen; using namespace std; int main(void) { int const N = 5; MatrixXi A(N,N); A.setRandom(); cout << "A =\n" << A << '\n' << endl; cout << "A(:,1..3) =\n" << A.middleCols<3>(1) << endl; return 0; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/tut_arithmetic_redux_basic.cpp
.cpp
529
17
#include <iostream> #include <Eigen/Dense> using namespace std; int main() { Eigen::Matrix2d mat; mat << 1, 2, 3, 4; cout << "Here is mat.sum(): " << mat.sum() << endl; cout << "Here is mat.prod(): " << mat.prod() << endl; cout << "Here is mat.mean(): " << mat.mean() << endl; cout << "Here is mat.minCoeff(): " << mat.minCoeff() << endl; cout << "Here is mat.maxCoeff(): " << mat.maxCoeff() << endl; cout << "Here is mat.trace(): " << mat.trace() << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/tut_arithmetic_matrix_mul.cpp
.cpp
612
20
#include <iostream> #include <Eigen/Dense> using namespace Eigen; int main() { Matrix2d mat; mat << 1, 2, 3, 4; Vector2d u(-1,1), v(2,0); std::cout << "Here is mat*mat:\n" << mat*mat << std::endl; std::cout << "Here is mat*u:\n" << mat*u << std::endl; std::cout << "Here is u^T*mat:\n" << u.transpose()*mat << std::endl; std::cout << "Here is u^T*v:\n" << u.transpose()*v << std::endl; std::cout << "Here is u*v^T:\n" << u*v.transpose() << std::endl; std::cout << "Let's multiply mat by itself" << std::endl; mat = mat*mat; std::cout << "Now mat is mat:\n" << mat << std::endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_rowwise.cpp
.cpp
244
14
#include <iostream> #include <Eigen/Dense> using namespace std; int main() { Eigen::MatrixXf mat(2,4); mat << 1, 2, 6, 9, 3, 1, 7, 2; std::cout << "Row's maximum: " << std::endl << mat.rowwise().maxCoeff() << std::endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/tut_arithmetic_dot_cross.cpp
.cpp
393
16
#include <iostream> #include <Eigen/Dense> using namespace Eigen; using namespace std; int main() { Vector3d v(1,2,3); Vector3d w(0,1,2); cout << "Dot product: " << v.dot(w) << endl; double dp = v.adjoint()*w; // automatic conversion of the inner product to a scalar cout << "Dot product via a matrix product: " << dp << endl; cout << "Cross product:\n" << v.cross(w) << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_operatornorm.cpp
.cpp
447
19
#include <Eigen/Dense> #include <iostream> using namespace Eigen; using namespace std; int main() { MatrixXf m(2,2); m << 1,-2, -3,4; cout << "1-norm(m) = " << m.cwiseAbs().colwise().sum().maxCoeff() << " == " << m.colwise().lpNorm<1>().maxCoeff() << endl; cout << "infty-norm(m) = " << m.cwiseAbs().rowwise().sum().maxCoeff() << " == " << m.rowwise().lpNorm<1>().maxCoeff() << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_visitors.cpp
.cpp
531
27
#include <iostream> #include <Eigen/Dense> using namespace std; using namespace Eigen; int main() { Eigen::MatrixXf m(2,2); m << 1, 2, 3, 4; //get location of maximum MatrixXf::Index maxRow, maxCol; float max = m.maxCoeff(&maxRow, &maxCol); //get location of minimum MatrixXf::Index minRow, minCol; float min = m.minCoeff(&minRow, &minCol); cout << "Max: " << max << ", at: " << maxRow << "," << maxCol << endl; cout << "Min: " << min << ", at: " << minRow << "," << minCol << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_BlockOperations_colrow.cpp
.cpp
390
18
#include <Eigen/Dense> #include <iostream> using namespace std; int main() { Eigen::MatrixXf m(3,3); m << 1,2,3, 4,5,6, 7,8,9; cout << "Here is the matrix m:" << endl << m << endl; cout << "2nd Row: " << m.row(1) << endl; m.col(2) += 3 * m.col(0); cout << "After adding 3 times the first column into the third column, the matrix m is:\n"; cout << m << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/class_CwiseBinaryOp.cpp
.cpp
526
19
#include <Eigen/Core> #include <iostream> using namespace Eigen; using namespace std; // define a custom template binary functor template<typename Scalar> struct MakeComplexOp { EIGEN_EMPTY_STRUCT_CTOR(MakeComplexOp) typedef complex<Scalar> result_type; complex<Scalar> operator()(const Scalar& a, const Scalar& b) const { return complex<Scalar>(a,b); } }; int main(int, char**) { Matrix4d m1 = Matrix4d::Random(), m2 = Matrix4d::Random(); cout << m1.binaryExpr(m2, MakeComplexOp<double>()) << endl; return 0; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_ArrayClass_addition.cpp
.cpp
400
24
#include <Eigen/Dense> #include <iostream> using namespace Eigen; using namespace std; int main() { ArrayXXf a(3,3); ArrayXXf b(3,3); a << 1,2,3, 4,5,6, 7,8,9; b << 1,2,3, 1,2,3, 1,2,3; // Adding two arrays cout << "a + b = " << endl << a + b << endl << endl; // Subtracting a scalar from an array cout << "a - 2 = " << endl << a - 2 << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/make_circulant2.cpp
.cpp
1,320
53
#include <Eigen/Core> #include <iostream> using namespace Eigen; // [circulant_func] template<class ArgType> class circulant_functor { const ArgType &m_vec; public: circulant_functor(const ArgType& arg) : m_vec(arg) {} const typename ArgType::Scalar& operator() (Index row, Index col) const { Index index = row - col; if (index < 0) index += m_vec.size(); return m_vec(index); } }; // [circulant_func] // [square] template<class ArgType> struct circulant_helper { typedef Matrix<typename ArgType::Scalar, ArgType::SizeAtCompileTime, ArgType::SizeAtCompileTime, ColMajor, ArgType::MaxSizeAtCompileTime, ArgType::MaxSizeAtCompileTime> MatrixType; }; // [square] // [makeCirculant] template <class ArgType> CwiseNullaryOp<circulant_functor<ArgType>, typename circulant_helper<ArgType>::MatrixType> makeCirculant(const Eigen::MatrixBase<ArgType>& arg) { typedef typename circulant_helper<ArgType>::MatrixType MatrixType; return MatrixType::NullaryExpr(arg.size(), arg.size(), circulant_functor<ArgType>(arg.derived())); } // [makeCirculant] // [main] int main() { Eigen::VectorXd vec(4); vec << 1, 2, 4, 8; Eigen::MatrixXd mat; mat = makeCirculant(vec); std::cout << mat << std::endl; } // [main]
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/TutorialLinAlgSVDSolve.cpp
.cpp
405
16
#include <iostream> #include <Eigen/Dense> using namespace std; using namespace Eigen; int main() { MatrixXf A = MatrixXf::Random(3, 2); cout << "Here is the matrix A:\n" << A << endl; VectorXf b = VectorXf::Random(3); cout << "Here is the right hand side b:\n" << b << endl; cout << "The least-squares solution is:\n" << A.bdcSvd(ComputeThinU | ComputeThinV).solve(b) << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_ArrayClass_cwise_other.cpp
.cpp
410
20
#include <Eigen/Dense> #include <iostream> using namespace Eigen; using namespace std; int main() { ArrayXf a = ArrayXf::Random(5); a *= 2; cout << "a =" << endl << a << endl; cout << "a.abs() =" << endl << a.abs() << endl; cout << "a.abs().sqrt() =" << endl << a.abs().sqrt() << endl; cout << "a.min(a.abs().sqrt()) =" << endl << a.min(a.abs().sqrt()) << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/QuickStart_example.cpp
.cpp
206
15
#include <iostream> #include <Eigen/Dense> using Eigen::MatrixXd; int main() { MatrixXd m(2,2); m(0,0) = 3; m(1,0) = 2.5; m(0,1) = -1; m(1,1) = m(1,0) + m(0,1); std::cout << m << std::endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/matrixfree_cg.cpp
.cpp
4,275
130
#include <iostream> #include <Eigen/Core> #include <Eigen/Dense> #include <Eigen/IterativeLinearSolvers> #include <unsupported/Eigen/IterativeSolvers> class MatrixReplacement; using Eigen::SparseMatrix; namespace Eigen { namespace internal { // MatrixReplacement looks-like a SparseMatrix, so let's inherits its traits: template<> struct traits<MatrixReplacement> : public Eigen::internal::traits<Eigen::SparseMatrix<double> > {}; } } // Example of a matrix-free wrapper from a user type to Eigen's compatible type // For the sake of simplicity, this example simply wrap a Eigen::SparseMatrix. class MatrixReplacement : public Eigen::EigenBase<MatrixReplacement> { public: // Required typedefs, constants, and method: typedef double Scalar; typedef double RealScalar; typedef int StorageIndex; enum { ColsAtCompileTime = Eigen::Dynamic, MaxColsAtCompileTime = Eigen::Dynamic, IsRowMajor = false }; Index rows() const { return mp_mat->rows(); } Index cols() const { return mp_mat->cols(); } template<typename Rhs> Eigen::Product<MatrixReplacement,Rhs,Eigen::AliasFreeProduct> operator*(const Eigen::MatrixBase<Rhs>& x) const { return Eigen::Product<MatrixReplacement,Rhs,Eigen::AliasFreeProduct>(*this, x.derived()); } // Custom API: MatrixReplacement() : mp_mat(0) {} void attachMyMatrix(const SparseMatrix<double> &mat) { mp_mat = &mat; } const SparseMatrix<double> my_matrix() const { return *mp_mat; } private: const SparseMatrix<double> *mp_mat; }; // Implementation of MatrixReplacement * Eigen::DenseVector though a specialization of internal::generic_product_impl: namespace Eigen { namespace internal { template<typename Rhs> struct generic_product_impl<MatrixReplacement, Rhs, SparseShape, DenseShape, GemvProduct> // GEMV stands for matrix-vector : generic_product_impl_base<MatrixReplacement,Rhs,generic_product_impl<MatrixReplacement,Rhs> > { typedef typename Product<MatrixReplacement,Rhs>::Scalar Scalar; template<typename Dest> static void scaleAndAddTo(Dest& dst, const MatrixReplacement& lhs, const Rhs& rhs, const Scalar& alpha) { // This method should implement "dst += alpha * lhs * rhs" inplace, // however, for iterative solvers, alpha is always equal to 1, so let's not bother about it. 
assert(alpha==Scalar(1) && "scaling is not implemented"); EIGEN_ONLY_USED_FOR_DEBUG(alpha); // Here we could simply call dst.noalias() += lhs.my_matrix() * rhs, // but let's do something fancier (and less efficient): for(Index i=0; i<lhs.cols(); ++i) dst += rhs(i) * lhs.my_matrix().col(i); } }; } } int main() { int n = 10; Eigen::SparseMatrix<double> S = Eigen::MatrixXd::Random(n,n).sparseView(0.5,1); S = S.transpose()*S; MatrixReplacement A; A.attachMyMatrix(S); Eigen::VectorXd b(n), x; b.setRandom(); // Solve Ax = b using various iterative solver with matrix-free version: { Eigen::ConjugateGradient<MatrixReplacement, Eigen::Lower|Eigen::Upper, Eigen::IdentityPreconditioner> cg; cg.compute(A); x = cg.solve(b); std::cout << "CG: #iterations: " << cg.iterations() << ", estimated error: " << cg.error() << std::endl; } { Eigen::BiCGSTAB<MatrixReplacement, Eigen::IdentityPreconditioner> bicg; bicg.compute(A); x = bicg.solve(b); std::cout << "BiCGSTAB: #iterations: " << bicg.iterations() << ", estimated error: " << bicg.error() << std::endl; } { Eigen::GMRES<MatrixReplacement, Eigen::IdentityPreconditioner> gmres; gmres.compute(A); x = gmres.solve(b); std::cout << "GMRES: #iterations: " << gmres.iterations() << ", estimated error: " << gmres.error() << std::endl; } { Eigen::DGMRES<MatrixReplacement, Eigen::IdentityPreconditioner> gmres; gmres.compute(A); x = gmres.solve(b); std::cout << "DGMRES: #iterations: " << gmres.iterations() << ", estimated error: " << gmres.error() << std::endl; } { Eigen::MINRES<MatrixReplacement, Eigen::Lower|Eigen::Upper, Eigen::IdentityPreconditioner> minres; minres.compute(A); x = minres.solve(b); std::cout << "MINRES: #iterations: " << minres.iterations() << ", estimated error: " << minres.error() << std::endl; } }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/DenseBase_middleRows_int.cpp
.cpp
282
16
#include <Eigen/Core> #include <iostream> using namespace Eigen; using namespace std; int main(void) { int const N = 5; MatrixXi A(N,N); A.setRandom(); cout << "A =\n" << A << '\n' << endl; cout << "A(2..3,:) =\n" << A.middleRows(2,2) << endl; return 0; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Cwise_erf.cpp
.cpp
189
10
#include <Eigen/Core> #include <unsupported/Eigen/SpecialFunctions> #include <iostream> using namespace Eigen; int main() { Array4d v(-0.5,2,0,-7); std::cout << v.erf() << std::endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_ArrayClass_mult.cpp
.cpp
237
17
#include <Eigen/Dense> #include <iostream> using namespace Eigen; using namespace std; int main() { ArrayXXf a(2,2); ArrayXXf b(2,2); a << 1,2, 3,4; b << 5,6, 7,8; cout << "a * b = " << endl << a * b << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/DenseBase_template_int_middleRows.cpp
.cpp
283
16
#include <Eigen/Core> #include <iostream> using namespace Eigen; using namespace std; int main(void) { int const N = 5; MatrixXi A(N,N); A.setRandom(); cout << "A =\n" << A << '\n' << endl; cout << "A(1..3,:) =\n" << A.middleRows<3>(1) << endl; return 0; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/class_FixedVectorBlock.cpp
.cpp
673
28
#include <Eigen/Core> #include <iostream> using namespace Eigen; using namespace std; template<typename Derived> Eigen::VectorBlock<Derived, 2> firstTwo(MatrixBase<Derived>& v) { return Eigen::VectorBlock<Derived, 2>(v.derived(), 0); } template<typename Derived> const Eigen::VectorBlock<const Derived, 2> firstTwo(const MatrixBase<Derived>& v) { return Eigen::VectorBlock<const Derived, 2>(v.derived(), 0); } int main(int, char**) { Matrix<int,1,6> v; v << 1,2,3,4,5,6; cout << firstTwo(4*v) << endl; // calls the const version firstTwo(v) *= 2; // calls the non-const version cout << "Now the vector v is:" << endl << v << endl; return 0; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Cwise_erfc.cpp
.cpp
190
10
#include <Eigen/Core> #include <unsupported/Eigen/SpecialFunctions> #include <iostream> using namespace Eigen; int main() { Array4d v(-0.5,2,0,-7); std::cout << v.erfc() << std::endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/TutorialLinAlgExComputeSolveError.cpp
.cpp
371
15
#include <iostream> #include <Eigen/Dense> using namespace std; using namespace Eigen; int main() { MatrixXd A = MatrixXd::Random(100,100); MatrixXd b = MatrixXd::Random(100,50); MatrixXd x = A.fullPivLu().solve(b); double relative_error = (A*x - b).norm() / b.norm(); // norm() is L2 norm cout << "The relative error is:\n" << relative_error << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_ArrayClass_interop.cpp
.cpp
444
23
#include <Eigen/Dense> #include <iostream> using namespace Eigen; using namespace std; int main() { MatrixXf m(2,2); MatrixXf n(2,2); MatrixXf result(2,2); m << 1,2, 3,4; n << 5,6, 7,8; result = (m.array() + 4).matrix() * m; cout << "-- Combination 1: --" << endl << result << endl << endl; result = (m.array() * n.array()).matrix() * m; cout << "-- Combination 2: --" << endl << result << endl << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/make_circulant.cpp
.cpp
366
12
/* This program is presented in several fragments in the doc page. Every fragment is in its own file; this file simply combines them. */ #include "make_circulant.cpp.preamble" #include "make_circulant.cpp.traits" #include "make_circulant.cpp.expression" #include "make_circulant.cpp.evaluator" #include "make_circulant.cpp.entry" #include "make_circulant.cpp.main"
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/tut_matrix_resize.cpp
.cpp
489
19
#include <iostream> #include <Eigen/Dense> using namespace Eigen; int main() { MatrixXd m(2,5); m.resize(4,3); std::cout << "The matrix m is of size " << m.rows() << "x" << m.cols() << std::endl; std::cout << "It has " << m.size() << " coefficients" << std::endl; VectorXd v(2); v.resize(5); std::cout << "The vector v is of size " << v.size() << std::endl; std::cout << "As a matrix, v is of size " << v.rows() << "x" << v.cols() << std::endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_BlockOperations_corner.cpp
.cpp
448
18
#include <Eigen/Dense> #include <iostream> using namespace std; int main() { Eigen::Matrix4f m; m << 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,11,12, 13,14,15,16; cout << "m.leftCols(2) =" << endl << m.leftCols(2) << endl << endl; cout << "m.bottomRows<2>() =" << endl << m.bottomRows<2>() << endl << endl; m.topLeftCorner(1,3) = m.bottomRightCorner(3,1).transpose(); cout << "After assignment, m = " << endl << m << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/TutorialLinAlgExSolveLDLT.cpp
.cpp
356
17
#include <iostream> #include <Eigen/Dense> using namespace std; using namespace Eigen; int main() { Matrix2f A, b; A << 2, -1, -1, 3; b << 1, 2, 3, 1; cout << "Here is the matrix A:\n" << A << endl; cout << "Here is the right hand side b:\n" << b << endl; Matrix2f x = A.ldlt().solve(b); cout << "The solution is:\n" << x << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_maxnorm.cpp
.cpp
502
21
#include <iostream> #include <Eigen/Dense> using namespace std; using namespace Eigen; int main() { MatrixXf mat(2,4); mat << 1, 2, 6, 9, 3, 1, 7, 2; MatrixXf::Index maxIndex; float maxNorm = mat.colwise().sum().maxCoeff(&maxIndex); std::cout << "Maximum sum at position " << maxIndex << std::endl; std::cout << "The corresponding vector is: " << std::endl; std::cout << mat.col( maxIndex ) << std::endl; std::cout << "And its sum is is: " << maxNorm << std::endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/class_VectorBlock.cpp
.cpp
775
28
#include <Eigen/Core> #include <iostream> using namespace Eigen; using namespace std; template<typename Derived> Eigen::VectorBlock<Derived> segmentFromRange(MatrixBase<Derived>& v, int start, int end) { return Eigen::VectorBlock<Derived>(v.derived(), start, end-start); } template<typename Derived> const Eigen::VectorBlock<const Derived> segmentFromRange(const MatrixBase<Derived>& v, int start, int end) { return Eigen::VectorBlock<const Derived>(v.derived(), start, end-start); } int main(int, char**) { Matrix<int,1,6> v; v << 1,2,3,4,5,6; cout << segmentFromRange(2*v, 2, 4) << endl; // calls the const version segmentFromRange(v, 1, 3) *= 5; // calls the non-const version cout << "Now the vector v is:" << endl << v << endl; return 0; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Cwise_lgamma.cpp
.cpp
192
10
#include <Eigen/Core> #include <unsupported/Eigen/SpecialFunctions> #include <iostream> using namespace Eigen; int main() { Array4d v(0.5,10,0,-1); std::cout << v.lgamma() << std::endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/class_Block.cpp
.cpp
737
28
#include <Eigen/Core> #include <iostream> using namespace Eigen; using namespace std; template<typename Derived> Eigen::Block<Derived> topLeftCorner(MatrixBase<Derived>& m, int rows, int cols) { return Eigen::Block<Derived>(m.derived(), 0, 0, rows, cols); } template<typename Derived> const Eigen::Block<const Derived> topLeftCorner(const MatrixBase<Derived>& m, int rows, int cols) { return Eigen::Block<const Derived>(m.derived(), 0, 0, rows, cols); } int main(int, char**) { Matrix4d m = Matrix4d::Identity(); cout << topLeftCorner(4*m, 2, 3) << endl; // calls the const version topLeftCorner(m, 2, 3) *= 5; // calls the non-const version cout << "Now the matrix m is:" << endl << m << endl; return 0; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_BlockOperations_block_assignment.cpp
.cpp
523
19
#include <Eigen/Dense> #include <iostream> using namespace std; using namespace Eigen; int main() { Array22f m; m << 1,2, 3,4; Array44f a = Array44f::Constant(0.6); cout << "Here is the array a:" << endl << a << endl << endl; a.block<2,2>(1,1) = m; cout << "Here is now a with m copied into its central 2x2 block:" << endl << a << endl << endl; a.block(0,0,2,3) = a.block(2,1,2,3); cout << "Here is now a with bottom-right 2x3 block copied into top-left 2x3 block:" << endl << a << endl << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple.cpp
.cpp
356
22
#include <iostream> #include <Eigen/Dense> using namespace std; int main() { Eigen::MatrixXf mat(2,4); Eigen::VectorXf v(2); mat << 1, 2, 6, 9, 3, 1, 7, 2; v << 0, 1; //add v to each column of m mat.colwise() += v; std::cout << "Broadcasting result: " << std::endl; std::cout << mat << std::endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/tut_arithmetic_add_sub.cpp
.cpp
471
23
#include <iostream> #include <Eigen/Dense> using namespace Eigen; int main() { Matrix2d a; a << 1, 2, 3, 4; MatrixXd b(2,2); b << 2, 3, 1, 4; std::cout << "a + b =\n" << a + b << std::endl; std::cout << "a - b =\n" << a - b << std::endl; std::cout << "Doing a += b;" << std::endl; a += b; std::cout << "Now a =\n" << a << std::endl; Vector3d v(1,2,3); Vector3d w(1,0,0); std::cout << "-v + w - v =\n" << -v + w - v << std::endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple_rowwise.cpp
.cpp
361
21
#include <iostream> #include <Eigen/Dense> using namespace std; int main() { Eigen::MatrixXf mat(2,4); Eigen::VectorXf v(4); mat << 1, 2, 6, 9, 3, 1, 7, 2; v << 0,1,2,3; //add v to each row of m mat.rowwise() += v.transpose(); std::cout << "Broadcasting result: " << std::endl; std::cout << mat << std::endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/class_FixedBlock.cpp
.cpp
697
28
#include <Eigen/Core> #include <iostream> using namespace Eigen; using namespace std; template<typename Derived> Eigen::Block<Derived, 2, 2> topLeft2x2Corner(MatrixBase<Derived>& m) { return Eigen::Block<Derived, 2, 2>(m.derived(), 0, 0); } template<typename Derived> const Eigen::Block<const Derived, 2, 2> topLeft2x2Corner(const MatrixBase<Derived>& m) { return Eigen::Block<const Derived, 2, 2>(m.derived(), 0, 0); } int main(int, char**) { Matrix3d m = Matrix3d::Identity(); cout << topLeft2x2Corner(4*m) << endl; // calls the const version topLeft2x2Corner(m) *= 2; // calls the non-const version cout << "Now the matrix m is:" << endl << m << endl; return 0; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/tut_arithmetic_scalar_mul_div.cpp
.cpp
353
18
#include <iostream> #include <Eigen/Dense> using namespace Eigen; int main() { Matrix2d a; a << 1, 2, 3, 4; Vector3d v(1,2,3); std::cout << "a * 2.5 =\n" << a * 2.5 << std::endl; std::cout << "0.1 * v =\n" << 0.1 * v << std::endl; std::cout << "Doing v *= 2;" << std::endl; v *= 2; std::cout << "Now v =\n" << v << std::endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/function_taking_ref.cpp
.cpp
594
20
#include <iostream> #include <Eigen/SVD> using namespace Eigen; using namespace std; float inv_cond(const Ref<const MatrixXf>& a) { const VectorXf sing_vals = a.jacobiSvd().singularValues(); return sing_vals(sing_vals.size()-1) / sing_vals(0); } int main() { Matrix4f m = Matrix4f::Random(); cout << "matrix m:" << endl << m << endl << endl; cout << "inv_cond(m): " << inv_cond(m) << endl; cout << "inv_cond(m(1:3,1:3)): " << inv_cond(m.topLeftCorner(3,3)) << endl; cout << "inv_cond(m+I): " << inv_cond(m+Matrix4f::Identity()) << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/TutorialLinAlgRankRevealing.cpp
.cpp
600
21
#include <iostream> #include <Eigen/Dense> using namespace std; using namespace Eigen; int main() { Matrix3f A; A << 1, 2, 5, 2, 1, 4, 3, 0, 3; cout << "Here is the matrix A:\n" << A << endl; FullPivLU<Matrix3f> lu_decomp(A); cout << "The rank of A is " << lu_decomp.rank() << endl; cout << "Here is a matrix whose columns form a basis of the null-space of A:\n" << lu_decomp.kernel() << endl; cout << "Here is a matrix whose columns form a basis of the column-space of A:\n" << lu_decomp.image(A) << endl; // yes, have to pass the original A }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/class_CwiseUnaryOp_ptrfun.cpp
.cpp
371
21
#include <Eigen/Core> #include <iostream> using namespace Eigen; using namespace std; // define function to be applied coefficient-wise double ramp(double x) { if (x > 0) return x; else return 0; } int main(int, char**) { Matrix4d m1 = Matrix4d::Random(); cout << m1 << endl << "becomes: " << endl << m1.unaryExpr(ptr_fun(ramp)) << endl; return 0; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/function_taking_eigenbase.cpp
.cpp
418
19
#include <iostream> #include <Eigen/Core> using namespace Eigen; template <typename Derived> void print_size(const EigenBase<Derived>& b) { std::cout << "size (rows, cols): " << b.size() << " (" << b.rows() << ", " << b.cols() << ")" << std::endl; } int main() { Vector3f v; print_size(v); // v.asDiagonal() returns a 3x3 diagonal matrix pseudo-expression print_size(v.asDiagonal()); }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/tut_matrix_resize_fixed_size.cpp
.cpp
229
13
#include <iostream> #include <Eigen/Dense> using namespace Eigen; int main() { Matrix4d m; m.resize(4,4); // no operation std::cout << "The matrix m is of size " << m.rows() << "x" << m.cols() << std::endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_simple_example_dynamic_size.cpp
.cpp
680
23
#include <Eigen/Core> #include <iostream> using namespace Eigen; int main() { for (int size=1; size<=4; ++size) { MatrixXi m(size,size+1); // a (size)x(size+1)-matrix of int's for (int j=0; j<m.cols(); ++j) // loop over columns for (int i=0; i<m.rows(); ++i) // loop over rows m(i,j) = i+j*size; // to access matrix coefficients, // use operator()(int,int) std::cout << m << "\n\n"; } VectorXf v(4); // a vector of 4 float's // to access vector coefficients, use either operator () or operator [] v[0] = 1; v[1] = 2; v(2) = 3; v(3) = 4; std::cout << "\nv:\n" << v << std::endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/TutorialLinAlgComputeTwice.cpp
.cpp
622
24
#include <iostream> #include <Eigen/Dense> using namespace std; using namespace Eigen; int main() { Matrix2f A, b; LLT<Matrix2f> llt; A << 2, -1, -1, 3; b << 1, 2, 3, 1; cout << "Here is the matrix A:\n" << A << endl; cout << "Here is the right hand side b:\n" << b << endl; cout << "Computing LLT decomposition..." << endl; llt.compute(A); cout << "The solution is:\n" << llt.solve(b) << endl; A(1,1)++; cout << "The matrix A is now:\n" << A << endl; cout << "Computing LLT decomposition..." << endl; llt.compute(A); cout << "The solution is now:\n" << llt.solve(b) << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/TutorialLinAlgSelfAdjointEigenSolver.cpp
.cpp
534
19
#include <iostream> #include <Eigen/Dense> using namespace std; using namespace Eigen; int main() { Matrix2f A; A << 1, 2, 2, 3; cout << "Here is the matrix A:\n" << A << endl; SelfAdjointEigenSolver<Matrix2f> eigensolver(A); if (eigensolver.info() != Success) abort(); cout << "The eigenvalues of A are:\n" << eigensolver.eigenvalues() << endl; cout << "Here's a matrix whose columns are eigenvectors of A \n" << "corresponding to these eigenvalues:\n" << eigensolver.eigenvectors() << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/QuickStart_example2_dynamic.cpp
.cpp
305
16
#include <iostream> #include <Eigen/Dense> using namespace Eigen; using namespace std; int main() { MatrixXd m = MatrixXd::Random(3,3); m = (m + MatrixXd::Constant(3,3,1.2)) * 50; cout << "m =" << endl << m << endl; VectorXd v(3); v << 1, 2, 3; cout << "m * v =" << endl << m * v << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_ArrayClass_interop_matrix.cpp
.cpp
591
27
#include <Eigen/Dense> #include <iostream> using namespace Eigen; using namespace std; int main() { MatrixXf m(2,2); MatrixXf n(2,2); MatrixXf result(2,2); m << 1,2, 3,4; n << 5,6, 7,8; result = m * n; cout << "-- Matrix m*n: --" << endl << result << endl << endl; result = m.array() * n.array(); cout << "-- Array m*n: --" << endl << result << endl << endl; result = m.cwiseProduct(n); cout << "-- With cwiseProduct: --" << endl << result << endl << endl; result = m.array() + 4; cout << "-- Array m + 4: --" << endl << result << endl << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/TemplateKeyword_simple.cpp
.cpp
508
21
#include <Eigen/Dense> #include <iostream> using namespace Eigen; void copyUpperTriangularPart(MatrixXf& dst, const MatrixXf& src) { dst.triangularView<Upper>() = src.triangularView<Upper>(); } int main() { MatrixXf m1 = MatrixXf::Ones(4,4); MatrixXf m2 = MatrixXf::Random(4,4); std::cout << "m2 before copy:" << std::endl; std::cout << m2 << std::endl << std::endl; copyUpperTriangularPart(m2, m1); std::cout << "m2 after copy:" << std::endl; std::cout << m2 << std::endl << std::endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_PartialLU_solve.cpp
.cpp
401
19
#include <Eigen/Core> #include <Eigen/LU> #include <iostream> using namespace std; using namespace Eigen; int main() { Matrix3f A; Vector3f b; A << 1,2,3, 4,5,6, 7,8,10; b << 3, 3, 4; cout << "Here is the matrix A:" << endl << A << endl; cout << "Here is the vector b:" << endl << b << endl; Vector3f x = A.lu().solve(b); cout << "The solution is:" << endl << x << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_simple_example_fixed_size.cpp
.cpp
282
16
#include <Eigen/Core> #include <iostream> using namespace Eigen; int main() { Matrix3f m3; m3 << 1, 2, 3, 4, 5, 6, 7, 8, 9; Matrix4f m4 = Matrix4f::Identity(); Vector4i v4(1, 2, 3, 4); std::cout << "m3\n" << m3 << "\nm4:\n" << m4 << "\nv4:\n" << v4 << std::endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_BlockOperations_print_block.cpp
.cpp
413
21
#include <Eigen/Dense> #include <iostream> using namespace std; int main() { Eigen::MatrixXf m(4,4); m << 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12, 13,14,15,16; cout << "Block in the middle" << endl; cout << m.block<2,2>(1,1) << endl << endl; for (int i = 1; i <= 3; ++i) { cout << "Block of size " << i << "x" << i << endl; cout << m.block(0,0,i,i) << endl << endl; } }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/QuickStart_example2_fixed.cpp
.cpp
289
16
#include <iostream> #include <Eigen/Dense> using namespace Eigen; using namespace std; int main() { Matrix3d m = Matrix3d::Random(); m = (m + Matrix3d::Constant(1.2)) * 50; cout << "m =" << endl << m << endl; Vector3d v(1,2,3); cout << "m * v =" << endl << m * v << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/CustomizingEigen_Inheritance.cpp
.cpp
766
31
#include <Eigen/Core> #include <iostream> class MyVectorType : public Eigen::VectorXd { public: MyVectorType(void):Eigen::VectorXd() {} // This constructor allows you to construct MyVectorType from Eigen expressions template<typename OtherDerived> MyVectorType(const Eigen::MatrixBase<OtherDerived>& other) : Eigen::VectorXd(other) { } // This method allows you to assign Eigen expressions to MyVectorType template<typename OtherDerived> MyVectorType& operator=(const Eigen::MatrixBase <OtherDerived>& other) { this->Eigen::VectorXd::operator=(other); return *this; } }; int main() { MyVectorType v = MyVectorType::Ones(4); v(2) += 10; v = 2 * v; std::cout << v.transpose() << std::endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/TutorialInplaceLU.cpp
.cpp
1,589
62
#include <iostream> struct init { init() { std::cout << "[" << "init" << "]" << std::endl; } }; init init_obj; // [init] #include <iostream> #include <Eigen/Dense> using namespace std; using namespace Eigen; int main() { MatrixXd A(2,2); A << 2, -1, 1, 3; cout << "Here is the input matrix A before decomposition:\n" << A << endl; cout << "[init]" << endl; cout << "[declaration]" << endl; PartialPivLU<Ref<MatrixXd> > lu(A); cout << "Here is the input matrix A after decomposition:\n" << A << endl; cout << "[declaration]" << endl; cout << "[matrixLU]" << endl; cout << "Here is the matrix storing the L and U factors:\n" << lu.matrixLU() << endl; cout << "[matrixLU]" << endl; cout << "[solve]" << endl; MatrixXd A0(2,2); A0 << 2, -1, 1, 3; VectorXd b(2); b << 1, 2; VectorXd x = lu.solve(b); cout << "Residual: " << (A0 * x - b).norm() << endl; cout << "[solve]" << endl; cout << "[modifyA]" << endl; A << 3, 4, -2, 1; x = lu.solve(b); cout << "Residual: " << (A0 * x - b).norm() << endl; cout << "[modifyA]" << endl; cout << "[recompute]" << endl; A0 = A; // save A lu.compute(A); x = lu.solve(b); cout << "Residual: " << (A0 * x - b).norm() << endl; cout << "[recompute]" << endl; cout << "[recompute_bis0]" << endl; MatrixXd A1(2,2); A1 << 5,-2,3,4; lu.compute(A1); cout << "Here is the input matrix A1 after decomposition:\n" << A1 << endl; cout << "[recompute_bis0]" << endl; cout << "[recompute_bis1]" << endl; x = lu.solve(b); cout << "Residual: " << (A1 * x - b).norm() << endl; cout << "[recompute_bis1]" << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/TutorialLinAlgExSolveColPivHouseholderQR.cpp
.cpp
381
18
#include <iostream> #include <Eigen/Dense> using namespace std; using namespace Eigen; int main() { Matrix3f A; Vector3f b; A << 1,2,3, 4,5,6, 7,8,10; b << 3, 3, 4; cout << "Here is the matrix A:\n" << A << endl; cout << "Here is the vector b:\n" << b << endl; Vector3f x = A.colPivHouseholderQr().solve(b); cout << "The solution is:\n" << x << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_norm.cpp
.cpp
675
29
#include <Eigen/Dense> #include <iostream> using namespace std; using namespace Eigen; int main() { VectorXf v(2); MatrixXf m(2,2), n(2,2); v << -1, 2; m << 1,-2, -3,4; cout << "v.squaredNorm() = " << v.squaredNorm() << endl; cout << "v.norm() = " << v.norm() << endl; cout << "v.lpNorm<1>() = " << v.lpNorm<1>() << endl; cout << "v.lpNorm<Infinity>() = " << v.lpNorm<Infinity>() << endl; cout << endl; cout << "m.squaredNorm() = " << m.squaredNorm() << endl; cout << "m.norm() = " << m.norm() << endl; cout << "m.lpNorm<1>() = " << m.lpNorm<1>() << endl; cout << "m.lpNorm<Infinity>() = " << m.lpNorm<Infinity>() << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/tut_matrix_coefficient_accessors.cpp
.cpp
343
19
#include <iostream> #include <Eigen/Dense> using namespace Eigen; int main() { MatrixXd m(2,2); m(0,0) = 3; m(1,0) = 2.5; m(0,1) = -1; m(1,1) = m(1,0) + m(0,1); std::cout << "Here is the matrix m:\n" << m << std::endl; VectorXd v(2); v(0) = 4; v(1) = v(0) - 1; std::cout << "Here is the vector v:\n" << v << std::endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/TutorialLinAlgInverseDeterminant.cpp
.cpp
348
17
#include <iostream> #include <Eigen/Dense> using namespace std; using namespace Eigen; int main() { Matrix3f A; A << 1, 2, 1, 2, 1, 0, -1, 1, 2; cout << "Here is the matrix A:\n" << A << endl; cout << "The determinant of A is " << A.determinant() << endl; cout << "The inverse of A is:\n" << A.inverse() << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/DenseBase_middleCols_int.cpp
.cpp
282
16
#include <Eigen/Core> #include <iostream> using namespace Eigen; using namespace std; int main(void) { int const N = 5; MatrixXi A(N,N); A.setRandom(); cout << "A =\n" << A << '\n' << endl; cout << "A(1..3,:) =\n" << A.middleCols(1,3) << endl; return 0; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_colwise.cpp
.cpp
247
14
#include <iostream> #include <Eigen/Dense> using namespace std; int main() { Eigen::MatrixXf mat(2,4); mat << 1, 2, 6, 9, 3, 1, 7, 2; std::cout << "Column's maximum: " << std::endl << mat.colwise().maxCoeff() << std::endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/TemplateKeyword_flexible.cpp
.cpp
677
23
#include <Eigen/Dense> #include <iostream> using namespace Eigen; template <typename Derived1, typename Derived2> void copyUpperTriangularPart(MatrixBase<Derived1>& dst, const MatrixBase<Derived2>& src) { /* Note the 'template' keywords in the following line! */ dst.template triangularView<Upper>() = src.template triangularView<Upper>(); } int main() { MatrixXi m1 = MatrixXi::Ones(5,5); MatrixXi m2 = MatrixXi::Random(4,4); std::cout << "m2 before copy:" << std::endl; std::cout << m2 << std::endl << std::endl; copyUpperTriangularPart(m2, m1.topLeftCorner(4,4)); std::cout << "m2 after copy:" << std::endl; std::cout << m2 << std::endl << std::endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/TutorialLinAlgSetThreshold.cpp
.cpp
377
17
#include <iostream> #include <Eigen/Dense> using namespace std; using namespace Eigen; int main() { Matrix2d A; A << 2, 1, 2, 0.9999999999; FullPivLU<Matrix2d> lu(A); cout << "By default, the rank of A is found to be " << lu.rank() << endl; lu.setThreshold(1e-5); cout << "With threshold 1e-5, the rank of A is found to be " << lu.rank() << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/class_CwiseUnaryOp.cpp
.cpp
561
20
#include <Eigen/Core> #include <iostream> using namespace Eigen; using namespace std; // define a custom template unary functor template<typename Scalar> struct CwiseClampOp { CwiseClampOp(const Scalar& inf, const Scalar& sup) : m_inf(inf), m_sup(sup) {} const Scalar operator()(const Scalar& x) const { return x<m_inf ? m_inf : (x>m_sup ? m_sup : x); } Scalar m_inf, m_sup; }; int main(int, char**) { Matrix4d m1 = Matrix4d::Random(); cout << m1 << endl << "becomes: " << endl << m1.unaryExpr(CwiseClampOp<double>(-0.5,0.5)) << endl; return 0; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_bool.cpp
.cpp
513
22
#include <Eigen/Dense> #include <iostream> using namespace std; using namespace Eigen; int main() { ArrayXXf a(2,2); a << 1,2, 3,4; cout << "(a > 0).all() = " << (a > 0).all() << endl; cout << "(a > 0).any() = " << (a > 0).any() << endl; cout << "(a > 0).count() = " << (a > 0).count() << endl; cout << endl; cout << "(a > 2).all() = " << (a > 2).all() << endl; cout << "(a > 2).any() = " << (a > 2).any() << endl; cout << "(a > 2).count() = " << (a > 2).count() << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_ArrayClass_accessors.cpp
.cpp
466
25
#include <Eigen/Dense> #include <iostream> using namespace Eigen; using namespace std; int main() { ArrayXXf m(2,2); // assign some values coefficient by coefficient m(0,0) = 1.0; m(0,1) = 2.0; m(1,0) = 3.0; m(1,1) = m(0,1) + m(1,0); // print values to standard output cout << m << endl << endl; // using the comma-initializer is also allowed m << 1.0,2.0, 3.0,4.0; // print values to standard output cout << m << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/Tutorial_BlockOperations_vector.cpp
.cpp
348
15
#include <Eigen/Dense> #include <iostream> using namespace std; int main() { Eigen::ArrayXf v(6); v << 1, 2, 3, 4, 5, 6; cout << "v.head(3) =" << endl << v.head(3) << endl << endl; cout << "v.tail<3>() = " << endl << v.tail<3>() << endl << endl; v.segment(1,4) *= 2; cout << "after 'v.segment(1,4) *= 2', v =" << endl << v << endl; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/doc/examples/nullary_indexing.cpp
.cpp
2,438
67
#include <Eigen/Core> #include <iostream> using namespace Eigen; // [functor] template<class ArgType, class RowIndexType, class ColIndexType> class indexing_functor { const ArgType &m_arg; const RowIndexType &m_rowIndices; const ColIndexType &m_colIndices; public: typedef Matrix<typename ArgType::Scalar, RowIndexType::SizeAtCompileTime, ColIndexType::SizeAtCompileTime, ArgType::Flags&RowMajorBit?RowMajor:ColMajor, RowIndexType::MaxSizeAtCompileTime, ColIndexType::MaxSizeAtCompileTime> MatrixType; indexing_functor(const ArgType& arg, const RowIndexType& row_indices, const ColIndexType& col_indices) : m_arg(arg), m_rowIndices(row_indices), m_colIndices(col_indices) {} const typename ArgType::Scalar& operator() (Index row, Index col) const { return m_arg(m_rowIndices[row], m_colIndices[col]); } }; // [functor] // [function] template <class ArgType, class RowIndexType, class ColIndexType> CwiseNullaryOp<indexing_functor<ArgType,RowIndexType,ColIndexType>, typename indexing_functor<ArgType,RowIndexType,ColIndexType>::MatrixType> indexing(const Eigen::MatrixBase<ArgType>& arg, const RowIndexType& row_indices, const ColIndexType& col_indices) { typedef indexing_functor<ArgType,RowIndexType,ColIndexType> Func; typedef typename Func::MatrixType MatrixType; return MatrixType::NullaryExpr(row_indices.size(), col_indices.size(), Func(arg.derived(), row_indices, col_indices)); } // [function] int main() { std::cout << "[main1]\n"; Eigen::MatrixXi A = Eigen::MatrixXi::Random(4,4); Array3i ri(1,2,1); ArrayXi ci(6); ci << 3,2,1,0,0,2; Eigen::MatrixXi B = indexing(A, ri, ci); std::cout << "A =" << std::endl; std::cout << A << std::endl << std::endl; std::cout << "A([" << ri.transpose() << "], [" << ci.transpose() << "]) =" << std::endl; std::cout << B << std::endl; std::cout << "[main1]\n"; std::cout << "[main2]\n"; B = indexing(A, ri+1, ci); std::cout << "A(ri+1,ci) =" << std::endl; std::cout << B << std::endl << std::endl; #if __cplusplus >= 201103L B = indexing(A, ArrayXi::LinSpaced(13,0,12).unaryExpr([](int x){return x%4;}), ArrayXi::LinSpaced(4,0,3)); std::cout << "A(ArrayXi::LinSpaced(13,0,12).unaryExpr([](int x){return x%4;}), ArrayXi::LinSpaced(4,0,3)) =" << std::endl; std::cout << B << std::endl << std::endl; #endif std::cout << "[main2]\n"; }
C++
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/OrderingMethods/Ordering.h
.h
5,229
158
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ORDERING_H #define EIGEN_ORDERING_H namespace Eigen { #include "Eigen_Colamd.h" namespace internal { /** \internal * \ingroup OrderingMethods_Module * \param[in] A the input non-symmetric matrix * \param[out] symmat the symmetric pattern A^T+A from the input matrix \a A. * FIXME: The values should not be considered here */ template<typename MatrixType> void ordering_helper_at_plus_a(const MatrixType& A, MatrixType& symmat) { MatrixType C; C = A.transpose(); // NOTE: Could be costly for (int i = 0; i < C.rows(); i++) { for (typename MatrixType::InnerIterator it(C, i); it; ++it) it.valueRef() = 0.0; } symmat = C + A; } } #ifndef EIGEN_MPL2_ONLY /** \ingroup OrderingMethods_Module * \class AMDOrdering * * Functor computing the \em approximate \em minimum \em degree ordering * If the matrix is not structurally symmetric, an ordering of A^T+A is computed * \tparam StorageIndex The type of indices of the matrix * \sa COLAMDOrdering */ template <typename StorageIndex> class AMDOrdering { public: typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType; /** Compute the permutation vector from a sparse matrix * This routine is much faster if the input matrix is column-major */ template <typename MatrixType> void operator()(const MatrixType& mat, PermutationType& perm) { // Compute the symmetric pattern SparseMatrix<typename MatrixType::Scalar, ColMajor, StorageIndex> symm; internal::ordering_helper_at_plus_a(mat,symm); // Call the AMD routine //m_mat.prune(keep_diag()); internal::minimum_degree_ordering(symm, perm); } /** Compute the permutation with a selfadjoint matrix */ template <typename SrcType, unsigned int SrcUpLo> void operator()(const SparseSelfAdjointView<SrcType, SrcUpLo>& mat, PermutationType& perm) { SparseMatrix<typename SrcType::Scalar, ColMajor, StorageIndex> C; C = mat; // Call the AMD routine // m_mat.prune(keep_diag()); //Remove the diagonal elements internal::minimum_degree_ordering(C, perm); } }; #endif // EIGEN_MPL2_ONLY /** \ingroup OrderingMethods_Module * \class NaturalOrdering * * Functor computing the natural ordering (identity) * * \note Returns an empty permutation matrix * \tparam StorageIndex The type of indices of the matrix */ template <typename StorageIndex> class NaturalOrdering { public: typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType; /** Compute the permutation vector from a column-major sparse matrix */ template <typename MatrixType> void operator()(const MatrixType& /*mat*/, PermutationType& perm) { perm.resize(0); } }; /** \ingroup OrderingMethods_Module * \class COLAMDOrdering * * \tparam StorageIndex The type of indices of the matrix * * Functor computing the \em column \em approximate \em minimum \em degree ordering * The matrix should be in column-major and \b compressed format (see SparseMatrix::makeCompressed()). 
*/ template<typename StorageIndex> class COLAMDOrdering { public: typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType; typedef Matrix<StorageIndex, Dynamic, 1> IndexVector; /** Compute the permutation vector \a perm form the sparse matrix \a mat * \warning The input sparse matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). */ template <typename MatrixType> void operator() (const MatrixType& mat, PermutationType& perm) { eigen_assert(mat.isCompressed() && "COLAMDOrdering requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to COLAMDOrdering"); StorageIndex m = StorageIndex(mat.rows()); StorageIndex n = StorageIndex(mat.cols()); StorageIndex nnz = StorageIndex(mat.nonZeros()); // Get the recommended value of Alen to be used by colamd StorageIndex Alen = internal::colamd_recommended(nnz, m, n); // Set the default parameters double knobs [COLAMD_KNOBS]; StorageIndex stats [COLAMD_STATS]; internal::colamd_set_defaults(knobs); IndexVector p(n+1), A(Alen); for(StorageIndex i=0; i <= n; i++) p(i) = mat.outerIndexPtr()[i]; for(StorageIndex i=0; i < nnz; i++) A(i) = mat.innerIndexPtr()[i]; // Call Colamd routine to compute the ordering StorageIndex info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats); EIGEN_UNUSED_VARIABLE(info); eigen_assert( info && "COLAMD failed " ); perm.resize(n); for (StorageIndex i = 0; i < n; i++) perm.indices()(p(i)) = i; } }; } // end namespace Eigen #endif
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/OrderingMethods/Amd.h
.h
16,396
446
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr> /* NOTE: this routine has been adapted from the CSparse library: Copyright (c) 2006, Timothy A. Davis. http://www.suitesparse.com CSparse is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. CSparse is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this Module; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "../Core/util/NonMPL2.h" #ifndef EIGEN_SPARSE_AMD_H #define EIGEN_SPARSE_AMD_H namespace Eigen { namespace internal { template<typename T> inline T amd_flip(const T& i) { return -i-2; } template<typename T> inline T amd_unflip(const T& i) { return i<0 ? amd_flip(i) : i; } template<typename T0, typename T1> inline bool amd_marked(const T0* w, const T1& j) { return w[j]<0; } template<typename T0, typename T1> inline void amd_mark(const T0* w, const T1& j) { return w[j] = amd_flip(w[j]); } /* clear w */ template<typename StorageIndex> static StorageIndex cs_wclear (StorageIndex mark, StorageIndex lemax, StorageIndex *w, StorageIndex n) { StorageIndex k; if(mark < 2 || (mark + lemax < 0)) { for(k = 0; k < n; k++) if(w[k] != 0) w[k] = 1; mark = 2; } return (mark); /* at this point, w[0..n-1] < mark holds */ } /* depth-first search and postorder of a tree rooted at node j */ template<typename StorageIndex> StorageIndex cs_tdfs(StorageIndex j, StorageIndex k, StorageIndex *head, const StorageIndex *next, StorageIndex *post, StorageIndex *stack) { StorageIndex i, p, top = 0; if(!head || !next || !post || !stack) return (-1); /* check inputs */ stack[0] = j; /* place j on the stack */ while (top >= 0) /* while (stack is not empty) */ { p = stack[top]; /* p = top of stack */ i = head[p]; /* i = youngest child of p */ if(i == -1) { top--; /* p has no unordered children left */ post[k++] = p; /* node p is the kth postordered node */ } else { head[p] = next[i]; /* remove i from children of p */ stack[++top] = i; /* start dfs on child node i */ } } return k; } /** \internal * \ingroup OrderingMethods_Module * Approximate minimum degree ordering algorithm. * * \param[in] C the input selfadjoint matrix stored in compressed column major format. * \param[out] perm the permutation P reducing the fill-in of the input matrix \a C * * Note that the input matrix \a C must be complete, that is both the upper and lower parts have to be stored, as well as the diagonal entries. 
* On exit the values of C are destroyed */ template<typename Scalar, typename StorageIndex> void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,StorageIndex>& C, PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) { using std::sqrt; StorageIndex d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1, k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi, ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t, h; StorageIndex n = StorageIndex(C.cols()); dense = std::max<StorageIndex> (16, StorageIndex(10 * sqrt(double(n)))); /* find dense threshold */ dense = (std::min)(n-2, dense); StorageIndex cnz = StorageIndex(C.nonZeros()); perm.resize(n+1); t = cnz + cnz/5 + 2*n; /* add elbow room to C */ C.resizeNonZeros(t); // get workspace ei_declare_aligned_stack_constructed_variable(StorageIndex,W,8*(n+1),0); StorageIndex* len = W; StorageIndex* nv = W + (n+1); StorageIndex* next = W + 2*(n+1); StorageIndex* head = W + 3*(n+1); StorageIndex* elen = W + 4*(n+1); StorageIndex* degree = W + 5*(n+1); StorageIndex* w = W + 6*(n+1); StorageIndex* hhead = W + 7*(n+1); StorageIndex* last = perm.indices().data(); /* use P as workspace for last */ /* --- Initialize quotient graph ---------------------------------------- */ StorageIndex* Cp = C.outerIndexPtr(); StorageIndex* Ci = C.innerIndexPtr(); for(k = 0; k < n; k++) len[k] = Cp[k+1] - Cp[k]; len[n] = 0; nzmax = t; for(i = 0; i <= n; i++) { head[i] = -1; // degree list i is empty last[i] = -1; next[i] = -1; hhead[i] = -1; // hash list i is empty nv[i] = 1; // node i is just one node w[i] = 1; // node i is alive elen[i] = 0; // Ek of node i is empty degree[i] = len[i]; // degree of node i } mark = internal::cs_wclear<StorageIndex>(0, 0, w, n); /* clear w */ /* --- Initialize degree lists ------------------------------------------ */ for(i = 0; i < n; i++) { bool has_diag = false; for(p = Cp[i]; p<Cp[i+1]; ++p) if(Ci[p]==i) { has_diag = true; break; } d = degree[i]; if(d == 1 && has_diag) /* node i is empty */ { elen[i] = -2; /* element i is dead */ nel++; Cp[i] = -1; /* i is a root of assembly tree */ w[i] = 0; } else if(d > dense || !has_diag) /* node i is dense or has no structural diagonal element */ { nv[i] = 0; /* absorb i into element n */ elen[i] = -1; /* node i is dead */ nel++; Cp[i] = amd_flip (n); nv[n]++; } else { if(head[d] != -1) last[head[d]] = i; next[i] = head[d]; /* put node i in degree list d */ head[d] = i; } } elen[n] = -2; /* n is a dead element */ Cp[n] = -1; /* n is a root of assembly tree */ w[n] = 0; /* n is a dead element */ while (nel < n) /* while (selecting pivots) do */ { /* --- Select node of minimum approximate degree -------------------- */ for(k = -1; mindeg < n && (k = head[mindeg]) == -1; mindeg++) {} if(next[k] != -1) last[next[k]] = -1; head[mindeg] = next[k]; /* remove k from degree list */ elenk = elen[k]; /* elenk = |Ek| */ nvk = nv[k]; /* # of nodes k represents */ nel += nvk; /* nv[k] nodes of A eliminated */ /* --- Garbage collection ------------------------------------------- */ if(elenk > 0 && cnz + mindeg >= nzmax) { for(j = 0; j < n; j++) { if((p = Cp[j]) >= 0) /* j is a live node or element */ { Cp[j] = Ci[p]; /* save first entry of object */ Ci[p] = amd_flip (j); /* first entry is now amd_flip(j) */ } } for(q = 0, p = 0; p < cnz; ) /* scan all of memory */ { if((j = amd_flip (Ci[p++])) >= 0) /* found object j */ { Ci[q] = Cp[j]; /* restore first entry of object */ Cp[j] = q++; /* new pointer to object j */ for(k3 = 0; k3 < len[j]-1; k3++) Ci[q++] = Ci[p++]; } } cnz = q; /* 
Ci[cnz...nzmax-1] now free */ } /* --- Construct new element ---------------------------------------- */ dk = 0; nv[k] = -nvk; /* flag k as in Lk */ p = Cp[k]; pk1 = (elenk == 0) ? p : cnz; /* do in place if elen[k] == 0 */ pk2 = pk1; for(k1 = 1; k1 <= elenk + 1; k1++) { if(k1 > elenk) { e = k; /* search the nodes in k */ pj = p; /* list of nodes starts at Ci[pj]*/ ln = len[k] - elenk; /* length of list of nodes in k */ } else { e = Ci[p++]; /* search the nodes in e */ pj = Cp[e]; ln = len[e]; /* length of list of nodes in e */ } for(k2 = 1; k2 <= ln; k2++) { i = Ci[pj++]; if((nvi = nv[i]) <= 0) continue; /* node i dead, or seen */ dk += nvi; /* degree[Lk] += size of node i */ nv[i] = -nvi; /* negate nv[i] to denote i in Lk*/ Ci[pk2++] = i; /* place i in Lk */ if(next[i] != -1) last[next[i]] = last[i]; if(last[i] != -1) /* remove i from degree list */ { next[last[i]] = next[i]; } else { head[degree[i]] = next[i]; } } if(e != k) { Cp[e] = amd_flip (k); /* absorb e into k */ w[e] = 0; /* e is now a dead element */ } } if(elenk != 0) cnz = pk2; /* Ci[cnz...nzmax] is free */ degree[k] = dk; /* external degree of k - |Lk\i| */ Cp[k] = pk1; /* element k is in Ci[pk1..pk2-1] */ len[k] = pk2 - pk1; elen[k] = -2; /* k is now an element */ /* --- Find set differences ----------------------------------------- */ mark = internal::cs_wclear<StorageIndex>(mark, lemax, w, n); /* clear w if necessary */ for(pk = pk1; pk < pk2; pk++) /* scan 1: find |Le\Lk| */ { i = Ci[pk]; if((eln = elen[i]) <= 0) continue;/* skip if elen[i] empty */ nvi = -nv[i]; /* nv[i] was negated */ wnvi = mark - nvi; for(p = Cp[i]; p <= Cp[i] + eln - 1; p++) /* scan Ei */ { e = Ci[p]; if(w[e] >= mark) { w[e] -= nvi; /* decrement |Le\Lk| */ } else if(w[e] != 0) /* ensure e is a live element */ { w[e] = degree[e] + wnvi; /* 1st time e seen in scan 1 */ } } } /* --- Degree update ------------------------------------------------ */ for(pk = pk1; pk < pk2; pk++) /* scan2: degree update */ { i = Ci[pk]; /* consider node i in Lk */ p1 = Cp[i]; p2 = p1 + elen[i] - 1; pn = p1; for(h = 0, d = 0, p = p1; p <= p2; p++) /* scan Ei */ { e = Ci[p]; if(w[e] != 0) /* e is an unabsorbed element */ { dext = w[e] - mark; /* dext = |Le\Lk| */ if(dext > 0) { d += dext; /* sum up the set differences */ Ci[pn++] = e; /* keep e in Ei */ h += e; /* compute the hash of node i */ } else { Cp[e] = amd_flip (k); /* aggressive absorb. e->k */ w[e] = 0; /* e is a dead element */ } } } elen[i] = pn - p1 + 1; /* elen[i] = |Ei| */ p3 = pn; p4 = p1 + len[i]; for(p = p2 + 1; p < p4; p++) /* prune edges in Ai */ { j = Ci[p]; if((nvj = nv[j]) <= 0) continue; /* node j dead or in Lk */ d += nvj; /* degree(i) += |j| */ Ci[pn++] = j; /* place j in node list of i */ h += j; /* compute hash for node i */ } if(d == 0) /* check for mass elimination */ { Cp[i] = amd_flip (k); /* absorb i into k */ nvi = -nv[i]; dk -= nvi; /* |Lk| -= |i| */ nvk += nvi; /* |k| += nv[i] */ nel += nvi; nv[i] = 0; elen[i] = -1; /* node i is dead */ } else { degree[i] = std::min<StorageIndex> (degree[i], d); /* update degree(i) */ Ci[pn] = Ci[p3]; /* move first node to end */ Ci[p3] = Ci[p1]; /* move 1st el. to end of Ei */ Ci[p1] = k; /* add k as 1st element in of Ei */ len[i] = pn - p1 + 1; /* new len of adj. 
list of node i */ h %= n; /* finalize hash of i */ next[i] = hhead[h]; /* place i in hash bucket */ hhead[h] = i; last[i] = h; /* save hash of i in last[i] */ } } /* scan2 is done */ degree[k] = dk; /* finalize |Lk| */ lemax = std::max<StorageIndex>(lemax, dk); mark = internal::cs_wclear<StorageIndex>(mark+lemax, lemax, w, n); /* clear w */ /* --- Supernode detection ------------------------------------------ */ for(pk = pk1; pk < pk2; pk++) { i = Ci[pk]; if(nv[i] >= 0) continue; /* skip if i is dead */ h = last[i]; /* scan hash bucket of node i */ i = hhead[h]; hhead[h] = -1; /* hash bucket will be empty */ for(; i != -1 && next[i] != -1; i = next[i], mark++) { ln = len[i]; eln = elen[i]; for(p = Cp[i]+1; p <= Cp[i] + ln-1; p++) w[Ci[p]] = mark; jlast = i; for(j = next[i]; j != -1; ) /* compare i with all j */ { ok = (len[j] == ln) && (elen[j] == eln); for(p = Cp[j] + 1; ok && p <= Cp[j] + ln - 1; p++) { if(w[Ci[p]] != mark) ok = 0; /* compare i and j*/ } if(ok) /* i and j are identical */ { Cp[j] = amd_flip (i); /* absorb j into i */ nv[i] += nv[j]; nv[j] = 0; elen[j] = -1; /* node j is dead */ j = next[j]; /* delete j from hash bucket */ next[jlast] = j; } else { jlast = j; /* j and i are different */ j = next[j]; } } } } /* --- Finalize new element------------------------------------------ */ for(p = pk1, pk = pk1; pk < pk2; pk++) /* finalize Lk */ { i = Ci[pk]; if((nvi = -nv[i]) <= 0) continue;/* skip if i is dead */ nv[i] = nvi; /* restore nv[i] */ d = degree[i] + dk - nvi; /* compute external degree(i) */ d = std::min<StorageIndex> (d, n - nel - nvi); if(head[d] != -1) last[head[d]] = i; next[i] = head[d]; /* put i back in degree list */ last[i] = -1; head[d] = i; mindeg = std::min<StorageIndex> (mindeg, d); /* find new minimum degree */ degree[i] = d; Ci[p++] = i; /* place i in Lk */ } nv[k] = nvk; /* # nodes absorbed into k */ if((len[k] = p-pk1) == 0) /* length of adj list of element k*/ { Cp[k] = -1; /* k is a root of the tree */ w[k] = 0; /* k is now a dead element */ } if(elenk != 0) cnz = p; /* free unused space in Lk */ } /* --- Postordering ----------------------------------------------------- */ for(i = 0; i < n; i++) Cp[i] = amd_flip (Cp[i]);/* fix assembly tree */ for(j = 0; j <= n; j++) head[j] = -1; for(j = n; j >= 0; j--) /* place unordered nodes in lists */ { if(nv[j] > 0) continue; /* skip if j is an element */ next[j] = head[Cp[j]]; /* place j in list of its parent */ head[Cp[j]] = j; } for(e = n; e >= 0; e--) /* place elements in lists */ { if(nv[e] <= 0) continue; /* skip unless e is an element */ if(Cp[e] != -1) { next[e] = head[Cp[e]]; /* place e in list of its parent */ head[Cp[e]] = e; } } for(k = 0, i = 0; i <= n; i++) /* postorder the assembly tree */ { if(Cp[i] == -1) k = internal::cs_tdfs<StorageIndex>(i, k, head, next, perm.indices().data(), w); } perm.indices().conservativeResize(n); } } // namespace internal } // end namespace Eigen #endif // EIGEN_SPARSE_AMD_H
Unknown
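The Amd.h content above implements Eigen's internal approximate minimum degree routine, internal::minimum_degree_ordering. A minimal usage sketch follows, assuming (as the OrderingMethods documentation suggests) that the public AMDOrdering functor symmetrizes the input pattern and forwards to this internal routine; the matrix values and variable names are illustrative only, not part of the original file.

#include <iostream>
#include <vector>
#include <Eigen/Sparse>   // SparseCore + OrderingMethods (AMDOrdering)
using namespace Eigen;

int main()
{
  // Small sparse matrix with a symmetric pattern (illustrative values only).
  SparseMatrix<double> A(4,4);
  std::vector<Triplet<double> > triplets;
  triplets.push_back(Triplet<double>(0,0,4.0));
  triplets.push_back(Triplet<double>(1,1,4.0));
  triplets.push_back(Triplet<double>(2,2,4.0));
  triplets.push_back(Triplet<double>(3,3,4.0));
  triplets.push_back(Triplet<double>(0,3,1.0));
  triplets.push_back(Triplet<double>(3,0,1.0));
  A.setFromTriplets(triplets.begin(), triplets.end());

  // Assumed public entry point: the functor builds the full symmetric pattern
  // internally before calling the internal AMD routine shown above.
  AMDOrdering<int> ordering;
  PermutationMatrix<Dynamic,Dynamic,int> perm;
  ordering(A, perm);   // fill-reducing permutation
  std::cout << "AMD permutation: " << perm.indices().transpose() << std::endl;
  return 0;
}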
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/OrderingMethods/Eigen_Colamd.h
.h
62266
1844
// // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Desire Nuentsa Wakam <desire.nuentsa_wakam@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. // This file is modified from the colamd/symamd library. The copyright is below // The authors of the code itself are Stefan I. Larimore and Timothy A. // Davis (davis@cise.ufl.edu), University of Florida. The algorithm was // developed in collaboration with John Gilbert, Xerox PARC, and Esmond // Ng, Oak Ridge National Laboratory. // // Date: // // September 8, 2003. Version 2.3. // // Acknowledgements: // // This work was supported by the National Science Foundation, under // grants DMS-9504974 and DMS-9803599. // // Notice: // // Copyright (c) 1998-2003 by the University of Florida. // All Rights Reserved. // // THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY // EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. // // Permission is hereby granted to use, copy, modify, and/or distribute // this program, provided that the Copyright, this License, and the // Availability of the original version is retained on all copies and made // accessible to the end-user of any code or package that includes COLAMD // or any modified version of COLAMD. // // Availability: // // The colamd/symamd library is available at // // http://www.suitesparse.com #ifndef EIGEN_COLAMD_H #define EIGEN_COLAMD_H namespace internal { /* Ensure that debugging is turned off: */ #ifndef COLAMD_NDEBUG #define COLAMD_NDEBUG #endif /* NDEBUG */ /* ========================================================================== */ /* === Knob and statistics definitions ====================================== */ /* ========================================================================== */ /* size of the knobs [ ] array. Only knobs [0..1] are currently used. */ #define COLAMD_KNOBS 20 /* number of output statistics. Only stats [0..6] are currently used. */ #define COLAMD_STATS 20 /* knobs [0] and stats [0]: dense row knob and output statistic. */ #define COLAMD_DENSE_ROW 0 /* knobs [1] and stats [1]: dense column knob and output statistic. 
*/ #define COLAMD_DENSE_COL 1 /* stats [2]: memory defragmentation count output statistic */ #define COLAMD_DEFRAG_COUNT 2 /* stats [3]: colamd status: zero OK, > 0 warning or notice, < 0 error */ #define COLAMD_STATUS 3 /* stats [4..6]: error info, or info on jumbled columns */ #define COLAMD_INFO1 4 #define COLAMD_INFO2 5 #define COLAMD_INFO3 6 /* error codes returned in stats [3]: */ #define COLAMD_OK (0) #define COLAMD_OK_BUT_JUMBLED (1) #define COLAMD_ERROR_A_not_present (-1) #define COLAMD_ERROR_p_not_present (-2) #define COLAMD_ERROR_nrow_negative (-3) #define COLAMD_ERROR_ncol_negative (-4) #define COLAMD_ERROR_nnz_negative (-5) #define COLAMD_ERROR_p0_nonzero (-6) #define COLAMD_ERROR_A_too_small (-7) #define COLAMD_ERROR_col_length_negative (-8) #define COLAMD_ERROR_row_index_out_of_bounds (-9) #define COLAMD_ERROR_out_of_memory (-10) #define COLAMD_ERROR_internal_error (-999) /* ========================================================================== */ /* === Definitions ========================================================== */ /* ========================================================================== */ #define ONES_COMPLEMENT(r) (-(r)-1) /* -------------------------------------------------------------------------- */ #define COLAMD_EMPTY (-1) /* Row and column status */ #define ALIVE (0) #define DEAD (-1) /* Column status */ #define DEAD_PRINCIPAL (-1) #define DEAD_NON_PRINCIPAL (-2) /* Macros for row and column status update and checking. */ #define ROW_IS_DEAD(r) ROW_IS_MARKED_DEAD (Row[r].shared2.mark) #define ROW_IS_MARKED_DEAD(row_mark) (row_mark < ALIVE) #define ROW_IS_ALIVE(r) (Row [r].shared2.mark >= ALIVE) #define COL_IS_DEAD(c) (Col [c].start < ALIVE) #define COL_IS_ALIVE(c) (Col [c].start >= ALIVE) #define COL_IS_DEAD_PRINCIPAL(c) (Col [c].start == DEAD_PRINCIPAL) #define KILL_ROW(r) { Row [r].shared2.mark = DEAD ; } #define KILL_PRINCIPAL_COL(c) { Col [c].start = DEAD_PRINCIPAL ; } #define KILL_NON_PRINCIPAL_COL(c) { Col [c].start = DEAD_NON_PRINCIPAL ; } /* ========================================================================== */ /* === Colamd reporting mechanism =========================================== */ /* ========================================================================== */ // == Row and Column structures == template <typename IndexType> struct colamd_col { IndexType start ; /* index for A of first row in this column, or DEAD */ /* if column is dead */ IndexType length ; /* number of rows in this column */ union { IndexType thickness ; /* number of original columns represented by this */ /* col, if the column is alive */ IndexType parent ; /* parent in parent tree super-column structure, if */ /* the column is dead */ } shared1 ; union { IndexType score ; /* the score used to maintain heap, if col is alive */ IndexType order ; /* pivot ordering of this column, if col is dead */ } shared2 ; union { IndexType headhash ; /* head of a hash bucket, if col is at the head of */ /* a degree list */ IndexType hash ; /* hash value, if col is not in a degree list */ IndexType prev ; /* previous column in degree list, if col is in a */ /* degree list (but not at the head of a degree list) */ } shared3 ; union { IndexType degree_next ; /* next column, if col is in a degree list */ IndexType hash_next ; /* next column, if col is in a hash list */ } shared4 ; }; template <typename IndexType> struct Colamd_Row { IndexType start ; /* index for A of first col in this row */ IndexType length ; /* number of principal columns in this row */ union { 
IndexType degree ; /* number of principal & non-principal columns in row */ IndexType p ; /* used as a row pointer in init_rows_cols () */ } shared1 ; union { IndexType mark ; /* for computing set differences and marking dead rows*/ IndexType first_column ;/* first column in row (used in garbage collection) */ } shared2 ; }; /* ========================================================================== */ /* === Colamd recommended memory size ======================================= */ /* ========================================================================== */ /* The recommended length Alen of the array A passed to colamd is given by the COLAMD_RECOMMENDED (nnz, n_row, n_col) macro. It returns -1 if any argument is negative. 2*nnz space is required for the row and column indices of the matrix. colamd_c (n_col) + colamd_r (n_row) space is required for the Col and Row arrays, respectively, which are internal to colamd. An additional n_col space is the minimal amount of "elbow room", and nnz/5 more space is recommended for run time efficiency. This macro is not needed when using symamd. Explicit typecast to IndexType added Sept. 23, 2002, COLAMD version 2.2, to avoid gcc -pedantic warning messages. */ template <typename IndexType> inline IndexType colamd_c(IndexType n_col) { return IndexType( ((n_col) + 1) * sizeof (colamd_col<IndexType>) / sizeof (IndexType) ) ; } template <typename IndexType> inline IndexType colamd_r(IndexType n_row) { return IndexType(((n_row) + 1) * sizeof (Colamd_Row<IndexType>) / sizeof (IndexType)); } // Prototypes of non-user callable routines template <typename IndexType> static IndexType init_rows_cols (IndexType n_row, IndexType n_col, Colamd_Row<IndexType> Row [], colamd_col<IndexType> col [], IndexType A [], IndexType p [], IndexType stats[COLAMD_STATS] ); template <typename IndexType> static void init_scoring (IndexType n_row, IndexType n_col, Colamd_Row<IndexType> Row [], colamd_col<IndexType> Col [], IndexType A [], IndexType head [], double knobs[COLAMD_KNOBS], IndexType *p_n_row2, IndexType *p_n_col2, IndexType *p_max_deg); template <typename IndexType> static IndexType find_ordering (IndexType n_row, IndexType n_col, IndexType Alen, Colamd_Row<IndexType> Row [], colamd_col<IndexType> Col [], IndexType A [], IndexType head [], IndexType n_col2, IndexType max_deg, IndexType pfree); template <typename IndexType> static void order_children (IndexType n_col, colamd_col<IndexType> Col [], IndexType p []); template <typename IndexType> static void detect_super_cols (colamd_col<IndexType> Col [], IndexType A [], IndexType head [], IndexType row_start, IndexType row_length ) ; template <typename IndexType> static IndexType garbage_collection (IndexType n_row, IndexType n_col, Colamd_Row<IndexType> Row [], colamd_col<IndexType> Col [], IndexType A [], IndexType *pfree) ; template <typename IndexType> static inline IndexType clear_mark (IndexType n_row, Colamd_Row<IndexType> Row [] ) ; /* === No debugging ========================================================= */ #define COLAMD_DEBUG0(params) ; #define COLAMD_DEBUG1(params) ; #define COLAMD_DEBUG2(params) ; #define COLAMD_DEBUG3(params) ; #define COLAMD_DEBUG4(params) ; #define COLAMD_ASSERT(expression) ((void) 0) /** * \brief Returns the recommended value of Alen * * Returns recommended value of Alen for use by colamd. * Returns -1 if any input argument is negative. * The use of this routine or macro is optional. 
* Note that the macro uses its arguments more than once, * so be careful for side effects, if you pass expressions as arguments to COLAMD_RECOMMENDED. * * \param nnz nonzeros in A * \param n_row number of rows in A * \param n_col number of columns in A * \return recommended value of Alen for use by colamd */ template <typename IndexType> inline IndexType colamd_recommended ( IndexType nnz, IndexType n_row, IndexType n_col) { if ((nnz) < 0 || (n_row) < 0 || (n_col) < 0) return (-1); else return (2 * (nnz) + colamd_c (n_col) + colamd_r (n_row) + (n_col) + ((nnz) / 5)); } /** * \brief set default parameters The use of this routine is optional. * * Colamd: rows with more than (knobs [COLAMD_DENSE_ROW] * n_col) * entries are removed prior to ordering. Columns with more than * (knobs [COLAMD_DENSE_COL] * n_row) entries are removed prior to * ordering, and placed last in the output column ordering. * * COLAMD_DENSE_ROW and COLAMD_DENSE_COL are defined as 0 and 1, * respectively, in colamd.h. Default values of these two knobs * are both 0.5. Currently, only knobs [0] and knobs [1] are * used, but future versions may use more knobs. If so, they will * be properly set to their defaults by the future version of * colamd_set_defaults, so that the code that calls colamd will * not need to change, assuming that you either use * colamd_set_defaults, or pass a (double *) NULL pointer as the * knobs array to colamd or symamd. * * \param knobs parameter settings for colamd */ static inline void colamd_set_defaults(double knobs[COLAMD_KNOBS]) { /* === Local variables ================================================== */ int i ; if (!knobs) { return ; /* no knobs to initialize */ } for (i = 0 ; i < COLAMD_KNOBS ; i++) { knobs [i] = 0 ; } knobs [COLAMD_DENSE_ROW] = 0.5 ; /* ignore rows over 50% dense */ knobs [COLAMD_DENSE_COL] = 0.5 ; /* ignore columns over 50% dense */ } /** * \brief Computes a column ordering using the column approximate minimum degree ordering * * Computes a column ordering (Q) of A such that P(AQ)=LU or * (AQ)'AQ=LL' have less fill-in and require fewer floating point * operations than factorizing the unpermuted matrix A or A'A, * respectively. 
* * * \param n_row number of rows in A * \param n_col number of columns in A * \param Alen, size of the array A * \param A row indices of the matrix, of size ALen * \param p column pointers of A, of size n_col+1 * \param knobs parameter settings for colamd * \param stats colamd output statistics and error codes */ template <typename IndexType> static bool colamd(IndexType n_row, IndexType n_col, IndexType Alen, IndexType *A, IndexType *p, double knobs[COLAMD_KNOBS], IndexType stats[COLAMD_STATS]) { /* === Local variables ================================================== */ IndexType i ; /* loop index */ IndexType nnz ; /* nonzeros in A */ IndexType Row_size ; /* size of Row [], in integers */ IndexType Col_size ; /* size of Col [], in integers */ IndexType need ; /* minimum required length of A */ Colamd_Row<IndexType> *Row ; /* pointer into A of Row [0..n_row] array */ colamd_col<IndexType> *Col ; /* pointer into A of Col [0..n_col] array */ IndexType n_col2 ; /* number of non-dense, non-empty columns */ IndexType n_row2 ; /* number of non-dense, non-empty rows */ IndexType ngarbage ; /* number of garbage collections performed */ IndexType max_deg ; /* maximum row degree */ double default_knobs [COLAMD_KNOBS] ; /* default knobs array */ /* === Check the input arguments ======================================== */ if (!stats) { COLAMD_DEBUG0 (("colamd: stats not present\n")) ; return (false) ; } for (i = 0 ; i < COLAMD_STATS ; i++) { stats [i] = 0 ; } stats [COLAMD_STATUS] = COLAMD_OK ; stats [COLAMD_INFO1] = -1 ; stats [COLAMD_INFO2] = -1 ; if (!A) /* A is not present */ { stats [COLAMD_STATUS] = COLAMD_ERROR_A_not_present ; COLAMD_DEBUG0 (("colamd: A not present\n")) ; return (false) ; } if (!p) /* p is not present */ { stats [COLAMD_STATUS] = COLAMD_ERROR_p_not_present ; COLAMD_DEBUG0 (("colamd: p not present\n")) ; return (false) ; } if (n_row < 0) /* n_row must be >= 0 */ { stats [COLAMD_STATUS] = COLAMD_ERROR_nrow_negative ; stats [COLAMD_INFO1] = n_row ; COLAMD_DEBUG0 (("colamd: nrow negative %d\n", n_row)) ; return (false) ; } if (n_col < 0) /* n_col must be >= 0 */ { stats [COLAMD_STATUS] = COLAMD_ERROR_ncol_negative ; stats [COLAMD_INFO1] = n_col ; COLAMD_DEBUG0 (("colamd: ncol negative %d\n", n_col)) ; return (false) ; } nnz = p [n_col] ; if (nnz < 0) /* nnz must be >= 0 */ { stats [COLAMD_STATUS] = COLAMD_ERROR_nnz_negative ; stats [COLAMD_INFO1] = nnz ; COLAMD_DEBUG0 (("colamd: number of entries negative %d\n", nnz)) ; return (false) ; } if (p [0] != 0) { stats [COLAMD_STATUS] = COLAMD_ERROR_p0_nonzero ; stats [COLAMD_INFO1] = p [0] ; COLAMD_DEBUG0 (("colamd: p[0] not zero %d\n", p [0])) ; return (false) ; } /* === If no knobs, set default knobs =================================== */ if (!knobs) { colamd_set_defaults (default_knobs) ; knobs = default_knobs ; } /* === Allocate the Row and Col arrays from array A ===================== */ Col_size = colamd_c (n_col) ; Row_size = colamd_r (n_row) ; need = 2*nnz + n_col + Col_size + Row_size ; if (need > Alen) { /* not enough space in array A to perform the ordering */ stats [COLAMD_STATUS] = COLAMD_ERROR_A_too_small ; stats [COLAMD_INFO1] = need ; stats [COLAMD_INFO2] = Alen ; COLAMD_DEBUG0 (("colamd: Need Alen >= %d, given only Alen = %d\n", need,Alen)); return (false) ; } Alen -= Col_size + Row_size ; Col = (colamd_col<IndexType> *) &A [Alen] ; Row = (Colamd_Row<IndexType> *) &A [Alen + Col_size] ; /* === Construct the row and column data structures ===================== */ if (!Eigen::internal::init_rows_cols (n_row, n_col, 
Row, Col, A, p, stats)) { /* input matrix is invalid */ COLAMD_DEBUG0 (("colamd: Matrix invalid\n")) ; return (false) ; } /* === Initialize scores, kill dense rows/columns ======================= */ Eigen::internal::init_scoring (n_row, n_col, Row, Col, A, p, knobs, &n_row2, &n_col2, &max_deg) ; /* === Order the supercolumns =========================================== */ ngarbage = Eigen::internal::find_ordering (n_row, n_col, Alen, Row, Col, A, p, n_col2, max_deg, 2*nnz) ; /* === Order the non-principal columns ================================== */ Eigen::internal::order_children (n_col, Col, p) ; /* === Return statistics in stats ======================================= */ stats [COLAMD_DENSE_ROW] = n_row - n_row2 ; stats [COLAMD_DENSE_COL] = n_col - n_col2 ; stats [COLAMD_DEFRAG_COUNT] = ngarbage ; COLAMD_DEBUG0 (("colamd: done.\n")) ; return (true) ; } /* ========================================================================== */ /* === NON-USER-CALLABLE ROUTINES: ========================================== */ /* ========================================================================== */ /* There are no user-callable routines beyond this point in the file */ /* ========================================================================== */ /* === init_rows_cols ======================================================= */ /* ========================================================================== */ /* Takes the column form of the matrix in A and creates the row form of the matrix. Also, row and column attributes are stored in the Col and Row structs. If the columns are un-sorted or contain duplicate row indices, this routine will also sort and remove duplicate row indices from the column form of the matrix. Returns false if the matrix is invalid, true otherwise. Not user-callable. 
*/ template <typename IndexType> static IndexType init_rows_cols /* returns true if OK, or false otherwise */ ( /* === Parameters ======================================================= */ IndexType n_row, /* number of rows of A */ IndexType n_col, /* number of columns of A */ Colamd_Row<IndexType> Row [], /* of size n_row+1 */ colamd_col<IndexType> Col [], /* of size n_col+1 */ IndexType A [], /* row indices of A, of size Alen */ IndexType p [], /* pointers to columns in A, of size n_col+1 */ IndexType stats [COLAMD_STATS] /* colamd statistics */ ) { /* === Local variables ================================================== */ IndexType col ; /* a column index */ IndexType row ; /* a row index */ IndexType *cp ; /* a column pointer */ IndexType *cp_end ; /* a pointer to the end of a column */ IndexType *rp ; /* a row pointer */ IndexType *rp_end ; /* a pointer to the end of a row */ IndexType last_row ; /* previous row */ /* === Initialize columns, and check column pointers ==================== */ for (col = 0 ; col < n_col ; col++) { Col [col].start = p [col] ; Col [col].length = p [col+1] - p [col] ; if ((Col [col].length) < 0) // extra parentheses to work-around gcc bug 10200 { /* column pointers must be non-decreasing */ stats [COLAMD_STATUS] = COLAMD_ERROR_col_length_negative ; stats [COLAMD_INFO1] = col ; stats [COLAMD_INFO2] = Col [col].length ; COLAMD_DEBUG0 (("colamd: col %d length %d < 0\n", col, Col [col].length)) ; return (false) ; } Col [col].shared1.thickness = 1 ; Col [col].shared2.score = 0 ; Col [col].shared3.prev = COLAMD_EMPTY ; Col [col].shared4.degree_next = COLAMD_EMPTY ; } /* p [0..n_col] no longer needed, used as "head" in subsequent routines */ /* === Scan columns, compute row degrees, and check row indices ========= */ stats [COLAMD_INFO3] = 0 ; /* number of duplicate or unsorted row indices*/ for (row = 0 ; row < n_row ; row++) { Row [row].length = 0 ; Row [row].shared2.mark = -1 ; } for (col = 0 ; col < n_col ; col++) { last_row = -1 ; cp = &A [p [col]] ; cp_end = &A [p [col+1]] ; while (cp < cp_end) { row = *cp++ ; /* make sure row indices within range */ if (row < 0 || row >= n_row) { stats [COLAMD_STATUS] = COLAMD_ERROR_row_index_out_of_bounds ; stats [COLAMD_INFO1] = col ; stats [COLAMD_INFO2] = row ; stats [COLAMD_INFO3] = n_row ; COLAMD_DEBUG0 (("colamd: row %d col %d out of bounds\n", row, col)) ; return (false) ; } if (row <= last_row || Row [row].shared2.mark == col) { /* row index are unsorted or repeated (or both), thus col */ /* is jumbled. This is a notice, not an error condition. 
*/ stats [COLAMD_STATUS] = COLAMD_OK_BUT_JUMBLED ; stats [COLAMD_INFO1] = col ; stats [COLAMD_INFO2] = row ; (stats [COLAMD_INFO3]) ++ ; COLAMD_DEBUG1 (("colamd: row %d col %d unsorted/duplicate\n",row,col)); } if (Row [row].shared2.mark != col) { Row [row].length++ ; } else { /* this is a repeated entry in the column, */ /* it will be removed */ Col [col].length-- ; } /* mark the row as having been seen in this column */ Row [row].shared2.mark = col ; last_row = row ; } } /* === Compute row pointers ============================================= */ /* row form of the matrix starts directly after the column */ /* form of matrix in A */ Row [0].start = p [n_col] ; Row [0].shared1.p = Row [0].start ; Row [0].shared2.mark = -1 ; for (row = 1 ; row < n_row ; row++) { Row [row].start = Row [row-1].start + Row [row-1].length ; Row [row].shared1.p = Row [row].start ; Row [row].shared2.mark = -1 ; } /* === Create row form ================================================== */ if (stats [COLAMD_STATUS] == COLAMD_OK_BUT_JUMBLED) { /* if cols jumbled, watch for repeated row indices */ for (col = 0 ; col < n_col ; col++) { cp = &A [p [col]] ; cp_end = &A [p [col+1]] ; while (cp < cp_end) { row = *cp++ ; if (Row [row].shared2.mark != col) { A [(Row [row].shared1.p)++] = col ; Row [row].shared2.mark = col ; } } } } else { /* if cols not jumbled, we don't need the mark (this is faster) */ for (col = 0 ; col < n_col ; col++) { cp = &A [p [col]] ; cp_end = &A [p [col+1]] ; while (cp < cp_end) { A [(Row [*cp++].shared1.p)++] = col ; } } } /* === Clear the row marks and set row degrees ========================== */ for (row = 0 ; row < n_row ; row++) { Row [row].shared2.mark = 0 ; Row [row].shared1.degree = Row [row].length ; } /* === See if we need to re-create columns ============================== */ if (stats [COLAMD_STATUS] == COLAMD_OK_BUT_JUMBLED) { COLAMD_DEBUG0 (("colamd: reconstructing column form, matrix jumbled\n")) ; /* === Compute col pointers ========================================= */ /* col form of the matrix starts at A [0]. */ /* Note, we may have a gap between the col form and the row */ /* form if there were duplicate entries, if so, it will be */ /* removed upon the first garbage collection */ Col [0].start = 0 ; p [0] = Col [0].start ; for (col = 1 ; col < n_col ; col++) { /* note that the lengths here are for pruned columns, i.e. */ /* no duplicate row indices will exist for these columns */ Col [col].start = Col [col-1].start + Col [col-1].length ; p [col] = Col [col].start ; } /* === Re-create col form =========================================== */ for (row = 0 ; row < n_row ; row++) { rp = &A [Row [row].start] ; rp_end = rp + Row [row].length ; while (rp < rp_end) { A [(p [*rp++])++] = row ; } } } /* === Done. Matrix is not (or no longer) jumbled ====================== */ return (true) ; } /* ========================================================================== */ /* === init_scoring ========================================================= */ /* ========================================================================== */ /* Kills dense or empty columns and rows, calculates an initial score for each column, and places all columns in the degree lists. Not user-callable. 
*/ template <typename IndexType> static void init_scoring ( /* === Parameters ======================================================= */ IndexType n_row, /* number of rows of A */ IndexType n_col, /* number of columns of A */ Colamd_Row<IndexType> Row [], /* of size n_row+1 */ colamd_col<IndexType> Col [], /* of size n_col+1 */ IndexType A [], /* column form and row form of A */ IndexType head [], /* of size n_col+1 */ double knobs [COLAMD_KNOBS],/* parameters */ IndexType *p_n_row2, /* number of non-dense, non-empty rows */ IndexType *p_n_col2, /* number of non-dense, non-empty columns */ IndexType *p_max_deg /* maximum row degree */ ) { /* === Local variables ================================================== */ IndexType c ; /* a column index */ IndexType r, row ; /* a row index */ IndexType *cp ; /* a column pointer */ IndexType deg ; /* degree of a row or column */ IndexType *cp_end ; /* a pointer to the end of a column */ IndexType *new_cp ; /* new column pointer */ IndexType col_length ; /* length of pruned column */ IndexType score ; /* current column score */ IndexType n_col2 ; /* number of non-dense, non-empty columns */ IndexType n_row2 ; /* number of non-dense, non-empty rows */ IndexType dense_row_count ; /* remove rows with more entries than this */ IndexType dense_col_count ; /* remove cols with more entries than this */ IndexType min_score ; /* smallest column score */ IndexType max_deg ; /* maximum row degree */ IndexType next_col ; /* Used to add to degree list.*/ /* === Extract knobs ==================================================== */ dense_row_count = numext::maxi(IndexType(0), numext::mini(IndexType(knobs [COLAMD_DENSE_ROW] * n_col), n_col)) ; dense_col_count = numext::maxi(IndexType(0), numext::mini(IndexType(knobs [COLAMD_DENSE_COL] * n_row), n_row)) ; COLAMD_DEBUG1 (("colamd: densecount: %d %d\n", dense_row_count, dense_col_count)) ; max_deg = 0 ; n_col2 = n_col ; n_row2 = n_row ; /* === Kill empty columns =============================================== */ /* Put the empty columns at the end in their natural order, so that LU */ /* factorization can proceed as far as possible. 
*/ for (c = n_col-1 ; c >= 0 ; c--) { deg = Col [c].length ; if (deg == 0) { /* this is a empty column, kill and order it last */ Col [c].shared2.order = --n_col2 ; KILL_PRINCIPAL_COL (c) ; } } COLAMD_DEBUG1 (("colamd: null columns killed: %d\n", n_col - n_col2)) ; /* === Kill dense columns =============================================== */ /* Put the dense columns at the end, in their natural order */ for (c = n_col-1 ; c >= 0 ; c--) { /* skip any dead columns */ if (COL_IS_DEAD (c)) { continue ; } deg = Col [c].length ; if (deg > dense_col_count) { /* this is a dense column, kill and order it last */ Col [c].shared2.order = --n_col2 ; /* decrement the row degrees */ cp = &A [Col [c].start] ; cp_end = cp + Col [c].length ; while (cp < cp_end) { Row [*cp++].shared1.degree-- ; } KILL_PRINCIPAL_COL (c) ; } } COLAMD_DEBUG1 (("colamd: Dense and null columns killed: %d\n", n_col - n_col2)) ; /* === Kill dense and empty rows ======================================== */ for (r = 0 ; r < n_row ; r++) { deg = Row [r].shared1.degree ; COLAMD_ASSERT (deg >= 0 && deg <= n_col) ; if (deg > dense_row_count || deg == 0) { /* kill a dense or empty row */ KILL_ROW (r) ; --n_row2 ; } else { /* keep track of max degree of remaining rows */ max_deg = numext::maxi(max_deg, deg) ; } } COLAMD_DEBUG1 (("colamd: Dense and null rows killed: %d\n", n_row - n_row2)) ; /* === Compute initial column scores ==================================== */ /* At this point the row degrees are accurate. They reflect the number */ /* of "live" (non-dense) columns in each row. No empty rows exist. */ /* Some "live" columns may contain only dead rows, however. These are */ /* pruned in the code below. */ /* now find the initial matlab score for each column */ for (c = n_col-1 ; c >= 0 ; c--) { /* skip dead column */ if (COL_IS_DEAD (c)) { continue ; } score = 0 ; cp = &A [Col [c].start] ; new_cp = cp ; cp_end = cp + Col [c].length ; while (cp < cp_end) { /* get a row */ row = *cp++ ; /* skip if dead */ if (ROW_IS_DEAD (row)) { continue ; } /* compact the column */ *new_cp++ = row ; /* add row's external degree */ score += Row [row].shared1.degree - 1 ; /* guard against integer overflow */ score = numext::mini(score, n_col) ; } /* determine pruned column length */ col_length = (IndexType) (new_cp - &A [Col [c].start]) ; if (col_length == 0) { /* a newly-made null column (all rows in this col are "dense" */ /* and have already been killed) */ COLAMD_DEBUG2 (("Newly null killed: %d\n", c)) ; Col [c].shared2.order = --n_col2 ; KILL_PRINCIPAL_COL (c) ; } else { /* set column length and set score */ COLAMD_ASSERT (score >= 0) ; COLAMD_ASSERT (score <= n_col) ; Col [c].length = col_length ; Col [c].shared2.score = score ; } } COLAMD_DEBUG1 (("colamd: Dense, null, and newly-null columns killed: %d\n", n_col-n_col2)) ; /* At this point, all empty rows and columns are dead. All live columns */ /* are "clean" (containing no dead rows) and simplicial (no supercolumns */ /* yet). Rows may contain dead columns, but all live rows contain at */ /* least one live column. */ /* === Initialize degree lists ========================================== */ /* clear the hash buckets */ for (c = 0 ; c <= n_col ; c++) { head [c] = COLAMD_EMPTY ; } min_score = n_col ; /* place in reverse order, so low column indices are at the front */ /* of the lists. 
This is to encourage natural tie-breaking */ for (c = n_col-1 ; c >= 0 ; c--) { /* only add principal columns to degree lists */ if (COL_IS_ALIVE (c)) { COLAMD_DEBUG4 (("place %d score %d minscore %d ncol %d\n", c, Col [c].shared2.score, min_score, n_col)) ; /* === Add columns score to DList =============================== */ score = Col [c].shared2.score ; COLAMD_ASSERT (min_score >= 0) ; COLAMD_ASSERT (min_score <= n_col) ; COLAMD_ASSERT (score >= 0) ; COLAMD_ASSERT (score <= n_col) ; COLAMD_ASSERT (head [score] >= COLAMD_EMPTY) ; /* now add this column to dList at proper score location */ next_col = head [score] ; Col [c].shared3.prev = COLAMD_EMPTY ; Col [c].shared4.degree_next = next_col ; /* if there already was a column with the same score, set its */ /* previous pointer to this new column */ if (next_col != COLAMD_EMPTY) { Col [next_col].shared3.prev = c ; } head [score] = c ; /* see if this score is less than current min */ min_score = numext::mini(min_score, score) ; } } /* === Return number of remaining columns, and max row degree =========== */ *p_n_col2 = n_col2 ; *p_n_row2 = n_row2 ; *p_max_deg = max_deg ; } /* ========================================================================== */ /* === find_ordering ======================================================== */ /* ========================================================================== */ /* Order the principal columns of the supercolumn form of the matrix (no supercolumns on input). Uses a minimum approximate column minimum degree ordering method. Not user-callable. */ template <typename IndexType> static IndexType find_ordering /* return the number of garbage collections */ ( /* === Parameters ======================================================= */ IndexType n_row, /* number of rows of A */ IndexType n_col, /* number of columns of A */ IndexType Alen, /* size of A, 2*nnz + n_col or larger */ Colamd_Row<IndexType> Row [], /* of size n_row+1 */ colamd_col<IndexType> Col [], /* of size n_col+1 */ IndexType A [], /* column form and row form of A */ IndexType head [], /* of size n_col+1 */ IndexType n_col2, /* Remaining columns to order */ IndexType max_deg, /* Maximum row degree */ IndexType pfree /* index of first free slot (2*nnz on entry) */ ) { /* === Local variables ================================================== */ IndexType k ; /* current pivot ordering step */ IndexType pivot_col ; /* current pivot column */ IndexType *cp ; /* a column pointer */ IndexType *rp ; /* a row pointer */ IndexType pivot_row ; /* current pivot row */ IndexType *new_cp ; /* modified column pointer */ IndexType *new_rp ; /* modified row pointer */ IndexType pivot_row_start ; /* pointer to start of pivot row */ IndexType pivot_row_degree ; /* number of columns in pivot row */ IndexType pivot_row_length ; /* number of supercolumns in pivot row */ IndexType pivot_col_score ; /* score of pivot column */ IndexType needed_memory ; /* free space needed for pivot row */ IndexType *cp_end ; /* pointer to the end of a column */ IndexType *rp_end ; /* pointer to the end of a row */ IndexType row ; /* a row index */ IndexType col ; /* a column index */ IndexType max_score ; /* maximum possible score */ IndexType cur_score ; /* score of current column */ unsigned int hash ; /* hash value for supernode detection */ IndexType head_column ; /* head of hash bucket */ IndexType first_col ; /* first column in hash bucket */ IndexType tag_mark ; /* marker value for mark array */ IndexType row_mark ; /* Row [row].shared2.mark */ IndexType 
set_difference ; /* set difference size of row with pivot row */ IndexType min_score ; /* smallest column score */ IndexType col_thickness ; /* "thickness" (no. of columns in a supercol) */ IndexType max_mark ; /* maximum value of tag_mark */ IndexType pivot_col_thickness ; /* number of columns represented by pivot col */ IndexType prev_col ; /* Used by Dlist operations. */ IndexType next_col ; /* Used by Dlist operations. */ IndexType ngarbage ; /* number of garbage collections performed */ /* === Initialization and clear mark ==================================== */ max_mark = INT_MAX - n_col ; /* INT_MAX defined in <limits.h> */ tag_mark = Eigen::internal::clear_mark (n_row, Row) ; min_score = 0 ; ngarbage = 0 ; COLAMD_DEBUG1 (("colamd: Ordering, n_col2=%d\n", n_col2)) ; /* === Order the columns ================================================ */ for (k = 0 ; k < n_col2 ; /* 'k' is incremented below */) { /* === Select pivot column, and order it ============================ */ /* make sure degree list isn't empty */ COLAMD_ASSERT (min_score >= 0) ; COLAMD_ASSERT (min_score <= n_col) ; COLAMD_ASSERT (head [min_score] >= COLAMD_EMPTY) ; /* get pivot column from head of minimum degree list */ while (min_score < n_col && head [min_score] == COLAMD_EMPTY) { min_score++ ; } pivot_col = head [min_score] ; COLAMD_ASSERT (pivot_col >= 0 && pivot_col <= n_col) ; next_col = Col [pivot_col].shared4.degree_next ; head [min_score] = next_col ; if (next_col != COLAMD_EMPTY) { Col [next_col].shared3.prev = COLAMD_EMPTY ; } COLAMD_ASSERT (COL_IS_ALIVE (pivot_col)) ; COLAMD_DEBUG3 (("Pivot col: %d\n", pivot_col)) ; /* remember score for defrag check */ pivot_col_score = Col [pivot_col].shared2.score ; /* the pivot column is the kth column in the pivot order */ Col [pivot_col].shared2.order = k ; /* increment order count by column thickness */ pivot_col_thickness = Col [pivot_col].shared1.thickness ; k += pivot_col_thickness ; COLAMD_ASSERT (pivot_col_thickness > 0) ; /* === Garbage_collection, if necessary ============================= */ needed_memory = numext::mini(pivot_col_score, n_col - k) ; if (pfree + needed_memory >= Alen) { pfree = Eigen::internal::garbage_collection (n_row, n_col, Row, Col, A, &A [pfree]) ; ngarbage++ ; /* after garbage collection we will have enough */ COLAMD_ASSERT (pfree + needed_memory < Alen) ; /* garbage collection has wiped out the Row[].shared2.mark array */ tag_mark = Eigen::internal::clear_mark (n_row, Row) ; } /* === Compute pivot row pattern ==================================== */ /* get starting location for this new merged row */ pivot_row_start = pfree ; /* initialize new row counts to zero */ pivot_row_degree = 0 ; /* tag pivot column as having been visited so it isn't included */ /* in merged pivot row */ Col [pivot_col].shared1.thickness = -pivot_col_thickness ; /* pivot row is the union of all rows in the pivot column pattern */ cp = &A [Col [pivot_col].start] ; cp_end = cp + Col [pivot_col].length ; while (cp < cp_end) { /* get a row */ row = *cp++ ; COLAMD_DEBUG4 (("Pivot col pattern %d %d\n", ROW_IS_ALIVE (row), row)) ; /* skip if row is dead */ if (ROW_IS_DEAD (row)) { continue ; } rp = &A [Row [row].start] ; rp_end = rp + Row [row].length ; while (rp < rp_end) { /* get a column */ col = *rp++ ; /* add the column, if alive and untagged */ col_thickness = Col [col].shared1.thickness ; if (col_thickness > 0 && COL_IS_ALIVE (col)) { /* tag column in pivot row */ Col [col].shared1.thickness = -col_thickness ; COLAMD_ASSERT (pfree < Alen) ; /* place column in 
pivot row */ A [pfree++] = col ; pivot_row_degree += col_thickness ; } } } /* clear tag on pivot column */ Col [pivot_col].shared1.thickness = pivot_col_thickness ; max_deg = numext::maxi(max_deg, pivot_row_degree) ; /* === Kill all rows used to construct pivot row ==================== */ /* also kill pivot row, temporarily */ cp = &A [Col [pivot_col].start] ; cp_end = cp + Col [pivot_col].length ; while (cp < cp_end) { /* may be killing an already dead row */ row = *cp++ ; COLAMD_DEBUG3 (("Kill row in pivot col: %d\n", row)) ; KILL_ROW (row) ; } /* === Select a row index to use as the new pivot row =============== */ pivot_row_length = pfree - pivot_row_start ; if (pivot_row_length > 0) { /* pick the "pivot" row arbitrarily (first row in col) */ pivot_row = A [Col [pivot_col].start] ; COLAMD_DEBUG3 (("Pivotal row is %d\n", pivot_row)) ; } else { /* there is no pivot row, since it is of zero length */ pivot_row = COLAMD_EMPTY ; COLAMD_ASSERT (pivot_row_length == 0) ; } COLAMD_ASSERT (Col [pivot_col].length > 0 || pivot_row_length == 0) ; /* === Approximate degree computation =============================== */ /* Here begins the computation of the approximate degree. The column */ /* score is the sum of the pivot row "length", plus the size of the */ /* set differences of each row in the column minus the pattern of the */ /* pivot row itself. The column ("thickness") itself is also */ /* excluded from the column score (we thus use an approximate */ /* external degree). */ /* The time taken by the following code (compute set differences, and */ /* add them up) is proportional to the size of the data structure */ /* being scanned - that is, the sum of the sizes of each column in */ /* the pivot row. Thus, the amortized time to compute a column score */ /* is proportional to the size of that column (where size, in this */ /* context, is the column "length", or the number of row indices */ /* in that column). The number of row indices in a column is */ /* monotonically non-decreasing, from the length of the original */ /* column on input to colamd. */ /* === Compute set differences ====================================== */ COLAMD_DEBUG3 (("** Computing set differences phase. **\n")) ; /* pivot row is currently dead - it will be revived later. 
*/ COLAMD_DEBUG3 (("Pivot row: ")) ; /* for each column in pivot row */ rp = &A [pivot_row_start] ; rp_end = rp + pivot_row_length ; while (rp < rp_end) { col = *rp++ ; COLAMD_ASSERT (COL_IS_ALIVE (col) && col != pivot_col) ; COLAMD_DEBUG3 (("Col: %d\n", col)) ; /* clear tags used to construct pivot row pattern */ col_thickness = -Col [col].shared1.thickness ; COLAMD_ASSERT (col_thickness > 0) ; Col [col].shared1.thickness = col_thickness ; /* === Remove column from degree list =========================== */ cur_score = Col [col].shared2.score ; prev_col = Col [col].shared3.prev ; next_col = Col [col].shared4.degree_next ; COLAMD_ASSERT (cur_score >= 0) ; COLAMD_ASSERT (cur_score <= n_col) ; COLAMD_ASSERT (cur_score >= COLAMD_EMPTY) ; if (prev_col == COLAMD_EMPTY) { head [cur_score] = next_col ; } else { Col [prev_col].shared4.degree_next = next_col ; } if (next_col != COLAMD_EMPTY) { Col [next_col].shared3.prev = prev_col ; } /* === Scan the column ========================================== */ cp = &A [Col [col].start] ; cp_end = cp + Col [col].length ; while (cp < cp_end) { /* get a row */ row = *cp++ ; row_mark = Row [row].shared2.mark ; /* skip if dead */ if (ROW_IS_MARKED_DEAD (row_mark)) { continue ; } COLAMD_ASSERT (row != pivot_row) ; set_difference = row_mark - tag_mark ; /* check if the row has been seen yet */ if (set_difference < 0) { COLAMD_ASSERT (Row [row].shared1.degree <= max_deg) ; set_difference = Row [row].shared1.degree ; } /* subtract column thickness from this row's set difference */ set_difference -= col_thickness ; COLAMD_ASSERT (set_difference >= 0) ; /* absorb this row if the set difference becomes zero */ if (set_difference == 0) { COLAMD_DEBUG3 (("aggressive absorption. Row: %d\n", row)) ; KILL_ROW (row) ; } else { /* save the new mark */ Row [row].shared2.mark = set_difference + tag_mark ; } } } /* === Add up set differences for each column ======================= */ COLAMD_DEBUG3 (("** Adding set differences phase. **\n")) ; /* for each column in pivot row */ rp = &A [pivot_row_start] ; rp_end = rp + pivot_row_length ; while (rp < rp_end) { /* get a column */ col = *rp++ ; COLAMD_ASSERT (COL_IS_ALIVE (col) && col != pivot_col) ; hash = 0 ; cur_score = 0 ; cp = &A [Col [col].start] ; /* compact the column */ new_cp = cp ; cp_end = cp + Col [col].length ; COLAMD_DEBUG4 (("Adding set diffs for Col: %d.\n", col)) ; while (cp < cp_end) { /* get a row */ row = *cp++ ; COLAMD_ASSERT(row >= 0 && row < n_row) ; row_mark = Row [row].shared2.mark ; /* skip if dead */ if (ROW_IS_MARKED_DEAD (row_mark)) { continue ; } COLAMD_ASSERT (row_mark > tag_mark) ; /* compact the column */ *new_cp++ = row ; /* compute hash function */ hash += row ; /* add set difference */ cur_score += row_mark - tag_mark ; /* integer overflow... */ cur_score = numext::mini(cur_score, n_col) ; } /* recompute the column's length */ Col [col].length = (IndexType) (new_cp - &A [Col [col].start]) ; /* === Further mass elimination ================================= */ if (Col [col].length == 0) { COLAMD_DEBUG4 (("further mass elimination. 
Col: %d\n", col)) ; /* nothing left but the pivot row in this column */ KILL_PRINCIPAL_COL (col) ; pivot_row_degree -= Col [col].shared1.thickness ; COLAMD_ASSERT (pivot_row_degree >= 0) ; /* order it */ Col [col].shared2.order = k ; /* increment order count by column thickness */ k += Col [col].shared1.thickness ; } else { /* === Prepare for supercolumn detection ==================== */ COLAMD_DEBUG4 (("Preparing supercol detection for Col: %d.\n", col)) ; /* save score so far */ Col [col].shared2.score = cur_score ; /* add column to hash table, for supercolumn detection */ hash %= n_col + 1 ; COLAMD_DEBUG4 ((" Hash = %d, n_col = %d.\n", hash, n_col)) ; COLAMD_ASSERT (hash <= n_col) ; head_column = head [hash] ; if (head_column > COLAMD_EMPTY) { /* degree list "hash" is non-empty, use prev (shared3) of */ /* first column in degree list as head of hash bucket */ first_col = Col [head_column].shared3.headhash ; Col [head_column].shared3.headhash = col ; } else { /* degree list "hash" is empty, use head as hash bucket */ first_col = - (head_column + 2) ; head [hash] = - (col + 2) ; } Col [col].shared4.hash_next = first_col ; /* save hash function in Col [col].shared3.hash */ Col [col].shared3.hash = (IndexType) hash ; COLAMD_ASSERT (COL_IS_ALIVE (col)) ; } } /* The approximate external column degree is now computed. */ /* === Supercolumn detection ======================================== */ COLAMD_DEBUG3 (("** Supercolumn detection phase. **\n")) ; Eigen::internal::detect_super_cols (Col, A, head, pivot_row_start, pivot_row_length) ; /* === Kill the pivotal column ====================================== */ KILL_PRINCIPAL_COL (pivot_col) ; /* === Clear mark =================================================== */ tag_mark += (max_deg + 1) ; if (tag_mark >= max_mark) { COLAMD_DEBUG2 (("clearing tag_mark\n")) ; tag_mark = Eigen::internal::clear_mark (n_row, Row) ; } /* === Finalize the new pivot row, and column scores ================ */ COLAMD_DEBUG3 (("** Finalize scores phase. **\n")) ; /* for each column in pivot row */ rp = &A [pivot_row_start] ; /* compact the pivot row */ new_rp = rp ; rp_end = rp + pivot_row_length ; while (rp < rp_end) { col = *rp++ ; /* skip dead columns */ if (COL_IS_DEAD (col)) { continue ; } *new_rp++ = col ; /* add new pivot row to column */ A [Col [col].start + (Col [col].length++)] = pivot_row ; /* retrieve score so far and add on pivot row's degree. */ /* (we wait until here for this in case the pivot */ /* row's degree was reduced due to mass elimination). 
*/ cur_score = Col [col].shared2.score + pivot_row_degree ; /* calculate the max possible score as the number of */ /* external columns minus the 'k' value minus the */ /* columns thickness */ max_score = n_col - k - Col [col].shared1.thickness ; /* make the score the external degree of the union-of-rows */ cur_score -= Col [col].shared1.thickness ; /* make sure score is less or equal than the max score */ cur_score = numext::mini(cur_score, max_score) ; COLAMD_ASSERT (cur_score >= 0) ; /* store updated score */ Col [col].shared2.score = cur_score ; /* === Place column back in degree list ========================= */ COLAMD_ASSERT (min_score >= 0) ; COLAMD_ASSERT (min_score <= n_col) ; COLAMD_ASSERT (cur_score >= 0) ; COLAMD_ASSERT (cur_score <= n_col) ; COLAMD_ASSERT (head [cur_score] >= COLAMD_EMPTY) ; next_col = head [cur_score] ; Col [col].shared4.degree_next = next_col ; Col [col].shared3.prev = COLAMD_EMPTY ; if (next_col != COLAMD_EMPTY) { Col [next_col].shared3.prev = col ; } head [cur_score] = col ; /* see if this score is less than current min */ min_score = numext::mini(min_score, cur_score) ; } /* === Resurrect the new pivot row ================================== */ if (pivot_row_degree > 0) { /* update pivot row length to reflect any cols that were killed */ /* during super-col detection and mass elimination */ Row [pivot_row].start = pivot_row_start ; Row [pivot_row].length = (IndexType) (new_rp - &A[pivot_row_start]) ; Row [pivot_row].shared1.degree = pivot_row_degree ; Row [pivot_row].shared2.mark = 0 ; /* pivot row is no longer dead */ } } /* === All principal columns have now been ordered ====================== */ return (ngarbage) ; } /* ========================================================================== */ /* === order_children ======================================================= */ /* ========================================================================== */ /* The find_ordering routine has ordered all of the principal columns (the representatives of the supercolumns). The non-principal columns have not yet been ordered. This routine orders those columns by walking up the parent tree (a column is a child of the column which absorbed it). The final permutation vector is then placed in p [0 ... n_col-1], with p [0] being the first column, and p [n_col-1] being the last. It doesn't look like it at first glance, but be assured that this routine takes time linear in the number of columns. Although not immediately obvious, the time taken by this routine is O (n_col), that is, linear in the number of columns. Not user-callable. */ template <typename IndexType> static inline void order_children ( /* === Parameters ======================================================= */ IndexType n_col, /* number of columns of A */ colamd_col<IndexType> Col [], /* of size n_col+1 */ IndexType p [] /* p [0 ... 
n_col-1] is the column permutation*/ ) { /* === Local variables ================================================== */ IndexType i ; /* loop counter for all columns */ IndexType c ; /* column index */ IndexType parent ; /* index of column's parent */ IndexType order ; /* column's order */ /* === Order each non-principal column ================================== */ for (i = 0 ; i < n_col ; i++) { /* find an un-ordered non-principal column */ COLAMD_ASSERT (COL_IS_DEAD (i)) ; if (!COL_IS_DEAD_PRINCIPAL (i) && Col [i].shared2.order == COLAMD_EMPTY) { parent = i ; /* once found, find its principal parent */ do { parent = Col [parent].shared1.parent ; } while (!COL_IS_DEAD_PRINCIPAL (parent)) ; /* now, order all un-ordered non-principal columns along path */ /* to this parent. collapse tree at the same time */ c = i ; /* get order of parent */ order = Col [parent].shared2.order ; do { COLAMD_ASSERT (Col [c].shared2.order == COLAMD_EMPTY) ; /* order this column */ Col [c].shared2.order = order++ ; /* collaps tree */ Col [c].shared1.parent = parent ; /* get immediate parent of this column */ c = Col [c].shared1.parent ; /* continue until we hit an ordered column. There are */ /* guarranteed not to be anymore unordered columns */ /* above an ordered column */ } while (Col [c].shared2.order == COLAMD_EMPTY) ; /* re-order the super_col parent to largest order for this group */ Col [parent].shared2.order = order ; } } /* === Generate the permutation ========================================= */ for (c = 0 ; c < n_col ; c++) { p [Col [c].shared2.order] = c ; } } /* ========================================================================== */ /* === detect_super_cols ==================================================== */ /* ========================================================================== */ /* Detects supercolumns by finding matches between columns in the hash buckets. Check amongst columns in the set A [row_start ... row_start + row_length-1]. The columns under consideration are currently *not* in the degree lists, and have already been placed in the hash buckets. The hash bucket for columns whose hash function is equal to h is stored as follows: if head [h] is >= 0, then head [h] contains a degree list, so: head [h] is the first column in degree bucket h. Col [head [h]].headhash gives the first column in hash bucket h. otherwise, the degree list is empty, and: -(head [h] + 2) is the first column in hash bucket h. For a column c in a hash bucket, Col [c].shared3.prev is NOT a "previous column" pointer. Col [c].shared3.hash is used instead as the hash number for that column. The value of Col [c].shared4.hash_next is the next column in the same hash bucket. Assuming no, or "few" hash collisions, the time taken by this routine is linear in the sum of the sizes (lengths) of each column whose score has just been computed in the approximate degree computation. Not user-callable. 
*/ template <typename IndexType> static void detect_super_cols ( /* === Parameters ======================================================= */ colamd_col<IndexType> Col [], /* of size n_col+1 */ IndexType A [], /* row indices of A */ IndexType head [], /* head of degree lists and hash buckets */ IndexType row_start, /* pointer to set of columns to check */ IndexType row_length /* number of columns to check */ ) { /* === Local variables ================================================== */ IndexType hash ; /* hash value for a column */ IndexType *rp ; /* pointer to a row */ IndexType c ; /* a column index */ IndexType super_c ; /* column index of the column to absorb into */ IndexType *cp1 ; /* column pointer for column super_c */ IndexType *cp2 ; /* column pointer for column c */ IndexType length ; /* length of column super_c */ IndexType prev_c ; /* column preceding c in hash bucket */ IndexType i ; /* loop counter */ IndexType *rp_end ; /* pointer to the end of the row */ IndexType col ; /* a column index in the row to check */ IndexType head_column ; /* first column in hash bucket or degree list */ IndexType first_col ; /* first column in hash bucket */ /* === Consider each column in the row ================================== */ rp = &A [row_start] ; rp_end = rp + row_length ; while (rp < rp_end) { col = *rp++ ; if (COL_IS_DEAD (col)) { continue ; } /* get hash number for this column */ hash = Col [col].shared3.hash ; COLAMD_ASSERT (hash <= n_col) ; /* === Get the first column in this hash bucket ===================== */ head_column = head [hash] ; if (head_column > COLAMD_EMPTY) { first_col = Col [head_column].shared3.headhash ; } else { first_col = - (head_column + 2) ; } /* === Consider each column in the hash bucket ====================== */ for (super_c = first_col ; super_c != COLAMD_EMPTY ; super_c = Col [super_c].shared4.hash_next) { COLAMD_ASSERT (COL_IS_ALIVE (super_c)) ; COLAMD_ASSERT (Col [super_c].shared3.hash == hash) ; length = Col [super_c].length ; /* prev_c is the column preceding column c in the hash bucket */ prev_c = super_c ; /* === Compare super_c with all columns after it ================ */ for (c = Col [super_c].shared4.hash_next ; c != COLAMD_EMPTY ; c = Col [c].shared4.hash_next) { COLAMD_ASSERT (c != super_c) ; COLAMD_ASSERT (COL_IS_ALIVE (c)) ; COLAMD_ASSERT (Col [c].shared3.hash == hash) ; /* not identical if lengths or scores are different */ if (Col [c].length != length || Col [c].shared2.score != Col [super_c].shared2.score) { prev_c = c ; continue ; } /* compare the two columns */ cp1 = &A [Col [super_c].start] ; cp2 = &A [Col [c].start] ; for (i = 0 ; i < length ; i++) { /* the columns are "clean" (no dead rows) */ COLAMD_ASSERT (ROW_IS_ALIVE (*cp1)) ; COLAMD_ASSERT (ROW_IS_ALIVE (*cp2)) ; /* row indices will same order for both supercols, */ /* no gather scatter nessasary */ if (*cp1++ != *cp2++) { break ; } } /* the two columns are different if the for-loop "broke" */ if (i != length) { prev_c = c ; continue ; } /* === Got it! 
two columns are identical =================== */ COLAMD_ASSERT (Col [c].shared2.score == Col [super_c].shared2.score) ; Col [super_c].shared1.thickness += Col [c].shared1.thickness ; Col [c].shared1.parent = super_c ; KILL_NON_PRINCIPAL_COL (c) ; /* order c later, in order_children() */ Col [c].shared2.order = COLAMD_EMPTY ; /* remove c from hash bucket */ Col [prev_c].shared4.hash_next = Col [c].shared4.hash_next ; } } /* === Empty this hash bucket ======================================= */ if (head_column > COLAMD_EMPTY) { /* corresponding degree list "hash" is not empty */ Col [head_column].shared3.headhash = COLAMD_EMPTY ; } else { /* corresponding degree list "hash" is empty */ head [hash] = COLAMD_EMPTY ; } } } /* ========================================================================== */ /* === garbage_collection =================================================== */ /* ========================================================================== */ /* Defragments and compacts columns and rows in the workspace A. Used when all avaliable memory has been used while performing row merging. Returns the index of the first free position in A, after garbage collection. The time taken by this routine is linear is the size of the array A, which is itself linear in the number of nonzeros in the input matrix. Not user-callable. */ template <typename IndexType> static IndexType garbage_collection /* returns the new value of pfree */ ( /* === Parameters ======================================================= */ IndexType n_row, /* number of rows */ IndexType n_col, /* number of columns */ Colamd_Row<IndexType> Row [], /* row info */ colamd_col<IndexType> Col [], /* column info */ IndexType A [], /* A [0 ... Alen-1] holds the matrix */ IndexType *pfree /* &A [0] ... pfree is in use */ ) { /* === Local variables ================================================== */ IndexType *psrc ; /* source pointer */ IndexType *pdest ; /* destination pointer */ IndexType j ; /* counter */ IndexType r ; /* a row index */ IndexType c ; /* a column index */ IndexType length ; /* length of a row or column */ /* === Defragment the columns =========================================== */ pdest = &A[0] ; for (c = 0 ; c < n_col ; c++) { if (COL_IS_ALIVE (c)) { psrc = &A [Col [c].start] ; /* move and compact the column */ COLAMD_ASSERT (pdest <= psrc) ; Col [c].start = (IndexType) (pdest - &A [0]) ; length = Col [c].length ; for (j = 0 ; j < length ; j++) { r = *psrc++ ; if (ROW_IS_ALIVE (r)) { *pdest++ = r ; } } Col [c].length = (IndexType) (pdest - &A [Col [c].start]) ; } } /* === Prepare to defragment the rows =================================== */ for (r = 0 ; r < n_row ; r++) { if (ROW_IS_ALIVE (r)) { if (Row [r].length == 0) { /* this row is of zero length. cannot compact it, so kill it */ COLAMD_DEBUG3 (("Defrag row kill\n")) ; KILL_ROW (r) ; } else { /* save first column index in Row [r].shared2.first_column */ psrc = &A [Row [r].start] ; Row [r].shared2.first_column = *psrc ; COLAMD_ASSERT (ROW_IS_ALIVE (r)) ; /* flag the start of the row with the one's complement of row */ *psrc = ONES_COMPLEMENT (r) ; } } } /* === Defragment the rows ============================================== */ psrc = pdest ; while (psrc < pfree) { /* find a negative number ... 
the start of a row */ if (*psrc++ < 0) { psrc-- ; /* get the row index */ r = ONES_COMPLEMENT (*psrc) ; COLAMD_ASSERT (r >= 0 && r < n_row) ; /* restore first column index */ *psrc = Row [r].shared2.first_column ; COLAMD_ASSERT (ROW_IS_ALIVE (r)) ; /* move and compact the row */ COLAMD_ASSERT (pdest <= psrc) ; Row [r].start = (IndexType) (pdest - &A [0]) ; length = Row [r].length ; for (j = 0 ; j < length ; j++) { c = *psrc++ ; if (COL_IS_ALIVE (c)) { *pdest++ = c ; } } Row [r].length = (IndexType) (pdest - &A [Row [r].start]) ; } } /* ensure we found all the rows */ COLAMD_ASSERT (debug_rows == 0) ; /* === Return the new value of pfree ==================================== */ return ((IndexType) (pdest - &A [0])) ; } /* ========================================================================== */ /* === clear_mark =========================================================== */ /* ========================================================================== */ /* Clears the Row [].shared2.mark array, and returns the new tag_mark. Return value is the new tag_mark. Not user-callable. */ template <typename IndexType> static inline IndexType clear_mark /* return the new value for tag_mark */ ( /* === Parameters ======================================================= */ IndexType n_row, /* number of rows in A */ Colamd_Row<IndexType> Row [] /* Row [0 ... n_row-1].shared2.mark is set to zero */ ) { /* === Local variables ================================================== */ IndexType r ; for (r = 0 ; r < n_row ; r++) { if (ROW_IS_ALIVE (r)) { Row [r].shared2.mark = 0 ; } } return (1) ; } } // namespace internal #endif
Unknown
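The routines above (supercolumn detection, garbage collection, mark clearing) are internal to COLAMD and not user-callable; the user-facing entry point in Eigen is COLAMDOrdering from the OrderingMethods module. Below is a minimal, hedged sketch of how a fill-reducing column permutation would typically be requested; the matrix values are illustrative only.

#include <Eigen/SparseCore>
#include <Eigen/OrderingMethods>
#include <iostream>
#include <vector>

int main()
{
  using SpMat = Eigen::SparseMatrix<double>;

  // Assemble a small sparse matrix from triplets (values are illustrative).
  std::vector<Eigen::Triplet<double>> entries = {
    {0, 0, 4.0}, {1, 0, 1.0}, {0, 1, 1.0}, {1, 1, 3.0},
    {2, 2, 2.0}, {3, 2, 1.0}, {2, 3, 1.0}, {3, 3, 5.0}, {0, 3, 0.5}
  };
  SpMat A(4, 4);
  A.setFromTriplets(entries.begin(), entries.end());
  A.makeCompressed();   // COLAMDOrdering requires compressed storage

  // Compute a fill-reducing column permutation; the supercolumn detection and
  // garbage collection shown above run inside this call.
  Eigen::COLAMDOrdering<int> ordering;
  Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> perm;
  ordering(A, perm);

  std::cout << "COLAMD permutation: " << perm.indices().transpose() << std::endl;
  return 0;
}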
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/CholmodSupport/CholmodSupport.h
.h
22,307
640
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CHOLMODSUPPORT_H #define EIGEN_CHOLMODSUPPORT_H namespace Eigen { namespace internal { template<typename Scalar> struct cholmod_configure_matrix; template<> struct cholmod_configure_matrix<double> { template<typename CholmodType> static void run(CholmodType& mat) { mat.xtype = CHOLMOD_REAL; mat.dtype = CHOLMOD_DOUBLE; } }; template<> struct cholmod_configure_matrix<std::complex<double> > { template<typename CholmodType> static void run(CholmodType& mat) { mat.xtype = CHOLMOD_COMPLEX; mat.dtype = CHOLMOD_DOUBLE; } }; // Other scalar types are not yet suppotred by Cholmod // template<> struct cholmod_configure_matrix<float> { // template<typename CholmodType> // static void run(CholmodType& mat) { // mat.xtype = CHOLMOD_REAL; // mat.dtype = CHOLMOD_SINGLE; // } // }; // // template<> struct cholmod_configure_matrix<std::complex<float> > { // template<typename CholmodType> // static void run(CholmodType& mat) { // mat.xtype = CHOLMOD_COMPLEX; // mat.dtype = CHOLMOD_SINGLE; // } // }; } // namespace internal /** Wraps the Eigen sparse matrix \a mat into a Cholmod sparse matrix object. * Note that the data are shared. */ template<typename _Scalar, int _Options, typename _StorageIndex> cholmod_sparse viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_StorageIndex> > mat) { cholmod_sparse res; res.nzmax = mat.nonZeros(); res.nrow = mat.rows(); res.ncol = mat.cols(); res.p = mat.outerIndexPtr(); res.i = mat.innerIndexPtr(); res.x = mat.valuePtr(); res.z = 0; res.sorted = 1; if(mat.isCompressed()) { res.packed = 1; res.nz = 0; } else { res.packed = 0; res.nz = mat.innerNonZeroPtr(); } res.dtype = 0; res.stype = -1; if (internal::is_same<_StorageIndex,int>::value) { res.itype = CHOLMOD_INT; } else if (internal::is_same<_StorageIndex,long>::value) { res.itype = CHOLMOD_LONG; } else { eigen_assert(false && "Index type not supported yet"); } // setup res.xtype internal::cholmod_configure_matrix<_Scalar>::run(res); res.stype = 0; return res; } template<typename _Scalar, int _Options, typename _Index> const cholmod_sparse viewAsCholmod(const SparseMatrix<_Scalar,_Options,_Index>& mat) { cholmod_sparse res = viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_Index> >(mat.const_cast_derived())); return res; } template<typename _Scalar, int _Options, typename _Index> const cholmod_sparse viewAsCholmod(const SparseVector<_Scalar,_Options,_Index>& mat) { cholmod_sparse res = viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_Index> >(mat.const_cast_derived())); return res; } /** Returns a view of the Eigen sparse matrix \a mat as Cholmod sparse matrix. * The data are not copied but shared. */ template<typename _Scalar, int _Options, typename _Index, unsigned int UpLo> cholmod_sparse viewAsCholmod(const SparseSelfAdjointView<const SparseMatrix<_Scalar,_Options,_Index>, UpLo>& mat) { cholmod_sparse res = viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_Index> >(mat.matrix().const_cast_derived())); if(UpLo==Upper) res.stype = 1; if(UpLo==Lower) res.stype = -1; return res; } /** Returns a view of the Eigen \b dense matrix \a mat as Cholmod dense matrix. * The data are not copied but shared. 
*/ template<typename Derived> cholmod_dense viewAsCholmod(MatrixBase<Derived>& mat) { EIGEN_STATIC_ASSERT((internal::traits<Derived>::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); typedef typename Derived::Scalar Scalar; cholmod_dense res; res.nrow = mat.rows(); res.ncol = mat.cols(); res.nzmax = res.nrow * res.ncol; res.d = Derived::IsVectorAtCompileTime ? mat.derived().size() : mat.derived().outerStride(); res.x = (void*)(mat.derived().data()); res.z = 0; internal::cholmod_configure_matrix<Scalar>::run(res); return res; } /** Returns a view of the Cholmod sparse matrix \a cm as an Eigen sparse matrix. * The data are not copied but shared. */ template<typename Scalar, int Flags, typename StorageIndex> MappedSparseMatrix<Scalar,Flags,StorageIndex> viewAsEigen(cholmod_sparse& cm) { return MappedSparseMatrix<Scalar,Flags,StorageIndex> (cm.nrow, cm.ncol, static_cast<StorageIndex*>(cm.p)[cm.ncol], static_cast<StorageIndex*>(cm.p), static_cast<StorageIndex*>(cm.i),static_cast<Scalar*>(cm.x) ); } enum CholmodMode { CholmodAuto, CholmodSimplicialLLt, CholmodSupernodalLLt, CholmodLDLt }; /** \ingroup CholmodSupport_Module * \class CholmodBase * \brief The base class for the direct Cholesky factorization of Cholmod * \sa class CholmodSupernodalLLT, class CholmodSimplicialLDLT, class CholmodSimplicialLLT */ template<typename _MatrixType, int _UpLo, typename Derived> class CholmodBase : public SparseSolverBase<Derived> { protected: typedef SparseSolverBase<Derived> Base; using Base::derived; using Base::m_isInitialized; public: typedef _MatrixType MatrixType; enum { UpLo = _UpLo }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef MatrixType CholMatrixType; typedef typename MatrixType::StorageIndex StorageIndex; enum { ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; public: CholmodBase() : m_cholmodFactor(0), m_info(Success), m_factorizationIsOk(false), m_analysisIsOk(false) { EIGEN_STATIC_ASSERT((internal::is_same<double,RealScalar>::value), CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY); m_shiftOffset[0] = m_shiftOffset[1] = 0.0; cholmod_start(&m_cholmod); } explicit CholmodBase(const MatrixType& matrix) : m_cholmodFactor(0), m_info(Success), m_factorizationIsOk(false), m_analysisIsOk(false) { EIGEN_STATIC_ASSERT((internal::is_same<double,RealScalar>::value), CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY); m_shiftOffset[0] = m_shiftOffset[1] = 0.0; cholmod_start(&m_cholmod); compute(matrix); } ~CholmodBase() { if(m_cholmodFactor) cholmod_free_factor(&m_cholmodFactor, &m_cholmod); cholmod_finish(&m_cholmod); } inline StorageIndex cols() const { return internal::convert_index<StorageIndex, Index>(m_cholmodFactor->n); } inline StorageIndex rows() const { return internal::convert_index<StorageIndex, Index>(m_cholmodFactor->n); } /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, * \c NumericalIssue if the matrix.appears to be negative. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "Decomposition is not initialized."); return m_info; } /** Computes the sparse Cholesky decomposition of \a matrix */ Derived& compute(const MatrixType& matrix) { analyzePattern(matrix); factorize(matrix); return derived(); } /** Performs a symbolic decomposition on the sparsity pattern of \a matrix. * * This function is particularly useful when solving for several problems having the same structure. 
* * \sa factorize() */ void analyzePattern(const MatrixType& matrix) { if(m_cholmodFactor) { cholmod_free_factor(&m_cholmodFactor, &m_cholmod); m_cholmodFactor = 0; } cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>()); m_cholmodFactor = cholmod_analyze(&A, &m_cholmod); this->m_isInitialized = true; this->m_info = Success; m_analysisIsOk = true; m_factorizationIsOk = false; } /** Performs a numeric decomposition of \a matrix * * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed. * * \sa analyzePattern() */ void factorize(const MatrixType& matrix) { eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>()); cholmod_factorize_p(&A, m_shiftOffset, 0, 0, m_cholmodFactor, &m_cholmod); // If the factorization failed, minor is the column at which it did. On success minor == n. this->m_info = (m_cholmodFactor->minor == m_cholmodFactor->n ? Success : NumericalIssue); m_factorizationIsOk = true; } /** Returns a reference to the Cholmod's configuration structure to get a full control over the performed operations. * See the Cholmod user guide for details. */ cholmod_common& cholmod() { return m_cholmod; } #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal */ template<typename Rhs,typename Dest> void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const { eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); const Index size = m_cholmodFactor->n; EIGEN_UNUSED_VARIABLE(size); eigen_assert(size==b.rows()); // Cholmod needs column-major stoarge without inner-stride, which corresponds to the default behavior of Ref. Ref<const Matrix<typename Rhs::Scalar,Dynamic,Dynamic,ColMajor> > b_ref(b.derived()); cholmod_dense b_cd = viewAsCholmod(b_ref); cholmod_dense* x_cd = cholmod_solve(CHOLMOD_A, m_cholmodFactor, &b_cd, &m_cholmod); if(!x_cd) { this->m_info = NumericalIssue; return; } // TODO optimize this copy by swapping when possible (be careful with alignment, etc.) dest = Matrix<Scalar,Dest::RowsAtCompileTime,Dest::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x),b.rows(),b.cols()); cholmod_free_dense(&x_cd, &m_cholmod); } /** \internal */ template<typename RhsDerived, typename DestDerived> void _solve_impl(const SparseMatrixBase<RhsDerived> &b, SparseMatrixBase<DestDerived> &dest) const { eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); const Index size = m_cholmodFactor->n; EIGEN_UNUSED_VARIABLE(size); eigen_assert(size==b.rows()); // note: cs stands for Cholmod Sparse Ref<SparseMatrix<typename RhsDerived::Scalar,ColMajor,typename RhsDerived::StorageIndex> > b_ref(b.const_cast_derived()); cholmod_sparse b_cs = viewAsCholmod(b_ref); cholmod_sparse* x_cs = cholmod_spsolve(CHOLMOD_A, m_cholmodFactor, &b_cs, &m_cholmod); if(!x_cs) { this->m_info = NumericalIssue; return; } // TODO optimize this copy by swapping when possible (be careful with alignment, etc.) dest.derived() = viewAsEigen<typename DestDerived::Scalar,ColMajor,typename DestDerived::StorageIndex>(*x_cs); cholmod_free_sparse(&x_cs, &m_cholmod); } #endif // EIGEN_PARSED_BY_DOXYGEN /** Sets the shift parameter that will be used to adjust the diagonal coefficients during the numerical factorization. 
* * During the numerical factorization, an offset term is added to the diagonal coefficients:\n * \c d_ii = \a offset + \c d_ii * * The default is \a offset=0. * * \returns a reference to \c *this. */ Derived& setShift(const RealScalar& offset) { m_shiftOffset[0] = double(offset); return derived(); } /** \returns the determinant of the underlying matrix from the current factorization */ Scalar determinant() const { using std::exp; return exp(logDeterminant()); } /** \returns the log determinant of the underlying matrix from the current factorization */ Scalar logDeterminant() const { using std::log; using numext::real; eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); RealScalar logDet = 0; Scalar *x = static_cast<Scalar*>(m_cholmodFactor->x); if (m_cholmodFactor->is_super) { // Supernodal factorization stored as a packed list of dense column-major blocs, // as described by the following structure: // super[k] == index of the first column of the j-th super node StorageIndex *super = static_cast<StorageIndex*>(m_cholmodFactor->super); // pi[k] == offset to the description of row indices StorageIndex *pi = static_cast<StorageIndex*>(m_cholmodFactor->pi); // px[k] == offset to the respective dense block StorageIndex *px = static_cast<StorageIndex*>(m_cholmodFactor->px); Index nb_super_nodes = m_cholmodFactor->nsuper; for (Index k=0; k < nb_super_nodes; ++k) { StorageIndex ncols = super[k + 1] - super[k]; StorageIndex nrows = pi[k + 1] - pi[k]; Map<const Array<Scalar,1,Dynamic>, 0, InnerStride<> > sk(x + px[k], ncols, InnerStride<>(nrows+1)); logDet += sk.real().log().sum(); } } else { // Simplicial factorization stored as standard CSC matrix. StorageIndex *p = static_cast<StorageIndex*>(m_cholmodFactor->p); Index size = m_cholmodFactor->n; for (Index k=0; k<size; ++k) logDet += log(real( x[p[k]] )); } if (m_cholmodFactor->is_ll) logDet *= 2.0; return logDet; }; template<typename Stream> void dumpMemory(Stream& /*s*/) {} protected: mutable cholmod_common m_cholmod; cholmod_factor* m_cholmodFactor; double m_shiftOffset[2]; mutable ComputationInfo m_info; int m_factorizationIsOk; int m_analysisIsOk; }; /** \ingroup CholmodSupport_Module * \class CholmodSimplicialLLT * \brief A simplicial direct Cholesky (LLT) factorization and solver based on Cholmod * * This class allows to solve for A.X = B sparse linear problems via a simplicial LL^T Cholesky factorization * using the Cholmod library. * This simplicial variant is equivalent to Eigen's built-in SimplicialLLT class. Therefore, it has little practical interest. * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices * X and B can be either dense or sparse. * * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower * or Upper. Default is Lower. * * \implsparsesolverconcept * * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. * * \warning Only double precision real and complex scalar types are supported by Cholmod. 
* * \sa \ref TutorialSparseSolverConcept, class CholmodSupernodalLLT, class SimplicialLLT */ template<typename _MatrixType, int _UpLo = Lower> class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT<_MatrixType, _UpLo> > { typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT> Base; using Base::m_cholmod; public: typedef _MatrixType MatrixType; CholmodSimplicialLLT() : Base() { init(); } CholmodSimplicialLLT(const MatrixType& matrix) : Base() { init(); this->compute(matrix); } ~CholmodSimplicialLLT() {} protected: void init() { m_cholmod.final_asis = 0; m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; m_cholmod.final_ll = 1; } }; /** \ingroup CholmodSupport_Module * \class CholmodSimplicialLDLT * \brief A simplicial direct Cholesky (LDLT) factorization and solver based on Cholmod * * This class allows to solve for A.X = B sparse linear problems via a simplicial LDL^T Cholesky factorization * using the Cholmod library. * This simplicial variant is equivalent to Eigen's built-in SimplicialLDLT class. Therefore, it has little practical interest. * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices * X and B can be either dense or sparse. * * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower * or Upper. Default is Lower. * * \implsparsesolverconcept * * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. * * \warning Only double precision real and complex scalar types are supported by Cholmod. * * \sa \ref TutorialSparseSolverConcept, class CholmodSupernodalLLT, class SimplicialLDLT */ template<typename _MatrixType, int _UpLo = Lower> class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT<_MatrixType, _UpLo> > { typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT> Base; using Base::m_cholmod; public: typedef _MatrixType MatrixType; CholmodSimplicialLDLT() : Base() { init(); } CholmodSimplicialLDLT(const MatrixType& matrix) : Base() { init(); this->compute(matrix); } ~CholmodSimplicialLDLT() {} protected: void init() { m_cholmod.final_asis = 1; m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; } }; /** \ingroup CholmodSupport_Module * \class CholmodSupernodalLLT * \brief A supernodal Cholesky (LLT) factorization and solver based on Cholmod * * This class allows to solve for A.X = B sparse linear problems via a supernodal LL^T Cholesky factorization * using the Cholmod library. * This supernodal variant performs best on dense enough problems, e.g., 3D FEM, or very high order 2D FEM. * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices * X and B can be either dense or sparse. * * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower * or Upper. Default is Lower. * * \implsparsesolverconcept * * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. * * \warning Only double precision real and complex scalar types are supported by Cholmod. 
* * \sa \ref TutorialSparseSolverConcept */ template<typename _MatrixType, int _UpLo = Lower> class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT<_MatrixType, _UpLo> > { typedef CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT> Base; using Base::m_cholmod; public: typedef _MatrixType MatrixType; CholmodSupernodalLLT() : Base() { init(); } CholmodSupernodalLLT(const MatrixType& matrix) : Base() { init(); this->compute(matrix); } ~CholmodSupernodalLLT() {} protected: void init() { m_cholmod.final_asis = 1; m_cholmod.supernodal = CHOLMOD_SUPERNODAL; } }; /** \ingroup CholmodSupport_Module * \class CholmodDecomposition * \brief A general Cholesky factorization and solver based on Cholmod * * This class allows to solve for A.X = B sparse linear problems via a LL^T or LDL^T Cholesky factorization * using the Cholmod library. The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices * X and B can be either dense or sparse. * * This variant permits to change the underlying Cholesky method at runtime. * On the other hand, it does not provide access to the result of the factorization. * The default is to let Cholmod automatically choose between a simplicial and supernodal factorization. * * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower * or Upper. Default is Lower. * * \implsparsesolverconcept * * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. * * \warning Only double precision real and complex scalar types are supported by Cholmod. * * \sa \ref TutorialSparseSolverConcept */ template<typename _MatrixType, int _UpLo = Lower> class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecomposition<_MatrixType, _UpLo> > { typedef CholmodBase<_MatrixType, _UpLo, CholmodDecomposition> Base; using Base::m_cholmod; public: typedef _MatrixType MatrixType; CholmodDecomposition() : Base() { init(); } CholmodDecomposition(const MatrixType& matrix) : Base() { init(); this->compute(matrix); } ~CholmodDecomposition() {} void setMode(CholmodMode mode) { switch(mode) { case CholmodAuto: m_cholmod.final_asis = 1; m_cholmod.supernodal = CHOLMOD_AUTO; break; case CholmodSimplicialLLt: m_cholmod.final_asis = 0; m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; m_cholmod.final_ll = 1; break; case CholmodSupernodalLLt: m_cholmod.final_asis = 1; m_cholmod.supernodal = CHOLMOD_SUPERNODAL; break; case CholmodLDLt: m_cholmod.final_asis = 1; m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; break; default: break; } } protected: void init() { m_cholmod.final_asis = 1; m_cholmod.supernodal = CHOLMOD_AUTO; } }; } // end namespace Eigen #endif // EIGEN_CHOLMODSUPPORT_H
Unknown
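A minimal usage sketch for the Cholmod wrappers above, assuming CHOLMOD from SuiteSparse is installed and linked (e.g. -lcholmod); the matrix and right-hand side are illustrative only. compute() performs analyzePattern() followed by factorize(), and solve() dispatches to cholmod_solve() as shown in _solve_impl().

#include <Eigen/SparseCore>
#include <Eigen/CholmodSupport>
#include <iostream>
#include <vector>

int main()
{
  using SpMat = Eigen::SparseMatrix<double>;

  // Assemble a small symmetric positive definite matrix (illustrative values).
  std::vector<Eigen::Triplet<double>> t = {
    {0, 0, 4.0}, {1, 1, 4.0}, {2, 2, 4.0},
    {0, 1, 1.0}, {1, 0, 1.0}, {1, 2, 1.0}, {2, 1, 1.0}
  };
  SpMat A(3, 3);
  A.setFromTriplets(t.begin(), t.end());

  Eigen::VectorXd b(3);
  b << 1.0, 2.0, 3.0;

  // Only the lower triangular part of A is referenced (the default _UpLo).
  Eigen::CholmodSupernodalLLT<SpMat> solver;
  solver.compute(A);
  if (solver.info() != Eigen::Success) {
    std::cerr << "Cholmod factorization failed" << std::endl;
    return 1;
  }

  Eigen::VectorXd x = solver.solve(b);
  std::cout << "x = " << x.transpose() << std::endl;
  std::cout << "log(det(A)) = " << solver.logDeterminant() << std::endl;
  return 0;
}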
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Cholesky/LLT_LAPACKE.h
.h
3,974
100
/* Copyright (c) 2011, Intel Corporation. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************** * Content : Eigen bindings to LAPACKe * LLt decomposition based on LAPACKE_?potrf function. ******************************************************************************** */ #ifndef EIGEN_LLT_LAPACKE_H #define EIGEN_LLT_LAPACKE_H namespace Eigen { namespace internal { template<typename Scalar> struct lapacke_llt; #define EIGEN_LAPACKE_LLT(EIGTYPE, BLASTYPE, LAPACKE_PREFIX) \ template<> struct lapacke_llt<EIGTYPE> \ { \ template<typename MatrixType> \ static inline Index potrf(MatrixType& m, char uplo) \ { \ lapack_int matrix_order; \ lapack_int size, lda, info, StorageOrder; \ EIGTYPE* a; \ eigen_assert(m.rows()==m.cols()); \ /* Set up parameters for ?potrf */ \ size = convert_index<lapack_int>(m.rows()); \ StorageOrder = MatrixType::Flags&RowMajorBit?RowMajor:ColMajor; \ matrix_order = StorageOrder==RowMajor ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \ a = &(m.coeffRef(0,0)); \ lda = convert_index<lapack_int>(m.outerStride()); \ \ info = LAPACKE_##LAPACKE_PREFIX##potrf( matrix_order, uplo, size, (BLASTYPE*)a, lda ); \ info = (info==0) ? -1 : info>0 ? 
info-1 : size; \ return info; \ } \ }; \ template<> struct llt_inplace<EIGTYPE, Lower> \ { \ template<typename MatrixType> \ static Index blocked(MatrixType& m) \ { \ return lapacke_llt<EIGTYPE>::potrf(m, 'L'); \ } \ template<typename MatrixType, typename VectorType> \ static Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \ { return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); } \ }; \ template<> struct llt_inplace<EIGTYPE, Upper> \ { \ template<typename MatrixType> \ static Index blocked(MatrixType& m) \ { \ return lapacke_llt<EIGTYPE>::potrf(m, 'U'); \ } \ template<typename MatrixType, typename VectorType> \ static Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \ { \ Transpose<MatrixType> matt(mat); \ return llt_inplace<EIGTYPE, Lower>::rankUpdate(matt, vec.conjugate(), sigma); \ } \ }; EIGEN_LAPACKE_LLT(double, double, d) EIGEN_LAPACKE_LLT(float, float, s) EIGEN_LAPACKE_LLT(dcomplex, lapack_complex_double, z) EIGEN_LAPACKE_LLT(scomplex, lapack_complex_float, c) } // end namespace internal } // end namespace Eigen #endif // EIGEN_LLT_LAPACKE_H
Unknown
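The specializations above are picked up transparently: when Eigen is compiled with EIGEN_USE_LAPACKE (and a LAPACKE implementation is linked), llt_inplace<Scalar, UpLo>::blocked() forwards to LAPACKE_?potrf, so ordinary LLT code needs no changes. A minimal sketch, with an SPD matrix built from random data:

// #define EIGEN_USE_LAPACKE   // usually passed on the compiler command line
#include <Eigen/Dense>
#include <iostream>

int main()
{
  // A = M M^T + n I is symmetric positive definite.
  const int n = 64;
  Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n);
  Eigen::MatrixXd A = M * M.transpose() + n * Eigen::MatrixXd::Identity(n, n);

  Eigen::LLT<Eigen::MatrixXd> llt(A);   // routes to ?potrf when LAPACKE is enabled
  if (llt.info() != Eigen::Success) {
    std::cerr << "matrix is not positive definite" << std::endl;
    return 1;
  }

  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);
  Eigen::VectorXd x = llt.solve(b);
  std::cout << "residual = " << (A * x - b).norm() << std::endl;
  return 0;
}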
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Cholesky/LDLT.h
.h
24,480
674
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2009 Keir Mierle <mierle@gmail.com> // Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2011 Timothy E. Holy <tim.holy@gmail.com > // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_LDLT_H #define EIGEN_LDLT_H namespace Eigen { namespace internal { template<typename MatrixType, int UpLo> struct LDLT_Traits; // PositiveSemiDef means positive semi-definite and non-zero; same for NegativeSemiDef enum SignMatrix { PositiveSemiDef, NegativeSemiDef, ZeroSign, Indefinite }; } /** \ingroup Cholesky_Module * * \class LDLT * * \brief Robust Cholesky decomposition of a matrix with pivoting * * \tparam _MatrixType the type of the matrix of which to compute the LDL^T Cholesky decomposition * \tparam _UpLo the triangular part that will be used for the decompositon: Lower (default) or Upper. * The other triangular part won't be read. * * Perform a robust Cholesky decomposition of a positive semidefinite or negative semidefinite * matrix \f$ A \f$ such that \f$ A = P^TLDL^*P \f$, where P is a permutation matrix, L * is lower triangular with a unit diagonal and D is a diagonal matrix. * * The decomposition uses pivoting to ensure stability, so that L will have * zeros in the bottom right rank(A) - n submatrix. Avoiding the square root * on D also stabilizes the computation. * * Remember that Cholesky decompositions are not rank-revealing. Also, do not use a Cholesky * decomposition to determine whether a system of equations has a solution. * * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism. * * \sa MatrixBase::ldlt(), SelfAdjointView::ldlt(), class LLT */ template<typename _MatrixType, int _UpLo> class LDLT { public: typedef _MatrixType MatrixType; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, UpLo = _UpLo }; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar; typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 typedef typename MatrixType::StorageIndex StorageIndex; typedef Matrix<Scalar, RowsAtCompileTime, 1, 0, MaxRowsAtCompileTime, 1> TmpMatrixType; typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType; typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationType; typedef internal::LDLT_Traits<MatrixType,UpLo> Traits; /** \brief Default Constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via LDLT::compute(const MatrixType&). */ LDLT() : m_matrix(), m_transpositions(), m_sign(internal::ZeroSign), m_isInitialized(false) {} /** \brief Default Constructor with memory preallocation * * Like the default constructor but with preallocation of the internal data * according to the specified problem \a size. 
* \sa LDLT() */ explicit LDLT(Index size) : m_matrix(size, size), m_transpositions(size), m_temporary(size), m_sign(internal::ZeroSign), m_isInitialized(false) {} /** \brief Constructor with decomposition * * This calculates the decomposition for the input \a matrix. * * \sa LDLT(Index size) */ template<typename InputType> explicit LDLT(const EigenBase<InputType>& matrix) : m_matrix(matrix.rows(), matrix.cols()), m_transpositions(matrix.rows()), m_temporary(matrix.rows()), m_sign(internal::ZeroSign), m_isInitialized(false) { compute(matrix.derived()); } /** \brief Constructs a LDLT factorization from a given matrix * * This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when \c MatrixType is a Eigen::Ref. * * \sa LDLT(const EigenBase&) */ template<typename InputType> explicit LDLT(EigenBase<InputType>& matrix) : m_matrix(matrix.derived()), m_transpositions(matrix.rows()), m_temporary(matrix.rows()), m_sign(internal::ZeroSign), m_isInitialized(false) { compute(matrix.derived()); } /** Clear any existing decomposition * \sa rankUpdate(w,sigma) */ void setZero() { m_isInitialized = false; } /** \returns a view of the upper triangular matrix U */ inline typename Traits::MatrixU matrixU() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return Traits::getU(m_matrix); } /** \returns a view of the lower triangular matrix L */ inline typename Traits::MatrixL matrixL() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return Traits::getL(m_matrix); } /** \returns the permutation matrix P as a transposition sequence. */ inline const TranspositionType& transpositionsP() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_transpositions; } /** \returns the coefficients of the diagonal matrix D */ inline Diagonal<const MatrixType> vectorD() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_matrix.diagonal(); } /** \returns true if the matrix is positive (semidefinite) */ inline bool isPositive() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_sign == internal::PositiveSemiDef || m_sign == internal::ZeroSign; } /** \returns true if the matrix is negative (semidefinite) */ inline bool isNegative(void) const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_sign == internal::NegativeSemiDef || m_sign == internal::ZeroSign; } /** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A. * * This function also supports in-place solves using the syntax <tt>x = decompositionObject.solve(x)</tt> . * * \note_about_checking_solutions * * More precisely, this method solves \f$ A x = b \f$ using the decomposition \f$ A = P^T L D L^* P \f$ * by solving the systems \f$ P^T y_1 = b \f$, \f$ L y_2 = y_1 \f$, \f$ D y_3 = y_2 \f$, * \f$ L^* y_4 = y_3 \f$ and \f$ P x = y_4 \f$ in succession. If the matrix \f$ A \f$ is singular, then * \f$ D \f$ will also be singular (all the other matrices are invertible). In that case, the * least-square solution of \f$ D y_3 = y_2 \f$ is computed. This does not mean that this function * computes the least-square solution of \f$ A x = b \f$ is \f$ A \f$ is singular. 
* * \sa MatrixBase::ldlt(), SelfAdjointView::ldlt() */ template<typename Rhs> inline const Solve<LDLT, Rhs> solve(const MatrixBase<Rhs>& b) const { eigen_assert(m_isInitialized && "LDLT is not initialized."); eigen_assert(m_matrix.rows()==b.rows() && "LDLT::solve(): invalid number of rows of the right hand side matrix b"); return Solve<LDLT, Rhs>(*this, b.derived()); } template<typename Derived> bool solveInPlace(MatrixBase<Derived> &bAndX) const; template<typename InputType> LDLT& compute(const EigenBase<InputType>& matrix); /** \returns an estimate of the reciprocal condition number of the matrix of * which \c *this is the LDLT decomposition. */ RealScalar rcond() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return internal::rcond_estimate_helper(m_l1_norm, *this); } template <typename Derived> LDLT& rankUpdate(const MatrixBase<Derived>& w, const RealScalar& alpha=1); /** \returns the internal LDLT decomposition matrix * * TODO: document the storage layout */ inline const MatrixType& matrixLDLT() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_matrix; } MatrixType reconstructedMatrix() const; /** \returns the adjoint of \c *this, that is, a const reference to the decomposition itself as the underlying matrix is self-adjoint. * * This method is provided for compatibility with other matrix decompositions, thus enabling generic code such as: * \code x = decomposition.adjoint().solve(b) \endcode */ const LDLT& adjoint() const { return *this; }; inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, * \c NumericalIssue if the factorization failed because of a zero pivot. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_info; } #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename RhsType, typename DstType> EIGEN_DEVICE_FUNC void _solve_impl(const RhsType &rhs, DstType &dst) const; #endif protected: static void check_template_parameters() { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); } /** \internal * Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U. * The strict upper part is used during the decomposition, the strict lower * part correspond to the coefficients of L (its diagonal is equal to 1 and * is not stored), and the diagonal entries correspond to D. 
*/ MatrixType m_matrix; RealScalar m_l1_norm; TranspositionType m_transpositions; TmpMatrixType m_temporary; internal::SignMatrix m_sign; bool m_isInitialized; ComputationInfo m_info; }; namespace internal { template<int UpLo> struct ldlt_inplace; template<> struct ldlt_inplace<Lower> { template<typename MatrixType, typename TranspositionType, typename Workspace> static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign) { using std::abs; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename TranspositionType::StorageIndex IndexType; eigen_assert(mat.rows()==mat.cols()); const Index size = mat.rows(); bool found_zero_pivot = false; bool ret = true; if (size <= 1) { transpositions.setIdentity(); if(size==0) sign = ZeroSign; else if (numext::real(mat.coeff(0,0)) > static_cast<RealScalar>(0) ) sign = PositiveSemiDef; else if (numext::real(mat.coeff(0,0)) < static_cast<RealScalar>(0)) sign = NegativeSemiDef; else sign = ZeroSign; return true; } for (Index k = 0; k < size; ++k) { // Find largest diagonal element Index index_of_biggest_in_corner; mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner); index_of_biggest_in_corner += k; transpositions.coeffRef(k) = IndexType(index_of_biggest_in_corner); if(k != index_of_biggest_in_corner) { // apply the transposition while taking care to consider only // the lower triangular part Index s = size-index_of_biggest_in_corner-1; // trailing size after the biggest element mat.row(k).head(k).swap(mat.row(index_of_biggest_in_corner).head(k)); mat.col(k).tail(s).swap(mat.col(index_of_biggest_in_corner).tail(s)); std::swap(mat.coeffRef(k,k),mat.coeffRef(index_of_biggest_in_corner,index_of_biggest_in_corner)); for(Index i=k+1;i<index_of_biggest_in_corner;++i) { Scalar tmp = mat.coeffRef(i,k); mat.coeffRef(i,k) = numext::conj(mat.coeffRef(index_of_biggest_in_corner,i)); mat.coeffRef(index_of_biggest_in_corner,i) = numext::conj(tmp); } if(NumTraits<Scalar>::IsComplex) mat.coeffRef(index_of_biggest_in_corner,k) = numext::conj(mat.coeff(index_of_biggest_in_corner,k)); } // partition the matrix: // A00 | - | - // lu = A10 | A11 | - // A20 | A21 | A22 Index rs = size - k - 1; Block<MatrixType,Dynamic,1> A21(mat,k+1,k,rs,1); Block<MatrixType,1,Dynamic> A10(mat,k,0,1,k); Block<MatrixType,Dynamic,Dynamic> A20(mat,k+1,0,rs,k); if(k>0) { temp.head(k) = mat.diagonal().real().head(k).asDiagonal() * A10.adjoint(); mat.coeffRef(k,k) -= (A10 * temp.head(k)).value(); if(rs>0) A21.noalias() -= A20 * temp.head(k); } // In some previous versions of Eigen (e.g., 3.2.1), the scaling was omitted if the pivot // was smaller than the cutoff value. However, since LDLT is not rank-revealing // we should only make sure that we do not introduce INF or NaN values. // Remark that LAPACK also uses 0 as the cutoff value. RealScalar realAkk = numext::real(mat.coeffRef(k,k)); bool pivot_is_valid = (abs(realAkk) > RealScalar(0)); if(k==0 && !pivot_is_valid) { // The entire diagonal is zero, there is nothing more to do // except filling the transpositions, and checking whether the matrix is zero. 
sign = ZeroSign; for(Index j = 0; j<size; ++j) { transpositions.coeffRef(j) = IndexType(j); ret = ret && (mat.col(j).tail(size-j-1).array()==Scalar(0)).all(); } return ret; } if((rs>0) && pivot_is_valid) A21 /= realAkk; else if(rs>0) ret = ret && (A21.array()==Scalar(0)).all(); if(found_zero_pivot && pivot_is_valid) ret = false; // factorization failed else if(!pivot_is_valid) found_zero_pivot = true; if (sign == PositiveSemiDef) { if (realAkk < static_cast<RealScalar>(0)) sign = Indefinite; } else if (sign == NegativeSemiDef) { if (realAkk > static_cast<RealScalar>(0)) sign = Indefinite; } else if (sign == ZeroSign) { if (realAkk > static_cast<RealScalar>(0)) sign = PositiveSemiDef; else if (realAkk < static_cast<RealScalar>(0)) sign = NegativeSemiDef; } } return ret; } // Reference for the algorithm: Davis and Hager, "Multiple Rank // Modifications of a Sparse Cholesky Factorization" (Algorithm 1) // Trivial rearrangements of their computations (Timothy E. Holy) // allow their algorithm to work for rank-1 updates even if the // original matrix is not of full rank. // Here only rank-1 updates are implemented, to reduce the // requirement for intermediate storage and improve accuracy template<typename MatrixType, typename WDerived> static bool updateInPlace(MatrixType& mat, MatrixBase<WDerived>& w, const typename MatrixType::RealScalar& sigma=1) { using numext::isfinite; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; const Index size = mat.rows(); eigen_assert(mat.cols() == size && w.size()==size); RealScalar alpha = 1; // Apply the update for (Index j = 0; j < size; j++) { // Check for termination due to an original decomposition of low-rank if (!(isfinite)(alpha)) break; // Update the diagonal terms RealScalar dj = numext::real(mat.coeff(j,j)); Scalar wj = w.coeff(j); RealScalar swj2 = sigma*numext::abs2(wj); RealScalar gamma = dj*alpha + swj2; mat.coeffRef(j,j) += swj2/alpha; alpha += swj2/dj; // Update the terms of L Index rs = size-j-1; w.tail(rs) -= wj * mat.col(j).tail(rs); if(gamma != 0) mat.col(j).tail(rs) += (sigma*numext::conj(wj)/gamma)*w.tail(rs); } return true; } template<typename MatrixType, typename TranspositionType, typename Workspace, typename WType> static bool update(MatrixType& mat, const TranspositionType& transpositions, Workspace& tmp, const WType& w, const typename MatrixType::RealScalar& sigma=1) { // Apply the permutation to the input w tmp = transpositions * w; return ldlt_inplace<Lower>::updateInPlace(mat,tmp,sigma); } }; template<> struct ldlt_inplace<Upper> { template<typename MatrixType, typename TranspositionType, typename Workspace> static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign) { Transpose<MatrixType> matt(mat); return ldlt_inplace<Lower>::unblocked(matt, transpositions, temp, sign); } template<typename MatrixType, typename TranspositionType, typename Workspace, typename WType> static EIGEN_STRONG_INLINE bool update(MatrixType& mat, TranspositionType& transpositions, Workspace& tmp, WType& w, const typename MatrixType::RealScalar& sigma=1) { Transpose<MatrixType> matt(mat); return ldlt_inplace<Lower>::update(matt, transpositions, tmp, w.conjugate(), sigma); } }; template<typename MatrixType> struct LDLT_Traits<MatrixType,Lower> { typedef const TriangularView<const MatrixType, UnitLower> MatrixL; typedef const TriangularView<const typename MatrixType::AdjointReturnType, UnitUpper> MatrixU; static inline MatrixL getL(const 
MatrixType& m) { return MatrixL(m); } static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); } }; template<typename MatrixType> struct LDLT_Traits<MatrixType,Upper> { typedef const TriangularView<const typename MatrixType::AdjointReturnType, UnitLower> MatrixL; typedef const TriangularView<const MatrixType, UnitUpper> MatrixU; static inline MatrixL getL(const MatrixType& m) { return MatrixL(m.adjoint()); } static inline MatrixU getU(const MatrixType& m) { return MatrixU(m); } }; } // end namespace internal /** Compute / recompute the LDLT decomposition A = L D L^* = U^* D U of \a matrix */ template<typename MatrixType, int _UpLo> template<typename InputType> LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const EigenBase<InputType>& a) { check_template_parameters(); eigen_assert(a.rows()==a.cols()); const Index size = a.rows(); m_matrix = a.derived(); // Compute matrix L1 norm = max abs column sum. m_l1_norm = RealScalar(0); // TODO move this code to SelfAdjointView for (Index col = 0; col < size; ++col) { RealScalar abs_col_sum; if (_UpLo == Lower) abs_col_sum = m_matrix.col(col).tail(size - col).template lpNorm<1>() + m_matrix.row(col).head(col).template lpNorm<1>(); else abs_col_sum = m_matrix.col(col).head(col).template lpNorm<1>() + m_matrix.row(col).tail(size - col).template lpNorm<1>(); if (abs_col_sum > m_l1_norm) m_l1_norm = abs_col_sum; } m_transpositions.resize(size); m_isInitialized = false; m_temporary.resize(size); m_sign = internal::ZeroSign; m_info = internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, m_sign) ? Success : NumericalIssue; m_isInitialized = true; return *this; } /** Update the LDLT decomposition: given A = L D L^T, efficiently compute the decomposition of A + sigma w w^T. * \param w a vector to be incorporated into the decomposition. * \param sigma a scalar, +1 for updates and -1 for "downdates," which correspond to removing previously-added column vectors. Optional; default value is +1. * \sa setZero() */ template<typename MatrixType, int _UpLo> template<typename Derived> LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename LDLT<MatrixType,_UpLo>::RealScalar& sigma) { typedef typename TranspositionType::StorageIndex IndexType; const Index size = w.rows(); if (m_isInitialized) { eigen_assert(m_matrix.rows()==size); } else { m_matrix.resize(size,size); m_matrix.setZero(); m_transpositions.resize(size); for (Index i = 0; i < size; i++) m_transpositions.coeffRef(i) = IndexType(i); m_temporary.resize(size); m_sign = sigma>=0 ? 
internal::PositiveSemiDef : internal::NegativeSemiDef; m_isInitialized = true; } internal::ldlt_inplace<UpLo>::update(m_matrix, m_transpositions, m_temporary, w, sigma); return *this; } #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename _MatrixType, int _UpLo> template<typename RhsType, typename DstType> void LDLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const { eigen_assert(rhs.rows() == rows()); // dst = P b dst = m_transpositions * rhs; // dst = L^-1 (P b) matrixL().solveInPlace(dst); // dst = D^-1 (L^-1 P b) // more precisely, use pseudo-inverse of D (see bug 241) using std::abs; const typename Diagonal<const MatrixType>::RealReturnType vecD(vectorD()); // In some previous versions, tolerance was set to the max of 1/highest (or rather numeric_limits::min()) // and the maximal diagonal entry * epsilon as motivated by LAPACK's xGELSS: // RealScalar tolerance = numext::maxi(vecD.array().abs().maxCoeff() * NumTraits<RealScalar>::epsilon(),RealScalar(1) / NumTraits<RealScalar>::highest()); // However, LDLT is not rank revealing, and so adjusting the tolerance wrt to the highest // diagonal element is not well justified and leads to numerical issues in some cases. // Moreover, Lapack's xSYTRS routines use 0 for the tolerance. // Using numeric_limits::min() gives us more robustness to denormals. RealScalar tolerance = (std::numeric_limits<RealScalar>::min)(); for (Index i = 0; i < vecD.size(); ++i) { if(abs(vecD(i)) > tolerance) dst.row(i) /= vecD(i); else dst.row(i).setZero(); } // dst = L^-T (D^-1 L^-1 P b) matrixU().solveInPlace(dst); // dst = P^-1 (L^-T D^-1 L^-1 P b) = A^-1 b dst = m_transpositions.transpose() * dst; } #endif /** \internal use x = ldlt_object.solve(x); * * This is the \em in-place version of solve(). * * \param bAndX represents both the right-hand side matrix b and result x. * * \returns true always! If you need to check for existence of solutions, use another decomposition like LU, QR, or SVD. * * This version avoids a copy when the right hand side matrix b is not * needed anymore. * * \sa LDLT::solve(), MatrixBase::ldlt() */ template<typename MatrixType,int _UpLo> template<typename Derived> bool LDLT<MatrixType,_UpLo>::solveInPlace(MatrixBase<Derived> &bAndX) const { eigen_assert(m_isInitialized && "LDLT is not initialized."); eigen_assert(m_matrix.rows() == bAndX.rows()); bAndX = this->solve(bAndX); return true; } /** \returns the matrix represented by the decomposition, * i.e., it returns the product: P^T L D L^* P. * This function is provided for debug purpose. 
*/ template<typename MatrixType, int _UpLo> MatrixType LDLT<MatrixType,_UpLo>::reconstructedMatrix() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); const Index size = m_matrix.rows(); MatrixType res(size,size); // P res.setIdentity(); res = transpositionsP() * res; // L^* P res = matrixU() * res; // D(L^*P) res = vectorD().real().asDiagonal() * res; // L(DL^*P) res = matrixL() * res; // P^T (LDL^*P) res = transpositionsP().transpose() * res; return res; } /** \cholesky_module * \returns the Cholesky decomposition with full pivoting without square root of \c *this * \sa MatrixBase::ldlt() */ template<typename MatrixType, unsigned int UpLo> inline const LDLT<typename SelfAdjointView<MatrixType, UpLo>::PlainObject, UpLo> SelfAdjointView<MatrixType, UpLo>::ldlt() const { return LDLT<PlainObject,UpLo>(m_matrix); } /** \cholesky_module * \returns the Cholesky decomposition with full pivoting without square root of \c *this * \sa SelfAdjointView::ldlt() */ template<typename Derived> inline const LDLT<typename MatrixBase<Derived>::PlainObject> MatrixBase<Derived>::ldlt() const { return LDLT<PlainObject>(derived()); } } // end namespace Eigen #endif // EIGEN_LDLT_H
Unknown
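A minimal usage sketch of the pivoted LDLT decomposition defined above: factor a self-adjoint matrix, solve, inspect D, and apply a rank-1 update via rankUpdate(); the values are illustrative only.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d A;
  A <<  2, -1,  0,
       -1,  2, -1,
        0, -1,  2;                        // symmetric positive definite
  Eigen::Vector3d b(1.0, 2.0, 3.0);

  Eigen::LDLT<Eigen::Matrix3d> ldlt(A);   // A = P^T L D L^* P
  if (ldlt.info() != Eigen::Success) {
    std::cerr << "decomposition failed" << std::endl;
    return 1;
  }

  Eigen::Vector3d x = ldlt.solve(b);
  std::cout << "x           = " << x.transpose() << std::endl;
  std::cout << "diag(D)     = " << ldlt.vectorD().transpose() << std::endl;
  std::cout << "isPositive  = " << ldlt.isPositive() << std::endl;

  // Rank-1 update: the factorization now represents A + w w^T.
  Eigen::Vector3d w(0.5, 0.0, 0.5);
  ldlt.rankUpdate(w, 1.0);
  std::cout << "updated x   = " << ldlt.solve(b).transpose() << std::endl;
  return 0;
}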
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Cholesky/LLT.h
.h
18,395
543
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_LLT_H #define EIGEN_LLT_H namespace Eigen { namespace internal{ template<typename MatrixType, int UpLo> struct LLT_Traits; } /** \ingroup Cholesky_Module * * \class LLT * * \brief Standard Cholesky decomposition (LL^T) of a matrix and associated features * * \tparam _MatrixType the type of the matrix of which we are computing the LL^T Cholesky decomposition * \tparam _UpLo the triangular part that will be used for the decompositon: Lower (default) or Upper. * The other triangular part won't be read. * * This class performs a LL^T Cholesky decomposition of a symmetric, positive definite * matrix A such that A = LL^* = U^*U, where L is lower triangular. * * While the Cholesky decomposition is particularly useful to solve selfadjoint problems like D^*D x = b, * for that purpose, we recommend the Cholesky decomposition without square root which is more stable * and even faster. Nevertheless, this standard Cholesky decomposition remains useful in many other * situations like generalised eigen problems with hermitian matrices. * * Remember that Cholesky decompositions are not rank-revealing. This LLT decomposition is only stable on positive definite matrices, * use LDLT instead for the semidefinite case. Also, do not use a Cholesky decomposition to determine whether a system of equations * has a solution. * * Example: \include LLT_example.cpp * Output: \verbinclude LLT_example.out * * \b Performance: for best performance, it is recommended to use a column-major storage format * with the Lower triangular part (the default), or, equivalently, a row-major storage format * with the Upper triangular part. Otherwise, you might get a 20% slowdown for the full factorization * step, and rank-updates can be up to 3 times slower. * * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism. * * Note that during the decomposition, only the lower (or upper, as defined by _UpLo) triangular part of A is considered. * Therefore, the strict lower part does not have to store correct values. * * \sa MatrixBase::llt(), SelfAdjointView::llt(), class LDLT */ template<typename _MatrixType, int _UpLo> class LLT { public: typedef _MatrixType MatrixType; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar; typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 typedef typename MatrixType::StorageIndex StorageIndex; enum { PacketSize = internal::packet_traits<Scalar>::size, AlignmentMask = int(PacketSize)-1, UpLo = _UpLo }; typedef internal::LLT_Traits<MatrixType,UpLo> Traits; /** * \brief Default Constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via LLT::compute(const MatrixType&). */ LLT() : m_matrix(), m_isInitialized(false) {} /** \brief Default Constructor with memory preallocation * * Like the default constructor but with preallocation of the internal data * according to the specified problem \a size. 
* \sa LLT() */ explicit LLT(Index size) : m_matrix(size, size), m_isInitialized(false) {} template<typename InputType> explicit LLT(const EigenBase<InputType>& matrix) : m_matrix(matrix.rows(), matrix.cols()), m_isInitialized(false) { compute(matrix.derived()); } /** \brief Constructs a LDLT factorization from a given matrix * * This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when * \c MatrixType is a Eigen::Ref. * * \sa LLT(const EigenBase&) */ template<typename InputType> explicit LLT(EigenBase<InputType>& matrix) : m_matrix(matrix.derived()), m_isInitialized(false) { compute(matrix.derived()); } /** \returns a view of the upper triangular matrix U */ inline typename Traits::MatrixU matrixU() const { eigen_assert(m_isInitialized && "LLT is not initialized."); return Traits::getU(m_matrix); } /** \returns a view of the lower triangular matrix L */ inline typename Traits::MatrixL matrixL() const { eigen_assert(m_isInitialized && "LLT is not initialized."); return Traits::getL(m_matrix); } /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. * * Since this LLT class assumes anyway that the matrix A is invertible, the solution * theoretically exists and is unique regardless of b. * * Example: \include LLT_solve.cpp * Output: \verbinclude LLT_solve.out * * \sa solveInPlace(), MatrixBase::llt(), SelfAdjointView::llt() */ template<typename Rhs> inline const Solve<LLT, Rhs> solve(const MatrixBase<Rhs>& b) const { eigen_assert(m_isInitialized && "LLT is not initialized."); eigen_assert(m_matrix.rows()==b.rows() && "LLT::solve(): invalid number of rows of the right hand side matrix b"); return Solve<LLT, Rhs>(*this, b.derived()); } template<typename Derived> void solveInPlace(const MatrixBase<Derived> &bAndX) const; template<typename InputType> LLT& compute(const EigenBase<InputType>& matrix); /** \returns an estimate of the reciprocal condition number of the matrix of * which \c *this is the Cholesky decomposition. */ RealScalar rcond() const { eigen_assert(m_isInitialized && "LLT is not initialized."); eigen_assert(m_info == Success && "LLT failed because matrix appears to be negative"); return internal::rcond_estimate_helper(m_l1_norm, *this); } /** \returns the LLT decomposition matrix * * TODO: document the storage layout */ inline const MatrixType& matrixLLT() const { eigen_assert(m_isInitialized && "LLT is not initialized."); return m_matrix; } MatrixType reconstructedMatrix() const; /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, * \c NumericalIssue if the matrix.appears not to be positive definite. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "LLT is not initialized."); return m_info; } /** \returns the adjoint of \c *this, that is, a const reference to the decomposition itself as the underlying matrix is self-adjoint. 
* * This method is provided for compatibility with other matrix decompositions, thus enabling generic code such as: * \code x = decomposition.adjoint().solve(b) \endcode */ const LLT& adjoint() const { return *this; }; inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } template<typename VectorType> LLT rankUpdate(const VectorType& vec, const RealScalar& sigma = 1); #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename RhsType, typename DstType> EIGEN_DEVICE_FUNC void _solve_impl(const RhsType &rhs, DstType &dst) const; #endif protected: static void check_template_parameters() { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); } /** \internal * Used to compute and store L * The strict upper part is not used and even not initialized. */ MatrixType m_matrix; RealScalar m_l1_norm; bool m_isInitialized; ComputationInfo m_info; }; namespace internal { template<typename Scalar, int UpLo> struct llt_inplace; template<typename MatrixType, typename VectorType> static Index llt_rank_update_lower(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) { using std::sqrt; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::ColXpr ColXpr; typedef typename internal::remove_all<ColXpr>::type ColXprCleaned; typedef typename ColXprCleaned::SegmentReturnType ColXprSegment; typedef Matrix<Scalar,Dynamic,1> TempVectorType; typedef typename TempVectorType::SegmentReturnType TempVecSegment; Index n = mat.cols(); eigen_assert(mat.rows()==n && vec.size()==n); TempVectorType temp; if(sigma>0) { // This version is based on Givens rotations. // It is faster than the other one below, but only works for updates, // i.e., for sigma > 0 temp = sqrt(sigma) * vec; for(Index i=0; i<n; ++i) { JacobiRotation<Scalar> g; g.makeGivens(mat(i,i), -temp(i), &mat(i,i)); Index rs = n-i-1; if(rs>0) { ColXprSegment x(mat.col(i).tail(rs)); TempVecSegment y(temp.tail(rs)); apply_rotation_in_the_plane(x, y, g); } } } else { temp = vec; RealScalar beta = 1; for(Index j=0; j<n; ++j) { RealScalar Ljj = numext::real(mat.coeff(j,j)); RealScalar dj = numext::abs2(Ljj); Scalar wj = temp.coeff(j); RealScalar swj2 = sigma*numext::abs2(wj); RealScalar gamma = dj*beta + swj2; RealScalar x = dj + swj2/beta; if (x<=RealScalar(0)) return j; RealScalar nLjj = sqrt(x); mat.coeffRef(j,j) = nLjj; beta += swj2/dj; // Update the terms of L Index rs = n-j-1; if(rs) { temp.tail(rs) -= (wj/Ljj) * mat.col(j).tail(rs); if(gamma != 0) mat.col(j).tail(rs) = (nLjj/Ljj) * mat.col(j).tail(rs) + (nLjj * sigma*numext::conj(wj)/gamma)*temp.tail(rs); } } } return -1; } template<typename Scalar> struct llt_inplace<Scalar, Lower> { typedef typename NumTraits<Scalar>::Real RealScalar; template<typename MatrixType> static Index unblocked(MatrixType& mat) { using std::sqrt; eigen_assert(mat.rows()==mat.cols()); const Index size = mat.rows(); for(Index k = 0; k < size; ++k) { Index rs = size-k-1; // remaining size Block<MatrixType,Dynamic,1> A21(mat,k+1,k,rs,1); Block<MatrixType,1,Dynamic> A10(mat,k,0,1,k); Block<MatrixType,Dynamic,Dynamic> A20(mat,k+1,0,rs,k); RealScalar x = numext::real(mat.coeff(k,k)); if (k>0) x -= A10.squaredNorm(); if (x<=RealScalar(0)) return k; mat.coeffRef(k,k) = x = sqrt(x); if (k>0 && rs>0) A21.noalias() -= A20 * A10.adjoint(); if (rs>0) A21 /= x; } return -1; } template<typename MatrixType> static Index blocked(MatrixType& m) { eigen_assert(m.rows()==m.cols()); Index size = m.rows(); if(size<32) return 
unblocked(m); Index blockSize = size/8; blockSize = (blockSize/16)*16; blockSize = (std::min)((std::max)(blockSize,Index(8)), Index(128)); for (Index k=0; k<size; k+=blockSize) { // partition the matrix: // A00 | - | - // lu = A10 | A11 | - // A20 | A21 | A22 Index bs = (std::min)(blockSize, size-k); Index rs = size - k - bs; Block<MatrixType,Dynamic,Dynamic> A11(m,k, k, bs,bs); Block<MatrixType,Dynamic,Dynamic> A21(m,k+bs,k, rs,bs); Block<MatrixType,Dynamic,Dynamic> A22(m,k+bs,k+bs,rs,rs); Index ret; if((ret=unblocked(A11))>=0) return k+ret; if(rs>0) A11.adjoint().template triangularView<Upper>().template solveInPlace<OnTheRight>(A21); if(rs>0) A22.template selfadjointView<Lower>().rankUpdate(A21,typename NumTraits<RealScalar>::Literal(-1)); // bottleneck } return -1; } template<typename MatrixType, typename VectorType> static Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma) { return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); } }; template<typename Scalar> struct llt_inplace<Scalar, Upper> { typedef typename NumTraits<Scalar>::Real RealScalar; template<typename MatrixType> static EIGEN_STRONG_INLINE Index unblocked(MatrixType& mat) { Transpose<MatrixType> matt(mat); return llt_inplace<Scalar, Lower>::unblocked(matt); } template<typename MatrixType> static EIGEN_STRONG_INLINE Index blocked(MatrixType& mat) { Transpose<MatrixType> matt(mat); return llt_inplace<Scalar, Lower>::blocked(matt); } template<typename MatrixType, typename VectorType> static Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma) { Transpose<MatrixType> matt(mat); return llt_inplace<Scalar, Lower>::rankUpdate(matt, vec.conjugate(), sigma); } }; template<typename MatrixType> struct LLT_Traits<MatrixType,Lower> { typedef const TriangularView<const MatrixType, Lower> MatrixL; typedef const TriangularView<const typename MatrixType::AdjointReturnType, Upper> MatrixU; static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); } static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); } static bool inplace_decomposition(MatrixType& m) { return llt_inplace<typename MatrixType::Scalar, Lower>::blocked(m)==-1; } }; template<typename MatrixType> struct LLT_Traits<MatrixType,Upper> { typedef const TriangularView<const typename MatrixType::AdjointReturnType, Lower> MatrixL; typedef const TriangularView<const MatrixType, Upper> MatrixU; static inline MatrixL getL(const MatrixType& m) { return MatrixL(m.adjoint()); } static inline MatrixU getU(const MatrixType& m) { return MatrixU(m); } static bool inplace_decomposition(MatrixType& m) { return llt_inplace<typename MatrixType::Scalar, Upper>::blocked(m)==-1; } }; } // end namespace internal /** Computes / recomputes the Cholesky decomposition A = LL^* = U^*U of \a matrix * * \returns a reference to *this * * Example: \include TutorialLinAlgComputeTwice.cpp * Output: \verbinclude TutorialLinAlgComputeTwice.out */ template<typename MatrixType, int _UpLo> template<typename InputType> LLT<MatrixType,_UpLo>& LLT<MatrixType,_UpLo>::compute(const EigenBase<InputType>& a) { check_template_parameters(); eigen_assert(a.rows()==a.cols()); const Index size = a.rows(); m_matrix.resize(size, size); if (!internal::is_same_dense(m_matrix, a.derived())) m_matrix = a.derived(); // Compute matrix L1 norm = max abs column sum. 
m_l1_norm = RealScalar(0); // TODO move this code to SelfAdjointView for (Index col = 0; col < size; ++col) { RealScalar abs_col_sum; if (_UpLo == Lower) abs_col_sum = m_matrix.col(col).tail(size - col).template lpNorm<1>() + m_matrix.row(col).head(col).template lpNorm<1>(); else abs_col_sum = m_matrix.col(col).head(col).template lpNorm<1>() + m_matrix.row(col).tail(size - col).template lpNorm<1>(); if (abs_col_sum > m_l1_norm) m_l1_norm = abs_col_sum; } m_isInitialized = true; bool ok = Traits::inplace_decomposition(m_matrix); m_info = ok ? Success : NumericalIssue; return *this; } /** Performs a rank one update (or dowdate) of the current decomposition. * If A = LL^* before the rank one update, * then after it we have LL^* = A + sigma * v v^* where \a v must be a vector * of same dimension. */ template<typename _MatrixType, int _UpLo> template<typename VectorType> LLT<_MatrixType,_UpLo> LLT<_MatrixType,_UpLo>::rankUpdate(const VectorType& v, const RealScalar& sigma) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorType); eigen_assert(v.size()==m_matrix.cols()); eigen_assert(m_isInitialized); if(internal::llt_inplace<typename MatrixType::Scalar, UpLo>::rankUpdate(m_matrix,v,sigma)>=0) m_info = NumericalIssue; else m_info = Success; return *this; } #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename _MatrixType,int _UpLo> template<typename RhsType, typename DstType> void LLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const { dst = rhs; solveInPlace(dst); } #endif /** \internal use x = llt_object.solve(x); * * This is the \em in-place version of solve(). * * \param bAndX represents both the right-hand side matrix b and result x. * * This version avoids a copy when the right hand side matrix b is not needed anymore. * * \warning The parameter is only marked 'const' to make the C++ compiler accept a temporary expression here. * This function will const_cast it, so constness isn't honored here. * * \sa LLT::solve(), MatrixBase::llt() */ template<typename MatrixType, int _UpLo> template<typename Derived> void LLT<MatrixType,_UpLo>::solveInPlace(const MatrixBase<Derived> &bAndX) const { eigen_assert(m_isInitialized && "LLT is not initialized."); eigen_assert(m_matrix.rows()==bAndX.rows()); matrixL().solveInPlace(bAndX); matrixU().solveInPlace(bAndX); } /** \returns the matrix represented by the decomposition, * i.e., it returns the product: L L^*. * This function is provided for debug purpose. */ template<typename MatrixType, int _UpLo> MatrixType LLT<MatrixType,_UpLo>::reconstructedMatrix() const { eigen_assert(m_isInitialized && "LLT is not initialized."); return matrixL() * matrixL().adjoint().toDenseMatrix(); } /** \cholesky_module * \returns the LLT decomposition of \c *this * \sa SelfAdjointView::llt() */ template<typename Derived> inline const LLT<typename MatrixBase<Derived>::PlainObject> MatrixBase<Derived>::llt() const { return LLT<PlainObject>(derived()); } /** \cholesky_module * \returns the LLT decomposition of \c *this * \sa SelfAdjointView::llt() */ template<typename MatrixType, unsigned int UpLo> inline const LLT<typename SelfAdjointView<MatrixType, UpLo>::PlainObject, UpLo> SelfAdjointView<MatrixType, UpLo>::llt() const { return LLT<PlainObject,UpLo>(m_matrix); } } // end namespace Eigen #endif // EIGEN_LLT_H
Unknown
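The LLT.h record above documents compute(), info(), solve(), and rankUpdate() for a positive definite matrix. The following is a minimal usage sketch, not part of the repository: the 3x3 matrix, the right-hand side, and the update vector are made-up illustrative values, and the include/main() scaffolding is assumed.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  // A = M * M^T + I is symmetric positive definite by construction.
  Eigen::Matrix3d M = Eigen::Matrix3d::Random();
  Eigen::Matrix3d A = M * M.transpose() + Eigen::Matrix3d::Identity();
  Eigen::Vector3d b(1.0, 2.0, 3.0);

  // Factor A = L L^T using the lower triangular part (the default _UpLo).
  Eigen::LLT<Eigen::Matrix3d> llt(A);
  if (llt.info() != Eigen::Success) return 1;      // matrix was not positive definite

  Eigen::Vector3d x = llt.solve(b);                // solves A x = b
  std::cout << "residual: " << (A * x - b).norm() << "\n";

  // Rank-one update: after this call, llt factors A + v v^T (sigma defaults to 1).
  Eigen::Vector3d v(0.5, -0.25, 1.0);
  llt.rankUpdate(v);
  x = llt.solve(b);
  std::cout << "updated residual: "
            << ((A + v * v.transpose()) * x - b).norm() << "\n";
  return 0;
}

The rank-one update works in place on the stored factor, so the second solve() targets A + v v^T without refactoring from scratch.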
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h
.h
7,762
217
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H #define EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H namespace Eigen { namespace internal { /** \internal Low-level conjugate gradient algorithm for least-square problems * \param mat The matrix A * \param rhs The right hand side vector b * \param x On input and initial solution, on output the computed solution. * \param precond A preconditioner being able to efficiently solve for an * approximation of A'Ax=b (regardless of b) * \param iters On input the max number of iteration, on output the number of performed iterations. * \param tol_error On input the tolerance error, on output an estimation of the relative error. */ template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner> EIGEN_DONT_INLINE void least_square_conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x, const Preconditioner& precond, Index& iters, typename Dest::RealScalar& tol_error) { using std::sqrt; using std::abs; typedef typename Dest::RealScalar RealScalar; typedef typename Dest::Scalar Scalar; typedef Matrix<Scalar,Dynamic,1> VectorType; RealScalar tol = tol_error; Index maxIters = iters; Index m = mat.rows(), n = mat.cols(); VectorType residual = rhs - mat * x; VectorType normal_residual = mat.adjoint() * residual; RealScalar rhsNorm2 = (mat.adjoint()*rhs).squaredNorm(); if(rhsNorm2 == 0) { x.setZero(); iters = 0; tol_error = 0; return; } RealScalar threshold = tol*tol*rhsNorm2; RealScalar residualNorm2 = normal_residual.squaredNorm(); if (residualNorm2 < threshold) { iters = 0; tol_error = sqrt(residualNorm2 / rhsNorm2); return; } VectorType p(n); p = precond.solve(normal_residual); // initial search direction VectorType z(n), tmp(m); RealScalar absNew = numext::real(normal_residual.dot(p)); // the square of the absolute value of r scaled by invM Index i = 0; while(i < maxIters) { tmp.noalias() = mat * p; Scalar alpha = absNew / tmp.squaredNorm(); // the amount we travel on dir x += alpha * p; // update solution residual -= alpha * tmp; // update residual normal_residual = mat.adjoint() * residual; // update residual of the normal equation residualNorm2 = normal_residual.squaredNorm(); if(residualNorm2 < threshold) break; z = precond.solve(normal_residual); // approximately solve for "A'A z = normal_residual" RealScalar absOld = absNew; absNew = numext::real(normal_residual.dot(z)); // update the absolute value of r RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction p = z + beta * p; // update search direction i++; } tol_error = sqrt(residualNorm2 / rhsNorm2); iters = i; } } template< typename _MatrixType, typename _Preconditioner = LeastSquareDiagonalPreconditioner<typename _MatrixType::Scalar> > class LeastSquaresConjugateGradient; namespace internal { template< typename _MatrixType, typename _Preconditioner> struct traits<LeastSquaresConjugateGradient<_MatrixType,_Preconditioner> > { typedef _MatrixType MatrixType; typedef _Preconditioner Preconditioner; }; } /** \ingroup IterativeLinearSolvers_Module * \brief A conjugate gradient solver for sparse (or dense) least-square problems * * This class allows to solve for A x = 
b linear problems using an iterative conjugate gradient algorithm. * The matrix A can be non symmetric and rectangular, but the matrix A' A should be positive-definite to guaranty stability. * Otherwise, the SparseLU or SparseQR classes might be preferable. * The matrix A and the vectors x and b can be either dense or sparse. * * \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix. * \tparam _Preconditioner the type of the preconditioner. Default is LeastSquareDiagonalPreconditioner * * \implsparsesolverconcept * * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations * and NumTraits<Scalar>::epsilon() for the tolerance. * * This class can be used as the direct solver classes. Here is a typical usage example: \code int m=1000000, n = 10000; VectorXd x(n), b(m); SparseMatrix<double> A(m,n); // fill A and b LeastSquaresConjugateGradient<SparseMatrix<double> > lscg; lscg.compute(A); x = lscg.solve(b); std::cout << "#iterations: " << lscg.iterations() << std::endl; std::cout << "estimated error: " << lscg.error() << std::endl; // update b, and solve again x = lscg.solve(b); \endcode * * By default the iterations start with x=0 as an initial guess of the solution. * One can control the start using the solveWithGuess() method. * * \sa class ConjugateGradient, SparseLU, SparseQR */ template< typename _MatrixType, typename _Preconditioner> class LeastSquaresConjugateGradient : public IterativeSolverBase<LeastSquaresConjugateGradient<_MatrixType,_Preconditioner> > { typedef IterativeSolverBase<LeastSquaresConjugateGradient> Base; using Base::matrix; using Base::m_error; using Base::m_iterations; using Base::m_info; using Base::m_isInitialized; public: typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef _Preconditioner Preconditioner; public: /** Default constructor. */ LeastSquaresConjugateGradient() : Base() {} /** Initialize the solver with matrix \a A for further \c Ax=b solving. * * This constructor is a shortcut for the default constructor followed * by a call to compute(). * * \warning this class stores a reference to the matrix A as well as some * precomputed values that depend on it. Therefore, if \a A is changed * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. */ template<typename MatrixDerived> explicit LeastSquaresConjugateGradient(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {} ~LeastSquaresConjugateGradient() {} /** \internal */ template<typename Rhs,typename Dest> void _solve_with_guess_impl(const Rhs& b, Dest& x) const { m_iterations = Base::maxIterations(); m_error = Base::m_tolerance; for(Index j=0; j<b.cols(); ++j) { m_iterations = Base::maxIterations(); m_error = Base::m_tolerance; typename Dest::ColXpr xj(x,j); internal::least_square_conjugate_gradient(matrix(), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error); } m_isInitialized = true; m_info = m_error <= Base::m_tolerance ? Success : NoConvergence; } /** \internal */ using Base::_solve_impl; template<typename Rhs,typename Dest> void _solve_impl(const MatrixBase<Rhs>& b, Dest& x) const { x.setZero(); _solve_with_guess_impl(b.derived(),x); } }; } // end namespace Eigen #endif // EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H
Unknown
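The LeastSquaresConjugateGradient record above already sketches usage in its doxygen comment but leaves the matrix unfilled. Below is a compilable variant under stated assumptions: a small 6x3 sparse matrix with full column rank (so A' A is positive definite, as the comment requires), illustrative triplet values, and standard include/main() scaffolding.

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>
#include <vector>

int main()
{
  // Over-determined system A x ~= b with 6 equations and 3 unknowns.
  const int m = 6, n = 3;
  std::vector<Eigen::Triplet<double> > coeffs;
  for (int i = 0; i < m; ++i)
    for (int j = 0; j < n; ++j)
      if ((i + j) % 2 == 0)
        coeffs.push_back(Eigen::Triplet<double>(i, j, 1.0 + i + j));
  Eigen::SparseMatrix<double> A(m, n);
  A.setFromTriplets(coeffs.begin(), coeffs.end());
  Eigen::VectorXd b = Eigen::VectorXd::LinSpaced(m, 1.0, 6.0);

  // Minimizes |A x - b| by iterating on the normal equations A' A x = A' b,
  // without ever forming A' A explicitly.
  Eigen::LeastSquaresConjugateGradient<Eigen::SparseMatrix<double> > lscg;
  lscg.compute(A);
  Eigen::VectorXd x = lscg.solve(b);

  std::cout << "#iterations: " << lscg.iterations() << "\n";
  std::cout << "estimated error: " << lscg.error() << "\n";
  return 0;
}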
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
.h
15,234
463
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr> // Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_INCOMPLETE_LUT_H #define EIGEN_INCOMPLETE_LUT_H namespace Eigen { namespace internal { /** \internal * Compute a quick-sort split of a vector * On output, the vector row is permuted such that its elements satisfy * abs(row(i)) >= abs(row(ncut)) if i<ncut * abs(row(i)) <= abs(row(ncut)) if i>ncut * \param row The vector of values * \param ind The array of index for the elements in @p row * \param ncut The number of largest elements to keep **/ template <typename VectorV, typename VectorI> Index QuickSplit(VectorV &row, VectorI &ind, Index ncut) { typedef typename VectorV::RealScalar RealScalar; using std::swap; using std::abs; Index mid; Index n = row.size(); /* length of the vector */ Index first, last ; ncut--; /* to fit the zero-based indices */ first = 0; last = n-1; if (ncut < first || ncut > last ) return 0; do { mid = first; RealScalar abskey = abs(row(mid)); for (Index j = first + 1; j <= last; j++) { if ( abs(row(j)) > abskey) { ++mid; swap(row(mid), row(j)); swap(ind(mid), ind(j)); } } /* Interchange for the pivot element */ swap(row(mid), row(first)); swap(ind(mid), ind(first)); if (mid > ncut) last = mid - 1; else if (mid < ncut ) first = mid + 1; } while (mid != ncut ); return 0; /* mid is equal to ncut */ } }// end namespace internal /** \ingroup IterativeLinearSolvers_Module * \class IncompleteLUT * \brief Incomplete LU factorization with dual-threshold strategy * * \implsparsesolverconcept * * During the numerical factorization, two dropping rules are used : * 1) any element whose magnitude is less than some tolerance is dropped. * This tolerance is obtained by multiplying the input tolerance @p droptol * by the average magnitude of all the original elements in the current row. * 2) After the elimination of the row, only the @p fill largest elements in * the L part and the @p fill largest elements in the U part are kept * (in addition to the diagonal element ). Note that @p fill is computed from * the input parameter @p fillfactor which is used the ratio to control the fill_in * relatively to the initial number of nonzero elements. * * The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements) * and when @p fill=n/2 with @p droptol being different to zero. * * References : Yousef Saad, ILUT: A dual threshold incomplete LU factorization, * Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994. * * NOTE : The following implementation is derived from the ILUT implementation * in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota * released under the terms of the GNU LGPL: * http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README * However, Yousef Saad gave us permission to relicense his ILUT code to MPL2. 
* See the Eigen mailing list archive, thread: ILUT, date: July 8, 2012: * http://listengine.tuxfamily.org/lists.tuxfamily.org/eigen/2012/07/msg00064.html * alternatively, on GMANE: * http://comments.gmane.org/gmane.comp.lib.eigen/3302 */ template <typename _Scalar, typename _StorageIndex = int> class IncompleteLUT : public SparseSolverBase<IncompleteLUT<_Scalar, _StorageIndex> > { protected: typedef SparseSolverBase<IncompleteLUT> Base; using Base::m_isInitialized; public: typedef _Scalar Scalar; typedef _StorageIndex StorageIndex; typedef typename NumTraits<Scalar>::Real RealScalar; typedef Matrix<Scalar,Dynamic,1> Vector; typedef Matrix<StorageIndex,Dynamic,1> VectorI; typedef SparseMatrix<Scalar,RowMajor,StorageIndex> FactorType; enum { ColsAtCompileTime = Dynamic, MaxColsAtCompileTime = Dynamic }; public: IncompleteLUT() : m_droptol(NumTraits<Scalar>::dummy_precision()), m_fillfactor(10), m_analysisIsOk(false), m_factorizationIsOk(false) {} template<typename MatrixType> explicit IncompleteLUT(const MatrixType& mat, const RealScalar& droptol=NumTraits<Scalar>::dummy_precision(), int fillfactor = 10) : m_droptol(droptol),m_fillfactor(fillfactor), m_analysisIsOk(false),m_factorizationIsOk(false) { eigen_assert(fillfactor != 0); compute(mat); } Index rows() const { return m_lu.rows(); } Index cols() const { return m_lu.cols(); } /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, * \c NumericalIssue if the matrix.appears to be negative. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "IncompleteLUT is not initialized."); return m_info; } template<typename MatrixType> void analyzePattern(const MatrixType& amat); template<typename MatrixType> void factorize(const MatrixType& amat); /** * Compute an incomplete LU factorization with dual threshold on the matrix mat * No pivoting is done in this version * **/ template<typename MatrixType> IncompleteLUT& compute(const MatrixType& amat) { analyzePattern(amat); factorize(amat); return *this; } void setDroptol(const RealScalar& droptol); void setFillfactor(int fillfactor); template<typename Rhs, typename Dest> void _solve_impl(const Rhs& b, Dest& x) const { x = m_Pinv * b; x = m_lu.template triangularView<UnitLower>().solve(x); x = m_lu.template triangularView<Upper>().solve(x); x = m_P * x; } protected: /** keeps off-diagonal entries; drops diagonal entries */ struct keep_diag { inline bool operator() (const Index& row, const Index& col, const Scalar&) const { return row!=col; } }; protected: FactorType m_lu; RealScalar m_droptol; int m_fillfactor; bool m_analysisIsOk; bool m_factorizationIsOk; ComputationInfo m_info; PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_P; // Fill-reducing permutation PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_Pinv; // Inverse permutation }; /** * Set control parameter droptol * \param droptol Drop any element whose magnitude is less than this tolerance **/ template<typename Scalar, typename StorageIndex> void IncompleteLUT<Scalar,StorageIndex>::setDroptol(const RealScalar& droptol) { this->m_droptol = droptol; } /** * Set control parameter fillfactor * \param fillfactor This is used to compute the number @p fill_in of largest elements to keep on each row. 
**/ template<typename Scalar, typename StorageIndex> void IncompleteLUT<Scalar,StorageIndex>::setFillfactor(int fillfactor) { this->m_fillfactor = fillfactor; } template <typename Scalar, typename StorageIndex> template<typename _MatrixType> void IncompleteLUT<Scalar,StorageIndex>::analyzePattern(const _MatrixType& amat) { // Compute the Fill-reducing permutation // Since ILUT does not perform any numerical pivoting, // it is highly preferable to keep the diagonal through symmetric permutations. #ifndef EIGEN_MPL2_ONLY // To this end, let's symmetrize the pattern and perform AMD on it. SparseMatrix<Scalar,ColMajor, StorageIndex> mat1 = amat; SparseMatrix<Scalar,ColMajor, StorageIndex> mat2 = amat.transpose(); // FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice. // on the other hand for a really non-symmetric pattern, mat2*mat1 should be prefered... SparseMatrix<Scalar,ColMajor, StorageIndex> AtA = mat2 + mat1; AMDOrdering<StorageIndex> ordering; ordering(AtA,m_P); m_Pinv = m_P.inverse(); // cache the inverse permutation #else // If AMD is not available, (MPL2-only), then let's use the slower COLAMD routine. SparseMatrix<Scalar,ColMajor, StorageIndex> mat1 = amat; COLAMDOrdering<StorageIndex> ordering; ordering(mat1,m_Pinv); m_P = m_Pinv.inverse(); #endif m_analysisIsOk = true; m_factorizationIsOk = false; m_isInitialized = true; } template <typename Scalar, typename StorageIndex> template<typename _MatrixType> void IncompleteLUT<Scalar,StorageIndex>::factorize(const _MatrixType& amat) { using std::sqrt; using std::swap; using std::abs; using internal::convert_index; eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix"); Index n = amat.cols(); // Size of the matrix m_lu.resize(n,n); // Declare Working vectors and variables Vector u(n) ; // real values of the row -- maximum size is n -- VectorI ju(n); // column position of the values in u -- maximum size is n VectorI jr(n); // Indicate the position of the nonzero elements in the vector u -- A zero location is indicated by -1 // Apply the fill-reducing permutation eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); SparseMatrix<Scalar,RowMajor, StorageIndex> mat; mat = amat.twistedBy(m_Pinv); // Initialization jr.fill(-1); ju.fill(0); u.fill(0); // number of largest elements to keep in each row: Index fill_in = (amat.nonZeros()*m_fillfactor)/n + 1; if (fill_in > n) fill_in = n; // number of largest nonzero elements to keep in the L and the U part of the current row: Index nnzL = fill_in/2; Index nnzU = nnzL; m_lu.reserve(n * (nnzL + nnzU + 1)); // global loop over the rows of the sparse matrix for (Index ii = 0; ii < n; ii++) { // 1 - copy the lower and the upper part of the row i of mat in the working vector u Index sizeu = 1; // number of nonzero elements in the upper part of the current row Index sizel = 0; // number of nonzero elements in the lower part of the current row ju(ii) = convert_index<StorageIndex>(ii); u(ii) = 0; jr(ii) = convert_index<StorageIndex>(ii); RealScalar rownorm = 0; typename FactorType::InnerIterator j_it(mat, ii); // Iterate through the current row ii for (; j_it; ++j_it) { Index k = j_it.index(); if (k < ii) { // copy the lower part ju(sizel) = convert_index<StorageIndex>(k); u(sizel) = j_it.value(); jr(k) = convert_index<StorageIndex>(sizel); ++sizel; } else if (k == ii) { u(ii) = j_it.value(); } else { // copy the upper part Index jpos = ii + sizeu; ju(jpos) = convert_index<StorageIndex>(k); u(jpos) = 
j_it.value(); jr(k) = convert_index<StorageIndex>(jpos); ++sizeu; } rownorm += numext::abs2(j_it.value()); } // 2 - detect possible zero row if(rownorm==0) { m_info = NumericalIssue; return; } // Take the 2-norm of the current row as a relative tolerance rownorm = sqrt(rownorm); // 3 - eliminate the previous nonzero rows Index jj = 0; Index len = 0; while (jj < sizel) { // In order to eliminate in the correct order, // we must select first the smallest column index among ju(jj:sizel) Index k; Index minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment k += jj; if (minrow != ju(jj)) { // swap the two locations Index j = ju(jj); swap(ju(jj), ju(k)); jr(minrow) = convert_index<StorageIndex>(jj); jr(j) = convert_index<StorageIndex>(k); swap(u(jj), u(k)); } // Reset this location jr(minrow) = -1; // Start elimination typename FactorType::InnerIterator ki_it(m_lu, minrow); while (ki_it && ki_it.index() < minrow) ++ki_it; eigen_internal_assert(ki_it && ki_it.col()==minrow); Scalar fact = u(jj) / ki_it.value(); // drop too small elements if(abs(fact) <= m_droptol) { jj++; continue; } // linear combination of the current row ii and the row minrow ++ki_it; for (; ki_it; ++ki_it) { Scalar prod = fact * ki_it.value(); Index j = ki_it.index(); Index jpos = jr(j); if (jpos == -1) // fill-in element { Index newpos; if (j >= ii) // dealing with the upper part { newpos = ii + sizeu; sizeu++; eigen_internal_assert(sizeu<=n); } else // dealing with the lower part { newpos = sizel; sizel++; eigen_internal_assert(sizel<=ii); } ju(newpos) = convert_index<StorageIndex>(j); u(newpos) = -prod; jr(j) = convert_index<StorageIndex>(newpos); } else u(jpos) -= prod; } // store the pivot element u(len) = fact; ju(len) = convert_index<StorageIndex>(minrow); ++len; jj++; } // end of the elimination on the row ii // reset the upper part of the pointer jr to zero for(Index k = 0; k <sizeu; k++) jr(ju(ii+k)) = -1; // 4 - partially sort and insert the elements in the m_lu matrix // sort the L-part of the row sizel = len; len = (std::min)(sizel, nnzL); typename Vector::SegmentReturnType ul(u.segment(0, sizel)); typename VectorI::SegmentReturnType jul(ju.segment(0, sizel)); internal::QuickSplit(ul, jul, len); // store the largest m_fill elements of the L part m_lu.startVec(ii); for(Index k = 0; k < len; k++) m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k); // store the diagonal element // apply a shifting rule to avoid zero pivots (we are doing an incomplete factorization) if (u(ii) == Scalar(0)) u(ii) = sqrt(m_droptol) * rownorm; m_lu.insertBackByOuterInnerUnordered(ii, ii) = u(ii); // sort the U-part of the row // apply the dropping rule first len = 0; for(Index k = 1; k < sizeu; k++) { if(abs(u(ii+k)) > m_droptol * rownorm ) { ++len; u(ii + len) = u(ii + k); ju(ii + len) = ju(ii + k); } } sizeu = len + 1; // +1 to take into account the diagonal element len = (std::min)(sizeu, nnzU); typename Vector::SegmentReturnType uu(u.segment(ii+1, sizeu-1)); typename VectorI::SegmentReturnType juu(ju.segment(ii+1, sizeu-1)); internal::QuickSplit(uu, juu, len); // store the largest elements of the U part for(Index k = ii + 1; k < ii + len; k++) m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k); } m_lu.finalize(); m_lu.makeCompressed(); m_factorizationIsOk = true; m_info = Success; } } // end namespace Eigen #endif // EIGEN_INCOMPLETE_LUT_H
Unknown
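IncompleteLUT is a preconditioner, so its typical use is as the second template argument of an iterative solver; setDroptol() and setFillfactor() map to the two dropping rules described in the record above. The sketch below pairs it with BiCGSTAB from the same module; the tridiagonal stencil, the parameter values, and the solver choice are illustrative assumptions rather than anything prescribed by the source.

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>
#include <vector>

int main()
{
  // Nonsymmetric tridiagonal test matrix (convection-diffusion style stencil).
  const int n = 100;
  std::vector<Eigen::Triplet<double> > coeffs;
  for (int i = 0; i < n; ++i) {
    coeffs.push_back(Eigen::Triplet<double>(i, i, 4.0));
    if (i > 0)     coeffs.push_back(Eigen::Triplet<double>(i, i - 1, -1.5));
    if (i < n - 1) coeffs.push_back(Eigen::Triplet<double>(i, i + 1, -0.5));
  }
  Eigen::SparseMatrix<double> A(n, n);
  A.setFromTriplets(coeffs.begin(), coeffs.end());
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  // BiCGSTAB preconditioned by the dual-threshold ILUT documented above.
  Eigen::BiCGSTAB<Eigen::SparseMatrix<double>, Eigen::IncompleteLUT<double> > solver;
  solver.preconditioner().setDroptol(1e-4);  // rule 1: drop entries that are small relative to the row
  solver.preconditioner().setFillfactor(10); // rule 2: cap the number of kept entries per row
  solver.compute(A);
  Eigen::VectorXd x = solver.solve(b);

  std::cout << "#iterations: " << solver.iterations()
            << ", estimated error: " << solver.error() << "\n";
  return 0;
}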
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h
.h
15,062
401
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr> // Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_INCOMPLETE_CHOlESKY_H #define EIGEN_INCOMPLETE_CHOlESKY_H #include <vector> #include <list> namespace Eigen { /** * \brief Modified Incomplete Cholesky with dual threshold * * References : C-J. Lin and J. J. Moré, Incomplete Cholesky Factorizations with * Limited memory, SIAM J. Sci. Comput. 21(1), pp. 24-45, 1999 * * \tparam Scalar the scalar type of the input matrices * \tparam _UpLo The triangular part that will be used for the computations. It can be Lower * or Upper. Default is Lower. * \tparam _OrderingType The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering<int>, * unless EIGEN_MPL2_ONLY is defined, in which case the default is NaturalOrdering<int>. * * \implsparsesolverconcept * * It performs the following incomplete factorization: \f$ S P A P' S \approx L L' \f$ * where L is a lower triangular factor, S is a diagonal scaling matrix, and P is a * fill-in reducing permutation as computed by the ordering method. * * \b Shifting \b strategy: Let \f$ B = S P A P' S \f$ be the scaled matrix on which the factorization is carried out, * and \f$ \beta \f$ be the minimum value of the diagonal. If \f$ \beta > 0 \f$ then, the factorization is directly performed * on the matrix B. Otherwise, the factorization is performed on the shifted matrix \f$ B + (\sigma+|\beta| I \f$ where * \f$ \sigma \f$ is the initial shift value as returned and set by setInitialShift() method. The default value is \f$ \sigma = 10^{-3} \f$. * If the factorization fails, then the shift in doubled until it succeed or a maximum of ten attempts. If it still fails, as returned by * the info() method, then you can either increase the initial shift, or better use another preconditioning technique. * */ template <typename Scalar, int _UpLo = Lower, typename _OrderingType = #ifndef EIGEN_MPL2_ONLY AMDOrdering<int> #else NaturalOrdering<int> #endif > class IncompleteCholesky : public SparseSolverBase<IncompleteCholesky<Scalar,_UpLo,_OrderingType> > { protected: typedef SparseSolverBase<IncompleteCholesky<Scalar,_UpLo,_OrderingType> > Base; using Base::m_isInitialized; public: typedef typename NumTraits<Scalar>::Real RealScalar; typedef _OrderingType OrderingType; typedef typename OrderingType::PermutationType PermutationType; typedef typename PermutationType::StorageIndex StorageIndex; typedef SparseMatrix<Scalar,ColMajor,StorageIndex> FactorType; typedef Matrix<Scalar,Dynamic,1> VectorSx; typedef Matrix<RealScalar,Dynamic,1> VectorRx; typedef Matrix<StorageIndex,Dynamic, 1> VectorIx; typedef std::vector<std::list<StorageIndex> > VectorList; enum { UpLo = _UpLo }; enum { ColsAtCompileTime = Dynamic, MaxColsAtCompileTime = Dynamic }; public: /** Default constructor leaving the object in a partly non-initialized stage. * * You must call compute() or the pair analyzePattern()/factorize() to make it valid. * * \sa IncompleteCholesky(const MatrixType&) */ IncompleteCholesky() : m_initialShift(1e-3),m_factorizationIsOk(false) {} /** Constructor computing the incomplete factorization for the given matrix \a matrix. 
*/ template<typename MatrixType> IncompleteCholesky(const MatrixType& matrix) : m_initialShift(1e-3),m_factorizationIsOk(false) { compute(matrix); } /** \returns number of rows of the factored matrix */ Index rows() const { return m_L.rows(); } /** \returns number of columns of the factored matrix */ Index cols() const { return m_L.cols(); } /** \brief Reports whether previous computation was successful. * * It triggers an assertion if \c *this has not been initialized through the respective constructor, * or a call to compute() or analyzePattern(). * * \returns \c Success if computation was successful, * \c NumericalIssue if the matrix appears to be negative. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "IncompleteCholesky is not initialized."); return m_info; } /** \brief Set the initial shift parameter \f$ \sigma \f$. */ void setInitialShift(RealScalar shift) { m_initialShift = shift; } /** \brief Computes the fill reducing permutation vector using the sparsity pattern of \a mat */ template<typename MatrixType> void analyzePattern(const MatrixType& mat) { OrderingType ord; PermutationType pinv; ord(mat.template selfadjointView<UpLo>(), pinv); if(pinv.size()>0) m_perm = pinv.inverse(); else m_perm.resize(0); m_L.resize(mat.rows(), mat.cols()); m_analysisIsOk = true; m_isInitialized = true; m_info = Success; } /** \brief Performs the numerical factorization of the input matrix \a mat * * The method analyzePattern() or compute() must have been called beforehand * with a matrix having the same pattern. * * \sa compute(), analyzePattern() */ template<typename MatrixType> void factorize(const MatrixType& mat); /** Computes or re-computes the incomplete Cholesky factorization of the input matrix \a mat * * It is a shortcut for a sequential call to the analyzePattern() and factorize() methods. * * \sa analyzePattern(), factorize() */ template<typename MatrixType> void compute(const MatrixType& mat) { analyzePattern(mat); factorize(mat); } // internal template<typename Rhs, typename Dest> void _solve_impl(const Rhs& b, Dest& x) const { eigen_assert(m_factorizationIsOk && "factorize() should be called first"); if (m_perm.rows() == b.rows()) x = m_perm * b; else x = b; x = m_scale.asDiagonal() * x; x = m_L.template triangularView<Lower>().solve(x); x = m_L.adjoint().template triangularView<Upper>().solve(x); x = m_scale.asDiagonal() * x; if (m_perm.rows() == b.rows()) x = m_perm.inverse() * x; } /** \returns the sparse lower triangular factor L */ const FactorType& matrixL() const { eigen_assert("m_factorizationIsOk"); return m_L; } /** \returns a vector representing the scaling factor S */ const VectorRx& scalingS() const { eigen_assert("m_factorizationIsOk"); return m_scale; } /** \returns the fill-in reducing permutation P (can be empty for a natural ordering) */ const PermutationType& permutationP() const { eigen_assert("m_analysisIsOk"); return m_perm; } protected: FactorType m_L; // The lower part stored in CSC VectorRx m_scale; // The vector for scaling the matrix RealScalar m_initialShift; // The initial shift parameter bool m_analysisIsOk; bool m_factorizationIsOk; ComputationInfo m_info; PermutationType m_perm; private: inline void updateList(Ref<const VectorIx> colPtr, Ref<VectorIx> rowIdx, Ref<VectorSx> vals, const Index& col, const Index& jk, VectorIx& firstElt, VectorList& listCol); }; // Based on the following paper: // C-J. Lin and J. J. Moré, Incomplete Cholesky Factorizations with // Limited memory, SIAM J. Sci. Comput. 21(1), pp. 
24-45, 1999 // http://ftp.mcs.anl.gov/pub/tech_reports/reports/P682.pdf template<typename Scalar, int _UpLo, typename OrderingType> template<typename _MatrixType> void IncompleteCholesky<Scalar,_UpLo, OrderingType>::factorize(const _MatrixType& mat) { using std::sqrt; eigen_assert(m_analysisIsOk && "analyzePattern() should be called first"); // Dropping strategy : Keep only the p largest elements per column, where p is the number of elements in the column of the original matrix. Other strategies will be added // Apply the fill-reducing permutation computed in analyzePattern() if (m_perm.rows() == mat.rows() ) // To detect the null permutation { // The temporary is needed to make sure that the diagonal entry is properly sorted FactorType tmp(mat.rows(), mat.cols()); tmp = mat.template selfadjointView<_UpLo>().twistedBy(m_perm); m_L.template selfadjointView<Lower>() = tmp.template selfadjointView<Lower>(); } else { m_L.template selfadjointView<Lower>() = mat.template selfadjointView<_UpLo>(); } Index n = m_L.cols(); Index nnz = m_L.nonZeros(); Map<VectorSx> vals(m_L.valuePtr(), nnz); //values Map<VectorIx> rowIdx(m_L.innerIndexPtr(), nnz); //Row indices Map<VectorIx> colPtr( m_L.outerIndexPtr(), n+1); // Pointer to the beginning of each row VectorIx firstElt(n-1); // for each j, points to the next entry in vals that will be used in the factorization VectorList listCol(n); // listCol(j) is a linked list of columns to update column j VectorSx col_vals(n); // Store a nonzero values in each column VectorIx col_irow(n); // Row indices of nonzero elements in each column VectorIx col_pattern(n); col_pattern.fill(-1); StorageIndex col_nnz; // Computes the scaling factors m_scale.resize(n); m_scale.setZero(); for (Index j = 0; j < n; j++) for (Index k = colPtr[j]; k < colPtr[j+1]; k++) { m_scale(j) += numext::abs2(vals(k)); if(rowIdx[k]!=j) m_scale(rowIdx[k]) += numext::abs2(vals(k)); } m_scale = m_scale.cwiseSqrt().cwiseSqrt(); for (Index j = 0; j < n; ++j) if(m_scale(j)>(std::numeric_limits<RealScalar>::min)()) m_scale(j) = RealScalar(1)/m_scale(j); else m_scale(j) = 1; // TODO disable scaling if not needed, i.e., if it is roughly uniform? 
(this will make solve() faster) // Scale and compute the shift for the matrix RealScalar mindiag = NumTraits<RealScalar>::highest(); for (Index j = 0; j < n; j++) { for (Index k = colPtr[j]; k < colPtr[j+1]; k++) vals[k] *= (m_scale(j)*m_scale(rowIdx[k])); eigen_internal_assert(rowIdx[colPtr[j]]==j && "IncompleteCholesky: only the lower triangular part must be stored"); mindiag = numext::mini(numext::real(vals[colPtr[j]]), mindiag); } FactorType L_save = m_L; RealScalar shift = 0; if(mindiag <= RealScalar(0.)) shift = m_initialShift - mindiag; m_info = NumericalIssue; // Try to perform the incomplete factorization using the current shift int iter = 0; do { // Apply the shift to the diagonal elements of the matrix for (Index j = 0; j < n; j++) vals[colPtr[j]] += shift; // jki version of the Cholesky factorization Index j=0; for (; j < n; ++j) { // Left-looking factorization of the j-th column // First, load the j-th column into col_vals Scalar diag = vals[colPtr[j]]; // It is assumed that only the lower part is stored col_nnz = 0; for (Index i = colPtr[j] + 1; i < colPtr[j+1]; i++) { StorageIndex l = rowIdx[i]; col_vals(col_nnz) = vals[i]; col_irow(col_nnz) = l; col_pattern(l) = col_nnz; col_nnz++; } { typename std::list<StorageIndex>::iterator k; // Browse all previous columns that will update column j for(k = listCol[j].begin(); k != listCol[j].end(); k++) { Index jk = firstElt(*k); // First element to use in the column eigen_internal_assert(rowIdx[jk]==j); Scalar v_j_jk = numext::conj(vals[jk]); jk += 1; for (Index i = jk; i < colPtr[*k+1]; i++) { StorageIndex l = rowIdx[i]; if(col_pattern[l]<0) { col_vals(col_nnz) = vals[i] * v_j_jk; col_irow[col_nnz] = l; col_pattern(l) = col_nnz; col_nnz++; } else col_vals(col_pattern[l]) -= vals[i] * v_j_jk; } updateList(colPtr,rowIdx,vals, *k, jk, firstElt, listCol); } } // Scale the current column if(numext::real(diag) <= 0) { if(++iter>=10) return; // increase shift shift = numext::maxi(m_initialShift,RealScalar(2)*shift); // restore m_L, col_pattern, and listCol vals = Map<const VectorSx>(L_save.valuePtr(), nnz); rowIdx = Map<const VectorIx>(L_save.innerIndexPtr(), nnz); colPtr = Map<const VectorIx>(L_save.outerIndexPtr(), n+1); col_pattern.fill(-1); for(Index i=0; i<n; ++i) listCol[i].clear(); break; } RealScalar rdiag = sqrt(numext::real(diag)); vals[colPtr[j]] = rdiag; for (Index k = 0; k<col_nnz; ++k) { Index i = col_irow[k]; //Scale col_vals(k) /= rdiag; //Update the remaining diagonals with col_vals vals[colPtr[i]] -= numext::abs2(col_vals(k)); } // Select the largest p elements // p is the original number of elements in the column (without the diagonal) Index p = colPtr[j+1] - colPtr[j] - 1 ; Ref<VectorSx> cvals = col_vals.head(col_nnz); Ref<VectorIx> cirow = col_irow.head(col_nnz); internal::QuickSplit(cvals,cirow, p); // Insert the largest p elements in the matrix Index cpt = 0; for (Index i = colPtr[j]+1; i < colPtr[j+1]; i++) { vals[i] = col_vals(cpt); rowIdx[i] = col_irow(cpt); // restore col_pattern: col_pattern(col_irow(cpt)) = -1; cpt++; } // Get the first smallest row index and put it after the diagonal element Index jk = colPtr(j)+1; updateList(colPtr,rowIdx,vals,j,jk,firstElt,listCol); } if(j==n) { m_factorizationIsOk = true; m_info = Success; } } while(m_info!=Success); } template<typename Scalar, int _UpLo, typename OrderingType> inline void IncompleteCholesky<Scalar,_UpLo, OrderingType>::updateList(Ref<const VectorIx> colPtr, Ref<VectorIx> rowIdx, Ref<VectorSx> vals, const Index& col, const Index& jk, VectorIx& firstElt, 
VectorList& listCol) { if (jk < colPtr(col+1) ) { Index p = colPtr(col+1) - jk; Index minpos; rowIdx.segment(jk,p).minCoeff(&minpos); minpos += jk; if (rowIdx(minpos) != rowIdx(jk)) { //Swap std::swap(rowIdx(jk),rowIdx(minpos)); std::swap(vals(jk),vals(minpos)); } firstElt(col) = internal::convert_index<StorageIndex,Index>(jk); listCol[rowIdx(jk)].push_back(internal::convert_index<StorageIndex,Index>(col)); } } } // end namespace Eigen #endif
Unknown
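The IncompleteCholesky record above describes a scaled, shifted, limited-memory factorization intended as a preconditioner, with setInitialShift() controlling the shift sigma. A hedged sketch of one plausible pairing with ConjugateGradient follows; the lower-triangular-only assembly, the Laplacian values, and the shift value are assumptions for illustration, and the default AMDOrdering is assumed to be available (i.e. EIGEN_MPL2_ONLY is not defined).

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>
#include <vector>

int main()
{
  // SPD 1D Laplacian; only the lower triangular part is assembled,
  // matching the default _UpLo = Lower of both CG and IncompleteCholesky.
  const int n = 200;
  std::vector<Eigen::Triplet<double> > coeffs;
  for (int i = 0; i < n; ++i) {
    coeffs.push_back(Eigen::Triplet<double>(i, i, 2.0));
    if (i > 0) coeffs.push_back(Eigen::Triplet<double>(i, i - 1, -1.0)); // strictly lower entries only
  }
  Eigen::SparseMatrix<double> A(n, n);
  A.setFromTriplets(coeffs.begin(), coeffs.end());
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  Eigen::ConjugateGradient<Eigen::SparseMatrix<double>, Eigen::Lower,
                           Eigen::IncompleteCholesky<double> > cg;
  cg.preconditioner().setInitialShift(1e-3); // sigma applied if the scaled diagonal is not positive
  cg.compute(A);                             // runs analyzePattern() and factorize() on the preconditioner
  Eigen::VectorXd x = cg.solve(b);

  std::cout << "#iterations: " << cg.iterations()
            << ", estimated error: " << cg.error() << "\n";
  return 0;
}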
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
.h
7,253
229
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_BICGSTAB_H #define EIGEN_BICGSTAB_H namespace Eigen { namespace internal { /** \internal Low-level bi conjugate gradient stabilized algorithm * \param mat The matrix A * \param rhs The right hand side vector b * \param x On input and initial solution, on output the computed solution. * \param precond A preconditioner being able to efficiently solve for an * approximation of Ax=b (regardless of b) * \param iters On input the max number of iteration, on output the number of performed iterations. * \param tol_error On input the tolerance error, on output an estimation of the relative error. * \return false in the case of numerical issue, for example a break down of BiCGSTAB. */ template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner> bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x, const Preconditioner& precond, Index& iters, typename Dest::RealScalar& tol_error) { using std::sqrt; using std::abs; typedef typename Dest::RealScalar RealScalar; typedef typename Dest::Scalar Scalar; typedef Matrix<Scalar,Dynamic,1> VectorType; RealScalar tol = tol_error; Index maxIters = iters; Index n = mat.cols(); VectorType r = rhs - mat * x; VectorType r0 = r; RealScalar r0_sqnorm = r0.squaredNorm(); RealScalar rhs_sqnorm = rhs.squaredNorm(); if(rhs_sqnorm == 0) { x.setZero(); return true; } Scalar rho = 1; Scalar alpha = 1; Scalar w = 1; VectorType v = VectorType::Zero(n), p = VectorType::Zero(n); VectorType y(n), z(n); VectorType kt(n), ks(n); VectorType s(n), t(n); RealScalar tol2 = tol*tol*rhs_sqnorm; RealScalar eps2 = NumTraits<Scalar>::epsilon()*NumTraits<Scalar>::epsilon(); Index i = 0; Index restarts = 0; while ( r.squaredNorm() > tol2 && i<maxIters ) { Scalar rho_old = rho; rho = r0.dot(r); if (abs(rho) < eps2*r0_sqnorm) { // The new residual vector became too orthogonal to the arbitrarily chosen direction r0 // Let's restart with a new r0: r = rhs - mat * x; r0 = r; rho = r0_sqnorm = r.squaredNorm(); if(restarts++ == 0) i = 0; } Scalar beta = (rho/rho_old) * (alpha / w); p = r + beta * (p - w * v); y = precond.solve(p); v.noalias() = mat * y; alpha = rho / r0.dot(v); s = r - alpha * v; z = precond.solve(s); t.noalias() = mat * z; RealScalar tmp = t.squaredNorm(); if(tmp>RealScalar(0)) w = t.dot(s) / tmp; else w = Scalar(0); x += alpha * y + w * z; r = s - w * t; ++i; } tol_error = sqrt(r.squaredNorm()/rhs_sqnorm); iters = i; return true; } } template< typename _MatrixType, typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> > class BiCGSTAB; namespace internal { template< typename _MatrixType, typename _Preconditioner> struct traits<BiCGSTAB<_MatrixType,_Preconditioner> > { typedef _MatrixType MatrixType; typedef _Preconditioner Preconditioner; }; } /** \ingroup IterativeLinearSolvers_Module * \brief A bi conjugate gradient stabilized solver for sparse square problems * * This class allows to solve for A.x = b sparse linear problems using a bi conjugate gradient * stabilized algorithm. The vectors x and b can be either dense or sparse. 
* * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix. * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner * * \implsparsesolverconcept * * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations * and NumTraits<Scalar>::epsilon() for the tolerance. * * The tolerance corresponds to the relative residual error: |Ax-b|/|b| * * \b Performance: when using sparse matrices, best performance is achied for a row-major sparse matrix format. * Moreover, in this case multi-threading can be exploited if the user code is compiled with OpenMP enabled. * See \ref TopicMultiThreading for details. * * This class can be used as the direct solver classes. Here is a typical usage example: * \include BiCGSTAB_simple.cpp * * By default the iterations start with x=0 as an initial guess of the solution. * One can control the start using the solveWithGuess() method. * * BiCGSTAB can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink. * * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner */ template< typename _MatrixType, typename _Preconditioner> class BiCGSTAB : public IterativeSolverBase<BiCGSTAB<_MatrixType,_Preconditioner> > { typedef IterativeSolverBase<BiCGSTAB> Base; using Base::matrix; using Base::m_error; using Base::m_iterations; using Base::m_info; using Base::m_isInitialized; public: typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef _Preconditioner Preconditioner; public: /** Default constructor. */ BiCGSTAB() : Base() {} /** Initialize the solver with matrix \a A for further \c Ax=b solving. * * This constructor is a shortcut for the default constructor followed * by a call to compute(). * * \warning this class stores a reference to the matrix A as well as some * precomputed values that depend on it. Therefore, if \a A is changed * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. */ template<typename MatrixDerived> explicit BiCGSTAB(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {} ~BiCGSTAB() {} /** \internal */ template<typename Rhs,typename Dest> void _solve_with_guess_impl(const Rhs& b, Dest& x) const { bool failed = false; for(Index j=0; j<b.cols(); ++j) { m_iterations = Base::maxIterations(); m_error = Base::m_tolerance; typename Dest::ColXpr xj(x,j); if(!internal::bicgstab(matrix(), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error)) failed = true; } m_info = failed ? NumericalIssue : m_error <= Base::m_tolerance ? Success : NoConvergence; m_isInitialized = true; } /** \internal */ using Base::_solve_impl; template<typename Rhs,typename Dest> void _solve_impl(const MatrixBase<Rhs>& b, Dest& x) const { x.resize(this->rows(),b.cols()); x.setZero(); _solve_with_guess_impl(b,x); } protected: }; } // end namespace Eigen #endif // EIGEN_BICGSTAB_H
Unknown
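The BiCGSTAB record above points to BiCGSTAB_simple.cpp for a usage example, but that snippet is not part of this dump. As a stand-in, here is a sketch using the default DiagonalPreconditioner, the setMaxIterations()/setTolerance() controls mentioned in the comment, and a warm restart through solveWithGuess(); the matrix stencil and the tolerances are illustrative assumptions.

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>
#include <vector>

int main()
{
  // Small nonsymmetric, diagonally dominant sparse system.
  const int n = 50;
  std::vector<Eigen::Triplet<double> > coeffs;
  for (int i = 0; i < n; ++i) {
    coeffs.push_back(Eigen::Triplet<double>(i, i, 3.0));
    if (i > 0)     coeffs.push_back(Eigen::Triplet<double>(i, i - 1, -2.0));
    if (i < n - 1) coeffs.push_back(Eigen::Triplet<double>(i, i + 1, -0.5));
  }
  Eigen::SparseMatrix<double> A(n, n);
  A.setFromTriplets(coeffs.begin(), coeffs.end());
  Eigen::VectorXd b = Eigen::VectorXd::Random(n);

  // Second template argument defaults to DiagonalPreconditioner (Jacobi).
  Eigen::BiCGSTAB<Eigen::SparseMatrix<double> > solver;
  solver.setMaxIterations(500);
  solver.setTolerance(1e-10);                        // relative residual |Ax-b|/|b|
  solver.compute(A);

  Eigen::VectorXd x  = solver.solve(b);              // iterations start from x = 0
  Eigen::VectorXd x2 = solver.solveWithGuess(b, x);  // warm start from a previous solution

  std::cout << "#iterations: " << solver.iterations()
            << ", estimated error: " << solver.error() << "\n";
  return 0;
}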
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
.h
9,289
247
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CONJUGATE_GRADIENT_H #define EIGEN_CONJUGATE_GRADIENT_H namespace Eigen { namespace internal { /** \internal Low-level conjugate gradient algorithm * \param mat The matrix A * \param rhs The right hand side vector b * \param x On input and initial solution, on output the computed solution. * \param precond A preconditioner being able to efficiently solve for an * approximation of Ax=b (regardless of b) * \param iters On input the max number of iteration, on output the number of performed iterations. * \param tol_error On input the tolerance error, on output an estimation of the relative error. */ template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner> EIGEN_DONT_INLINE void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x, const Preconditioner& precond, Index& iters, typename Dest::RealScalar& tol_error) { using std::sqrt; using std::abs; typedef typename Dest::RealScalar RealScalar; typedef typename Dest::Scalar Scalar; typedef Matrix<Scalar,Dynamic,1> VectorType; RealScalar tol = tol_error; Index maxIters = iters; Index n = mat.cols(); VectorType residual = rhs - mat * x; //initial residual RealScalar rhsNorm2 = rhs.squaredNorm(); if(rhsNorm2 == 0) { x.setZero(); iters = 0; tol_error = 0; return; } const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)(); RealScalar threshold = numext::maxi(tol*tol*rhsNorm2,considerAsZero); RealScalar residualNorm2 = residual.squaredNorm(); if (residualNorm2 < threshold) { iters = 0; tol_error = sqrt(residualNorm2 / rhsNorm2); return; } VectorType p(n); p = precond.solve(residual); // initial search direction VectorType z(n), tmp(n); RealScalar absNew = numext::real(residual.dot(p)); // the square of the absolute value of r scaled by invM Index i = 0; while(i < maxIters) { tmp.noalias() = mat * p; // the bottleneck of the algorithm Scalar alpha = absNew / p.dot(tmp); // the amount we travel on dir x += alpha * p; // update solution residual -= alpha * tmp; // update residual residualNorm2 = residual.squaredNorm(); if(residualNorm2 < threshold) break; z = precond.solve(residual); // approximately solve for "A z = residual" RealScalar absOld = absNew; absNew = numext::real(residual.dot(z)); // update the absolute value of r RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction p = z + beta * p; // update search direction i++; } tol_error = sqrt(residualNorm2 / rhsNorm2); iters = i; } } template< typename _MatrixType, int _UpLo=Lower, typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> > class ConjugateGradient; namespace internal { template< typename _MatrixType, int _UpLo, typename _Preconditioner> struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> > { typedef _MatrixType MatrixType; typedef _Preconditioner Preconditioner; }; } /** \ingroup IterativeLinearSolvers_Module * \brief A conjugate gradient solver for sparse (or dense) self-adjoint problems * * This class allows to solve for A.x = b linear problems using an iterative conjugate gradient algorithm. * The matrix A must be selfadjoint. 
The matrix A and the vectors x and b can be either dense or sparse. * * \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix. * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower, * \c Upper, or \c Lower|Upper in which the full matrix entries will be considered. * Default is \c Lower, best performance is \c Lower|Upper. * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner * * \implsparsesolverconcept * * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations * and NumTraits<Scalar>::epsilon() for the tolerance. * * The tolerance corresponds to the relative residual error: |Ax-b|/|b| * * \b Performance: Even though the default value of \c _UpLo is \c Lower, significantly higher performance is * achieved when using a complete matrix and \b Lower|Upper as the \a _UpLo template parameter. Moreover, in this * case multi-threading can be exploited if the user code is compiled with OpenMP enabled. * See \ref TopicMultiThreading for details. * * This class can be used as the direct solver classes. Here is a typical usage example: \code int n = 10000; VectorXd x(n), b(n); SparseMatrix<double> A(n,n); // fill A and b ConjugateGradient<SparseMatrix<double>, Lower|Upper> cg; cg.compute(A); x = cg.solve(b); std::cout << "#iterations: " << cg.iterations() << std::endl; std::cout << "estimated error: " << cg.error() << std::endl; // update b, and solve again x = cg.solve(b); \endcode * * By default the iterations start with x=0 as an initial guess of the solution. * One can control the start using the solveWithGuess() method. * * ConjugateGradient can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink. * * \sa class LeastSquaresConjugateGradient, class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner */ template< typename _MatrixType, int _UpLo, typename _Preconditioner> class ConjugateGradient : public IterativeSolverBase<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> > { typedef IterativeSolverBase<ConjugateGradient> Base; using Base::matrix; using Base::m_error; using Base::m_iterations; using Base::m_info; using Base::m_isInitialized; public: typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef _Preconditioner Preconditioner; enum { UpLo = _UpLo }; public: /** Default constructor. */ ConjugateGradient() : Base() {} /** Initialize the solver with matrix \a A for further \c Ax=b solving. * * This constructor is a shortcut for the default constructor followed * by a call to compute(). * * \warning this class stores a reference to the matrix A as well as some * precomputed values that depend on it. Therefore, if \a A is changed * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. 
*/ template<typename MatrixDerived> explicit ConjugateGradient(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {} ~ConjugateGradient() {} /** \internal */ template<typename Rhs,typename Dest> void _solve_with_guess_impl(const Rhs& b, Dest& x) const { typedef typename Base::MatrixWrapper MatrixWrapper; typedef typename Base::ActualMatrixType ActualMatrixType; enum { TransposeInput = (!MatrixWrapper::MatrixFree) && (UpLo==(Lower|Upper)) && (!MatrixType::IsRowMajor) && (!NumTraits<Scalar>::IsComplex) }; typedef typename internal::conditional<TransposeInput,Transpose<const ActualMatrixType>, ActualMatrixType const&>::type RowMajorWrapper; EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(MatrixWrapper::MatrixFree,UpLo==(Lower|Upper)),MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY); typedef typename internal::conditional<UpLo==(Lower|Upper), RowMajorWrapper, typename MatrixWrapper::template ConstSelfAdjointViewReturnType<UpLo>::Type >::type SelfAdjointWrapper; m_iterations = Base::maxIterations(); m_error = Base::m_tolerance; for(Index j=0; j<b.cols(); ++j) { m_iterations = Base::maxIterations(); m_error = Base::m_tolerance; typename Dest::ColXpr xj(x,j); RowMajorWrapper row_mat(matrix()); internal::conjugate_gradient(SelfAdjointWrapper(row_mat), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error); } m_isInitialized = true; m_info = m_error <= Base::m_tolerance ? Success : NoConvergence; } /** \internal */ using Base::_solve_impl; template<typename Rhs,typename Dest> void _solve_impl(const MatrixBase<Rhs>& b, Dest& x) const { x.setZero(); _solve_with_guess_impl(b.derived(),x); } protected: }; } // end namespace Eigen #endif // EIGEN_CONJUGATE_GRADIENT_H
Unknown
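Usage sketch for the ConjugateGradient class documented above (not part of the Eigen sources; the tridiagonal 1D-Laplacian test matrix, its size, and the tolerance value are illustrative assumptions). It exercises the documented compute()/solve()/iterations()/error() workflow with the Lower|Upper mode that the class documentation recommends for best performance:

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>
#include <vector>

int main()
{
  using SpMat = Eigen::SparseMatrix<double>;
  const int n = 1000;

  // Assemble a symmetric positive definite tridiagonal matrix (1D Laplacian).
  std::vector<Eigen::Triplet<double>> coeffs;
  for(int i = 0; i < n; ++i)
  {
    coeffs.emplace_back(i, i, 2.0);
    if(i > 0)   coeffs.emplace_back(i, i-1, -1.0);
    if(i < n-1) coeffs.emplace_back(i, i+1, -1.0);
  }
  SpMat A(n, n);
  A.setFromTriplets(coeffs.begin(), coeffs.end());

  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  // Lower|Upper tells the solver to use the full matrix; per the documentation
  // above this is the configuration with the best performance.
  Eigen::ConjugateGradient<SpMat, Eigen::Lower|Eigen::Upper> cg;
  cg.setMaxIterations(2*n);
  cg.setTolerance(1e-10);
  cg.compute(A);

  Eigen::VectorXd x = cg.solve(b);
  std::cout << "#iterations:     " << cg.iterations() << "\n"
            << "estimated error: " << cg.error() << "\n"
            << "true residual:   " << (A*x - b).norm() / b.norm() << std::endl;
  return 0;
}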
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h
.h
4,158
116
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SOLVEWITHGUESS_H
#define EIGEN_SOLVEWITHGUESS_H

namespace Eigen {

template<typename Decomposition, typename RhsType, typename GuessType> class SolveWithGuess;

/** \class SolveWithGuess
  * \ingroup IterativeLinearSolvers_Module
  *
  * \brief Pseudo expression representing a solving operation
  *
  * \tparam Decomposition the type of the matrix or decomposition object
  * \tparam RhsType the type of the right-hand side
  *
  * This class represents an expression of A.solve(B)
  * and most of the time this is the only way it is used.
  *
  */
namespace internal {

template<typename Decomposition, typename RhsType, typename GuessType>
struct traits<SolveWithGuess<Decomposition, RhsType, GuessType> >
  : traits<Solve<Decomposition,RhsType> >
{};

}

template<typename Decomposition, typename RhsType, typename GuessType>
class SolveWithGuess : public internal::generic_xpr_base<SolveWithGuess<Decomposition,RhsType,GuessType>, MatrixXpr, typename internal::traits<RhsType>::StorageKind>::type
{
public:
  typedef typename internal::traits<SolveWithGuess>::Scalar Scalar;
  typedef typename internal::traits<SolveWithGuess>::PlainObject PlainObject;
  typedef typename internal::generic_xpr_base<SolveWithGuess<Decomposition,RhsType,GuessType>, MatrixXpr, typename internal::traits<RhsType>::StorageKind>::type Base;
  typedef typename internal::ref_selector<SolveWithGuess>::type Nested;

  SolveWithGuess(const Decomposition &dec, const RhsType &rhs, const GuessType &guess)
    : m_dec(dec), m_rhs(rhs), m_guess(guess)
  {}

  EIGEN_DEVICE_FUNC Index rows() const { return m_dec.cols(); }
  EIGEN_DEVICE_FUNC Index cols() const { return m_rhs.cols(); }

  EIGEN_DEVICE_FUNC const Decomposition& dec()   const { return m_dec; }
  EIGEN_DEVICE_FUNC const RhsType&       rhs()   const { return m_rhs; }
  EIGEN_DEVICE_FUNC const GuessType&     guess() const { return m_guess; }

protected:
  const Decomposition &m_dec;
  const RhsType       &m_rhs;
  const GuessType     &m_guess;

private:
  Scalar coeff(Index row, Index col) const;
  Scalar coeff(Index i) const;
};

namespace internal {

// Evaluator of SolveWithGuess -> eval into a temporary
template<typename Decomposition, typename RhsType, typename GuessType>
struct evaluator<SolveWithGuess<Decomposition,RhsType, GuessType> >
  : public evaluator<typename SolveWithGuess<Decomposition,RhsType,GuessType>::PlainObject>
{
  typedef SolveWithGuess<Decomposition,RhsType,GuessType> SolveType;
  typedef typename SolveType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  evaluator(const SolveType& solve)
    : m_result(solve.rows(), solve.cols())
  {
    ::new (static_cast<Base*>(this)) Base(m_result);
    m_result = solve.guess();
    solve.dec()._solve_with_guess_impl(solve.rhs(), m_result);
  }

protected:
  PlainObject m_result;
};

// Specialization for "dst = dec.solveWithGuess(rhs)"
// NOTE we need to specialize it for Dense2Dense to avoid ambiguous specialization error and a Sparse2Sparse specialization must exist somewhere
template<typename DstXprType, typename DecType, typename RhsType, typename GuessType, typename Scalar>
struct Assignment<DstXprType, SolveWithGuess<DecType,RhsType,GuessType>, internal::assign_op<Scalar,Scalar>, Dense2Dense>
{
  typedef SolveWithGuess<DecType,RhsType,GuessType> SrcXprType;
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
  {
    Index dstRows = src.rows();
    Index dstCols = src.cols();
    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
      dst.resize(dstRows, dstCols);

    dst = src.guess();
    src.dec()._solve_with_guess_impl(src.rhs(), dst/*, src.guess()*/);
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SOLVEWITHGUESS_H
Unknown
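Hedged illustration of how the SolveWithGuess expression above is typically triggered from user code via IterativeSolverBase::solveWithGuess(); the warm-start scenario and the perturbed right-hand side are assumptions made for this example, not something prescribed by the file itself:

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>
#include <vector>

int main()
{
  using SpMat = Eigen::SparseMatrix<double>;
  const int n = 500;

  // SPD tridiagonal test matrix (illustrative choice).
  std::vector<Eigen::Triplet<double>> coeffs;
  for(int i = 0; i < n; ++i)
  {
    coeffs.emplace_back(i, i, 2.0);
    if(i > 0)   coeffs.emplace_back(i, i-1, -1.0);
    if(i < n-1) coeffs.emplace_back(i, i+1, -1.0);
  }
  SpMat A(n, n);
  A.setFromTriplets(coeffs.begin(), coeffs.end());

  Eigen::ConjugateGradient<SpMat, Eigen::Lower|Eigen::Upper> cg(A);

  // First solve: cold start (x = 0 is used as the initial guess internally).
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);
  Eigen::VectorXd x = cg.solve(b);
  std::cout << "cold start iterations: " << cg.iterations() << std::endl;

  // Second solve with a slightly perturbed right-hand side, warm started from x.
  // solveWithGuess() only builds a SolveWithGuess expression; it is evaluated on
  // assignment, where the evaluator above copies the guess into the result and
  // then calls _solve_with_guess_impl() on the decomposition.
  Eigen::VectorXd b2 = b + 0.01 * Eigen::VectorXd::Random(n);
  Eigen::VectorXd x2 = cg.solveWithGuess(b2, x);
  std::cout << "warm start iterations: " << cg.iterations() << std::endl;
  return 0;
}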
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
.h
6,755
227
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_BASIC_PRECONDITIONERS_H #define EIGEN_BASIC_PRECONDITIONERS_H namespace Eigen { /** \ingroup IterativeLinearSolvers_Module * \brief A preconditioner based on the digonal entries * * This class allows to approximately solve for A.x = b problems assuming A is a diagonal matrix. * In other words, this preconditioner neglects all off diagonal entries and, in Eigen's language, solves for: \code A.diagonal().asDiagonal() . x = b \endcode * * \tparam _Scalar the type of the scalar. * * \implsparsesolverconcept * * This preconditioner is suitable for both selfadjoint and general problems. * The diagonal entries are pre-inverted and stored into a dense vector. * * \note A variant that has yet to be implemented would attempt to preserve the norm of each column. * * \sa class LeastSquareDiagonalPreconditioner, class ConjugateGradient */ template <typename _Scalar> class DiagonalPreconditioner { typedef _Scalar Scalar; typedef Matrix<Scalar,Dynamic,1> Vector; public: typedef typename Vector::StorageIndex StorageIndex; enum { ColsAtCompileTime = Dynamic, MaxColsAtCompileTime = Dynamic }; DiagonalPreconditioner() : m_isInitialized(false) {} template<typename MatType> explicit DiagonalPreconditioner(const MatType& mat) : m_invdiag(mat.cols()) { compute(mat); } Index rows() const { return m_invdiag.size(); } Index cols() const { return m_invdiag.size(); } template<typename MatType> DiagonalPreconditioner& analyzePattern(const MatType& ) { return *this; } template<typename MatType> DiagonalPreconditioner& factorize(const MatType& mat) { m_invdiag.resize(mat.cols()); for(int j=0; j<mat.outerSize(); ++j) { typename MatType::InnerIterator it(mat,j); while(it && it.index()!=j) ++it; if(it && it.index()==j && it.value()!=Scalar(0)) m_invdiag(j) = Scalar(1)/it.value(); else m_invdiag(j) = Scalar(1); } m_isInitialized = true; return *this; } template<typename MatType> DiagonalPreconditioner& compute(const MatType& mat) { return factorize(mat); } /** \internal */ template<typename Rhs, typename Dest> void _solve_impl(const Rhs& b, Dest& x) const { x = m_invdiag.array() * b.array() ; } template<typename Rhs> inline const Solve<DiagonalPreconditioner, Rhs> solve(const MatrixBase<Rhs>& b) const { eigen_assert(m_isInitialized && "DiagonalPreconditioner is not initialized."); eigen_assert(m_invdiag.size()==b.rows() && "DiagonalPreconditioner::solve(): invalid number of rows of the right hand side matrix b"); return Solve<DiagonalPreconditioner, Rhs>(*this, b.derived()); } ComputationInfo info() { return Success; } protected: Vector m_invdiag; bool m_isInitialized; }; /** \ingroup IterativeLinearSolvers_Module * \brief Jacobi preconditioner for LeastSquaresConjugateGradient * * This class allows to approximately solve for A' A x = A' b problems assuming A' A is a diagonal matrix. * In other words, this preconditioner neglects all off diagonal entries and, in Eigen's language, solves for: \code (A.adjoint() * A).diagonal().asDiagonal() * x = b \endcode * * \tparam _Scalar the type of the scalar. * * \implsparsesolverconcept * * The diagonal entries are pre-inverted and stored into a dense vector. 
* * \sa class LeastSquaresConjugateGradient, class DiagonalPreconditioner */ template <typename _Scalar> class LeastSquareDiagonalPreconditioner : public DiagonalPreconditioner<_Scalar> { typedef _Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; typedef DiagonalPreconditioner<_Scalar> Base; using Base::m_invdiag; public: LeastSquareDiagonalPreconditioner() : Base() {} template<typename MatType> explicit LeastSquareDiagonalPreconditioner(const MatType& mat) : Base() { compute(mat); } template<typename MatType> LeastSquareDiagonalPreconditioner& analyzePattern(const MatType& ) { return *this; } template<typename MatType> LeastSquareDiagonalPreconditioner& factorize(const MatType& mat) { // Compute the inverse squared-norm of each column of mat m_invdiag.resize(mat.cols()); if(MatType::IsRowMajor) { m_invdiag.setZero(); for(Index j=0; j<mat.outerSize(); ++j) { for(typename MatType::InnerIterator it(mat,j); it; ++it) m_invdiag(it.index()) += numext::abs2(it.value()); } for(Index j=0; j<mat.cols(); ++j) if(numext::real(m_invdiag(j))>RealScalar(0)) m_invdiag(j) = RealScalar(1)/numext::real(m_invdiag(j)); } else { for(Index j=0; j<mat.outerSize(); ++j) { RealScalar sum = mat.col(j).squaredNorm(); if(sum>RealScalar(0)) m_invdiag(j) = RealScalar(1)/sum; else m_invdiag(j) = RealScalar(1); } } Base::m_isInitialized = true; return *this; } template<typename MatType> LeastSquareDiagonalPreconditioner& compute(const MatType& mat) { return factorize(mat); } ComputationInfo info() { return Success; } protected: }; /** \ingroup IterativeLinearSolvers_Module * \brief A naive preconditioner which approximates any matrix as the identity matrix * * \implsparsesolverconcept * * \sa class DiagonalPreconditioner */ class IdentityPreconditioner { public: IdentityPreconditioner() {} template<typename MatrixType> explicit IdentityPreconditioner(const MatrixType& ) {} template<typename MatrixType> IdentityPreconditioner& analyzePattern(const MatrixType& ) { return *this; } template<typename MatrixType> IdentityPreconditioner& factorize(const MatrixType& ) { return *this; } template<typename MatrixType> IdentityPreconditioner& compute(const MatrixType& ) { return *this; } template<typename Rhs> inline const Rhs& solve(const Rhs& b) const { return b; } ComputationInfo info() { return Success; } }; } // end namespace Eigen #endif // EIGEN_BASIC_PRECONDITIONERS_H
Unknown
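A minimal comparison sketch for the preconditioners defined above, assuming an SPD test matrix with an unevenly scaled diagonal (an arbitrary choice made only for illustration). It swaps the default DiagonalPreconditioner for the IdentityPreconditioner through the solver's third template argument:

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>
#include <vector>

int main()
{
  using SpMat = Eigen::SparseMatrix<double>;
  const int n = 400;

  // SPD matrix whose diagonal is unevenly scaled, so Jacobi scaling has an effect.
  std::vector<Eigen::Triplet<double>> coeffs;
  for(int i = 0; i < n; ++i)
  {
    double d = 2.0 * (1.0 + i % 7);          // uneven diagonal scaling
    coeffs.emplace_back(i, i, d);
    if(i > 0)   coeffs.emplace_back(i, i-1, -1.0);
    if(i < n-1) coeffs.emplace_back(i, i+1, -1.0);
  }
  SpMat A(n, n);
  A.setFromTriplets(coeffs.begin(), coeffs.end());
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  // Default third template argument: DiagonalPreconditioner (Jacobi).
  Eigen::ConjugateGradient<SpMat, Eigen::Lower|Eigen::Upper> jacobi;
  jacobi.compute(A);
  Eigen::VectorXd x1 = jacobi.solve(b);

  // Same solver with the trivial IdentityPreconditioner, for comparison.
  Eigen::ConjugateGradient<SpMat, Eigen::Lower|Eigen::Upper,
                           Eigen::IdentityPreconditioner> plain;
  plain.compute(A);
  Eigen::VectorXd x2 = plain.solve(b);

  std::cout << "Jacobi-preconditioned iterations: " << jacobi.iterations() << "\n"
            << "Unpreconditioned iterations:      " << plain.iterations() << std::endl;
  return 0;
}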
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
.h
11,527
395
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ITERATIVE_SOLVER_BASE_H #define EIGEN_ITERATIVE_SOLVER_BASE_H namespace Eigen { namespace internal { template<typename MatrixType> struct is_ref_compatible_impl { private: template <typename T0> struct any_conversion { template <typename T> any_conversion(const volatile T&); template <typename T> any_conversion(T&); }; struct yes {int a[1];}; struct no {int a[2];}; template<typename T> static yes test(const Ref<const T>&, int); template<typename T> static no test(any_conversion<T>, ...); public: static MatrixType ms_from; enum { value = sizeof(test<MatrixType>(ms_from, 0))==sizeof(yes) }; }; template<typename MatrixType> struct is_ref_compatible { enum { value = is_ref_compatible_impl<typename remove_all<MatrixType>::type>::value }; }; template<typename MatrixType, bool MatrixFree = !internal::is_ref_compatible<MatrixType>::value> class generic_matrix_wrapper; // We have an explicit matrix at hand, compatible with Ref<> template<typename MatrixType> class generic_matrix_wrapper<MatrixType,false> { public: typedef Ref<const MatrixType> ActualMatrixType; template<int UpLo> struct ConstSelfAdjointViewReturnType { typedef typename ActualMatrixType::template ConstSelfAdjointViewReturnType<UpLo>::Type Type; }; enum { MatrixFree = false }; generic_matrix_wrapper() : m_dummy(0,0), m_matrix(m_dummy) {} template<typename InputType> generic_matrix_wrapper(const InputType &mat) : m_matrix(mat) {} const ActualMatrixType& matrix() const { return m_matrix; } template<typename MatrixDerived> void grab(const EigenBase<MatrixDerived> &mat) { m_matrix.~Ref<const MatrixType>(); ::new (&m_matrix) Ref<const MatrixType>(mat.derived()); } void grab(const Ref<const MatrixType> &mat) { if(&(mat.derived()) != &m_matrix) { m_matrix.~Ref<const MatrixType>(); ::new (&m_matrix) Ref<const MatrixType>(mat); } } protected: MatrixType m_dummy; // used to default initialize the Ref<> object ActualMatrixType m_matrix; }; // MatrixType is not compatible with Ref<> -> matrix-free wrapper template<typename MatrixType> class generic_matrix_wrapper<MatrixType,true> { public: typedef MatrixType ActualMatrixType; template<int UpLo> struct ConstSelfAdjointViewReturnType { typedef ActualMatrixType Type; }; enum { MatrixFree = true }; generic_matrix_wrapper() : mp_matrix(0) {} generic_matrix_wrapper(const MatrixType &mat) : mp_matrix(&mat) {} const ActualMatrixType& matrix() const { return *mp_matrix; } void grab(const MatrixType &mat) { mp_matrix = &mat; } protected: const ActualMatrixType *mp_matrix; }; } /** \ingroup IterativeLinearSolvers_Module * \brief Base class for linear iterative solvers * * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner */ template< typename Derived> class IterativeSolverBase : public SparseSolverBase<Derived> { protected: typedef SparseSolverBase<Derived> Base; using Base::m_isInitialized; public: typedef typename internal::traits<Derived>::MatrixType MatrixType; typedef typename internal::traits<Derived>::Preconditioner Preconditioner; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::StorageIndex StorageIndex; typedef typename MatrixType::RealScalar RealScalar; enum { 
ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; public: using Base::derived; /** Default constructor. */ IterativeSolverBase() { init(); } /** Initialize the solver with matrix \a A for further \c Ax=b solving. * * This constructor is a shortcut for the default constructor followed * by a call to compute(). * * \warning this class stores a reference to the matrix A as well as some * precomputed values that depend on it. Therefore, if \a A is changed * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. */ template<typename MatrixDerived> explicit IterativeSolverBase(const EigenBase<MatrixDerived>& A) : m_matrixWrapper(A.derived()) { init(); compute(matrix()); } ~IterativeSolverBase() {} /** Initializes the iterative solver for the sparsity pattern of the matrix \a A for further solving \c Ax=b problems. * * Currently, this function mostly calls analyzePattern on the preconditioner. In the future * we might, for instance, implement column reordering for faster matrix vector products. */ template<typename MatrixDerived> Derived& analyzePattern(const EigenBase<MatrixDerived>& A) { grab(A.derived()); m_preconditioner.analyzePattern(matrix()); m_isInitialized = true; m_analysisIsOk = true; m_info = m_preconditioner.info(); return derived(); } /** Initializes the iterative solver with the numerical values of the matrix \a A for further solving \c Ax=b problems. * * Currently, this function mostly calls factorize on the preconditioner. * * \warning this class stores a reference to the matrix A as well as some * precomputed values that depend on it. Therefore, if \a A is changed * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. */ template<typename MatrixDerived> Derived& factorize(const EigenBase<MatrixDerived>& A) { eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); grab(A.derived()); m_preconditioner.factorize(matrix()); m_factorizationIsOk = true; m_info = m_preconditioner.info(); return derived(); } /** Initializes the iterative solver with the matrix \a A for further solving \c Ax=b problems. * * Currently, this function mostly initializes/computes the preconditioner. In the future * we might, for instance, implement column reordering for faster matrix vector products. * * \warning this class stores a reference to the matrix A as well as some * precomputed values that depend on it. Therefore, if \a A is changed * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. */ template<typename MatrixDerived> Derived& compute(const EigenBase<MatrixDerived>& A) { grab(A.derived()); m_preconditioner.compute(matrix()); m_isInitialized = true; m_analysisIsOk = true; m_factorizationIsOk = true; m_info = m_preconditioner.info(); return derived(); } /** \internal */ Index rows() const { return matrix().rows(); } /** \internal */ Index cols() const { return matrix().cols(); } /** \returns the tolerance threshold used by the stopping criteria. * \sa setTolerance() */ RealScalar tolerance() const { return m_tolerance; } /** Sets the tolerance threshold used by the stopping criteria. * * This value is used as an upper bound to the relative residual error: |Ax-b|/|b|. 
* The default value is the machine precision given by NumTraits<Scalar>::epsilon() */ Derived& setTolerance(const RealScalar& tolerance) { m_tolerance = tolerance; return derived(); } /** \returns a read-write reference to the preconditioner for custom configuration. */ Preconditioner& preconditioner() { return m_preconditioner; } /** \returns a read-only reference to the preconditioner. */ const Preconditioner& preconditioner() const { return m_preconditioner; } /** \returns the max number of iterations. * It is either the value setted by setMaxIterations or, by default, * twice the number of columns of the matrix. */ Index maxIterations() const { return (m_maxIterations<0) ? 2*matrix().cols() : m_maxIterations; } /** Sets the max number of iterations. * Default is twice the number of columns of the matrix. */ Derived& setMaxIterations(Index maxIters) { m_maxIterations = maxIters; return derived(); } /** \returns the number of iterations performed during the last solve */ Index iterations() const { eigen_assert(m_isInitialized && "ConjugateGradient is not initialized."); return m_iterations; } /** \returns the tolerance error reached during the last solve. * It is a close approximation of the true relative residual error |Ax-b|/|b|. */ RealScalar error() const { eigen_assert(m_isInitialized && "ConjugateGradient is not initialized."); return m_error; } /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A * and \a x0 as an initial solution. * * \sa solve(), compute() */ template<typename Rhs,typename Guess> inline const SolveWithGuess<Derived, Rhs, Guess> solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const { eigen_assert(m_isInitialized && "Solver is not initialized."); eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b"); return SolveWithGuess<Derived, Rhs, Guess>(derived(), b.derived(), x0); } /** \returns Success if the iterations converged, and NoConvergence otherwise. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized."); return m_info; } /** \internal */ template<typename Rhs, typename DestDerived> void _solve_impl(const Rhs& b, SparseMatrixBase<DestDerived> &aDest) const { eigen_assert(rows()==b.rows()); Index rhsCols = b.cols(); Index size = b.rows(); DestDerived& dest(aDest.derived()); typedef typename DestDerived::Scalar DestScalar; Eigen::Matrix<DestScalar,Dynamic,1> tb(size); Eigen::Matrix<DestScalar,Dynamic,1> tx(cols()); // We do not directly fill dest because sparse expressions have to be free of aliasing issue. // For non square least-square problems, b and dest might not have the same size whereas they might alias each-other. 
typename DestDerived::PlainObject tmp(cols(),rhsCols); for(Index k=0; k<rhsCols; ++k) { tb = b.col(k); tx = derived().solve(tb); tmp.col(k) = tx.sparseView(0); } dest.swap(tmp); } protected: void init() { m_isInitialized = false; m_analysisIsOk = false; m_factorizationIsOk = false; m_maxIterations = -1; m_tolerance = NumTraits<Scalar>::epsilon(); } typedef internal::generic_matrix_wrapper<MatrixType> MatrixWrapper; typedef typename MatrixWrapper::ActualMatrixType ActualMatrixType; const ActualMatrixType& matrix() const { return m_matrixWrapper.matrix(); } template<typename InputType> void grab(const InputType &A) { m_matrixWrapper.grab(A); } MatrixWrapper m_matrixWrapper; Preconditioner m_preconditioner; Index m_maxIterations; RealScalar m_tolerance; mutable RealScalar m_error; mutable Index m_iterations; mutable ComputationInfo m_info; mutable bool m_analysisIsOk, m_factorizationIsOk; }; } // end namespace Eigen #endif // EIGEN_ITERATIVE_SOLVER_BASE_H
Unknown
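Sketch of the analyzePattern()/factorize() split documented in IterativeSolverBase above, under the assumption of a sequence of matrices that share one sparsity pattern but differ in their numerical values; the shift loop and the buildMatrix helper are hypothetical, introduced only for this example:

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>
#include <vector>

// Build a tridiagonal SPD matrix whose diagonal is shifted by 'shift'.
static Eigen::SparseMatrix<double> buildMatrix(int n, double shift)
{
  std::vector<Eigen::Triplet<double>> coeffs;
  for(int i = 0; i < n; ++i)
  {
    coeffs.emplace_back(i, i, 2.0 + shift);
    if(i > 0)   coeffs.emplace_back(i, i-1, -1.0);
    if(i < n-1) coeffs.emplace_back(i, i+1, -1.0);
  }
  Eigen::SparseMatrix<double> A(n, n);
  A.setFromTriplets(coeffs.begin(), coeffs.end());
  return A;
}

int main()
{
  const int n = 300;
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  Eigen::ConjugateGradient<Eigen::SparseMatrix<double>, Eigen::Lower|Eigen::Upper> solver;
  solver.setTolerance(1e-8);
  solver.setMaxIterations(5*n);

  // analyzePattern() once for the fixed sparsity pattern, then factorize()
  // each time only the numerical values change. The solver keeps a reference
  // to A, so A must stay alive until the corresponding solve() is done.
  Eigen::SparseMatrix<double> A = buildMatrix(n, 0.0);
  solver.analyzePattern(A);
  for(double shift : {0.0, 0.5, 1.0})
  {
    A = buildMatrix(n, shift);
    solver.factorize(A);
    Eigen::VectorXd x = solver.solve(b);
    std::cout << "shift " << shift
              << ": info=" << (solver.info() == Eigen::Success ? "Success" : "NoConvergence")
              << ", iterations=" << solver.iterations()
              << ", error=" << solver.error() << std::endl;
  }
  return 0;
}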
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/SparseCholesky/SimplicialCholesky.h
.h
24,017
690
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2012 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SIMPLICIAL_CHOLESKY_H #define EIGEN_SIMPLICIAL_CHOLESKY_H namespace Eigen { enum SimplicialCholeskyMode { SimplicialCholeskyLLT, SimplicialCholeskyLDLT }; namespace internal { template<typename CholMatrixType, typename InputMatrixType> struct simplicial_cholesky_grab_input { typedef CholMatrixType const * ConstCholMatrixPtr; static void run(const InputMatrixType& input, ConstCholMatrixPtr &pmat, CholMatrixType &tmp) { tmp = input; pmat = &tmp; } }; template<typename MatrixType> struct simplicial_cholesky_grab_input<MatrixType,MatrixType> { typedef MatrixType const * ConstMatrixPtr; static void run(const MatrixType& input, ConstMatrixPtr &pmat, MatrixType &/*tmp*/) { pmat = &input; } }; } // end namespace internal /** \ingroup SparseCholesky_Module * \brief A base class for direct sparse Cholesky factorizations * * This is a base class for LL^T and LDL^T Cholesky factorizations of sparse matrices that are * selfadjoint and positive definite. These factorizations allow for solving A.X = B where * X and B can be either dense or sparse. * * In order to reduce the fill-in, a symmetric permutation P is applied prior to the factorization * such that the factorized matrix is P A P^-1. * * \tparam Derived the type of the derived class, that is the actual factorization type. * */ template<typename Derived> class SimplicialCholeskyBase : public SparseSolverBase<Derived> { typedef SparseSolverBase<Derived> Base; using Base::m_isInitialized; public: typedef typename internal::traits<Derived>::MatrixType MatrixType; typedef typename internal::traits<Derived>::OrderingType OrderingType; enum { UpLo = internal::traits<Derived>::UpLo }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::StorageIndex StorageIndex; typedef SparseMatrix<Scalar,ColMajor,StorageIndex> CholMatrixType; typedef CholMatrixType const * ConstCholMatrixPtr; typedef Matrix<Scalar,Dynamic,1> VectorType; typedef Matrix<StorageIndex,Dynamic,1> VectorI; enum { ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; public: using Base::derived; /** Default constructor */ SimplicialCholeskyBase() : m_info(Success), m_shiftOffset(0), m_shiftScale(1) {} explicit SimplicialCholeskyBase(const MatrixType& matrix) : m_info(Success), m_shiftOffset(0), m_shiftScale(1) { derived().compute(matrix); } ~SimplicialCholeskyBase() { } Derived& derived() { return *static_cast<Derived*>(this); } const Derived& derived() const { return *static_cast<const Derived*>(this); } inline Index cols() const { return m_matrix.cols(); } inline Index rows() const { return m_matrix.rows(); } /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, * \c NumericalIssue if the matrix.appears to be negative. 
*/ ComputationInfo info() const { eigen_assert(m_isInitialized && "Decomposition is not initialized."); return m_info; } /** \returns the permutation P * \sa permutationPinv() */ const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& permutationP() const { return m_P; } /** \returns the inverse P^-1 of the permutation P * \sa permutationP() */ const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& permutationPinv() const { return m_Pinv; } /** Sets the shift parameters that will be used to adjust the diagonal coefficients during the numerical factorization. * * During the numerical factorization, the diagonal coefficients are transformed by the following linear model:\n * \c d_ii = \a offset + \a scale * \c d_ii * * The default is the identity transformation with \a offset=0, and \a scale=1. * * \returns a reference to \c *this. */ Derived& setShift(const RealScalar& offset, const RealScalar& scale = 1) { m_shiftOffset = offset; m_shiftScale = scale; return derived(); } #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal */ template<typename Stream> void dumpMemory(Stream& s) { int total = 0; s << " L: " << ((total+=(m_matrix.cols()+1) * sizeof(int) + m_matrix.nonZeros()*(sizeof(int)+sizeof(Scalar))) >> 20) << "Mb" << "\n"; s << " diag: " << ((total+=m_diag.size() * sizeof(Scalar)) >> 20) << "Mb" << "\n"; s << " tree: " << ((total+=m_parent.size() * sizeof(int)) >> 20) << "Mb" << "\n"; s << " nonzeros: " << ((total+=m_nonZerosPerCol.size() * sizeof(int)) >> 20) << "Mb" << "\n"; s << " perm: " << ((total+=m_P.size() * sizeof(int)) >> 20) << "Mb" << "\n"; s << " perm^-1: " << ((total+=m_Pinv.size() * sizeof(int)) >> 20) << "Mb" << "\n"; s << " TOTAL: " << (total>> 20) << "Mb" << "\n"; } /** \internal */ template<typename Rhs,typename Dest> void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const { eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); eigen_assert(m_matrix.rows()==b.rows()); if(m_info!=Success) return; if(m_P.size()>0) dest = m_P * b; else dest = b; if(m_matrix.nonZeros()>0) // otherwise L==I derived().matrixL().solveInPlace(dest); if(m_diag.size()>0) dest = m_diag.asDiagonal().inverse() * dest; if (m_matrix.nonZeros()>0) // otherwise U==I derived().matrixU().solveInPlace(dest); if(m_P.size()>0) dest = m_Pinv * dest; } template<typename Rhs,typename Dest> void _solve_impl(const SparseMatrixBase<Rhs> &b, SparseMatrixBase<Dest> &dest) const { internal::solve_sparse_through_dense_panels(derived(), b, dest); } #endif // EIGEN_PARSED_BY_DOXYGEN protected: /** Computes the sparse Cholesky decomposition of \a matrix */ template<bool DoLDLT> void compute(const MatrixType& matrix) { eigen_assert(matrix.rows()==matrix.cols()); Index size = matrix.cols(); CholMatrixType tmp(size,size); ConstCholMatrixPtr pmat; ordering(matrix, pmat, tmp); analyzePattern_preordered(*pmat, DoLDLT); factorize_preordered<DoLDLT>(*pmat); } template<bool DoLDLT> void factorize(const MatrixType& a) { eigen_assert(a.rows()==a.cols()); Index size = a.cols(); CholMatrixType tmp(size,size); ConstCholMatrixPtr pmat; if(m_P.size()==0 && (UpLo&Upper)==Upper) { // If there is no ordering, try to directly use the input matrix without any copy internal::simplicial_cholesky_grab_input<CholMatrixType,MatrixType>::run(a, pmat, tmp); } else { tmp.template selfadjointView<Upper>() = a.template selfadjointView<UpLo>().twistedBy(m_P); pmat = &tmp; } factorize_preordered<DoLDLT>(*pmat); } template<bool DoLDLT> void 
factorize_preordered(const CholMatrixType& a); void analyzePattern(const MatrixType& a, bool doLDLT) { eigen_assert(a.rows()==a.cols()); Index size = a.cols(); CholMatrixType tmp(size,size); ConstCholMatrixPtr pmat; ordering(a, pmat, tmp); analyzePattern_preordered(*pmat,doLDLT); } void analyzePattern_preordered(const CholMatrixType& a, bool doLDLT); void ordering(const MatrixType& a, ConstCholMatrixPtr &pmat, CholMatrixType& ap); /** keeps off-diagonal entries; drops diagonal entries */ struct keep_diag { inline bool operator() (const Index& row, const Index& col, const Scalar&) const { return row!=col; } }; mutable ComputationInfo m_info; bool m_factorizationIsOk; bool m_analysisIsOk; CholMatrixType m_matrix; VectorType m_diag; // the diagonal coefficients (LDLT mode) VectorI m_parent; // elimination tree VectorI m_nonZerosPerCol; PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_P; // the permutation PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_Pinv; // the inverse permutation RealScalar m_shiftOffset; RealScalar m_shiftScale; }; template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::StorageIndex> > class SimplicialLLT; template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::StorageIndex> > class SimplicialLDLT; template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::StorageIndex> > class SimplicialCholesky; namespace internal { template<typename _MatrixType, int _UpLo, typename _Ordering> struct traits<SimplicialLLT<_MatrixType,_UpLo,_Ordering> > { typedef _MatrixType MatrixType; typedef _Ordering OrderingType; enum { UpLo = _UpLo }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::StorageIndex StorageIndex; typedef SparseMatrix<Scalar, ColMajor, StorageIndex> CholMatrixType; typedef TriangularView<const CholMatrixType, Eigen::Lower> MatrixL; typedef TriangularView<const typename CholMatrixType::AdjointReturnType, Eigen::Upper> MatrixU; static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); } static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); } }; template<typename _MatrixType,int _UpLo, typename _Ordering> struct traits<SimplicialLDLT<_MatrixType,_UpLo,_Ordering> > { typedef _MatrixType MatrixType; typedef _Ordering OrderingType; enum { UpLo = _UpLo }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::StorageIndex StorageIndex; typedef SparseMatrix<Scalar, ColMajor, StorageIndex> CholMatrixType; typedef TriangularView<const CholMatrixType, Eigen::UnitLower> MatrixL; typedef TriangularView<const typename CholMatrixType::AdjointReturnType, Eigen::UnitUpper> MatrixU; static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); } static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); } }; template<typename _MatrixType, int _UpLo, typename _Ordering> struct traits<SimplicialCholesky<_MatrixType,_UpLo,_Ordering> > { typedef _MatrixType MatrixType; typedef _Ordering OrderingType; enum { UpLo = _UpLo }; }; } /** \ingroup SparseCholesky_Module * \class SimplicialLLT * \brief A direct sparse LLT Cholesky factorizations * * This class provides a LL^T Cholesky factorizations of sparse matrices that are * selfadjoint and positive definite. The factorization allows for solving A.X = B where * X and B can be either dense or sparse. 
* * In order to reduce the fill-in, a symmetric permutation P is applied prior to the factorization * such that the factorized matrix is P A P^-1. * * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower * or Upper. Default is Lower. * \tparam _Ordering The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering<> * * \implsparsesolverconcept * * \sa class SimplicialLDLT, class AMDOrdering, class NaturalOrdering */ template<typename _MatrixType, int _UpLo, typename _Ordering> class SimplicialLLT : public SimplicialCholeskyBase<SimplicialLLT<_MatrixType,_UpLo,_Ordering> > { public: typedef _MatrixType MatrixType; enum { UpLo = _UpLo }; typedef SimplicialCholeskyBase<SimplicialLLT> Base; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::StorageIndex StorageIndex; typedef SparseMatrix<Scalar,ColMajor,Index> CholMatrixType; typedef Matrix<Scalar,Dynamic,1> VectorType; typedef internal::traits<SimplicialLLT> Traits; typedef typename Traits::MatrixL MatrixL; typedef typename Traits::MatrixU MatrixU; public: /** Default constructor */ SimplicialLLT() : Base() {} /** Constructs and performs the LLT factorization of \a matrix */ explicit SimplicialLLT(const MatrixType& matrix) : Base(matrix) {} /** \returns an expression of the factor L */ inline const MatrixL matrixL() const { eigen_assert(Base::m_factorizationIsOk && "Simplicial LLT not factorized"); return Traits::getL(Base::m_matrix); } /** \returns an expression of the factor U (= L^*) */ inline const MatrixU matrixU() const { eigen_assert(Base::m_factorizationIsOk && "Simplicial LLT not factorized"); return Traits::getU(Base::m_matrix); } /** Computes the sparse Cholesky decomposition of \a matrix */ SimplicialLLT& compute(const MatrixType& matrix) { Base::template compute<false>(matrix); return *this; } /** Performs a symbolic decomposition on the sparcity of \a matrix. * * This function is particularly useful when solving for several problems having the same structure. * * \sa factorize() */ void analyzePattern(const MatrixType& a) { Base::analyzePattern(a, false); } /** Performs a numeric decomposition of \a matrix * * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed. * * \sa analyzePattern() */ void factorize(const MatrixType& a) { Base::template factorize<false>(a); } /** \returns the determinant of the underlying matrix from the current factorization */ Scalar determinant() const { Scalar detL = Base::m_matrix.diagonal().prod(); return numext::abs2(detL); } }; /** \ingroup SparseCholesky_Module * \class SimplicialLDLT * \brief A direct sparse LDLT Cholesky factorizations without square root. * * This class provides a LDL^T Cholesky factorizations without square root of sparse matrices that are * selfadjoint and positive definite. The factorization allows for solving A.X = B where * X and B can be either dense or sparse. * * In order to reduce the fill-in, a symmetric permutation P is applied prior to the factorization * such that the factorized matrix is P A P^-1. * * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower * or Upper. Default is Lower. 
* \tparam _Ordering The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering<> * * \implsparsesolverconcept * * \sa class SimplicialLLT, class AMDOrdering, class NaturalOrdering */ template<typename _MatrixType, int _UpLo, typename _Ordering> class SimplicialLDLT : public SimplicialCholeskyBase<SimplicialLDLT<_MatrixType,_UpLo,_Ordering> > { public: typedef _MatrixType MatrixType; enum { UpLo = _UpLo }; typedef SimplicialCholeskyBase<SimplicialLDLT> Base; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::StorageIndex StorageIndex; typedef SparseMatrix<Scalar,ColMajor,StorageIndex> CholMatrixType; typedef Matrix<Scalar,Dynamic,1> VectorType; typedef internal::traits<SimplicialLDLT> Traits; typedef typename Traits::MatrixL MatrixL; typedef typename Traits::MatrixU MatrixU; public: /** Default constructor */ SimplicialLDLT() : Base() {} /** Constructs and performs the LLT factorization of \a matrix */ explicit SimplicialLDLT(const MatrixType& matrix) : Base(matrix) {} /** \returns a vector expression of the diagonal D */ inline const VectorType vectorD() const { eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLT not factorized"); return Base::m_diag; } /** \returns an expression of the factor L */ inline const MatrixL matrixL() const { eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLT not factorized"); return Traits::getL(Base::m_matrix); } /** \returns an expression of the factor U (= L^*) */ inline const MatrixU matrixU() const { eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLT not factorized"); return Traits::getU(Base::m_matrix); } /** Computes the sparse Cholesky decomposition of \a matrix */ SimplicialLDLT& compute(const MatrixType& matrix) { Base::template compute<true>(matrix); return *this; } /** Performs a symbolic decomposition on the sparcity of \a matrix. * * This function is particularly useful when solving for several problems having the same structure. * * \sa factorize() */ void analyzePattern(const MatrixType& a) { Base::analyzePattern(a, true); } /** Performs a numeric decomposition of \a matrix * * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed. 
* * \sa analyzePattern() */ void factorize(const MatrixType& a) { Base::template factorize<true>(a); } /** \returns the determinant of the underlying matrix from the current factorization */ Scalar determinant() const { return Base::m_diag.prod(); } }; /** \deprecated use SimplicialLDLT or class SimplicialLLT * \ingroup SparseCholesky_Module * \class SimplicialCholesky * * \sa class SimplicialLDLT, class SimplicialLLT */ template<typename _MatrixType, int _UpLo, typename _Ordering> class SimplicialCholesky : public SimplicialCholeskyBase<SimplicialCholesky<_MatrixType,_UpLo,_Ordering> > { public: typedef _MatrixType MatrixType; enum { UpLo = _UpLo }; typedef SimplicialCholeskyBase<SimplicialCholesky> Base; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::StorageIndex StorageIndex; typedef SparseMatrix<Scalar,ColMajor,StorageIndex> CholMatrixType; typedef Matrix<Scalar,Dynamic,1> VectorType; typedef internal::traits<SimplicialCholesky> Traits; typedef internal::traits<SimplicialLDLT<MatrixType,UpLo> > LDLTTraits; typedef internal::traits<SimplicialLLT<MatrixType,UpLo> > LLTTraits; public: SimplicialCholesky() : Base(), m_LDLT(true) {} explicit SimplicialCholesky(const MatrixType& matrix) : Base(), m_LDLT(true) { compute(matrix); } SimplicialCholesky& setMode(SimplicialCholeskyMode mode) { switch(mode) { case SimplicialCholeskyLLT: m_LDLT = false; break; case SimplicialCholeskyLDLT: m_LDLT = true; break; default: break; } return *this; } inline const VectorType vectorD() const { eigen_assert(Base::m_factorizationIsOk && "Simplicial Cholesky not factorized"); return Base::m_diag; } inline const CholMatrixType rawMatrix() const { eigen_assert(Base::m_factorizationIsOk && "Simplicial Cholesky not factorized"); return Base::m_matrix; } /** Computes the sparse Cholesky decomposition of \a matrix */ SimplicialCholesky& compute(const MatrixType& matrix) { if(m_LDLT) Base::template compute<true>(matrix); else Base::template compute<false>(matrix); return *this; } /** Performs a symbolic decomposition on the sparcity of \a matrix. * * This function is particularly useful when solving for several problems having the same structure. * * \sa factorize() */ void analyzePattern(const MatrixType& a) { Base::analyzePattern(a, m_LDLT); } /** Performs a numeric decomposition of \a matrix * * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed. 
* * \sa analyzePattern() */ void factorize(const MatrixType& a) { if(m_LDLT) Base::template factorize<true>(a); else Base::template factorize<false>(a); } /** \internal */ template<typename Rhs,typename Dest> void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const { eigen_assert(Base::m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); eigen_assert(Base::m_matrix.rows()==b.rows()); if(Base::m_info!=Success) return; if(Base::m_P.size()>0) dest = Base::m_P * b; else dest = b; if(Base::m_matrix.nonZeros()>0) // otherwise L==I { if(m_LDLT) LDLTTraits::getL(Base::m_matrix).solveInPlace(dest); else LLTTraits::getL(Base::m_matrix).solveInPlace(dest); } if(Base::m_diag.size()>0) dest = Base::m_diag.real().asDiagonal().inverse() * dest; if (Base::m_matrix.nonZeros()>0) // otherwise I==I { if(m_LDLT) LDLTTraits::getU(Base::m_matrix).solveInPlace(dest); else LLTTraits::getU(Base::m_matrix).solveInPlace(dest); } if(Base::m_P.size()>0) dest = Base::m_Pinv * dest; } /** \internal */ template<typename Rhs,typename Dest> void _solve_impl(const SparseMatrixBase<Rhs> &b, SparseMatrixBase<Dest> &dest) const { internal::solve_sparse_through_dense_panels(*this, b, dest); } Scalar determinant() const { if(m_LDLT) { return Base::m_diag.prod(); } else { Scalar detL = Diagonal<const CholMatrixType>(Base::m_matrix).prod(); return numext::abs2(detL); } } protected: bool m_LDLT; }; template<typename Derived> void SimplicialCholeskyBase<Derived>::ordering(const MatrixType& a, ConstCholMatrixPtr &pmat, CholMatrixType& ap) { eigen_assert(a.rows()==a.cols()); const Index size = a.rows(); pmat = &ap; // Note that ordering methods compute the inverse permutation if(!internal::is_same<OrderingType,NaturalOrdering<Index> >::value) { { CholMatrixType C; C = a.template selfadjointView<UpLo>(); OrderingType ordering; ordering(C,m_Pinv); } if(m_Pinv.size()>0) m_P = m_Pinv.inverse(); else m_P.resize(0); ap.resize(size,size); ap.template selfadjointView<Upper>() = a.template selfadjointView<UpLo>().twistedBy(m_P); } else { m_Pinv.resize(0); m_P.resize(0); if(int(UpLo)==int(Lower) || MatrixType::IsRowMajor) { // we have to transpose the lower part to to the upper one ap.resize(size,size); ap.template selfadjointView<Upper>() = a.template selfadjointView<UpLo>(); } else internal::simplicial_cholesky_grab_input<CholMatrixType,MatrixType>::run(a, pmat, ap); } } } // end namespace Eigen #endif // EIGEN_SIMPLICIAL_CHOLESKY_H
Unknown
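Illustrative sketch (not from the Eigen sources) of the SimplicialLDLT and SimplicialLLT solvers declared above, again on an assumed SPD tridiagonal test matrix; it shows the symbolic/numeric split, info() checking, and determinant():

#include <Eigen/Sparse>
#include <Eigen/SparseCholesky>
#include <iostream>
#include <vector>

int main()
{
  using SpMat = Eigen::SparseMatrix<double>;
  const int n = 200;

  // SPD tridiagonal test matrix.
  std::vector<Eigen::Triplet<double>> coeffs;
  for(int i = 0; i < n; ++i)
  {
    coeffs.emplace_back(i, i, 2.0);
    if(i > 0)   coeffs.emplace_back(i, i-1, -1.0);
    if(i < n-1) coeffs.emplace_back(i, i+1, -1.0);
  }
  SpMat A(n, n);
  A.setFromTriplets(coeffs.begin(), coeffs.end());
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  // LDLT variant: no square roots; the diagonal D is exposed through vectorD().
  Eigen::SimplicialLDLT<SpMat, Eigen::Lower> ldlt;
  ldlt.analyzePattern(A);   // symbolic phase, reusable for matrices with the same pattern
  ldlt.factorize(A);        // numeric phase
  if(ldlt.info() != Eigen::Success)
  {
    std::cerr << "factorization failed" << std::endl;
    return 1;
  }
  Eigen::VectorXd x = ldlt.solve(b);
  std::cout << "LDLT residual:  " << (A*x - b).norm() << "\n"
            << "determinant:    " << ldlt.determinant() << std::endl;

  // LLT variant through the one-shot compute-on-construction form.
  Eigen::SimplicialLLT<SpMat> llt(A);
  Eigen::VectorXd y = llt.solve(b);
  std::cout << "LLT residual:   " << (A*y - b).norm() << std::endl;
  return 0;
}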